author    Christian Ehrhardt <christian.ehrhardt@canonical.com>  2016-12-08 14:07:29 +0100
committer Christian Ehrhardt <christian.ehrhardt@canonical.com>  2016-12-08 14:10:05 +0100
commit    6b3e017e5d25f15da73f7700f7f2ac553ef1a2e9 (patch)
tree      1b1fb3f903b2282e261ade69e3c17952b3fd3464 /drivers/crypto
parent    32e04ea00cd159613e04acef75e52bfca6eeff2f (diff)
Imported Upstream version 16.11
Change-Id: I1944c65ddc88a9ad70f8c0eb6731552b84fbcb77
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Makefile                             |    2
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_pmd.c            |   33
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c          |   22
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c      |   10
-rw-r--r--  drivers/crypto/kasumi/rte_kasumi_pmd.c              |   22
-rw-r--r--  drivers/crypto/null/null_crypto_pmd.c               |   22
-rw-r--r--  drivers/crypto/openssl/Makefile                     |   60
-rw-r--r--  drivers/crypto/openssl/rte_openssl_pmd.c            | 1062
-rw-r--r--  drivers/crypto/openssl/rte_openssl_pmd_ops.c        |  708
-rw-r--r--  drivers/crypto/openssl/rte_openssl_pmd_private.h    |  174
-rw-r--r--  drivers/crypto/openssl/rte_pmd_openssl_version.map  |    3
-rw-r--r--  drivers/crypto/qat/qat_adf/icp_qat_hw.h             |   15
-rw-r--r--  drivers/crypto/qat/qat_adf/qat_algs.h               |   22
-rw-r--r--  drivers/crypto/qat/qat_adf/qat_algs_build_desc.c    |  692
-rw-r--r--  drivers/crypto/qat/qat_crypto.c                     |  524
-rw-r--r--  drivers/crypto/qat/qat_qp.c                         |    2
-rw-r--r--  drivers/crypto/qat/rte_qat_cryptodev.c              |   26
-rw-r--r--  drivers/crypto/snow3g/rte_snow3g_pmd.c              |   24
-rw-r--r--  drivers/crypto/snow3g/rte_snow3g_pmd_ops.c          |    6
-rw-r--r--  drivers/crypto/zuc/Makefile                         |   69
-rw-r--r--  drivers/crypto/zuc/rte_pmd_zuc_version.map          |    3
-rw-r--r--  drivers/crypto/zuc/rte_zuc_pmd.c                    |  550
-rw-r--r--  drivers/crypto/zuc/rte_zuc_pmd_ops.c                |  342
-rw-r--r--  drivers/crypto/zuc/rte_zuc_pmd_private.h            |  108
24 files changed, 4006 insertions, 495 deletions
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index dc4ef7f9..745c6146 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -33,9 +33,11 @@ include $(RTE_SDK)/mk/rte.vars.mk
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += openssl
DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g
DIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += kasumi
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += zuc
DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index dc0b0337..dba5e158 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -37,7 +37,7 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_dev.h>
+#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -230,11 +230,20 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op,
op->cipher.data.offset);
/* sanity checks */
- if (op->cipher.iv.length != 16 && op->cipher.iv.length != 0) {
+ if (op->cipher.iv.length != 16 && op->cipher.iv.length != 12 &&
+ op->cipher.iv.length != 0) {
GCM_LOG_ERR("iv");
return -1;
}
+ /*
+ * GCM working in 12B IV mode => for the 16B pre-counter block we need
+ * to set the big-endian LSB to 1; the driver expects that 16B are
+ * allocated
+ */
+ if (op->cipher.iv.length == 12) {
+ op->cipher.iv.data[15] = 1;
+ }
+
if (op->auth.aad.length != 12 && op->auth.aad.length != 8 &&
op->auth.aad.length != 0) {
GCM_LOG_ERR("iv");
@@ -395,7 +404,7 @@ aesni_gcm_pmd_dequeue_burst(void *queue_pair,
return nb_dequeued;
}
-static int aesni_gcm_uninit(const char *name);
+static int aesni_gcm_remove(const char *name);
static int
aesni_gcm_create(const char *name,
@@ -477,12 +486,12 @@ aesni_gcm_create(const char *name,
init_error:
GCM_LOG_ERR("driver %s: create failed", name);
- aesni_gcm_uninit(crypto_dev_name);
+ aesni_gcm_remove(crypto_dev_name);
return -EFAULT;
}
static int
-aesni_gcm_init(const char *name, const char *input_args)
+aesni_gcm_probe(const char *name, const char *input_args)
{
struct rte_crypto_vdev_init_params init_params = {
RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
@@ -503,7 +512,7 @@ aesni_gcm_init(const char *name, const char *input_args)
}
static int
-aesni_gcm_uninit(const char *name)
+aesni_gcm_remove(const char *name)
{
if (name == NULL)
return -EINVAL;
@@ -514,14 +523,14 @@ aesni_gcm_uninit(const char *name)
return 0;
}
-static struct rte_driver aesni_gcm_pmd_drv = {
- .type = PMD_VDEV,
- .init = aesni_gcm_init,
- .uninit = aesni_gcm_uninit
+static struct rte_vdev_driver aesni_gcm_pmd_drv = {
+ .probe = aesni_gcm_probe,
+ .remove = aesni_gcm_remove
};
-PMD_REGISTER_DRIVER(aesni_gcm_pmd_drv, CRYPTODEV_NAME_AESNI_GCM_PMD);
-DRIVER_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index b2d0c8ca..f07cd077 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -34,7 +34,7 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_dev.h>
+#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -595,7 +595,7 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
}
-static int cryptodev_aesni_mb_uninit(const char *name);
+static int cryptodev_aesni_mb_remove(const char *name);
static int
cryptodev_aesni_mb_create(const char *name,
@@ -675,13 +675,13 @@ cryptodev_aesni_mb_create(const char *name,
init_error:
MB_LOG_ERR("driver %s: cryptodev_aesni_create failed", name);
- cryptodev_aesni_mb_uninit(crypto_dev_name);
+ cryptodev_aesni_mb_remove(crypto_dev_name);
return -EFAULT;
}
static int
-cryptodev_aesni_mb_init(const char *name,
+cryptodev_aesni_mb_probe(const char *name,
const char *input_args)
{
struct rte_crypto_vdev_init_params init_params = {
@@ -703,7 +703,7 @@ cryptodev_aesni_mb_init(const char *name,
}
static int
-cryptodev_aesni_mb_uninit(const char *name)
+cryptodev_aesni_mb_remove(const char *name)
{
if (name == NULL)
return -EINVAL;
@@ -714,14 +714,14 @@ cryptodev_aesni_mb_uninit(const char *name)
return 0;
}
-static struct rte_driver cryptodev_aesni_mb_pmd_drv = {
- .type = PMD_VDEV,
- .init = cryptodev_aesni_mb_init,
- .uninit = cryptodev_aesni_mb_uninit
+static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
+ .probe = cryptodev_aesni_mb_probe,
+ .remove = cryptodev_aesni_mb_remove
};
-PMD_REGISTER_DRIVER(cryptodev_aesni_mb_pmd_drv, CRYPTODEV_NAME_AESNI_MB_PMD);
-DRIVER_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index d3c46ace..3d49e2ae 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -311,8 +311,14 @@ aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
static int
aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
- if (dev->data->queue_pairs[qp_id] != NULL) {
- rte_free(dev->data->queue_pairs[qp_id]);
+ struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
+ struct rte_ring *r = NULL;
+
+ if (qp != NULL) {
+ r = rte_ring_lookup(qp->name);
+ if (r)
+ rte_ring_free(r);
+ rte_free(qp);
dev->data->queue_pairs[qp_id] = NULL;
}
return 0;
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd.c b/drivers/crypto/kasumi/rte_kasumi_pmd.c
index df1eb529..b119da28 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd.c
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd.c
@@ -35,7 +35,7 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_dev.h>
+#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -556,7 +556,7 @@ kasumi_pmd_dequeue_burst(void *queue_pair,
return nb_dequeued;
}
-static int cryptodev_kasumi_uninit(const char *name);
+static int cryptodev_kasumi_remove(const char *name);
static int
cryptodev_kasumi_create(const char *name,
@@ -611,12 +611,12 @@ cryptodev_kasumi_create(const char *name,
init_error:
KASUMI_LOG_ERR("driver %s: cryptodev_kasumi_create failed", name);
- cryptodev_kasumi_uninit(crypto_dev_name);
+ cryptodev_kasumi_remove(crypto_dev_name);
return -EFAULT;
}
static int
-cryptodev_kasumi_init(const char *name,
+cryptodev_kasumi_probe(const char *name,
const char *input_args)
{
struct rte_crypto_vdev_init_params init_params = {
@@ -638,7 +638,7 @@ cryptodev_kasumi_init(const char *name,
}
static int
-cryptodev_kasumi_uninit(const char *name)
+cryptodev_kasumi_remove(const char *name)
{
if (name == NULL)
return -EINVAL;
@@ -650,14 +650,14 @@ cryptodev_kasumi_uninit(const char *name)
return 0;
}
-static struct rte_driver cryptodev_kasumi_pmd_drv = {
- .type = PMD_VDEV,
- .init = cryptodev_kasumi_init,
- .uninit = cryptodev_kasumi_uninit
+static struct rte_vdev_driver cryptodev_kasumi_pmd_drv = {
+ .probe = cryptodev_kasumi_probe,
+ .remove = cryptodev_kasumi_remove
};
-PMD_REGISTER_DRIVER(cryptodev_kasumi_pmd_drv, CRYPTODEV_NAME_KASUMI_PMD);
-DRIVER_REGISTER_PARAM_STRING(CRYPTODEV_NAME_KASUMI_PMD,
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_KASUMI_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
diff --git a/drivers/crypto/null/null_crypto_pmd.c b/drivers/crypto/null/null_crypto_pmd.c
index 909b04f9..c69606b3 100644
--- a/drivers/crypto/null/null_crypto_pmd.c
+++ b/drivers/crypto/null/null_crypto_pmd.c
@@ -33,7 +33,7 @@
#include <rte_common.h>
#include <rte_config.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_dev.h>
+#include <rte_vdev.h>
#include <rte_malloc.h>
#include "null_crypto_pmd_private.h"
@@ -182,7 +182,7 @@ null_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
return nb_dequeued;
}
-static int cryptodev_null_uninit(const char *name);
+static int cryptodev_null_remove(const char *name);
/** Create crypto device */
static int
@@ -227,14 +227,14 @@ cryptodev_null_create(const char *name,
init_error:
NULL_CRYPTO_LOG_ERR("driver %s: cryptodev_null_create failed", name);
- cryptodev_null_uninit(crypto_dev_name);
+ cryptodev_null_remove(crypto_dev_name);
return -EFAULT;
}
/** Initialise null crypto device */
static int
-cryptodev_null_init(const char *name,
+cryptodev_null_probe(const char *name,
const char *input_args)
{
struct rte_crypto_vdev_init_params init_params = {
@@ -257,7 +257,7 @@ cryptodev_null_init(const char *name,
/** Uninitialise null crypto device */
static int
-cryptodev_null_uninit(const char *name)
+cryptodev_null_remove(const char *name)
{
if (name == NULL)
return -EINVAL;
@@ -268,14 +268,14 @@ cryptodev_null_uninit(const char *name)
return 0;
}
-static struct rte_driver cryptodev_null_pmd_drv = {
- .type = PMD_VDEV,
- .init = cryptodev_null_init,
- .uninit = cryptodev_null_uninit
+static struct rte_vdev_driver cryptodev_null_pmd_drv = {
+ .probe = cryptodev_null_probe,
+ .remove = cryptodev_null_remove
};
-PMD_REGISTER_DRIVER(cryptodev_null_pmd_drv, CRYPTODEV_NAME_NULL_PMD);
-DRIVER_REGISTER_PARAM_STRING(CRYPTODEV_NAME_NULL_PMD,
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_NULL_PMD, cryptodev_null_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_NULL_PMD, cryptodev_null_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_NULL_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
diff --git a/drivers/crypto/openssl/Makefile b/drivers/crypto/openssl/Makefile
new file mode 100644
index 00000000..8c4250c8
--- /dev/null
+++ b/drivers/crypto/openssl/Makefile
@@ -0,0 +1,60 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_openssl.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_openssl_version.map
+
+# external library dependencies
+LDLIBS += -lcrypto
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += rte_openssl_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += rte_openssl_pmd_ops.c
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += lib/librte_ring
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += lib/librte_cryptodev
+
+include $(RTE_SDK)/mk/rte.lib.mk
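
Building this PMD requires enabling CONFIG_RTE_LIBRTE_PMD_OPENSSL=y in the DPDK build config and having libcrypto available on the build host (hence the LDLIBS += -lcrypto above).
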
diff --git a/drivers/crypto/openssl/rte_openssl_pmd.c b/drivers/crypto/openssl/rte_openssl_pmd.c
new file mode 100644
index 00000000..5f8fa331
--- /dev/null
+++ b/drivers/crypto/openssl/rte_openssl_pmd.c
@@ -0,0 +1,1062 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include <openssl/evp.h>
+
+#include "rte_openssl_pmd_private.h"
+
+static int cryptodev_openssl_remove(const char *name);
+
+/*----------------------------------------------------------------------------*/
+
+/**
+ * Global static parameter used to create a unique name for each
+ * OPENSSL crypto device.
+ */
+static unsigned int unique_name_id;
+
+static inline int
+create_unique_device_name(char *name, size_t size)
+{
+ int ret;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ ret = snprintf(name, size, "%s_%u",
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD),
+ unique_name_id++);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+/**
+ * Increment counter by 1.
+ * Counter is a 64-bit big-endian value stored in a byte array.
+ */
+static void
+ctr_inc(uint8_t *ctr)
+{
+ uint64_t *ctr64 = (uint64_t *)ctr;
+
+ *ctr64 = __builtin_bswap64(*ctr64);
+ (*ctr64)++;
+ *ctr64 = __builtin_bswap64(*ctr64);
+}
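
A quick sanity check of ctr_inc semantics (illustration, assuming ctr_inc is in scope): the 8 bytes are treated as one big-endian 64-bit integer, so incrementing ...00ff carries into the next byte. Note the uint64_t cast assumes the buffer is suitably aligned, which holds in practice for the stack buffer used in the 3DES-CTR path below.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t ctr[8] = {0, 0, 0, 0, 0, 0, 0, 0xff};

            ctr_inc(ctr); /* big-endian increment: ...00ff -> ...0100 */
            printf("%02x %02x\n", ctr[6], ctr[7]); /* prints "01 00" */
            return 0;
    }
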
+
+/*
+ *------------------------------------------------------------------------------
+ * Session Prepare
+ *------------------------------------------------------------------------------
+ */
+
+/** Get xform chain order */
+static enum openssl_chain_order
+openssl_get_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+ enum openssl_chain_order res = OPENSSL_CHAIN_NOT_SUPPORTED;
+
+ if (xform != NULL) {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ res = OPENSSL_CHAIN_ONLY_AUTH;
+ else if (xform->next->type ==
+ RTE_CRYPTO_SYM_XFORM_CIPHER)
+ res = OPENSSL_CHAIN_AUTH_CIPHER;
+ }
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ res = OPENSSL_CHAIN_ONLY_CIPHER;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ res = OPENSSL_CHAIN_CIPHER_AUTH;
+ }
+ }
+
+ return res;
+}
+
+/** Get session cipher key from input cipher key */
+static void
+get_cipher_key(uint8_t *input_key, int keylen, uint8_t *session_key)
+{
+ memcpy(session_key, input_key, keylen);
+}
+
+/** Get key ede 24 bytes standard from input key */
+static int
+get_cipher_key_ede(uint8_t *key, int keylen, uint8_t *key_ede)
+{
+ int res = 0;
+
+ /* Initialize keys - 24 bytes: [key1-key2-key3] */
+ switch (keylen) {
+ case 24:
+ memcpy(key_ede, key, 24);
+ break;
+ case 16:
+ /* K3 = K1 */
+ memcpy(key_ede, key, 16);
+ memcpy(key_ede + 16, key, 8);
+ break;
+ case 8:
+ /* K1 = K2 = K3 (DES compatibility) */
+ memcpy(key_ede, key, 8);
+ memcpy(key_ede + 8, key, 8);
+ memcpy(key_ede + 16, key, 8);
+ break;
+ default:
+ OPENSSL_LOG_ERR("Unsupported key size");
+ res = -EINVAL;
+ }
+
+ return res;
+}
+
+/** Get adequate openssl function for input cipher algorithm */
+static uint8_t
+get_cipher_algo(enum rte_crypto_cipher_algorithm sess_algo, size_t keylen,
+ const EVP_CIPHER **algo)
+{
+ int res = 0;
+
+ if (algo != NULL) {
+ switch (sess_algo) {
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ switch (keylen) {
+ case 16:
+ *algo = EVP_des_ede_cbc();
+ break;
+ case 24:
+ *algo = EVP_des_ede3_cbc();
+ break;
+ default:
+ res = -EINVAL;
+ }
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ switch (keylen) {
+ case 16:
+ *algo = EVP_aes_128_cbc();
+ break;
+ case 24:
+ *algo = EVP_aes_192_cbc();
+ break;
+ case 32:
+ *algo = EVP_aes_256_cbc();
+ break;
+ default:
+ res = -EINVAL;
+ }
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ switch (keylen) {
+ case 16:
+ *algo = EVP_aes_128_ctr();
+ break;
+ case 24:
+ *algo = EVP_aes_192_ctr();
+ break;
+ case 32:
+ *algo = EVP_aes_256_ctr();
+ break;
+ default:
+ res = -EINVAL;
+ }
+ break;
+ case RTE_CRYPTO_CIPHER_AES_GCM:
+ switch (keylen) {
+ case 16:
+ *algo = EVP_aes_128_gcm();
+ break;
+ case 24:
+ *algo = EVP_aes_192_gcm();
+ break;
+ case 32:
+ *algo = EVP_aes_256_gcm();
+ break;
+ default:
+ res = -EINVAL;
+ }
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
+ } else {
+ res = -EINVAL;
+ }
+
+ return res;
+}
+
+/** Get adequate openssl function for input auth algorithm */
+static uint8_t
+get_auth_algo(enum rte_crypto_auth_algorithm sessalgo,
+ const EVP_MD **algo)
+{
+ int res = 0;
+
+ if (algo != NULL) {
+ switch (sessalgo) {
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ *algo = EVP_md5();
+ break;
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ *algo = EVP_sha1();
+ break;
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ *algo = EVP_sha224();
+ break;
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ *algo = EVP_sha256();
+ break;
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ *algo = EVP_sha384();
+ break;
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ *algo = EVP_sha512();
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
+ } else {
+ res = -EINVAL;
+ }
+
+ return res;
+}
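
One subtlety shared by get_cipher_algo and get_auth_algo: both are declared to return uint8_t while res may hold -EINVAL, so the negative value is truncated on return; it remains non-zero, though, so the callers' "!= 0" checks still behave as intended.
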
+
+/** Set session cipher parameters */
+static int
+openssl_set_session_cipher_parameters(struct openssl_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ /* Select cipher direction */
+ sess->cipher.direction = xform->cipher.op;
+ /* Select cipher key */
+ sess->cipher.key.length = xform->cipher.key.length;
+
+ /* Select cipher algo */
+ switch (xform->cipher.algo) {
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ case RTE_CRYPTO_CIPHER_AES_GCM:
+ sess->cipher.mode = OPENSSL_CIPHER_LIB;
+ sess->cipher.algo = xform->cipher.algo;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+
+ if (get_cipher_algo(sess->cipher.algo, sess->cipher.key.length,
+ &sess->cipher.evp_algo) != 0)
+ return -EINVAL;
+
+ get_cipher_key(xform->cipher.key.data, sess->cipher.key.length,
+ sess->cipher.key.data);
+
+ break;
+
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ sess->cipher.mode = OPENSSL_CIPHER_DES3CTR;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+
+ if (get_cipher_key_ede(xform->cipher.key.data,
+ sess->cipher.key.length,
+ sess->cipher.key.data) != 0)
+ return -EINVAL;
+ break;
+
+ default:
+ sess->cipher.algo = RTE_CRYPTO_CIPHER_NULL;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Set session auth parameters */
+static int
+openssl_set_session_auth_parameters(struct openssl_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ /* Select auth generate/verify */
+ sess->auth.operation = xform->auth.op;
+ sess->auth.algo = xform->auth.algo;
+
+ /* Select auth algo */
+ switch (xform->auth.algo) {
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ case RTE_CRYPTO_AUTH_AES_GCM:
+ /* Check additional condition for AES_GMAC/GCM */
+ if (sess->cipher.algo != RTE_CRYPTO_CIPHER_AES_GCM)
+ return -EINVAL;
+ sess->chain_order = OPENSSL_CHAIN_COMBINED;
+ break;
+
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_SHA512:
+ sess->auth.mode = OPENSSL_AUTH_AS_AUTH;
+ if (get_auth_algo(xform->auth.algo,
+ &sess->auth.auth.evp_algo) != 0)
+ return -EINVAL;
+ sess->auth.auth.ctx = EVP_MD_CTX_create();
+ break;
+
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ sess->auth.mode = OPENSSL_AUTH_AS_HMAC;
+ sess->auth.hmac.ctx = EVP_MD_CTX_create();
+ if (get_auth_algo(xform->auth.algo,
+ &sess->auth.hmac.evp_algo) != 0)
+ return -EINVAL;
+ sess->auth.hmac.pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL,
+ xform->auth.key.data, xform->auth.key.length);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/** Parse crypto xform chain and set private session parameters */
+int
+openssl_set_session_parameters(struct openssl_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+
+ sess->chain_order = openssl_get_chain_order(xform);
+ switch (sess->chain_order) {
+ case OPENSSL_CHAIN_ONLY_CIPHER:
+ cipher_xform = xform;
+ break;
+ case OPENSSL_CHAIN_ONLY_AUTH:
+ auth_xform = xform;
+ break;
+ case OPENSSL_CHAIN_CIPHER_AUTH:
+ cipher_xform = xform;
+ auth_xform = xform->next;
+ break;
+ case OPENSSL_CHAIN_AUTH_CIPHER:
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* cipher_xform must be checked before auth_xform */
+ if (cipher_xform) {
+ if (openssl_set_session_cipher_parameters(
+ sess, cipher_xform)) {
+ OPENSSL_LOG_ERR(
+ "Invalid/unsupported cipher parameters");
+ return -EINVAL;
+ }
+ }
+
+ if (auth_xform) {
+ if (openssl_set_session_auth_parameters(sess, auth_xform)) {
+ OPENSSL_LOG_ERR(
+ "Invalid/unsupported auth parameters");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
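
To illustrate what openssl_get_chain_order accepts, a cipher-then-auth chain is simply two linked xforms. A sketch assuming the rte_crypto_sym_xform layout of this release, with hmac_key and aes_key as caller-provided buffers (AES-128-CBC encrypt followed by SHA1-HMAC generate, which maps to OPENSSL_CHAIN_CIPHER_AUTH):

    /* Illustration: a CIPHER->AUTH xform chain for this parser. */
    struct rte_crypto_sym_xform auth_xf = {
            .type = RTE_CRYPTO_SYM_XFORM_AUTH,
            .next = NULL,
            .auth = {
                    .op = RTE_CRYPTO_AUTH_OP_GENERATE,
                    .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
                    .key = { .data = hmac_key, .length = 64 },
                    .digest_length = 20,
            },
    };

    struct rte_crypto_sym_xform cipher_xf = {
            .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
            .next = &auth_xf,
            .cipher = {
                    .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
                    .algo = RTE_CRYPTO_CIPHER_AES_CBC,
                    .key = { .data = aes_key, .length = 16 },
            },
    };
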
+
+/** Reset private session parameters */
+void
+openssl_reset_session(struct openssl_session *sess)
+{
+ EVP_CIPHER_CTX_free(sess->cipher.ctx);
+
+ switch (sess->auth.mode) {
+ case OPENSSL_AUTH_AS_AUTH:
+ EVP_MD_CTX_destroy(sess->auth.auth.ctx);
+ break;
+ case OPENSSL_AUTH_AS_HMAC:
+ EVP_PKEY_free(sess->auth.hmac.pkey);
+ EVP_MD_CTX_destroy(sess->auth.hmac.ctx);
+ break;
+ default:
+ break;
+ }
+}
+
+/** Provide session for operation */
+static struct openssl_session *
+get_session(struct openssl_qp *qp, struct rte_crypto_op *op)
+{
+ struct openssl_session *sess = NULL;
+
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ /* get existing session */
+ if (likely(op->sym->session != NULL &&
+ op->sym->session->dev_type ==
+ RTE_CRYPTODEV_OPENSSL_PMD))
+ sess = (struct openssl_session *)
+ op->sym->session->_private;
+ } else {
+ /* provide internal session */
+ void *_sess = NULL;
+
+ if (!rte_mempool_get(qp->sess_mp, (void **)&_sess)) {
+ sess = (struct openssl_session *)
+ ((struct rte_cryptodev_sym_session *)_sess)
+ ->_private;
+
+ if (unlikely(openssl_set_session_parameters(
+ sess, op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ sess = NULL;
+ } else
+ op->sym->session = _sess;
+ }
+ }
+
+ if (sess == NULL)
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
+ return sess;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * Process Operations
+ *------------------------------------------------------------------------------
+ */
+
+/** Process standard openssl cipher encryption */
+static int
+process_openssl_cipher_encrypt(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, uint8_t *key, int srclen,
+ EVP_CIPHER_CTX *ctx, const EVP_CIPHER *algo)
+{
+ int dstlen, totlen;
+
+ if (EVP_EncryptInit_ex(ctx, algo, NULL, key, iv) <= 0)
+ goto process_cipher_encrypt_err;
+
+ if (EVP_EncryptUpdate(ctx, dst, &dstlen, src, srclen) <= 0)
+ goto process_cipher_encrypt_err;
+
+ if (EVP_EncryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0)
+ goto process_cipher_encrypt_err;
+
+ return 0;
+
+process_cipher_encrypt_err:
+ OPENSSL_LOG_ERR("Process openssl cipher encrypt failed");
+ return -EINVAL;
+}
+
+/** Process standard openssl cipher decryption */
+static int
+process_openssl_cipher_decrypt(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, uint8_t *key, int srclen,
+ EVP_CIPHER_CTX *ctx, const EVP_CIPHER *algo)
+{
+ int dstlen, totlen;
+
+ if (EVP_DecryptInit_ex(ctx, algo, NULL, key, iv) <= 0)
+ goto process_cipher_decrypt_err;
+
+ if (EVP_CIPHER_CTX_set_padding(ctx, 0) <= 0)
+ goto process_cipher_decrypt_err;
+
+ if (EVP_DecryptUpdate(ctx, dst, &dstlen, src, srclen) <= 0)
+ goto process_cipher_decrypt_err;
+
+ if (EVP_DecryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0)
+ goto process_cipher_decrypt_err;
+
+ return 0;
+
+process_cipher_decrypt_err:
+ OPENSSL_LOG_ERR("Process openssl cipher decrypt failed");
+ return -EINVAL;
+}
+
+/** Process 3DES CTR encryption/decryption algorithm */
+static int
+process_openssl_cipher_des3ctr(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, uint8_t *key, int srclen, EVP_CIPHER_CTX *ctx)
+{
+ uint8_t ebuf[8], ctr[8];
+ int unused, n;
+
+ /* We use 3DES encryption also for decryption (CTR mode).
+ * The IV is not used by 3DES ECB.
+ */
+ if (EVP_EncryptInit_ex(ctx, EVP_des_ede3_ecb(), NULL, key, NULL) <= 0)
+ goto process_cipher_des3ctr_err;
+
+ memcpy(ctr, iv, 8);
+ n = 0;
+
+ while (n < srclen) {
+ if (n % 8 == 0) {
+ if (EVP_EncryptUpdate(ctx,
+ (unsigned char *)&ebuf, &unused,
+ (const unsigned char *)&ctr, 8) <= 0)
+ goto process_cipher_des3ctr_err;
+ ctr_inc(ctr);
+ }
+ dst[n] = src[n] ^ ebuf[n % 8];
+ n++;
+ }
+
+ return 0;
+
+process_cipher_des3ctr_err:
+ OPENSSL_LOG_ERR("Process openssl cipher des 3 ede ctr failed");
+ return -EINVAL;
+}
+
+/** Process auth/encryption aes-gcm algorithm */
+static int
+process_openssl_auth_encryption_gcm(uint8_t *src, int srclen,
+ uint8_t *aad, int aadlen, uint8_t *iv, int ivlen,
+ uint8_t *key, uint8_t *dst, uint8_t *tag,
+ EVP_CIPHER_CTX *ctx, const EVP_CIPHER *algo)
+{
+ int len = 0, unused = 0;
+ uint8_t empty[] = {};
+
+ if (EVP_EncryptInit_ex(ctx, algo, NULL, NULL, NULL) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_IVLEN, ivlen, NULL) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ if (EVP_EncryptInit_ex(ctx, NULL, NULL, key, iv) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ if (aadlen > 0) {
+ if (EVP_EncryptUpdate(ctx, NULL, &len, aad, aadlen) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ /* Workaround for OpenSSL bug in versions less than 1.0.1f */
+ if (EVP_EncryptUpdate(ctx, empty, &unused, empty, 0) <= 0)
+ goto process_auth_encryption_gcm_err;
+ }
+
+ if (srclen > 0)
+ if (EVP_EncryptUpdate(ctx, dst, &len, src, srclen) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ if (EVP_EncryptFinal_ex(ctx, dst + len, &len) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_GET_TAG, 16, tag) <= 0)
+ goto process_auth_encryption_gcm_err;
+
+ return 0;
+
+process_auth_encryption_gcm_err:
+ OPENSSL_LOG_ERR("Process openssl auth encryption gcm failed");
+ return -EINVAL;
+}
+
+static int
+process_openssl_auth_decryption_gcm(uint8_t *src, int srclen,
+ uint8_t *aad, int aadlen, uint8_t *iv, int ivlen,
+ uint8_t *key, uint8_t *dst, uint8_t *tag,
+ EVP_CIPHER_CTX *ctx, const EVP_CIPHER *algo)
+{
+ int len = 0, unused = 0;
+ uint8_t empty[] = {};
+
+ if (EVP_DecryptInit_ex(ctx, algo, NULL, NULL, NULL) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_IVLEN, ivlen, NULL) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG, 16, tag) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ if (EVP_DecryptInit_ex(ctx, NULL, NULL, key, iv) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ if (aadlen > 0) {
+ if (EVP_DecryptUpdate(ctx, NULL, &len, aad, aadlen) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ /* Workaround for OpenSSL bug in versions less than 1.0.1f */
+ if (EVP_DecryptUpdate(ctx, empty, &unused, empty, 0) <= 0)
+ goto process_auth_decryption_gcm_err;
+ }
+
+ if (srclen > 0)
+ if (EVP_DecryptUpdate(ctx, dst, &len, src, srclen) <= 0)
+ goto process_auth_decryption_gcm_err;
+
+ if (EVP_DecryptFinal_ex(ctx, dst + len, &len) <= 0)
+ goto process_auth_decryption_gcm_final_err;
+
+ return 0;
+
+process_auth_decryption_gcm_err:
+ OPENSSL_LOG_ERR("Process openssl auth decryption gcm failed");
+ return -EINVAL;
+
+process_auth_decryption_gcm_final_err:
+ return -EFAULT;
+}
+
+/** Process standard openssl auth algorithms */
+static int
+process_openssl_auth(uint8_t *src, uint8_t *dst,
+ __rte_unused uint8_t *iv, __rte_unused EVP_PKEY * pkey,
+ int srclen, EVP_MD_CTX *ctx, const EVP_MD *algo)
+{
+ size_t dstlen;
+
+ if (EVP_DigestInit_ex(ctx, algo, NULL) <= 0)
+ goto process_auth_err;
+
+ if (EVP_DigestUpdate(ctx, (char *)src, srclen) <= 0)
+ goto process_auth_err;
+
+ if (EVP_DigestFinal_ex(ctx, dst, (unsigned int *)&dstlen) <= 0)
+ goto process_auth_err;
+
+ return 0;
+
+process_auth_err:
+ OPENSSL_LOG_ERR("Process openssl auth failed");
+ return -EINVAL;
+}
+
+/** Process standard openssl auth algorithms with hmac */
+static int
+process_openssl_auth_hmac(uint8_t *src, uint8_t *dst,
+ __rte_unused uint8_t *iv, EVP_PKEY *pkey,
+ int srclen, EVP_MD_CTX *ctx, const EVP_MD *algo)
+{
+ size_t dstlen;
+
+ if (EVP_DigestSignInit(ctx, NULL, algo, NULL, pkey) <= 0)
+ goto process_auth_err;
+
+ if (EVP_DigestSignUpdate(ctx, (char *)src, srclen) <= 0)
+ goto process_auth_err;
+
+ if (EVP_DigestSignFinal(ctx, dst, &dstlen) <= 0)
+ goto process_auth_err;
+
+ return 0;
+
+process_auth_err:
+ OPENSSL_LOG_ERR("Process openssl auth failed");
+ return -EINVAL;
+}
+
+/*----------------------------------------------------------------------------*/
+
+/** Process auth/cipher combined operation */
+static void
+process_openssl_combined_op
+ (struct rte_crypto_op *op, struct openssl_session *sess,
+ struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
+{
+ /* cipher */
+ uint8_t *src = NULL, *dst = NULL, *iv, *tag, *aad;
+ int srclen, ivlen, aadlen, status = -1;
+
+ iv = op->sym->cipher.iv.data;
+ ivlen = op->sym->cipher.iv.length;
+ aad = op->sym->auth.aad.data;
+ aadlen = op->sym->auth.aad.length;
+
+ tag = op->sym->auth.digest.data;
+ if (tag == NULL)
+ tag = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->cipher.data.offset +
+ op->sym->cipher.data.length);
+
+ if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
+ srclen = 0;
+ else {
+ srclen = op->sym->cipher.data.length;
+ src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->sym->cipher.data.offset);
+ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->cipher.data.offset);
+ }
+
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ status = process_openssl_auth_encryption_gcm(
+ src, srclen, aad, aadlen, iv, ivlen,
+ sess->cipher.key.data, dst, tag,
+ sess->cipher.ctx, sess->cipher.evp_algo);
+ else
+ status = process_openssl_auth_decryption_gcm(
+ src, srclen, aad, aadlen, iv, ivlen,
+ sess->cipher.key.data, dst, tag,
+ sess->cipher.ctx, sess->cipher.evp_algo);
+
+ if (status != 0) {
+ if (status == (-EFAULT) &&
+ sess->auth.operation ==
+ RTE_CRYPTO_AUTH_OP_VERIFY)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ else
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+}
+
+/** Process cipher operation */
+static void
+process_openssl_cipher_op
+ (struct rte_crypto_op *op, struct openssl_session *sess,
+ struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
+{
+ uint8_t *src, *dst, *iv;
+ int srclen, status;
+
+ srclen = op->sym->cipher.data.length;
+ src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->sym->cipher.data.offset);
+ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->cipher.data.offset);
+
+ iv = op->sym->cipher.iv.data;
+
+ if (sess->cipher.mode == OPENSSL_CIPHER_LIB)
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ status = process_openssl_cipher_encrypt(src, dst, iv,
+ sess->cipher.key.data, srclen,
+ sess->cipher.ctx,
+ sess->cipher.evp_algo);
+ else
+ status = process_openssl_cipher_decrypt(src, dst, iv,
+ sess->cipher.key.data, srclen,
+ sess->cipher.ctx,
+ sess->cipher.evp_algo);
+ else
+ status = process_openssl_cipher_des3ctr(src, dst, iv,
+ sess->cipher.key.data, srclen,
+ sess->cipher.ctx);
+
+ if (status != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+}
+
+/** Process auth operation */
+static void
+process_openssl_auth_op
+ (struct rte_crypto_op *op, struct openssl_session *sess,
+ struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
+{
+ uint8_t *src, *dst;
+ int srclen, status;
+
+ srclen = op->sym->auth.data.length;
+ src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->sym->auth.data.offset);
+
+ if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
+ dst = (uint8_t *)rte_pktmbuf_append(mbuf_src,
+ op->sym->auth.digest.length);
+ else {
+ dst = op->sym->auth.digest.data;
+ if (dst == NULL)
+ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->auth.data.offset +
+ op->sym->auth.data.length);
+ }
+
+ switch (sess->auth.mode) {
+ case OPENSSL_AUTH_AS_AUTH:
+ status = process_openssl_auth(src, dst,
+ NULL, NULL, srclen,
+ sess->auth.auth.ctx, sess->auth.auth.evp_algo);
+ break;
+ case OPENSSL_AUTH_AS_HMAC:
+ status = process_openssl_auth_hmac(src, dst,
+ NULL, sess->auth.hmac.pkey, srclen,
+ sess->auth.hmac.ctx, sess->auth.hmac.evp_algo);
+ break;
+ default:
+ status = -1;
+ break;
+ }
+
+ if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ if (memcmp(dst, op->sym->auth.digest.data,
+ op->sym->auth.digest.length) != 0) {
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ }
+ /* Trim area used for digest from mbuf. */
+ rte_pktmbuf_trim(mbuf_src,
+ op->sym->auth.digest.length);
+ }
+
+ if (status != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+}
+
+/** Process crypto operation for mbuf */
+static int
+process_op(const struct openssl_qp *qp, struct rte_crypto_op *op,
+ struct openssl_session *sess)
+{
+ struct rte_mbuf *msrc, *mdst;
+ int retval;
+
+ msrc = op->sym->m_src;
+ mdst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ switch (sess->chain_order) {
+ case OPENSSL_CHAIN_ONLY_CIPHER:
+ process_openssl_cipher_op(op, sess, msrc, mdst);
+ break;
+ case OPENSSL_CHAIN_ONLY_AUTH:
+ process_openssl_auth_op(op, sess, msrc, mdst);
+ break;
+ case OPENSSL_CHAIN_CIPHER_AUTH:
+ process_openssl_cipher_op(op, sess, msrc, mdst);
+ process_openssl_auth_op(op, sess, mdst, mdst);
+ break;
+ case OPENSSL_CHAIN_AUTH_CIPHER:
+ process_openssl_auth_op(op, sess, msrc, mdst);
+ process_openssl_cipher_op(op, sess, msrc, mdst);
+ break;
+ case OPENSSL_CHAIN_COMBINED:
+ process_openssl_combined_op(op, sess, msrc, mdst);
+ break;
+ default:
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ break;
+ }
+
+ /* Free session if a session-less crypto op */
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ openssl_reset_session(sess);
+ memset(sess, 0, sizeof(struct openssl_session));
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
+ }
+
+
+ if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ if (op->status != RTE_CRYPTO_OP_STATUS_ERROR)
+ retval = rte_ring_enqueue(qp->processed_ops, (void *)op);
+ else
+ retval = -1;
+
+ return retval;
+}
+
+/*
+ *------------------------------------------------------------------------------
+ * PMD Framework
+ *------------------------------------------------------------------------------
+ */
+
+/** Enqueue burst */
+static uint16_t
+openssl_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct openssl_session *sess;
+ struct openssl_qp *qp = queue_pair;
+ int i, retval;
+
+ for (i = 0; i < nb_ops; i++) {
+ sess = get_session(qp, ops[i]);
+ if (unlikely(sess == NULL))
+ goto enqueue_err;
+
+ retval = process_op(qp, ops[i], sess);
+ if (unlikely(retval < 0))
+ goto enqueue_err;
+ }
+
+ qp->stats.enqueued_count += i;
+ return i;
+
+enqueue_err:
+ qp->stats.enqueue_err_count++;
+ return i;
+}
+
+/** Dequeue burst */
+static uint16_t
+openssl_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct openssl_qp *qp = queue_pair;
+
+ unsigned int nb_dequeued = 0;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+ (void **)ops, nb_ops);
+ qp->stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
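
From the application side these handlers sit behind the generic cryptodev burst API; a usage sketch (dev_id and the prepared ops array are assumed, not part of this diff). Because this PMD processes each operation synchronously in the enqueue path, the dequeue loop drains immediately:

    /* Illustration: drive the PMD through the generic burst API. */
    uint16_t n_enq = rte_cryptodev_enqueue_burst(dev_id, 0, ops, n_ops);
    uint16_t n_deq = 0;

    while (n_deq < n_enq)
            n_deq += rte_cryptodev_dequeue_burst(dev_id, 0,
                            ops + n_deq, n_enq - n_deq);
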
+
+/** Create OPENSSL crypto device */
+static int
+cryptodev_openssl_create(const char *name,
+ struct rte_crypto_vdev_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct openssl_private *internals;
+
+ /* create a unique device name */
+ if (create_unique_device_name(crypto_dev_name,
+ RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
+ OPENSSL_LOG_ERR("failed to create unique cryptodev name");
+ return -EINVAL;
+ }
+
+ dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+ sizeof(struct openssl_private),
+ init_params->socket_id);
+ if (dev == NULL) {
+ OPENSSL_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->dev_type = RTE_CRYPTODEV_OPENSSL_PMD;
+ dev->dev_ops = rte_openssl_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = openssl_pmd_dequeue_burst;
+ dev->enqueue_burst = openssl_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_CPU_AESNI;
+
+ /* Set vector instructions mode supported */
+ internals = dev->data->dev_private;
+
+ internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->max_nb_sessions;
+
+ return 0;
+
+init_error:
+ OPENSSL_LOG_ERR("driver %s: cryptodev_openssl_create failed", name);
+
+ cryptodev_openssl_remove(crypto_dev_name);
+ return -EFAULT;
+}
+
+/** Initialise OPENSSL crypto device */
+static int
+cryptodev_openssl_probe(const char *name,
+ const char *input_args)
+{
+ struct rte_crypto_vdev_init_params init_params = {
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ rte_socket_id()
+ };
+
+ rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+ init_params.socket_id);
+ RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
+ init_params.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
+ init_params.max_nb_sessions);
+
+ return cryptodev_openssl_create(name, &init_params);
+}
+
+/** Uninitialise OPENSSL crypto device */
+static int
+cryptodev_openssl_remove(const char *name)
+{
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD,
+ "Closing OPENSSL crypto device %s on numa socket %u\n",
+ name, rte_socket_id());
+
+ return 0;
+}
+
+static struct rte_vdev_driver cryptodev_openssl_pmd_drv = {
+ .probe = cryptodev_openssl_probe,
+ .remove = cryptodev_openssl_remove
+};
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_OPENSSL_PMD,
+ cryptodev_openssl_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_OPENSSL_PMD,
+ "max_nb_queue_pairs=<int> "
+ "max_nb_sessions=<int> "
+ "socket_id=<int>");
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
new file mode 100644
index 00000000..875550c7
--- /dev/null
+++ b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
@@ -0,0 +1,708 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_openssl_pmd_private.h"
+
+
+static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* MD5 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA1 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 28,
+ .max = 28,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA224 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 28,
+ .max = 28,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 128,
+ .max = 128,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA384 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384,
+ .block_size = 128,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 128,
+ .max = 128,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA512 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512,
+ .block_size = 128,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 8,
+ .max = 12,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM (CIPHER) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 16,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* AES GMAC (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 8,
+ .max = 65532,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CTR,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
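
An application can discover these capabilities at runtime by walking the array exposed through rte_cryptodev_info_get; the END_OF_CAPABILITIES terminator carries RTE_CRYPTO_OP_TYPE_UNDEFINED. A sketch (dev_id assumed):

    /* Illustration: probe for AES-CBC support on a device. */
    struct rte_cryptodev_info info;
    const struct rte_cryptodev_capabilities *cap;
    int has_aes_cbc = 0;

    rte_cryptodev_info_get(dev_id, &info);
    for (cap = info.capabilities;
                    cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) {
            if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
                            cap->sym.cipher.algo == RTE_CRYPTO_CIPHER_AES_CBC)
                    has_aes_cbc = 1;
    }
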
+
+
+/** Configure device */
+static int
+openssl_pmd_config(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+openssl_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+openssl_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+openssl_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+openssl_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct openssl_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+openssl_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct openssl_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+ }
+}
+
+
+/** Get device info */
+static void
+openssl_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct openssl_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->dev_type = dev->dev_type;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = openssl_pmd_capabilities;
+ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ }
+}
+
+/** Release queue pair */
+static int
+openssl_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Set a unique name for the queue pair based on its name, dev_id and qp_id */
+static int
+openssl_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct openssl_qp *qp)
+{
+ unsigned int n = snprintf(qp->name, sizeof(qp->name),
+ "openssl_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n > sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+
+/** Create a ring to place processed operations on */
+static struct rte_ring *
+openssl_pmd_qp_create_processed_ops_ring(struct openssl_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (r->prod.size >= ring_size) {
+ OPENSSL_LOG_INFO(
+ "Reusing existing ring %s for processed ops",
+ qp->name);
+ return r;
+ }
+
+ OPENSSL_LOG_ERR(
+ "Unable to reuse existing ring %s for processed ops",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+
+/** Setup a queue pair */
+static int
+openssl_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ struct openssl_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ openssl_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("OPENSSL PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return -ENOMEM;
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (openssl_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->processed_ops = openssl_pmd_qp_create_processed_ops_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_ops == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = dev->data->session_pool;
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
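
An application reaches this setup path through the public queue-pair API; a minimal sketch, assuming the device was already configured with rte_cryptodev_configure (the nb_descriptors field sizes the processed-ops ring created above):

    /* Illustration: set up queue pair 0 with a 2048-entry ring. */
    struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };

    if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
                    rte_socket_id()) < 0)
            rte_exit(EXIT_FAILURE, "qp setup failed\n");
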
+
+/** Start queue pair */
+static int
+openssl_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+openssl_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+openssl_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the session structure */
+static unsigned
+openssl_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct openssl_session);
+}
+
+/** Configure the session from a crypto xform chain */
+static void *
+openssl_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ if (unlikely(sess == NULL)) {
+ OPENSSL_LOG_ERR("invalid session struct");
+ return NULL;
+ }
+
+ if (openssl_set_session_parameters(
+ sess, xform) != 0) {
+ OPENSSL_LOG_ERR("failed configure session parameters");
+ return NULL;
+ }
+
+ return sess;
+}
+
+
+/** Clear the memory of the session so it doesn't leave key material behind */
+static void
+openssl_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+{
+ /*
+ * Currently this resets the whole data structure; investigate whether
+ * a more selective reset of the key material would be more performant
+ */
+ if (sess) {
+ openssl_reset_session(sess);
+ memset(sess, 0, sizeof(struct openssl_session));
+ }
+}
+
+struct rte_cryptodev_ops openssl_pmd_ops = {
+ .dev_configure = openssl_pmd_config,
+ .dev_start = openssl_pmd_start,
+ .dev_stop = openssl_pmd_stop,
+ .dev_close = openssl_pmd_close,
+
+ .stats_get = openssl_pmd_stats_get,
+ .stats_reset = openssl_pmd_stats_reset,
+
+ .dev_infos_get = openssl_pmd_info_get,
+
+ .queue_pair_setup = openssl_pmd_qp_setup,
+ .queue_pair_release = openssl_pmd_qp_release,
+ .queue_pair_start = openssl_pmd_qp_start,
+ .queue_pair_stop = openssl_pmd_qp_stop,
+ .queue_pair_count = openssl_pmd_qp_count,
+
+ .session_get_size = openssl_pmd_session_get_size,
+ .session_configure = openssl_pmd_session_configure,
+ .session_clear = openssl_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *rte_openssl_pmd_ops = &openssl_pmd_ops;
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_private.h b/drivers/crypto/openssl/rte_openssl_pmd_private.h
new file mode 100644
index 00000000..65c5f979
--- /dev/null
+++ b/drivers/crypto/openssl/rte_openssl_pmd_private.h
@@ -0,0 +1,174 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _OPENSSL_PMD_PRIVATE_H_
+#define _OPENSSL_PMD_PRIVATE_H_
+
+#include <openssl/evp.h>
+#include <openssl/des.h>
+
+
+#define OPENSSL_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_OPENSSL_DEBUG
+#define OPENSSL_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD), \
+ __func__, __LINE__, ## args)
+
+#define OPENSSL_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD), \
+ __func__, __LINE__, ## args)
+#else
+#define OPENSSL_LOG_INFO(fmt, args...)
+#define OPENSSL_LOG_DBG(fmt, args...)
+#endif
+
+
+/** OPENSSL operation order mode enumerator */
+enum openssl_chain_order {
+ OPENSSL_CHAIN_ONLY_CIPHER,
+ OPENSSL_CHAIN_ONLY_AUTH,
+ OPENSSL_CHAIN_CIPHER_AUTH,
+ OPENSSL_CHAIN_AUTH_CIPHER,
+ OPENSSL_CHAIN_COMBINED,
+ OPENSSL_CHAIN_NOT_SUPPORTED
+};
+
+/** OPENSSL cipher mode enumerator */
+enum openssl_cipher_mode {
+ OPENSSL_CIPHER_LIB,
+ OPENSSL_CIPHER_DES3CTR,
+};
+
+/** OPENSSL auth mode enumerator */
+enum openssl_auth_mode {
+ OPENSSL_AUTH_AS_AUTH,
+ OPENSSL_AUTH_AS_HMAC,
+};
+
+/** private data structure for each OPENSSL crypto device */
+struct openssl_private {
+ unsigned int max_nb_qpairs;
+ /**< Max number of queue pairs */
+ unsigned int max_nb_sessions;
+ /**< Max number of sessions */
+};
+
+/** OPENSSL crypto queue pair */
+struct openssl_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_ops;
+ /**< Ring for placing processed operations */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats stats;
+ /**< Queue pair statistics */
+} __rte_cache_aligned;
+
+/** OPENSSL crypto private session structure */
+struct openssl_session {
+ enum openssl_chain_order chain_order;
+ /**< chain order mode */
+
+ /** Cipher Parameters */
+ struct {
+ enum rte_crypto_cipher_operation direction;
+ /**< cipher operation direction */
+ enum openssl_cipher_mode mode;
+ /**< cipher operation mode */
+ enum rte_crypto_cipher_algorithm algo;
+ /**< cipher algorithm */
+
+ struct {
+ uint8_t data[32];
+ /**< key data */
+ size_t length;
+ /**< key length in bytes */
+ } key;
+
+ const EVP_CIPHER *evp_algo;
+ /**< pointer to EVP algorithm function */
+ EVP_CIPHER_CTX *ctx;
+ /**< pointer to EVP context structure */
+ } cipher;
+
+ /** Authentication Parameters */
+ struct {
+ enum rte_crypto_auth_operation operation;
+ /**< auth operation generate or verify */
+ enum openssl_auth_mode mode;
+ /**< auth operation mode */
+ enum rte_crypto_auth_algorithm algo;
+ /**< auth algorithm */
+
+ union {
+ struct {
+ const EVP_MD *evp_algo;
+ /**< pointer to EVP algorithm function */
+ EVP_MD_CTX *ctx;
+ /**< pointer to EVP context structure */
+ } auth;
+
+ struct {
+ EVP_PKEY *pkey;
+ /**< pointer to EVP key */
+ const EVP_MD *evp_algo;
+ /**< pointer to EVP algorithm function */
+ EVP_MD_CTX *ctx;
+ /**< pointer to EVP context structure */
+ } hmac;
+ };
+ } auth;
+
+} __rte_cache_aligned;
+
+/** Set and validate OPENSSL crypto session parameters */
+extern int
+openssl_set_session_parameters(struct openssl_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+/** Reset OPENSSL crypto session parameters */
+extern void
+openssl_reset_session(struct openssl_session *sess);
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_openssl_pmd_ops;
+
+#endif /* _OPENSSL_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/openssl/rte_pmd_openssl_version.map b/drivers/crypto/openssl/rte_pmd_openssl_version.map
new file mode 100644
index 00000000..cc5829e3
--- /dev/null
+++ b/drivers/crypto/openssl/rte_pmd_openssl_version.map
@@ -0,0 +1,3 @@
+DPDK_16.11 {
+ local: *;
+};
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_hw.h b/drivers/crypto/qat/qat_adf/icp_qat_hw.h
index 4d4d8e4d..ebe245f6 100644
--- a/drivers/crypto/qat/qat_adf/icp_qat_hw.h
+++ b/drivers/crypto/qat/qat_adf/icp_qat_hw.h
@@ -237,6 +237,11 @@ enum icp_qat_hw_cipher_dir {
ICP_QAT_HW_CIPHER_DECRYPT = 1,
};
+enum icp_qat_hw_auth_op {
+ ICP_QAT_HW_AUTH_VERIFY = 0,
+ ICP_QAT_HW_AUTH_GENERATE = 1,
+};
+
enum icp_qat_hw_cipher_convert {
ICP_QAT_HW_CIPHER_NO_CONVERT = 0,
ICP_QAT_HW_CIPHER_KEY_CONVERT = 1,
@@ -293,14 +298,12 @@ enum icp_qat_hw_cipher_convert {
#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
-#define INIT_SHRAM_CONSTANTS_TABLE_SZ 1024
-struct icp_qat_hw_cipher_aes256_f8 {
- struct icp_qat_hw_cipher_config cipher_config;
- uint8_t key[ICP_QAT_HW_AES_256_F8_KEY_SZ];
-};
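+/*
+ * AES-256 F8 keys are the largest this driver handles, so they bound
+ * the generic key array in icp_qat_hw_cipher_algo_blk below
+ */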
+#define ICP_QAT_HW_CIPHER_MAX_KEY_SZ ICP_QAT_HW_AES_256_F8_KEY_SZ
struct icp_qat_hw_cipher_algo_blk {
- struct icp_qat_hw_cipher_aes256_f8 aes;
+ struct icp_qat_hw_cipher_config cipher_config;
+ uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ];
} __rte_cache_aligned;
+
#endif
diff --git a/drivers/crypto/qat/qat_adf/qat_algs.h b/drivers/crypto/qat/qat_adf/qat_algs.h
index 243c1b40..dcc0df59 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs.h
+++ b/drivers/crypto/qat/qat_adf/qat_algs.h
@@ -51,6 +51,18 @@
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
+/*
+ * Key Modifier (KM) value used in KASUMI algorithm in F9 mode to XOR
+ * Integrity Key (IK)
+ */
+#define KASUMI_F9_KEY_MODIFIER_4_BYTES 0xAAAAAAAA
+
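+/*
+ * Key Modifier (KM) value used in KASUMI algorithm in F8 mode to XOR
+ * the Confidentiality Key (CK)
+ */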
+#define KASUMI_F8_KEY_MODIFIER_4_BYTES 0x55555555
+
+/* 3DES key sizes */
+#define QAT_3DES_KEY_SZ_OPT1 24 /* Keys are independent */
+#define QAT_3DES_KEY_SZ_OPT2 16 /* K3=K1 */
+
#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
ICP_QAT_HW_CIPHER_NO_CONVERT, \
@@ -86,11 +98,13 @@ struct qat_session {
enum icp_qat_hw_cipher_dir qat_dir;
enum icp_qat_hw_cipher_mode qat_mode;
enum icp_qat_hw_auth_algo qat_hash_alg;
+ enum icp_qat_hw_auth_op auth_op;
struct qat_alg_cd cd;
+ uint8_t *cd_cur_ptr;
phys_addr_t cd_paddr;
struct icp_qat_fw_la_bulk_req fw_req;
+ uint8_t aad_len;
struct qat_crypto_instance *inst;
- uint8_t salt[ICP_QAT_HW_AES_BLK_SZ];
rte_spinlock_t lock; /* protects this struct */
};
@@ -115,7 +129,8 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
uint32_t digestsize,
unsigned int operation);
-void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header);
+void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
+ uint16_t proto);
void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_cd *cd,
int alg, const uint8_t *key,
@@ -127,5 +142,6 @@ void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cd,
int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-
+int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
#endif
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index 185bb334..8900668d 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -58,6 +58,7 @@
#include <openssl/sha.h> /* Needed to calculate pre-compute values */
#include <openssl/aes.h> /* Needed to calculate pre-compute values */
+#include <openssl/md5.h> /* Needed to calculate pre-compute values */
/*
@@ -70,9 +71,15 @@ static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
case ICP_QAT_HW_AUTH_ALGO_SHA1:
return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
case ICP_QAT_HW_AUTH_ALGO_SHA256:
return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
case ICP_QAT_HW_AUTH_ALGO_SHA512:
return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
@@ -86,6 +93,12 @@ static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
/* return maximum state1 size in this case */
return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
@@ -103,10 +116,16 @@ static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
switch (qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
return ICP_QAT_HW_SHA1_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ return ICP_QAT_HW_SHA224_STATE1_SZ;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
return ICP_QAT_HW_SHA256_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ return ICP_QAT_HW_SHA384_STATE1_SZ;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
return ICP_QAT_HW_SHA512_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ return ICP_QAT_HW_MD5_STATE1_SZ;
case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
/* return maximum digest size in this case */
return ICP_QAT_HW_SHA512_STATE1_SZ;
@@ -123,12 +142,18 @@ static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
switch (qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
return SHA_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ return SHA256_CBLOCK;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
return SHA256_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ return SHA512_CBLOCK;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
return SHA512_CBLOCK;
case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
return 16;
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ return MD5_CBLOCK;
case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
/* return maximum block size in this case */
return SHA512_CBLOCK;
@@ -150,6 +175,17 @@ static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
return 0;
}
+static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA256_CTX ctx;
+
+ if (!SHA224_Init(&ctx))
+ return -EFAULT;
+ SHA256_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
+ return 0;
+}
+
static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
{
SHA256_CTX ctx;
@@ -161,6 +197,17 @@ static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
return 0;
}
+static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA512_CTX ctx;
+
+ if (!SHA384_Init(&ctx))
+ return -EFAULT;
+ SHA512_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
+ return 0;
+}
+
static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
{
SHA512_CTX ctx;
@@ -172,6 +219,18 @@ static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
return 0;
}
+static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
+{
+ MD5_CTX ctx;
+
+ if (!MD5_Init(&ctx))
+ return -EFAULT;
+ MD5_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
+
+ return 0;
+}
+
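+/*
+ * Each partial_hash_* helper runs a single transform over one block of
+ * input and copies out the raw intermediate state; this is used to
+ * build the HMAC ipad/opad precomputes stored in the content descriptor
+ */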
static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
uint8_t *data_in,
uint8_t *data_out)
@@ -199,6 +258,13 @@ static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
*hash_state_out_be32 =
rte_bswap32(*(((uint32_t *)digest)+i));
break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ if (partial_hash_sha224(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
+ *hash_state_out_be32 =
+ rte_bswap32(*(((uint32_t *)digest)+i));
+ break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
if (partial_hash_sha256(data_in, digest))
return -EFAULT;
@@ -206,6 +272,13 @@ static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
*hash_state_out_be32 =
rte_bswap32(*(((uint32_t *)digest)+i));
break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ if (partial_hash_sha384(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
+ *hash_state_out_be64 =
+ rte_bswap64(*(((uint64_t *)digest)+i));
+ break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
if (partial_hash_sha512(data_in, digest))
return -EFAULT;
@@ -213,6 +286,10 @@ static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
*hash_state_out_be64 =
rte_bswap64(*(((uint64_t *)digest)+i));
break;
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ if (partial_hash_md5(data_in, data_out))
+ return -EFAULT;
+ break;
default:
PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
return -EFAULT;
@@ -344,7 +421,8 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
return 0;
}
-void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
+void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
+ uint16_t proto)
{
PMD_INIT_FUNC_TRACE();
header->hdr_flags =
@@ -358,7 +436,7 @@ void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_PROTO);
+ proto);
ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
@@ -375,127 +453,121 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
enum icp_qat_hw_cipher_convert key_convert;
- uint16_t proto = ICP_QAT_FW_LA_NO_PROTO; /* no CCM/GCM/Snow3G */
- uint16_t cipher_offset = 0;
-
+ uint32_t total_key_size;
+ uint16_t proto = ICP_QAT_FW_LA_NO_PROTO; /* no CCM/GCM/SNOW 3G */
+ uint16_t cipher_offset, cd_size;
+ uint32_t wordIndex = 0;
+ uint32_t *temp_key = NULL;
PMD_INIT_FUNC_TRACE();
- if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER &&
- cdesc->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
- cipher =
- (struct icp_qat_hw_cipher_algo_blk *)((char *)&cdesc->cd +
- sizeof(struct icp_qat_hw_auth_algo_blk));
- cipher_offset = sizeof(struct icp_qat_hw_auth_algo_blk);
- } else {
- cipher = (struct icp_qat_hw_cipher_algo_blk *)&cdesc->cd;
- cipher_offset = 0;
- }
- /* CD setup */
- if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
- ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_RET_AUTH_RES);
- ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
- } else {
+ if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+ cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_NO_RET_AUTH_RES);
ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_CMP_AUTH_RES);
+ ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+ cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
+ } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
+ } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+ PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
+ return -EFAULT;
}
if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
- /* CTR Streaming ciphers are a special case. Decrypt = encrypt
+ /*
+ * CTR Streaming ciphers are a special case. Decrypt = encrypt
* Overriding default values previously set
*/
cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
- } else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
+ } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
+ key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
+ else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
else
key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
- if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2)
- key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
-
- /* For Snow3G, set key convert and other bits */
if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
- key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
- ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_RET_AUTH_RES);
- if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
- ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_RET_AUTH_RES);
- ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
- }
+ total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
+ ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
+ cipher_cd_ctrl->cipher_state_sz =
+ ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
+ proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
+ } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
+ total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
+ cipher_cd_ctrl->cipher_padding_sz =
+ (2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
+ } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
+ total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
+ proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
+ } else {
+ total_key_size = cipherkeylen;
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
+ proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
}
+ cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
+ cipher_offset = cdesc->cd_cur_ptr - ((uint8_t *)&cdesc->cd);
+ cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
+
+ header->service_cmd_id = cdesc->qat_cmd;
+ qat_alg_init_common_hdr(header, proto);
- cipher->aes.cipher_config.val =
+ cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
+
+ cipher->cipher_config.val =
ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
cdesc->qat_cipher_alg, key_convert,
cdesc->qat_dir);
- memcpy(cipher->aes.key, cipherkey, cipherkeylen);
- proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
- if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
- proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
+ if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
+ temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
+ sizeof(struct icp_qat_hw_cipher_config)
+ + cipherkeylen);
+ memcpy(cipher->key, cipherkey, cipherkeylen);
+ memcpy(temp_key, cipherkey, cipherkeylen);
- /* Request template setup */
- qat_alg_init_common_hdr(header);
- header->service_cmd_id = cdesc->qat_cmd;
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
- /* Configure the common header protocol flags */
- ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, proto);
- cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
- cd_pars->u.s.content_desc_params_sz = sizeof(cdesc->cd) >> 3;
+ /* XOR the key with the KASUMI F8 key modifier, one 32-bit word at a time */
+ for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
+ wordIndex++)
+ temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;
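+
+ /* the descriptor now holds cipher_config || key || (key ^ KM) */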
- /* Cipher CD config setup */
- if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
- cipher_cd_ctrl->cipher_key_sz =
- (ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
- ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ) >> 3;
- cipher_cd_ctrl->cipher_state_sz =
- ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
- cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
- if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
- }
+ cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
+ cipherkeylen + cipherkeylen;
} else {
- cipher_cd_ctrl->cipher_key_sz = cipherkeylen >> 3;
- cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
- cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
+ memcpy(cipher->key, cipherkey, cipherkeylen);
+ cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
+ cipherkeylen;
}
- if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
- ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
- ICP_QAT_FW_SLICE_CIPHER);
- ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
- ICP_QAT_FW_SLICE_DRAM_WR);
- } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
- ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
- ICP_QAT_FW_SLICE_CIPHER);
- ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
- ICP_QAT_FW_SLICE_AUTH);
- ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
- ICP_QAT_FW_SLICE_AUTH);
- ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
- ICP_QAT_FW_SLICE_DRAM_WR);
- } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
- ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
- ICP_QAT_FW_SLICE_AUTH);
- ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
- ICP_QAT_FW_SLICE_CIPHER);
- ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
- ICP_QAT_FW_SLICE_CIPHER);
- ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
- ICP_QAT_FW_SLICE_DRAM_WR);
- } else {
- PMD_DRV_LOG(ERR, "invalid param, only authenticated "
- "encryption supported");
- return -EFAULT;
+ if (total_key_size > cipherkeylen) {
+ uint32_t padding_size = total_key_size - cipherkeylen;
+
+ if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
+ && (cipherkeylen == QAT_3DES_KEY_SZ_OPT2))
+ /* K3 not provided, so use K1 as K3 */
+ memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
+ else
+ memset(cdesc->cd_cur_ptr, 0, padding_size);
+ cdesc->cd_cur_ptr += padding_size;
}
+ cd_size = cdesc->cd_cur_ptr - (uint8_t *)&cdesc->cd;
+ cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
+
return 0;
}
@@ -506,8 +578,7 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
uint32_t digestsize,
unsigned int operation)
{
- struct icp_qat_hw_cipher_algo_blk *cipher;
- struct icp_qat_hw_auth_algo_blk *hash;
+ struct icp_qat_hw_auth_setup *hash;
struct icp_qat_hw_cipher_algo_blk *cipherconfig;
struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
@@ -519,98 +590,129 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
(struct icp_qat_fw_la_auth_req_params *)
((char *)&req_tmpl->serv_specif_rqpars +
sizeof(struct icp_qat_fw_la_cipher_req_params));
- enum icp_qat_hw_cipher_convert key_convert;
- uint16_t proto = ICP_QAT_FW_LA_NO_PROTO; /* no CCM/GCM/Snow3G */
- uint16_t state1_size = 0;
- uint16_t state2_size = 0;
- uint16_t cipher_offset = 0, hash_offset = 0;
+ uint16_t proto = ICP_QAT_FW_LA_NO_PROTO; /* no CCM/GCM/SNOW 3G */
+ uint16_t state1_size = 0, state2_size = 0;
+ uint16_t hash_offset, cd_size;
+ uint32_t *aad_len = NULL;
+ uint32_t wordIndex = 0;
+ uint32_t *pTempKey;
PMD_INIT_FUNC_TRACE();
- if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER &&
- cdesc->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
- hash = (struct icp_qat_hw_auth_algo_blk *)&cdesc->cd;
- cipher =
- (struct icp_qat_hw_cipher_algo_blk *)((char *)&cdesc->cd +
- sizeof(struct icp_qat_hw_auth_algo_blk));
- hash_offset = 0;
- cipher_offset = ((char *)hash - (char *)cipher);
- } else {
- cipher = (struct icp_qat_hw_cipher_algo_blk *)&cdesc->cd;
- hash = (struct icp_qat_hw_auth_algo_blk *)((char *)&cdesc->cd +
- sizeof(struct icp_qat_hw_cipher_algo_blk));
- cipher_offset = 0;
- hash_offset = ((char *)hash - (char *)cipher);
+ if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
+ } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
+ } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
+ return -EFAULT;
}
- /* CD setup */
- if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+ if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_RET_AUTH_RES);
+ ICP_QAT_FW_LA_NO_RET_AUTH_RES);
ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_RES);
+ cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
} else {
ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+ ICP_QAT_FW_LA_RET_AUTH_RES);
ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_CMP_AUTH_RES);
+ ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+ cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
}
- if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
- /* CTR Streaming ciphers are a special case. Decrypt = encrypt
- * Overriding default values previously set
- */
- cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
- key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
- } else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
- key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
- else
- key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
-
- if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2)
- key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
-
- cipher->aes.cipher_config.val =
- ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
- cdesc->qat_cipher_alg, key_convert,
- cdesc->qat_dir);
-
- hash->sha.inner_setup.auth_config.reserved = 0;
- hash->sha.inner_setup.auth_config.config =
+ /*
+ * Setup the inner hash config
+ */
+ hash_offset = cdesc->cd_cur_ptr - ((uint8_t *)&cdesc->cd);
+ hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
+ hash->auth_config.reserved = 0;
+ hash->auth_config.config =
ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
cdesc->qat_hash_alg, digestsize);
- hash->sha.inner_setup.auth_counter.counter =
- rte_bswap32(qat_hash_get_block_size(cdesc->qat_hash_alg));
- if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
- hash->sha.inner_setup.auth_counter.counter = 0;
- hash->sha.outer_setup.auth_config.reserved = 0;
- cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
- ((char *)&cdesc->cd +
- sizeof(struct icp_qat_hw_auth_algo_blk)
- + 16);
- cipherconfig->aes.cipher_config.val =
- ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
- ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
- ICP_QAT_HW_CIPHER_KEY_CONVERT,
- ICP_QAT_HW_CIPHER_ENCRYPT);
- memcpy(cipherconfig->aes.key, authkey, authkeylen);
- memset(cipherconfig->aes.key + authkeylen, 0,
- ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
- }
- /* Do precomputes */
- if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
- if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
- authkey, authkeylen, (uint8_t *)(hash->sha.state1 +
- ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ), &state2_size)) {
+ if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
+ || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9)
+ hash->auth_counter.counter = 0;
+ else
+ hash->auth_counter.counter = rte_bswap32(
+ qat_hash_get_block_size(cdesc->qat_hash_alg));
+
+ cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
+
+ /*
+ * cd_cur_ptr now points at the state1 information.
+ */
+ switch (cdesc->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA224:
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA384:
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
+ authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
+ state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
+ authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
+ &state2_size)) {
PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
return -EFAULT;
}
- } else if ((cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
- (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ proto = ICP_QAT_FW_LA_GCM_PROTO;
+ state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
- authkey, authkeylen, (uint8_t *)(hash->sha.state1 +
- ICP_QAT_HW_GALOIS_128_STATE1_SZ), &state2_size)) {
+ authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
+ &state2_size)) {
PMD_DRV_LOG(ERR, "(GCM)precompute failed");
return -EFAULT;
}
@@ -618,62 +720,76 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
* Write (the length of AAD) into bytes 16-19 of state2
* in big-endian format. This field is 8 bytes
*/
- uint32_t *aad_len = (uint32_t *)&hash->sha.state1[
+ auth_param->u2.aad_sz =
+ RTE_ALIGN_CEIL(add_auth_data_length, 16);
+ auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
+
+ aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
ICP_QAT_HW_GALOIS_128_STATE1_SZ +
- ICP_QAT_HW_GALOIS_H_SZ];
+ ICP_QAT_HW_GALOIS_H_SZ);
*aad_len = rte_bswap32(add_auth_data_length);
-
- proto = ICP_QAT_FW_LA_GCM_PROTO;
- } else if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
- state1_size = qat_hash_get_state1_size(cdesc->qat_hash_alg);
- } else {
- if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
- authkey, authkeylen, (uint8_t *)(hash->sha.state1),
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
+ state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
+ memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
+
+ cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
+ (cdesc->cd_cur_ptr + state1_size + state2_size);
+ cipherconfig->cipher_config.val =
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
+ ICP_QAT_HW_CIPHER_KEY_CONVERT,
+ ICP_QAT_HW_CIPHER_ENCRYPT);
+ memcpy(cipherconfig->key, authkey, authkeylen);
+ memset(cipherconfig->key + authkeylen,
+ 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
+ cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
+ authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
+ auth_param->hash_state_sz =
+ RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_MD5:
+ if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
+ authkey, authkeylen, cdesc->cd_cur_ptr,
&state1_size)) {
- PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ PMD_DRV_LOG(ERR, "(MD5)precompute failed");
return -EFAULT;
}
+ state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_NULL:
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
+ state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
+ memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
+ pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
+ + authkeylen);
+ /*
+ * The Inner Hash Initial State2 block must contain IK
+ * (Initialisation Key), followed by IK XOR-ed with KM
+ * (Key Modifier): IK||(IK^KM).
+ */
+ /* write the auth key */
+ memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
+ /* initialise temp key with auth key */
+ memcpy(pTempKey, authkey, authkeylen);
+ /* XOR the key with the KASUMI F9 key modifier, one 32-bit word at a time */
+ for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
+ pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
+ return -EFAULT;
}
/* Request template setup */
- qat_alg_init_common_hdr(header);
+ qat_alg_init_common_hdr(header, proto);
header->service_cmd_id = cdesc->qat_cmd;
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
- /* Configure the common header protocol flags */
- ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, proto);
- cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
- cd_pars->u.s.content_desc_params_sz = sizeof(cdesc->cd) >> 3;
-
- if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
- ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
- ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_RET_AUTH_RES);
- ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
- }
- if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
- ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_RET_AUTH_RES);
- ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_CMP_AUTH_RES);
- }
-
- /* Cipher CD config setup */
- cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
- cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
-
- if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_AUTH) {
- cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
- cipher_cd_ctrl->cipher_cfg_offset = cipher_offset>>3;
- } else {
- cipher_cd_ctrl->cipher_state_sz = 0;
- cipher_cd_ctrl->cipher_cfg_offset = 0;
- }
/* Auth CD config setup */
hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
@@ -681,130 +797,21 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
hash_cd_ctrl->inner_res_sz = digestsize;
hash_cd_ctrl->final_sz = digestsize;
hash_cd_ctrl->inner_state1_sz = state1_size;
+ auth_param->auth_res_sz = digestsize;
- switch (cdesc->qat_hash_alg) {
- case ICP_QAT_HW_AUTH_ALGO_SHA1:
- hash_cd_ctrl->inner_state2_sz =
- RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA256:
- hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA512:
- hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
- break;
- case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
- hash_cd_ctrl->inner_state2_sz =
- ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
- hash_cd_ctrl->inner_state1_sz =
- ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
- memset(hash->sha.state1, 0, ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ);
- break;
- case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
- case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
- hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_GALOIS_H_SZ +
- ICP_QAT_HW_GALOIS_LEN_A_SZ +
- ICP_QAT_HW_GALOIS_E_CTR0_SZ;
- hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
- memset(hash->sha.state1, 0, ICP_QAT_HW_GALOIS_128_STATE1_SZ);
- break;
- case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
- hash_cd_ctrl->inner_state2_sz =
- ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
- hash_cd_ctrl->inner_state1_sz =
- ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ;
- memset(hash->sha.state1, 0, ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ);
- break;
- default:
- PMD_DRV_LOG(ERR, "invalid HASH alg %u", cdesc->qat_hash_alg);
- return -EFAULT;
- }
-
+ hash_cd_ctrl->inner_state2_sz = state2_size;
hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
((sizeof(struct icp_qat_hw_auth_setup) +
RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
>> 3);
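+ /* offsets in the cd ctrl header above are in 8-byte quad-words */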
- auth_param->auth_res_sz = digestsize;
-
- if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
- ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
- ICP_QAT_FW_SLICE_AUTH);
- ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
- ICP_QAT_FW_SLICE_DRAM_WR);
- } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
- ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
- ICP_QAT_FW_SLICE_CIPHER);
- ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
- ICP_QAT_FW_SLICE_AUTH);
- ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
- ICP_QAT_FW_SLICE_AUTH);
- ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
- ICP_QAT_FW_SLICE_DRAM_WR);
- } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
- ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
- ICP_QAT_FW_SLICE_AUTH);
- ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
- ICP_QAT_FW_SLICE_CIPHER);
- ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
- ICP_QAT_FW_SLICE_CIPHER);
- ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
- ICP_QAT_FW_SLICE_DRAM_WR);
- } else {
- PMD_DRV_LOG(ERR, "invalid param, only authenticated "
- "encryption supported");
- return -EFAULT;
- }
- return 0;
-}
-
-static void qat_alg_ablkcipher_init_com(struct icp_qat_fw_la_bulk_req *req,
- struct icp_qat_hw_cipher_algo_blk *cd,
- const uint8_t *key, unsigned int keylen)
-{
- struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
- struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
- struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
- PMD_INIT_FUNC_TRACE();
- rte_memcpy(cd->aes.key, key, keylen);
- qat_alg_init_common_hdr(header);
- header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
- cd_pars->u.s.content_desc_params_sz =
- sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
- /* Cipher CD config setup */
- cd_ctrl->cipher_key_sz = keylen >> 3;
- cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
- cd_ctrl->cipher_cfg_offset = 0;
- ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
- ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
-}
+ cdesc->cd_cur_ptr += state1_size + state2_size;
+ cd_size = cdesc->cd_cur_ptr - (uint8_t *)&cdesc->cd;
-void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_cd *cdesc,
- int alg, const uint8_t *key,
- unsigned int keylen)
-{
- struct icp_qat_hw_cipher_algo_blk *enc_cd = cdesc->cd;
- struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
- struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
-
- PMD_INIT_FUNC_TRACE();
- qat_alg_ablkcipher_init_com(req, enc_cd, key, keylen);
cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
- enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
-}
-
-void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cdesc,
- int alg, const uint8_t *key,
- unsigned int keylen)
-{
- struct icp_qat_hw_cipher_algo_blk *dec_cd = cdesc->cd;
- struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
- struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+ cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
- PMD_INIT_FUNC_TRACE();
- qat_alg_ablkcipher_init_com(req, dec_cd, key, keylen);
- cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
- dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
+ return 0;
}
int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
@@ -836,3 +843,28 @@ int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
}
return 0;
}
+
+int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_KASUMI_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case QAT_3DES_KEY_SZ_OPT1:
+ case QAT_3DES_KEY_SZ_OPT2:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index d51ca968..798cd982 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -54,7 +54,6 @@
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
-#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_string_fns.h>
@@ -90,6 +89,27 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
}, }
}, }
},
+ { /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 28,
+ .max = 28,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
{ /* SHA256 HMAC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
@@ -111,6 +131,27 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
}, }
}, }
},
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 128,
+ .max = 128,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
{ /* SHA512 HMAC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
@@ -132,6 +173,27 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
}, }
}, }
},
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 8,
+ .max = 64,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
{ /* AES XCBC MAC */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
@@ -178,7 +240,32 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
}, }
}, }
},
- { /* SNOW3G (UIA2) */
+ { /* AES GMAC (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .aad_size = {
+ .min = 1,
+ .max = 65535,
+ .increment = 1
+ }
+ }, }
+ }, }
+ },
+ { /* SNOW 3G (UIA2) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
@@ -243,7 +330,7 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
}, }
}, }
},
- { /* SNOW3G (UEA2) */
+ { /* SNOW 3G (UEA2) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
@@ -283,6 +370,132 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
}, }
}, }
},
+ { /* NULL (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, },
+ }, },
+ },
+ { /* NULL (CIPHER) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ }
+ }, },
+ }, }
+ },
+ { /* KASUMI (F8) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* KASUMI (F9) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_KASUMI_F9,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CTR,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
@@ -400,18 +613,46 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid SNOW3G cipher key size");
+ PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size");
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
break;
case RTE_CRYPTO_CIPHER_NULL:
- case RTE_CRYPTO_CIPHER_3DES_ECB:
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ if (qat_alg_validate_kasumi_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
+ break;
case RTE_CRYPTO_CIPHER_3DES_CBC:
+ if (qat_alg_validate_3des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ if (qat_alg_validate_3des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_ECB:
case RTE_CRYPTO_CIPHER_AES_CCM:
- case RTE_CRYPTO_CIPHER_KASUMI_F8:
- PMD_DRV_LOG(ERR, "Crypto: Unsupported Cipher alg %u",
+ case RTE_CRYPTO_CIPHER_AES_F8:
+ case RTE_CRYPTO_CIPHER_AES_XTS:
+ case RTE_CRYPTO_CIPHER_ARC4:
+ case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+ PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
cipher_xform->algo);
goto error_out;
default:
@@ -512,9 +753,15 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_SHA1_HMAC:
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
+ break;
case RTE_CRYPTO_AUTH_SHA256_HMAC:
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
+ break;
case RTE_CRYPTO_AUTH_SHA512_HMAC:
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
break;
@@ -524,22 +771,28 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_AES_GCM:
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
break;
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+ break;
case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
+ break;
case RTE_CRYPTO_AUTH_NULL:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
+ break;
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
+ break;
case RTE_CRYPTO_AUTH_SHA1:
case RTE_CRYPTO_AUTH_SHA256:
case RTE_CRYPTO_AUTH_SHA512:
case RTE_CRYPTO_AUTH_SHA224:
- case RTE_CRYPTO_AUTH_SHA224_HMAC:
case RTE_CRYPTO_AUTH_SHA384:
- case RTE_CRYPTO_AUTH_SHA384_HMAC:
case RTE_CRYPTO_AUTH_MD5:
- case RTE_CRYPTO_AUTH_MD5_HMAC:
case RTE_CRYPTO_AUTH_AES_CCM:
- case RTE_CRYPTO_AUTH_AES_GMAC:
- case RTE_CRYPTO_AUTH_KASUMI_F9:
case RTE_CRYPTO_AUTH_AES_CMAC:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
@@ -575,7 +828,8 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
return session;
error_out:
- rte_mempool_put(internals->sess_mp, session);
+ if (internals->sess_mp != NULL)
+ rte_mempool_put(internals->sess_mp, session);
return NULL;
}
@@ -697,6 +951,13 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
+ uint8_t do_auth = 0, do_cipher = 0;
+ uint32_t cipher_len = 0, cipher_ofs = 0;
+ uint32_t auth_len = 0, auth_ofs = 0;
+ uint32_t min_ofs = 0;
+ uint32_t digest_appended = 1;
+ uint64_t buf_start = 0;
+
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
@@ -719,87 +980,178 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
ctx = (struct qat_session *)op->sym->session->_private;
qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
- *qat_req = ctx->fw_req;
+ rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
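+ /* copy the 128-byte request template with a single vector move
+ * rather than a struct assignment
+ */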
qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
+ cipher_param = (void *)&qat_req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
- qat_req->comn_mid.dst_length =
- qat_req->comn_mid.src_length =
- rte_pktmbuf_data_len(op->sym->m_src);
+ if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+ ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ do_auth = 1;
+ do_cipher = 1;
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
+ do_auth = 1;
+ do_cipher = 0;
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+ do_auth = 0;
+ do_cipher = 1;
+ }
- qat_req->comn_mid.dest_data_addr =
- qat_req->comn_mid.src_data_addr =
- rte_pktmbuf_mtophys(op->sym->m_src);
+ if (do_cipher) {
- if (unlikely(op->sym->m_dst != NULL)) {
- qat_req->comn_mid.dest_data_addr =
- rte_pktmbuf_mtophys(op->sym->m_dst);
- qat_req->comn_mid.dst_length =
- rte_pktmbuf_data_len(op->sym->m_dst);
+ if (ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+ ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
+
+ if (unlikely(
+ (op->sym->cipher.data.length % BYTE_LENGTH != 0)
+ || (op->sym->cipher.data.offset
+ % BYTE_LENGTH != 0))) {
+ PMD_DRV_LOG(ERR,
+ "SNOW 3G/KASUMI in QAT PMD only supports byte aligned values");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+ cipher_len = op->sym->cipher.data.length >> 3;
+ cipher_ofs = op->sym->cipher.data.offset >> 3;
+
+ } else {
+ cipher_len = op->sym->cipher.data.length;
+ cipher_ofs = op->sym->cipher.data.offset;
+ }
+
+ /* copy IV into request if it fits */
+ if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=
+ sizeof(cipher_param->u.cipher_IV_array))) {
+ rte_memcpy(cipher_param->u.cipher_IV_array,
+ op->sym->cipher.iv.data,
+ op->sym->cipher.iv.length);
+ } else {
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+ cipher_param->u.s.cipher_IV_ptr =
+ op->sym->cipher.iv.phys_addr;
+ }
+ min_ofs = cipher_ofs;
}
- cipher_param = (void *)&qat_req->serv_specif_rqpars;
- auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+ if (do_auth) {
+
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) {
+ if (unlikely((op->sym->auth.data.offset % BYTE_LENGTH != 0)
+ || (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
+ PMD_DRV_LOG(ERR,
+ "For SNOW 3G/KASUMI, QAT PMD only supports byte aligned values");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+ auth_ofs = op->sym->auth.data.offset >> 3;
+ auth_len = op->sym->auth.data.length >> 3;
+
+ if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) {
+ if (do_cipher) {
+ auth_len = auth_len + auth_ofs + 1 -
+ ICP_QAT_HW_KASUMI_BLK_SZ;
+ auth_ofs = ICP_QAT_HW_KASUMI_BLK_SZ;
+ } else {
+ auth_len = auth_len + auth_ofs + 1;
+ auth_ofs = 0;
+ }
+ }
- cipher_param->cipher_length = op->sym->cipher.data.length;
- cipher_param->cipher_offset = op->sym->cipher.data.offset;
- if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
- if (unlikely((cipher_param->cipher_length % BYTE_LENGTH != 0) ||
- (cipher_param->cipher_offset
- % BYTE_LENGTH != 0))) {
- PMD_DRV_LOG(ERR, " For Snow3g, QAT PMD only "
- "supports byte aligned values");
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- return -EINVAL;
+ } else {
+ auth_ofs = op->sym->auth.data.offset;
+ auth_len = op->sym->auth.data.length;
+ }
+ min_ofs = auth_ofs;
+
+ if (op->sym->auth.digest.phys_addr) {
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
+ auth_param->auth_res_addr =
+ op->sym->auth.digest.phys_addr;
+ digest_appended = 0;
}
- cipher_param->cipher_length >>= 3;
- cipher_param->cipher_offset >>= 3;
+
+ auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
+
}
- if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=
- sizeof(cipher_param->u.cipher_IV_array))) {
- rte_memcpy(cipher_param->u.cipher_IV_array,
- op->sym->cipher.iv.data,
- op->sym->cipher.iv.length);
+ /* adjust for chain case */
+ if (do_cipher && do_auth)
+ min_ofs = RTE_MIN(cipher_ofs, auth_ofs);
+
+ /* Start DMA at nearest aligned address below min_ofs */
+	#define QAT_64_BYTE_ALIGN_MASK (~0x3f)
+	buf_start = rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs) &
+							QAT_64_BYTE_ALIGN_MASK;
+
+ if (unlikely((rte_pktmbuf_mtophys(op->sym->m_src)
+ - rte_pktmbuf_headroom(op->sym->m_src)) > buf_start)) {
+		/* alignment pushed the address below the start of the
+		 * mbuf buffer, so revert to the unaligned address and
+		 * take the performance hit
+		 */
+ buf_start = rte_pktmbuf_mtophys(op->sym->m_src);
+ }
+
+ qat_req->comn_mid.dest_data_addr =
+ qat_req->comn_mid.src_data_addr = buf_start;
+
+ if (do_cipher) {
+ cipher_param->cipher_offset =
+ (uint32_t)rte_pktmbuf_mtophys_offset(
+ op->sym->m_src, cipher_ofs) - buf_start;
+ cipher_param->cipher_length = cipher_len;
} else {
- ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- cipher_param->u.s.cipher_IV_ptr = op->sym->cipher.iv.phys_addr;
+ cipher_param->cipher_offset = 0;
+ cipher_param->cipher_length = 0;
}
- if (op->sym->auth.digest.phys_addr) {
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
- auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
+ if (do_auth) {
+ auth_param->auth_off = (uint32_t)rte_pktmbuf_mtophys_offset(
+ op->sym->m_src, auth_ofs) - buf_start;
+ auth_param->auth_len = auth_len;
+ } else {
+ auth_param->auth_off = 0;
+ auth_param->auth_len = 0;
}
- auth_param->auth_off = op->sym->auth.data.offset;
- auth_param->auth_len = op->sym->auth.data.length;
- if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
- if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0) ||
- (auth_param->auth_len % BYTE_LENGTH != 0))) {
- PMD_DRV_LOG(ERR, " For Snow3g, QAT PMD only "
- "supports byte aligned values");
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- return -EINVAL;
- }
- auth_param->auth_off >>= 3;
- auth_param->auth_len >>= 3;
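+	/*
+	 * The region to DMA must span whichever of the cipher or auth
+	 * operations ends furthest into the buffer.
+	 */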
+ qat_req->comn_mid.dst_length =
+ qat_req->comn_mid.src_length =
+ (cipher_param->cipher_offset + cipher_param->cipher_length)
+ > (auth_param->auth_off + auth_param->auth_len) ?
+ (cipher_param->cipher_offset + cipher_param->cipher_length)
+ : (auth_param->auth_off + auth_param->auth_len);
+
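+	/*
+	 * With an appended digest, widen the region: generate writes
+	 * the digest (dst), verify reads it (src).
+	 */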
+ if (do_auth && digest_appended) {
+ if (ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE)
+ qat_req->comn_mid.dst_length
+ += op->sym->auth.digest.length;
+ else
+ qat_req->comn_mid.src_length
+ += op->sym->auth.digest.length;
}
- auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
- /* (GCM) aad length(240 max) will be at this location after precompute */
- if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
- ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
- struct icp_qat_hw_auth_algo_blk *hash;
- if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER)
- hash = (struct icp_qat_hw_auth_algo_blk *)((char *)&ctx->cd);
+ /* out-of-place operation (OOP) */
+ if (unlikely(op->sym->m_dst != NULL)) {
+
+ if (do_auth)
+ qat_req->comn_mid.dest_data_addr =
+ rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ auth_ofs)
+ - auth_param->auth_off;
else
- hash = (struct icp_qat_hw_auth_algo_blk *)((char *)&ctx->cd +
- sizeof(struct icp_qat_hw_cipher_algo_blk));
+ qat_req->comn_mid.dest_data_addr =
+ rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ cipher_ofs)
+ - cipher_param->cipher_offset;
+ }
- auth_param->u2.aad_sz = ALIGN_POW2_ROUNDUP(hash->sha.state1[
- ICP_QAT_HW_GALOIS_128_STATE1_SZ +
- ICP_QAT_HW_GALOIS_H_SZ + 3], 16);
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
if (op->sym->cipher.iv.length == 12) {
/*
			 * For GCM a 12 byte IV is allowed,
@@ -809,8 +1161,24 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
qat_req->comn_hdr.serv_specif_flags,
ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
}
+ if (op->sym->cipher.data.length == 0) {
+			/*
+			 * GMAC: zero cipher length means auth-only over
+			 * the AAD, which serves as both source and
+			 * destination buffer.
+			 */
+ qat_req->comn_mid.dest_data_addr =
+ qat_req->comn_mid.src_data_addr =
+ op->sym->auth.aad.phys_addr;
+ qat_req->comn_mid.dst_length =
+ qat_req->comn_mid.src_length =
+ rte_pktmbuf_data_len(op->sym->m_src);
+ cipher_param->cipher_length = 0;
+ cipher_param->cipher_offset = 0;
+ auth_param->u1.aad_adr = 0;
+ auth_param->auth_len = op->sym->auth.aad.length;
+ auth_param->auth_off = op->sym->auth.data.offset;
+ auth_param->u2.aad_sz = 0;
+ }
}
- auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c
index 5de47e36..2e7188bd 100644
--- a/drivers/crypto/qat/qat_qp.c
+++ b/drivers/crypto/qat/qat_qp.c
@@ -300,7 +300,7 @@ qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
* Allocate a memzone for the queue - create a unique name.
*/
snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
- dev->driver->pci_drv.name, "qp_mem", dev->data->dev_id,
+ dev->driver->pci_drv.driver.name, "qp_mem", dev->data->dev_id,
queue->hw_bundle_number, queue->hw_queue_number);
qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
socket_id);
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index 82ab047f..1e7ee61c 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -71,6 +71,12 @@ static struct rte_pci_id pci_id_qat_map[] = {
{
RTE_PCI_DEVICE(0x8086, 0x0443),
},
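+	/* C62x and C3xxx QAT devices (IDs per Intel device numbering) */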
+ {
+ RTE_PCI_DEVICE(0x8086, 0x37c9),
+ },
+ {
+ RTE_PCI_DEVICE(0x8086, 0x19e3),
+ },
{.device_id = 0},
};
@@ -113,26 +119,16 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
}
static struct rte_cryptodev_driver rte_qat_pmd = {
- {
+ .pci_drv = {
.id_table = pci_id_qat_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = rte_cryptodev_pci_probe,
+ .remove = rte_cryptodev_pci_remove,
},
.cryptodev_init = crypto_qat_dev_init,
.dev_private_size = sizeof(struct qat_pmd_private),
};
-static int
-rte_qat_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
- return rte_cryptodev_pmd_driver_register(&rte_qat_pmd, PMD_PDEV);
-}
-
-static struct rte_driver pmd_qat_drv = {
- .type = PMD_PDEV,
- .init = rte_qat_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(pmd_qat_drv, CRYPTODEV_NAME_QAT_SYM_PMD);
-DRIVER_REGISTER_PCI_TABLE(CRYPTODEV_NAME_QAT_SYM_PMD, pci_id_qat_map);
+RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_QAT_SYM_PMD, rte_qat_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(CRYPTODEV_NAME_QAT_SYM_PMD, pci_id_qat_map);
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c
index ec31de28..3b4292a6 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -35,7 +35,7 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_dev.h>
+#include <rte_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -545,7 +545,7 @@ snow3g_pmd_dequeue_burst(void *queue_pair,
return nb_dequeued;
}
-static int cryptodev_snow3g_uninit(const char *name);
+static int cryptodev_snow3g_remove(const char *name);
static int
cryptodev_snow3g_create(const char *name,
@@ -599,12 +599,12 @@ cryptodev_snow3g_create(const char *name,
init_error:
SNOW3G_LOG_ERR("driver %s: cryptodev_snow3g_create failed", name);
- cryptodev_snow3g_uninit(crypto_dev_name);
+ cryptodev_snow3g_remove(crypto_dev_name);
return -EFAULT;
}
static int
-cryptodev_snow3g_init(const char *name,
+cryptodev_snow3g_probe(const char *name,
const char *input_args)
{
struct rte_crypto_vdev_init_params init_params = {
@@ -626,26 +626,26 @@ cryptodev_snow3g_init(const char *name,
}
static int
-cryptodev_snow3g_uninit(const char *name)
+cryptodev_snow3g_remove(const char *name)
{
if (name == NULL)
return -EINVAL;
- RTE_LOG(INFO, PMD, "Closing SNOW3G crypto device %s"
+ RTE_LOG(INFO, PMD, "Closing SNOW 3G crypto device %s"
" on numa socket %u\n",
name, rte_socket_id());
return 0;
}
-static struct rte_driver cryptodev_snow3g_pmd_drv = {
- .type = PMD_VDEV,
- .init = cryptodev_snow3g_init,
- .uninit = cryptodev_snow3g_uninit
+static struct rte_vdev_driver cryptodev_snow3g_pmd_drv = {
+ .probe = cryptodev_snow3g_probe,
+ .remove = cryptodev_snow3g_remove
};
-PMD_REGISTER_DRIVER(cryptodev_snow3g_pmd_drv, CRYPTODEV_NAME_SNOW3G_PMD);
-DRIVER_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD,
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd_drv);
+RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
index 6f00b066..4602dfd4 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
@@ -39,7 +39,7 @@
#include "rte_snow3g_pmd_private.h"
static const struct rte_cryptodev_capabilities snow3g_pmd_capabilities[] = {
- { /* SNOW3G (UIA2) */
+ { /* SNOW 3G (UIA2) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
@@ -64,7 +64,7 @@ static const struct rte_cryptodev_capabilities snow3g_pmd_capabilities[] = {
}, }
}, }
},
- { /* SNOW3G (UEA2) */
+ { /* SNOW 3G (UEA2) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
@@ -228,7 +228,7 @@ snow3g_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
snow3g_pmd_qp_release(dev, qp_id);
/* Allocate the queue pair data structure. */
- qp = rte_zmalloc_socket("SNOW3G PMD Queue Pair", sizeof(*qp),
+ qp = rte_zmalloc_socket("SNOW 3G PMD Queue Pair", sizeof(*qp),
RTE_CACHE_LINE_SIZE, socket_id);
if (qp == NULL)
return (-ENOMEM);
diff --git a/drivers/crypto/zuc/Makefile b/drivers/crypto/zuc/Makefile
new file mode 100644
index 00000000..b15eb0f6
--- /dev/null
+++ b/drivers/crypto/zuc/Makefile
@@ -0,0 +1,69 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifeq ($(LIBSSO_ZUC_PATH),)
+$(error "Please define LIBSSO_ZUC_PATH environment variable")
+endif
+endif
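+# LIBSSO_ZUC_PATH must point at a built libsso_zuc tree, for example
+# (illustrative path only): export LIBSSO_ZUC_PATH=$HOME/libsso_zuc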
+
+# library name
+LIB = librte_pmd_zuc.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_zuc_version.map
+
+# external library dependencies
+CFLAGS += -I$(LIBSSO_ZUC_PATH)
+CFLAGS += -I$(LIBSSO_ZUC_PATH)/include
+CFLAGS += -I$(LIBSSO_ZUC_PATH)/build
+LDLIBS += -L$(LIBSSO_ZUC_PATH)/build -lsso_zuc
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += rte_zuc_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += rte_zuc_pmd_ops.c
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += lib/librte_ring
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += lib/librte_cryptodev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/zuc/rte_pmd_zuc_version.map b/drivers/crypto/zuc/rte_pmd_zuc_version.map
new file mode 100644
index 00000000..cc5829e3
--- /dev/null
+++ b/drivers/crypto/zuc/rte_pmd_zuc_version.map
@@ -0,0 +1,3 @@
+DPDK_16.11 {
+ local: *;
+};
diff --git a/drivers/crypto/zuc/rte_zuc_pmd.c b/drivers/crypto/zuc/rte_zuc_pmd.c
new file mode 100644
index 00000000..38491193
--- /dev/null
+++ b/drivers/crypto/zuc/rte_zuc_pmd.c
@@ -0,0 +1,550 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "rte_zuc_pmd_private.h"
+
+#define ZUC_DIGEST_LENGTH 4
+#define ZUC_MAX_BURST 8
+#define BYTE_LEN 8
+
+/**
+ * Global static counter used to create a unique name for each ZUC
+ * crypto device.
+ */
+static unsigned unique_name_id;
+
+static inline int
+create_unique_device_name(char *name, size_t size)
+{
+ int ret;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ ret = snprintf(name, size, "%s_%u", RTE_STR(CRYPTODEV_NAME_ZUC_PMD),
+ unique_name_id++);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+/** Get xform chain order. */
+static enum zuc_operation
+zuc_get_mode(const struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL)
+ return ZUC_OP_NOT_SUPPORTED;
+
+	if (xform->next && xform->next->next != NULL)
+		return ZUC_OP_NOT_SUPPORTED;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ return ZUC_OP_ONLY_AUTH;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return ZUC_OP_AUTH_CIPHER;
+ else
+ return ZUC_OP_NOT_SUPPORTED;
+ }
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ return ZUC_OP_ONLY_CIPHER;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return ZUC_OP_CIPHER_AUTH;
+ else
+ return ZUC_OP_NOT_SUPPORTED;
+ }
+
+ return ZUC_OP_NOT_SUPPORTED;
+}
+
+/** Parse crypto xform chain and set private session parameters. */
+int
+zuc_set_session_parameters(struct zuc_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ enum zuc_operation mode;
+
+ /* Select Crypto operation - hash then cipher / cipher then hash */
+ mode = zuc_get_mode(xform);
+
+ switch (mode) {
+ case ZUC_OP_CIPHER_AUTH:
+ auth_xform = xform->next;
+
+ /* Fall-through */
+ case ZUC_OP_ONLY_CIPHER:
+ cipher_xform = xform;
+ break;
+ case ZUC_OP_AUTH_CIPHER:
+ cipher_xform = xform->next;
+ /* Fall-through */
+ case ZUC_OP_ONLY_AUTH:
+ auth_xform = xform;
+ break;
+ case ZUC_OP_NOT_SUPPORTED:
+ default:
+ ZUC_LOG_ERR("Unsupported operation chain order parameter");
+ return -EINVAL;
+ }
+
+ if (cipher_xform) {
+ /* Only ZUC EEA3 supported */
+ if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_ZUC_EEA3)
+ return -EINVAL;
+ /* Copy the key */
+		memcpy(sess->pKey_cipher, cipher_xform->cipher.key.data,
+				ZUC_IV_KEY_LENGTH);
+ }
+
+ if (auth_xform) {
+ /* Only ZUC EIA3 supported */
+ if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_ZUC_EIA3)
+ return -EINVAL;
+ sess->auth_op = auth_xform->auth.op;
+ /* Copy the key */
+		memcpy(sess->pKey_hash, auth_xform->auth.key.data,
+				ZUC_IV_KEY_LENGTH);
+ }
+
+ sess->op = mode;
+
+ return 0;
+}
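+
+/*
+ * Illustrative only (not part of the driver): a cipher-then-auth chain
+ * that an application might pass to the parser above, assuming the
+ * 16.11 symmetric xform API; auth_key/cipher_key are application buffers.
+ *
+ *	struct rte_crypto_sym_xform auth = {
+ *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ *		.auth = {
+ *			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
+ *			.algo = RTE_CRYPTO_AUTH_ZUC_EIA3,
+ *			.key = { .data = auth_key, .length = ZUC_IV_KEY_LENGTH },
+ *			.digest_length = ZUC_DIGEST_LENGTH,
+ *		},
+ *		.next = NULL,
+ *	};
+ *	struct rte_crypto_sym_xform cipher = {
+ *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ *		.cipher = {
+ *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ *			.algo = RTE_CRYPTO_CIPHER_ZUC_EEA3,
+ *			.key = { .data = cipher_key, .length = ZUC_IV_KEY_LENGTH },
+ *		},
+ *		.next = &auth,
+ *	};
+ */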
+
+/** Get ZUC session. */
+static struct zuc_session *
+zuc_get_session(struct zuc_qp *qp, struct rte_crypto_op *op)
+{
+ struct zuc_session *sess;
+
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (unlikely(op->sym->session->dev_type !=
+ RTE_CRYPTODEV_ZUC_PMD))
+ return NULL;
+
+ sess = (struct zuc_session *)op->sym->session->_private;
+ } else {
+ struct rte_cryptodev_session *c_sess = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
+ return NULL;
+
+ sess = (struct zuc_session *)c_sess->_private;
+
+ if (unlikely(zuc_set_session_parameters(sess,
+ op->sym->xform) != 0))
+ return NULL;
+ }
+
+ return sess;
+}
+
+/** Encrypt/decrypt mbufs with same cipher key. */
+static uint8_t
+process_zuc_cipher_op(struct rte_crypto_op **ops,
+ struct zuc_session *session,
+ uint8_t num_ops)
+{
+ unsigned i;
+ uint8_t processed_ops = 0;
+ uint8_t *src[ZUC_MAX_BURST], *dst[ZUC_MAX_BURST];
+ uint8_t *IV[ZUC_MAX_BURST];
+ uint32_t num_bytes[ZUC_MAX_BURST];
+ uint8_t *cipher_keys[ZUC_MAX_BURST];
+
+ for (i = 0; i < num_ops; i++) {
+ /* Sanity checks. */
+ if (unlikely(ops[i]->sym->cipher.iv.length != ZUC_IV_KEY_LENGTH)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ ZUC_LOG_ERR("iv");
+ break;
+ }
+
+ if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
+ || ((ops[i]->sym->cipher.data.offset
+ % BYTE_LEN) != 0)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ ZUC_LOG_ERR("Data Length or offset");
+ break;
+ }
+
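+		/* cipher offsets and lengths are in bits; >> 3 converts to bytes */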
+ src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3);
+ dst[i] = ops[i]->sym->m_dst ?
+ rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3) :
+ rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3);
+ IV[i] = ops[i]->sym->cipher.iv.data;
+ num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
+
+ cipher_keys[i] = session->pKey_cipher;
+
+ processed_ops++;
+ }
+
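+	/* a single multi-buffer library call processes the whole batch */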
+ sso_zuc_eea3_n_buffer(cipher_keys, IV, src, dst,
+ num_bytes, processed_ops);
+
+ return processed_ops;
+}
+
+/** Generate/verify hash from mbufs with same hash key. */
+static int
+process_zuc_hash_op(struct rte_crypto_op **ops,
+ struct zuc_session *session,
+ uint8_t num_ops)
+{
+ unsigned i;
+ uint8_t processed_ops = 0;
+ uint8_t *src;
+ uint32_t *dst;
+ uint32_t length_in_bits;
+
+ for (i = 0; i < num_ops; i++) {
+ if (unlikely(ops[i]->sym->auth.aad.length != ZUC_IV_KEY_LENGTH)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ ZUC_LOG_ERR("aad");
+ break;
+ }
+
+ if (unlikely(ops[i]->sym->auth.digest.length != ZUC_DIGEST_LENGTH)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ ZUC_LOG_ERR("digest");
+ break;
+ }
+
+ /* Data must be byte aligned */
+ if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ ZUC_LOG_ERR("Offset");
+ break;
+ }
+
+ length_in_bits = ops[i]->sym->auth.data.length;
+
+ src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->auth.data.offset >> 3);
+
+ if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
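+			/* compute the digest into scratch space appended
+			 * to the source mbuf, then compare below
+			 */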
+ dst = (uint32_t *)rte_pktmbuf_append(ops[i]->sym->m_src,
+ ops[i]->sym->auth.digest.length);
+
+ sso_zuc_eia3_1_buffer(session->pKey_hash,
+ ops[i]->sym->auth.aad.data, src,
+ length_in_bits, dst);
+ /* Verify digest. */
+ if (memcmp(dst, ops[i]->sym->auth.digest.data,
+ ops[i]->sym->auth.digest.length) != 0)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+ /* Trim area used for digest from mbuf. */
+ rte_pktmbuf_trim(ops[i]->sym->m_src,
+ ops[i]->sym->auth.digest.length);
+ } else {
+ dst = (uint32_t *)ops[i]->sym->auth.digest.data;
+
+ sso_zuc_eia3_1_buffer(session->pKey_hash,
+ ops[i]->sym->auth.aad.data, src,
+ length_in_bits, dst);
+ }
+ processed_ops++;
+ }
+
+ return processed_ops;
+}
+
+/** Process a batch of crypto ops which share the same session. */
+static int
+process_ops(struct rte_crypto_op **ops, struct zuc_session *session,
+ struct zuc_qp *qp, uint8_t num_ops,
+ uint16_t *accumulated_enqueued_ops)
+{
+ unsigned i;
+ unsigned enqueued_ops, processed_ops;
+
+ switch (session->op) {
+ case ZUC_OP_ONLY_CIPHER:
+ processed_ops = process_zuc_cipher_op(ops,
+ session, num_ops);
+ break;
+ case ZUC_OP_ONLY_AUTH:
+ processed_ops = process_zuc_hash_op(ops, session,
+ num_ops);
+ break;
+ case ZUC_OP_CIPHER_AUTH:
+ processed_ops = process_zuc_cipher_op(ops, session,
+ num_ops);
+ process_zuc_hash_op(ops, session, processed_ops);
+ break;
+ case ZUC_OP_AUTH_CIPHER:
+ processed_ops = process_zuc_hash_op(ops, session,
+ num_ops);
+ process_zuc_cipher_op(ops, session, processed_ops);
+ break;
+ default:
+ /* Operation not supported. */
+ processed_ops = 0;
+ }
+
+ for (i = 0; i < num_ops; i++) {
+ /*
+ * If there was no error/authentication failure,
+ * change status to successful.
+ */
+ if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ /* Free session if a session-less crypto op. */
+ if (ops[i]->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
+ ops[i]->sym->session = NULL;
+ }
+ }
+
+ enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops,
+ (void **)ops, processed_ops);
+ qp->qp_stats.enqueued_count += enqueued_ops;
+ *accumulated_enqueued_ops += enqueued_ops;
+
+ return enqueued_ops;
+}
+
+static uint16_t
+zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_crypto_op *c_ops[ZUC_MAX_BURST];
+ struct rte_crypto_op *curr_c_op;
+
+ struct zuc_session *prev_sess = NULL, *curr_sess = NULL;
+ struct zuc_qp *qp = queue_pair;
+ unsigned i;
+ uint8_t burst_size = 0;
+ uint16_t enqueued_ops = 0;
+ uint8_t processed_ops;
+
+ for (i = 0; i < nb_ops; i++) {
+ curr_c_op = ops[i];
+
+ /* Set status as enqueued (not processed yet) by default. */
+ curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ curr_sess = zuc_get_session(qp, curr_c_op);
+ if (unlikely(curr_sess == NULL ||
+ curr_sess->op == ZUC_OP_NOT_SUPPORTED)) {
+ curr_c_op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ break;
+ }
+
+ /* Batch ops that share the same session. */
+ if (prev_sess == NULL) {
+ prev_sess = curr_sess;
+ c_ops[burst_size++] = curr_c_op;
+ } else if (curr_sess == prev_sess) {
+ c_ops[burst_size++] = curr_c_op;
+ /*
+ * When there are enough ops to process in a batch,
+ * process them, and start a new batch.
+ */
+ if (burst_size == ZUC_MAX_BURST) {
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = NULL;
+ }
+ } else {
+ /*
+ * Different session, process the ops
+ * of the previous session.
+ */
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ if (processed_ops < burst_size) {
+ burst_size = 0;
+ break;
+ }
+
+ burst_size = 0;
+ prev_sess = curr_sess;
+
+ c_ops[burst_size++] = curr_c_op;
+ }
+ }
+
+ if (burst_size != 0) {
+ /* Process the crypto ops of the last session. */
+ processed_ops = process_ops(c_ops, prev_sess,
+ qp, burst_size, &enqueued_ops);
+ }
+
+ qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
+ return enqueued_ops;
+}
+
+static uint16_t
+zuc_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **c_ops, uint16_t nb_ops)
+{
+ struct zuc_qp *qp = queue_pair;
+
+ unsigned nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+ (void **)c_ops, nb_ops);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+static int cryptodev_zuc_remove(const char *name);
+
+static int
+cryptodev_zuc_create(const char *name,
+ struct rte_crypto_vdev_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct zuc_private *internals;
+ uint64_t cpu_flags = 0;
+
+ /* Check CPU for supported vector instruction set */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+ cpu_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+ else {
+ ZUC_LOG_ERR("Vector instructions are not supported by CPU");
+ return -EFAULT;
+ }
+
+ /* Create a unique device name. */
+ if (create_unique_device_name(crypto_dev_name,
+ RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
+ ZUC_LOG_ERR("failed to create unique cryptodev name");
+ return -EINVAL;
+ }
+
+ dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+ sizeof(struct zuc_private), init_params->socket_id);
+ if (dev == NULL) {
+ ZUC_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->dev_type = RTE_CRYPTODEV_ZUC_PMD;
+ dev->dev_ops = rte_zuc_pmd_ops;
+
+ /* Register RX/TX burst functions for data path. */
+ dev->dequeue_burst = zuc_pmd_dequeue_burst;
+ dev->enqueue_burst = zuc_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ cpu_flags;
+
+ internals = dev->data->dev_private;
+
+ internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->max_nb_sessions;
+
+ return 0;
+init_error:
+ ZUC_LOG_ERR("driver %s: cryptodev_zuc_create failed", name);
+
+ cryptodev_zuc_remove(crypto_dev_name);
+ return -EFAULT;
+}
+
+static int
+cryptodev_zuc_probe(const char *name,
+ const char *input_args)
+{
+ struct rte_crypto_vdev_init_params init_params = {
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ rte_socket_id()
+ };
+
+ rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+ init_params.socket_id);
+ RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
+ init_params.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
+ init_params.max_nb_sessions);
+
+ return cryptodev_zuc_create(name, &init_params);
+}
+
+static int
+cryptodev_zuc_remove(const char *name)
+{
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD, "Closing ZUC crypto device %s"
+ " on numa socket %u\n",
+ name, rte_socket_id());
+
+ return 0;
+}
+
+static struct rte_vdev_driver cryptodev_zuc_pmd_drv = {
+ .probe = cryptodev_zuc_probe,
+ .remove = cryptodev_zuc_remove
+};
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ZUC_PMD, cryptodev_zuc_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ZUC_PMD,
+ "max_nb_queue_pairs=<int> "
+ "max_nb_sessions=<int> "
+ "socket_id=<int>");
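+
+/*
+ * Illustrative only: with CRYPTODEV_NAME_ZUC_PMD expanding to "crypto_zuc",
+ * the device can be instantiated from the EAL command line, e.g.:
+ *
+ *   --vdev "crypto_zuc,max_nb_queue_pairs=2,max_nb_sessions=128,socket_id=0"
+ */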
diff --git a/drivers/crypto/zuc/rte_zuc_pmd_ops.c b/drivers/crypto/zuc/rte_zuc_pmd_ops.c
new file mode 100644
index 00000000..2c886d51
--- /dev/null
+++ b/drivers/crypto/zuc/rte_zuc_pmd_ops.c
@@ -0,0 +1,342 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_zuc_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities zuc_pmd_capabilities[] = {
+ { /* ZUC (EIA3) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_ZUC_EIA3,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* ZUC (EEA3) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_ZUC_EEA3,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/** Configure device */
+static int
+zuc_pmd_config(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+zuc_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+zuc_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+zuc_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+zuc_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct zuc_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+zuc_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct zuc_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+
+/** Get device info */
+static void
+zuc_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct zuc_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->dev_type = dev->dev_type;
+ dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = zuc_pmd_capabilities;
+ }
+}
+
+/** Release queue pair */
+static int
+zuc_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Set a unique name for the queue pair, based on the dev_id and qp_id */
+static int
+zuc_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct zuc_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "zuc_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place processed ops on */
+static struct rte_ring *
+zuc_pmd_qp_create_processed_ops_ring(struct zuc_qp *qp,
+ unsigned ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (r->prod.size >= ring_size) {
+ ZUC_LOG_INFO("Reusing existing ring %s"
+ " for processed packets",
+ qp->name);
+ return r;
+ }
+
+ ZUC_LOG_ERR("Unable to reuse existing ring %s"
+ " for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+zuc_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ struct zuc_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ zuc_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("ZUC PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return (-ENOMEM);
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (zuc_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->processed_ops = zuc_pmd_qp_create_processed_ops_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_ops == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = dev->data->session_pool;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Start queue pair */
+static int
+zuc_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+zuc_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+zuc_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the ZUC session structure */
+static unsigned
+zuc_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct zuc_session);
+}
+
+/** Configure a ZUC session from a crypto xform chain */
+static void *
+zuc_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ if (unlikely(sess == NULL)) {
+ ZUC_LOG_ERR("invalid session struct");
+ return NULL;
+ }
+
+ if (zuc_set_session_parameters(sess, xform) != 0) {
+		ZUC_LOG_ERR("failed to configure session parameters");
+ return NULL;
+ }
+
+ return sess;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+zuc_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+{
+	/*
+	 * Currently we reset the whole data structure; a more selective
+	 * reset of just the key material might be more performant.
+	 */
+ if (sess)
+ memset(sess, 0, sizeof(struct zuc_session));
+}
+
+struct rte_cryptodev_ops zuc_pmd_ops = {
+ .dev_configure = zuc_pmd_config,
+ .dev_start = zuc_pmd_start,
+ .dev_stop = zuc_pmd_stop,
+ .dev_close = zuc_pmd_close,
+
+ .stats_get = zuc_pmd_stats_get,
+ .stats_reset = zuc_pmd_stats_reset,
+
+ .dev_infos_get = zuc_pmd_info_get,
+
+ .queue_pair_setup = zuc_pmd_qp_setup,
+ .queue_pair_release = zuc_pmd_qp_release,
+ .queue_pair_start = zuc_pmd_qp_start,
+ .queue_pair_stop = zuc_pmd_qp_stop,
+ .queue_pair_count = zuc_pmd_qp_count,
+
+ .session_get_size = zuc_pmd_session_get_size,
+ .session_configure = zuc_pmd_session_configure,
+ .session_clear = zuc_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *rte_zuc_pmd_ops = &zuc_pmd_ops;
diff --git a/drivers/crypto/zuc/rte_zuc_pmd_private.h b/drivers/crypto/zuc/rte_zuc_pmd_private.h
new file mode 100644
index 00000000..030f120b
--- /dev/null
+++ b/drivers/crypto/zuc/rte_zuc_pmd_private.h
@@ -0,0 +1,108 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_ZUC_PMD_PRIVATE_H_
+#define _RTE_ZUC_PMD_PRIVATE_H_
+
+#include <sso_zuc.h>
+
+#define ZUC_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_ZUC_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_ZUC_DEBUG
+#define ZUC_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_ZUC_PMD), \
+ __func__, __LINE__, ## args)
+
+#define ZUC_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_ZUC_PMD), \
+ __func__, __LINE__, ## args)
+#else
+#define ZUC_LOG_INFO(fmt, args...)
+#define ZUC_LOG_DBG(fmt, args...)
+#endif
+
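+/* ZUC EEA3/EIA3 use a 128-bit key and a 128-bit IV, hence a single length macro */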
+#define ZUC_IV_KEY_LENGTH 16
+/** private data structure for each virtual ZUC device */
+struct zuc_private {
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+ unsigned max_nb_sessions;
+ /**< Max number of sessions supported by device */
+};
+
+/** ZUC buffer queue pair */
+struct zuc_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_ops;
+ /**< Ring for placing processed ops */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+} __rte_cache_aligned;
+
+enum zuc_operation {
+ ZUC_OP_ONLY_CIPHER,
+ ZUC_OP_ONLY_AUTH,
+ ZUC_OP_CIPHER_AUTH,
+ ZUC_OP_AUTH_CIPHER,
+ ZUC_OP_NOT_SUPPORTED
+};
+
+/** ZUC private session structure */
+struct zuc_session {
+ enum zuc_operation op;
+ enum rte_crypto_auth_operation auth_op;
+ uint8_t pKey_cipher[ZUC_IV_KEY_LENGTH];
+ uint8_t pKey_hash[ZUC_IV_KEY_LENGTH];
+} __rte_cache_aligned;
+
+extern int
+zuc_set_session_parameters(struct zuc_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_zuc_pmd_ops;
+
+#endif /* _RTE_ZUC_PMD_PRIVATE_H_ */