Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Makefile                                   |  40
-rw-r--r--  drivers/crypto/aesni_gcm/Makefile                         |  67
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_ops.h                  | 127
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_pmd.c                  | 524
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c              | 343
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h          | 120
-rw-r--r--  drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map    |   3
-rw-r--r--  drivers/crypto/aesni_mb/Makefile                          |  65
-rw-r--r--  drivers/crypto/aesni_mb/aesni_mb_ops.h                    | 210
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c                | 719
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c            | 473
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h        | 229
-rw-r--r--  drivers/crypto/aesni_mb/rte_pmd_aesni_version.map         |   3
-rw-r--r--  drivers/crypto/null/Makefile                              |  59
-rw-r--r--  drivers/crypto/null/null_crypto_pmd.c                     | 278
-rw-r--r--  drivers/crypto/null/null_crypto_pmd_ops.c                 | 355
-rw-r--r--  drivers/crypto/null/null_crypto_pmd_private.h             |  93
-rw-r--r--  drivers/crypto/null/rte_pmd_null_crypto_version.map       |   3
-rw-r--r--  drivers/crypto/qat/Makefile                               |  64
-rw-r--r--  drivers/crypto/qat/qat_adf/adf_transport_access_macros.h  | 174
-rw-r--r--  drivers/crypto/qat/qat_adf/icp_qat_fw.h                   | 316
-rw-r--r--  drivers/crypto/qat/qat_adf/icp_qat_fw_la.h                | 404
-rw-r--r--  drivers/crypto/qat/qat_adf/icp_qat_hw.h                   | 306
-rw-r--r--  drivers/crypto/qat/qat_adf/qat_algs.h                     | 130
-rw-r--r--  drivers/crypto/qat/qat_adf/qat_algs_build_desc.c          | 829
-rw-r--r--  drivers/crypto/qat/qat_crypto.c                           | 900
-rw-r--r--  drivers/crypto/qat/qat_crypto.h                           | 136
-rw-r--r--  drivers/crypto/qat/qat_logs.h                             |  78
-rw-r--r--  drivers/crypto/qat/qat_qp.c                               | 429
-rw-r--r--  drivers/crypto/qat/rte_pmd_qat_version.map                |   3
-rw-r--r--  drivers/crypto/qat/rte_qat_cryptodev.c                    | 140
-rw-r--r--  drivers/crypto/snow3g/Makefile                            |  64
-rw-r--r--  drivers/crypto/snow3g/rte_pmd_snow3g_version.map          |   3
-rw-r--r--  drivers/crypto/snow3g/rte_snow3g_pmd.c                    | 551
-rw-r--r--  drivers/crypto/snow3g/rte_snow3g_pmd_ops.c                | 342
-rw-r--r--  drivers/crypto/snow3g/rte_snow3g_pmd_private.h            | 107
36 files changed, 8687 insertions, 0 deletions
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
new file mode 100644
index 00000000..b4205385
--- /dev/null
+++ b/drivers/crypto/Makefile
@@ -0,0 +1,40 @@
+# BSD LICENSE
+#
+# Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
+
+include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/crypto/aesni_gcm/Makefile b/drivers/crypto/aesni_gcm/Makefile
new file mode 100644
index 00000000..aa2621bd
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/Makefile
@@ -0,0 +1,67 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),)
+$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable")
+endif
+
+# library name
+LIB = librte_pmd_aesni_gcm.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_aesni_gcm_version.map
+
+# external library include paths
+CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
+CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
+LDLIBS += -lcrypto
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd_ops.c
+
+# export include files
+SYMLINK-y-include +=
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_cryptodev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
new file mode 100644
index 00000000..c399068c
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -0,0 +1,127 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _AESNI_GCM_OPS_H_
+#define _AESNI_GCM_OPS_H_
+
+#ifndef LINUX
+#define LINUX
+#endif
+
+#include <gcm_defines.h>
+#include <aux_funcs.h>
+
+/** Supported vector modes */
+enum aesni_gcm_vector_mode {
+ RTE_AESNI_GCM_NOT_SUPPORTED = 0,
+ RTE_AESNI_GCM_SSE,
+ RTE_AESNI_GCM_AVX,
+ RTE_AESNI_GCM_AVX2
+};
+
+typedef void (*aes_keyexp_128_enc_t)(void *key, void *enc_exp_keys);
+
+typedef void (*aesni_gcm_t)(gcm_data *my_ctx_data, u8 *out, const u8 *in,
+ u64 plaintext_len, u8 *iv, const u8 *aad, u64 aad_len,
+ u8 *auth_tag, u64 auth_tag_len);
+
+typedef void (*aesni_gcm_precomp_t)(gcm_data *my_ctx_data, u8 *hash_subkey);
+
+/** GCM library function pointer table */
+struct aesni_gcm_ops {
+ struct {
+ struct {
+ aes_keyexp_128_enc_t aes128_enc;
+ /**< AES128 enc key expansion */
+ } keyexp;
+ /**< Key expansion functions */
+ } aux; /**< Auxiliary functions */
+
+ struct {
+ aesni_gcm_t enc; /**< GCM encode function pointer */
+ aesni_gcm_t dec; /**< GCM decode function pointer */
+ aesni_gcm_precomp_t precomp; /**< GCM pre-compute */
+ } gcm; /**< GCM functions */
+};
+
+
+static const struct aesni_gcm_ops gcm_ops[] = {
+ [RTE_AESNI_GCM_NOT_SUPPORTED] = {
+ .aux = {
+ .keyexp = {
+ NULL
+ }
+ },
+ .gcm = {
+ NULL
+ }
+ },
+ [RTE_AESNI_GCM_SSE] = {
+ .aux = {
+ .keyexp = {
+ aes_keyexp_128_enc_sse
+ }
+ },
+ .gcm = {
+ aesni_gcm_enc_sse,
+ aesni_gcm_dec_sse,
+ aesni_gcm_precomp_sse
+ }
+ },
+ [RTE_AESNI_GCM_AVX] = {
+ .aux = {
+ .keyexp = {
+ aes_keyexp_128_enc_avx,
+ }
+ },
+ .gcm = {
+ aesni_gcm_enc_avx_gen2,
+ aesni_gcm_dec_avx_gen2,
+ aesni_gcm_precomp_avx_gen2
+ }
+ },
+ [RTE_AESNI_GCM_AVX2] = {
+ .aux = {
+ .keyexp = {
+ aes_keyexp_128_enc_avx2,
+ }
+ },
+ .gcm = {
+ aesni_gcm_enc_avx_gen4,
+ aesni_gcm_dec_avx_gen4,
+ aesni_gcm_precomp_avx_gen4
+ }
+ }
+};
+
+
+#endif /* _AESNI_GCM_OPS_H_ */
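The table above is how the PMD binds to the architecture-specific entry points of Intel's AES-NI GCM library: one aesni_gcm_ops entry per detected vector mode. A minimal sketch of how a caller would dispatch through it, using only the types and symbols declared in this header (the function and variable names below are illustrative, not part of the patch):

/*
 * Sketch only: dispatch an authenticated encryption through gcm_ops[].
 * Assumes the hash subkey has already been installed via ops->gcm.precomp.
 */
static void
example_gcm_encrypt(enum aesni_gcm_vector_mode mode, gcm_data *gdata,
		u8 *dst, const u8 *src, u64 len, u8 *iv,
		const u8 *aad, u64 aad_len, u8 *tag, u64 tag_len)
{
	const struct aesni_gcm_ops *ops = &gcm_ops[mode];

	(*ops->gcm.enc)(gdata, dst, src, len, iv, aad, aad_len, tag, tag_len);
}

The PMD performs exactly this kind of dispatch per queue pair (qp->ops), as the next file shows.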
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
new file mode 100644
index 00000000..2987ef6b
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -0,0 +1,524 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <openssl/aes.h>
+
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "aesni_gcm_pmd_private.h"
+
+/**
+ * Global static parameter used to create a unique name for each AES-NI GCM
+ * crypto device.
+ */
+static unsigned unique_name_id;
+
+static inline int
+create_unique_device_name(char *name, size_t size)
+{
+ int ret;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ ret = snprintf(name, size, "%s_%u", CRYPTODEV_NAME_AESNI_GCM_PMD,
+ unique_name_id++);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int
+aesni_gcm_calculate_hash_sub_key(uint8_t *hsubkey, unsigned hsubkey_length,
+ uint8_t *aeskey, unsigned aeskey_length)
+{
+ uint8_t key[aeskey_length] __rte_aligned(16);
+ AES_KEY enc_key;
+
+ if (hsubkey_length % 16 != 0 && aeskey_length % 16 != 0)
+ return -EFAULT;
+
+ memcpy(key, aeskey, aeskey_length);
+
+ if (AES_set_encrypt_key(key, aeskey_length << 3, &enc_key) != 0)
+ return -EFAULT;
+
+ AES_encrypt(hsubkey, hsubkey, &enc_key);
+
+ return 0;
+}
+
+/** Get xform chain order */
+static int
+aesni_gcm_get_mode(const struct rte_crypto_sym_xform *xform)
+{
+ /*
+ * GCM only supports authenticated encryption or authenticated
+ * decryption, all other options are invalid, so we must have exactly
+ * 2 xform structs chained together
+ */
+ if (xform->next == NULL || xform->next->next != NULL)
+ return -1;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ return AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+ }
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ return AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+ }
+
+ return -1;
+}
+
+/** Parse crypto xform chain and set private session parameters */
+int
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
+ struct aesni_gcm_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+
+ uint8_t hsubkey[16] __rte_aligned(16) = { 0 };
+
+ /* Select Crypto operation - hash then cipher / cipher then hash */
+ switch (aesni_gcm_get_mode(xform)) {
+ case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
+ sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+
+ cipher_xform = xform;
+ auth_xform = xform->next;
+ break;
+ case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
+ sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ break;
+ default:
+ GCM_LOG_ERR("Unsupported operation chain order parameter");
+ return -EINVAL;
+ }
+
+ /* We only support AES GCM */
+ if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_GCM &&
+ auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GCM)
+ return -EINVAL;
+
+ /* Select cipher direction */
+ if (sess->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION &&
+ cipher_xform->cipher.op !=
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ GCM_LOG_ERR("xform chain (CIPHER/AUTH) and cipher operation "
+ "(DECRYPT) specified are an invalid selection");
+ return -EINVAL;
+ } else if (sess->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION &&
+ cipher_xform->cipher.op !=
+ RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+ GCM_LOG_ERR("xform chain (AUTH/CIPHER) and cipher operation "
+ "(ENCRYPT) specified are an invalid selection");
+ return -EINVAL;
+ }
+
+ /* Expand GCM AES128 key */
+ (*gcm_ops->aux.keyexp.aes128_enc)(cipher_xform->cipher.key.data,
+ sess->gdata.expanded_keys);
+
+ /* Calculate hash sub key here */
+ aesni_gcm_calculate_hash_sub_key(hsubkey, sizeof(hsubkey),
+ cipher_xform->cipher.key.data,
+ cipher_xform->cipher.key.length);
+
+ /* Calculate GCM pre-compute */
+ (*gcm_ops->gcm.precomp)(&sess->gdata, hsubkey);
+
+ return 0;
+}
+
+/** Get gcm session */
+static struct aesni_gcm_session *
+aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
+{
+ struct aesni_gcm_session *sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (unlikely(op->session->dev_type
+ != RTE_CRYPTODEV_AESNI_GCM_PMD))
+ return sess;
+
+ sess = (struct aesni_gcm_session *)op->session->_private;
+ } else {
+ void *_sess;
+
+ if (rte_mempool_get(qp->sess_mp, &_sess))
+ return sess;
+
+ sess = (struct aesni_gcm_session *)
+ ((struct rte_cryptodev_session *)_sess)->_private;
+
+ if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
+ sess, op->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ sess = NULL;
+ }
+ }
+ return sess;
+}
+
+/**
+ * Process a crypto operation, calling the GCM library to perform the
+ * authenticated encryption or decryption defined by the session.
+ *
+ * @param qp queue pair
+ * @param op symmetric crypto operation
+ * @param session GCM session
+ *
+ * @return
+ * - 0 on success
+ * - negative value on a failed sanity check or unsupported operation
+ */
+static int
+process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op,
+ struct aesni_gcm_session *session)
+{
+ uint8_t *src, *dst;
+ struct rte_mbuf *m = op->m_src;
+
+ src = rte_pktmbuf_mtod(m, uint8_t *) + op->cipher.data.offset;
+ dst = op->m_dst ?
+ rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
+ op->cipher.data.offset) :
+ rte_pktmbuf_mtod_offset(m, uint8_t *,
+ op->cipher.data.offset);
+
+ /* sanity checks */
+ if (op->cipher.iv.length != 16 && op->cipher.iv.length != 0) {
+ GCM_LOG_ERR("iv");
+ return -1;
+ }
+
+ if (op->auth.aad.length != 12 && op->auth.aad.length != 8 &&
+ op->auth.aad.length != 0) {
+ GCM_LOG_ERR("aad");
+ return -1;
+ }
+
+ if (op->auth.digest.length != 16 &&
+ op->auth.digest.length != 12 &&
+ op->auth.digest.length != 8 &&
+ op->auth.digest.length != 0) {
+ GCM_LOG_ERR("digest");
+ return -1;
+ }
+
+ if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
+
+ (*qp->ops->gcm.enc)(&session->gdata, dst, src,
+ (uint64_t)op->cipher.data.length,
+ op->cipher.iv.data,
+ op->auth.aad.data,
+ (uint64_t)op->auth.aad.length,
+ op->auth.digest.data,
+ (uint64_t)op->auth.digest.length);
+ } else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+ uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(m,
+ op->auth.digest.length);
+
+ if (!auth_tag) {
+ GCM_LOG_ERR("auth_tag");
+ return -1;
+ }
+
+ (*qp->ops->gcm.dec)(&session->gdata, dst, src,
+ (uint64_t)op->cipher.data.length,
+ op->cipher.iv.data,
+ op->auth.aad.data,
+ (uint64_t)op->auth.aad.length,
+ auth_tag,
+ (uint64_t)op->auth.digest.length);
+ } else {
+ GCM_LOG_ERR("unsupported session op");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Post-process a completed GCM crypto operation: on authenticated decryption,
+ * compare the computed digest with the one supplied in the operation and set
+ * the operation status accordingly.
+ *
+ * @param op crypto operation that has completed processing
+ */
+static void
+post_process_gcm_crypto_op(struct rte_crypto_op *op)
+{
+ struct rte_mbuf *m = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+
+ struct aesni_gcm_session *session =
+ (struct aesni_gcm_session *)op->sym->session->_private;
+
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ /* Verify digest if required */
+ if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+
+ uint8_t *tag = rte_pktmbuf_mtod_offset(m, uint8_t *,
+ m->data_len - op->sym->auth.digest.length);
+
+#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
+ rte_hexdump(stdout, "auth tag (orig):",
+ op->sym->auth.digest.data, op->sym->auth.digest.length);
+ rte_hexdump(stdout, "auth tag (calc):",
+ tag, op->sym->auth.digest.length);
+#endif
+
+ if (memcmp(tag, op->sym->auth.digest.data,
+ op->sym->auth.digest.length) != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+ /* trim area used for digest from mbuf */
+ rte_pktmbuf_trim(m, op->sym->auth.digest.length);
+ }
+}
+
+/**
+ * Process a completed GCM request
+ *
+ * @param qp Queue Pair to process
+ * @param op processed crypto operation to complete
+ */
+static void
+handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
+ struct rte_crypto_op *op)
+{
+ post_process_gcm_crypto_op(op);
+
+ /* Free session if a session-less crypto op */
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
+ }
+
+ rte_ring_enqueue(qp->processed_pkts, (void *)op);
+}
+
+static uint16_t
+aesni_gcm_pmd_enqueue_burst(void *queue_pair,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct aesni_gcm_session *sess;
+ struct aesni_gcm_qp *qp = queue_pair;
+
+ int i, retval = 0;
+
+ for (i = 0; i < nb_ops; i++) {
+
+ sess = aesni_gcm_get_session(qp, ops[i]->sym);
+ if (unlikely(sess == NULL)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ qp->qp_stats.enqueue_err_count++;
+ break;
+ }
+
+ retval = process_gcm_crypto_op(qp, ops[i]->sym, sess);
+ if (retval < 0) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ qp->qp_stats.enqueue_err_count++;
+ break;
+ }
+
+ handle_completed_gcm_crypto_op(qp, ops[i]);
+
+ qp->qp_stats.enqueued_count++;
+ }
+ return i;
+}
+
+static uint16_t
+aesni_gcm_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct aesni_gcm_qp *qp = queue_pair;
+
+ unsigned nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+ (void **)ops, nb_ops);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+static int aesni_gcm_uninit(const char *name);
+
+static int
+aesni_gcm_create(const char *name,
+ struct rte_crypto_vdev_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct aesni_gcm_private *internals;
+ enum aesni_gcm_vector_mode vector_mode;
+
+ /* Check CPU for support for AES instruction set */
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
+ GCM_LOG_ERR("AES instructions not supported by CPU");
+ return -EFAULT;
+ }
+
+ /* Check CPU for supported vector instruction set */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ vector_mode = RTE_AESNI_GCM_AVX2;
+ else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+ vector_mode = RTE_AESNI_GCM_AVX;
+ else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+ vector_mode = RTE_AESNI_GCM_SSE;
+ else {
+ GCM_LOG_ERR("Vector instructions are not supported by CPU");
+ return -EFAULT;
+ }
+
+ /* create a unique device name */
+ if (create_unique_device_name(crypto_dev_name,
+ RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
+ GCM_LOG_ERR("failed to create unique cryptodev name");
+ return -EINVAL;
+ }
+
+
+ dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+ sizeof(struct aesni_gcm_private), init_params->socket_id);
+ if (dev == NULL) {
+ GCM_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->dev_type = RTE_CRYPTODEV_AESNI_GCM_PMD;
+ dev->dev_ops = rte_aesni_gcm_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = aesni_gcm_pmd_dequeue_burst;
+ dev->enqueue_burst = aesni_gcm_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_CPU_AESNI;
+
+ switch (vector_mode) {
+ case RTE_AESNI_GCM_SSE:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+ break;
+ case RTE_AESNI_GCM_AVX:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+ break;
+ case RTE_AESNI_GCM_AVX2:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+ break;
+ default:
+ break;
+ }
+
+ /* Set vector instructions mode supported */
+ internals = dev->data->dev_private;
+
+ internals->vector_mode = vector_mode;
+
+ internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->max_nb_sessions;
+
+ return 0;
+
+init_error:
+ GCM_LOG_ERR("driver %s: create failed", name);
+
+ aesni_gcm_uninit(crypto_dev_name);
+ return -EFAULT;
+}
+
+static int
+aesni_gcm_init(const char *name, const char *input_args)
+{
+ struct rte_crypto_vdev_init_params init_params = {
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ rte_socket_id()
+ };
+
+ rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+ init_params.socket_id);
+ RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
+ init_params.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
+ init_params.max_nb_sessions);
+
+ return aesni_gcm_create(name, &init_params);
+}
+
+static int
+aesni_gcm_uninit(const char *name)
+{
+ if (name == NULL)
+ return -EINVAL;
+
+ GCM_LOG_INFO("Closing AESNI crypto device %s on numa socket %u\n",
+ name, rte_socket_id());
+
+ return 0;
+}
+
+static struct rte_driver aesni_gcm_pmd_drv = {
+ .name = CRYPTODEV_NAME_AESNI_GCM_PMD,
+ .type = PMD_VDEV,
+ .init = aesni_gcm_init,
+ .uninit = aesni_gcm_uninit
+};
+
+PMD_REGISTER_DRIVER(aesni_gcm_pmd_drv);
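As aesni_gcm_get_mode() above enforces, a session can only be built from exactly two chained xforms: cipher followed by auth for authenticated encryption, or auth followed by cipher for authenticated decryption. A hedged sketch of the chain an application might pass in, using only the xform fields this file references (the key buffer is a placeholder and other auth/cipher fields are deliberately omitted; this is not code from the patch):

#include <rte_cryptodev.h>

static uint8_t aes_key[16];	/* placeholder AES-128 key */

/* AUTH xform terminates the chain */
static struct rte_crypto_sym_xform auth_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.next = NULL,
	.auth = {
		.algo = RTE_CRYPTO_AUTH_AES_GCM,
	},
};

/* CIPHER -> AUTH selects AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION */
static struct rte_crypto_sym_xform cipher_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
	.next = &auth_xform,
	.cipher = {
		.algo = RTE_CRYPTO_CIPHER_AES_GCM,
		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
		.key = { .data = aes_key, .length = sizeof(aes_key) },
	},
};

Reversing the order (AUTH first, then CIPHER with RTE_CRYPTO_CIPHER_OP_DECRYPT) selects the authenticated decryption path instead.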
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
new file mode 100644
index 00000000..e824d4b3
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
@@ -0,0 +1,343 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "aesni_gcm_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
+ { /* AES GCM (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .aad_size = {
+ .min = 8,
+ .max = 12,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM (CIPHER) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/** Configure device */
+static int
+aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+aesni_gcm_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+aesni_gcm_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+aesni_gcm_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+aesni_gcm_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+aesni_gcm_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+
+/** Get device info */
+static void
+aesni_gcm_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct aesni_gcm_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->dev_type = dev->dev_type;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = aesni_gcm_pmd_capabilities;
+
+ dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ }
+}
+
+/** Release queue pair */
+static int
+aesni_gcm_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Set a unique name for the queue pair based on its dev_id and qp_id */
+static int
+aesni_gcm_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct aesni_gcm_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "aesni_gcm_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place processed packets on */
+static struct rte_ring *
+aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
+ unsigned ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (r->prod.size >= ring_size) {
+ GCM_LOG_INFO("Reusing existing ring %s for processed"
+ " packets", qp->name);
+ return r;
+ }
+
+ GCM_LOG_ERR("Unable to reuse existing ring %s for processed"
+ " packets", qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ struct aesni_gcm_qp *qp = NULL;
+ struct aesni_gcm_private *internals = dev->data->dev_private;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ aesni_gcm_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return (-ENOMEM);
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->ops = &gcm_ops[internals->vector_mode];
+
+ qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_pkts == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = dev->data->session_pool;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Start queue pair */
+static int
+aesni_gcm_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+aesni_gcm_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+aesni_gcm_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the aesni gcm session structure */
+static unsigned
+aesni_gcm_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct aesni_gcm_session);
+}
+
+/** Configure an aesni gcm session from a crypto xform chain */
+static void *
+aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ struct aesni_gcm_private *internals = dev->data->dev_private;
+
+ if (unlikely(sess == NULL)) {
+ GCM_LOG_ERR("invalid session struct");
+ return NULL;
+ }
+
+ if (aesni_gcm_set_session_parameters(&gcm_ops[internals->vector_mode],
+ sess, xform) != 0) {
+ GCM_LOG_ERR("failed configure session parameters");
+ return NULL;
+ }
+
+ return sess;
+}
+
+/** Clear the memory of the session so it doesn't leave key material behind */
+static void
+aesni_gcm_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+{
+ if (sess)
+ memset(sess, 0, sizeof(struct aesni_gcm_session));
+}
+
+struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
+ .dev_configure = aesni_gcm_pmd_config,
+ .dev_start = aesni_gcm_pmd_start,
+ .dev_stop = aesni_gcm_pmd_stop,
+ .dev_close = aesni_gcm_pmd_close,
+
+ .stats_get = aesni_gcm_pmd_stats_get,
+ .stats_reset = aesni_gcm_pmd_stats_reset,
+
+ .dev_infos_get = aesni_gcm_pmd_info_get,
+
+ .queue_pair_setup = aesni_gcm_pmd_qp_setup,
+ .queue_pair_release = aesni_gcm_pmd_qp_release,
+ .queue_pair_start = aesni_gcm_pmd_qp_start,
+ .queue_pair_stop = aesni_gcm_pmd_qp_stop,
+ .queue_pair_count = aesni_gcm_pmd_qp_count,
+
+ .session_get_size = aesni_gcm_pmd_session_get_size,
+ .session_configure = aesni_gcm_pmd_session_configure,
+ .session_clear = aesni_gcm_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops;
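In the capability table at the top of this file, each size constraint is a {min, max, increment} triple: digest sizes of 8 to 16 bytes in steps of 4, AAD sizes of 8 to 12 bytes in steps of 4, and fixed 16-byte keys and IVs (min == max, increment == 0). A small sketch of how such a range would be checked (an illustrative helper only, not part of the patch or of the cryptodev API):

/* Sketch: validate a length against a {min, max, increment} capability range.
 * A single fixed size is expressed as min == max with increment == 0. */
static int
size_in_range(uint16_t len, uint16_t min, uint16_t max, uint16_t increment)
{
	if (increment == 0)
		return len == min;

	return len >= min && len <= max && (len - min) % increment == 0;
}

For example, size_in_range(12, 8, 16, 4) accepts a 12-byte digest, while size_in_range(10, 8, 16, 4) rejects a 10-byte one.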
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
new file mode 100644
index 00000000..a42f9414
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
@@ -0,0 +1,120 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_AESNI_GCM_PMD_PRIVATE_H_
+#define _RTE_AESNI_GCM_PMD_PRIVATE_H_
+
+#include "aesni_gcm_ops.h"
+
+#define GCM_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ CRYPTODEV_NAME_AESNI_GCM_PMD, \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
+#define GCM_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ CRYPTODEV_NAME_AESNI_GCM_PMD, \
+ __func__, __LINE__, ## args)
+
+#define GCM_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ CRYPTODEV_NAME_AESNI_GCM_PMD, \
+ __func__, __LINE__, ## args)
+#else
+#define GCM_LOG_INFO(fmt, args...)
+#define GCM_LOG_DBG(fmt, args...)
+#endif
+
+
+/** private data structure for each virtual AESNI GCM device */
+struct aesni_gcm_private {
+ enum aesni_gcm_vector_mode vector_mode;
+ /**< Vector mode */
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+ unsigned max_nb_sessions;
+ /**< Max number of sessions supported by device */
+};
+
+struct aesni_gcm_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_LEN];
+ /**< Unique Queue Pair Name */
+ const struct aesni_gcm_ops *ops;
+ /**< Architecture dependent function pointer table of the gcm APIs */
+ struct rte_ring *processed_pkts;
+ /**< Ring for placing processed packets */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+} __rte_cache_aligned;
+
+
+enum aesni_gcm_operation {
+ AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION,
+ AESNI_GCM_OP_AUTHENTICATED_DECRYPTION
+};
+
+/** AESNI GCM private session structure */
+struct aesni_gcm_session {
+ enum aesni_gcm_operation op;
+ /**< GCM operation type */
+ struct gcm_data gdata __rte_cache_aligned;
+ /**< GCM parameters */
+};
+
+
+/**
+ * Setup GCM session parameters
+ * @param ops gcm ops function pointer table
+ * @param sess aesni gcm session structure
+ * @param xform crypto transform chain
+ *
+ * @return
+ * - On success returns 0
+ * - On failure returns error code < 0
+ */
+extern int
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *ops,
+ struct aesni_gcm_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+
+/** Device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops;
+
+
+#endif /* _RTE_AESNI_GCM_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map b/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map
new file mode 100644
index 00000000..dc4d417b
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map
@@ -0,0 +1,3 @@
+DPDK_16.04 {
+ local: *;
+};
diff --git a/drivers/crypto/aesni_mb/Makefile b/drivers/crypto/aesni_mb/Makefile
new file mode 100644
index 00000000..ec65291f
--- /dev/null
+++ b/drivers/crypto/aesni_mb/Makefile
@@ -0,0 +1,65 @@
+# BSD LICENSE
+#
+# Copyright(c) 2015 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),)
+$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable")
+endif
+endif
+
+# library name
+LIB = librte_pmd_aesni_mb.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_aesni_version.map
+
+# external library include paths
+CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
+CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops.c
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += lib/librte_cryptodev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/aesni_mb/aesni_mb_ops.h b/drivers/crypto/aesni_mb/aesni_mb_ops.h
new file mode 100644
index 00000000..0c119bf1
--- /dev/null
+++ b/drivers/crypto/aesni_mb/aesni_mb_ops.h
@@ -0,0 +1,210 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _AESNI_MB_OPS_H_
+#define _AESNI_MB_OPS_H_
+
+#ifndef LINUX
+#define LINUX
+#endif
+
+#include <mb_mgr.h>
+#include <aux_funcs.h>
+
+enum aesni_mb_vector_mode {
+ RTE_AESNI_MB_NOT_SUPPORTED = 0,
+ RTE_AESNI_MB_SSE,
+ RTE_AESNI_MB_AVX,
+ RTE_AESNI_MB_AVX2
+};
+
+typedef void (*md5_one_block_t)(void *data, void *digest);
+
+typedef void (*sha1_one_block_t)(void *data, void *digest);
+typedef void (*sha224_one_block_t)(void *data, void *digest);
+typedef void (*sha256_one_block_t)(void *data, void *digest);
+typedef void (*sha384_one_block_t)(void *data, void *digest);
+typedef void (*sha512_one_block_t)(void *data, void *digest);
+
+typedef void (*aes_keyexp_128_t)
+ (void *key, void *enc_exp_keys, void *dec_exp_keys);
+typedef void (*aes_keyexp_192_t)
+ (void *key, void *enc_exp_keys, void *dec_exp_keys);
+typedef void (*aes_keyexp_256_t)
+ (void *key, void *enc_exp_keys, void *dec_exp_keys);
+
+typedef void (*aes_xcbc_expand_key_t)
+ (void *key, void *exp_k1, void *k2, void *k3);
+
+/** Multi-buffer library function pointer table */
+struct aesni_mb_ops {
+ struct {
+ init_mb_mgr_t init_mgr;
+ /**< Initialise scheduler */
+ get_next_job_t get_next;
+ /**< Get next free job structure */
+ submit_job_t submit;
+ /**< Submit job to scheduler */
+ get_completed_job_t get_completed_job;
+ /**< Get completed job */
+ flush_job_t flush_job;
+ /**< flush jobs from manager */
+ } job;
+ /**< multi buffer manager functions */
+
+ struct {
+ struct {
+ md5_one_block_t md5;
+ /**< MD5 one block hash */
+ sha1_one_block_t sha1;
+ /**< SHA1 one block hash */
+ sha224_one_block_t sha224;
+ /**< SHA224 one block hash */
+ sha256_one_block_t sha256;
+ /**< SHA256 one block hash */
+ sha384_one_block_t sha384;
+ /**< SHA384 one block hash */
+ sha512_one_block_t sha512;
+ /**< SHA512 one block hash */
+ } one_block;
+ /**< one block hash functions */
+
+ struct {
+ aes_keyexp_128_t aes128;
+ /**< AES128 key expansions */
+ aes_keyexp_192_t aes192;
+ /**< AES192 key expansions */
+ aes_keyexp_256_t aes256;
+ /**< AES256 key expansions */
+
+ aes_xcbc_expand_key_t aes_xcbc;
+ /**< AES XCBC key expansions */
+ } keyexp;
+ /**< Key expansion functions */
+ } aux;
+ /**< Auxiliary functions */
+};
+
+
+static const struct aesni_mb_ops job_ops[] = {
+ [RTE_AESNI_MB_NOT_SUPPORTED] = {
+ .job = {
+ NULL
+ },
+ .aux = {
+ .one_block = {
+ NULL
+ },
+ .keyexp = {
+ NULL
+ }
+ }
+ },
+ [RTE_AESNI_MB_SSE] = {
+ .job = {
+ init_mb_mgr_sse,
+ get_next_job_sse,
+ submit_job_sse,
+ get_completed_job_sse,
+ flush_job_sse
+ },
+ .aux = {
+ .one_block = {
+ md5_one_block_sse,
+ sha1_one_block_sse,
+ sha224_one_block_sse,
+ sha256_one_block_sse,
+ sha384_one_block_sse,
+ sha512_one_block_sse
+ },
+ .keyexp = {
+ aes_keyexp_128_sse,
+ aes_keyexp_192_sse,
+ aes_keyexp_256_sse,
+ aes_xcbc_expand_key_sse
+ }
+ }
+ },
+ [RTE_AESNI_MB_AVX] = {
+ .job = {
+ init_mb_mgr_avx,
+ get_next_job_avx,
+ submit_job_avx,
+ get_completed_job_avx,
+ flush_job_avx
+ },
+ .aux = {
+ .one_block = {
+ md5_one_block_avx,
+ sha1_one_block_avx,
+ sha224_one_block_avx,
+ sha256_one_block_avx,
+ sha384_one_block_avx,
+ sha512_one_block_avx
+ },
+ .keyexp = {
+ aes_keyexp_128_avx,
+ aes_keyexp_192_avx,
+ aes_keyexp_256_avx,
+ aes_xcbc_expand_key_avx
+ }
+ }
+ },
+ [RTE_AESNI_MB_AVX2] = {
+ .job = {
+ init_mb_mgr_avx2,
+ get_next_job_avx2,
+ submit_job_avx2,
+ get_completed_job_avx2,
+ flush_job_avx2
+ },
+ .aux = {
+ .one_block = {
+ md5_one_block_avx2,
+ sha1_one_block_avx2,
+ sha224_one_block_avx2,
+ sha256_one_block_avx2,
+ sha384_one_block_avx2,
+ sha512_one_block_avx2
+ },
+ .keyexp = {
+ aes_keyexp_128_avx2,
+ aes_keyexp_192_avx2,
+ aes_keyexp_256_avx2,
+ aes_xcbc_expand_key_avx2
+ }
+ }
+ }
+};
+
+
+#endif /* _AESNI_MB_OPS_H_ */
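Unlike the GCM table earlier in this patch, the multi-buffer manager is job oriented: a caller obtains a JOB_AES_HMAC from get_next, fills in its cipher and hash parameters, submits it, and later collects completed jobs (flushing when the pipeline has to be drained). A hedged sketch of that flow using only the function pointers declared above (MB_MGR and JOB_AES_HMAC come from the multi-buffer library headers; filling the job is elided and this is not code from the patch):

/* Illustrative job flow only; the real logic lives in rte_aesni_mb_pmd.c. */
static void
example_mb_flow(enum aesni_mb_vector_mode mode, MB_MGR *mgr)
{
	const struct aesni_mb_ops *ops = &job_ops[mode];
	JOB_AES_HMAC *job;

	(*ops->job.init_mgr)(mgr);

	job = (*ops->job.get_next)(mgr);
	/* ... fill cipher/auth parameters on 'job' here ... */

	/* submit may hand back an earlier job that has now completed */
	job = (*ops->job.submit)(mgr);
	while (job != NULL) {
		/* ... post-process the completed job ... */
		job = (*ops->job.get_completed_job)(mgr);
	}

	/* force out any jobs still held inside the manager */
	while ((job = (*ops->job.flush_job)(mgr)) != NULL)
		; /* ... post-process the flushed job ... */
}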
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
new file mode 100644
index 00000000..3415ac1b
--- /dev/null
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -0,0 +1,719 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "rte_aesni_mb_pmd_private.h"
+
+/**
+ * Global static parameter used to create a unique name for each AES-NI multi
+ * buffer crypto device.
+ */
+static unsigned unique_name_id;
+
+static inline int
+create_unique_device_name(char *name, size_t size)
+{
+ int ret;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ ret = snprintf(name, size, "%s_%u", CRYPTODEV_NAME_AESNI_MB_PMD,
+ unique_name_id++);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+typedef void (*hash_one_block_t)(void *data, void *digest);
+typedef void (*aes_keyexp_t)(void *key, void *enc_exp_keys, void *dec_exp_keys);
+
+/**
+ * Calculate the authentication pre-computes
+ *
+ * @param one_block_hash Function pointer to calculate digest on ipad/opad
+ * @param ipad Inner pad output byte array
+ * @param opad Outer pad output byte array
+ * @param hkey Authentication key
+ * @param hkey_len Authentication key length
+ * @param blocksize Block size of selected hash algo
+ */
+static void
+calculate_auth_precomputes(hash_one_block_t one_block_hash,
+ uint8_t *ipad, uint8_t *opad,
+ uint8_t *hkey, uint16_t hkey_len,
+ uint16_t blocksize)
+{
+ unsigned i, length;
+
+ uint8_t ipad_buf[blocksize] __rte_aligned(16);
+ uint8_t opad_buf[blocksize] __rte_aligned(16);
+
+ /* Setup inner and outer pads */
+ memset(ipad_buf, HMAC_IPAD_VALUE, blocksize);
+ memset(opad_buf, HMAC_OPAD_VALUE, blocksize);
+
+ /* XOR hash key with inner and outer pads */
+ length = hkey_len > blocksize ? blocksize : hkey_len;
+
+ for (i = 0; i < length; i++) {
+ ipad_buf[i] ^= hkey[i];
+ opad_buf[i] ^= hkey[i];
+ }
+
+ /* Compute partial hashes */
+ (*one_block_hash)(ipad_buf, ipad);
+ (*one_block_hash)(opad_buf, opad);
+
+ /* Clean up stack */
+ memset(ipad_buf, 0, blocksize);
+ memset(opad_buf, 0, blocksize);
+}
+
+/** Get xform chain order */
+static int
+aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+ /*
+ * Multi-buffer only supports HASH_CIPHER or CIPHER_HASH chained
+ * operations, all other options are invalid, so we must have exactly
+ * 2 xform structs chained together
+ */
+ if (xform->next == NULL || xform->next->next != NULL)
+ return -1;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return HASH_CIPHER;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return CIPHER_HASH;
+
+ return -1;
+}
+
+/** Set session authentication parameters */
+static int
+aesni_mb_set_session_auth_parameters(const struct aesni_mb_ops *mb_ops,
+ struct aesni_mb_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ hash_one_block_t hash_oneblock_fn;
+
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
+ MB_LOG_ERR("Crypto xform struct not of type auth");
+ return -1;
+ }
+
+ /* Set Authentication Parameters */
+ if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
+ sess->auth.algo = AES_XCBC;
+ (*mb_ops->aux.keyexp.aes_xcbc)(xform->auth.key.data,
+ sess->auth.xcbc.k1_expanded,
+ sess->auth.xcbc.k2, sess->auth.xcbc.k3);
+ return 0;
+ }
+
+ switch (xform->auth.algo) {
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ sess->auth.algo = MD5;
+ hash_oneblock_fn = mb_ops->aux.one_block.md5;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ sess->auth.algo = SHA1;
+ hash_oneblock_fn = mb_ops->aux.one_block.sha1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ sess->auth.algo = SHA_224;
+ hash_oneblock_fn = mb_ops->aux.one_block.sha224;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ sess->auth.algo = SHA_256;
+ hash_oneblock_fn = mb_ops->aux.one_block.sha256;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ sess->auth.algo = SHA_384;
+ hash_oneblock_fn = mb_ops->aux.one_block.sha384;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ sess->auth.algo = SHA_512;
+ hash_oneblock_fn = mb_ops->aux.one_block.sha512;
+ break;
+ default:
+ MB_LOG_ERR("Unsupported authentication algorithm selection");
+ return -1;
+ }
+
+ /* Calculate Authentication precomputes */
+ calculate_auth_precomputes(hash_oneblock_fn,
+ sess->auth.pads.inner, sess->auth.pads.outer,
+ xform->auth.key.data,
+ xform->auth.key.length,
+ get_auth_algo_blocksize(sess->auth.algo));
+
+ return 0;
+}
+
+/** Set session cipher parameters */
+static int
+aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops,
+ struct aesni_mb_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ aes_keyexp_t aes_keyexp_fn;
+
+ if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ MB_LOG_ERR("Crypto xform struct not of type cipher");
+ return -1;
+ }
+
+ /* Select cipher direction */
+ switch (xform->cipher.op) {
+ case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
+ sess->cipher.direction = ENCRYPT;
+ break;
+ case RTE_CRYPTO_CIPHER_OP_DECRYPT:
+ sess->cipher.direction = DECRYPT;
+ break;
+ default:
+ MB_LOG_ERR("Unsupported cipher operation parameter");
+ return -1;
+ }
+
+ /* Select cipher mode */
+ switch (xform->cipher.algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ sess->cipher.mode = CBC;
+ break;
+ default:
+ MB_LOG_ERR("Unsupported cipher mode parameter");
+ return -1;
+ }
+
+ /* Check key length and choose key expansion function */
+ switch (xform->cipher.key.length) {
+ case AES_128_BYTES:
+ sess->cipher.key_length_in_bytes = AES_128_BYTES;
+ aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
+ break;
+ case AES_192_BYTES:
+ sess->cipher.key_length_in_bytes = AES_192_BYTES;
+ aes_keyexp_fn = mb_ops->aux.keyexp.aes192;
+ break;
+ case AES_256_BYTES:
+ sess->cipher.key_length_in_bytes = AES_256_BYTES;
+ aes_keyexp_fn = mb_ops->aux.keyexp.aes256;
+ break;
+ default:
+ MB_LOG_ERR("Unsupported cipher key length");
+ return -1;
+ }
+
+ /* Expanded cipher keys */
+ (*aes_keyexp_fn)(xform->cipher.key.data,
+ sess->cipher.expanded_aes_keys.encode,
+ sess->cipher.expanded_aes_keys.decode);
+
+ return 0;
+}
+
+/** Parse crypto xform chain and set private session parameters */
+int
+aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
+ struct aesni_mb_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+
+ /* Select Crypto operation - hash then cipher / cipher then hash */
+ switch (aesni_mb_get_chain_order(xform)) {
+ case HASH_CIPHER:
+ sess->chain_order = HASH_CIPHER;
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ break;
+ case CIPHER_HASH:
+ sess->chain_order = CIPHER_HASH;
+ auth_xform = xform->next;
+ cipher_xform = xform;
+ break;
+ default:
+ MB_LOG_ERR("Unsupported operation chain order parameter");
+ return -1;
+ }
+
+ if (aesni_mb_set_session_auth_parameters(mb_ops, sess, auth_xform)) {
+ MB_LOG_ERR("Invalid/unsupported authentication parameters");
+ return -1;
+ }
+
+ if (aesni_mb_set_session_cipher_parameters(mb_ops, sess,
+ cipher_xform)) {
+ MB_LOG_ERR("Invalid/unsupported cipher parameters");
+ return -1;
+ }
+ return 0;
+}
+
+/** Get multi buffer session */
+static struct aesni_mb_session *
+get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
+{
+ struct aesni_mb_session *sess = NULL;
+
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (unlikely(op->sym->session->dev_type !=
+ RTE_CRYPTODEV_AESNI_MB_PMD))
+ return NULL;
+
+ sess = (struct aesni_mb_session *)op->sym->session->_private;
+ } else {
+ void *_sess = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+ return NULL;
+
+ sess = (struct aesni_mb_session *)
+ ((struct rte_cryptodev_sym_session *)_sess)->_private;
+
+ if (unlikely(aesni_mb_set_session_parameters(qp->ops,
+ sess, op->sym->xform) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ sess = NULL;
+ }
+ }
+
+ return sess;
+}
+
+/**
+ * Process a crypto operation and complete a JOB_AES_HMAC job structure for
+ * submission to the multi buffer library for processing.
+ *
+ * @param qp	queue pair
+ * @param op	crypto operation to process
+ * @param session	session associated with the crypto operation
+ *
+ * @return
+ * - Completed JOB_AES_HMAC structure pointer on success
+ * - NULL pointer if completion of JOB_AES_HMAC structure isn't possible
+ */
+static JOB_AES_HMAC *
+process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op,
+ struct aesni_mb_session *session)
+{
+ JOB_AES_HMAC *job;
+
+ struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
+ uint16_t m_offset = 0;
+
+ job = (*qp->ops->job.get_next)(&qp->mb_mgr);
+ if (unlikely(job == NULL))
+ return job;
+
+ /* Set crypto operation */
+ job->chain_order = session->chain_order;
+
+ /* Set cipher parameters */
+ job->cipher_direction = session->cipher.direction;
+ job->cipher_mode = session->cipher.mode;
+
+ job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
+ job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
+ job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.decode;
+
+
+ /* Set authentication parameters */
+ job->hash_alg = session->auth.algo;
+ if (job->hash_alg == AES_XCBC) {
+ job->_k1_expanded = session->auth.xcbc.k1_expanded;
+ job->_k2 = session->auth.xcbc.k2;
+ job->_k3 = session->auth.xcbc.k3;
+ } else {
+ job->hashed_auth_key_xor_ipad = session->auth.pads.inner;
+ job->hashed_auth_key_xor_opad = session->auth.pads.outer;
+ }
+
+ /* Mutable crypto operation parameters */
+ if (op->sym->m_dst) {
+ m_src = m_dst = op->sym->m_dst;
+
+ /* append space for output data to mbuf */
+ char *odata = rte_pktmbuf_append(m_dst,
+ rte_pktmbuf_data_len(op->sym->m_src));
+		if (odata == NULL) {
+			MB_LOG_ERR("failed to allocate space in destination "
+					"mbuf for source data");
+			return NULL;
+		}
+
+		memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*),
+				rte_pktmbuf_data_len(op->sym->m_src));
+ } else {
+ m_dst = m_src;
+ m_offset = op->sym->cipher.data.offset;
+ }
+
+ /* Set digest output location */
+ if (job->cipher_direction == DECRYPT) {
+ job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m_dst,
+ get_digest_byte_length(job->hash_alg));
+
+ if (job->auth_tag_output == NULL) {
+ MB_LOG_ERR("failed to allocate space in output mbuf "
+ "for temp digest");
+ return NULL;
+ }
+
+ memset(job->auth_tag_output, 0,
+				get_digest_byte_length(job->hash_alg));
+
+ } else {
+ job->auth_tag_output = op->sym->auth.digest.data;
+ }
+
+ /*
+	 * The multi-buffer library currently only supports returning a
+	 * truncated digest length, as specified in the relevant IPsec RFCs
+ */
+ job->auth_tag_output_len_in_bytes =
+ get_truncated_digest_byte_length(job->hash_alg);
+
+ /* Set IV parameters */
+ job->iv = op->sym->cipher.iv.data;
+ job->iv_len_in_bytes = op->sym->cipher.iv.length;
+
+ /* Data Parameter */
+ job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
+
+ job->cipher_start_src_offset_in_bytes = op->sym->cipher.data.offset;
+ job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
+
+ job->hash_start_src_offset_in_bytes = op->sym->auth.data.offset;
+ job->msg_len_to_hash_in_bytes = op->sym->auth.data.length;
+
+ /* Set user data to be crypto operation data struct */
+ job->user_data = op;
+ job->user_data2 = m_dst;
+
+ return job;
+}
+
+/**
+ * Process a completed job and return the crypto operation which it processed
+ *
+ * @param job JOB_AES_HMAC job to process
+ *
+ * @return
+ * - Returns the processed crypto operation; in the case of a HASH_CIPHER
+ *   operation the destination mbuf is trimmed of the temporary digest used
+ *   to verify the supplied digest
+ * - Returns NULL on invalid job
+ */
+static struct rte_crypto_op *
+post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
+{
+ struct rte_crypto_op *op =
+ (struct rte_crypto_op *)job->user_data;
+ struct rte_mbuf *m_dst =
+ (struct rte_mbuf *)job->user_data2;
+
+ if (op == NULL || m_dst == NULL)
+ return NULL;
+
+ /* set status as successful by default */
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ /* check if job has been processed */
+ if (unlikely(job->status != STS_COMPLETED)) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return op;
+ } else if (job->chain_order == HASH_CIPHER) {
+ /* Verify digest if required */
+ if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
+ job->auth_tag_output_len_in_bytes) != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+ /* trim area used for digest from mbuf */
+ rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
+ }
+
+ /* Free session if a session-less crypto op */
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ rte_mempool_put(qp->sess_mp, op->sym->session);
+ op->sym->session = NULL;
+ }
+
+ return op;
+}
+
+/**
+ * Process a completed JOB_AES_HMAC job and keep processing jobs until
+ * get_completed_job returns NULL
+ *
+ * @param qp Queue Pair to process
+ * @param job JOB_AES_HMAC job
+ *
+ * @return
+ * - Number of processed jobs
+ */
+static unsigned
+handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
+{
+ struct rte_crypto_op *op = NULL;
+ unsigned processed_jobs = 0;
+
+ while (job) {
+ processed_jobs++;
+ op = post_process_mb_job(qp, job);
+ if (op)
+ rte_ring_enqueue(qp->processed_ops, (void *)op);
+ else
+ qp->stats.dequeue_err_count++;
+ job = (*qp->ops->job.get_completed_job)(&qp->mb_mgr);
+ }
+
+ return processed_jobs;
+}
+
+static uint16_t
+aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct aesni_mb_session *sess;
+ struct aesni_mb_qp *qp = queue_pair;
+
+ JOB_AES_HMAC *job = NULL;
+
+ int i, processed_jobs = 0;
+
+ for (i = 0; i < nb_ops; i++) {
+#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
+		if (unlikely(ops[i]->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+			MB_LOG_ERR("PMD only supports symmetric crypto "
+				"operation requests, op (%p) is not a "
+				"symmetric operation.", ops[i]);
+ qp->stats.enqueue_err_count++;
+ goto flush_jobs;
+ }
+#endif
+ sess = get_session(qp, ops[i]);
+ if (unlikely(sess == NULL)) {
+ qp->stats.enqueue_err_count++;
+ goto flush_jobs;
+ }
+
+ job = process_crypto_op(qp, ops[i], sess);
+ if (unlikely(job == NULL)) {
+ qp->stats.enqueue_err_count++;
+ goto flush_jobs;
+ }
+
+ /* Submit Job */
+ job = (*qp->ops->job.submit)(&qp->mb_mgr);
+
+ /*
+ * If submit returns a processed job then handle it,
+ * before submitting subsequent jobs
+ */
+ if (job)
+ processed_jobs += handle_completed_jobs(qp, job);
+ }
+
+ if (processed_jobs == 0)
+ goto flush_jobs;
+ else
+ qp->stats.enqueued_count += processed_jobs;
+ return i;
+
+flush_jobs:
+ /*
+	 * If we haven't processed any jobs in the submit loop, then flush the
+	 * job queue to prevent the output from stalling
+ */
+ job = (*qp->ops->job.flush_job)(&qp->mb_mgr);
+ if (job)
+ qp->stats.enqueued_count += handle_completed_jobs(qp, job);
+
+ return i;
+}
+
+static uint16_t
+aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct aesni_mb_qp *qp = queue_pair;
+
+ unsigned nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+ (void **)ops, nb_ops);
+ qp->stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
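+
+/*
+ * Usage sketch (illustrative, not part of this driver): an application does
+ * not call the burst handlers above directly, it reaches them through the
+ * generic cryptodev burst API. Assuming a configured device 'dev_id' with
+ * queue pair 0 and a populated ops[] array of symmetric crypto operations:
+ *
+ *	uint16_t nb_enq = rte_cryptodev_enqueue_burst(dev_id, 0, ops, nb_ops);
+ *	uint16_t nb_deq = rte_cryptodev_dequeue_burst(dev_id, 0, ops, nb_ops);
+ */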
+
+
+static int cryptodev_aesni_mb_uninit(const char *name);
+
+static int
+cryptodev_aesni_mb_create(const char *name,
+ struct rte_crypto_vdev_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct aesni_mb_private *internals;
+ enum aesni_mb_vector_mode vector_mode;
+
+	/* Check CPU for AES instruction set support */
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
+ MB_LOG_ERR("AES instructions not supported by CPU");
+ return -EFAULT;
+ }
+
+ /* Check CPU for supported vector instruction set */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ vector_mode = RTE_AESNI_MB_AVX2;
+ else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+ vector_mode = RTE_AESNI_MB_AVX;
+ else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+ vector_mode = RTE_AESNI_MB_SSE;
+ else {
+ MB_LOG_ERR("Vector instructions are not supported by CPU");
+ return -EFAULT;
+ }
+
+ /* create a unique device name */
+ if (create_unique_device_name(crypto_dev_name,
+ RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
+ MB_LOG_ERR("failed to create unique cryptodev name");
+ return -EINVAL;
+ }
+
+
+ dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+ sizeof(struct aesni_mb_private), init_params->socket_id);
+ if (dev == NULL) {
+ MB_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->dev_type = RTE_CRYPTODEV_AESNI_MB_PMD;
+ dev->dev_ops = rte_aesni_mb_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = aesni_mb_pmd_dequeue_burst;
+ dev->enqueue_burst = aesni_mb_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_CPU_AESNI;
+
+ switch (vector_mode) {
+ case RTE_AESNI_MB_SSE:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+ break;
+ case RTE_AESNI_MB_AVX:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+ break;
+ case RTE_AESNI_MB_AVX2:
+ dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+ break;
+ default:
+ break;
+ }
+
+ /* Set vector instructions mode supported */
+ internals = dev->data->dev_private;
+
+ internals->vector_mode = vector_mode;
+ internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->max_nb_sessions;
+
+ return 0;
+init_error:
+	MB_LOG_ERR("driver %s: cryptodev_aesni_mb_create failed", name);
+
+ cryptodev_aesni_mb_uninit(crypto_dev_name);
+ return -EFAULT;
+}
+
+
+static int
+cryptodev_aesni_mb_init(const char *name,
+ const char *input_args)
+{
+ struct rte_crypto_vdev_init_params init_params = {
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ rte_socket_id()
+ };
+
+ rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+ init_params.socket_id);
+ RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
+ init_params.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
+ init_params.max_nb_sessions);
+
+ return cryptodev_aesni_mb_create(name, &init_params);
+}
+
+static int
+cryptodev_aesni_mb_uninit(const char *name)
+{
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD, "Closing AESNI crypto device %s on numa socket %u\n",
+ name, rte_socket_id());
+
+ return 0;
+}
+
+static struct rte_driver cryptodev_aesni_mb_pmd_drv = {
+ .name = CRYPTODEV_NAME_AESNI_MB_PMD,
+ .type = PMD_VDEV,
+ .init = cryptodev_aesni_mb_init,
+ .uninit = cryptodev_aesni_mb_uninit
+};
+
+PMD_REGISTER_DRIVER(cryptodev_aesni_mb_pmd_drv);
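+
+/*
+ * Usage note (illustrative, outside the scope of this file): as a virtual
+ * device this PMD is instantiated via the EAL --vdev option using the name
+ * given by CRYPTODEV_NAME_AESNI_MB_PMD; any device arguments are parsed by
+ * rte_cryptodev_parse_vdev_init_params() in cryptodev_aesni_mb_init() above.
+ */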
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
new file mode 100644
index 00000000..3806a66e
--- /dev/null
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -0,0 +1,473 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_aesni_mb_pmd_private.h"
+
+
+static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 14,
+ .max = 14,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 128,
+ .max = 128,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 24,
+ .max = 24,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 128,
+ .max = 128,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES XCBC HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+
+/** Configure device */
+static int
+aesni_mb_pmd_config(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+aesni_mb_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+aesni_mb_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+aesni_mb_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+aesni_mb_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+aesni_mb_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct aesni_mb_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+ }
+}
+
+
+/** Get device info */
+static void
+aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct aesni_mb_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->dev_type = dev->dev_type;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = aesni_mb_pmd_capabilities;
+ dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ }
+}
+
+/** Release queue pair */
+static int
+aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Set a unique name for the queue pair based on the dev_id and qp_id */
+static int
+aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct aesni_mb_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "aesni_mb_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place processed operations on */
+static struct rte_ring *
+aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
+ unsigned ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (r->prod.size >= ring_size) {
+ MB_LOG_INFO("Reusing existing ring %s for processed ops",
+ qp->name);
+ return r;
+ }
+
+ MB_LOG_ERR("Unable to reuse existing ring %s for processed ops",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ struct aesni_mb_qp *qp = NULL;
+ struct aesni_mb_private *internals = dev->data->dev_private;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ aesni_mb_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return -ENOMEM;
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (aesni_mb_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->ops = &job_ops[internals->vector_mode];
+
+ qp->processed_ops = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_ops == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = dev->data->session_pool;
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+
+ /* Initialise multi-buffer manager */
+ (*qp->ops->job.init_mgr)(&qp->mb_mgr);
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Start queue pair */
+static int
+aesni_mb_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+aesni_mb_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+aesni_mb_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the aesni multi-buffer session structure */
+static unsigned
+aesni_mb_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct aesni_mb_session);
+}
+
+/** Configure an AESNI multi-buffer session from a crypto xform chain */
+static void *
+aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ struct aesni_mb_private *internals = dev->data->dev_private;
+
+ if (unlikely(sess == NULL)) {
+ MB_LOG_ERR("invalid session struct");
+ return NULL;
+ }
+
+ if (aesni_mb_set_session_parameters(&job_ops[internals->vector_mode],
+ sess, xform) != 0) {
+		MB_LOG_ERR("failed to configure session parameters");
+ return NULL;
+ }
+
+ return sess;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+aesni_mb_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+{
+ /*
+	 * Currently we just reset the whole data structure; it needs to be
+	 * investigated whether a more selective reset of the key material
+	 * would be more performant
+ */
+ if (sess)
+ memset(sess, 0, sizeof(struct aesni_mb_session));
+}
+
+struct rte_cryptodev_ops aesni_mb_pmd_ops = {
+ .dev_configure = aesni_mb_pmd_config,
+ .dev_start = aesni_mb_pmd_start,
+ .dev_stop = aesni_mb_pmd_stop,
+ .dev_close = aesni_mb_pmd_close,
+
+ .stats_get = aesni_mb_pmd_stats_get,
+ .stats_reset = aesni_mb_pmd_stats_reset,
+
+ .dev_infos_get = aesni_mb_pmd_info_get,
+
+ .queue_pair_setup = aesni_mb_pmd_qp_setup,
+ .queue_pair_release = aesni_mb_pmd_qp_release,
+ .queue_pair_start = aesni_mb_pmd_qp_start,
+ .queue_pair_stop = aesni_mb_pmd_qp_stop,
+ .queue_pair_count = aesni_mb_pmd_qp_count,
+
+ .session_get_size = aesni_mb_pmd_session_get_size,
+ .session_configure = aesni_mb_pmd_session_configure,
+ .session_clear = aesni_mb_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
new file mode 100644
index 00000000..949d9a60
--- /dev/null
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -0,0 +1,229 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_AESNI_MB_PMD_PRIVATE_H_
+#define _RTE_AESNI_MB_PMD_PRIVATE_H_
+
+#include "aesni_mb_ops.h"
+
+#define MB_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ CRYPTODEV_NAME_AESNI_MB_PMD, \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
+#define MB_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ CRYPTODEV_NAME_AESNI_MB_PMD, \
+ __func__, __LINE__, ## args)
+
+#define MB_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ CRYPTODEV_NAME_AESNI_MB_PMD, \
+ __func__, __LINE__, ## args)
+#else
+#define MB_LOG_INFO(fmt, args...)
+#define MB_LOG_DBG(fmt, args...)
+#endif
+
+#define HMAC_IPAD_VALUE (0x36)
+#define HMAC_OPAD_VALUE (0x5C)
+
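+/*
+ * Illustrative sketch only (never compiled into this PMD): the inner/outer
+ * HMAC pads used by the authentication precompute step are conceptually
+ * derived as below. The hash_one_block_t typedef, the example function name
+ * and the assumption that keylen <= blocksize are illustrative assumptions,
+ * not part of the multi-buffer library API.
+ */
+#ifdef AESNI_MB_PMD_DOC_EXAMPLE_ONLY
+typedef void (*hash_one_block_t)(const void *data, void *digest);
+
+static inline void
+example_hmac_pad_precompute(hash_one_block_t hash_oneblock_fn,
+		const uint8_t *key, unsigned keylen, unsigned blocksize,
+		uint8_t *ipad_state, uint8_t *opad_state)
+{
+	uint8_t ipad[128], opad[128];
+	unsigned i;
+
+	/* Zero-pad the key to the hash block size and XOR with the pad bytes */
+	for (i = 0; i < blocksize; i++) {
+		uint8_t k = (i < keylen) ? key[i] : 0;
+
+		ipad[i] = k ^ HMAC_IPAD_VALUE;
+		opad[i] = k ^ HMAC_OPAD_VALUE;
+	}
+
+	/* Hash one block of each pad to obtain the precomputed HMAC states */
+	(*hash_oneblock_fn)(ipad, ipad_state);
+	(*hash_oneblock_fn)(opad, opad_state);
+}
+#endif /* AESNI_MB_PMD_DOC_EXAMPLE_ONLY */
+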
+static const unsigned auth_blocksize[] = {
+ [MD5] = 64,
+ [SHA1] = 64,
+ [SHA_224] = 64,
+ [SHA_256] = 64,
+ [SHA_384] = 128,
+ [SHA_512] = 128,
+ [AES_XCBC] = 16,
+};
+
+/**
+ * Get the blocksize in bytes for a specified authentication algorithm
+ *
+ * @note This function will not return a valid value for an invalid
+ * authentication algorithm
+ */
+static inline unsigned
+get_auth_algo_blocksize(JOB_HASH_ALG algo)
+{
+ return auth_blocksize[algo];
+}
+
+static const unsigned auth_truncated_digest_byte_lengths[] = {
+ [MD5] = 12,
+ [SHA1] = 12,
+ [SHA_224] = 14,
+ [SHA_256] = 16,
+ [SHA_384] = 24,
+ [SHA_512] = 32,
+ [AES_XCBC] = 12,
+};
+
+/**
+ * Get the IPsec specified truncated length in bytes of the HMAC digest for a
+ * specified authentication algorithm
+ *
+ * @note This function will not return a valid value for an invalid
+ * authentication algorithm
+ */
+static inline unsigned
+get_truncated_digest_byte_length(JOB_HASH_ALG algo)
+{
+ return auth_truncated_digest_byte_lengths[algo];
+}
+
+static const unsigned auth_digest_byte_lengths[] = {
+ [MD5] = 16,
+ [SHA1] = 20,
+ [SHA_224] = 28,
+ [SHA_256] = 32,
+ [SHA_384] = 48,
+ [SHA_512] = 64,
+ [AES_XCBC] = 16,
+};
+
+/**
+ * Get the output digest size in bytes for a specified authentication algorithm
+ *
+ * @note This function will not return a valid value for an invalid
+ * authentication algorithm
+ */
+static inline unsigned
+get_digest_byte_length(JOB_HASH_ALG algo)
+{
+ return auth_digest_byte_lengths[algo];
+}
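+
+/*
+ * For example (illustrative): for HMAC-SHA1 the full digest returned by
+ * get_digest_byte_length(SHA1) is 20 bytes, while the IPsec truncated
+ * digest returned by get_truncated_digest_byte_length(SHA1) is the 12 byte
+ * HMAC-SHA1-96 value specified in RFC 2404.
+ */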
+
+
+/** private data structure for each virtual AESNI device */
+struct aesni_mb_private {
+ enum aesni_mb_vector_mode vector_mode;
+ /**< CPU vector instruction set mode */
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+ unsigned max_nb_sessions;
+ /**< Max number of sessions supported by device */
+};
+
+/** AESNI Multi buffer queue pair */
+struct aesni_mb_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_LEN];
+ /**< Unique Queue Pair Name */
+ const struct aesni_mb_ops *ops;
+ /**< Vector mode dependent pointer table of the multi-buffer APIs */
+ MB_MGR mb_mgr;
+ /**< Multi-buffer instance */
+ struct rte_ring *processed_ops;
+	/**< Ring for placing processed operations */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats stats;
+ /**< Queue pair statistics */
+} __rte_cache_aligned;
+
+
+/** AES-NI multi-buffer private session structure */
+struct aesni_mb_session {
+ JOB_CHAIN_ORDER chain_order;
+
+ /** Cipher Parameters */
+ struct {
+ /** Cipher direction - encrypt / decrypt */
+ JOB_CIPHER_DIRECTION direction;
+ /** Cipher mode - CBC / Counter */
+ JOB_CIPHER_MODE mode;
+
+ uint64_t key_length_in_bytes;
+
+ struct {
+ uint32_t encode[60] __rte_aligned(16);
+ /**< encode key */
+ uint32_t decode[60] __rte_aligned(16);
+ /**< decode key */
+ } expanded_aes_keys;
+		/**< Expanded AES keys - allocating space to
+		 * contain the maximum expanded key size, which
+		 * is 240 bytes for 256-bit AES, calculated as
+		 * (AES block size (16 bytes) *
+		 * ((number of rounds) + 1))
+ */
+ } cipher;
+
+ /** Authentication Parameters */
+ struct {
+ JOB_HASH_ALG algo; /**< Authentication Algorithm */
+ union {
+ struct {
+ uint8_t inner[128] __rte_aligned(16);
+ /**< inner pad */
+ uint8_t outer[128] __rte_aligned(16);
+ /**< outer pad */
+ } pads;
+ /**< HMAC Authentication pads -
+ * allocating space for the maximum pad
+ * size supported which is 128 bytes for
+ * SHA512
+ */
+
+ struct {
+ uint32_t k1_expanded[44] __rte_aligned(16);
+ /**< k1 (expanded key). */
+ uint8_t k2[16] __rte_aligned(16);
+ /**< k2. */
+ uint8_t k3[16] __rte_aligned(16);
+ /**< k3. */
+ } xcbc;
+ /**< Expanded XCBC authentication keys */
+ };
+ } auth;
+} __rte_cache_aligned;
+
+
+/**
+ * Parse a symmetric crypto xform chain and set the AESNI multi-buffer
+ * private session parameters
+ */
+extern int
+aesni_mb_set_session_parameters(const struct aesni_mb_ops *mb_ops,
+ struct aesni_mb_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops;
+
+
+
+#endif /* _RTE_AESNI_MB_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/aesni_mb/rte_pmd_aesni_version.map b/drivers/crypto/aesni_mb/rte_pmd_aesni_version.map
new file mode 100644
index 00000000..ad607bbe
--- /dev/null
+++ b/drivers/crypto/aesni_mb/rte_pmd_aesni_version.map
@@ -0,0 +1,3 @@
+DPDK_2.2 {
+ local: *;
+};
diff --git a/drivers/crypto/null/Makefile b/drivers/crypto/null/Makefile
new file mode 100644
index 00000000..2173277b
--- /dev/null
+++ b/drivers/crypto/null/Makefile
@@ -0,0 +1,59 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+
+# library name
+LIB = librte_pmd_null_crypto.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_null_crypto_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null_crypto_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null_crypto_pmd_ops.c
+
+# export include files
+SYMLINK-y-include +=
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += lib/librte_cryptodev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/null/null_crypto_pmd.c b/drivers/crypto/null/null_crypto_pmd.c
new file mode 100644
index 00000000..bdaf13ca
--- /dev/null
+++ b/drivers/crypto/null/null_crypto_pmd.c
@@ -0,0 +1,278 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+
+#include "null_crypto_pmd_private.h"
+
+/**
+ * Global static parameter used to create a unique name for each crypto device.
+ */
+static unsigned unique_name_id;
+
+static inline int
+create_unique_device_name(char *name, size_t size)
+{
+ int ret;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ ret = snprintf(name, size, "%s_%u", CRYPTODEV_NAME_NULL_PMD,
+ unique_name_id++);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+
+/** verify and set session parameters */
+int
+null_crypto_set_session_parameters(
+ struct null_crypto_session *sess __rte_unused,
+ const struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL) {
+ return -1;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next == NULL) {
+ /* Authentication Only */
+ if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
+ return 0;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ /* Authentication then Cipher */
+ if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
+ xform->next->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
+ return 0;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next == NULL) {
+ /* Cipher Only */
+ if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
+ return 0;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ /* Cipher then Authentication */
+ if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL &&
+ xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
+ return 0;
+ }
+
+ return -1;
+}
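+
+/*
+ * Illustrative sketch (not part of this file): a cipher-then-auth NULL xform
+ * chain accepted by the function above could be built as follows; field and
+ * enum names are from the cryptodev symmetric crypto API, error handling and
+ * the remaining fields are omitted for brevity:
+ *
+ *	struct rte_crypto_sym_xform auth_xform = {
+ *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ *		.next = NULL,
+ *		.auth = { .algo = RTE_CRYPTO_AUTH_NULL },
+ *	};
+ *	struct rte_crypto_sym_xform cipher_xform = {
+ *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ *		.next = &auth_xform,
+ *		.cipher = { .algo = RTE_CRYPTO_CIPHER_NULL },
+ *	};
+ */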
+
+/** Process crypto operation for mbuf */
+static int
+process_op(const struct null_crypto_qp *qp, struct rte_crypto_op *op,
+ struct null_crypto_session *sess __rte_unused)
+{
+ /* set status as successful by default */
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+ /*
+ * if crypto session and operation are valid just enqueue the packet
+ * in the processed ring
+ */
+ return rte_ring_enqueue(qp->processed_pkts, (void *)op);
+}
+
+static struct null_crypto_session *
+get_session(struct null_crypto_qp *qp, struct rte_crypto_sym_op *op)
+{
+ struct null_crypto_session *sess;
+
+ if (op->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (unlikely(op->session == NULL ||
+ op->session->dev_type != RTE_CRYPTODEV_NULL_PMD))
+ return NULL;
+
+ sess = (struct null_crypto_session *)op->session->_private;
+ } else {
+ struct rte_cryptodev_session *c_sess = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
+ return NULL;
+
+ sess = (struct null_crypto_session *)c_sess->_private;
+
+		if (null_crypto_set_session_parameters(sess, op->xform) != 0) {
+			rte_mempool_put(qp->sess_mp, (void *)c_sess);
+			return NULL;
+		}
+ }
+
+ return sess;
+}
+
+/** Enqueue burst */
+static uint16_t
+null_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct null_crypto_session *sess;
+ struct null_crypto_qp *qp = queue_pair;
+
+ int i, retval;
+
+ for (i = 0; i < nb_ops; i++) {
+ sess = get_session(qp, ops[i]->sym);
+ if (unlikely(sess == NULL))
+ goto enqueue_err;
+
+ retval = process_op(qp, ops[i], sess);
+ if (unlikely(retval < 0))
+ goto enqueue_err;
+ }
+
+ qp->qp_stats.enqueued_count += i;
+ return i;
+
+enqueue_err:
+ if (ops[i])
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+
+ qp->qp_stats.enqueue_err_count++;
+ return i;
+}
+
+/** Dequeue burst */
+static uint16_t
+null_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct null_crypto_qp *qp = queue_pair;
+
+ unsigned nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+ (void **)ops, nb_ops);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+static int cryptodev_null_uninit(const char *name);
+
+/** Create crypto device */
+static int
+cryptodev_null_create(const char *name,
+ struct rte_crypto_vdev_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct null_crypto_private *internals;
+
+ /* create a unique device name */
+ if (create_unique_device_name(crypto_dev_name,
+ RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
+ NULL_CRYPTO_LOG_ERR("failed to create unique cryptodev name");
+ return -EINVAL;
+ }
+
+ dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+ sizeof(struct null_crypto_private),
+ init_params->socket_id);
+ if (dev == NULL) {
+ NULL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->dev_type = RTE_CRYPTODEV_NULL_PMD;
+ dev->dev_ops = null_crypto_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = null_crypto_pmd_dequeue_burst;
+ dev->enqueue_burst = null_crypto_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+ internals = dev->data->dev_private;
+
+ internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->max_nb_sessions;
+
+ return 0;
+
+init_error:
+ NULL_CRYPTO_LOG_ERR("driver %s: cryptodev_null_create failed", name);
+ cryptodev_null_uninit(crypto_dev_name);
+
+ return -EFAULT;
+}
+
+/** Initialise null crypto device */
+static int
+cryptodev_null_init(const char *name,
+ const char *input_args)
+{
+ struct rte_crypto_vdev_init_params init_params = {
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ rte_socket_id()
+ };
+
+ rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+ init_params.socket_id);
+ RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
+ init_params.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
+ init_params.max_nb_sessions);
+
+ return cryptodev_null_create(name, &init_params);
+}
+
+/** Uninitialise null crypto device */
+static int
+cryptodev_null_uninit(const char *name)
+{
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD, "Closing null crypto device %s on numa socket %u\n",
+ name, rte_socket_id());
+
+ return 0;
+}
+
+static struct rte_driver cryptodev_null_pmd_drv = {
+ .name = CRYPTODEV_NAME_NULL_PMD,
+ .type = PMD_VDEV,
+ .init = cryptodev_null_init,
+ .uninit = cryptodev_null_uninit
+};
+
+PMD_REGISTER_DRIVER(cryptodev_null_pmd_drv);
diff --git a/drivers/crypto/null/null_crypto_pmd_ops.c b/drivers/crypto/null/null_crypto_pmd_ops.c
new file mode 100644
index 00000000..cf1a5196
--- /dev/null
+++ b/drivers/crypto/null/null_crypto_pmd_ops.c
@@ -0,0 +1,355 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "null_crypto_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities null_crypto_pmd_capabilities[] = {
+ { /* NULL (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, },
+ }, },
+ },
+ { /* NULL (CIPHER) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ }
+ }, },
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/** Configure device */
+static int
+null_crypto_pmd_config(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+null_crypto_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+null_crypto_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+null_crypto_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Get device statistics */
+static void
+null_crypto_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct null_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+null_crypto_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct null_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+
+/** Get device info */
+static void
+null_crypto_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct null_crypto_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->dev_type = dev->dev_type;
+ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = null_crypto_pmd_capabilities;
+ }
+}
+
+/** Release queue pair */
+static int
+null_crypto_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Set a unique name for the queue pair based on the dev_id and qp_id */
+static int
+null_crypto_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct null_crypto_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "null_crypto_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place processed packets on */
+static struct rte_ring *
+null_crypto_pmd_qp_create_processed_pkts_ring(struct null_crypto_qp *qp,
+ unsigned ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (r->prod.size >= ring_size) {
+ NULL_CRYPTO_LOG_INFO(
+ "Reusing existing ring %s for processed packets",
+ qp->name);
+ return r;
+ }
+
+ NULL_CRYPTO_LOG_INFO(
+ "Unable to reuse existing ring %s for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+null_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ struct null_crypto_private *internals = dev->data->dev_private;
+ struct null_crypto_qp *qp;
+ int retval;
+
+ if (qp_id >= internals->max_nb_qpairs) {
+ NULL_CRYPTO_LOG_ERR("Invalid qp_id %u, greater than maximum "
+ "number of queue pairs supported (%u).",
+ qp_id, internals->max_nb_qpairs);
+ return (-EINVAL);
+ }
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ null_crypto_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("Null Crypto PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL) {
+ NULL_CRYPTO_LOG_ERR("Failed to allocate queue pair memory");
+ return (-ENOMEM);
+ }
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ retval = null_crypto_pmd_qp_set_unique_name(dev, qp);
+ if (retval) {
+ NULL_CRYPTO_LOG_ERR("Failed to create unique name for null "
+ "crypto device");
+ goto qp_setup_cleanup;
+ }
+
+ qp->processed_pkts = null_crypto_pmd_qp_create_processed_pkts_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_pkts == NULL) {
+		NULL_CRYPTO_LOG_ERR("Failed to create processed packets ring "
+				"for null crypto device");
+ goto qp_setup_cleanup;
+ }
+
+ qp->sess_mp = dev->data->session_pool;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Start queue pair */
+static int
+null_crypto_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+null_crypto_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+null_crypto_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the NULL crypto session structure */
+static unsigned
+null_crypto_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct null_crypto_session);
+}
+
+/** Configure a null crypto session from a crypto xform chain */
+static void *
+null_crypto_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ int retval;
+
+ if (unlikely(sess == NULL)) {
+ NULL_CRYPTO_LOG_ERR("invalid session struct");
+ return NULL;
+ }
+ retval = null_crypto_set_session_parameters(
+ (struct null_crypto_session *)sess, xform);
+ if (retval != 0) {
+		NULL_CRYPTO_LOG_ERR("failed to configure session parameters");
+ return NULL;
+ }
+
+ return sess;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+null_crypto_pmd_session_clear(struct rte_cryptodev *dev __rte_unused,
+ void *sess)
+{
+ if (sess)
+ memset(sess, 0, sizeof(struct null_crypto_session));
+}
+
+struct rte_cryptodev_ops pmd_ops = {
+ .dev_configure = null_crypto_pmd_config,
+ .dev_start = null_crypto_pmd_start,
+ .dev_stop = null_crypto_pmd_stop,
+ .dev_close = null_crypto_pmd_close,
+
+ .stats_get = null_crypto_pmd_stats_get,
+ .stats_reset = null_crypto_pmd_stats_reset,
+
+ .dev_infos_get = null_crypto_pmd_info_get,
+
+ .queue_pair_setup = null_crypto_pmd_qp_setup,
+ .queue_pair_release = null_crypto_pmd_qp_release,
+ .queue_pair_start = null_crypto_pmd_qp_start,
+ .queue_pair_stop = null_crypto_pmd_qp_stop,
+ .queue_pair_count = null_crypto_pmd_qp_count,
+
+ .session_get_size = null_crypto_pmd_session_get_size,
+ .session_configure = null_crypto_pmd_session_configure,
+ .session_clear = null_crypto_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *null_crypto_pmd_ops = &pmd_ops;
diff --git a/drivers/crypto/null/null_crypto_pmd_private.h b/drivers/crypto/null/null_crypto_pmd_private.h
new file mode 100644
index 00000000..2a4c739c
--- /dev/null
+++ b/drivers/crypto/null/null_crypto_pmd_private.h
@@ -0,0 +1,93 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NULL_CRYPTO_PMD_PRIVATE_H_
+#define _NULL_CRYPTO_PMD_PRIVATE_H_
+
+#include "rte_config.h"
+
+#define NULL_CRYPTO_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ CRYPTODEV_NAME_NULL_PMD, \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_NULL_CRYPTO_DEBUG
+#define NULL_CRYPTO_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ CRYPTODEV_NAME_NULL_PMD, \
+ __func__, __LINE__, ## args)
+
+#define NULL_CRYPTO_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ CRYPTODEV_NAME_NULL_PMD, \
+ __func__, __LINE__, ## args)
+#else
+#define NULL_CRYPTO_LOG_INFO(fmt, args...)
+#define NULL_CRYPTO_LOG_DBG(fmt, args...)
+#endif
+
+
+/** private data structure for each NULL crypto device */
+struct null_crypto_private {
+ unsigned max_nb_qpairs; /**< Max number of queue pairs */
+ unsigned max_nb_sessions; /**< Max number of sessions */
+};
+
+/** NULL crypto queue pair */
+struct null_crypto_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_pkts;
+	/**< Ring for placing processed packets */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+} __rte_cache_aligned;
+
+
+/** NULL crypto private session structure */
+struct null_crypto_session {
+ uint32_t reserved;
+} __rte_cache_aligned;
+
+/** Set and validate NULL crypto session parameters */
+extern int
+null_crypto_set_session_parameters(struct null_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *null_crypto_pmd_ops;
+
+#endif /* _NULL_CRYPTO_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/null/rte_pmd_null_crypto_version.map b/drivers/crypto/null/rte_pmd_null_crypto_version.map
new file mode 100644
index 00000000..dc4d417b
--- /dev/null
+++ b/drivers/crypto/null/rte_pmd_null_crypto_version.map
@@ -0,0 +1,3 @@
+DPDK_16.04 {
+ local: *;
+};
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile
new file mode 100644
index 00000000..258c2d5f
--- /dev/null
+++ b/drivers/crypto/qat/Makefile
@@ -0,0 +1,64 @@
+# BSD LICENSE
+#
+# Copyright(c) 2015 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_qat.a
+
+# library version
+LIBABIVER := 1
+
+# build flags
+CFLAGS += $(WERROR_FLAGS)
+
+# external library include paths
+CFLAGS += -I$(SRCDIR)/qat_adf
+LDLIBS += -lcrypto
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_crypto.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_qp.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_adf/qat_algs_build_desc.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += rte_qat_cryptodev.c
+
+# export include files
+SYMLINK-y-include +=
+
+# versioning export map
+EXPORT_MAP := rte_pmd_qat_version.map
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += lib/librte_cryptodev
+
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h b/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
new file mode 100644
index 00000000..47f1c91a
--- /dev/null
+++ b/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
@@ -0,0 +1,174 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ * Copyright(c) 2015 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * qat-linux@intel.com
+ *
+ * BSD LICENSE
+ * Copyright(c) 2015 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
+#define ADF_TRANSPORT_ACCESS_MACROS_H
+
+/* CSR write macro */
+#define ADF_CSR_WR(csrAddr, csrOffset, val) \
+ (void)((*((volatile uint32_t *)(((uint8_t *)csrAddr) + csrOffset)) \
+ = (val)))
+
+/* CSR read macro */
+#define ADF_CSR_RD(csrAddr, csrOffset) \
+ (*((volatile uint32_t *)(((uint8_t *)csrAddr) + csrOffset)))
+
+#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
+#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
+#define ADF_RING_CSR_RING_CONFIG 0x000
+#define ADF_RING_CSR_RING_LBASE 0x040
+#define ADF_RING_CSR_RING_UBASE 0x080
+#define ADF_RING_CSR_RING_HEAD 0x0C0
+#define ADF_RING_CSR_RING_TAIL 0x100
+#define ADF_RING_CSR_E_STAT 0x14C
+#define ADF_RING_CSR_INT_SRCSEL 0x174
+#define ADF_RING_CSR_INT_SRCSEL_2 0x178
+#define ADF_RING_CSR_INT_COL_EN 0x17C
+#define ADF_RING_CSR_INT_COL_CTL 0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
+#define ADF_RING_BUNDLE_SIZE 0x1000
+#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A
+#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05
+#define ADF_COALESCING_MIN_TIME 0x1FF
+#define ADF_COALESCING_MAX_TIME 0xFFFFF
+#define ADF_COALESCING_DEF_TIME 0x27FF
+#define ADF_RING_NEAR_WATERMARK_512 0x08
+#define ADF_RING_NEAR_WATERMARK_0 0x00
+#define ADF_RING_EMPTY_SIG 0x7F7F7F7F
+
+/* Valid internal ring size values */
+#define ADF_RING_SIZE_128 0x01
+#define ADF_RING_SIZE_256 0x02
+#define ADF_RING_SIZE_512 0x03
+#define ADF_RING_SIZE_4K 0x06
+#define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_4M 0x10
+#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
+#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
+#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
+
+#define ADF_NUM_BUNDLES_PER_DEV 1
+#define ADF_NUM_SYM_QPS_PER_BUNDLE 2
+
+/* Valid internal msg size values */
+#define ADF_MSG_SIZE_32 0x01
+#define ADF_MSG_SIZE_64 0x02
+#define ADF_MSG_SIZE_128 0x04
+#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
+#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
+
+/* Size to bytes conversion macros for ring and msg size values */
+#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
+#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
+#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
+#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
+
+/* Minimum ring buffer size for memory allocation */
+#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
+ ADF_RING_SIZE_4K : SIZE)
+#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
+#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
+ SIZE) & ~0x4)
+/* Max outstanding requests */
+#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
+ ((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1)
+#define BUILD_RING_CONFIG(size) \
+ ((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
+ | (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+ | size)
+#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \
+ ((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM) \
+ | (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+ | size)
+#define BUILD_RING_BASE_ADDR(addr, size) \
+ ((addr >> 6) & (0xFFFFFFFFFFFFFFFFULL << size))
+#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_HEAD + (ring << 2))
+#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_TAIL + (ring << 2))
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+ ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_E_STAT)
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_CONFIG + (ring << 2), value)
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
+do { \
+ uint32_t l_base = 0, u_base = 0; \
+ l_base = (uint32_t)(value & 0xFFFFFFFF); \
+ u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_UBASE + (ring << 2), u_base); \
+} while (0)
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_HEAD + (ring << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_RING_TAIL + (ring << 2), value)
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+do { \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
+} while (0)
+#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_COL_EN, value)
+#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_COL_CTL, \
+ ADF_RING_CSR_INT_COL_CTL_ENABLE | value)
+#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
+ ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+ ADF_RING_CSR_INT_FLAG_AND_COL, value)
+#endif
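
These macros reduce every ring CSR access to pointer arithmetic on the bank's MMIO window: each bank occupies ADF_RING_BUNDLE_SIZE bytes and each per-ring register is a 32-bit slot at base + bank * 0x1000 + register offset + (ring << 2). Below is a small host-side sketch that exercises only the offset math against an ordinary buffer standing in for the mapped CSR region (assuming adf_transport_access_macros.h is on the include path); it is illustrative, not a device access.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "adf_transport_access_macros.h"

int main(void)
{
	/* Pretend CSR window covering two banks of ring registers. */
	uint8_t *csr = calloc(2, ADF_RING_BUNDLE_SIZE);
	uint64_t ring_base = 0x12345000ULL;

	if (csr == NULL)
		return 1;

	/* Split the 64-bit ring base into the LBASE/UBASE register pair. */
	WRITE_CSR_RING_BASE(csr, 1, 3, BUILD_RING_BASE_ADDR(ring_base,
				ADF_DEFAULT_RING_SIZE));

	/* Tail update for ring 3 of bank 1, then read head and tail back. */
	WRITE_CSR_RING_TAIL(csr, 1, 3, 0x40);
	printf("tail=0x%x head=0x%x\n",
		READ_CSR_RING_TAIL(csr, 1, 3),
		READ_CSR_RING_HEAD(csr, 1, 3));

	free(csr);
	return 0;
}
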
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_fw.h b/drivers/crypto/qat/qat_adf/icp_qat_fw.h
new file mode 100644
index 00000000..498ee833
--- /dev/null
+++ b/drivers/crypto/qat/qat_adf/icp_qat_fw.h
@@ -0,0 +1,316 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ * Copyright(c) 2015 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * qat-linux@intel.com
+ *
+ * BSD LICENSE
+ * Copyright(c) 2015 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ICP_QAT_FW_H_
+#define _ICP_QAT_FW_H_
+#include <linux/types.h>
+#include "icp_qat_hw.h"
+
+#define QAT_FIELD_SET(flags, val, bitpos, mask) \
+{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \
+ (((val) & (mask)) << (bitpos))) ; }
+
+#define QAT_FIELD_GET(flags, bitpos, mask) \
+ (((flags) >> (bitpos)) & (mask))
+
+#define ICP_QAT_FW_REQ_DEFAULT_SZ 128
+#define ICP_QAT_FW_RESP_DEFAULT_SZ 32
+#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8
+#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF
+#define ICP_QAT_FW_NUM_LONGWORDS_1 1
+#define ICP_QAT_FW_NUM_LONGWORDS_2 2
+#define ICP_QAT_FW_NUM_LONGWORDS_3 3
+#define ICP_QAT_FW_NUM_LONGWORDS_4 4
+#define ICP_QAT_FW_NUM_LONGWORDS_5 5
+#define ICP_QAT_FW_NUM_LONGWORDS_6 6
+#define ICP_QAT_FW_NUM_LONGWORDS_7 7
+#define ICP_QAT_FW_NUM_LONGWORDS_10 10
+#define ICP_QAT_FW_NUM_LONGWORDS_13 13
+#define ICP_QAT_FW_NULL_REQ_SERV_ID 1
+
+enum icp_qat_fw_comn_resp_serv_id {
+ ICP_QAT_FW_COMN_RESP_SERV_NULL,
+ ICP_QAT_FW_COMN_RESP_SERV_CPM_FW,
+ ICP_QAT_FW_COMN_RESP_SERV_DELIMITER
+};
+
+enum icp_qat_fw_comn_request_id {
+ ICP_QAT_FW_COMN_REQ_NULL = 0,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7,
+ ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9,
+ ICP_QAT_FW_COMN_REQ_DELIMITER
+};
+
+struct icp_qat_fw_comn_req_hdr_cd_pars {
+ union {
+ struct {
+ uint64_t content_desc_addr;
+ uint16_t content_desc_resrvd1;
+ uint8_t content_desc_params_sz;
+ uint8_t content_desc_hdr_resrvd2;
+ uint32_t content_desc_resrvd3;
+ } s;
+ struct {
+ uint32_t serv_specif_fields[4];
+ } s1;
+ } u;
+};
+
+struct icp_qat_fw_comn_req_mid {
+ uint64_t opaque_data;
+ uint64_t src_data_addr;
+ uint64_t dest_data_addr;
+ uint32_t src_length;
+ uint32_t dst_length;
+};
+
+struct icp_qat_fw_comn_req_cd_ctrl {
+ uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5];
+};
+
+struct icp_qat_fw_comn_req_hdr {
+ uint8_t resrvd1;
+ uint8_t service_cmd_id;
+ uint8_t service_type;
+ uint8_t hdr_flags;
+ uint16_t serv_specif_flags;
+ uint16_t comn_req_flags;
+};
+
+struct icp_qat_fw_comn_req_rqpars {
+ uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13];
+};
+
+struct icp_qat_fw_comn_req {
+ struct icp_qat_fw_comn_req_hdr comn_hdr;
+ struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+ struct icp_qat_fw_comn_req_mid comn_mid;
+ struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+ struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+struct icp_qat_fw_comn_error {
+ uint8_t xlat_err_code;
+ uint8_t cmp_err_code;
+};
+
+struct icp_qat_fw_comn_resp_hdr {
+ uint8_t resrvd1;
+ uint8_t service_id;
+ uint8_t response_type;
+ uint8_t hdr_flags;
+ struct icp_qat_fw_comn_error comn_error;
+ uint8_t comn_status;
+ uint8_t cmd_id;
+};
+
+struct icp_qat_fw_comn_resp {
+ struct icp_qat_fw_comn_resp_hdr comn_hdr;
+ uint64_t opaque_data;
+ uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1
+#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
+#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
+ icp_qat_fw_comn_req_hdr_t.service_type
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \
+ icp_qat_fw_comn_req_hdr_t.service_type = val
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \
+ icp_qat_fw_comn_req_hdr_t.service_cmd_id
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \
+ icp_qat_fw_comn_req_hdr_t.service_cmd_id = val
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \
+ ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \
+ ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \
+ QAT_FIELD_GET(hdr_flags, \
+ ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \
+ (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \
+ QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+ ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+ ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \
+ (((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
+ ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
+
+#define QAT_COMN_PTR_TYPE_BITPOS 0
+#define QAT_COMN_PTR_TYPE_MASK 0x1
+#define QAT_COMN_CD_FLD_TYPE_BITPOS 1
+#define QAT_COMN_CD_FLD_TYPE_MASK 0x1
+#define QAT_COMN_PTR_TYPE_FLAT 0x0
+#define QAT_COMN_PTR_TYPE_SGL 0x1
+#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0
+#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1
+
+#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
+ ((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
+ | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+ QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \
+ QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+ QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4
+#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0
+#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0
+#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F
+
+#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \
+ ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+ >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+ { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK)); }
+
+#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \
+ (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+ { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }
+
+#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
+#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
+#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
+#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
+#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
+
+#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \
+ ((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \
+ QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \
+ (((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \
+ QAT_COMN_RESP_CMP_STATUS_BITPOS) | \
+ (((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \
+ QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \
+ (((eolb) & QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \
+ QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS))
+
+#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \
+ QAT_COMN_RESP_CRYPTO_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \
+ QAT_COMN_RESP_CMP_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \
+ QAT_COMN_RESP_XLAT_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \
+ QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \
+ QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK)
+
+#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
+#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1
+#define ERR_CODE_NO_ERROR 0
+#define ERR_CODE_INVALID_BLOCK_TYPE -1
+#define ERR_CODE_NO_MATCH_ONES_COMP -2
+#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3
+#define ERR_CODE_INCOMPLETE_LEN -4
+#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5
+#define ERR_CODE_RPT_GT_SPEC_LEN -6
+#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7
+#define ERR_CODE_INV_DIS_CODE_LEN -8
+#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9
+#define ERR_CODE_DIS_TOO_FAR_BACK -10
+#define ERR_CODE_OVERFLOW_ERROR -11
+#define ERR_CODE_SOFT_ERROR -12
+#define ERR_CODE_FATAL_ERROR -13
+#define ERR_CODE_SSM_ERROR -14
+#define ERR_CODE_ENDPOINT_ERROR -15
+
+enum icp_qat_fw_slice {
+ ICP_QAT_FW_SLICE_NULL = 0,
+ ICP_QAT_FW_SLICE_CIPHER = 1,
+ ICP_QAT_FW_SLICE_AUTH = 2,
+ ICP_QAT_FW_SLICE_DRAM_RD = 3,
+ ICP_QAT_FW_SLICE_DRAM_WR = 4,
+ ICP_QAT_FW_SLICE_COMP = 5,
+ ICP_QAT_FW_SLICE_XLAT = 6,
+ ICP_QAT_FW_SLICE_DELIMITER
+};
+#endif
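
All of the request and response flag words above are manipulated through the generic QAT_FIELD_SET/QAT_FIELD_GET pair, so a field is always cleared before the new value is OR-ed in at its bit position. A brief sketch of how a common request header could be stamped valid and its pointer-type flags built and queried (assuming icp_qat_fw.h and its includes resolve on the build host); the local variable names are illustrative.

#include <stdint.h>
#include <stdio.h>
#include "icp_qat_fw.h"

int main(void)
{
	struct icp_qat_fw_comn_req_hdr hdr = {0};
	uint16_t comn_flags;

	/* Mark the request header valid (bit 7 of hdr_flags). */
	hdr.hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);

	/* Flat (non-SGL) source buffer, 64-bit content descriptor pointer. */
	comn_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
						 QAT_COMN_PTR_TYPE_FLAT);

	printf("valid=%d ptr_type=%d cd_fld_type=%d\n",
		ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr),
		ICP_QAT_FW_COMN_PTR_TYPE_GET(comn_flags),
		ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(comn_flags));
	return 0;
}
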
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h b/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h
new file mode 100644
index 00000000..fbf2b839
--- /dev/null
+++ b/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h
@@ -0,0 +1,404 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ * Copyright(c) 2015 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * qat-linux@intel.com
+ *
+ * BSD LICENSE
+ * Copyright(c) 2015 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ICP_QAT_FW_LA_H_
+#define _ICP_QAT_FW_LA_H_
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_la_cmd_id {
+ ICP_QAT_FW_LA_CMD_CIPHER = 0,
+ ICP_QAT_FW_LA_CMD_AUTH = 1,
+ ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2,
+ ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3,
+ ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4,
+ ICP_QAT_FW_LA_CMD_TRNG_TEST = 5,
+ ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6,
+ ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7,
+ ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8,
+ ICP_QAT_FW_LA_CMD_MGF1 = 9,
+ ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10,
+ ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11,
+ ICP_QAT_FW_LA_CMD_DELIMITER = 12
+};
+
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+
+struct icp_qat_fw_la_bulk_req {
+ struct icp_qat_fw_comn_req_hdr comn_hdr;
+ struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+ struct icp_qat_fw_comn_req_mid comn_mid;
+ struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+ struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1
+#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1
+#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11
+#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1
+#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0
+#define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10
+#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1
+#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4
+#define ICP_QAT_FW_LA_GCM_PROTO 2
+#define ICP_QAT_FW_LA_CCM_PROTO 1
+#define ICP_QAT_FW_LA_NO_PROTO 0
+#define QAT_LA_PROTO_BITPOS 7
+#define QAT_LA_PROTO_MASK 0x7
+#define ICP_QAT_FW_LA_CMP_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0
+#define QAT_LA_CMP_AUTH_RES_BITPOS 6
+#define QAT_LA_CMP_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_RET_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0
+#define QAT_LA_RET_AUTH_RES_BITPOS 5
+#define QAT_LA_RET_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_UPDATE_STATE 1
+#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0
+#define QAT_LA_UPDATE_STATE_BITPOS 4
+#define QAT_LA_UPDATE_STATE_MASK 0x1
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1
+#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0
+#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1
+#define QAT_LA_CIPH_IV_FLD_BITPOS 2
+#define QAT_LA_CIPH_IV_FLD_MASK 0x1
+#define ICP_QAT_FW_LA_PARTIAL_NONE 0
+#define ICP_QAT_FW_LA_PARTIAL_START 1
+#define ICP_QAT_FW_LA_PARTIAL_MID 3
+#define ICP_QAT_FW_LA_PARTIAL_END 2
+#define QAT_LA_PARTIAL_BITPOS 0
+#define QAT_LA_PARTIAL_MASK 0x3
+#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
+ cmp_auth, ret_auth, update_state, \
+ ciph_iv, ciphcfg, partial) \
+ (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \
+ QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \
+ ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \
+ QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \
+ ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \
+ QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \
+ ((proto & QAT_LA_PROTO_MASK) << \
+ QAT_LA_PROTO_BITPOS) | \
+ ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \
+ QAT_LA_CMP_AUTH_RES_BITPOS) | \
+ ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \
+ QAT_LA_RET_AUTH_RES_BITPOS) | \
+ ((update_state & QAT_LA_UPDATE_STATE_MASK) << \
+ QAT_LA_UPDATE_STATE_BITPOS) | \
+ ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \
+ QAT_LA_CIPH_IV_FLD_BITPOS) | \
+ ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \
+ QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \
+ ((partial & QAT_LA_PARTIAL_MASK) << \
+ QAT_LA_PARTIAL_BITPOS))
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \
+ QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+ QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+ QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+ QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \
+ QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \
+ QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+ QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \
+ QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \
+ QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \
+ QAT_LA_PARTIAL_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \
+ QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+ QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+ QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+ QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \
+ QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \
+ QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \
+ QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+ QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \
+ QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \
+ QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
+ QAT_LA_PARTIAL_MASK)
+
+struct icp_qat_fw_cipher_req_hdr_cd_pars {
+ union {
+ struct {
+ uint64_t content_desc_addr;
+ uint16_t content_desc_resrvd1;
+ uint8_t content_desc_params_sz;
+ uint8_t content_desc_hdr_resrvd2;
+ uint32_t content_desc_resrvd3;
+ } s;
+ struct {
+ uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ } s1;
+ } u;
+};
+
+struct icp_qat_fw_cipher_auth_req_hdr_cd_pars {
+ union {
+ struct {
+ uint64_t content_desc_addr;
+ uint16_t content_desc_resrvd1;
+ uint8_t content_desc_params_sz;
+ uint8_t content_desc_hdr_resrvd2;
+ uint32_t content_desc_resrvd3;
+ } s;
+ struct {
+ uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ } sl;
+ } u;
+};
+
+struct icp_qat_fw_cipher_cd_ctrl_hdr {
+ uint8_t cipher_state_sz;
+ uint8_t cipher_key_sz;
+ uint8_t cipher_cfg_offset;
+ uint8_t next_curr_id;
+ uint8_t cipher_padding_sz;
+ uint8_t resrvd1;
+ uint16_t resrvd2;
+ uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3];
+};
+
+struct icp_qat_fw_auth_cd_ctrl_hdr {
+ uint32_t resrvd1;
+ uint8_t resrvd2;
+ uint8_t hash_flags;
+ uint8_t hash_cfg_offset;
+ uint8_t next_curr_id;
+ uint8_t resrvd3;
+ uint8_t outer_prefix_sz;
+ uint8_t final_sz;
+ uint8_t inner_res_sz;
+ uint8_t resrvd4;
+ uint8_t inner_state1_sz;
+ uint8_t inner_state2_offset;
+ uint8_t inner_state2_sz;
+ uint8_t outer_config_offset;
+ uint8_t outer_state1_sz;
+ uint8_t outer_res_sz;
+ uint8_t outer_prefix_offset;
+};
+
+struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
+ uint8_t cipher_state_sz;
+ uint8_t cipher_key_sz;
+ uint8_t cipher_cfg_offset;
+ uint8_t next_curr_id_cipher;
+ uint8_t cipher_padding_sz;
+ uint8_t hash_flags;
+ uint8_t hash_cfg_offset;
+ uint8_t next_curr_id_auth;
+ uint8_t resrvd1;
+ uint8_t outer_prefix_sz;
+ uint8_t final_sz;
+ uint8_t inner_res_sz;
+ uint8_t resrvd2;
+ uint8_t inner_state1_sz;
+ uint8_t inner_state2_offset;
+ uint8_t inner_state2_sz;
+ uint8_t outer_config_offset;
+ uint8_t outer_state1_sz;
+ uint8_t outer_res_sz;
+ uint8_t outer_prefix_offset;
+};
+
+#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1
+#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0
+#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX 240
+#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \
+ (sizeof(struct icp_qat_fw_la_cipher_req_params_t))
+#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
+
+struct icp_qat_fw_la_cipher_req_params {
+ uint32_t cipher_offset;
+ uint32_t cipher_length;
+ union {
+ uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+ struct {
+ uint64_t cipher_IV_ptr;
+ uint64_t resrvd1;
+ } s;
+ } u;
+};
+
+struct icp_qat_fw_la_auth_req_params {
+ uint32_t auth_off;
+ uint32_t auth_len;
+ union {
+ uint64_t auth_partial_st_prefix;
+ uint64_t aad_adr;
+ } u1;
+ uint64_t auth_res_addr;
+ union {
+ uint8_t inner_prefix_sz;
+ uint8_t aad_sz;
+ } u2;
+ uint8_t resrvd1;
+ uint8_t hash_state_sz;
+ uint8_t auth_res_sz;
+} __rte_packed;
+
+struct icp_qat_fw_la_auth_req_params_resrvd_flds {
+ uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
+ union {
+ uint8_t inner_prefix_sz;
+ uint8_t aad_sz;
+ } u2;
+ uint8_t resrvd1;
+ uint16_t resrvd2;
+};
+
+struct icp_qat_fw_la_resp {
+ struct icp_qat_fw_comn_resp_hdr comn_resp;
+ uint64_t opaque_data;
+ uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \
+ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
+ ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \
+ (((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \
+ ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+ >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+ ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \
+ (((cd_ctrl_hdr_t)->next_curr_id_auth) \
+ & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+ ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+ & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+ ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
+#endif
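
ICP_QAT_FW_LA_FLAGS_BUILD packs ten independent fields into one serv_specif_flags word; after that, individual bits are usually toggled with the matching _SET/_GET helpers rather than rebuilt from scratch. A worked sketch for a GCM-style request (12-byte IV, digest returned, GCM protocol) followed by read-back of a few fields, assuming the header compiles stand-alone on the host; the particular flag choices are only an example, not necessarily the values this PMD programs.

#include <stdint.h>
#include <stdio.h>
#include "icp_qat_fw_la.h"

int main(void)
{
	uint16_t la_flags;

	la_flags = ICP_QAT_FW_LA_FLAGS_BUILD(
		0,					/* no ZUC protocol */
		ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS,	/* 12-byte GCM IV */
		ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER,
		ICP_QAT_FW_LA_GCM_PROTO,
		ICP_QAT_FW_LA_NO_CMP_AUTH_RES,		/* do not compare ICV */
		ICP_QAT_FW_LA_RET_AUTH_RES,		/* return ICV */
		ICP_QAT_FW_LA_NO_UPDATE_STATE,
		ICP_QAT_FW_CIPH_IV_64BIT_PTR,
		ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP,
		ICP_QAT_FW_LA_PARTIAL_NONE);

	printf("proto=%d gcm_iv_len_is_12=%d ret_auth=%d\n",
		ICP_QAT_FW_LA_PROTO_GET(la_flags),
		ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(la_flags),
		ICP_QAT_FW_LA_RET_AUTH_GET(la_flags));
	return 0;
}
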
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_hw.h b/drivers/crypto/qat/qat_adf/icp_qat_hw.h
new file mode 100644
index 00000000..4d4d8e4d
--- /dev/null
+++ b/drivers/crypto/qat/qat_adf/icp_qat_hw.h
@@ -0,0 +1,306 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ * Copyright(c) 2015 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * qat-linux@intel.com
+ *
+ * BSD LICENSE
+ * Copyright(c) 2015 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ICP_QAT_HW_H_
+#define _ICP_QAT_HW_H_
+
+enum icp_qat_hw_ae_id {
+ ICP_QAT_HW_AE_0 = 0,
+ ICP_QAT_HW_AE_1 = 1,
+ ICP_QAT_HW_AE_2 = 2,
+ ICP_QAT_HW_AE_3 = 3,
+ ICP_QAT_HW_AE_4 = 4,
+ ICP_QAT_HW_AE_5 = 5,
+ ICP_QAT_HW_AE_6 = 6,
+ ICP_QAT_HW_AE_7 = 7,
+ ICP_QAT_HW_AE_8 = 8,
+ ICP_QAT_HW_AE_9 = 9,
+ ICP_QAT_HW_AE_10 = 10,
+ ICP_QAT_HW_AE_11 = 11,
+ ICP_QAT_HW_AE_DELIMITER = 12
+};
+
+enum icp_qat_hw_qat_id {
+ ICP_QAT_HW_QAT_0 = 0,
+ ICP_QAT_HW_QAT_1 = 1,
+ ICP_QAT_HW_QAT_2 = 2,
+ ICP_QAT_HW_QAT_3 = 3,
+ ICP_QAT_HW_QAT_4 = 4,
+ ICP_QAT_HW_QAT_5 = 5,
+ ICP_QAT_HW_QAT_DELIMITER = 6
+};
+
+enum icp_qat_hw_auth_algo {
+ ICP_QAT_HW_AUTH_ALGO_NULL = 0,
+ ICP_QAT_HW_AUTH_ALGO_SHA1 = 1,
+ ICP_QAT_HW_AUTH_ALGO_MD5 = 2,
+ ICP_QAT_HW_AUTH_ALGO_SHA224 = 3,
+ ICP_QAT_HW_AUTH_ALGO_SHA256 = 4,
+ ICP_QAT_HW_AUTH_ALGO_SHA384 = 5,
+ ICP_QAT_HW_AUTH_ALGO_SHA512 = 6,
+ ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7,
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8,
+ ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9,
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10,
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11,
+ ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12,
+ ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13,
+ ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14,
+ ICP_QAT_HW_AUTH_RESERVED_1 = 15,
+ ICP_QAT_HW_AUTH_RESERVED_2 = 16,
+ ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17,
+ ICP_QAT_HW_AUTH_RESERVED_3 = 18,
+ ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19,
+ ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20
+};
+
+enum icp_qat_hw_auth_mode {
+ ICP_QAT_HW_AUTH_MODE0 = 0,
+ ICP_QAT_HW_AUTH_MODE1 = 1,
+ ICP_QAT_HW_AUTH_MODE2 = 2,
+ ICP_QAT_HW_AUTH_MODE_DELIMITER = 3
+};
+
+struct icp_qat_hw_auth_config {
+ uint32_t config;
+ uint32_t reserved;
+};
+
+#define QAT_AUTH_MODE_BITPOS 4
+#define QAT_AUTH_MODE_MASK 0xF
+#define QAT_AUTH_ALGO_BITPOS 0
+#define QAT_AUTH_ALGO_MASK 0xF
+#define QAT_AUTH_CMP_BITPOS 8
+#define QAT_AUTH_CMP_MASK 0x7F
+#define QAT_AUTH_SHA3_PADDING_BITPOS 16
+#define QAT_AUTH_SHA3_PADDING_MASK 0x1
+#define QAT_AUTH_ALGO_SHA3_BITPOS 22
+#define QAT_AUTH_ALGO_SHA3_MASK 0x3
+#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
+ (((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
+ ((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
+ (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \
+ QAT_AUTH_ALGO_SHA3_BITPOS) | \
+ (((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \
+ (algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \
+ & QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \
+ ((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
+
+struct icp_qat_hw_auth_counter {
+ uint32_t counter;
+ uint32_t reserved;
+};
+
+#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF
+#define QAT_AUTH_COUNT_BITPOS 0
+#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \
+ (((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS)
+
+struct icp_qat_hw_auth_setup {
+ struct icp_qat_hw_auth_config auth_config;
+ struct icp_qat_hw_auth_counter auth_counter;
+};
+
+#define QAT_HW_DEFAULT_ALIGNMENT 8
+#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & (~(n - 1)))
+#define ICP_QAT_HW_NULL_STATE1_SZ 32
+#define ICP_QAT_HW_MD5_STATE1_SZ 16
+#define ICP_QAT_HW_SHA1_STATE1_SZ 20
+#define ICP_QAT_HW_SHA224_STATE1_SZ 32
+#define ICP_QAT_HW_SHA256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA384_STATE1_SZ 64
+#define ICP_QAT_HW_SHA512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
+#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
+#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_F9_STATE1_SZ 32
+#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16
+#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
+#define ICP_QAT_HW_NULL_STATE2_SZ 32
+#define ICP_QAT_HW_MD5_STATE2_SZ 16
+#define ICP_QAT_HW_SHA1_STATE2_SZ 20
+#define ICP_QAT_HW_SHA224_STATE2_SZ 32
+#define ICP_QAT_HW_SHA256_STATE2_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
+#define ICP_QAT_HW_SHA384_STATE2_SZ 64
+#define ICP_QAT_HW_SHA512_STATE2_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
+#define ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ 48
+#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16
+#define ICP_QAT_HW_F9_IK_SZ 16
+#define ICP_QAT_HW_F9_FK_SZ 16
+#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \
+ ICP_QAT_HW_F9_FK_SZ)
+#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32
+#define ICP_QAT_HW_GALOIS_H_SZ 16
+#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8
+#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16
+
+struct icp_qat_hw_auth_sha512 {
+ struct icp_qat_hw_auth_setup inner_setup;
+ uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ];
+ struct icp_qat_hw_auth_setup outer_setup;
+ uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ];
+};
+
+struct icp_qat_hw_auth_algo_blk {
+ struct icp_qat_hw_auth_sha512 sha;
+};
+
+#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0
+#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF
+
+enum icp_qat_hw_cipher_algo {
+ ICP_QAT_HW_CIPHER_ALGO_NULL = 0,
+ ICP_QAT_HW_CIPHER_ALGO_DES = 1,
+ ICP_QAT_HW_CIPHER_ALGO_3DES = 2,
+ ICP_QAT_HW_CIPHER_ALGO_AES128 = 3,
+ ICP_QAT_HW_CIPHER_ALGO_AES192 = 4,
+ ICP_QAT_HW_CIPHER_ALGO_AES256 = 5,
+ ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6,
+ ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7,
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8,
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
+ ICP_QAT_HW_CIPHER_DELIMITER = 10
+};
+
+enum icp_qat_hw_cipher_mode {
+ ICP_QAT_HW_CIPHER_ECB_MODE = 0,
+ ICP_QAT_HW_CIPHER_CBC_MODE = 1,
+ ICP_QAT_HW_CIPHER_CTR_MODE = 2,
+ ICP_QAT_HW_CIPHER_F8_MODE = 3,
+ ICP_QAT_HW_CIPHER_XTS_MODE = 6,
+ ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7
+};
+
+struct icp_qat_hw_cipher_config {
+ uint32_t val;
+ uint32_t reserved;
+};
+
+enum icp_qat_hw_cipher_dir {
+ ICP_QAT_HW_CIPHER_ENCRYPT = 0,
+ ICP_QAT_HW_CIPHER_DECRYPT = 1,
+};
+
+enum icp_qat_hw_cipher_convert {
+ ICP_QAT_HW_CIPHER_NO_CONVERT = 0,
+ ICP_QAT_HW_CIPHER_KEY_CONVERT = 1,
+};
+
+#define QAT_CIPHER_MODE_BITPOS 4
+#define QAT_CIPHER_MODE_MASK 0xF
+#define QAT_CIPHER_ALGO_BITPOS 0
+#define QAT_CIPHER_ALGO_MASK 0xF
+#define QAT_CIPHER_CONVERT_BITPOS 9
+#define QAT_CIPHER_CONVERT_MASK 0x1
+#define QAT_CIPHER_DIR_BITPOS 8
+#define QAT_CIPHER_DIR_MASK 0x1
+#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2
+#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2
+#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \
+ (((mode & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \
+ ((algo & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \
+ ((convert & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \
+ ((dir & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS))
+#define ICP_QAT_HW_DES_BLK_SZ 8
+#define ICP_QAT_HW_3DES_BLK_SZ 8
+#define ICP_QAT_HW_NULL_BLK_SZ 8
+#define ICP_QAT_HW_AES_BLK_SZ 16
+#define ICP_QAT_HW_KASUMI_BLK_SZ 8
+#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8
+#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8
+#define ICP_QAT_HW_NULL_KEY_SZ 256
+#define ICP_QAT_HW_DES_KEY_SZ 8
+#define ICP_QAT_HW_3DES_KEY_SZ 24
+#define ICP_QAT_HW_AES_128_KEY_SZ 16
+#define ICP_QAT_HW_AES_192_KEY_SZ 24
+#define ICP_QAT_HW_AES_256_KEY_SZ 32
+#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+ QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+ QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_KASUMI_KEY_SZ 16
+#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \
+ QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+ QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+ QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_ARC4_KEY_SZ 256
+#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
+#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
+#define INIT_SHRAM_CONSTANTS_TABLE_SZ 1024
+
+struct icp_qat_hw_cipher_aes256_f8 {
+ struct icp_qat_hw_cipher_config cipher_config;
+ uint8_t key[ICP_QAT_HW_AES_256_F8_KEY_SZ];
+};
+
+struct icp_qat_hw_cipher_algo_blk {
+ struct icp_qat_hw_cipher_aes256_f8 aes;
+} __rte_cache_aligned;
+#endif
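
The cipher and auth "config" words are likewise assembled from mode, algorithm and direction fields, and the *_STATE1/STATE2 sizes above dimension the hash state regions inside the content descriptor. A small worked sketch (host-only, assuming the header compiles stand-alone): build an AES-256-CBC decrypt config word, a matching HMAC-SHA256 auth config, and show the quadword round-up applied to state sizes.

#include <stdint.h>
#include <stdio.h>
#include "icp_qat_hw.h"

int main(void)
{
	/* AES-256 in CBC mode, key converted for the decrypt direction. */
	uint32_t cipher_cfg = ICP_QAT_HW_CIPHER_CONFIG_BUILD(
			ICP_QAT_HW_CIPHER_CBC_MODE,
			ICP_QAT_HW_CIPHER_ALGO_AES256,
			ICP_QAT_HW_CIPHER_KEY_CONVERT,
			ICP_QAT_HW_CIPHER_DECRYPT);

	/* HMAC-SHA256 (mode 1) producing a full 32-byte digest. */
	uint32_t auth_cfg = ICP_QAT_HW_AUTH_CONFIG_BUILD(
			ICP_QAT_HW_AUTH_MODE1,
			ICP_QAT_HW_AUTH_ALGO_SHA256,
			ICP_QAT_HW_SHA256_STATE1_SZ);

	/* SHA-1 state is 20 bytes; rounded up to 8-byte alignment -> 24. */
	printf("cipher_cfg=0x%08x auth_cfg=0x%08x sha1_state1=%d\n",
		cipher_cfg, auth_cfg,
		QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
				QAT_HW_DEFAULT_ALIGNMENT));
	return 0;
}
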
diff --git a/drivers/crypto/qat/qat_adf/qat_algs.h b/drivers/crypto/qat/qat_adf/qat_algs.h
new file mode 100644
index 00000000..b47dbc23
--- /dev/null
+++ b/drivers/crypto/qat/qat_adf/qat_algs.h
@@ -0,0 +1,130 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ * Copyright(c) 2015-2016 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * qat-linux@intel.com
+ *
+ * BSD LICENSE
+ * Copyright(c) 2015-2016 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ICP_QAT_ALGS_H_
+#define _ICP_QAT_ALGS_H_
+#include <rte_memory.h>
+#include "icp_qat_hw.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
+#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+ ICP_QAT_HW_CIPHER_NO_CONVERT, \
+ ICP_QAT_HW_CIPHER_ENCRYPT)
+
+#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+ ICP_QAT_HW_CIPHER_KEY_CONVERT, \
+ ICP_QAT_HW_CIPHER_DECRYPT)
+
+struct qat_alg_buf {
+ uint32_t len;
+ uint32_t resrvd;
+ uint64_t addr;
+} __rte_packed;
+
+struct qat_alg_buf_list {
+ uint64_t resrvd;
+ uint32_t num_bufs;
+ uint32_t num_mapped_bufs;
+ struct qat_alg_buf bufers[];
+} __rte_packed __rte_cache_aligned;
+
+/* Common content descriptor */
+struct qat_alg_cd {
+ struct icp_qat_hw_cipher_algo_blk cipher;
+ struct icp_qat_hw_auth_algo_blk hash;
+} __rte_packed __rte_cache_aligned;
+
+struct qat_session {
+ enum icp_qat_fw_la_cmd_id qat_cmd;
+ enum icp_qat_hw_cipher_algo qat_cipher_alg;
+ enum icp_qat_hw_cipher_dir qat_dir;
+ enum icp_qat_hw_cipher_mode qat_mode;
+ enum icp_qat_hw_auth_algo qat_hash_alg;
+ struct qat_alg_cd cd;
+ phys_addr_t cd_paddr;
+ struct icp_qat_fw_la_bulk_req fw_req;
+ struct qat_crypto_instance *inst;
+ uint8_t salt[ICP_QAT_HW_AES_BLK_SZ];
+ rte_spinlock_t lock; /* protects this struct */
+};
+
+struct qat_alg_ablkcipher_cd {
+ struct icp_qat_hw_cipher_algo_blk *cd;
+ phys_addr_t cd_paddr;
+ struct icp_qat_fw_la_bulk_req fw_req;
+ struct qat_crypto_instance *inst;
+ rte_spinlock_t lock; /* protects this struct */
+};
+
+int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg);
+
+int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cd,
+ uint8_t *enckey,
+ uint32_t enckeylen);
+
+int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
+ uint8_t *authkey,
+ uint32_t authkeylen,
+ uint32_t add_auth_data_length,
+ uint32_t digestsize);
+
+void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header);
+
+void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_cd *cd,
+ int alg, const uint8_t *key,
+ unsigned int keylen);
+
+void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cd,
+ int alg, const uint8_t *key,
+ unsigned int keylen);
+
+int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+
+#endif
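
qat_alg_buf_list is the scatter-gather descriptor layout intended for requests that select QAT_COMN_PTR_TYPE_SGL: a fixed header followed by a flexible array of {len, addr} entries, so the allocation must be sized for the number of segments. A minimal population sketch, assuming the DPDK and qat_adf headers are on the include path; the physical addresses and lengths are placeholders (a real caller would take them from mbuf segments), and build_sgl itself is a hypothetical helper.

#include <stdint.h>
#include <stdlib.h>
#include "qat_algs.h"

/* Build a two-segment SGL; returns NULL on allocation failure. */
static struct qat_alg_buf_list *
build_sgl(uint64_t seg0_phys, uint32_t seg0_len,
	  uint64_t seg1_phys, uint32_t seg1_len)
{
	struct qat_alg_buf_list *sgl;

	/* Header plus one qat_alg_buf entry per segment. */
	sgl = calloc(1, sizeof(*sgl) + 2 * sizeof(struct qat_alg_buf));
	if (sgl == NULL)
		return NULL;

	sgl->num_bufs = 2;
	sgl->bufers[0].addr = seg0_phys;	/* field is spelt "bufers" upstream */
	sgl->bufers[0].len = seg0_len;
	sgl->bufers[1].addr = seg1_phys;
	sgl->bufers[1].len = seg1_len;
	return sgl;
}
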
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
new file mode 100644
index 00000000..bcccdf4f
--- /dev/null
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -0,0 +1,829 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ * Copyright(c) 2015-2016 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * qat-linux@intel.com
+ *
+ * BSD LICENSE
+ * Copyright(c) 2015-2016 Intel Corporation.
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_memcpy.h>
+#include <rte_common.h>
+#include <rte_spinlock.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+
+#include "../qat_logs.h"
+#include "qat_algs.h"
+
+#include <openssl/sha.h> /* Needed to calculate pre-compute values */
+#include <openssl/aes.h> /* Needed to calculate pre-compute values */
+
+
+/*
+ * Returns size in bytes per hash algo for state1 size field in cd_ctrl
+ * This is digest size rounded up to nearest quadword
+ */
+static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
+{
+ switch (qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
+ /* return maximum state1 size in this case */
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
+ default:
+ PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
+ return -EFAULT;
+ };
+ return -EFAULT;
+}
+
+/* returns digest size in bytes per hash algo */
+static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
+{
+ switch (qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ return ICP_QAT_HW_SHA1_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ return ICP_QAT_HW_SHA256_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ return ICP_QAT_HW_SHA512_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
+ /* return maximum digest size in this case */
+ return ICP_QAT_HW_SHA512_STATE1_SZ;
+ default:
+ PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
+ return -EFAULT;
+ };
+ return -EFAULT;
+}
+
+/* returns block size in bytes per hash algo */
+static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
+{
+ switch (qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ return SHA_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ return SHA256_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ return SHA512_CBLOCK;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ return 16;
+ case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
+ /* return maximum block size in this case */
+ return SHA512_CBLOCK;
+ default:
+ PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
+ return -EFAULT;
+ };
+ return -EFAULT;
+}
+
+static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA_CTX ctx;
+
+ if (!SHA1_Init(&ctx))
+ return -EFAULT;
+ SHA1_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA256_CTX ctx;
+
+ if (!SHA256_Init(&ctx))
+ return -EFAULT;
+ SHA256_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA512_CTX ctx;
+
+ if (!SHA512_Init(&ctx))
+ return -EFAULT;
+ SHA512_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
+ uint8_t *data_in,
+ uint8_t *data_out)
+{
+ int digest_size;
+ uint8_t digest[qat_hash_get_digest_size(
+ ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
+ uint32_t *hash_state_out_be32;
+ uint64_t *hash_state_out_be64;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+ digest_size = qat_hash_get_digest_size(hash_alg);
+ if (digest_size <= 0)
+ return -EFAULT;
+
+ hash_state_out_be32 = (uint32_t *)data_out;
+ hash_state_out_be64 = (uint64_t *)data_out;
+
+ switch (hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ if (partial_hash_sha1(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
+ *hash_state_out_be32 =
+ rte_bswap32(*(((uint32_t *)digest)+i));
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ if (partial_hash_sha256(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
+ *hash_state_out_be32 =
+ rte_bswap32(*(((uint32_t *)digest)+i));
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ if (partial_hash_sha512(data_in, digest))
+ return -EFAULT;
+ for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
+ *hash_state_out_be64 =
+ rte_bswap64(*(((uint64_t *)digest)+i));
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+#define HMAC_IPAD_VALUE 0x36
+#define HMAC_OPAD_VALUE 0x5c
+#define HASH_XCBC_PRECOMP_KEY_NUM 3
+
+static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
+ const uint8_t *auth_key,
+ uint16_t auth_keylen,
+ uint8_t *p_state_buf,
+ uint16_t *p_state_len)
+{
+ int block_size;
+ uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
+ uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+ if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
+ static uint8_t qat_aes_xcbc_key_seed[
+ ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ };
+
+ uint8_t *in = NULL;
+ uint8_t *out = p_state_buf;
+ int x;
+ AES_KEY enc_key;
+
+		in = rte_zmalloc("working mem for key",
+				ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
+		if (in == NULL)
+			return -ENOMEM;
+ rte_memcpy(in, qat_aes_xcbc_key_seed,
+ ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+ for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
+ if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
+ &enc_key) != 0) {
+ rte_free(in -
+ (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
+ memset(out -
+ (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
+ 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+ return -EFAULT;
+ }
+ AES_encrypt(in, out, &enc_key);
+ in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+ out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+ }
+ *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
+ rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
+ return 0;
+ } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
+ (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
+ uint8_t *in = NULL;
+ uint8_t *out = p_state_buf;
+ AES_KEY enc_key;
+
+ memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
+ ICP_QAT_HW_GALOIS_LEN_A_SZ +
+ ICP_QAT_HW_GALOIS_E_CTR0_SZ);
+		in = rte_zmalloc("working mem for key",
+				ICP_QAT_HW_GALOIS_H_SZ, 16);
+		if (in == NULL)
+			return -ENOMEM;
+		memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
+ if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
+ &enc_key) != 0) {
+ return -EFAULT;
+ }
+ AES_encrypt(in, out, &enc_key);
+ *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
+ ICP_QAT_HW_GALOIS_LEN_A_SZ +
+ ICP_QAT_HW_GALOIS_E_CTR0_SZ;
+ rte_free(in);
+ return 0;
+ }
+
+ block_size = qat_hash_get_block_size(hash_alg);
+ if (block_size <= 0)
+ return -EFAULT;
+ /* init ipad and opad from key and xor with fixed values */
+ memset(ipad, 0, block_size);
+ memset(opad, 0, block_size);
+
+ if (auth_keylen > (unsigned int)block_size) {
+ PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
+ return -EFAULT;
+ }
+ rte_memcpy(ipad, auth_key, auth_keylen);
+ rte_memcpy(opad, auth_key, auth_keylen);
+
+ for (i = 0; i < block_size; i++) {
+ uint8_t *ipad_ptr = ipad + i;
+ uint8_t *opad_ptr = opad + i;
+ *ipad_ptr ^= HMAC_IPAD_VALUE;
+ *opad_ptr ^= HMAC_OPAD_VALUE;
+ }
+
+ /* do partial hash of ipad and copy to state1 */
+ if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
+ memset(ipad, 0, block_size);
+ memset(opad, 0, block_size);
+ PMD_DRV_LOG(ERR, "ipad precompute failed");
+ return -EFAULT;
+ }
+
+ /*
+	 * State len is a multiple of 8, so it may be larger than the digest.
+	 * Put the partial hash of opad at state_len bytes after state1.
+ */
+ *p_state_len = qat_hash_get_state1_size(hash_alg);
+ if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
+ memset(ipad, 0, block_size);
+ memset(opad, 0, block_size);
+ PMD_DRV_LOG(ERR, "opad precompute failed");
+ return -EFAULT;
+ }
+
+ /* don't leave data lying around */
+ memset(ipad, 0, block_size);
+ memset(opad, 0, block_size);
+ return 0;
+}
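The precompute above is the textbook HMAC key schedule: XOR the key into 0x36/0x5c pads and run one compression-function pass over each pad, keeping the raw chaining state rather than a finalised digest. Below is a minimal standalone sketch of the SHA-1 case, assuming OpenSSL's <openssl/sha.h>; the helper name is hypothetical, it is not part of this patch, and it omits the big-endian word swap that partial_hash_compute() applies before the state is written into the content descriptor.

#include <stdint.h>
#include <string.h>
#include <openssl/sha.h>

/* Hypothetical demo helper, not part of this patch. */
static int
hmac_sha1_precompute_demo(const uint8_t *key, size_t keylen,
		uint8_t ipad_state[SHA_DIGEST_LENGTH],
		uint8_t opad_state[SHA_DIGEST_LENGTH])
{
	uint8_t ipad[SHA_CBLOCK] = {0};
	uint8_t opad[SHA_CBLOCK] = {0};
	SHA_CTX ctx;
	size_t i;

	if (keylen > SHA_CBLOCK)
		return -1;	/* the PMD also rejects over-long keys */

	memcpy(ipad, key, keylen);
	memcpy(opad, key, keylen);
	for (i = 0; i < SHA_CBLOCK; i++) {
		ipad[i] ^= 0x36;	/* HMAC_IPAD_VALUE */
		opad[i] ^= 0x5c;	/* HMAC_OPAD_VALUE */
	}

	/* One compression-function pass per pad; the raw chaining words
	 * (not a finalised digest) are what ends up in state1/state2.
	 */
	if (!SHA1_Init(&ctx))
		return -1;
	SHA1_Transform(&ctx, ipad);
	memcpy(ipad_state, &ctx, SHA_DIGEST_LENGTH);

	if (!SHA1_Init(&ctx))
		return -1;
	SHA1_Transform(&ctx, opad);
	memcpy(opad_state, &ctx, SHA_DIGEST_LENGTH);
	return 0;
}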
+
+void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
+{
+ PMD_INIT_FUNC_TRACE();
+ header->hdr_flags =
+ ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+ header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+ header->comn_req_flags =
+ ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
+ QAT_COMN_PTR_TYPE_FLAT);
+ ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_PARTIAL_NONE);
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_PROTO);
+ ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_UPDATE_STATE);
+}
+
+int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
+ uint8_t *cipherkey,
+ uint32_t cipherkeylen)
+{
+ struct icp_qat_hw_cipher_algo_blk *cipher;
+ struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
+ struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ void *ptr = &req_tmpl->cd_ctrl;
+ struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+ struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+ enum icp_qat_hw_cipher_convert key_convert;
+ uint16_t proto = ICP_QAT_FW_LA_NO_PROTO; /* no CCM/GCM/Snow3G */
+ uint16_t cipher_offset = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER &&
+ cdesc->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
+ cipher =
+ (struct icp_qat_hw_cipher_algo_blk *)((char *)&cdesc->cd +
+ sizeof(struct icp_qat_hw_auth_algo_blk));
+ cipher_offset = sizeof(struct icp_qat_hw_auth_algo_blk);
+ } else {
+ cipher = (struct icp_qat_hw_cipher_algo_blk *)&cdesc->cd;
+ cipher_offset = 0;
+ }
+ /* CD setup */
+ if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+ } else {
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_CMP_AUTH_RES);
+ }
+
+ if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
+ /* CTR Streaming ciphers are a special case. Decrypt = encrypt
+ * Overriding default values previously set
+ */
+ cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
+ } else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
+ key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
+ else
+ key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
+
+ if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2)
+ key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
+
+ /* For Snow3G, set key convert and other bits */
+ if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+ key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+ if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+ }
+ }
+
+ cipher->aes.cipher_config.val =
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
+ cdesc->qat_cipher_alg, key_convert,
+ cdesc->qat_dir);
+ memcpy(cipher->aes.key, cipherkey, cipherkeylen);
+
+ proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
+ if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
+ proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
+
+ /* Request template setup */
+ qat_alg_init_common_hdr(header);
+ header->service_cmd_id = cdesc->qat_cmd;
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
+ /* Configure the common header protocol flags */
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, proto);
+ cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
+ cd_pars->u.s.content_desc_params_sz = sizeof(cdesc->cd) >> 3;
+
+ /* Cipher CD config setup */
+ if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+ cipher_cd_ctrl->cipher_key_sz =
+ (ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
+ ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ) >> 3;
+ cipher_cd_ctrl->cipher_state_sz =
+ ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
+ cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
+ if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+ }
+ } else {
+ cipher_cd_ctrl->cipher_key_sz = cipherkeylen >> 3;
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
+ cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
+ }
+
+ if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ } else {
+ PMD_DRV_LOG(ERR, "invalid param, only authenticated "
+ "encryption supported");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
+ uint8_t *authkey,
+ uint32_t authkeylen,
+ uint32_t add_auth_data_length,
+ uint32_t digestsize)
+{
+ struct icp_qat_hw_cipher_algo_blk *cipher;
+ struct icp_qat_hw_auth_algo_blk *hash;
+ struct icp_qat_hw_cipher_algo_blk *cipherconfig;
+ struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
+ struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+ void *ptr = &req_tmpl->cd_ctrl;
+ struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+ struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+ struct icp_qat_fw_la_auth_req_params *auth_param =
+ (struct icp_qat_fw_la_auth_req_params *)
+ ((char *)&req_tmpl->serv_specif_rqpars +
+ sizeof(struct icp_qat_fw_la_cipher_req_params));
+ enum icp_qat_hw_cipher_convert key_convert;
+ uint16_t proto = ICP_QAT_FW_LA_NO_PROTO; /* no CCM/GCM/Snow3G */
+ uint16_t state1_size = 0;
+ uint16_t state2_size = 0;
+ uint16_t cipher_offset = 0, hash_offset = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER &&
+ cdesc->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
+ hash = (struct icp_qat_hw_auth_algo_blk *)&cdesc->cd;
+ cipher =
+ (struct icp_qat_hw_cipher_algo_blk *)((char *)&cdesc->cd +
+ sizeof(struct icp_qat_hw_auth_algo_blk));
+ hash_offset = 0;
+ cipher_offset = ((char *)hash - (char *)cipher);
+ } else {
+ cipher = (struct icp_qat_hw_cipher_algo_blk *)&cdesc->cd;
+ hash = (struct icp_qat_hw_auth_algo_blk *)((char *)&cdesc->cd +
+ sizeof(struct icp_qat_hw_cipher_algo_blk));
+ cipher_offset = 0;
+ hash_offset = ((char *)hash - (char *)cipher);
+ }
+
+ /* CD setup */
+ if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+ } else {
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_CMP_AUTH_RES);
+ }
+
+ if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
+ /* CTR Streaming ciphers are a special case. Decrypt = encrypt
+ * Overriding default values previously set
+ */
+ cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
+ } else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
+ key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
+ else
+ key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
+
+ if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2)
+ key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
+
+ cipher->aes.cipher_config.val =
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
+ cdesc->qat_cipher_alg, key_convert,
+ cdesc->qat_dir);
+
+ hash->sha.inner_setup.auth_config.reserved = 0;
+ hash->sha.inner_setup.auth_config.config =
+ ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+ cdesc->qat_hash_alg, digestsize);
+ hash->sha.inner_setup.auth_counter.counter =
+ rte_bswap32(qat_hash_get_block_size(cdesc->qat_hash_alg));
+ if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
+ hash->sha.inner_setup.auth_counter.counter = 0;
+ hash->sha.outer_setup.auth_config.reserved = 0;
+ cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
+ ((char *)&cdesc->cd +
+ sizeof(struct icp_qat_hw_auth_algo_blk)
+ + 16);
+ cipherconfig->aes.cipher_config.val =
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
+ ICP_QAT_HW_CIPHER_KEY_CONVERT,
+ ICP_QAT_HW_CIPHER_ENCRYPT);
+ memcpy(cipherconfig->aes.key, authkey, authkeylen);
+ memset(cipherconfig->aes.key + authkeylen, 0,
+ ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
+ }
+
+ /* Do precomputes */
+ if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
+ if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
+ authkey, authkeylen, (uint8_t *)(hash->sha.state1 +
+ ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ), &state2_size)) {
+ PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
+ return -EFAULT;
+ }
+ } else if ((cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
+ (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
+ if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
+ authkey, authkeylen, (uint8_t *)(hash->sha.state1 +
+ ICP_QAT_HW_GALOIS_128_STATE1_SZ), &state2_size)) {
+ PMD_DRV_LOG(ERR, "(GCM)precompute failed");
+ return -EFAULT;
+ }
+ /*
+		 * Write the AAD length into bytes 16-19 of state2 in
+		 * big-endian format; the length field itself is 8 bytes.
+ */
+ *(uint32_t *)&(hash->sha.state1[
+ ICP_QAT_HW_GALOIS_128_STATE1_SZ +
+ ICP_QAT_HW_GALOIS_H_SZ]) =
+ rte_bswap32(add_auth_data_length);
+ proto = ICP_QAT_FW_LA_GCM_PROTO;
+ } else if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
+ proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
+ state1_size = qat_hash_get_state1_size(cdesc->qat_hash_alg);
+ } else {
+ if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
+ authkey, authkeylen, (uint8_t *)(hash->sha.state1),
+ &state1_size)) {
+ PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ return -EFAULT;
+ }
+ }
+
+ /* Request template setup */
+ qat_alg_init_common_hdr(header);
+ header->service_cmd_id = cdesc->qat_cmd;
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+ /* Configure the common header protocol flags */
+ ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, proto);
+ cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
+ cd_pars->u.s.content_desc_params_sz = sizeof(cdesc->cd) >> 3;
+
+ if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+ ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_RET_AUTH_RES);
+ ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+ }
+
+ /* Cipher CD config setup */
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
+ cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
+
+ if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_AUTH) {
+ cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
+ cipher_cd_ctrl->cipher_cfg_offset = cipher_offset>>3;
+ } else {
+ cipher_cd_ctrl->cipher_state_sz = 0;
+ cipher_cd_ctrl->cipher_cfg_offset = 0;
+ }
+
+ /* Auth CD config setup */
+ hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
+ hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
+ hash_cd_ctrl->inner_res_sz = digestsize;
+ hash_cd_ctrl->final_sz = digestsize;
+ hash_cd_ctrl->inner_state1_sz = state1_size;
+
+ switch (cdesc->qat_hash_alg) {
+ case ICP_QAT_HW_AUTH_ALGO_SHA1:
+ hash_cd_ctrl->inner_state2_sz =
+ RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA256:
+ hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SHA512:
+ hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
+ hash_cd_ctrl->inner_state2_sz =
+ ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
+ hash_cd_ctrl->inner_state1_sz =
+ ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
+ memset(hash->sha.state1, 0, ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ);
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+ case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
+ hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_GALOIS_H_SZ +
+ ICP_QAT_HW_GALOIS_LEN_A_SZ +
+ ICP_QAT_HW_GALOIS_E_CTR0_SZ;
+ hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
+ memset(hash->sha.state1, 0, ICP_QAT_HW_GALOIS_128_STATE1_SZ);
+ break;
+ case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
+ hash_cd_ctrl->inner_state2_sz =
+ ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
+ hash_cd_ctrl->inner_state1_sz =
+ ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ;
+ memset(hash->sha.state1, 0, ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid HASH alg %u", cdesc->qat_hash_alg);
+ return -EFAULT;
+ }
+
+ hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
+ ((sizeof(struct icp_qat_hw_auth_setup) +
+ RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
+ >> 3);
+ auth_param->auth_res_sz = digestsize;
+
+ if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+ ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_AUTH);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ } else {
+ PMD_DRV_LOG(ERR, "invalid param, only authenticated "
+ "encryption supported");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static void qat_alg_ablkcipher_init_com(struct icp_qat_fw_la_bulk_req *req,
+ struct icp_qat_hw_cipher_algo_blk *cd,
+ const uint8_t *key, unsigned int keylen)
+{
+ struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
+ struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+ rte_memcpy(cd->aes.key, key, keylen);
+ qat_alg_init_common_hdr(header);
+ header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+ cd_pars->u.s.content_desc_params_sz =
+ sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
+ /* Cipher CD config setup */
+ cd_ctrl->cipher_key_sz = keylen >> 3;
+ cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
+ cd_ctrl->cipher_cfg_offset = 0;
+ ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+}
+
+void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_cd *cdesc,
+ int alg, const uint8_t *key,
+ unsigned int keylen)
+{
+ struct icp_qat_hw_cipher_algo_blk *enc_cd = cdesc->cd;
+ struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
+ struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+
+ PMD_INIT_FUNC_TRACE();
+ qat_alg_ablkcipher_init_com(req, enc_cd, key, keylen);
+ cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
+ enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
+}
+
+void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cdesc,
+ int alg, const uint8_t *key,
+ unsigned int keylen)
+{
+ struct icp_qat_hw_cipher_algo_blk *dec_cd = cdesc->cd;
+ struct icp_qat_fw_la_bulk_req *req = &cdesc->fw_req;
+ struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+
+ PMD_INIT_FUNC_TRACE();
+ qat_alg_ablkcipher_init_com(req, dec_cd, key, keylen);
+ cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
+ dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
+}
+
+int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_AES_128_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+ break;
+ case ICP_QAT_HW_AES_192_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
+ break;
+ case ICP_QAT_HW_AES_256_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+ switch (key_len) {
+ case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
+ *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
new file mode 100644
index 00000000..495ea1c7
--- /dev/null
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -0,0 +1,900 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <strings.h>
+#include <string.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/queue.h>
+#include <stdarg.h>
+
+#include <rte_common.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_tailq.h>
+#include <rte_ether.h>
+#include <rte_malloc.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_string_fns.h>
+#include <rte_spinlock.h>
+#include <rte_hexdump.h>
+
+#include "qat_logs.h"
+#include "qat_algs.h"
+#include "qat_crypto.h"
+#include "adf_transport_access_macros.h"
+
+#define BYTE_LENGTH 8
+
+static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 128,
+ .max = 128,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES XCBC MAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES GCM (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .aad_size = {
+ .min = 8,
+ .max = 12,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* SNOW3G (UIA2) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM (CIPHER) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* SNOW3G (UEA2) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
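Applications discover this table through the generic cryptodev API rather than by including PMD headers. The sketch below (illustrative, not part of this patch) walks it for one algorithm, assuming the rte_cryptodev_info_get() and end-of-list conventions of this DPDK generation.

#include <rte_cryptodev.h>

/* Illustrative helper, not part of this patch. */
static int
demo_qat_supports_sha256_hmac(uint8_t dev_id)
{
	struct rte_cryptodev_info info;
	const struct rte_cryptodev_capabilities *cap;

	rte_cryptodev_info_get(dev_id, &info);
	/* info.capabilities points at the table above; the list is assumed
	 * to end with an RTE_CRYPTO_OP_TYPE_UNDEFINED entry.
	 */
	for (cap = info.capabilities;
			cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) {
		if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
				cap->sym.xform_type ==
					RTE_CRYPTO_SYM_XFORM_AUTH &&
				cap->sym.auth.algo ==
					RTE_CRYPTO_AUTH_SHA256_HMAC)
			return 1;
	}
	return 0;
}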
+
+static inline uint32_t
+adf_modulo(uint32_t data, uint32_t shift);
+
+static inline int
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg);
+
+void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
+ void *session)
+{
+ struct qat_session *sess = session;
+ phys_addr_t cd_paddr = sess->cd_paddr;
+
+ PMD_INIT_FUNC_TRACE();
+ if (session) {
+ memset(sess, 0, qat_crypto_sym_get_session_private_size(dev));
+
+ sess->cd_paddr = cd_paddr;
+ }
+}
+
+static int
+qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+{
+ /* Cipher Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
+ return ICP_QAT_FW_LA_CMD_CIPHER;
+
+ /* Authentication Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
+ return ICP_QAT_FW_LA_CMD_AUTH;
+
+ if (xform->next == NULL)
+ return -1;
+
+ /* Cipher then Authenticate */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+
+ /* Authenticate then Cipher */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+
+ return -1;
+}
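For illustration, a hedged example of an xform chain this classifier would map to ICP_QAT_FW_LA_CMD_CIPHER_HASH: AES-CBC encrypt followed by SHA1-HMAC generate. Key data is omitted and the sizes are placeholders chosen to match the capability table; this is not part of the patch.

#include <rte_crypto.h>

/* Keys and IV/digest buffers omitted; sizes match the capability table. */
static struct rte_crypto_sym_xform demo_auth_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.next = NULL,
	.auth = {
		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
		.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
		.digest_length = 20,
	},
};

static struct rte_crypto_sym_xform demo_cipher_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
	.next = &demo_auth_xform,	/* cipher first, then authenticate */
	.cipher = {
		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
		.algo = RTE_CRYPTO_CIPHER_AES_CBC,
	},
};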
+
+static struct rte_crypto_auth_xform *
+qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return &xform->auth;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+static struct rte_crypto_cipher_xform *
+qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return &xform->cipher;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+void *
+qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private)
+{
+ struct qat_pmd_private *internals = dev->data->dev_private;
+
+ struct qat_session *session = session_private;
+
+ struct rte_crypto_cipher_xform *cipher_xform = NULL;
+
+ /* Get cipher xform from crypto xform chain */
+ cipher_xform = qat_get_cipher_xform(xform);
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ if (qat_alg_validate_aes_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_GCM:
+ if (qat_alg_validate_aes_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid SNOW3G cipher key size");
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_NULL:
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ case RTE_CRYPTO_CIPHER_AES_CCM:
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ PMD_DRV_LOG(ERR, "Crypto: Unsupported Cipher alg %u",
+ cipher_xform->algo);
+ goto error_out;
+ default:
+ PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
+ cipher_xform->algo);
+ goto error_out;
+ }
+
+ if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ else
+ session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+
+ if (qat_alg_aead_session_create_content_desc_cipher(session,
+ cipher_xform->key.data,
+ cipher_xform->key.length))
+ goto error_out;
+
+ return session;
+
+error_out:
+ rte_mempool_put(internals->sess_mp, session);
+ return NULL;
+}
+
+
+void *
+qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private)
+{
+ struct qat_pmd_private *internals = dev->data->dev_private;
+
+ struct qat_session *session = session_private;
+
+ int qat_cmd_id;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Get requested QAT command id */
+ qat_cmd_id = qat_get_cmd_id(xform);
+ if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
+ PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
+ goto error_out;
+ }
+ session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
+ switch (session->qat_cmd) {
+ case ICP_QAT_FW_LA_CMD_CIPHER:
+ session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
+ break;
+ case ICP_QAT_FW_LA_CMD_AUTH:
+ session = qat_crypto_sym_configure_session_auth(dev, xform, session);
+ break;
+ case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
+ session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
+ session = qat_crypto_sym_configure_session_auth(dev, xform, session);
+ break;
+ case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
+ session = qat_crypto_sym_configure_session_auth(dev, xform, session);
+ session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
+ break;
+ case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
+ case ICP_QAT_FW_LA_CMD_TRNG_TEST:
+ case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
+ case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
+ case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
+ case ICP_QAT_FW_LA_CMD_MGF1:
+ case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
+ case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
+ case ICP_QAT_FW_LA_CMD_DELIMITER:
+ PMD_DRV_LOG(ERR, "Unsupported Service %u",
+ session->qat_cmd);
+ goto error_out;
+ default:
+ PMD_DRV_LOG(ERR, "Unsupported Service %u",
+ session->qat_cmd);
+ goto error_out;
+ }
+ return session;
+
+error_out:
+ rte_mempool_put(internals->sess_mp, session);
+ return NULL;
+}
+
+struct qat_session *
+qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct qat_session *session_private)
+{
+
+ struct qat_pmd_private *internals = dev->data->dev_private;
+ struct qat_session *session = session_private;
+ struct rte_crypto_auth_xform *auth_xform = NULL;
+ struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ auth_xform = qat_get_auth_xform(xform);
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
+ break;
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_GCM:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+ break;
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ case RTE_CRYPTO_AUTH_AES_CCM:
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
+ auth_xform->algo);
+ goto error_out;
+ default:
+ PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
+ auth_xform->algo);
+ goto error_out;
+ }
+ cipher_xform = qat_get_cipher_xform(xform);
+
+ if ((session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
+ (session->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
+ if (qat_alg_aead_session_create_content_desc_auth(session,
+ cipher_xform->key.data,
+ cipher_xform->key.length,
+ auth_xform->add_auth_data_length,
+ auth_xform->digest_length))
+ goto error_out;
+ } else {
+ if (qat_alg_aead_session_create_content_desc_auth(session,
+ auth_xform->key.data,
+ auth_xform->key.length,
+ auth_xform->add_auth_data_length,
+ auth_xform->digest_length))
+ goto error_out;
+ }
+ return session;
+
+error_out:
+ rte_mempool_put(internals->sess_mp, session);
+ return NULL;
+}
+
+unsigned qat_crypto_sym_get_session_private_size(
+ struct rte_cryptodev *dev __rte_unused)
+{
+ return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
+}
+
+
+uint16_t
+qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ register struct qat_queue *queue;
+ struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+ register uint32_t nb_ops_sent = 0;
+ register struct rte_crypto_op **cur_op = ops;
+ register int ret;
+ uint16_t nb_ops_possible = nb_ops;
+ register uint8_t *base_addr;
+ register uint32_t tail;
+ int overflow;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+ /* read params used a lot in main loop into registers */
+ queue = &(tmp_qp->tx_q);
+ base_addr = (uint8_t *)queue->base_addr;
+ tail = queue->tail;
+
+ /* Find how many can actually fit on the ring */
+ overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
+ - queue->max_inflights;
+ if (overflow > 0) {
+ rte_atomic16_sub(&tmp_qp->inflights16, overflow);
+ nb_ops_possible = nb_ops - overflow;
+ if (nb_ops_possible == 0)
+ return 0;
+ }
+
+ while (nb_ops_sent != nb_ops_possible) {
+ ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail);
+ if (ret != 0) {
+ tmp_qp->stats.enqueue_err_count++;
+ if (nb_ops_sent == 0)
+ return 0;
+ goto kick_tail;
+ }
+
+ tail = adf_modulo(tail + queue->msg_size, queue->modulo);
+ nb_ops_sent++;
+ cur_op++;
+ }
+kick_tail:
+ WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
+ queue->hw_queue_number, tail);
+ queue->tail = tail;
+ tmp_qp->stats.enqueued_count += nb_ops_sent;
+ return nb_ops_sent;
+}
+
+uint16_t
+qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct qat_queue *queue;
+ struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+ uint32_t msg_counter = 0;
+ struct rte_crypto_op *rx_op;
+ struct icp_qat_fw_comn_resp *resp_msg;
+
+ queue = &(tmp_qp->rx_q);
+ resp_msg = (struct icp_qat_fw_comn_resp *)
+ ((uint8_t *)queue->base_addr + queue->head);
+
+ while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
+ msg_counter != nb_ops) {
+ rx_op = (struct rte_crypto_op *)(uintptr_t)
+ (resp_msg->opaque_data);
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+ rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comn_resp));
+#endif
+ if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+ ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+ resp_msg->comn_hdr.comn_status)) {
+ rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+ *(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
+ queue->head = adf_modulo(queue->head +
+ queue->msg_size,
+ ADF_RING_SIZE_MODULO(queue->queue_size));
+ resp_msg = (struct icp_qat_fw_comn_resp *)
+ ((uint8_t *)queue->base_addr +
+ queue->head);
+ *ops = rx_op;
+ ops++;
+ msg_counter++;
+ }
+ if (msg_counter > 0) {
+ WRITE_CSR_RING_HEAD(tmp_qp->mmap_bar_addr,
+ queue->hw_bundle_number,
+ queue->hw_queue_number, queue->head);
+ rte_atomic16_sub(&tmp_qp->inflights16, msg_counter);
+ tmp_qp->stats.dequeued_count += msg_counter;
+ }
+ return msg_counter;
+}
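A hedged sketch of the poll loop an application typically drives this enqueue/dequeue pair with, through the generic burst API; dev_id, qp_id and the burst size are placeholders and the helper is not part of the patch.

#include <rte_cryptodev.h>
#include <rte_crypto.h>

#define DEMO_BURST_SZ 32	/* placeholder burst size */

/* Illustrative poll step, not part of this patch. */
static void
demo_poll_once(uint8_t dev_id, uint16_t qp_id,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_crypto_op *deq[DEMO_BURST_SZ];
	uint16_t sent, recvd, i;

	/* resolves to qat_pmd_enqueue_op_burst() for a QAT queue pair */
	sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);

	/* resolves to qat_pmd_dequeue_op_burst(); completions may lag */
	recvd = rte_cryptodev_dequeue_burst(dev_id, qp_id,
			deq, DEMO_BURST_SZ);
	for (i = 0; i < recvd; i++) {
		if (deq[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
			/* e.g. RTE_CRYPTO_OP_STATUS_AUTH_FAILED */
		}
	}
	(void)sent;
}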
+
+static inline int
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
+{
+ struct qat_session *ctx;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ register struct icp_qat_fw_la_bulk_req *qat_req;
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+ PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
+ "operation requests, op (%p) is not a "
+ "symmetric operation.", op);
+ return -EINVAL;
+ }
+#endif
+ if (unlikely(op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
+ PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
+ " requests, op (%p) is sessionless.", op);
+ return -EINVAL;
+ }
+
+ if (unlikely(op->sym->session->dev_type != RTE_CRYPTODEV_QAT_SYM_PMD)) {
+ PMD_DRV_LOG(ERR, "Session was not created for this device");
+ return -EINVAL;
+ }
+
+ ctx = (struct qat_session *)op->sym->session->_private;
+ qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+ *qat_req = ctx->fw_req;
+ qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
+
+ qat_req->comn_mid.dst_length =
+ qat_req->comn_mid.src_length =
+ rte_pktmbuf_data_len(op->sym->m_src);
+
+ qat_req->comn_mid.dest_data_addr =
+ qat_req->comn_mid.src_data_addr =
+ rte_pktmbuf_mtophys(op->sym->m_src);
+
+ if (unlikely(op->sym->m_dst != NULL)) {
+ qat_req->comn_mid.dest_data_addr =
+ rte_pktmbuf_mtophys(op->sym->m_dst);
+ qat_req->comn_mid.dst_length =
+ rte_pktmbuf_data_len(op->sym->m_dst);
+ }
+
+ cipher_param = (void *)&qat_req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+
+ cipher_param->cipher_length = op->sym->cipher.data.length;
+ cipher_param->cipher_offset = op->sym->cipher.data.offset;
+ if (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
+ if (unlikely((cipher_param->cipher_length % BYTE_LENGTH != 0) ||
+ (cipher_param->cipher_offset
+ % BYTE_LENGTH != 0))) {
+ PMD_DRV_LOG(ERR, " For Snow3g, QAT PMD only "
+ "supports byte aligned values");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+ cipher_param->cipher_length >>= 3;
+ cipher_param->cipher_offset >>= 3;
+ }
+
+ if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=
+ sizeof(cipher_param->u.cipher_IV_array))) {
+ rte_memcpy(cipher_param->u.cipher_IV_array,
+ op->sym->cipher.iv.data,
+ op->sym->cipher.iv.length);
+ } else {
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+ cipher_param->u.s.cipher_IV_ptr = op->sym->cipher.iv.phys_addr;
+ }
+ if (op->sym->auth.digest.phys_addr) {
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
+ auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
+ }
+ auth_param->auth_off = op->sym->auth.data.offset;
+ auth_param->auth_len = op->sym->auth.data.length;
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) {
+ if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0) ||
+ (auth_param->auth_len % BYTE_LENGTH != 0))) {
+ PMD_DRV_LOG(ERR, " For Snow3g, QAT PMD only "
+ "supports byte aligned values");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+ auth_param->auth_off >>= 3;
+ auth_param->auth_len >>= 3;
+ }
+ auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
+	/* (GCM) AAD length (240 max) will be at this location after precompute */
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ struct icp_qat_hw_auth_algo_blk *hash;
+
+ if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER)
+ hash = (struct icp_qat_hw_auth_algo_blk *)((char *)&ctx->cd);
+ else
+ hash = (struct icp_qat_hw_auth_algo_blk *)((char *)&ctx->cd +
+ sizeof(struct icp_qat_hw_cipher_algo_blk));
+
+ auth_param->u2.aad_sz = ALIGN_POW2_ROUNDUP(hash->sha.state1[
+ ICP_QAT_HW_GALOIS_128_STATE1_SZ +
+ ICP_QAT_HW_GALOIS_H_SZ + 3], 16);
+ if (op->sym->cipher.iv.length == 12) {
+ /*
+			 * For GCM a 12 byte IV is allowed,
+			 * but we need to inform the firmware
+ */
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+ }
+ }
+ auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
+
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+ rte_hexdump(stdout, "qat_req:", qat_req,
+ sizeof(struct icp_qat_fw_la_bulk_req));
+ rte_hexdump(stdout, "src_data:",
+ rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
+ op->sym->cipher.iv.length);
+ rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
+ op->sym->auth.digest.length);
+ rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
+ op->sym->auth.aad.length);
+#endif
+ return 0;
+}
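A hedged sketch of how an application might populate the operation fields this descriptor builder consumes, for an AES-CBC plus SHA1-HMAC operation on a single mbuf. All offsets, lengths and buffer arguments are placeholders; the op and its session are assumed to be allocated and attached elsewhere, and the helper is not part of the patch.

#include <rte_crypto.h>
#include <rte_mbuf.h>
#include <rte_memory.h>

/* Illustrative helper, not part of this patch; values are placeholders. */
static void
demo_fill_sym_op(struct rte_crypto_op *op, struct rte_mbuf *m,
		uint8_t *iv, phys_addr_t iv_phys,
		uint8_t *digest, phys_addr_t digest_phys)
{
	op->sym->m_src = m;

	/* byte range to encrypt within the mbuf data */
	op->sym->cipher.data.offset = 16;
	op->sym->cipher.data.length = 1024;
	op->sym->cipher.iv.data = iv;
	op->sym->cipher.iv.phys_addr = iv_phys;
	op->sym->cipher.iv.length = 16;

	/* byte range to authenticate, and where the digest goes */
	op->sym->auth.data.offset = 0;
	op->sym->auth.data.length = 1040;
	op->sym->auth.digest.data = digest;
	op->sym->auth.digest.phys_addr = digest_phys;
	op->sym->auth.digest.length = 20;
}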
+
+static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
+{
+ uint32_t div = data >> shift;
+ uint32_t mult = div << shift;
+
+ return data - mult;
+}
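A worked equivalence may help here, since the ring geometry guarantees power-of-two sizes.

/*
 * Illustration only: because the ring sizes used here are powers of two,
 * adf_modulo(data, shift) == (data & ((1u << shift) - 1u)),
 * e.g. adf_modulo(0x1040, 12) == 0x040.
 */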
+
+void qat_crypto_sym_session_init(struct rte_mempool *mp, void *priv_sess)
+{
+ struct qat_session *s = priv_sess;
+
+ PMD_INIT_FUNC_TRACE();
+ s->cd_paddr = rte_mempool_virt2phy(mp, &s->cd);
+}
+
+int qat_dev_config(__rte_unused struct rte_cryptodev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ return -ENOTSUP;
+}
+
+int qat_dev_start(__rte_unused struct rte_cryptodev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ return 0;
+		cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
+
+void qat_dev_stop(__rte_unused struct rte_cryptodev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+int qat_dev_close(struct rte_cryptodev *dev)
+{
+ int i, ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ ret = qat_crypto_sym_qp_release(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+void qat_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct qat_pmd_private *internals = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+ if (info != NULL) {
+ info->max_nb_queue_pairs =
+ ADF_NUM_SYM_QPS_PER_BUNDLE *
+ ADF_NUM_BUNDLES_PER_DEV;
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = qat_pmd_capabilities;
+ info->sym.max_nb_sessions = internals->max_nb_sessions;
+ info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
+ }
+}
+
+void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int i;
+ struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
+
+ PMD_INIT_FUNC_TRACE();
+ if (stats == NULL) {
+ PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
+ return;
+ }
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ if (qp[i] == NULL) {
+ PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
+ continue;
+ }
+
+ stats->enqueued_count += qp[i]->stats.enqueued_count;
+		stats->dequeued_count += qp[i]->stats.dequeued_count;
+ stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
+		stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
+ }
+}
+
+void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev)
+{
+ int i;
+ struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
+
+ PMD_INIT_FUNC_TRACE();
+ for (i = 0; i < dev->data->nb_queue_pairs; i++)
+ memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
+ PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
+}
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
new file mode 100644
index 00000000..0afe74ee
--- /dev/null
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -0,0 +1,136 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _QAT_CRYPTO_H_
+#define _QAT_CRYPTO_H_
+
+#include <rte_cryptodev_pmd.h>
+#include <rte_memzone.h>
+
+/*
+ * This macro rounds a number up to be a multiple of
+ * the alignment when the alignment is a power of 2
+ */
+#define ALIGN_POW2_ROUNDUP(num, align) \
+ (((num) + (align) - 1) & ~((align) - 1))
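A short worked example of the macro, for illustration only.

/*
 * Worked example (illustration only): with align = 8,
 * ALIGN_POW2_ROUNDUP(20, 8) = (20 + 7) & ~7 = 24, which is how the
 * 20-byte SHA-1 state gets padded to an 8-byte multiple elsewhere
 * in this driver.
 */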
+
+/**
+ * Structure associated with each queue.
+ */
+struct qat_queue {
+ char memz_name[RTE_MEMZONE_NAMESIZE];
+ void *base_addr; /* Base address */
+ phys_addr_t base_phys_addr; /* Queue physical address */
+ uint32_t head; /* Shadow copy of the head */
+ uint32_t tail; /* Shadow copy of the tail */
+ uint32_t modulo;
+ uint32_t msg_size;
+ uint16_t max_inflights;
+ uint32_t queue_size;
+ uint8_t hw_bundle_number;
+ uint8_t hw_queue_number;
+ /* HW queue aka ring offset on bundle */
+};
+
+struct qat_qp {
+ void *mmap_bar_addr;
+ rte_atomic16_t inflights16;
+ struct qat_queue tx_q;
+ struct qat_queue rx_q;
+ struct rte_cryptodev_stats stats;
+} __rte_cache_aligned;
+
+/** private data structure for each QAT device */
+struct qat_pmd_private {
+ char sess_mp_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *sess_mp;
+
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+ unsigned max_nb_sessions;
+ /**< Max number of sessions supported by device */
+};
+
+int qat_dev_config(struct rte_cryptodev *dev);
+int qat_dev_start(struct rte_cryptodev *dev);
+void qat_dev_stop(struct rte_cryptodev *dev);
+int qat_dev_close(struct rte_cryptodev *dev);
+void qat_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info);
+
+void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats);
+void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev);
+
+int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *rx_conf, int socket_id);
+int qat_crypto_sym_qp_release(struct rte_cryptodev *dev,
+ uint16_t queue_pair_id);
+
+int
+qat_pmd_session_mempool_create(struct rte_cryptodev *dev,
+ unsigned nb_objs, unsigned obj_cache_size, int socket_id);
+
+extern unsigned
+qat_crypto_sym_get_session_private_size(struct rte_cryptodev *dev);
+
+extern void
+qat_crypto_sym_session_init(struct rte_mempool *mempool, void *priv_sess);
+
+extern void *
+qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private);
+
+struct qat_session *
+qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct qat_session *session_private);
+
+void *
+qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private);
+
+
+extern void
+qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
+
+
+extern uint16_t
+qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
+extern uint16_t
+qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
+#endif /* _QAT_CRYPTO_H_ */
diff --git a/drivers/crypto/qat/qat_logs.h b/drivers/crypto/qat/qat_logs.h
new file mode 100644
index 00000000..a909f630
--- /dev/null
+++ b/drivers/crypto/qat/qat_logs.h
@@ -0,0 +1,78 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _QAT_LOGS_H_
+#define _QAT_LOGS_H_
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \
+ "PMD: %s(): " fmt "\n", __func__, ##args)
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_INIT
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#else
+#define PMD_INIT_FUNC_TRACE() do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX_FREE
+#define PMD_TX_FREE_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
+#else
+#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
+#endif
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+#endif /* _QAT_LOGS_H_ */
diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c
new file mode 100644
index 00000000..5de47e36
--- /dev/null
+++ b/drivers/crypto/qat/qat_qp.c
@@ -0,0 +1,429 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_atomic.h>
+#include <rte_prefetch.h>
+
+#include "qat_logs.h"
+#include "qat_crypto.h"
+#include "adf_transport_access_macros.h"
+
+#define ADF_MAX_SYM_DESC 4096
+#define ADF_MIN_SYM_DESC 128
+#define ADF_SYM_TX_RING_DESC_SIZE 128
+#define ADF_SYM_RX_RING_DESC_SIZE 32
+#define ADF_SYM_TX_QUEUE_STARTOFF 2
+/* Offset from bundle start to 1st Sym Tx queue */
+#define ADF_SYM_RX_QUEUE_STARTOFF 10
+#define ADF_ARB_REG_SLOT 0x1000
+#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
+
+#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
+ ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
+ (ADF_ARB_REG_SLOT * index), value)
+
+static int qat_qp_check_queue_alignment(uint64_t phys_addr,
+ uint32_t queue_size_bytes);
+static int qat_tx_queue_create(struct rte_cryptodev *dev,
+ struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
+ int socket_id);
+static int qat_rx_queue_create(struct rte_cryptodev *dev,
+ struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
+ int socket_id);
+static void qat_queue_delete(struct qat_queue *queue);
+static int qat_queue_create(struct rte_cryptodev *dev,
+ struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size,
+ int socket_id);
+static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
+ uint32_t *queue_size_for_csr);
+static void adf_configure_queues(struct qat_qp *queue);
+static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
+static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);
+
+static const struct rte_memzone *
+queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
+ int socket_id)
+{
+ const struct rte_memzone *mz;
+ unsigned memzone_flags = 0;
+ const struct rte_memseg *ms;
+
+ PMD_INIT_FUNC_TRACE();
+ mz = rte_memzone_lookup(queue_name);
+ if (mz != 0) {
+ if (((size_t)queue_size <= mz->len) &&
+ ((socket_id == SOCKET_ID_ANY) ||
+ (socket_id == mz->socket_id))) {
+ PMD_DRV_LOG(DEBUG, "re-use memzone already "
+ "allocated for %s", queue_name);
+ return mz;
+ }
+
+ PMD_DRV_LOG(ERR, "Incompatible memzone already "
+ "allocated %s, size %u, socket %d. "
+ "Requested size %u, socket %u",
+ queue_name, (uint32_t)mz->len,
+ mz->socket_id, queue_size, socket_id);
+ return NULL;
+ }
+
+ PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
+ queue_name, queue_size, socket_id);
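+ /*
+ * Pick a memzone size flag that matches the hugepage size of the
+ * first memory segment, falling back to a plain size hint.
+ */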
+ ms = rte_eal_get_physmem_layout();
+ switch (ms[0].hugepage_sz) {
+ case(RTE_PGSIZE_2M):
+ memzone_flags = RTE_MEMZONE_2MB;
+ break;
+ case(RTE_PGSIZE_1G):
+ memzone_flags = RTE_MEMZONE_1GB;
+ break;
+ case(RTE_PGSIZE_16M):
+ memzone_flags = RTE_MEMZONE_16MB;
+ break;
+ case(RTE_PGSIZE_16G):
+ memzone_flags = RTE_MEMZONE_16GB;
+ break;
+ default:
+ memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
+ }
+#ifdef RTE_LIBRTE_XEN_DOM0
+ return rte_memzone_reserve_bounded(queue_name, queue_size,
+ socket_id, 0, RTE_CACHE_LINE_SIZE, RTE_PGSIZE_2M);
+#else
+ return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id,
+ memzone_flags, queue_size);
+#endif
+}
+
+int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ struct qat_qp *qp;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* If qp is already in use free ring memory and qp metadata. */
+ if (dev->data->queue_pairs[queue_pair_id] != NULL) {
+ ret = qat_crypto_sym_qp_release(dev, queue_pair_id);
+ if (ret < 0)
+ return ret;
+ }
+
+ if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) ||
+ (qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
+ PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
+ qp_conf->nb_descriptors);
+ return -EINVAL;
+ }
+
+ if (dev->pci_dev->mem_resource[0].addr == NULL) {
+ PMD_DRV_LOG(ERR, "Could not find VF config space "
+ "(UIO driver attached?).");
+ return -EINVAL;
+ }
+
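+ /* Reject qp ids beyond the symmetric qps exposed by the device. */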
+ if (queue_pair_id >=
+ (ADF_NUM_SYM_QPS_PER_BUNDLE *
+ ADF_NUM_BUNDLES_PER_DEV)) {
+ PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
+ queue_pair_id);
+ return -EINVAL;
+ }
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc("qat PMD qp metadata",
+ sizeof(*qp), RTE_CACHE_LINE_SIZE);
+ if (qp == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
+ return -ENOMEM;
+ }
+ qp->mmap_bar_addr = dev->pci_dev->mem_resource[0].addr;
+ rte_atomic16_init(&qp->inflights16);
+
+ if (qat_tx_queue_create(dev, &(qp->tx_q),
+ queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
+ PMD_DRV_LOG(ERR, "Tx queue create failed "
+ "queue_pair_id=%u", queue_pair_id);
+ goto create_err;
+ }
+
+ if (qat_rx_queue_create(dev, &(qp->rx_q),
+ queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
+ PMD_DRV_LOG(ERR, "Rx queue create failed "
+ "queue_pair_id=%hu", queue_pair_id);
+ qat_queue_delete(&(qp->tx_q));
+ goto create_err;
+ }
+ adf_configure_queues(qp);
+ adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
+ dev->data->queue_pairs[queue_pair_id] = qp;
+ return 0;
+
+create_err:
+ rte_free(qp);
+ return -EFAULT;
+}
+
+int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+{
+ struct qat_qp *qp =
+ (struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
+
+ PMD_INIT_FUNC_TRACE();
+ if (qp == NULL) {
+ PMD_DRV_LOG(DEBUG, "qp already freed");
+ return 0;
+ }
+
+ /* Don't free memory if there are still responses to be processed */
+ if (rte_atomic16_read(&(qp->inflights16)) == 0) {
+ qat_queue_delete(&(qp->tx_q));
+ qat_queue_delete(&(qp->rx_q));
+ } else {
+ return -EAGAIN;
+ }
+
+ adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);
+ rte_free(qp);
+ dev->data->queue_pairs[queue_pair_id] = NULL;
+ return 0;
+}
+
+static int qat_tx_queue_create(struct rte_cryptodev *dev,
+ struct qat_queue *queue, uint8_t qp_id,
+ uint32_t nb_desc, int socket_id)
+{
+ PMD_INIT_FUNC_TRACE();
+ queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
+ queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
+ ADF_SYM_TX_QUEUE_STARTOFF;
+ PMD_DRV_LOG(DEBUG, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u",
+ nb_desc, qp_id, queue->hw_bundle_number,
+ queue->hw_queue_number);
+
+ return qat_queue_create(dev, queue, nb_desc,
+ ADF_SYM_TX_RING_DESC_SIZE, socket_id);
+}
+
+static int qat_rx_queue_create(struct rte_cryptodev *dev,
+ struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc,
+ int socket_id)
+{
+ PMD_INIT_FUNC_TRACE();
+ queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
+ queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
+ ADF_SYM_RX_QUEUE_STARTOFF;
+
+ PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
+ nb_desc, qp_id, queue->hw_bundle_number,
+ queue->hw_queue_number);
+ return qat_queue_create(dev, queue, nb_desc,
+ ADF_SYM_RX_RING_DESC_SIZE, socket_id);
+}
+
+static void qat_queue_delete(struct qat_queue *queue)
+{
+ const struct rte_memzone *mz;
+ int status = 0;
+
+ if (queue == NULL) {
+ PMD_DRV_LOG(DEBUG, "Invalid queue");
+ return;
+ }
+ mz = rte_memzone_lookup(queue->memz_name);
+ if (mz != NULL) {
+ /* Write an unused pattern to the queue memory. */
+ memset(queue->base_addr, 0x7F, queue->queue_size);
+ status = rte_memzone_free(mz);
+ if (status != 0)
+ PMD_DRV_LOG(ERR, "Error %d on freeing queue %s",
+ status, queue->memz_name);
+ } else {
+ PMD_DRV_LOG(DEBUG, "queue %s doesn't exist",
+ queue->memz_name);
+ }
+}
+
+static int
+qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
+ uint32_t nb_desc, uint8_t desc_size, int socket_id)
+{
+ uint64_t queue_base;
+ void *io_addr;
+ const struct rte_memzone *qp_mz;
+ uint32_t queue_size_bytes = nb_desc*desc_size;
+
+ PMD_INIT_FUNC_TRACE();
+ if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
+ PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
+ return -EINVAL;
+ }
+
+ /*
+ * Allocate a memzone for the queue - create a unique name.
+ */
+ snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
+ dev->driver->pci_drv.name, "qp_mem", dev->data->dev_id,
+ queue->hw_bundle_number, queue->hw_queue_number);
+ qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
+ socket_id);
+ if (qp_mz == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
+ return -ENOMEM;
+ }
+
+ queue->base_addr = (char *)qp_mz->addr;
+ queue->base_phys_addr = qp_mz->phys_addr;
+ if (qat_qp_check_queue_alignment(queue->base_phys_addr,
+ queue_size_bytes)) {
+ PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
+ " 0x%"PRIx64"\n",
+ queue->base_phys_addr);
+ return -EFAULT;
+ }
+
+ if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
+ != 0) {
+ PMD_DRV_LOG(ERR, "Invalid num inflights");
+ return -EINVAL;
+ }
+
+ queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
+ ADF_BYTES_TO_MSG_SIZE(desc_size));
+ queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size);
+ PMD_DRV_LOG(DEBUG, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
+ " msg_size %u, max_inflights %u modulo %u",
+ queue->queue_size, queue_size_bytes,
+ nb_desc, desc_size, queue->max_inflights,
+ queue->modulo);
+
+ if (queue->max_inflights < 2) {
+ PMD_DRV_LOG(ERR, "Invalid num inflights");
+ return -EINVAL;
+ }
+ queue->head = 0;
+ queue->tail = 0;
+ queue->msg_size = desc_size;
+
+ /*
+ * Write an unused pattern to the queue memory.
+ */
+ memset(queue->base_addr, 0x7F, queue_size_bytes);
+
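+ /* Program the ring base address CSR for this bundle/ring pair. */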
+ queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
+ queue->queue_size);
+ io_addr = dev->pci_dev->mem_resource[0].addr;
+
+ WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
+ queue->hw_queue_number, queue_base);
+ return 0;
+}
+
+static int qat_qp_check_queue_alignment(uint64_t phys_addr,
+ uint32_t queue_size_bytes)
+{
+ PMD_INIT_FUNC_TRACE();
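+ /* The ring base address must be naturally aligned to the ring size. */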
+ if (((queue_size_bytes - 1) & phys_addr) != 0)
+ return -EINVAL;
+ return 0;
+}
+
+static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
+ uint32_t *p_queue_size_for_csr)
+{
+ uint8_t i = ADF_MIN_RING_SIZE;
+
+ PMD_INIT_FUNC_TRACE();
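+ /*
+ * Find the CSR ring-size encoding whose size in bytes exactly matches
+ * the requested number of messages of the given size.
+ */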
+ for (; i <= ADF_MAX_RING_SIZE; i++)
+ if ((msg_size * msg_num) ==
+ (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
+ *p_queue_size_for_csr = i;
+ return 0;
+ }
+ PMD_DRV_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
+ return -EINVAL;
+}
+
+static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
+{
+ uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+ (ADF_ARB_REG_SLOT *
+ txq->hw_bundle_number);
+ uint32_t value;
+
+ PMD_INIT_FUNC_TRACE();
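+ /* Read-modify-write: set this ring's bit in the bundle arbiter CSR. */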
+ value = ADF_CSR_RD(base_addr, arb_csr_offset);
+ value |= (0x01 << txq->hw_queue_number);
+ ADF_CSR_WR(base_addr, arb_csr_offset, value);
+}
+
+static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
+{
+ uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
+ (ADF_ARB_REG_SLOT *
+ txq->hw_bundle_number);
+ uint32_t value;
+
+ PMD_INIT_FUNC_TRACE();
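+ /* Clear this ring's bit (the XOR assumes the bit is currently set). */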
+ value = ADF_CSR_RD(base_addr, arb_csr_offset);
+ value ^= (0x01 << txq->hw_queue_number);
+ ADF_CSR_WR(base_addr, arb_csr_offset, value);
+}
+
+static void adf_configure_queues(struct qat_qp *qp)
+{
+ uint32_t queue_config;
+ struct qat_queue *queue = &qp->tx_q;
+
+ PMD_INIT_FUNC_TRACE();
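+ /*
+ * Program the ring-config CSRs: a plain config for the Tx ring and a
+ * response config with near-watermark settings for the Rx ring.
+ */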
+ queue_config = BUILD_RING_CONFIG(queue->queue_size);
+
+ WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
+ queue->hw_queue_number, queue_config);
+
+ queue = &qp->rx_q;
+ queue_config =
+ BUILD_RESP_RING_CONFIG(queue->queue_size,
+ ADF_RING_NEAR_WATERMARK_512,
+ ADF_RING_NEAR_WATERMARK_0);
+
+ WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
+ queue->hw_queue_number, queue_config);
+}
diff --git a/drivers/crypto/qat/rte_pmd_qat_version.map b/drivers/crypto/qat/rte_pmd_qat_version.map
new file mode 100644
index 00000000..bbaf1c85
--- /dev/null
+++ b/drivers/crypto/qat/rte_pmd_qat_version.map
@@ -0,0 +1,3 @@
+DPDK_2.2 {
+ local: *;
+};
\ No newline at end of file
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
new file mode 100644
index 00000000..a7912f5a
--- /dev/null
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -0,0 +1,140 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "qat_crypto.h"
+#include "qat_logs.h"
+
+static struct rte_cryptodev_ops crypto_qat_ops = {
+
+ /* Device related operations */
+ .dev_configure = qat_dev_config,
+ .dev_start = qat_dev_start,
+ .dev_stop = qat_dev_stop,
+ .dev_close = qat_dev_close,
+ .dev_infos_get = qat_dev_info_get,
+
+ .stats_get = qat_crypto_sym_stats_get,
+ .stats_reset = qat_crypto_sym_stats_reset,
+ .queue_pair_setup = qat_crypto_sym_qp_setup,
+ .queue_pair_release = qat_crypto_sym_qp_release,
+ .queue_pair_start = NULL,
+ .queue_pair_stop = NULL,
+ .queue_pair_count = NULL,
+
+ /* Crypto related operations */
+ .session_get_size = qat_crypto_sym_get_session_private_size,
+ .session_configure = qat_crypto_sym_configure_session,
+ .session_initialize = qat_crypto_sym_session_init,
+ .session_clear = qat_crypto_sym_clear_session
+};
+
+/*
+ * The set of PCI devices this driver supports
+ */
+
+static struct rte_pci_id pci_id_qat_map[] = {
+ {
+ .vendor_id = 0x8086,
+ .device_id = 0x0443,
+ .subsystem_vendor_id = PCI_ANY_ID,
+ .subsystem_device_id = PCI_ANY_ID
+ },
+ {.device_id = 0},
+};
+
+static int
+crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_drv,
+ struct rte_cryptodev *cryptodev)
+{
+ struct qat_pmd_private *internals;
+
+ PMD_INIT_FUNC_TRACE();
+ PMD_DRV_LOG(DEBUG, "Found crypto device at %02x:%02x.%x",
+ cryptodev->pci_dev->addr.bus,
+ cryptodev->pci_dev->addr.devid,
+ cryptodev->pci_dev->addr.function);
+
+ cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
+ cryptodev->dev_ops = &crypto_qat_ops;
+
+ cryptodev->enqueue_burst = qat_pmd_enqueue_op_burst;
+ cryptodev->dequeue_burst = qat_pmd_dequeue_op_burst;
+
+ cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+ internals = cryptodev->data->dev_private;
+ internals->max_nb_sessions = RTE_QAT_PMD_MAX_NB_SESSIONS;
+
+ /*
+ * For secondary processes, we don't initialise any further, as the
+ * primary process has already done this work.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ PMD_DRV_LOG(DEBUG, "Device already initialised by primary process");
+ return 0;
+ }
+
+ return 0;
+}
+
+static struct rte_cryptodev_driver rte_qat_pmd = {
+ {
+ .name = "rte_qat_pmd",
+ .id_table = pci_id_qat_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ },
+ .cryptodev_init = crypto_qat_dev_init,
+ .dev_private_size = sizeof(struct qat_pmd_private),
+};
+
+static int
+rte_qat_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+ return rte_cryptodev_pmd_driver_register(&rte_qat_pmd, PMD_PDEV);
+}
+
+static struct rte_driver pmd_qat_drv = {
+ .type = PMD_PDEV,
+ .init = rte_qat_pmd_init,
+};
+
+PMD_REGISTER_DRIVER(pmd_qat_drv);
diff --git a/drivers/crypto/snow3g/Makefile b/drivers/crypto/snow3g/Makefile
new file mode 100644
index 00000000..ee582702
--- /dev/null
+++ b/drivers/crypto/snow3g/Makefile
@@ -0,0 +1,64 @@
+# BSD LICENSE
+#
+# Copyright(c) 2016 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifeq ($(LIBSSO_PATH),)
+$(error "Please define LIBSSO_PATH environment variable")
+endif
+
+# library name
+LIB = librte_pmd_snow3g.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_snow3g_version.map
+
+# external library include paths
+CFLAGS += -I$(LIBSSO_PATH)
+CFLAGS += -I$(LIBSSO_PATH)/include
+CFLAGS += -I$(LIBSSO_PATH)/build
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += rte_snow3g_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += rte_snow3g_pmd_ops.c
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += lib/librte_cryptodev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/snow3g/rte_pmd_snow3g_version.map b/drivers/crypto/snow3g/rte_pmd_snow3g_version.map
new file mode 100644
index 00000000..dc4d417b
--- /dev/null
+++ b/drivers/crypto/snow3g/rte_pmd_snow3g_version.map
@@ -0,0 +1,3 @@
+DPDK_16.04 {
+ local: *;
+};
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c
new file mode 100644
index 00000000..f3e0e667
--- /dev/null
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -0,0 +1,551 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+#include <rte_kvargs.h>
+
+#include "rte_snow3g_pmd_private.h"
+
+#define SNOW3G_MAX_BURST 8
+#define BYTE_LEN 8
+
+/**
+ * Global static parameter used to create a unique name for each SNOW 3G
+ * crypto device.
+ */
+static unsigned unique_name_id;
+
+static inline int
+create_unique_device_name(char *name, size_t size)
+{
+ int ret;
+
+ if (name == NULL)
+ return -EINVAL;
+
+ ret = snprintf(name, size, "%s_%u", CRYPTODEV_NAME_SNOW3G_PMD,
+ unique_name_id++);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+/** Get xform chain order. */
+static enum snow3g_operation
+snow3g_get_mode(const struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL)
+ return SNOW3G_OP_NOT_SUPPORTED;
+
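+ /* Chains of more than two transforms are not supported. */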
+ if (xform->next != NULL && xform->next->next != NULL)
+ return SNOW3G_OP_NOT_SUPPORTED;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ return SNOW3G_OP_ONLY_AUTH;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return SNOW3G_OP_AUTH_CIPHER;
+ else
+ return SNOW3G_OP_NOT_SUPPORTED;
+ }
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ return SNOW3G_OP_ONLY_CIPHER;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return SNOW3G_OP_CIPHER_AUTH;
+ else
+ return SNOW3G_OP_NOT_SUPPORTED;
+ }
+
+ return SNOW3G_OP_NOT_SUPPORTED;
+}
+
+
+/** Parse crypto xform chain and set private session parameters. */
+int
+snow3g_set_session_parameters(struct snow3g_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ int mode;
+
+ /* Select Crypto operation - hash then cipher / cipher then hash */
+ mode = snow3g_get_mode(xform);
+
+ switch (mode) {
+ case SNOW3G_OP_CIPHER_AUTH:
+ auth_xform = xform->next;
+
+ /* Fall-through */
+ case SNOW3G_OP_ONLY_CIPHER:
+ cipher_xform = xform;
+ break;
+ case SNOW3G_OP_AUTH_CIPHER:
+ cipher_xform = xform->next;
+ /* Fall-through */
+ case SNOW3G_OP_ONLY_AUTH:
+ auth_xform = xform;
+ }
+
+ if (mode == SNOW3G_OP_NOT_SUPPORTED) {
+ SNOW3G_LOG_ERR("Unsupported operation chain order parameter");
+ return -EINVAL;
+ }
+
+ if (cipher_xform) {
+ /* Only SNOW 3G UEA2 supported */
+ if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_SNOW3G_UEA2)
+ return -EINVAL;
+ /* Initialize key */
+ sso_snow3g_init_key_sched(cipher_xform->cipher.key.data,
+ &sess->pKeySched_cipher);
+ }
+
+ if (auth_xform) {
+ /* Only SNOW 3G UIA2 supported */
+ if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_SNOW3G_UIA2)
+ return -EINVAL;
+ sess->auth_op = auth_xform->auth.op;
+ /* Initialize key */
+ sso_snow3g_init_key_sched(auth_xform->auth.key.data,
+ &sess->pKeySched_hash);
+ }
+
+
+ sess->op = mode;
+
+ return 0;
+}
+
+/** Get SNOW 3G session. */
+static struct snow3g_session *
+snow3g_get_session(struct snow3g_qp *qp, struct rte_crypto_op *op)
+{
+ struct snow3g_session *sess;
+
+ if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (unlikely(op->sym->session->dev_type !=
+ RTE_CRYPTODEV_SNOW3G_PMD))
+ return NULL;
+
+ sess = (struct snow3g_session *)op->sym->session->_private;
+ } else {
+ struct rte_cryptodev_session *c_sess = NULL;
+
+ if (rte_mempool_get(qp->sess_mp, (void **)&c_sess))
+ return NULL;
+
+ sess = (struct snow3g_session *)c_sess->_private;
+
+ if (unlikely(snow3g_set_session_parameters(sess,
+ op->sym->xform) != 0))
+ return NULL;
+ }
+
+ return sess;
+}
+
+/** Encrypt/decrypt mbufs with same cipher key. */
+static uint8_t
+process_snow3g_cipher_op(struct rte_crypto_op **ops,
+ struct snow3g_session *session,
+ uint8_t num_ops)
+{
+ unsigned i;
+ uint8_t processed_ops = 0;
+ uint8_t *src[SNOW3G_MAX_BURST], *dst[SNOW3G_MAX_BURST];
+ uint8_t *IV[SNOW3G_MAX_BURST];
+ uint32_t num_bytes[SNOW3G_MAX_BURST];
+
+ for (i = 0; i < num_ops; i++) {
+ /* Sanity checks. */
+ if (ops[i]->sym->cipher.iv.length != 16) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ SNOW3G_LOG_ERR("iv");
+ break;
+ }
+
+ if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
+ || ((ops[i]->sym->cipher.data.offset
+ % BYTE_LEN) != 0)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ SNOW3G_LOG_ERR("Data Length or offset");
+ break;
+ }
+
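+ /*
+ * Offsets and lengths are given in bits, so convert them to bytes;
+ * if no separate destination mbuf is set, operate in place on m_src.
+ */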
+ src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3);
+ dst[i] = ops[i]->sym->m_dst ?
+ rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3) :
+ rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->cipher.data.offset >> 3);
+ IV[i] = ops[i]->sym->cipher.iv.data;
+ num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
+
+ processed_ops++;
+ }
+
+ sso_snow3g_f8_n_buffer(&session->pKeySched_cipher, IV, src, dst,
+ num_bytes, processed_ops);
+
+ return processed_ops;
+}
+
+/** Generate/verify hash from mbufs with same hash key. */
+static int
+process_snow3g_hash_op(struct rte_crypto_op **ops,
+ struct snow3g_session *session,
+ uint8_t num_ops)
+{
+ unsigned i;
+ uint8_t processed_ops = 0;
+ uint8_t *src, *dst;
+ uint32_t length_in_bits;
+
+ for (i = 0; i < num_ops; i++) {
+ if (ops[i]->sym->auth.aad.length != 16) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ SNOW3G_LOG_ERR("aad");
+ break;
+ }
+
+ if (ops[i]->sym->auth.digest.length != 4) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ SNOW3G_LOG_ERR("digest");
+ break;
+ }
+
+ if (((ops[i]->sym->auth.data.length % BYTE_LEN) != 0)
+ || ((ops[i]->sym->auth.data.offset
+ % BYTE_LEN) != 0)) {
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ SNOW3G_LOG_ERR("Data Length or offset");
+ break;
+ }
+
+ length_in_bits = ops[i]->sym->auth.data.length;
+
+ src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
+ (ops[i]->sym->auth.data.offset >> 3);
+
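+ /*
+ * For verification, compute the digest into scratch space appended
+ * to the source mbuf, compare it with the digest in the op and then
+ * trim the scratch space off again.
+ */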
+ if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ dst = (uint8_t *)rte_pktmbuf_append(ops[i]->sym->m_src,
+ ops[i]->sym->auth.digest.length);
+
+ sso_snow3g_f9_1_buffer(&session->pKeySched_hash,
+ ops[i]->sym->auth.aad.data, src,
+ length_in_bits, dst);
+ /* Verify digest. */
+ if (memcmp(dst, ops[i]->sym->auth.digest.data,
+ ops[i]->sym->auth.digest.length) != 0)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+ /* Trim area used for digest from mbuf. */
+ rte_pktmbuf_trim(ops[i]->sym->m_src,
+ ops[i]->sym->auth.digest.length);
+ } else {
+ dst = ops[i]->sym->auth.digest.data;
+
+ sso_snow3g_f9_1_buffer(&session->pKeySched_hash,
+ ops[i]->sym->auth.aad.data, src,
+ length_in_bits, dst);
+ }
+ processed_ops++;
+ }
+
+ return processed_ops;
+}
+
+/** Process a batch of crypto ops which share the same session. */
+static int
+process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
+ struct snow3g_qp *qp, uint8_t num_ops)
+{
+ unsigned i;
+ unsigned processed_ops;
+
+ switch (session->op) {
+ case SNOW3G_OP_ONLY_CIPHER:
+ processed_ops = process_snow3g_cipher_op(ops,
+ session, num_ops);
+ break;
+ case SNOW3G_OP_ONLY_AUTH:
+ processed_ops = process_snow3g_hash_op(ops, session,
+ num_ops);
+ break;
+ case SNOW3G_OP_CIPHER_AUTH:
+ processed_ops = process_snow3g_cipher_op(ops, session,
+ num_ops);
+ process_snow3g_hash_op(ops, session, processed_ops);
+ break;
+ case SNOW3G_OP_AUTH_CIPHER:
+ processed_ops = process_snow3g_hash_op(ops, session,
+ num_ops);
+ process_snow3g_cipher_op(ops, session, processed_ops);
+ break;
+ default:
+ /* Operation not supported. */
+ processed_ops = 0;
+ }
+
+ for (i = 0; i < num_ops; i++) {
+ /*
+ * If there was no error/authentication failure,
+ * change status to successful.
+ */
+ if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ /* Free session if a session-less crypto op. */
+ if (ops[i]->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+ rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
+ ops[i]->sym->session = NULL;
+ }
+ }
+
+ return processed_ops;
+}
+
+static uint16_t
+snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct rte_crypto_op *c_ops[SNOW3G_MAX_BURST];
+ struct rte_crypto_op *curr_c_op;
+
+ struct snow3g_session *prev_sess = NULL, *curr_sess = NULL;
+ struct snow3g_qp *qp = queue_pair;
+ unsigned i, n;
+ uint8_t burst_size = 0;
+ uint16_t enqueued_ops = 0;
+ uint8_t processed_ops;
+
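+ /*
+ * Gather ops into per-session bursts of up to SNOW3G_MAX_BURST,
+ * process each burst synchronously and place the results on the
+ * processed-ops ring for the dequeue burst function to pick up.
+ */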
+ for (i = 0; i < nb_ops; i++) {
+ curr_c_op = ops[i];
+
+ /* Set status as enqueued (not processed yet) by default. */
+ curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ curr_sess = snow3g_get_session(qp, curr_c_op);
+ if (unlikely(curr_sess == NULL ||
+ curr_sess->op == SNOW3G_OP_NOT_SUPPORTED)) {
+ curr_c_op->status =
+ RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
+ return enqueued_ops;
+ }
+
+ /* Batch ops that share the same session. */
+ if (prev_sess == NULL) {
+ prev_sess = curr_sess;
+ c_ops[burst_size++] = curr_c_op;
+ } else if (curr_sess == prev_sess) {
+ c_ops[burst_size++] = curr_c_op;
+ /*
+ * When there are enough ops to process in a batch,
+ * process them, and start a new batch.
+ */
+ if (burst_size == SNOW3G_MAX_BURST) {
+ processed_ops = process_ops(c_ops,
+ prev_sess, qp, burst_size);
+ n = rte_ring_enqueue_burst(qp->processed_ops,
+ (void **)c_ops,
+ processed_ops);
+ qp->qp_stats.enqueued_count += n;
+ enqueued_ops += n;
+ if (n < burst_size) {
+ qp->qp_stats.enqueue_err_count +=
+ nb_ops - enqueued_ops;
+ return enqueued_ops;
+ }
+ burst_size = 0;
+
+ prev_sess = NULL;
+ }
+ } else {
+ /*
+ * Different session, process the ops
+ * of the previous session.
+ */
+ processed_ops = process_ops(c_ops,
+ prev_sess, qp, burst_size);
+ n = rte_ring_enqueue_burst(qp->processed_ops,
+ (void **)c_ops,
+ processed_ops);
+ qp->qp_stats.enqueued_count += n;
+ enqueued_ops += n;
+ if (n < burst_size) {
+ qp->qp_stats.enqueue_err_count +=
+ nb_ops - enqueued_ops;
+ return enqueued_ops;
+ }
+ burst_size = 0;
+
+ prev_sess = curr_sess;
+ c_ops[burst_size++] = curr_c_op;
+ }
+ }
+
+ if (burst_size != 0) {
+ /* Process the crypto ops of the last session. */
+ processed_ops = process_ops(c_ops,
+ prev_sess, qp, burst_size);
+ n = rte_ring_enqueue_burst(qp->processed_ops,
+ (void **)c_ops,
+ processed_ops);
+ qp->qp_stats.enqueued_count += n;
+ enqueued_ops += n;
+ if (n < burst_size) {
+ qp->qp_stats.enqueue_err_count +=
+ nb_ops - enqueued_ops;
+ return enqueued_ops;
+ }
+ }
+
+ return enqueued_ops;
+}
+
+static uint16_t
+snow3g_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **c_ops, uint16_t nb_ops)
+{
+ struct snow3g_qp *qp = queue_pair;
+
+ unsigned nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops,
+ (void **)c_ops, nb_ops);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+static int cryptodev_snow3g_uninit(const char *name);
+
+static int
+cryptodev_snow3g_create(const char *name,
+ struct rte_crypto_vdev_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct snow3g_private *internals;
+
+ /* Create a unique device name. */
+ if (create_unique_device_name(crypto_dev_name,
+ RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
+ SNOW3G_LOG_ERR("failed to create unique cryptodev name");
+ return -EINVAL;
+ }
+
+ dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+ sizeof(struct snow3g_private), init_params->socket_id);
+ if (dev == NULL) {
+ SNOW3G_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->dev_type = RTE_CRYPTODEV_SNOW3G_PMD;
+ dev->dev_ops = rte_snow3g_pmd_ops;
+
+ /* Register RX/TX burst functions for data path. */
+ dev->dequeue_burst = snow3g_pmd_dequeue_burst;
+ dev->enqueue_burst = snow3g_pmd_enqueue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+ internals = dev->data->dev_private;
+
+ internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->max_nb_sessions;
+
+ return 0;
+init_error:
+ SNOW3G_LOG_ERR("driver %s: cryptodev_snow3g_create failed", name);
+
+ cryptodev_snow3g_uninit(crypto_dev_name);
+ return -EFAULT;
+}
+
+static int
+cryptodev_snow3g_init(const char *name,
+ const char *input_args)
+{
+ struct rte_crypto_vdev_init_params init_params = {
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ rte_socket_id()
+ };
+
+ rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+ init_params.socket_id);
+ RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
+ init_params.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
+ init_params.max_nb_sessions);
+
+ return cryptodev_snow3g_create(name, &init_params);
+}
+
+static int
+cryptodev_snow3g_uninit(const char *name)
+{
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD, "Closing SNOW3G crypto device %s"
+ " on numa socket %u\n",
+ name, rte_socket_id());
+
+ return 0;
+}
+
+static struct rte_driver cryptodev_snow3g_pmd_drv = {
+ .name = CRYPTODEV_NAME_SNOW3G_PMD,
+ .type = PMD_VDEV,
+ .init = cryptodev_snow3g_init,
+ .uninit = cryptodev_snow3g_uninit
+};
+
+PMD_REGISTER_DRIVER(cryptodev_snow3g_pmd_drv);
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
new file mode 100644
index 00000000..6f00b066
--- /dev/null
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
@@ -0,0 +1,342 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_snow3g_pmd_private.h"
+
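+/*
+ * Capabilities advertised to applications: SNOW 3G UIA2 authentication and
+ * SNOW 3G UEA2 ciphering, both with fixed 16-byte keys and IV/AAD sizes.
+ */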
+static const struct rte_cryptodev_capabilities snow3g_pmd_capabilities[] = {
+ { /* SNOW3G (UIA2) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* SNOW3G (UEA2) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+/** Configure device */
+static int
+snow3g_pmd_config(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Start device */
+static int
+snow3g_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+snow3g_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+snow3g_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+
+/** Get device statistics */
+static void
+snow3g_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct snow3g_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+snow3g_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct snow3g_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+
+/** Get device info */
+static void
+snow3g_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct snow3g_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->dev_type = dev->dev_type;
+ dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = snow3g_pmd_capabilities;
+ }
+}
+
+/** Release queue pair */
+static int
+snow3g_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Set a unique name for the queue pair based on the device id and qp id */
+static int
+snow3g_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct snow3g_qp *qp)
+{
+ unsigned n = snprintf(qp->name, sizeof(qp->name),
+ "snow3g_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place processed ops on */
+static struct rte_ring *
+snow3g_pmd_qp_create_processed_ops_ring(struct snow3g_qp *qp,
+ unsigned ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (r->prod.size >= ring_size) {
+ SNOW3G_LOG_INFO("Reusing existing ring %s"
+ " for processed packets",
+ qp->name);
+ return r;
+ }
+
+ SNOW3G_LOG_ERR("Unable to reuse existing ring %s"
+ " for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+snow3g_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id)
+{
+ struct snow3g_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ snow3g_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("SNOW3G PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return -ENOMEM;
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (snow3g_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->processed_ops = snow3g_pmd_qp_create_processed_ops_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_ops == NULL)
+ goto qp_setup_cleanup;
+
+ qp->sess_mp = dev->data->session_pool;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+ return 0;
+
+qp_setup_cleanup:
+ if (qp)
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Start queue pair */
+static int
+snow3g_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Stop queue pair */
+static int
+snow3g_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+snow3g_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the SNOW 3G session structure */
+static unsigned
+snow3g_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct snow3g_session);
+}
+
+/** Configure a SNOW 3G session from a crypto xform chain */
+static void *
+snow3g_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ if (unlikely(sess == NULL)) {
+ SNOW3G_LOG_ERR("invalid session struct");
+ return NULL;
+ }
+
+ if (snow3g_set_session_parameters(sess, xform) != 0) {
+ SNOW3G_LOG_ERR("failed configure session parameters");
+ return NULL;
+ }
+
+ return sess;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+snow3g_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+{
+ /*
+ * Currently the whole data structure is reset; it needs to be
+ * investigated whether a more selective reset of the key material
+ * would be more performant.
+ */
+ if (sess)
+ memset(sess, 0, sizeof(struct snow3g_session));
+}
+
+struct rte_cryptodev_ops snow3g_pmd_ops = {
+ .dev_configure = snow3g_pmd_config,
+ .dev_start = snow3g_pmd_start,
+ .dev_stop = snow3g_pmd_stop,
+ .dev_close = snow3g_pmd_close,
+
+ .stats_get = snow3g_pmd_stats_get,
+ .stats_reset = snow3g_pmd_stats_reset,
+
+ .dev_infos_get = snow3g_pmd_info_get,
+
+ .queue_pair_setup = snow3g_pmd_qp_setup,
+ .queue_pair_release = snow3g_pmd_qp_release,
+ .queue_pair_start = snow3g_pmd_qp_start,
+ .queue_pair_stop = snow3g_pmd_qp_stop,
+ .queue_pair_count = snow3g_pmd_qp_count,
+
+ .session_get_size = snow3g_pmd_session_get_size,
+ .session_configure = snow3g_pmd_session_configure,
+ .session_clear = snow3g_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *rte_snow3g_pmd_ops = &snow3g_pmd_ops;
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd_private.h b/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
new file mode 100644
index 00000000..b383cbcb
--- /dev/null
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
@@ -0,0 +1,107 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_SNOW3G_PMD_PRIVATE_H_
+#define _RTE_SNOW3G_PMD_PRIVATE_H_
+
+#include <sso_snow3g.h>
+
+#define SNOW3G_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ CRYPTODEV_NAME_SNOW3G_PMD, \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_SNOW3G_DEBUG
+#define SNOW3G_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ CRYPTODEV_NAME_SNOW3G_PMD, \
+ __func__, __LINE__, ## args)
+
+#define SNOW3G_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ CRYPTODEV_NAME_SNOW3G_PMD, \
+ __func__, __LINE__, ## args)
+#else
+#define SNOW3G_LOG_INFO(fmt, args...)
+#define SNOW3G_LOG_DBG(fmt, args...)
+#endif
+
+/** private data structure for each virtual SNOW 3G device */
+struct snow3g_private {
+ unsigned max_nb_queue_pairs;
+ /**< Max number of queue pairs supported by device */
+ unsigned max_nb_sessions;
+ /**< Max number of sessions supported by device */
+};
+
+/** SNOW 3G buffer queue pair */
+struct snow3g_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_ops;
+ /**< Ring for placing processed ops */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+} __rte_cache_aligned;
+
+enum snow3g_operation {
+ SNOW3G_OP_ONLY_CIPHER,
+ SNOW3G_OP_ONLY_AUTH,
+ SNOW3G_OP_CIPHER_AUTH,
+ SNOW3G_OP_AUTH_CIPHER,
+ SNOW3G_OP_NOT_SUPPORTED
+};
+
+/** SNOW 3G private session structure */
+struct snow3g_session {
+ enum snow3g_operation op;
+ enum rte_crypto_auth_operation auth_op;
+ sso_snow3g_key_schedule_t pKeySched_cipher;
+ sso_snow3g_key_schedule_t pKeySched_hash;
+} __rte_cache_aligned;
+
+
+extern int
+snow3g_set_session_parameters(struct snow3g_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+
+/** device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_snow3g_pmd_ops;
+
+
+
+#endif /* _RTE_SNOW3G_PMD_PRIVATE_H_ */