author    Luca Boccassi <luca.boccassi@gmail.com>    2018-08-14 18:52:30 +0100
committer Luca Boccassi <luca.boccassi@gmail.com>    2018-08-14 18:53:17 +0100
commit    b63264c8342e6a1b6971c79550d2af2024b6a4de (patch)
tree      83114aac64286fe616506c0b3dfaec2ab86ef835 /drivers/crypto/qat
parent    ca33590b6af032bff57d9cc70455660466a654b2 (diff)

New upstream version 18.08 (upstream/18.08)

Change-Id: I32fdf5e5016556d9c0a6d88ddaf1fc468961790a
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/crypto/qat')
-rw-r--r--  drivers/crypto/qat/Makefile                                |   35
-rw-r--r--  drivers/crypto/qat/README                                  |    7
-rw-r--r--  drivers/crypto/qat/meson.build                             |   24
-rw-r--r--  drivers/crypto/qat/qat_adf/adf_transport_access_macros.h   |  176
-rw-r--r--  drivers/crypto/qat/qat_adf/icp_qat_fw.h                    |  316
-rw-r--r--  drivers/crypto/qat/qat_adf/icp_qat_fw_la.h                 |  404
-rw-r--r--  drivers/crypto/qat/qat_adf/icp_qat_hw.h                    |  329
-rw-r--r--  drivers/crypto/qat/qat_adf/qat_algs.h                      |  169
-rw-r--r--  drivers/crypto/qat/qat_crypto.c                            | 1696
-rw-r--r--  drivers/crypto/qat/qat_crypto.h                            |  150
-rw-r--r--  drivers/crypto/qat/qat_logs.h                              |   49
-rw-r--r--  drivers/crypto/qat/qat_qp.c                                |  470
-rw-r--r--  drivers/crypto/qat/qat_sym.c                               |  569
-rw-r--r--  drivers/crypto/qat/qat_sym.h                               |  174
-rw-r--r--  drivers/crypto/qat/qat_sym_capabilities.h (renamed from drivers/crypto/qat/qat_crypto_capabilities.h) |   10
-rw-r--r--  drivers/crypto/qat/qat_sym_pmd.c                           |  331
-rw-r--r--  drivers/crypto/qat/qat_sym_pmd.h                           |   41
-rw-r--r--  drivers/crypto/qat/qat_sym_session.c (renamed from drivers/crypto/qat/qat_adf/qat_algs_build_desc.c) |  890
-rw-r--r--  drivers/crypto/qat/qat_sym_session.h                       |  145
-rw-r--r--  drivers/crypto/qat/rte_pmd_qat_version.map                 |    3
-rw-r--r--  drivers/crypto/qat/rte_qat_cryptodev.c                     |  180
21 files changed, 2064 insertions(+), 4104 deletions(-)
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile
deleted file mode 100644
index 260912dc..00000000
--- a/drivers/crypto/qat/Makefile
+++ /dev/null
@@ -1,35 +0,0 @@
-# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2015 Intel Corporation
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-# library name
-LIB = librte_pmd_qat.a
-
-# library version
-LIBABIVER := 1
-
-# build flags
-CFLAGS += $(WERROR_FLAGS)
-CFLAGS += -O3
-
-# external library include paths
-CFLAGS += -I$(SRCDIR)/qat_adf
-LDLIBS += -lcrypto
-LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
-LDLIBS += -lrte_cryptodev
-LDLIBS += -lrte_pci -lrte_bus_pci
-
-# library source files
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_crypto.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_qp.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_adf/qat_algs_build_desc.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += rte_qat_cryptodev.c
-
-# export include files
-SYMLINK-y-include +=
-
-# versioning export map
-EXPORT_MAP := rte_pmd_qat_version.map
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/qat/README b/drivers/crypto/qat/README
new file mode 100644
index 00000000..444ae605
--- /dev/null
+++ b/drivers/crypto/qat/README
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2015-2018 Intel Corporation
+
+The Makefile for the crypto QAT PMD is in the common/qat directory.
+The QAT driver is built from there, since only one library is built for
+the whole QAT PCI device and that library includes all the services
+(crypto, compression) enabled on the device.
diff --git a/drivers/crypto/qat/meson.build b/drivers/crypto/qat/meson.build
index 7b904630..9cc98d2c 100644
--- a/drivers/crypto/qat/meson.build
+++ b/drivers/crypto/qat/meson.build
@@ -1,14 +1,18 @@
# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2017 Intel Corporation
+# Copyright(c) 2017-2018 Intel Corporation
+# This does not build the QAT driver; that is done in the compression
+# driver, which comes later. Here we just add our source files to the list.
+build = false
dep = dependency('libcrypto', required: false)
-if not dep.found()
- build = false
+qat_includes += include_directories('.')
+qat_deps += 'cryptodev'
+if dep.found()
+ # Add our source files to the list
+ qat_sources += files('qat_sym_pmd.c',
+ 'qat_sym.c',
+ 'qat_sym_session.c')
+ qat_ext_deps += dep
+ pkgconfig_extra_libs += '-lcrypto'
+ qat_cflags += '-DBUILD_QAT_SYM'
endif
-sources = files('qat_crypto.c', 'qat_qp.c',
- 'qat_adf/qat_algs_build_desc.c',
- 'rte_qat_cryptodev.c')
-includes += include_directories('qat_adf')
-deps += ['bus_pci']
-ext_deps += dep
-pkgconfig_extra_libs += '-lcrypto'
diff --git a/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h b/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
deleted file mode 100644
index 4f8f3d13..00000000
--- a/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
-#define ADF_TRANSPORT_ACCESS_MACROS_H
-
-#include <rte_io.h>
-
-/* CSR write macro */
-#define ADF_CSR_WR(csrAddr, csrOffset, val) \
- rte_write32(val, (((uint8_t *)csrAddr) + csrOffset))
-
-/* CSR read macro */
-#define ADF_CSR_RD(csrAddr, csrOffset) \
- rte_read32((((uint8_t *)csrAddr) + csrOffset))
-
-#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
-#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
-#define ADF_RING_CSR_RING_CONFIG 0x000
-#define ADF_RING_CSR_RING_LBASE 0x040
-#define ADF_RING_CSR_RING_UBASE 0x080
-#define ADF_RING_CSR_RING_HEAD 0x0C0
-#define ADF_RING_CSR_RING_TAIL 0x100
-#define ADF_RING_CSR_E_STAT 0x14C
-#define ADF_RING_CSR_INT_SRCSEL 0x174
-#define ADF_RING_CSR_INT_SRCSEL_2 0x178
-#define ADF_RING_CSR_INT_COL_EN 0x17C
-#define ADF_RING_CSR_INT_COL_CTL 0x180
-#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
-#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
-#define ADF_RING_BUNDLE_SIZE 0x1000
-#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A
-#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05
-#define ADF_COALESCING_MIN_TIME 0x1FF
-#define ADF_COALESCING_MAX_TIME 0xFFFFF
-#define ADF_COALESCING_DEF_TIME 0x27FF
-#define ADF_RING_NEAR_WATERMARK_512 0x08
-#define ADF_RING_NEAR_WATERMARK_0 0x00
-#define ADF_RING_EMPTY_SIG 0x7F7F7F7F
-#define ADF_RING_EMPTY_SIG_BYTE 0x7F
-
-/* Valid internal ring size values */
-#define ADF_RING_SIZE_128 0x01
-#define ADF_RING_SIZE_256 0x02
-#define ADF_RING_SIZE_512 0x03
-#define ADF_RING_SIZE_4K 0x06
-#define ADF_RING_SIZE_16K 0x08
-#define ADF_RING_SIZE_4M 0x10
-#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
-#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
-#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
-
-#define ADF_NUM_BUNDLES_PER_DEV 1
-#define ADF_NUM_SYM_QPS_PER_BUNDLE 2
-
-/* Valid internal msg size values */
-#define ADF_MSG_SIZE_32 0x01
-#define ADF_MSG_SIZE_64 0x02
-#define ADF_MSG_SIZE_128 0x04
-#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
-#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
-
-/* Size to bytes conversion macros for ring and msg size values */
-#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
-#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
-#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
-#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
-
-/* Minimum ring buffer size for memory allocation */
-#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
- ADF_RING_SIZE_4K : SIZE)
-#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
-#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
- SIZE) & ~0x4)
-/* Max outstanding requests */
-#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
- ((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1)
-#define BUILD_RING_CONFIG(size) \
- ((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
- | (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
- | size)
-#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \
- ((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM) \
- | (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
- | size)
-#define BUILD_RING_BASE_ADDR(addr, size) \
- ((addr >> 6) & (0xFFFFFFFFFFFFFFFFULL << size))
-#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
- ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
- ADF_RING_CSR_RING_HEAD + (ring << 2))
-#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
- ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
- ADF_RING_CSR_RING_TAIL + (ring << 2))
-#define READ_CSR_E_STAT(csr_base_addr, bank) \
- ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
- ADF_RING_CSR_E_STAT)
-#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
- ADF_RING_CSR_RING_CONFIG + (ring << 2), value)
-#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
-do { \
- uint32_t l_base = 0, u_base = 0; \
- l_base = (uint32_t)(value & 0xFFFFFFFF); \
- u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
- ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
- ADF_RING_CSR_RING_UBASE + (ring << 2), u_base); \
-} while (0)
-#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
- ADF_RING_CSR_RING_HEAD + (ring << 2), value)
-#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
- ADF_RING_CSR_RING_TAIL + (ring << 2), value)
-#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
-do { \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
- ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
- ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
-} while (0)
-#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
- ADF_RING_CSR_INT_COL_EN, value)
-#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
- ADF_RING_CSR_INT_COL_CTL, \
- ADF_RING_CSR_INT_COL_CTL_ENABLE | value)
-#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
- ADF_RING_CSR_INT_FLAG_AND_COL, value)
-#endif
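
The header removed above packs the transport state into a fixed register
map: each bank is a 0x1000-byte bundle, each ring has 32-bit
CONFIG/LBASE/UBASE/HEAD/TAIL registers indexed by (ring << 2), and ring and
message sizes are stored as power-of-two codes. A minimal C sketch of how
the macros compose when programming one ring; the bank/ring numbers and the
pre-mapped ring IOVA are illustrative assumptions, not values from this
patch:

#include <stdint.h>
#include "adf_transport_access_macros.h"

static void qat_ring_setup_sketch(void *csr_base, uint64_t ring_iova)
{
	const uint32_t bank = 0, ring = 0;                /* assumed QP */
	const uint32_t ring_size = ADF_DEFAULT_RING_SIZE; /* code 0x08 = 16 KB */
	const uint32_t msg_size = ADF_MSG_SIZE_128;       /* code 0x04 = 128 B */

	/* 16384 B / 128 B = 128 slots, so at most 127 requests in flight */
	uint32_t max_inflight = ADF_MAX_INFLIGHTS(ring_size, msg_size);

	/* Split the 64-bit base into LBASE/UBASE, then reset head and tail */
	WRITE_CSR_RING_BASE(csr_base, bank, ring,
			BUILD_RING_BASE_ADDR(ring_iova, ring_size));
	WRITE_CSR_RING_CONFIG(csr_base, bank, ring,
			BUILD_RING_CONFIG(ring_size));
	WRITE_CSR_RING_HEAD(csr_base, bank, ring, 0);
	WRITE_CSR_RING_TAIL(csr_base, bank, ring, 0);
	(void)max_inflight;
}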
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_fw.h b/drivers/crypto/qat/qat_adf/icp_qat_fw.h
deleted file mode 100644
index 5de34d55..00000000
--- a/drivers/crypto/qat/qat_adf/icp_qat_fw.h
+++ /dev/null
@@ -1,316 +0,0 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _ICP_QAT_FW_H_
-#define _ICP_QAT_FW_H_
-#include <sys/types.h>
-#include "icp_qat_hw.h"
-
-#define QAT_FIELD_SET(flags, val, bitpos, mask) \
-{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \
- (((val) & (mask)) << (bitpos))) ; }
-
-#define QAT_FIELD_GET(flags, bitpos, mask) \
- (((flags) >> (bitpos)) & (mask))
-
-#define ICP_QAT_FW_REQ_DEFAULT_SZ 128
-#define ICP_QAT_FW_RESP_DEFAULT_SZ 32
-#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8
-#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF
-#define ICP_QAT_FW_NUM_LONGWORDS_1 1
-#define ICP_QAT_FW_NUM_LONGWORDS_2 2
-#define ICP_QAT_FW_NUM_LONGWORDS_3 3
-#define ICP_QAT_FW_NUM_LONGWORDS_4 4
-#define ICP_QAT_FW_NUM_LONGWORDS_5 5
-#define ICP_QAT_FW_NUM_LONGWORDS_6 6
-#define ICP_QAT_FW_NUM_LONGWORDS_7 7
-#define ICP_QAT_FW_NUM_LONGWORDS_10 10
-#define ICP_QAT_FW_NUM_LONGWORDS_13 13
-#define ICP_QAT_FW_NULL_REQ_SERV_ID 1
-
-enum icp_qat_fw_comn_resp_serv_id {
- ICP_QAT_FW_COMN_RESP_SERV_NULL,
- ICP_QAT_FW_COMN_RESP_SERV_CPM_FW,
- ICP_QAT_FW_COMN_RESP_SERV_DELIMITER
-};
-
-enum icp_qat_fw_comn_request_id {
- ICP_QAT_FW_COMN_REQ_NULL = 0,
- ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3,
- ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4,
- ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7,
- ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9,
- ICP_QAT_FW_COMN_REQ_DELIMITER
-};
-
-struct icp_qat_fw_comn_req_hdr_cd_pars {
- union {
- struct {
- uint64_t content_desc_addr;
- uint16_t content_desc_resrvd1;
- uint8_t content_desc_params_sz;
- uint8_t content_desc_hdr_resrvd2;
- uint32_t content_desc_resrvd3;
- } s;
- struct {
- uint32_t serv_specif_fields[4];
- } s1;
- } u;
-};
-
-struct icp_qat_fw_comn_req_mid {
- uint64_t opaque_data;
- uint64_t src_data_addr;
- uint64_t dest_data_addr;
- uint32_t src_length;
- uint32_t dst_length;
-};
-
-struct icp_qat_fw_comn_req_cd_ctrl {
- uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5];
-};
-
-struct icp_qat_fw_comn_req_hdr {
- uint8_t resrvd1;
- uint8_t service_cmd_id;
- uint8_t service_type;
- uint8_t hdr_flags;
- uint16_t serv_specif_flags;
- uint16_t comn_req_flags;
-};
-
-struct icp_qat_fw_comn_req_rqpars {
- uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13];
-};
-
-struct icp_qat_fw_comn_req {
- struct icp_qat_fw_comn_req_hdr comn_hdr;
- struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
- struct icp_qat_fw_comn_req_mid comn_mid;
- struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
- struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
-};
-
-struct icp_qat_fw_comn_error {
- uint8_t xlat_err_code;
- uint8_t cmp_err_code;
-};
-
-struct icp_qat_fw_comn_resp_hdr {
- uint8_t resrvd1;
- uint8_t service_id;
- uint8_t response_type;
- uint8_t hdr_flags;
- struct icp_qat_fw_comn_error comn_error;
- uint8_t comn_status;
- uint8_t cmd_id;
-};
-
-struct icp_qat_fw_comn_resp {
- struct icp_qat_fw_comn_resp_hdr comn_hdr;
- uint64_t opaque_data;
- uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
-};
-
-#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1
-#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0
-#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
-#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
-#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F
-
-#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
- icp_qat_fw_comn_req_hdr_t.service_type
-
-#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \
- icp_qat_fw_comn_req_hdr_t.service_type = val
-
-#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \
- icp_qat_fw_comn_req_hdr_t.service_cmd_id
-
-#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \
- icp_qat_fw_comn_req_hdr_t.service_cmd_id = val
-
-#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \
- ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)
-
-#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \
- ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val)
-
-#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \
- QAT_FIELD_GET(hdr_flags, \
- ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
- ICP_QAT_FW_COMN_VALID_FLAG_MASK)
-
-#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \
- (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK)
-
-#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \
- QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
- ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
- ICP_QAT_FW_COMN_VALID_FLAG_MASK)
-
-#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \
- (((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
- ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
-
-#define QAT_COMN_PTR_TYPE_BITPOS 0
-#define QAT_COMN_PTR_TYPE_MASK 0x1
-#define QAT_COMN_CD_FLD_TYPE_BITPOS 1
-#define QAT_COMN_CD_FLD_TYPE_MASK 0x1
-#define QAT_COMN_PTR_TYPE_FLAT 0x0
-#define QAT_COMN_PTR_TYPE_SGL 0x1
-#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0
-#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1
-
-#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
- ((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
- | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
-
-#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
- QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
-
-#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \
- QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \
- QAT_COMN_CD_FLD_TYPE_MASK)
-
-#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \
- QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \
- QAT_COMN_PTR_TYPE_MASK)
-
-#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \
- QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \
- QAT_COMN_CD_FLD_TYPE_MASK)
-
-#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4
-#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0
-#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0
-#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F
-
-#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \
- ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
- >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
-
-#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
- { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
- & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
- ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
- & ICP_QAT_FW_COMN_NEXT_ID_MASK)); }
-
-#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \
- (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK)
-
-#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \
- { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
- & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
- ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }
-
-#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
-#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
-#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
-#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
-#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
-#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1
-#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3
-#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
-
-#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \
- ((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \
- QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \
- (((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \
- QAT_COMN_RESP_CMP_STATUS_BITPOS) | \
- (((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \
- QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \
- (((eolb) & QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \
- QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS))
-
-#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \
- QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \
- QAT_COMN_RESP_CRYPTO_STATUS_MASK)
-
-#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \
- QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \
- QAT_COMN_RESP_CMP_STATUS_MASK)
-
-#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \
- QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \
- QAT_COMN_RESP_XLAT_STATUS_MASK)
-
-#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \
- QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \
- QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK)
-
-#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
-#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
-#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
-#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1
-#define ERR_CODE_NO_ERROR 0
-#define ERR_CODE_INVALID_BLOCK_TYPE -1
-#define ERR_CODE_NO_MATCH_ONES_COMP -2
-#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3
-#define ERR_CODE_INCOMPLETE_LEN -4
-#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5
-#define ERR_CODE_RPT_GT_SPEC_LEN -6
-#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7
-#define ERR_CODE_INV_DIS_CODE_LEN -8
-#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9
-#define ERR_CODE_DIS_TOO_FAR_BACK -10
-#define ERR_CODE_OVERFLOW_ERROR -11
-#define ERR_CODE_SOFT_ERROR -12
-#define ERR_CODE_FATAL_ERROR -13
-#define ERR_CODE_SSM_ERROR -14
-#define ERR_CODE_ENDPOINT_ERROR -15
-
-enum icp_qat_fw_slice {
- ICP_QAT_FW_SLICE_NULL = 0,
- ICP_QAT_FW_SLICE_CIPHER = 1,
- ICP_QAT_FW_SLICE_AUTH = 2,
- ICP_QAT_FW_SLICE_DRAM_RD = 3,
- ICP_QAT_FW_SLICE_DRAM_WR = 4,
- ICP_QAT_FW_SLICE_COMP = 5,
- ICP_QAT_FW_SLICE_XLAT = 6,
- ICP_QAT_FW_SLICE_DELIMITER
-};
-#endif
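
QAT_FIELD_SET and QAT_FIELD_GET above are the generic mask-and-shift
accessors that every flag macro in this header is layered on. A short,
hypothetical C sketch (not code from the driver) of building the common
header flags for an SGL request with a 64-bit content-descriptor pointer,
and of checking the crypto status bit in a response:

#include <stdint.h>
#include "icp_qat_fw.h"

static void comn_hdr_sketch(struct icp_qat_fw_comn_req *req)
{
	/* Pack pointer type (bit 0) and CD field type (bit 1) together */
	req->comn_hdr.comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(
			QAT_COMN_CD_FLD_TYPE_64BIT_ADR, QAT_COMN_PTR_TYPE_SGL);

	/* Mark the request valid: bit 7 of hdr_flags */
	req->comn_hdr.hdr_flags = ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(
			ICP_QAT_FW_COMN_REQ_FLAG_SET);
}

static int resp_crypto_ok_sketch(const struct icp_qat_fw_comn_resp *resp)
{
	/* Bit 7 of comn_status carries the crypto slice status; 0 is OK */
	return ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
			resp->comn_hdr.comn_status) ==
			ICP_QAT_FW_COMN_STATUS_FLAG_OK;
}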
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h b/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h
deleted file mode 100644
index fbf2b839..00000000
--- a/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _ICP_QAT_FW_LA_H_
-#define _ICP_QAT_FW_LA_H_
-#include "icp_qat_fw.h"
-
-enum icp_qat_fw_la_cmd_id {
- ICP_QAT_FW_LA_CMD_CIPHER = 0,
- ICP_QAT_FW_LA_CMD_AUTH = 1,
- ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2,
- ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3,
- ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4,
- ICP_QAT_FW_LA_CMD_TRNG_TEST = 5,
- ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6,
- ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7,
- ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8,
- ICP_QAT_FW_LA_CMD_MGF1 = 9,
- ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10,
- ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11,
- ICP_QAT_FW_LA_CMD_DELIMITER = 12
-};
-
-#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
-#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
-#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
-#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
-
-struct icp_qat_fw_la_bulk_req {
- struct icp_qat_fw_comn_req_hdr comn_hdr;
- struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
- struct icp_qat_fw_comn_req_mid comn_mid;
- struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
- struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
-};
-
-#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1
-#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0
-#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12
-#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1
-#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1
-#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11
-#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1
-#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1
-#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0
-#define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10
-#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1
-#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4
-#define ICP_QAT_FW_LA_GCM_PROTO 2
-#define ICP_QAT_FW_LA_CCM_PROTO 1
-#define ICP_QAT_FW_LA_NO_PROTO 0
-#define QAT_LA_PROTO_BITPOS 7
-#define QAT_LA_PROTO_MASK 0x7
-#define ICP_QAT_FW_LA_CMP_AUTH_RES 1
-#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0
-#define QAT_LA_CMP_AUTH_RES_BITPOS 6
-#define QAT_LA_CMP_AUTH_RES_MASK 0x1
-#define ICP_QAT_FW_LA_RET_AUTH_RES 1
-#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0
-#define QAT_LA_RET_AUTH_RES_BITPOS 5
-#define QAT_LA_RET_AUTH_RES_MASK 0x1
-#define ICP_QAT_FW_LA_UPDATE_STATE 1
-#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0
-#define QAT_LA_UPDATE_STATE_BITPOS 4
-#define QAT_LA_UPDATE_STATE_MASK 0x1
-#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0
-#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1
-#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3
-#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1
-#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0
-#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1
-#define QAT_LA_CIPH_IV_FLD_BITPOS 2
-#define QAT_LA_CIPH_IV_FLD_MASK 0x1
-#define ICP_QAT_FW_LA_PARTIAL_NONE 0
-#define ICP_QAT_FW_LA_PARTIAL_START 1
-#define ICP_QAT_FW_LA_PARTIAL_MID 3
-#define ICP_QAT_FW_LA_PARTIAL_END 2
-#define QAT_LA_PARTIAL_BITPOS 0
-#define QAT_LA_PARTIAL_MASK 0x3
-#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
- cmp_auth, ret_auth, update_state, \
- ciph_iv, ciphcfg, partial) \
- (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \
- QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \
- ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \
- QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \
- ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \
- QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \
- ((proto & QAT_LA_PROTO_MASK) << \
- QAT_LA_PROTO_BITPOS) | \
- ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \
- QAT_LA_CMP_AUTH_RES_BITPOS) | \
- ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \
- QAT_LA_RET_AUTH_RES_BITPOS) | \
- ((update_state & QAT_LA_UPDATE_STATE_MASK) << \
- QAT_LA_UPDATE_STATE_BITPOS) | \
- ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \
- QAT_LA_CIPH_IV_FLD_BITPOS) | \
- ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \
- QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \
- ((partial & QAT_LA_PARTIAL_MASK) << \
- QAT_LA_PARTIAL_BITPOS))
-
-#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \
- QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \
- QAT_LA_CIPH_IV_FLD_MASK)
-
-#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \
- QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
- QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
-
-#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \
- QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
- QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
-
-#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \
- QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
- QAT_LA_GCM_IV_LEN_FLAG_MASK)
-
-#define ICP_QAT_FW_LA_PROTO_GET(flags) \
- QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK)
-
-#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \
- QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \
- QAT_LA_CMP_AUTH_RES_MASK)
-
-#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \
- QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \
- QAT_LA_RET_AUTH_RES_MASK)
-
-#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \
- QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
- QAT_LA_DIGEST_IN_BUFFER_MASK)
-
-#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \
- QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \
- QAT_LA_UPDATE_STATE_MASK)
-
-#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \
- QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \
- QAT_LA_PARTIAL_MASK)
-
-#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \
- QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \
- QAT_LA_CIPH_IV_FLD_MASK)
-
-#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \
- QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
- QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
-
-#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \
- QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
- QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
-
-#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \
- QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
- QAT_LA_GCM_IV_LEN_FLAG_MASK)
-
-#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \
- QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \
- QAT_LA_PROTO_MASK)
-
-#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \
- QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \
- QAT_LA_CMP_AUTH_RES_MASK)
-
-#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \
- QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \
- QAT_LA_RET_AUTH_RES_MASK)
-
-#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \
- QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
- QAT_LA_DIGEST_IN_BUFFER_MASK)
-
-#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \
- QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \
- QAT_LA_UPDATE_STATE_MASK)
-
-#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \
- QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
- QAT_LA_PARTIAL_MASK)
-
-struct icp_qat_fw_cipher_req_hdr_cd_pars {
- union {
- struct {
- uint64_t content_desc_addr;
- uint16_t content_desc_resrvd1;
- uint8_t content_desc_params_sz;
- uint8_t content_desc_hdr_resrvd2;
- uint32_t content_desc_resrvd3;
- } s;
- struct {
- uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
- } s1;
- } u;
-};
-
-struct icp_qat_fw_cipher_auth_req_hdr_cd_pars {
- union {
- struct {
- uint64_t content_desc_addr;
- uint16_t content_desc_resrvd1;
- uint8_t content_desc_params_sz;
- uint8_t content_desc_hdr_resrvd2;
- uint32_t content_desc_resrvd3;
- } s;
- struct {
- uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
- } sl;
- } u;
-};
-
-struct icp_qat_fw_cipher_cd_ctrl_hdr {
- uint8_t cipher_state_sz;
- uint8_t cipher_key_sz;
- uint8_t cipher_cfg_offset;
- uint8_t next_curr_id;
- uint8_t cipher_padding_sz;
- uint8_t resrvd1;
- uint16_t resrvd2;
- uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3];
-};
-
-struct icp_qat_fw_auth_cd_ctrl_hdr {
- uint32_t resrvd1;
- uint8_t resrvd2;
- uint8_t hash_flags;
- uint8_t hash_cfg_offset;
- uint8_t next_curr_id;
- uint8_t resrvd3;
- uint8_t outer_prefix_sz;
- uint8_t final_sz;
- uint8_t inner_res_sz;
- uint8_t resrvd4;
- uint8_t inner_state1_sz;
- uint8_t inner_state2_offset;
- uint8_t inner_state2_sz;
- uint8_t outer_config_offset;
- uint8_t outer_state1_sz;
- uint8_t outer_res_sz;
- uint8_t outer_prefix_offset;
-};
-
-struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
- uint8_t cipher_state_sz;
- uint8_t cipher_key_sz;
- uint8_t cipher_cfg_offset;
- uint8_t next_curr_id_cipher;
- uint8_t cipher_padding_sz;
- uint8_t hash_flags;
- uint8_t hash_cfg_offset;
- uint8_t next_curr_id_auth;
- uint8_t resrvd1;
- uint8_t outer_prefix_sz;
- uint8_t final_sz;
- uint8_t inner_res_sz;
- uint8_t resrvd2;
- uint8_t inner_state1_sz;
- uint8_t inner_state2_offset;
- uint8_t inner_state2_sz;
- uint8_t outer_config_offset;
- uint8_t outer_state1_sz;
- uint8_t outer_res_sz;
- uint8_t outer_prefix_offset;
-};
-
-#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1
-#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0
-#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX 240
-#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \
- (sizeof(struct icp_qat_fw_la_cipher_req_params_t))
-#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
-
-struct icp_qat_fw_la_cipher_req_params {
- uint32_t cipher_offset;
- uint32_t cipher_length;
- union {
- uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
- struct {
- uint64_t cipher_IV_ptr;
- uint64_t resrvd1;
- } s;
- } u;
-};
-
-struct icp_qat_fw_la_auth_req_params {
- uint32_t auth_off;
- uint32_t auth_len;
- union {
- uint64_t auth_partial_st_prefix;
- uint64_t aad_adr;
- } u1;
- uint64_t auth_res_addr;
- union {
- uint8_t inner_prefix_sz;
- uint8_t aad_sz;
- } u2;
- uint8_t resrvd1;
- uint8_t hash_state_sz;
- uint8_t auth_res_sz;
-} __rte_packed;
-
-struct icp_qat_fw_la_auth_req_params_resrvd_flds {
- uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
- union {
- uint8_t inner_prefix_sz;
- uint8_t aad_sz;
- } u2;
- uint8_t resrvd1;
- uint16_t resrvd2;
-};
-
-struct icp_qat_fw_la_resp {
- struct icp_qat_fw_comn_resp_hdr comn_resp;
- uint64_t opaque_data;
- uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
-};
-
-#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \
- ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
- ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
-
-#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
-{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
- ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
- & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
- ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
- & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
-
-#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \
- (((cd_ctrl_hdr_t)->next_curr_id_cipher) \
- & ICP_QAT_FW_COMN_CURR_ID_MASK)
-
-#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \
-{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
- ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
- & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
- ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
-
-#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \
- ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
- >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
-
-#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
-{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
- ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
- & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
- ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
- & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
-
-#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \
- (((cd_ctrl_hdr_t)->next_curr_id_auth) \
- & ICP_QAT_FW_COMN_CURR_ID_MASK)
-
-#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \
-{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
- ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
- & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
- ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
-
-#endif
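
The serv_specif_flags word of the request header is assembled from the
bit-fields above, either all at once with ICP_QAT_FW_LA_FLAGS_BUILD or
incrementally with the individual setters. A hedged C sketch of flags an
AES-GCM request might carry (the exact combination is illustrative, not
lifted from the driver):

#include <stdint.h>
#include "icp_qat_fw_la.h"

static uint16_t la_gcm_flags_sketch(void)
{
	uint16_t flags = 0;

	ICP_QAT_FW_LA_PROTO_SET(flags, ICP_QAT_FW_LA_GCM_PROTO);
	ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags,
			ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
	ICP_QAT_FW_LA_RET_AUTH_SET(flags, ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags,
			ICP_QAT_FW_LA_DIGEST_IN_BUFFER);

	/* GCM proto (2) lands in bits [9:7], the 12-octet-IV flag in bit 11,
	 * return-auth-result in bit 5 and digest-in-buffer in bit 10.
	 */
	return flags;
}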
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_hw.h b/drivers/crypto/qat/qat_adf/icp_qat_hw.h
deleted file mode 100644
index d03688c7..00000000
--- a/drivers/crypto/qat/qat_adf/icp_qat_hw.h
+++ /dev/null
@@ -1,329 +0,0 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _ICP_QAT_HW_H_
-#define _ICP_QAT_HW_H_
-
-enum icp_qat_hw_ae_id {
- ICP_QAT_HW_AE_0 = 0,
- ICP_QAT_HW_AE_1 = 1,
- ICP_QAT_HW_AE_2 = 2,
- ICP_QAT_HW_AE_3 = 3,
- ICP_QAT_HW_AE_4 = 4,
- ICP_QAT_HW_AE_5 = 5,
- ICP_QAT_HW_AE_6 = 6,
- ICP_QAT_HW_AE_7 = 7,
- ICP_QAT_HW_AE_8 = 8,
- ICP_QAT_HW_AE_9 = 9,
- ICP_QAT_HW_AE_10 = 10,
- ICP_QAT_HW_AE_11 = 11,
- ICP_QAT_HW_AE_DELIMITER = 12
-};
-
-enum icp_qat_hw_qat_id {
- ICP_QAT_HW_QAT_0 = 0,
- ICP_QAT_HW_QAT_1 = 1,
- ICP_QAT_HW_QAT_2 = 2,
- ICP_QAT_HW_QAT_3 = 3,
- ICP_QAT_HW_QAT_4 = 4,
- ICP_QAT_HW_QAT_5 = 5,
- ICP_QAT_HW_QAT_DELIMITER = 6
-};
-
-enum icp_qat_hw_auth_algo {
- ICP_QAT_HW_AUTH_ALGO_NULL = 0,
- ICP_QAT_HW_AUTH_ALGO_SHA1 = 1,
- ICP_QAT_HW_AUTH_ALGO_MD5 = 2,
- ICP_QAT_HW_AUTH_ALGO_SHA224 = 3,
- ICP_QAT_HW_AUTH_ALGO_SHA256 = 4,
- ICP_QAT_HW_AUTH_ALGO_SHA384 = 5,
- ICP_QAT_HW_AUTH_ALGO_SHA512 = 6,
- ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7,
- ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8,
- ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9,
- ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10,
- ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11,
- ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12,
- ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13,
- ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14,
- ICP_QAT_HW_AUTH_RESERVED_1 = 15,
- ICP_QAT_HW_AUTH_RESERVED_2 = 16,
- ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17,
- ICP_QAT_HW_AUTH_RESERVED_3 = 18,
- ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19,
- ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20
-};
-
-enum icp_qat_hw_auth_mode {
- ICP_QAT_HW_AUTH_MODE0 = 0,
- ICP_QAT_HW_AUTH_MODE1 = 1,
- ICP_QAT_HW_AUTH_MODE2 = 2,
- ICP_QAT_HW_AUTH_MODE_DELIMITER = 3
-};
-
-struct icp_qat_hw_auth_config {
- uint32_t config;
- uint32_t reserved;
-};
-
-#define QAT_AUTH_MODE_BITPOS 4
-#define QAT_AUTH_MODE_MASK 0xF
-#define QAT_AUTH_ALGO_BITPOS 0
-#define QAT_AUTH_ALGO_MASK 0xF
-#define QAT_AUTH_CMP_BITPOS 8
-#define QAT_AUTH_CMP_MASK 0x7F
-#define QAT_AUTH_SHA3_PADDING_BITPOS 16
-#define QAT_AUTH_SHA3_PADDING_MASK 0x1
-#define QAT_AUTH_ALGO_SHA3_BITPOS 22
-#define QAT_AUTH_ALGO_SHA3_MASK 0x3
-#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
- (((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
- ((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
- (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \
- QAT_AUTH_ALGO_SHA3_BITPOS) | \
- (((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \
- (algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \
- & QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \
- ((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
-
-struct icp_qat_hw_auth_counter {
- uint32_t counter;
- uint32_t reserved;
-};
-
-#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF
-#define QAT_AUTH_COUNT_BITPOS 0
-#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \
- (((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS)
-
-struct icp_qat_hw_auth_setup {
- struct icp_qat_hw_auth_config auth_config;
- struct icp_qat_hw_auth_counter auth_counter;
-};
-
-#define QAT_HW_DEFAULT_ALIGNMENT 8
-#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & (~(n - 1)))
-#define ICP_QAT_HW_NULL_STATE1_SZ 32
-#define ICP_QAT_HW_MD5_STATE1_SZ 16
-#define ICP_QAT_HW_SHA1_STATE1_SZ 20
-#define ICP_QAT_HW_SHA224_STATE1_SZ 32
-#define ICP_QAT_HW_SHA256_STATE1_SZ 32
-#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32
-#define ICP_QAT_HW_SHA384_STATE1_SZ 64
-#define ICP_QAT_HW_SHA512_STATE1_SZ 64
-#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64
-#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
-#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
-#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16
-#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16
-#define ICP_QAT_HW_AES_F9_STATE1_SZ 32
-#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16
-#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
-#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
-#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
-#define ICP_QAT_HW_NULL_STATE2_SZ 32
-#define ICP_QAT_HW_MD5_STATE2_SZ 16
-#define ICP_QAT_HW_SHA1_STATE2_SZ 20
-#define ICP_QAT_HW_SHA224_STATE2_SZ 32
-#define ICP_QAT_HW_SHA256_STATE2_SZ 32
-#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
-#define ICP_QAT_HW_SHA384_STATE2_SZ 64
-#define ICP_QAT_HW_SHA512_STATE2_SZ 64
-#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
-#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
-#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
-#define ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ 48
-#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
-#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
-#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16
-#define ICP_QAT_HW_F9_IK_SZ 16
-#define ICP_QAT_HW_F9_FK_SZ 16
-#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \
- ICP_QAT_HW_F9_FK_SZ)
-#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ
-#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24
-#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32
-#define ICP_QAT_HW_GALOIS_H_SZ 16
-#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8
-#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16
-
-struct icp_qat_hw_auth_sha512 {
- struct icp_qat_hw_auth_setup inner_setup;
- uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ];
- struct icp_qat_hw_auth_setup outer_setup;
- uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ];
-};
-
-struct icp_qat_hw_auth_algo_blk {
- struct icp_qat_hw_auth_sha512 sha;
-};
-
-#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0
-#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF
-
-enum icp_qat_hw_cipher_algo {
- ICP_QAT_HW_CIPHER_ALGO_NULL = 0,
- ICP_QAT_HW_CIPHER_ALGO_DES = 1,
- ICP_QAT_HW_CIPHER_ALGO_3DES = 2,
- ICP_QAT_HW_CIPHER_ALGO_AES128 = 3,
- ICP_QAT_HW_CIPHER_ALGO_AES192 = 4,
- ICP_QAT_HW_CIPHER_ALGO_AES256 = 5,
- ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6,
- ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7,
- ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8,
- ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
- ICP_QAT_HW_CIPHER_DELIMITER = 10
-};
-
-enum icp_qat_hw_cipher_mode {
- ICP_QAT_HW_CIPHER_ECB_MODE = 0,
- ICP_QAT_HW_CIPHER_CBC_MODE = 1,
- ICP_QAT_HW_CIPHER_CTR_MODE = 2,
- ICP_QAT_HW_CIPHER_F8_MODE = 3,
- ICP_QAT_HW_CIPHER_XTS_MODE = 6,
- ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7
-};
-
-struct icp_qat_hw_cipher_config {
- uint32_t val;
- uint32_t reserved;
-};
-
-enum icp_qat_hw_cipher_dir {
- ICP_QAT_HW_CIPHER_ENCRYPT = 0,
- ICP_QAT_HW_CIPHER_DECRYPT = 1,
-};
-
-enum icp_qat_hw_auth_op {
- ICP_QAT_HW_AUTH_VERIFY = 0,
- ICP_QAT_HW_AUTH_GENERATE = 1,
-};
-
-enum icp_qat_hw_cipher_convert {
- ICP_QAT_HW_CIPHER_NO_CONVERT = 0,
- ICP_QAT_HW_CIPHER_KEY_CONVERT = 1,
-};
-
-#define QAT_CIPHER_MODE_BITPOS 4
-#define QAT_CIPHER_MODE_MASK 0xF
-#define QAT_CIPHER_ALGO_BITPOS 0
-#define QAT_CIPHER_ALGO_MASK 0xF
-#define QAT_CIPHER_CONVERT_BITPOS 9
-#define QAT_CIPHER_CONVERT_MASK 0x1
-#define QAT_CIPHER_DIR_BITPOS 8
-#define QAT_CIPHER_DIR_MASK 0x1
-#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2
-#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2
-#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \
- (((mode & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \
- ((algo & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \
- ((convert & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \
- ((dir & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS))
-#define ICP_QAT_HW_DES_BLK_SZ 8
-#define ICP_QAT_HW_3DES_BLK_SZ 8
-#define ICP_QAT_HW_NULL_BLK_SZ 8
-#define ICP_QAT_HW_AES_BLK_SZ 16
-#define ICP_QAT_HW_KASUMI_BLK_SZ 8
-#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8
-#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8
-#define ICP_QAT_HW_NULL_KEY_SZ 256
-#define ICP_QAT_HW_DES_KEY_SZ 8
-#define ICP_QAT_HW_3DES_KEY_SZ 24
-#define ICP_QAT_HW_AES_128_KEY_SZ 16
-#define ICP_QAT_HW_AES_192_KEY_SZ 24
-#define ICP_QAT_HW_AES_256_KEY_SZ 32
-#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
- QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
-#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \
- QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
-#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
- QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
-#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
- QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
-#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
- QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
-#define ICP_QAT_HW_KASUMI_KEY_SZ 16
-#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \
- QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
-#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
- QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
-#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
- QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
-#define ICP_QAT_HW_ARC4_KEY_SZ 256
-#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16
-#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16
-#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
-#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
-#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
-
-#define ICP_QAT_HW_CIPHER_MAX_KEY_SZ ICP_QAT_HW_AES_256_F8_KEY_SZ
-
-/* These defines describe position of the bit-fields
- * in the flags byte in B0
- */
-#define ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT 6
-#define ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT 3
-
-#define ICP_QAT_HW_CCM_BUILD_B0_FLAGS(Adata, t, q) \
- ((((Adata) > 0 ? 1 : 0) << ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT) \
- | ((((t) - 2) >> 1) << ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT) \
- | ((q) - 1))
-
-#define ICP_QAT_HW_CCM_NQ_CONST 15
-#define ICP_QAT_HW_CCM_AAD_B0_LEN 16
-#define ICP_QAT_HW_CCM_AAD_LEN_INFO 2
-#define ICP_QAT_HW_CCM_AAD_DATA_OFFSET (ICP_QAT_HW_CCM_AAD_B0_LEN + \
- ICP_QAT_HW_CCM_AAD_LEN_INFO)
-#define ICP_QAT_HW_CCM_AAD_ALIGNMENT 16
-#define ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE 4
-#define ICP_QAT_HW_CCM_NONCE_OFFSET 1
-
-struct icp_qat_hw_cipher_algo_blk {
- struct icp_qat_hw_cipher_config cipher_config;
- uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ];
-} __rte_cache_aligned;
-
-#endif
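
ICP_QAT_HW_CIPHER_CONFIG_BUILD and ICP_QAT_HW_CCM_BUILD_B0_FLAGS above are
pure bit arithmetic, so their results can be checked by hand. A hedged C
sketch with two worked values (the parameter choices are examples, not
taken from the driver):

#include <stdint.h>
#include "icp_qat_hw.h"

/* AES-128-CBC encrypt: mode 1 in bits [7:4], algo 3 in bits [3:0],
 * no key convert (bit 9 = 0), encrypt direction (bit 8 = 0)  =>  0x13.
 */
static const uint32_t aes128_cbc_enc_cfg =
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE,
			ICP_QAT_HW_CIPHER_ALGO_AES128,
			ICP_QAT_HW_CIPHER_NO_CONVERT,
			ICP_QAT_HW_CIPHER_ENCRYPT);

/* CCM B0 flags byte per RFC 3610: AAD present, 16-byte tag, 13-byte nonce
 * so q = 15 - 13 = 2:
 * (1 << 6) | (((16 - 2) >> 1) << 3) | (2 - 1) = 0x40 | 0x38 | 0x01 = 0x79.
 */
static const uint8_t ccm_b0_flags = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(1, 16, 2);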
diff --git a/drivers/crypto/qat/qat_adf/qat_algs.h b/drivers/crypto/qat/qat_adf/qat_algs.h
deleted file mode 100644
index 802ba95d..00000000
--- a/drivers/crypto/qat/qat_adf/qat_algs.h
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015-2016 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015-2017 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _ICP_QAT_ALGS_H_
-#define _ICP_QAT_ALGS_H_
-#include <rte_memory.h>
-#include <rte_crypto.h>
-#include "icp_qat_hw.h"
-#include "icp_qat_fw.h"
-#include "icp_qat_fw_la.h"
-#include "../qat_crypto.h"
-
-/*
- * Key Modifier (KM) value used in KASUMI algorithm in F9 mode to XOR
- * Integrity Key (IK)
- */
-#define KASUMI_F9_KEY_MODIFIER_4_BYTES 0xAAAAAAAA
-
-#define KASUMI_F8_KEY_MODIFIER_4_BYTES 0x55555555
-
-/* 3DES key sizes */
-#define QAT_3DES_KEY_SZ_OPT1 24 /* Keys are independent */
-#define QAT_3DES_KEY_SZ_OPT2 16 /* K3=K1 */
-
-#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
- ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
- ICP_QAT_HW_CIPHER_NO_CONVERT, \
- ICP_QAT_HW_CIPHER_ENCRYPT)
-
-#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
- ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
- ICP_QAT_HW_CIPHER_KEY_CONVERT, \
- ICP_QAT_HW_CIPHER_DECRYPT)
-
-struct qat_alg_buf {
- uint32_t len;
- uint32_t resrvd;
- uint64_t addr;
-} __rte_packed;
-
-enum qat_crypto_proto_flag {
- QAT_CRYPTO_PROTO_FLAG_NONE = 0,
- QAT_CRYPTO_PROTO_FLAG_CCM = 1,
- QAT_CRYPTO_PROTO_FLAG_GCM = 2,
- QAT_CRYPTO_PROTO_FLAG_SNOW3G = 3,
- QAT_CRYPTO_PROTO_FLAG_ZUC = 4
-};
-
-/*
- * Maximum number of SGL entries
- */
-#define QAT_SGL_MAX_NUMBER 16
-
-struct qat_alg_buf_list {
- uint64_t resrvd;
- uint32_t num_bufs;
- uint32_t num_mapped_bufs;
- struct qat_alg_buf bufers[QAT_SGL_MAX_NUMBER];
-} __rte_packed __rte_cache_aligned;
-
-struct qat_crypto_op_cookie {
- struct qat_alg_buf_list qat_sgl_list_src;
- struct qat_alg_buf_list qat_sgl_list_dst;
- rte_iova_t qat_sgl_src_phys_addr;
- rte_iova_t qat_sgl_dst_phys_addr;
-};
-
-/* Common content descriptor */
-struct qat_alg_cd {
- struct icp_qat_hw_cipher_algo_blk cipher;
- struct icp_qat_hw_auth_algo_blk hash;
-} __rte_packed __rte_cache_aligned;
-
-struct qat_session {
- enum icp_qat_fw_la_cmd_id qat_cmd;
- enum icp_qat_hw_cipher_algo qat_cipher_alg;
- enum icp_qat_hw_cipher_dir qat_dir;
- enum icp_qat_hw_cipher_mode qat_mode;
- enum icp_qat_hw_auth_algo qat_hash_alg;
- enum icp_qat_hw_auth_op auth_op;
- void *bpi_ctx;
- struct qat_alg_cd cd;
- uint8_t *cd_cur_ptr;
- rte_iova_t cd_paddr;
- struct icp_qat_fw_la_bulk_req fw_req;
- uint8_t aad_len;
- struct qat_crypto_instance *inst;
- struct {
- uint16_t offset;
- uint16_t length;
- } cipher_iv;
- struct {
- uint16_t offset;
- uint16_t length;
- } auth_iv;
- uint16_t digest_length;
- rte_spinlock_t lock; /* protects this struct */
- enum qat_device_gen min_qat_dev_gen;
-};
-
-int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg);
-
-int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cd,
- uint8_t *enckey,
- uint32_t enckeylen);
-
-int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
- uint8_t *authkey,
- uint32_t authkeylen,
- uint32_t aad_length,
- uint32_t digestsize,
- unsigned int operation);
-
-void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
- enum qat_crypto_proto_flag proto_flags);
-
-int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-int qat_alg_validate_aes_docsisbpi_key(int key_len,
- enum icp_qat_hw_cipher_algo *alg);
-int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg);
-int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-#endif
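
struct qat_alg_buf_list above is the flat scatter-gather list handed to
firmware when the request header selects QAT_COMN_PTR_TYPE_SGL; each op
cookie pre-allocates one for source and one for destination, together with
their IOVAs. A hedged C sketch of populating it (the helper name and
parameters are invented for illustration):

#include <stdint.h>
#include "qat_algs.h"

static int qat_fill_sgl_sketch(struct qat_alg_buf_list *list,
		const uint64_t *seg_iova, const uint32_t *seg_len,
		uint32_t nb_segs)
{
	uint32_t i;

	if (nb_segs > QAT_SGL_MAX_NUMBER)   /* at most 16 segments */
		return -1;

	for (i = 0; i < nb_segs; i++) {
		/* note: the field is spelled "bufers" in the struct */
		list->bufers[i].addr = seg_iova[i];  /* IOVA, not VA */
		list->bufers[i].len = seg_len[i];
		list->bufers[i].resrvd = 0;
	}
	list->num_bufs = nb_segs;
	list->num_mapped_bufs = 0;
	list->resrvd = 0;
	return 0;
}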
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
deleted file mode 100644
index 4afe159d..00000000
--- a/drivers/crypto/qat/qat_crypto.c
+++ /dev/null
@@ -1,1696 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2017 Intel Corporation
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <strings.h>
-#include <string.h>
-#include <inttypes.h>
-#include <errno.h>
-#include <sys/queue.h>
-#include <stdarg.h>
-
-#include <rte_common.h>
-#include <rte_log.h>
-#include <rte_debug.h>
-#include <rte_memory.h>
-#include <rte_tailq.h>
-#include <rte_malloc.h>
-#include <rte_launch.h>
-#include <rte_eal.h>
-#include <rte_per_lcore.h>
-#include <rte_lcore.h>
-#include <rte_branch_prediction.h>
-#include <rte_mempool.h>
-#include <rte_mbuf.h>
-#include <rte_string_fns.h>
-#include <rte_spinlock.h>
-#include <rte_hexdump.h>
-#include <rte_crypto_sym.h>
-#include <rte_byteorder.h>
-#include <rte_pci.h>
-#include <rte_bus_pci.h>
-
-#include <openssl/evp.h>
-
-#include "qat_logs.h"
-#include "qat_algs.h"
-#include "qat_crypto.h"
-#include "adf_transport_access_macros.h"
-
-#define BYTE_LENGTH 8
-/* BPI is only used for partial blocks of DES and AES,
- * so the AES block length can be assumed to be the max length for iv, src and dst
- */
-#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
-
-static int
-qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
- struct qat_pmd_private *internals) {
- int i = 0;
- const struct rte_cryptodev_capabilities *capability;
-
- while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
- RTE_CRYPTO_OP_TYPE_UNDEFINED) {
- if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
- continue;
-
- if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
- continue;
-
- if (capability->sym.cipher.algo == algo)
- return 1;
- }
- return 0;
-}
-
-static int
-qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
- struct qat_pmd_private *internals) {
- int i = 0;
- const struct rte_cryptodev_capabilities *capability;
-
- while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
- RTE_CRYPTO_OP_TYPE_UNDEFINED) {
- if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
- continue;
-
- if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
- continue;
-
- if (capability->sym.auth.algo == algo)
- return 1;
- }
- return 0;
-}
-
-/** Encrypt a single partial block
- * Depends on openssl libcrypto
- * Uses ECB+XOR to perform CFB encryption: same result, better performance
- */
-static inline int
-bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
- uint8_t *iv, int ivlen, int srclen,
- void *bpi_ctx)
-{
- EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
- int encrypted_ivlen;
- uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
- uint8_t *encr = encrypted_iv;
-
- /* ECB method: encrypt the IV, then XOR this with plaintext */
- if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
- <= 0)
- goto cipher_encrypt_err;
-
- for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
- *dst = *src ^ *encr;
-
- return 0;
-
-cipher_encrypt_err:
- PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed");
- return -EINVAL;
-}
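-/* Why ECB+XOR is equivalent to CFB here: for a single partial block, CFB
- * computes C = P XOR E_K(IV) and P = C XOR E_K(IV), i.e. both directions
- * only run the block cipher forward over the IV. A single ECB encryption
- * of the IV followed by an XOR therefore yields the same result while
- * avoiding per-call CFB context setup.
- */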
-
-/** Decrypt a single partial block
- * Depends on openssl libcrypto
- * Uses ECB+XOR to perform CFB decryption: same result, better performance
- */
-static inline int
-bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
- uint8_t *iv, int ivlen, int srclen,
- void *bpi_ctx)
-{
- EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
- int encrypted_ivlen;
- uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
- uint8_t *encr = encrypted_iv;
-
- /* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
- if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
- <= 0)
- goto cipher_decrypt_err;
-
- for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
- *dst = *src ^ *encr;
-
- return 0;
-
-cipher_decrypt_err:
- PMD_DRV_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
- return -EINVAL;
-}
-
-/** Creates an AES or DES cipher context in ECB mode
- * Depends on openssl libcrypto
- */
-static int
-bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
- enum rte_crypto_cipher_operation direction __rte_unused,
- uint8_t *key, void **ctx)
-{
- const EVP_CIPHER *algo = NULL;
- int ret;
- *ctx = EVP_CIPHER_CTX_new();
-
- if (*ctx == NULL) {
- ret = -ENOMEM;
- goto ctx_init_err;
- }
-
- if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
- algo = EVP_des_ecb();
- else
- algo = EVP_aes_128_ecb();
-
- /* IV will be ECB encrypted whether direction is encrypt or decrypt*/
- if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
- ret = -EINVAL;
- goto ctx_init_err;
- }
-
- return 0;
-
-ctx_init_err:
- if (*ctx != NULL)
- EVP_CIPHER_CTX_free(*ctx);
- return ret;
-}
-
-/** Frees a context previously created by bpi_cipher_ctx_init()
- * Depends on openssl libcrypto
- */
-static void
-bpi_cipher_ctx_free(void *bpi_ctx)
-{
- if (bpi_ctx != NULL)
- EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
-}
-
-static inline uint32_t
-adf_modulo(uint32_t data, uint32_t shift);
-
-static inline int
-qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
- struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp);
-
-void
-qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
- struct rte_cryptodev_sym_session *sess)
-{
- PMD_INIT_FUNC_TRACE();
- uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
- struct qat_session *s = (struct qat_session *)sess_priv;
-
- if (sess_priv) {
- if (s->bpi_ctx)
- bpi_cipher_ctx_free(s->bpi_ctx);
- memset(s, 0, qat_crypto_sym_get_session_private_size(dev));
- struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_session_private_data(sess, index, NULL);
- rte_mempool_put(sess_mp, sess_priv);
- }
-}
-
-static int
-qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
-{
- /* Cipher Only */
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
- return ICP_QAT_FW_LA_CMD_CIPHER;
-
- /* Authentication Only */
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
- return ICP_QAT_FW_LA_CMD_AUTH;
-
- /* AEAD */
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
-		/* AES-GCM and AES-CCM work in different orders:
-		 * GCM encrypts first and then generates the hash, whereas
-		 * AES-CCM generates the hash first and then encrypts.
-		 * The same relation applies to decryption.
-		 */
- if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
- if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
- return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
- else
- return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
- else
- if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
- return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
- else
- return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
- }
-
- if (xform->next == NULL)
- return -1;
-
- /* Cipher then Authenticate */
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
- return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
-
- /* Authenticate then Cipher */
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
- return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
-
- return -1;
-}
-
-static struct rte_crypto_auth_xform *
-qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
-{
- do {
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
- return &xform->auth;
-
- xform = xform->next;
- } while (xform);
-
- return NULL;
-}
-
-static struct rte_crypto_cipher_xform *
-qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
-{
- do {
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
- return &xform->cipher;
-
- xform = xform->next;
- } while (xform);
-
- return NULL;
-}
-
-int
-qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform,
- struct qat_session *session)
-{
- struct qat_pmd_private *internals = dev->data->dev_private;
- struct rte_crypto_cipher_xform *cipher_xform = NULL;
- int ret;
-
- /* Get cipher xform from crypto xform chain */
- cipher_xform = qat_get_cipher_xform(xform);
-
- session->cipher_iv.offset = cipher_xform->iv.offset;
- session->cipher_iv.length = cipher_xform->iv.length;
-
- switch (cipher_xform->algo) {
- case RTE_CRYPTO_CIPHER_AES_CBC:
- if (qat_alg_validate_aes_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
- break;
- case RTE_CRYPTO_CIPHER_AES_CTR:
- if (qat_alg_validate_aes_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
- break;
- case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
- if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
- break;
- case RTE_CRYPTO_CIPHER_NULL:
- session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
- break;
- case RTE_CRYPTO_CIPHER_KASUMI_F8:
- if (qat_alg_validate_kasumi_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
- break;
- case RTE_CRYPTO_CIPHER_3DES_CBC:
- if (qat_alg_validate_3des_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
- break;
- case RTE_CRYPTO_CIPHER_DES_CBC:
- if (qat_alg_validate_des_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
- break;
- case RTE_CRYPTO_CIPHER_3DES_CTR:
- if (qat_alg_validate_3des_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
- break;
- case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
- ret = bpi_cipher_ctx_init(
- cipher_xform->algo,
- cipher_xform->op,
- cipher_xform->key.data,
- &session->bpi_ctx);
- if (ret != 0) {
- PMD_DRV_LOG(ERR, "failed to create DES BPI ctx");
- goto error_out;
- }
- if (qat_alg_validate_des_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
- break;
- case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
- ret = bpi_cipher_ctx_init(
- cipher_xform->algo,
- cipher_xform->op,
- cipher_xform->key.data,
- &session->bpi_ctx);
- if (ret != 0) {
- PMD_DRV_LOG(ERR, "failed to create AES BPI ctx");
- goto error_out;
- }
- if (qat_alg_validate_aes_docsisbpi_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid AES DOCSISBPI key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
- break;
- case RTE_CRYPTO_CIPHER_ZUC_EEA3:
- if (!qat_is_cipher_alg_supported(
- cipher_xform->algo, internals)) {
- PMD_DRV_LOG(ERR, "%s not supported on this device",
- rte_crypto_cipher_algorithm_strings
- [cipher_xform->algo]);
- ret = -ENOTSUP;
- goto error_out;
- }
- if (qat_alg_validate_zuc_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid ZUC cipher key size");
- ret = -EINVAL;
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
- break;
- case RTE_CRYPTO_CIPHER_3DES_ECB:
- case RTE_CRYPTO_CIPHER_AES_ECB:
- case RTE_CRYPTO_CIPHER_AES_F8:
- case RTE_CRYPTO_CIPHER_AES_XTS:
- case RTE_CRYPTO_CIPHER_ARC4:
- PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
- cipher_xform->algo);
- ret = -ENOTSUP;
- goto error_out;
- default:
-		PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u",
- cipher_xform->algo);
- ret = -EINVAL;
- goto error_out;
- }
-
- if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
- session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
- else
- session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
-
- if (qat_alg_aead_session_create_content_desc_cipher(session,
- cipher_xform->key.data,
- cipher_xform->key.length)) {
- ret = -EINVAL;
- goto error_out;
- }
-
- return 0;
-
-error_out:
- if (session->bpi_ctx) {
- bpi_cipher_ctx_free(session->bpi_ctx);
- session->bpi_ctx = NULL;
- }
- return ret;
-}
-
-int
-qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform,
- struct rte_cryptodev_sym_session *sess,
- struct rte_mempool *mempool)
-{
- void *sess_private_data;
- int ret;
-
- if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
- return -ENOMEM;
- }
-
- ret = qat_crypto_set_session_parameters(dev, xform, sess_private_data);
- if (ret != 0) {
- PMD_DRV_LOG(ERR, "Crypto QAT PMD: failed to configure "
- "session parameters");
-
- /* Return session to mempool */
- rte_mempool_put(mempool, sess_private_data);
- return ret;
- }
-
- set_session_private_data(sess, dev->driver_id,
- sess_private_data);
-
- return 0;
-}
-
-int
-qat_crypto_set_session_parameters(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform, void *session_private)
-{
- struct qat_session *session = session_private;
- int ret;
-
- int qat_cmd_id;
- PMD_INIT_FUNC_TRACE();
-
- /* Set context descriptor physical address */
- session->cd_paddr = rte_mempool_virt2iova(session) +
- offsetof(struct qat_session, cd);
-
- session->min_qat_dev_gen = QAT_GEN1;
-
- /* Get requested QAT command id */
- qat_cmd_id = qat_get_cmd_id(xform);
- if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
- PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
- return -ENOTSUP;
- }
- session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
- switch (session->qat_cmd) {
- case ICP_QAT_FW_LA_CMD_CIPHER:
- ret = qat_crypto_sym_configure_session_cipher(dev, xform, session);
- if (ret < 0)
- return ret;
- break;
- case ICP_QAT_FW_LA_CMD_AUTH:
- ret = qat_crypto_sym_configure_session_auth(dev, xform, session);
- if (ret < 0)
- return ret;
- break;
- case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
- ret = qat_crypto_sym_configure_session_aead(xform,
- session);
- if (ret < 0)
- return ret;
- } else {
- ret = qat_crypto_sym_configure_session_cipher(dev,
- xform, session);
- if (ret < 0)
- return ret;
- ret = qat_crypto_sym_configure_session_auth(dev,
- xform, session);
- if (ret < 0)
- return ret;
- }
- break;
- case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
- ret = qat_crypto_sym_configure_session_aead(xform,
- session);
- if (ret < 0)
- return ret;
- } else {
- ret = qat_crypto_sym_configure_session_auth(dev,
- xform, session);
- if (ret < 0)
- return ret;
- ret = qat_crypto_sym_configure_session_cipher(dev,
- xform, session);
- if (ret < 0)
- return ret;
- }
- break;
- case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
- case ICP_QAT_FW_LA_CMD_TRNG_TEST:
- case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
- case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
- case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
- case ICP_QAT_FW_LA_CMD_MGF1:
- case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
- case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
- case ICP_QAT_FW_LA_CMD_DELIMITER:
- PMD_DRV_LOG(ERR, "Unsupported Service %u",
- session->qat_cmd);
- return -ENOTSUP;
- default:
- PMD_DRV_LOG(ERR, "Unsupported Service %u",
- session->qat_cmd);
- return -ENOTSUP;
- }
-
- return 0;
-}
-
-int
-qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform,
- struct qat_session *session)
-{
- struct rte_crypto_auth_xform *auth_xform = NULL;
- struct qat_pmd_private *internals = dev->data->dev_private;
- auth_xform = qat_get_auth_xform(xform);
- uint8_t *key_data = auth_xform->key.data;
- uint8_t key_length = auth_xform->key.length;
-
- switch (auth_xform->algo) {
- case RTE_CRYPTO_AUTH_SHA1_HMAC:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
- break;
- case RTE_CRYPTO_AUTH_SHA224_HMAC:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
- break;
- case RTE_CRYPTO_AUTH_SHA256_HMAC:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
- break;
- case RTE_CRYPTO_AUTH_SHA384_HMAC:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
- break;
- case RTE_CRYPTO_AUTH_SHA512_HMAC:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
- break;
- case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
- break;
- case RTE_CRYPTO_AUTH_AES_GMAC:
- if (qat_alg_validate_aes_key(auth_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid AES key size");
- return -EINVAL;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
-
- break;
- case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
- break;
- case RTE_CRYPTO_AUTH_MD5_HMAC:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
- break;
- case RTE_CRYPTO_AUTH_NULL:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
- break;
- case RTE_CRYPTO_AUTH_KASUMI_F9:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
- break;
- case RTE_CRYPTO_AUTH_ZUC_EIA3:
- if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
- PMD_DRV_LOG(ERR, "%s not supported on this device",
- rte_crypto_auth_algorithm_strings
- [auth_xform->algo]);
- return -ENOTSUP;
- }
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
- break;
- case RTE_CRYPTO_AUTH_SHA1:
- case RTE_CRYPTO_AUTH_SHA256:
- case RTE_CRYPTO_AUTH_SHA512:
- case RTE_CRYPTO_AUTH_SHA224:
- case RTE_CRYPTO_AUTH_SHA384:
- case RTE_CRYPTO_AUTH_MD5:
- case RTE_CRYPTO_AUTH_AES_CMAC:
- case RTE_CRYPTO_AUTH_AES_CBC_MAC:
- PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
- auth_xform->algo);
- return -ENOTSUP;
- default:
- PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
- auth_xform->algo);
- return -EINVAL;
- }
-
- session->auth_iv.offset = auth_xform->iv.offset;
- session->auth_iv.length = auth_xform->iv.length;
-
- if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
- if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
- session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
- session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
-			/*
-			 * The cipher descriptor content must be created
-			 * first, then the authentication descriptor
-			 */
- if (qat_alg_aead_session_create_content_desc_cipher(session,
- auth_xform->key.data,
- auth_xform->key.length))
- return -EINVAL;
-
- if (qat_alg_aead_session_create_content_desc_auth(session,
- key_data,
- key_length,
- 0,
- auth_xform->digest_length,
- auth_xform->op))
- return -EINVAL;
- } else {
- session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
- session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
-			/*
-			 * The authentication descriptor content must be
-			 * created first, then the cipher descriptor
-			 */
- if (qat_alg_aead_session_create_content_desc_auth(session,
- key_data,
- key_length,
- 0,
- auth_xform->digest_length,
- auth_xform->op))
- return -EINVAL;
-
- if (qat_alg_aead_session_create_content_desc_cipher(session,
- auth_xform->key.data,
- auth_xform->key.length))
- return -EINVAL;
- }
-		/* Restore to authentication only */
- session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
- } else {
- if (qat_alg_aead_session_create_content_desc_auth(session,
- key_data,
- key_length,
- 0,
- auth_xform->digest_length,
- auth_xform->op))
- return -EINVAL;
- }
-
- session->digest_length = auth_xform->digest_length;
- return 0;
-}
-
-int
-qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
- struct qat_session *session)
-{
- struct rte_crypto_aead_xform *aead_xform = &xform->aead;
- enum rte_crypto_auth_operation crypto_operation;
-
- /*
- * Store AEAD IV parameters as cipher IV,
- * to avoid unnecessary memory usage
- */
- session->cipher_iv.offset = xform->aead.iv.offset;
- session->cipher_iv.length = xform->aead.iv.length;
-
- switch (aead_xform->algo) {
- case RTE_CRYPTO_AEAD_AES_GCM:
- if (qat_alg_validate_aes_key(aead_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid AES key size");
- return -EINVAL;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
- break;
- case RTE_CRYPTO_AEAD_AES_CCM:
- if (qat_alg_validate_aes_key(aead_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid AES key size");
- return -EINVAL;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
- break;
- default:
-		PMD_DRV_LOG(ERR, "Crypto: Undefined AEAD specified %u",
- aead_xform->algo);
- return -EINVAL;
- }
-
- if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
- aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
- (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
- aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
- session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
-		/*
-		 * The cipher descriptor content must be created first,
-		 * then the authentication descriptor
-		 */
-
- crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
- RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
-
- if (qat_alg_aead_session_create_content_desc_cipher(session,
- aead_xform->key.data,
- aead_xform->key.length))
- return -EINVAL;
-
- if (qat_alg_aead_session_create_content_desc_auth(session,
- aead_xform->key.data,
- aead_xform->key.length,
- aead_xform->aad_length,
- aead_xform->digest_length,
- crypto_operation))
- return -EINVAL;
- } else {
- session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
-		/*
-		 * The authentication descriptor content must be created
-		 * first, then the cipher descriptor
-		 */
-
- crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
- RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
-
- if (qat_alg_aead_session_create_content_desc_auth(session,
- aead_xform->key.data,
- aead_xform->key.length,
- aead_xform->aad_length,
- aead_xform->digest_length,
- crypto_operation))
- return -EINVAL;
-
- if (qat_alg_aead_session_create_content_desc_cipher(session,
- aead_xform->key.data,
- aead_xform->key.length))
- return -EINVAL;
- }
-
- session->digest_length = aead_xform->digest_length;
- return 0;
-}
-
-unsigned qat_crypto_sym_get_session_private_size(
- struct rte_cryptodev *dev __rte_unused)
-{
- return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
-}
-
-static inline uint32_t
-qat_bpicipher_preprocess(struct qat_session *ctx,
- struct rte_crypto_op *op)
-{
- int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
- struct rte_crypto_sym_op *sym_op = op->sym;
- uint8_t last_block_len = block_len > 0 ?
- sym_op->cipher.data.length % block_len : 0;
-
- if (last_block_len &&
- ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
-
- /* Decrypt last block */
- uint8_t *last_block, *dst, *iv;
- uint32_t last_block_offset = sym_op->cipher.data.offset +
- sym_op->cipher.data.length - last_block_len;
- last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
- uint8_t *, last_block_offset);
-
- if (unlikely(sym_op->m_dst != NULL))
- /* out-of-place operation (OOP) */
- dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
- uint8_t *, last_block_offset);
- else
- dst = last_block;
-
- if (last_block_len < sym_op->cipher.data.length)
- /* use previous block ciphertext as IV */
- iv = last_block - block_len;
- else
- /* runt block, i.e. less than one full block */
- iv = rte_crypto_op_ctod_offset(op, uint8_t *,
- ctx->cipher_iv.offset);
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
- rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
- last_block_len);
- if (sym_op->m_dst != NULL)
- rte_hexdump(stdout, "BPI: dst before pre-process:", dst,
- last_block_len);
-#endif
- bpi_cipher_decrypt(last_block, dst, iv, block_len,
- last_block_len, ctx->bpi_ctx);
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
- rte_hexdump(stdout, "BPI: src after pre-process:", last_block,
- last_block_len);
- if (sym_op->m_dst != NULL)
- rte_hexdump(stdout, "BPI: dst after pre-process:", dst,
- last_block_len);
-#endif
- }
-
- return sym_op->cipher.data.length - last_block_len;
-}
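-/* Note: preprocess runs on the decrypt path before enqueue: the trailing
- * partial block is deciphered in software via the ECB helpers above, and
- * only the length of the complete blocks is returned, so the device is
- * handed whole blocks only.
- */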
-
-static inline uint32_t
-qat_bpicipher_postprocess(struct qat_session *ctx,
- struct rte_crypto_op *op)
-{
- int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
- struct rte_crypto_sym_op *sym_op = op->sym;
- uint8_t last_block_len = block_len > 0 ?
- sym_op->cipher.data.length % block_len : 0;
-
- if (last_block_len > 0 &&
- ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
-
- /* Encrypt last block */
- uint8_t *last_block, *dst, *iv;
- uint32_t last_block_offset;
-
- last_block_offset = sym_op->cipher.data.offset +
- sym_op->cipher.data.length - last_block_len;
- last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
- uint8_t *, last_block_offset);
-
- if (unlikely(sym_op->m_dst != NULL))
- /* out-of-place operation (OOP) */
- dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
- uint8_t *, last_block_offset);
- else
- dst = last_block;
-
- if (last_block_len < sym_op->cipher.data.length)
- /* use previous block ciphertext as IV */
- iv = dst - block_len;
- else
- /* runt block, i.e. less than one full block */
- iv = rte_crypto_op_ctod_offset(op, uint8_t *,
- ctx->cipher_iv.offset);
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
- rte_hexdump(stdout, "BPI: src before post-process:", last_block,
- last_block_len);
- if (sym_op->m_dst != NULL)
- rte_hexdump(stdout, "BPI: dst before post-process:",
- dst, last_block_len);
-#endif
- bpi_cipher_encrypt(last_block, dst, iv, block_len,
- last_block_len, ctx->bpi_ctx);
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
- rte_hexdump(stdout, "BPI: src after post-process:", last_block,
- last_block_len);
- if (sym_op->m_dst != NULL)
- rte_hexdump(stdout, "BPI: dst after post-process:", dst,
- last_block_len);
-#endif
- }
- return sym_op->cipher.data.length - last_block_len;
-}
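-/* Note: postprocess is the encrypt-side counterpart, run at dequeue time:
- * the device has already encrypted the complete blocks and the trailing
- * partial block is now ciphered in software, using the previous
- * ciphertext block (or the IV for a runt) as the CFB feedback.
- */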
-
-static inline void
-txq_write_tail(struct qat_qp *qp, struct qat_queue *q) {
- WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
- q->hw_queue_number, q->tail);
- q->nb_pending_requests = 0;
- q->csr_tail = q->tail;
-}
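-/* Tail updates are CSR (MMIO) writes, so they are coalesced: the enqueue
- * path defers txq_write_tail() until enough requests are pending
- * (QAT_CSR_TAIL_WRITE_THRESH) or the ring is nearly idle (inflights below
- * QAT_CSR_TAIL_FORCE_WRITE_THRESH), trading a little latency for fewer
- * expensive register writes.
- */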
-
-uint16_t
-qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- register struct qat_queue *queue;
- struct qat_qp *tmp_qp = (struct qat_qp *)qp;
- register uint32_t nb_ops_sent = 0;
- register struct rte_crypto_op **cur_op = ops;
- register int ret;
- uint16_t nb_ops_possible = nb_ops;
- register uint8_t *base_addr;
- register uint32_t tail;
- int overflow;
-
- if (unlikely(nb_ops == 0))
- return 0;
-
- /* read params used a lot in main loop into registers */
- queue = &(tmp_qp->tx_q);
- base_addr = (uint8_t *)queue->base_addr;
- tail = queue->tail;
-
- /* Find how many can actually fit on the ring */
- tmp_qp->inflights16 += nb_ops;
- overflow = tmp_qp->inflights16 - queue->max_inflights;
- if (overflow > 0) {
- tmp_qp->inflights16 -= overflow;
- nb_ops_possible = nb_ops - overflow;
- if (nb_ops_possible == 0)
- return 0;
- }
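-	/* inflights16 was optimistically bumped by nb_ops above; ops that
-	 * overflow the ring, or later fail to build a descriptor, are
-	 * subtracted again so the counter tracks descriptors actually
-	 * in flight.
-	 */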
-
- while (nb_ops_sent != nb_ops_possible) {
- ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail,
- tmp_qp->op_cookies[tail / queue->msg_size], tmp_qp);
- if (ret != 0) {
- tmp_qp->stats.enqueue_err_count++;
-			/*
-			 * This message cannot be enqueued; subtract the
-			 * ops that were not sent from the inflight count
-			 */
- tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
- if (nb_ops_sent == 0)
- return 0;
- goto kick_tail;
- }
-
- tail = adf_modulo(tail + queue->msg_size, queue->modulo);
- nb_ops_sent++;
- cur_op++;
- }
-kick_tail:
- queue->tail = tail;
- tmp_qp->stats.enqueued_count += nb_ops_sent;
- queue->nb_pending_requests += nb_ops_sent;
- if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
- queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
- txq_write_tail(tmp_qp, queue);
- }
- return nb_ops_sent;
-}
-
-static inline
-void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
-{
- uint32_t old_head, new_head;
- uint32_t max_head;
-
- old_head = q->csr_head;
- new_head = q->head;
- max_head = qp->nb_descriptors * q->msg_size;
-
- /* write out free descriptors */
- void *cur_desc = (uint8_t *)q->base_addr + old_head;
-
- if (new_head < old_head) {
- memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head);
- memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head);
- } else {
- memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head);
- }
- q->nb_processed_responses = 0;
- q->csr_head = new_head;
-
- /* write current head to CSR */
- WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
- q->hw_queue_number, new_head);
-}
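-/* Head updates are likewise coalesced: processed response slots are
- * overwritten with the empty-ring signature (handling wrap-around when
- * new_head < old_head), and the head CSR is only written back once more
- * than QAT_CSR_HEAD_WRITE_THRESH responses have accumulated.
- */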
-
-uint16_t
-qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops)
-{
- struct qat_queue *rx_queue, *tx_queue;
- struct qat_qp *tmp_qp = (struct qat_qp *)qp;
- uint32_t msg_counter = 0;
- struct rte_crypto_op *rx_op;
- struct icp_qat_fw_comn_resp *resp_msg;
- uint32_t head;
-
- rx_queue = &(tmp_qp->rx_q);
- tx_queue = &(tmp_qp->tx_q);
- head = rx_queue->head;
- resp_msg = (struct icp_qat_fw_comn_resp *)
- ((uint8_t *)rx_queue->base_addr + head);
-
- while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
- msg_counter != nb_ops) {
- rx_op = (struct rte_crypto_op *)(uintptr_t)
- (resp_msg->opaque_data);
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
- rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
- sizeof(struct icp_qat_fw_comn_resp));
-#endif
- if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
- ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
- resp_msg->comn_hdr.comn_status)) {
- rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
- } else {
- struct qat_session *sess = (struct qat_session *)
- get_session_private_data(
- rx_op->sym->session,
- cryptodev_qat_driver_id);
-
- if (sess->bpi_ctx)
- qat_bpicipher_postprocess(sess, rx_op);
- rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
- }
-
- head = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo);
- resp_msg = (struct icp_qat_fw_comn_resp *)
- ((uint8_t *)rx_queue->base_addr + head);
- *ops = rx_op;
- ops++;
- msg_counter++;
- }
- if (msg_counter > 0) {
- rx_queue->head = head;
- tmp_qp->stats.dequeued_count += msg_counter;
- rx_queue->nb_processed_responses += msg_counter;
- tmp_qp->inflights16 -= msg_counter;
-
- if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
- rxq_free_desc(tmp_qp, rx_queue);
- }
- /* also check if tail needs to be advanced */
- if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
- tx_queue->tail != tx_queue->csr_tail) {
- txq_write_tail(tmp_qp, tx_queue);
- }
- return msg_counter;
-}
-
-static inline int
-qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
- struct qat_alg_buf_list *list, uint32_t data_len)
-{
- int nr = 1;
-
- uint32_t buf_len = rte_pktmbuf_iova(buf) -
- buff_start + rte_pktmbuf_data_len(buf);
-
- list->bufers[0].addr = buff_start;
- list->bufers[0].resrvd = 0;
- list->bufers[0].len = buf_len;
-
- if (data_len <= buf_len) {
- list->num_bufs = nr;
- list->bufers[0].len = data_len;
- return 0;
- }
-
- buf = buf->next;
- while (buf) {
- if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
-			PMD_DRV_LOG(ERR, "QAT PMD exceeded max number of"
-					" QAT SGL entries (%u)",
-					QAT_SGL_MAX_NUMBER);
- return -EINVAL;
- }
-
- list->bufers[nr].len = rte_pktmbuf_data_len(buf);
- list->bufers[nr].resrvd = 0;
- list->bufers[nr].addr = rte_pktmbuf_iova(buf);
-
- buf_len += list->bufers[nr].len;
- buf = buf->next;
-
- if (buf_len > data_len) {
- list->bufers[nr].len -=
- buf_len - data_len;
- buf = NULL;
- }
- ++nr;
- }
- list->num_bufs = nr;
-
- return 0;
-}
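-/* The first SGL entry starts at the (possibly 64-byte aligned) buff_start
- * rather than at the mbuf data pointer; subsequent mbuf segments are
- * appended as-is and the final entry is trimmed so the list covers
- * exactly data_len bytes.
- */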
-
-static inline void
-set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
- struct icp_qat_fw_la_cipher_req_params *cipher_param,
- struct rte_crypto_op *op,
- struct icp_qat_fw_la_bulk_req *qat_req)
-{
- /* copy IV into request if it fits */
- if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
- rte_memcpy(cipher_param->u.cipher_IV_array,
- rte_crypto_op_ctod_offset(op, uint8_t *,
- iv_offset),
- iv_length);
- } else {
- ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- cipher_param->u.s.cipher_IV_ptr =
- rte_crypto_op_ctophys_offset(op,
- iv_offset);
- }
-}
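-/* The IV is copied inline into the request when it fits in the request's
- * cipher_IV_array; otherwise the request carries a 64-bit physical
- * pointer to the IV in the op, signalled via the cipher IV field flag.
- */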
-
-/** Setting the IV for CCM is a special case: byte 0 is set to q-1,
- * where q is the number of bytes remaining in the 16-byte block
- * after the flags octet and the nonce
- */
-static inline void
-set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
- struct icp_qat_fw_la_cipher_req_params *cipher_param,
- struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
-{
- rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
- ICP_QAT_HW_CCM_NONCE_OFFSET,
- rte_crypto_op_ctod_offset(op, uint8_t *,
- iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
- iv_length);
- *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
- q - ICP_QAT_HW_CCM_NONCE_OFFSET;
-
- if (aad_len_field_sz)
- rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
- rte_crypto_op_ctod_offset(op, uint8_t *,
- iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
- iv_length);
-}
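-/* Per RFC 3610 the CCM counter-block flags octet carries q-1, where q is
- * the size in bytes of the message-length field; the caller derives q as
- * ICP_QAT_HW_CCM_NQ_CONST minus the nonce length. When AAD is present the
- * same nonce is also copied into the B0 block in the AAD buffer.
- */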
-
-static inline int
-qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
- struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp)
-{
- int ret = 0;
- struct qat_session *ctx;
- struct icp_qat_fw_la_cipher_req_params *cipher_param;
- struct icp_qat_fw_la_auth_req_params *auth_param;
- register struct icp_qat_fw_la_bulk_req *qat_req;
- uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
- uint32_t cipher_len = 0, cipher_ofs = 0;
- uint32_t auth_len = 0, auth_ofs = 0;
- uint32_t min_ofs = 0;
- uint64_t src_buf_start = 0, dst_buf_start = 0;
- uint8_t do_sgl = 0;
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
- if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
- PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
- "operation requests, op (%p) is not a "
- "symmetric operation.", op);
- return -EINVAL;
- }
-#endif
- if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
- PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
- " requests, op (%p) is sessionless.", op);
- return -EINVAL;
- }
-
- ctx = (struct qat_session *)get_session_private_data(
- op->sym->session, cryptodev_qat_driver_id);
-
- if (unlikely(ctx == NULL)) {
- PMD_DRV_LOG(ERR, "Session was not created for this device");
- return -EINVAL;
- }
-
- if (unlikely(ctx->min_qat_dev_gen > qp->qat_dev_gen)) {
- PMD_DRV_LOG(ERR, "Session alg not supported on this device gen");
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
- return -EINVAL;
- }
-
- qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
- rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
- qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
- cipher_param = (void *)&qat_req->serv_specif_rqpars;
- auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
-
- if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
- ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
- /* AES-GCM or AES-CCM */
- if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
- ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
- (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
- && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
- && ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
- do_aead = 1;
- } else {
- do_auth = 1;
- do_cipher = 1;
- }
- } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
- do_auth = 1;
- do_cipher = 0;
- } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
- do_auth = 0;
- do_cipher = 1;
- }
-
- if (do_cipher) {
-
- if (ctx->qat_cipher_alg ==
- ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
- ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
- ctx->qat_cipher_alg ==
- ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
-
- if (unlikely(
- (cipher_param->cipher_length % BYTE_LENGTH != 0)
- || (cipher_param->cipher_offset
- % BYTE_LENGTH != 0))) {
- PMD_DRV_LOG(ERR,
-	"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- return -EINVAL;
- }
- cipher_len = op->sym->cipher.data.length >> 3;
- cipher_ofs = op->sym->cipher.data.offset >> 3;
-
- } else if (ctx->bpi_ctx) {
-			/* DOCSIS - only send complete blocks to the device;
-			 * any partial block is processed using CFB mode.
-			 * Even with 0 complete blocks, still send to the
-			 * device so the op reaches the rx queue for
-			 * post-processing and dequeuing
-			 */
- cipher_len = qat_bpicipher_preprocess(ctx, op);
- cipher_ofs = op->sym->cipher.data.offset;
- } else {
- cipher_len = op->sym->cipher.data.length;
- cipher_ofs = op->sym->cipher.data.offset;
- }
-
- set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
- cipher_param, op, qat_req);
- min_ofs = cipher_ofs;
- }
-
- if (do_auth) {
-
- if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
- ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
- ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
- if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
- || (auth_param->auth_len % BYTE_LENGTH != 0))) {
- PMD_DRV_LOG(ERR,
- "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
- op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- return -EINVAL;
- }
- auth_ofs = op->sym->auth.data.offset >> 3;
- auth_len = op->sym->auth.data.length >> 3;
-
- auth_param->u1.aad_adr =
- rte_crypto_op_ctophys_offset(op,
- ctx->auth_iv.offset);
-
- } else if (ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
- ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
- /* AES-GMAC */
- set_cipher_iv(ctx->auth_iv.length,
- ctx->auth_iv.offset,
- cipher_param, op, qat_req);
- auth_ofs = op->sym->auth.data.offset;
- auth_len = op->sym->auth.data.length;
-
- auth_param->u1.aad_adr = 0;
- auth_param->u2.aad_sz = 0;
-
- /*
- * If len(iv)==12B fw computes J0
- */
- if (ctx->auth_iv.length == 12) {
- ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
-
- }
- } else {
- auth_ofs = op->sym->auth.data.offset;
- auth_len = op->sym->auth.data.length;
-
- }
- min_ofs = auth_ofs;
-
- if (likely(ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL))
- auth_param->auth_res_addr =
- op->sym->auth.digest.phys_addr;
-
- }
-
- if (do_aead) {
-		/*
-		 * This address may be used as the AAD physical pointer;
-		 * for CCM it can be redirected to the IV offset in the op
-		 */
- rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
- if (ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
- ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
- /*
- * If len(iv)==12B fw computes J0
- */
- if (ctx->cipher_iv.length == 12) {
- ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
- }
-
- set_cipher_iv(ctx->cipher_iv.length,
- ctx->cipher_iv.offset,
- cipher_param, op, qat_req);
-
- } else if (ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
-
-			/* In the case of AES-CCM this may point to user
-			 * selected memory or to the IV offset in the
-			 * crypto_op
-			 */
- uint8_t *aad_data = op->sym->aead.aad.data;
-			/* This is the true AAD length; it does not include
-			 * the 18 bytes of preceding data
-			 */
- uint8_t aad_ccm_real_len = 0;
-
- uint8_t aad_len_field_sz = 0;
- uint32_t msg_len_be =
- rte_bswap32(op->sym->aead.data.length);
-
- if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
- aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
- aad_ccm_real_len = ctx->aad_len -
- ICP_QAT_HW_CCM_AAD_B0_LEN -
- ICP_QAT_HW_CCM_AAD_LEN_INFO;
- } else {
-				/*
-				 * aad_len is not greater than 18, so there is
-				 * no actual AAD data; use the IV after the op
-				 * for the B0 block instead
-				 */
- aad_data = rte_crypto_op_ctod_offset(op, uint8_t *,
- ctx->cipher_iv.offset);
- aad_phys_addr_aead =
- rte_crypto_op_ctophys_offset(op,
- ctx->cipher_iv.offset);
- }
-
- uint8_t q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
-
- aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(aad_len_field_sz,
- ctx->digest_length, q);
-
- if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
- memcpy(aad_data + ctx->cipher_iv.length +
- ICP_QAT_HW_CCM_NONCE_OFFSET
- + (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
- (uint8_t *)&msg_len_be,
- ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
- } else {
- memcpy(aad_data + ctx->cipher_iv.length +
- ICP_QAT_HW_CCM_NONCE_OFFSET,
- (uint8_t *)&msg_len_be
- + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
- - q), q);
- }
-
- if (aad_len_field_sz > 0) {
- *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
- = rte_bswap16(aad_ccm_real_len);
-
- if ((aad_ccm_real_len + aad_len_field_sz)
- % ICP_QAT_HW_CCM_AAD_B0_LEN) {
- uint8_t pad_len = 0;
- uint8_t pad_idx = 0;
-
- pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
- ((aad_ccm_real_len + aad_len_field_sz) %
- ICP_QAT_HW_CCM_AAD_B0_LEN);
- pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
- aad_ccm_real_len + aad_len_field_sz;
- memset(&aad_data[pad_idx],
- 0, pad_len);
- }
-
- }
-
- set_cipher_iv_ccm(ctx->cipher_iv.length,
- ctx->cipher_iv.offset,
- cipher_param, op, q,
- aad_len_field_sz);
-
- }
-
- cipher_len = op->sym->aead.data.length;
- cipher_ofs = op->sym->aead.data.offset;
- auth_len = op->sym->aead.data.length;
- auth_ofs = op->sym->aead.data.offset;
-
- auth_param->u1.aad_adr = aad_phys_addr_aead;
- auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
- min_ofs = op->sym->aead.data.offset;
- }
-
- if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
- do_sgl = 1;
-
- /* adjust for chain case */
- if (do_cipher && do_auth)
- min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
-
- if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
- min_ofs = 0;
-
- if (unlikely(op->sym->m_dst != NULL)) {
- /* Out-of-place operation (OOP)
- * Don't align DMA start. DMA the minimum data-set
- * so as not to overwrite data in dest buffer
- */
- src_buf_start =
- rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
- dst_buf_start =
- rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
-
- } else {
- /* In-place operation
- * Start DMA at nearest aligned address below min_ofs
- */
- src_buf_start =
- rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
- & QAT_64_BTYE_ALIGN_MASK;
-
- if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
- rte_pktmbuf_headroom(op->sym->m_src))
- > src_buf_start)) {
- /* alignment has pushed addr ahead of start of mbuf
- * so revert and take the performance hit
- */
- src_buf_start =
- rte_pktmbuf_iova_offset(op->sym->m_src,
- min_ofs);
- }
- dst_buf_start = src_buf_start;
- }
-
- if (do_cipher || do_aead) {
- cipher_param->cipher_offset =
- (uint32_t)rte_pktmbuf_iova_offset(
- op->sym->m_src, cipher_ofs) - src_buf_start;
- cipher_param->cipher_length = cipher_len;
- } else {
- cipher_param->cipher_offset = 0;
- cipher_param->cipher_length = 0;
- }
-
- if (do_auth || do_aead) {
- auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset(
- op->sym->m_src, auth_ofs) - src_buf_start;
- auth_param->auth_len = auth_len;
- } else {
- auth_param->auth_off = 0;
- auth_param->auth_len = 0;
- }
-
- qat_req->comn_mid.dst_length =
- qat_req->comn_mid.src_length =
- (cipher_param->cipher_offset + cipher_param->cipher_length)
- > (auth_param->auth_off + auth_param->auth_len) ?
- (cipher_param->cipher_offset + cipher_param->cipher_length)
- : (auth_param->auth_off + auth_param->auth_len);
-
- if (do_sgl) {
-
- ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
- QAT_COMN_PTR_TYPE_SGL);
- ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
- &qat_op_cookie->qat_sgl_list_src,
- qat_req->comn_mid.src_length);
- if (ret) {
- PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
- return ret;
- }
-
- if (likely(op->sym->m_dst == NULL))
- qat_req->comn_mid.dest_data_addr =
- qat_req->comn_mid.src_data_addr =
- qat_op_cookie->qat_sgl_src_phys_addr;
- else {
- ret = qat_sgl_fill_array(op->sym->m_dst,
- dst_buf_start,
- &qat_op_cookie->qat_sgl_list_dst,
- qat_req->comn_mid.dst_length);
-
- if (ret) {
- PMD_DRV_LOG(ERR, "QAT PMD Cannot "
- "fill sgl array");
- return ret;
- }
-
- qat_req->comn_mid.src_data_addr =
- qat_op_cookie->qat_sgl_src_phys_addr;
- qat_req->comn_mid.dest_data_addr =
- qat_op_cookie->qat_sgl_dst_phys_addr;
- }
- } else {
- qat_req->comn_mid.src_data_addr = src_buf_start;
- qat_req->comn_mid.dest_data_addr = dst_buf_start;
- }
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
- rte_hexdump(stdout, "qat_req:", qat_req,
- sizeof(struct icp_qat_fw_la_bulk_req));
- rte_hexdump(stdout, "src_data:",
- rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
- rte_pktmbuf_data_len(op->sym->m_src));
- if (do_cipher) {
- uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
- uint8_t *,
- ctx->cipher_iv.offset);
- rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr,
- ctx->cipher_iv.length);
- }
-
- if (do_auth) {
- if (ctx->auth_iv.length) {
- uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
- uint8_t *,
- ctx->auth_iv.offset);
- rte_hexdump(stdout, "auth iv:", auth_iv_ptr,
- ctx->auth_iv.length);
- }
- rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
- ctx->digest_length);
- }
-
- if (do_aead) {
- rte_hexdump(stdout, "digest:", op->sym->aead.digest.data,
- ctx->digest_length);
- rte_hexdump(stdout, "aad:", op->sym->aead.aad.data,
- ctx->aad_len);
- }
-#endif
- return 0;
-}
-
-static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
-{
- uint32_t div = data >> shift;
- uint32_t mult = div << shift;
-
- return data - mult;
-}
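-/* adf_modulo(data, shift) computes data % (1 << shift) without a divide;
- * callers pass the queue's 'modulo' field as the shift, so ring offsets
- * wrap cheaply around the power-of-two ring size.
- */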
-
-int qat_dev_config(__rte_unused struct rte_cryptodev *dev,
- __rte_unused struct rte_cryptodev_config *config)
-{
- PMD_INIT_FUNC_TRACE();
- return 0;
-}
-
-int qat_dev_start(__rte_unused struct rte_cryptodev *dev)
-{
- PMD_INIT_FUNC_TRACE();
- return 0;
-}
-
-void qat_dev_stop(__rte_unused struct rte_cryptodev *dev)
-{
- PMD_INIT_FUNC_TRACE();
-}
-
-int qat_dev_close(struct rte_cryptodev *dev)
-{
- int i, ret;
-
- PMD_INIT_FUNC_TRACE();
-
- for (i = 0; i < dev->data->nb_queue_pairs; i++) {
- ret = qat_crypto_sym_qp_release(dev, i);
- if (ret < 0)
- return ret;
- }
-
- return 0;
-}
-
-void qat_dev_info_get(struct rte_cryptodev *dev,
- struct rte_cryptodev_info *info)
-{
- struct qat_pmd_private *internals = dev->data->dev_private;
-
- PMD_INIT_FUNC_TRACE();
- if (info != NULL) {
- info->max_nb_queue_pairs =
- ADF_NUM_SYM_QPS_PER_BUNDLE *
- ADF_NUM_BUNDLES_PER_DEV;
- info->feature_flags = dev->feature_flags;
- info->capabilities = internals->qat_dev_capabilities;
- info->sym.max_nb_sessions = internals->max_nb_sessions;
- info->driver_id = cryptodev_qat_driver_id;
- info->pci_dev = RTE_DEV_TO_PCI(dev->device);
- }
-}
-
-void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
- struct rte_cryptodev_stats *stats)
-{
- int i;
- struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
-
- PMD_INIT_FUNC_TRACE();
- if (stats == NULL) {
- PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
- return;
- }
- for (i = 0; i < dev->data->nb_queue_pairs; i++) {
- if (qp[i] == NULL) {
- PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
- continue;
- }
-
- stats->enqueued_count += qp[i]->stats.enqueued_count;
- stats->dequeued_count += qp[i]->stats.dequeued_count;
- stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
- stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
- }
-}
-
-void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev)
-{
- int i;
- struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs);
-
- PMD_INIT_FUNC_TRACE();
- for (i = 0; i < dev->data->nb_queue_pairs; i++)
- memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats));
- PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared");
-}
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
deleted file mode 100644
index c182af2e..00000000
--- a/drivers/crypto/qat/qat_crypto.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2016 Intel Corporation
- */
-
-#ifndef _QAT_CRYPTO_H_
-#define _QAT_CRYPTO_H_
-
-#include <rte_cryptodev_pmd.h>
-#include <rte_memzone.h>
-
-#include "qat_crypto_capabilities.h"
-
-#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat
-/**< Intel QAT Symmetric Crypto PMD device name */
-
-/*
- * This macro rounds a number up to be a multiple of
- * the alignment when the alignment is a power of 2
- */
-#define ALIGN_POW2_ROUNDUP(num, align) \
- (((num) + (align) - 1) & ~((align) - 1))
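-/* AND-ing an address with this mask rounds it down to a 64-byte boundary */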
-#define QAT_64_BTYE_ALIGN_MASK (~0x3f)
-
-#define QAT_CSR_HEAD_WRITE_THRESH 32U
-/* number of requests to accumulate before writing head CSR */
-#define QAT_CSR_TAIL_WRITE_THRESH 32U
-/* number of requests to accumulate before writing tail CSR */
-#define QAT_CSR_TAIL_FORCE_WRITE_THRESH 256U
-/* number of inflights below which no tail write coalescing should occur */
-
-struct qat_session;
-
-enum qat_device_gen {
- QAT_GEN1 = 1,
- QAT_GEN2,
-};
-
-/**
- * Structure associated with each queue.
- */
-struct qat_queue {
- char memz_name[RTE_MEMZONE_NAMESIZE];
- void *base_addr; /* Base address */
- rte_iova_t base_phys_addr; /* Queue physical address */
- uint32_t head; /* Shadow copy of the head */
- uint32_t tail; /* Shadow copy of the tail */
- uint32_t modulo;
- uint32_t msg_size;
- uint16_t max_inflights;
- uint32_t queue_size;
- uint8_t hw_bundle_number;
- uint8_t hw_queue_number;
- /* HW queue aka ring offset on bundle */
- uint32_t csr_head; /* last written head value */
- uint32_t csr_tail; /* last written tail value */
- uint16_t nb_processed_responses;
- /* number of responses processed since last CSR head write */
- uint16_t nb_pending_requests;
- /* number of requests pending since last CSR tail write */
-};
-
-struct qat_qp {
- void *mmap_bar_addr;
- uint16_t inflights16;
- struct qat_queue tx_q;
- struct qat_queue rx_q;
- struct rte_cryptodev_stats stats;
- struct rte_mempool *op_cookie_pool;
- void **op_cookies;
- uint32_t nb_descriptors;
- enum qat_device_gen qat_dev_gen;
-} __rte_cache_aligned;
-
-/** private data structure for each QAT device */
-struct qat_pmd_private {
- unsigned max_nb_queue_pairs;
- /**< Max number of queue pairs supported by device */
- unsigned max_nb_sessions;
- /**< Max number of sessions supported by device */
- enum qat_device_gen qat_dev_gen;
- /**< QAT device generation */
- const struct rte_cryptodev_capabilities *qat_dev_capabilities;
-};
-
-extern uint8_t cryptodev_qat_driver_id;
-
-int qat_dev_config(struct rte_cryptodev *dev,
- struct rte_cryptodev_config *config);
-int qat_dev_start(struct rte_cryptodev *dev);
-void qat_dev_stop(struct rte_cryptodev *dev);
-int qat_dev_close(struct rte_cryptodev *dev);
-void qat_dev_info_get(struct rte_cryptodev *dev,
- struct rte_cryptodev_info *info);
-
-void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
- struct rte_cryptodev_stats *stats);
-void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev);
-
-int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
- const struct rte_cryptodev_qp_conf *rx_conf, int socket_id,
- struct rte_mempool *session_pool);
-int qat_crypto_sym_qp_release(struct rte_cryptodev *dev,
- uint16_t queue_pair_id);
-
-int
-qat_pmd_session_mempool_create(struct rte_cryptodev *dev,
- unsigned nb_objs, unsigned obj_cache_size, int socket_id);
-
-extern unsigned
-qat_crypto_sym_get_session_private_size(struct rte_cryptodev *dev);
-
-extern int
-qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform,
- struct rte_cryptodev_sym_session *sess,
- struct rte_mempool *mempool);
-
-
-int
-qat_crypto_set_session_parameters(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform, void *session_private);
-
-int
-qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
- struct qat_session *session);
-
-int
-qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform,
- struct qat_session *session);
-
-int
-qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform,
- struct qat_session *session);
-
-
-extern void
-qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
- struct rte_cryptodev_sym_session *session);
-
-extern uint16_t
-qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops);
-
-extern uint16_t
-qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
- uint16_t nb_ops);
-
-#endif /* _QAT_CRYPTO_H_ */
diff --git a/drivers/crypto/qat/qat_logs.h b/drivers/crypto/qat/qat_logs.h
deleted file mode 100644
index 565089a4..00000000
--- a/drivers/crypto/qat/qat_logs.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#ifndef _QAT_LOGS_H_
-#define _QAT_LOGS_H_
-
-#define PMD_INIT_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \
- "PMD: %s(): " fmt "\n", __func__, ##args)
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_INIT
-#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
-#else
-#define PMD_INIT_FUNC_TRACE() do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
-#define PMD_RX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
-#define PMD_TX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX_FREE
-#define PMD_TX_FREE_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER
-#define PMD_DRV_LOG_RAW(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
-#else
-#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
-#endif
-
-#define PMD_DRV_LOG(level, fmt, args...) \
- PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
-
-#endif /* _QAT_LOGS_H_ */
diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c
deleted file mode 100644
index 87b9ce0b..00000000
--- a/drivers/crypto/qat/qat_qp.c
+++ /dev/null
@@ -1,470 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
- */
-
-#include <rte_common.h>
-#include <rte_dev.h>
-#include <rte_malloc.h>
-#include <rte_memzone.h>
-#include <rte_cryptodev_pmd.h>
-#include <rte_pci.h>
-#include <rte_bus_pci.h>
-#include <rte_atomic.h>
-#include <rte_prefetch.h>
-
-#include "qat_logs.h"
-#include "qat_crypto.h"
-#include "qat_algs.h"
-#include "adf_transport_access_macros.h"
-
-#define ADF_MAX_SYM_DESC 4096
-#define ADF_MIN_SYM_DESC 128
-#define ADF_SYM_TX_RING_DESC_SIZE 128
-#define ADF_SYM_RX_RING_DESC_SIZE 32
-#define ADF_SYM_TX_QUEUE_STARTOFF 2
-/* Offset from bundle start to 1st Sym Tx queue */
-#define ADF_SYM_RX_QUEUE_STARTOFF 10
-#define ADF_ARB_REG_SLOT 0x1000
-#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
-
-#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
- ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
- (ADF_ARB_REG_SLOT * index), value)
-
-static int qat_qp_check_queue_alignment(uint64_t phys_addr,
- uint32_t queue_size_bytes);
-static int qat_tx_queue_create(struct rte_cryptodev *dev,
- struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
- int socket_id);
-static int qat_rx_queue_create(struct rte_cryptodev *dev,
- struct qat_queue *queue, uint8_t id, uint32_t nb_desc,
- int socket_id);
-static void qat_queue_delete(struct qat_queue *queue);
-static int qat_queue_create(struct rte_cryptodev *dev,
- struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size,
- int socket_id);
-static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
- uint32_t *queue_size_for_csr);
-static void adf_configure_queues(struct qat_qp *queue);
-static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr);
-static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr);
-
-static const struct rte_memzone *
-queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
- int socket_id)
-{
- const struct rte_memzone *mz;
- unsigned memzone_flags = 0;
- const struct rte_memseg *ms;
-
- PMD_INIT_FUNC_TRACE();
- mz = rte_memzone_lookup(queue_name);
- if (mz != 0) {
- if (((size_t)queue_size <= mz->len) &&
- ((socket_id == SOCKET_ID_ANY) ||
- (socket_id == mz->socket_id))) {
- PMD_DRV_LOG(DEBUG, "re-use memzone already "
- "allocated for %s", queue_name);
- return mz;
- }
-
- PMD_DRV_LOG(ERR, "Incompatible memzone already "
- "allocated %s, size %u, socket %d. "
- "Requested size %u, socket %u",
- queue_name, (uint32_t)mz->len,
- mz->socket_id, queue_size, socket_id);
- return NULL;
- }
-
- PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
- queue_name, queue_size, socket_id);
- ms = rte_eal_get_physmem_layout();
- switch (ms[0].hugepage_sz) {
- case(RTE_PGSIZE_2M):
- memzone_flags = RTE_MEMZONE_2MB;
- break;
- case(RTE_PGSIZE_1G):
- memzone_flags = RTE_MEMZONE_1GB;
- break;
- case(RTE_PGSIZE_16M):
- memzone_flags = RTE_MEMZONE_16MB;
- break;
- case(RTE_PGSIZE_16G):
- memzone_flags = RTE_MEMZONE_16GB;
- break;
- default:
- memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
- }
- return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id,
- memzone_flags, queue_size);
-}
-
-int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
- const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id, struct rte_mempool *session_pool __rte_unused)
-{
- struct qat_qp *qp;
- struct rte_pci_device *pci_dev;
- int ret;
- char op_cookie_pool_name[RTE_RING_NAMESIZE];
- uint32_t i;
-
- PMD_INIT_FUNC_TRACE();
-
-	/* If qp is already in use, free ring memory and qp metadata. */
- if (dev->data->queue_pairs[queue_pair_id] != NULL) {
- ret = qat_crypto_sym_qp_release(dev, queue_pair_id);
- if (ret < 0)
- return ret;
- }
-
- if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) ||
- (qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
- PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
- qp_conf->nb_descriptors);
- return -EINVAL;
- }
-
- pci_dev = RTE_DEV_TO_PCI(dev->device);
-
- if (pci_dev->mem_resource[0].addr == NULL) {
- PMD_DRV_LOG(ERR, "Could not find VF config space "
- "(UIO driver attached?).");
- return -EINVAL;
- }
-
- if (queue_pair_id >=
- (ADF_NUM_SYM_QPS_PER_BUNDLE *
- ADF_NUM_BUNDLES_PER_DEV)) {
- PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
- queue_pair_id);
- return -EINVAL;
- }
- /* Allocate the queue pair data structure. */
- qp = rte_zmalloc("qat PMD qp metadata",
- sizeof(*qp), RTE_CACHE_LINE_SIZE);
- if (qp == NULL) {
- PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
- return -ENOMEM;
- }
- qp->nb_descriptors = qp_conf->nb_descriptors;
- qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
- qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
- RTE_CACHE_LINE_SIZE);
- if (qp->op_cookies == NULL) {
- PMD_DRV_LOG(ERR, "Failed to alloc mem for cookie");
- rte_free(qp);
- return -ENOMEM;
- }
-
- qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
- qp->inflights16 = 0;
-
- if (qat_tx_queue_create(dev, &(qp->tx_q),
- queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
- PMD_INIT_LOG(ERR, "Tx queue create failed "
- "queue_pair_id=%u", queue_pair_id);
- goto create_err;
- }
-
- if (qat_rx_queue_create(dev, &(qp->rx_q),
- queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
- PMD_DRV_LOG(ERR, "Rx queue create failed "
- "queue_pair_id=%hu", queue_pair_id);
- qat_queue_delete(&(qp->tx_q));
- goto create_err;
- }
-
- adf_configure_queues(qp);
- adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
- snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
- pci_dev->driver->driver.name, dev->data->dev_id,
- queue_pair_id);
-
- qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
- if (qp->op_cookie_pool == NULL)
- qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
- qp->nb_descriptors,
- sizeof(struct qat_crypto_op_cookie), 64, 0,
- NULL, NULL, NULL, NULL, socket_id,
- 0);
- if (!qp->op_cookie_pool) {
- PMD_DRV_LOG(ERR, "QAT PMD Cannot create"
- " op mempool");
- goto create_err;
- }
-
- for (i = 0; i < qp->nb_descriptors; i++) {
- if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
- PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
- goto create_err;
- }
-
- struct qat_crypto_op_cookie *sql_cookie =
- qp->op_cookies[i];
-
- sql_cookie->qat_sgl_src_phys_addr =
- rte_mempool_virt2iova(sql_cookie) +
- offsetof(struct qat_crypto_op_cookie,
- qat_sgl_list_src);
-
- sql_cookie->qat_sgl_dst_phys_addr =
- rte_mempool_virt2iova(sql_cookie) +
- offsetof(struct qat_crypto_op_cookie,
- qat_sgl_list_dst);
- }
-
- struct qat_pmd_private *internals
- = dev->data->dev_private;
- qp->qat_dev_gen = internals->qat_dev_gen;
-
- dev->data->queue_pairs[queue_pair_id] = qp;
- return 0;
-
-create_err:
- if (qp->op_cookie_pool)
- rte_mempool_free(qp->op_cookie_pool);
- rte_free(qp->op_cookies);
- rte_free(qp);
- return -EFAULT;
-}
-
-int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
-{
- struct qat_qp *qp =
- (struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
- uint32_t i;
-
- PMD_INIT_FUNC_TRACE();
- if (qp == NULL) {
- PMD_DRV_LOG(DEBUG, "qp already freed");
- return 0;
- }
-
- /* Don't free memory if there are still responses to be processed */
- if (qp->inflights16 == 0) {
- qat_queue_delete(&(qp->tx_q));
- qat_queue_delete(&(qp->rx_q));
- } else {
- return -EAGAIN;
- }
-
- adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);
-
- for (i = 0; i < qp->nb_descriptors; i++)
- rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);
-
- if (qp->op_cookie_pool)
- rte_mempool_free(qp->op_cookie_pool);
-
- rte_free(qp->op_cookies);
- rte_free(qp);
- dev->data->queue_pairs[queue_pair_id] = NULL;
- return 0;
-}
-
-static int qat_tx_queue_create(struct rte_cryptodev *dev,
- struct qat_queue *queue, uint8_t qp_id,
- uint32_t nb_desc, int socket_id)
-{
- PMD_INIT_FUNC_TRACE();
- queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
- queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
- ADF_SYM_TX_QUEUE_STARTOFF;
- PMD_DRV_LOG(DEBUG, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u",
- nb_desc, qp_id, queue->hw_bundle_number,
- queue->hw_queue_number);
-
- return qat_queue_create(dev, queue, nb_desc,
- ADF_SYM_TX_RING_DESC_SIZE, socket_id);
-}
-
-static int qat_rx_queue_create(struct rte_cryptodev *dev,
- struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc,
- int socket_id)
-{
- PMD_INIT_FUNC_TRACE();
- queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
- queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
- ADF_SYM_RX_QUEUE_STARTOFF;
-
- PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
- nb_desc, qp_id, queue->hw_bundle_number,
- queue->hw_queue_number);
- return qat_queue_create(dev, queue, nb_desc,
- ADF_SYM_RX_RING_DESC_SIZE, socket_id);
-}
-
-static void qat_queue_delete(struct qat_queue *queue)
-{
- const struct rte_memzone *mz;
- int status = 0;
-
- if (queue == NULL) {
- PMD_DRV_LOG(DEBUG, "Invalid queue");
- return;
- }
- mz = rte_memzone_lookup(queue->memz_name);
- if (mz != NULL) {
- /* Write an unused pattern to the queue memory. */
- memset(queue->base_addr, 0x7F, queue->queue_size);
- status = rte_memzone_free(mz);
- if (status != 0)
- PMD_DRV_LOG(ERR, "Error %d on freeing queue %s",
- status, queue->memz_name);
- } else {
- PMD_DRV_LOG(DEBUG, "queue %s doesn't exist",
- queue->memz_name);
- }
-}
-
-static int
-qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
- uint32_t nb_desc, uint8_t desc_size, int socket_id)
-{
- uint64_t queue_base;
- void *io_addr;
- const struct rte_memzone *qp_mz;
- uint32_t queue_size_bytes = nb_desc*desc_size;
- struct rte_pci_device *pci_dev;
-
- PMD_INIT_FUNC_TRACE();
- if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
- PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
- return -EINVAL;
- }
-
- pci_dev = RTE_DEV_TO_PCI(dev->device);
-
- /*
- * Allocate a memzone for the queue - create a unique name.
- */
- snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
- pci_dev->driver->driver.name, "qp_mem", dev->data->dev_id,
- queue->hw_bundle_number, queue->hw_queue_number);
- qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
- socket_id);
- if (qp_mz == NULL) {
- PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
- return -ENOMEM;
- }
-
- queue->base_addr = (char *)qp_mz->addr;
- queue->base_phys_addr = qp_mz->iova;
- if (qat_qp_check_queue_alignment(queue->base_phys_addr,
- queue_size_bytes)) {
- PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
- " 0x%"PRIx64"\n",
- queue->base_phys_addr);
- return -EFAULT;
- }
-
- if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
- != 0) {
- PMD_DRV_LOG(ERR, "Invalid num inflights");
- return -EINVAL;
- }
-
- queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
- ADF_BYTES_TO_MSG_SIZE(desc_size));
- queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size);
- PMD_DRV_LOG(DEBUG, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
- " msg_size %u, max_inflights %u modulo %u",
- queue->queue_size, queue_size_bytes,
- nb_desc, desc_size, queue->max_inflights,
- queue->modulo);
-
- if (queue->max_inflights < 2) {
- PMD_DRV_LOG(ERR, "Invalid num inflights");
- return -EINVAL;
- }
- queue->head = 0;
- queue->tail = 0;
- queue->msg_size = desc_size;
-
- /*
- * Write an unused pattern to the queue memory.
- */
- memset(queue->base_addr, 0x7F, queue_size_bytes);
-
- queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
- queue->queue_size);
-
- io_addr = pci_dev->mem_resource[0].addr;
-
- WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
- queue->hw_queue_number, queue_base);
- return 0;
-}
-
-static int qat_qp_check_queue_alignment(uint64_t phys_addr,
- uint32_t queue_size_bytes)
-{
- PMD_INIT_FUNC_TRACE();
- if (((queue_size_bytes - 1) & phys_addr) != 0)
- return -EINVAL;
- return 0;
-}
-
-static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
- uint32_t *p_queue_size_for_csr)
-{
- uint8_t i = ADF_MIN_RING_SIZE;
-
- PMD_INIT_FUNC_TRACE();
- for (; i <= ADF_MAX_RING_SIZE; i++)
- if ((msg_size * msg_num) ==
- (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
- *p_queue_size_for_csr = i;
- return 0;
- }
- PMD_DRV_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
- return -EINVAL;
-}
-
-static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
-{
- uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
- (ADF_ARB_REG_SLOT *
- txq->hw_bundle_number);
- uint32_t value;
-
- PMD_INIT_FUNC_TRACE();
- value = ADF_CSR_RD(base_addr, arb_csr_offset);
- value |= (0x01 << txq->hw_queue_number);
- ADF_CSR_WR(base_addr, arb_csr_offset, value);
-}
-
-static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
-{
- uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
- (ADF_ARB_REG_SLOT *
- txq->hw_bundle_number);
- uint32_t value;
-
- PMD_INIT_FUNC_TRACE();
- value = ADF_CSR_RD(base_addr, arb_csr_offset);
- value ^= (0x01 << txq->hw_queue_number);
- ADF_CSR_WR(base_addr, arb_csr_offset, value);
-}
-
-static void adf_configure_queues(struct qat_qp *qp)
-{
- uint32_t queue_config;
- struct qat_queue *queue = &qp->tx_q;
-
- PMD_INIT_FUNC_TRACE();
- queue_config = BUILD_RING_CONFIG(queue->queue_size);
-
- WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
- queue->hw_queue_number, queue_config);
-
- queue = &qp->rx_q;
- queue_config =
- BUILD_RESP_RING_CONFIG(queue->queue_size,
- ADF_RING_NEAR_WATERMARK_512,
- ADF_RING_NEAR_WATERMARK_0);
-
- WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
- queue->hw_queue_number, queue_config);
-}
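
The removed queue-creation path above enforces a hardware constraint worth
spelling out: the ring's physical base address must be naturally aligned to
the ring size, which is what qat_qp_check_queue_alignment tests with a mask.
A minimal standalone sketch of that check (hypothetical names, assuming a
power-of-two ring size):

#include <stdint.h>

/* A power-of-two-sized ring is naturally aligned when its base address
 * has no bits set within (size - 1); e.g. a 64 KiB ring at 0x10000
 * passes, one at 0x10040 fails.
 */
static int ring_base_aligned(uint64_t phys_addr, uint32_t ring_size_bytes)
{
	return ((uint64_t)(ring_size_bytes - 1) & phys_addr) == 0;
}
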
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
new file mode 100644
index 00000000..10cdf2e1
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym.c
@@ -0,0 +1,569 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#include <openssl/evp.h>
+
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_crypto_sym.h>
+#include <rte_bus_pci.h>
+#include <rte_byteorder.h>
+
+#include "qat_sym.h"
+
+/** Decrypt a single partial block.
+ * Depends on openssl libcrypto.
+ * Uses ECB-encryption of the IV plus XOR, which gives the same result as
+ * CFB decryption but is more performant.
+ */
+static inline int
+bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, int ivlen, int srclen,
+ void *bpi_ctx)
+{
+ EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+ int encrypted_ivlen;
+ uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+ uint8_t *encr = encrypted_iv;
+
+	/* ECB method: encrypt (not decrypt!) the IV, then XOR it with the
+	 * ciphertext to recover the plaintext
+	 */
+ if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+ <= 0)
+ goto cipher_decrypt_err;
+
+ for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+ *dst = *src ^ *encr;
+
+ return 0;
+
+cipher_decrypt_err:
+ QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
+ return -EINVAL;
+}
+
+
+static inline uint32_t
+qat_bpicipher_preprocess(struct qat_sym_session *ctx,
+ struct rte_crypto_op *op)
+{
+ int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ uint8_t last_block_len = block_len > 0 ?
+ sym_op->cipher.data.length % block_len : 0;
+
+ if (last_block_len &&
+ ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+
+ /* Decrypt last block */
+ uint8_t *last_block, *dst, *iv;
+ uint32_t last_block_offset = sym_op->cipher.data.offset +
+ sym_op->cipher.data.length - last_block_len;
+ last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+ uint8_t *, last_block_offset);
+
+ if (unlikely(sym_op->m_dst != NULL))
+ /* out-of-place operation (OOP) */
+ dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+ uint8_t *, last_block_offset);
+ else
+ dst = last_block;
+
+ if (last_block_len < sym_op->cipher.data.length)
+ /* use previous block ciphertext as IV */
+ iv = last_block - block_len;
+ else
+ /* runt block, i.e. less than one full block */
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
+ last_block, last_block_len);
+ if (sym_op->m_dst != NULL)
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI:dst before pre-process:",
+ dst, last_block_len);
+#endif
+ bpi_cipher_decrypt(last_block, dst, iv, block_len,
+ last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
+ last_block, last_block_len);
+ if (sym_op->m_dst != NULL)
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
+ dst, last_block_len);
+#endif
+ }
+
+ return sym_op->cipher.data.length - last_block_len;
+}
+
+static inline void
+set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
+ struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ struct rte_crypto_op *op,
+ struct icp_qat_fw_la_bulk_req *qat_req)
+{
+ /* copy IV into request if it fits */
+ if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
+ rte_memcpy(cipher_param->u.cipher_IV_array,
+ rte_crypto_op_ctod_offset(op, uint8_t *,
+ iv_offset),
+ iv_length);
+ } else {
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+ cipher_param->u.s.cipher_IV_ptr =
+ rte_crypto_op_ctophys_offset(op,
+ iv_offset);
+ }
+}
+
+/** Setting the IV for CCM is a special case: the 0th byte is set to q-1,
+ * where q is the padding of the nonce within a 16-byte block
+ */
+static inline void
+set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
+ struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
+{
+ rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ rte_crypto_op_ctod_offset(op, uint8_t *,
+ iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ iv_length);
+ *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+ q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+ if (aad_len_field_sz)
+ rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
+ rte_crypto_op_ctod_offset(op, uint8_t *,
+ iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ iv_length);
+}
+
+int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+ void *op_cookie, enum qat_device_gen qat_dev_gen)
+{
+ int ret = 0;
+ struct qat_sym_session *ctx;
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_auth_req_params *auth_param;
+ register struct icp_qat_fw_la_bulk_req *qat_req;
+ uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
+ uint32_t cipher_len = 0, cipher_ofs = 0;
+ uint32_t auth_len = 0, auth_ofs = 0;
+ uint32_t min_ofs = 0;
+ uint64_t src_buf_start = 0, dst_buf_start = 0;
+ uint8_t do_sgl = 0;
+ struct rte_crypto_op *op = (struct rte_crypto_op *)in_op;
+ struct qat_sym_op_cookie *cookie =
+ (struct qat_sym_op_cookie *)op_cookie;
+
+ if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
+ QAT_DP_LOG(ERR, "QAT PMD only supports symmetric crypto "
+ "operation requests, op (%p) is not a "
+ "symmetric operation.", op);
+ return -EINVAL;
+ }
+
+ if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ QAT_DP_LOG(ERR, "QAT PMD only supports session oriented"
+ " requests, op (%p) is sessionless.", op);
+ return -EINVAL;
+ }
+
+ ctx = (struct qat_sym_session *)get_sym_session_private_data(
+ op->sym->session, cryptodev_qat_driver_id);
+
+ if (unlikely(ctx == NULL)) {
+ QAT_DP_LOG(ERR, "Session was not created for this device");
+ return -EINVAL;
+ }
+
+ if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) {
+ QAT_DP_LOG(ERR, "Session alg not supported on this device gen");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -EINVAL;
+ }
+
+ qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
+ rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
+ qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
+ cipher_param = (void *)&qat_req->serv_specif_rqpars;
+ auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+
+ if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
+ ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ /* AES-GCM or AES-CCM */
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+ (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+ && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+ && ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
+ do_aead = 1;
+ } else {
+ do_auth = 1;
+ do_cipher = 1;
+ }
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
+ do_auth = 1;
+ do_cipher = 0;
+ } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
+ do_auth = 0;
+ do_cipher = 1;
+ }
+
+ if (do_cipher) {
+
+ if (ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
+ ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
+ ctx->qat_cipher_alg ==
+ ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+
+ if (unlikely(
+ (op->sym->cipher.data.length % BYTE_LENGTH != 0) ||
+ (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) {
+ QAT_DP_LOG(ERR,
+ "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+ cipher_len = op->sym->cipher.data.length >> 3;
+ cipher_ofs = op->sym->cipher.data.offset >> 3;
+
+ } else if (ctx->bpi_ctx) {
+			/* DOCSIS - only send complete blocks to the device.
+			 * Process any partial block using CFB mode.
+			 * Even if there are 0 complete blocks, still send the
+			 * op to the device so it reaches the rx queue for
+			 * post-processing and dequeuing.
+			 */
+ cipher_len = qat_bpicipher_preprocess(ctx, op);
+ cipher_ofs = op->sym->cipher.data.offset;
+ } else {
+ cipher_len = op->sym->cipher.data.length;
+ cipher_ofs = op->sym->cipher.data.offset;
+ }
+
+ set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
+ cipher_param, op, qat_req);
+ min_ofs = cipher_ofs;
+ }
+
+ if (do_auth) {
+
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
+ if (unlikely(
+ (op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
+ (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
+ QAT_DP_LOG(ERR,
+ "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ return -EINVAL;
+ }
+ auth_ofs = op->sym->auth.data.offset >> 3;
+ auth_len = op->sym->auth.data.length >> 3;
+
+ auth_param->u1.aad_adr =
+ rte_crypto_op_ctophys_offset(op,
+ ctx->auth_iv.offset);
+
+ } else if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ /* AES-GMAC */
+ set_cipher_iv(ctx->auth_iv.length,
+ ctx->auth_iv.offset,
+ cipher_param, op, qat_req);
+ auth_ofs = op->sym->auth.data.offset;
+ auth_len = op->sym->auth.data.length;
+
+ auth_param->u1.aad_adr = 0;
+ auth_param->u2.aad_sz = 0;
+
+ /*
+ * If len(iv)==12B fw computes J0
+ */
+ if (ctx->auth_iv.length == 12) {
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+
+ }
+ } else {
+ auth_ofs = op->sym->auth.data.offset;
+ auth_len = op->sym->auth.data.length;
+
+ }
+ min_ofs = auth_ofs;
+
+ if (likely(ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL))
+ auth_param->auth_res_addr =
+ op->sym->auth.digest.phys_addr;
+
+ }
+
+ if (do_aead) {
+		/*
+		 * This address may be used for setting the AAD physical
+		 * pointer; for CCM it can be redirected to the IV offset
+		 * within the op.
+		 */
+ rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
+ if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ /*
+ * If len(iv)==12B fw computes J0
+ */
+ if (ctx->cipher_iv.length == 12) {
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+ }
+ set_cipher_iv(ctx->cipher_iv.length,
+ ctx->cipher_iv.offset,
+ cipher_param, op, qat_req);
+
+ } else if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
+
+			/* In case of AES-CCM this may point to user-selected
+			 * memory or to the IV offset in the crypto_op
+			 */
+ uint8_t *aad_data = op->sym->aead.aad.data;
+			/* This is the true AAD length; it does not include the
+			 * 18 bytes of preceding data
+			 */
+ uint8_t aad_ccm_real_len = 0;
+ uint8_t aad_len_field_sz = 0;
+ uint32_t msg_len_be =
+ rte_bswap32(op->sym->aead.data.length);
+
+ if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+ aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ aad_ccm_real_len = ctx->aad_len -
+ ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ } else {
+				/*
+				 * aad_len is not greater than 18, so there is
+				 * no actual AAD data; use the IV after the op
+				 * for the B0 block
+				 */
+ aad_data = rte_crypto_op_ctod_offset(op,
+ uint8_t *,
+ ctx->cipher_iv.offset);
+ aad_phys_addr_aead =
+ rte_crypto_op_ctophys_offset(op,
+ ctx->cipher_iv.offset);
+ }
+
+ uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
+ ctx->cipher_iv.length;
+
+ aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
+ aad_len_field_sz,
+ ctx->digest_length, q);
+
+ if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET +
+ (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+ (uint8_t *)&msg_len_be,
+ ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+ } else {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ (uint8_t *)&msg_len_be
+ + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+ - q), q);
+ }
+
+ if (aad_len_field_sz > 0) {
+ *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
+ = rte_bswap16(aad_ccm_real_len);
+
+ if ((aad_ccm_real_len + aad_len_field_sz)
+ % ICP_QAT_HW_CCM_AAD_B0_LEN) {
+ uint8_t pad_len = 0;
+ uint8_t pad_idx = 0;
+
+ pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ((aad_ccm_real_len + aad_len_field_sz) %
+ ICP_QAT_HW_CCM_AAD_B0_LEN);
+ pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+ aad_ccm_real_len + aad_len_field_sz;
+ memset(&aad_data[pad_idx],
+ 0, pad_len);
+ }
+
+ }
+
+ set_cipher_iv_ccm(ctx->cipher_iv.length,
+ ctx->cipher_iv.offset,
+ cipher_param, op, q,
+ aad_len_field_sz);
+
+ }
+
+ cipher_len = op->sym->aead.data.length;
+ cipher_ofs = op->sym->aead.data.offset;
+ auth_len = op->sym->aead.data.length;
+ auth_ofs = op->sym->aead.data.offset;
+
+ auth_param->u1.aad_adr = aad_phys_addr_aead;
+ auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
+ min_ofs = op->sym->aead.data.offset;
+ }
+
+ if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
+ do_sgl = 1;
+
+ /* adjust for chain case */
+ if (do_cipher && do_auth)
+ min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
+
+ if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
+ min_ofs = 0;
+
+ if (unlikely(op->sym->m_dst != NULL)) {
+ /* Out-of-place operation (OOP)
+ * Don't align DMA start. DMA the minimum data-set
+ * so as not to overwrite data in dest buffer
+ */
+ src_buf_start =
+ rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
+ dst_buf_start =
+ rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
+
+ } else {
+ /* In-place operation
+ * Start DMA at nearest aligned address below min_ofs
+ */
+ src_buf_start =
+ rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
+ & QAT_64_BTYE_ALIGN_MASK;
+
+ if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
+ rte_pktmbuf_headroom(op->sym->m_src))
+ > src_buf_start)) {
+			/* alignment has pushed the address before the start
+			 * of the mbuf, so revert and take the performance hit
+			 */
+ src_buf_start =
+ rte_pktmbuf_iova_offset(op->sym->m_src,
+ min_ofs);
+ }
+ dst_buf_start = src_buf_start;
+ }
+
+ if (do_cipher || do_aead) {
+ cipher_param->cipher_offset =
+ (uint32_t)rte_pktmbuf_iova_offset(
+ op->sym->m_src, cipher_ofs) - src_buf_start;
+ cipher_param->cipher_length = cipher_len;
+ } else {
+ cipher_param->cipher_offset = 0;
+ cipher_param->cipher_length = 0;
+ }
+
+ if (do_auth || do_aead) {
+ auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset(
+ op->sym->m_src, auth_ofs) - src_buf_start;
+ auth_param->auth_len = auth_len;
+ } else {
+ auth_param->auth_off = 0;
+ auth_param->auth_len = 0;
+ }
+
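+	/* The DMA region must cover the furthest byte touched by either the
+	 * cipher or the auth region, measured from the common buffer start,
+	 * so src/dst length is the larger of the two end offsets.
+	 */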
+ qat_req->comn_mid.dst_length =
+ qat_req->comn_mid.src_length =
+ (cipher_param->cipher_offset + cipher_param->cipher_length)
+ > (auth_param->auth_off + auth_param->auth_len) ?
+ (cipher_param->cipher_offset + cipher_param->cipher_length)
+ : (auth_param->auth_off + auth_param->auth_len);
+
+ if (do_sgl) {
+
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_SGL);
+ ret = qat_sgl_fill_array(op->sym->m_src,
+ (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)),
+ &cookie->qat_sgl_src,
+ qat_req->comn_mid.src_length,
+ QAT_SYM_SGL_MAX_NUMBER);
+
+ if (unlikely(ret)) {
+ QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
+ return ret;
+ }
+
+ if (likely(op->sym->m_dst == NULL))
+ qat_req->comn_mid.dest_data_addr =
+ qat_req->comn_mid.src_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ else {
+ ret = qat_sgl_fill_array(op->sym->m_dst,
+ (int64_t)(dst_buf_start -
+ rte_pktmbuf_iova(op->sym->m_dst)),
+ &cookie->qat_sgl_dst,
+ qat_req->comn_mid.dst_length,
+ QAT_SYM_SGL_MAX_NUMBER);
+
+ if (unlikely(ret)) {
+ QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
+ return ret;
+ }
+
+ qat_req->comn_mid.src_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ qat_req->comn_mid.dest_data_addr =
+ cookie->qat_sgl_dst_phys_addr;
+ }
+ } else {
+ qat_req->comn_mid.src_data_addr = src_buf_start;
+ qat_req->comn_mid.dest_data_addr = dst_buf_start;
+ }
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+ sizeof(struct icp_qat_fw_la_bulk_req));
+ QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
+ rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ if (do_cipher) {
+ uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
+ uint8_t *,
+ ctx->cipher_iv.offset);
+ QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv_ptr,
+ ctx->cipher_iv.length);
+ }
+
+ if (do_auth) {
+ if (ctx->auth_iv.length) {
+ uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
+ uint8_t *,
+ ctx->auth_iv.offset);
+ QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv_ptr,
+ ctx->auth_iv.length);
+ }
+ QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->auth.digest.data,
+ ctx->digest_length);
+ }
+
+ if (do_aead) {
+ QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
+ ctx->digest_length);
+ QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data,
+ ctx->aad_len);
+ }
+#endif
+ return 0;
+}
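
The BPI helpers in this file rely on a standard identity: single-block CFB
is just an ECB encryption of the IV followed by an XOR with the data, in
both directions. A minimal sketch of the XOR step, assuming the caller has
already ECB-encrypted the IV into keystream[] (hypothetical names):

#include <stdint.h>

/* XOR a partial block against the ECB-encrypted IV (the CFB keystream).
 * Encrypt: in = plaintext,  out = ciphertext.
 * Decrypt: in = ciphertext, out = plaintext.
 */
static void cfb_xor_block(const uint8_t *in, uint8_t *out,
		const uint8_t *keystream, int len)
{
	int i;

	for (i = 0; i < len; i++)
		out[i] = in[i] ^ keystream[i];
}
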
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
new file mode 100644
index 00000000..bc6426c3
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_H_
+#define _QAT_SYM_H_
+
+#include <rte_cryptodev_pmd.h>
+
+#ifdef BUILD_QAT_SYM
+#include <openssl/evp.h>
+
+#include "qat_common.h"
+#include "qat_sym_session.h"
+#include "qat_sym_pmd.h"
+#include "qat_logs.h"
+
+#define BYTE_LENGTH 8
+/* BPI is only used for partial blocks of DES and AES,
+ * so the AES block length can be assumed as the max length for iv, src
+ * and dst
+ */
+#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
+
+/*
+ * Maximum number of SGL entries
+ */
+#define QAT_SYM_SGL_MAX_NUMBER 16
+
+struct qat_sym_session;
+
+struct qat_sym_sgl {
+ qat_sgl_hdr;
+ struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER];
+} __rte_packed __rte_cache_aligned;
+
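+/* Per-op cookie: pre-built SGL tables for src and dst plus their IOVAs,
+ * so the request builder can hand physical SGL addresses to the device
+ * without any address translation on the data path.
+ */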
+struct qat_sym_op_cookie {
+ struct qat_sym_sgl qat_sgl_src;
+ struct qat_sym_sgl qat_sgl_dst;
+ phys_addr_t qat_sgl_src_phys_addr;
+ phys_addr_t qat_sgl_dst_phys_addr;
+};
+
+int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+ void *op_cookie, enum qat_device_gen qat_dev_gen);
+
+
+/** Encrypt a single partial block.
+ * Depends on openssl libcrypto.
+ * Uses ECB-encryption of the IV plus XOR, which gives the same result as
+ * CFB encryption but is more performant.
+ */
+static inline int
+bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
+ uint8_t *iv, int ivlen, int srclen,
+ void *bpi_ctx)
+{
+ EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+ int encrypted_ivlen;
+ uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+ uint8_t *encr = encrypted_iv;
+
+ /* ECB method: encrypt the IV, then XOR this with plaintext */
+ if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+ <= 0)
+ goto cipher_encrypt_err;
+
+ for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+ *dst = *src ^ *encr;
+
+ return 0;
+
+cipher_encrypt_err:
+ QAT_DP_LOG(ERR, "libcrypto ECB cipher encrypt failed");
+ return -EINVAL;
+}
+
+static inline uint32_t
+qat_bpicipher_postprocess(struct qat_sym_session *ctx,
+ struct rte_crypto_op *op)
+{
+ int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ uint8_t last_block_len = block_len > 0 ?
+ sym_op->cipher.data.length % block_len : 0;
+
+ if (last_block_len > 0 &&
+ ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+
+ /* Encrypt last block */
+ uint8_t *last_block, *dst, *iv;
+ uint32_t last_block_offset;
+
+ last_block_offset = sym_op->cipher.data.offset +
+ sym_op->cipher.data.length - last_block_len;
+ last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+ uint8_t *, last_block_offset);
+
+ if (unlikely(sym_op->m_dst != NULL))
+ /* out-of-place operation (OOP) */
+ dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+ uint8_t *, last_block_offset);
+ else
+ dst = last_block;
+
+ if (last_block_len < sym_op->cipher.data.length)
+ /* use previous block ciphertext as IV */
+ iv = dst - block_len;
+ else
+ /* runt block, i.e. less than one full block */
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before post-process:",
+ last_block, last_block_len);
+ if (sym_op->m_dst != NULL)
+ QAT_DP_HEXDUMP_LOG(DEBUG,
+ "BPI: dst before post-process:",
+ dst, last_block_len);
+#endif
+ bpi_cipher_encrypt(last_block, dst, iv, block_len,
+ last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after post-process:",
+ last_block, last_block_len);
+ if (sym_op->m_dst != NULL)
+ QAT_DP_HEXDUMP_LOG(DEBUG,
+ "BPI: dst after post-process:",
+ dst, last_block_len);
+#endif
+ }
+ return sym_op->cipher.data.length - last_block_len;
+}
+
+static inline void
+qat_sym_process_response(void **op, uint8_t *resp)
+{
+
+ struct icp_qat_fw_comn_resp *resp_msg =
+ (struct icp_qat_fw_comn_resp *)resp;
+ struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+ (resp_msg->opaque_data);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comn_resp));
+#endif
+
+ if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+ ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+ resp_msg->comn_hdr.comn_status)) {
+
+ rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ struct qat_sym_session *sess = (struct qat_sym_session *)
+ get_sym_session_private_data(
+ rx_op->sym->session,
+ cryptodev_qat_driver_id);
+
+
+ if (sess->bpi_ctx)
+ qat_bpicipher_postprocess(sess, rx_op);
+ rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+ *op = (void *)rx_op;
+}
+#else
+
+static inline void
+qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused)
+{
+}
+#endif
+#endif /* _QAT_SYM_H_ */
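
Both BPI helpers return the portion of the cipher region that is a whole
number of blocks; only that portion goes to the hardware, while the
trailing runt is ciphered in software. A small worked sketch of the split
(hypothetical names):

#include <stdint.h>

/* Split a DOCSIS BPI cipher region into the hardware part (full blocks)
 * and the software runt; e.g. 100 bytes with a 16-byte block size gives
 * hw_len = 96 and runt_len = 4.
 */
static uint32_t bpi_split(uint32_t data_len, int block_len,
		uint32_t *runt_len)
{
	*runt_len = block_len > 0 ? data_len % block_len : 0;
	return data_len - *runt_len;
}
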
diff --git a/drivers/crypto/qat/qat_crypto_capabilities.h b/drivers/crypto/qat/qat_sym_capabilities.h
index 37a6b7cb..eea08bc7 100644
--- a/drivers/crypto/qat/qat_crypto_capabilities.h
+++ b/drivers/crypto/qat/qat_sym_capabilities.h
@@ -1,9 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Intel Corporation
+ * Copyright(c) 2017-2018 Intel Corporation
*/
-#ifndef _QAT_CRYPTO_CAPABILITIES_H_
-#define _QAT_CRYPTO_CAPABILITIES_H_
+#ifndef _QAT_SYM_CAPABILITIES_H_
+#define _QAT_SYM_CAPABILITIES_H_
#define QAT_BASE_GEN1_SYM_CAPABILITIES \
{ /* SHA1 HMAC */ \
@@ -434,7 +434,7 @@
.algo = RTE_CRYPTO_CIPHER_3DES_CBC, \
.block_size = 8, \
.key_size = { \
- .min = 16, \
+ .min = 8, \
.max = 24, \
.increment = 8 \
}, \
@@ -554,4 +554,4 @@
}, } \
}
-#endif /* _QAT_CRYPTO_CAPABILITIES_H_ */
+#endif /* _QAT_SYM_CAPABILITIES_H_ */
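
The key_size fix in this hunk (min 16 -> 8) matters because a capability
entry advertises the whole legal range: with min 8, max 24 and increment 8
the PMD now accepts 8-, 16- and 24-byte 3DES keys. A sketch of one such
rte_cryptodev capability entry (needs rte_cryptodev.h), trimmed to the
fields visible in this hunk, other fields zero-initialized:

static const struct rte_cryptodev_capabilities tdes_cbc_cap = {
	.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
	.sym = {
		.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.cipher = {
			.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
			.block_size = 8,
			.key_size = {
				.min = 8,
				.max = 24,
				.increment = 8
			},
		},
	},
};
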
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
new file mode 100644
index 00000000..96f442e8
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#include <rte_bus_pci.h>
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_pci.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "qat_logs.h"
+#include "qat_sym.h"
+#include "qat_sym_session.h"
+#include "qat_sym_pmd.h"
+
+uint8_t cryptodev_qat_driver_id;
+
+static const struct rte_cryptodev_capabilities qat_gen1_sym_capabilities[] = {
+ QAT_BASE_GEN1_SYM_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_cryptodev_capabilities qat_gen2_sym_capabilities[] = {
+ QAT_BASE_GEN1_SYM_CAPABILITIES,
+ QAT_EXTRA_GEN2_SYM_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static int qat_sym_qp_release(struct rte_cryptodev *dev,
+ uint16_t queue_pair_id);
+
+static int qat_sym_dev_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+static int qat_sym_dev_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+static void qat_sym_dev_stop(__rte_unused struct rte_cryptodev *dev)
+{
+ return;
+}
+
+static int qat_sym_dev_close(struct rte_cryptodev *dev)
+{
+ int i, ret;
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ ret = qat_sym_qp_release(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void qat_sym_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct qat_sym_dev_private *internals = dev->data->dev_private;
+ const struct qat_qp_hw_data *sym_hw_qps =
+ qat_gen_config[internals->qat_dev->qat_dev_gen]
+ .qp_hw_data[QAT_SERVICE_SYMMETRIC];
+
+ if (info != NULL) {
+ info->max_nb_queue_pairs =
+ qat_qps_per_service(sym_hw_qps, QAT_SERVICE_SYMMETRIC);
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = internals->qat_dev_capabilities;
+ info->driver_id = cryptodev_qat_driver_id;
+ /* No limit of number of sessions */
+ info->sym.max_nb_sessions = 0;
+ }
+}
+
+static void qat_sym_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ struct qat_common_stats qat_stats = {0};
+ struct qat_sym_dev_private *qat_priv;
+
+ if (stats == NULL || dev == NULL) {
+ QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
+ return;
+ }
+ qat_priv = dev->data->dev_private;
+
+ qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_SYMMETRIC);
+ stats->enqueued_count = qat_stats.enqueued_count;
+ stats->dequeued_count = qat_stats.dequeued_count;
+ stats->enqueue_err_count = qat_stats.enqueue_err_count;
+ stats->dequeue_err_count = qat_stats.dequeue_err_count;
+}
+
+static void qat_sym_stats_reset(struct rte_cryptodev *dev)
+{
+ struct qat_sym_dev_private *qat_priv;
+
+ if (dev == NULL) {
+ QAT_LOG(ERR, "invalid cryptodev ptr %p", dev);
+ return;
+ }
+ qat_priv = dev->data->dev_private;
+
+ qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_SYMMETRIC);
+
+}
+
+static int qat_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+{
+ struct qat_sym_dev_private *qat_private = dev->data->dev_private;
+
+ QAT_LOG(DEBUG, "Release sym qp %u on device %d",
+ queue_pair_id, dev->data->dev_id);
+
+ qat_private->qat_dev->qps_in_use[QAT_SERVICE_SYMMETRIC][queue_pair_id]
+ = NULL;
+
+ return qat_qp_release((struct qat_qp **)
+ &(dev->data->queue_pairs[queue_pair_id]));
+}
+
+static int qat_sym_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool __rte_unused)
+{
+ struct qat_qp *qp;
+ int ret = 0;
+ uint32_t i;
+ struct qat_qp_config qat_qp_conf;
+
+ struct qat_qp **qp_addr =
+ (struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
+ struct qat_sym_dev_private *qat_private = dev->data->dev_private;
+ const struct qat_qp_hw_data *sym_hw_qps =
+ qat_gen_config[qat_private->qat_dev->qat_dev_gen]
+ .qp_hw_data[QAT_SERVICE_SYMMETRIC];
+ const struct qat_qp_hw_data *qp_hw_data = sym_hw_qps + qp_id;
+
+ /* If qp is already in use free ring memory and qp metadata. */
+ if (*qp_addr != NULL) {
+ ret = qat_sym_qp_release(dev, qp_id);
+ if (ret < 0)
+ return ret;
+ }
+ if (qp_id >= qat_qps_per_service(sym_hw_qps, QAT_SERVICE_SYMMETRIC)) {
+ QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
+ return -EINVAL;
+ }
+
+ qat_qp_conf.hw = qp_hw_data;
+ qat_qp_conf.build_request = qat_sym_build_request;
+ qat_qp_conf.cookie_size = sizeof(struct qat_sym_op_cookie);
+ qat_qp_conf.nb_descriptors = qp_conf->nb_descriptors;
+ qat_qp_conf.socket_id = socket_id;
+ qat_qp_conf.service_str = "sym";
+
+ ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf);
+ if (ret != 0)
+ return ret;
+
+ /* store a link to the qp in the qat_pci_device */
+ qat_private->qat_dev->qps_in_use[QAT_SERVICE_SYMMETRIC][qp_id]
+ = *qp_addr;
+
+ qp = (struct qat_qp *)*qp_addr;
+
+ for (i = 0; i < qp->nb_descriptors; i++) {
+
+ struct qat_sym_op_cookie *cookie =
+ qp->op_cookies[i];
+
+ cookie->qat_sgl_src_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_sym_op_cookie,
+ qat_sgl_src);
+
+ cookie->qat_sgl_dst_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_sym_op_cookie,
+ qat_sgl_dst);
+ }
+
+ return ret;
+}
+
+static struct rte_cryptodev_ops crypto_qat_ops = {
+
+ /* Device related operations */
+ .dev_configure = qat_sym_dev_config,
+ .dev_start = qat_sym_dev_start,
+ .dev_stop = qat_sym_dev_stop,
+ .dev_close = qat_sym_dev_close,
+ .dev_infos_get = qat_sym_dev_info_get,
+
+ .stats_get = qat_sym_stats_get,
+ .stats_reset = qat_sym_stats_reset,
+ .queue_pair_setup = qat_sym_qp_setup,
+ .queue_pair_release = qat_sym_qp_release,
+ .queue_pair_count = NULL,
+
+ /* Crypto related operations */
+ .sym_session_get_size = qat_sym_session_get_private_size,
+ .sym_session_configure = qat_sym_session_configure,
+ .sym_session_clear = qat_sym_session_clear
+};
+
+static uint16_t
+qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+}
+
+static uint16_t
+qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+}
+
+/* An rte_driver is needed in the registration of both the device and the
+ * driver with cryptodev.
+ * The actual QAT PCI device's rte_driver can't be used as its name
+ * represents the whole PCI device with all services. Think of this as a
+ * holder for a name for the crypto part of the PCI device.
+ */
+static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
+static const struct rte_driver cryptodev_qat_sym_driver = {
+ .name = qat_sym_drv_name,
+ .alias = qat_sym_drv_name
+};
+
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = qat_pci_dev->pci_dev->device.numa_node,
+ .private_data_size = sizeof(struct qat_sym_dev_private)
+ };
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ struct rte_cryptodev *cryptodev;
+ struct qat_sym_dev_private *internals;
+
+ snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+ qat_pci_dev->name, "sym");
+ QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
+
+ /* Populate subset device to use in cryptodev device creation */
+ qat_pci_dev->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
+ qat_pci_dev->sym_rte_dev.numa_node =
+ qat_pci_dev->pci_dev->device.numa_node;
+ qat_pci_dev->sym_rte_dev.devargs = NULL;
+
+ cryptodev = rte_cryptodev_pmd_create(name,
+ &(qat_pci_dev->sym_rte_dev), &init_params);
+
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ qat_pci_dev->sym_rte_dev.name = cryptodev->data->name;
+ cryptodev->driver_id = cryptodev_qat_driver_id;
+ cryptodev->dev_ops = &crypto_qat_ops;
+
+ cryptodev->enqueue_burst = qat_sym_pmd_enqueue_op_burst;
+ cryptodev->dequeue_burst = qat_sym_pmd_dequeue_op_burst;
+
+ cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+
+ internals = cryptodev->data->dev_private;
+ internals->qat_dev = qat_pci_dev;
+ qat_pci_dev->sym_dev = internals;
+
+ internals->sym_dev_id = cryptodev->data->dev_id;
+ switch (qat_pci_dev->qat_dev_gen) {
+ case QAT_GEN1:
+ internals->qat_dev_capabilities = qat_gen1_sym_capabilities;
+ break;
+ case QAT_GEN2:
+ internals->qat_dev_capabilities = qat_gen2_sym_capabilities;
+ break;
+ default:
+ internals->qat_dev_capabilities = qat_gen2_sym_capabilities;
+ QAT_LOG(DEBUG,
+ "QAT gen %d capabilities unknown, default to GEN2",
+ qat_pci_dev->qat_dev_gen);
+ break;
+ }
+
+ QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
+ cryptodev->data->name, internals->sym_dev_id);
+ return 0;
+}
+
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+ struct rte_cryptodev *cryptodev;
+
+ if (qat_pci_dev == NULL)
+ return -ENODEV;
+ if (qat_pci_dev->sym_dev == NULL)
+ return 0;
+
+ /* free crypto device */
+ cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->sym_dev_id);
+ rte_cryptodev_pmd_destroy(cryptodev);
+ qat_pci_dev->sym_rte_dev.name = NULL;
+ qat_pci_dev->sym_dev = NULL;
+
+ return 0;
+}
+
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+ cryptodev_qat_sym_driver,
+ cryptodev_qat_driver_id);
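
qat_sym_qp_setup above computes each cookie's SGL IOVAs once, at queue
setup time, by adding the field offset to the mempool object's IOVA, so
the data path never has to translate addresses. A minimal sketch of that
pattern (hypothetical struct, assuming an IOVA-contiguous mempool object):

#include <stddef.h>
#include <stdint.h>
#include <rte_mempool.h>

struct example_cookie {
	uint64_t other_state;
	uint8_t sgl_table[64]; /* embedded DMA-able field */
};

/* IOVA of an embedded field = IOVA of the enclosing mempool object
 * plus the field's offset within the struct.
 */
static rte_iova_t field_iova(struct example_cookie *obj)
{
	return rte_mempool_virt2iova(obj) +
		offsetof(struct example_cookie, sgl_table);
}
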
diff --git a/drivers/crypto/qat/qat_sym_pmd.h b/drivers/crypto/qat/qat_sym_pmd.h
new file mode 100644
index 00000000..d3432854
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_pmd.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_PMD_H_
+#define _QAT_SYM_PMD_H_
+
+#ifdef BUILD_QAT_SYM
+
+#include <rte_cryptodev.h>
+
+#include "qat_sym_capabilities.h"
+#include "qat_device.h"
+
+/**< Intel(R) QAT Symmetric Crypto PMD device name */
+#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat
+
+extern uint8_t cryptodev_qat_driver_id;
+
+/** Private data structure for a QAT device offering only the symmetric
+ * crypto service. There can be one of these on each qat_pci_device (VF);
+ * in future there may also be private data structures for other services.
+ */
+struct qat_sym_dev_private {
+ struct qat_pci_device *qat_dev;
+ /**< The qat pci device hosting the service */
+ uint8_t sym_dev_id;
+ /**< Device instance for this rte_cryptodev */
+ const struct rte_cryptodev_capabilities *qat_dev_capabilities;
+	/**< QAT device symmetric crypto capabilities */
+};
+
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev);
+
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev);
+
+#endif
+#endif /* _QAT_SYM_PMD_H_ */
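
The private struct above is what every ops callback recovers through
dev->data->dev_private. A short sketch of the typical access pattern,
with a hypothetical callback name (qat headers assumed on the include
path):

static int
example_dev_op(struct rte_cryptodev *dev)
{
	/* Per-service private data stored at device-create time. */
	struct qat_sym_dev_private *priv = dev->data->dev_private;

	/* From here a callback can reach the shared PCI device ... */
	struct qat_pci_device *qat_dev = priv->qat_dev;
	/* ... or the advertised capability table. */
	const struct rte_cryptodev_capabilities *caps =
		priv->qat_dev_capabilities;

	RTE_SET_USED(qat_dev);
	RTE_SET_USED(caps);
	return 0;
}
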
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_sym_session.c
index 26f854c2..1d58220a 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -1,50 +1,12 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015-2016 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015-2017 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
*/
+#include <openssl/sha.h> /* Needed to calculate pre-compute values */
+#include <openssl/aes.h> /* Needed to calculate pre-compute values */
+#include <openssl/md5.h> /* Needed to calculate pre-compute values */
+#include <openssl/evp.h> /* Needed for bpi runt block processing */
+
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
@@ -53,13 +15,714 @@
#include <rte_malloc.h>
#include <rte_crypto_sym.h>
-#include "../qat_logs.h"
+#include "qat_logs.h"
+#include "qat_sym_session.h"
+#include "qat_sym_pmd.h"
-#include <openssl/sha.h> /* Needed to calculate pre-compute values */
-#include <openssl/aes.h> /* Needed to calculate pre-compute values */
-#include <openssl/md5.h> /* Needed to calculate pre-compute values */
+/** Frees a context previously created with bpi_cipher_ctx_init.
+ * Depends on openssl libcrypto.
+ */
+static void
+bpi_cipher_ctx_free(void *bpi_ctx)
+{
+ if (bpi_ctx != NULL)
+ EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
+}
+
+/** Creates an AES or DES context in ECB mode.
+ * Depends on openssl libcrypto.
+ */
+static int
+bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
+ enum rte_crypto_cipher_operation direction __rte_unused,
+ uint8_t *key, void **ctx)
+{
+ const EVP_CIPHER *algo = NULL;
+ int ret;
+ *ctx = EVP_CIPHER_CTX_new();
+
+ if (*ctx == NULL) {
+ ret = -ENOMEM;
+ goto ctx_init_err;
+ }
+
+ if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
+ algo = EVP_des_ecb();
+ else
+ algo = EVP_aes_128_ecb();
+
+	/* IV will be ECB encrypted whether direction is encrypt or decrypt */
+ if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
+ ret = -EINVAL;
+ goto ctx_init_err;
+ }
+
+ return 0;
+
+ctx_init_err:
+ if (*ctx != NULL)
+ EVP_CIPHER_CTX_free(*ctx);
+ return ret;
+}
+
+static int
+qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
+ struct qat_sym_dev_private *internals)
+{
+ int i = 0;
+ const struct rte_cryptodev_capabilities *capability;
+
+ while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
+ RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ continue;
+
+ if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
+ continue;
+
+ if (capability->sym.cipher.algo == algo)
+ return 1;
+ }
+ return 0;
+}
+
+static int
+qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
+ struct qat_sym_dev_private *internals)
+{
+ int i = 0;
+ const struct rte_cryptodev_capabilities *capability;
+
+ while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
+ RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+ if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ continue;
+
+ if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
+ continue;
+
+ if (capability->sym.auth.algo == algo)
+ return 1;
+ }
+ return 0;
+}
+
+void
+qat_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+ struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
+
+ if (sess_priv) {
+ if (s->bpi_ctx)
+ bpi_cipher_ctx_free(s->bpi_ctx);
+ memset(s, 0, qat_sym_session_get_private_size(dev));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
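+/* Map an xform chain onto a QAT firmware command id: a lone cipher or
+ * auth xform selects the single-service command, an AEAD xform selects
+ * CIPHER_HASH or HASH_CIPHER depending on algorithm and direction, and
+ * a two-element chain follows its own order.
+ */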
+static int
+qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+{
+ /* Cipher Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
+ return ICP_QAT_FW_LA_CMD_CIPHER;
+
+ /* Authentication Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
+ return ICP_QAT_FW_LA_CMD_AUTH;
+
+ /* AEAD */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		/* AES-GCM and AES-CCM work in different orders:
+		 * GCM first encrypts and then generates the hash, whereas
+		 * AES-CCM first generates the hash and then encrypts. A
+		 * similar relation applies to decryption.
+		 */
+ if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ else
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ else
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ else
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ }
+
+ if (xform->next == NULL)
+ return -1;
+
+ /* Cipher then Authenticate */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+
+ /* Authenticate then Cipher */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+
+ return -1;
+}
+
+static struct rte_crypto_auth_xform *
+qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return &xform->auth;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+static struct rte_crypto_cipher_xform *
+qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return &xform->cipher;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+int
+qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session)
+{
+ struct qat_sym_dev_private *internals = dev->data->dev_private;
+ struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ int ret;
+
+ /* Get cipher xform from crypto xform chain */
+ cipher_xform = qat_get_cipher_xform(xform);
+
+ session->cipher_iv.offset = cipher_xform->iv.offset;
+ session->cipher_iv.length = cipher_xform->iv.length;
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ if (qat_sym_validate_aes_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ if (qat_sym_validate_aes_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ if (qat_sym_validate_snow3g_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid SNOW 3G cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_NULL:
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ if (qat_sym_validate_kasumi_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid KASUMI cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ if (qat_sym_validate_3des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid 3DES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_DES_CBC:
+ if (qat_sym_validate_des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid DES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CTR:
+ if (qat_sym_validate_3des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid 3DES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
+ ret = bpi_cipher_ctx_init(
+ cipher_xform->algo,
+ cipher_xform->op,
+ cipher_xform->key.data,
+ &session->bpi_ctx);
+ if (ret != 0) {
+ QAT_LOG(ERR, "failed to create DES BPI ctx");
+ goto error_out;
+ }
+ if (qat_sym_validate_des_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid DES cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
+ ret = bpi_cipher_ctx_init(
+ cipher_xform->algo,
+ cipher_xform->op,
+ cipher_xform->key.data,
+ &session->bpi_ctx);
+ if (ret != 0) {
+ QAT_LOG(ERR, "failed to create AES BPI ctx");
+ goto error_out;
+ }
+ if (qat_sym_validate_aes_docsisbpi_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES DOCSISBPI key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+ if (!qat_is_cipher_alg_supported(
+ cipher_xform->algo, internals)) {
+ QAT_LOG(ERR, "%s not supported on this device",
+ rte_crypto_cipher_algorithm_strings
+ [cipher_xform->algo]);
+ ret = -ENOTSUP;
+ goto error_out;
+ }
+ if (qat_sym_validate_zuc_key(cipher_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid ZUC cipher key size");
+ ret = -EINVAL;
+ goto error_out;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_F8:
+ case RTE_CRYPTO_CIPHER_AES_XTS:
+ case RTE_CRYPTO_CIPHER_ARC4:
+ QAT_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
+ cipher_xform->algo);
+ ret = -ENOTSUP;
+ goto error_out;
+ default:
+ QAT_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
+ cipher_xform->algo);
+ ret = -EINVAL;
+ goto error_out;
+ }
+
+ if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ else
+ session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+
+ if (qat_sym_session_aead_create_cd_cipher(session,
+ cipher_xform->key.data,
+ cipher_xform->key.length)) {
+ ret = -EINVAL;
+ goto error_out;
+ }
+
+ return 0;
+
+error_out:
+ if (session->bpi_ctx) {
+ bpi_cipher_ctx_free(session->bpi_ctx);
+ session->bpi_ctx = NULL;
+ }
+ return ret;
+}
-#include "qat_algs.h"
+int
+qat_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = qat_sym_session_set_parameters(dev, xform, sess_private_data);
+ if (ret != 0) {
+ QAT_LOG(ERR,
+ "Crypto QAT PMD: failed to configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+int
+qat_sym_session_set_parameters(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private)
+{
+ struct qat_sym_session *session = session_private;
+ int ret;
+ int qat_cmd_id;
+
+ /* Set context descriptor physical address */
+ session->cd_paddr = rte_mempool_virt2iova(session) +
+ offsetof(struct qat_sym_session, cd);
+
+ session->min_qat_dev_gen = QAT_GEN1;
+
+ /* Get requested QAT command id */
+ qat_cmd_id = qat_get_cmd_id(xform);
+ if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
+ QAT_LOG(ERR, "Unsupported xform chain requested");
+ return -ENOTSUP;
+ }
+ session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
+ switch (session->qat_cmd) {
+ case ICP_QAT_FW_LA_CMD_CIPHER:
+ ret = qat_sym_session_configure_cipher(dev, xform, session);
+ if (ret < 0)
+ return ret;
+ break;
+ case ICP_QAT_FW_LA_CMD_AUTH:
+ ret = qat_sym_session_configure_auth(dev, xform, session);
+ if (ret < 0)
+ return ret;
+ break;
+ case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ ret = qat_sym_session_configure_aead(xform,
+ session);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = qat_sym_session_configure_cipher(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ ret = qat_sym_session_configure_auth(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ }
+ break;
+ case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ ret = qat_sym_session_configure_aead(xform,
+ session);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = qat_sym_session_configure_auth(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ ret = qat_sym_session_configure_cipher(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ }
+ break;
+ case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
+ case ICP_QAT_FW_LA_CMD_TRNG_TEST:
+ case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
+ case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
+ case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
+ case ICP_QAT_FW_LA_CMD_MGF1:
+ case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
+ case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
+ case ICP_QAT_FW_LA_CMD_DELIMITER:
+ QAT_LOG(ERR, "Unsupported Service %u",
+ session->qat_cmd);
+ return -ENOTSUP;
+ default:
+ QAT_LOG(ERR, "Unsupported Service %u",
+ session->qat_cmd);
+ return -ENOTSUP;
+ }
+
+ return 0;
+}
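+
+/*
+ * Illustrative sketch only, not driver code: a cipher->auth xform chain that
+ * qat_get_cmd_id() maps to ICP_QAT_FW_LA_CMD_CIPHER_HASH above. Keys and IVs
+ * are omitted and the guard macro is hypothetical; only the chaining matters.
+ */
+#ifdef QAT_SYM_SESSION_EXAMPLES
+static void
+example_cipher_then_auth(struct rte_crypto_sym_xform *cipher,
+		struct rte_crypto_sym_xform *auth)
+{
+	cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+	cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+	cipher->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
+	cipher->next = auth;		/* cipher first, then auth */
+
+	auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+	auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+	auth->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
+	auth->auth.digest_length = 20;	/* full SHA-1 digest */
+	auth->next = NULL;
+}
+#endif /* QAT_SYM_SESSION_EXAMPLES */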
+
+int
+qat_sym_session_configure_auth(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session)
+{
+ struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
+ struct qat_sym_dev_private *internals = dev->data->dev_private;
+ uint8_t *key_data = auth_xform->key.data;
+ uint8_t key_length = auth_xform->key.length;
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
+ break;
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ if (qat_sym_validate_aes_key(auth_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES key size");
+ return -EINVAL;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+
+ break;
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
+ break;
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
+ break;
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
+ QAT_LOG(ERR, "%s not supported on this device",
+ rte_crypto_auth_algorithm_strings
+ [auth_xform->algo]);
+ return -ENOTSUP;
+ }
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
+ auth_xform->algo);
+ return -ENOTSUP;
+ default:
+ QAT_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
+ auth_xform->algo);
+ return -EINVAL;
+ }
+
+ session->auth_iv.offset = auth_xform->iv.offset;
+ session->auth_iv.length = auth_xform->iv.length;
+
+ if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+ if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
+ session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ /*
+			 * Build the cipher descriptor content first,
+			 * then the authentication content
+ */
+
+ if (qat_sym_session_aead_create_cd_cipher(session,
+ auth_xform->key.data,
+ auth_xform->key.length))
+ return -EINVAL;
+
+ if (qat_sym_session_aead_create_cd_auth(session,
+ key_data,
+ key_length,
+ 0,
+ auth_xform->digest_length,
+ auth_xform->op))
+ return -EINVAL;
+ } else {
+ session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+ /*
+			 * Build the authentication descriptor content
+			 * first, then the cipher content
+ */
+
+ if (qat_sym_session_aead_create_cd_auth(session,
+ key_data,
+ key_length,
+ 0,
+ auth_xform->digest_length,
+ auth_xform->op))
+ return -EINVAL;
+
+ if (qat_sym_session_aead_create_cd_cipher(session,
+ auth_xform->key.data,
+ auth_xform->key.length))
+ return -EINVAL;
+ }
+		/* Restore to authentication only */
+ session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
+ } else {
+ if (qat_sym_session_aead_create_cd_auth(session,
+ key_data,
+ key_length,
+ 0,
+ auth_xform->digest_length,
+ auth_xform->op))
+ return -EINVAL;
+ }
+
+ session->digest_length = auth_xform->digest_length;
+ return 0;
+}
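+
+/*
+ * Illustrative sketch only: an auth-only AES-GMAC xform takes the special
+ * path above (cipher CD built before auth CD for GENERATE, the reverse for
+ * VERIFY, then qat_cmd restored to AUTH). The sizes are assumptions:
+ *
+ *	xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ *	xform.auth.algo = RTE_CRYPTO_AUTH_AES_GMAC;
+ *	xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ *	xform.auth.key.length = 16;	(AES-128)
+ *	xform.auth.iv.length = 12;	(GCM-style IV)
+ *	xform.auth.digest_length = 16;
+ */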
+
+int
+qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session)
+{
+ struct rte_crypto_aead_xform *aead_xform = &xform->aead;
+ enum rte_crypto_auth_operation crypto_operation;
+
+ /*
+ * Store AEAD IV parameters as cipher IV,
+ * to avoid unnecessary memory usage
+ */
+ session->cipher_iv.offset = xform->aead.iv.offset;
+ session->cipher_iv.length = xform->aead.iv.length;
+
+ switch (aead_xform->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ if (qat_sym_validate_aes_key(aead_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES key size");
+ return -EINVAL;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ if (qat_sym_validate_aes_key(aead_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ QAT_LOG(ERR, "Invalid AES key size");
+ return -EINVAL;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
+ break;
+ default:
+		QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u",
+ aead_xform->algo);
+ return -EINVAL;
+ }
+
+ if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
+ aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
+ (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
+ aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
+ session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ /*
+		 * Build the cipher descriptor content first,
+		 * then the authentication content
+ */
+ crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
+ RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
+
+ if (qat_sym_session_aead_create_cd_cipher(session,
+ aead_xform->key.data,
+ aead_xform->key.length))
+ return -EINVAL;
+
+ if (qat_sym_session_aead_create_cd_auth(session,
+ aead_xform->key.data,
+ aead_xform->key.length,
+ aead_xform->aad_length,
+ aead_xform->digest_length,
+ crypto_operation))
+ return -EINVAL;
+ } else {
+ session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+ /*
+		 * Build the authentication descriptor content first,
+		 * then the cipher content
+ */
+
+ crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
+ RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
+
+ if (qat_sym_session_aead_create_cd_auth(session,
+ aead_xform->key.data,
+ aead_xform->key.length,
+ aead_xform->aad_length,
+ aead_xform->digest_length,
+ crypto_operation))
+ return -EINVAL;
+
+ if (qat_sym_session_aead_create_cd_cipher(session,
+ aead_xform->key.data,
+ aead_xform->key.length))
+ return -EINVAL;
+ }
+
+ session->digest_length = aead_xform->digest_length;
+ return 0;
+}
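+
+/*
+ * Illustrative sketch only: a minimal AES-128-GCM AEAD xform reaching the
+ * GCM branch above; all sizes are assumptions for illustration:
+ *
+ *	xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ *	xform.aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
+ *	xform.aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
+ *	xform.aead.key.length = 16;
+ *	xform.aead.iv.length = 12;
+ *	xform.aead.digest_length = 16;
+ *	xform.aead.aad_length = 16;
+ */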
+
+unsigned int qat_sym_session_get_private_size(
+ struct rte_cryptodev *dev __rte_unused)
+{
+ return RTE_ALIGN_CEIL(sizeof(struct qat_sym_session), 8);
+}
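+
+/*
+ * Illustrative sketch (assuming the standard 18.08 API): applications size
+ * the session-private mempool element from this callback's result, e.g.
+ *
+ *	unsigned int sz = rte_cryptodev_sym_get_private_session_size(dev_id);
+ *	struct rte_mempool *mp = rte_mempool_create("sess_priv", nb_sess,
+ *		sz, cache_sz, 0, NULL, NULL, NULL, NULL, socket_id, 0);
+ */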
/* returns block size in bytes per cipher algo */
int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
@@ -74,7 +737,7 @@ int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
case ICP_QAT_HW_CIPHER_ALGO_AES256:
return ICP_QAT_HW_AES_BLK_SZ;
default:
- PMD_DRV_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
+ QAT_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
return -EFAULT;
};
return -EFAULT;
@@ -121,18 +784,18 @@ static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
- case ICP_QAT_HW_AUTH_ALGO_NULL:
- return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
- QAT_HW_DEFAULT_ALIGNMENT);
case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_NULL:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
/* return maximum state1 size in this case */
return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
default:
- PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
+ QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
return -EFAULT;
};
return -EFAULT;
@@ -158,7 +821,7 @@ static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
/* return maximum digest size in this case */
return ICP_QAT_HW_SHA512_STATE1_SZ;
default:
- PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
+ QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
return -EFAULT;
};
return -EFAULT;
@@ -186,7 +849,7 @@ static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
/* return maximum block size in this case */
return SHA512_CBLOCK;
default:
- PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
+ QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
return -EFAULT;
};
return -EFAULT;
@@ -270,7 +933,6 @@ static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
uint64_t *hash_state_out_be64;
int i;
- PMD_INIT_FUNC_TRACE();
digest_size = qat_hash_get_digest_size(hash_alg);
if (digest_size <= 0)
return -EFAULT;
@@ -319,7 +981,7 @@ static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
return -EFAULT;
break;
default:
- PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
+ QAT_LOG(ERR, "invalid hash alg %u", hash_alg);
return -EFAULT;
}
@@ -329,7 +991,7 @@ static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
#define HMAC_OPAD_VALUE 0x5c
#define HASH_XCBC_PRECOMP_KEY_NUM 3
-static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
+static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
const uint8_t *auth_key,
uint16_t auth_keylen,
uint8_t *p_state_buf,
@@ -340,7 +1002,6 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
int i;
- PMD_INIT_FUNC_TRACE();
if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
static uint8_t qat_aes_xcbc_key_seed[
ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
@@ -360,7 +1021,7 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
in = rte_zmalloc("working mem for key",
ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
if (in == NULL) {
- PMD_DRV_LOG(ERR, "Failed to alloc memory");
+ QAT_LOG(ERR, "Failed to alloc memory");
return -ENOMEM;
}
@@ -395,7 +1056,7 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
in = rte_zmalloc("working mem for key",
ICP_QAT_HW_GALOIS_H_SZ, 16);
if (in == NULL) {
- PMD_DRV_LOG(ERR, "Failed to alloc memory");
+ QAT_LOG(ERR, "Failed to alloc memory");
return -ENOMEM;
}
@@ -420,7 +1081,7 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
memset(opad, 0, block_size);
if (auth_keylen > (unsigned int)block_size) {
- PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
+ QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
return -EFAULT;
}
rte_memcpy(ipad, auth_key, auth_keylen);
@@ -437,7 +1098,7 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
memset(ipad, 0, block_size);
memset(opad, 0, block_size);
- PMD_DRV_LOG(ERR, "ipad precompute failed");
+ QAT_LOG(ERR, "ipad precompute failed");
return -EFAULT;
}
@@ -449,7 +1110,7 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
memset(ipad, 0, block_size);
memset(opad, 0, block_size);
- PMD_DRV_LOG(ERR, "opad precompute failed");
+ QAT_LOG(ERR, "opad precompute failed");
return -EFAULT;
}
@@ -459,10 +1120,10 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
return 0;
}
-void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
- enum qat_crypto_proto_flag proto_flags)
+static void
+qat_sym_session_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
+ enum qat_sym_proto_flag proto_flags)
{
- PMD_INIT_FUNC_TRACE();
header->hdr_flags =
ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
@@ -508,11 +1169,11 @@ void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
* and set its protocol flag in both cipher and auth part of content
* descriptor building function
*/
-static enum qat_crypto_proto_flag
+static enum qat_sym_proto_flag
qat_get_crypto_proto_flag(uint16_t flags)
{
int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
- enum qat_crypto_proto_flag qat_proto_flag =
+ enum qat_sym_proto_flag qat_proto_flag =
QAT_CRYPTO_PROTO_FLAG_NONE;
switch (proto) {
@@ -527,7 +1188,7 @@ qat_get_crypto_proto_flag(uint16_t flags)
return qat_proto_flag;
}
-int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
+int qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cdesc,
uint8_t *cipherkey,
uint32_t cipherkeylen)
{
@@ -539,13 +1200,12 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
enum icp_qat_hw_cipher_convert key_convert;
- enum qat_crypto_proto_flag qat_proto_flag =
+ enum qat_sym_proto_flag qat_proto_flag =
QAT_CRYPTO_PROTO_FLAG_NONE;
uint32_t total_key_size;
uint16_t cipher_offset, cd_size;
uint32_t wordIndex = 0;
uint32_t *temp_key = NULL;
- PMD_INIT_FUNC_TRACE();
if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
@@ -570,7 +1230,7 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
ICP_QAT_FW_SLICE_DRAM_WR);
cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
- PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
+ QAT_LOG(ERR, "Invalid param, must be a cipher command.");
return -EFAULT;
}
@@ -631,7 +1291,7 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
header->service_cmd_id = cdesc->qat_cmd;
- qat_alg_init_common_hdr(header, qat_proto_flag);
+ qat_sym_session_init_common_hdr(header, qat_proto_flag);
cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
cipher->cipher_config.val =
@@ -662,11 +1322,19 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
if (total_key_size > cipherkeylen) {
uint32_t padding_size = total_key_size-cipherkeylen;
if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
- && (cipherkeylen == QAT_3DES_KEY_SZ_OPT2))
+ && (cipherkeylen == QAT_3DES_KEY_SZ_OPT2)) {
 			/* K3 not provided so use K1 = K3 */
memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
- else
+ } else if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
+ && (cipherkeylen == QAT_3DES_KEY_SZ_OPT3)) {
+			/* K2 and K3 not provided so use K1 = K2 = K3 */
+ memcpy(cdesc->cd_cur_ptr, cipherkey,
+ cipherkeylen);
+ memcpy(cdesc->cd_cur_ptr+cipherkeylen,
+ cipherkey, cipherkeylen);
+ } else
memset(cdesc->cd_cur_ptr, 0, padding_size);
+
cdesc->cd_cur_ptr += padding_size;
}
cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
@@ -675,7 +1343,7 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
return 0;
}
-int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
+int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
uint8_t *authkey,
uint32_t authkeylen,
uint32_t aad_length,
@@ -699,11 +1367,9 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
uint32_t *aad_len = NULL;
uint32_t wordIndex = 0;
uint32_t *pTempKey;
- enum qat_crypto_proto_flag qat_proto_flag =
+ enum qat_sym_proto_flag qat_proto_flag =
QAT_CRYPTO_PROTO_FLAG_NONE;
- PMD_INIT_FUNC_TRACE();
-
if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
ICP_QAT_FW_SLICE_AUTH);
@@ -721,7 +1387,7 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
ICP_QAT_FW_SLICE_DRAM_WR);
cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
- PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
+ QAT_LOG(ERR, "Invalid param, must be a hash command.");
return -EFAULT;
}
@@ -764,51 +1430,51 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
*/
switch (cdesc->qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
- PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ QAT_LOG(ERR, "(SHA)precompute failed");
return -EFAULT;
}
state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA224:
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
- PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ QAT_LOG(ERR, "(SHA)precompute failed");
return -EFAULT;
}
state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
- PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ QAT_LOG(ERR, "(SHA)precompute failed");
return -EFAULT;
}
state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_SHA384:
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
- PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ QAT_LOG(ERR, "(SHA)precompute failed");
return -EFAULT;
}
state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
- PMD_DRV_LOG(ERR, "(SHA)precompute failed");
+ QAT_LOG(ERR, "(SHA)precompute failed");
return -EFAULT;
}
state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
&state2_size)) {
- PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
+ QAT_LOG(ERR, "(XCBC)precompute failed");
return -EFAULT;
}
break;
@@ -816,10 +1482,10 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
- if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
+ if (qat_sym_do_precomputes(cdesc->qat_hash_alg,
authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
&state2_size)) {
- PMD_DRV_LOG(ERR, "(GCM)precompute failed");
+ QAT_LOG(ERR, "(GCM)precompute failed");
return -EFAULT;
}
/*
@@ -876,10 +1542,10 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
break;
case ICP_QAT_HW_AUTH_ALGO_MD5:
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
authkey, authkeylen, cdesc->cd_cur_ptr,
&state1_size)) {
- PMD_DRV_LOG(ERR, "(MD5)precompute failed");
+ QAT_LOG(ERR, "(MD5)precompute failed");
return -EFAULT;
}
state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
@@ -898,14 +1564,13 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
if (aad_length > 0) {
aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
- ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ ICP_QAT_HW_CCM_AAD_LEN_INFO;
auth_param->u2.aad_sz =
- RTE_ALIGN_CEIL(aad_length,
- ICP_QAT_HW_CCM_AAD_ALIGNMENT);
+ RTE_ALIGN_CEIL(aad_length,
+ ICP_QAT_HW_CCM_AAD_ALIGNMENT);
} else {
auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
}
-
cdesc->aad_len = aad_length;
hash->auth_counter.counter = 0;
@@ -935,12 +1600,12 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
break;
default:
- PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
+ QAT_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
return -EFAULT;
}
/* Request template setup */
- qat_alg_init_common_hdr(header, qat_proto_flag);
+ qat_sym_session_init_common_hdr(header, qat_proto_flag);
header->service_cmd_id = cdesc->qat_cmd;
/* Auth CD config setup */
@@ -966,7 +1631,7 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
return 0;
}
-int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+int qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
switch (key_len) {
case ICP_QAT_HW_AES_128_KEY_SZ:
@@ -984,7 +1649,7 @@ int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
return 0;
}
-int qat_alg_validate_aes_docsisbpi_key(int key_len,
+int qat_sym_validate_aes_docsisbpi_key(int key_len,
enum icp_qat_hw_cipher_algo *alg)
{
switch (key_len) {
@@ -997,7 +1662,7 @@ int qat_alg_validate_aes_docsisbpi_key(int key_len,
return 0;
}
-int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+int qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
switch (key_len) {
case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
@@ -1009,7 +1674,7 @@ int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
return 0;
}
-int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+int qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
switch (key_len) {
case ICP_QAT_HW_KASUMI_KEY_SZ:
@@ -1021,7 +1686,7 @@ int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
return 0;
}
-int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+int qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
switch (key_len) {
case ICP_QAT_HW_DES_KEY_SZ:
@@ -1033,11 +1698,12 @@ int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
return 0;
}
-int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+int qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
switch (key_len) {
case QAT_3DES_KEY_SZ_OPT1:
case QAT_3DES_KEY_SZ_OPT2:
+ case QAT_3DES_KEY_SZ_OPT3:
*alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
break;
default:
@@ -1046,7 +1712,7 @@ int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
return 0;
}
-int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
{
switch (key_len) {
case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
new file mode 100644
index 00000000..e8f51e5b
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+#ifndef _QAT_SYM_SESSION_H_
+#define _QAT_SYM_SESSION_H_
+
+#include <rte_crypto.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "qat_common.h"
+#include "icp_qat_hw.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
+/*
+ * Key Modifier (KM) value used in KASUMI algorithm in F9 mode to XOR
+ * Integrity Key (IK)
+ */
+#define KASUMI_F9_KEY_MODIFIER_4_BYTES 0xAAAAAAAA
+
+#define KASUMI_F8_KEY_MODIFIER_4_BYTES 0x55555555
+
+/* 3DES key sizes */
+#define QAT_3DES_KEY_SZ_OPT1 24 /* Keys are independent */
+#define QAT_3DES_KEY_SZ_OPT2 16 /* K3=K1 */
+#define QAT_3DES_KEY_SZ_OPT3 8 /* K1=K2=K3 */
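+
+/*
+ * Illustrative note (mirrors the key expansion performed in
+ * qat_sym_session_aead_create_cd_cipher):
+ *	OPT1: caller supplies K1||K2||K3 (24 bytes), used as-is
+ *	OPT2: caller supplies K1||K2 (16 bytes), driver appends K3 = K1
+ *	OPT3: caller supplies K1 (8 bytes), driver appends K2 = K3 = K1
+ */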
+
+
+#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+ ICP_QAT_HW_CIPHER_NO_CONVERT, \
+ ICP_QAT_HW_CIPHER_ENCRYPT)
+
+#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
+ ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+ ICP_QAT_HW_CIPHER_KEY_CONVERT, \
+ ICP_QAT_HW_CIPHER_DECRYPT)
+
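+/*
+ * Illustrative use only (a sketch, not a quote of driver code): the helper
+ * above could build the hardware config word for AES-128 CBC encryption:
+ *
+ *	cipher->cipher_config.val =
+ *		QAT_AES_HW_CONFIG_CBC_ENC(ICP_QAT_HW_CIPHER_ALGO_AES128);
+ */
+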
+enum qat_sym_proto_flag {
+ QAT_CRYPTO_PROTO_FLAG_NONE = 0,
+ QAT_CRYPTO_PROTO_FLAG_CCM = 1,
+ QAT_CRYPTO_PROTO_FLAG_GCM = 2,
+ QAT_CRYPTO_PROTO_FLAG_SNOW3G = 3,
+ QAT_CRYPTO_PROTO_FLAG_ZUC = 4
+};
+
+/* Common content descriptor */
+struct qat_sym_cd {
+ struct icp_qat_hw_cipher_algo_blk cipher;
+ struct icp_qat_hw_auth_algo_blk hash;
+} __rte_packed __rte_cache_aligned;
+
+struct qat_sym_session {
+ enum icp_qat_fw_la_cmd_id qat_cmd;
+ enum icp_qat_hw_cipher_algo qat_cipher_alg;
+ enum icp_qat_hw_cipher_dir qat_dir;
+ enum icp_qat_hw_cipher_mode qat_mode;
+ enum icp_qat_hw_auth_algo qat_hash_alg;
+ enum icp_qat_hw_auth_op auth_op;
+ void *bpi_ctx;
+ struct qat_sym_cd cd;
+ uint8_t *cd_cur_ptr;
+ phys_addr_t cd_paddr;
+ struct icp_qat_fw_la_bulk_req fw_req;
+ uint8_t aad_len;
+ struct qat_crypto_instance *inst;
+ struct {
+ uint16_t offset;
+ uint16_t length;
+ } cipher_iv;
+ struct {
+ uint16_t offset;
+ uint16_t length;
+ } auth_iv;
+ uint16_t digest_length;
+ rte_spinlock_t lock; /* protects this struct */
+ enum qat_device_gen min_qat_dev_gen;
+};
+
+int
+qat_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool);
+
+int
+qat_sym_session_set_parameters(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *session_private);
+
+int
+qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session);
+
+int
+qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session);
+
+int
+qat_sym_session_configure_auth(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct qat_sym_session *session);
+
+int
+qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cd,
+ uint8_t *enckey,
+ uint32_t enckeylen);
+
+int
+qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
+ uint8_t *authkey,
+ uint32_t authkeylen,
+ uint32_t aad_length,
+ uint32_t digestsize,
+ unsigned int operation);
+
+void
+qat_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *session);
+
+unsigned int
+qat_sym_session_get_private_size(struct rte_cryptodev *dev);
+
+int
+qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int
+qat_sym_validate_aes_docsisbpi_key(int key_len,
+ enum icp_qat_hw_cipher_algo *alg);
+int
+qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int
+qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int
+qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int
+qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int
+qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg);
+int
+qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+
+#endif /* _QAT_SYM_SESSION_H_ */
diff --git a/drivers/crypto/qat/rte_pmd_qat_version.map b/drivers/crypto/qat/rte_pmd_qat_version.map
deleted file mode 100644
index bbaf1c85..00000000
--- a/drivers/crypto/qat/rte_pmd_qat_version.map
+++ /dev/null
@@ -1,3 +0,0 @@
-DPDK_2.2 {
- local: *;
-}; \ No newline at end of file
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
deleted file mode 100644
index bf837401..00000000
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ /dev/null
@@ -1,180 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2017 Intel Corporation
- */
-
-#include <rte_bus_pci.h>
-#include <rte_common.h>
-#include <rte_dev.h>
-#include <rte_malloc.h>
-#include <rte_pci.h>
-#include <rte_cryptodev_pmd.h>
-
-#include "qat_crypto.h"
-#include "qat_logs.h"
-
-uint8_t cryptodev_qat_driver_id;
-
-static const struct rte_cryptodev_capabilities qat_gen1_capabilities[] = {
- QAT_BASE_GEN1_SYM_CAPABILITIES,
- RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
-};
-
-static const struct rte_cryptodev_capabilities qat_gen2_capabilities[] = {
- QAT_BASE_GEN1_SYM_CAPABILITIES,
- QAT_EXTRA_GEN2_SYM_CAPABILITIES,
- RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
-};
-
-static struct rte_cryptodev_ops crypto_qat_ops = {
-
- /* Device related operations */
- .dev_configure = qat_dev_config,
- .dev_start = qat_dev_start,
- .dev_stop = qat_dev_stop,
- .dev_close = qat_dev_close,
- .dev_infos_get = qat_dev_info_get,
-
- .stats_get = qat_crypto_sym_stats_get,
- .stats_reset = qat_crypto_sym_stats_reset,
- .queue_pair_setup = qat_crypto_sym_qp_setup,
- .queue_pair_release = qat_crypto_sym_qp_release,
- .queue_pair_start = NULL,
- .queue_pair_stop = NULL,
- .queue_pair_count = NULL,
-
- /* Crypto related operations */
- .session_get_size = qat_crypto_sym_get_session_private_size,
- .session_configure = qat_crypto_sym_configure_session,
- .session_clear = qat_crypto_sym_clear_session
-};
-
-/*
- * The set of PCI devices this driver supports
- */
-
-static const struct rte_pci_id pci_id_qat_map[] = {
- {
- RTE_PCI_DEVICE(0x8086, 0x0443),
- },
- {
- RTE_PCI_DEVICE(0x8086, 0x37c9),
- },
- {
- RTE_PCI_DEVICE(0x8086, 0x19e3),
- },
- {
- RTE_PCI_DEVICE(0x8086, 0x6f55),
- },
- {.device_id = 0},
-};
-
-static int
-crypto_qat_create(const char *name, struct rte_pci_device *pci_dev,
- struct rte_cryptodev_pmd_init_params *init_params)
-{
- struct rte_cryptodev *cryptodev;
- struct qat_pmd_private *internals;
-
- PMD_INIT_FUNC_TRACE();
-
- cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
- init_params);
- if (cryptodev == NULL)
- return -ENODEV;
-
- cryptodev->driver_id = cryptodev_qat_driver_id;
- cryptodev->dev_ops = &crypto_qat_ops;
-
- cryptodev->enqueue_burst = qat_pmd_enqueue_op_burst;
- cryptodev->dequeue_burst = qat_pmd_dequeue_op_burst;
-
- cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
- RTE_CRYPTODEV_FF_HW_ACCELERATED |
- RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
- RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
-
- internals = cryptodev->data->dev_private;
- internals->max_nb_sessions = init_params->max_nb_sessions;
- switch (pci_dev->id.device_id) {
- case 0x0443:
- internals->qat_dev_gen = QAT_GEN1;
- internals->qat_dev_capabilities = qat_gen1_capabilities;
- break;
- case 0x37c9:
- case 0x19e3:
- case 0x6f55:
- internals->qat_dev_gen = QAT_GEN2;
- internals->qat_dev_capabilities = qat_gen2_capabilities;
- break;
- default:
- PMD_DRV_LOG(ERR,
- "Invalid dev_id, can't determine capabilities");
- break;
- }
-
- /*
- * For secondary processes, we don't initialise any further as primary
- * has already done this work. Only check we don't need a different
- * RX function
- */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- PMD_DRV_LOG(DEBUG, "Device already initialised by primary process");
- return 0;
- }
-
- return 0;
-}
-
-static int crypto_qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
- struct rte_pci_device *pci_dev)
-{
- struct rte_cryptodev_pmd_init_params init_params = {
- .name = "",
- .socket_id = rte_socket_id(),
- .private_data_size = sizeof(struct qat_pmd_private),
- .max_nb_sessions = RTE_QAT_PMD_MAX_NB_SESSIONS
- };
- char name[RTE_CRYPTODEV_NAME_MAX_LEN];
-
- PMD_DRV_LOG(DEBUG, "Found QAT device at %02x:%02x.%x",
- pci_dev->addr.bus,
- pci_dev->addr.devid,
- pci_dev->addr.function);
-
- rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
-
- return crypto_qat_create(name, pci_dev, &init_params);
-}
-
-static int crypto_qat_pci_remove(struct rte_pci_device *pci_dev)
-{
- struct rte_cryptodev *cryptodev;
- char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
-
- if (pci_dev == NULL)
- return -EINVAL;
-
- rte_pci_device_name(&pci_dev->addr, cryptodev_name,
- sizeof(cryptodev_name));
-
- cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
- if (cryptodev == NULL)
- return -ENODEV;
-
- /* free crypto device */
- return rte_cryptodev_pmd_destroy(cryptodev);
-}
-
-static struct rte_pci_driver rte_qat_pmd = {
- .id_table = pci_id_qat_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
- .probe = crypto_qat_pci_probe,
- .remove = crypto_qat_pci_remove
-};
-
-static struct cryptodev_driver qat_crypto_drv;
-
-RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_QAT_SYM_PMD, rte_qat_pmd);
-RTE_PMD_REGISTER_PCI_TABLE(CRYPTODEV_NAME_QAT_SYM_PMD, pci_id_qat_map);
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv, rte_qat_pmd,
- cryptodev_qat_driver_id);