Diffstat (limited to 'drivers/crypto/qat/qat_adf')
-rw-r--r--  drivers/crypto/qat/qat_adf/adf_transport_access_macros.h | 176
-rw-r--r--  drivers/crypto/qat/qat_adf/icp_qat_fw.h                   | 316
-rw-r--r--  drivers/crypto/qat/qat_adf/icp_qat_fw_la.h                | 404
-rw-r--r--  drivers/crypto/qat/qat_adf/icp_qat_hw.h                   | 329
-rw-r--r--  drivers/crypto/qat/qat_adf/qat_algs.h                     | 169
-rw-r--r--  drivers/crypto/qat/qat_adf/qat_algs_build_desc.c          | 1059
6 files changed, 0 insertions, 2453 deletions
diff --git a/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h b/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
deleted file mode 100644
index 4f8f3d13..00000000
--- a/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
-#define ADF_TRANSPORT_ACCESS_MACROS_H
-
-#include <rte_io.h>
-
-/* CSR write macro */
-#define ADF_CSR_WR(csrAddr, csrOffset, val) \
- rte_write32(val, (((uint8_t *)csrAddr) + csrOffset))
-
-/* CSR read macro */
-#define ADF_CSR_RD(csrAddr, csrOffset) \
- rte_read32((((uint8_t *)csrAddr) + csrOffset))
-
-#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
-#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
-#define ADF_RING_CSR_RING_CONFIG 0x000
-#define ADF_RING_CSR_RING_LBASE 0x040
-#define ADF_RING_CSR_RING_UBASE 0x080
-#define ADF_RING_CSR_RING_HEAD 0x0C0
-#define ADF_RING_CSR_RING_TAIL 0x100
-#define ADF_RING_CSR_E_STAT 0x14C
-#define ADF_RING_CSR_INT_SRCSEL 0x174
-#define ADF_RING_CSR_INT_SRCSEL_2 0x178
-#define ADF_RING_CSR_INT_COL_EN 0x17C
-#define ADF_RING_CSR_INT_COL_CTL 0x180
-#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
-#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
-#define ADF_RING_BUNDLE_SIZE 0x1000
-#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A
-#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05
-#define ADF_COALESCING_MIN_TIME 0x1FF
-#define ADF_COALESCING_MAX_TIME 0xFFFFF
-#define ADF_COALESCING_DEF_TIME 0x27FF
-#define ADF_RING_NEAR_WATERMARK_512 0x08
-#define ADF_RING_NEAR_WATERMARK_0 0x00
-#define ADF_RING_EMPTY_SIG 0x7F7F7F7F
-#define ADF_RING_EMPTY_SIG_BYTE 0x7F
-
-/* Valid internal ring size values */
-#define ADF_RING_SIZE_128 0x01
-#define ADF_RING_SIZE_256 0x02
-#define ADF_RING_SIZE_512 0x03
-#define ADF_RING_SIZE_4K 0x06
-#define ADF_RING_SIZE_16K 0x08
-#define ADF_RING_SIZE_4M 0x10
-#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
-#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
-#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
-
-#define ADF_NUM_BUNDLES_PER_DEV 1
-#define ADF_NUM_SYM_QPS_PER_BUNDLE 2
-
-/* Valid internal msg size values */
-#define ADF_MSG_SIZE_32 0x01
-#define ADF_MSG_SIZE_64 0x02
-#define ADF_MSG_SIZE_128 0x04
-#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
-#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
-
-/* Size to bytes conversion macros for ring and msg size values */
-#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
-#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
-#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
-#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
-
-/* Minimum ring buffer size for memory allocation */
-#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
- ADF_RING_SIZE_4K : SIZE)
-#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
-#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
- SIZE) & ~0x4)
-/* Max outstanding requests */
-#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
- ((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1)
-#define BUILD_RING_CONFIG(size) \
- ((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
- | (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
- | size)
-#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \
- ((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM) \
- | (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
- | size)
-#define BUILD_RING_BASE_ADDR(addr, size) \
- ((addr >> 6) & (0xFFFFFFFFFFFFFFFFULL << size))
-#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
- ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
- ADF_RING_CSR_RING_HEAD + (ring << 2))
-#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
- ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
- ADF_RING_CSR_RING_TAIL + (ring << 2))
-#define READ_CSR_E_STAT(csr_base_addr, bank) \
- ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
- ADF_RING_CSR_E_STAT)
-#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
- ADF_RING_CSR_RING_CONFIG + (ring << 2), value)
-#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
-do { \
- uint32_t l_base = 0, u_base = 0; \
- l_base = (uint32_t)(value & 0xFFFFFFFF); \
- u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
- ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
- ADF_RING_CSR_RING_UBASE + (ring << 2), u_base); \
-} while (0)
-#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
- ADF_RING_CSR_RING_HEAD + (ring << 2), value)
-#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
- ADF_RING_CSR_RING_TAIL + (ring << 2), value)
-#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
-do { \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
- ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
- ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
-} while (0)
-#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
- ADF_RING_CSR_INT_COL_EN, value)
-#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
- ADF_RING_CSR_INT_COL_CTL, \
- ADF_RING_CSR_INT_COL_CTL_ENABLE | value)
-#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
- ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
- ADF_RING_CSR_INT_FLAG_AND_COL, value)
-#endif
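
For context on the file removed above: the ring CSR macros were intended to be used together when a transport ring is programmed. A minimal sketch of that pattern, assuming a mapped CSR base pointer and using bank 0 / ring 0 (both hypothetical, not taken from this diff):

/* Illustrative only: program ring 0 of bank 0 with the macros deleted above.
 * csr_base is assumed to be the device BAR mapping obtained elsewhere.
 */
static void example_ring_setup(void *csr_base, uint64_t ring_iova)
{
        uint32_t ring_config = BUILD_RING_CONFIG(ADF_RING_SIZE_4K);

        WRITE_CSR_RING_CONFIG(csr_base, 0, 0, ring_config);
        WRITE_CSR_RING_BASE(csr_base, 0, 0,
                        BUILD_RING_BASE_ADDR(ring_iova, ADF_RING_SIZE_4K));
        WRITE_CSR_RING_HEAD(csr_base, 0, 0, 0);
        WRITE_CSR_RING_TAIL(csr_base, 0, 0, 0);
        WRITE_CSR_INT_SRCSEL(csr_base, 0);
}
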
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_fw.h b/drivers/crypto/qat/qat_adf/icp_qat_fw.h
deleted file mode 100644
index 5de34d55..00000000
--- a/drivers/crypto/qat/qat_adf/icp_qat_fw.h
+++ /dev/null
@@ -1,316 +0,0 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _ICP_QAT_FW_H_
-#define _ICP_QAT_FW_H_
-#include <sys/types.h>
-#include "icp_qat_hw.h"
-
-#define QAT_FIELD_SET(flags, val, bitpos, mask) \
-{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \
- (((val) & (mask)) << (bitpos))) ; }
-
-#define QAT_FIELD_GET(flags, bitpos, mask) \
- (((flags) >> (bitpos)) & (mask))
-
-#define ICP_QAT_FW_REQ_DEFAULT_SZ 128
-#define ICP_QAT_FW_RESP_DEFAULT_SZ 32
-#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8
-#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF
-#define ICP_QAT_FW_NUM_LONGWORDS_1 1
-#define ICP_QAT_FW_NUM_LONGWORDS_2 2
-#define ICP_QAT_FW_NUM_LONGWORDS_3 3
-#define ICP_QAT_FW_NUM_LONGWORDS_4 4
-#define ICP_QAT_FW_NUM_LONGWORDS_5 5
-#define ICP_QAT_FW_NUM_LONGWORDS_6 6
-#define ICP_QAT_FW_NUM_LONGWORDS_7 7
-#define ICP_QAT_FW_NUM_LONGWORDS_10 10
-#define ICP_QAT_FW_NUM_LONGWORDS_13 13
-#define ICP_QAT_FW_NULL_REQ_SERV_ID 1
-
-enum icp_qat_fw_comn_resp_serv_id {
- ICP_QAT_FW_COMN_RESP_SERV_NULL,
- ICP_QAT_FW_COMN_RESP_SERV_CPM_FW,
- ICP_QAT_FW_COMN_RESP_SERV_DELIMITER
-};
-
-enum icp_qat_fw_comn_request_id {
- ICP_QAT_FW_COMN_REQ_NULL = 0,
- ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3,
- ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4,
- ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7,
- ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9,
- ICP_QAT_FW_COMN_REQ_DELIMITER
-};
-
-struct icp_qat_fw_comn_req_hdr_cd_pars {
- union {
- struct {
- uint64_t content_desc_addr;
- uint16_t content_desc_resrvd1;
- uint8_t content_desc_params_sz;
- uint8_t content_desc_hdr_resrvd2;
- uint32_t content_desc_resrvd3;
- } s;
- struct {
- uint32_t serv_specif_fields[4];
- } s1;
- } u;
-};
-
-struct icp_qat_fw_comn_req_mid {
- uint64_t opaque_data;
- uint64_t src_data_addr;
- uint64_t dest_data_addr;
- uint32_t src_length;
- uint32_t dst_length;
-};
-
-struct icp_qat_fw_comn_req_cd_ctrl {
- uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5];
-};
-
-struct icp_qat_fw_comn_req_hdr {
- uint8_t resrvd1;
- uint8_t service_cmd_id;
- uint8_t service_type;
- uint8_t hdr_flags;
- uint16_t serv_specif_flags;
- uint16_t comn_req_flags;
-};
-
-struct icp_qat_fw_comn_req_rqpars {
- uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13];
-};
-
-struct icp_qat_fw_comn_req {
- struct icp_qat_fw_comn_req_hdr comn_hdr;
- struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
- struct icp_qat_fw_comn_req_mid comn_mid;
- struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
- struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
-};
-
-struct icp_qat_fw_comn_error {
- uint8_t xlat_err_code;
- uint8_t cmp_err_code;
-};
-
-struct icp_qat_fw_comn_resp_hdr {
- uint8_t resrvd1;
- uint8_t service_id;
- uint8_t response_type;
- uint8_t hdr_flags;
- struct icp_qat_fw_comn_error comn_error;
- uint8_t comn_status;
- uint8_t cmd_id;
-};
-
-struct icp_qat_fw_comn_resp {
- struct icp_qat_fw_comn_resp_hdr comn_hdr;
- uint64_t opaque_data;
- uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
-};
-
-#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1
-#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0
-#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
-#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
-#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F
-
-#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
- icp_qat_fw_comn_req_hdr_t.service_type
-
-#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \
- icp_qat_fw_comn_req_hdr_t.service_type = val
-
-#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \
- icp_qat_fw_comn_req_hdr_t.service_cmd_id
-
-#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \
- icp_qat_fw_comn_req_hdr_t.service_cmd_id = val
-
-#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \
- ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)
-
-#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \
- ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val)
-
-#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \
- QAT_FIELD_GET(hdr_flags, \
- ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
- ICP_QAT_FW_COMN_VALID_FLAG_MASK)
-
-#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \
- (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK)
-
-#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \
- QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
- ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
- ICP_QAT_FW_COMN_VALID_FLAG_MASK)
-
-#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \
- (((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
- ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
-
-#define QAT_COMN_PTR_TYPE_BITPOS 0
-#define QAT_COMN_PTR_TYPE_MASK 0x1
-#define QAT_COMN_CD_FLD_TYPE_BITPOS 1
-#define QAT_COMN_CD_FLD_TYPE_MASK 0x1
-#define QAT_COMN_PTR_TYPE_FLAT 0x0
-#define QAT_COMN_PTR_TYPE_SGL 0x1
-#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0
-#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1
-
-#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
- ((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
- | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
-
-#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
- QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
-
-#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \
- QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \
- QAT_COMN_CD_FLD_TYPE_MASK)
-
-#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \
- QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \
- QAT_COMN_PTR_TYPE_MASK)
-
-#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \
- QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \
- QAT_COMN_CD_FLD_TYPE_MASK)
-
-#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4
-#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0
-#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0
-#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F
-
-#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \
- ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
- >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
-
-#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
- { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
- & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
- ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
- & ICP_QAT_FW_COMN_NEXT_ID_MASK)); }
-
-#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \
- (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK)
-
-#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \
- { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
- & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
- ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }
-
-#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
-#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
-#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
-#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
-#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
-#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1
-#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3
-#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
-
-#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \
- ((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \
- QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \
- (((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \
- QAT_COMN_RESP_CMP_STATUS_BITPOS) | \
- (((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \
- QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \
- (((eolb) & QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \
- QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS))
-
-#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \
- QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \
- QAT_COMN_RESP_CRYPTO_STATUS_MASK)
-
-#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \
- QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \
- QAT_COMN_RESP_CMP_STATUS_MASK)
-
-#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \
- QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \
- QAT_COMN_RESP_XLAT_STATUS_MASK)
-
-#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \
- QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \
- QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK)
-
-#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
-#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
-#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
-#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1
-#define ERR_CODE_NO_ERROR 0
-#define ERR_CODE_INVALID_BLOCK_TYPE -1
-#define ERR_CODE_NO_MATCH_ONES_COMP -2
-#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3
-#define ERR_CODE_INCOMPLETE_LEN -4
-#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5
-#define ERR_CODE_RPT_GT_SPEC_LEN -6
-#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7
-#define ERR_CODE_INV_DIS_CODE_LEN -8
-#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9
-#define ERR_CODE_DIS_TOO_FAR_BACK -10
-#define ERR_CODE_OVERFLOW_ERROR -11
-#define ERR_CODE_SOFT_ERROR -12
-#define ERR_CODE_FATAL_ERROR -13
-#define ERR_CODE_SSM_ERROR -14
-#define ERR_CODE_ENDPOINT_ERROR -15
-
-enum icp_qat_fw_slice {
- ICP_QAT_FW_SLICE_NULL = 0,
- ICP_QAT_FW_SLICE_CIPHER = 1,
- ICP_QAT_FW_SLICE_AUTH = 2,
- ICP_QAT_FW_SLICE_DRAM_RD = 3,
- ICP_QAT_FW_SLICE_DRAM_WR = 4,
- ICP_QAT_FW_SLICE_COMP = 5,
- ICP_QAT_FW_SLICE_XLAT = 6,
- ICP_QAT_FW_SLICE_DELIMITER
-};
-#endif
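
For context: the request/response helpers removed above are thin bit-field accessors built on QAT_FIELD_SET/GET. A minimal sketch of how a caller might mark a request header valid and check a response status, assuming the structures defined above (the function name is illustrative only):

/* Illustrative only: build common header flags and decode a response
 * status byte, using macros from the removed icp_qat_fw.h.
 */
static void example_fw_flags(struct icp_qat_fw_comn_req_hdr *hdr,
                             struct icp_qat_fw_comn_resp *resp)
{
        hdr->hdr_flags =
                ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
        hdr->comn_req_flags =
                ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
                                            QAT_COMN_PTR_TYPE_FLAT);

        if (ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(resp->comn_hdr.comn_status) !=
            ICP_QAT_FW_COMN_STATUS_FLAG_OK) {
                /* request failed */
        }
}
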
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h b/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h
deleted file mode 100644
index fbf2b839..00000000
--- a/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h
+++ /dev/null
@@ -1,404 +0,0 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _ICP_QAT_FW_LA_H_
-#define _ICP_QAT_FW_LA_H_
-#include "icp_qat_fw.h"
-
-enum icp_qat_fw_la_cmd_id {
- ICP_QAT_FW_LA_CMD_CIPHER = 0,
- ICP_QAT_FW_LA_CMD_AUTH = 1,
- ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2,
- ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3,
- ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4,
- ICP_QAT_FW_LA_CMD_TRNG_TEST = 5,
- ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6,
- ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7,
- ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8,
- ICP_QAT_FW_LA_CMD_MGF1 = 9,
- ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10,
- ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11,
- ICP_QAT_FW_LA_CMD_DELIMITER = 12
-};
-
-#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
-#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
-#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
-#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
-
-struct icp_qat_fw_la_bulk_req {
- struct icp_qat_fw_comn_req_hdr comn_hdr;
- struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
- struct icp_qat_fw_comn_req_mid comn_mid;
- struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
- struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
-};
-
-#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1
-#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0
-#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12
-#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1
-#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1
-#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11
-#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1
-#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1
-#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0
-#define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10
-#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1
-#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4
-#define ICP_QAT_FW_LA_GCM_PROTO 2
-#define ICP_QAT_FW_LA_CCM_PROTO 1
-#define ICP_QAT_FW_LA_NO_PROTO 0
-#define QAT_LA_PROTO_BITPOS 7
-#define QAT_LA_PROTO_MASK 0x7
-#define ICP_QAT_FW_LA_CMP_AUTH_RES 1
-#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0
-#define QAT_LA_CMP_AUTH_RES_BITPOS 6
-#define QAT_LA_CMP_AUTH_RES_MASK 0x1
-#define ICP_QAT_FW_LA_RET_AUTH_RES 1
-#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0
-#define QAT_LA_RET_AUTH_RES_BITPOS 5
-#define QAT_LA_RET_AUTH_RES_MASK 0x1
-#define ICP_QAT_FW_LA_UPDATE_STATE 1
-#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0
-#define QAT_LA_UPDATE_STATE_BITPOS 4
-#define QAT_LA_UPDATE_STATE_MASK 0x1
-#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0
-#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1
-#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3
-#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1
-#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0
-#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1
-#define QAT_LA_CIPH_IV_FLD_BITPOS 2
-#define QAT_LA_CIPH_IV_FLD_MASK 0x1
-#define ICP_QAT_FW_LA_PARTIAL_NONE 0
-#define ICP_QAT_FW_LA_PARTIAL_START 1
-#define ICP_QAT_FW_LA_PARTIAL_MID 3
-#define ICP_QAT_FW_LA_PARTIAL_END 2
-#define QAT_LA_PARTIAL_BITPOS 0
-#define QAT_LA_PARTIAL_MASK 0x3
-#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
- cmp_auth, ret_auth, update_state, \
- ciph_iv, ciphcfg, partial) \
- (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \
- QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \
- ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \
- QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \
- ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \
- QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \
- ((proto & QAT_LA_PROTO_MASK) << \
- QAT_LA_PROTO_BITPOS) | \
- ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \
- QAT_LA_CMP_AUTH_RES_BITPOS) | \
- ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \
- QAT_LA_RET_AUTH_RES_BITPOS) | \
- ((update_state & QAT_LA_UPDATE_STATE_MASK) << \
- QAT_LA_UPDATE_STATE_BITPOS) | \
- ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \
- QAT_LA_CIPH_IV_FLD_BITPOS) | \
- ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \
- QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \
- ((partial & QAT_LA_PARTIAL_MASK) << \
- QAT_LA_PARTIAL_BITPOS))
-
-#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \
- QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \
- QAT_LA_CIPH_IV_FLD_MASK)
-
-#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \
- QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
- QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
-
-#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \
- QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
- QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
-
-#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \
- QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
- QAT_LA_GCM_IV_LEN_FLAG_MASK)
-
-#define ICP_QAT_FW_LA_PROTO_GET(flags) \
- QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK)
-
-#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \
- QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \
- QAT_LA_CMP_AUTH_RES_MASK)
-
-#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \
- QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \
- QAT_LA_RET_AUTH_RES_MASK)
-
-#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \
- QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
- QAT_LA_DIGEST_IN_BUFFER_MASK)
-
-#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \
- QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \
- QAT_LA_UPDATE_STATE_MASK)
-
-#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \
- QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \
- QAT_LA_PARTIAL_MASK)
-
-#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \
- QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \
- QAT_LA_CIPH_IV_FLD_MASK)
-
-#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \
- QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
- QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
-
-#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \
- QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
- QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
-
-#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \
- QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
- QAT_LA_GCM_IV_LEN_FLAG_MASK)
-
-#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \
- QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \
- QAT_LA_PROTO_MASK)
-
-#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \
- QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \
- QAT_LA_CMP_AUTH_RES_MASK)
-
-#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \
- QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \
- QAT_LA_RET_AUTH_RES_MASK)
-
-#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \
- QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
- QAT_LA_DIGEST_IN_BUFFER_MASK)
-
-#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \
- QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \
- QAT_LA_UPDATE_STATE_MASK)
-
-#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \
- QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
- QAT_LA_PARTIAL_MASK)
-
-struct icp_qat_fw_cipher_req_hdr_cd_pars {
- union {
- struct {
- uint64_t content_desc_addr;
- uint16_t content_desc_resrvd1;
- uint8_t content_desc_params_sz;
- uint8_t content_desc_hdr_resrvd2;
- uint32_t content_desc_resrvd3;
- } s;
- struct {
- uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
- } s1;
- } u;
-};
-
-struct icp_qat_fw_cipher_auth_req_hdr_cd_pars {
- union {
- struct {
- uint64_t content_desc_addr;
- uint16_t content_desc_resrvd1;
- uint8_t content_desc_params_sz;
- uint8_t content_desc_hdr_resrvd2;
- uint32_t content_desc_resrvd3;
- } s;
- struct {
- uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
- } sl;
- } u;
-};
-
-struct icp_qat_fw_cipher_cd_ctrl_hdr {
- uint8_t cipher_state_sz;
- uint8_t cipher_key_sz;
- uint8_t cipher_cfg_offset;
- uint8_t next_curr_id;
- uint8_t cipher_padding_sz;
- uint8_t resrvd1;
- uint16_t resrvd2;
- uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3];
-};
-
-struct icp_qat_fw_auth_cd_ctrl_hdr {
- uint32_t resrvd1;
- uint8_t resrvd2;
- uint8_t hash_flags;
- uint8_t hash_cfg_offset;
- uint8_t next_curr_id;
- uint8_t resrvd3;
- uint8_t outer_prefix_sz;
- uint8_t final_sz;
- uint8_t inner_res_sz;
- uint8_t resrvd4;
- uint8_t inner_state1_sz;
- uint8_t inner_state2_offset;
- uint8_t inner_state2_sz;
- uint8_t outer_config_offset;
- uint8_t outer_state1_sz;
- uint8_t outer_res_sz;
- uint8_t outer_prefix_offset;
-};
-
-struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
- uint8_t cipher_state_sz;
- uint8_t cipher_key_sz;
- uint8_t cipher_cfg_offset;
- uint8_t next_curr_id_cipher;
- uint8_t cipher_padding_sz;
- uint8_t hash_flags;
- uint8_t hash_cfg_offset;
- uint8_t next_curr_id_auth;
- uint8_t resrvd1;
- uint8_t outer_prefix_sz;
- uint8_t final_sz;
- uint8_t inner_res_sz;
- uint8_t resrvd2;
- uint8_t inner_state1_sz;
- uint8_t inner_state2_offset;
- uint8_t inner_state2_sz;
- uint8_t outer_config_offset;
- uint8_t outer_state1_sz;
- uint8_t outer_res_sz;
- uint8_t outer_prefix_offset;
-};
-
-#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1
-#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0
-#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX 240
-#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \
- (sizeof(struct icp_qat_fw_la_cipher_req_params_t))
-#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
-
-struct icp_qat_fw_la_cipher_req_params {
- uint32_t cipher_offset;
- uint32_t cipher_length;
- union {
- uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
- struct {
- uint64_t cipher_IV_ptr;
- uint64_t resrvd1;
- } s;
- } u;
-};
-
-struct icp_qat_fw_la_auth_req_params {
- uint32_t auth_off;
- uint32_t auth_len;
- union {
- uint64_t auth_partial_st_prefix;
- uint64_t aad_adr;
- } u1;
- uint64_t auth_res_addr;
- union {
- uint8_t inner_prefix_sz;
- uint8_t aad_sz;
- } u2;
- uint8_t resrvd1;
- uint8_t hash_state_sz;
- uint8_t auth_res_sz;
-} __rte_packed;
-
-struct icp_qat_fw_la_auth_req_params_resrvd_flds {
- uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
- union {
- uint8_t inner_prefix_sz;
- uint8_t aad_sz;
- } u2;
- uint8_t resrvd1;
- uint16_t resrvd2;
-};
-
-struct icp_qat_fw_la_resp {
- struct icp_qat_fw_comn_resp_hdr comn_resp;
- uint64_t opaque_data;
- uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
-};
-
-#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \
- ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
- ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
-
-#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
-{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
- ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
- & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
- ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
- & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
-
-#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \
- (((cd_ctrl_hdr_t)->next_curr_id_cipher) \
- & ICP_QAT_FW_COMN_CURR_ID_MASK)
-
-#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \
-{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
- ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
- & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
- ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
-
-#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \
- ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
- >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
-
-#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
-{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
- ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
- & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
- ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
- & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
-
-#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \
- (((cd_ctrl_hdr_t)->next_curr_id_auth) \
- & ICP_QAT_FW_COMN_CURR_ID_MASK)
-
-#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \
-{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
- ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
- & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
- ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
-
-#endif
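
For context: the LA flag setters removed above all write into the 16-bit serv_specif_flags word of the common request header. A minimal sketch of a plausible AES-GCM flag setup, with the specific flag choices given only as an example:

/* Illustrative only: typical LA flag setup for an AES-GCM request,
 * built with the setter macros from the removed icp_qat_fw_la.h.
 */
static void example_la_gcm_flags(struct icp_qat_fw_comn_req_hdr *header)
{
        uint16_t flags = 0;

        ICP_QAT_FW_LA_PROTO_SET(flags, ICP_QAT_FW_LA_GCM_PROTO);
        ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags,
                        ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
        ICP_QAT_FW_LA_RET_AUTH_SET(flags, ICP_QAT_FW_LA_RET_AUTH_RES);
        ICP_QAT_FW_LA_PARTIAL_SET(flags, ICP_QAT_FW_LA_PARTIAL_NONE);

        header->serv_specif_flags = flags;
}
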
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_hw.h b/drivers/crypto/qat/qat_adf/icp_qat_hw.h
deleted file mode 100644
index d03688c7..00000000
--- a/drivers/crypto/qat/qat_adf/icp_qat_hw.h
+++ /dev/null
@@ -1,329 +0,0 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _ICP_QAT_HW_H_
-#define _ICP_QAT_HW_H_
-
-enum icp_qat_hw_ae_id {
- ICP_QAT_HW_AE_0 = 0,
- ICP_QAT_HW_AE_1 = 1,
- ICP_QAT_HW_AE_2 = 2,
- ICP_QAT_HW_AE_3 = 3,
- ICP_QAT_HW_AE_4 = 4,
- ICP_QAT_HW_AE_5 = 5,
- ICP_QAT_HW_AE_6 = 6,
- ICP_QAT_HW_AE_7 = 7,
- ICP_QAT_HW_AE_8 = 8,
- ICP_QAT_HW_AE_9 = 9,
- ICP_QAT_HW_AE_10 = 10,
- ICP_QAT_HW_AE_11 = 11,
- ICP_QAT_HW_AE_DELIMITER = 12
-};
-
-enum icp_qat_hw_qat_id {
- ICP_QAT_HW_QAT_0 = 0,
- ICP_QAT_HW_QAT_1 = 1,
- ICP_QAT_HW_QAT_2 = 2,
- ICP_QAT_HW_QAT_3 = 3,
- ICP_QAT_HW_QAT_4 = 4,
- ICP_QAT_HW_QAT_5 = 5,
- ICP_QAT_HW_QAT_DELIMITER = 6
-};
-
-enum icp_qat_hw_auth_algo {
- ICP_QAT_HW_AUTH_ALGO_NULL = 0,
- ICP_QAT_HW_AUTH_ALGO_SHA1 = 1,
- ICP_QAT_HW_AUTH_ALGO_MD5 = 2,
- ICP_QAT_HW_AUTH_ALGO_SHA224 = 3,
- ICP_QAT_HW_AUTH_ALGO_SHA256 = 4,
- ICP_QAT_HW_AUTH_ALGO_SHA384 = 5,
- ICP_QAT_HW_AUTH_ALGO_SHA512 = 6,
- ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7,
- ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8,
- ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9,
- ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10,
- ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11,
- ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12,
- ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13,
- ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14,
- ICP_QAT_HW_AUTH_RESERVED_1 = 15,
- ICP_QAT_HW_AUTH_RESERVED_2 = 16,
- ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17,
- ICP_QAT_HW_AUTH_RESERVED_3 = 18,
- ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19,
- ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20
-};
-
-enum icp_qat_hw_auth_mode {
- ICP_QAT_HW_AUTH_MODE0 = 0,
- ICP_QAT_HW_AUTH_MODE1 = 1,
- ICP_QAT_HW_AUTH_MODE2 = 2,
- ICP_QAT_HW_AUTH_MODE_DELIMITER = 3
-};
-
-struct icp_qat_hw_auth_config {
- uint32_t config;
- uint32_t reserved;
-};
-
-#define QAT_AUTH_MODE_BITPOS 4
-#define QAT_AUTH_MODE_MASK 0xF
-#define QAT_AUTH_ALGO_BITPOS 0
-#define QAT_AUTH_ALGO_MASK 0xF
-#define QAT_AUTH_CMP_BITPOS 8
-#define QAT_AUTH_CMP_MASK 0x7F
-#define QAT_AUTH_SHA3_PADDING_BITPOS 16
-#define QAT_AUTH_SHA3_PADDING_MASK 0x1
-#define QAT_AUTH_ALGO_SHA3_BITPOS 22
-#define QAT_AUTH_ALGO_SHA3_MASK 0x3
-#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
- (((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
- ((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
- (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \
- QAT_AUTH_ALGO_SHA3_BITPOS) | \
- (((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \
- (algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \
- & QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \
- ((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
-
-struct icp_qat_hw_auth_counter {
- uint32_t counter;
- uint32_t reserved;
-};
-
-#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF
-#define QAT_AUTH_COUNT_BITPOS 0
-#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \
- (((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS)
-
-struct icp_qat_hw_auth_setup {
- struct icp_qat_hw_auth_config auth_config;
- struct icp_qat_hw_auth_counter auth_counter;
-};
-
-#define QAT_HW_DEFAULT_ALIGNMENT 8
-#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & (~(n - 1)))
-#define ICP_QAT_HW_NULL_STATE1_SZ 32
-#define ICP_QAT_HW_MD5_STATE1_SZ 16
-#define ICP_QAT_HW_SHA1_STATE1_SZ 20
-#define ICP_QAT_HW_SHA224_STATE1_SZ 32
-#define ICP_QAT_HW_SHA256_STATE1_SZ 32
-#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32
-#define ICP_QAT_HW_SHA384_STATE1_SZ 64
-#define ICP_QAT_HW_SHA512_STATE1_SZ 64
-#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64
-#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
-#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
-#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16
-#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16
-#define ICP_QAT_HW_AES_F9_STATE1_SZ 32
-#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16
-#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
-#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
-#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
-#define ICP_QAT_HW_NULL_STATE2_SZ 32
-#define ICP_QAT_HW_MD5_STATE2_SZ 16
-#define ICP_QAT_HW_SHA1_STATE2_SZ 20
-#define ICP_QAT_HW_SHA224_STATE2_SZ 32
-#define ICP_QAT_HW_SHA256_STATE2_SZ 32
-#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
-#define ICP_QAT_HW_SHA384_STATE2_SZ 64
-#define ICP_QAT_HW_SHA512_STATE2_SZ 64
-#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
-#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
-#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
-#define ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ 48
-#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
-#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
-#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16
-#define ICP_QAT_HW_F9_IK_SZ 16
-#define ICP_QAT_HW_F9_FK_SZ 16
-#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \
- ICP_QAT_HW_F9_FK_SZ)
-#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ
-#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24
-#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32
-#define ICP_QAT_HW_GALOIS_H_SZ 16
-#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8
-#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16
-
-struct icp_qat_hw_auth_sha512 {
- struct icp_qat_hw_auth_setup inner_setup;
- uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ];
- struct icp_qat_hw_auth_setup outer_setup;
- uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ];
-};
-
-struct icp_qat_hw_auth_algo_blk {
- struct icp_qat_hw_auth_sha512 sha;
-};
-
-#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0
-#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF
-
-enum icp_qat_hw_cipher_algo {
- ICP_QAT_HW_CIPHER_ALGO_NULL = 0,
- ICP_QAT_HW_CIPHER_ALGO_DES = 1,
- ICP_QAT_HW_CIPHER_ALGO_3DES = 2,
- ICP_QAT_HW_CIPHER_ALGO_AES128 = 3,
- ICP_QAT_HW_CIPHER_ALGO_AES192 = 4,
- ICP_QAT_HW_CIPHER_ALGO_AES256 = 5,
- ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6,
- ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7,
- ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8,
- ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
- ICP_QAT_HW_CIPHER_DELIMITER = 10
-};
-
-enum icp_qat_hw_cipher_mode {
- ICP_QAT_HW_CIPHER_ECB_MODE = 0,
- ICP_QAT_HW_CIPHER_CBC_MODE = 1,
- ICP_QAT_HW_CIPHER_CTR_MODE = 2,
- ICP_QAT_HW_CIPHER_F8_MODE = 3,
- ICP_QAT_HW_CIPHER_XTS_MODE = 6,
- ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7
-};
-
-struct icp_qat_hw_cipher_config {
- uint32_t val;
- uint32_t reserved;
-};
-
-enum icp_qat_hw_cipher_dir {
- ICP_QAT_HW_CIPHER_ENCRYPT = 0,
- ICP_QAT_HW_CIPHER_DECRYPT = 1,
-};
-
-enum icp_qat_hw_auth_op {
- ICP_QAT_HW_AUTH_VERIFY = 0,
- ICP_QAT_HW_AUTH_GENERATE = 1,
-};
-
-enum icp_qat_hw_cipher_convert {
- ICP_QAT_HW_CIPHER_NO_CONVERT = 0,
- ICP_QAT_HW_CIPHER_KEY_CONVERT = 1,
-};
-
-#define QAT_CIPHER_MODE_BITPOS 4
-#define QAT_CIPHER_MODE_MASK 0xF
-#define QAT_CIPHER_ALGO_BITPOS 0
-#define QAT_CIPHER_ALGO_MASK 0xF
-#define QAT_CIPHER_CONVERT_BITPOS 9
-#define QAT_CIPHER_CONVERT_MASK 0x1
-#define QAT_CIPHER_DIR_BITPOS 8
-#define QAT_CIPHER_DIR_MASK 0x1
-#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2
-#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2
-#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \
- (((mode & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \
- ((algo & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \
- ((convert & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \
- ((dir & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS))
-#define ICP_QAT_HW_DES_BLK_SZ 8
-#define ICP_QAT_HW_3DES_BLK_SZ 8
-#define ICP_QAT_HW_NULL_BLK_SZ 8
-#define ICP_QAT_HW_AES_BLK_SZ 16
-#define ICP_QAT_HW_KASUMI_BLK_SZ 8
-#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8
-#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8
-#define ICP_QAT_HW_NULL_KEY_SZ 256
-#define ICP_QAT_HW_DES_KEY_SZ 8
-#define ICP_QAT_HW_3DES_KEY_SZ 24
-#define ICP_QAT_HW_AES_128_KEY_SZ 16
-#define ICP_QAT_HW_AES_192_KEY_SZ 24
-#define ICP_QAT_HW_AES_256_KEY_SZ 32
-#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
- QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
-#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \
- QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
-#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
- QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
-#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
- QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
-#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
- QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
-#define ICP_QAT_HW_KASUMI_KEY_SZ 16
-#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \
- QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
-#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
- QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
-#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
- QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
-#define ICP_QAT_HW_ARC4_KEY_SZ 256
-#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16
-#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16
-#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
-#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
-#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
-
-#define ICP_QAT_HW_CIPHER_MAX_KEY_SZ ICP_QAT_HW_AES_256_F8_KEY_SZ
-
-/* These defines describe position of the bit-fields
- * in the flags byte in B0
- */
-#define ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT 6
-#define ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT 3
-
-#define ICP_QAT_HW_CCM_BUILD_B0_FLAGS(Adata, t, q) \
- ((((Adata) > 0 ? 1 : 0) << ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT) \
- | ((((t) - 2) >> 1) << ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT) \
- | ((q) - 1))
-
-#define ICP_QAT_HW_CCM_NQ_CONST 15
-#define ICP_QAT_HW_CCM_AAD_B0_LEN 16
-#define ICP_QAT_HW_CCM_AAD_LEN_INFO 2
-#define ICP_QAT_HW_CCM_AAD_DATA_OFFSET (ICP_QAT_HW_CCM_AAD_B0_LEN + \
- ICP_QAT_HW_CCM_AAD_LEN_INFO)
-#define ICP_QAT_HW_CCM_AAD_ALIGNMENT 16
-#define ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE 4
-#define ICP_QAT_HW_CCM_NONCE_OFFSET 1
-
-struct icp_qat_hw_cipher_algo_blk {
- struct icp_qat_hw_cipher_config cipher_config;
- uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ];
-} __rte_cache_aligned;
-
-#endif
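
For context: the two *_CONFIG_BUILD macros removed above produce the 32-bit config words placed at the start of the cipher and auth setup blocks. A minimal sketch, assuming AES-128-CBC encryption and SHA-256 in mode 1 (the parameter choices are illustrative, not taken from this diff):

/* Illustrative only: fill the hardware config words using the removed
 * icp_qat_hw.h macros; the SHA-256 compare length reuses the state1 size.
 */
static void example_hw_config(struct icp_qat_hw_cipher_config *cipher,
                              struct icp_qat_hw_auth_config *auth)
{
        cipher->val = ICP_QAT_HW_CIPHER_CONFIG_BUILD(
                        ICP_QAT_HW_CIPHER_CBC_MODE,
                        ICP_QAT_HW_CIPHER_ALGO_AES128,
                        ICP_QAT_HW_CIPHER_NO_CONVERT,
                        ICP_QAT_HW_CIPHER_ENCRYPT);

        auth->config = ICP_QAT_HW_AUTH_CONFIG_BUILD(
                        ICP_QAT_HW_AUTH_MODE1,
                        ICP_QAT_HW_AUTH_ALGO_SHA256,
                        ICP_QAT_HW_SHA256_STATE1_SZ);
}
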
diff --git a/drivers/crypto/qat/qat_adf/qat_algs.h b/drivers/crypto/qat/qat_adf/qat_algs.h
deleted file mode 100644
index 802ba95d..00000000
--- a/drivers/crypto/qat/qat_adf/qat_algs.h
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015-2016 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015-2017 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef _ICP_QAT_ALGS_H_
-#define _ICP_QAT_ALGS_H_
-#include <rte_memory.h>
-#include <rte_crypto.h>
-#include "icp_qat_hw.h"
-#include "icp_qat_fw.h"
-#include "icp_qat_fw_la.h"
-#include "../qat_crypto.h"
-
-/*
- * Key Modifier (KM) value used in KASUMI algorithm in F9 mode to XOR
- * Integrity Key (IK)
- */
-#define KASUMI_F9_KEY_MODIFIER_4_BYTES 0xAAAAAAAA
-
-#define KASUMI_F8_KEY_MODIFIER_4_BYTES 0x55555555
-
-/* 3DES key sizes */
-#define QAT_3DES_KEY_SZ_OPT1 24 /* Keys are independent */
-#define QAT_3DES_KEY_SZ_OPT2 16 /* K3=K1 */
-
-#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
- ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
- ICP_QAT_HW_CIPHER_NO_CONVERT, \
- ICP_QAT_HW_CIPHER_ENCRYPT)
-
-#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
- ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
- ICP_QAT_HW_CIPHER_KEY_CONVERT, \
- ICP_QAT_HW_CIPHER_DECRYPT)
-
-struct qat_alg_buf {
- uint32_t len;
- uint32_t resrvd;
- uint64_t addr;
-} __rte_packed;
-
-enum qat_crypto_proto_flag {
- QAT_CRYPTO_PROTO_FLAG_NONE = 0,
- QAT_CRYPTO_PROTO_FLAG_CCM = 1,
- QAT_CRYPTO_PROTO_FLAG_GCM = 2,
- QAT_CRYPTO_PROTO_FLAG_SNOW3G = 3,
- QAT_CRYPTO_PROTO_FLAG_ZUC = 4
-};
-
-/*
- * Maximum number of SGL entries
- */
-#define QAT_SGL_MAX_NUMBER 16
-
-struct qat_alg_buf_list {
- uint64_t resrvd;
- uint32_t num_bufs;
- uint32_t num_mapped_bufs;
- struct qat_alg_buf bufers[QAT_SGL_MAX_NUMBER];
-} __rte_packed __rte_cache_aligned;
-
-struct qat_crypto_op_cookie {
- struct qat_alg_buf_list qat_sgl_list_src;
- struct qat_alg_buf_list qat_sgl_list_dst;
- rte_iova_t qat_sgl_src_phys_addr;
- rte_iova_t qat_sgl_dst_phys_addr;
-};
-
-/* Common content descriptor */
-struct qat_alg_cd {
- struct icp_qat_hw_cipher_algo_blk cipher;
- struct icp_qat_hw_auth_algo_blk hash;
-} __rte_packed __rte_cache_aligned;
-
-struct qat_session {
- enum icp_qat_fw_la_cmd_id qat_cmd;
- enum icp_qat_hw_cipher_algo qat_cipher_alg;
- enum icp_qat_hw_cipher_dir qat_dir;
- enum icp_qat_hw_cipher_mode qat_mode;
- enum icp_qat_hw_auth_algo qat_hash_alg;
- enum icp_qat_hw_auth_op auth_op;
- void *bpi_ctx;
- struct qat_alg_cd cd;
- uint8_t *cd_cur_ptr;
- rte_iova_t cd_paddr;
- struct icp_qat_fw_la_bulk_req fw_req;
- uint8_t aad_len;
- struct qat_crypto_instance *inst;
- struct {
- uint16_t offset;
- uint16_t length;
- } cipher_iv;
- struct {
- uint16_t offset;
- uint16_t length;
- } auth_iv;
- uint16_t digest_length;
- rte_spinlock_t lock; /* protects this struct */
- enum qat_device_gen min_qat_dev_gen;
-};
-
-int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg);
-
-int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cd,
- uint8_t *enckey,
- uint32_t enckeylen);
-
-int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
- uint8_t *authkey,
- uint32_t authkeylen,
- uint32_t aad_length,
- uint32_t digestsize,
- unsigned int operation);
-
-void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
- enum qat_crypto_proto_flag proto_flags);
-
-int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-int qat_alg_validate_aes_docsisbpi_key(int key_len,
- enum icp_qat_hw_cipher_algo *alg);
-int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg);
-int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
-#endif
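
For context: the prototypes removed above were the session-building API of the old qat_algs layer. A rough sketch of how the key-validation helper and the CBC config macro could fit together when filling a qat_session (hypothetical helper, error handling simplified):

/* Rough sketch (hypothetical helper): validate an AES key length and
 * derive the cipher config word for CBC encryption.
 */
static int example_set_cbc_enc(struct qat_session *session, int key_len)
{
        enum icp_qat_hw_cipher_algo alg;

        if (qat_alg_validate_aes_key(key_len, &alg) != 0)
                return -EINVAL;

        session->qat_cipher_alg = alg;
        session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
        session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
        session->cd.cipher.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
        return 0;
}
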
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
deleted file mode 100644
index 26f854c2..00000000
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ /dev/null
@@ -1,1059 +0,0 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015-2016 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015-2017 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <rte_memcpy.h>
-#include <rte_common.h>
-#include <rte_spinlock.h>
-#include <rte_byteorder.h>
-#include <rte_log.h>
-#include <rte_malloc.h>
-#include <rte_crypto_sym.h>
-
-#include "../qat_logs.h"
-
-#include <openssl/sha.h> /* Needed to calculate pre-compute values */
-#include <openssl/aes.h> /* Needed to calculate pre-compute values */
-#include <openssl/md5.h> /* Needed to calculate pre-compute values */
-
-#include "qat_algs.h"
-
-/* returns block size in bytes per cipher algo */
-int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
-{
- switch (qat_cipher_alg) {
- case ICP_QAT_HW_CIPHER_ALGO_DES:
- return ICP_QAT_HW_DES_BLK_SZ;
- case ICP_QAT_HW_CIPHER_ALGO_3DES:
- return ICP_QAT_HW_3DES_BLK_SZ;
- case ICP_QAT_HW_CIPHER_ALGO_AES128:
- case ICP_QAT_HW_CIPHER_ALGO_AES192:
- case ICP_QAT_HW_CIPHER_ALGO_AES256:
- return ICP_QAT_HW_AES_BLK_SZ;
- default:
- PMD_DRV_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
- return -EFAULT;
- };
- return -EFAULT;
-}
-
-/*
- * Returns size in bytes per hash algo for state1 size field in cd_ctrl
- * This is digest size rounded up to nearest quadword
- */
-static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
-{
- switch (qat_hash_alg) {
- case ICP_QAT_HW_AUTH_ALGO_SHA1:
- return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ,
- QAT_HW_DEFAULT_ALIGNMENT);
- case ICP_QAT_HW_AUTH_ALGO_SHA224:
- return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ,
- QAT_HW_DEFAULT_ALIGNMENT);
- case ICP_QAT_HW_AUTH_ALGO_SHA256:
- return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ,
- QAT_HW_DEFAULT_ALIGNMENT);
- case ICP_QAT_HW_AUTH_ALGO_SHA384:
- return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ,
- QAT_HW_DEFAULT_ALIGNMENT);
- case ICP_QAT_HW_AUTH_ALGO_SHA512:
- return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
- QAT_HW_DEFAULT_ALIGNMENT);
- case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
- return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ,
- QAT_HW_DEFAULT_ALIGNMENT);
- case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
- case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
- return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
- QAT_HW_DEFAULT_ALIGNMENT);
- case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
- return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
- QAT_HW_DEFAULT_ALIGNMENT);
- case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
- return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
- QAT_HW_DEFAULT_ALIGNMENT);
- case ICP_QAT_HW_AUTH_ALGO_MD5:
- return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ,
- QAT_HW_DEFAULT_ALIGNMENT);
- case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
- return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
- QAT_HW_DEFAULT_ALIGNMENT);
- case ICP_QAT_HW_AUTH_ALGO_NULL:
- return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
- QAT_HW_DEFAULT_ALIGNMENT);
- case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
- return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
- QAT_HW_DEFAULT_ALIGNMENT);
- case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
- /* return maximum state1 size in this case */
- return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
- QAT_HW_DEFAULT_ALIGNMENT);
- default:
- PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
- return -EFAULT;
-	}
- return -EFAULT;
-}
-
-/* returns digest size in bytes per hash algo */
-static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
-{
- switch (qat_hash_alg) {
- case ICP_QAT_HW_AUTH_ALGO_SHA1:
- return ICP_QAT_HW_SHA1_STATE1_SZ;
- case ICP_QAT_HW_AUTH_ALGO_SHA224:
- return ICP_QAT_HW_SHA224_STATE1_SZ;
- case ICP_QAT_HW_AUTH_ALGO_SHA256:
- return ICP_QAT_HW_SHA256_STATE1_SZ;
- case ICP_QAT_HW_AUTH_ALGO_SHA384:
- return ICP_QAT_HW_SHA384_STATE1_SZ;
- case ICP_QAT_HW_AUTH_ALGO_SHA512:
- return ICP_QAT_HW_SHA512_STATE1_SZ;
- case ICP_QAT_HW_AUTH_ALGO_MD5:
- return ICP_QAT_HW_MD5_STATE1_SZ;
- case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
- /* return maximum digest size in this case */
- return ICP_QAT_HW_SHA512_STATE1_SZ;
- default:
- PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
- return -EFAULT;
-	}
- return -EFAULT;
-}
-
-/* returns block size in bytes per hash algo */
-static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
-{
- switch (qat_hash_alg) {
- case ICP_QAT_HW_AUTH_ALGO_SHA1:
- return SHA_CBLOCK;
- case ICP_QAT_HW_AUTH_ALGO_SHA224:
- return SHA256_CBLOCK;
- case ICP_QAT_HW_AUTH_ALGO_SHA256:
- return SHA256_CBLOCK;
- case ICP_QAT_HW_AUTH_ALGO_SHA384:
- return SHA512_CBLOCK;
- case ICP_QAT_HW_AUTH_ALGO_SHA512:
- return SHA512_CBLOCK;
- case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
- return 16;
- case ICP_QAT_HW_AUTH_ALGO_MD5:
- return MD5_CBLOCK;
- case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
- /* return maximum block size in this case */
- return SHA512_CBLOCK;
- default:
- PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
- return -EFAULT;
-	}
- return -EFAULT;
-}
-
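-/*
- * Each partial_hash_* helper runs a single transform (compression function)
- * pass over one block of input and copies the intermediate, unfinalised
- * digest state out of the OpenSSL context; partial_hash_compute() converts
- * the SHA states to big-endian for use as HMAC ipad/opad precomputes.
- */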
-static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
-{
- SHA_CTX ctx;
-
- if (!SHA1_Init(&ctx))
- return -EFAULT;
- SHA1_Transform(&ctx, data_in);
- rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
- return 0;
-}
-
-static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
-{
- SHA256_CTX ctx;
-
- if (!SHA224_Init(&ctx))
- return -EFAULT;
- SHA256_Transform(&ctx, data_in);
- rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
- return 0;
-}
-
-static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
-{
- SHA256_CTX ctx;
-
- if (!SHA256_Init(&ctx))
- return -EFAULT;
- SHA256_Transform(&ctx, data_in);
- rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
- return 0;
-}
-
-static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
-{
- SHA512_CTX ctx;
-
- if (!SHA384_Init(&ctx))
- return -EFAULT;
- SHA512_Transform(&ctx, data_in);
- rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
- return 0;
-}
-
-static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
-{
- SHA512_CTX ctx;
-
- if (!SHA512_Init(&ctx))
- return -EFAULT;
- SHA512_Transform(&ctx, data_in);
- rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
- return 0;
-}
-
-static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
-{
- MD5_CTX ctx;
-
- if (!MD5_Init(&ctx))
- return -EFAULT;
- MD5_Transform(&ctx, data_in);
- rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
-
- return 0;
-}
-
-static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
- uint8_t *data_in,
- uint8_t *data_out)
-{
- int digest_size;
- uint8_t digest[qat_hash_get_digest_size(
- ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
- uint32_t *hash_state_out_be32;
- uint64_t *hash_state_out_be64;
- int i;
-
- PMD_INIT_FUNC_TRACE();
- digest_size = qat_hash_get_digest_size(hash_alg);
- if (digest_size <= 0)
- return -EFAULT;
-
- hash_state_out_be32 = (uint32_t *)data_out;
- hash_state_out_be64 = (uint64_t *)data_out;
-
- switch (hash_alg) {
- case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (partial_hash_sha1(data_in, digest))
- return -EFAULT;
- for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
- *hash_state_out_be32 =
- rte_bswap32(*(((uint32_t *)digest)+i));
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA224:
- if (partial_hash_sha224(data_in, digest))
- return -EFAULT;
- for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
- *hash_state_out_be32 =
- rte_bswap32(*(((uint32_t *)digest)+i));
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (partial_hash_sha256(data_in, digest))
- return -EFAULT;
- for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
- *hash_state_out_be32 =
- rte_bswap32(*(((uint32_t *)digest)+i));
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA384:
- if (partial_hash_sha384(data_in, digest))
- return -EFAULT;
- for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
- *hash_state_out_be64 =
- rte_bswap64(*(((uint64_t *)digest)+i));
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (partial_hash_sha512(data_in, digest))
- return -EFAULT;
- for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++)
- *hash_state_out_be64 =
- rte_bswap64(*(((uint64_t *)digest)+i));
- break;
- case ICP_QAT_HW_AUTH_ALGO_MD5:
- if (partial_hash_md5(data_in, data_out))
- return -EFAULT;
- break;
- default:
- PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg);
- return -EFAULT;
- }
-
- return 0;
-}
-#define HMAC_IPAD_VALUE 0x36
-#define HMAC_OPAD_VALUE 0x5c
-#define HASH_XCBC_PRECOMP_KEY_NUM 3
-
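-/*
- * Build the hardware precompute (state) values for the session auth key:
- * - AES-XCBC-MAC: derive K1/K2/K3 by encrypting the fixed seed blocks,
- * - GCM/Galois: compute the hash key H by encrypting an all-zero block,
- * - HMAC algos: XOR the key into ipad/opad blocks and store one compression
- *   pass over each (ipad result as state1, opad result as state2).
- */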
-static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
- const uint8_t *auth_key,
- uint16_t auth_keylen,
- uint8_t *p_state_buf,
- uint16_t *p_state_len)
-{
- int block_size;
- uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
- uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
- int i;
-
- PMD_INIT_FUNC_TRACE();
- if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
- static uint8_t qat_aes_xcbc_key_seed[
- ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
- 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
- 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
- 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
- 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
- };
-
- uint8_t *in = NULL;
- uint8_t *out = p_state_buf;
- int x;
- AES_KEY enc_key;
-
- in = rte_zmalloc("working mem for key",
- ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
- if (in == NULL) {
- PMD_DRV_LOG(ERR, "Failed to alloc memory");
- return -ENOMEM;
- }
-
- rte_memcpy(in, qat_aes_xcbc_key_seed,
- ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
- for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
- if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
- &enc_key) != 0) {
- rte_free(in -
- (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
- memset(out -
- (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
- 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
- return -EFAULT;
- }
- AES_encrypt(in, out, &enc_key);
- in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
- out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
- }
- *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
- rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
- return 0;
- } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
- (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
- uint8_t *in = NULL;
- uint8_t *out = p_state_buf;
- AES_KEY enc_key;
-
- memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ +
- ICP_QAT_HW_GALOIS_LEN_A_SZ +
- ICP_QAT_HW_GALOIS_E_CTR0_SZ);
- in = rte_zmalloc("working mem for key",
- ICP_QAT_HW_GALOIS_H_SZ, 16);
- if (in == NULL) {
- PMD_DRV_LOG(ERR, "Failed to alloc memory");
- return -ENOMEM;
- }
-
- memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
- if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
- &enc_key) != 0) {
-			rte_free(in);
-			return -EFAULT;
- }
- AES_encrypt(in, out, &enc_key);
- *p_state_len = ICP_QAT_HW_GALOIS_H_SZ +
- ICP_QAT_HW_GALOIS_LEN_A_SZ +
- ICP_QAT_HW_GALOIS_E_CTR0_SZ;
- rte_free(in);
- return 0;
- }
-
- block_size = qat_hash_get_block_size(hash_alg);
- if (block_size <= 0)
- return -EFAULT;
- /* init ipad and opad from key and xor with fixed values */
- memset(ipad, 0, block_size);
- memset(opad, 0, block_size);
-
- if (auth_keylen > (unsigned int)block_size) {
- PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen);
- return -EFAULT;
- }
- rte_memcpy(ipad, auth_key, auth_keylen);
- rte_memcpy(opad, auth_key, auth_keylen);
-
- for (i = 0; i < block_size; i++) {
- uint8_t *ipad_ptr = ipad + i;
- uint8_t *opad_ptr = opad + i;
- *ipad_ptr ^= HMAC_IPAD_VALUE;
- *opad_ptr ^= HMAC_OPAD_VALUE;
- }
-
- /* do partial hash of ipad and copy to state1 */
- if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
- memset(ipad, 0, block_size);
- memset(opad, 0, block_size);
- PMD_DRV_LOG(ERR, "ipad precompute failed");
- return -EFAULT;
- }
-
- /*
-	 * State len is a multiple of 8, so it may be larger than the digest.
-	 * Put the partial hash of opad state_len bytes after state1.
- */
- *p_state_len = qat_hash_get_state1_size(hash_alg);
- if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
- memset(ipad, 0, block_size);
- memset(opad, 0, block_size);
- PMD_DRV_LOG(ERR, "opad precompute failed");
- return -EFAULT;
- }
-
- /* don't leave data lying around */
- memset(ipad, 0, block_size);
- memset(opad, 0, block_size);
- return 0;
-}
-
-void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
- enum qat_crypto_proto_flag proto_flags)
-{
- PMD_INIT_FUNC_TRACE();
- header->hdr_flags =
- ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
- header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
- header->comn_req_flags =
- ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
- QAT_COMN_PTR_TYPE_FLAT);
- ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_PARTIAL_NONE);
- ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
- ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
-
- switch (proto_flags) {
- case QAT_CRYPTO_PROTO_FLAG_NONE:
- ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_PROTO);
- break;
- case QAT_CRYPTO_PROTO_FLAG_CCM:
- ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_CCM_PROTO);
- break;
- case QAT_CRYPTO_PROTO_FLAG_GCM:
- ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_GCM_PROTO);
- break;
- case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
- ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_SNOW_3G_PROTO);
- break;
- case QAT_CRYPTO_PROTO_FLAG_ZUC:
- ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_ZUC_3G_PROTO);
- break;
- }
-
- ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_UPDATE_STATE);
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
-}
-
-/*
- * SNOW 3G and ZUC must never use this function; they set their protocol
- * flag directly in both the cipher and auth parts of the content
- * descriptor building functions.
- */
-static enum qat_crypto_proto_flag
-qat_get_crypto_proto_flag(uint16_t flags)
-{
- int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
- enum qat_crypto_proto_flag qat_proto_flag =
- QAT_CRYPTO_PROTO_FLAG_NONE;
-
- switch (proto) {
- case ICP_QAT_FW_LA_GCM_PROTO:
- qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
- break;
- case ICP_QAT_FW_LA_CCM_PROTO:
- qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
- break;
- }
-
- return qat_proto_flag;
-}
-
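-/*
- * Fill in the cipher part of the session's content descriptor and the
- * common fields of its firmware request template.
- */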
-int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
- uint8_t *cipherkey,
- uint32_t cipherkeylen)
-{
- struct icp_qat_hw_cipher_algo_blk *cipher;
- struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
- struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
- struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
- void *ptr = &req_tmpl->cd_ctrl;
- struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
- struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
- enum icp_qat_hw_cipher_convert key_convert;
- enum qat_crypto_proto_flag qat_proto_flag =
- QAT_CRYPTO_PROTO_FLAG_NONE;
- uint32_t total_key_size;
- uint16_t cipher_offset, cd_size;
- uint32_t wordIndex = 0;
- uint32_t *temp_key = NULL;
- PMD_INIT_FUNC_TRACE();
-
- if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) {
- cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
- ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
- ICP_QAT_FW_SLICE_CIPHER);
- ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
- ICP_QAT_FW_SLICE_DRAM_WR);
- ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_RET_AUTH_RES);
- ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
- cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
- } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
- cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
- ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
- ICP_QAT_FW_SLICE_CIPHER);
- ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
- ICP_QAT_FW_SLICE_AUTH);
- ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
- ICP_QAT_FW_SLICE_AUTH);
- ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
- ICP_QAT_FW_SLICE_DRAM_WR);
- cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
- } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
- PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command.");
- return -EFAULT;
- }
-
- if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
- /*
-		 * CTR streaming ciphers are a special case: decrypt == encrypt.
-		 * Override the default values previously set.
- */
- cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
- key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
- } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
- || cdesc->qat_cipher_alg ==
- ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
- key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
- else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
- key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
- else
- key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
-
- if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) {
- total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ +
- ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
- cipher_cd_ctrl->cipher_state_sz =
- ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
- qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
-
- } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
- total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
- cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
- cipher_cd_ctrl->cipher_padding_sz =
- (2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3;
- } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
- total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
- cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
- qat_proto_flag =
- qat_get_crypto_proto_flag(header->serv_specif_flags);
- } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
- total_key_size = ICP_QAT_HW_DES_KEY_SZ;
- cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
- qat_proto_flag =
- qat_get_crypto_proto_flag(header->serv_specif_flags);
- } else if (cdesc->qat_cipher_alg ==
- ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
- total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
- ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
- cipher_cd_ctrl->cipher_state_sz =
- ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
- qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
- cdesc->min_qat_dev_gen = QAT_GEN2;
- } else {
- total_key_size = cipherkeylen;
- cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
- qat_proto_flag =
- qat_get_crypto_proto_flag(header->serv_specif_flags);
- }
- cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
- cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
- cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
-
- header->service_cmd_id = cdesc->qat_cmd;
- qat_alg_init_common_hdr(header, qat_proto_flag);
-
- cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
- cipher->cipher_config.val =
- ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
- cdesc->qat_cipher_alg, key_convert,
- cdesc->qat_dir);
-
- if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
- temp_key = (uint32_t *)(cdesc->cd_cur_ptr +
- sizeof(struct icp_qat_hw_cipher_config)
- + cipherkeylen);
- memcpy(cipher->key, cipherkey, cipherkeylen);
- memcpy(temp_key, cipherkey, cipherkeylen);
-
-		/* XOR the key with the KASUMI F8 key modifier, 4 bytes at a time */
- for (wordIndex = 0; wordIndex < (cipherkeylen >> 2);
- wordIndex++)
- temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;
-
- cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
- cipherkeylen + cipherkeylen;
- } else {
- memcpy(cipher->key, cipherkey, cipherkeylen);
- cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
- cipherkeylen;
- }
-
- if (total_key_size > cipherkeylen) {
- uint32_t padding_size = total_key_size-cipherkeylen;
- if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
- && (cipherkeylen == QAT_3DES_KEY_SZ_OPT2))
-			/* K3 not provided so use K1 as K3 */
- memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size);
- else
- memset(cdesc->cd_cur_ptr, 0, padding_size);
- cdesc->cd_cur_ptr += padding_size;
- }
- cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
- cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
-
- return 0;
-}
-
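-/*
- * Fill in the auth part of the session's content descriptor (hash setup
- * plus state1/state2 precomputes) and the common fields of its firmware
- * request template.
- */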
-int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
- uint8_t *authkey,
- uint32_t authkeylen,
- uint32_t aad_length,
- uint32_t digestsize,
- unsigned int operation)
-{
- struct icp_qat_hw_auth_setup *hash;
- struct icp_qat_hw_cipher_algo_blk *cipherconfig;
- struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
- struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
- struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
- void *ptr = &req_tmpl->cd_ctrl;
- struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
- struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
- struct icp_qat_fw_la_auth_req_params *auth_param =
- (struct icp_qat_fw_la_auth_req_params *)
- ((char *)&req_tmpl->serv_specif_rqpars +
- sizeof(struct icp_qat_fw_la_cipher_req_params));
- uint16_t state1_size = 0, state2_size = 0;
- uint16_t hash_offset, cd_size;
- uint32_t *aad_len = NULL;
- uint32_t wordIndex = 0;
- uint32_t *pTempKey;
- enum qat_crypto_proto_flag qat_proto_flag =
- QAT_CRYPTO_PROTO_FLAG_NONE;
-
- PMD_INIT_FUNC_TRACE();
-
- if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
- ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
- ICP_QAT_FW_SLICE_AUTH);
- ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
- ICP_QAT_FW_SLICE_DRAM_WR);
- cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
- } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
- ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
- ICP_QAT_FW_SLICE_AUTH);
- ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
- ICP_QAT_FW_SLICE_CIPHER);
- ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
- ICP_QAT_FW_SLICE_CIPHER);
- ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
- ICP_QAT_FW_SLICE_DRAM_WR);
- cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
- } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
- PMD_DRV_LOG(ERR, "Invalid param, must be a hash command.");
- return -EFAULT;
- }
-
- if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
- ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_RET_AUTH_RES);
- ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_CMP_AUTH_RES);
- cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
- } else {
- ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_RET_AUTH_RES);
- ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
- cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
- }
-
- /*
- * Setup the inner hash config
- */
- hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
- hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
- hash->auth_config.reserved = 0;
- hash->auth_config.config =
- ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
- cdesc->qat_hash_alg, digestsize);
-
- if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
- || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
- || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3)
- hash->auth_counter.counter = 0;
- else
- hash->auth_counter.counter = rte_bswap32(
- qat_hash_get_block_size(cdesc->qat_hash_alg));
-
- cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup);
-
- /*
- * cd_cur_ptr now points at the state1 information.
- */
- switch (cdesc->qat_hash_alg) {
- case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
- authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
- PMD_DRV_LOG(ERR, "(SHA)precompute failed");
- return -EFAULT;
- }
- state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA224:
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
- authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
- PMD_DRV_LOG(ERR, "(SHA)precompute failed");
- return -EFAULT;
- }
- state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
- authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
- PMD_DRV_LOG(ERR, "(SHA)precompute failed");
- return -EFAULT;
- }
- state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA384:
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
- authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
- PMD_DRV_LOG(ERR, "(SHA)precompute failed");
- return -EFAULT;
- }
- state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
- break;
- case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
- authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
- PMD_DRV_LOG(ERR, "(SHA)precompute failed");
- return -EFAULT;
- }
- state2_size = ICP_QAT_HW_SHA512_STATE2_SZ;
- break;
- case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
- state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
- authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
- &state2_size)) {
- PMD_DRV_LOG(ERR, "(XCBC)precompute failed");
- return -EFAULT;
- }
- break;
- case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
- case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
- qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
- state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
- if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
- authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
- &state2_size)) {
- PMD_DRV_LOG(ERR, "(GCM)precompute failed");
- return -EFAULT;
- }
- /*
-		 * Write the AAD length into bytes 16-19 of state2 in
-		 * big-endian format (the field itself is 8 bytes).
- */
- auth_param->u2.aad_sz =
- RTE_ALIGN_CEIL(aad_length, 16);
- auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
-
- aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
- ICP_QAT_HW_GALOIS_128_STATE1_SZ +
- ICP_QAT_HW_GALOIS_H_SZ);
- *aad_len = rte_bswap32(aad_length);
- cdesc->aad_len = aad_length;
- break;
- case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
- qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
- state1_size = qat_hash_get_state1_size(
- ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
- state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
- memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
-
- cipherconfig = (struct icp_qat_hw_cipher_algo_blk *)
- (cdesc->cd_cur_ptr + state1_size + state2_size);
- cipherconfig->cipher_config.val =
- ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE,
- ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2,
- ICP_QAT_HW_CIPHER_KEY_CONVERT,
- ICP_QAT_HW_CIPHER_ENCRYPT);
- memcpy(cipherconfig->key, authkey, authkeylen);
- memset(cipherconfig->key + authkeylen,
- 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
- cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
- authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
- auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
- break;
- case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
- hash->auth_config.config =
- ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
- cdesc->qat_hash_alg, digestsize);
- qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
- state1_size = qat_hash_get_state1_size(
- ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
- state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
- memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
- + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);
-
- memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
- cdesc->cd_cur_ptr += state1_size + state2_size
- + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
- auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
- cdesc->min_qat_dev_gen = QAT_GEN2;
-
- break;
- case ICP_QAT_HW_AUTH_ALGO_MD5:
- if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
- authkey, authkeylen, cdesc->cd_cur_ptr,
- &state1_size)) {
- PMD_DRV_LOG(ERR, "(MD5)precompute failed");
- return -EFAULT;
- }
- state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
- break;
- case ICP_QAT_HW_AUTH_ALGO_NULL:
- state1_size = qat_hash_get_state1_size(
- ICP_QAT_HW_AUTH_ALGO_NULL);
- state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
- break;
- case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
- qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
- state1_size = qat_hash_get_state1_size(
- ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
- state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
- ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;
-
- if (aad_length > 0) {
- aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
- ICP_QAT_HW_CCM_AAD_LEN_INFO;
- auth_param->u2.aad_sz =
- RTE_ALIGN_CEIL(aad_length,
- ICP_QAT_HW_CCM_AAD_ALIGNMENT);
- } else {
- auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
- }
-
- cdesc->aad_len = aad_length;
- hash->auth_counter.counter = 0;
-
- hash_cd_ctrl->outer_prefix_sz = digestsize;
- auth_param->hash_state_sz = digestsize;
-
- memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
- break;
- case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
- state1_size = qat_hash_get_state1_size(
- ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
- state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ;
- memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size);
- pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size
- + authkeylen);
- /*
- * The Inner Hash Initial State2 block must contain IK
- * (Initialisation Key), followed by IK XOR-ed with KM
- * (Key Modifier): IK||(IK^KM).
- */
- /* write the auth key */
- memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
- /* initialise temp key with auth key */
- memcpy(pTempKey, authkey, authkeylen);
-		/* XOR the key with the KASUMI F9 key modifier, 4 bytes at a time */
- for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++)
- pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES;
- break;
- default:
- PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg);
- return -EFAULT;
- }
-
- /* Request template setup */
- qat_alg_init_common_hdr(header, qat_proto_flag);
- header->service_cmd_id = cdesc->qat_cmd;
-
- /* Auth CD config setup */
- hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3;
- hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
- hash_cd_ctrl->inner_res_sz = digestsize;
- hash_cd_ctrl->final_sz = digestsize;
- hash_cd_ctrl->inner_state1_sz = state1_size;
- auth_param->auth_res_sz = digestsize;
-
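-	/*
-	 * state2 follows the auth setup block and state1 (padded to 8 bytes)
-	 * within the content descriptor; the offset is in quadwords.
-	 */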
- hash_cd_ctrl->inner_state2_sz = state2_size;
- hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
- ((sizeof(struct icp_qat_hw_auth_setup) +
- RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8))
- >> 3);
-
- cdesc->cd_cur_ptr += state1_size + state2_size;
- cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
-
- cd_pars->u.s.content_desc_addr = cdesc->cd_paddr;
- cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
-
- return 0;
-}
-
-int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
-{
- switch (key_len) {
- case ICP_QAT_HW_AES_128_KEY_SZ:
- *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
- break;
- case ICP_QAT_HW_AES_192_KEY_SZ:
- *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
- break;
- case ICP_QAT_HW_AES_256_KEY_SZ:
- *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-int qat_alg_validate_aes_docsisbpi_key(int key_len,
- enum icp_qat_hw_cipher_algo *alg)
-{
- switch (key_len) {
- case ICP_QAT_HW_AES_128_KEY_SZ:
- *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
-{
- switch (key_len) {
- case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ:
- *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
-{
- switch (key_len) {
- case ICP_QAT_HW_KASUMI_KEY_SZ:
- *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
-{
- switch (key_len) {
- case ICP_QAT_HW_DES_KEY_SZ:
- *alg = ICP_QAT_HW_CIPHER_ALGO_DES;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
-{
- switch (key_len) {
- case QAT_3DES_KEY_SZ_OPT1:
- case QAT_3DES_KEY_SZ_OPT2:
- *alg = ICP_QAT_HW_CIPHER_ALGO_3DES;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
-{
- switch (key_len) {
- case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
- *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}