From b63264c8342e6a1b6971c79550d2af2024b6a4de Mon Sep 17 00:00:00 2001
From: Luca Boccassi
Date: Tue, 14 Aug 2018 18:52:30 +0100
Subject: New upstream version 18.08

Change-Id: I32fdf5e5016556d9c0a6d88ddaf1fc468961790a
Signed-off-by: Luca Boccassi
---
 drivers/common/qat/Makefile                        |  66 +++
 drivers/common/qat/meson.build                     |  14 +
 .../qat/qat_adf/adf_transport_access_macros.h      | 136 +++++
 drivers/common/qat/qat_adf/icp_qat_fw.h            | 318 ++++++++++
 drivers/common/qat/qat_adf/icp_qat_fw_comp.h       | 482 ++++++++++++++++
 drivers/common/qat/qat_adf/icp_qat_fw_la.h         | 361 ++++++++++++
 drivers/common/qat/qat_adf/icp_qat_hw.h            | 386 +++++++++++++
 drivers/common/qat/qat_common.c                    | 123 ++++
 drivers/common/qat/qat_common.h                    |  79 +++
 drivers/common/qat/qat_device.c                    | 279 +++++++++
 drivers/common/qat/qat_device.h                    | 102 ++++
 drivers/common/qat/qat_logs.c                      |  38 ++
 drivers/common/qat/qat_logs.h                      |  34 ++
 drivers/common/qat/qat_qp.c                        | 642 +++++++++++++++++++++
 drivers/common/qat/qat_qp.h                        | 111 ++++
 15 files changed, 3171 insertions(+)
 create mode 100644 drivers/common/qat/Makefile
 create mode 100644 drivers/common/qat/meson.build
 create mode 100644 drivers/common/qat/qat_adf/adf_transport_access_macros.h
 create mode 100644 drivers/common/qat/qat_adf/icp_qat_fw.h
 create mode 100644 drivers/common/qat/qat_adf/icp_qat_fw_comp.h
 create mode 100644 drivers/common/qat/qat_adf/icp_qat_fw_la.h
 create mode 100644 drivers/common/qat/qat_adf/icp_qat_hw.h
 create mode 100644 drivers/common/qat/qat_common.c
 create mode 100644 drivers/common/qat/qat_common.h
 create mode 100644 drivers/common/qat/qat_device.c
 create mode 100644 drivers/common/qat/qat_device.h
 create mode 100644 drivers/common/qat/qat_logs.c
 create mode 100644 drivers/common/qat/qat_logs.h
 create mode 100644 drivers/common/qat/qat_qp.c
 create mode 100644 drivers/common/qat/qat_qp.h

(limited to 'drivers/common/qat')

diff --git a/drivers/common/qat/Makefile b/drivers/common/qat/Makefile
new file mode 100644
index 00000000..c68a032a
--- /dev/null
+++ b/drivers/common/qat/Makefile
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2015-2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# build directories
+QAT_CRYPTO_DIR := $(RTE_SDK)/drivers/crypto/qat
+QAT_COMPRESS_DIR := $(RTE_SDK)/drivers/compress/qat
+VPATH=$(QAT_CRYPTO_DIR):$(QAT_COMPRESS_DIR)
+
+# external library include paths
+CFLAGS += -I$(SRCDIR)/qat_adf
+CFLAGS += -I$(SRCDIR)
+CFLAGS += -I$(QAT_CRYPTO_DIR)
+CFLAGS += -I$(QAT_COMPRESS_DIR)
+
+
+ifeq ($(CONFIG_RTE_LIBRTE_COMPRESSDEV),y)
+	CFLAGS += -DALLOW_EXPERIMENTAL_API
+	LDLIBS += -lrte_compressdev
+	SRCS-y += qat_comp.c
+	SRCS-y += qat_comp_pmd.c
+	build_qat = yes
+endif
+
+# library symmetric crypto source files
+ifeq ($(CONFIG_RTE_LIBRTE_CRYPTODEV),y)
+ifeq ($(CONFIG_RTE_LIBRTE_PMD_QAT_SYM),y)
+	LDLIBS += -lrte_cryptodev
+	LDLIBS += -lcrypto
+	CFLAGS += -DBUILD_QAT_SYM
+	SRCS-y += qat_sym.c
+	SRCS-y += qat_sym_session.c
+	SRCS-y += qat_sym_pmd.c
+	build_qat = yes
+endif
+endif
+
+ifdef build_qat
+
+	# library name
+	LIB = librte_pmd_qat.a
+
+	# library version
+	LIBABIVER := 1
+	# build flags
+	CFLAGS += $(WERROR_FLAGS)
+	CFLAGS += -O3
+
+	# library common source files
+	SRCS-y += qat_device.c
+	SRCS-y += qat_common.c
+	SRCS-y += qat_logs.c
+	SRCS-y += qat_qp.c
+
+	LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+	LDLIBS += -lrte_pci -lrte_bus_pci
+
+	# export include files
+	SYMLINK-y-include +=
+
+	# versioning export map
+	EXPORT_MAP := ../../compress/qat/rte_pmd_qat_version.map
+endif
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/common/qat/meson.build b/drivers/common/qat/meson.build
new file mode 100644
index 00000000..80b6b25a
--- /dev/null
+++ b/drivers/common/qat/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017-2018 Intel Corporation
+
+# This does not build a driver, but instead holds common files for
+# the crypto and compression drivers.
+build = false
+qat_deps = ['bus_pci']
+qat_sources = files('qat_common.c',
+		'qat_qp.c',
+		'qat_device.c',
+		'qat_logs.c')
+qat_includes = [include_directories('.', 'qat_adf')]
+qat_ext_deps = []
+qat_cflags = []
diff --git a/drivers/common/qat/qat_adf/adf_transport_access_macros.h b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
new file mode 100644
index 00000000..1eef5513
--- /dev/null
+++ b/drivers/common/qat/qat_adf/adf_transport_access_macros.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
+#define ADF_TRANSPORT_ACCESS_MACROS_H
+
+#include <rte_io.h>
+
+/* CSR write macro */
+#define ADF_CSR_WR(csrAddr, csrOffset, val) \
+	rte_write32(val, (((uint8_t *)csrAddr) + csrOffset))
+
+/* CSR read macro */
+#define ADF_CSR_RD(csrAddr, csrOffset) \
+	rte_read32((((uint8_t *)csrAddr) + csrOffset))
+
+#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
+#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
+#define ADF_RING_CSR_RING_CONFIG 0x000
+#define ADF_RING_CSR_RING_LBASE 0x040
+#define ADF_RING_CSR_RING_UBASE 0x080
+#define ADF_RING_CSR_RING_HEAD 0x0C0
+#define ADF_RING_CSR_RING_TAIL 0x100
+#define ADF_RING_CSR_E_STAT 0x14C
+#define ADF_RING_CSR_INT_SRCSEL 0x174
+#define ADF_RING_CSR_INT_SRCSEL_2 0x178
+#define ADF_RING_CSR_INT_COL_EN 0x17C
+#define ADF_RING_CSR_INT_COL_CTL 0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
+#define ADF_RING_BUNDLE_SIZE 0x1000
+#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A
+#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05
+#define ADF_COALESCING_MIN_TIME 0x1FF
+#define ADF_COALESCING_MAX_TIME 0xFFFFF
+#define ADF_COALESCING_DEF_TIME 0x27FF
+#define ADF_RING_NEAR_WATERMARK_512 0x08
+#define ADF_RING_NEAR_WATERMARK_0 0x00
+#define ADF_RING_EMPTY_SIG 0x7F7F7F7F
+#define ADF_RING_EMPTY_SIG_BYTE 0x7F
+
+/* Valid internal ring size values */
+#define ADF_RING_SIZE_128 0x01
+#define ADF_RING_SIZE_256 0x02
+#define ADF_RING_SIZE_512 0x03
+#define ADF_RING_SIZE_4K 0x06
+#define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_4M 0x10
+#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
+#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
+#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
+
+/* Maximum number of qps on a device for any service type */
+#define ADF_MAX_QPS_ON_ANY_SERVICE 2
+#define ADF_RING_DIR_TX 0
+#define ADF_RING_DIR_RX 1
+
+/* Valid internal msg size values */
+#define ADF_MSG_SIZE_32 0x01
+#define ADF_MSG_SIZE_64 0x02
+#define ADF_MSG_SIZE_128 0x04
+#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
+#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
+
+/* Size to bytes conversion macros for ring and msg size values */
+#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
+#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
+#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
+#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
+
+/* Minimum ring buffer size for memory allocation */
+#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ?
\ + ADF_RING_SIZE_4K : SIZE) +#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6) +#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \ + SIZE) & ~0x4) +/* Max outstanding requests */ +#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \ + ((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1) +#define BUILD_RING_CONFIG(size) \ + ((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \ + | (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \ + | size) +#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \ + ((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM) \ + | (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \ + | size) +#define BUILD_RING_BASE_ADDR(addr, size) \ + ((addr >> 6) & (0xFFFFFFFFFFFFFFFFULL << size)) +#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \ + ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_RING_HEAD + (ring << 2)) +#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ + ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_RING_TAIL + (ring << 2)) +#define READ_CSR_E_STAT(csr_base_addr, bank) \ + ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_E_STAT) +#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_RING_CONFIG + (ring << 2), value) +#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ +do { \ + uint32_t l_base = 0, u_base = 0; \ + l_base = (uint32_t)(value & 0xFFFFFFFF); \ + u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_RING_UBASE + (ring << 2), u_base); \ +} while (0) +#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_RING_HEAD + (ring << 2), value) +#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_RING_TAIL + (ring << 2), value) +#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ +do { \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \ +} while (0) +#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_INT_COL_EN, value) +#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_INT_COL_CTL, \ + ADF_RING_CSR_INT_COL_CTL_ENABLE | value) +#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ + ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ + ADF_RING_CSR_INT_FLAG_AND_COL, value) + +#endif /*ADF_TRANSPORT_ACCESS_MACROS_H */ diff --git a/drivers/common/qat/qat_adf/icp_qat_fw.h b/drivers/common/qat/qat_adf/icp_qat_fw.h new file mode 100644 index 00000000..8f7cb37b --- /dev/null +++ b/drivers/common/qat/qat_adf/icp_qat_fw.h @@ -0,0 +1,318 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2015-2018 Intel Corporation + */ +#ifndef _ICP_QAT_FW_H_ +#define _ICP_QAT_FW_H_ +#include +#include "icp_qat_hw.h" + +#define QAT_FIELD_SET(flags, val, bitpos, 
mask) \ +{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \ + (((val) & (mask)) << (bitpos))) ; } + +#define QAT_FIELD_GET(flags, bitpos, mask) \ + (((flags) >> (bitpos)) & (mask)) + +#define ICP_QAT_FW_REQ_DEFAULT_SZ 128 +#define ICP_QAT_FW_RESP_DEFAULT_SZ 32 +#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8 +#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF +#define ICP_QAT_FW_NUM_LONGWORDS_1 1 +#define ICP_QAT_FW_NUM_LONGWORDS_2 2 +#define ICP_QAT_FW_NUM_LONGWORDS_3 3 +#define ICP_QAT_FW_NUM_LONGWORDS_4 4 +#define ICP_QAT_FW_NUM_LONGWORDS_5 5 +#define ICP_QAT_FW_NUM_LONGWORDS_6 6 +#define ICP_QAT_FW_NUM_LONGWORDS_7 7 +#define ICP_QAT_FW_NUM_LONGWORDS_10 10 +#define ICP_QAT_FW_NUM_LONGWORDS_13 13 +#define ICP_QAT_FW_NULL_REQ_SERV_ID 1 + +enum icp_qat_fw_comn_resp_serv_id { + ICP_QAT_FW_COMN_RESP_SERV_NULL, + ICP_QAT_FW_COMN_RESP_SERV_CPM_FW, + ICP_QAT_FW_COMN_RESP_SERV_DELIMITER +}; + +enum icp_qat_fw_comn_request_id { + ICP_QAT_FW_COMN_REQ_NULL = 0, + ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3, + ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4, + ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7, + ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9, + ICP_QAT_FW_COMN_REQ_DELIMITER +}; + +struct icp_qat_fw_comn_req_hdr_cd_pars { + union { + struct { + uint64_t content_desc_addr; + uint16_t content_desc_resrvd1; + uint8_t content_desc_params_sz; + uint8_t content_desc_hdr_resrvd2; + uint32_t content_desc_resrvd3; + } s; + struct { + uint32_t serv_specif_fields[4]; + } s1; + } u; +}; + +struct icp_qat_fw_comn_req_mid { + uint64_t opaque_data; + uint64_t src_data_addr; + uint64_t dest_data_addr; + uint32_t src_length; + uint32_t dst_length; +}; + +struct icp_qat_fw_comn_req_cd_ctrl { + uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5]; +}; + +struct icp_qat_fw_comn_req_hdr { + uint8_t resrvd1; + uint8_t service_cmd_id; + uint8_t service_type; + uint8_t hdr_flags; + uint16_t serv_specif_flags; + uint16_t comn_req_flags; +}; + +struct icp_qat_fw_comn_req_rqpars { + uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13]; +}; + +struct icp_qat_fw_comn_req { + struct icp_qat_fw_comn_req_hdr comn_hdr; + struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars; + struct icp_qat_fw_comn_req_mid comn_mid; + struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars; + struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl; +}; + +struct icp_qat_fw_comn_error { + uint8_t xlat_err_code; + uint8_t cmp_err_code; +}; + +struct icp_qat_fw_comn_resp_hdr { + uint8_t resrvd1; + uint8_t service_id; + uint8_t response_type; + uint8_t hdr_flags; + struct icp_qat_fw_comn_error comn_error; + uint8_t comn_status; + uint8_t cmd_id; +}; + +struct icp_qat_fw_comn_resp { + struct icp_qat_fw_comn_resp_hdr comn_hdr; + uint64_t opaque_data; + uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; +}; + +#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1 +#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0 +#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7 +#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1 +#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F +#define ICP_QAT_FW_COMN_CNV_FLAG_BITPOS 6 +#define ICP_QAT_FW_COMN_CNV_FLAG_MASK 0x1 +#define ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS 5 +#define ICP_QAT_FW_COMN_CNVNR_FLAG_MASK 0x1 + +#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \ + icp_qat_fw_comn_req_hdr_t.service_type + +#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \ + icp_qat_fw_comn_req_hdr_t.service_type = val + +#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \ + icp_qat_fw_comn_req_hdr_t.service_cmd_id + +#define 
ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \ + icp_qat_fw_comn_req_hdr_t.service_cmd_id = val + +#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \ + ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags) + +#define ICP_QAT_FW_COMN_HDR_CNVNR_FLAG_GET(hdr_flags) \ + QAT_FIELD_GET(hdr_flags, \ + ICP_QAT_FW_COMN_CNVNR_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_CNVNR_FLAG_MASK) + +#define ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(hdr_flags) \ + QAT_FIELD_GET(hdr_flags, \ + ICP_QAT_FW_COMN_CNV_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_CNV_FLAG_MASK) + +#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \ + ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) + +#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \ + QAT_FIELD_GET(hdr_flags, \ + ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_VALID_FLAG_MASK) + +#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \ + (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK) + +#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \ + QAT_FIELD_SET((hdr_t.hdr_flags), (val), \ + ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \ + ICP_QAT_FW_COMN_VALID_FLAG_MASK) + +#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \ + (((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \ + ICP_QAT_FW_COMN_VALID_FLAG_BITPOS) + +#define QAT_COMN_PTR_TYPE_BITPOS 0 +#define QAT_COMN_PTR_TYPE_MASK 0x1 +#define QAT_COMN_CD_FLD_TYPE_BITPOS 1 +#define QAT_COMN_CD_FLD_TYPE_MASK 0x1 +#define QAT_COMN_PTR_TYPE_FLAT 0x0 +#define QAT_COMN_PTR_TYPE_SGL 0x1 +#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0 +#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1 + +#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \ + ((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \ + | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS)) + +#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \ + QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK) + +#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \ + QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \ + QAT_COMN_CD_FLD_TYPE_MASK) + +#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \ + QAT_COMN_PTR_TYPE_MASK) + +#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \ + QAT_COMN_CD_FLD_TYPE_MASK) + +#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4 +#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0 +#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0 +#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F + +#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \ + ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \ + >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) + +#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ + { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \ + & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ + ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK)); } + +#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \ + (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK) + +#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \ + { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ + ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); } + +#define ICP_QAT_FW_COMN_NEXT_ID_SET_2(next_curr_id, val) \ + do { \ + (next_curr_id) = \ + (((next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ + (((val) << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) & \ + ICP_QAT_FW_COMN_NEXT_ID_MASK)) \ + } while (0) + +#define ICP_QAT_FW_COMN_CURR_ID_SET_2(next_curr_id, val) \ + do { \ + (next_curr_id) = \ + 
(((next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ + ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) \ + } while (0) + +#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7 +#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1 +#define QAT_COMN_RESP_PKE_STATUS_BITPOS 6 +#define QAT_COMN_RESP_PKE_STATUS_MASK 0x1 +#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5 +#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1 +#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4 +#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1 +#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3 +#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1 +#define QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS 2 +#define QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK 0x1 +#define QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS 0 +#define QAT_COMN_RESP_XLT_WA_APPLIED_MASK 0x1 + +#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \ + QAT_COMN_RESP_CRYPTO_STATUS_MASK) + +#define ICP_QAT_FW_COMN_RESP_PKE_STAT_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_PKE_STATUS_BITPOS, \ + QAT_COMN_RESP_PKE_STATUS_MASK) + +#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \ + QAT_COMN_RESP_CMP_STATUS_MASK) + +#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \ + QAT_COMN_RESP_XLAT_STATUS_MASK) + +#define ICP_QAT_FW_COMN_RESP_XLT_WA_APPLIED_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_XLT_WA_APPLIED_BITPOS, \ + QAT_COMN_RESP_XLT_WA_APPLIED_MASK) + +#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \ + QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) + +#define ICP_QAT_FW_COMN_RESP_UNSUPPORTED_REQUEST_STAT_GET(status) \ + QAT_FIELD_GET(status, QAT_COMN_RESP_UNSUPPORTED_REQUEST_BITPOS, \ + QAT_COMN_RESP_UNSUPPORTED_REQUEST_MASK) + +#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0 +#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1 +#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0 +#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1 +#define ERR_CODE_NO_ERROR 0 +#define ERR_CODE_INVALID_BLOCK_TYPE -1 +#define ERR_CODE_NO_MATCH_ONES_COMP -2 +#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3 +#define ERR_CODE_INCOMPLETE_LEN -4 +#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5 +#define ERR_CODE_RPT_GT_SPEC_LEN -6 +#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7 +#define ERR_CODE_INV_DIS_CODE_LEN -8 +#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9 +#define ERR_CODE_DIS_TOO_FAR_BACK -10 +#define ERR_CODE_OVERFLOW_ERROR -11 +#define ERR_CODE_SOFT_ERROR -12 +#define ERR_CODE_FATAL_ERROR -13 +#define ERR_CODE_COMP_OUTPUT_CORRUPTION -14 +#define ERR_CODE_HW_INCOMPLETE_FILE -15 +#define ERR_CODE_SSM_ERROR -16 +#define ERR_CODE_ENDPOINT_ERROR -17 +#define ERR_CODE_CNV_ERROR -18 +#define ERR_CODE_EMPTY_DYM_BLOCK -19 +#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_INVALID_HANDLE -20 +#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_HMAC_FAILED -21 +#define ERR_CODE_KPT_CRYPTO_SERVICE_FAIL_INVALID_WRAPPING_ALGO -22 +#define ERR_CODE_KPT_DRNG_SEED_NOT_LOAD -23 + +enum icp_qat_fw_slice { + ICP_QAT_FW_SLICE_NULL = 0, + ICP_QAT_FW_SLICE_CIPHER = 1, + ICP_QAT_FW_SLICE_AUTH = 2, + ICP_QAT_FW_SLICE_DRAM_RD = 3, + ICP_QAT_FW_SLICE_DRAM_WR = 4, + ICP_QAT_FW_SLICE_COMP = 5, + ICP_QAT_FW_SLICE_XLAT = 6, + ICP_QAT_FW_SLICE_DELIMITER +}; +#endif diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_comp.h b/drivers/common/qat/qat_adf/icp_qat_fw_comp.h new file mode 100644 index 00000000..81381772 --- /dev/null +++ 
b/drivers/common/qat/qat_adf/icp_qat_fw_comp.h @@ -0,0 +1,482 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2015-2018 Intel Corporation + */ +#ifndef _ICP_QAT_FW_COMP_H_ +#define _ICP_QAT_FW_COMP_H_ + +#include "icp_qat_fw.h" + +enum icp_qat_fw_comp_cmd_id { + ICP_QAT_FW_COMP_CMD_STATIC = 0, + /*!< Static Compress Request */ + + ICP_QAT_FW_COMP_CMD_DYNAMIC = 1, + /*!< Dynamic Compress Request */ + + ICP_QAT_FW_COMP_CMD_DECOMPRESS = 2, + /*!< Decompress Request */ + + ICP_QAT_FW_COMP_CMD_DELIMITER + /**< Delimiter type */ +}; + +/**< Flag usage */ + +#define ICP_QAT_FW_COMP_STATELESS_SESSION 0 +/**< @ingroup icp_qat_fw_comp + * Flag representing that session is stateless + */ + +#define ICP_QAT_FW_COMP_STATEFUL_SESSION 1 +/**< @ingroup icp_qat_fw_comp + * Flag representing that session is stateful + */ + +#define ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST 0 +/**< @ingroup icp_qat_fw_comp + * Flag representing that autoselectbest is NOT used + */ + +#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST 1 +/**< @ingroup icp_qat_fw_comp + * Flag representing that autoselectbest is used + */ + +#define ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST 0 +/**< @ingroup icp_qat_fw_comp + * Flag representing that enhanced autoselectbest is NOT used + */ + +#define ICP_QAT_FW_COMP_ENH_AUTO_SELECT_BEST 1 +/**< @ingroup icp_qat_fw_comp + * Flag representing that enhanced autoselectbest is used + */ + +#define ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 0 +/**< @ingroup icp_qat_fw_comp + * Flag representing that enhanced autoselectbest is NOT used + */ + +#define ICP_QAT_FW_COMP_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST 1 +/**< @ingroup icp_qat_fw_comp + * Flag representing that enhanced autoselectbest is used + */ + +#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF 1 +/**< @ingroup icp_qat_fw_comp + * Flag representing secure RAM from being used as + * an intermediate buffer is DISABLED. + */ + +#define ICP_QAT_FW_COMP_ENABLE_SECURE_RAM_USED_AS_INTMD_BUF 0 +/**< @ingroup icp_qat_fw_comp + * Flag representing secure RAM from being used as + * an intermediate buffer is ENABLED. + */ + +/**< Flag mask & bit position */ + +#define ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS 2 +/**< @ingroup icp_qat_fw_comp + * Starting bit position for the session type + */ + +#define ICP_QAT_FW_COMP_SESSION_TYPE_MASK 0x1 +/**< @ingroup icp_qat_fw_comp + * One bit mask used to determine the session type + */ + +#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS 3 +/**< @ingroup icp_qat_fw_comp + * Starting bit position for auto select best + */ + +#define ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK 0x1 +/**< @ingroup icp_qat_fw_comp + * One bit mask for auto select best + */ + +#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS 4 +/**< @ingroup icp_qat_fw_comp + * Starting bit position for enhanced auto select best + */ + +#define ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK 0x1 +/**< @ingroup icp_qat_fw_comp + * One bit mask for enhanced auto select best + */ + +#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS 5 +/**< @ingroup icp_qat_fw_comp + * Starting bit position for disabling type zero header write back + * when Enhanced autoselect best is enabled. If set firmware does + * not return type0 store block header, only copies src to dest. 
+ * (if best output is Type0) + */ + +#define ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK 0x1 +/**< @ingroup icp_qat_fw_comp + * One bit mask for auto select best + */ + +#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS 7 +/**< @ingroup icp_qat_fw_comp + * Starting bit position for flag used to disable secure ram from + * being used as an intermediate buffer. + */ + +#define ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK 0x1 +/**< @ingroup icp_qat_fw_comp + * One bit mask for disable secure ram for use as an intermediate + * buffer. + */ + +#define ICP_QAT_FW_COMP_FLAGS_BUILD(sesstype, autoselect, enhanced_asb, \ + ret_uncomp, secure_ram) \ + ((((sesstype)&ICP_QAT_FW_COMP_SESSION_TYPE_MASK) \ + << ICP_QAT_FW_COMP_SESSION_TYPE_BITPOS) | \ + (((autoselect)&ICP_QAT_FW_COMP_AUTO_SELECT_BEST_MASK) \ + << ICP_QAT_FW_COMP_AUTO_SELECT_BEST_BITPOS) | \ + (((enhanced_asb)&ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_MASK) \ + << ICP_QAT_FW_COMP_ENHANCED_AUTO_SELECT_BEST_BITPOS) | \ + (((ret_uncomp)&ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_MASK) \ + << ICP_QAT_FW_COMP_RET_DISABLE_TYPE0_HEADER_DATA_BITPOS) | \ + (((secure_ram)&ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_MASK) \ + << ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_AS_INTMD_BUF_BITPOS)) + +union icp_qat_fw_comp_req_hdr_cd_pars { + /**< LWs 2-5 */ + struct { + uint64_t content_desc_addr; + /**< Address of the content descriptor */ + + uint16_t content_desc_resrvd1; + /**< Content descriptor reserved field */ + + uint8_t content_desc_params_sz; + /**< Size of the content descriptor parameters in quad words. + * These parameters describe the session setup configuration + * info for the slices that this request relies upon i.e. + * the configuration word and cipher key needed by the cipher + * slice if there is a request for cipher processing. + */ + + uint8_t content_desc_hdr_resrvd2; + /**< Content descriptor reserved field */ + + uint32_t content_desc_resrvd3; + /**< Content descriptor reserved field */ + } s; + + struct { + uint32_t comp_slice_cfg_word[ICP_QAT_FW_NUM_LONGWORDS_2]; + /* Compression Slice Config Word */ + + uint32_t content_desc_resrvd4; + /**< Content descriptor reserved field */ + + } sl; + +}; + +struct icp_qat_fw_comp_req_params { + /**< LW 14 */ + uint32_t comp_len; + /**< Size of input to process in bytes Note: Only EOP requests can be + * odd for decompression. 
IA must set LSB to zero for odd sized + * intermediate inputs + */ + + /**< LW 15 */ + uint32_t out_buffer_sz; + /**< Size of output buffer in bytes */ + + /**< LW 16 */ + uint32_t initial_crc32; + /**< CRC of previously processed bytes */ + + /**< LW 17 */ + uint32_t initial_adler; + /**< Adler of previously processed bytes */ + + /**< LW 18 */ + uint32_t req_par_flags; + + /**< LW 19 */ + uint32_t rsrvd; +}; + +#define ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(sop, eop, bfinal, cnv, cnvnr) \ + ((((sop)&ICP_QAT_FW_COMP_SOP_MASK) << ICP_QAT_FW_COMP_SOP_BITPOS) | \ + (((eop)&ICP_QAT_FW_COMP_EOP_MASK) << ICP_QAT_FW_COMP_EOP_BITPOS) | \ + (((bfinal)&ICP_QAT_FW_COMP_BFINAL_MASK) \ + << ICP_QAT_FW_COMP_BFINAL_BITPOS) | \ + ((cnv & ICP_QAT_FW_COMP_CNV_MASK) << ICP_QAT_FW_COMP_CNV_BITPOS) | \ + ((cnvnr & ICP_QAT_FW_COMP_CNV_RECOVERY_MASK) \ + << ICP_QAT_FW_COMP_CNV_RECOVERY_BITPOS)) + +#define ICP_QAT_FW_COMP_NOT_SOP 0 +/**< @ingroup icp_qat_fw_comp + * Flag representing that a request is NOT Start of Packet + */ + +#define ICP_QAT_FW_COMP_SOP 1 +/**< @ingroup icp_qat_fw_comp + * Flag representing that a request IS Start of Packet + */ + +#define ICP_QAT_FW_COMP_NOT_EOP 0 +/**< @ingroup icp_qat_fw_comp + * Flag representing that a request is NOT Start of Packet + */ + +#define ICP_QAT_FW_COMP_EOP 1 +/**< @ingroup icp_qat_fw_comp + * Flag representing that a request IS End of Packet + */ + +#define ICP_QAT_FW_COMP_NOT_BFINAL 0 +/**< @ingroup icp_qat_fw_comp + * Flag representing to indicate firmware this is not the last block + */ + +#define ICP_QAT_FW_COMP_BFINAL 1 +/**< @ingroup icp_qat_fw_comp + * Flag representing to indicate firmware this is the last block + */ + +#define ICP_QAT_FW_COMP_NO_CNV 0 +/**< @ingroup icp_qat_fw_comp + * Flag indicating that NO cnv check is to be performed on the request + */ + +#define ICP_QAT_FW_COMP_CNV 1 +/**< @ingroup icp_qat_fw_comp + * Flag indicating that a cnv check IS to be performed on the request + */ + +#define ICP_QAT_FW_COMP_NO_CNV_RECOVERY 0 +/**< @ingroup icp_qat_fw_comp + * Flag indicating that NO cnv recovery is to be performed on the request + */ + +#define ICP_QAT_FW_COMP_CNV_RECOVERY 1 +/**< @ingroup icp_qat_fw_comp + * Flag indicating that a cnv recovery is to be performed on the request + */ + +#define ICP_QAT_FW_COMP_SOP_BITPOS 0 +/**< @ingroup icp_qat_fw_comp + * Starting bit position for SOP + */ + +#define ICP_QAT_FW_COMP_SOP_MASK 0x1 +/**< @ingroup icp_qat_fw_comp + * One bit mask used to determine SOP + */ + +#define ICP_QAT_FW_COMP_EOP_BITPOS 1 +/**< @ingroup icp_qat_fw_comp + * Starting bit position for EOP + */ + +#define ICP_QAT_FW_COMP_EOP_MASK 0x1 +/**< @ingroup icp_qat_fw_comp + * One bit mask used to determine EOP + */ + +#define ICP_QAT_FW_COMP_BFINAL_MASK 0x1 +/**< @ingroup icp_qat_fw_comp + * One bit mask for the bfinal bit + */ + +#define ICP_QAT_FW_COMP_BFINAL_BITPOS 6 +/**< @ingroup icp_qat_fw_comp + * Starting bit position for the bfinal bit + */ + +#define ICP_QAT_FW_COMP_CNV_MASK 0x1 +/**< @ingroup icp_qat_fw_comp + * One bit mask for the CNV bit + */ + +#define ICP_QAT_FW_COMP_CNV_BITPOS 16 +/**< @ingroup icp_qat_fw_comp + * Starting bit position for the CNV bit + */ + +#define ICP_QAT_FW_COMP_CNV_RECOVERY_MASK 0x1 +/**< @ingroup icp_qat_fw_comp + * One bit mask for the CNV Recovery bit + */ + +#define ICP_QAT_FW_COMP_CNV_RECOVERY_BITPOS 17 +/**< @ingroup icp_qat_fw_comp + * Starting bit position for the CNV Recovery bit + */ + +struct icp_qat_fw_xlt_req_params { + /**< LWs 20-21 */ + uint64_t inter_buff_ptr; + /**< This 
field specifies the physical address of an intermediate + * buffer SGL array. The array contains a pair of 64-bit + * intermediate buffer pointers to SGL buffer descriptors, one pair + * per CPM. Please refer to the CPM1.6 Firmware Interface HLD + * specification for more details. + */ +}; + + +struct icp_qat_fw_comp_cd_hdr { + /**< LW 24 */ + uint16_t ram_bank_flags; + /**< Flags to show which ram banks to access */ + + uint8_t comp_cfg_offset; + /**< Quad word offset from the content descriptor parameters address + * to the parameters for the compression processing + */ + + uint8_t next_curr_id; + /**< This field combines the next and current id (each four bits) - + * the next id is the most significant nibble. + * Next Id: Set to the next slice to pass the compressed data through. + * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through + * anymore slices after compression + * Current Id: Initialised with the compression slice type + */ + + /**< LW 25 */ + uint32_t resrvd; + /**< LWs 26-27 */ + + uint64_t comp_state_addr; + /**< Pointer to compression state */ + + /**< LWs 28-29 */ + uint64_t ram_banks_addr; + /**< Pointer to banks */ + +}; + + +struct icp_qat_fw_xlt_cd_hdr { + /**< LW 30 */ + uint16_t resrvd1; + /**< Reserved field and assumed set to 0 */ + + uint8_t resrvd2; + /**< Reserved field and assumed set to 0 */ + + uint8_t next_curr_id; + /**< This field combines the next and current id (each four bits) - + * the next id is the most significant nibble. + * Next Id: Set to the next slice to pass the translated data through. + * Set to ICP_QAT_FW_SLICE_DRAM_WR if the data is not to go through + * any more slices after compression + * Current Id: Initialised with the translation slice type + */ + + /**< LW 31 */ + uint32_t resrvd3; + /**< Reserved and should be set to zero, needed for quadword + * alignment + */ +}; + +struct icp_qat_fw_comp_req { + /**< LWs 0-1 */ + struct icp_qat_fw_comn_req_hdr comn_hdr; + /**< Common request header - for Service Command Id, + * use service-specific Compression Command Id. + * Service Specific Flags - use Compression Command Flags + */ + + /**< LWs 2-5 */ + union icp_qat_fw_comp_req_hdr_cd_pars cd_pars; + /**< Compression service-specific content descriptor field which points + * either to a content descriptor parameter block or contains the + * compression slice config word. + */ + + /**< LWs 6-13 */ + struct icp_qat_fw_comn_req_mid comn_mid; + /**< Common request middle section */ + + /**< LWs 14-19 */ + struct icp_qat_fw_comp_req_params comp_pars; + /**< Compression request Parameters block */ + + /**< LWs 20-21 */ + union { + struct icp_qat_fw_xlt_req_params xlt_pars; + /**< Translation request Parameters block */ + uint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_2]; + /**< Reserved if not used for translation */ + + } u1; + + /**< LWs 22-23 */ + union { + uint32_t resrvd2[ICP_QAT_FW_NUM_LONGWORDS_2]; + /**< Reserved - not used if Batch and Pack is disabled.*/ + + uint64_t bnp_res_table_addr; + /**< A generic pointer to the unbounded list of + * icp_qat_fw_resp_comp_pars members. This pointer is only + * used when the Batch and Pack is enabled. 
+ */ + } u3; + + /**< LWs 24-29 */ + struct icp_qat_fw_comp_cd_hdr comp_cd_ctrl; + /**< Compression request content descriptor control block header */ + + /**< LWs 30-31 */ + union { + struct icp_qat_fw_xlt_cd_hdr xlt_cd_ctrl; + /**< Translation request content descriptor + * control block header + */ + + uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_2]; + /**< Reserved if not used for translation */ + } u2; +}; + +struct icp_qat_fw_resp_comp_pars { + /**< LW 4 */ + uint32_t input_byte_counter; + /**< Input byte counter */ + + /**< LW 5 */ + uint32_t output_byte_counter; + /**< Output byte counter */ + + /**< LW 6 & 7*/ + union { + uint64_t curr_chksum; + struct { + /**< LW 6 */ + uint32_t curr_crc32; + /**< LW 7 */ + uint32_t curr_adler_32; + }; + }; +}; + +struct icp_qat_fw_comp_resp { + /**< LWs 0-1 */ + struct icp_qat_fw_comn_resp_hdr comn_resp; + /**< Common interface response format see icp_qat_fw.h */ + + /**< LWs 2-3 */ + uint64_t opaque_data; + /**< Opaque data passed from the request to the response message */ + + /**< LWs 4-7 */ + struct icp_qat_fw_resp_comp_pars comp_resp_pars; + /**< Common response params (checksums and byte counts) */ +}; + +#endif diff --git a/drivers/common/qat/qat_adf/icp_qat_fw_la.h b/drivers/common/qat/qat_adf/icp_qat_fw_la.h new file mode 100644 index 00000000..c33bc3fe --- /dev/null +++ b/drivers/common/qat/qat_adf/icp_qat_fw_la.h @@ -0,0 +1,361 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2015-2018 Intel Corporation + */ +#ifndef _ICP_QAT_FW_LA_H_ +#define _ICP_QAT_FW_LA_H_ +#include "icp_qat_fw.h" + +enum icp_qat_fw_la_cmd_id { + ICP_QAT_FW_LA_CMD_CIPHER = 0, + ICP_QAT_FW_LA_CMD_AUTH = 1, + ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2, + ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3, + ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4, + ICP_QAT_FW_LA_CMD_TRNG_TEST = 5, + ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6, + ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7, + ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8, + ICP_QAT_FW_LA_CMD_MGF1 = 9, + ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10, + ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11, + ICP_QAT_FW_LA_CMD_DELIMITER = 12 +}; + +#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK +#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR +#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK +#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR + +struct icp_qat_fw_la_bulk_req { + struct icp_qat_fw_comn_req_hdr comn_hdr; + struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars; + struct icp_qat_fw_comn_req_mid comn_mid; + struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars; + struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl; +}; + +#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1 +#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0 +#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12 +#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1 +#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1 +#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11 +#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1 +#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1 +#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0 +#define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10 +#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1 +#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4 +#define ICP_QAT_FW_LA_GCM_PROTO 2 +#define ICP_QAT_FW_LA_CCM_PROTO 1 +#define ICP_QAT_FW_LA_NO_PROTO 0 +#define QAT_LA_PROTO_BITPOS 7 +#define QAT_LA_PROTO_MASK 0x7 +#define ICP_QAT_FW_LA_CMP_AUTH_RES 1 +#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0 +#define QAT_LA_CMP_AUTH_RES_BITPOS 6 +#define QAT_LA_CMP_AUTH_RES_MASK 0x1 +#define 
ICP_QAT_FW_LA_RET_AUTH_RES 1 +#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0 +#define QAT_LA_RET_AUTH_RES_BITPOS 5 +#define QAT_LA_RET_AUTH_RES_MASK 0x1 +#define ICP_QAT_FW_LA_UPDATE_STATE 1 +#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0 +#define QAT_LA_UPDATE_STATE_BITPOS 4 +#define QAT_LA_UPDATE_STATE_MASK 0x1 +#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0 +#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1 +#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3 +#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1 +#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0 +#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1 +#define QAT_LA_CIPH_IV_FLD_BITPOS 2 +#define QAT_LA_CIPH_IV_FLD_MASK 0x1 +#define ICP_QAT_FW_LA_PARTIAL_NONE 0 +#define ICP_QAT_FW_LA_PARTIAL_START 1 +#define ICP_QAT_FW_LA_PARTIAL_MID 3 +#define ICP_QAT_FW_LA_PARTIAL_END 2 +#define QAT_LA_PARTIAL_BITPOS 0 +#define QAT_LA_PARTIAL_MASK 0x3 +#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \ + cmp_auth, ret_auth, update_state, \ + ciph_iv, ciphcfg, partial) \ + (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \ + QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \ + ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \ + QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \ + ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \ + QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \ + ((proto & QAT_LA_PROTO_MASK) << \ + QAT_LA_PROTO_BITPOS) | \ + ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \ + QAT_LA_CMP_AUTH_RES_BITPOS) | \ + ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \ + QAT_LA_RET_AUTH_RES_BITPOS) | \ + ((update_state & QAT_LA_UPDATE_STATE_MASK) << \ + QAT_LA_UPDATE_STATE_BITPOS) | \ + ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \ + QAT_LA_CIPH_IV_FLD_BITPOS) | \ + ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \ + QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \ + ((partial & QAT_LA_PARTIAL_MASK) << \ + QAT_LA_PARTIAL_BITPOS)) + +#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \ + QAT_LA_CIPH_IV_FLD_MASK) + +#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \ + QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) + +#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \ + QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \ + QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) + +#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \ + QAT_LA_GCM_IV_LEN_FLAG_MASK) + +#define ICP_QAT_FW_LA_PROTO_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK) + +#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \ + QAT_LA_CMP_AUTH_RES_MASK) + +#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \ + QAT_LA_RET_AUTH_RES_MASK) + +#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \ + QAT_LA_DIGEST_IN_BUFFER_MASK) + +#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \ + QAT_LA_UPDATE_STATE_MASK) + +#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \ + QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \ + QAT_LA_PARTIAL_MASK) + +#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \ + QAT_LA_CIPH_IV_FLD_MASK) + +#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \ + QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) + +#define 
ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \ + QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) + +#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \ + QAT_LA_GCM_IV_LEN_FLAG_MASK) + +#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \ + QAT_LA_PROTO_MASK) + +#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \ + QAT_LA_CMP_AUTH_RES_MASK) + +#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \ + QAT_LA_RET_AUTH_RES_MASK) + +#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \ + QAT_LA_DIGEST_IN_BUFFER_MASK) + +#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \ + QAT_LA_UPDATE_STATE_MASK) + +#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \ + QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \ + QAT_LA_PARTIAL_MASK) + +struct icp_qat_fw_cipher_req_hdr_cd_pars { + union { + struct { + uint64_t content_desc_addr; + uint16_t content_desc_resrvd1; + uint8_t content_desc_params_sz; + uint8_t content_desc_hdr_resrvd2; + uint32_t content_desc_resrvd3; + } s; + struct { + uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; + } s1; + } u; +}; + +struct icp_qat_fw_cipher_auth_req_hdr_cd_pars { + union { + struct { + uint64_t content_desc_addr; + uint16_t content_desc_resrvd1; + uint8_t content_desc_params_sz; + uint8_t content_desc_hdr_resrvd2; + uint32_t content_desc_resrvd3; + } s; + struct { + uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; + } sl; + } u; +}; + +struct icp_qat_fw_cipher_cd_ctrl_hdr { + uint8_t cipher_state_sz; + uint8_t cipher_key_sz; + uint8_t cipher_cfg_offset; + uint8_t next_curr_id; + uint8_t cipher_padding_sz; + uint8_t resrvd1; + uint16_t resrvd2; + uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3]; +}; + +struct icp_qat_fw_auth_cd_ctrl_hdr { + uint32_t resrvd1; + uint8_t resrvd2; + uint8_t hash_flags; + uint8_t hash_cfg_offset; + uint8_t next_curr_id; + uint8_t resrvd3; + uint8_t outer_prefix_sz; + uint8_t final_sz; + uint8_t inner_res_sz; + uint8_t resrvd4; + uint8_t inner_state1_sz; + uint8_t inner_state2_offset; + uint8_t inner_state2_sz; + uint8_t outer_config_offset; + uint8_t outer_state1_sz; + uint8_t outer_res_sz; + uint8_t outer_prefix_offset; +}; + +struct icp_qat_fw_cipher_auth_cd_ctrl_hdr { + uint8_t cipher_state_sz; + uint8_t cipher_key_sz; + uint8_t cipher_cfg_offset; + uint8_t next_curr_id_cipher; + uint8_t cipher_padding_sz; + uint8_t hash_flags; + uint8_t hash_cfg_offset; + uint8_t next_curr_id_auth; + uint8_t resrvd1; + uint8_t outer_prefix_sz; + uint8_t final_sz; + uint8_t inner_res_sz; + uint8_t resrvd2; + uint8_t inner_state1_sz; + uint8_t inner_state2_offset; + uint8_t inner_state2_sz; + uint8_t outer_config_offset; + uint8_t outer_state1_sz; + uint8_t outer_res_sz; + uint8_t outer_prefix_offset; +}; + +#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1 +#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0 +#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX 240 +#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \ + (sizeof(struct icp_qat_fw_la_cipher_req_params_t)) +#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0) + +struct icp_qat_fw_la_cipher_req_params { + uint32_t cipher_offset; + uint32_t cipher_length; + union { + uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4]; + 
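		/* The cipher IV is supplied either inline in cipher_IV_array
		 * (request flags set to ICP_QAT_FW_CIPH_IV_16BYTE_DATA) or by
		 * physical address in cipher_IV_ptr below
		 * (ICP_QAT_FW_CIPH_IV_64BIT_PTR); the choice is signalled via
		 * ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET().
		 */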
struct { + uint64_t cipher_IV_ptr; + uint64_t resrvd1; + } s; + } u; +}; + +struct icp_qat_fw_la_auth_req_params { + uint32_t auth_off; + uint32_t auth_len; + union { + uint64_t auth_partial_st_prefix; + uint64_t aad_adr; + } u1; + uint64_t auth_res_addr; + union { + uint8_t inner_prefix_sz; + uint8_t aad_sz; + } u2; + uint8_t resrvd1; + uint8_t hash_state_sz; + uint8_t auth_res_sz; +} __rte_packed; + +struct icp_qat_fw_la_auth_req_params_resrvd_flds { + uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6]; + union { + uint8_t inner_prefix_sz; + uint8_t aad_sz; + } u2; + uint8_t resrvd1; + uint16_t resrvd2; +}; + +struct icp_qat_fw_la_resp { + struct icp_qat_fw_comn_resp_hdr comn_resp; + uint64_t opaque_data; + uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; +}; + +#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \ + ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \ + ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) + +#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ +{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \ + ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \ + & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ + ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK)) } + +#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \ + (((cd_ctrl_hdr_t)->next_curr_id_cipher) \ + & ICP_QAT_FW_COMN_CURR_ID_MASK) + +#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \ +{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \ + ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ + ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) } + +#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \ + ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \ + >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) + +#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ +{ (cd_ctrl_hdr_t)->next_curr_id_auth = \ + ((((cd_ctrl_hdr_t)->next_curr_id_auth) \ + & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ + ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK)) } + +#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \ + (((cd_ctrl_hdr_t)->next_curr_id_auth) \ + & ICP_QAT_FW_COMN_CURR_ID_MASK) + +#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \ +{ (cd_ctrl_hdr_t)->next_curr_id_auth = \ + ((((cd_ctrl_hdr_t)->next_curr_id_auth) \ + & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ + ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) } + +#endif diff --git a/drivers/common/qat/qat_adf/icp_qat_hw.h b/drivers/common/qat/qat_adf/icp_qat_hw.h new file mode 100644 index 00000000..e7961dba --- /dev/null +++ b/drivers/common/qat/qat_adf/icp_qat_hw.h @@ -0,0 +1,386 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0) + * Copyright(c) 2015-2018 Intel Corporation + */ +#ifndef _ICP_QAT_HW_H_ +#define _ICP_QAT_HW_H_ + +enum icp_qat_hw_ae_id { + ICP_QAT_HW_AE_0 = 0, + ICP_QAT_HW_AE_1 = 1, + ICP_QAT_HW_AE_2 = 2, + ICP_QAT_HW_AE_3 = 3, + ICP_QAT_HW_AE_4 = 4, + ICP_QAT_HW_AE_5 = 5, + ICP_QAT_HW_AE_6 = 6, + ICP_QAT_HW_AE_7 = 7, + ICP_QAT_HW_AE_8 = 8, + ICP_QAT_HW_AE_9 = 9, + ICP_QAT_HW_AE_10 = 10, + ICP_QAT_HW_AE_11 = 11, + ICP_QAT_HW_AE_DELIMITER = 12 +}; + +enum icp_qat_hw_qat_id { + ICP_QAT_HW_QAT_0 = 0, + ICP_QAT_HW_QAT_1 = 1, + ICP_QAT_HW_QAT_2 = 2, + ICP_QAT_HW_QAT_3 = 3, + ICP_QAT_HW_QAT_4 = 4, + ICP_QAT_HW_QAT_5 = 5, + ICP_QAT_HW_QAT_DELIMITER = 6 +}; + +enum icp_qat_hw_auth_algo { + ICP_QAT_HW_AUTH_ALGO_NULL = 0, + ICP_QAT_HW_AUTH_ALGO_SHA1 = 1, + ICP_QAT_HW_AUTH_ALGO_MD5 = 2, + ICP_QAT_HW_AUTH_ALGO_SHA224 = 3, + ICP_QAT_HW_AUTH_ALGO_SHA256 = 4, + ICP_QAT_HW_AUTH_ALGO_SHA384 = 5, + 
ICP_QAT_HW_AUTH_ALGO_SHA512 = 6, + ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7, + ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8, + ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9, + ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10, + ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11, + ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12, + ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13, + ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14, + ICP_QAT_HW_AUTH_RESERVED_1 = 15, + ICP_QAT_HW_AUTH_RESERVED_2 = 16, + ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17, + ICP_QAT_HW_AUTH_RESERVED_3 = 18, + ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19, + ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20 +}; + +enum icp_qat_hw_auth_mode { + ICP_QAT_HW_AUTH_MODE0 = 0, + ICP_QAT_HW_AUTH_MODE1 = 1, + ICP_QAT_HW_AUTH_MODE2 = 2, + ICP_QAT_HW_AUTH_MODE_DELIMITER = 3 +}; + +struct icp_qat_hw_auth_config { + uint32_t config; + uint32_t reserved; +}; + +#define QAT_AUTH_MODE_BITPOS 4 +#define QAT_AUTH_MODE_MASK 0xF +#define QAT_AUTH_ALGO_BITPOS 0 +#define QAT_AUTH_ALGO_MASK 0xF +#define QAT_AUTH_CMP_BITPOS 8 +#define QAT_AUTH_CMP_MASK 0x7F +#define QAT_AUTH_SHA3_PADDING_DISABLE_BITPOS 16 +#define QAT_AUTH_SHA3_PADDING_DISABLE_MASK 0x1 +#define QAT_AUTH_SHA3_PADDING_OVERRIDE_BITPOS 17 +#define QAT_AUTH_SHA3_PADDING_OVERRIDE_MASK 0x1 +#define QAT_AUTH_ALGO_SHA3_BITPOS 22 +#define QAT_AUTH_ALGO_SHA3_MASK 0x3 +#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_BITPOS 16 +#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_MASK 0xF +#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_BITPOS 24 +#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_MASK 0xFF +#define QAT_AUTH_SHA3_HW_PADDING_ENABLE 0 +#define QAT_AUTH_SHA3_HW_PADDING_DISABLE 1 +#define QAT_AUTH_SHA3_PADDING_DISABLE_USE_DEFAULT 0 +#define QAT_AUTH_SHA3_PADDING_OVERRIDE_USE_DEFAULT 0 +#define QAT_AUTH_SHA3_PADDING_OVERRIDE_PROGRAMMABLE 1 +#define QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_RESERVED 0 +#define QAT_AUTH_SHA3_PROG_PADDING_PREFIX_RESERVED 0 + +#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \ + ((((mode) & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \ + (((algo) & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \ + (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) \ + << QAT_AUTH_ALGO_SHA3_BITPOS) | \ + (((QAT_AUTH_SHA3_PADDING_DISABLE_USE_DEFAULT) & \ + QAT_AUTH_SHA3_PADDING_DISABLE_MASK) \ + << QAT_AUTH_SHA3_PADDING_DISABLE_BITPOS) | \ + (((QAT_AUTH_SHA3_PADDING_OVERRIDE_USE_DEFAULT) & \ + QAT_AUTH_SHA3_PADDING_OVERRIDE_MASK) \ + << QAT_AUTH_SHA3_PADDING_OVERRIDE_BITPOS) | \ + (((cmp_len) & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS)) + +#define ICP_QAT_HW_AUTH_CONFIG_BUILD_UPPER \ + ((((QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_RESERVED) & \ + QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_MASK) \ + << QAT_AUTH_SHA3_PROG_PADDING_POSTFIX_BITPOS) | \ + (((QAT_AUTH_SHA3_PROG_PADDING_PREFIX_RESERVED) & \ + QAT_AUTH_SHA3_PROG_PADDING_PREFIX_MASK) \ + << QAT_AUTH_SHA3_PROG_PADDING_PREFIX_BITPOS)) + +struct icp_qat_hw_auth_counter { + uint32_t counter; + uint32_t reserved; +}; + +#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF +#define QAT_AUTH_COUNT_BITPOS 0 +#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \ + (((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS) + +struct icp_qat_hw_auth_setup { + struct icp_qat_hw_auth_config auth_config; + struct icp_qat_hw_auth_counter auth_counter; +}; + +#define QAT_HW_DEFAULT_ALIGNMENT 8 +#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & (~(n - 1))) +#define ICP_QAT_HW_NULL_STATE1_SZ 32 +#define ICP_QAT_HW_MD5_STATE1_SZ 16 +#define ICP_QAT_HW_SHA1_STATE1_SZ 20 +#define ICP_QAT_HW_SHA224_STATE1_SZ 32 +#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28 +#define ICP_QAT_HW_SHA256_STATE1_SZ 32 +#define 
ICP_QAT_HW_SHA3_256_STATE1_SZ 32 +#define ICP_QAT_HW_SHA384_STATE1_SZ 64 +#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48 +#define ICP_QAT_HW_SHA512_STATE1_SZ 64 +#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64 +#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16 +#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16 +#define ICP_QAT_HW_AES_F9_STATE1_SZ 32 +#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16 +#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16 +#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8 +#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8 + +#define ICP_QAT_HW_NULL_STATE2_SZ 32 +#define ICP_QAT_HW_MD5_STATE2_SZ 16 +#define ICP_QAT_HW_SHA1_STATE2_SZ 20 +#define ICP_QAT_HW_SHA224_STATE2_SZ 32 +#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0 +#define ICP_QAT_HW_SHA256_STATE2_SZ 32 +#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0 +#define ICP_QAT_HW_SHA384_STATE2_SZ 64 +#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0 +#define ICP_QAT_HW_SHA512_STATE2_SZ 64 +#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0 +#define ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ 48 +#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16 +#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16 +#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16 +#define ICP_QAT_HW_F9_IK_SZ 16 +#define ICP_QAT_HW_F9_FK_SZ 16 +#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \ + ICP_QAT_HW_F9_FK_SZ) +#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ +#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24 +#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32 +#define ICP_QAT_HW_GALOIS_H_SZ 16 +#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8 +#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16 + +struct icp_qat_hw_auth_sha512 { + struct icp_qat_hw_auth_setup inner_setup; + uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ]; + struct icp_qat_hw_auth_setup outer_setup; + uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ]; +}; + +struct icp_qat_hw_auth_sha3_512 { + struct icp_qat_hw_auth_setup inner_setup; + uint8_t state1[ICP_QAT_HW_SHA3_512_STATE1_SZ]; + struct icp_qat_hw_auth_setup outer_setup; +}; + +struct icp_qat_hw_auth_algo_blk { + struct icp_qat_hw_auth_sha512 sha; +}; + +#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0 +#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF + +enum icp_qat_hw_cipher_algo { + ICP_QAT_HW_CIPHER_ALGO_NULL = 0, + ICP_QAT_HW_CIPHER_ALGO_DES = 1, + ICP_QAT_HW_CIPHER_ALGO_3DES = 2, + ICP_QAT_HW_CIPHER_ALGO_AES128 = 3, + ICP_QAT_HW_CIPHER_ALGO_AES192 = 4, + ICP_QAT_HW_CIPHER_ALGO_AES256 = 5, + ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6, + ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7, + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8, + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9, + ICP_QAT_HW_CIPHER_DELIMITER = 10 +}; + +enum icp_qat_hw_cipher_mode { + ICP_QAT_HW_CIPHER_ECB_MODE = 0, + ICP_QAT_HW_CIPHER_CBC_MODE = 1, + ICP_QAT_HW_CIPHER_CTR_MODE = 2, + ICP_QAT_HW_CIPHER_F8_MODE = 3, + ICP_QAT_HW_CIPHER_XTS_MODE = 6, + ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7 +}; + +struct icp_qat_hw_cipher_config { + uint32_t val; + uint32_t reserved; +}; + +enum icp_qat_hw_cipher_dir { + ICP_QAT_HW_CIPHER_ENCRYPT = 0, + ICP_QAT_HW_CIPHER_DECRYPT = 1, +}; + +enum icp_qat_hw_auth_op { + ICP_QAT_HW_AUTH_VERIFY = 0, + ICP_QAT_HW_AUTH_GENERATE = 1, +}; + +enum icp_qat_hw_cipher_convert { + ICP_QAT_HW_CIPHER_NO_CONVERT = 0, + ICP_QAT_HW_CIPHER_KEY_CONVERT = 1, +}; + +#define QAT_CIPHER_MODE_BITPOS 4 +#define QAT_CIPHER_MODE_MASK 0xF +#define QAT_CIPHER_ALGO_BITPOS 0 +#define QAT_CIPHER_ALGO_MASK 0xF +#define QAT_CIPHER_CONVERT_BITPOS 9 +#define QAT_CIPHER_CONVERT_MASK 0x1 +#define QAT_CIPHER_DIR_BITPOS 8 +#define QAT_CIPHER_DIR_MASK 0x1 +#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2 +#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 
2 +#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \ + (((mode & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \ + ((algo & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \ + ((convert & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \ + ((dir & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS)) +#define ICP_QAT_HW_DES_BLK_SZ 8 +#define ICP_QAT_HW_3DES_BLK_SZ 8 +#define ICP_QAT_HW_NULL_BLK_SZ 8 +#define ICP_QAT_HW_AES_BLK_SZ 16 +#define ICP_QAT_HW_KASUMI_BLK_SZ 8 +#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8 +#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8 +#define ICP_QAT_HW_NULL_KEY_SZ 256 +#define ICP_QAT_HW_DES_KEY_SZ 8 +#define ICP_QAT_HW_3DES_KEY_SZ 24 +#define ICP_QAT_HW_AES_128_KEY_SZ 16 +#define ICP_QAT_HW_AES_192_KEY_SZ 24 +#define ICP_QAT_HW_AES_256_KEY_SZ 32 +#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \ + QAT_CIPHER_MODE_F8_KEY_SZ_MULT) +#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \ + QAT_CIPHER_MODE_F8_KEY_SZ_MULT) +#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \ + QAT_CIPHER_MODE_F8_KEY_SZ_MULT) +#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \ + QAT_CIPHER_MODE_XTS_KEY_SZ_MULT) +#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \ + QAT_CIPHER_MODE_XTS_KEY_SZ_MULT) +#define ICP_QAT_HW_KASUMI_KEY_SZ 16 +#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \ + QAT_CIPHER_MODE_F8_KEY_SZ_MULT) +#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \ + QAT_CIPHER_MODE_XTS_KEY_SZ_MULT) +#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \ + QAT_CIPHER_MODE_XTS_KEY_SZ_MULT) +#define ICP_QAT_HW_ARC4_KEY_SZ 256 +#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16 +#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16 +#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16 +#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16 +#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2 + +#define ICP_QAT_HW_CIPHER_MAX_KEY_SZ ICP_QAT_HW_AES_256_F8_KEY_SZ + +/* These defines describe position of the bit-fields + * in the flags byte in B0 + */ +#define ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT 6 +#define ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT 3 + +#define ICP_QAT_HW_CCM_BUILD_B0_FLAGS(Adata, t, q) \ + ((((Adata) > 0 ? 
1 : 0) << ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT) \ + | ((((t) - 2) >> 1) << ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT) \ + | ((q) - 1)) + +#define ICP_QAT_HW_CCM_NQ_CONST 15 +#define ICP_QAT_HW_CCM_AAD_B0_LEN 16 +#define ICP_QAT_HW_CCM_AAD_LEN_INFO 2 +#define ICP_QAT_HW_CCM_AAD_DATA_OFFSET (ICP_QAT_HW_CCM_AAD_B0_LEN + \ + ICP_QAT_HW_CCM_AAD_LEN_INFO) +#define ICP_QAT_HW_CCM_AAD_ALIGNMENT 16 +#define ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE 4 +#define ICP_QAT_HW_CCM_NONCE_OFFSET 1 + +struct icp_qat_hw_cipher_algo_blk { + struct icp_qat_hw_cipher_config cipher_config; + uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ]; +} __rte_cache_aligned; + +/* ========================================================================= */ +/* COMPRESSION SLICE */ +/* ========================================================================= */ + +enum icp_qat_hw_compression_direction { + ICP_QAT_HW_COMPRESSION_DIR_COMPRESS = 0, + ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS = 1, + ICP_QAT_HW_COMPRESSION_DIR_DELIMITER = 2 +}; + +enum icp_qat_hw_compression_delayed_match { + ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED = 0, + ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED = 1, + ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DELIMITER = 2 +}; + +enum icp_qat_hw_compression_algo { + ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE = 0, + ICP_QAT_HW_COMPRESSION_ALGO_LZS = 1, + ICP_QAT_HW_COMPRESSION_ALGO_DELIMITER = 2 +}; + + +enum icp_qat_hw_compression_depth { + ICP_QAT_HW_COMPRESSION_DEPTH_1 = 0, + ICP_QAT_HW_COMPRESSION_DEPTH_4 = 1, + ICP_QAT_HW_COMPRESSION_DEPTH_8 = 2, + ICP_QAT_HW_COMPRESSION_DEPTH_16 = 3, + ICP_QAT_HW_COMPRESSION_DEPTH_DELIMITER = 4 +}; + +enum icp_qat_hw_compression_file_type { + ICP_QAT_HW_COMPRESSION_FILE_TYPE_0 = 0, + ICP_QAT_HW_COMPRESSION_FILE_TYPE_1 = 1, + ICP_QAT_HW_COMPRESSION_FILE_TYPE_2 = 2, + ICP_QAT_HW_COMPRESSION_FILE_TYPE_3 = 3, + ICP_QAT_HW_COMPRESSION_FILE_TYPE_4 = 4, + ICP_QAT_HW_COMPRESSION_FILE_TYPE_DELIMITER = 5 +}; + +struct icp_qat_hw_compression_config { + uint32_t val; + uint32_t reserved; +}; + +#define QAT_COMPRESSION_DIR_BITPOS 4 +#define QAT_COMPRESSION_DIR_MASK 0x7 +#define QAT_COMPRESSION_DELAYED_MATCH_BITPOS 16 +#define QAT_COMPRESSION_DELAYED_MATCH_MASK 0x1 +#define QAT_COMPRESSION_ALGO_BITPOS 31 +#define QAT_COMPRESSION_ALGO_MASK 0x1 +#define QAT_COMPRESSION_DEPTH_BITPOS 28 +#define QAT_COMPRESSION_DEPTH_MASK 0x7 +#define QAT_COMPRESSION_FILE_TYPE_BITPOS 24 +#define QAT_COMPRESSION_FILE_TYPE_MASK 0xF + +#define ICP_QAT_HW_COMPRESSION_CONFIG_BUILD( \ + dir, delayed, algo, depth, filetype) \ + ((((dir) & QAT_COMPRESSION_DIR_MASK) << QAT_COMPRESSION_DIR_BITPOS) | \ + (((delayed) & QAT_COMPRESSION_DELAYED_MATCH_MASK) \ + << QAT_COMPRESSION_DELAYED_MATCH_BITPOS) | \ + (((algo) & QAT_COMPRESSION_ALGO_MASK) \ + << QAT_COMPRESSION_ALGO_BITPOS) | \ + (((depth) & QAT_COMPRESSION_DEPTH_MASK) \ + << QAT_COMPRESSION_DEPTH_BITPOS) | \ + (((filetype) & QAT_COMPRESSION_FILE_TYPE_MASK) \ + << QAT_COMPRESSION_FILE_TYPE_BITPOS)) + +#endif diff --git a/drivers/common/qat/qat_common.c b/drivers/common/qat/qat_common.c new file mode 100644 index 00000000..47538669 --- /dev/null +++ b/drivers/common/qat/qat_common.c @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#include "qat_common.h" +#include "qat_device.h" +#include "qat_logs.h" + +int +qat_sgl_fill_array(struct rte_mbuf *buf, int64_t offset, + void *list_in, uint32_t data_len, + const uint16_t max_segs) +{ + int res = -EINVAL; + uint32_t buf_len, nr; + struct qat_sgl *list = (struct qat_sgl *)list_in; +#if 
RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + uint8_t *virt_addr[max_segs]; +#endif + + for (nr = buf_len = 0; buf && nr < max_segs; buf = buf->next) { + if (offset >= rte_pktmbuf_data_len(buf)) { + offset -= rte_pktmbuf_data_len(buf); + continue; + } + + list->buffers[nr].len = rte_pktmbuf_data_len(buf) - offset; + list->buffers[nr].resrvd = 0; + list->buffers[nr].addr = rte_pktmbuf_iova_offset(buf, offset); + +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + virt_addr[nr] = rte_pktmbuf_mtod_offset(buf, uint8_t*, offset); +#endif + offset = 0; + buf_len += list->buffers[nr].len; + + if (buf_len >= data_len) { + list->buffers[nr].len -= buf_len - data_len; + res = 0; + break; + } + ++nr; + } + + if (unlikely(res != 0)) { + if (nr == max_segs) { + QAT_DP_LOG(ERR, "Exceeded max segments in QAT SGL (%u)", + max_segs); + } else { + QAT_DP_LOG(ERR, "Mbuf chain is too short"); + } + } else { + + list->num_bufs = ++nr; +#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG + QAT_DP_LOG(INFO, "SGL with %d buffers:", list->num_bufs); + for (nr = 0; nr < list->num_bufs; nr++) { + QAT_DP_LOG(INFO, + "QAT SGL buf %d, len = %d, iova = 0x%012"PRIx64, + nr, list->buffers[nr].len, + list->buffers[nr].addr); + QAT_DP_HEXDUMP_LOG(DEBUG, "qat SGL", + virt_addr[nr], + list->buffers[nr].len); + } +#endif + } + + return res; +} + +void qat_stats_get(struct qat_pci_device *dev, + struct qat_common_stats *stats, + enum qat_service_type service) +{ + int i; + struct qat_qp **qp; + + if (stats == NULL || dev == NULL || service >= QAT_SERVICE_INVALID) { + QAT_LOG(ERR, "invalid param: stats %p, dev %p, service %d", + stats, dev, service); + return; + } + + qp = dev->qps_in_use[service]; + for (i = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++) { + if (qp[i] == NULL) { + QAT_LOG(DEBUG, "Service %d Uninitialised qp %d", + service, i); + continue; + } + + stats->enqueued_count += qp[i]->stats.enqueued_count; + stats->dequeued_count += qp[i]->stats.dequeued_count; + stats->enqueue_err_count += qp[i]->stats.enqueue_err_count; + stats->dequeue_err_count += qp[i]->stats.dequeue_err_count; + } +} + +void qat_stats_reset(struct qat_pci_device *dev, + enum qat_service_type service) +{ + int i; + struct qat_qp **qp; + + if (dev == NULL || service >= QAT_SERVICE_INVALID) { + QAT_LOG(ERR, "invalid param: dev %p, service %d", + dev, service); + return; + } + + qp = dev->qps_in_use[service]; + for (i = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++) { + if (qp[i] == NULL) { + QAT_LOG(DEBUG, "Service %d Uninitialised qp %d", + service, i); + continue; + } + memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats)); + } + + QAT_LOG(DEBUG, "QAT: %d stats cleared", service); +} diff --git a/drivers/common/qat/qat_common.h b/drivers/common/qat/qat_common.h new file mode 100644 index 00000000..d4bef539 --- /dev/null +++ b/drivers/common/qat/qat_common.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ +#ifndef _QAT_COMMON_H_ +#define _QAT_COMMON_H_ + +#include + +#include + +/**< Intel(R) QAT device name for PCI registration */ +#define QAT_PCI_NAME qat +#define QAT_64_BTYE_ALIGN_MASK (~0x3f) + +/* Intel(R) QuickAssist Technology device generation is enumerated + * from one according to the generation of the device + */ +enum qat_device_gen { + QAT_GEN1 = 1, + QAT_GEN2 +}; + +enum qat_service_type { + QAT_SERVICE_ASYMMETRIC = 0, + QAT_SERVICE_SYMMETRIC, + QAT_SERVICE_COMPRESSION, + QAT_SERVICE_INVALID +}; + +#define QAT_MAX_SERVICES (QAT_SERVICE_INVALID) + +/**< Common struct for scatter-gather list operations */ +struct qat_flat_buf { + 
uint32_t len; + uint32_t resrvd; + uint64_t addr; +} __rte_packed; + +#define qat_sgl_hdr struct { \ + uint64_t resrvd; \ + uint32_t num_bufs; \ + uint32_t num_mapped_bufs; \ +} + +__extension__ +struct qat_sgl { + qat_sgl_hdr; + /* flexible array of flat buffers*/ + struct qat_flat_buf buffers[0]; +} __rte_packed __rte_cache_aligned; + +/** Common, i.e. not service-specific, statistics */ +struct qat_common_stats { + uint64_t enqueued_count; + /**< Count of all operations enqueued */ + uint64_t dequeued_count; + /**< Count of all operations dequeued */ + + uint64_t enqueue_err_count; + /**< Total error count on operations enqueued */ + uint64_t dequeue_err_count; + /**< Total error count on operations dequeued */ +}; + +struct qat_pci_device; + +int +qat_sgl_fill_array(struct rte_mbuf *buf, int64_t offset, + void *list_in, uint32_t data_len, + const uint16_t max_segs); +void +qat_stats_get(struct qat_pci_device *dev, + struct qat_common_stats *stats, + enum qat_service_type service); +void +qat_stats_reset(struct qat_pci_device *dev, + enum qat_service_type service); + +#endif /* _QAT_COMMON_H_ */ diff --git a/drivers/common/qat/qat_device.c b/drivers/common/qat/qat_device.c new file mode 100644 index 00000000..f32d7235 --- /dev/null +++ b/drivers/common/qat/qat_device.c @@ -0,0 +1,279 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#include + +#include "qat_device.h" +#include "adf_transport_access_macros.h" +#include "qat_sym_pmd.h" + +/* Hardware device information per generation */ +__extension__ +struct qat_gen_hw_data qat_gen_config[] = { + [QAT_GEN1] = { + .dev_gen = QAT_GEN1, + .qp_hw_data = qat_gen1_qps, + }, + [QAT_GEN2] = { + .dev_gen = QAT_GEN2, + .qp_hw_data = qat_gen1_qps, + /* gen2 has same ring layout as gen1 */ + }, +}; + + +static struct qat_pci_device qat_pci_devices[RTE_PMD_QAT_MAX_PCI_DEVICES]; +static int qat_nb_pci_devices; + +/* + * The set of PCI devices this driver supports + */ + +static const struct rte_pci_id pci_id_qat_map[] = { + { + RTE_PCI_DEVICE(0x8086, 0x0443), + }, + { + RTE_PCI_DEVICE(0x8086, 0x37c9), + }, + { + RTE_PCI_DEVICE(0x8086, 0x19e3), + }, + { + RTE_PCI_DEVICE(0x8086, 0x6f55), + }, + {.device_id = 0}, +}; + + +static struct qat_pci_device * +qat_pci_get_dev(uint8_t dev_id) +{ + return &qat_pci_devices[dev_id]; +} + +static struct qat_pci_device * +qat_pci_get_named_dev(const char *name) +{ + struct qat_pci_device *dev; + unsigned int i; + + if (name == NULL) + return NULL; + + for (i = 0; i < RTE_PMD_QAT_MAX_PCI_DEVICES; i++) { + dev = &qat_pci_devices[i]; + + if ((dev->attached == QAT_ATTACHED) && + (strcmp(dev->name, name) == 0)) + return dev; + } + + return NULL; +} + +static uint8_t +qat_pci_find_free_device_index(void) +{ + uint8_t dev_id; + + for (dev_id = 0; dev_id < RTE_PMD_QAT_MAX_PCI_DEVICES; dev_id++) { + if (qat_pci_devices[dev_id].attached == QAT_DETACHED) + break; + } + return dev_id; +} + +struct qat_pci_device * +qat_get_qat_dev_from_pci_dev(struct rte_pci_device *pci_dev) +{ + char name[QAT_DEV_NAME_MAX_LEN]; + + rte_pci_device_name(&pci_dev->addr, name, sizeof(name)); + + return qat_pci_get_named_dev(name); +} + +struct qat_pci_device * +qat_pci_device_allocate(struct rte_pci_device *pci_dev) +{ + struct qat_pci_device *qat_dev; + uint8_t qat_dev_id; + char name[QAT_DEV_NAME_MAX_LEN]; + + rte_pci_device_name(&pci_dev->addr, name, sizeof(name)); + snprintf(name+strlen(name), QAT_DEV_NAME_MAX_LEN-strlen(name), "_qat"); + if (qat_pci_get_named_dev(name) != NULL) { + QAT_LOG(ERR, "QAT 
device with name %s already allocated!", + name); + return NULL; + } + + qat_dev_id = qat_pci_find_free_device_index(); + if (qat_dev_id == RTE_PMD_QAT_MAX_PCI_DEVICES) { + QAT_LOG(ERR, "Reached maximum number of QAT devices"); + return NULL; + } + + qat_dev = qat_pci_get_dev(qat_dev_id); + memset(qat_dev, 0, sizeof(*qat_dev)); + strlcpy(qat_dev->name, name, QAT_DEV_NAME_MAX_LEN); + qat_dev->qat_dev_id = qat_dev_id; + qat_dev->pci_dev = pci_dev; + switch (qat_dev->pci_dev->id.device_id) { + case 0x0443: + qat_dev->qat_dev_gen = QAT_GEN1; + break; + case 0x37c9: + case 0x19e3: + case 0x6f55: + qat_dev->qat_dev_gen = QAT_GEN2; + break; + default: + QAT_LOG(ERR, "Invalid dev_id, can't determine generation"); + return NULL; + } + + rte_spinlock_init(&qat_dev->arb_csr_lock); + + qat_dev->attached = QAT_ATTACHED; + + qat_nb_pci_devices++; + + QAT_LOG(DEBUG, "QAT device %d allocated, name %s, total QATs %d", + qat_dev->qat_dev_id, qat_dev->name, qat_nb_pci_devices); + + return qat_dev; +} + +int +qat_pci_device_release(struct rte_pci_device *pci_dev) +{ + struct qat_pci_device *qat_dev; + char name[QAT_DEV_NAME_MAX_LEN]; + + if (pci_dev == NULL) + return -EINVAL; + + rte_pci_device_name(&pci_dev->addr, name, sizeof(name)); + snprintf(name+strlen(name), QAT_DEV_NAME_MAX_LEN-strlen(name), "_qat"); + qat_dev = qat_pci_get_named_dev(name); + if (qat_dev != NULL) { + + /* Check that there are no service devs still on pci device */ + if (qat_dev->sym_dev != NULL) + return -EBUSY; + + qat_dev->attached = QAT_DETACHED; + qat_nb_pci_devices--; + } + QAT_LOG(DEBUG, "QAT device %s released, total QATs %d", + name, qat_nb_pci_devices); + return 0; +} + +static int +qat_pci_dev_destroy(struct qat_pci_device *qat_pci_dev, + struct rte_pci_device *pci_dev) +{ + qat_sym_dev_destroy(qat_pci_dev); + qat_comp_dev_destroy(qat_pci_dev); + qat_asym_dev_destroy(qat_pci_dev); + return qat_pci_device_release(pci_dev); +} + +static int qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + int ret = 0; + struct qat_pci_device *qat_pci_dev; + + QAT_LOG(DEBUG, "Found QAT device at %02x:%02x.%x", + pci_dev->addr.bus, + pci_dev->addr.devid, + pci_dev->addr.function); + + qat_pci_dev = qat_pci_device_allocate(pci_dev); + if (qat_pci_dev == NULL) + return -ENODEV; + + ret = qat_sym_dev_create(qat_pci_dev); + if (ret != 0) + goto error_out; + + ret = qat_comp_dev_create(qat_pci_dev); + if (ret != 0) + goto error_out; + + ret = qat_asym_dev_create(qat_pci_dev); + if (ret != 0) + goto error_out; + + return 0; + +error_out: + qat_pci_dev_destroy(qat_pci_dev, pci_dev); + return ret; + +} + +static int qat_pci_remove(struct rte_pci_device *pci_dev) +{ + struct qat_pci_device *qat_pci_dev; + + if (pci_dev == NULL) + return -EINVAL; + + qat_pci_dev = qat_get_qat_dev_from_pci_dev(pci_dev); + if (qat_pci_dev == NULL) + return 0; + + return qat_pci_dev_destroy(qat_pci_dev, pci_dev); +} + +static struct rte_pci_driver rte_qat_pmd = { + .id_table = pci_id_qat_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING, + .probe = qat_pci_probe, + .remove = qat_pci_remove +}; + +__attribute__((weak)) int +qat_sym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused) +{ + return 0; +} + +__attribute__((weak)) int +qat_asym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused) +{ + return 0; +} + +__attribute__((weak)) int +qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused) +{ + return 0; +} + +__attribute__((weak)) int +qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev 
__rte_unused) +{ + return 0; +} + +__attribute__((weak)) int +qat_comp_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused) +{ + return 0; +} + +__attribute__((weak)) int +qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused) +{ + return 0; +} + +RTE_PMD_REGISTER_PCI(QAT_PCI_NAME, rte_qat_pmd); +RTE_PMD_REGISTER_PCI_TABLE(QAT_PCI_NAME, pci_id_qat_map); diff --git a/drivers/common/qat/qat_device.h b/drivers/common/qat/qat_device.h new file mode 100644 index 00000000..9599fc59 --- /dev/null +++ b/drivers/common/qat/qat_device.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ +#ifndef _QAT_DEVICE_H_ +#define _QAT_DEVICE_H_ + +#include + +#include "qat_common.h" +#include "qat_logs.h" +#include "adf_transport_access_macros.h" +#include "qat_qp.h" + +#define QAT_DETACHED (0) +#define QAT_ATTACHED (1) + +#define QAT_DEV_NAME_MAX_LEN 64 + +/* + * This struct holds all the data about a QAT pci device + * including data about all services it supports. + * It contains + * - hw_data + * - config data + * - runtime data + */ +struct qat_sym_dev_private; +struct qat_comp_dev_private; + +struct qat_pci_device { + + /* Data used by all services */ + char name[QAT_DEV_NAME_MAX_LEN]; + /**< Name of qat pci device */ + uint8_t qat_dev_id; + /**< Device instance for this qat pci device */ + struct rte_pci_device *pci_dev; + /**< PCI information. */ + enum qat_device_gen qat_dev_gen; + /**< QAT device generation */ + rte_spinlock_t arb_csr_lock; + /**< lock to protect accesses to the arbiter CSR */ + __extension__ + uint8_t attached : 1; + /**< Flag indicating the device is attached */ + + struct qat_qp *qps_in_use[QAT_MAX_SERVICES][ADF_MAX_QPS_ON_ANY_SERVICE]; + /**< links to qps set up for each service, index same as on API */ + + /* Data relating to symmetric crypto service */ + struct qat_sym_dev_private *sym_dev; + /**< link back to cryptodev private data */ + struct rte_device sym_rte_dev; + /**< This represents the crypto subset of this pci device. 
+ * Register with this rather than with the one in + * pci_dev so that its driver can have a crypto-specific name + */ + + /* Data relating to compression service */ + struct qat_comp_dev_private *comp_dev; + /**< link back to compressdev private data */ + + /* Data relating to asymmetric crypto service */ + +}; + +struct qat_gen_hw_data { + enum qat_device_gen dev_gen; + const struct qat_qp_hw_data (*qp_hw_data)[ADF_MAX_QPS_ON_ANY_SERVICE]; +}; + +extern struct qat_gen_hw_data qat_gen_config[]; + +struct qat_pci_device * +qat_pci_device_allocate(struct rte_pci_device *pci_dev); + +int +qat_pci_device_release(struct rte_pci_device *pci_dev); + +struct qat_pci_device * +qat_get_qat_dev_from_pci_dev(struct rte_pci_device *pci_dev); + +/* declaration needed for weak functions */ +int +qat_sym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused); + +int +qat_asym_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused); + +int +qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused); + +int +qat_asym_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused); + +int +qat_comp_dev_create(struct qat_pci_device *qat_pci_dev __rte_unused); + +int +qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev __rte_unused); + +#endif /* _QAT_DEVICE_H_ */ diff --git a/drivers/common/qat/qat_logs.c b/drivers/common/qat/qat_logs.c new file mode 100644 index 00000000..7a861709 --- /dev/null +++ b/drivers/common/qat/qat_logs.c @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ + +#include +#include + +#include "qat_logs.h" + +int qat_gen_logtype; +int qat_dp_logtype; + +int +qat_hexdump_log(uint32_t level, uint32_t logtype, const char *title, + const void *buf, unsigned int len) +{ + if (level > rte_log_get_global_level()) + return 0; + if (level > (uint32_t)(rte_log_get_level(logtype))) + return 0; + + rte_hexdump(rte_logs.file == NULL ? stderr : rte_logs.file, + title, buf, len); + return 0; +} + +RTE_INIT(qat_pci_init_log) +{ + /* Non-data-path logging for pci device and all services */ + qat_gen_logtype = rte_log_register("pmd.qat_general"); + if (qat_gen_logtype >= 0) + rte_log_set_level(qat_gen_logtype, RTE_LOG_NOTICE); + + /* data-path logging for all services */ + qat_dp_logtype = rte_log_register("pmd.qat_dp"); + if (qat_dp_logtype >= 0) + rte_log_set_level(qat_dp_logtype, RTE_LOG_NOTICE); +} diff --git a/drivers/common/qat/qat_logs.h b/drivers/common/qat/qat_logs.h new file mode 100644 index 00000000..4baea12c --- /dev/null +++ b/drivers/common/qat/qat_logs.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015-2018 Intel Corporation + */ + +#ifndef _QAT_LOGS_H_ +#define _QAT_LOGS_H_ + +extern int qat_gen_logtype; +extern int qat_dp_logtype; + +#define QAT_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, qat_gen_logtype, \ + "%s(): " fmt "\n", __func__, ## args) + +#define QAT_DP_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, qat_dp_logtype, \ + "%s(): " fmt "\n", __func__, ## args) + +#define QAT_DP_HEXDUMP_LOG(level, title, buf, len) \ + qat_hexdump_log(RTE_LOG_ ## level, qat_dp_logtype, title, buf, len) + +/** + * qat_hexdump_log - Dump out memory in a special hex dump format. + * + * Dump out the message buffer in a special hex dump output format with + * characters printed for each line of 16 hex values. The message will be sent + * to the stream defined by rte_logs.file or to stderr in case of rte_logs.file + * is undefined. 
+ */ +int +qat_hexdump_log(uint32_t level, uint32_t logtype, const char *title, + const void *buf, unsigned int len); + +#endif /* _QAT_LOGS_H_ */ diff --git a/drivers/common/qat/qat_qp.c b/drivers/common/qat/qat_qp.c new file mode 100644 index 00000000..7ca7a45e --- /dev/null +++ b/drivers/common/qat/qat_qp.c @@ -0,0 +1,642 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2015-2018 Intel Corporation + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qat_logs.h" +#include "qat_device.h" +#include "qat_qp.h" +#include "qat_sym.h" +#include "qat_comp.h" +#include "adf_transport_access_macros.h" + + +#define ADF_MAX_DESC 4096 +#define ADF_MIN_DESC 128 + +#define ADF_ARB_REG_SLOT 0x1000 +#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C + +#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \ + ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \ + (ADF_ARB_REG_SLOT * index), value) + +__extension__ +const struct qat_qp_hw_data qat_gen1_qps[QAT_MAX_SERVICES] + [ADF_MAX_QPS_ON_ANY_SERVICE] = { + /* queue pairs which provide an asymmetric crypto service */ + [QAT_SERVICE_ASYMMETRIC] = { + { + .service_type = QAT_SERVICE_ASYMMETRIC, + .hw_bundle_num = 0, + .tx_ring_num = 0, + .rx_ring_num = 8, + .tx_msg_size = 64, + .rx_msg_size = 32, + + }, { + .service_type = QAT_SERVICE_ASYMMETRIC, + .hw_bundle_num = 0, + .tx_ring_num = 1, + .rx_ring_num = 9, + .tx_msg_size = 64, + .rx_msg_size = 32, + } + }, + /* queue pairs which provide a symmetric crypto service */ + [QAT_SERVICE_SYMMETRIC] = { + { + .service_type = QAT_SERVICE_SYMMETRIC, + .hw_bundle_num = 0, + .tx_ring_num = 2, + .rx_ring_num = 10, + .tx_msg_size = 128, + .rx_msg_size = 32, + }, + { + .service_type = QAT_SERVICE_SYMMETRIC, + .hw_bundle_num = 0, + .tx_ring_num = 3, + .rx_ring_num = 11, + .tx_msg_size = 128, + .rx_msg_size = 32, + } + }, + /* queue pairs which provide a compression service */ + [QAT_SERVICE_COMPRESSION] = { + { + .service_type = QAT_SERVICE_COMPRESSION, + .hw_bundle_num = 0, + .tx_ring_num = 6, + .rx_ring_num = 14, + .tx_msg_size = 128, + .rx_msg_size = 32, + }, { + .service_type = QAT_SERVICE_COMPRESSION, + .hw_bundle_num = 0, + .tx_ring_num = 7, + .rx_ring_num = 15, + .tx_msg_size = 128, + .rx_msg_size = 32, + } + } +}; + +static int qat_qp_check_queue_alignment(uint64_t phys_addr, + uint32_t queue_size_bytes); +static void qat_queue_delete(struct qat_queue *queue); +static int qat_queue_create(struct qat_pci_device *qat_dev, + struct qat_queue *queue, struct qat_qp_config *, uint8_t dir); +static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num, + uint32_t *queue_size_for_csr); +static void adf_configure_queues(struct qat_qp *queue); +static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr, + rte_spinlock_t *lock); +static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr, + rte_spinlock_t *lock); + + +int qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data, + enum qat_service_type service) +{ + int i, count; + + for (i = 0, count = 0; i < ADF_MAX_QPS_ON_ANY_SERVICE; i++) + if (qp_hw_data[i].service_type == service) + count++; + return count; +} + +static const struct rte_memzone * +queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size, + int socket_id) +{ + const struct rte_memzone *mz; + + mz = rte_memzone_lookup(queue_name); + if (mz != 0) { + if (((size_t)queue_size <= mz->len) && + ((socket_id == SOCKET_ID_ANY) || + (socket_id == mz->socket_id))) { + QAT_LOG(DEBUG, "re-use memzone already " 
+ "allocated for %s", queue_name); + return mz; + } + + QAT_LOG(ERR, "Incompatible memzone already " + "allocated %s, size %u, socket %d. " + "Requested size %u, socket %u", + queue_name, (uint32_t)mz->len, + mz->socket_id, queue_size, socket_id); + return NULL; + } + + QAT_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u", + queue_name, queue_size, socket_id); + return rte_memzone_reserve_aligned(queue_name, queue_size, + socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size); +} + +int qat_qp_setup(struct qat_pci_device *qat_dev, + struct qat_qp **qp_addr, + uint16_t queue_pair_id, + struct qat_qp_config *qat_qp_conf) + +{ + struct qat_qp *qp; + struct rte_pci_device *pci_dev = qat_dev->pci_dev; + char op_cookie_pool_name[RTE_RING_NAMESIZE]; + uint32_t i; + + QAT_LOG(DEBUG, "Setup qp %u on qat pci device %d gen %d", + queue_pair_id, qat_dev->qat_dev_id, qat_dev->qat_dev_gen); + + if ((qat_qp_conf->nb_descriptors > ADF_MAX_DESC) || + (qat_qp_conf->nb_descriptors < ADF_MIN_DESC)) { + QAT_LOG(ERR, "Can't create qp for %u descriptors", + qat_qp_conf->nb_descriptors); + return -EINVAL; + } + + if (pci_dev->mem_resource[0].addr == NULL) { + QAT_LOG(ERR, "Could not find VF config space " + "(UIO driver attached?)."); + return -EINVAL; + } + + /* Allocate the queue pair data structure. */ + qp = rte_zmalloc("qat PMD qp metadata", + sizeof(*qp), RTE_CACHE_LINE_SIZE); + if (qp == NULL) { + QAT_LOG(ERR, "Failed to alloc mem for qp struct"); + return -ENOMEM; + } + qp->nb_descriptors = qat_qp_conf->nb_descriptors; + qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer", + qat_qp_conf->nb_descriptors * sizeof(*qp->op_cookies), + RTE_CACHE_LINE_SIZE); + if (qp->op_cookies == NULL) { + QAT_LOG(ERR, "Failed to alloc mem for cookie"); + rte_free(qp); + return -ENOMEM; + } + + qp->mmap_bar_addr = pci_dev->mem_resource[0].addr; + qp->inflights16 = 0; + + if (qat_queue_create(qat_dev, &(qp->tx_q), qat_qp_conf, + ADF_RING_DIR_TX) != 0) { + QAT_LOG(ERR, "Tx queue create failed " + "queue_pair_id=%u", queue_pair_id); + goto create_err; + } + + if (qat_queue_create(qat_dev, &(qp->rx_q), qat_qp_conf, + ADF_RING_DIR_RX) != 0) { + QAT_LOG(ERR, "Rx queue create failed " + "queue_pair_id=%hu", queue_pair_id); + qat_queue_delete(&(qp->tx_q)); + goto create_err; + } + + adf_configure_queues(qp); + adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr, + &qat_dev->arb_csr_lock); + + snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, + "%s%d_cookies_%s_qp%hu", + pci_dev->driver->driver.name, qat_dev->qat_dev_id, + qat_qp_conf->service_str, queue_pair_id); + + QAT_LOG(DEBUG, "cookiepool: %s", op_cookie_pool_name); + qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name); + if (qp->op_cookie_pool == NULL) + qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name, + qp->nb_descriptors, + qat_qp_conf->cookie_size, 64, 0, + NULL, NULL, NULL, NULL, qat_qp_conf->socket_id, + 0); + if (!qp->op_cookie_pool) { + QAT_LOG(ERR, "QAT PMD Cannot create" + " op mempool"); + goto create_err; + } + + for (i = 0; i < qp->nb_descriptors; i++) { + if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) { + QAT_LOG(ERR, "QAT PMD Cannot get op_cookie"); + goto create_err; + } + } + + qp->qat_dev_gen = qat_dev->qat_dev_gen; + qp->build_request = qat_qp_conf->build_request; + qp->service_type = qat_qp_conf->hw->service_type; + qp->qat_dev = qat_dev; + + QAT_LOG(DEBUG, "QP setup complete: id: %d, cookiepool: %s", + queue_pair_id, op_cookie_pool_name); + + *qp_addr = qp; + return 0; + +create_err: + if (qp->op_cookie_pool) + 
rte_mempool_free(qp->op_cookie_pool); + rte_free(qp->op_cookies); + rte_free(qp); + return -EFAULT; +} + +int qat_qp_release(struct qat_qp **qp_addr) +{ + struct qat_qp *qp = *qp_addr; + uint32_t i; + + if (qp == NULL) { + QAT_LOG(DEBUG, "qp already freed"); + return 0; + } + + QAT_LOG(DEBUG, "Free qp on qat_pci device %d", + qp->qat_dev->qat_dev_id); + + /* Don't free memory if there are still responses to be processed */ + if (qp->inflights16 == 0) { + qat_queue_delete(&(qp->tx_q)); + qat_queue_delete(&(qp->rx_q)); + } else { + return -EAGAIN; + } + + adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr, + &qp->qat_dev->arb_csr_lock); + + for (i = 0; i < qp->nb_descriptors; i++) + rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]); + + if (qp->op_cookie_pool) + rte_mempool_free(qp->op_cookie_pool); + + rte_free(qp->op_cookies); + rte_free(qp); + *qp_addr = NULL; + return 0; +} + + +static void qat_queue_delete(struct qat_queue *queue) +{ + const struct rte_memzone *mz; + int status = 0; + + if (queue == NULL) { + QAT_LOG(DEBUG, "Invalid queue"); + return; + } + QAT_LOG(DEBUG, "Free ring %d, memzone: %s", + queue->hw_queue_number, queue->memz_name); + + mz = rte_memzone_lookup(queue->memz_name); + if (mz != NULL) { + /* Write an unused pattern to the queue memory. */ + memset(queue->base_addr, 0x7F, queue->queue_size); + status = rte_memzone_free(mz); + if (status != 0) + QAT_LOG(ERR, "Error %d on freeing queue %s", + status, queue->memz_name); + } else { + QAT_LOG(DEBUG, "queue %s doesn't exist", + queue->memz_name); + } +} + +static int +qat_queue_create(struct qat_pci_device *qat_dev, struct qat_queue *queue, + struct qat_qp_config *qp_conf, uint8_t dir) +{ + uint64_t queue_base; + void *io_addr; + const struct rte_memzone *qp_mz; + struct rte_pci_device *pci_dev = qat_dev->pci_dev; + int ret = 0; + uint16_t desc_size = (dir == ADF_RING_DIR_TX ? + qp_conf->hw->tx_msg_size : qp_conf->hw->rx_msg_size); + uint32_t queue_size_bytes = (qp_conf->nb_descriptors)*(desc_size); + + queue->hw_bundle_number = qp_conf->hw->hw_bundle_num; + queue->hw_queue_number = (dir == ADF_RING_DIR_TX ? + qp_conf->hw->tx_ring_num : qp_conf->hw->rx_ring_num); + + if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) { + QAT_LOG(ERR, "Invalid descriptor size %d", desc_size); + return -EINVAL; + } + + /* + * Allocate a memzone for the queue - create a unique name. 
+ */ + snprintf(queue->memz_name, sizeof(queue->memz_name), + "%s_%d_%s_%s_%d_%d", + pci_dev->driver->driver.name, qat_dev->qat_dev_id, + qp_conf->service_str, "qp_mem", + queue->hw_bundle_number, queue->hw_queue_number); + qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes, + qp_conf->socket_id); + if (qp_mz == NULL) { + QAT_LOG(ERR, "Failed to allocate ring memzone"); + return -ENOMEM; + } + + queue->base_addr = (char *)qp_mz->addr; + queue->base_phys_addr = qp_mz->iova; + if (qat_qp_check_queue_alignment(queue->base_phys_addr, + queue_size_bytes)) { + QAT_LOG(ERR, "Invalid alignment on queue create " + " 0x%"PRIx64"\n", + queue->base_phys_addr); + ret = -EFAULT; + goto queue_create_err; + } + + if (adf_verify_queue_size(desc_size, qp_conf->nb_descriptors, + &(queue->queue_size)) != 0) { + QAT_LOG(ERR, "Invalid num inflights"); + ret = -EINVAL; + goto queue_create_err; + } + + queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size, + ADF_BYTES_TO_MSG_SIZE(desc_size)); + queue->modulo_mask = (1 << ADF_RING_SIZE_MODULO(queue->queue_size)) - 1; + + if (queue->max_inflights < 2) { + QAT_LOG(ERR, "Invalid num inflights"); + ret = -EINVAL; + goto queue_create_err; + } + queue->head = 0; + queue->tail = 0; + queue->msg_size = desc_size; + + /* + * Write an unused pattern to the queue memory. + */ + memset(queue->base_addr, 0x7F, queue_size_bytes); + + queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr, + queue->queue_size); + + io_addr = pci_dev->mem_resource[0].addr; + + WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number, + queue->hw_queue_number, queue_base); + + QAT_LOG(DEBUG, "RING: Name:%s, size in CSR: %u, in bytes %u," + " nb msgs %u, msg_size %u, max_inflights %u modulo mask %u", + queue->memz_name, + queue->queue_size, queue_size_bytes, + qp_conf->nb_descriptors, desc_size, + queue->max_inflights, queue->modulo_mask); + + return 0; + +queue_create_err: + rte_memzone_free(qp_mz); + return ret; +} + +static int qat_qp_check_queue_alignment(uint64_t phys_addr, + uint32_t queue_size_bytes) +{ + if (((queue_size_bytes - 1) & phys_addr) != 0) + return -EINVAL; + return 0; +} + +static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num, + uint32_t *p_queue_size_for_csr) +{ + uint8_t i = ADF_MIN_RING_SIZE; + + for (; i <= ADF_MAX_RING_SIZE; i++) + if ((msg_size * msg_num) == + (uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) { + *p_queue_size_for_csr = i; + return 0; + } + QAT_LOG(ERR, "Invalid ring size %d", msg_size * msg_num); + return -EINVAL; +} + +static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr, + rte_spinlock_t *lock) +{ + uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET + + (ADF_ARB_REG_SLOT * + txq->hw_bundle_number); + uint32_t value; + + rte_spinlock_lock(lock); + value = ADF_CSR_RD(base_addr, arb_csr_offset); + value |= (0x01 << txq->hw_queue_number); + ADF_CSR_WR(base_addr, arb_csr_offset, value); + rte_spinlock_unlock(lock); +} + +static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr, + rte_spinlock_t *lock) +{ + uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET + + (ADF_ARB_REG_SLOT * + txq->hw_bundle_number); + uint32_t value; + + rte_spinlock_lock(lock); + value = ADF_CSR_RD(base_addr, arb_csr_offset); + value &= ~(0x01 << txq->hw_queue_number); + ADF_CSR_WR(base_addr, arb_csr_offset, value); + rte_spinlock_unlock(lock); +} + +static void adf_configure_queues(struct qat_qp *qp) +{ + uint32_t queue_config; + struct qat_queue *queue = &qp->tx_q; + + queue_config = 
BUILD_RING_CONFIG(queue->queue_size); + + WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number, + queue->hw_queue_number, queue_config); + + queue = &qp->rx_q; + queue_config = + BUILD_RESP_RING_CONFIG(queue->queue_size, + ADF_RING_NEAR_WATERMARK_512, + ADF_RING_NEAR_WATERMARK_0); + + WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number, + queue->hw_queue_number, queue_config); +} + +static inline uint32_t adf_modulo(uint32_t data, uint32_t modulo_mask) +{ + return data & modulo_mask; +} + +static inline void +txq_write_tail(struct qat_qp *qp, struct qat_queue *q) { + WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number, + q->hw_queue_number, q->tail); + q->nb_pending_requests = 0; + q->csr_tail = q->tail; +} + +static inline +void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q) +{ + uint32_t old_head, new_head; + uint32_t max_head; + + old_head = q->csr_head; + new_head = q->head; + max_head = qp->nb_descriptors * q->msg_size; + + /* write out free descriptors */ + void *cur_desc = (uint8_t *)q->base_addr + old_head; + + if (new_head < old_head) { + memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head); + memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head); + } else { + memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head); + } + q->nb_processed_responses = 0; + q->csr_head = new_head; + + /* write current head to CSR */ + WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number, + q->hw_queue_number, new_head); +} + +uint16_t +qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops) +{ + register struct qat_queue *queue; + struct qat_qp *tmp_qp = (struct qat_qp *)qp; + register uint32_t nb_ops_sent = 0; + register int ret; + uint16_t nb_ops_possible = nb_ops; + register uint8_t *base_addr; + register uint32_t tail; + int overflow; + + if (unlikely(nb_ops == 0)) + return 0; + + /* read params used a lot in main loop into registers */ + queue = &(tmp_qp->tx_q); + base_addr = (uint8_t *)queue->base_addr; + tail = queue->tail; + + /* Find how many can actually fit on the ring */ + tmp_qp->inflights16 += nb_ops; + overflow = tmp_qp->inflights16 - queue->max_inflights; + if (overflow > 0) { + tmp_qp->inflights16 -= overflow; + nb_ops_possible = nb_ops - overflow; + if (nb_ops_possible == 0) + return 0; + } + + while (nb_ops_sent != nb_ops_possible) { + ret = tmp_qp->build_request(*ops, base_addr + tail, + tmp_qp->op_cookies[tail / queue->msg_size], + tmp_qp->qat_dev_gen); + if (ret != 0) { + tmp_qp->stats.enqueue_err_count++; + /* + * This message cannot be enqueued, + * decrease number of ops that wasn't sent + */ + tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent; + if (nb_ops_sent == 0) + return 0; + goto kick_tail; + } + + tail = adf_modulo(tail + queue->msg_size, queue->modulo_mask); + ops++; + nb_ops_sent++; + } +kick_tail: + queue->tail = tail; + tmp_qp->stats.enqueued_count += nb_ops_sent; + queue->nb_pending_requests += nb_ops_sent; + if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH || + queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) { + txq_write_tail(tmp_qp, queue); + } + return nb_ops_sent; +} + +uint16_t +qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops) +{ + struct qat_queue *rx_queue, *tx_queue; + struct qat_qp *tmp_qp = (struct qat_qp *)qp; + uint32_t head; + uint32_t resp_counter = 0; + uint8_t *resp_msg; + + rx_queue = &(tmp_qp->rx_q); + tx_queue = &(tmp_qp->tx_q); + head = rx_queue->head; + resp_msg = (uint8_t *)rx_queue->base_addr + rx_queue->head; + + while (*(uint32_t *)resp_msg 
!= ADF_RING_EMPTY_SIG && + resp_counter != nb_ops) { + + if (tmp_qp->service_type == QAT_SERVICE_SYMMETRIC) + qat_sym_process_response(ops, resp_msg); + else if (tmp_qp->service_type == QAT_SERVICE_COMPRESSION) + qat_comp_process_response(ops, resp_msg); + + head = adf_modulo(head + rx_queue->msg_size, + rx_queue->modulo_mask); + + resp_msg = (uint8_t *)rx_queue->base_addr + head; + ops++; + resp_counter++; + } + if (resp_counter > 0) { + rx_queue->head = head; + tmp_qp->stats.dequeued_count += resp_counter; + rx_queue->nb_processed_responses += resp_counter; + tmp_qp->inflights16 -= resp_counter; + + if (rx_queue->nb_processed_responses > + QAT_CSR_HEAD_WRITE_THRESH) + rxq_free_desc(tmp_qp, rx_queue); + } + /* also check if tail needs to be advanced */ + if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH && + tx_queue->tail != tx_queue->csr_tail) { + txq_write_tail(tmp_qp, tx_queue); + } + return resp_counter; +} + +__attribute__((weak)) int +qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused) +{ + return 0; +} diff --git a/drivers/common/qat/qat_qp.h b/drivers/common/qat/qat_qp.h new file mode 100644 index 00000000..69f8a613 --- /dev/null +++ b/drivers/common/qat/qat_qp.h @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Intel Corporation + */ +#ifndef _QAT_QP_H_ +#define _QAT_QP_H_ + +#include "qat_common.h" +#include "adf_transport_access_macros.h" + +struct qat_pci_device; + +#define QAT_CSR_HEAD_WRITE_THRESH 32U +/* number of requests to accumulate before writing head CSR */ +#define QAT_CSR_TAIL_WRITE_THRESH 32U +/* number of requests to accumulate before writing tail CSR */ +#define QAT_CSR_TAIL_FORCE_WRITE_THRESH 256U +/* number of inflights below which no tail write coalescing should occur */ + +typedef int (*build_request_t)(void *op, + uint8_t *req, void *op_cookie, + enum qat_device_gen qat_dev_gen); +/**< Build a request from an op. */ + +/** + * Structure with data needed for creation of queue pair. + */ +struct qat_qp_hw_data { + enum qat_service_type service_type; + uint8_t hw_bundle_num; + uint8_t tx_ring_num; + uint8_t rx_ring_num; + uint16_t tx_msg_size; + uint16_t rx_msg_size; +}; +/** + * Structure with data needed for creation of queue pair. + */ +struct qat_qp_config { + const struct qat_qp_hw_data *hw; + uint32_t nb_descriptors; + uint32_t cookie_size; + int socket_id; + build_request_t build_request; + const char *service_str; +}; + +/** + * Structure associated with each queue. 
+ */ +struct qat_queue { + char memz_name[RTE_MEMZONE_NAMESIZE]; + void *base_addr; /* Base address */ + rte_iova_t base_phys_addr; /* Queue physical address */ + uint32_t head; /* Shadow copy of the head */ + uint32_t tail; /* Shadow copy of the tail */ + uint32_t modulo_mask; + uint32_t msg_size; + uint16_t max_inflights; + uint32_t queue_size; + uint8_t hw_bundle_number; + uint8_t hw_queue_number; + /* HW queue aka ring offset on bundle */ + uint32_t csr_head; /* last written head value */ + uint32_t csr_tail; /* last written tail value */ + uint16_t nb_processed_responses; + /* number of responses processed since last CSR head write */ + uint16_t nb_pending_requests; + /* number of requests pending since last CSR tail write */ +}; + +struct qat_qp { + void *mmap_bar_addr; + uint16_t inflights16; + struct qat_queue tx_q; + struct qat_queue rx_q; + struct qat_common_stats stats; + struct rte_mempool *op_cookie_pool; + void **op_cookies; + uint32_t nb_descriptors; + enum qat_device_gen qat_dev_gen; + build_request_t build_request; + enum qat_service_type service_type; + struct qat_pci_device *qat_dev; + /**< qat device this qp is on */ +} __rte_cache_aligned; + +extern const struct qat_qp_hw_data qat_gen1_qps[][ADF_MAX_QPS_ON_ANY_SERVICE]; + +uint16_t +qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops); + +uint16_t +qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops); + +int +qat_qp_release(struct qat_qp **qp_addr); + +int +qat_qp_setup(struct qat_pci_device *qat_dev, + struct qat_qp **qp_addr, uint16_t queue_pair_id, + struct qat_qp_config *qat_qp_conf); + +int +qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data, + enum qat_service_type service); + +/* Needed for weak function*/ +int +qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused); + +#endif /* _QAT_QP_H_ */ -- cgit 1.2.3-korg
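
Reviewer note, not part of the commit: the ICP_QAT_HW_CIPHER_CONFIG_BUILD and ICP_QAT_HW_COMPRESSION_CONFIG_BUILD macros added in icp_qat_hw.h pack the per-slice hardware config word from the enums defined alongside them. A minimal sketch of their use follows; the algorithm, mode, depth and file-type choices are illustrative assumptions only, the real selections are made by the crypto and compress PMDs from the session/xform parameters.

	/* Sketch only: compose cipher and compression hw config words. */
	#include <stdint.h>
	#include "icp_qat_hw.h"

	static uint32_t
	example_cipher_config(void)
	{
		/* AES-128 in CBC mode, no key conversion, encrypt direction. */
		return ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE,
				ICP_QAT_HW_CIPHER_ALGO_AES128,
				ICP_QAT_HW_CIPHER_NO_CONVERT,
				ICP_QAT_HW_CIPHER_ENCRYPT);
	}

	static uint32_t
	example_compression_config(void)
	{
		/* Deflate, delayed match disabled, shallowest search depth,
		 * default file type.
		 */
		return ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
				ICP_QAT_HW_COMPRESSION_DIR_COMPRESS,
				ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
				ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
				ICP_QAT_HW_COMPRESSION_DEPTH_1,
				ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
	}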
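
A second illustrative note, also not from the commit: qat_sgl_fill_array() in qat_common.c expects a caller-provided buffer laid out as the qat_sgl_hdr followed by a fixed number of qat_flat_buf entries; the service PMDs embed such a structure in their per-op cookies. A hedged sketch is below; the segment count and structure name are invented for the example.

	#include <rte_mbuf.h>
	#include "qat_common.h"

	#define EXAMPLE_SGL_MAX_SEGS 16	/* assumed limit, not set by this patch */

	/* Fixed-size SGL container following the qat_sgl layout. */
	struct example_qat_sgl {
		qat_sgl_hdr;
		struct qat_flat_buf buffers[EXAMPLE_SGL_MAX_SEGS];
	} __rte_packed __rte_cache_aligned;

	static int
	example_fill_sgl(struct rte_mbuf *mbuf, uint32_t data_len,
			struct example_qat_sgl *sgl)
	{
		/* Walk the mbuf chain from offset 0; returns 0 on success,
		 * -EINVAL if the chain is too short or exceeds max_segs.
		 */
		return qat_sgl_fill_array(mbuf, 0, sgl, data_len,
				EXAMPLE_SGL_MAX_SEGS);
	}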
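
Finally, an illustrative sketch (again not part of the commit) of how a service PMD drives the common queue-pair layer: look up the per-generation hw data from qat_gen_config, fill a qat_qp_config, create the qp with qat_qp_setup(), then use qat_enqueue_op_burst()/qat_dequeue_op_burst() on the data path. The cookie type, descriptor count and build_request callback named here are placeholders, not definitions from this patch.

	#include <rte_memory.h>
	#include "qat_device.h"
	#include "qat_qp.h"

	/* Placeholder per-op cookie and request builder for the example. */
	struct example_cookie { uint8_t sgl_space[1024]; };

	static int
	example_build_request(void *op, uint8_t *req, void *op_cookie,
			enum qat_device_gen qat_dev_gen)
	{
		(void)op; (void)req; (void)op_cookie; (void)qat_dev_gen;
		return 0;	/* a real PMD writes the firmware request here */
	}

	static int
	example_qp_create(struct qat_pci_device *qat_dev, struct qat_qp **qp)
	{
		const struct qat_qp_hw_data *hw =
			&qat_gen_config[qat_dev->qat_dev_gen]
				.qp_hw_data[QAT_SERVICE_SYMMETRIC][0];

		struct qat_qp_config cfg = {
			.hw = hw,
			.nb_descriptors = 512,	/* within [ADF_MIN_DESC, ADF_MAX_DESC] */
			.cookie_size = sizeof(struct example_cookie),
			.socket_id = SOCKET_ID_ANY,
			.build_request = example_build_request,
			.service_str = "sym",
		};

		return qat_qp_setup(qat_dev, qp, 0, &cfg);
	}

	static void
	example_data_path(struct qat_qp *qp, void **ops, uint16_t nb_ops)
	{
		/* Requests are coalesced; tail/head CSR writes happen inside. */
		uint16_t sent = qat_enqueue_op_burst(qp, ops, nb_ops);
		uint16_t done = qat_dequeue_op_burst(qp, ops, sent);
		(void)done;
	}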