author:    Luca Boccassi <luca.boccassi@gmail.com>  2018-08-14 18:52:30 +0100
committer: Luca Boccassi <luca.boccassi@gmail.com>  2018-08-14 18:53:17 +0100
commit:    b63264c8342e6a1b6971c79550d2af2024b6a4de
tree:      83114aac64286fe616506c0b3dfaec2ab86ef835
parent:    ca33590b6af032bff57d9cc70455660466a654b2

New upstream version 18.08 (tag: upstream/18.08)

Change-Id: I32fdf5e5016556d9c0a6d88ddaf1fc468961790a
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/compress/qat')
 drivers/compress/qat/meson.build             |  18
 drivers/compress/qat/qat_comp.c              | 393
 drivers/compress/qat/qat_comp.h              |  65
 drivers/compress/qat/qat_comp_pmd.c          | 429
 drivers/compress/qat/qat_comp_pmd.h          |  39
 drivers/compress/qat/rte_pmd_qat_version.map |   3
 6 files changed, 947 insertions, 0 deletions
diff --git a/drivers/compress/qat/meson.build b/drivers/compress/qat/meson.build
new file mode 100644
index 00000000..9d15076d
--- /dev/null
+++ b/drivers/compress/qat/meson.build
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017-2018 Intel Corporation
+
+
+# Add our source files to the list
+allow_experimental_apis = true
+qat_sources += files('qat_comp_pmd.c',
+ 'qat_comp.c')
+qat_includes += include_directories('.')
+qat_deps += 'compressdev'
+qat_ext_deps += dep
+
+# build the whole driver
+sources += qat_sources
+cflags += qat_cflags
+deps += qat_deps
+ext_deps += qat_ext_deps
+includes += qat_includes
diff --git a/drivers/compress/qat/qat_comp.c b/drivers/compress/qat/qat_comp.c
new file mode 100644
index 00000000..38c8a5b8
--- /dev/null
+++ b/drivers/compress/qat/qat_comp.c
@@ -0,0 +1,393 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_hexdump.h>
+#include <rte_comp.h>
+#include <rte_bus_pci.h>
+#include <rte_byteorder.h>
+#include <rte_memcpy.h>
+#include <rte_common.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+
+#include "qat_logs.h"
+#include "qat_comp.h"
+#include "qat_comp_pmd.h"
+
+
+int
+qat_comp_build_request(void *in_op, uint8_t *out_msg,
+ void *op_cookie,
+ enum qat_device_gen qat_dev_gen __rte_unused)
+{
+ struct rte_comp_op *op = in_op;
+ struct qat_comp_op_cookie *cookie =
+ (struct qat_comp_op_cookie *)op_cookie;
+ struct qat_comp_xform *qat_xform = op->private_xform;
+ const uint8_t *tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;
+ struct icp_qat_fw_comp_req *comp_req =
+ (struct icp_qat_fw_comp_req *)out_msg;
+
+ if (unlikely(op->op_type != RTE_COMP_OP_STATELESS)) {
+ QAT_DP_LOG(ERR, "QAT PMD only supports stateless compression "
+ "operation requests, op (%p) is not a "
+ "stateless operation.", op);
+ return -EINVAL;
+ }
+
+ rte_mov128(out_msg, tmpl);
+ comp_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
+
+ /* common for sgl and flat buffers */
+ comp_req->comp_pars.comp_len = op->src.length;
+ comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst) -
+ op->dst.offset;
+
+ if (op->m_src->next != NULL || op->m_dst->next != NULL) {
+ /* sgl */
+ int ret = 0;
+
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_SGL);
+
+ ret = qat_sgl_fill_array(op->m_src,
+ op->src.offset,
+ &cookie->qat_sgl_src,
+ op->src.length,
+ RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
+ if (ret) {
+ QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array");
+ return ret;
+ }
+
+ ret = qat_sgl_fill_array(op->m_dst,
+ op->dst.offset,
+ &cookie->qat_sgl_dst,
+ comp_req->comp_pars.out_buffer_sz,
+ RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
+ if (ret) {
+ QAT_DP_LOG(ERR, "QAT PMD Cannot fill dest. sgl array");
+ return ret;
+ }
+
+ comp_req->comn_mid.src_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ comp_req->comn_mid.dest_data_addr =
+ cookie->qat_sgl_dst_phys_addr;
+ comp_req->comn_mid.src_length = 0;
+ comp_req->comn_mid.dst_length = 0;
+
+ } else {
+ /* flat aka linear buffer */
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_FLAT);
+ comp_req->comn_mid.src_length = op->src.length;
+ comp_req->comn_mid.dst_length =
+ comp_req->comp_pars.out_buffer_sz;
+
+ comp_req->comn_mid.src_data_addr =
+ rte_pktmbuf_mtophys_offset(op->m_src, op->src.offset);
+ comp_req->comn_mid.dest_data_addr =
+ rte_pktmbuf_mtophys_offset(op->m_dst, op->dst.offset);
+ }
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_LOG(DEBUG, "Direction: %s",
+ qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
+ "decompression" : "compression");
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message:", comp_req,
+ sizeof(struct icp_qat_fw_comp_req));
+#endif
+ return 0;
+}
+
+int
+qat_comp_process_response(void **op, uint8_t *resp)
+{
+ struct icp_qat_fw_comp_resp *resp_msg =
+ (struct icp_qat_fw_comp_resp *)resp;
+ struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t)
+ (resp_msg->opaque_data);
+ struct qat_comp_xform *qat_xform = (struct qat_comp_xform *)
+ (rx_op->private_xform);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_LOG(DEBUG, "Direction: %s",
+ qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
+ "decompression" : "compression");
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comp_resp));
+#endif
+
+ if (likely(qat_xform->qat_comp_request_type
+ != QAT_COMP_REQUEST_DECOMPRESS)) {
+ if (unlikely(ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(
+ resp_msg->comn_resp.hdr_flags)
+ == ICP_QAT_FW_COMP_NO_CNV)) {
+ rx_op->status = RTE_COMP_OP_STATUS_ERROR;
+ rx_op->debug_status = ERR_CODE_QAT_COMP_WRONG_FW;
+ *op = (void *)rx_op;
+ QAT_DP_LOG(ERR, "QAT has wrong firmware");
+ return 0;
+ }
+ }
+
+ if ((ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(resp_msg->comn_resp.comn_status)
+ | ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(
+ resp_msg->comn_resp.comn_status)) !=
+ ICP_QAT_FW_COMN_STATUS_FLAG_OK) {
+
+ rx_op->status = RTE_COMP_OP_STATUS_ERROR;
+ rx_op->debug_status =
+ *((uint16_t *)(&resp_msg->comn_resp.comn_error));
+ } else {
+ struct qat_comp_xform *qat_xform = rx_op->private_xform;
+ struct icp_qat_fw_resp_comp_pars *comp_resp =
+ (struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars;
+
+ rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;
+ rx_op->consumed = comp_resp->input_byte_counter;
+ rx_op->produced = comp_resp->output_byte_counter;
+
+ if (qat_xform->checksum_type != RTE_COMP_CHECKSUM_NONE) {
+ if (qat_xform->checksum_type == RTE_COMP_CHECKSUM_CRC32)
+ rx_op->output_chksum = comp_resp->curr_crc32;
+ else if (qat_xform->checksum_type ==
+ RTE_COMP_CHECKSUM_ADLER32)
+ rx_op->output_chksum = comp_resp->curr_adler_32;
+ else
+ rx_op->output_chksum = comp_resp->curr_chksum;
+ }
+ }
+ *op = (void *)rx_op;
+
+ return 0;
+}
+
+unsigned int
+qat_comp_xform_size(void)
+{
+ return RTE_ALIGN_CEIL(sizeof(struct qat_comp_xform), 8);
+}
+
+static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
+ enum qat_comp_request_type request)
+{
+ if (request == QAT_COMP_REQUEST_FIXED_COMP_STATELESS)
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
+ else if (request == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
+ else if (request == QAT_COMP_REQUEST_DECOMPRESS)
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+
+ header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
+ header->hdr_flags =
+ ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+
+ header->comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(
+ QAT_COMN_CD_FLD_TYPE_16BYTE_DATA, QAT_COMN_PTR_TYPE_FLAT);
+}
+
+static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
+ const struct rte_memzone *interm_buff_mz __rte_unused,
+ const struct rte_comp_xform *xform)
+{
+ struct icp_qat_fw_comp_req *comp_req;
+ int comp_level, algo;
+ uint32_t req_par_flags;
+ int direction = ICP_QAT_HW_COMPRESSION_DIR_COMPRESS;
+
+ if (unlikely(qat_xform == NULL)) {
+ QAT_LOG(ERR, "Session was not created for this device");
+ return -EINVAL;
+ }
+
+ if (qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
+ direction = ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS;
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
+ req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
+ ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
+ ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_NO_CNV,
+ ICP_QAT_FW_COMP_NO_CNV_RECOVERY);
+
+ } else {
+ if (xform->compress.level == RTE_COMP_LEVEL_PMD_DEFAULT)
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
+ else if (xform->compress.level == 1)
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
+ else if (xform->compress.level == 2)
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_4;
+ else if (xform->compress.level == 3)
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
+ else if (xform->compress.level >= 4 &&
+ xform->compress.level <= 9)
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_16;
+ else {
+ QAT_LOG(ERR, "compression level not supported");
+ return -EINVAL;
+ }
+ req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
+ ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
+ ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_CNV,
+ ICP_QAT_FW_COMP_CNV_RECOVERY);
+ }
+
+ switch (xform->compress.algo) {
+ case RTE_COMP_ALGO_DEFLATE:
+ algo = ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE;
+ break;
+ case RTE_COMP_ALGO_LZS:
+ default:
+ /* RTE_COMP_NULL */
+ QAT_LOG(ERR, "compression algorithm not supported");
+ return -EINVAL;
+ }
+
+ comp_req = &qat_xform->qat_comp_req_tmpl;
+
+ /* Initialize header */
+ qat_comp_create_req_hdr(&comp_req->comn_hdr,
+ qat_xform->qat_comp_request_type);
+
+ comp_req->comn_hdr.serv_specif_flags = ICP_QAT_FW_COMP_FLAGS_BUILD(
+ ICP_QAT_FW_COMP_STATELESS_SESSION,
+ ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+
+ comp_req->cd_pars.sl.comp_slice_cfg_word[0] =
+ ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
+ direction,
+			/* The only valid mode in CPM 1.6 */
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED, algo,
+ /* Translate level to depth */
+ comp_level, ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+
+ comp_req->comp_pars.initial_adler = 1;
+ comp_req->comp_pars.initial_crc32 = 0;
+ comp_req->comp_pars.req_par_flags = req_par_flags;
+
+
+ if (qat_xform->qat_comp_request_type ==
+ QAT_COMP_REQUEST_FIXED_COMP_STATELESS ||
+ qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
+ ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
+ ICP_QAT_FW_SLICE_COMP);
+ } else if (qat_xform->qat_comp_request_type ==
+ QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS) {
+
+ QAT_LOG(ERR, "Dynamic huffman encoding not supported");
+ return -EINVAL;
+ }
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message template:", comp_req,
+ sizeof(struct icp_qat_fw_comp_req));
+#endif
+ return 0;
+}
+
+/**
+ * Create driver private_xform data.
+ *
+ * @param dev
+ * Compressdev device
+ * @param xform
+ * xform data from application
+ * @param private_xform
+ * ptr where handle of pmd's private_xform data should be stored
+ * @return
+ * - if successful returns 0
+ * and valid private_xform handle
+ * - <0 in error cases
+ * - Returns -EINVAL if input parameters are invalid.
+ * - Returns -ENOTSUP if comp device does not support the comp transform.
+ * - Returns -ENOMEM if the private_xform could not be allocated.
+ */
+int
+qat_comp_private_xform_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform,
+ void **private_xform)
+{
+ struct qat_comp_dev_private *qat = dev->data->dev_private;
+
+ if (unlikely(private_xform == NULL)) {
+ QAT_LOG(ERR, "QAT: private_xform parameter is NULL");
+ return -EINVAL;
+ }
+ if (unlikely(qat->xformpool == NULL)) {
+ QAT_LOG(ERR, "QAT device has no private_xform mempool");
+ return -ENOMEM;
+ }
+ if (rte_mempool_get(qat->xformpool, private_xform)) {
+ QAT_LOG(ERR, "Couldn't get object from qat xform mempool");
+ return -ENOMEM;
+ }
+
+ struct qat_comp_xform *qat_xform =
+ (struct qat_comp_xform *)*private_xform;
+
+ if (xform->type == RTE_COMP_COMPRESS) {
+ if (xform->compress.deflate.huffman ==
+ RTE_COMP_HUFFMAN_DYNAMIC) {
+ QAT_LOG(ERR,
+ "QAT device doesn't support dynamic compression");
+ return -ENOTSUP;
+ }
+
+ if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED ||
+ ((xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_DEFAULT)
+ && qat->interm_buff_mz == NULL))
+
+ qat_xform->qat_comp_request_type =
+ QAT_COMP_REQUEST_FIXED_COMP_STATELESS;
+
+
+ } else {
+ qat_xform->qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
+ }
+
+ qat_xform->checksum_type = xform->compress.chksum;
+
+ if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform)) {
+ QAT_LOG(ERR, "QAT: Problem with setting compression");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * Free driver private_xform data.
+ *
+ * @param dev
+ * Compressdev device
+ * @param private_xform
+ * handle of pmd's private_xform data
+ * @return
+ * - 0 if successful
+ * - <0 in error cases
+ * - Returns -EINVAL if input parameters are invalid.
+ */
+int
+qat_comp_private_xform_free(struct rte_compressdev *dev __rte_unused,
+ void *private_xform)
+{
+ struct qat_comp_xform *qat_xform =
+ (struct qat_comp_xform *)private_xform;
+
+ if (qat_xform) {
+ memset(qat_xform, 0, qat_comp_xform_size());
+ struct rte_mempool *mp = rte_mempool_from_obj(qat_xform);
+
+ rte_mempool_put(mp, qat_xform);
+ return 0;
+ }
+ return -EINVAL;
+}
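
The xform create/free pair above is reached through the generic compressdev
API rather than called directly. A minimal application-side sketch, assuming
an already-probed device (the helper name, device id and parameter values are
illustrative, not part of this patch):

    #include <rte_comp.h>
    #include <rte_compressdev.h>

    /* Stateless fixed-Huffman deflate: the only compress mode this
     * PMD accepts (dynamic Huffman returns -ENOTSUP above). */
    static int
    create_deflate_xform(uint8_t dev_id, void **priv_xform)
    {
            struct rte_comp_xform xform = {
                    .type = RTE_COMP_COMPRESS,
                    .compress = {
                            .algo = RTE_COMP_ALGO_DEFLATE,
                            .deflate.huffman = RTE_COMP_HUFFMAN_FIXED,
                            .level = RTE_COMP_LEVEL_PMD_DEFAULT,
                            .chksum = RTE_COMP_CHECKSUM_NONE,
                            .window_size = 15,
                    },
            };

            /* Dispatches to qat_comp_private_xform_create() on QAT. */
            return rte_compressdev_private_xform_create(dev_id, &xform,
                            priv_xform);
    }

The returned handle is what later lands in op->private_xform, which
qat_comp_build_request() reads on every enqueue; it is released through
rte_compressdev_private_xform_free(), ending up in
qat_comp_private_xform_free() above.
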
diff --git a/drivers/compress/qat/qat_comp.h b/drivers/compress/qat/qat_comp.h
new file mode 100644
index 00000000..8d315efb
--- /dev/null
+++ b/drivers/compress/qat/qat_comp.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_COMP_H_
+#define _QAT_COMP_H_
+
+#ifdef RTE_LIBRTE_COMPRESSDEV
+
+#include <rte_compressdev.h>
+#include <rte_compressdev_pmd.h>
+
+#include "qat_common.h"
+#include "icp_qat_hw.h"
+#include "icp_qat_fw_comp.h"
+#include "icp_qat_fw_la.h"
+
+#define ERR_CODE_QAT_COMP_WRONG_FW -99
+
+enum qat_comp_request_type {
+ QAT_COMP_REQUEST_FIXED_COMP_STATELESS,
+ QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS,
+ QAT_COMP_REQUEST_DECOMPRESS,
+ REQ_COMP_END
+};
+
+struct qat_comp_sgl {
+ qat_sgl_hdr;
+ struct qat_flat_buf buffers[RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS];
+} __rte_packed __rte_cache_aligned;
+
+struct qat_comp_op_cookie {
+ struct qat_comp_sgl qat_sgl_src;
+ struct qat_comp_sgl qat_sgl_dst;
+ phys_addr_t qat_sgl_src_phys_addr;
+ phys_addr_t qat_sgl_dst_phys_addr;
+};
+
+struct qat_comp_xform {
+ struct icp_qat_fw_comp_req qat_comp_req_tmpl;
+ enum qat_comp_request_type qat_comp_request_type;
+ enum rte_comp_checksum_type checksum_type;
+};
+
+int
+qat_comp_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
+ enum qat_device_gen qat_dev_gen __rte_unused);
+
+int
+qat_comp_process_response(void **op, uint8_t *resp);
+
+
+int
+qat_comp_private_xform_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform,
+ void **private_xform);
+
+int
+qat_comp_private_xform_free(struct rte_compressdev *dev, void *private_xform);
+
+unsigned int
+qat_comp_xform_size(void);
+
+#endif
+#endif
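
Since QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS exists in the enum but is
rejected by this version of the driver, a portable application should check
the advertised feature flags before selecting a Huffman mode. A short sketch
(the helper is hypothetical; this PMD only sets RTE_COMP_FF_HUFFMAN_FIXED in
its capability table, shown in qat_comp_pmd.c below):

    #include <rte_compressdev.h>

    /* Return non-zero if the device supports deflate with the given
     * feature flag, e.g. RTE_COMP_FF_HUFFMAN_FIXED or
     * RTE_COMP_FF_HUFFMAN_DYNAMIC. */
    static int
    deflate_feature_supported(uint8_t dev_id, uint64_t feature_flag)
    {
            const struct rte_compressdev_capabilities *cap =
                    rte_compressdev_capability_get(dev_id,
                                    RTE_COMP_ALGO_DEFLATE);

            return cap != NULL &&
                    (cap->comp_feature_flags & feature_flag) != 0;
    }
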
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
new file mode 100644
index 00000000..b89975fc
--- /dev/null
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -0,0 +1,429 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#include "qat_comp.h"
+#include "qat_comp_pmd.h"
+
+static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
+ {/* COMPRESSION - deflate */
+ .algo = RTE_COMP_ALGO_DEFLATE,
+ .comp_feature_flags = RTE_COMP_FF_MULTI_PKT_CHECKSUM |
+ RTE_COMP_FF_CRC32_CHECKSUM |
+ RTE_COMP_FF_ADLER32_CHECKSUM |
+ RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
+ RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
+ RTE_COMP_FF_HUFFMAN_FIXED |
+ RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
+ RTE_COMP_FF_OOP_LB_IN_SGL_OUT,
+ .window_size = {.min = 15, .max = 15, .increment = 0} },
+ {RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } };
+
+static void
+qat_comp_stats_get(struct rte_compressdev *dev,
+ struct rte_compressdev_stats *stats)
+{
+ struct qat_common_stats qat_stats = {0};
+ struct qat_comp_dev_private *qat_priv;
+
+ if (stats == NULL || dev == NULL) {
+ QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
+ return;
+ }
+ qat_priv = dev->data->dev_private;
+
+ qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_COMPRESSION);
+ stats->enqueued_count = qat_stats.enqueued_count;
+ stats->dequeued_count = qat_stats.dequeued_count;
+ stats->enqueue_err_count = qat_stats.enqueue_err_count;
+ stats->dequeue_err_count = qat_stats.dequeue_err_count;
+}
+
+static void
+qat_comp_stats_reset(struct rte_compressdev *dev)
+{
+ struct qat_comp_dev_private *qat_priv;
+
+ if (dev == NULL) {
+ QAT_LOG(ERR, "invalid compressdev ptr %p", dev);
+ return;
+ }
+ qat_priv = dev->data->dev_private;
+
+ qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_COMPRESSION);
+
+}
+
+static int
+qat_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id)
+{
+ struct qat_comp_dev_private *qat_private = dev->data->dev_private;
+
+ QAT_LOG(DEBUG, "Release comp qp %u on device %d",
+ queue_pair_id, dev->data->dev_id);
+
+ qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][queue_pair_id]
+ = NULL;
+
+ return qat_qp_release((struct qat_qp **)
+ &(dev->data->queue_pairs[queue_pair_id]));
+}
+
+static int
+qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+ uint32_t max_inflight_ops, int socket_id)
+{
+ struct qat_qp *qp;
+ int ret = 0;
+ uint32_t i;
+ struct qat_qp_config qat_qp_conf;
+
+ struct qat_qp **qp_addr =
+ (struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
+ struct qat_comp_dev_private *qat_private = dev->data->dev_private;
+ const struct qat_qp_hw_data *comp_hw_qps =
+ qat_gen_config[qat_private->qat_dev->qat_dev_gen]
+ .qp_hw_data[QAT_SERVICE_COMPRESSION];
+ const struct qat_qp_hw_data *qp_hw_data = comp_hw_qps + qp_id;
+
+ /* If qp is already in use free ring memory and qp metadata. */
+ if (*qp_addr != NULL) {
+ ret = qat_comp_qp_release(dev, qp_id);
+ if (ret < 0)
+ return ret;
+ }
+ if (qp_id >= qat_qps_per_service(comp_hw_qps,
+ QAT_SERVICE_COMPRESSION)) {
+ QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
+ return -EINVAL;
+ }
+
+ qat_qp_conf.hw = qp_hw_data;
+ qat_qp_conf.build_request = qat_comp_build_request;
+ qat_qp_conf.cookie_size = sizeof(struct qat_comp_op_cookie);
+ qat_qp_conf.nb_descriptors = max_inflight_ops;
+ qat_qp_conf.socket_id = socket_id;
+ qat_qp_conf.service_str = "comp";
+
+ ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf);
+ if (ret != 0)
+ return ret;
+
+ /* store a link to the qp in the qat_pci_device */
+ qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][qp_id]
+ = *qp_addr;
+
+ qp = (struct qat_qp *)*qp_addr;
+
+ for (i = 0; i < qp->nb_descriptors; i++) {
+
+ struct qat_comp_op_cookie *cookie =
+ qp->op_cookies[i];
+
+ cookie->qat_sgl_src_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_comp_op_cookie,
+ qat_sgl_src);
+
+ cookie->qat_sgl_dst_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_comp_op_cookie,
+ qat_sgl_dst);
+ }
+
+ return ret;
+}
+
+static struct rte_mempool *
+qat_comp_create_xform_pool(struct qat_comp_dev_private *comp_dev,
+ uint32_t num_elements)
+{
+ char xform_pool_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *mp;
+
+ snprintf(xform_pool_name, RTE_MEMPOOL_NAMESIZE,
+ "%s_xforms", comp_dev->qat_dev->name);
+
+ QAT_LOG(DEBUG, "xformpool: %s", xform_pool_name);
+ mp = rte_mempool_lookup(xform_pool_name);
+
+ if (mp != NULL) {
+ QAT_LOG(DEBUG, "xformpool already created");
+ if (mp->size != num_elements) {
+ QAT_LOG(DEBUG, "xformpool wrong size - delete it");
+ rte_mempool_free(mp);
+ mp = NULL;
+ comp_dev->xformpool = NULL;
+ }
+ }
+
+ if (mp == NULL)
+ mp = rte_mempool_create(xform_pool_name,
+ num_elements,
+ qat_comp_xform_size(), 0, 0,
+ NULL, NULL, NULL, NULL, rte_socket_id(),
+ 0);
+ if (mp == NULL) {
+ QAT_LOG(ERR, "Err creating mempool %s w %d elements of size %d",
+ xform_pool_name, num_elements, qat_comp_xform_size());
+ return NULL;
+ }
+
+ return mp;
+}
+
+static void
+_qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)
+{
+ /* Free private_xform pool */
+ if (comp_dev->xformpool) {
+ /* Free internal mempool for private xforms */
+ rte_mempool_free(comp_dev->xformpool);
+ comp_dev->xformpool = NULL;
+ }
+}
+
+static int
+qat_comp_dev_config(struct rte_compressdev *dev,
+ struct rte_compressdev_config *config)
+{
+ struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
+ int ret = 0;
+
+ if (config->max_nb_streams != 0) {
+ QAT_LOG(ERR,
+ "QAT device does not support STATEFUL so max_nb_streams must be 0");
+ return -EINVAL;
+ }
+
+ comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev,
+ config->max_nb_priv_xforms);
+ if (comp_dev->xformpool == NULL) {
+
+ ret = -ENOMEM;
+ goto error_out;
+ }
+ return 0;
+
+error_out:
+ _qat_comp_dev_config_clear(comp_dev);
+ return ret;
+}
+
+static int
+qat_comp_dev_start(struct rte_compressdev *dev __rte_unused)
+{
+ return 0;
+}
+
+static void
+qat_comp_dev_stop(struct rte_compressdev *dev __rte_unused)
+{
+
+}
+
+static int
+qat_comp_dev_close(struct rte_compressdev *dev)
+{
+ int i;
+ int ret = 0;
+ struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ ret = qat_comp_qp_release(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ _qat_comp_dev_config_clear(comp_dev);
+
+ return ret;
+}
+
+
+static void
+qat_comp_dev_info_get(struct rte_compressdev *dev,
+ struct rte_compressdev_info *info)
+{
+ struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
+ const struct qat_qp_hw_data *comp_hw_qps =
+ qat_gen_config[comp_dev->qat_dev->qat_dev_gen]
+ .qp_hw_data[QAT_SERVICE_COMPRESSION];
+
+ if (info != NULL) {
+ info->max_nb_queue_pairs =
+ qat_qps_per_service(comp_hw_qps,
+ QAT_SERVICE_COMPRESSION);
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = comp_dev->qat_dev_capabilities;
+ }
+}
+
+static uint16_t
+qat_comp_pmd_enqueue_op_burst(void *qp, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+}
+
+static uint16_t
+qat_comp_pmd_dequeue_op_burst(void *qp, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+}
+
+static uint16_t
+qat_comp_pmd_enq_deq_dummy_op_burst(void *qp __rte_unused,
+ struct rte_comp_op **ops __rte_unused,
+ uint16_t nb_ops __rte_unused)
+{
+	QAT_DP_LOG(ERR, "QAT PMD detected wrong FW version!");
+ return 0;
+}
+
+static struct rte_compressdev_ops compress_qat_dummy_ops = {
+
+ /* Device related operations */
+ .dev_configure = NULL,
+ .dev_start = NULL,
+ .dev_stop = qat_comp_dev_stop,
+ .dev_close = qat_comp_dev_close,
+ .dev_infos_get = NULL,
+
+ .stats_get = NULL,
+ .stats_reset = qat_comp_stats_reset,
+ .queue_pair_setup = NULL,
+ .queue_pair_release = qat_comp_qp_release,
+
+ /* Compression related operations */
+ .private_xform_create = NULL,
+ .private_xform_free = qat_comp_private_xform_free
+};
+
+static uint16_t
+qat_comp_pmd_dequeue_frst_op_burst(void *qp, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+ struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+
+ if (ret) {
+ if ((*ops)->debug_status ==
+ (uint64_t)ERR_CODE_QAT_COMP_WRONG_FW) {
+ tmp_qp->qat_dev->comp_dev->compressdev->enqueue_burst =
+ qat_comp_pmd_enq_deq_dummy_op_burst;
+ tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
+ qat_comp_pmd_enq_deq_dummy_op_burst;
+
+ tmp_qp->qat_dev->comp_dev->compressdev->dev_ops =
+ &compress_qat_dummy_ops;
+			QAT_LOG(ERR, "QAT PMD detected wrong FW version!");
+
+ } else {
+ tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
+ qat_comp_pmd_dequeue_op_burst;
+ }
+ }
+ return ret;
+}
+
+static struct rte_compressdev_ops compress_qat_ops = {
+
+ /* Device related operations */
+ .dev_configure = qat_comp_dev_config,
+ .dev_start = qat_comp_dev_start,
+ .dev_stop = qat_comp_dev_stop,
+ .dev_close = qat_comp_dev_close,
+ .dev_infos_get = qat_comp_dev_info_get,
+
+ .stats_get = qat_comp_stats_get,
+ .stats_reset = qat_comp_stats_reset,
+ .queue_pair_setup = qat_comp_qp_setup,
+ .queue_pair_release = qat_comp_qp_release,
+
+ /* Compression related operations */
+ .private_xform_create = qat_comp_private_xform_create,
+ .private_xform_free = qat_comp_private_xform_free
+};
+
+int
+qat_comp_dev_create(struct qat_pci_device *qat_pci_dev)
+{
+ if (qat_pci_dev->qat_dev_gen == QAT_GEN1) {
+ QAT_LOG(ERR, "Compression PMD not supported on QAT dh895xcc");
+ return 0;
+ }
+
+ struct rte_compressdev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = qat_pci_dev->pci_dev->device.numa_node,
+ };
+ char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ struct rte_compressdev *compressdev;
+ struct qat_comp_dev_private *comp_dev;
+
+ snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN, "%s_%s",
+ qat_pci_dev->name, "comp");
+ QAT_LOG(DEBUG, "Creating QAT COMP device %s", name);
+
+ compressdev = rte_compressdev_pmd_create(name,
+ &qat_pci_dev->pci_dev->device,
+ sizeof(struct qat_comp_dev_private),
+ &init_params);
+
+ if (compressdev == NULL)
+ return -ENODEV;
+
+ compressdev->dev_ops = &compress_qat_ops;
+
+ compressdev->enqueue_burst = qat_comp_pmd_enqueue_op_burst;
+ compressdev->dequeue_burst = qat_comp_pmd_dequeue_frst_op_burst;
+
+ compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
+
+ comp_dev = compressdev->data->dev_private;
+ comp_dev->qat_dev = qat_pci_dev;
+ comp_dev->compressdev = compressdev;
+ qat_pci_dev->comp_dev = comp_dev;
+
+ switch (qat_pci_dev->qat_dev_gen) {
+ case QAT_GEN1:
+ case QAT_GEN2:
+ comp_dev->qat_dev_capabilities = qat_comp_gen_capabilities;
+ break;
+ default:
+ comp_dev->qat_dev_capabilities = qat_comp_gen_capabilities;
+ QAT_LOG(DEBUG,
+ "QAT gen %d capabilities unknown, default to GEN1",
+ qat_pci_dev->qat_dev_gen);
+ break;
+ }
+
+ QAT_LOG(DEBUG,
+ "Created QAT COMP device %s as compressdev instance %d",
+ name, compressdev->data->dev_id);
+ return 0;
+}
+
+int
+qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+ struct qat_comp_dev_private *comp_dev;
+
+ if (qat_pci_dev == NULL)
+ return -ENODEV;
+
+ comp_dev = qat_pci_dev->comp_dev;
+ if (comp_dev == NULL)
+ return 0;
+
+ /* clean up any resources used by the device */
+ qat_comp_dev_close(comp_dev->compressdev);
+
+ rte_compressdev_pmd_destroy(comp_dev->compressdev);
+ qat_pci_dev->comp_dev = NULL;
+
+ return 0;
+}
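
Everything wired into compress_qat_ops above is driven through the generic
compressdev lifecycle calls. A minimal bring-up sketch under stated
assumptions (device id 0 already created by the QAT PCI probe, one queue
pair, illustrative pool and ring sizes):

    #include <rte_compressdev.h>
    #include <rte_lcore.h>

    static int
    bring_up_qat_comp(uint8_t dev_id)
    {
            struct rte_compressdev_config cfg = {
                    .socket_id = rte_socket_id(),
                    .nb_queue_pairs = 1,
                    .max_nb_priv_xforms = 16, /* sizes the xform mempool */
                    .max_nb_streams = 0,      /* stateful is rejected above */
            };
            int ret;

            /* -> qat_comp_dev_config(): creates the private_xform pool */
            ret = rte_compressdev_configure(dev_id, &cfg);
            if (ret < 0)
                    return ret;

            /* -> qat_comp_qp_setup(); 512 is an assumed inflight depth */
            ret = rte_compressdev_queue_pair_setup(dev_id, 0, 512,
                            rte_socket_id());
            if (ret < 0)
                    return ret;

            /* -> qat_comp_dev_start(), a no-op for this PMD */
            return rte_compressdev_start(dev_id);
    }

From then on, rte_compressdev_enqueue_burst() and
rte_compressdev_dequeue_burst() land in the thin wrappers above; note the
dequeue hook starts out as qat_comp_pmd_dequeue_frst_op_burst(), which swaps
in the dummy ops table if the first response reveals firmware without CNV
support.
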
diff --git a/drivers/compress/qat/qat_comp_pmd.h b/drivers/compress/qat/qat_comp_pmd.h
new file mode 100644
index 00000000..9ad2a283
--- /dev/null
+++ b/drivers/compress/qat/qat_comp_pmd.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_COMP_PMD_H_
+#define _QAT_COMP_PMD_H_
+
+#ifdef RTE_LIBRTE_COMPRESSDEV
+
+#include <rte_compressdev.h>
+#include <rte_compressdev_pmd.h>
+
+#include "qat_device.h"
+
+/** private data structure for a QAT compression device.
+ * This QAT device is a device offering only a compression service,
+ * there can be one of these on each qat_pci_device (VF).
+ */
+struct qat_comp_dev_private {
+ struct qat_pci_device *qat_dev;
+ /**< The qat pci device hosting the service */
+ struct rte_compressdev *compressdev;
+ /**< The pointer to this compression device structure */
+ const struct rte_compressdev_capabilities *qat_dev_capabilities;
+ /* QAT device compression capabilities */
+ const struct rte_memzone *interm_buff_mz;
+ /**< The device's memory for intermediate buffers */
+ struct rte_mempool *xformpool;
+ /**< The device's pool for qat_comp_xforms */
+};
+
+int
+qat_comp_dev_create(struct qat_pci_device *qat_pci_dev);
+
+int
+qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev);
+
+#endif
+#endif /* _QAT_COMP_PMD_H_ */
diff --git a/drivers/compress/qat/rte_pmd_qat_version.map b/drivers/compress/qat/rte_pmd_qat_version.map
new file mode 100644
index 00000000..ad6e191e
--- /dev/null
+++ b/drivers/compress/qat/rte_pmd_qat_version.map
@@ -0,0 +1,3 @@
+DPDK_18.08 {
+ local: *;
+};