Diffstat (limited to 'drivers/compress')
-rw-r--r--   drivers/compress/Makefile                                         10
-rw-r--r--   drivers/compress/isal/Makefile                                    31
-rw-r--r--   drivers/compress/isal/isal_compress_pmd.c                        694
-rw-r--r--   drivers/compress/isal/isal_compress_pmd_ops.c                    351
-rw-r--r--   drivers/compress/isal/isal_compress_pmd_private.h                 57
-rw-r--r--   drivers/compress/isal/meson.build                                 14
-rw-r--r--   drivers/compress/isal/rte_pmd_isal_version.map                     3
-rw-r--r--   drivers/compress/meson.build                                       8
-rw-r--r--   drivers/compress/octeontx/Makefile                                30
-rw-r--r--   drivers/compress/octeontx/include/zip_regs.h                     711
-rw-r--r--   drivers/compress/octeontx/meson.build                              9
-rw-r--r--   drivers/compress/octeontx/otx_zip.c                              180
-rw-r--r--   drivers/compress/octeontx/otx_zip.h                              277
-rw-r--r--   drivers/compress/octeontx/otx_zip_pmd.c                          658
-rw-r--r--   drivers/compress/octeontx/rte_pmd_octeontx_compress_version.map   3
-rw-r--r--   drivers/compress/qat/meson.build                                  18
-rw-r--r--   drivers/compress/qat/qat_comp.c                                  393
-rw-r--r--   drivers/compress/qat/qat_comp.h                                   65
-rw-r--r--   drivers/compress/qat/qat_comp_pmd.c                              429
-rw-r--r--   drivers/compress/qat/qat_comp_pmd.h                               39
-rw-r--r--   drivers/compress/qat/rte_pmd_qat_version.map                       3
-rw-r--r--   drivers/compress/zlib/Makefile                                    29
-rw-r--r--   drivers/compress/zlib/meson.build                                 14
-rw-r--r--   drivers/compress/zlib/rte_pmd_zlib_version.map                     3
-rw-r--r--   drivers/compress/zlib/zlib_pmd.c                                 436
-rw-r--r--   drivers/compress/zlib/zlib_pmd_ops.c                             307
-rw-r--r--   drivers/compress/zlib/zlib_pmd_private.h                          71
27 files changed, 4843 insertions, 0 deletions
diff --git a/drivers/compress/Makefile b/drivers/compress/Makefile
new file mode 100644
index 00000000..286ea6ee
--- /dev/null
+++ b/drivers/compress/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_ISAL) += isal
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_ZIPVF) += octeontx
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_ZLIB) += zlib
+
+include $(RTE_SDK)/mk/rte.subdir.mk
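
Each subdirectory is only descended into when its matching config flag is
enabled. As a sketch of the legacy make workflow (assuming the usual
config/common_base location for these options), the three PMDs are switched
on before building:

    CONFIG_RTE_LIBRTE_PMD_ISAL=y
    CONFIG_RTE_LIBRTE_PMD_OCTEONTX_ZIPVF=y
    CONFIG_RTE_LIBRTE_PMD_ZLIB=y
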
diff --git a/drivers/compress/isal/Makefile b/drivers/compress/isal/Makefile
new file mode 100644
index 00000000..95904f64
--- /dev/null
+++ b/drivers/compress/isal/Makefile
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_isal_comp.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+# external library dependencies
+LDLIBS += -lisal
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_compressdev
+LDLIBS += -lrte_bus_vdev
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_isal_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ISAL) += isal_compress_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ISAL) += isal_compress_pmd_ops.c
+
+# export include files
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/compress/isal/isal_compress_pmd.c b/drivers/compress/isal/isal_compress_pmd.c
new file mode 100644
index 00000000..e943336b
--- /dev/null
+++ b/drivers/compress/isal/isal_compress_pmd.c
@@ -0,0 +1,694 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#include <isa-l.h>
+
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_compressdev_pmd.h>
+
+#include "isal_compress_pmd_private.h"
+
+#define RTE_COMP_ISAL_WINDOW_SIZE 15
+#define RTE_COMP_ISAL_LEVEL_ZERO 0 /* ISA-L Level 0 used for fixed Huffman */
+#define RTE_COMP_ISAL_LEVEL_ONE 1
+#define RTE_COMP_ISAL_LEVEL_TWO 2
+#define RTE_COMP_ISAL_LEVEL_THREE 3 /* Optimised for AVX512 & AVX2 only */
+
+int isal_logtype_driver;
+
+/* Verify and set private xform parameters */
+int
+isal_comp_set_priv_xform_parameters(struct isal_priv_xform *priv_xform,
+ const struct rte_comp_xform *xform)
+{
+ if (xform == NULL)
+ return -EINVAL;
+
+ /* Set compression private xform variables */
+ if (xform->type == RTE_COMP_COMPRESS) {
+ /* Set private xform type - COMPRESS/DECOMPRESS */
+ priv_xform->type = RTE_COMP_COMPRESS;
+
+ /* Set private xform algorithm */
+ if (xform->compress.algo != RTE_COMP_ALGO_DEFLATE) {
+ if (xform->compress.algo == RTE_COMP_ALGO_NULL) {
+ ISAL_PMD_LOG(ERR, "By-pass not supported\n");
+ return -ENOTSUP;
+ }
+ ISAL_PMD_LOG(ERR, "Algorithm not supported\n");
+ return -ENOTSUP;
+ }
+ priv_xform->compress.algo = RTE_COMP_ALGO_DEFLATE;
+
+ /* Set private xform checksum - raw deflate by default */
+ if (xform->compress.chksum != RTE_COMP_CHECKSUM_NONE) {
+ ISAL_PMD_LOG(ERR, "Checksum not supported\n");
+ return -ENOTSUP;
+ }
+
+ /* Set private xform window size, 32K supported */
+ if (xform->compress.window_size == RTE_COMP_ISAL_WINDOW_SIZE)
+ priv_xform->compress.window_size =
+ RTE_COMP_ISAL_WINDOW_SIZE;
+ else {
+ ISAL_PMD_LOG(ERR, "Window size not supported\n");
+ return -ENOTSUP;
+ }
+
+ /* Set private xform huffman type */
+ switch (xform->compress.deflate.huffman) {
+ case(RTE_COMP_HUFFMAN_DEFAULT):
+ priv_xform->compress.deflate.huffman =
+ RTE_COMP_HUFFMAN_DEFAULT;
+ break;
+ case(RTE_COMP_HUFFMAN_FIXED):
+ priv_xform->compress.deflate.huffman =
+ RTE_COMP_HUFFMAN_FIXED;
+ break;
+ case(RTE_COMP_HUFFMAN_DYNAMIC):
+ priv_xform->compress.deflate.huffman =
+ RTE_COMP_HUFFMAN_DYNAMIC;
+ break;
+ default:
+ ISAL_PMD_LOG(ERR, "Huffman code not supported\n");
+ return -ENOTSUP;
+ }
+
+ /* Set private xform level.
+ * Checking compliance with compressdev API, -1 <= level <= 9
+ */
+ if (xform->compress.level < RTE_COMP_LEVEL_PMD_DEFAULT ||
+ xform->compress.level > RTE_COMP_LEVEL_MAX) {
+ ISAL_PMD_LOG(ERR, "Compression level out of range\n");
+ return -EINVAL;
+ }
+ /* Check for Compressdev API level 0, No compression
+ * not supported in ISA-L
+ */
+ else if (xform->compress.level == RTE_COMP_LEVEL_NONE) {
+ ISAL_PMD_LOG(ERR, "No Compression not supported\n");
+ return -ENOTSUP;
+ }
+ /* If using fixed huffman code, level must be 0 */
+ else if (priv_xform->compress.deflate.huffman ==
+ RTE_COMP_HUFFMAN_FIXED) {
+ ISAL_PMD_LOG(DEBUG, "ISA-L level 0 used due to a"
+ " fixed huffman code\n");
+ priv_xform->compress.level = RTE_COMP_ISAL_LEVEL_ZERO;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL0_DEFAULT;
+ } else {
+ /* Mapping API levels to ISA-L levels 1,2 & 3 */
+ switch (xform->compress.level) {
+ case RTE_COMP_LEVEL_PMD_DEFAULT:
+ /* Default is 1 if not using fixed huffman */
+ priv_xform->compress.level =
+ RTE_COMP_ISAL_LEVEL_ONE;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL1_DEFAULT;
+ break;
+ case RTE_COMP_LEVEL_MIN:
+ priv_xform->compress.level =
+ RTE_COMP_ISAL_LEVEL_ONE;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL1_DEFAULT;
+ break;
+ case RTE_COMP_ISAL_LEVEL_TWO:
+ priv_xform->compress.level =
+ RTE_COMP_ISAL_LEVEL_TWO;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL2_DEFAULT;
+ break;
+ /* Level 3 or higher requested */
+ default:
+ /* Check for AVX512, to use ISA-L level 3 */
+ if (rte_cpu_get_flag_enabled(
+ RTE_CPUFLAG_AVX512F)) {
+ priv_xform->compress.level =
+ RTE_COMP_ISAL_LEVEL_THREE;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL3_DEFAULT;
+ }
+ /* Check for AVX2, to use ISA-L level 3 */
+ else if (rte_cpu_get_flag_enabled(
+ RTE_CPUFLAG_AVX2)) {
+ priv_xform->compress.level =
+ RTE_COMP_ISAL_LEVEL_THREE;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL3_DEFAULT;
+ } else {
+ ISAL_PMD_LOG(DEBUG, "Requested ISA-L level"
+ " 3 or above; Level 3 optimized"
+ " for AVX512 & AVX2 only."
+ " level changed to 2.\n");
+ priv_xform->compress.level =
+ RTE_COMP_ISAL_LEVEL_TWO;
+ priv_xform->level_buffer_size =
+ ISAL_DEF_LVL2_DEFAULT;
+ }
+ }
+ }
+ }
+
+ /* Set decompression private xform variables */
+ else if (xform->type == RTE_COMP_DECOMPRESS) {
+
+ /* Set private xform type - COMPRESS/DECOMPRESS */
+ priv_xform->type = RTE_COMP_DECOMPRESS;
+
+ /* Set private xform algorithm */
+ if (xform->decompress.algo != RTE_COMP_ALGO_DEFLATE) {
+ if (xform->decompress.algo == RTE_COMP_ALGO_NULL) {
+ ISAL_PMD_LOG(ERR, "By pass not supported\n");
+ return -ENOTSUP;
+ }
+ ISAL_PMD_LOG(ERR, "Algorithm not supported\n");
+ return -ENOTSUP;
+ }
+ priv_xform->decompress.algo = RTE_COMP_ALGO_DEFLATE;
+
+ /* Set private xform checksum - raw deflate by default */
+ if (xform->decompress.chksum != RTE_COMP_CHECKSUM_NONE) {
+ ISAL_PMD_LOG(ERR, "Checksum not supported\n");
+ return -ENOTSUP;
+ }
+
+ /* Set private xform window size, 32K supported */
+ if (xform->decompress.window_size == RTE_COMP_ISAL_WINDOW_SIZE)
+ priv_xform->decompress.window_size =
+ RTE_COMP_ISAL_WINDOW_SIZE;
+ else {
+ ISAL_PMD_LOG(ERR, "Window size not supported\n");
+ return -ENOTSUP;
+ }
+ }
+ return 0;
+}
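
The validator above accepts exactly one family of settings: DEFLATE, no
checksum, a 2^15 window, and levels remapped onto ISA-L levels 0-3. As a
minimal sketch, an xform that passes these checks could be built by an
application as follows; the field names come from rte_comp.h, the variable
name is illustrative:

    struct rte_comp_xform xform = {
        .type = RTE_COMP_COMPRESS,
        .compress = {
            .algo = RTE_COMP_ALGO_DEFLATE,
            .deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC,
            .level = RTE_COMP_LEVEL_PMD_DEFAULT, /* maps to ISA-L level 1 */
            .chksum = RTE_COMP_CHECKSUM_NONE,
            .window_size = 15, /* the only size this PMD accepts */
        },
    };
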
+
+/* Compression using chained mbufs for input/output data */
+static int
+chained_mbuf_compression(struct rte_comp_op *op, struct isal_comp_qp *qp)
+{
+ int ret;
+ uint32_t remaining_offset;
+ uint32_t remaining_data = op->src.length;
+ struct rte_mbuf *src = op->m_src;
+ struct rte_mbuf *dst = op->m_dst;
+
+ /* check for source/destination offset passing multiple segments
+ * and point compression stream to input/output buffer.
+ */
+ remaining_offset = op->src.offset;
+ while (remaining_offset >= src->data_len) {
+ remaining_offset -= src->data_len;
+ src = src->next;
+ }
+ qp->stream->avail_in = RTE_MIN(src->data_len - remaining_offset,
+ op->src.length);
+ qp->stream->next_in = rte_pktmbuf_mtod_offset(src, uint8_t *,
+ remaining_offset);
+
+ remaining_offset = op->dst.offset;
+ while (remaining_offset >= dst->data_len) {
+ remaining_offset -= dst->data_len;
+ dst = dst->next;
+ }
+ qp->stream->avail_out = dst->data_len - remaining_offset;
+ qp->stream->next_out = rte_pktmbuf_mtod_offset(dst, uint8_t *,
+ remaining_offset);
+
+ if (unlikely(!qp->stream->next_in || !qp->stream->next_out)) {
+ ISAL_PMD_LOG(ERR, "Invalid source or destination buffer\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+
+ while (qp->stream->internal_state.state != ZSTATE_END) {
+ /* Last segment of data */
+ if (remaining_data <= src->data_len)
+ qp->stream->end_of_stream = 1;
+
+ /* Execute compression operation */
+ ret = isal_deflate(qp->stream);
+
+ remaining_data = op->src.length - qp->stream->total_in;
+
+ if (ret != COMP_OK) {
+ ISAL_PMD_LOG(ERR, "Compression operation failed\n");
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return ret;
+ }
+
+ if (qp->stream->avail_in == 0 &&
+ qp->stream->total_in != op->src.length) {
+ if (src->next != NULL) {
+ src = src->next;
+ qp->stream->next_in =
+ rte_pktmbuf_mtod(src, uint8_t *);
+ qp->stream->avail_in =
+ RTE_MIN(remaining_data, src->data_len);
+ } else {
+ ISAL_PMD_LOG(ERR,
+ "Not enough input buffer segments\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+ }
+
+ if (qp->stream->avail_out == 0 &&
+ qp->stream->internal_state.state != ZSTATE_END) {
+ if (dst->next != NULL) {
+ dst = dst->next;
+ qp->stream->next_out =
+ rte_pktmbuf_mtod(dst, uint8_t *);
+ qp->stream->avail_out = dst->data_len;
+ } else {
+ ISAL_PMD_LOG(ERR,
+ "Not enough output buffer segments\n");
+ op->status =
+ RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Decompression using chained mbufs for input/output data */
+static int
+chained_mbuf_decompression(struct rte_comp_op *op, struct isal_comp_qp *qp)
+{
+ int ret;
+ uint32_t consumed_data, src_remaining_offset, dst_remaining_offset;
+ uint32_t remaining_data = op->src.length;
+ struct rte_mbuf *src = op->m_src;
+ struct rte_mbuf *dst = op->m_dst;
+
+ /* check for offset passing multiple segments
+ * and point decompression state to input/output buffer
+ */
+ src_remaining_offset = op->src.offset;
+ while (src_remaining_offset >= src->data_len) {
+ src_remaining_offset -= src->data_len;
+ src = src->next;
+ }
+ qp->state->avail_in = RTE_MIN(src->data_len - src_remaining_offset,
+ op->src.length);
+ qp->state->next_in = rte_pktmbuf_mtod_offset(src, uint8_t *,
+ src_remaining_offset);
+
+ dst_remaining_offset = op->dst.offset;
+ while (dst_remaining_offset >= dst->data_len) {
+ dst_remaining_offset -= dst->data_len;
+ dst = dst->next;
+ }
+ qp->state->avail_out = dst->data_len - dst_remaining_offset;
+ qp->state->next_out = rte_pktmbuf_mtod_offset(dst, uint8_t *,
+ dst_remaining_offset);
+
+ while (qp->state->block_state != ISAL_BLOCK_FINISH) {
+
+ ret = isal_inflate(qp->state);
+
+ /* Check for first segment, offset needs to be accounted for */
+ if (remaining_data == op->src.length) {
+ consumed_data = src->data_len - qp->state->avail_in -
+ src_remaining_offset;
+ } else
+ consumed_data = src->data_len - qp->state->avail_in;
+
+ op->consumed += consumed_data;
+ remaining_data -= consumed_data;
+
+ if (ret != ISAL_DECOMP_OK) {
+ ISAL_PMD_LOG(ERR, "Decompression operation failed\n");
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return ret;
+ }
+
+ if (qp->state->avail_in == 0
+ && op->consumed != op->src.length) {
+ if (src->next != NULL) {
+ src = src->next;
+ qp->state->next_in =
+ rte_pktmbuf_mtod(src, uint8_t *);
+ qp->state->avail_in =
+ RTE_MIN(remaining_data, src->data_len);
+ } else {
+ ISAL_PMD_LOG(ERR,
+ "Not enough input buffer segments\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+ }
+
+ if (qp->state->avail_out == 0 &&
+ qp->state->block_state != ISAL_BLOCK_FINISH) {
+ if (dst->next != NULL) {
+ dst = dst->next;
+ qp->state->next_out =
+ rte_pktmbuf_mtod(dst, uint8_t *);
+ qp->state->avail_out = dst->data_len;
+ } else {
+ ISAL_PMD_LOG(ERR,
+ "Not enough output buffer segments\n");
+ op->status =
+ RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
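
Both chained paths walk the mbuf segment list by hand, crossing a segment
boundary whenever avail_in or avail_out drains. A hedged sketch of how an
application might build such a chain for op->m_src (rte_pktmbuf_chain()
links a tail mbuf onto a head; mbuf_pool and the data-fill step are
assumed):

    struct rte_mbuf *head = rte_pktmbuf_alloc(mbuf_pool);
    struct rte_mbuf *tail = rte_pktmbuf_alloc(mbuf_pool);
    /* ... append input data to both segments ... */
    if (rte_pktmbuf_chain(head, tail) == 0) {
        op->m_src = head;
        op->src.offset = 0;
        op->src.length = rte_pktmbuf_pkt_len(head); /* spans both segments */
    }
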
+
+/* Stateless Compression Function */
+static int
+process_isal_deflate(struct rte_comp_op *op, struct isal_comp_qp *qp,
+ struct isal_priv_xform *priv_xform)
+{
+ int ret = 0;
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+
+ /* Required due to init clearing level_buf */
+ uint8_t *temp_level_buf = qp->stream->level_buf;
+
+ /* Initialize compression stream */
+ isal_deflate_stateless_init(qp->stream);
+
+ qp->stream->level_buf = temp_level_buf;
+
+ /* Stateless operation, input will be consumed in one go */
+ qp->stream->flush = NO_FLUSH;
+
+ /* set compression level & intermediate level buffer size */
+ qp->stream->level = priv_xform->compress.level;
+ qp->stream->level_buf_size = priv_xform->level_buffer_size;
+
+ /* Set op huffman code */
+ if (priv_xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED)
+ isal_deflate_set_hufftables(qp->stream, NULL,
+ IGZIP_HUFFTABLE_STATIC);
+ else if (priv_xform->compress.deflate.huffman ==
+ RTE_COMP_HUFFMAN_DEFAULT)
+ isal_deflate_set_hufftables(qp->stream, NULL,
+ IGZIP_HUFFTABLE_DEFAULT);
+ /* Dynamically change the huffman code to suit the input data */
+ else if (priv_xform->compress.deflate.huffman ==
+ RTE_COMP_HUFFMAN_DYNAMIC)
+ isal_deflate_set_hufftables(qp->stream, NULL,
+ IGZIP_HUFFTABLE_DEFAULT);
+
+ if (op->m_src->pkt_len < (op->src.length + op->src.offset)) {
+ ISAL_PMD_LOG(ERR, "Input mbuf(s) not big enough.\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+
+ if (op->dst.offset >= op->m_dst->pkt_len) {
+ ISAL_PMD_LOG(ERR, "Output mbuf(s) not big enough"
+ " for offset provided.\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+
+ /* Chained mbufs */
+ if (op->m_src->nb_segs > 1 || op->m_dst->nb_segs > 1) {
+ ret = chained_mbuf_compression(op, qp);
+ if (ret < 0)
+ return ret;
+ } else {
+ /* Linear buffer */
+ qp->stream->end_of_stream = 1; /* All input consumed in one */
+ /* Point compression stream to input buffer */
+ qp->stream->avail_in = op->src.length;
+ qp->stream->next_in = rte_pktmbuf_mtod_offset(op->m_src,
+ uint8_t *, op->src.offset);
+
+ /* Point compression stream to output buffer */
+ qp->stream->avail_out = op->m_dst->data_len - op->dst.offset;
+ qp->stream->next_out = rte_pktmbuf_mtod_offset(op->m_dst,
+ uint8_t *, op->dst.offset);
+
+ if (unlikely(!qp->stream->next_in || !qp->stream->next_out)) {
+ ISAL_PMD_LOG(ERR, "Invalid source or destination"
+ " buffers\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+
+ /* Execute compression operation */
+ ret = isal_deflate_stateless(qp->stream);
+
+ /* Check that output buffer did not run out of space */
+ if (ret == STATELESS_OVERFLOW) {
+ ISAL_PMD_LOG(ERR, "Output buffer not big enough\n");
+ op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ return ret;
+ }
+
+ /* Check that input buffer has been fully consumed */
+ if (qp->stream->avail_in != (uint32_t)0) {
+ ISAL_PMD_LOG(ERR, "Input buffer could not be read"
+ " entirely\n");
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return -1;
+ }
+
+ if (ret != COMP_OK) {
+ ISAL_PMD_LOG(ERR, "Compression operation failed\n");
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return ret;
+ }
+ }
+ op->consumed = qp->stream->total_in;
+ op->produced = qp->stream->total_out;
+
+ return ret;
+}
+
+/* Stateless Decompression Function */
+static int
+process_isal_inflate(struct rte_comp_op *op, struct isal_comp_qp *qp)
+{
+ int ret = 0;
+
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+
+ /* Initialize decompression state */
+ isal_inflate_init(qp->state);
+
+ if (op->m_src->pkt_len < (op->src.length + op->src.offset)) {
+ ISAL_PMD_LOG(ERR, "Input mbuf(s) not big enough.\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+
+ if (op->dst.offset >= op->m_dst->pkt_len) {
+ ISAL_PMD_LOG(ERR, "Output mbuf not big enough for "
+ "offset provided.\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+
+ /* Chained mbufs */
+ if (op->m_src->nb_segs > 1 || op->m_dst->nb_segs > 1) {
+ ret = chained_mbuf_decompression(op, qp);
+ if (ret != 0)
+ return ret;
+ } else {
+ /* Linear buffer */
+ /* Point decompression state to input buffer */
+ qp->state->avail_in = op->src.length;
+ qp->state->next_in = rte_pktmbuf_mtod_offset(op->m_src,
+ uint8_t *, op->src.offset);
+
+ /* Point decompression state to output buffer */
+ qp->state->avail_out = op->m_dst->data_len - op->dst.offset;
+ qp->state->next_out = rte_pktmbuf_mtod_offset(op->m_dst,
+ uint8_t *, op->dst.offset);
+
+ if (unlikely(!qp->state->next_in || !qp->state->next_out)) {
+ ISAL_PMD_LOG(ERR, "Invalid source or destination"
+ " buffers\n");
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ return -1;
+ }
+
+ /* Execute decompression operation */
+ ret = isal_inflate_stateless(qp->state);
+
+ if (ret == ISAL_OUT_OVERFLOW) {
+ ISAL_PMD_LOG(ERR, "Output buffer not big enough\n");
+ op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ return ret;
+ }
+
+ /* Check that input buffer has been fully consumed */
+ if (qp->state->avail_in != (uint32_t)0) {
+ ISAL_PMD_LOG(ERR, "Input buffer could not be read"
+ " entirely\n");
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return -1;
+ }
+
+ if (ret != ISAL_DECOMP_OK) {
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ return ret;
+ }
+ op->consumed = op->src.length - qp->state->avail_in;
+ }
+ op->produced = qp->state->total_out;
+
+ return ret;
+}
+
+/* Process compression/decompression operation */
+static int
+process_op(struct isal_comp_qp *qp, struct rte_comp_op *op,
+ struct isal_priv_xform *priv_xform)
+{
+ switch (priv_xform->type) {
+ case RTE_COMP_COMPRESS:
+ process_isal_deflate(op, qp, priv_xform);
+ break;
+ case RTE_COMP_DECOMPRESS:
+ process_isal_inflate(op, qp);
+ break;
+ default:
+ ISAL_PMD_LOG(ERR, "Operation Not Supported\n");
+ return -ENOTSUP;
+ }
+ return 0;
+}
+
+/* Enqueue burst */
+static uint16_t
+isal_comp_pmd_enqueue_burst(void *queue_pair, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ struct isal_comp_qp *qp = queue_pair;
+ uint16_t i;
+ int retval;
+ uint16_t num_enq = RTE_MIN(qp->num_free_elements, nb_ops);
+
+ for (i = 0; i < num_enq; i++) {
+ if (unlikely(ops[i]->op_type != RTE_COMP_OP_STATELESS)) {
+ ops[i]->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ ISAL_PMD_LOG(ERR, "Stateful operation not Supported\n");
+ qp->qp_stats.enqueue_err_count++;
+ continue;
+ }
+ retval = process_op(qp, ops[i], ops[i]->private_xform);
+ if (unlikely(retval < 0) ||
+ ops[i]->status != RTE_COMP_OP_STATUS_SUCCESS) {
+ qp->qp_stats.enqueue_err_count++;
+ }
+ }
+
+ retval = rte_ring_enqueue_burst(qp->processed_pkts, (void *)ops,
+ num_enq, NULL);
+ qp->num_free_elements -= retval;
+ qp->qp_stats.enqueued_count += retval;
+
+ return retval;
+}
+
+/* Dequeue burst */
+static uint16_t
+isal_comp_pmd_dequeue_burst(void *queue_pair, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ struct isal_comp_qp *qp = queue_pair;
+ uint16_t nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts, (void **)ops,
+ nb_ops, NULL);
+ qp->num_free_elements += nb_dequeued;
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
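
Note that ops are processed synchronously inside the enqueue hook and parked
on qp->processed_pkts; dequeue only drains that ring. A minimal polling
sketch against the public compressdev API (device 0, queue pair 0 assumed
already configured):

    uint16_t n_enq = rte_compressdev_enqueue_burst(0, 0, ops, nb_ops);
    uint16_t n_deq = 0;

    while (n_deq < n_enq)
        n_deq += rte_compressdev_dequeue_burst(0, 0,
                &ops[n_deq], n_enq - n_deq);
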
+
+/* Create ISA-L compression device */
+static int
+compdev_isal_create(const char *name, struct rte_vdev_device *vdev,
+ struct rte_compressdev_pmd_init_params *init_params)
+{
+ struct rte_compressdev *dev;
+
+ dev = rte_compressdev_pmd_create(name, &vdev->device,
+ sizeof(struct isal_comp_private), init_params);
+ if (dev == NULL) {
+ ISAL_PMD_LOG(ERR, "failed to create compressdev vdev");
+ return -EFAULT;
+ }
+
+ dev->dev_ops = isal_compress_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = isal_comp_pmd_dequeue_burst;
+ dev->enqueue_burst = isal_comp_pmd_enqueue_burst;
+
+ return 0;
+}
+
+/** Remove compression device */
+static int
+compdev_isal_remove_dev(struct rte_vdev_device *vdev)
+{
+ struct rte_compressdev *compdev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ compdev = rte_compressdev_pmd_get_named_dev(name);
+ if (compdev == NULL)
+ return -ENODEV;
+
+ return rte_compressdev_pmd_destroy(compdev);
+}
+
+/** Initialise ISA-L compression device */
+static int
+compdev_isal_probe(struct rte_vdev_device *dev)
+{
+ struct rte_compressdev_pmd_init_params init_params = {
+ "",
+ rte_socket_id(),
+ };
+ const char *name, *args;
+ int retval;
+
+ name = rte_vdev_device_name(dev);
+ if (name == NULL)
+ return -EINVAL;
+
+ args = rte_vdev_device_args(dev);
+
+ retval = rte_compressdev_pmd_parse_input_args(&init_params, args);
+ if (retval) {
+ ISAL_PMD_LOG(ERR,
+ "Failed to parse initialisation arguments[%s]\n", args);
+ return -EINVAL;
+ }
+
+ return compdev_isal_create(name, dev, &init_params);
+}
+
+static struct rte_vdev_driver compdev_isal_pmd_drv = {
+ .probe = compdev_isal_probe,
+ .remove = compdev_isal_remove_dev,
+};
+
+RTE_PMD_REGISTER_VDEV(COMPDEV_NAME_ISAL_PMD, compdev_isal_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(COMPDEV_NAME_ISAL_PMD,
+ "socket_id=<int>");
+
+RTE_INIT(isal_init_log)
+{
+ isal_logtype_driver = rte_log_register("pmd.compress.isal");
+ if (isal_logtype_driver >= 0)
+ rte_log_set_level(isal_logtype_driver, RTE_LOG_INFO);
+}
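
Since the PMD registers on the vdev bus, the device is instantiated either
from the EAL command line (--vdev="compress_isal,socket_id=0") or
programmatically; a one-line sketch of the latter:

    /* "compress_isal" is COMPDEV_NAME_ISAL_PMD; the args string is optional */
    rte_vdev_init("compress_isal", "socket_id=0");
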
diff --git a/drivers/compress/isal/isal_compress_pmd_ops.c b/drivers/compress/isal/isal_compress_pmd_ops.c
new file mode 100644
index 00000000..41cade87
--- /dev/null
+++ b/drivers/compress/isal/isal_compress_pmd_ops.c
@@ -0,0 +1,351 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#include <isa-l.h>
+
+#include <rte_common.h>
+#include <rte_compressdev_pmd.h>
+#include <rte_malloc.h>
+
+#include "isal_compress_pmd_private.h"
+
+static const struct rte_compressdev_capabilities isal_pmd_capabilities[] = {
+ {
+ .algo = RTE_COMP_ALGO_DEFLATE,
+ .comp_feature_flags = RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
+ RTE_COMP_FF_OOP_LB_IN_SGL_OUT |
+ RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
+ RTE_COMP_FF_HUFFMAN_FIXED |
+ RTE_COMP_FF_HUFFMAN_DYNAMIC,
+ .window_size = {
+ .min = 15,
+ .max = 15,
+ .increment = 0
+ },
+ },
+ RTE_COMP_END_OF_CAPABILITIES_LIST()
+};
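
Applications can discover this capability set at runtime rather than
hard-coding it; a brief sketch (dev_id assumed valid):

    const struct rte_compressdev_capabilities *cap =
        rte_compressdev_capability_get(dev_id, RTE_COMP_ALGO_DEFLATE);

    if (cap != NULL && (cap->comp_feature_flags & RTE_COMP_FF_HUFFMAN_DYNAMIC))
        ; /* dynamic Huffman deflate is available */
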
+
+/** Configure device */
+static int
+isal_comp_pmd_config(struct rte_compressdev *dev,
+ struct rte_compressdev_config *config)
+{
+ int ret = 0;
+ unsigned int n;
+ char mp_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ unsigned int elt_size = sizeof(struct isal_priv_xform);
+ struct isal_comp_private *internals = dev->data->dev_private;
+
+ n = snprintf(mp_name, sizeof(mp_name), "compdev_%d_xform_mp",
+ dev->data->dev_id);
+ if (n >= sizeof(mp_name)) {
+ ISAL_PMD_LOG(ERR,
+ "Unable to create unique name for xform mempool");
+ return -ENOMEM;
+ }
+
+ internals->priv_xform_mp = rte_mempool_lookup(mp_name);
+
+ if (internals->priv_xform_mp != NULL) {
+ if (((internals->priv_xform_mp)->elt_size != elt_size) ||
+ ((internals->priv_xform_mp)->size <
+ config->max_nb_priv_xforms)) {
+
+ ISAL_PMD_LOG(ERR, "%s mempool already exists with different"
+ " initialization parameters", mp_name);
+ internals->priv_xform_mp = NULL;
+ return -ENOMEM;
+ }
+ } else { /* First time configuration */
+ internals->priv_xform_mp = rte_mempool_create(
+ mp_name, /* mempool name */
+ /* number of elements*/
+ config->max_nb_priv_xforms,
+ elt_size, /* element size*/
+ 0, /* Cache size*/
+ 0, /* private data size */
+ NULL, /* obj initialization constructor */
+ NULL, /* obj initialization constructor arg */
+ NULL, /**< obj constructor*/
+ NULL, /* obj constructor arg */
+ config->socket_id, /* socket id */
+ 0); /* flags */
+ }
+
+ if (internals->priv_xform_mp == NULL) {
+ ISAL_PMD_LOG(ERR, "%s mempool allocation failed", mp_name);
+ return -ENOMEM;
+ }
+
+ dev->data->dev_private = internals;
+
+ return ret;
+}
+
+/** Start device */
+static int
+isal_comp_pmd_start(__rte_unused struct rte_compressdev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+isal_comp_pmd_stop(__rte_unused struct rte_compressdev *dev)
+{
+}
+
+/** Close device */
+static int
+isal_comp_pmd_close(struct rte_compressdev *dev)
+{
+ /* Free private data */
+ struct isal_comp_private *internals = dev->data->dev_private;
+
+ rte_mempool_free(internals->priv_xform_mp);
+ return 0;
+}
+
+/** Get device statistics */
+static void
+isal_comp_pmd_stats_get(struct rte_compressdev *dev,
+ struct rte_compressdev_stats *stats)
+{
+ uint16_t qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct isal_comp_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Get device info */
+static void
+isal_comp_pmd_info_get(struct rte_compressdev *dev __rte_unused,
+ struct rte_compressdev_info *dev_info)
+{
+ if (dev_info != NULL) {
+ dev_info->capabilities = isal_pmd_capabilities;
+ dev_info->feature_flags = RTE_COMPDEV_FF_CPU_AVX512 |
+ RTE_COMPDEV_FF_CPU_AVX2 |
+ RTE_COMPDEV_FF_CPU_AVX |
+ RTE_COMPDEV_FF_CPU_SSE;
+ }
+}
+
+/** Reset device statistics */
+static void
+isal_comp_pmd_stats_reset(struct rte_compressdev *dev)
+{
+ uint16_t qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct isal_comp_qp *qp = dev->data->queue_pairs[qp_id];
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+/** Release queue pair */
+static int
+isal_comp_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
+{
+ struct isal_comp_qp *qp = dev->data->queue_pairs[qp_id];
+
+ if (qp == NULL)
+ return -EINVAL;
+
+ if (qp->stream != NULL) {
+ rte_free(qp->stream->level_buf);
+ rte_free(qp->stream);
+ }
+
+ if (qp->state != NULL)
+ rte_free(qp->state);
+
+ if (qp->processed_pkts != NULL)
+ rte_ring_free(qp->processed_pkts);
+
+ rte_free(qp);
+ dev->data->queue_pairs[qp_id] = NULL;
+
+ return 0;
+}
+
+/** Create a ring to place processed packets on */
+static struct rte_ring *
+isal_comp_pmd_qp_create_processed_pkts_ring(struct isal_comp_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ ISAL_PMD_LOG(DEBUG,
+ "Reusing existing ring %s for processed packets",
+ qp->name);
+ return r;
+ }
+
+ ISAL_PMD_LOG(ERR,
+ "Unable to reuse existing ring %s"
+ " for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Set a unique name for the queue pair based on its dev_id and qp_id */
+static int
+isal_comp_pmd_qp_set_unique_name(struct rte_compressdev *dev,
+struct isal_comp_qp *qp)
+{
+ unsigned int n = snprintf(qp->name, sizeof(qp->name),
+ "isal_compression_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/* Setup a queue pair */
+static int
+isal_comp_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+ uint32_t max_inflight_ops, int socket_id)
+{
+ struct isal_comp_qp *qp = NULL;
+ int retval;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ isal_comp_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("Isa-l compression PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL) {
+ ISAL_PMD_LOG(ERR, "Failed to allocate queue pair memory");
+ return (-ENOMEM);
+ }
+
+ /* Initialize memory for compression stream structure */
+ qp->stream = rte_zmalloc_socket("Isa-l compression stream ",
+ sizeof(struct isal_zstream), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (qp->stream == NULL) {
+ ISAL_PMD_LOG(ERR, "Failed to allocate compression stream memory");
+ goto qp_setup_cleanup;
+ }
+
+ /* Initialize memory for compression level buffer */
+ qp->stream->level_buf = rte_zmalloc_socket("Isa-l compression lev_buf",
+ ISAL_DEF_LVL3_DEFAULT, RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (qp->stream->level_buf == NULL) {
+ ISAL_PMD_LOG(ERR, "Failed to allocate compression level buffer");
+ goto qp_setup_cleanup;
+ }
+
+ /* Initialize memory for decompression state structure */
+ qp->state = rte_zmalloc_socket("Isa-l decompression state",
+ sizeof(struct inflate_state), RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (qp->state == NULL) {
+ ISAL_PMD_LOG(ERR, "Failed to allocate decompression state memory");
+ goto qp_setup_cleanup;
+ }
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ retval = isal_comp_pmd_qp_set_unique_name(dev, qp);
+ if (retval) {
+ ISAL_PMD_LOG(ERR, "Failed to create unique name for isal "
+ "compression device");
+ goto qp_setup_cleanup;
+ }
+
+ qp->processed_pkts = isal_comp_pmd_qp_create_processed_pkts_ring(qp,
+ max_inflight_ops, socket_id);
+ if (qp->processed_pkts == NULL) {
+ ISAL_PMD_LOG(ERR, "Failed to create unique name for isal "
+ "compression device");
+ goto qp_setup_cleanup;
+ }
+
+ qp->num_free_elements = rte_ring_free_count(qp->processed_pkts);
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ return 0;
+
+qp_setup_cleanup:
+ /* rte_free() is NULL-safe, so partial allocations are released too */
+ if (qp->stream != NULL)
+ rte_free(qp->stream->level_buf);
+ rte_free(qp->stream);
+ rte_free(qp->state);
+ rte_free(qp);
+
+ return -1;
+}
+
+/** Set private xform data*/
+static int
+isal_comp_pmd_priv_xform_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform, void **priv_xform)
+{
+ int ret;
+ struct isal_comp_private *internals = dev->data->dev_private;
+
+ if (xform == NULL) {
+ ISAL_PMD_LOG(ERR, "Invalid Xform struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(internals->priv_xform_mp, priv_xform)) {
+ ISAL_PMD_LOG(ERR,
+ "Couldn't get object from private xform mempool");
+ return -ENOMEM;
+ }
+
+ ret = isal_comp_set_priv_xform_parameters(*priv_xform, xform);
+ if (ret != 0) {
+ ISAL_PMD_LOG(ERR, "Failed to configure private xform parameters");
+
+ /* Return private xform to mempool */
+ rte_mempool_put(internals->priv_xform_mp, *priv_xform);
+ return ret;
+ }
+ return 0;
+}
+
+/** Clear memory of the private xform so it doesn't leave key material behind */
+static int
+isal_comp_pmd_priv_xform_free(struct rte_compressdev *dev, void *priv_xform)
+{
+ struct isal_comp_private *internals = dev->data->dev_private;
+
+ /* Zero out the whole structure */
+ if (priv_xform) {
+ memset(priv_xform, 0, sizeof(struct isal_priv_xform));
+ rte_mempool_put(internals->priv_xform_mp, priv_xform);
+ }
+ return 0;
+}
+
+struct rte_compressdev_ops isal_pmd_ops = {
+ .dev_configure = isal_comp_pmd_config,
+ .dev_start = isal_comp_pmd_start,
+ .dev_stop = isal_comp_pmd_stop,
+ .dev_close = isal_comp_pmd_close,
+
+ .stats_get = isal_comp_pmd_stats_get,
+ .stats_reset = isal_comp_pmd_stats_reset,
+
+ .dev_infos_get = isal_comp_pmd_info_get,
+
+ .queue_pair_setup = isal_comp_pmd_qp_setup,
+ .queue_pair_release = isal_comp_pmd_qp_release,
+
+ .private_xform_create = isal_comp_pmd_priv_xform_create,
+ .private_xform_free = isal_comp_pmd_priv_xform_free,
+};
+
+struct rte_compressdev_ops *isal_compress_pmd_ops = &isal_pmd_ops;
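
Taken together, these ops back the standard compressdev bring-up sequence; a
hedged sketch with error handling omitted (dev_id and xform assumed from the
application):

    struct rte_compressdev_config cfg = {
        .socket_id = rte_socket_id(),
        .nb_queue_pairs = 1,
        .max_nb_priv_xforms = 16,
        .max_nb_streams = 0, /* stateful streams are not supported */
    };
    void *priv_xform;

    rte_compressdev_configure(dev_id, &cfg);
    rte_compressdev_queue_pair_setup(dev_id, 0, 64 /* max inflight ops */,
            rte_socket_id());
    rte_compressdev_start(dev_id);
    rte_compressdev_private_xform_create(dev_id, &xform, &priv_xform);
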
diff --git a/drivers/compress/isal/isal_compress_pmd_private.h b/drivers/compress/isal/isal_compress_pmd_private.h
new file mode 100644
index 00000000..46e9fcfa
--- /dev/null
+++ b/drivers/compress/isal/isal_compress_pmd_private.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _ISAL_COMP_PMD_PRIVATE_H_
+#define _ISAL_COMP_PMD_PRIVATE_H_
+
+#define COMPDEV_NAME_ISAL_PMD compress_isal
+/**< ISA-L comp PMD device name */
+
+extern int isal_logtype_driver;
+#define ISAL_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, isal_logtype_driver, "%s(): "fmt "\n", \
+ __func__, ##args)
+
+/* private data structure for each ISA-L compression device */
+struct isal_comp_private {
+ struct rte_mempool *priv_xform_mp;
+};
+
+/** ISA-L queue pair */
+struct isal_comp_qp {
+ /* Queue Pair Identifier */
+ uint16_t id;
+ /* Unique Queue Pair Name */
+ char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ /* Ring for placing process packets */
+ struct rte_ring *processed_pkts;
+ /* Queue pair statistics */
+ struct rte_compressdev_stats qp_stats;
+ /* Compression stream information*/
+ struct isal_zstream *stream;
+ /* Decompression state information*/
+ struct inflate_state *state;
+ /* Number of free elements on ring */
+ uint16_t num_free_elements;
+} __rte_cache_aligned;
+
+/** ISA-L private xform structure */
+struct isal_priv_xform {
+ enum rte_comp_xform_type type;
+ union {
+ struct rte_comp_compress_xform compress;
+ struct rte_comp_decompress_xform decompress;
+ };
+ uint32_t level_buffer_size;
+} __rte_cache_aligned;
+
+/** Set and validate ISA-L comp private xform parameters */
+extern int
+isal_comp_set_priv_xform_parameters(struct isal_priv_xform *priv_xform,
+ const struct rte_comp_xform *xform);
+
+/** device specific operations function pointer structure */
+extern struct rte_compressdev_ops *isal_compress_pmd_ops;
+
+#endif /* _ISAL_COMP_PMD_PRIVATE_H_ */
diff --git a/drivers/compress/isal/meson.build b/drivers/compress/isal/meson.build
new file mode 100644
index 00000000..94c10fd6
--- /dev/null
+++ b/drivers/compress/isal/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 Intel Corporation
+
+dep = dependency('libisal', required: false)
+if not dep.found()
+ build = false
+endif
+
+deps += 'bus_vdev'
+sources = files('isal_compress_pmd.c', 'isal_compress_pmd_ops.c')
+ext_deps += dep
+pkgconfig_extra_libs += '-lisal'
+
+allow_experimental_apis = true
diff --git a/drivers/compress/isal/rte_pmd_isal_version.map b/drivers/compress/isal/rte_pmd_isal_version.map
new file mode 100644
index 00000000..de8e412f
--- /dev/null
+++ b/drivers/compress/isal/rte_pmd_isal_version.map
@@ -0,0 +1,3 @@
+DPDK_18.05 {
+ local: *;
+};
diff --git a/drivers/compress/meson.build b/drivers/compress/meson.build
new file mode 100644
index 00000000..817ef3be
--- /dev/null
+++ b/drivers/compress/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+drivers = ['isal', 'octeontx', 'qat', 'zlib']
+
+std_deps = ['compressdev'] # compressdev pulls in all other needed deps
+config_flag_fmt = 'RTE_LIBRTE_@0@_PMD'
+driver_name_fmt = 'rte_pmd_@0@'
diff --git a/drivers/compress/octeontx/Makefile b/drivers/compress/octeontx/Makefile
new file mode 100644
index 00000000..f34424c8
--- /dev/null
+++ b/drivers/compress/octeontx/Makefile
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_octeontx_zip.a
+
+# library version
+LIBABIVER := 1
+
+# build flags
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -O3
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+CFLAGS += -I$(RTE_SDK)/drivers/compress/octeontx/include
+
+# external library dependencies
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_compressdev
+LDLIBS += -lrte_pci -lrte_bus_pci
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_ZIPVF) += otx_zip_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_ZIPVF) += otx_zip.c
+
+# versioning export map
+EXPORT_MAP := rte_pmd_octeontx_compress_version.map
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/compress/octeontx/include/zip_regs.h b/drivers/compress/octeontx/include/zip_regs.h
new file mode 100644
index 00000000..1e74db43
--- /dev/null
+++ b/drivers/compress/octeontx/include/zip_regs.h
@@ -0,0 +1,711 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _RTE_OCTEONTX_ZIP_REGS_H_
+#define _RTE_OCTEONTX_ZIP_REGS_H_
+
+
+/**
+ * Enumeration zip_cc
+ *
+ * ZIP compression coding Enumeration
+ * Enumerates ZIP_INST_S[CC].
+ */
+enum zip_cc {
+ ZIP_CC_DEFAULT = 0,
+ ZIP_CC_DYN_HUFF,
+ ZIP_CC_FIXED_HUFF,
+ ZIP_CC_LZS
+};
+
+/**
+ * Register (NCB) zip_vq#_ena
+ *
+ * ZIP VF Queue Enable Register
+ * If a queue is disabled, ZIP CTL stops fetching instructions from the queue.
+ */
+typedef union {
+ uint64_t u;
+ struct zip_vqx_ena_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1;
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct zip_vqx_ena_s cn; */
+} zip_vqx_ena_t;
+
+/**
+ * Register (NCB) zip_vq#_sbuf_addr
+ *
+ * ZIP VF Queue Starting Buffer Address Registers
+ * These registers set the buffer parameters for the instruction queues.
+ * When quiescent (i.e.
+ * outstanding doorbell count is 0), it is safe to rewrite this register
+ * to effectively reset the
+ * command buffer state machine.
+ * These registers must be programmed after software programs the
+ * corresponding ZIP_QUE()_SBUF_CTL.
+ */
+typedef union {
+ uint64_t u;
+ struct zip_vqx_sbuf_addr_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t ptr : 42;
+ uint64_t off : 7;
+#else /* Word 0 - Little Endian */
+ uint64_t off : 7;
+ uint64_t ptr : 42;
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } s;
+ /* struct zip_vqx_sbuf_addr_s cn; */
+} zip_vqx_sbuf_addr_t;
+
+/**
+ * Register (NCB) zip_que#_doorbell
+ *
+ * ZIP Queue Doorbell Registers
+ * Doorbells for the ZIP instruction queues.
+ */
+typedef union {
+ uint64_t u;
+ struct zip_quex_doorbell_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ uint64_t reserved_20_63 : 44;
+ uint64_t dbell_cnt : 20;
+#else /* Word 0 - Little Endian */
+ uint64_t dbell_cnt : 20;
+ uint64_t reserved_20_63 : 44;
+#endif /* Word 0 - End */
+ } s;
+ /* struct zip_quex_doorbell_s cn; */
+} zip_quex_doorbell_t;
+
+/**
+ * Structure zip_nptr_s
+ *
+ * ZIP Instruction Next-Chunk-Buffer Pointer (NPTR) Structure
+ * This structure is used to chain all the ZIP instruction buffers
+ * together. ZIP instruction buffers are managed
+ * (allocated and released) by software.
+ */
+union zip_nptr_s {
+ uint64_t u;
+ struct zip_nptr_s_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ uint64_t addr : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct zip_nptr_s_s cn83xx; */
+};
+
+/**
+ * generic ptr address
+ */
+union zip_zptr_addr_s {
+ /** This field can be used to set/clear all bits, or do bitwise
+ * operations over the entire structure.
+ */
+ uint64_t u;
+ /** generic ptr address */
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ uint64_t addr : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 64;
+#endif /* Word 0 - End */
+ } s;
+};
+
+/**
+ * generic ptr ctl
+ */
+union zip_zptr_ctl_s {
+ /** This field can be used to set/clear all bits, or do bitwise
+ * operations over the entire structure.
+ */
+ uint64_t u;
+ /** generic ptr ctl */
+ struct {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 1 - Big Endian */
+ uint64_t reserved_112_127 : 16;
+ uint64_t length : 16;
+ uint64_t reserved_67_95 : 29;
+ uint64_t fw : 1;
+ uint64_t nc : 1;
+ uint64_t data_be : 1;
+#else /* Word 1 - Little Endian */
+ uint64_t data_be : 1;
+ uint64_t nc : 1;
+ uint64_t fw : 1;
+ uint64_t reserved_67_95 : 29;
+ uint64_t length : 16;
+ uint64_t reserved_112_127 : 16;
+#endif /* Word 1 - End */
+ } s;
+
+};
+
+/**
+ * Structure zip_inst_s
+ *
+ * ZIP Instruction Structure
+ * Each ZIP instruction has 16 words (they are called IWORD0 to IWORD15
+ * within the structure).
+ */
+union zip_inst_s {
+ /** This field can be used to set/clear all bits, or do bitwise
+ * operations over the entire structure.
+ */
+ uint64_t u[16];
+ /** ZIP Instruction Structure */
+ struct zip_inst_s_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ /** Done interrupt */
+ uint64_t doneint : 1;
+ /** reserved */
+ uint64_t reserved_56_62 : 7;
+ /** Total output length */
+ uint64_t totaloutputlength : 24;
+ /** reserved */
+ uint64_t reserved_27_31 : 5;
+ /** EXNUM */
+ uint64_t exn : 3;
+ /** HASH IV */
+ uint64_t iv : 1;
+ /** EXBITS */
+ uint64_t exbits : 7;
+ /** Hash more-in-file */
+ uint64_t hmif : 1;
+ /** Hash Algorithm and enable */
+ uint64_t halg : 3;
+ /** Sync flush*/
+ uint64_t sf : 1;
+ /** Compression speed/storage */
+ uint64_t ss : 2;
+ /** Compression coding */
+ uint64_t cc : 2;
+ /** End of input data */
+ uint64_t ef : 1;
+ /** Beginning of file */
+ uint64_t bf : 1;
+ // uint64_t reserved_3_4 : 2;
+ /** Comp/decomp operation */
+ uint64_t op : 2;
+ /** Data scatter */
+ uint64_t ds : 1;
+ /** Data gather */
+ uint64_t dg : 1;
+ /** History gather */
+ uint64_t hg : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t hg : 1;
+ uint64_t dg : 1;
+ uint64_t ds : 1;
+ //uint64_t reserved_3_4 : 2;
+ uint64_t op : 2;
+ uint64_t bf : 1;
+ uint64_t ef : 1;
+ uint64_t cc : 2;
+ uint64_t ss : 2;
+ uint64_t sf : 1;
+ uint64_t halg : 3;
+ uint64_t hmif : 1;
+ uint64_t exbits : 7;
+ uint64_t iv : 1;
+ uint64_t exn : 3;
+ uint64_t reserved_27_31 : 5;
+ uint64_t totaloutputlength : 24;
+ uint64_t reserved_56_62 : 7;
+ uint64_t doneint : 1;
+
+#endif /* Word 0 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 1 - Big Endian */
+ /** History length */
+ uint64_t historylength : 16;
+ /** reserved */
+ uint64_t reserved_96_111 : 16;
+ /** adler/crc32 checksum*/
+ uint64_t adlercrc32 : 32;
+#else /* Word 1 - Little Endian */
+ uint64_t adlercrc32 : 32;
+ uint64_t reserved_96_111 : 16;
+ uint64_t historylength : 16;
+#endif /* Word 1 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 2 - Big Endian */
+ /** Decompression Context Pointer Address */
+ union zip_zptr_addr_s ctx_ptr_addr;
+#else /* Word 2 - Little Endian */
+ union zip_zptr_addr_s ctx_ptr_addr;
+#endif /* Word 2 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Decompression Context Pointer Control */
+ union zip_zptr_ctl_s ctx_ptr_ctl;
+#else /* Word 3 - Little Endian */
+ union zip_zptr_ctl_s ctx_ptr_ctl;
+#endif /* Word 3 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Decompression history pointer address */
+ union zip_zptr_addr_s his_ptr_addr;
+#else /* Word 4 - Little Endian */
+ union zip_zptr_addr_s his_ptr_addr;
+#endif /* Word 4 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Decompression history pointer control */
+ union zip_zptr_ctl_s his_ptr_ctl;
+#else /* Word 5 - Little Endian */
+ union zip_zptr_ctl_s his_ptr_ctl;
+#endif /* Word 5 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Input and compression history pointer address */
+ union zip_zptr_addr_s inp_ptr_addr;
+#else /* Word 6 - Little Endian */
+ union zip_zptr_addr_s inp_ptr_addr;
+#endif /* Word 6 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Input and compression history pointer control */
+ union zip_zptr_ctl_s inp_ptr_ctl;
+#else /* Word 7 - Little Endian */
+ union zip_zptr_ctl_s inp_ptr_ctl;
+#endif /* Word 7 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Output pointer address */
+ union zip_zptr_addr_s out_ptr_addr;
+#else /* Word 8 - Little Endian */
+ union zip_zptr_addr_s out_ptr_addr;
+#endif /* Word 8 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Output pointer control */
+ union zip_zptr_ctl_s out_ptr_ctl;
+#else /* Word 9 - Little Endian */
+ union zip_zptr_ctl_s out_ptr_ctl;
+#endif /* Word 9 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Result pointer address */
+ union zip_zptr_addr_s res_ptr_addr;
+#else /* Word 10 - Little Endian */
+ union zip_zptr_addr_s res_ptr_addr;
+#endif /* Word 10 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Result pointer control */
+ union zip_zptr_ctl_s res_ptr_ctl;
+#else /* Word 11 - Little Endian */
+ union zip_zptr_ctl_s res_ptr_ctl;
+#endif /* Word 11 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 12 - Big Endian */
+ /** reserved */
+ uint64_t reserved_812_831 : 20;
+ /** SSO guest group */
+ uint64_t ggrp : 10;
+ /** SSO tag type */
+ uint64_t tt : 2;
+ /** SSO tag */
+ uint64_t tag : 32;
+#else /* Word 12 - Little Endian */
+ uint64_t tag : 32;
+ uint64_t tt : 2;
+ uint64_t ggrp : 10;
+ uint64_t reserved_812_831 : 20;
+#endif /* Word 12 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 13 - Big Endian */
+ /** Work queue entry pointer */
+ uint64_t wq_ptr : 64;
+#else /* Word 13 - Little Endian */
+ uint64_t wq_ptr : 64;
+#endif /* Word 13 - End */
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** reserved */
+ uint64_t reserved_896_959 : 64;
+#else /* Word 14 - Little Endian */
+ uint64_t reserved_896_959 : 64;
+#endif /* Word 14 - End */
+#if defined(__BIG_ENDIAN_BITFIELD)
+ /** Hash structure pointer */
+ uint64_t hash_ptr : 64;
+#else /* Word 15 - Little Endian */
+ uint64_t hash_ptr : 64;
+#endif /* Word 15 - End */
+ } /** ZIP 88xx Instruction Structure */zip88xx;
+
+ /** ZIP Instruction Structure */
+ struct zip_inst_s_cn83xx {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ /** Done interrupt */
+ uint64_t doneint : 1;
+ /** reserved */
+ uint64_t reserved_56_62 : 7;
+ /** Total output length */
+ uint64_t totaloutputlength : 24;
+ /** reserved */
+ uint64_t reserved_27_31 : 5;
+ /** EXNUM */
+ uint64_t exn : 3;
+ /** HASH IV */
+ uint64_t iv : 1;
+ /** EXBITS */
+ uint64_t exbits : 7;
+ /** Hash more-in-file */
+ uint64_t hmif : 1;
+ /** Hash Algorithm and enable */
+ uint64_t halg : 3;
+ /** Sync flush*/
+ uint64_t sf : 1;
+ /** Compression speed/storage */
+ uint64_t ss : 2;
+ /** Compression coding */
+ uint64_t cc : 2;
+ /** End of input data */
+ uint64_t ef : 1;
+ /** Beginning of file */
+ uint64_t bf : 1;
+ /** Comp/decomp operation */
+ uint64_t op : 2;
+ /** Data scatter */
+ uint64_t ds : 1;
+ /** Data gather */
+ uint64_t dg : 1;
+ /** History gather */
+ uint64_t hg : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t hg : 1;
+ uint64_t dg : 1;
+ uint64_t ds : 1;
+ uint64_t op : 2;
+ uint64_t bf : 1;
+ uint64_t ef : 1;
+ uint64_t cc : 2;
+ uint64_t ss : 2;
+ uint64_t sf : 1;
+ uint64_t halg : 3;
+ uint64_t hmif : 1;
+ uint64_t exbits : 7;
+ uint64_t iv : 1;
+ uint64_t exn : 3;
+ uint64_t reserved_27_31 : 5;
+ uint64_t totaloutputlength : 24;
+ uint64_t reserved_56_62 : 7;
+ uint64_t doneint : 1;
+#endif /* Word 0 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 1 - Big Endian */
+ /** History length */
+ uint64_t historylength : 16;
+ /** reserved */
+ uint64_t reserved_96_111 : 16;
+ /** adler/crc32 checksum*/
+ uint64_t adlercrc32 : 32;
+#else /* Word 1 - Little Endian */
+ uint64_t adlercrc32 : 32;
+ uint64_t reserved_96_111 : 16;
+ uint64_t historylength : 16;
+#endif /* Word 1 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 2 - Big Endian */
+ /** Decompression Context Pointer Address */
+ union zip_zptr_addr_s ctx_ptr_addr;
+#else /* Word 2 - Little Endian */
+ union zip_zptr_addr_s ctx_ptr_addr;
+#endif /* Word 2 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 3 - Big Endian */
+ /** Decompression Context Pointer Control */
+ union zip_zptr_ctl_s ctx_ptr_ctl;
+#else /* Word 3 - Little Endian */
+ union zip_zptr_ctl_s ctx_ptr_ctl;
+#endif /* Word 3 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 4 - Big Endian */
+ /** Decompression history pointer address */
+ union zip_zptr_addr_s his_ptr_addr;
+#else /* Word 4 - Little Endian */
+ union zip_zptr_addr_s his_ptr_addr;
+#endif /* Word 4 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 5 - Big Endian */
+ /** Decompression history pointer control */
+ union zip_zptr_ctl_s his_ptr_ctl;
+#else /* Word 5 - Little Endian */
+ union zip_zptr_ctl_s his_ptr_ctl;
+#endif /* Word 5 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 6 - Big Endian */
+ /** Input and compression history pointer address */
+ union zip_zptr_addr_s inp_ptr_addr;
+#else /* Word 6 - Little Endian */
+ union zip_zptr_addr_s inp_ptr_addr;
+#endif /* Word 6 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 7 - Big Endian */
+ /** Input and compression history pointer control */
+ union zip_zptr_ctl_s inp_ptr_ctl;
+#else /* Word 7 - Little Endian */
+ union zip_zptr_ctl_s inp_ptr_ctl;
+#endif /* Word 7 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 8 - Big Endian */
+ /** Output pointer address */
+ union zip_zptr_addr_s out_ptr_addr;
+#else /* Word 8 - Little Endian */
+ union zip_zptr_addr_s out_ptr_addr;
+#endif /* Word 8 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 9 - Big Endian */
+ /** Output pointer control */
+ union zip_zptr_ctl_s out_ptr_ctl;
+#else /* Word 9 - Little Endian */
+ union zip_zptr_ctl_s out_ptr_ctl;
+#endif /* Word 9 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 10 - Big Endian */
+ /** Result pointer address */
+ union zip_zptr_addr_s res_ptr_addr;
+#else /* Word 10 - Little Endian */
+ union zip_zptr_addr_s res_ptr_addr;
+#endif /* Word 10 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 11 - Big Endian */
+ /** Result pointer control */
+ union zip_zptr_ctl_s res_ptr_ctl;
+#else /* Word 11 - Little Endian */
+ union zip_zptr_ctl_s res_ptr_ctl;
+#endif /* Word 11 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 12 - Big Endian */
+ /** reserved */
+ uint64_t reserved_812_831 : 20;
+ /** SSO guest group */
+ uint64_t ggrp : 10;
+ /** SSO tag type */
+ uint64_t tt : 2;
+ /** SSO tag */
+ uint64_t tag : 32;
+#else /* Word 12 - Little Endian */
+ uint64_t tag : 32;
+ uint64_t tt : 2;
+ uint64_t ggrp : 10;
+ uint64_t reserved_812_831 : 20;
+#endif /* Word 12 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 13 - Big Endian */
+ /** Work queue entry pointer */
+ uint64_t wq_ptr : 64;
+#else /* Word 13 - Little Endian */
+ uint64_t wq_ptr : 64;
+#endif /* Word 13 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 14 - Big Endian */
+ /** reserved */
+ uint64_t reserved_896_959 : 64;
+#else /* Word 14 - Little Endian */
+ uint64_t reserved_896_959 : 64;
+#endif /* Word 14 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 15 - Big Endian */
+ /** Hash structure pointer */
+ uint64_t hash_ptr : 64;
+#else /* Word 15 - Little Endian */
+ uint64_t hash_ptr : 64;
+#endif /* Word 15 - End */
+ } /** ZIP 83xx Instruction Structure */s;
+};
+
+/**
+ * Structure zip_zres_s
+ *
+ * ZIP Result Structure
+ * The ZIP coprocessor writes the result structure after it completes the
+ * invocation. The result structure is exactly 24 bytes, and each invocation
+ * of the ZIP coprocessor produces exactly one result structure.
+ */
+union zip_zres_s {
+ /** This field can be used to set/clear all bits, or do bitwise
+ * operations over the entire structure.
+ */
+ uint64_t u[8];
+ /** ZIP Result Structure */
+ struct zip_zres_s_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ /** crc32 checksum of uncompressed stream */
+ uint64_t crc32 : 32;
+ /** adler32 checksum of uncompressed stream*/
+ uint64_t adler32 : 32;
+#else /* Word 0 - Little Endian */
+ uint64_t adler32 : 32;
+ uint64_t crc32 : 32;
+#endif /* Word 0 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 1 - Big Endian */
+ /** Total number of bytes produced in output stream */
+ uint64_t totalbyteswritten : 32;
+ /** Total number of bytes processed from the input stream */
+ uint64_t totalbytesread : 32;
+#else /* Word 1 - Little Endian */
+ uint64_t totalbytesread : 32;
+ uint64_t totalbyteswritten : 32;
+#endif /* Word 1 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 2 - Big Endian */
+ /** Total number of compressed input bits
+ * consumed to decompress all blocks in the file
+ */
+ uint64_t totalbitsprocessed : 32;
+ /** Done interrupt*/
+ uint64_t doneint : 1;
+ /** reserved */
+ uint64_t reserved_155_158 : 4;
+ /** EXNUM */
+ uint64_t exn : 3;
+ /** reserved */
+ uint64_t reserved_151 : 1;
+ /** EXBITS */
+ uint64_t exbits : 7;
+ /** reserved */
+ uint64_t reserved_137_143 : 7;
+ /** End of file */
+ uint64_t ef : 1;
+ /** Completion/error code */
+ uint64_t compcode : 8;
+#else /* Word 2 - Little Endian */
+ uint64_t compcode : 8;
+ uint64_t ef : 1;
+ uint64_t reserved_137_143 : 7;
+ uint64_t exbits : 7;
+ uint64_t reserved_151 : 1;
+ uint64_t exn : 3;
+ uint64_t reserved_155_158 : 4;
+ uint64_t doneint : 1;
+ uint64_t totalbitsprocessed : 32;
+#endif /* Word 2 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 3 - Big Endian */
+ /** reserved */
+ uint64_t reserved_253_255 : 3;
+ /** Hash length in bytes */
+ uint64_t hshlen : 61;
+#else /* Word 3 - Little Endian */
+ uint64_t hshlen : 61;
+ uint64_t reserved_253_255 : 3;
+#endif /* Word 3 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 4 - Big Endian */
+ /** Double-word 0 of computed hash */
+ uint64_t hash0 : 64;
+#else /* Word 4 - Little Endian */
+ uint64_t hash0 : 64;
+#endif /* Word 4 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 5 - Big Endian */
+ /** Double-word 1 of computed hash */
+ uint64_t hash1 : 64;
+#else /* Word 5 - Little Endian */
+ uint64_t hash1 : 64;
+#endif /* Word 5 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 6 - Big Endian */
+ /** Double-word 2 of computed hash */
+ uint64_t hash2 : 64;
+#else /* Word 6 - Little Endian */
+ uint64_t hash2 : 64;
+#endif /* Word 6 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 7 - Big Endian */
+ /** Double-word 3 of computed hash */
+ uint64_t hash3 : 64;
+#else /* Word 7 - Little Endian */
+ uint64_t hash3 : 64;
+#endif /* Word 7 - End */
+ } /** ZIP Result Structure */s;
+
+ /* struct zip_zres_s_s cn83xx; */
+};
+
+/**
+ * Structure zip_zptr_s
+ *
+ * ZIP Generic Pointer Structure
+ * This structure is the generic format of pointers in ZIP_INST_S.
+ */
+union zip_zptr_s {
+ /** This field can be used to set/clear all bits, or do bitwise
+ * operations over the entire structure.
+ */
+ uint64_t u[2];
+ /** ZIP Generic Pointer Structure */
+ struct zip_zptr_s_s {
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 0 - Big Endian */
+ /** Pointer to Data or scatter-gather list */
+ uint64_t addr : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 64;
+#endif /* Word 0 - End */
+#if defined(__BIG_ENDIAN_BITFIELD) /* Word 1 - Big Endian */
+ /** reserved */
+ uint64_t reserved_112_127 : 16;
+ /** Length of Data or scatter-gather list*/
+ uint64_t length : 16;
+ /** reserved */
+ uint64_t reserved_67_95 : 29;
+ /** Full-block write */
+ uint64_t fw : 1;
+ /** No cache allocation */
+ uint64_t nc : 1;
+ /** Data big-endian */
+ uint64_t data_be : 1;
+#else /* Word 1 - Little Endian */
+ uint64_t data_be : 1;
+ uint64_t nc : 1;
+ uint64_t fw : 1;
+ uint64_t reserved_67_95 : 29;
+ uint64_t length : 16;
+ uint64_t reserved_112_127 : 16;
+#endif /* Word 1 - End */
+ } /** ZIP Generic Pointer Structure */s;
+};
+
+/**
+ * Enumeration zip_comp_e
+ *
+ * ZIP Completion Enumeration
+ * Enumerates the values of ZIP_ZRES_S[COMPCODE].
+ */
+#define ZIP_COMP_E_NOTDONE (0)
+#define ZIP_COMP_E_SUCCESS (1)
+#define ZIP_COMP_E_DTRUNC (2)
+#define ZIP_COMP_E_DSTOP (3)
+#define ZIP_COMP_E_ITRUNC (4)
+#define ZIP_COMP_E_RBLOCK (5)
+#define ZIP_COMP_E_NLEN (6)
+#define ZIP_COMP_E_BADCODE (7)
+#define ZIP_COMP_E_BADCODE2 (8)
+#define ZIP_COMP_E_ZERO_LEN (9)
+#define ZIP_COMP_E_PARITY (0xa)
+#define ZIP_COMP_E_FATAL (0xb)
+#define ZIP_COMP_E_TIMEOUT (0xc)
+#define ZIP_COMP_E_INSTR_ERR (0xd)
+#define ZIP_COMP_E_HCTX_ERR (0xe)
+#define ZIP_COMP_E_STOP (3)
+
+/**
+ * Enumeration zip_op_e
+ *
+ * ZIP Operation Enumeration
+ * Enumerates ZIP_INST_S[OP].
+ * Internal:
+ */
+#define ZIP_OP_E_DECOMP (0)
+#define ZIP_OP_E_NOCOMP (1)
+#define ZIP_OP_E_COMP (2)
+
+/**
+ * Enumeration zip compression levels
+ *
+ * ZIP Compression Level Enumeration
+ * Enumerates ZIP_INST_S[SS].
+ * Internal:
+ */
+#define ZIP_COMP_E_LEVEL_MAX (0)
+#define ZIP_COMP_E_LEVEL_MED (1)
+#define ZIP_COMP_E_LEVEL_LOW (2)
+#define ZIP_COMP_E_LEVEL_MIN (3)
+
+#endif /* _RTE_OCTEONTX_ZIP_REGS_H_ */
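
Each register union above pairs a raw u view with a bitfield s view, so
driver code composes fields and performs a single 64-bit store; a sketch of
ringing the doorbell this way (zip_reg_write64(), ZIP_VQ_DOORBELL and the
vbar0 base are assumed from the surrounding driver):

    zip_quex_doorbell_t dbell;

    dbell.u = 0ull;
    dbell.s.dbell_cnt = 1; /* one new instruction queued */
    zip_reg_write64(vbar0, ZIP_VQ_DOORBELL, dbell.u);
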
diff --git a/drivers/compress/octeontx/meson.build b/drivers/compress/octeontx/meson.build
new file mode 100644
index 00000000..7cd202d0
--- /dev/null
+++ b/drivers/compress/octeontx/meson.build
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+
+name = 'octeontx_compress'
+sources = files('otx_zip.c', 'otx_zip_pmd.c')
+allow_experimental_apis = true
+includes += include_directories('include')
+deps += ['mempool_octeontx', 'bus_pci']
+ext_deps += dep
diff --git a/drivers/compress/octeontx/otx_zip.c b/drivers/compress/octeontx/otx_zip.c
new file mode 100644
index 00000000..a9046ff3
--- /dev/null
+++ b/drivers/compress/octeontx/otx_zip.c
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#include "otx_zip.h"
+
+uint64_t
+zip_reg_read64(uint8_t *hw_addr, uint64_t offset)
+{
+ uint8_t *base = hw_addr;
+ return *(volatile uint64_t *)(base + offset);
+}
+
+void
+zip_reg_write64(uint8_t *hw_addr, uint64_t offset, uint64_t val)
+{
+ uint8_t *base = hw_addr;
+	*(volatile uint64_t *)(base + offset) = val;
+}
+
+static void
+zip_q_enable(struct zipvf_qp *qp)
+{
+ zip_vqx_ena_t que_ena;
+
+	/* Enable the ZIP VF command queue */
+ que_ena.u = 0ull;
+ que_ena.s.ena = 1;
+
+ zip_reg_write64(qp->vf->vbar0, ZIP_VQ_ENA, que_ena.u);
+ rte_wmb();
+}
+
+/* initialize given qp on zip device */
+int
+zipvf_q_init(struct zipvf_qp *qp)
+{
+ zip_vqx_sbuf_addr_t que_sbuf_addr;
+
+ uint64_t size;
+ void *cmdq_addr;
+ uint64_t iova;
+ struct zipvf_cmdq *cmdq = &qp->cmdq;
+ struct zip_vf *vf = qp->vf;
+
+ /* allocate and setup instruction queue */
+ size = ZIP_MAX_CMDQ_SIZE;
+ size = ZIP_ALIGN_ROUNDUP(size, ZIP_CMDQ_ALIGN);
+
+ cmdq_addr = rte_zmalloc(qp->name, size, ZIP_CMDQ_ALIGN);
+ if (cmdq_addr == NULL)
+ return -1;
+
+ cmdq->sw_head = (uint64_t *)cmdq_addr;
+ cmdq->va = (uint8_t *)cmdq_addr;
+ iova = rte_mem_virt2iova(cmdq_addr);
+
+ cmdq->iova = iova;
+
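+	/* Hardware expects the 128-byte-aligned queue base as IOVA >> 7 */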
+ que_sbuf_addr.u = 0ull;
+ que_sbuf_addr.s.ptr = (cmdq->iova >> 7);
+ zip_reg_write64(vf->vbar0, ZIP_VQ_SBUF_ADDR, que_sbuf_addr.u);
+
+ zip_q_enable(qp);
+
+ memset(cmdq->va, 0, ZIP_MAX_CMDQ_SIZE);
+ rte_spinlock_init(&cmdq->qlock);
+
+ return 0;
+}
+
+int
+zipvf_q_term(struct zipvf_qp *qp)
+{
+ struct zipvf_cmdq *cmdq = &qp->cmdq;
+ zip_vqx_ena_t que_ena;
+ struct zip_vf *vf = qp->vf;
+
+	if (cmdq->va != NULL) {
+		memset(cmdq->va, 0, ZIP_MAX_CMDQ_SIZE);
+		rte_free(cmdq->va);
+		cmdq->va = NULL;
+	}
+
+	/* Disable the ZIP queue */
+	que_ena.u = 0ull;
+	zip_reg_write64(vf->vbar0, ZIP_VQ_ENA, que_ena.u);
+
+ return 0;
+}
+
+void
+zipvf_push_command(struct zipvf_qp *qp, union zip_inst_s *cmd)
+{
+ zip_quex_doorbell_t dbell;
+ union zip_nptr_s ncp;
+ uint64_t *ncb_ptr;
+ struct zipvf_cmdq *cmdq = &qp->cmdq;
+ void *reg_base = qp->vf->vbar0;
+
+	/* Take the per-queue lock */
+	rte_spinlock_lock(&(cmdq->qlock));
+
+	/* Check space availability in zip cmd queue */
+	if ((((cmdq->sw_head - (uint64_t *)cmdq->va) * sizeof(uint64_t)) +
+		ZIP_CMD_SIZE) == (ZIP_MAX_CMDQ_SIZE - ZIP_MAX_NCBP_SIZE)) {
+ /*Last buffer of the command queue*/
+ memcpy((uint8_t *)cmdq->sw_head,
+ (uint8_t *)cmd,
+ sizeof(union zip_inst_s));
+ /* move pointer to next loc in unit of 64-bit word */
+ cmdq->sw_head += ZIP_CMD_SIZE_WORDS;
+
+ /* now, point the "Next-Chunk Buffer Ptr" to sw_head */
+ ncb_ptr = cmdq->sw_head;
+ /* Pointing head again to cmdqueue base*/
+ cmdq->sw_head = (uint64_t *)cmdq->va;
+
+ ncp.u = 0ull;
+ ncp.s.addr = cmdq->iova;
+ *ncb_ptr = ncp.u;
+ } else {
+ /*Enough buffers available in the command queue*/
+ memcpy((uint8_t *)cmdq->sw_head,
+ (uint8_t *)cmd,
+ sizeof(union zip_inst_s));
+ cmdq->sw_head += ZIP_CMD_SIZE_WORDS;
+ }
+
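+	/* Ensure the instruction is visible to hardware before the doorbell */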
+ rte_wmb();
+
+ /* Ringing ZIP VF doorbell */
+ dbell.u = 0ull;
+ dbell.s.dbell_cnt = 1;
+ zip_reg_write64(reg_base, ZIP_VQ_DOORBELL, dbell.u);
+
+ rte_spinlock_unlock(&(cmdq->qlock));
+}
+
+int
+zipvf_create(struct rte_compressdev *compressdev)
+{
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(compressdev->device);
+ struct zip_vf *zipvf = NULL;
+ char *dev_name = compressdev->data->name;
+ void *vbar0;
+ uint64_t reg;
+
+ if (pdev->mem_resource[0].phys_addr == 0ULL)
+ return -EIO;
+
+ vbar0 = pdev->mem_resource[0].addr;
+ if (!vbar0) {
+ ZIP_PMD_ERR("Failed to map BAR0 of %s", dev_name);
+ return -ENODEV;
+ }
+
+ zipvf = (struct zip_vf *)(compressdev->data->dev_private);
+
+ if (!zipvf)
+ return -ENOMEM;
+
+ zipvf->vbar0 = vbar0;
+ reg = zip_reg_read64(zipvf->vbar0, ZIP_VF_PF_MBOXX(0));
+	/* Save the mailbox domain/subdomain id so it can be restored later */
+ zipvf->dom_sdom = reg;
+ zipvf->pdev = pdev;
+ zipvf->max_nb_queue_pairs = ZIP_MAX_VF_QUEUE;
+ return 0;
+}
+
+int
+zipvf_destroy(struct rte_compressdev *compressdev)
+{
+ struct zip_vf *vf = (struct zip_vf *)(compressdev->data->dev_private);
+
+ /* Rewriting the domain_id in ZIP_VF_MBOX for app rerun */
+ zip_reg_write64(vf->vbar0, ZIP_VF_PF_MBOXX(0), vf->dom_sdom);
+
+ return 0;
+}
diff --git a/drivers/compress/octeontx/otx_zip.h b/drivers/compress/octeontx/otx_zip.h
new file mode 100644
index 00000000..99a38d00
--- /dev/null
+++ b/drivers/compress/octeontx/otx_zip.h
@@ -0,0 +1,277 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _RTE_OCTEONTX_ZIP_VF_H_
+#define _RTE_OCTEONTX_ZIP_VF_H_
+
+#include <unistd.h>
+
+#include <rte_bus_pci.h>
+#include <rte_comp.h>
+#include <rte_compressdev.h>
+#include <rte_compressdev_pmd.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+
+#include <zip_regs.h>
+
+int octtx_zip_logtype_driver;
+
+/* ZIP VF Control/Status registers (CSRs): */
+/* VF_BAR0: */
+#define ZIP_VQ_ENA (0x10)
+#define ZIP_VQ_SBUF_ADDR (0x20)
+#define ZIP_VF_PF_MBOXX(x) (0x400 | (x)<<3)
+#define ZIP_VQ_DOORBELL (0x1000)
+
+/** Vendor ID */
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+/** PCI device id of ZIP VF */
+#define PCI_DEVICE_ID_OCTEONTX_ZIPVF 0xA037
+
+/* maximum number of zip vf devices */
+#define ZIP_MAX_VFS 8
+
+/* max size of one chunk */
+#define ZIP_MAX_CHUNK_SIZE 8192
+
+/* each instruction is fixed 128 bytes */
+#define ZIP_CMD_SIZE 128
+
+#define ZIP_CMD_SIZE_WORDS (ZIP_CMD_SIZE >> 3) /* 16 64-bit words */
+
+/* size of next chunk buffer pointer */
+#define ZIP_MAX_NCBP_SIZE 8
+
+/* size of instruction queue in units of instruction size */
+#define ZIP_MAX_NUM_CMDS ((ZIP_MAX_CHUNK_SIZE - ZIP_MAX_NCBP_SIZE) / \
+ ZIP_CMD_SIZE) /* 63 */
+
+/* size of instruction queue in bytes */
+#define ZIP_MAX_CMDQ_SIZE ((ZIP_MAX_NUM_CMDS * ZIP_CMD_SIZE) + \
+		ZIP_MAX_NCBP_SIZE) /* 8072ull */
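+/* i.e. 63 commands * 128 B each + one 8 B next-chunk pointer = 8072 B */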
+
+#define ZIP_BUF_SIZE 256
+
+#define ZIP_SGPTR_ALIGN 16
+#define ZIP_CMDQ_ALIGN 128
+#define MAX_SG_LEN ((ZIP_BUF_SIZE - ZIP_SGPTR_ALIGN) / sizeof(void *))
+
+/** ZIP PMD specified queue pairs */
+#define ZIP_MAX_VF_QUEUE 1
+
+#define ZIP_ALIGN_ROUNDUP(x, _align) \
+ ((_align) * (((x) + (_align) - 1) / (_align)))
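+/* e.g. ZIP_ALIGN_ROUNDUP(8072, 128) == 8192 */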
+
+/** ZIP PMD device name */
+#define COMPRESSDEV_NAME_ZIP_PMD compress_octeontx
+
+#define ZIP_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, \
+ octtx_zip_logtype_driver, "%s(): "fmt "\n", \
+ __func__, ##args)
+
+#define ZIP_PMD_INFO(fmt, args...) \
+ ZIP_PMD_LOG(INFO, fmt, ## args)
+#define ZIP_PMD_ERR(fmt, args...) \
+ ZIP_PMD_LOG(ERR, fmt, ## args)
+
+/* resources required to process stream */
+enum {
+	RES_BUF = 0,
+	CMD_BUF,
+	HASH_CTX_BUF,
+	DECOMP_CTX_BUF,
+	IN_DATA_BUF,
+	OUT_DATA_BUF,
+	HISTORY_DATA_BUF,
+	MAX_BUFS_PER_STREAM
+};
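+/* MAX_BUFS_PER_STREAM is also the bulk count taken from the resource pool */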
+
+struct zip_stream;
+struct zipvf_qp;
+
+/* Algorithm handler function prototype */
+typedef int (*comp_func_t)(struct rte_comp_op *op,
+ struct zipvf_qp *qp, struct zip_stream *zstrm);
+
+/**
+ * ZIP private stream structure
+ */
+struct zip_stream {
+ union zip_inst_s *inst;
+ /* zip instruction pointer */
+ comp_func_t func;
+ /* function to process comp operation */
+ void *bufs[MAX_BUFS_PER_STREAM];
+} __rte_cache_aligned;
+
+
+/**
+ * ZIP instruction Queue
+ */
+struct zipvf_cmdq {
+ rte_spinlock_t qlock;
+ /* queue lock */
+ uint64_t *sw_head;
+ /* pointer to start of 8-byte word length queue-head */
+ uint8_t *va;
+ /* pointer to instruction queue virtual address */
+ rte_iova_t iova;
+ /* iova addr of cmdq head*/
+};
+
+/**
+ * ZIP device queue structure
+ */
+struct zipvf_qp {
+ struct zipvf_cmdq cmdq;
+ /* Hardware instruction queue structure */
+ struct rte_ring *processed_pkts;
+ /* Ring for placing processed packets */
+ struct rte_compressdev_stats qp_stats;
+ /* Queue pair statistics */
+ uint16_t id;
+ /* Queue Pair Identifier */
+ const char *name;
+ /* Unique Queue Pair Name */
+ struct zip_vf *vf;
+ /* pointer to device, queue belongs to */
+} __rte_cache_aligned;
+
+/**
+ * ZIP VF device structure.
+ */
+struct zip_vf {
+ int vfid;
+ /* vf index */
+ struct rte_pci_device *pdev;
+ /* pci device */
+ void *vbar0;
+ /* CSR base address for underlying BAR0 VF.*/
+ uint64_t dom_sdom;
+ /* Storing mbox domain and subdomain id for app rerun*/
+ uint32_t max_nb_queue_pairs;
+ /* pointer to device qps */
+ struct rte_mempool *zip_mp;
+ /* pointer to pools */
+} __rte_cache_aligned;
+
+
+static inline void
+zipvf_prepare_in_buf(struct zip_stream *zstrm, struct rte_comp_op *op)
+{
+ uint32_t offset, inlen;
+ struct rte_mbuf *m_src;
+ union zip_inst_s *inst = zstrm->inst;
+
+ inlen = op->src.length;
+ offset = op->src.offset;
+ m_src = op->m_src;
+
+ /* Prepare direct input data pointer */
+ inst->s.dg = 0;
+ inst->s.inp_ptr_addr.s.addr =
+ rte_pktmbuf_iova_offset(m_src, offset);
+ inst->s.inp_ptr_ctl.s.length = inlen;
+}
+
+static inline void
+zipvf_prepare_out_buf(struct zip_stream *zstrm, struct rte_comp_op *op)
+{
+ uint32_t offset;
+ struct rte_mbuf *m_dst;
+ union zip_inst_s *inst = zstrm->inst;
+
+ offset = op->dst.offset;
+ m_dst = op->m_dst;
+
+	/* Prepare direct output data pointer */
+ inst->s.ds = 0;
+ inst->s.out_ptr_addr.s.addr =
+ rte_pktmbuf_iova_offset(m_dst, offset);
+ inst->s.totaloutputlength = rte_pktmbuf_pkt_len(m_dst) -
+ op->dst.offset;
+ inst->s.out_ptr_ctl.s.length = inst->s.totaloutputlength;
+}
+
+static inline void
+zipvf_prepare_cmd_stateless(struct rte_comp_op *op, struct zip_stream *zstrm)
+{
+ union zip_inst_s *inst = zstrm->inst;
+
+	/* Stateless ops always carry the full input, so set end-of-file */
+	inst->s.ef = 1;
+
+	/* Sync-flush the output for decompression only */
+	if (inst->s.op == ZIP_OP_E_DECOMP)
+		inst->s.sf = 1;
+	else
+		inst->s.sf = 0;
+
+	/* Set input checksum */
+	inst->s.adlercrc32 = op->input_chksum;
+
+	/* Prepare input and output buffer pointers */
+ zipvf_prepare_in_buf(zstrm, op);
+ zipvf_prepare_out_buf(zstrm, op);
+}
+
+#ifdef ZIP_DBG
+static inline void
+zip_dump_instruction(void *inst)
+{
+ union zip_inst_s *cmd83 = (union zip_inst_s *)inst;
+ printf("####### START ########\n");
+ printf("doneint:%d totaloutputlength:%d\n", cmd83->s.doneint,
+ cmd83->s.totaloutputlength);
+ printf("exnum:%d iv:%d exbits:%d hmif:%d halg:%d\n", cmd83->s.exn,
+ cmd83->s.iv, cmd83->s.exbits, cmd83->s.hmif, cmd83->s.halg);
+ printf("flush:%d speed:%d cc:%d\n", cmd83->s.sf,
+ cmd83->s.ss, cmd83->s.cc);
+ printf("eof:%d bof:%d op:%d dscatter:%d dgather:%d hgather:%d\n",
+ cmd83->s.ef, cmd83->s.bf, cmd83->s.op, cmd83->s.ds,
+ cmd83->s.dg, cmd83->s.hg);
+ printf("historylength:%d adler32:%d\n", cmd83->s.historylength,
+ cmd83->s.adlercrc32);
+ printf("ctx_ptr.addr:0x%"PRIx64"\n", cmd83->s.ctx_ptr_addr.s.addr);
+ printf("ctx_ptr.len:%d\n", cmd83->s.ctx_ptr_ctl.s.length);
+ printf("history_ptr.addr:0x%"PRIx64"\n", cmd83->s.his_ptr_addr.s.addr);
+ printf("history_ptr.len:%d\n", cmd83->s.his_ptr_ctl.s.length);
+ printf("inp_ptr.addr:0x%"PRIx64"\n", cmd83->s.inp_ptr_addr.s.addr);
+ printf("inp_ptr.len:%d\n", cmd83->s.inp_ptr_ctl.s.length);
+ printf("out_ptr.addr:0x%"PRIx64"\n", cmd83->s.out_ptr_addr.s.addr);
+ printf("out_ptr.len:%d\n", cmd83->s.out_ptr_ctl.s.length);
+ printf("result_ptr.len:%d\n", cmd83->s.res_ptr_ctl.s.length);
+ printf("####### END ########\n");
+}
+#endif
+
+int
+zipvf_create(struct rte_compressdev *compressdev);
+
+int
+zipvf_destroy(struct rte_compressdev *compressdev);
+
+int
+zipvf_q_init(struct zipvf_qp *qp);
+
+int
+zipvf_q_term(struct zipvf_qp *qp);
+
+void
+zipvf_push_command(struct zipvf_qp *qp, union zip_inst_s *zcmd);
+
+int
+zip_process_op(struct rte_comp_op *op,
+ struct zipvf_qp *qp,
+ struct zip_stream *zstrm);
+
+uint64_t
+zip_reg_read64(uint8_t *hw_addr, uint64_t offset);
+
+void
+zip_reg_write64(uint8_t *hw_addr, uint64_t offset, uint64_t val);
+
+#endif /* _RTE_OCTEONTX_ZIP_VF_H_ */
diff --git a/drivers/compress/octeontx/otx_zip_pmd.c b/drivers/compress/octeontx/otx_zip_pmd.c
new file mode 100644
index 00000000..9d13f933
--- /dev/null
+++ b/drivers/compress/octeontx/otx_zip_pmd.c
@@ -0,0 +1,658 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#include <string.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cpuflags.h>
+#include <rte_malloc.h>
+
+#include "otx_zip.h"
+
+static const struct rte_compressdev_capabilities
+ octtx_zip_pmd_capabilities[] = {
+ { .algo = RTE_COMP_ALGO_DEFLATE,
+ /* Deflate */
+ .comp_feature_flags = RTE_COMP_FF_HUFFMAN_FIXED |
+ RTE_COMP_FF_HUFFMAN_DYNAMIC,
+ /* Non sharable Priv XFORM and Stateless */
+ .window_size = {
+ .min = 1,
+ .max = 14,
+ .increment = 1
+ /* size supported 2^1 to 2^14 */
+ },
+ },
+ RTE_COMP_END_OF_CAPABILITIES_LIST()
+};
+
+/*
+ * Reset session to default state for next set of stateless operation
+ */
+static inline void
+reset_stream(struct zip_stream *z_stream)
+{
+ union zip_inst_s *inst = (union zip_inst_s *)(z_stream->inst);
+
+ inst->s.bf = 1;
+ inst->s.ef = 0;
+}
+
+int
+zip_process_op(struct rte_comp_op *op,
+ struct zipvf_qp *qp,
+ struct zip_stream *zstrm)
+{
+ union zip_inst_s *inst = zstrm->inst;
+ volatile union zip_zres_s *zresult = NULL;
+
+
+ if ((op->m_src->nb_segs > 1) || (op->m_dst->nb_segs > 1) ||
+ (op->src.offset > rte_pktmbuf_pkt_len(op->m_src)) ||
+ (op->dst.offset > rte_pktmbuf_pkt_len(op->m_dst))) {
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+		ZIP_PMD_ERR("Segmented packets/invalid offsets not supported\n");
+ return 0;
+ }
+
+ zipvf_prepare_cmd_stateless(op, zstrm);
+
+ zresult = (union zip_zres_s *)zstrm->bufs[RES_BUF];
+ zresult->s.compcode = 0;
+
+#ifdef ZIP_DBG
+ zip_dump_instruction(inst);
+#endif
+
+ /* Submit zip command */
+ zipvf_push_command(qp, (void *)inst);
+
+ /* Check and Process results in sync mode */
+ do {
+ } while (!zresult->s.compcode);
+
+ if (zresult->s.compcode == ZIP_COMP_E_SUCCESS) {
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+ } else {
+		/* Fatal error, nothing can be done */
+ ZIP_PMD_ERR("operation failed with error code:%d\n",
+ zresult->s.compcode);
+ if (zresult->s.compcode == ZIP_COMP_E_DSTOP)
+ op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ else
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ }
+
+ ZIP_PMD_INFO("written %d\n", zresult->s.totalbyteswritten);
+
+ /* Update op stats */
+ switch (op->status) {
+ case RTE_COMP_OP_STATUS_SUCCESS:
+ op->consumed = zresult->s.totalbytesread;
+ /* Fall-through */
+ case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
+ op->produced = zresult->s.totalbyteswritten;
+ break;
+ default:
+ ZIP_PMD_ERR("stats not updated for status:%d\n",
+ op->status);
+ break;
+ }
+ /* zstream is reset irrespective of result */
+ reset_stream(zstrm);
+
+ zresult->s.compcode = ZIP_COMP_E_NOTDONE;
+ return 0;
+}
+
+/** Parse xform parameters and setup a stream */
+static int
+zip_set_stream_parameters(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform,
+ struct zip_stream *z_stream)
+{
+ int ret;
+ union zip_inst_s *inst;
+ struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
+ void *res;
+
+ /* Allocate resources required by a stream */
+ ret = rte_mempool_get_bulk(vf->zip_mp,
+ z_stream->bufs, MAX_BUFS_PER_STREAM);
+ if (ret < 0)
+ return -1;
+
+ /* get one command buffer from pool and set up */
+ inst = (union zip_inst_s *)z_stream->bufs[CMD_BUF];
+ res = z_stream->bufs[RES_BUF];
+
+ memset(inst->u, 0, sizeof(inst->u));
+
+ /* set bf for only first ops of stream */
+ inst->s.bf = 1;
+
+ if (xform->type == RTE_COMP_COMPRESS) {
+ inst->s.op = ZIP_OP_E_COMP;
+
+ switch (xform->compress.deflate.huffman) {
+ case RTE_COMP_HUFFMAN_DEFAULT:
+ inst->s.cc = ZIP_CC_DEFAULT;
+ break;
+ case RTE_COMP_HUFFMAN_FIXED:
+ inst->s.cc = ZIP_CC_FIXED_HUFF;
+ break;
+ case RTE_COMP_HUFFMAN_DYNAMIC:
+ inst->s.cc = ZIP_CC_DYN_HUFF;
+ break;
+ default:
+ ret = -1;
+ goto err;
+ }
+
+ switch (xform->compress.level) {
+ case RTE_COMP_LEVEL_MIN:
+ inst->s.ss = ZIP_COMP_E_LEVEL_MIN;
+ break;
+ case RTE_COMP_LEVEL_MAX:
+ inst->s.ss = ZIP_COMP_E_LEVEL_MAX;
+ break;
+ case RTE_COMP_LEVEL_NONE:
+ ZIP_PMD_ERR("Compression level not supported");
+ ret = -1;
+ goto err;
+		default:
+			/* for any value between min and max, choose
+			 * PMD default.
+			 */
+			inst->s.ss = ZIP_COMP_E_LEVEL_MED; /* PMD default */
+			break;
+ }
+ } else if (xform->type == RTE_COMP_DECOMPRESS) {
+ inst->s.op = ZIP_OP_E_DECOMP;
+ /* from HRM,
+ * For DEFLATE decompression, [CC] must be 0x0.
+ * For decompression, [SS] must be 0x0
+ */
+ inst->s.cc = 0;
+ /* Speed bit should not be set for decompression */
+ inst->s.ss = 0;
+ /* decompression context is supported only for STATEFUL
+ * operations. Currently we support STATELESS ONLY so
+ * skip setting of ctx pointer
+ */
+
+ } else {
+		ZIP_PMD_ERR("xform type not supported");
+ ret = -1;
+ goto err;
+ }
+
+ inst->s.res_ptr_addr.s.addr = rte_mempool_virt2iova(res);
+ inst->s.res_ptr_ctl.s.length = 0;
+
+ z_stream->inst = inst;
+ z_stream->func = zip_process_op;
+
+ return 0;
+
+err:
+ rte_mempool_put_bulk(vf->zip_mp,
+ (void *)&(z_stream->bufs[0]),
+ MAX_BUFS_PER_STREAM);
+
+ return ret;
+}
+
+/** Configure device */
+static int
+zip_pmd_config(struct rte_compressdev *dev,
+ struct rte_compressdev_config *config)
+{
+ int nb_streams;
+ char res_pool[RTE_MEMZONE_NAMESIZE];
+ struct zip_vf *vf;
+ struct rte_mempool *zip_buf_mp;
+
+	if (!config || !dev)
+		return -EINVAL;
+
+ vf = (struct zip_vf *)(dev->data->dev_private);
+
+ /* create pool with maximum numbers of resources
+ * required by streams
+ */
+
+ /* use common pool for non-shareable priv_xform and stream */
+ nb_streams = config->max_nb_priv_xforms + config->max_nb_streams;
+
+ snprintf(res_pool, RTE_MEMZONE_NAMESIZE, "octtx_zip_res_pool%u",
+ dev->data->dev_id);
+
+	/* TBD: should we use the per-core object cache for stream resources? */
+ zip_buf_mp = rte_mempool_create(
+ res_pool,
+ nb_streams * MAX_BUFS_PER_STREAM,
+ ZIP_BUF_SIZE,
+ 0,
+ 0,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ SOCKET_ID_ANY,
+ 0);
+
+ if (zip_buf_mp == NULL) {
+ ZIP_PMD_ERR(
+ "Failed to create buf mempool octtx_zip_res_pool%u",
+ dev->data->dev_id);
+ return -1;
+ }
+
+ vf->zip_mp = zip_buf_mp;
+
+ return 0;
+}
+
+/** Start device */
+static int
+zip_pmd_start(__rte_unused struct rte_compressdev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+zip_pmd_stop(__rte_unused struct rte_compressdev *dev)
+{
+
+}
+
+/** Close device */
+static int
+zip_pmd_close(struct rte_compressdev *dev)
+{
+ if (dev == NULL)
+ return -1;
+
+ struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
+ rte_mempool_free(vf->zip_mp);
+
+ return 0;
+}
+
+/** Get device statistics */
+static void
+zip_pmd_stats_get(struct rte_compressdev *dev,
+ struct rte_compressdev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+zip_pmd_stats_reset(struct rte_compressdev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+/** Get device info */
+static void
+zip_pmd_info_get(struct rte_compressdev *dev,
+ struct rte_compressdev_info *dev_info)
+{
+ struct zip_vf *vf = (struct zip_vf *)dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_name = dev->device->driver->name;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = octtx_zip_pmd_capabilities;
+ dev_info->max_nb_queue_pairs = vf->max_nb_queue_pairs;
+ }
+}
+
+/** Release queue pair */
+static int
+zip_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
+{
+ struct zipvf_qp *qp = dev->data->queue_pairs[qp_id];
+
+ if (qp != NULL) {
+ zipvf_q_term(qp);
+
+ if (qp->processed_pkts)
+ rte_ring_free(qp->processed_pkts);
+
+ rte_free(qp);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Create a ring to place process packets on */
+static struct rte_ring *
+zip_pmd_qp_create_processed_pkts_ring(struct zipvf_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ ZIP_PMD_INFO("Reusing existing ring %s for processed"
+ " packets", qp->name);
+ return r;
+ }
+
+ ZIP_PMD_ERR("Unable to reuse existing ring %s for processed"
+ " packets", qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_EXACT_SZ);
+}
+
+/** Setup a queue pair */
+static int
+zip_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+ uint32_t max_inflight_ops, int socket_id)
+{
+ struct zipvf_qp *qp = NULL;
+ struct zip_vf *vf;
+ char *name;
+ int ret;
+
+ if (!dev)
+ return -1;
+
+ vf = (struct zip_vf *) (dev->data->dev_private);
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ ZIP_PMD_INFO("Using existing queue pair %d ", qp_id);
+ return 0;
+ }
+
+	name = rte_malloc(NULL, RTE_COMPRESSDEV_NAME_MAX_LEN, 0);
+	if (name == NULL)
+		return -ENOMEM;
+	snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN,
+		 "zip_pmd_%u_qp_%u",
+		 dev->data->dev_id, qp_id);
+
+ /* Allocate the queue pair data structure. */
+	qp = rte_zmalloc_socket(name, sizeof(*qp),
+				RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp == NULL) {
+		rte_free(name);
+		return (-ENOMEM);
+	}
+
+ qp->name = name;
+
+ /* Create completion queue upto max_inflight_ops */
+ qp->processed_pkts = zip_pmd_qp_create_processed_pkts_ring(qp,
+ max_inflight_ops, socket_id);
+ if (qp->processed_pkts == NULL)
+ goto qp_setup_cleanup;
+
+ qp->id = qp_id;
+ qp->vf = vf;
+
+ ret = zipvf_q_init(qp);
+ if (ret < 0)
+ goto qp_setup_cleanup;
+
+ dev->data->queue_pairs[qp_id] = qp;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ return 0;
+
+qp_setup_cleanup:
+	if (qp->processed_pkts)
+		rte_ring_free(qp->processed_pkts);
+	rte_free(name);
+	rte_free(qp);
+	return -1;
+}
+
+static int
+zip_pmd_stream_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform, void **stream)
+{
+ int ret;
+ struct zip_stream *strm = NULL;
+
+ strm = rte_malloc(NULL,
+ sizeof(struct zip_stream), 0);
+
+ if (strm == NULL)
+ return (-ENOMEM);
+
+ ret = zip_set_stream_parameters(dev, xform, strm);
+ if (ret < 0) {
+ ZIP_PMD_ERR("failed configure xform parameters");
+ rte_free(strm);
+ return ret;
+ }
+ *stream = strm;
+ return 0;
+}
+
+static int
+zip_pmd_stream_free(struct rte_compressdev *dev, void *stream)
+{
+ struct zip_vf *vf = (struct zip_vf *) (dev->data->dev_private);
+ struct zip_stream *z_stream;
+
+ if (stream == NULL)
+ return 0;
+
+ z_stream = (struct zip_stream *)stream;
+
+ /* Free resources back to pool */
+ rte_mempool_put_bulk(vf->zip_mp,
+ (void *)&(z_stream->bufs[0]),
+ MAX_BUFS_PER_STREAM);
+
+ /* Zero out the whole structure */
+ memset(stream, 0, sizeof(struct zip_stream));
+ rte_free(stream);
+
+ return 0;
+}
+
+
+static uint16_t
+zip_pmd_enqueue_burst_sync(void *queue_pair,
+ struct rte_comp_op **ops, uint16_t nb_ops)
+{
+ struct zipvf_qp *qp = queue_pair;
+ struct rte_comp_op *op;
+ struct zip_stream *zstrm;
+ int i, ret = 0;
+ uint16_t enqd = 0;
+
+ for (i = 0; i < nb_ops; i++) {
+ op = ops[i];
+
+ if (op->op_type == RTE_COMP_OP_STATEFUL) {
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ } else {
+ /* process stateless ops */
+ zstrm = (struct zip_stream *)op->private_xform;
+ if (unlikely(zstrm == NULL))
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ else
+ ret = zstrm->func(op, qp, zstrm);
+ }
+
+ /* Whatever is out of op, put it into completion queue with
+ * its status
+ */
+ if (!ret)
+ ret = rte_ring_enqueue(qp->processed_pkts, (void *)op);
+
+ if (unlikely(ret < 0)) {
+ /* increment count if failed to enqueue op*/
+ qp->qp_stats.enqueue_err_count++;
+ } else {
+ qp->qp_stats.enqueued_count++;
+ enqd++;
+ }
+ }
+ return enqd;
+}
+
+static uint16_t
+zip_pmd_dequeue_burst_sync(void *queue_pair,
+ struct rte_comp_op **ops, uint16_t nb_ops)
+{
+ struct zipvf_qp *qp = queue_pair;
+
+ unsigned int nb_dequeued = 0;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+struct rte_compressdev_ops octtx_zip_pmd_ops = {
+ .dev_configure = zip_pmd_config,
+ .dev_start = zip_pmd_start,
+ .dev_stop = zip_pmd_stop,
+ .dev_close = zip_pmd_close,
+
+ .stats_get = zip_pmd_stats_get,
+ .stats_reset = zip_pmd_stats_reset,
+
+ .dev_infos_get = zip_pmd_info_get,
+
+ .queue_pair_setup = zip_pmd_qp_setup,
+ .queue_pair_release = zip_pmd_qp_release,
+
+ .private_xform_create = zip_pmd_stream_create,
+ .private_xform_free = zip_pmd_stream_free,
+ .stream_create = NULL,
+ .stream_free = NULL
+};
+
+static int
+zip_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ int ret = 0;
+ char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ struct rte_compressdev *compressdev;
+ struct rte_compressdev_pmd_init_params init_params = {
+ "",
+ rte_socket_id(),
+ };
+
+ ZIP_PMD_INFO("vendor_id=0x%x device_id=0x%x",
+ (unsigned int)pci_dev->id.vendor_id,
+ (unsigned int)pci_dev->id.device_id);
+
+ rte_pci_device_name(&pci_dev->addr, compressdev_name,
+ sizeof(compressdev_name));
+
+ compressdev = rte_compressdev_pmd_create(compressdev_name,
+ &pci_dev->device, sizeof(struct zip_vf), &init_params);
+ if (compressdev == NULL) {
+ ZIP_PMD_ERR("driver %s: create failed", init_params.name);
+ return -ENODEV;
+ }
+
+ /*
+ * create only if proc_type is primary.
+ */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ /* create vf dev with given pmd dev id */
+ ret = zipvf_create(compressdev);
+ if (ret < 0) {
+ ZIP_PMD_ERR("Device creation failed");
+ rte_compressdev_pmd_destroy(compressdev);
+ return ret;
+ }
+ }
+
+ compressdev->dev_ops = &octtx_zip_pmd_ops;
+ /* register rx/tx burst functions for data path */
+ compressdev->dequeue_burst = zip_pmd_dequeue_burst_sync;
+ compressdev->enqueue_burst = zip_pmd_enqueue_burst_sync;
+ compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
+ return ret;
+}
+
+static int
+zip_pci_remove(struct rte_pci_device *pci_dev)
+{
+ struct rte_compressdev *compressdev;
+ char compressdev_name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+
+ if (pci_dev == NULL) {
+		ZIP_PMD_ERR("Invalid PCI device\n");
+ return -EINVAL;
+ }
+ rte_pci_device_name(&pci_dev->addr, compressdev_name,
+ sizeof(compressdev_name));
+
+ compressdev = rte_compressdev_pmd_get_named_dev(compressdev_name);
+ if (compressdev == NULL)
+ return -ENODEV;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if (zipvf_destroy(compressdev) < 0)
+ return -ENODEV;
+ }
+ return rte_compressdev_pmd_destroy(compressdev);
+}
+
+static struct rte_pci_id pci_id_octtx_zipvf_table[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_ZIPVF),
+ },
+ {
+ .device_id = 0
+ },
+};
+
+/**
+ * Structure that represents a PCI driver
+ */
+static struct rte_pci_driver octtx_zip_pmd = {
+ .id_table = pci_id_octtx_zipvf_table,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = zip_pci_probe,
+ .remove = zip_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(COMPRESSDEV_NAME_ZIP_PMD, octtx_zip_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(COMPRESSDEV_NAME_ZIP_PMD, pci_id_octtx_zipvf_table);
+
+RTE_INIT(octtx_zip_init_log);
+
+static void
+octtx_zip_init_log(void)
+{
+ octtx_zip_logtype_driver = rte_log_register("pmd.compress.octeontx");
+ if (octtx_zip_logtype_driver >= 0)
+ rte_log_set_level(octtx_zip_logtype_driver, RTE_LOG_INFO);
+}
diff --git a/drivers/compress/octeontx/rte_pmd_octeontx_compress_version.map b/drivers/compress/octeontx/rte_pmd_octeontx_compress_version.map
new file mode 100644
index 00000000..ad6e191e
--- /dev/null
+++ b/drivers/compress/octeontx/rte_pmd_octeontx_compress_version.map
@@ -0,0 +1,3 @@
+DPDK_18.08 {
+ local: *;
+};
diff --git a/drivers/compress/qat/meson.build b/drivers/compress/qat/meson.build
new file mode 100644
index 00000000..9d15076d
--- /dev/null
+++ b/drivers/compress/qat/meson.build
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017-2018 Intel Corporation
+
+
+# Add our source files to the list
+allow_experimental_apis = true
+qat_sources += files('qat_comp_pmd.c',
+ 'qat_comp.c')
+qat_includes += include_directories('.')
+qat_deps += 'compressdev'
+qat_ext_deps += dep
+
+# build the whole driver
+sources += qat_sources
+cflags += qat_cflags
+deps += qat_deps
+ext_deps += qat_ext_deps
+includes += qat_includes
diff --git a/drivers/compress/qat/qat_comp.c b/drivers/compress/qat/qat_comp.c
new file mode 100644
index 00000000..38c8a5b8
--- /dev/null
+++ b/drivers/compress/qat/qat_comp.c
@@ -0,0 +1,393 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_hexdump.h>
+#include <rte_comp.h>
+#include <rte_bus_pci.h>
+#include <rte_byteorder.h>
+#include <rte_memcpy.h>
+#include <rte_common.h>
+#include <rte_spinlock.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+
+#include "qat_logs.h"
+#include "qat_comp.h"
+#include "qat_comp_pmd.h"
+
+
+int
+qat_comp_build_request(void *in_op, uint8_t *out_msg,
+ void *op_cookie,
+ enum qat_device_gen qat_dev_gen __rte_unused)
+{
+ struct rte_comp_op *op = in_op;
+ struct qat_comp_op_cookie *cookie =
+ (struct qat_comp_op_cookie *)op_cookie;
+ struct qat_comp_xform *qat_xform = op->private_xform;
+ const uint8_t *tmpl = (uint8_t *)&qat_xform->qat_comp_req_tmpl;
+ struct icp_qat_fw_comp_req *comp_req =
+ (struct icp_qat_fw_comp_req *)out_msg;
+
+ if (unlikely(op->op_type != RTE_COMP_OP_STATELESS)) {
+ QAT_DP_LOG(ERR, "QAT PMD only supports stateless compression "
+ "operation requests, op (%p) is not a "
+ "stateless operation.", op);
+ return -EINVAL;
+ }
+
+ rte_mov128(out_msg, tmpl);
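+	/* Save the op pointer so qat_comp_process_response() can recover it */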
+ comp_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
+
+ /* common for sgl and flat buffers */
+ comp_req->comp_pars.comp_len = op->src.length;
+ comp_req->comp_pars.out_buffer_sz = rte_pktmbuf_pkt_len(op->m_dst) -
+ op->dst.offset;
+
+ if (op->m_src->next != NULL || op->m_dst->next != NULL) {
+ /* sgl */
+ int ret = 0;
+
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_SGL);
+
+ ret = qat_sgl_fill_array(op->m_src,
+ op->src.offset,
+ &cookie->qat_sgl_src,
+ op->src.length,
+ RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
+ if (ret) {
+ QAT_DP_LOG(ERR, "QAT PMD Cannot fill source sgl array");
+ return ret;
+ }
+
+ ret = qat_sgl_fill_array(op->m_dst,
+ op->dst.offset,
+ &cookie->qat_sgl_dst,
+ comp_req->comp_pars.out_buffer_sz,
+ RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS);
+ if (ret) {
+ QAT_DP_LOG(ERR, "QAT PMD Cannot fill dest. sgl array");
+ return ret;
+ }
+
+ comp_req->comn_mid.src_data_addr =
+ cookie->qat_sgl_src_phys_addr;
+ comp_req->comn_mid.dest_data_addr =
+ cookie->qat_sgl_dst_phys_addr;
+ comp_req->comn_mid.src_length = 0;
+ comp_req->comn_mid.dst_length = 0;
+
+ } else {
+ /* flat aka linear buffer */
+ ICP_QAT_FW_COMN_PTR_TYPE_SET(comp_req->comn_hdr.comn_req_flags,
+ QAT_COMN_PTR_TYPE_FLAT);
+ comp_req->comn_mid.src_length = op->src.length;
+ comp_req->comn_mid.dst_length =
+ comp_req->comp_pars.out_buffer_sz;
+
+ comp_req->comn_mid.src_data_addr =
+ rte_pktmbuf_mtophys_offset(op->m_src, op->src.offset);
+ comp_req->comn_mid.dest_data_addr =
+ rte_pktmbuf_mtophys_offset(op->m_dst, op->dst.offset);
+ }
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_LOG(DEBUG, "Direction: %s",
+ qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
+ "decompression" : "compression");
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message:", comp_req,
+ sizeof(struct icp_qat_fw_comp_req));
+#endif
+ return 0;
+}
+
+int
+qat_comp_process_response(void **op, uint8_t *resp)
+{
+ struct icp_qat_fw_comp_resp *resp_msg =
+ (struct icp_qat_fw_comp_resp *)resp;
+ struct rte_comp_op *rx_op = (struct rte_comp_op *)(uintptr_t)
+ (resp_msg->opaque_data);
+ struct qat_comp_xform *qat_xform = (struct qat_comp_xform *)
+ (rx_op->private_xform);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_LOG(DEBUG, "Direction: %s",
+ qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS ?
+ "decompression" : "compression");
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+ sizeof(struct icp_qat_fw_comp_resp));
+#endif
+
+ if (likely(qat_xform->qat_comp_request_type
+ != QAT_COMP_REQUEST_DECOMPRESS)) {
+ if (unlikely(ICP_QAT_FW_COMN_HDR_CNV_FLAG_GET(
+ resp_msg->comn_resp.hdr_flags)
+ == ICP_QAT_FW_COMP_NO_CNV)) {
+ rx_op->status = RTE_COMP_OP_STATUS_ERROR;
+ rx_op->debug_status = ERR_CODE_QAT_COMP_WRONG_FW;
+ *op = (void *)rx_op;
+ QAT_DP_LOG(ERR, "QAT has wrong firmware");
+ return 0;
+ }
+ }
+
+ if ((ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(resp_msg->comn_resp.comn_status)
+ | ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(
+ resp_msg->comn_resp.comn_status)) !=
+ ICP_QAT_FW_COMN_STATUS_FLAG_OK) {
+
+ rx_op->status = RTE_COMP_OP_STATUS_ERROR;
+ rx_op->debug_status =
+ *((uint16_t *)(&resp_msg->comn_resp.comn_error));
+	} else {
+		struct icp_qat_fw_resp_comp_pars *comp_resp =
+		(struct icp_qat_fw_resp_comp_pars *)&resp_msg->comp_resp_pars;
+
+ rx_op->status = RTE_COMP_OP_STATUS_SUCCESS;
+ rx_op->consumed = comp_resp->input_byte_counter;
+ rx_op->produced = comp_resp->output_byte_counter;
+
+ if (qat_xform->checksum_type != RTE_COMP_CHECKSUM_NONE) {
+ if (qat_xform->checksum_type == RTE_COMP_CHECKSUM_CRC32)
+ rx_op->output_chksum = comp_resp->curr_crc32;
+ else if (qat_xform->checksum_type ==
+ RTE_COMP_CHECKSUM_ADLER32)
+ rx_op->output_chksum = comp_resp->curr_adler_32;
+ else
+ rx_op->output_chksum = comp_resp->curr_chksum;
+ }
+ }
+ *op = (void *)rx_op;
+
+ return 0;
+}
+
+unsigned int
+qat_comp_xform_size(void)
+{
+ return RTE_ALIGN_CEIL(sizeof(struct qat_comp_xform), 8);
+}
+
+static void qat_comp_create_req_hdr(struct icp_qat_fw_comn_req_hdr *header,
+ enum qat_comp_request_type request)
+{
+ if (request == QAT_COMP_REQUEST_FIXED_COMP_STATELESS)
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
+ else if (request == QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS)
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
+ else if (request == QAT_COMP_REQUEST_DECOMPRESS)
+ header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
+
+ header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_COMP;
+ header->hdr_flags =
+ ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+
+ header->comn_req_flags = ICP_QAT_FW_COMN_FLAGS_BUILD(
+ QAT_COMN_CD_FLD_TYPE_16BYTE_DATA, QAT_COMN_PTR_TYPE_FLAT);
+}
+
+static int qat_comp_create_templates(struct qat_comp_xform *qat_xform,
+ const struct rte_memzone *interm_buff_mz __rte_unused,
+ const struct rte_comp_xform *xform)
+{
+ struct icp_qat_fw_comp_req *comp_req;
+ int comp_level, algo;
+ uint32_t req_par_flags;
+ int direction = ICP_QAT_HW_COMPRESSION_DIR_COMPRESS;
+
+ if (unlikely(qat_xform == NULL)) {
+ QAT_LOG(ERR, "Session was not created for this device");
+ return -EINVAL;
+ }
+
+ if (qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
+ direction = ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS;
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
+ req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
+ ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
+ ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_NO_CNV,
+ ICP_QAT_FW_COMP_NO_CNV_RECOVERY);
+
+ } else {
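+		/* Map compressdev levels to HW search depths:
+		 * 1 -> 1, 2 -> 4, 3 -> 8, 4..9 -> 16, PMD default -> 8.
+		 */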
+ if (xform->compress.level == RTE_COMP_LEVEL_PMD_DEFAULT)
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
+ else if (xform->compress.level == 1)
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_1;
+ else if (xform->compress.level == 2)
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_4;
+ else if (xform->compress.level == 3)
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_8;
+ else if (xform->compress.level >= 4 &&
+ xform->compress.level <= 9)
+ comp_level = ICP_QAT_HW_COMPRESSION_DEPTH_16;
+ else {
+ QAT_LOG(ERR, "compression level not supported");
+ return -EINVAL;
+ }
+ req_par_flags = ICP_QAT_FW_COMP_REQ_PARAM_FLAGS_BUILD(
+ ICP_QAT_FW_COMP_SOP, ICP_QAT_FW_COMP_EOP,
+ ICP_QAT_FW_COMP_BFINAL, ICP_QAT_FW_COMP_CNV,
+ ICP_QAT_FW_COMP_CNV_RECOVERY);
+ }
+
+ switch (xform->compress.algo) {
+ case RTE_COMP_ALGO_DEFLATE:
+ algo = ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE;
+ break;
+	case RTE_COMP_ALGO_LZS:
+	default:
+		/* LZS and NULL algorithms are not supported */
+		QAT_LOG(ERR, "compression algorithm not supported");
+		return -EINVAL;
+ }
+
+ comp_req = &qat_xform->qat_comp_req_tmpl;
+
+ /* Initialize header */
+ qat_comp_create_req_hdr(&comp_req->comn_hdr,
+ qat_xform->qat_comp_request_type);
+
+ comp_req->comn_hdr.serv_specif_flags = ICP_QAT_FW_COMP_FLAGS_BUILD(
+ ICP_QAT_FW_COMP_STATELESS_SESSION,
+ ICP_QAT_FW_COMP_NOT_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_NOT_ENH_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_NOT_DISABLE_TYPE0_ENH_AUTO_SELECT_BEST,
+ ICP_QAT_FW_COMP_DISABLE_SECURE_RAM_USED_AS_INTMD_BUF);
+
+ comp_req->cd_pars.sl.comp_slice_cfg_word[0] =
+ ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(
+ direction,
+ /* In CPM 1.6 only valid mode ! */
+ ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED, algo,
+ /* Translate level to depth */
+ comp_level, ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);
+
+ comp_req->comp_pars.initial_adler = 1;
+ comp_req->comp_pars.initial_crc32 = 0;
+ comp_req->comp_pars.req_par_flags = req_par_flags;
+
+
+ if (qat_xform->qat_comp_request_type ==
+ QAT_COMP_REQUEST_FIXED_COMP_STATELESS ||
+ qat_xform->qat_comp_request_type == QAT_COMP_REQUEST_DECOMPRESS) {
+ ICP_QAT_FW_COMN_NEXT_ID_SET(&comp_req->comp_cd_ctrl,
+ ICP_QAT_FW_SLICE_DRAM_WR);
+ ICP_QAT_FW_COMN_CURR_ID_SET(&comp_req->comp_cd_ctrl,
+ ICP_QAT_FW_SLICE_COMP);
+ } else if (qat_xform->qat_comp_request_type ==
+ QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS) {
+
+ QAT_LOG(ERR, "Dynamic huffman encoding not supported");
+ return -EINVAL;
+ }
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+ QAT_DP_HEXDUMP_LOG(DEBUG, "qat compression message template:", comp_req,
+ sizeof(struct icp_qat_fw_comp_req));
+#endif
+ return 0;
+}
+
+/**
+ * Create driver private_xform data.
+ *
+ * @param dev
+ * Compressdev device
+ * @param xform
+ * xform data from application
+ * @param private_xform
+ * ptr where handle of pmd's private_xform data should be stored
+ * @return
+ * - if successful returns 0
+ * and valid private_xform handle
+ * - <0 in error cases
+ * - Returns -EINVAL if input parameters are invalid.
+ * - Returns -ENOTSUP if comp device does not support the comp transform.
+ * - Returns -ENOMEM if the private_xform could not be allocated.
+ */
+int
+qat_comp_private_xform_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform,
+ void **private_xform)
+{
+ struct qat_comp_dev_private *qat = dev->data->dev_private;
+
+ if (unlikely(private_xform == NULL)) {
+ QAT_LOG(ERR, "QAT: private_xform parameter is NULL");
+ return -EINVAL;
+ }
+ if (unlikely(qat->xformpool == NULL)) {
+ QAT_LOG(ERR, "QAT device has no private_xform mempool");
+ return -ENOMEM;
+ }
+ if (rte_mempool_get(qat->xformpool, private_xform)) {
+ QAT_LOG(ERR, "Couldn't get object from qat xform mempool");
+ return -ENOMEM;
+ }
+
+ struct qat_comp_xform *qat_xform =
+ (struct qat_comp_xform *)*private_xform;
+
+	if (xform->type == RTE_COMP_COMPRESS) {
+		if (xform->compress.deflate.huffman ==
+				RTE_COMP_HUFFMAN_DYNAMIC) {
+			QAT_LOG(ERR,
+			"QAT device doesn't support dynamic compression");
+			rte_mempool_put(qat->xformpool, *private_xform);
+			return -ENOTSUP;
+		}
+
+		/* With no intermediate buffers available, HUFFMAN_DEFAULT
+		 * also maps to the fixed-Huffman stateless request.
+		 */
+		if (xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_FIXED ||
+		  ((xform->compress.deflate.huffman == RTE_COMP_HUFFMAN_DEFAULT)
+				   && qat->interm_buff_mz == NULL))
+			qat_xform->qat_comp_request_type =
+					QAT_COMP_REQUEST_FIXED_COMP_STATELESS;
+
+	} else {
+		qat_xform->qat_comp_request_type = QAT_COMP_REQUEST_DECOMPRESS;
+	}
+
+ qat_xform->checksum_type = xform->compress.chksum;
+
+ if (qat_comp_create_templates(qat_xform, qat->interm_buff_mz, xform)) {
+ QAT_LOG(ERR, "QAT: Problem with setting compression");
+ return -EINVAL;
+ }
+ return 0;
+}
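+
+/*
+ * Example (illustrative sketch only; "dev_id" is assumed to be an already
+ * configured compressdev instance, and the calls below are the public
+ * compressdev API, not part of this file):
+ *
+ *	struct rte_comp_xform xform = {
+ *		.type = RTE_COMP_COMPRESS,
+ *		.compress = {
+ *			.algo = RTE_COMP_ALGO_DEFLATE,
+ *			.deflate.huffman = RTE_COMP_HUFFMAN_FIXED,
+ *			.level = RTE_COMP_LEVEL_PMD_DEFAULT,
+ *			.chksum = RTE_COMP_CHECKSUM_NONE,
+ *		},
+ *	};
+ *	void *priv_xform;
+ *
+ *	if (rte_compressdev_private_xform_create(dev_id, &xform,
+ *			&priv_xform) == 0)
+ *		op->private_xform = priv_xform;
+ */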
+
+/**
+ * Free driver private_xform data.
+ *
+ * @param dev
+ * Compressdev device
+ * @param private_xform
+ * handle of pmd's private_xform data
+ * @return
+ * - 0 if successful
+ * - <0 in error cases
+ * - Returns -EINVAL if input parameters are invalid.
+ */
+int
+qat_comp_private_xform_free(struct rte_compressdev *dev __rte_unused,
+ void *private_xform)
+{
+ struct qat_comp_xform *qat_xform =
+ (struct qat_comp_xform *)private_xform;
+
+ if (qat_xform) {
+ memset(qat_xform, 0, qat_comp_xform_size());
+ struct rte_mempool *mp = rte_mempool_from_obj(qat_xform);
+
+ rte_mempool_put(mp, qat_xform);
+ return 0;
+ }
+ return -EINVAL;
+}
diff --git a/drivers/compress/qat/qat_comp.h b/drivers/compress/qat/qat_comp.h
new file mode 100644
index 00000000..8d315efb
--- /dev/null
+++ b/drivers/compress/qat/qat_comp.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_COMP_H_
+#define _QAT_COMP_H_
+
+#ifdef RTE_LIBRTE_COMPRESSDEV
+
+#include <rte_compressdev.h>
+#include <rte_compressdev_pmd.h>
+
+#include "qat_common.h"
+#include "icp_qat_hw.h"
+#include "icp_qat_fw_comp.h"
+#include "icp_qat_fw_la.h"
+
+#define ERR_CODE_QAT_COMP_WRONG_FW -99
+
+enum qat_comp_request_type {
+ QAT_COMP_REQUEST_FIXED_COMP_STATELESS,
+ QAT_COMP_REQUEST_DYNAMIC_COMP_STATELESS,
+ QAT_COMP_REQUEST_DECOMPRESS,
+ REQ_COMP_END
+};
+
+struct qat_comp_sgl {
+ qat_sgl_hdr;
+ struct qat_flat_buf buffers[RTE_PMD_QAT_COMP_SGL_MAX_SEGMENTS];
+} __rte_packed __rte_cache_aligned;
+
+struct qat_comp_op_cookie {
+ struct qat_comp_sgl qat_sgl_src;
+ struct qat_comp_sgl qat_sgl_dst;
+ phys_addr_t qat_sgl_src_phys_addr;
+ phys_addr_t qat_sgl_dst_phys_addr;
+};
+
+struct qat_comp_xform {
+ struct icp_qat_fw_comp_req qat_comp_req_tmpl;
+ enum qat_comp_request_type qat_comp_request_type;
+ enum rte_comp_checksum_type checksum_type;
+};
+
+int
+qat_comp_build_request(void *in_op, uint8_t *out_msg, void *op_cookie,
+ enum qat_device_gen qat_dev_gen __rte_unused);
+
+int
+qat_comp_process_response(void **op, uint8_t *resp);
+
+
+int
+qat_comp_private_xform_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform,
+ void **private_xform);
+
+int
+qat_comp_private_xform_free(struct rte_compressdev *dev, void *private_xform);
+
+unsigned int
+qat_comp_xform_size(void);
+
+#endif /* RTE_LIBRTE_COMPRESSDEV */
+#endif /* _QAT_COMP_H_ */
diff --git a/drivers/compress/qat/qat_comp_pmd.c b/drivers/compress/qat/qat_comp_pmd.c
new file mode 100644
index 00000000..b89975fc
--- /dev/null
+++ b/drivers/compress/qat/qat_comp_pmd.c
@@ -0,0 +1,429 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#include "qat_comp.h"
+#include "qat_comp_pmd.h"
+
+static const struct rte_compressdev_capabilities qat_comp_gen_capabilities[] = {
+ {/* COMPRESSION - deflate */
+ .algo = RTE_COMP_ALGO_DEFLATE,
+ .comp_feature_flags = RTE_COMP_FF_MULTI_PKT_CHECKSUM |
+ RTE_COMP_FF_CRC32_CHECKSUM |
+ RTE_COMP_FF_ADLER32_CHECKSUM |
+ RTE_COMP_FF_CRC32_ADLER32_CHECKSUM |
+ RTE_COMP_FF_SHAREABLE_PRIV_XFORM |
+ RTE_COMP_FF_HUFFMAN_FIXED |
+ RTE_COMP_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_COMP_FF_OOP_SGL_IN_LB_OUT |
+ RTE_COMP_FF_OOP_LB_IN_SGL_OUT,
+ .window_size = {.min = 15, .max = 15, .increment = 0} },
+ {RTE_COMP_ALGO_LIST_END, 0, {0, 0, 0} } };
+
+static void
+qat_comp_stats_get(struct rte_compressdev *dev,
+ struct rte_compressdev_stats *stats)
+{
+ struct qat_common_stats qat_stats = {0};
+ struct qat_comp_dev_private *qat_priv;
+
+ if (stats == NULL || dev == NULL) {
+ QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
+ return;
+ }
+ qat_priv = dev->data->dev_private;
+
+ qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_COMPRESSION);
+ stats->enqueued_count = qat_stats.enqueued_count;
+ stats->dequeued_count = qat_stats.dequeued_count;
+ stats->enqueue_err_count = qat_stats.enqueue_err_count;
+ stats->dequeue_err_count = qat_stats.dequeue_err_count;
+}
+
+static void
+qat_comp_stats_reset(struct rte_compressdev *dev)
+{
+ struct qat_comp_dev_private *qat_priv;
+
+ if (dev == NULL) {
+ QAT_LOG(ERR, "invalid compressdev ptr %p", dev);
+ return;
+ }
+ qat_priv = dev->data->dev_private;
+
+	qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_COMPRESSION);
+}
+
+static int
+qat_comp_qp_release(struct rte_compressdev *dev, uint16_t queue_pair_id)
+{
+ struct qat_comp_dev_private *qat_private = dev->data->dev_private;
+
+ QAT_LOG(DEBUG, "Release comp qp %u on device %d",
+ queue_pair_id, dev->data->dev_id);
+
+ qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][queue_pair_id]
+ = NULL;
+
+ return qat_qp_release((struct qat_qp **)
+ &(dev->data->queue_pairs[queue_pair_id]));
+}
+
+static int
+qat_comp_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+ uint32_t max_inflight_ops, int socket_id)
+{
+ struct qat_qp *qp;
+ int ret = 0;
+ uint32_t i;
+ struct qat_qp_config qat_qp_conf;
+
+ struct qat_qp **qp_addr =
+ (struct qat_qp **)&(dev->data->queue_pairs[qp_id]);
+ struct qat_comp_dev_private *qat_private = dev->data->dev_private;
+ const struct qat_qp_hw_data *comp_hw_qps =
+ qat_gen_config[qat_private->qat_dev->qat_dev_gen]
+ .qp_hw_data[QAT_SERVICE_COMPRESSION];
+ const struct qat_qp_hw_data *qp_hw_data = comp_hw_qps + qp_id;
+
+ /* If qp is already in use free ring memory and qp metadata. */
+ if (*qp_addr != NULL) {
+ ret = qat_comp_qp_release(dev, qp_id);
+ if (ret < 0)
+ return ret;
+ }
+ if (qp_id >= qat_qps_per_service(comp_hw_qps,
+ QAT_SERVICE_COMPRESSION)) {
+ QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id);
+ return -EINVAL;
+ }
+
+ qat_qp_conf.hw = qp_hw_data;
+ qat_qp_conf.build_request = qat_comp_build_request;
+ qat_qp_conf.cookie_size = sizeof(struct qat_comp_op_cookie);
+ qat_qp_conf.nb_descriptors = max_inflight_ops;
+ qat_qp_conf.socket_id = socket_id;
+ qat_qp_conf.service_str = "comp";
+
+ ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf);
+ if (ret != 0)
+ return ret;
+
+ /* store a link to the qp in the qat_pci_device */
+ qat_private->qat_dev->qps_in_use[QAT_SERVICE_COMPRESSION][qp_id]
+ = *qp_addr;
+
+ qp = (struct qat_qp *)*qp_addr;
+
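+	/* Precompute each cookie's SGL table IOVAs once at setup time so the
+	 * data path only needs to fill in the entries.
+	 */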
+ for (i = 0; i < qp->nb_descriptors; i++) {
+
+ struct qat_comp_op_cookie *cookie =
+ qp->op_cookies[i];
+
+ cookie->qat_sgl_src_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_comp_op_cookie,
+ qat_sgl_src);
+
+ cookie->qat_sgl_dst_phys_addr =
+ rte_mempool_virt2iova(cookie) +
+ offsetof(struct qat_comp_op_cookie,
+ qat_sgl_dst);
+ }
+
+ return ret;
+}
+
+static struct rte_mempool *
+qat_comp_create_xform_pool(struct qat_comp_dev_private *comp_dev,
+ uint32_t num_elements)
+{
+ char xform_pool_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *mp;
+
+ snprintf(xform_pool_name, RTE_MEMPOOL_NAMESIZE,
+ "%s_xforms", comp_dev->qat_dev->name);
+
+ QAT_LOG(DEBUG, "xformpool: %s", xform_pool_name);
+ mp = rte_mempool_lookup(xform_pool_name);
+
+ if (mp != NULL) {
+ QAT_LOG(DEBUG, "xformpool already created");
+ if (mp->size != num_elements) {
+ QAT_LOG(DEBUG, "xformpool wrong size - delete it");
+ rte_mempool_free(mp);
+ mp = NULL;
+ comp_dev->xformpool = NULL;
+ }
+ }
+
+ if (mp == NULL)
+ mp = rte_mempool_create(xform_pool_name,
+ num_elements,
+ qat_comp_xform_size(), 0, 0,
+ NULL, NULL, NULL, NULL, rte_socket_id(),
+ 0);
+ if (mp == NULL) {
+ QAT_LOG(ERR, "Err creating mempool %s w %d elements of size %d",
+ xform_pool_name, num_elements, qat_comp_xform_size());
+ return NULL;
+ }
+
+ return mp;
+}
+
+static void
+_qat_comp_dev_config_clear(struct qat_comp_dev_private *comp_dev)
+{
+ /* Free private_xform pool */
+ if (comp_dev->xformpool) {
+ /* Free internal mempool for private xforms */
+ rte_mempool_free(comp_dev->xformpool);
+ comp_dev->xformpool = NULL;
+ }
+}
+
+static int
+qat_comp_dev_config(struct rte_compressdev *dev,
+ struct rte_compressdev_config *config)
+{
+ struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
+ int ret = 0;
+
+ if (config->max_nb_streams != 0) {
+ QAT_LOG(ERR,
+ "QAT device does not support STATEFUL so max_nb_streams must be 0");
+ return -EINVAL;
+ }
+
+ comp_dev->xformpool = qat_comp_create_xform_pool(comp_dev,
+ config->max_nb_priv_xforms);
+	if (comp_dev->xformpool == NULL) {
+		ret = -ENOMEM;
+		goto error_out;
+	}
+ return 0;
+
+error_out:
+ _qat_comp_dev_config_clear(comp_dev);
+ return ret;
+}
+
+static int
+qat_comp_dev_start(struct rte_compressdev *dev __rte_unused)
+{
+ return 0;
+}
+
+static void
+qat_comp_dev_stop(struct rte_compressdev *dev __rte_unused)
+{
+
+}
+
+static int
+qat_comp_dev_close(struct rte_compressdev *dev)
+{
+ int i;
+ int ret = 0;
+ struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
+
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ ret = qat_comp_qp_release(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ _qat_comp_dev_config_clear(comp_dev);
+
+ return ret;
+}
+
+
+static void
+qat_comp_dev_info_get(struct rte_compressdev *dev,
+ struct rte_compressdev_info *info)
+{
+ struct qat_comp_dev_private *comp_dev = dev->data->dev_private;
+ const struct qat_qp_hw_data *comp_hw_qps =
+ qat_gen_config[comp_dev->qat_dev->qat_dev_gen]
+ .qp_hw_data[QAT_SERVICE_COMPRESSION];
+
+ if (info != NULL) {
+ info->max_nb_queue_pairs =
+ qat_qps_per_service(comp_hw_qps,
+ QAT_SERVICE_COMPRESSION);
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = comp_dev->qat_dev_capabilities;
+ }
+}
+
+static uint16_t
+qat_comp_pmd_enqueue_op_burst(void *qp, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_enqueue_op_burst(qp, (void **)ops, nb_ops);
+}
+
+static uint16_t
+qat_comp_pmd_dequeue_op_burst(void *qp, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ return qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+}
+
+static uint16_t
+qat_comp_pmd_enq_deq_dummy_op_burst(void *qp __rte_unused,
+ struct rte_comp_op **ops __rte_unused,
+ uint16_t nb_ops __rte_unused)
+{
+ QAT_DP_LOG(ERR, "QAT PMD detected wrong FW version !");
+ return 0;
+}
+
+static struct rte_compressdev_ops compress_qat_dummy_ops = {
+
+ /* Device related operations */
+ .dev_configure = NULL,
+ .dev_start = NULL,
+ .dev_stop = qat_comp_dev_stop,
+ .dev_close = qat_comp_dev_close,
+ .dev_infos_get = NULL,
+
+ .stats_get = NULL,
+ .stats_reset = qat_comp_stats_reset,
+ .queue_pair_setup = NULL,
+ .queue_pair_release = qat_comp_qp_release,
+
+ /* Compression related operations */
+ .private_xform_create = NULL,
+ .private_xform_free = qat_comp_private_xform_free
+};
+
+static uint16_t
+qat_comp_pmd_dequeue_frst_op_burst(void *qp, struct rte_comp_op **ops,
+ uint16_t nb_ops)
+{
+ uint16_t ret = qat_dequeue_op_burst(qp, (void **)ops, nb_ops);
+ struct qat_qp *tmp_qp = (struct qat_qp *)qp;
+
+ if (ret) {
+ if ((*ops)->debug_status ==
+ (uint64_t)ERR_CODE_QAT_COMP_WRONG_FW) {
+ tmp_qp->qat_dev->comp_dev->compressdev->enqueue_burst =
+ qat_comp_pmd_enq_deq_dummy_op_burst;
+ tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
+ qat_comp_pmd_enq_deq_dummy_op_burst;
+
+ tmp_qp->qat_dev->comp_dev->compressdev->dev_ops =
+ &compress_qat_dummy_ops;
+ QAT_LOG(ERR, "QAT PMD detected wrong FW version !");
+
+ } else {
+ tmp_qp->qat_dev->comp_dev->compressdev->dequeue_burst =
+ qat_comp_pmd_dequeue_op_burst;
+ }
+ }
+ return ret;
+}
+
+static struct rte_compressdev_ops compress_qat_ops = {
+
+ /* Device related operations */
+ .dev_configure = qat_comp_dev_config,
+ .dev_start = qat_comp_dev_start,
+ .dev_stop = qat_comp_dev_stop,
+ .dev_close = qat_comp_dev_close,
+ .dev_infos_get = qat_comp_dev_info_get,
+
+ .stats_get = qat_comp_stats_get,
+ .stats_reset = qat_comp_stats_reset,
+ .queue_pair_setup = qat_comp_qp_setup,
+ .queue_pair_release = qat_comp_qp_release,
+
+ /* Compression related operations */
+ .private_xform_create = qat_comp_private_xform_create,
+ .private_xform_free = qat_comp_private_xform_free
+};
+
+int
+qat_comp_dev_create(struct qat_pci_device *qat_pci_dev)
+{
+ if (qat_pci_dev->qat_dev_gen == QAT_GEN1) {
+ QAT_LOG(ERR, "Compression PMD not supported on QAT dh895xcc");
+ return 0;
+ }
+
+ struct rte_compressdev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = qat_pci_dev->pci_dev->device.numa_node,
+ };
+ char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ struct rte_compressdev *compressdev;
+ struct qat_comp_dev_private *comp_dev;
+
+ snprintf(name, RTE_COMPRESSDEV_NAME_MAX_LEN, "%s_%s",
+ qat_pci_dev->name, "comp");
+ QAT_LOG(DEBUG, "Creating QAT COMP device %s", name);
+
+ compressdev = rte_compressdev_pmd_create(name,
+ &qat_pci_dev->pci_dev->device,
+ sizeof(struct qat_comp_dev_private),
+ &init_params);
+
+ if (compressdev == NULL)
+ return -ENODEV;
+
+ compressdev->dev_ops = &compress_qat_ops;
+
+ compressdev->enqueue_burst = qat_comp_pmd_enqueue_op_burst;
+ compressdev->dequeue_burst = qat_comp_pmd_dequeue_frst_op_burst;
+
+ compressdev->feature_flags = RTE_COMPDEV_FF_HW_ACCELERATED;
+
+ comp_dev = compressdev->data->dev_private;
+ comp_dev->qat_dev = qat_pci_dev;
+ comp_dev->compressdev = compressdev;
+ qat_pci_dev->comp_dev = comp_dev;
+
+ switch (qat_pci_dev->qat_dev_gen) {
+ case QAT_GEN1:
+ case QAT_GEN2:
+ comp_dev->qat_dev_capabilities = qat_comp_gen_capabilities;
+ break;
+ default:
+ comp_dev->qat_dev_capabilities = qat_comp_gen_capabilities;
+ QAT_LOG(DEBUG,
+ "QAT gen %d capabilities unknown, default to GEN1",
+ qat_pci_dev->qat_dev_gen);
+ break;
+ }
+
+ QAT_LOG(DEBUG,
+ "Created QAT COMP device %s as compressdev instance %d",
+ name, compressdev->data->dev_id);
+ return 0;
+}
+
+int
+qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+ struct qat_comp_dev_private *comp_dev;
+
+ if (qat_pci_dev == NULL)
+ return -ENODEV;
+
+ comp_dev = qat_pci_dev->comp_dev;
+ if (comp_dev == NULL)
+ return 0;
+
+ /* clean up any resources used by the device */
+ qat_comp_dev_close(comp_dev->compressdev);
+
+ rte_compressdev_pmd_destroy(comp_dev->compressdev);
+ qat_pci_dev->comp_dev = NULL;
+
+ return 0;
+}
diff --git a/drivers/compress/qat/qat_comp_pmd.h b/drivers/compress/qat/qat_comp_pmd.h
new file mode 100644
index 00000000..9ad2a283
--- /dev/null
+++ b/drivers/compress/qat/qat_comp_pmd.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_COMP_PMD_H_
+#define _QAT_COMP_PMD_H_
+
+#ifdef RTE_LIBRTE_COMPRESSDEV
+
+#include <rte_compressdev.h>
+#include <rte_compressdev_pmd.h>
+
+#include "qat_device.h"
+
+/** private data structure for a QAT compression device.
+ * This QAT device is a device offering only a compression service,
+ * there can be one of these on each qat_pci_device (VF).
+ */
+struct qat_comp_dev_private {
+ struct qat_pci_device *qat_dev;
+ /**< The qat pci device hosting the service */
+ struct rte_compressdev *compressdev;
+ /**< The pointer to this compression device structure */
+ const struct rte_compressdev_capabilities *qat_dev_capabilities;
+ /* QAT device compression capabilities */
+ const struct rte_memzone *interm_buff_mz;
+ /**< The device's memory for intermediate buffers */
+ struct rte_mempool *xformpool;
+ /**< The device's pool for qat_comp_xforms */
+};
+
+int
+qat_comp_dev_create(struct qat_pci_device *qat_pci_dev);
+
+int
+qat_comp_dev_destroy(struct qat_pci_device *qat_pci_dev);
+
+#endif /* RTE_LIBRTE_COMPRESSDEV */
+#endif /* _QAT_COMP_PMD_H_ */
diff --git a/drivers/compress/qat/rte_pmd_qat_version.map b/drivers/compress/qat/rte_pmd_qat_version.map
new file mode 100644
index 00000000..ad6e191e
--- /dev/null
+++ b/drivers/compress/qat/rte_pmd_qat_version.map
@@ -0,0 +1,3 @@
+DPDK_18.08 {
+ local: *;
+};
diff --git a/drivers/compress/zlib/Makefile b/drivers/compress/zlib/Makefile
new file mode 100644
index 00000000..5cf8de6f
--- /dev/null
+++ b/drivers/compress/zlib/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium Networks
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_zlib.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_zlib_version.map
+
+# external library dependencies
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring -lz
+LDLIBS += -lrte_compressdev
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ZLIB) += zlib_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_ZLIB) += zlib_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/compress/zlib/meson.build b/drivers/compress/zlib/meson.build
new file mode 100644
index 00000000..7748de2d
--- /dev/null
+++ b/drivers/compress/zlib/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium Networks
+
+dep = dependency('zlib', required: false)
+if not dep.found()
+ build = false
+endif
+
+deps += 'bus_vdev'
+sources = files('zlib_pmd.c', 'zlib_pmd_ops.c')
+ext_deps += dep
+pkgconfig_extra_libs += '-lz'
+
+allow_experimental_apis = true
diff --git a/drivers/compress/zlib/rte_pmd_zlib_version.map b/drivers/compress/zlib/rte_pmd_zlib_version.map
new file mode 100644
index 00000000..ad6e191e
--- /dev/null
+++ b/drivers/compress/zlib/rte_pmd_zlib_version.map
@@ -0,0 +1,3 @@
+DPDK_18.08 {
+ local: *;
+};
diff --git a/drivers/compress/zlib/zlib_pmd.c b/drivers/compress/zlib/zlib_pmd.c
new file mode 100644
index 00000000..7d6871b1
--- /dev/null
+++ b/drivers/compress/zlib/zlib_pmd.c
@@ -0,0 +1,436 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+
+#include "zlib_pmd_private.h"
+
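+int zlib_logtype_driver;
+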
+/** Advance to the next mbuf in the chain and refresh the data pointer
+ * and length; evaluates to 0 when there is no next mbuf.
+ */
+#define COMPUTE_BUF(mbuf, data, len) \
+ ((mbuf = mbuf->next) ? \
+ (data = rte_pktmbuf_mtod(mbuf, uint8_t *)), \
+ (len = rte_pktmbuf_data_len(mbuf)) : 0)
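
A short illustrative sketch (not part of this patch) of how COMPUTE_BUF
drives a walk over a chained mbuf; the byte-counting body is a stand-in for
real per-segment work.

#include <stdint.h>
#include <rte_mbuf.h>

static uint32_t
example_walk_chain(struct rte_mbuf *mbuf)
{
	uint8_t *data = rte_pktmbuf_mtod(mbuf, uint8_t *);
	uint32_t len = rte_pktmbuf_data_len(mbuf);
	uint32_t total = 0;

	do {
		(void)data;	/* per-segment work would consume 'data' */
		total += len;
	} while (COMPUTE_BUF(mbuf, data, len));

	return total;
}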
+
+static void
+process_zlib_deflate(struct rte_comp_op *op, z_stream *strm)
+{
+ int ret, flush, fin_flush;
+ struct rte_mbuf *mbuf_src = op->m_src;
+ struct rte_mbuf *mbuf_dst = op->m_dst;
+
+ switch (op->flush_flag) {
+ case RTE_COMP_FLUSH_FULL:
+ case RTE_COMP_FLUSH_FINAL:
+ fin_flush = Z_FINISH;
+ break;
+	default:
+		op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+		ZLIB_PMD_ERR("Invalid flush value\n");
+		return;
+	}
+
+ if (unlikely(!strm)) {
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ ZLIB_PMD_ERR("Invalid z_stream\n");
+ return;
+ }
+ /* Update z_stream with the inputs provided by application */
+ strm->next_in = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->src.offset);
+
+ strm->avail_in = rte_pktmbuf_data_len(mbuf_src) - op->src.offset;
+
+ strm->next_out = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->dst.offset);
+
+ strm->avail_out = rte_pktmbuf_data_len(mbuf_dst) - op->dst.offset;
+
+ /* Set flush value to NO_FLUSH unless it is last mbuf */
+ flush = Z_NO_FLUSH;
+ /* Initialize status to SUCCESS */
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+
+ do {
+ /* Set flush value to Z_FINISH for last block */
+ if ((op->src.length - strm->total_in) <= strm->avail_in) {
+ strm->avail_in = (op->src.length - strm->total_in);
+ flush = fin_flush;
+ }
+ do {
+ ret = deflate(strm, flush);
+ if (unlikely(ret == Z_STREAM_ERROR)) {
+ /* error return, do not process further */
+ op->status = RTE_COMP_OP_STATUS_ERROR;
+ goto def_end;
+ }
+ /* Break if Z_STREAM_END is encountered */
+ if (ret == Z_STREAM_END)
+ goto def_end;
+
+ /* Keep looping until input mbuf is consumed.
+ * Exit if destination mbuf gets exhausted.
+ */
+ } while ((strm->avail_out == 0) &&
+ COMPUTE_BUF(mbuf_dst, strm->next_out, strm->avail_out));
+
+ if (!strm->avail_out) {
+ /* there is no space for compressed output */
+ op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ break;
+ }
+
+ /* Update source buffer to next mbuf
+ * Exit if input buffers are fully consumed
+ */
+ } while (COMPUTE_BUF(mbuf_src, strm->next_in, strm->avail_in));
+
+def_end:
+ /* Update op stats */
+ switch (op->status) {
+ case RTE_COMP_OP_STATUS_SUCCESS:
+ op->consumed += strm->total_in;
+ /* Fall-through */
+ case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
+ op->produced += strm->total_out;
+ break;
+ default:
+ ZLIB_PMD_ERR("stats not updated for status:%d\n",
+ op->status);
+ }
+
+ deflateReset(strm);
+}
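
For reference, a hedged sketch (not part of this patch) of the rte_comp_op
fields that process_zlib_deflate() consumes, filled for a single stateless
operation; 'op_pool', the mbufs and 'priv_xform' are assumed to be set up
by the caller.

#include <rte_comp.h>
#include <rte_mbuf.h>

static struct rte_comp_op *
example_build_deflate_op(struct rte_mempool *op_pool,
		struct rte_mbuf *src, struct rte_mbuf *dst,
		void *priv_xform)
{
	struct rte_comp_op *op = rte_comp_op_alloc(op_pool);

	if (op == NULL)
		return NULL;
	op->op_type = RTE_COMP_OP_STATELESS;
	op->m_src = src;
	op->m_dst = dst;
	op->src.offset = 0;
	op->src.length = rte_pktmbuf_pkt_len(src);
	op->dst.offset = 0;
	op->flush_flag = RTE_COMP_FLUSH_FINAL;	/* mapped to Z_FINISH above */
	op->private_xform = priv_xform;
	return op;
}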
+
+static void
+process_zlib_inflate(struct rte_comp_op *op, z_stream *strm)
+{
+ int ret, flush;
+ struct rte_mbuf *mbuf_src = op->m_src;
+ struct rte_mbuf *mbuf_dst = op->m_dst;
+
+ if (unlikely(!strm)) {
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+ ZLIB_PMD_ERR("Invalid z_stream\n");
+ return;
+ }
+ strm->next_in = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->src.offset);
+
+ strm->avail_in = rte_pktmbuf_data_len(mbuf_src) - op->src.offset;
+
+ strm->next_out = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->dst.offset);
+
+ strm->avail_out = rte_pktmbuf_data_len(mbuf_dst) - op->dst.offset;
+
+	/* Ignore the flush value provided by the application for decompression */
+ flush = Z_NO_FLUSH;
+ /* initialize status to SUCCESS */
+ op->status = RTE_COMP_OP_STATUS_SUCCESS;
+
+ do {
+ do {
+ ret = inflate(strm, flush);
+
+			switch (ret) {
+			case Z_NEED_DICT:
+				/* Fall-through */
+			case Z_DATA_ERROR:
+				/* Fall-through */
+			case Z_MEM_ERROR:
+				/* Fall-through */
+			case Z_STREAM_ERROR:
+				op->status = RTE_COMP_OP_STATUS_ERROR;
+				/* Fall-through */
+			case Z_STREAM_END:
+				/* no further computation needed if
+				 * Z_STREAM_END is encountered
+				 */
+				goto inf_end;
+			default:
+				/* success */
+				break;
+			}
+ /* Keep looping until input mbuf is consumed.
+ * Exit if destination mbuf gets exhausted.
+ */
+ } while ((strm->avail_out == 0) &&
+ COMPUTE_BUF(mbuf_dst, strm->next_out, strm->avail_out));
+
+ if (!strm->avail_out) {
+ /* there is no more space for decompressed output */
+ op->status = RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED;
+ break;
+ }
+ /* Read next input buffer to be processed, exit if compressed
+ * blocks are fully read
+ */
+ } while (COMPUTE_BUF(mbuf_src, strm->next_in, strm->avail_in));
+
+inf_end:
+ /* Update op stats */
+ switch (op->status) {
+ case RTE_COMP_OP_STATUS_SUCCESS:
+ op->consumed += strm->total_in;
+ /* Fall-through */
+ case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
+ op->produced += strm->total_out;
+ break;
+ default:
+		ZLIB_PMD_ERR("stats not updated for status:%d\n",
+				op->status);
+ }
+
+ inflateReset(strm);
+}
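
A hedged consumer-side sketch (not part of this patch) mirroring the
status/stats contract of the two handlers above: on success both 'consumed'
and 'produced' are updated, while on OUT_OF_SPACE_TERMINATED only
'produced' is.

#include <errno.h>
#include <rte_comp.h>

static int
example_check_op(const struct rte_comp_op *op)
{
	switch (op->status) {
	case RTE_COMP_OP_STATUS_SUCCESS:
		return 0;	/* consumed and produced are both valid */
	case RTE_COMP_OP_STATUS_OUT_OF_SPACE_TERMINATED:
		return -ENOSPC;	/* retry with a larger destination buffer */
	default:
		return -EIO;	/* invalid args or processing error */
	}
}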
+
+/** Process comp operation for mbuf */
+static inline int
+process_zlib_op(struct zlib_qp *qp, struct rte_comp_op *op)
+{
+ struct zlib_stream *stream;
+ struct zlib_priv_xform *private_xform;
+
+ if ((op->op_type == RTE_COMP_OP_STATEFUL) ||
+ (op->src.offset > rte_pktmbuf_data_len(op->m_src)) ||
+ (op->dst.offset > rte_pktmbuf_data_len(op->m_dst))) {
+ op->status = RTE_COMP_OP_STATUS_INVALID_ARGS;
+		ZLIB_PMD_ERR("Invalid source/destination buffer offsets or "
+				"unsupported stateful operation requested\n");
+ } else {
+ private_xform = (struct zlib_priv_xform *)op->private_xform;
+ stream = &private_xform->stream;
+ stream->comp(op, &stream->strm);
+ }
+ /* whatever is out of op, put it into completion queue with
+ * its status
+ */
+ return rte_ring_enqueue(qp->processed_pkts, (void *)op);
+}
+
+/** Parse comp xform and set private xform/Stream parameters */
+int
+zlib_set_stream_parameters(const struct rte_comp_xform *xform,
+ struct zlib_stream *stream)
+{
+ int strategy, level, wbits;
+ z_stream *strm = &stream->strm;
+
+ /* allocate deflate state */
+ strm->zalloc = Z_NULL;
+ strm->zfree = Z_NULL;
+ strm->opaque = Z_NULL;
+
+ switch (xform->type) {
+ case RTE_COMP_COMPRESS:
+ stream->comp = process_zlib_deflate;
+ stream->free = deflateEnd;
+		/* Compression window bits */
+ switch (xform->compress.algo) {
+ case RTE_COMP_ALGO_DEFLATE:
+ wbits = -(xform->compress.window_size);
+ break;
+ default:
+ ZLIB_PMD_ERR("Compression algorithm not supported\n");
+ return -1;
+ }
+		/* Compression level */
+ switch (xform->compress.level) {
+ case RTE_COMP_LEVEL_PMD_DEFAULT:
+ level = Z_DEFAULT_COMPRESSION;
+ break;
+ case RTE_COMP_LEVEL_NONE:
+ level = Z_NO_COMPRESSION;
+ break;
+ case RTE_COMP_LEVEL_MIN:
+ level = Z_BEST_SPEED;
+ break;
+ case RTE_COMP_LEVEL_MAX:
+ level = Z_BEST_COMPRESSION;
+ break;
+ default:
+ level = xform->compress.level;
+ if (level < RTE_COMP_LEVEL_MIN ||
+ level > RTE_COMP_LEVEL_MAX) {
+ ZLIB_PMD_ERR("Compression level %d "
+ "not supported\n",
+ level);
+ return -1;
+ }
+ break;
+ }
+		/* Compression strategy */
+ switch (xform->compress.deflate.huffman) {
+ case RTE_COMP_HUFFMAN_DEFAULT:
+ strategy = Z_DEFAULT_STRATEGY;
+ break;
+ case RTE_COMP_HUFFMAN_FIXED:
+ strategy = Z_FIXED;
+ break;
+ case RTE_COMP_HUFFMAN_DYNAMIC:
+ strategy = Z_DEFAULT_STRATEGY;
+ break;
+ default:
+ ZLIB_PMD_ERR("Compression strategy not supported\n");
+ return -1;
+ }
+ if (deflateInit2(strm, level,
+ Z_DEFLATED, wbits,
+ DEF_MEM_LEVEL, strategy) != Z_OK) {
+ ZLIB_PMD_ERR("Deflate init failed\n");
+ return -1;
+ }
+ break;
+
+ case RTE_COMP_DECOMPRESS:
+ stream->comp = process_zlib_inflate;
+ stream->free = inflateEnd;
+		/* Decompression window bits */
+ switch (xform->decompress.algo) {
+ case RTE_COMP_ALGO_DEFLATE:
+ wbits = -(xform->decompress.window_size);
+ break;
+ default:
+			ZLIB_PMD_ERR("Decompression algorithm not supported\n");
+ return -1;
+ }
+
+ if (inflateInit2(strm, wbits) != Z_OK) {
+ ZLIB_PMD_ERR("Inflate init failed\n");
+ return -1;
+ }
+ break;
+ default:
+ return -1;
+ }
+ return 0;
+}
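
An illustrative xform (not part of this patch) that this function accepts:
dynamic Huffman at the PMD default level with a 2^15 (32 KB) window, which
the code above turns into deflateInit2(strm, Z_DEFAULT_COMPRESSION,
Z_DEFLATED, -15, DEF_MEM_LEVEL, Z_DEFAULT_STRATEGY).

#include <rte_comp.h>

static const struct rte_comp_xform example_deflate_xform = {
	.type = RTE_COMP_COMPRESS,
	.compress = {
		.algo = RTE_COMP_ALGO_DEFLATE,
		.deflate.huffman = RTE_COMP_HUFFMAN_DYNAMIC,
		.level = RTE_COMP_LEVEL_PMD_DEFAULT,
		.window_size = 15,	/* becomes wbits = -15 (raw deflate) */
	},
};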
+
+static uint16_t
+zlib_pmd_enqueue_burst(void *queue_pair,
+ struct rte_comp_op **ops, uint16_t nb_ops)
+{
+ struct zlib_qp *qp = queue_pair;
+ int ret;
+ uint16_t i;
+ uint16_t enqd = 0;
+ for (i = 0; i < nb_ops; i++) {
+ ret = process_zlib_op(qp, ops[i]);
+ if (unlikely(ret < 0)) {
+ /* increment count if failed to push to completion
+ * queue
+ */
+ qp->qp_stats.enqueue_err_count++;
+ } else {
+ qp->qp_stats.enqueued_count++;
+ enqd++;
+ }
+ }
+ return enqd;
+}
+
+static uint16_t
+zlib_pmd_dequeue_burst(void *queue_pair,
+ struct rte_comp_op **ops, uint16_t nb_ops)
+{
+ struct zlib_qp *qp = queue_pair;
+
+ unsigned int nb_dequeued = 0;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
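
An application-side sketch (not part of this patch): because
process_zlib_op() completes each op synchronously at enqueue time and parks
it on the ring, a dequeue issued right after the enqueue can already return
the finished ops. 'dev_id' and queue pair 0 are assumed to be configured
and started.

#include <rte_compressdev.h>

static uint16_t
example_process_burst(uint8_t dev_id, struct rte_comp_op **ops,
		uint16_t nb_ops)
{
	uint16_t enqd = rte_compressdev_enqueue_burst(dev_id, 0, ops, nb_ops);

	return rte_compressdev_dequeue_burst(dev_id, 0, ops, enqd);
}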
+
+static int
+zlib_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_compressdev_pmd_init_params *init_params)
+{
+ struct rte_compressdev *dev;
+
+ dev = rte_compressdev_pmd_create(name, &vdev->device,
+ sizeof(struct zlib_private), init_params);
+ if (dev == NULL) {
+ ZLIB_PMD_ERR("driver %s: create failed", init_params->name);
+ return -ENODEV;
+ }
+
+ dev->dev_ops = rte_zlib_pmd_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = zlib_pmd_dequeue_burst;
+ dev->enqueue_burst = zlib_pmd_enqueue_burst;
+
+ return 0;
+}
+
+static int
+zlib_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_compressdev_pmd_init_params init_params = {
+ "",
+ rte_socket_id()
+ };
+ const char *name;
+ const char *input_args;
+ int retval;
+
+ name = rte_vdev_device_name(vdev);
+
+ if (name == NULL)
+ return -EINVAL;
+
+ input_args = rte_vdev_device_args(vdev);
+
+ retval = rte_compressdev_pmd_parse_input_args(&init_params, input_args);
+ if (retval < 0) {
+		ZLIB_PMD_ERR("Failed to parse initialisation arguments[%s]\n",
+				input_args);
+ return -EINVAL;
+ }
+
+ return zlib_create(name, vdev, &init_params);
+}
+
+static int
+zlib_remove(struct rte_vdev_device *vdev)
+{
+ struct rte_compressdev *compressdev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ compressdev = rte_compressdev_pmd_get_named_dev(name);
+ if (compressdev == NULL)
+ return -ENODEV;
+
+ return rte_compressdev_pmd_destroy(compressdev);
+}
+
+static struct rte_vdev_driver zlib_pmd_drv = {
+ .probe = zlib_probe,
+ .remove = zlib_remove
+};
+
+RTE_PMD_REGISTER_VDEV(COMPRESSDEV_NAME_ZLIB_PMD, zlib_pmd_drv);
+RTE_INIT(zlib_init_log);
+
+static void
+zlib_init_log(void)
+{
+ zlib_logtype_driver = rte_log_register("pmd.compress.zlib");
+ if (zlib_logtype_driver >= 0)
+ rte_log_set_level(zlib_logtype_driver, RTE_LOG_INFO);
+}
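
A hedged usage sketch (not part of this patch): the PMD can be instantiated
from application code as below, or equivalently from the EAL command line
with --vdev=compress_zlib.

#include <rte_bus_vdev.h>

static int
example_create_zlib_vdev(void)
{
	/* name must match COMPRESSDEV_NAME_ZLIB_PMD */
	return rte_vdev_init("compress_zlib", NULL);
}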
diff --git a/drivers/compress/zlib/zlib_pmd_ops.c b/drivers/compress/zlib/zlib_pmd_ops.c
new file mode 100644
index 00000000..0a73aed9
--- /dev/null
+++ b/drivers/compress/zlib/zlib_pmd_ops.c
@@ -0,0 +1,307 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+
+#include "zlib_pmd_private.h"
+
+static const struct rte_compressdev_capabilities zlib_pmd_capabilities[] = {
+ { /* Deflate */
+ .algo = RTE_COMP_ALGO_DEFLATE,
+ .comp_feature_flags = (RTE_COMP_FF_NONCOMPRESSED_BLOCKS |
+ RTE_COMP_FF_HUFFMAN_FIXED |
+ RTE_COMP_FF_HUFFMAN_DYNAMIC),
+ .window_size = {
+ .min = 8,
+ .max = 15,
+ .increment = 1
+ },
+ },
+
+ RTE_COMP_END_OF_CAPABILITIES_LIST()
+
+};
+
+/** Configure device */
+static int
+zlib_pmd_config(struct rte_compressdev *dev,
+ struct rte_compressdev_config *config)
+{
+ struct rte_mempool *mp;
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+ struct zlib_private *internals = dev->data->dev_private;
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "stream_mp_%u", dev->data->dev_id);
+ mp = internals->mp;
+ if (mp == NULL) {
+ mp = rte_mempool_create(mp_name,
+ config->max_nb_priv_xforms +
+ config->max_nb_streams,
+ sizeof(struct zlib_priv_xform),
+ 0, 0, NULL, NULL, NULL,
+ NULL, config->socket_id,
+ 0);
+ if (mp == NULL) {
+ ZLIB_PMD_ERR("Cannot create private xform pool on "
+ "socket %d\n", config->socket_id);
+ return -ENOMEM;
+ }
+ internals->mp = mp;
+ }
+ return 0;
+}
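
An application-side sketch (not part of this patch) of a configuration this
callback would receive; max_nb_priv_xforms + max_nb_streams sizes the
mempool created above. 'dev_id' is assumed valid.

#include <rte_compressdev.h>

static int
example_configure(uint8_t dev_id, int socket_id)
{
	struct rte_compressdev_config cfg = {
		.socket_id = socket_id,
		.nb_queue_pairs = 1,
		.max_nb_priv_xforms = 16,
		.max_nb_streams = 0,	/* stateless PMD: no streams */
	};

	return rte_compressdev_configure(dev_id, &cfg);
}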
+
+/** Start device */
+static int
+zlib_pmd_start(__rte_unused struct rte_compressdev *dev)
+{
+ return 0;
+}
+
+/** Stop device */
+static void
+zlib_pmd_stop(__rte_unused struct rte_compressdev *dev)
+{
+}
+
+/** Close device */
+static int
+zlib_pmd_close(struct rte_compressdev *dev)
+{
+ struct zlib_private *internals = dev->data->dev_private;
+ rte_mempool_free(internals->mp);
+ internals->mp = NULL;
+ return 0;
+}
+
+/** Get device statistics */
+static void
+zlib_pmd_stats_get(struct rte_compressdev *dev,
+ struct rte_compressdev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct zlib_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+/** Reset device statistics */
+static void
+zlib_pmd_stats_reset(struct rte_compressdev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct zlib_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+/** Get device info */
+static void
+zlib_pmd_info_get(struct rte_compressdev *dev,
+ struct rte_compressdev_info *dev_info)
+{
+ if (dev_info != NULL) {
+ dev_info->driver_name = dev->device->name;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = zlib_pmd_capabilities;
+ }
+}
+
+/** Release queue pair */
+static int
+zlib_pmd_qp_release(struct rte_compressdev *dev, uint16_t qp_id)
+{
+ struct zlib_qp *qp = dev->data->queue_pairs[qp_id];
+
+ if (qp != NULL) {
+ rte_ring_free(qp->processed_pkts);
+ rte_free(qp);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+/** Set a unique name for the queue pair based on its name, dev_id and qp_id */
+static int
+zlib_pmd_qp_set_unique_name(struct rte_compressdev *dev,
+ struct zlib_qp *qp)
+{
+ unsigned int n = snprintf(qp->name, sizeof(qp->name),
+ "zlib_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+ if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+/** Create a ring to place process packets on */
+static struct rte_ring *
+zlib_pmd_qp_create_processed_pkts_ring(struct zlib_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r = qp->processed_pkts;
+
+ if (r) {
+ if (rte_ring_get_size(r) >= ring_size) {
+ ZLIB_PMD_INFO("Reusing existing ring %s for processed"
+ " packets", qp->name);
+ return r;
+ }
+
+ ZLIB_PMD_ERR("Unable to reuse existing ring %s for processed"
+ " packets", qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_EXACT_SZ);
+}
+
+/** Setup a queue pair */
+static int
+zlib_pmd_qp_setup(struct rte_compressdev *dev, uint16_t qp_id,
+ uint32_t max_inflight_ops, int socket_id)
+{
+ struct zlib_qp *qp = NULL;
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ zlib_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("ZLIB PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+		return -ENOMEM;
+
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ if (zlib_pmd_qp_set_unique_name(dev, qp))
+ goto qp_setup_cleanup;
+
+ qp->processed_pkts = zlib_pmd_qp_create_processed_pkts_ring(qp,
+ max_inflight_ops, socket_id);
+ if (qp->processed_pkts == NULL)
+ goto qp_setup_cleanup;
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ return 0;
+
+qp_setup_cleanup:
+	dev->data->queue_pairs[qp_id] = NULL;
+	rte_free(qp);
+	return -1;
+}
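
A hedged usage sketch (not part of this patch): the application reaches
this callback through the compressdev API, where max_inflight_ops sizes the
completion ring created above.

#include <rte_compressdev.h>

static int
example_setup_qp(uint8_t dev_id, int socket_id)
{
	/* queue pair 0, ring sized for up to 64 ops in flight */
	return rte_compressdev_queue_pair_setup(dev_id, 0, 64, socket_id);
}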
+
+/** Configure stream */
+static int
+zlib_pmd_stream_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform,
+ void **zstream)
+{
+ int ret = 0;
+ struct zlib_stream *stream;
+ struct zlib_private *internals = dev->data->dev_private;
+
+ if (xform == NULL) {
+ ZLIB_PMD_ERR("invalid xform struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(internals->mp, zstream)) {
+		ZLIB_PMD_ERR("Couldn't get object from stream mempool");
+ return -ENOMEM;
+ }
+ stream = *((struct zlib_stream **)zstream);
+
+ ret = zlib_set_stream_parameters(xform, stream);
+
+ if (ret < 0) {
+		ZLIB_PMD_ERR("failed to configure stream parameters");
+
+ memset(stream, 0, sizeof(struct zlib_stream));
+ /* Return session to mempool */
+ rte_mempool_put(internals->mp, stream);
+ return ret;
+ }
+
+ return 0;
+}
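
A hedged application-side sketch (not part of this patch) of reaching this
callback through the compressdev API; the xform would typically be the
deflate example shown earlier.

#include <rte_compressdev.h>

static void *
example_create_priv_xform(uint8_t dev_id,
		const struct rte_comp_xform *xform)
{
	void *priv_xform = NULL;

	if (rte_compressdev_private_xform_create(dev_id, xform,
			&priv_xform) < 0)
		return NULL;
	return priv_xform;
}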
+
+/** Configure private xform */
+static int
+zlib_pmd_private_xform_create(struct rte_compressdev *dev,
+ const struct rte_comp_xform *xform,
+ void **private_xform)
+{
+ return zlib_pmd_stream_create(dev, xform, private_xform);
+}
+
+/** Free the zlib stream and clear its memory so no stale state is left behind */
+static int
+zlib_pmd_stream_free(__rte_unused struct rte_compressdev *dev,
+ void *zstream)
+{
+	struct zlib_stream *stream = (struct zlib_stream *)zstream;
+	struct rte_mempool *mp;
+
+	if (!stream)
+		return -EINVAL;
+
+	stream->free(&stream->strm);
+	/* Zero out the whole structure */
+	memset(stream, 0, sizeof(struct zlib_stream));
+	mp = rte_mempool_from_obj(stream);
+	rte_mempool_put(mp, stream);
+
+ return 0;
+}
+
+/** Free the private xform by releasing its underlying stream */
+static int
+zlib_pmd_private_xform_free(struct rte_compressdev *dev,
+ void *private_xform)
+{
+ return zlib_pmd_stream_free(dev, private_xform);
+}
+
+struct rte_compressdev_ops zlib_pmd_ops = {
+ .dev_configure = zlib_pmd_config,
+ .dev_start = zlib_pmd_start,
+ .dev_stop = zlib_pmd_stop,
+ .dev_close = zlib_pmd_close,
+
+ .stats_get = zlib_pmd_stats_get,
+ .stats_reset = zlib_pmd_stats_reset,
+
+ .dev_infos_get = zlib_pmd_info_get,
+
+ .queue_pair_setup = zlib_pmd_qp_setup,
+ .queue_pair_release = zlib_pmd_qp_release,
+
+ .private_xform_create = zlib_pmd_private_xform_create,
+ .private_xform_free = zlib_pmd_private_xform_free,
+
+ .stream_create = NULL,
+ .stream_free = NULL
+};
+
+struct rte_compressdev_ops *rte_zlib_pmd_ops = &zlib_pmd_ops;
diff --git a/drivers/compress/zlib/zlib_pmd_private.h b/drivers/compress/zlib/zlib_pmd_private.h
new file mode 100644
index 00000000..2c6e83d4
--- /dev/null
+++ b/drivers/compress/zlib/zlib_pmd_private.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium Networks
+ */
+
+#ifndef _RTE_ZLIB_PMD_PRIVATE_H_
+#define _RTE_ZLIB_PMD_PRIVATE_H_
+
+#include <zlib.h>
+#include <rte_compressdev.h>
+#include <rte_compressdev_pmd.h>
+
+#define COMPRESSDEV_NAME_ZLIB_PMD compress_zlib
+/**< ZLIB PMD device name */
+
+#define DEF_MEM_LEVEL 8
+
+extern int zlib_logtype_driver;
+#define ZLIB_PMD_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, zlib_logtype_driver, "%s(): "fmt "\n", \
+ __func__, ##args)
+
+#define ZLIB_PMD_INFO(fmt, args...) \
+ ZLIB_PMD_LOG(INFO, fmt, ## args)
+#define ZLIB_PMD_ERR(fmt, args...) \
+ ZLIB_PMD_LOG(ERR, fmt, ## args)
+#define ZLIB_PMD_WARN(fmt, args...) \
+ ZLIB_PMD_LOG(WARNING, fmt, ## args)
+
+struct zlib_private {
+ struct rte_mempool *mp;
+};
+
+struct zlib_qp {
+ struct rte_ring *processed_pkts;
+ /**< Ring for placing process packets */
+ struct rte_compressdev_stats qp_stats;
+ /**< Queue pair statistics */
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_COMPRESSDEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+} __rte_cache_aligned;
+
+/* Algorithm handler function prototype */
+typedef void (*comp_func_t)(struct rte_comp_op *op, z_stream *strm);
+
+typedef int (*comp_free_t)(z_stream *strm);
+
+/** ZLIB Stream structure */
+struct zlib_stream {
+ z_stream strm;
+ /**< zlib stream structure */
+ comp_func_t comp;
+ /**< Operation (compression/decompression) */
+ comp_free_t free;
+ /**< Free Operation (compression/decompression) */
+} __rte_cache_aligned;
+
+/** ZLIB private xform structure */
+struct zlib_priv_xform {
+ struct zlib_stream stream;
+} __rte_cache_aligned;
+
+int
+zlib_set_stream_parameters(const struct rte_comp_xform *xform,
+ struct zlib_stream *stream);
+
+/** Device specific operations function pointer structure */
+extern struct rte_compressdev_ops *rte_zlib_pmd_ops;
+
+#endif /* _RTE_ZLIB_PMD_PRIVATE_H_ */