Diffstat (limited to 'drivers/crypto/ccp')
-rw-r--r--  drivers/crypto/ccp/Makefile                 |   35
-rw-r--r--  drivers/crypto/ccp/ccp_crypto.c             | 2951
-rw-r--r--  drivers/crypto/ccp/ccp_crypto.h             |  388
-rw-r--r--  drivers/crypto/ccp/ccp_dev.c                |  810
-rw-r--r--  drivers/crypto/ccp/ccp_dev.h                |  495
-rw-r--r--  drivers/crypto/ccp/ccp_pci.c                |  236
-rw-r--r--  drivers/crypto/ccp/ccp_pci.h                |   27
-rw-r--r--  drivers/crypto/ccp/ccp_pmd_ops.c            |  833
-rw-r--r--  drivers/crypto/ccp/ccp_pmd_private.h        |  107
-rw-r--r--  drivers/crypto/ccp/meson.build              |   21
-rw-r--r--  drivers/crypto/ccp/rte_ccp_pmd.c            |  397
-rw-r--r--  drivers/crypto/ccp/rte_pmd_ccp_version.map  |    4
12 files changed, 6304 insertions, 0 deletions
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
new file mode 100644
index 00000000..f51d170f
--- /dev/null
+++ b/drivers/crypto/ccp/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_ccp.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += -I$(SRCDIR)
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# external library include paths
+LDLIBS += -lcrypto
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_pci -lrte_bus_pci
+LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_kvargs
+
+# versioning export map
+EXPORT_MAP := rte_pmd_ccp_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_crypto.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
new file mode 100644
index 00000000..19ae9153
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -0,0 +1,2951 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <openssl/sha.h>
+#include <openssl/cmac.h> /* subkey APIs */
+#include <openssl/evp.h> /* subkey APIs */
+
+#include <rte_hexdump.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "ccp_dev.h"
+#include "ccp_crypto.h"
+#include "ccp_pci.h"
+#include "ccp_pmd_private.h"
+
+#include <openssl/conf.h>
+#include <openssl/err.h>
+#include <openssl/hmac.h>
+
+/* SHA initial context values */
+static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
+ SHA1_H4, SHA1_H3,
+ SHA1_H2, SHA1_H1,
+ SHA1_H0, 0x0U,
+ 0x0U, 0x0U,
+};
+
+uint32_t ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+ SHA224_H7, SHA224_H6,
+ SHA224_H5, SHA224_H4,
+ SHA224_H3, SHA224_H2,
+ SHA224_H1, SHA224_H0,
+};
+
+uint32_t ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+ SHA256_H7, SHA256_H6,
+ SHA256_H5, SHA256_H4,
+ SHA256_H3, SHA256_H2,
+ SHA256_H1, SHA256_H0,
+};
+
+uint64_t ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+ SHA384_H7, SHA384_H6,
+ SHA384_H5, SHA384_H4,
+ SHA384_H3, SHA384_H2,
+ SHA384_H1, SHA384_H0,
+};
+
+uint64_t ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+ SHA512_H7, SHA512_H6,
+ SHA512_H5, SHA512_H4,
+ SHA512_H3, SHA512_H2,
+ SHA512_H1, SHA512_H0,
+};
+
+#if defined(_MSC_VER)
+#define SHA3_CONST(x) x
+#else
+#define SHA3_CONST(x) x##L
+#endif
+
+/** 'Words' here refers to uint64_t */
+#define SHA3_KECCAK_SPONGE_WORDS \
+ (((1600) / 8) / sizeof(uint64_t))
+typedef struct sha3_context_ {
+ uint64_t saved;
+ /**
+ * The portion of the input message that we
+ * didn't consume yet
+ */
+ union {
+ uint64_t s[SHA3_KECCAK_SPONGE_WORDS];
+ /* Keccak's state */
+ uint8_t sb[SHA3_KECCAK_SPONGE_WORDS * 8];
+		/** total ctx size is 200 bytes */
+ };
+ unsigned int byteIndex;
+	/**
+	 * 0..7: index of the next byte to be buffered
+	 * (0 means no bytes are buffered yet)
+	 */
+	unsigned int wordIndex;
+	/**
+	 * 0..24: index of the next state word into which
+	 * input is absorbed (starts from 0)
+	 */
+	unsigned int capacityWords;
+	/**
+	 * twice the hash output size in words
+	 * (e.g. 16 for Keccak-512)
+	 */
+} sha3_context;
+
+#ifndef SHA3_ROTL64
+#define SHA3_ROTL64(x, y) \
+ (((x) << (y)) | ((x) >> ((sizeof(uint64_t)*8) - (y))))
+#endif
+
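+/* Iota round constants, Rho rotation offsets and Pi lane order used by
+ * the Keccak-f permutation below.
+ */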
+static const uint64_t keccakf_rndc[24] = {
+ SHA3_CONST(0x0000000000000001UL), SHA3_CONST(0x0000000000008082UL),
+ SHA3_CONST(0x800000000000808aUL), SHA3_CONST(0x8000000080008000UL),
+ SHA3_CONST(0x000000000000808bUL), SHA3_CONST(0x0000000080000001UL),
+ SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008009UL),
+ SHA3_CONST(0x000000000000008aUL), SHA3_CONST(0x0000000000000088UL),
+ SHA3_CONST(0x0000000080008009UL), SHA3_CONST(0x000000008000000aUL),
+ SHA3_CONST(0x000000008000808bUL), SHA3_CONST(0x800000000000008bUL),
+ SHA3_CONST(0x8000000000008089UL), SHA3_CONST(0x8000000000008003UL),
+ SHA3_CONST(0x8000000000008002UL), SHA3_CONST(0x8000000000000080UL),
+ SHA3_CONST(0x000000000000800aUL), SHA3_CONST(0x800000008000000aUL),
+ SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008080UL),
+ SHA3_CONST(0x0000000080000001UL), SHA3_CONST(0x8000000080008008UL)
+};
+
+static const unsigned int keccakf_rotc[24] = {
+ 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, 27, 41, 56, 8, 25, 43, 62,
+ 18, 39, 61, 20, 44
+};
+
+static const unsigned int keccakf_piln[24] = {
+ 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, 15, 23, 19, 13, 12, 2, 20,
+ 14, 22, 9, 6, 1
+};
+
+static enum ccp_cmd_order
+ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+{
+ enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;
+
+ if (xform == NULL)
+ return res;
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ return CCP_CMD_AUTH;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return CCP_CMD_HASH_CIPHER;
+ }
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ return CCP_CMD_CIPHER;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return CCP_CMD_CIPHER_HASH;
+ }
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
+ return CCP_CMD_COMBINED;
+ return res;
+}
+
+/* partial hash using openssl */
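+/* Each helper below runs a single block through the corresponding
+ * compression function (no final padding) and copies out the
+ * intermediate digest state, as needed for the HMAC ipad/opad
+ * pre-compute.
+ */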
+static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA_CTX ctx;
+
+ if (!SHA1_Init(&ctx))
+ return -EFAULT;
+ SHA1_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA256_CTX ctx;
+
+ if (!SHA224_Init(&ctx))
+ return -EFAULT;
+ SHA256_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx,
+ SHA256_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA256_CTX ctx;
+
+ if (!SHA256_Init(&ctx))
+ return -EFAULT;
+ SHA256_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx,
+ SHA256_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA512_CTX ctx;
+
+ if (!SHA384_Init(&ctx))
+ return -EFAULT;
+ SHA512_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx,
+ SHA512_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA512_CTX ctx;
+
+ if (!SHA512_Init(&ctx))
+ return -EFAULT;
+ SHA512_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx,
+ SHA512_DIGEST_LENGTH);
+ return 0;
+}
+
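+/* Keccak-f[1600] permutation: 24 rounds of Theta, Rho, Pi, Chi and Iota
+ * over the 5x5 array of 64-bit lanes.
+ */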
+static void
+keccakf(uint64_t s[25])
+{
+ int i, j, round;
+ uint64_t t, bc[5];
+#define KECCAK_ROUNDS 24
+
+ for (round = 0; round < KECCAK_ROUNDS; round++) {
+
+ /* Theta */
+ for (i = 0; i < 5; i++)
+ bc[i] = s[i] ^ s[i + 5] ^ s[i + 10] ^ s[i + 15] ^
+ s[i + 20];
+
+ for (i = 0; i < 5; i++) {
+ t = bc[(i + 4) % 5] ^ SHA3_ROTL64(bc[(i + 1) % 5], 1);
+ for (j = 0; j < 25; j += 5)
+ s[j + i] ^= t;
+ }
+
+ /* Rho Pi */
+ t = s[1];
+ for (i = 0; i < 24; i++) {
+ j = keccakf_piln[i];
+ bc[0] = s[j];
+ s[j] = SHA3_ROTL64(t, keccakf_rotc[i]);
+ t = bc[0];
+ }
+
+ /* Chi */
+ for (j = 0; j < 25; j += 5) {
+ for (i = 0; i < 5; i++)
+ bc[i] = s[j + i];
+ for (i = 0; i < 5; i++)
+ s[j + i] ^= (~bc[(i + 1) % 5]) &
+ bc[(i + 2) % 5];
+ }
+
+ /* Iota */
+ s[0] ^= keccakf_rndc[round];
+ }
+}
+
+static void
+sha3_Init224(void *priv)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->capacityWords = 2 * 224 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init256(void *priv)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->capacityWords = 2 * 256 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init384(void *priv)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->capacityWords = 2 * 384 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init512(void *priv)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->capacityWords = 2 * 512 / (8 * sizeof(uint64_t));
+}
+
+
+/* Absorb message bytes into the Keccak sponge state. Partial words are
+ * buffered in ctx->saved; each complete 64-bit word is XOR-ed into the
+ * state and keccakf() is run whenever a full rate block has been
+ * absorbed. No padding is applied here.
+ */
+static void
+sha3_Update(void *priv, void const *bufIn, size_t len)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+ unsigned int old_tail = (8 - ctx->byteIndex) & 7;
+ size_t words;
+ unsigned int tail;
+ size_t i;
+ const uint8_t *buf = bufIn;
+
+ if (len < old_tail) {
+ while (len--)
+ ctx->saved |= (uint64_t) (*(buf++)) <<
+ ((ctx->byteIndex++) * 8);
+ return;
+ }
+
+ if (old_tail) {
+ len -= old_tail;
+ while (old_tail--)
+ ctx->saved |= (uint64_t) (*(buf++)) <<
+ ((ctx->byteIndex++) * 8);
+
+ ctx->s[ctx->wordIndex] ^= ctx->saved;
+ ctx->byteIndex = 0;
+ ctx->saved = 0;
+ if (++ctx->wordIndex ==
+ (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
+ keccakf(ctx->s);
+ ctx->wordIndex = 0;
+ }
+ }
+
+ words = len / sizeof(uint64_t);
+ tail = len - words * sizeof(uint64_t);
+
+ for (i = 0; i < words; i++, buf += sizeof(uint64_t)) {
+ const uint64_t t = (uint64_t) (buf[0]) |
+ ((uint64_t) (buf[1]) << 8 * 1) |
+ ((uint64_t) (buf[2]) << 8 * 2) |
+ ((uint64_t) (buf[3]) << 8 * 3) |
+ ((uint64_t) (buf[4]) << 8 * 4) |
+ ((uint64_t) (buf[5]) << 8 * 5) |
+ ((uint64_t) (buf[6]) << 8 * 6) |
+ ((uint64_t) (buf[7]) << 8 * 7);
+ ctx->s[ctx->wordIndex] ^= t;
+ if (++ctx->wordIndex ==
+ (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
+ keccakf(ctx->s);
+ ctx->wordIndex = 0;
+ }
+ }
+
+ while (tail--)
+ ctx->saved |= (uint64_t) (*(buf++)) << ((ctx->byteIndex++) * 8);
+}
+
+int partial_hash_sha3_224(uint8_t *data_in, uint8_t *data_out)
+{
+ sha3_context *ctx;
+ int i;
+
+ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+ if (!ctx) {
+ CCP_LOG_ERR("sha3-ctx creation failed");
+ return -ENOMEM;
+ }
+ sha3_Init224(ctx);
+ sha3_Update(ctx, data_in, SHA3_224_BLOCK_SIZE);
+ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+ rte_free(ctx);
+
+ return 0;
+}
+
+int partial_hash_sha3_256(uint8_t *data_in, uint8_t *data_out)
+{
+ sha3_context *ctx;
+ int i;
+
+ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+ if (!ctx) {
+ CCP_LOG_ERR("sha3-ctx creation failed");
+ return -ENOMEM;
+ }
+ sha3_Init256(ctx);
+ sha3_Update(ctx, data_in, SHA3_256_BLOCK_SIZE);
+ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+ rte_free(ctx);
+
+ return 0;
+}
+
+int partial_hash_sha3_384(uint8_t *data_in, uint8_t *data_out)
+{
+ sha3_context *ctx;
+ int i;
+
+ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+ if (!ctx) {
+ CCP_LOG_ERR("sha3-ctx creation failed");
+ return -ENOMEM;
+ }
+ sha3_Init384(ctx);
+ sha3_Update(ctx, data_in, SHA3_384_BLOCK_SIZE);
+ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+ rte_free(ctx);
+
+ return 0;
+}
+
+int partial_hash_sha3_512(uint8_t *data_in, uint8_t *data_out)
+{
+ sha3_context *ctx;
+ int i;
+
+ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+ if (!ctx) {
+ CCP_LOG_ERR("sha3-ctx creation failed");
+ return -ENOMEM;
+ }
+ sha3_Init512(ctx);
+ sha3_Update(ctx, data_in, SHA3_512_BLOCK_SIZE);
+ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+ rte_free(ctx);
+
+ return 0;
+}
+
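+/* Pre-compute the HMAC partial hashes H(key ^ ipad) and H(key ^ opad)
+ * and store them back to back in sess->auth.pre_compute, in the layout
+ * the CCP SHA engine expects as initial context.
+ */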
+static int generate_partial_hash(struct ccp_session *sess)
+{
+
+ uint8_t ipad[sess->auth.block_size];
+ uint8_t opad[sess->auth.block_size];
+ uint8_t *ipad_t, *opad_t;
+ uint32_t *hash_value_be32, hash_temp32[8];
+ uint64_t *hash_value_be64, hash_temp64[8];
+ int i, count;
+ uint8_t *hash_value_sha3;
+
+ opad_t = ipad_t = (uint8_t *)sess->auth.key;
+
+ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute);
+ hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute);
+
+	/* the key was zero-padded to the algorithm's block size */
+ for (i = 0; i < sess->auth.block_size; i++) {
+ ipad[i] = (ipad_t[i] ^ HMAC_IPAD_VALUE);
+ opad[i] = (opad_t[i] ^ HMAC_OPAD_VALUE);
+ }
+
+ switch (sess->auth.algo) {
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ count = SHA1_DIGEST_SIZE >> 2;
+
+ if (partial_hash_sha1(ipad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+
+ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha1(opad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ count = SHA256_DIGEST_SIZE >> 2;
+
+ if (partial_hash_sha224(ipad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+
+ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha224(opad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA3_224_HMAC:
+ hash_value_sha3 = sess->auth.pre_compute;
+ if (partial_hash_sha3_224(ipad, hash_value_sha3))
+ return -1;
+
+ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha3_224(opad, hash_value_sha3))
+ return -1;
+ return 0;
+ case CCP_AUTH_ALGO_SHA256_HMAC:
+ count = SHA256_DIGEST_SIZE >> 2;
+
+ if (partial_hash_sha256(ipad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+
+ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha256(opad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA3_256_HMAC:
+ hash_value_sha3 = sess->auth.pre_compute;
+ if (partial_hash_sha3_256(ipad, hash_value_sha3))
+ return -1;
+
+ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha3_256(opad, hash_value_sha3))
+ return -1;
+ return 0;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ count = SHA512_DIGEST_SIZE >> 3;
+
+ if (partial_hash_sha384(ipad, (uint8_t *)hash_temp64))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be64++)
+ *hash_value_be64 = hash_temp64[count - 1 - i];
+
+ hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha384(opad, (uint8_t *)hash_temp64))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be64++)
+ *hash_value_be64 = hash_temp64[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA3_384_HMAC:
+ hash_value_sha3 = sess->auth.pre_compute;
+ if (partial_hash_sha3_384(ipad, hash_value_sha3))
+ return -1;
+
+ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha3_384(opad, hash_value_sha3))
+ return -1;
+ return 0;
+ case CCP_AUTH_ALGO_SHA512_HMAC:
+ count = SHA512_DIGEST_SIZE >> 3;
+
+ if (partial_hash_sha512(ipad, (uint8_t *)hash_temp64))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be64++)
+ *hash_value_be64 = hash_temp64[count - 1 - i];
+
+ hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha512(opad, (uint8_t *)hash_temp64))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be64++)
+ *hash_value_be64 = hash_temp64[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA3_512_HMAC:
+ hash_value_sha3 = sess->auth.pre_compute;
+ if (partial_hash_sha3_512(ipad, hash_value_sha3))
+ return -1;
+
+ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha3_512(opad, hash_value_sha3))
+ return -1;
+ return 0;
+ default:
+ CCP_LOG_ERR("Invalid auth algo");
+ return -1;
+ }
+}
+
+/* prepare temporary keys K1 and K2 */
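+/* Each subkey is the previous value doubled in GF(2^128): shift the
+ * block left by one bit and, if the MSB was set, XOR in the constant Rb
+ * (0x87 for 16-byte blocks, 0x1b otherwise).
+ */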
+static void prepare_key(unsigned char *k, unsigned char *l, int bl)
+{
+ int i;
+ /* Shift block to left, including carry */
+ for (i = 0; i < bl; i++) {
+ k[i] = l[i] << 1;
+ if (i < bl - 1 && l[i + 1] & 0x80)
+ k[i] |= 1;
+ }
+ /* If MSB set fixup with R */
+ if (l[0] & 0x80)
+ k[bl - 1] ^= bl == 16 ? 0x87 : 0x1b;
+}
+
+/* subkeys K1 and K2 generation for CMAC */
+static int
+generate_cmac_subkeys(struct ccp_session *sess)
+{
+ const EVP_CIPHER *algo;
+ EVP_CIPHER_CTX *ctx;
+ unsigned char *ccp_ctx;
+ size_t i;
+ int dstlen, totlen;
+ unsigned char zero_iv[AES_BLOCK_SIZE] = {0};
+ unsigned char dst[2 * AES_BLOCK_SIZE] = {0};
+ unsigned char k1[AES_BLOCK_SIZE] = {0};
+ unsigned char k2[AES_BLOCK_SIZE] = {0};
+
+ if (sess->auth.ut.aes_type == CCP_AES_TYPE_128)
+ algo = EVP_aes_128_cbc();
+ else if (sess->auth.ut.aes_type == CCP_AES_TYPE_192)
+ algo = EVP_aes_192_cbc();
+ else if (sess->auth.ut.aes_type == CCP_AES_TYPE_256)
+ algo = EVP_aes_256_cbc();
+ else {
+ CCP_LOG_ERR("Invalid CMAC type length");
+ return -1;
+ }
+
+ ctx = EVP_CIPHER_CTX_new();
+ if (!ctx) {
+ CCP_LOG_ERR("ctx creation failed");
+ return -1;
+ }
+ if (EVP_EncryptInit(ctx, algo, (unsigned char *)sess->auth.key,
+ (unsigned char *)zero_iv) <= 0)
+ goto key_generate_err;
+ if (EVP_CIPHER_CTX_set_padding(ctx, 0) <= 0)
+ goto key_generate_err;
+ if (EVP_EncryptUpdate(ctx, dst, &dstlen, zero_iv,
+ AES_BLOCK_SIZE) <= 0)
+ goto key_generate_err;
+ if (EVP_EncryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0)
+ goto key_generate_err;
+
+ memset(sess->auth.pre_compute, 0, CCP_SB_BYTES * 2);
+
+ ccp_ctx = (unsigned char *)(sess->auth.pre_compute + CCP_SB_BYTES - 1);
+ prepare_key(k1, dst, AES_BLOCK_SIZE);
+ for (i = 0; i < AES_BLOCK_SIZE; i++, ccp_ctx--)
+ *ccp_ctx = k1[i];
+
+ ccp_ctx = (unsigned char *)(sess->auth.pre_compute +
+ (2 * CCP_SB_BYTES) - 1);
+ prepare_key(k2, k1, AES_BLOCK_SIZE);
+ for (i = 0; i < AES_BLOCK_SIZE; i++, ccp_ctx--)
+ *ccp_ctx = k2[i];
+
+ EVP_CIPHER_CTX_free(ctx);
+
+ return 0;
+
+key_generate_err:
+ CCP_LOG_ERR("CMAC Init failed");
+ return -1;
+}
+
+/* configure session */
+static int
+ccp_configure_session_cipher(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ size_t i, j, x;
+
+ cipher_xform = &xform->cipher;
+
+ /* set cipher direction */
+ if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
+ else
+ sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
+
+ /* set cipher key */
+ sess->cipher.key_length = cipher_xform->key.length;
+ rte_memcpy(sess->cipher.key, cipher_xform->key.data,
+ cipher_xform->key.length);
+
+ /* set iv parameters */
+ sess->iv.offset = cipher_xform->iv.offset;
+ sess->iv.length = cipher_xform->iv.length;
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR;
+ sess->cipher.um.aes_mode = CCP_AES_MODE_CTR;
+ sess->cipher.engine = CCP_ENGINE_AES;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
+ sess->cipher.um.aes_mode = CCP_AES_MODE_ECB;
+ sess->cipher.engine = CCP_ENGINE_AES;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
+ sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
+ sess->cipher.engine = CCP_ENGINE_AES;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ sess->cipher.algo = CCP_CIPHER_ALGO_3DES_CBC;
+ sess->cipher.um.des_mode = CCP_DES_MODE_CBC;
+ sess->cipher.engine = CCP_ENGINE_3DES;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cipher algo");
+ return -1;
+ }
+
+
+ switch (sess->cipher.engine) {
+ case CCP_ENGINE_AES:
+ if (sess->cipher.key_length == 16)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+ else if (sess->cipher.key_length == 24)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+ else if (sess->cipher.key_length == 32)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+ else {
+ CCP_LOG_ERR("Invalid cipher key length");
+ return -1;
+ }
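+		/* store the key byte-reversed, as the CCP engine expects */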
+ for (i = 0; i < sess->cipher.key_length ; i++)
+ sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
+ sess->cipher.key[i];
+ break;
+ case CCP_ENGINE_3DES:
+ if (sess->cipher.key_length == 16)
+ sess->cipher.ut.des_type = CCP_DES_TYPE_128;
+ else if (sess->cipher.key_length == 24)
+ sess->cipher.ut.des_type = CCP_DES_TYPE_192;
+ else {
+ CCP_LOG_ERR("Invalid cipher key length");
+ return -1;
+ }
+ for (j = 0, x = 0; j < sess->cipher.key_length/8; j++, x += 8)
+ for (i = 0; i < 8; i++)
+ sess->cipher.key_ccp[(8 + x) - i - 1] =
+ sess->cipher.key[i + x];
+ break;
+ default:
+ CCP_LOG_ERR("Invalid CCP Engine");
+ return -ENOTSUP;
+ }
+ sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+ sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+ return 0;
+}
+
+static int
+ccp_configure_session_auth(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_auth_xform *auth_xform = NULL;
+ size_t i;
+
+ auth_xform = &xform->auth;
+
+ sess->auth.digest_length = auth_xform->digest_length;
+ if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ sess->auth.op = CCP_AUTH_OP_GENERATE;
+ else
+ sess->auth.op = CCP_AUTH_OP_VERIFY;
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ if (sess->auth_opt) {
+ sess->auth.algo = CCP_AUTH_ALGO_MD5_HMAC;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ MD5_DIGEST_SIZE);
+ sess->auth.key_length = auth_xform->key.length;
+ sess->auth.block_size = MD5_BLOCK_SIZE;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else
+ return -1; /* HMAC MD5 not supported on CCP */
+ break;
+ case RTE_CRYPTO_AUTH_SHA1:
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA1;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+ sess->auth.ctx = (void *)ccp_sha1_init;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+ sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+ sess->auth.block_size = SHA1_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+ return -1;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+ sess->auth.block_size = SHA1_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA224:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA224;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
+ sess->auth.ctx = (void *)ccp_sha224_init;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+ sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+ sess->auth.block_size = SHA224_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+ sess->auth.block_size = SHA224_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_224:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_224;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_224_HMAC:
+ if (auth_xform->key.length > SHA3_224_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_224_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
+ sess->auth.block_size = SHA3_224_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA256;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
+ sess->auth.ctx = (void *)ccp_sha256_init;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+ sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+ sess->auth.block_size = SHA256_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+ sess->auth.block_size = SHA256_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_256:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_256;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_256_HMAC:
+ if (auth_xform->key.length > SHA3_256_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_256_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
+ sess->auth.block_size = SHA3_256_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA384;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
+ sess->auth.ctx = (void *)ccp_sha384_init;
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ SHA384_DIGEST_SIZE);
+ sess->auth.block_size = SHA384_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ SHA384_DIGEST_SIZE);
+ sess->auth.block_size = SHA384_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_384:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_384;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_384_HMAC:
+ if (auth_xform->key.length > SHA3_384_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_384_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
+ sess->auth.block_size = SHA3_384_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA512;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
+ sess->auth.ctx = (void *)ccp_sha512_init;
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ SHA512_DIGEST_SIZE);
+ sess->auth.block_size = SHA512_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ SHA512_DIGEST_SIZE);
+ sess->auth.block_size = SHA512_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_512:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_512;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_512_HMAC:
+ if (auth_xform->key.length > SHA3_512_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_512_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
+ sess->auth.block_size = SHA3_512_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
+ sess->auth.engine = CCP_ENGINE_AES;
+ sess->auth.um.aes_mode = CCP_AES_MODE_CMAC;
+ sess->auth.key_length = auth_xform->key.length;
+ /* padding and hash result */
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = AES_BLOCK_SIZE;
+ sess->auth.block_size = AES_BLOCK_SIZE;
+ if (sess->auth.key_length == 16)
+ sess->auth.ut.aes_type = CCP_AES_TYPE_128;
+ else if (sess->auth.key_length == 24)
+ sess->auth.ut.aes_type = CCP_AES_TYPE_192;
+ else if (sess->auth.key_length == 32)
+ sess->auth.ut.aes_type = CCP_AES_TYPE_256;
+ else {
+ CCP_LOG_ERR("Invalid CMAC key length");
+ return -1;
+ }
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ sess->auth.key_length);
+ for (i = 0; i < sess->auth.key_length; i++)
+ sess->auth.key_ccp[sess->auth.key_length - i - 1] =
+ sess->auth.key[i];
+ if (generate_cmac_subkeys(sess))
+ return -1;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported hash algo");
+ return -ENOTSUP;
+ }
+ return 0;
+}
+
+static int
+ccp_configure_session_aead(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_aead_xform *aead_xform = NULL;
+ size_t i;
+
+ aead_xform = &xform->aead;
+
+ sess->cipher.key_length = aead_xform->key.length;
+ rte_memcpy(sess->cipher.key, aead_xform->key.data,
+ aead_xform->key.length);
+
+ if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+ sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
+ sess->auth.op = CCP_AUTH_OP_GENERATE;
+ } else {
+ sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
+ sess->auth.op = CCP_AUTH_OP_VERIFY;
+ }
+ sess->aead_algo = aead_xform->algo;
+ sess->auth.aad_length = aead_xform->aad_length;
+ sess->auth.digest_length = aead_xform->digest_length;
+
+ /* set iv parameters */
+ sess->iv.offset = aead_xform->iv.offset;
+ sess->iv.length = aead_xform->iv.length;
+
+ switch (aead_xform->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ sess->cipher.algo = CCP_CIPHER_ALGO_AES_GCM;
+ sess->cipher.um.aes_mode = CCP_AES_MODE_GCTR;
+ sess->cipher.engine = CCP_ENGINE_AES;
+ if (sess->cipher.key_length == 16)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+ else if (sess->cipher.key_length == 24)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+ else if (sess->cipher.key_length == 32)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+ else {
+ CCP_LOG_ERR("Invalid aead key length");
+ return -1;
+ }
+ for (i = 0; i < sess->cipher.key_length; i++)
+ sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
+ sess->cipher.key[i];
+ sess->auth.algo = CCP_AUTH_ALGO_AES_GCM;
+ sess->auth.engine = CCP_ENGINE_AES;
+ sess->auth.um.aes_mode = CCP_AES_MODE_GHASH;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = 0;
+ sess->auth.block_size = AES_BLOCK_SIZE;
+ sess->cmd_id = CCP_CMD_COMBINED;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported aead algo");
+ return -ENOTSUP;
+ }
+ sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+ sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+ return 0;
+}
+
+int
+ccp_set_session_parameters(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform,
+ struct ccp_private *internals)
+{
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *aead_xform = NULL;
+ int ret = 0;
+
+ sess->auth_opt = internals->auth_opt;
+ sess->cmd_id = ccp_get_cmd_id(xform);
+
+ switch (sess->cmd_id) {
+ case CCP_CMD_CIPHER:
+ cipher_xform = xform;
+ break;
+ case CCP_CMD_AUTH:
+ auth_xform = xform;
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ cipher_xform = xform;
+ auth_xform = xform->next;
+ break;
+ case CCP_CMD_HASH_CIPHER:
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ break;
+ case CCP_CMD_COMBINED:
+ aead_xform = xform;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+ return -1;
+ }
+
+ /* Default IV length = 0 */
+ sess->iv.length = 0;
+ if (cipher_xform) {
+ ret = ccp_configure_session_cipher(sess, cipher_xform);
+ if (ret != 0) {
+ CCP_LOG_ERR("Invalid/unsupported cipher parameters");
+ return ret;
+ }
+ }
+ if (auth_xform) {
+ ret = ccp_configure_session_auth(sess, auth_xform);
+ if (ret != 0) {
+ CCP_LOG_ERR("Invalid/unsupported auth parameters");
+ return ret;
+ }
+ }
+ if (aead_xform) {
+ ret = ccp_configure_session_aead(sess, aead_xform);
+ if (ret != 0) {
+ CCP_LOG_ERR("Invalid/unsupported aead parameters");
+ return ret;
+ }
+ }
+ return ret;
+}
+
+/* calculate CCP descriptors requirement */
+static inline int
+ccp_cipher_slot(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->cipher.algo) {
+ case CCP_CIPHER_ALGO_AES_CBC:
+ count = 2;
+ /**< op + passthrough for iv */
+ break;
+ case CCP_CIPHER_ALGO_AES_ECB:
+ count = 1;
+		/**< only op */
+ break;
+ case CCP_CIPHER_ALGO_AES_CTR:
+ count = 2;
+ /**< op + passthrough for iv */
+ break;
+ case CCP_CIPHER_ALGO_3DES_CBC:
+ count = 2;
+ /**< op + passthrough for iv */
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cipher algo %d",
+ session->cipher.algo);
+ }
+ return count;
+}
+
+static inline int
+ccp_auth_slot(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->auth.algo) {
+ case CCP_AUTH_ALGO_SHA1:
+ case CCP_AUTH_ALGO_SHA224:
+ case CCP_AUTH_ALGO_SHA256:
+ case CCP_AUTH_ALGO_SHA384:
+ case CCP_AUTH_ALGO_SHA512:
+ count = 3;
+		/**< op + LSB passthrough copy to/from */
+ break;
+ case CCP_AUTH_ALGO_MD5_HMAC:
+ break;
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ case CCP_AUTH_ALGO_SHA256_HMAC:
+ if (session->auth_opt == 0)
+ count = 6;
+ break;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ case CCP_AUTH_ALGO_SHA512_HMAC:
+		/**
+		 * 1. Load PHash1 = H(k ^ ipad) to LSB
+		 * 2. Generate IHash = H(message with PHash1
+		 *    as init values)
+		 * 3. Retrieve IHash (2 slots for 384/512)
+		 * 4. Load PHash2 = H(k ^ opad) to LSB
+		 * 5. Generate FHash = H(IHash with PHash2
+		 *    as init value)
+		 * 6. Retrieve HMAC output from LSB to host memory
+		 */
+ if (session->auth_opt == 0)
+ count = 7;
+ break;
+ case CCP_AUTH_ALGO_SHA3_224:
+ case CCP_AUTH_ALGO_SHA3_256:
+ case CCP_AUTH_ALGO_SHA3_384:
+ case CCP_AUTH_ALGO_SHA3_512:
+ count = 1;
+		/**< only op; ctx and dst in host memory */
+ break;
+ case CCP_AUTH_ALGO_SHA3_224_HMAC:
+ case CCP_AUTH_ALGO_SHA3_256_HMAC:
+ count = 3;
+ break;
+ case CCP_AUTH_ALGO_SHA3_384_HMAC:
+ case CCP_AUTH_ALGO_SHA3_512_HMAC:
+ count = 4;
+		/**
+		 * 1. Op to perform IHash
+		 * 2. Retrieve intermediate result from LSB to host memory
+		 *    (two passthrough descriptors for 384/512)
+		 * 3. Perform final hash
+		 */
+ break;
+ case CCP_AUTH_ALGO_AES_CMAC:
+ count = 4;
+ /**
+ * op
+ * extra descriptor in padding case
+ * (k1/k2(255:128) with iv(127:0))
+ * Retrieve result
+ */
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported auth algo %d",
+ session->auth.algo);
+ }
+
+ return count;
+}
+
+static int
+ccp_aead_slot(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->aead_algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported aead algo %d",
+ session->aead_algo);
+ }
+ switch (session->auth.algo) {
+ case CCP_AUTH_ALGO_AES_GCM:
+ count = 5;
+ /**
+ * 1. Passthru iv
+ * 2. Hash AAD
+ * 3. GCTR
+ * 4. Reload passthru
+ * 5. Hash Final tag
+ */
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported combined auth ALGO %d",
+ session->auth.algo);
+ }
+ return count;
+}
+
+int
+ccp_compute_slot_count(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->cmd_id) {
+ case CCP_CMD_CIPHER:
+ count = ccp_cipher_slot(session);
+ break;
+ case CCP_CMD_AUTH:
+ count = ccp_auth_slot(session);
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ case CCP_CMD_HASH_CIPHER:
+ count = ccp_cipher_slot(session);
+ count += ccp_auth_slot(session);
+ break;
+ case CCP_CMD_COMBINED:
+ count = ccp_aead_slot(session);
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+
+ }
+
+ return count;
+}
+
+static uint8_t
+algo_select(int sessalgo,
+ const EVP_MD **algo)
+{
+ int res = 0;
+
+ switch (sessalgo) {
+ case CCP_AUTH_ALGO_MD5_HMAC:
+ *algo = EVP_md5();
+ break;
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ *algo = EVP_sha1();
+ break;
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ *algo = EVP_sha224();
+ break;
+ case CCP_AUTH_ALGO_SHA256_HMAC:
+ *algo = EVP_sha256();
+ break;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ *algo = EVP_sha384();
+ break;
+ case CCP_AUTH_ALGO_SHA512_HMAC:
+ *algo = EVP_sha512();
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
+ return res;
+}
+
+static int
+process_cpu_auth_hmac(uint8_t *src, uint8_t *dst,
+ __rte_unused uint8_t *iv,
+ EVP_PKEY *pkey,
+ int srclen,
+ EVP_MD_CTX *ctx,
+ const EVP_MD *algo,
+ uint16_t d_len)
+{
+ size_t dstlen;
+ unsigned char temp_dst[64];
+
+ if (EVP_DigestSignInit(ctx, NULL, algo, NULL, pkey) <= 0)
+ goto process_auth_err;
+
+ if (EVP_DigestSignUpdate(ctx, (char *)src, srclen) <= 0)
+ goto process_auth_err;
+
+ if (EVP_DigestSignFinal(ctx, temp_dst, &dstlen) <= 0)
+ goto process_auth_err;
+
+ memcpy(dst, temp_dst, d_len);
+ return 0;
+process_auth_err:
+ CCP_LOG_ERR("Process cpu auth failed");
+ return -EINVAL;
+}
+
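+/* Software authentication path (used when the session's auth_opt flag
+ * is set): compute the HMAC on the CPU with OpenSSL EVP and either
+ * write the digest back or verify it against the one in the op.
+ */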
+static int cpu_crypto_auth(struct ccp_qp *qp,
+ struct rte_crypto_op *op,
+ struct ccp_session *sess,
+ EVP_MD_CTX *ctx)
+{
+ uint8_t *src, *dst;
+ int srclen, status;
+ struct rte_mbuf *mbuf_src, *mbuf_dst;
+ const EVP_MD *algo = NULL;
+ EVP_PKEY *pkey;
+
+ algo_select(sess->auth.algo, &algo);
+ pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL, sess->auth.key,
+ sess->auth.key_length);
+ mbuf_src = op->sym->m_src;
+ mbuf_dst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+ srclen = op->sym->auth.data.length;
+ src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->sym->auth.data.offset);
+
+ if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
+ dst = qp->temp_digest;
+ } else {
+ dst = op->sym->auth.digest.data;
+ if (dst == NULL) {
+ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->auth.data.offset +
+ sess->auth.digest_length);
+ }
+ }
+ status = process_cpu_auth_hmac(src, dst, NULL,
+ pkey, srclen,
+ ctx,
+ algo,
+ sess->auth.digest_length);
+ if (status) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return status;
+ }
+
+ if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
+ if (memcmp(dst, op->sym->auth.digest.data,
+ sess->auth.digest_length) != 0) {
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+ } else {
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+ EVP_PKEY_free(pkey);
+ return 0;
+}
+
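+/* Queue a PASSTHRU descriptor: pst->dir == 1 copies from system memory
+ * into the CCP storage block (LSB), dir == 0 copies from the LSB back
+ * to system memory, with an optional bitwise operation and byte swap.
+ */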
+static void
+ccp_perform_passthru(struct ccp_passthru *pst,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_desc *desc;
+ union ccp_function function;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 0;
+ CCP_CMD_EOM(desc) = 0;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_PT_BYTESWAP(&function) = pst->byte_swap;
+ CCP_PT_BITWISE(&function) = pst->bit_mod;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = pst->len;
+
+ if (pst->dir) {
+ CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
+ CCP_CMD_DST_HI(desc) = 0;
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
+
+ if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_key;
+ } else {
+
+ CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
+ CCP_CMD_SRC_HI(desc) = 0;
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB;
+
+ CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+ }
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+}
+
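+/* HMAC on the SHA engine: load H(key ^ ipad) into the LSB, hash the
+ * message to produce the intermediate hash, then load H(key ^ opad),
+ * hash the intermediate value and copy the final digest back to space
+ * appended to the source mbuf.
+ */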
+static int
+ccp_perform_hmac(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+
+ struct ccp_session *session;
+ union ccp_function function;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ phys_addr_t src_addr, dest_addr, dest_addr_t;
+ struct ccp_passthru pst;
+ uint64_t auth_msg_bits;
+ void *append_ptr;
+ uint8_t *addr;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ addr = session->auth.pre_compute;
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+ dest_addr_t = dest_addr;
+
+	/** Load PHash1 to LSB */
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.len = session->auth.ctx_len;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+	/** SHA engine command descriptor for intermediate hash */
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+ auth_msg_bits = (op->sym->auth.data.length +
+ session->auth.block_size) * 8;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+ CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+ CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* Intermediate Hash value retrieve */
+ if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+ (session->auth.ut.sha_type == CCP_SHA_TYPE_512)) {
+
+ pst.src_addr =
+ (phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ } else {
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t;
+ pst.len = session->auth.ctx_len;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ }
+
+	/** Load PHash2 to LSB */
+ addr += session->auth.ctx_len;
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.len = session->auth.ctx_len;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+	/** SHA engine command descriptor for final hash */
+ dest_addr_t += session->auth.offset;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = (session->auth.ctx_len -
+ session->auth.offset);
+ auth_msg_bits = (session->auth.block_size +
+ session->auth.ctx_len -
+ session->auth.offset) * 8;
+
+ CCP_CMD_SRC_LO(desc) = (uint32_t)(dest_addr_t);
+ CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+ CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+ CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* Retrieve hmac output */
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr;
+ pst.len = session->auth.ctx_len;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+ (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ else
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+
+}
+
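+/* Plain SHA digest: load the initial hash values into the LSB, run the
+ * SHA engine over the authentication region, then copy the digest from
+ * the LSB to space appended to the source mbuf.
+ */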
+static int
+ccp_perform_sha(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ phys_addr_t src_addr, dest_addr;
+ struct ccp_passthru pst;
+ void *append_ptr;
+ uint64_t auth_msg_bits;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+
+ append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+
+	/** Passthru SHA context */
+
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
+ session->auth.ctx);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.len = session->auth.ctx_len;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+	/** Prepare SHA command descriptor */
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+ auth_msg_bits = op->sym->auth.data.length * 8;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+ CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+ CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* Hash value retrieve */
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr;
+ pst.len = session->auth.ctx_len;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+ (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ else
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+
+}
+
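+/* SHA3 HMAC: the first descriptor hashes the message keyed with the
+ * pre-computed inner state, the intermediate result is copied out of
+ * the LSB, and a second descriptor produces the final digest keyed with
+ * the pre-computed outer state.
+ */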
+static int
+ccp_perform_sha3_hmac(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ struct ccp_passthru pst;
+ union ccp_function function;
+ struct ccp_desc *desc;
+ uint8_t *append_ptr;
+ uint32_t tail;
+ phys_addr_t src_addr, dest_addr, ctx_paddr, dest_addr_t;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ if (!append_ptr) {
+ CCP_LOG_ERR("CCP MBUF append failed\n");
+ return -1;
+ }
+ dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+ dest_addr_t = dest_addr + (session->auth.ctx_len / 2);
+ ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void
+ *)session->auth.pre_compute);
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ /*desc1 for SHA3-Ihash operation */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = (cmd_q->sb_sha * CCP_SB_BYTES);
+ CCP_CMD_DST_HI(desc) = 0;
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+ CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* Intermediate Hash value retrieve */
+ if ((session->auth.ut.sha_type == CCP_SHA3_TYPE_384) ||
+ (session->auth.ut.sha_type == CCP_SHA3_TYPE_512)) {
+
+ pst.src_addr =
+ (phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ } else {
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+ }
+
+	/** SHA engine command descriptor for final hash */
+ ctx_paddr += CCP_SHA3_CTX_SIZE;
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ if (session->auth.ut.sha_type == CCP_SHA3_TYPE_224) {
+ dest_addr_t += (CCP_SB_BYTES - SHA224_DIGEST_SIZE);
+ CCP_CMD_LEN(desc) = SHA224_DIGEST_SIZE;
+ } else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_256) {
+ CCP_CMD_LEN(desc) = SHA256_DIGEST_SIZE;
+ } else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_384) {
+ dest_addr_t += (2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE);
+ CCP_CMD_LEN(desc) = SHA384_DIGEST_SIZE;
+ } else {
+ CCP_CMD_LEN(desc) = SHA512_DIGEST_SIZE;
+ }
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)dest_addr_t);
+ CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = (uint32_t)dest_addr;
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+ CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_sha3(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ struct ccp_desc *desc;
+ uint8_t *ctx_addr, *append_ptr;
+ uint32_t tail;
+ phys_addr_t src_addr, dest_addr, ctx_paddr;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ if (!append_ptr) {
+ CCP_LOG_ERR("CCP MBUF append failed\n");
+ return -1;
+ }
+ dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+ ctx_addr = session->auth.sha3_ctx;
+ ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ /* prepare desc for SHA3 operation */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+ CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_aes_cmac(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ struct ccp_passthru pst;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ uint8_t *src_tb, *append_ptr, *ctx_addr;
+ phys_addr_t src_addr, dest_addr, key_addr;
+ int length, non_align_len;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ key_addr = rte_mem_virt2phy(session->auth.key_ccp);
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+	append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+						session->auth.ctx_len);
+	if (!append_ptr) {
+		CCP_LOG_ERR("CCP MBUF append failed\n");
+		return -1;
+	}
+	dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+
+ function.raw = 0;
+ CCP_AES_ENCRYPT(&function) = CCP_CIPHER_DIR_ENCRYPT;
+ CCP_AES_MODE(&function) = session->auth.um.aes_mode;
+ CCP_AES_TYPE(&function) = session->auth.ut.aes_type;
+
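+	/*
+	 * CMAC: a block-aligned auth length is processed in a single AES
+	 * command; otherwise the block-aligned portion goes in one command
+	 * and the final partial block is padded with 0x80 and processed in
+	 * a second command.
+	 */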
+ if (op->sym->auth.data.length % session->auth.block_size == 0) {
+
+ ctx_addr = session->auth.pre_compute;
+ memset(ctx_addr, 0, AES_BLOCK_SIZE);
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ /* prepare desc for aes-cmac command */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail =
+ (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+ } else {
+ ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
+ memset(ctx_addr, 0, AES_BLOCK_SIZE);
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ length = (op->sym->auth.data.length / AES_BLOCK_SIZE);
+ length *= AES_BLOCK_SIZE;
+ non_align_len = op->sym->auth.data.length - length;
+ /* prepare desc for aes-cmac command */
+		/* Command 1: process the block-aligned portion */
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = length;
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+		/* Command 2: pad and process the final partial block */
+ append_ptr = append_ptr + CCP_SB_BYTES;
+ memset(append_ptr, 0, AES_BLOCK_SIZE);
+ src_tb = rte_pktmbuf_mtod_offset(op->sym->m_src,
+ uint8_t *,
+ op->sym->auth.data.offset +
+ length);
+ rte_memcpy(append_ptr, src_tb, non_align_len);
+ append_ptr[non_align_len] = CMAC_PAD_VALUE;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+ CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)(dest_addr + CCP_SB_BYTES));
+ CCP_CMD_SRC_HI(desc) = high32_value(dest_addr + CCP_SB_BYTES);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+ tail =
+ (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+ }
+ /* Retrieve result */
+ pst.dest_addr = dest_addr;
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_aes(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ uint8_t *lsb_buf;
+ struct ccp_passthru pst = {0};
+ struct ccp_desc *desc;
+ phys_addr_t src_addr, dest_addr, key_addr;
+ uint8_t *iv;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ function.raw = 0;
+
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
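+	/*
+	 * Every mode except ECB stages the IV into the queue's LSB IV slot
+	 * first: CTR reuses the per-session nonce buffer, the other modes
+	 * right-align the IV in a per-batch LSB staging buffer.
+	 */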
+ if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) {
+ if (session->cipher.um.aes_mode == CCP_AES_MODE_CTR) {
+ rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE,
+ iv, session->iv.length);
+ pst.src_addr = (phys_addr_t)session->cipher.nonce_phys;
+ CCP_AES_SIZE(&function) = 0x1F;
+ } else {
+ lsb_buf =
+ &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
+ rte_memcpy(lsb_buf +
+ (CCP_SB_BYTES - session->iv.length),
+ iv, session->iv.length);
+ pst.src_addr = b_info->lsb_buf_phys +
+ (b_info->lsb_buf_idx * CCP_SB_BYTES);
+ b_info->lsb_buf_idx++;
+ }
+
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+ }
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->cipher.data.offset);
+ if (likely(op->sym->m_dst != NULL))
+ dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ op->sym->cipher.data.offset);
+ else
+ dest_addr = src_addr;
+ key_addr = session->cipher.key_phys;
+
+ /* prepare desc for aes command */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+
+ CCP_AES_ENCRYPT(&function) = session->cipher.dir;
+ CCP_AES_MODE(&function) = session->cipher.um.aes_mode;
+ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB)
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_3des(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ unsigned char *lsb_buf;
+ struct ccp_passthru pst;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ uint8_t *iv;
+ phys_addr_t src_addr, dest_addr, key_addr;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+ switch (session->cipher.um.des_mode) {
+ case CCP_DES_MODE_CBC:
+ lsb_buf = &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
+ b_info->lsb_buf_idx++;
+
+ rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
+ iv, session->iv.length);
+
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+ break;
+ case CCP_DES_MODE_CFB:
+ case CCP_DES_MODE_ECB:
+ CCP_LOG_ERR("Unsupported DES cipher mode");
+ return -ENOTSUP;
+ }
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->cipher.data.offset);
+ if (unlikely(op->sym->m_dst != NULL))
+ dest_addr =
+ rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ op->sym->cipher.data.offset);
+ else
+ dest_addr = src_addr;
+
+ key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+ memset(desc, 0, Q_DESC_SIZE);
+
+ /* prepare desc for des command */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_3DES;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_DES_ENCRYPT(&function) = session->cipher.dir;
+ CCP_DES_MODE(&function) = session->cipher.um.des_mode;
+ CCP_DES_TYPE(&function) = session->cipher.ut.des_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ if (session->cipher.um.des_mode)
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ /* Write the new tail address back to the queue register */
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ /* Turn the queue back on using our cached control register */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_aes_gcm(struct rte_crypto_op *op, struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ uint8_t *iv;
+ struct ccp_passthru pst;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ uint64_t *temp;
+ phys_addr_t src_addr, dest_addr, key_addr, aad_addr;
+ phys_addr_t digest_dest_addr;
+ int length, non_align_len;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+ key_addr = session->cipher.key_phys;
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->aead.data.offset);
+ if (unlikely(op->sym->m_dst != NULL))
+ dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ op->sym->aead.data.offset);
+ else
+ dest_addr = src_addr;
+	if (unlikely(rte_pktmbuf_append(op->sym->m_src,
+					session->auth.ctx_len) == NULL)) {
+		CCP_LOG_ERR("CCP MBUF append failed\n");
+		return -1;
+	}
+ digest_dest_addr = op->sym->aead.digest.phys_addr;
+ temp = (uint64_t *)(op->sym->aead.digest.data + AES_BLOCK_SIZE);
+ *temp++ = rte_bswap64(session->auth.aad_length << 3);
+ *temp = rte_bswap64(op->sym->aead.data.length << 3);
+
+ non_align_len = op->sym->aead.data.length % AES_BLOCK_SIZE;
+ length = CCP_ALIGN(op->sym->aead.data.length, AES_BLOCK_SIZE);
+
+ aad_addr = op->sym->aead.aad.phys_addr;
+
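+	/*
+	 * AES-GCM is built from five CCP commands: load the IV, GHASH the
+	 * AAD, GCTR the payload, reload the IV, then a final GHASH over the
+	 * (AAD_len || PT_len) block to produce the tag.
+	 */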
+ /* CMD1 IV Passthru */
+ rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE, iv,
+ session->iv.length);
+ pst.src_addr = session->cipher.nonce_phys;
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ /* CMD2 GHASH-AAD */
+ function.raw = 0;
+ CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_AAD;
+ CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
+ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = session->auth.aad_length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)aad_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(aad_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* CMD3 : GCTR Plain text */
+ function.raw = 0;
+ CCP_AES_ENCRYPT(&function) = session->cipher.dir;
+ CCP_AES_MODE(&function) = CCP_AES_MODE_GCTR;
+ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+ if (non_align_len == 0)
+ CCP_AES_SIZE(&function) = (AES_BLOCK_SIZE << 3) - 1;
+ else
+ CCP_AES_SIZE(&function) = (non_align_len << 3) - 1;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* CMD4 : PT to copy IV */
+ pst.src_addr = session->cipher.nonce_phys;
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = AES_BLOCK_SIZE;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ /* CMD5 : GHASH-Final */
+ function.raw = 0;
+ CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_FINAL;
+ CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
+ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+	/* Last block (AAD_len || PT_len) */
+ CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)digest_dest_addr + AES_BLOCK_SIZE);
+ CCP_CMD_SRC_HI(desc) = high32_value(digest_dest_addr + AES_BLOCK_SIZE);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)digest_dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(digest_dest_addr);
+	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static inline int
+ccp_crypto_cipher(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+ int result = 0;
+ struct ccp_session *session;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ switch (session->cipher.algo) {
+ case CCP_CIPHER_ALGO_AES_CBC:
+ result = ccp_perform_aes(op, cmd_q, b_info);
+ b_info->desccnt += 2;
+ break;
+ case CCP_CIPHER_ALGO_AES_CTR:
+ result = ccp_perform_aes(op, cmd_q, b_info);
+ b_info->desccnt += 2;
+ break;
+ case CCP_CIPHER_ALGO_AES_ECB:
+ result = ccp_perform_aes(op, cmd_q, b_info);
+ b_info->desccnt += 1;
+ break;
+ case CCP_CIPHER_ALGO_3DES_CBC:
+ result = ccp_perform_3des(op, cmd_q, b_info);
+ b_info->desccnt += 2;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cipher algo %d",
+ session->cipher.algo);
+ return -ENOTSUP;
+ }
+ return result;
+}
+
+static inline int
+ccp_crypto_auth(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+ int result = 0;
+ struct ccp_session *session;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ switch (session->auth.algo) {
+ case CCP_AUTH_ALGO_SHA1:
+ case CCP_AUTH_ALGO_SHA224:
+ case CCP_AUTH_ALGO_SHA256:
+ case CCP_AUTH_ALGO_SHA384:
+ case CCP_AUTH_ALGO_SHA512:
+ result = ccp_perform_sha(op, cmd_q);
+ b_info->desccnt += 3;
+ break;
+ case CCP_AUTH_ALGO_MD5_HMAC:
+ if (session->auth_opt == 0)
+ result = -1;
+ break;
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ case CCP_AUTH_ALGO_SHA256_HMAC:
+ if (session->auth_opt == 0) {
+ result = ccp_perform_hmac(op, cmd_q);
+ b_info->desccnt += 6;
+ }
+ break;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ case CCP_AUTH_ALGO_SHA512_HMAC:
+ if (session->auth_opt == 0) {
+ result = ccp_perform_hmac(op, cmd_q);
+ b_info->desccnt += 7;
+ }
+ break;
+ case CCP_AUTH_ALGO_SHA3_224:
+ case CCP_AUTH_ALGO_SHA3_256:
+ case CCP_AUTH_ALGO_SHA3_384:
+ case CCP_AUTH_ALGO_SHA3_512:
+ result = ccp_perform_sha3(op, cmd_q);
+ b_info->desccnt += 1;
+ break;
+ case CCP_AUTH_ALGO_SHA3_224_HMAC:
+ case CCP_AUTH_ALGO_SHA3_256_HMAC:
+ result = ccp_perform_sha3_hmac(op, cmd_q);
+ b_info->desccnt += 3;
+ break;
+ case CCP_AUTH_ALGO_SHA3_384_HMAC:
+ case CCP_AUTH_ALGO_SHA3_512_HMAC:
+ result = ccp_perform_sha3_hmac(op, cmd_q);
+ b_info->desccnt += 4;
+ break;
+ case CCP_AUTH_ALGO_AES_CMAC:
+ result = ccp_perform_aes_cmac(op, cmd_q);
+ b_info->desccnt += 4;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported auth algo %d",
+ session->auth.algo);
+ return -ENOTSUP;
+ }
+
+ return result;
+}
+
+static inline int
+ccp_crypto_aead(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+ int result = 0;
+ struct ccp_session *session;
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ switch (session->auth.algo) {
+ case CCP_AUTH_ALGO_AES_GCM:
+ if (session->cipher.algo != CCP_CIPHER_ALGO_AES_GCM) {
+ CCP_LOG_ERR("Incorrect chain order");
+ return -1;
+ }
+ result = ccp_perform_aes_gcm(op, cmd_q);
+ b_info->desccnt += 5;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported aead algo %d",
+ session->aead_algo);
+ return -ENOTSUP;
+ }
+ return result;
+}
+
+int
+process_ops_to_enqueue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ struct ccp_queue *cmd_q,
+ uint16_t nb_ops,
+ int slots_req)
+{
+ int i, result = 0;
+ struct ccp_batch_info *b_info;
+ struct ccp_session *session;
+ EVP_MD_CTX *auth_ctx = NULL;
+
+ if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
+ CCP_LOG_ERR("batch info allocation failed");
+ return 0;
+ }
+
+	auth_ctx = EVP_MD_CTX_create();
+	if (unlikely(!auth_ctx)) {
+		CCP_LOG_ERR("Unable to create auth ctx");
+		rte_mempool_put(qp->batch_mp, (void *)b_info);
+		return 0;
+	}
+ b_info->auth_ctr = 0;
+
+ /* populate batch info necessary for dequeue */
+ b_info->op_idx = 0;
+ b_info->lsb_buf_idx = 0;
+ b_info->desccnt = 0;
+ b_info->cmd_q = cmd_q;
+ b_info->lsb_buf_phys =
+ (phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
+ rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
+
+ b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
+ Q_DESC_SIZE);
+ for (i = 0; i < nb_ops; i++) {
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op[i]->sym->session,
+ ccp_cryptodev_driver_id);
+ switch (session->cmd_id) {
+ case CCP_CMD_CIPHER:
+ result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_AUTH:
+ if (session->auth_opt) {
+ b_info->auth_ctr++;
+ result = cpu_crypto_auth(qp, op[i],
+ session, auth_ctx);
+ } else
+ result = ccp_crypto_auth(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+ if (result)
+ break;
+ result = ccp_crypto_auth(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_HASH_CIPHER:
+ if (session->auth_opt) {
+ result = cpu_crypto_auth(qp, op[i],
+ session, auth_ctx);
+ if (op[i]->status !=
+ RTE_CRYPTO_OP_STATUS_SUCCESS)
+ continue;
+ } else
+ result = ccp_crypto_auth(op[i], cmd_q, b_info);
+
+ if (result)
+ break;
+ result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_COMBINED:
+ result = ccp_crypto_aead(op[i], cmd_q, b_info);
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+ result = -1;
+ }
+ if (unlikely(result < 0)) {
+ rte_atomic64_add(&b_info->cmd_q->free_slots,
+ (slots_req - b_info->desccnt));
+ break;
+ }
+ b_info->op[i] = op[i];
+ }
+
+ b_info->opcnt = i;
+ b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
+ Q_DESC_SIZE);
+
+ rte_wmb();
+ /* Write the new tail address back to the queue register */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
+ b_info->tail_offset);
+ /* Turn the queue back on using our cached control register */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
+
+ EVP_MD_CTX_destroy(auth_ctx);
+ return i;
+}
+
+static inline void
+ccp_auth_dq_prepare(struct rte_crypto_op *op)
+{
+ struct ccp_session *session;
+ uint8_t *digest_data, *addr;
+ struct rte_mbuf *m_last;
+ int offset, digest_offset;
+ uint8_t digest_le[64];
+
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ if (session->cmd_id == CCP_CMD_COMBINED) {
+ digest_data = op->sym->aead.digest.data;
+ digest_offset = op->sym->aead.data.offset +
+ op->sym->aead.data.length;
+ } else {
+ digest_data = op->sym->auth.digest.data;
+ digest_offset = op->sym->auth.data.offset +
+ op->sym->auth.data.length;
+ }
+ m_last = rte_pktmbuf_lastseg(op->sym->m_src);
+ addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
+ m_last->data_len - session->auth.ctx_len);
+
+ rte_mb();
+ offset = session->auth.offset;
+
+ if (session->auth.engine == CCP_ENGINE_SHA)
+ if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
+ (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
+ (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
+ /* All other algorithms require byte
+ * swap done by host
+ */
+ unsigned int i;
+
+ offset = session->auth.ctx_len -
+ session->auth.offset - 1;
+ for (i = 0; i < session->auth.digest_length; i++)
+ digest_le[i] = addr[offset - i];
+ offset = 0;
+ addr = digest_le;
+ }
+
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ if (session->auth.op == CCP_AUTH_OP_VERIFY) {
+ if (memcmp(addr + offset, digest_data,
+ session->auth.digest_length) != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+ } else {
+ if (unlikely(digest_data == 0))
+ digest_data = rte_pktmbuf_mtod_offset(
+ op->sym->m_dst, uint8_t *,
+ digest_offset);
+ rte_memcpy(digest_data, addr + offset,
+ session->auth.digest_length);
+ }
+ /* Trim area used for digest from mbuf. */
+ rte_pktmbuf_trim(op->sym->m_src,
+ session->auth.ctx_len);
+}
+
+static int
+ccp_prepare_ops(struct ccp_qp *qp,
+ struct rte_crypto_op **op_d,
+ struct ccp_batch_info *b_info,
+ uint16_t nb_ops)
+{
+ int i, min_ops;
+ struct ccp_session *session;
+
+ EVP_MD_CTX *auth_ctx = NULL;
+
+ auth_ctx = EVP_MD_CTX_create();
+ if (unlikely(!auth_ctx)) {
+ CCP_LOG_ERR("Unable to create auth ctx");
+ return 0;
+ }
+ min_ops = RTE_MIN(nb_ops, b_info->opcnt);
+
+ for (i = 0; i < min_ops; i++) {
+ op_d[i] = b_info->op[b_info->op_idx++];
+ session = (struct ccp_session *)get_sym_session_private_data(
+ op_d[i]->sym->session,
+ ccp_cryptodev_driver_id);
+ switch (session->cmd_id) {
+ case CCP_CMD_CIPHER:
+ op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ break;
+ case CCP_CMD_AUTH:
+ if (session->auth_opt == 0)
+ ccp_auth_dq_prepare(op_d[i]);
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ if (session->auth_opt)
+ cpu_crypto_auth(qp, op_d[i],
+ session, auth_ctx);
+ else
+ ccp_auth_dq_prepare(op_d[i]);
+ break;
+ case CCP_CMD_HASH_CIPHER:
+ if (session->auth_opt)
+ op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ else
+ ccp_auth_dq_prepare(op_d[i]);
+ break;
+ case CCP_CMD_COMBINED:
+ ccp_auth_dq_prepare(op_d[i]);
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+ }
+ }
+
+ EVP_MD_CTX_destroy(auth_ctx);
+ b_info->opcnt -= min_ops;
+ return min_ops;
+}
+
+int
+process_ops_to_dequeue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ uint16_t nb_ops)
+{
+ struct ccp_batch_info *b_info;
+ uint32_t cur_head_offset;
+
+ if (qp->b_info != NULL) {
+ b_info = qp->b_info;
+ if (unlikely(b_info->op_idx > 0))
+ goto success;
+ } else if (rte_ring_dequeue(qp->processed_pkts,
+ (void **)&b_info))
+ return 0;
+
+ if (b_info->auth_ctr == b_info->opcnt)
+ goto success;
+ cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
+ CMD_Q_HEAD_LO_BASE);
+
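+	/*
+	 * The batch is still in flight while the hardware head pointer lies
+	 * inside this batch's [head, tail) window; the two branches below
+	 * handle wrap-around of the descriptor ring.
+	 */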
+ if (b_info->head_offset < b_info->tail_offset) {
+ if ((cur_head_offset >= b_info->head_offset) &&
+ (cur_head_offset < b_info->tail_offset)) {
+ qp->b_info = b_info;
+ return 0;
+ }
+ } else {
+ if ((cur_head_offset >= b_info->head_offset) ||
+ (cur_head_offset < b_info->tail_offset)) {
+ qp->b_info = b_info;
+ return 0;
+ }
+ }
+
+success:
+ nb_ops = ccp_prepare_ops(qp, op, b_info, nb_ops);
+ rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
+ b_info->desccnt = 0;
+ if (b_info->opcnt > 0) {
+ qp->b_info = b_info;
+ } else {
+ rte_mempool_put(qp->batch_mp, (void *)b_info);
+ qp->b_info = NULL;
+ }
+
+ return nb_ops;
+}
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
new file mode 100644
index 00000000..882b398a
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -0,0 +1,388 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_CRYPTO_H_
+#define _CCP_CRYPTO_H_
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_spinlock.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+
+#include "ccp_dev.h"
+
+#define AES_BLOCK_SIZE 16
+#define CMAC_PAD_VALUE 0x80
+#define CTR_NONCE_SIZE 4
+#define CTR_IV_SIZE 8
+#define CCP_SHA3_CTX_SIZE 200
+
+/** Macro helpers for CCP command creation */
+#define CCP_AES_SIZE(p) ((p)->aes.size)
+#define CCP_AES_ENCRYPT(p) ((p)->aes.encrypt)
+#define CCP_AES_MODE(p) ((p)->aes.mode)
+#define CCP_AES_TYPE(p) ((p)->aes.type)
+#define CCP_DES_ENCRYPT(p) ((p)->des.encrypt)
+#define CCP_DES_MODE(p) ((p)->des.mode)
+#define CCP_DES_TYPE(p) ((p)->des.type)
+#define CCP_SHA_TYPE(p) ((p)->sha.type)
+#define CCP_PT_BYTESWAP(p) ((p)->pt.byteswap)
+#define CCP_PT_BITWISE(p) ((p)->pt.bitwise)
+
+/* HMAC */
+#define HMAC_IPAD_VALUE 0x36
+#define HMAC_OPAD_VALUE 0x5c
+
+/* MD5 */
+#define MD5_DIGEST_SIZE 16
+#define MD5_BLOCK_SIZE 64
+
+/* SHA */
+#define SHA_COMMON_DIGEST_SIZE 32
+#define SHA1_DIGEST_SIZE 20
+#define SHA1_BLOCK_SIZE 64
+
+#define SHA224_DIGEST_SIZE 28
+#define SHA224_BLOCK_SIZE 64
+#define SHA3_224_BLOCK_SIZE 144
+
+#define SHA256_DIGEST_SIZE 32
+#define SHA256_BLOCK_SIZE 64
+#define SHA3_256_BLOCK_SIZE 136
+
+#define SHA384_DIGEST_SIZE 48
+#define SHA384_BLOCK_SIZE 128
+#define SHA3_384_BLOCK_SIZE 104
+
+#define SHA512_DIGEST_SIZE 64
+#define SHA512_BLOCK_SIZE 128
+#define SHA3_512_BLOCK_SIZE 72
+
+/* Maximum length for digest */
+#define DIGEST_LENGTH_MAX 64
+
+/* SHA LSB initialization values */
+
+#define SHA1_H0 0x67452301UL
+#define SHA1_H1 0xefcdab89UL
+#define SHA1_H2 0x98badcfeUL
+#define SHA1_H3 0x10325476UL
+#define SHA1_H4 0xc3d2e1f0UL
+
+#define SHA224_H0 0xc1059ed8UL
+#define SHA224_H1 0x367cd507UL
+#define SHA224_H2 0x3070dd17UL
+#define SHA224_H3 0xf70e5939UL
+#define SHA224_H4 0xffc00b31UL
+#define SHA224_H5 0x68581511UL
+#define SHA224_H6 0x64f98fa7UL
+#define SHA224_H7 0xbefa4fa4UL
+
+#define SHA256_H0 0x6a09e667UL
+#define SHA256_H1 0xbb67ae85UL
+#define SHA256_H2 0x3c6ef372UL
+#define SHA256_H3 0xa54ff53aUL
+#define SHA256_H4 0x510e527fUL
+#define SHA256_H5 0x9b05688cUL
+#define SHA256_H6 0x1f83d9abUL
+#define SHA256_H7 0x5be0cd19UL
+
+#define SHA384_H0 0xcbbb9d5dc1059ed8ULL
+#define SHA384_H1 0x629a292a367cd507ULL
+#define SHA384_H2 0x9159015a3070dd17ULL
+#define SHA384_H3 0x152fecd8f70e5939ULL
+#define SHA384_H4 0x67332667ffc00b31ULL
+#define SHA384_H5 0x8eb44a8768581511ULL
+#define SHA384_H6 0xdb0c2e0d64f98fa7ULL
+#define SHA384_H7 0x47b5481dbefa4fa4ULL
+
+#define SHA512_H0 0x6a09e667f3bcc908ULL
+#define SHA512_H1 0xbb67ae8584caa73bULL
+#define SHA512_H2 0x3c6ef372fe94f82bULL
+#define SHA512_H3 0xa54ff53a5f1d36f1ULL
+#define SHA512_H4 0x510e527fade682d1ULL
+#define SHA512_H5 0x9b05688c2b3e6c1fULL
+#define SHA512_H6 0x1f83d9abfb41bd6bULL
+#define SHA512_H7 0x5be0cd19137e2179ULL
+
+/**
+ * CCP supported AES modes
+ */
+enum ccp_aes_mode {
+ CCP_AES_MODE_ECB = 0,
+ CCP_AES_MODE_CBC,
+ CCP_AES_MODE_OFB,
+ CCP_AES_MODE_CFB,
+ CCP_AES_MODE_CTR,
+ CCP_AES_MODE_CMAC,
+ CCP_AES_MODE_GHASH,
+ CCP_AES_MODE_GCTR,
+ CCP_AES_MODE__LAST,
+};
+
+/**
+ * CCP AES GHASH mode
+ */
+enum ccp_aes_ghash_mode {
+ CCP_AES_MODE_GHASH_AAD = 0,
+ CCP_AES_MODE_GHASH_FINAL
+};
+
+/**
+ * CCP supported AES types
+ */
+enum ccp_aes_type {
+ CCP_AES_TYPE_128 = 0,
+ CCP_AES_TYPE_192,
+ CCP_AES_TYPE_256,
+ CCP_AES_TYPE__LAST,
+};
+
+/***** 3DES engine *****/
+
+/**
+ * CCP supported DES/3DES modes
+ */
+enum ccp_des_mode {
+ CCP_DES_MODE_ECB = 0, /* Not supported */
+ CCP_DES_MODE_CBC,
+ CCP_DES_MODE_CFB,
+};
+
+/**
+ * CCP supported DES types
+ */
+enum ccp_des_type {
+ CCP_DES_TYPE_128 = 0, /* 112 + 16 parity */
+ CCP_DES_TYPE_192, /* 168 + 24 parity */
+ CCP_DES_TYPE__LAST,
+};
+
+/***** SHA engine *****/
+
+/**
+ * ccp_sha_type - type of SHA operation
+ *
+ * @CCP_SHA_TYPE_1: SHA-1 operation
+ * @CCP_SHA_TYPE_224: SHA-224 operation
+ * @CCP_SHA_TYPE_256: SHA-256 operation
+ */
+enum ccp_sha_type {
+ CCP_SHA_TYPE_1 = 1,
+ CCP_SHA_TYPE_224,
+ CCP_SHA_TYPE_256,
+ CCP_SHA_TYPE_384,
+ CCP_SHA_TYPE_512,
+ CCP_SHA_TYPE_RSVD1,
+ CCP_SHA_TYPE_RSVD2,
+ CCP_SHA3_TYPE_224,
+ CCP_SHA3_TYPE_256,
+ CCP_SHA3_TYPE_384,
+ CCP_SHA3_TYPE_512,
+ CCP_SHA_TYPE__LAST,
+};
+
+/**
+ * CCP supported cipher algorithms
+ */
+enum ccp_cipher_algo {
+ CCP_CIPHER_ALGO_AES_CBC = 0,
+ CCP_CIPHER_ALGO_AES_ECB,
+ CCP_CIPHER_ALGO_AES_CTR,
+ CCP_CIPHER_ALGO_AES_GCM,
+ CCP_CIPHER_ALGO_3DES_CBC,
+};
+
+/**
+ * CCP cipher operation type
+ */
+enum ccp_cipher_dir {
+ CCP_CIPHER_DIR_DECRYPT = 0,
+ CCP_CIPHER_DIR_ENCRYPT = 1,
+};
+
+/**
+ * CCP supported hash algorithms
+ */
+enum ccp_hash_algo {
+ CCP_AUTH_ALGO_SHA1 = 0,
+ CCP_AUTH_ALGO_SHA1_HMAC,
+ CCP_AUTH_ALGO_SHA224,
+ CCP_AUTH_ALGO_SHA224_HMAC,
+ CCP_AUTH_ALGO_SHA3_224,
+ CCP_AUTH_ALGO_SHA3_224_HMAC,
+ CCP_AUTH_ALGO_SHA256,
+ CCP_AUTH_ALGO_SHA256_HMAC,
+ CCP_AUTH_ALGO_SHA3_256,
+ CCP_AUTH_ALGO_SHA3_256_HMAC,
+ CCP_AUTH_ALGO_SHA384,
+ CCP_AUTH_ALGO_SHA384_HMAC,
+ CCP_AUTH_ALGO_SHA3_384,
+ CCP_AUTH_ALGO_SHA3_384_HMAC,
+ CCP_AUTH_ALGO_SHA512,
+ CCP_AUTH_ALGO_SHA512_HMAC,
+ CCP_AUTH_ALGO_SHA3_512,
+ CCP_AUTH_ALGO_SHA3_512_HMAC,
+ CCP_AUTH_ALGO_AES_CMAC,
+ CCP_AUTH_ALGO_AES_GCM,
+ CCP_AUTH_ALGO_MD5_HMAC,
+};
+
+/**
+ * CCP hash operation type
+ */
+enum ccp_hash_op {
+ CCP_AUTH_OP_GENERATE = 0,
+ CCP_AUTH_OP_VERIFY = 1,
+};
+
+/* CCP crypto private session structure */
+struct ccp_session {
+ bool auth_opt;
+ enum ccp_cmd_order cmd_id;
+ /**< chain order mode */
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv;
+ /**< IV parameters */
+ struct {
+ enum ccp_cipher_algo algo;
+ enum ccp_engine engine;
+ union {
+ enum ccp_aes_mode aes_mode;
+ enum ccp_des_mode des_mode;
+ } um;
+ union {
+ enum ccp_aes_type aes_type;
+ enum ccp_des_type des_type;
+ } ut;
+ enum ccp_cipher_dir dir;
+ uint64_t key_length;
+ /**< max cipher key size 256 bits */
+ uint8_t key[32];
+	/** key stored in CCP format */
+ uint8_t key_ccp[32];
+ phys_addr_t key_phys;
+	/** AES-CTR: nonce(4) || IV(8) || counter */
+ uint8_t nonce[32];
+ phys_addr_t nonce_phys;
+ } cipher;
+ /**< Cipher Parameters */
+
+ struct {
+ enum ccp_hash_algo algo;
+ enum ccp_engine engine;
+ union {
+ enum ccp_aes_mode aes_mode;
+ } um;
+ union {
+ enum ccp_sha_type sha_type;
+ enum ccp_aes_type aes_type;
+ } ut;
+ enum ccp_hash_op op;
+ uint64_t key_length;
+	/**< max hash key size 144 bytes (struct capabilities) */
+ uint8_t key[144];
+	/**< max big-endian AES key size is 32 bytes */
+ uint8_t key_ccp[32];
+ phys_addr_t key_phys;
+ uint64_t digest_length;
+ void *ctx;
+ int ctx_len;
+ int offset;
+ int block_size;
+	/**< Buffer to store software generated precompute values */
+	/**< For HMAC: H(ipad ^ key) and H(opad ^ key) */
+	/**< For CMAC: K1 IV and K2 IV */
+ uint8_t pre_compute[2 * CCP_SHA3_CTX_SIZE];
+	/**< SHA3 initial ctx, all zeros */
+ uint8_t sha3_ctx[200];
+ int aad_length;
+ } auth;
+ /**< Authentication Parameters */
+ enum rte_crypto_aead_algorithm aead_algo;
+ /**< AEAD Algorithm */
+
+ uint32_t reserved;
+} __rte_cache_aligned;
+
+extern uint8_t ccp_cryptodev_driver_id;
+
+struct ccp_qp;
+struct ccp_private;
+
+/**
+ * Set and validate CCP crypto session parameters
+ *
+ * @param sess ccp private session
+ * @param xform crypto xform for this session
+ * @param internals ccp device PMD private data
+ * @return 0 on success otherwise -1
+ */
+int ccp_set_session_parameters(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform,
+ struct ccp_private *internals);
+
+/**
+ * Compute the number of CCP descriptor slots required by a session
+ *
+ * @param session CCP private session
+ * @return number of descriptor slots required per op for this session
+ */
+int ccp_compute_slot_count(struct ccp_session *session);
+
+/**
+ * Process crypto ops to be enqueued
+ *
+ * @param qp CCP crypto queue-pair
+ * @param op crypto ops table
+ * @param cmd_q CCP cmd queue
+ * @param nb_ops No. of ops to be submitted
+ * @param slots_req No. of descriptor slots reserved for this batch
+ * @return number of ops actually submitted
+ */
+int process_ops_to_enqueue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ struct ccp_queue *cmd_q,
+ uint16_t nb_ops,
+ int slots_req);
+
+/**
+ * Process crypto ops to be dequeued
+ *
+ * @param qp CCP crypto queue-pair
+ * @param op crypto ops table
+ * @param nb_ops requested no. of ops
+ * @return number of ops actually dequeued
+ */
+int process_ops_to_dequeue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ uint16_t nb_ops);
+
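+/*
+ * Illustrative usage (a hedged sketch, not code from this driver): a PMD
+ * burst-enqueue path would typically sum the per-op slot requirement, pick
+ * a hardware queue with enough free descriptors and hand the ops to
+ * process_ops_to_enqueue(); the burst-dequeue path then polls
+ * process_ops_to_dequeue(). Variable names below are illustrative only.
+ *
+ *   slots_req += ccp_compute_slot_count(sess);      (for each op)
+ *   cmd_q = ccp_allot_queue(cdev, slots_req);
+ *   if (cmd_q != NULL)
+ *       enq = process_ops_to_enqueue(qp, ops, cmd_q, nb_ops, slots_req);
+ *   ...
+ *   deq = process_ops_to_dequeue(qp, ops, nb_ops);
+ */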
+
+/**
+ * APIs for SHA3 partial hash generation
+ * @param data_in buffer over which the partial hash is computed
+ * @param data_out partial hash result, written in CCP big-endian format
+ */
+int partial_hash_sha3_224(uint8_t *data_in,
+ uint8_t *data_out);
+
+int partial_hash_sha3_256(uint8_t *data_in,
+ uint8_t *data_out);
+
+int partial_hash_sha3_384(uint8_t *data_in,
+ uint8_t *data_out);
+
+int partial_hash_sha3_512(uint8_t *data_in,
+ uint8_t *data_out);
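+
+/*
+ * Illustrative usage (a hedged sketch): for SHA3 HMAC the session setup
+ * precomputes partial hashes of the ipad- and opad-masked key blocks into
+ * the session pre_compute buffer, roughly:
+ *
+ *   memset(ipad, HMAC_IPAD_VALUE, SHA3_256_BLOCK_SIZE);
+ *   for (i = 0; i < key_length; i++)
+ *       ipad[i] ^= key[i];
+ *   partial_hash_sha3_256(ipad, sess->auth.pre_compute);
+ */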
+
+#endif /* _CCP_CRYPTO_H_ */
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
new file mode 100644
index 00000000..80fe6a45
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -0,0 +1,810 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <sys/file.h>
+#include <unistd.h>
+
+#include <rte_hexdump.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+
+#include "ccp_dev.h"
+#include "ccp_pci.h"
+#include "ccp_pmd_private.h"
+
+struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
+static int ccp_dev_id;
+
+int
+ccp_dev_start(struct rte_cryptodev *dev)
+{
+ struct ccp_private *priv = dev->data->dev_private;
+
+ priv->last_dev = TAILQ_FIRST(&ccp_list);
+ return 0;
+}
+
+struct ccp_queue *
+ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req)
+{
+ int i, ret = 0;
+ struct ccp_device *dev;
+ struct ccp_private *priv = cdev->data->dev_private;
+
+ dev = TAILQ_NEXT(priv->last_dev, next);
+ if (unlikely(dev == NULL))
+ dev = TAILQ_FIRST(&ccp_list);
+ priv->last_dev = dev;
+ if (dev->qidx >= dev->cmd_q_count)
+ dev->qidx = 0;
+ ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
+ if (ret >= slot_req)
+ return &dev->cmd_q[dev->qidx];
+ for (i = 0; i < dev->cmd_q_count; i++) {
+ dev->qidx++;
+ if (dev->qidx >= dev->cmd_q_count)
+ dev->qidx = 0;
+ ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
+ if (ret >= slot_req)
+ return &dev->cmd_q[dev->qidx];
+ }
+ return NULL;
+}
+
+int
+ccp_read_hwrng(uint32_t *value)
+{
+ struct ccp_device *dev;
+
+ TAILQ_FOREACH(dev, &ccp_list, next) {
+ void *vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+ while (dev->hwrng_retries++ < CCP_MAX_TRNG_RETRIES) {
+ *value = CCP_READ_REG(vaddr, TRNG_OUT_REG);
+ if (*value) {
+ dev->hwrng_retries = 0;
+ return 0;
+ }
+ }
+ dev->hwrng_retries = 0;
+ }
+ return -1;
+}
+
+static const struct rte_memzone *
+ccp_queue_dma_zone_reserve(const char *queue_name,
+ uint32_t queue_size,
+ int socket_id)
+{
+ const struct rte_memzone *mz;
+
+ mz = rte_memzone_lookup(queue_name);
+ if (mz != 0) {
+ if (((size_t)queue_size <= mz->len) &&
+ ((socket_id == SOCKET_ID_ANY) ||
+ (socket_id == mz->socket_id))) {
+ CCP_LOG_INFO("re-use memzone already "
+ "allocated for %s", queue_name);
+ return mz;
+ }
+ CCP_LOG_ERR("Incompatible memzone already "
+ "allocated %s, size %u, socket %d. "
+ "Requested size %u, socket %u",
+ queue_name, (uint32_t)mz->len,
+ mz->socket_id, queue_size, socket_id);
+ return NULL;
+ }
+
+ CCP_LOG_INFO("Allocate memzone for %s, size %u on socket %u",
+ queue_name, queue_size, socket_id);
+
+ return rte_memzone_reserve_aligned(queue_name, queue_size,
+ socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
+}
+
+/* bitmap support APIs */
+static inline void
+ccp_set_bit(unsigned long *bitmap, int n)
+{
+ __sync_fetch_and_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n)));
+}
+
+static inline void
+ccp_clear_bit(unsigned long *bitmap, int n)
+{
+ __sync_fetch_and_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n)));
+}
+
+static inline uint32_t
+ccp_get_bit(unsigned long *bitmap, int n)
+{
+	return ((bitmap[WORD_OFFSET(n)] & (1UL << BIT_OFFSET(n))) != 0);
+}
+
+
+static inline uint32_t
+ccp_ffz(unsigned long word)
+{
+ unsigned long first_zero;
+
+ first_zero = __builtin_ffsl(~word);
+ return first_zero ? (first_zero - 1) :
+ BITS_PER_WORD;
+}
+
+static inline uint32_t
+ccp_find_first_zero_bit(unsigned long *addr, uint32_t limit)
+{
+ uint32_t i;
+ uint32_t nwords = 0;
+
+ nwords = (limit - 1) / BITS_PER_WORD + 1;
+ for (i = 0; i < nwords; i++) {
+ if (addr[i] == 0UL)
+ return i * BITS_PER_WORD;
+ if (addr[i] < ~(0UL))
+ break;
+ }
+ return (i == nwords) ? limit : i * BITS_PER_WORD + ccp_ffz(addr[i]);
+}
+
+static void
+ccp_bitmap_set(unsigned long *map, unsigned int start, int len)
+{
+ unsigned long *p = map + WORD_OFFSET(start);
+ const unsigned int size = start + len;
+ int bits_to_set = BITS_PER_WORD - (start % BITS_PER_WORD);
+ unsigned long mask_to_set = CCP_BITMAP_FIRST_WORD_MASK(start);
+
+ while (len - bits_to_set >= 0) {
+ *p |= mask_to_set;
+ len -= bits_to_set;
+ bits_to_set = BITS_PER_WORD;
+ mask_to_set = ~0UL;
+ p++;
+ }
+ if (len) {
+ mask_to_set &= CCP_BITMAP_LAST_WORD_MASK(size);
+ *p |= mask_to_set;
+ }
+}
+
+static void
+ccp_bitmap_clear(unsigned long *map, unsigned int start, int len)
+{
+ unsigned long *p = map + WORD_OFFSET(start);
+ const unsigned int size = start + len;
+ int bits_to_clear = BITS_PER_WORD - (start % BITS_PER_WORD);
+ unsigned long mask_to_clear = CCP_BITMAP_FIRST_WORD_MASK(start);
+
+ while (len - bits_to_clear >= 0) {
+ *p &= ~mask_to_clear;
+ len -= bits_to_clear;
+ bits_to_clear = BITS_PER_WORD;
+ mask_to_clear = ~0UL;
+ p++;
+ }
+ if (len) {
+ mask_to_clear &= CCP_BITMAP_LAST_WORD_MASK(size);
+ *p &= ~mask_to_clear;
+ }
+}
+
+
+static unsigned long
+_ccp_find_next_bit(const unsigned long *addr,
+ unsigned long nbits,
+ unsigned long start,
+ unsigned long invert)
+{
+ unsigned long tmp;
+
+ if (!nbits || start >= nbits)
+ return nbits;
+
+ tmp = addr[start / BITS_PER_WORD] ^ invert;
+
+ /* Handle 1st word. */
+ tmp &= CCP_BITMAP_FIRST_WORD_MASK(start);
+ start = ccp_round_down(start, BITS_PER_WORD);
+
+ while (!tmp) {
+ start += BITS_PER_WORD;
+ if (start >= nbits)
+ return nbits;
+
+ tmp = addr[start / BITS_PER_WORD] ^ invert;
+ }
+
+ return RTE_MIN(start + (ffs(tmp) - 1), nbits);
+}
+
+static unsigned long
+ccp_find_next_bit(const unsigned long *addr,
+ unsigned long size,
+ unsigned long offset)
+{
+ return _ccp_find_next_bit(addr, size, offset, 0UL);
+}
+
+static unsigned long
+ccp_find_next_zero_bit(const unsigned long *addr,
+ unsigned long size,
+ unsigned long offset)
+{
+ return _ccp_find_next_bit(addr, size, offset, ~0UL);
+}
+
+/**
+ * ccp_bitmap_find_next_zero_area - find a contiguous aligned zero area
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bit number to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ */
+static unsigned long
+ccp_bitmap_find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr)
+{
+ unsigned long index, end, i;
+
+again:
+ index = ccp_find_next_zero_bit(map, size, start);
+
+ end = index + nr;
+ if (end > size)
+ return end;
+ i = ccp_find_next_bit(map, end, index);
+ if (i < end) {
+ start = i + 1;
+ goto again;
+ }
+ return index;
+}
+
+static uint32_t
+ccp_lsb_alloc(struct ccp_queue *cmd_q, unsigned int count)
+{
+ struct ccp_device *ccp;
+ int start;
+
+ /* First look at the map for the queue */
+ if (cmd_q->lsb >= 0) {
+ start = (uint32_t)ccp_bitmap_find_next_zero_area(cmd_q->lsbmap,
+ LSB_SIZE, 0,
+ count);
+ if (start < LSB_SIZE) {
+ ccp_bitmap_set(cmd_q->lsbmap, start, count);
+ return start + cmd_q->lsb * LSB_SIZE;
+ }
+ }
+
+ /* try to get an entry from the shared blocks */
+ ccp = cmd_q->dev;
+
+ rte_spinlock_lock(&ccp->lsb_lock);
+
+ start = (uint32_t)ccp_bitmap_find_next_zero_area(ccp->lsbmap,
+ MAX_LSB_CNT * LSB_SIZE,
+ 0, count);
+ if (start <= MAX_LSB_CNT * LSB_SIZE) {
+ ccp_bitmap_set(ccp->lsbmap, start, count);
+ rte_spinlock_unlock(&ccp->lsb_lock);
+ return start * LSB_ITEM_SIZE;
+ }
+ CCP_LOG_ERR("NO LSBs available");
+
+ rte_spinlock_unlock(&ccp->lsb_lock);
+
+ return 0;
+}
+
+static void __rte_unused
+ccp_lsb_free(struct ccp_queue *cmd_q,
+ unsigned int start,
+ unsigned int count)
+{
+ int lsbno = start / LSB_SIZE;
+
+ if (!start)
+ return;
+
+ if (cmd_q->lsb == lsbno) {
+ /* An entry from the private LSB */
+ ccp_bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
+ } else {
+ /* From the shared LSBs */
+ struct ccp_device *ccp = cmd_q->dev;
+
+ rte_spinlock_lock(&ccp->lsb_lock);
+ ccp_bitmap_clear(ccp->lsbmap, start, count);
+ rte_spinlock_unlock(&ccp->lsb_lock);
+ }
+}
+
+static int
+ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status)
+{
+ int q_mask = 1 << cmd_q->id;
+ int weight = 0;
+ int j;
+
+	/* Build a bit mask of the LSBs this queue has access to.
+	 * Segment 0 is skipped as it has special privileges.
+	 */
+ cmd_q->lsbmask = 0;
+ status >>= LSB_REGION_WIDTH;
+ for (j = 1; j < MAX_LSB_CNT; j++) {
+ if (status & q_mask)
+ ccp_set_bit(&cmd_q->lsbmask, j);
+
+ status >>= LSB_REGION_WIDTH;
+ }
+
+ for (j = 0; j < MAX_LSB_CNT; j++)
+ if (ccp_get_bit(&cmd_q->lsbmask, j))
+ weight++;
+
+	printf("Queue %d can access %d LSB regions (mask 0x%lx)\n",
+ (int)cmd_q->id, weight, cmd_q->lsbmask);
+
+ return weight ? 0 : -EINVAL;
+}
+
+static int
+ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
+ int lsb_cnt, int n_lsbs,
+ unsigned long *lsb_pub)
+{
+ unsigned long qlsb = 0;
+ int bitno = 0;
+ int qlsb_wgt = 0;
+ int i, j;
+
+ /* For each queue:
+ * If the count of potential LSBs available to a queue matches the
+ * ordinal given to us in lsb_cnt:
+ * Copy the mask of possible LSBs for this queue into "qlsb";
+ * For each bit in qlsb, see if the corresponding bit in the
+ * aggregation mask is set; if so, we have a match.
+ * If we have a match, clear the bit in the aggregation to
+ * mark it as no longer available.
+ * If there is no match, clear the bit in qlsb and keep looking.
+ */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ struct ccp_queue *cmd_q = &ccp->cmd_q[i];
+
+ qlsb_wgt = 0;
+ for (j = 0; j < MAX_LSB_CNT; j++)
+ if (ccp_get_bit(&cmd_q->lsbmask, j))
+ qlsb_wgt++;
+
+ if (qlsb_wgt == lsb_cnt) {
+ qlsb = cmd_q->lsbmask;
+
+ bitno = ffs(qlsb) - 1;
+ while (bitno < MAX_LSB_CNT) {
+ if (ccp_get_bit(lsb_pub, bitno)) {
+ /* We found an available LSB
+ * that this queue can access
+ */
+ cmd_q->lsb = bitno;
+ ccp_clear_bit(lsb_pub, bitno);
+ break;
+ }
+ ccp_clear_bit(&qlsb, bitno);
+ bitno = ffs(qlsb) - 1;
+ }
+ if (bitno >= MAX_LSB_CNT)
+ return -EINVAL;
+ n_lsbs--;
+ }
+ }
+ return n_lsbs;
+}
+
+/* For each queue, from the most- to least-constrained:
+ * find an LSB that can be assigned to the queue. If there are N queues that
+ * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
+ * dedicated LSB. Remaining LSB regions become a shared resource.
+ * If we have fewer LSBs than queues, all LSB regions become shared
+ * resources.
+ */
+static int
+ccp_assign_lsbs(struct ccp_device *ccp)
+{
+ unsigned long lsb_pub = 0, qlsb = 0;
+ int n_lsbs = 0;
+ int bitno;
+ int i, lsb_cnt;
+ int rc = 0;
+
+ rte_spinlock_init(&ccp->lsb_lock);
+
+ /* Create an aggregate bitmap to get a total count of available LSBs */
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ lsb_pub |= ccp->cmd_q[i].lsbmask;
+
+ for (i = 0; i < MAX_LSB_CNT; i++)
+ if (ccp_get_bit(&lsb_pub, i))
+ n_lsbs++;
+
+ if (n_lsbs >= ccp->cmd_q_count) {
+		/* We have enough LSBs to give every queue a private LSB.
+ * Brute force search to start with the queues that are more
+ * constrained in LSB choice. When an LSB is privately
+ * assigned, it is removed from the public mask.
+ * This is an ugly N squared algorithm with some optimization.
+ */
+ for (lsb_cnt = 1; n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
+ lsb_cnt++) {
+ rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
+ &lsb_pub);
+ if (rc < 0)
+ return -EINVAL;
+ n_lsbs = rc;
+ }
+ }
+
+ rc = 0;
+	/* What's left of the LSBs, according to the public mask, now becomes
+ * shared. Any zero bits in the lsb_pub mask represent an LSB region
+ * that can't be used as a shared resource, so mark the LSB slots for
+ * them as "in use".
+ */
+ qlsb = lsb_pub;
+ bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
+ while (bitno < MAX_LSB_CNT) {
+ ccp_bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
+ ccp_set_bit(&qlsb, bitno);
+ bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
+ }
+
+ return rc;
+}
+
+static int
+ccp_add_device(struct ccp_device *dev, int type)
+{
+ int i;
+ uint32_t qmr, status_lo, status_hi, dma_addr_lo, dma_addr_hi;
+ uint64_t status;
+ struct ccp_queue *cmd_q;
+ const struct rte_memzone *q_mz;
+ void *vaddr;
+
+ if (dev == NULL)
+ return -1;
+
+ dev->id = ccp_dev_id++;
+ dev->qidx = 0;
+ vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+ if (type == CCP_VERSION_5B) {
+ CCP_WRITE_REG(vaddr, CMD_TRNG_CTL_OFFSET, 0x00012D57);
+ CCP_WRITE_REG(vaddr, CMD_CONFIG_0_OFFSET, 0x00000003);
+ for (i = 0; i < 12; i++) {
+ CCP_WRITE_REG(vaddr, CMD_AES_MASK_OFFSET,
+ CCP_READ_REG(vaddr, TRNG_OUT_REG));
+ }
+ CCP_WRITE_REG(vaddr, CMD_QUEUE_MASK_OFFSET, 0x0000001F);
+ CCP_WRITE_REG(vaddr, CMD_QUEUE_PRIO_OFFSET, 0x00005B6D);
+ CCP_WRITE_REG(vaddr, CMD_CMD_TIMEOUT_OFFSET, 0x00000000);
+
+ CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET, 0x3FFFFFFF);
+ CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET, 0x000003FF);
+
+ CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823);
+ }
+ CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x00001249);
+
+ /* Copy the private LSB mask to the public registers */
+ status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET);
+ status_hi = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET);
+ CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_LO_OFFSET, status_lo);
+ CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_HI_OFFSET, status_hi);
+	status = ((uint64_t)status_hi << 30) | ((uint64_t)status_lo);
+
+ dev->cmd_q_count = 0;
+ /* Find available queues */
+ qmr = CCP_READ_REG(vaddr, Q_MASK_REG);
+ for (i = 0; i < MAX_HW_QUEUES; i++) {
+ if (!(qmr & (1 << i)))
+ continue;
+ cmd_q = &dev->cmd_q[dev->cmd_q_count++];
+ cmd_q->dev = dev;
+ cmd_q->id = i;
+ cmd_q->qidx = 0;
+ cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
+
+ cmd_q->reg_base = (uint8_t *)vaddr +
+ CMD_Q_STATUS_INCR * (i + 1);
+
+ /* CCP queue memory */
+ snprintf(cmd_q->memz_name, sizeof(cmd_q->memz_name),
+ "%s_%d_%s_%d_%s",
+ "ccp_dev",
+ (int)dev->id, "queue",
+ (int)cmd_q->id, "mem");
+ q_mz = ccp_queue_dma_zone_reserve(cmd_q->memz_name,
+ cmd_q->qsize, SOCKET_ID_ANY);
+ cmd_q->qbase_addr = (void *)q_mz->addr;
+ cmd_q->qbase_desc = (void *)q_mz->addr;
+ cmd_q->qbase_phys_addr = q_mz->phys_addr;
+
+ cmd_q->qcontrol = 0;
+ /* init control reg to zero */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol);
+
+ /* Disable the interrupts */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INT_ENABLE_BASE, 0x00);
+ CCP_READ_REG(cmd_q->reg_base, CMD_Q_INT_STATUS_BASE);
+ CCP_READ_REG(cmd_q->reg_base, CMD_Q_STATUS_BASE);
+
+ /* Clear the interrupts */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INTERRUPT_STATUS_BASE,
+ ALL_INTERRUPTS);
+
+ /* Configure size of each virtual queue accessible to host */
+ cmd_q->qcontrol &= ~(CMD_Q_SIZE << CMD_Q_SHIFT);
+ cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD_Q_SHIFT;
+
+ dma_addr_lo = low32_value(cmd_q->qbase_phys_addr);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
+ (uint32_t)dma_addr_lo);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_HEAD_LO_BASE,
+ (uint32_t)dma_addr_lo);
+
+ dma_addr_hi = high32_value(cmd_q->qbase_phys_addr);
+ cmd_q->qcontrol |= (dma_addr_hi << 16);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol);
+
+ /* create LSB Mask map */
+ if (ccp_find_lsb_regions(cmd_q, status))
+ CCP_LOG_ERR("queue doesn't have lsb regions");
+ cmd_q->lsb = -1;
+
+ rte_atomic64_init(&cmd_q->free_slots);
+ rte_atomic64_set(&cmd_q->free_slots, (COMMANDS_PER_QUEUE - 1));
+		/* one slot is left unused as a barrier between head and tail */
+ }
+
+ if (ccp_assign_lsbs(dev))
+ CCP_LOG_ERR("Unable to assign lsb region");
+
+ /* pre-allocate LSB slots */
+ for (i = 0; i < dev->cmd_q_count; i++) {
+ dev->cmd_q[i].sb_key =
+ ccp_lsb_alloc(&dev->cmd_q[i], 1);
+ dev->cmd_q[i].sb_iv =
+ ccp_lsb_alloc(&dev->cmd_q[i], 1);
+ dev->cmd_q[i].sb_sha =
+ ccp_lsb_alloc(&dev->cmd_q[i], 2);
+ dev->cmd_q[i].sb_hmac =
+ ccp_lsb_alloc(&dev->cmd_q[i], 2);
+ }
+
+ TAILQ_INSERT_TAIL(&ccp_list, dev, next);
+ return 0;
+}
+
+static void
+ccp_remove_device(struct ccp_device *dev)
+{
+ if (dev == NULL)
+ return;
+
+ TAILQ_REMOVE(&ccp_list, dev, next);
+}
+
+static int
+is_ccp_device(const char *dirname,
+ const struct rte_pci_id *ccp_id,
+ int *type)
+{
+ char filename[PATH_MAX];
+ const struct rte_pci_id *id;
+ uint16_t vendor, device_id;
+ int i;
+ unsigned long tmp;
+
+ /* get vendor id */
+ snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ return 0;
+ vendor = (uint16_t)tmp;
+
+ /* get device id */
+ snprintf(filename, sizeof(filename), "%s/device", dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ return 0;
+ device_id = (uint16_t)tmp;
+
+ for (id = ccp_id, i = 0; id->vendor_id != 0; id++, i++) {
+ if (vendor == id->vendor_id &&
+ device_id == id->device_id) {
+ *type = i;
+ return 1; /* Matched device */
+ }
+ }
+ return 0;
+}
+
+static int
+ccp_probe_device(const char *dirname, uint16_t domain,
+ uint8_t bus, uint8_t devid,
+ uint8_t function, int ccp_type)
+{
+ struct ccp_device *ccp_dev = NULL;
+ struct rte_pci_device *pci;
+ char filename[PATH_MAX];
+ unsigned long tmp;
+ int uio_fd = -1, i, uio_num;
+ char uio_devname[PATH_MAX];
+ void *map_addr;
+
+ ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
+ RTE_CACHE_LINE_SIZE);
+ if (ccp_dev == NULL)
+ goto fail;
+ pci = &(ccp_dev->pci);
+
+ pci->addr.domain = domain;
+ pci->addr.bus = bus;
+ pci->addr.devid = devid;
+ pci->addr.function = function;
+
+ /* get vendor id */
+ snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+ pci->id.vendor_id = (uint16_t)tmp;
+
+ /* get device id */
+ snprintf(filename, sizeof(filename), "%s/device", dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+ pci->id.device_id = (uint16_t)tmp;
+
+ /* get subsystem_vendor id */
+ snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
+ dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+ pci->id.subsystem_vendor_id = (uint16_t)tmp;
+
+ /* get subsystem_device id */
+ snprintf(filename, sizeof(filename), "%s/subsystem_device",
+ dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+ pci->id.subsystem_device_id = (uint16_t)tmp;
+
+ /* get class_id */
+ snprintf(filename, sizeof(filename), "%s/class",
+ dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+	/* the least significant 24 bits are valid: class, subclass, prog if */
+ pci->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;
+
+ /* parse resources */
+ snprintf(filename, sizeof(filename), "%s/resource", dirname);
+ if (ccp_pci_parse_sysfs_resource(filename, pci) < 0)
+ goto fail;
+
+ uio_num = ccp_find_uio_devname(dirname);
+ if (uio_num < 0) {
+ /*
+ * It may take time for uio device to appear,
+ * wait here and try again
+ */
+ usleep(100000);
+ uio_num = ccp_find_uio_devname(dirname);
+ if (uio_num < 0)
+ goto fail;
+ }
+ snprintf(uio_devname, sizeof(uio_devname), "/dev/uio%u", uio_num);
+
+ uio_fd = open(uio_devname, O_RDWR | O_NONBLOCK);
+ if (uio_fd < 0)
+ goto fail;
+ if (flock(uio_fd, LOCK_EX | LOCK_NB))
+ goto fail;
+
+ /* Map the PCI memory resource of device */
+ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+
+ char devname[PATH_MAX];
+ int res_fd;
+
+ if (pci->mem_resource[i].phys_addr == 0)
+ continue;
+ snprintf(devname, sizeof(devname), "%s/resource%d", dirname, i);
+ res_fd = open(devname, O_RDWR);
+ if (res_fd < 0)
+ goto fail;
+ map_addr = mmap(NULL, pci->mem_resource[i].len,
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED, res_fd, 0);
+ if (map_addr == MAP_FAILED)
+ goto fail;
+
+ pci->mem_resource[i].addr = map_addr;
+ }
+
+ /* device is valid, add in list */
+ if (ccp_add_device(ccp_dev, ccp_type)) {
+ ccp_remove_device(ccp_dev);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ CCP_LOG_ERR("CCP Device probe failed");
+	if (uio_fd >= 0)
+ close(uio_fd);
+ if (ccp_dev)
+ rte_free(ccp_dev);
+ return -1;
+}
+
+int
+ccp_probe_devices(const struct rte_pci_id *ccp_id)
+{
+ int dev_cnt = 0;
+ int ccp_type = 0;
+ struct dirent *d;
+ DIR *dir;
+ int ret = 0;
+ int module_idx = 0;
+ uint16_t domain;
+ uint8_t bus, devid, function;
+ char dirname[PATH_MAX];
+
+ module_idx = ccp_check_pci_uio_module();
+ if (module_idx < 0)
+ return -1;
+
+ TAILQ_INIT(&ccp_list);
+ dir = opendir(SYSFS_PCI_DEVICES);
+ if (dir == NULL)
+ return -1;
+ while ((d = readdir(dir)) != NULL) {
+ if (d->d_name[0] == '.')
+ continue;
+ if (ccp_parse_pci_addr_format(d->d_name, sizeof(d->d_name),
+ &domain, &bus, &devid, &function) != 0)
+ continue;
+ snprintf(dirname, sizeof(dirname), "%s/%s",
+ SYSFS_PCI_DEVICES, d->d_name);
+ if (is_ccp_device(dirname, ccp_id, &ccp_type)) {
+ printf("CCP : Detected CCP device with ID = 0x%x\n",
+ ccp_id[ccp_type].device_id);
+ ret = ccp_probe_device(dirname, domain, bus, devid,
+ function, ccp_type);
+ if (ret == 0)
+ dev_cnt++;
+ }
+ }
+ closedir(dir);
+ return dev_cnt;
+}
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
new file mode 100644
index 00000000..de3e4bcc
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -0,0 +1,495 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_DEV_H_
+#define _CCP_DEV_H_
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_bus_pci.h>
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_spinlock.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+
+/**< CCP specific */
+#define MAX_HW_QUEUES 5
+#define CCP_MAX_TRNG_RETRIES 10
+#define CCP_ALIGN(x, y) ((((x) + (y - 1)) / y) * y)
+
+/**< CCP Register Mappings */
+#define Q_MASK_REG 0x000
+#define TRNG_OUT_REG 0x00c
+
+/* CCP Version 5 Specifics */
+#define CMD_QUEUE_MASK_OFFSET 0x00
+#define CMD_QUEUE_PRIO_OFFSET 0x04
+#define CMD_REQID_CONFIG_OFFSET 0x08
+#define CMD_CMD_TIMEOUT_OFFSET 0x10
+#define LSB_PUBLIC_MASK_LO_OFFSET 0x18
+#define LSB_PUBLIC_MASK_HI_OFFSET 0x1C
+#define LSB_PRIVATE_MASK_LO_OFFSET 0x20
+#define LSB_PRIVATE_MASK_HI_OFFSET 0x24
+
+#define CMD_Q_CONTROL_BASE 0x0000
+#define CMD_Q_TAIL_LO_BASE 0x0004
+#define CMD_Q_HEAD_LO_BASE 0x0008
+#define CMD_Q_INT_ENABLE_BASE 0x000C
+#define CMD_Q_INTERRUPT_STATUS_BASE 0x0010
+
+#define CMD_Q_STATUS_BASE 0x0100
+#define CMD_Q_INT_STATUS_BASE 0x0104
+
+#define CMD_CONFIG_0_OFFSET 0x6000
+#define CMD_TRNG_CTL_OFFSET 0x6008
+#define CMD_AES_MASK_OFFSET 0x6010
+#define CMD_CLK_GATE_CTL_OFFSET 0x603C
+
+/* Address offset between two virtual queue registers */
+#define CMD_Q_STATUS_INCR 0x1000
+
+/* Bit masks */
+#define CMD_Q_RUN 0x1
+#define CMD_Q_SIZE 0x1F
+#define CMD_Q_SHIFT 3
+#define COMMANDS_PER_QUEUE 2048
+
+#define QUEUE_SIZE_VAL ((ffs(COMMANDS_PER_QUEUE) - 2) & \
+ CMD_Q_SIZE)
+#define Q_DESC_SIZE sizeof(struct ccp_desc)
+#define Q_SIZE(n) (COMMANDS_PER_QUEUE*(n))
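+/*
+ * Worked example (illustrative): COMMANDS_PER_QUEUE = 2048 = 1 << 11, so
+ * ffs(2048) = 12 and QUEUE_SIZE_VAL = (12 - 2) & 0x1F = 10. Q_SIZE(Q_DESC_SIZE)
+ * is then the number of bytes needed to hold all 2048 descriptors
+ * (2048 * 32 bytes).
+ */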
+
+#define INT_COMPLETION 0x1
+#define INT_ERROR 0x2
+#define INT_QUEUE_STOPPED 0x4
+#define ALL_INTERRUPTS (INT_COMPLETION| \
+ INT_ERROR| \
+ INT_QUEUE_STOPPED)
+
+#define LSB_REGION_WIDTH 5
+#define MAX_LSB_CNT 8
+
+#define LSB_SIZE 16
+#define LSB_ITEM_SIZE 32
+#define SLSB_MAP_SIZE (MAX_LSB_CNT * LSB_SIZE)
+#define LSB_ENTRY_NUMBER(LSB_ADDR) (LSB_ADDR / LSB_ITEM_SIZE)
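+/*
+ * Sizing note (illustrative): each LSB holds LSB_SIZE (16) entries of
+ * LSB_ITEM_SIZE (32) bytes and up to MAX_LSB_CNT (8) LSBs are tracked, so
+ * SLSB_MAP_SIZE is 128 entries. LSB_ENTRY_NUMBER() converts a byte offset
+ * into an entry index, e.g. offset 64 maps to entry 2.
+ */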
+
+/* General CCP Defines */
+
+#define CCP_SB_BYTES 32
+/* Word 0 */
+#define CCP_CMD_DW0(p) ((p)->dw0)
+#define CCP_CMD_SOC(p) (CCP_CMD_DW0(p).soc)
+#define CCP_CMD_IOC(p) (CCP_CMD_DW0(p).ioc)
+#define CCP_CMD_INIT(p) (CCP_CMD_DW0(p).init)
+#define CCP_CMD_EOM(p) (CCP_CMD_DW0(p).eom)
+#define CCP_CMD_FUNCTION(p) (CCP_CMD_DW0(p).function)
+#define CCP_CMD_ENGINE(p) (CCP_CMD_DW0(p).engine)
+#define CCP_CMD_PROT(p) (CCP_CMD_DW0(p).prot)
+
+/* Word 1 */
+#define CCP_CMD_DW1(p) ((p)->length)
+#define CCP_CMD_LEN(p) (CCP_CMD_DW1(p))
+
+/* Word 2 */
+#define CCP_CMD_DW2(p) ((p)->src_lo)
+#define CCP_CMD_SRC_LO(p) (CCP_CMD_DW2(p))
+
+/* Word 3 */
+#define CCP_CMD_DW3(p) ((p)->dw3)
+#define CCP_CMD_SRC_MEM(p) ((p)->dw3.src_mem)
+#define CCP_CMD_SRC_HI(p) ((p)->dw3.src_hi)
+#define CCP_CMD_LSB_ID(p) ((p)->dw3.lsb_cxt_id)
+#define CCP_CMD_FIX_SRC(p) ((p)->dw3.fixed)
+
+/* Words 4/5 */
+#define CCP_CMD_DW4(p) ((p)->dw4)
+#define CCP_CMD_DST_LO(p) (CCP_CMD_DW4(p).dst_lo)
+#define CCP_CMD_DW5(p) ((p)->dw5.fields.dst_hi)
+#define CCP_CMD_DST_HI(p) (CCP_CMD_DW5(p))
+#define CCP_CMD_DST_MEM(p) ((p)->dw5.fields.dst_mem)
+#define CCP_CMD_FIX_DST(p) ((p)->dw5.fields.fixed)
+#define CCP_CMD_SHA_LO(p) ((p)->dw4.sha_len_lo)
+#define CCP_CMD_SHA_HI(p) ((p)->dw5.sha_len_hi)
+
+/* Word 6/7 */
+#define CCP_CMD_DW6(p) ((p)->key_lo)
+#define CCP_CMD_KEY_LO(p) (CCP_CMD_DW6(p))
+#define CCP_CMD_DW7(p) ((p)->dw7)
+#define CCP_CMD_KEY_HI(p) ((p)->dw7.key_hi)
+#define CCP_CMD_KEY_MEM(p) ((p)->dw7.key_mem)
+
+/* bitmap */
+enum {
+ BITS_PER_WORD = sizeof(unsigned long) * CHAR_BIT
+};
+
+#define WORD_OFFSET(b) ((b) / BITS_PER_WORD)
+#define BIT_OFFSET(b) ((b) % BITS_PER_WORD)
+
+#define CCP_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define CCP_BITMAP_SIZE(nr) \
+ CCP_DIV_ROUND_UP(nr, CHAR_BIT * sizeof(unsigned long))
+
+#define CCP_BITMAP_FIRST_WORD_MASK(start) \
+ (~0UL << ((start) & (BITS_PER_WORD - 1)))
+#define CCP_BITMAP_LAST_WORD_MASK(nbits) \
+ (~0UL >> (-(nbits) & (BITS_PER_WORD - 1)))
+
+#define __ccp_round_mask(x, y) ((typeof(x))((y)-1))
+#define ccp_round_down(x, y) ((x) & ~__ccp_round_mask(x, y))
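+/*
+ * Example (illustrative, assuming an LP64 target where unsigned long is
+ * 8 bytes): BITS_PER_WORD is 64, so bit 70 lives in word WORD_OFFSET(70) = 1
+ * at BIT_OFFSET(70) = 6, and CCP_BITMAP_SIZE(128) = 2 words.
+ */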
+
+/** CCP registers Write/Read */
+
+static inline void ccp_pci_reg_write(void *base, int offset,
+ uint32_t value)
+{
+ volatile void *reg_addr = ((uint8_t *)base + offset);
+
+ rte_write32((rte_cpu_to_le_32(value)), reg_addr);
+}
+
+static inline uint32_t ccp_pci_reg_read(void *base, int offset)
+{
+ volatile void *reg_addr = ((uint8_t *)base + offset);
+
+ return rte_le_to_cpu_32(rte_read32(reg_addr));
+}
+
+#define CCP_READ_REG(hw_addr, reg_offset) \
+ ccp_pci_reg_read(hw_addr, reg_offset)
+
+#define CCP_WRITE_REG(hw_addr, reg_offset, value) \
+ ccp_pci_reg_write(hw_addr, reg_offset, value)
+
+TAILQ_HEAD(ccp_list, ccp_device);
+
+extern struct ccp_list ccp_list;
+
+/**
+ * CCP device version
+ */
+enum ccp_device_version {
+ CCP_VERSION_5A = 0,
+ CCP_VERSION_5B,
+};
+
+/**
+ * A structure describing a CCP command queue.
+ */
+struct ccp_queue {
+ struct ccp_device *dev;
+ char memz_name[RTE_MEMZONE_NAMESIZE];
+
+ rte_atomic64_t free_slots;
+ /**< available free slots updated from enq/deq calls */
+
+ /* Queue identifier */
+ uint64_t id; /**< queue id */
+ uint64_t qidx; /**< queue index */
+ uint64_t qsize; /**< queue size */
+
+ /* Queue address */
+ struct ccp_desc *qbase_desc;
+ void *qbase_addr;
+ phys_addr_t qbase_phys_addr;
+ /**< queue-page registers addr */
+ void *reg_base;
+
+ uint32_t qcontrol;
+ /**< queue ctrl reg */
+
+ int lsb;
+ /**< lsb region assigned to queue */
+ unsigned long lsbmask;
+ /**< lsb regions queue can access */
+ unsigned long lsbmap[CCP_BITMAP_SIZE(LSB_SIZE)];
+ /**< all lsb resources which queue is using */
+ uint32_t sb_key;
+	/**< lsb assigned for key */
+ uint32_t sb_iv;
+ /**< lsb assigned for iv */
+ uint32_t sb_sha;
+ /**< lsb assigned for sha ctx */
+ uint32_t sb_hmac;
+ /**< lsb assigned for hmac ctx */
+} __rte_cache_aligned;
+
+/**
+ * A structure describing a CCP device.
+ */
+struct ccp_device {
+ TAILQ_ENTRY(ccp_device) next;
+ int id;
+ /**< ccp dev id on platform */
+ struct ccp_queue cmd_q[MAX_HW_QUEUES];
+ /**< ccp queue */
+ int cmd_q_count;
+ /**< no. of ccp Queues */
+ struct rte_pci_device pci;
+ /**< ccp pci identifier */
+ unsigned long lsbmap[CCP_BITMAP_SIZE(SLSB_MAP_SIZE)];
+ /**< shared lsb mask of ccp */
+ rte_spinlock_t lsb_lock;
+ /**< protection for shared lsb region allocation */
+ int qidx;
+ /**< current queue index */
+ int hwrng_retries;
+ /**< retry counter for CCP TRNG */
+} __rte_cache_aligned;
+
+/**< CCP H/W engine related */
+/**
+ * ccp_engine - CCP operation identifiers
+ *
+ * @CCP_ENGINE_AES: AES operation
+ * @CCP_ENGINE_XTS_AES_128: 128-bit XTS AES operation
+ * @CCP_ENGINE_3DES: DES/3DES operation
+ * @CCP_ENGINE_SHA: SHA operation
+ * @CCP_ENGINE_RSA: RSA operation
+ * @CCP_ENGINE_PASSTHRU: pass-through operation
+ * @CCP_ENGINE_ZLIB_DECOMPRESS: unused
+ * @CCP_ENGINE_ECC: ECC operation
+ */
+enum ccp_engine {
+ CCP_ENGINE_AES = 0,
+ CCP_ENGINE_XTS_AES_128,
+ CCP_ENGINE_3DES,
+ CCP_ENGINE_SHA,
+ CCP_ENGINE_RSA,
+ CCP_ENGINE_PASSTHRU,
+ CCP_ENGINE_ZLIB_DECOMPRESS,
+ CCP_ENGINE_ECC,
+ CCP_ENGINE__LAST,
+};
+
+/* Passthru engine */
+/**
+ * ccp_passthru_bitwise - type of bitwise passthru operation
+ *
+ * @CCP_PASSTHRU_BITWISE_NOOP: no bitwise operation performed
+ * @CCP_PASSTHRU_BITWISE_AND: perform bitwise AND of src with mask
+ * @CCP_PASSTHRU_BITWISE_OR: perform bitwise OR of src with mask
+ * @CCP_PASSTHRU_BITWISE_XOR: perform bitwise XOR of src with mask
+ * @CCP_PASSTHRU_BITWISE_MASK: overwrite with mask
+ */
+enum ccp_passthru_bitwise {
+ CCP_PASSTHRU_BITWISE_NOOP = 0,
+ CCP_PASSTHRU_BITWISE_AND,
+ CCP_PASSTHRU_BITWISE_OR,
+ CCP_PASSTHRU_BITWISE_XOR,
+ CCP_PASSTHRU_BITWISE_MASK,
+ CCP_PASSTHRU_BITWISE__LAST,
+};
+
+/**
+ * ccp_passthru_byteswap - type of byteswap passthru operation
+ *
+ * @CCP_PASSTHRU_BYTESWAP_NOOP: no byte swapping performed
+ * @CCP_PASSTHRU_BYTESWAP_32BIT: swap bytes within 32-bit words
+ * @CCP_PASSTHRU_BYTESWAP_256BIT: swap bytes within 256-bit words
+ */
+enum ccp_passthru_byteswap {
+ CCP_PASSTHRU_BYTESWAP_NOOP = 0,
+ CCP_PASSTHRU_BYTESWAP_32BIT,
+ CCP_PASSTHRU_BYTESWAP_256BIT,
+ CCP_PASSTHRU_BYTESWAP__LAST,
+};
+
+/**
+ * CCP passthru
+ */
+struct ccp_passthru {
+ phys_addr_t src_addr;
+ phys_addr_t dest_addr;
+ enum ccp_passthru_bitwise bit_mod;
+ enum ccp_passthru_byteswap byte_swap;
+ int len;
+ int dir;
+};
+
+/* CCP version 5: Union to define the function field (cmd_reg1/dword0) */
+union ccp_function {
+ struct {
+ uint16_t size:7;
+ uint16_t encrypt:1;
+ uint16_t mode:5;
+ uint16_t type:2;
+ } aes;
+ struct {
+ uint16_t size:7;
+ uint16_t encrypt:1;
+ uint16_t mode:5;
+ uint16_t type:2;
+ } des;
+ struct {
+ uint16_t size:7;
+ uint16_t encrypt:1;
+ uint16_t rsvd:5;
+ uint16_t type:2;
+ } aes_xts;
+ struct {
+ uint16_t rsvd1:10;
+ uint16_t type:4;
+ uint16_t rsvd2:1;
+ } sha;
+ struct {
+ uint16_t mode:3;
+ uint16_t size:12;
+ } rsa;
+ struct {
+ uint16_t byteswap:2;
+ uint16_t bitwise:3;
+ uint16_t reflect:2;
+ uint16_t rsvd:8;
+ } pt;
+ struct {
+ uint16_t rsvd:13;
+ } zlib;
+ struct {
+ uint16_t size:10;
+ uint16_t type:2;
+ uint16_t mode:3;
+ } ecc;
+ uint16_t raw;
+};
+
+
+/**
+ * descriptor for version 5 CCP commands
+ * 8 32-bit words:
+ * word 0: function; engine; control bits
+ * word 1: length of source data
+ * word 2: low 32 bits of source pointer
+ * word 3: upper 16 bits of source pointer; source memory type
+ * word 4: low 32 bits of destination pointer
+ * word 5: upper 16 bits of destination pointer; destination memory
+ * type
+ * word 6: low 32 bits of key pointer
+ * word 7: upper 16 bits of key pointer; key memory type
+ */
+struct dword0 {
+ uint32_t soc:1;
+ uint32_t ioc:1;
+ uint32_t rsvd1:1;
+ uint32_t init:1;
+ uint32_t eom:1;
+ uint32_t function:15;
+ uint32_t engine:4;
+ uint32_t prot:1;
+ uint32_t rsvd2:7;
+};
+
+struct dword3 {
+ uint32_t src_hi:16;
+ uint32_t src_mem:2;
+ uint32_t lsb_cxt_id:8;
+ uint32_t rsvd1:5;
+ uint32_t fixed:1;
+};
+
+union dword4 {
+ uint32_t dst_lo; /* NON-SHA */
+ uint32_t sha_len_lo; /* SHA */
+};
+
+union dword5 {
+ struct {
+ uint32_t dst_hi:16;
+ uint32_t dst_mem:2;
+ uint32_t rsvd1:13;
+ uint32_t fixed:1;
+ }
+ fields;
+ uint32_t sha_len_hi;
+};
+
+struct dword7 {
+ uint32_t key_hi:16;
+ uint32_t key_mem:2;
+ uint32_t rsvd1:14;
+};
+
+struct ccp_desc {
+ struct dword0 dw0;
+ uint32_t length;
+ uint32_t src_lo;
+ struct dword3 dw3;
+ union dword4 dw4;
+ union dword5 dw5;
+ uint32_t key_lo;
+ struct dword7 dw7;
+};
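+/*
+ * Usage sketch (illustrative, not part of the driver): command setup code
+ * typically clears a descriptor in the queue ring and fills it through the
+ * accessor macros above, e.g.
+ *
+ *	struct ccp_desc *desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ *	memset(desc, 0, Q_DESC_SIZE);
+ *	CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;
+ *	CCP_CMD_LEN(desc) = len;
+ *	CCP_CMD_SRC_LO(desc) = low32_value(src);
+ *	CCP_CMD_SRC_HI(desc) = high32_value(src);
+ *	CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+ */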
+
+/**
+ * ccp memory type
+ */
+enum ccp_memtype {
+ CCP_MEMTYPE_SYSTEM = 0,
+ CCP_MEMTYPE_SB,
+ CCP_MEMTYPE_LOCAL,
+ CCP_MEMTYPE_LAST,
+};
+
+/**
+ * cmd id to follow order
+ */
+enum ccp_cmd_order {
+ CCP_CMD_CIPHER = 0,
+ CCP_CMD_AUTH,
+ CCP_CMD_CIPHER_HASH,
+ CCP_CMD_HASH_CIPHER,
+ CCP_CMD_COMBINED,
+ CCP_CMD_NOT_SUPPORTED,
+};
+
+static inline uint32_t
+low32_value(unsigned long addr)
+{
+ return ((uint64_t)addr) & 0x0ffffffff;
+}
+
+static inline uint32_t
+high32_value(unsigned long addr)
+{
+ return ((uint64_t)addr >> 32) & 0x00000ffff;
+}
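+/*
+ * Note (illustrative): only the low 16 bits of the upper half are kept here,
+ * since the descriptor and queue-base registers carry 16-bit "high" address
+ * fields (see CCP_CMD_SRC_HI/DST_HI/KEY_HI and the CMD_Q_CONTROL_BASE usage),
+ * i.e. the device is assumed to address at most 48-bit physical addresses.
+ */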
+
+/*
+ * Start CCP device
+ */
+int ccp_dev_start(struct rte_cryptodev *dev);
+
+/**
+ * Detect ccp platform and initialize all ccp devices
+ *
+ * @param ccp_id rte_pci_id list for supported CCP devices
+ * @return no. of successfully initialized CCP devices
+ */
+int ccp_probe_devices(const struct rte_pci_id *ccp_id);
+
+/**
+ * allocate a ccp command queue
+ *
+ * @dev rte crypto device
+ * @param slot_req number of required slots
+ * @return allotted CCP queue on success otherwise NULL
+ */
+struct ccp_queue *ccp_allot_queue(struct rte_cryptodev *dev, int slot_req);
+
+/**
+ * read hwrng value
+ *
+ * @param trng_value data pointer to write RNG value
+ * @return 0 on success otherwise -1
+ */
+int ccp_read_hwrng(uint32_t *trng_value);
+
+#endif /* _CCP_DEV_H_ */
diff --git a/drivers/crypto/ccp/ccp_pci.c b/drivers/crypto/ccp/ccp_pci.c
new file mode 100644
index 00000000..59152ca5
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pci.c
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_string_fns.h>
+
+#include "ccp_pci.h"
+
+static const char * const uio_module_names[] = {
+ "igb_uio",
+ "uio_pci_generic",
+};
+
+int
+ccp_check_pci_uio_module(void)
+{
+ FILE *fp;
+ int i;
+ char buf[BUFSIZ];
+
+ fp = fopen(PROC_MODULES, "r");
+ if (fp == NULL)
+ return -1;
+	i = 0;
+	while (i < (int)(sizeof(uio_module_names) / sizeof(uio_module_names[0]))) {
+		while (fgets(buf, sizeof(buf), fp) != NULL) {
+			if (!strncmp(buf, uio_module_names[i],
+				     strlen(uio_module_names[i]))) {
+				fclose(fp);
+				return i;
+			}
+		}
+		i++;
+		rewind(fp);
+	}
+	fclose(fp);
+	printf("Insert igb_uio or uio_pci_generic kernel module(s)\n");
+	return -1; /* uio not inserted */
+}
+
+/*
+ * split up a pci address into its constituent parts.
+ */
+int
+ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+ uint8_t *bus, uint8_t *devid, uint8_t *function)
+{
+ /* first split on ':' */
+ union splitaddr {
+ struct {
+ char *domain;
+ char *bus;
+ char *devid;
+ char *function;
+ };
+ char *str[PCI_FMT_NVAL];
+ /* last element-separator is "." not ":" */
+ } splitaddr;
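+	/*
+	 * Example (illustrative): for "0000:03:00.2" the ':' split yields
+	 * domain "0000", bus "03" and devid "00.2"; the '.' split below then
+	 * separates devid "00" from function "2".
+	 */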
+
+ char *buf_copy = strndup(buf, bufsize);
+
+ if (buf_copy == NULL)
+ return -1;
+
+ if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
+ != PCI_FMT_NVAL - 1)
+ goto error;
+ /* final split is on '.' between devid and function */
+ splitaddr.function = strchr(splitaddr.devid, '.');
+ if (splitaddr.function == NULL)
+ goto error;
+ *splitaddr.function++ = '\0';
+
+ /* now convert to int values */
+ errno = 0;
+	*domain = (uint16_t)strtoul(splitaddr.domain, NULL, 16);
+ *bus = (uint8_t)strtoul(splitaddr.bus, NULL, 16);
+ *devid = (uint8_t)strtoul(splitaddr.devid, NULL, 16);
+ *function = (uint8_t)strtoul(splitaddr.function, NULL, 10);
+ if (errno != 0)
+ goto error;
+
+	free(buf_copy); /* free the copy made with strndup */
+ return 0;
+error:
+ free(buf_copy);
+ return -1;
+}
+
+int
+ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val)
+{
+ FILE *f;
+ char buf[BUFSIZ];
+ char *end = NULL;
+
+ f = fopen(filename, "r");
+ if (f == NULL)
+ return -1;
+ if (fgets(buf, sizeof(buf), f) == NULL) {
+ fclose(f);
+ return -1;
+ }
+ *val = strtoul(buf, &end, 0);
+ if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+ return 0;
+}
+
+/** IO resource type: */
+#define IORESOURCE_IO 0x00000100
+#define IORESOURCE_MEM 0x00000200
+
+/* parse one line of the "resource" sysfs file (note that the 'line'
+ * string is modified)
+ */
+static int
+ccp_pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
+ uint64_t *end_addr, uint64_t *flags)
+{
+ union pci_resource_info {
+ struct {
+ char *phys_addr;
+ char *end_addr;
+ char *flags;
+ };
+ char *ptrs[PCI_RESOURCE_FMT_NVAL];
+ } res_info;
+
+ if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3)
+ return -1;
+ errno = 0;
+ *phys_addr = strtoull(res_info.phys_addr, NULL, 16);
+ *end_addr = strtoull(res_info.end_addr, NULL, 16);
+ *flags = strtoull(res_info.flags, NULL, 16);
+ if (errno != 0)
+ return -1;
+
+ return 0;
+}
+
+/* parse the "resource" sysfs file */
+int
+ccp_pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
+{
+ FILE *fp;
+ char buf[BUFSIZ];
+ int i;
+ uint64_t phys_addr, end_addr, flags;
+
+ fp = fopen(filename, "r");
+ if (fp == NULL)
+ return -1;
+
+ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+ if (fgets(buf, sizeof(buf), fp) == NULL)
+ goto error;
+ if (ccp_pci_parse_one_sysfs_resource(buf, sizeof(buf),
+ &phys_addr, &end_addr, &flags) < 0)
+ goto error;
+
+ if (flags & IORESOURCE_MEM) {
+ dev->mem_resource[i].phys_addr = phys_addr;
+ dev->mem_resource[i].len = end_addr - phys_addr + 1;
+ /* not mapped for now */
+ dev->mem_resource[i].addr = NULL;
+ }
+ }
+ fclose(fp);
+ return 0;
+
+error:
+ fclose(fp);
+ return -1;
+}
+
+int
+ccp_find_uio_devname(const char *dirname)
+{
+
+ DIR *dir;
+ struct dirent *e;
+ char dirname_uio[PATH_MAX];
+ unsigned int uio_num;
+ int ret = -1;
+
+ /* depending on kernel version, uio can be located in uio/uioX
+ * or uio:uioX
+ */
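+	/*
+	 * Example (illustrative): depending on the kernel, a bound device may
+	 * expose either <dirname>/uio/uio0 or <dirname>/uio:uio0; both map to
+	 * /dev/uio0, which is what ccp_probe_device() opens.
+	 */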
+ snprintf(dirname_uio, sizeof(dirname_uio), "%s/uio", dirname);
+ dir = opendir(dirname_uio);
+ if (dir == NULL) {
+		/* retry with the parent directory; the uio entry layout
+		 * differs across kernel versions
+		 */
+ dir = opendir(dirname);
+ if (dir == NULL)
+ return -1;
+ }
+
+ /* take the first file starting with "uio" */
+ while ((e = readdir(dir)) != NULL) {
+ /* format could be uio%d ...*/
+ int shortprefix_len = sizeof("uio") - 1;
+ /* ... or uio:uio%d */
+ int longprefix_len = sizeof("uio:uio") - 1;
+ char *endptr;
+
+ if (strncmp(e->d_name, "uio", 3) != 0)
+ continue;
+
+ /* first try uio%d */
+ errno = 0;
+ uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10);
+ if (errno == 0 && endptr != (e->d_name + shortprefix_len)) {
+ ret = uio_num;
+ break;
+ }
+
+ /* then try uio:uio%d */
+ errno = 0;
+ uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10);
+ if (errno == 0 && endptr != (e->d_name + longprefix_len)) {
+ ret = uio_num;
+ break;
+ }
+ }
+ closedir(dir);
+	return ret;
+}
diff --git a/drivers/crypto/ccp/ccp_pci.h b/drivers/crypto/ccp/ccp_pci.h
new file mode 100644
index 00000000..7ed3bac4
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pci.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_PCI_H_
+#define _CCP_PCI_H_
+
+#include <stdint.h>
+
+#include <rte_bus_pci.h>
+
+#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
+#define PROC_MODULES "/proc/modules"
+
+int ccp_check_pci_uio_module(void);
+
+int ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+ uint8_t *bus, uint8_t *devid, uint8_t *function);
+
+int ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val);
+
+int ccp_pci_parse_sysfs_resource(const char *filename,
+ struct rte_pci_device *dev);
+
+int ccp_find_uio_devname(const char *dirname);
+
+#endif /* _CCP_PCI_H_ */
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
new file mode 100644
index 00000000..6984913f
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -0,0 +1,833 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_malloc.h>
+
+#include "ccp_pmd_private.h"
+#include "ccp_dev.h"
+#include "ccp_crypto.h"
+
+#define CCP_BASE_SYM_CRYPTO_CAPABILITIES \
+ { /* SHA1 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA1, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 20, \
+ .max = 20, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA1 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 20, \
+ .max = 20, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA224 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA224, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA224 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-224 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_224, \
+ .block_size = 144, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+	{ /* SHA3-224 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_224_HMAC, \
+ .block_size = 144, \
+ .key_size = { \
+ .min = 1, \
+ .max = 144, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA256 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA256, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA256 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-256 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_256, \
+ .block_size = 136, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-256-HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_256_HMAC, \
+ .block_size = 136, \
+ .key_size = { \
+ .min = 1, \
+ .max = 136, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA384 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA384, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA384 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 1, \
+ .max = 128, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-384 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_384, \
+ .block_size = 104, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-384-HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_384_HMAC, \
+ .block_size = 104, \
+ .key_size = { \
+ .min = 1, \
+ .max = 104, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA512 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA512, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA512 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 1, \
+ .max = 128, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-512 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_512, \
+ .block_size = 72, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-512-HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_512_HMAC, \
+ .block_size = 72, \
+ .key_size = { \
+ .min = 1, \
+ .max = 72, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+	{ /* AES-CMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_AES_CMAC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .digest_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ }, } \
+ }, } \
+ }, \
+ { /* AES ECB */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_ECB, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CTR */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* 3DES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 16, \
+ .max = 24, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES GCM */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \
+ {.aead = { \
+ .algo = RTE_CRYPTO_AEAD_AES_GCM, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .digest_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .aad_size = { \
+ .min = 0, \
+ .max = 65535, \
+ .increment = 1 \
+ }, \
+ .iv_size = { \
+ .min = 12, \
+ .max = 16, \
+ .increment = 4 \
+ }, \
+ }, } \
+ }, } \
+ }
+
+#define CCP_EXTRA_SYM_CRYPTO_CAPABILITIES \
+ { /* MD5 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }
+
+static const struct rte_cryptodev_capabilities ccp_crypto_cap[] = {
+ CCP_BASE_SYM_CRYPTO_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_cryptodev_capabilities ccp_crypto_cap_complete[] = {
+ CCP_EXTRA_SYM_CRYPTO_CAPABILITIES,
+ CCP_BASE_SYM_CRYPTO_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
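+/*
+ * Note: ccp_crypto_cap is the default capability set; ccp_crypto_cap_complete
+ * additionally advertises MD5 HMAC (handled on the CPU) and is reported by
+ * ccp_pmd_info_get() only when the device was created with ccp_auth_opt=1.
+ */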
+
+static int
+ccp_pmd_config(struct rte_cryptodev *dev __rte_unused,
+ struct rte_cryptodev_config *config __rte_unused)
+{
+ return 0;
+}
+
+static int
+ccp_pmd_start(struct rte_cryptodev *dev)
+{
+ return ccp_dev_start(dev);
+}
+
+static void
+ccp_pmd_stop(struct rte_cryptodev *dev __rte_unused)
+{
+
+}
+
+static int
+ccp_pmd_close(struct rte_cryptodev *dev __rte_unused)
+{
+ return 0;
+}
+
+static void
+ccp_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+
+}
+
+static void
+ccp_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+static void
+ccp_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct ccp_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = ccp_crypto_cap;
+ if (internals->auth_opt == 1)
+ dev_info->capabilities = ccp_crypto_cap_complete;
+ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+ /* No limit of number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
+ }
+}
+
+static int
+ccp_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct ccp_qp *qp;
+
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ qp = (struct ccp_qp *)dev->data->queue_pairs[qp_id];
+ rte_ring_free(qp->processed_pkts);
+ rte_mempool_free(qp->batch_mp);
+ rte_free(qp);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+static int
+ccp_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct ccp_qp *qp)
+{
+ unsigned int n = snprintf(qp->name, sizeof(qp->name),
+ "ccp_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+static struct rte_ring *
+ccp_pmd_qp_create_batch_info_ring(struct ccp_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (r->size >= ring_size) {
+ CCP_LOG_INFO(
+ "Reusing ring %s for processed packets",
+ qp->name);
+ return r;
+ }
+ CCP_LOG_INFO(
+ "Unable to reuse ring %s for processed packets",
+ qp->name);
+ return NULL;
+ }
+
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+static int
+ccp_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool)
+{
+ struct ccp_private *internals = dev->data->dev_private;
+ struct ccp_qp *qp;
+ int retval = 0;
+
+ if (qp_id >= internals->max_nb_qpairs) {
+ CCP_LOG_ERR("Invalid qp_id %u, should be less than %u",
+ qp_id, internals->max_nb_qpairs);
+ return (-EINVAL);
+ }
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ ccp_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("CCP Crypto PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL) {
+ CCP_LOG_ERR("Failed to allocate queue pair memory");
+ return (-ENOMEM);
+ }
+
+ qp->dev = dev;
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ retval = ccp_pmd_qp_set_unique_name(dev, qp);
+ if (retval) {
+ CCP_LOG_ERR("Failed to create unique name for ccp qp");
+ goto qp_setup_cleanup;
+ }
+
+ qp->processed_pkts = ccp_pmd_qp_create_batch_info_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_pkts == NULL) {
+ CCP_LOG_ERR("Failed to create batch info ring");
+ goto qp_setup_cleanup;
+ }
+
+ qp->sess_mp = session_pool;
+
+ /* mempool for batch info */
+ qp->batch_mp = rte_mempool_create(
+ qp->name,
+ qp_conf->nb_descriptors,
+ sizeof(struct ccp_batch_info),
+ RTE_CACHE_LINE_SIZE,
+ 0, NULL, NULL, NULL, NULL,
+ SOCKET_ID_ANY, 0);
+ if (qp->batch_mp == NULL)
+ goto qp_setup_cleanup;
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ return 0;
+
+qp_setup_cleanup:
+ dev->data->queue_pairs[qp_id] = NULL;
+ if (qp)
+ rte_free(qp);
+ return -1;
+}
+
+static uint32_t
+ccp_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+static unsigned
+ccp_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct ccp_session);
+}
+
+static int
+ccp_pmd_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ int ret;
+ void *sess_private_data;
+ struct ccp_private *internals;
+
+ if (unlikely(sess == NULL || xform == NULL)) {
+ CCP_LOG_ERR("Invalid session struct or xform");
+ return -ENOMEM;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CCP_LOG_ERR("Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+ internals = (struct ccp_private *)dev->data->dev_private;
+ ret = ccp_set_session_parameters(sess_private_data, xform, internals);
+ if (ret != 0) {
+ CCP_LOG_ERR("failed configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+ set_sym_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+static void
+ccp_pmd_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+
+ if (sess_priv) {
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+		memset(sess_priv, 0, sizeof(struct ccp_session));
+		set_sym_session_private_data(sess, index, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+struct rte_cryptodev_ops ccp_ops = {
+ .dev_configure = ccp_pmd_config,
+ .dev_start = ccp_pmd_start,
+ .dev_stop = ccp_pmd_stop,
+ .dev_close = ccp_pmd_close,
+
+ .stats_get = ccp_pmd_stats_get,
+ .stats_reset = ccp_pmd_stats_reset,
+
+ .dev_infos_get = ccp_pmd_info_get,
+
+ .queue_pair_setup = ccp_pmd_qp_setup,
+ .queue_pair_release = ccp_pmd_qp_release,
+ .queue_pair_count = ccp_pmd_qp_count,
+
+ .sym_session_get_size = ccp_pmd_sym_session_get_size,
+ .sym_session_configure = ccp_pmd_sym_session_configure,
+ .sym_session_clear = ccp_pmd_sym_session_clear,
+};
+
+struct rte_cryptodev_ops *ccp_pmd_ops = &ccp_ops;
diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
new file mode 100644
index 00000000..79752f68
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pmd_private.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_PMD_PRIVATE_H_
+#define _CCP_PMD_PRIVATE_H_
+
+#include <rte_cryptodev.h>
+#include "ccp_crypto.h"
+
+#define CRYPTODEV_NAME_CCP_PMD crypto_ccp
+
+#define CCP_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_CCP_DEBUG
+#define CCP_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+ __func__, __LINE__, ## args)
+
+#define CCP_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+ __func__, __LINE__, ## args)
+#else
+#define CCP_LOG_INFO(fmt, args...)
+#define CCP_LOG_DBG(fmt, args...)
+#endif
+
+/**< Maximum queue pairs supported by CCP PMD */
+#define CCP_PMD_MAX_QUEUE_PAIRS 1
+#define CCP_NB_MAX_DESCRIPTORS 1024
+#define CCP_MAX_BURST 64
+
+#include "ccp_dev.h"
+
+/* private data structure for each CCP crypto device */
+struct ccp_private {
+ unsigned int max_nb_qpairs; /**< Max number of queue pairs */
+ uint8_t crypto_num_dev; /**< Number of working crypto devices */
+ bool auth_opt; /**< Authentication offload option */
+ struct ccp_device *last_dev; /**< Last working crypto device */
+};
+
+/* CCP batch info */
+struct ccp_batch_info {
+ struct rte_crypto_op *op[CCP_MAX_BURST];
+	/**< op table populated at enqueue time from the app */
+ int op_idx;
+ struct ccp_queue *cmd_q;
+ uint16_t opcnt;
+	/**< no. of crypto ops in batch */
+	int desccnt;
+	/**< no. of ccp queue descriptors */
+	uint32_t head_offset;
+	/**< ccp queue head/tail offsets at the time of enqueue */
+ uint32_t tail_offset;
+ uint8_t lsb_buf[CCP_SB_BYTES * CCP_MAX_BURST];
+ phys_addr_t lsb_buf_phys;
+ /**< LSB intermediate buf for passthru */
+ int lsb_buf_idx;
+ uint16_t auth_ctr;
+ /**< auth only ops batch for CPU based auth */
+} __rte_cache_aligned;
+
+/**< CCP crypto queue pair */
+struct ccp_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_pkts;
+ /**< Ring for placing process packets */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_mempool *batch_mp;
+ /**< Session Mempool for batch info */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+ struct ccp_batch_info *b_info;
+ /**< Store ops pulled out of queue */
+ struct rte_cryptodev *dev;
+ /**< rte crypto device to which this qp belongs */
+ uint8_t temp_digest[DIGEST_LENGTH_MAX];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+} __rte_cache_aligned;
+
+
+/**< device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *ccp_pmd_ops;
+
+uint16_t
+ccp_cpu_pmd_enqueue_burst(void *queue_pair,
+ struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+uint16_t
+ccp_cpu_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
+#endif /* _CCP_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/ccp/meson.build b/drivers/crypto/ccp/meson.build
new file mode 100644
index 00000000..e43b0059
--- /dev/null
+++ b/drivers/crypto/ccp/meson.build
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+dep = dependency('libcrypto', required: false)
+if not dep.found()
+ build = false
+endif
+deps += 'bus_vdev'
+deps += 'bus_pci'
+
+sources = files('rte_ccp_pmd.c',
+ 'ccp_crypto.c',
+ 'ccp_dev.c',
+ 'ccp_pci.c',
+ 'ccp_pmd_ops.c')
+
+ext_deps += dep
+pkgconfig_extra_libs += '-lcrypto'
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
new file mode 100644
index 00000000..92d8a955
--- /dev/null
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -0,0 +1,397 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <rte_bus_pci.h>
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_pci.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+
+#include "ccp_crypto.h"
+#include "ccp_dev.h"
+#include "ccp_pmd_private.h"
+
+/**
+ * Global flag used to check whether the CCP PMD has already been initialized.
+ */
+static unsigned int ccp_pmd_init_done;
+uint8_t ccp_cryptodev_driver_id;
+
+struct ccp_pmd_init_params {
+ struct rte_cryptodev_pmd_init_params def_p;
+ bool auth_opt;
+};
+
+#define CCP_CRYPTODEV_PARAM_NAME ("name")
+#define CCP_CRYPTODEV_PARAM_SOCKET_ID ("socket_id")
+#define CCP_CRYPTODEV_PARAM_MAX_NB_QP ("max_nb_queue_pairs")
+#define CCP_CRYPTODEV_PARAM_AUTH_OPT ("ccp_auth_opt")
+
+const char *ccp_pmd_valid_params[] = {
+ CCP_CRYPTODEV_PARAM_NAME,
+ CCP_CRYPTODEV_PARAM_SOCKET_ID,
+ CCP_CRYPTODEV_PARAM_MAX_NB_QP,
+ CCP_CRYPTODEV_PARAM_AUTH_OPT,
+};
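+/*
+ * Illustrative usage (values are examples only): the PMD is created as a
+ * vdev, e.g.
+ *   --vdev "crypto_ccp,socket_id=0,max_nb_queue_pairs=1,ccp_auth_opt=1"
+ * where ccp_auth_opt=1 moves authentication to the CPU (see below).
+ */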
+
+/** ccp pmd auth option */
+enum ccp_pmd_auth_opt {
+ CCP_PMD_AUTH_OPT_CCP = 0,
+ CCP_PMD_AUTH_OPT_CPU,
+};
+
+/** parse integer from integer argument */
+static int
+parse_integer_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int *i = (int *) extra_args;
+
+ *i = atoi(value);
+ if (*i < 0) {
+		CCP_LOG_ERR("Argument has to be non-negative.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/** parse name argument */
+static int
+parse_name_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct rte_cryptodev_pmd_init_params *params = extra_args;
+
+ if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
+ CCP_LOG_ERR("Invalid name %s, should be less than "
+ "%u bytes.\n", value,
+ RTE_CRYPTODEV_NAME_MAX_LEN - 1);
+ return -EINVAL;
+ }
+
+ strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
+
+ return 0;
+}
+
+/** parse authentication operation option */
+static int
+parse_auth_opt_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct ccp_pmd_init_params *params = extra_args;
+ int i;
+
+ i = atoi(value);
+ if (i < CCP_PMD_AUTH_OPT_CCP || i > CCP_PMD_AUTH_OPT_CPU) {
+ CCP_LOG_ERR("Invalid ccp pmd auth option. "
+ "0->auth on CCP(default), "
+ "1->auth on CPU\n");
+ return -EINVAL;
+ }
+ params->auth_opt = i;
+ return 0;
+}
+
+static int
+ccp_pmd_parse_input_args(struct ccp_pmd_init_params *params,
+ const char *input_args)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+
+ if (params == NULL)
+ return -EINVAL;
+
+ if (input_args) {
+ kvlist = rte_kvargs_parse(input_args,
+ ccp_pmd_valid_params);
+ if (kvlist == NULL)
+ return -1;
+
+ ret = rte_kvargs_process(kvlist,
+ CCP_CRYPTODEV_PARAM_MAX_NB_QP,
+ &parse_integer_arg,
+ &params->def_p.max_nb_queue_pairs);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ CCP_CRYPTODEV_PARAM_SOCKET_ID,
+ &parse_integer_arg,
+ &params->def_p.socket_id);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ CCP_CRYPTODEV_PARAM_NAME,
+ &parse_name_arg,
+ &params->def_p);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ CCP_CRYPTODEV_PARAM_AUTH_OPT,
+ &parse_auth_opt_arg,
+ params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ }
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+static struct ccp_session *
+get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
+{
+ struct ccp_session *sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (unlikely(op->sym->session == NULL))
+ return NULL;
+
+ sess = (struct ccp_session *)
+ get_sym_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ } else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ void *_sess;
+ void *_sess_private_data = NULL;
+ struct ccp_private *internals;
+
+ if (rte_mempool_get(qp->sess_mp, &_sess))
+ return NULL;
+		if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			return NULL;
+		}
+
+ sess = (struct ccp_session *)_sess_private_data;
+
+ internals = (struct ccp_private *)qp->dev->data->dev_private;
+ if (unlikely(ccp_set_session_parameters(sess, op->sym->xform,
+ internals) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_sym_session_private_data(op->sym->session,
+ ccp_cryptodev_driver_id,
+ _sess_private_data);
+ }
+
+ return sess;
+}
+
+static uint16_t
+ccp_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct ccp_session *sess = NULL;
+ struct ccp_qp *qp = queue_pair;
+ struct ccp_queue *cmd_q;
+ struct rte_cryptodev *dev = qp->dev;
+ uint16_t i, enq_cnt = 0, slots_req = 0;
+
+ if (nb_ops == 0)
+ return 0;
+
+ if (unlikely(rte_ring_full(qp->processed_pkts) != 0))
+ return 0;
+
+ for (i = 0; i < nb_ops; i++) {
+ sess = get_ccp_session(qp, ops[i]);
+ if (unlikely(sess == NULL) && (i == 0)) {
+ qp->qp_stats.enqueue_err_count++;
+ return 0;
+ } else if (sess == NULL) {
+ nb_ops = i;
+ break;
+ }
+ slots_req += ccp_compute_slot_count(sess);
+ }
+
+ cmd_q = ccp_allot_queue(dev, slots_req);
+ if (unlikely(cmd_q == NULL))
+ return 0;
+
+ enq_cnt = process_ops_to_enqueue(qp, ops, cmd_q, nb_ops, slots_req);
+ qp->qp_stats.enqueued_count += enq_cnt;
+ return enq_cnt;
+}
+
+static uint16_t
+ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct ccp_qp *qp = queue_pair;
+ uint16_t nb_dequeued = 0, i;
+
+ nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops);
+
+ /* Free session if a session-less crypto op */
+ for (i = 0; i < nb_dequeued; i++)
+ if (unlikely(ops[i]->sess_type ==
+ RTE_CRYPTO_OP_SESSIONLESS)) {
+ rte_mempool_put(qp->sess_mp,
+ ops[i]->sym->session);
+ ops[i]->sym->session = NULL;
+ }
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static struct rte_pci_id ccp_pci_id[] = {
+ {
+ RTE_PCI_DEVICE(0x1022, 0x1456), /* AMD CCP-5a */
+ },
+ {
+ RTE_PCI_DEVICE(0x1022, 0x1468), /* AMD CCP-5b */
+ },
+ {.device_id = 0},
+};
+
+/** Remove ccp pmd */
+static int
+cryptodev_ccp_remove(struct rte_vdev_device *dev)
+{
+ const char *name;
+
+ ccp_pmd_init_done = 0;
+ name = rte_vdev_device_name(dev);
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD, "Closing ccp device %s on numa socket %u\n",
+ name, rte_socket_id());
+
+ return 0;
+}
+
+/** Create crypto device */
+static int
+cryptodev_ccp_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct ccp_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct ccp_private *internals;
+	int cryptodev_cnt = 0;
+
+ if (init_params->def_p.name[0] == '\0')
+ snprintf(init_params->def_p.name,
+ sizeof(init_params->def_p.name),
+ "%s", name);
+
+ dev = rte_cryptodev_pmd_create(init_params->def_p.name,
+ &vdev->device,
+ &init_params->def_p);
+ if (dev == NULL) {
+ CCP_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ cryptodev_cnt = ccp_probe_devices(ccp_pci_id);
+
+	if (cryptodev_cnt <= 0) {
+ CCP_LOG_ERR("failed to detect CCP crypto device");
+ goto init_error;
+ }
+
+ printf("CCP : Crypto device count = %d\n", cryptodev_cnt);
+ dev->driver_id = ccp_cryptodev_driver_id;
+
+ /* register rx/tx burst functions for data path */
+ dev->dev_ops = ccp_pmd_ops;
+ dev->enqueue_burst = ccp_pmd_enqueue_burst;
+ dev->dequeue_burst = ccp_pmd_dequeue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+ internals = dev->data->dev_private;
+
+ internals->max_nb_qpairs = init_params->def_p.max_nb_queue_pairs;
+ internals->auth_opt = init_params->auth_opt;
+ internals->crypto_num_dev = cryptodev_cnt;
+
+ return 0;
+
+init_error:
+ CCP_LOG_ERR("driver %s: %s() failed",
+ init_params->def_p.name, __func__);
+ cryptodev_ccp_remove(vdev);
+
+ return -EFAULT;
+}
+
+/** Probe ccp pmd */
+static int
+cryptodev_ccp_probe(struct rte_vdev_device *vdev)
+{
+ int rc = 0;
+ const char *name;
+ struct ccp_pmd_init_params init_params = {
+ .def_p = {
+ "",
+ sizeof(struct ccp_private),
+ rte_socket_id(),
+ CCP_PMD_MAX_QUEUE_PAIRS
+ },
+ .auth_opt = CCP_PMD_AUTH_OPT_CCP,
+ };
+ const char *input_args;
+
+ if (ccp_pmd_init_done) {
+ RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
+ return -EFAULT;
+ }
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ input_args = rte_vdev_device_args(vdev);
+ ccp_pmd_parse_input_args(&init_params, input_args);
+ init_params.def_p.max_nb_queue_pairs = CCP_PMD_MAX_QUEUE_PAIRS;
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+ init_params.def_p.socket_id);
+ RTE_LOG(INFO, PMD, "Max number of queue pairs = %d\n",
+ init_params.def_p.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, "Authentication offload to %s\n",
+ ((init_params.auth_opt == 0) ? "CCP" : "CPU"));
+
+ rc = cryptodev_ccp_create(name, vdev, &init_params);
+ if (rc)
+ return rc;
+ ccp_pmd_init_done = 1;
+ return 0;
+}
+
+static struct rte_vdev_driver cryptodev_ccp_pmd_drv = {
+ .probe = cryptodev_ccp_probe,
+ .remove = cryptodev_ccp_remove
+};
+
+static struct cryptodev_driver ccp_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CCP_PMD, cryptodev_ccp_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CCP_PMD,
+ "max_nb_queue_pairs=<int> "
+ "socket_id=<int> "
+ "ccp_auth_opt=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(ccp_crypto_drv, cryptodev_ccp_pmd_drv.driver,
+ ccp_cryptodev_driver_id);
diff --git a/drivers/crypto/ccp/rte_pmd_ccp_version.map b/drivers/crypto/ccp/rte_pmd_ccp_version.map
new file mode 100644
index 00000000..9b9ab1a4
--- /dev/null
+++ b/drivers/crypto/ccp/rte_pmd_ccp_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+ local: *;
+};