Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Makefile | 8
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_pmd.c | 88
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c | 8
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h | 6
-rw-r--r--  drivers/crypto/aesni_gcm/meson.build | 12
-rw-r--r--  drivers/crypto/aesni_mb/aesni_mb_ops.h | 89
-rw-r--r--  drivers/crypto/aesni_mb/meson.build | 12
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 346
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 90
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 27
-rw-r--r--  drivers/crypto/caam_jr/Makefile | 44
-rw-r--r--  drivers/crypto/caam_jr/caam_jr.c | 2508
-rw-r--r--  drivers/crypto/caam_jr/caam_jr_capabilities.c | 266
-rw-r--r--  drivers/crypto/caam_jr/caam_jr_capabilities.h | 18
-rw-r--r--  drivers/crypto/caam_jr/caam_jr_config.h | 207
-rw-r--r--  drivers/crypto/caam_jr/caam_jr_desc.h | 285
-rw-r--r--  drivers/crypto/caam_jr/caam_jr_hw.c | 367
-rw-r--r--  drivers/crypto/caam_jr/caam_jr_hw_specific.h | 503
-rw-r--r--  drivers/crypto/caam_jr/caam_jr_log.h | 42
-rw-r--r--  drivers/crypto/caam_jr/caam_jr_pvt.h | 291
-rw-r--r--  drivers/crypto/caam_jr/caam_jr_uio.c | 501
-rw-r--r--  drivers/crypto/caam_jr/meson.build | 17
-rw-r--r--  drivers/crypto/caam_jr/rte_pmd_caam_jr_version.map | 4
-rw-r--r--  drivers/crypto/dpaa2_sec/Makefile | 11
-rw-r--r--  drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 788
-rw-r--r--  drivers/crypto/dpaa2_sec/dpaa2_sec_event.h | 18
-rw-r--r--  drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h | 210
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/desc.h | 816
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/desc/algo.h | 58
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/desc/ipsec.h | 195
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/desc/pdcp.h | 2796
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h | 346
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h | 22
-rw-r--r--  drivers/crypto/dpaa2_sec/mc/dpseci.c | 128
-rw-r--r--  drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h | 25
-rw-r--r--  drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h | 73
-rw-r--r--  drivers/crypto/dpaa2_sec/meson.build | 2
-rw-r--r--  drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map | 8
-rw-r--r--  drivers/crypto/dpaa_sec/Makefile | 2
-rw-r--r--  drivers/crypto/dpaa_sec/dpaa_sec.c | 314
-rw-r--r--  drivers/crypto/dpaa_sec/dpaa_sec.h | 3
-rw-r--r--  drivers/crypto/kasumi/meson.build | 12
-rw-r--r--  drivers/crypto/meson.build | 5
-rw-r--r--  drivers/crypto/mvsam/Makefile | 5
-rw-r--r--  drivers/crypto/mvsam/meson.build | 2
-rw-r--r--  drivers/crypto/mvsam/rte_mrvl_pmd.c | 213
-rw-r--r--  drivers/crypto/mvsam/rte_mrvl_pmd_ops.c | 160
-rw-r--r--  drivers/crypto/mvsam/rte_mrvl_pmd_private.h | 34
-rw-r--r--  drivers/crypto/null/null_crypto_pmd_ops.c | 2
-rw-r--r--  drivers/crypto/octeontx/Makefile | 46
-rw-r--r--  drivers/crypto/octeontx/meson.build | 18
-rw-r--r--  drivers/crypto/octeontx/otx_cryptodev.c | 133
-rw-r--r--  drivers/crypto/octeontx/otx_cryptodev.h | 20
-rw-r--r--  drivers/crypto/octeontx/otx_cryptodev_capabilities.c | 604
-rw-r--r--  drivers/crypto/octeontx/otx_cryptodev_capabilities.h | 17
-rw-r--r--  drivers/crypto/octeontx/otx_cryptodev_hw_access.c | 598
-rw-r--r--  drivers/crypto/octeontx/otx_cryptodev_hw_access.h | 320
-rw-r--r--  drivers/crypto/octeontx/otx_cryptodev_mbox.c | 178
-rw-r--r--  drivers/crypto/octeontx/otx_cryptodev_mbox.h | 92
-rw-r--r--  drivers/crypto/octeontx/otx_cryptodev_ops.c | 531
-rw-r--r--  drivers/crypto/octeontx/otx_cryptodev_ops.h | 18
-rw-r--r--  drivers/crypto/octeontx/rte_pmd_octeontx_crypto_version.map | 4
-rw-r--r--  drivers/crypto/openssl/compat.h | 265
-rw-r--r--  drivers/crypto/openssl/rte_openssl_pmd.c | 29
-rw-r--r--  drivers/crypto/openssl/rte_openssl_pmd_ops.c | 40
-rw-r--r--  drivers/crypto/qat/qat_sym_capabilities.h | 20
-rw-r--r--  drivers/crypto/qat/qat_sym_pmd.c | 1
-rw-r--r--  drivers/crypto/qat/qat_sym_pmd.h | 2
-rw-r--r--  drivers/crypto/qat/qat_sym_session.c | 190
-rw-r--r--  drivers/crypto/qat/qat_sym_session.h | 3
-rw-r--r--  drivers/crypto/scheduler/meson.build | 19
-rw-r--r--  drivers/crypto/scheduler/rte_cryptodev_scheduler.c | 26
-rw-r--r--  drivers/crypto/scheduler/rte_cryptodev_scheduler.h | 8
-rw-r--r--  drivers/crypto/scheduler/scheduler_failover.c | 6
-rw-r--r--  drivers/crypto/scheduler/scheduler_multicore.c | 6
-rw-r--r--  drivers/crypto/scheduler/scheduler_pkt_size_distr.c | 6
-rw-r--r--  drivers/crypto/scheduler/scheduler_pmd.c | 8
-rw-r--r--  drivers/crypto/scheduler/scheduler_pmd_ops.c | 2
-rw-r--r--  drivers/crypto/scheduler/scheduler_pmd_private.h | 2
-rw-r--r--  drivers/crypto/scheduler/scheduler_roundrobin.c | 6
-rw-r--r--  drivers/crypto/zuc/meson.build | 12
81 files changed, 13603 insertions, 1614 deletions
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index c480cbd3..009f8443 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -7,6 +7,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb
DIRS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += armv8
DIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += octeontx
DIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += openssl
DIRS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g
@@ -14,12 +15,15 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += kasumi
DIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += zuc
DIRS-$(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) += mvsam
DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
+ifeq ($(CONFIG_RTE_LIBRTE_SECURITY),y)
ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec
-endif
+endif # CONFIG_RTE_LIBRTE_FSLMC_BUS
ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += dpaa_sec
-endif
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_CAAM_JR) += caam_jr
+endif # CONFIG_RTE_LIBRTE_PMD_DPAA_SEC
+endif # CONFIG_RTE_LIBRTE_SECURITY
DIRS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 752e0cd6..ebdf7c35 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -23,7 +23,6 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
{
const struct rte_crypto_sym_xform *auth_xform;
const struct rte_crypto_sym_xform *aead_xform;
- uint16_t digest_length;
uint8_t key_length;
uint8_t *key;
@@ -47,7 +46,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
key_length = auth_xform->auth.key.length;
key = auth_xform->auth.key.data;
- digest_length = auth_xform->auth.digest_length;
+ sess->req_digest_length = auth_xform->auth.digest_length;
/* AES-GCM */
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
@@ -73,7 +72,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
key = aead_xform->aead.key.data;
sess->aad_length = aead_xform->aead.aad_length;
- digest_length = aead_xform->aead.digest_length;
+ sess->req_digest_length = aead_xform->aead.digest_length;
} else {
AESNI_GCM_LOG(ERR, "Wrong xform type, has to be AEAD or authentication");
return -ENOTSUP;
@@ -106,13 +105,28 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
gcm_ops[sess->key].precomp(key, &sess->gdata_key);
/* Digest check */
- if (digest_length != 16 &&
- digest_length != 12 &&
- digest_length != 8) {
+ if (sess->req_digest_length > 16) {
AESNI_GCM_LOG(ERR, "Invalid digest length");
return -EINVAL;
}
- sess->digest_length = digest_length;
+ /*
+ * The multi-buffer library supports digest sizes from 4 to 16 bytes
+ * in version 0.50, and sizes of 8, 12 and 16 bytes in version 0.49.
+ * If the requested size is different, generate the full digest
+ * (16 bytes) in a temporary location and then memcpy
+ * the requested number of bytes.
+ */
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ if (sess->req_digest_length < 4)
+#else
+ if (sess->req_digest_length != 16 &&
+ sess->req_digest_length != 12 &&
+ sess->req_digest_length != 8)
+#endif
+ sess->gen_digest_length = 16;
+ else
+ sess->gen_digest_length = sess->req_digest_length;
return 0;
}
@@ -180,6 +194,7 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
struct rte_mbuf *m_src = sym_op->m_src;
uint32_t offset, data_offset, data_length;
uint32_t part_len, total_len, data_len;
+ uint8_t *tag;
if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION ||
session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
@@ -225,17 +240,8 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
session->iv.offset);
- /*
- * GCM working in 12B IV mode => 16B pre-counter block we need
- * to set BE LSB to 1, driver expects that 16B is allocated
- */
- if (session->iv.length == 12) {
- uint32_t *iv_padd = (uint32_t *)&(iv_ptr[12]);
- *iv_padd = rte_bswap32(1);
- }
if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
-
qp->ops[session->key].init(&session->gdata_key,
&qp->gdata_ctx,
iv_ptr,
@@ -263,13 +269,16 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
total_len -= part_len;
}
+ if (session->req_digest_length != session->gen_digest_length)
+ tag = qp->temp_digest;
+ else
+ tag = sym_op->aead.digest.data;
+
qp->ops[session->key].finalize(&session->gdata_key,
&qp->gdata_ctx,
- sym_op->aead.digest.data,
- (uint64_t)session->digest_length);
+ tag,
+ session->gen_digest_length);
} else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
- uint8_t *auth_tag = qp->temp_digest;
-
qp->ops[session->key].init(&session->gdata_key,
&qp->gdata_ctx,
iv_ptr,
@@ -298,33 +307,41 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
total_len -= part_len;
}
+ tag = qp->temp_digest;
qp->ops[session->key].finalize(&session->gdata_key,
&qp->gdata_ctx,
- auth_tag,
- (uint64_t)session->digest_length);
+ tag,
+ session->gen_digest_length);
} else if (session->op == AESNI_GMAC_OP_GENERATE) {
qp->ops[session->key].init(&session->gdata_key,
&qp->gdata_ctx,
iv_ptr,
src,
(uint64_t)data_length);
+ if (session->req_digest_length != session->gen_digest_length)
+ tag = qp->temp_digest;
+ else
+ tag = sym_op->auth.digest.data;
qp->ops[session->key].finalize(&session->gdata_key,
&qp->gdata_ctx,
- sym_op->auth.digest.data,
- (uint64_t)session->digest_length);
+ tag,
+ session->gen_digest_length);
} else { /* AESNI_GMAC_OP_VERIFY */
- uint8_t *auth_tag = qp->temp_digest;
-
qp->ops[session->key].init(&session->gdata_key,
&qp->gdata_ctx,
iv_ptr,
src,
(uint64_t)data_length);
+ /*
+ * Always generate 16 bytes and later compare only
+ * the number of bytes requested.
+ */
+ tag = qp->temp_digest;
qp->ops[session->key].finalize(&session->gdata_key,
&qp->gdata_ctx,
- auth_tag,
- (uint64_t)session->digest_length);
+ tag,
+ session->gen_digest_length);
}
return 0;
@@ -361,13 +378,22 @@ post_process_gcm_crypto_op(struct aesni_gcm_qp *qp,
#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
rte_hexdump(stdout, "auth tag (orig):",
- digest, session->digest_length);
+ digest, session->req_digest_length);
rte_hexdump(stdout, "auth tag (calc):",
- tag, session->digest_length);
+ tag, session->req_digest_length);
#endif
- if (memcmp(tag, digest, session->digest_length) != 0)
+ if (memcmp(tag, digest, session->req_digest_length) != 0)
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ if (session->req_digest_length != session->gen_digest_length) {
+ if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION)
+ memcpy(op->sym->aead.digest.data, qp->temp_digest,
+ session->req_digest_length);
+ else
+ memcpy(op->sym->auth.digest.data, qp->temp_digest,
+ session->req_digest_length);
+ }
}
}
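
The hunks above split the old digest_length into a requested length (req_digest_length) and a generated length (gen_digest_length): when they differ, the full 16-byte tag is produced in the queue pair's scratch buffer and only the requested bytes are copied back during post-processing. A minimal standalone sketch of that pattern (helper names are illustrative, not the driver's symbols):

    #include <stdint.h>
    #include <string.h>

    /* Session setup: decide how many tag bytes the library must generate.
     * Mirrors the 0.50 branch above: below 4 bytes, fall back to a full
     * 16-byte tag that is truncated afterwards. */
    static uint16_t
    pick_gen_tag_len(uint16_t req_len)
    {
    	return (req_len < 4) ? 16 : req_len;
    }

    /* Post-processing: copy the truncated tag back to the caller's buffer
     * only when a scratch buffer had to be used. */
    static void
    finish_tag(uint8_t *user_tag, const uint8_t *scratch_tag,
    	   uint16_t req_len, uint16_t gen_len)
    {
    	if (req_len != gen_len)
    		memcpy(user_tag, scratch_tag, req_len);
    }
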
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
index b6b4dd02..c343a393 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
@@ -24,9 +24,9 @@ static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
.increment = 8
},
.digest_size = {
- .min = 8,
+ .min = 1,
.max = 16,
- .increment = 4
+ .increment = 1
},
.iv_size = {
.min = 12,
@@ -49,9 +49,9 @@ static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
.increment = 8
},
.digest_size = {
- .min = 8,
+ .min = 1,
.max = 16,
- .increment = 4
+ .increment = 1
},
.aad_size = {
.min = 0,
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
index c13a12a5..92b04135 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
@@ -76,8 +76,10 @@ struct aesni_gcm_session {
/**< IV parameters */
uint16_t aad_length;
/**< AAD length */
- uint16_t digest_length;
- /**< Digest length */
+ uint16_t req_digest_length;
+ /**< Requested digest length */
+ uint16_t gen_digest_length;
+ /**< Generated digest length */
enum aesni_gcm_operation op;
/**< GCM operation type */
enum aesni_gcm_key key;
diff --git a/drivers/crypto/aesni_gcm/meson.build b/drivers/crypto/aesni_gcm/meson.build
new file mode 100644
index 00000000..a02da1ef
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+lib = cc.find_library('IPSec_MB', required: false)
+if not lib.found()
+ build = false
+else
+ ext_deps += lib
+endif
+
+sources = files('aesni_gcm_pmd.c', 'aesni_gcm_pmd_ops.c')
+deps += ['bus_vdev']
diff --git a/drivers/crypto/aesni_mb/aesni_mb_ops.h b/drivers/crypto/aesni_mb/aesni_mb_ops.h
index 5a1cba6c..575d6a5b 100644
--- a/drivers/crypto/aesni_mb/aesni_mb_ops.h
+++ b/drivers/crypto/aesni_mb/aesni_mb_ops.h
@@ -11,6 +11,15 @@
#include <intel-ipsec-mb.h>
+/*
+ * IMB_VERSION_NUM macro was introduced in version Multi-buffer 0.50,
+ * so if macro is not defined, it means that the version is 0.49.
+ */
+#if !defined(IMB_VERSION_NUM)
+#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#define IMB_VERSION_NUM IMB_VERSION(0, 49, 0)
+#endif
+
enum aesni_mb_vector_mode {
RTE_AESNI_MB_NOT_SUPPORTED = 0,
RTE_AESNI_MB_SSE,
@@ -39,6 +48,8 @@ typedef void (*aes_cmac_sub_key_gen_t)
(const void *exp_key, void *k2, void *k3);
typedef void (*aes_cmac_keyexp_t)
(const void *key, void *keyexp);
+typedef void (*aes_gcm_keyexp_t)
+ (const void *key, struct gcm_key_data *keyexp);
/** Multi-buffer library function pointer table */
struct aesni_mb_op_fns {
@@ -86,8 +97,24 @@ struct aesni_mb_op_fns {
/**< AES CMAC subkey expansions */
aes_cmac_keyexp_t aes_cmac_expkey;
/**< AES CMAC key expansions */
+ aes_gcm_keyexp_t aes_gcm_128;
+ /**< AES GCM 128 key expansions */
+ aes_gcm_keyexp_t aes_gcm_192;
+ /**< AES GCM 192 key expansions */
+ aes_gcm_keyexp_t aes_gcm_256;
+ /**< AES GCM 256 key expansions */
} keyexp;
/**< Key expansion functions */
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ struct {
+ hash_fn_t sha1;
+ hash_fn_t sha224;
+ hash_fn_t sha256;
+ hash_fn_t sha384;
+ hash_fn_t sha512;
+ } multi_block;
+ /**< Multi-block hash functions */
+#endif
} aux;
/**< Auxiliary functions */
};
@@ -104,7 +131,13 @@ static const struct aesni_mb_op_fns job_ops[] = {
},
.keyexp = {
NULL
+ },
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .multi_block = {
+ NULL
}
+#endif
+
}
},
[RTE_AESNI_MB_SSE] = {
@@ -130,8 +163,20 @@ static const struct aesni_mb_op_fns job_ops[] = {
aes_keyexp_256_sse,
aes_xcbc_expand_key_sse,
aes_cmac_subkey_gen_sse,
- aes_keyexp_128_enc_sse
+ aes_keyexp_128_enc_sse,
+ aes_gcm_pre_128_sse,
+ aes_gcm_pre_192_sse,
+ aes_gcm_pre_256_sse
+ },
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .multi_block = {
+ sha1_sse,
+ sha224_sse,
+ sha256_sse,
+ sha384_sse,
+ sha512_sse
}
+#endif
}
},
[RTE_AESNI_MB_AVX] = {
@@ -157,8 +202,20 @@ static const struct aesni_mb_op_fns job_ops[] = {
aes_keyexp_256_avx,
aes_xcbc_expand_key_avx,
aes_cmac_subkey_gen_avx,
- aes_keyexp_128_enc_avx
+ aes_keyexp_128_enc_avx,
+ aes_gcm_pre_128_avx_gen2,
+ aes_gcm_pre_192_avx_gen2,
+ aes_gcm_pre_256_avx_gen2
+ },
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .multi_block = {
+ sha1_avx,
+ sha224_avx,
+ sha256_avx,
+ sha384_avx,
+ sha512_avx
}
+#endif
}
},
[RTE_AESNI_MB_AVX2] = {
@@ -184,8 +241,20 @@ static const struct aesni_mb_op_fns job_ops[] = {
aes_keyexp_256_avx2,
aes_xcbc_expand_key_avx2,
aes_cmac_subkey_gen_avx2,
- aes_keyexp_128_enc_avx2
+ aes_keyexp_128_enc_avx2,
+ aes_gcm_pre_128_avx_gen4,
+ aes_gcm_pre_192_avx_gen4,
+ aes_gcm_pre_256_avx_gen4
+ },
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .multi_block = {
+ sha1_avx2,
+ sha224_avx2,
+ sha256_avx2,
+ sha384_avx2,
+ sha512_avx2
}
+#endif
}
},
[RTE_AESNI_MB_AVX512] = {
@@ -211,8 +280,20 @@ static const struct aesni_mb_op_fns job_ops[] = {
aes_keyexp_256_avx512,
aes_xcbc_expand_key_avx512,
aes_cmac_subkey_gen_avx512,
- aes_keyexp_128_enc_avx512
+ aes_keyexp_128_enc_avx512,
+ aes_gcm_pre_128_avx_gen4,
+ aes_gcm_pre_192_avx_gen4,
+ aes_gcm_pre_256_avx_gen4
+ },
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .multi_block = {
+ sha1_avx512,
+ sha224_avx512,
+ sha256_avx512,
+ sha384_avx512,
+ sha512_avx512
}
+#endif
}
}
};
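
The version guards above let the same table build against intel-ipsec-mb 0.49, which predates the IMB_VERSION_NUM macro, and 0.50+, which adds the one-shot multi-block SHA entry points. A minimal sketch of how such a compile-time gate works, assuming only that the header may or may not define IMB_VERSION_NUM:

    /* If the headers predate 0.50 they do not define IMB_VERSION_NUM,
     * so provide the same fallback as aesni_mb_ops.h and assume 0.49. */
    #ifndef IMB_VERSION_NUM
    #define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
    #define IMB_VERSION_NUM IMB_VERSION(0, 49, 0)
    #endif

    #if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
    /* safe to reference the multi_block hash entry points added in 0.50 */
    #else
    /* keep the 0.49 behaviour: fixed truncated digest sizes, no one-shot SHA */
    #endif
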
diff --git a/drivers/crypto/aesni_mb/meson.build b/drivers/crypto/aesni_mb/meson.build
new file mode 100644
index 00000000..aae0995e
--- /dev/null
+++ b/drivers/crypto/aesni_mb/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+lib = cc.find_library('IPSec_MB', required: false)
+if not lib.found()
+ build = false
+else
+ ext_deps += lib
+endif
+
+sources = files('rte_aesni_mb_pmd.c', 'rte_aesni_mb_pmd_ops.c')
+deps += ['bus_vdev']
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 93dc7a44..83250e32 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -14,6 +14,9 @@
#include "rte_aesni_mb_pmd_private.h"
+#define AES_CCM_DIGEST_MIN_LEN 4
+#define AES_CCM_DIGEST_MAX_LEN 16
+#define HMAC_MAX_BLOCK_SIZE 128
static uint8_t cryptodev_driver_id;
typedef void (*hash_one_block_t)(const void *data, void *digest);
@@ -83,7 +86,8 @@ aesni_mb_get_chain_order(const struct rte_crypto_sym_xform *xform)
}
if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
- if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM) {
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM ||
+ xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) {
if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
return AESNI_MB_OP_AEAD_CIPHER_HASH;
else
@@ -101,6 +105,8 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
const struct rte_crypto_sym_xform *xform)
{
hash_one_block_t hash_oneblock_fn;
+ unsigned int key_larger_block_size = 0;
+ uint8_t hashed_key[HMAC_MAX_BLOCK_SIZE] = { 0 };
if (xform == NULL) {
sess->auth.algo = NULL_HASH;
@@ -112,12 +118,23 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
return -1;
}
+ /* Set the request digest size */
+ sess->auth.req_digest_len = xform->auth.digest_length;
+
/* Select auth generate/verify */
sess->auth.operation = xform->auth.op;
/* Set Authentication Parameters */
if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC) {
sess->auth.algo = AES_XCBC;
+
+ uint16_t xcbc_mac_digest_len =
+ get_truncated_digest_byte_length(AES_XCBC);
+ if (sess->auth.req_digest_len != xcbc_mac_digest_len) {
+ AESNI_MB_LOG(ERR, "Invalid digest size\n");
+ return -EINVAL;
+ }
+ sess->auth.gen_digest_len = sess->auth.req_digest_len;
(*mb_ops->aux.keyexp.aes_xcbc)(xform->auth.key.data,
sess->auth.xcbc.k1_expanded,
sess->auth.xcbc.k2, sess->auth.xcbc.k3);
@@ -126,6 +143,32 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
sess->auth.algo = AES_CMAC;
+
+ uint16_t cmac_digest_len = get_digest_byte_length(AES_CMAC);
+
+ if (sess->auth.req_digest_len > cmac_digest_len) {
+ AESNI_MB_LOG(ERR, "Invalid digest size\n");
+ return -EINVAL;
+ }
+ /*
+ * The multi-buffer library supports digest sizes from 4 to 16 bytes
+ * in version 0.50, and sizes of 12 and 16 bytes in version 0.49.
+ * If the requested size is different, generate the full digest
+ * (16 bytes) in a temporary location and then memcpy
+ * the requested number of bytes.
+ */
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ if (sess->auth.req_digest_len < 4)
+#else
+ uint16_t cmac_trunc_digest_len =
+ get_truncated_digest_byte_length(AES_CMAC);
+ if (sess->auth.req_digest_len != cmac_digest_len &&
+ sess->auth.req_digest_len != cmac_trunc_digest_len)
+#endif
+ sess->auth.gen_digest_len = cmac_digest_len;
+ else
+ sess->auth.gen_digest_len = sess->auth.req_digest_len;
(*mb_ops->aux.keyexp.aes_cmac_expkey)(xform->auth.key.data,
sess->auth.cmac.expkey);
@@ -134,7 +177,6 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
return 0;
}
-
switch (xform->auth.algo) {
case RTE_CRYPTO_AUTH_MD5_HMAC:
sess->auth.algo = MD5;
@@ -143,34 +185,107 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
case RTE_CRYPTO_AUTH_SHA1_HMAC:
sess->auth.algo = SHA1;
hash_oneblock_fn = mb_ops->aux.one_block.sha1;
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ if (xform->auth.key.length > get_auth_algo_blocksize(SHA1)) {
+ mb_ops->aux.multi_block.sha1(
+ xform->auth.key.data,
+ xform->auth.key.length,
+ hashed_key);
+ key_larger_block_size = 1;
+ }
+#endif
break;
case RTE_CRYPTO_AUTH_SHA224_HMAC:
sess->auth.algo = SHA_224;
hash_oneblock_fn = mb_ops->aux.one_block.sha224;
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ if (xform->auth.key.length > get_auth_algo_blocksize(SHA_224)) {
+ mb_ops->aux.multi_block.sha224(
+ xform->auth.key.data,
+ xform->auth.key.length,
+ hashed_key);
+ key_larger_block_size = 1;
+ }
+#endif
break;
case RTE_CRYPTO_AUTH_SHA256_HMAC:
sess->auth.algo = SHA_256;
hash_oneblock_fn = mb_ops->aux.one_block.sha256;
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ if (xform->auth.key.length > get_auth_algo_blocksize(SHA_256)) {
+ mb_ops->aux.multi_block.sha256(
+ xform->auth.key.data,
+ xform->auth.key.length,
+ hashed_key);
+ key_larger_block_size = 1;
+ }
+#endif
break;
case RTE_CRYPTO_AUTH_SHA384_HMAC:
sess->auth.algo = SHA_384;
hash_oneblock_fn = mb_ops->aux.one_block.sha384;
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ if (xform->auth.key.length > get_auth_algo_blocksize(SHA_384)) {
+ mb_ops->aux.multi_block.sha384(
+ xform->auth.key.data,
+ xform->auth.key.length,
+ hashed_key);
+ key_larger_block_size = 1;
+ }
+#endif
break;
case RTE_CRYPTO_AUTH_SHA512_HMAC:
sess->auth.algo = SHA_512;
hash_oneblock_fn = mb_ops->aux.one_block.sha512;
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ if (xform->auth.key.length > get_auth_algo_blocksize(SHA_512)) {
+ mb_ops->aux.multi_block.sha512(
+ xform->auth.key.data,
+ xform->auth.key.length,
+ hashed_key);
+ key_larger_block_size = 1;
+ }
+#endif
break;
default:
AESNI_MB_LOG(ERR, "Unsupported authentication algorithm selection");
return -ENOTSUP;
}
+ uint16_t trunc_digest_size =
+ get_truncated_digest_byte_length(sess->auth.algo);
+ uint16_t full_digest_size =
+ get_digest_byte_length(sess->auth.algo);
+
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ if (sess->auth.req_digest_len > full_digest_size ||
+ sess->auth.req_digest_len == 0) {
+#else
+ if (sess->auth.req_digest_len != trunc_digest_size) {
+#endif
+ AESNI_MB_LOG(ERR, "Invalid digest size\n");
+ return -EINVAL;
+ }
+
+ if (sess->auth.req_digest_len != trunc_digest_size &&
+ sess->auth.req_digest_len != full_digest_size)
+ sess->auth.gen_digest_len = full_digest_size;
+ else
+ sess->auth.gen_digest_len = sess->auth.req_digest_len;
/* Calculate Authentication precomputes */
- calculate_auth_precomputes(hash_oneblock_fn,
+ if (key_larger_block_size) {
+ calculate_auth_precomputes(hash_oneblock_fn,
+ sess->auth.pads.inner, sess->auth.pads.outer,
+ hashed_key,
+ xform->auth.key.length,
+ get_auth_algo_blocksize(sess->auth.algo));
+ } else {
+ calculate_auth_precomputes(hash_oneblock_fn,
sess->auth.pads.inner, sess->auth.pads.outer,
xform->auth.key.data,
xform->auth.key.length,
get_auth_algo_blocksize(sess->auth.algo));
+ }
return 0;
}
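
For HMAC, the changes above also handle keys longer than the hash block size as RFC 2104 requires: the key is first reduced with the algorithm's one-shot multi-block hash (available from library 0.50) and the ipad/opad precomputes are then derived from that digest rather than the raw key. A compact sketch of the rule in isolation, with the one-shot hash passed as a callback standing in for mb_ops->aux.multi_block.shaN:

    #include <stdint.h>
    #include <string.h>

    typedef void (*one_shot_hash_t)(const void *msg, uint32_t len, void *digest);

    /* RFC 2104: if the HMAC key is longer than the block size, replace it
     * with its digest before computing the ipad/opad precomputes. */
    static uint32_t
    hmac_effective_key(one_shot_hash_t hash_fn,
    		   const uint8_t *key, uint32_t key_len,
    		   uint32_t block_size, uint32_t digest_size,
    		   uint8_t *out /* at least block_size bytes */)
    {
    	if (key_len > block_size) {
    		hash_fn(key, key_len, out);	/* K' = H(K) */
    		return digest_size;
    	}
    	memcpy(out, key, key_len);
    	return key_len;
    }
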
@@ -330,7 +445,10 @@ aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops,
struct aesni_mb_session *sess,
const struct rte_crypto_sym_xform *xform)
{
- aes_keyexp_t aes_keyexp_fn;
+ union {
+ aes_keyexp_t aes_keyexp_fn;
+ aes_gcm_keyexp_t aes_gcm_keyexp_fn;
+ } keyexp;
switch (xform->aead.op) {
case RTE_CRYPTO_AEAD_OP_ENCRYPT:
@@ -350,7 +468,53 @@ aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops,
case RTE_CRYPTO_AEAD_AES_CCM:
sess->cipher.mode = CCM;
sess->auth.algo = AES_CCM;
+
+ /* Check key length and choose key expansion function for AES */
+ switch (xform->aead.key.length) {
+ case AES_128_BYTES:
+ sess->cipher.key_length_in_bytes = AES_128_BYTES;
+ keyexp.aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
+ break;
+ default:
+ AESNI_MB_LOG(ERR, "Invalid cipher key length");
+ return -EINVAL;
+ }
+
+ /* Expanded cipher keys */
+ (*keyexp.aes_keyexp_fn)(xform->aead.key.data,
+ sess->cipher.expanded_aes_keys.encode,
+ sess->cipher.expanded_aes_keys.decode);
+ break;
+
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ sess->cipher.mode = GCM;
+ sess->auth.algo = AES_GMAC;
+
+ switch (xform->aead.key.length) {
+ case AES_128_BYTES:
+ sess->cipher.key_length_in_bytes = AES_128_BYTES;
+ keyexp.aes_gcm_keyexp_fn =
+ mb_ops->aux.keyexp.aes_gcm_128;
+ break;
+ case AES_192_BYTES:
+ sess->cipher.key_length_in_bytes = AES_192_BYTES;
+ keyexp.aes_gcm_keyexp_fn =
+ mb_ops->aux.keyexp.aes_gcm_192;
+ break;
+ case AES_256_BYTES:
+ sess->cipher.key_length_in_bytes = AES_256_BYTES;
+ keyexp.aes_gcm_keyexp_fn =
+ mb_ops->aux.keyexp.aes_gcm_256;
+ break;
+ default:
+ AESNI_MB_LOG(ERR, "Invalid cipher key length");
+ return -EINVAL;
+ }
+
+ (keyexp.aes_gcm_keyexp_fn)(xform->aead.key.data,
+ &sess->cipher.gcm_key);
break;
+
default:
AESNI_MB_LOG(ERR, "Unsupported aead mode parameter");
return -ENOTSUP;
@@ -360,22 +524,15 @@ aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops,
sess->iv.offset = xform->aead.iv.offset;
sess->iv.length = xform->aead.iv.length;
- /* Check key length and choose key expansion function for AES */
-
- switch (xform->aead.key.length) {
- case AES_128_BYTES:
- sess->cipher.key_length_in_bytes = AES_128_BYTES;
- aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
- break;
- default:
- AESNI_MB_LOG(ERR, "Invalid cipher key length");
+ sess->auth.req_digest_len = xform->aead.digest_length;
+ /* CCM digests must be between 4 and 16 and an even number */
+ if (sess->auth.req_digest_len < AES_CCM_DIGEST_MIN_LEN ||
+ sess->auth.req_digest_len > AES_CCM_DIGEST_MAX_LEN ||
+ (sess->auth.req_digest_len & 1) == 1) {
+ AESNI_MB_LOG(ERR, "Invalid digest size\n");
return -EINVAL;
}
-
- /* Expanded cipher keys */
- (*aes_keyexp_fn)(xform->aead.key.data,
- sess->cipher.expanded_aes_keys.encode,
- sess->cipher.expanded_aes_keys.decode);
+ sess->auth.gen_digest_len = sess->auth.req_digest_len;
return 0;
}
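
The AEAD setup above validates the CCM tag length before accepting the session: NIST SP 800-38C only permits tags of 4, 6, 8, 10, 12, 14 or 16 bytes, which is exactly what the "between 4 and 16 and even" test encodes. The same check in a standalone form (hypothetical helper, not driver code):

    #include <stdbool.h>
    #include <stdint.h>

    static bool
    ccm_tag_len_ok(uint16_t len)
    {
    	/* valid CCM tag sizes: 4, 6, 8, 10, 12, 14 and 16 bytes */
    	return len >= 4 && len <= 16 && (len & 1) == 0;
    }
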
@@ -397,19 +554,16 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
sess->chain_order = HASH_CIPHER;
auth_xform = xform;
cipher_xform = xform->next;
- sess->auth.digest_len = xform->auth.digest_length;
break;
case AESNI_MB_OP_CIPHER_HASH:
sess->chain_order = CIPHER_HASH;
auth_xform = xform->next;
cipher_xform = xform;
- sess->auth.digest_len = xform->auth.digest_length;
break;
case AESNI_MB_OP_HASH_ONLY:
sess->chain_order = HASH_CIPHER;
auth_xform = xform;
cipher_xform = NULL;
- sess->auth.digest_len = xform->auth.digest_length;
break;
case AESNI_MB_OP_CIPHER_ONLY:
/*
@@ -428,13 +582,11 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
case AESNI_MB_OP_AEAD_CIPHER_HASH:
sess->chain_order = CIPHER_HASH;
sess->aead.aad_len = xform->aead.aad_length;
- sess->auth.digest_len = xform->aead.digest_length;
aead_xform = xform;
break;
case AESNI_MB_OP_AEAD_HASH_CIPHER:
sess->chain_order = HASH_CIPHER;
sess->aead.aad_len = xform->aead.aad_length;
- sess->auth.digest_len = xform->aead.digest_length;
aead_xform = xform;
break;
case AESNI_MB_OP_NOT_SUPPORTED:
@@ -573,38 +725,62 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
- if (job->cipher_mode == DES3) {
- job->aes_enc_key_expanded =
- session->cipher.exp_3des_keys.ks_ptr;
- job->aes_dec_key_expanded =
- session->cipher.exp_3des_keys.ks_ptr;
- } else {
- job->aes_enc_key_expanded =
- session->cipher.expanded_aes_keys.encode;
- job->aes_dec_key_expanded =
- session->cipher.expanded_aes_keys.decode;
- }
-
-
-
-
/* Set authentication parameters */
job->hash_alg = session->auth.algo;
- if (job->hash_alg == AES_XCBC) {
+
+ switch (job->hash_alg) {
+ case AES_XCBC:
job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
job->u.XCBC._k2 = session->auth.xcbc.k2;
job->u.XCBC._k3 = session->auth.xcbc.k3;
- } else if (job->hash_alg == AES_CCM) {
+
+ job->aes_enc_key_expanded =
+ session->cipher.expanded_aes_keys.encode;
+ job->aes_dec_key_expanded =
+ session->cipher.expanded_aes_keys.decode;
+ break;
+
+ case AES_CCM:
job->u.CCM.aad = op->sym->aead.aad.data + 18;
job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
- } else if (job->hash_alg == AES_CMAC) {
+ job->aes_enc_key_expanded =
+ session->cipher.expanded_aes_keys.encode;
+ job->aes_dec_key_expanded =
+ session->cipher.expanded_aes_keys.decode;
+ break;
+
+ case AES_CMAC:
job->u.CMAC._key_expanded = session->auth.cmac.expkey;
job->u.CMAC._skey1 = session->auth.cmac.skey1;
job->u.CMAC._skey2 = session->auth.cmac.skey2;
+ job->aes_enc_key_expanded =
+ session->cipher.expanded_aes_keys.encode;
+ job->aes_dec_key_expanded =
+ session->cipher.expanded_aes_keys.decode;
+ break;
- } else {
+ case AES_GMAC:
+ job->u.GCM.aad = op->sym->aead.aad.data;
+ job->u.GCM.aad_len_in_bytes = session->aead.aad_len;
+ job->aes_enc_key_expanded = &session->cipher.gcm_key;
+ job->aes_dec_key_expanded = &session->cipher.gcm_key;
+ break;
+
+ default:
job->u.HMAC._hashed_auth_key_xor_ipad = session->auth.pads.inner;
job->u.HMAC._hashed_auth_key_xor_opad = session->auth.pads.outer;
+
+ if (job->cipher_mode == DES3) {
+ job->aes_enc_key_expanded =
+ session->cipher.exp_3des_keys.ks_ptr;
+ job->aes_dec_key_expanded =
+ session->cipher.exp_3des_keys.ks_ptr;
+ } else {
+ job->aes_enc_key_expanded =
+ session->cipher.expanded_aes_keys.encode;
+ job->aes_dec_key_expanded =
+ session->cipher.expanded_aes_keys.decode;
+ }
}
/* Mutable crypto operation parameters */
@@ -625,7 +801,7 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
rte_pktmbuf_data_len(op->sym->m_src));
} else {
m_dst = m_src;
- if (job->hash_alg == AES_CCM)
+ if (job->hash_alg == AES_CCM || job->hash_alg == AES_GMAC)
m_offset = op->sym->aead.data.offset;
else
m_offset = op->sym->cipher.data.offset;
@@ -637,32 +813,33 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
job->auth_tag_output = qp->temp_digests[*digest_idx];
*digest_idx = (*digest_idx + 1) % MAX_JOBS;
} else {
- if (job->hash_alg == AES_CCM)
+ if (job->hash_alg == AES_CCM || job->hash_alg == AES_GMAC)
job->auth_tag_output = op->sym->aead.digest.data;
else
job->auth_tag_output = op->sym->auth.digest.data;
- }
+ if (session->auth.req_digest_len != session->auth.gen_digest_len) {
+ job->auth_tag_output = qp->temp_digests[*digest_idx];
+ *digest_idx = (*digest_idx + 1) % MAX_JOBS;
+ }
+ }
/*
* The multi-buffer library currently only supports returning a truncated
* digest length, as specified in the relevant IPsec RFCs
*/
- if (job->hash_alg != AES_CCM && job->hash_alg != AES_CMAC)
- job->auth_tag_output_len_in_bytes =
- get_truncated_digest_byte_length(job->hash_alg);
- else
- job->auth_tag_output_len_in_bytes = session->auth.digest_len;
+ /* Set digest length */
+ job->auth_tag_output_len_in_bytes = session->auth.gen_digest_len;
/* Set IV parameters */
-
job->iv_len_in_bytes = session->iv.length;
/* Data Parameter */
job->src = rte_pktmbuf_mtod(m_src, uint8_t *);
job->dst = rte_pktmbuf_mtod_offset(m_dst, uint8_t *, m_offset);
- if (job->hash_alg == AES_CCM) {
+ switch (job->hash_alg) {
+ case AES_CCM:
job->cipher_start_src_offset_in_bytes =
op->sym->aead.data.offset;
job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
@@ -671,7 +848,19 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
session->iv.offset + 1);
- } else {
+ break;
+
+ case AES_GMAC:
+ job->cipher_start_src_offset_in_bytes =
+ op->sym->aead.data.offset;
+ job->hash_start_src_offset_in_bytes = op->sym->aead.data.offset;
+ job->msg_len_to_cipher_in_bytes = op->sym->aead.data.length;
+ job->msg_len_to_hash_in_bytes = job->msg_len_to_cipher_in_bytes;
+ job->iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ session->iv.offset);
+ break;
+
+ default:
job->cipher_start_src_offset_in_bytes =
op->sym->cipher.data.offset;
job->msg_len_to_cipher_in_bytes = op->sym->cipher.data.length;
@@ -690,20 +879,37 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
}
static inline void
-verify_digest(struct aesni_mb_qp *qp __rte_unused, JOB_AES_HMAC *job,
- struct rte_crypto_op *op) {
+verify_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op,
+ struct aesni_mb_session *sess)
+{
/* Verify digest if required */
- if (job->hash_alg == AES_CCM) {
+ if (job->hash_alg == AES_CCM || job->hash_alg == AES_GMAC) {
if (memcmp(job->auth_tag_output, op->sym->aead.digest.data,
- job->auth_tag_output_len_in_bytes) != 0)
+ sess->auth.req_digest_len) != 0)
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
- job->auth_tag_output_len_in_bytes) != 0)
+ sess->auth.req_digest_len) != 0)
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
}
}
+static inline void
+generate_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op,
+ struct aesni_mb_session *sess)
+{
+ /* No extra copy needed */
+ if (likely(sess->auth.req_digest_len == sess->auth.gen_digest_len))
+ return;
+
+ /*
+ * This can only happen for HMAC, so only the digest
+ * for authentication (non-AEAD) algorithms needs to be copied
+ */
+ memcpy(op->sym->auth.digest.data, job->auth_tag_output,
+ sess->auth.req_digest_len);
+}
+
/**
* Process a completed job and return rte_mbuf which job processed
*
@@ -730,7 +936,9 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
if (job->hash_alg != NULL_HASH) {
if (sess->auth.operation ==
RTE_CRYPTO_AUTH_OP_VERIFY)
- verify_digest(qp, job, op);
+ verify_digest(job, op, sess);
+ else
+ generate_digest(job, op, sess);
}
break;
default:
@@ -833,22 +1041,30 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
uint8_t digest_idx = qp->digest_idx;
do {
- /* Get next operation to process from ingress queue */
- retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
- if (retval < 0)
- break;
-
/* Get next free mb job struct from mb manager */
job = (*qp->op_fns->job.get_next)(qp->mb_mgr);
if (unlikely(job == NULL)) {
/* if no free mb job structs we need to flush mb_mgr */
processed_jobs += flush_mb_mgr(qp,
&ops[processed_jobs],
- (nb_ops - processed_jobs) - 1);
+ nb_ops - processed_jobs);
+
+ if (nb_ops == processed_jobs)
+ break;
job = (*qp->op_fns->job.get_next)(qp->mb_mgr);
}
+ /*
+ * Get next operation to process from ingress queue.
+ * There is no need to return the job to the MB_MGR
+ * if there are no more operations to process, since the MB_MGR
+ * can use that pointer again in next get_next calls.
+ */
+ retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
+ if (retval < 0)
+ break;
+
retval = set_mb_job_params(job, qp, op, &digest_idx);
if (unlikely(retval != 0)) {
qp->stats.dequeue_err_count++;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index ab26e5ae..43f6c26e 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -25,9 +25,15 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.increment = 1
},
.digest_size = {
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .min = 1,
+ .max = 16,
+ .increment = 1
+#else
.min = 12,
.max = 12,
.increment = 0
+#endif
},
.iv_size = { 0 }
}, }
@@ -42,13 +48,23 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.block_size = 64,
.key_size = {
.min = 1,
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .max = 65535,
+#else
.max = 64,
+#endif
.increment = 1
},
.digest_size = {
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .min = 1,
+ .max = 20,
+ .increment = 1
+#else
.min = 12,
.max = 12,
.increment = 0
+#endif
},
.iv_size = { 0 }
}, }
@@ -63,13 +79,23 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.block_size = 64,
.key_size = {
.min = 1,
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .max = 65535,
+#else
.max = 64,
+#endif
.increment = 1
},
.digest_size = {
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .min = 1,
+ .max = 28,
+ .increment = 1
+#else
.min = 14,
.max = 14,
.increment = 0
+#endif
},
.iv_size = { 0 }
}, }
@@ -84,13 +110,23 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.block_size = 64,
.key_size = {
.min = 1,
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .max = 65535,
+#else
.max = 64,
+#endif
.increment = 1
},
.digest_size = {
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .min = 1,
+ .max = 32,
+ .increment = 1
+#else
.min = 16,
.max = 16,
.increment = 0
+#endif
},
.iv_size = { 0 }
}, }
@@ -105,13 +141,23 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.block_size = 128,
.key_size = {
.min = 1,
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .max = 65535,
+#else
.max = 128,
+#endif
.increment = 1
},
.digest_size = {
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .min = 1,
+ .max = 48,
+ .increment = 1
+#else
.min = 24,
.max = 24,
.increment = 0
+#endif
},
.iv_size = { 0 }
}, }
@@ -126,13 +172,23 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.block_size = 128,
.key_size = {
.min = 1,
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .max = 65535,
+#else
.max = 128,
+#endif
.increment = 1
},
.digest_size = {
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ .min = 1,
+ .max = 64,
+ .increment = 1
+#else
.min = 32,
.max = 32,
.increment = 0
+#endif
},
.iv_size = { 0 }
}, }
@@ -322,14 +378,44 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
.increment = 0
},
.digest_size = {
- .min = 12,
+ .min = 1,
.max = 16,
- .increment = 4
+ .increment = 1
},
.iv_size = { 0 }
}, }
}, }
},
+ { /* AES GCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 65535,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
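
With the capability table widened (and AES-GCM added), applications can discover the supported digest range at run time instead of hard-coding it. A usage sketch, assuming the standard rte_cryptodev capability helpers with their DPDK 18.11-era signatures:

    #include <rte_cryptodev.h>

    /* Returns non-zero if dev_id supports AES-GCM with the given sizes. */
    static int
    gcm_params_supported(uint8_t dev_id, uint16_t key_len, uint16_t digest_len,
    		     uint16_t aad_len, uint16_t iv_len)
    {
    	const struct rte_cryptodev_symmetric_capability *cap;
    	struct rte_cryptodev_sym_capability_idx idx = {
    		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
    		.algo.aead = RTE_CRYPTO_AEAD_AES_GCM,
    	};

    	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
    	if (cap == NULL)
    		return 0;
    	return rte_cryptodev_sym_capability_check_aead(cap, key_len,
    			digest_len, aad_len, iv_len) == 0;
    }
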
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index 70e9d18e..d8021cda 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -7,15 +7,6 @@
#include "aesni_mb_ops.h"
-/*
- * IMB_VERSION_NUM macro was introduced in version Multi-buffer 0.50,
- * so if macro is not defined, it means that the version is 0.49.
- */
-#if !defined(IMB_VERSION_NUM)
-#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
-#define IMB_VERSION_NUM IMB_VERSION(0, 49, 0)
-#endif
-
#define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
/**< AES-NI Multi buffer PMD device name */
@@ -31,8 +22,8 @@ int aesni_mb_logtype_driver;
#define HMAC_IPAD_VALUE (0x36)
#define HMAC_OPAD_VALUE (0x5C)
-/* Maximum length for digest (SHA-512 truncated needs 32 bytes) */
-#define DIGEST_LENGTH_MAX 32
+/* Maximum length for digest */
+#define DIGEST_LENGTH_MAX 64
static const unsigned auth_blocksize[] = {
[MD5] = 64,
[SHA1] = 64,
@@ -64,7 +55,7 @@ static const unsigned auth_truncated_digest_byte_lengths[] = {
[SHA_384] = 24,
[SHA_512] = 32,
[AES_XCBC] = 12,
- [AES_CMAC] = 16,
+ [AES_CMAC] = 12,
[AES_CCM] = 8,
[NULL_HASH] = 0
};
@@ -91,11 +82,13 @@ static const unsigned auth_digest_byte_lengths[] = {
[SHA_512] = 64,
[AES_XCBC] = 16,
[AES_CMAC] = 16,
+ [AES_GMAC] = 12,
[NULL_HASH] = 0
};
/**
- * Get the output digest size in bytes for a specified authentication algorithm
+ * Get the full digest size in bytes for a specified authentication algorithm
+ * (if available in the Multi-buffer library)
*
* @Note: this function will not return a valid value for a non-valid
* authentication algorithm
@@ -180,6 +173,8 @@ struct aesni_mb_session {
const void *ks_ptr[3];
uint64_t key[3][16];
} exp_3des_keys;
+
+ struct gcm_key_data gcm_key;
};
/**< Expanded AES keys - Allocating space to
* contain the maximum expanded key size which
@@ -226,8 +221,10 @@ struct aesni_mb_session {
} cmac;
/**< Expanded XCBC authentication keys */
};
- /** digest size */
- uint16_t digest_len;
+ /** Generated digest size by the Multi-buffer library */
+ uint16_t gen_digest_len;
+ /** Requested digest size from Cryptodev */
+ uint16_t req_digest_len;
} auth;
struct {
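
Raising DIGEST_LENGTH_MAX from 32 to 64 follows from the capability change: a verify or truncate path may now have to hold a full SHA-512 digest, so every scratch tag buffer must be sized for the largest possible output. Illustrative sizing only (MAX_JOBS here is a placeholder, not the driver's real queue depth):

    #include <stdint.h>

    #define DIGEST_LENGTH_MAX 64	/* full SHA-512 digest */
    #define MAX_JOBS 128		/* placeholder queue depth */

    /* one scratch tag per in-flight job, large enough for any algorithm */
    static uint8_t temp_digests[MAX_JOBS][DIGEST_LENGTH_MAX];
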
diff --git a/drivers/crypto/caam_jr/Makefile b/drivers/crypto/caam_jr/Makefile
new file mode 100644
index 00000000..88cdf741
--- /dev/null
+++ b/drivers/crypto/caam_jr/Makefile
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2017 NXP
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_caam_jr.a
+
+# build flags
+CFLAGS += -DALLOW_EXPERIMENTAL_API
+
+CFLAGS += -D _GNU_SOURCE
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/caam_jr
+#sharing the hw flib headers from dpaa2_sec pmd
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec/
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+
+# versioning export map
+EXPORT_MAP := rte_pmd_caam_jr_version.map
+
+# library version
+LIBABIVER := 1
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CAAM_JR) += caam_jr.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CAAM_JR) += caam_jr_capabilities.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CAAM_JR) += caam_jr_hw.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CAAM_JR) += caam_jr_uio.c
+# library dependencies
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_dpaa
+LDLIBS += -lrte_bus_vdev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/caam_jr/caam_jr.c b/drivers/crypto/caam_jr/caam_jr.c
new file mode 100644
index 00000000..f505adf6
--- /dev/null
+++ b/drivers/crypto/caam_jr/caam_jr.c
@@ -0,0 +1,2508 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017-2018 NXP
+ */
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <sched.h>
+#include <net/if.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_security_driver.h>
+#include <rte_hexdump.h>
+
+#include <caam_jr_capabilities.h>
+#include <caam_jr_config.h>
+#include <caam_jr_hw_specific.h>
+#include <caam_jr_pvt.h>
+#include <caam_jr_desc.h>
+#include <caam_jr_log.h>
+
+/* RTA header files */
+#include <hw/desc/common.h>
+#include <hw/desc/algo.h>
+#include <of.h>
+
+#define CAAM_JR_DBG 0
+#define CRYPTODEV_NAME_CAAM_JR_PMD crypto_caam_jr
+static uint8_t cryptodev_driver_id;
+int caam_jr_logtype;
+
+enum rta_sec_era rta_sec_era;
+
+/* Lists the states possible for the SEC user space driver. */
+enum sec_driver_state_e {
+ SEC_DRIVER_STATE_IDLE, /* Driver not initialized */
+ SEC_DRIVER_STATE_STARTED, /* Driver initialized and can be used*/
+ SEC_DRIVER_STATE_RELEASE, /* Driver release is in progress */
+};
+
+/* Job rings used for communication with SEC HW */
+static struct sec_job_ring_t g_job_rings[MAX_SEC_JOB_RINGS];
+
+/* The current state of SEC user space driver */
+static enum sec_driver_state_e g_driver_state = SEC_DRIVER_STATE_IDLE;
+
+/* The number of job rings used by SEC user space driver */
+static int g_job_rings_no;
+static int g_job_rings_max;
+
+struct sec_outring_entry {
+ phys_addr_t desc; /* Pointer to completed descriptor */
+ uint32_t status; /* Status for completed descriptor */
+} __rte_packed;
+
+/* virtual address conversion when mempool support is available for ctx */
+static inline phys_addr_t
+caam_jr_vtop_ctx(struct caam_jr_op_ctx *ctx, void *vaddr)
+{
+ PMD_INIT_FUNC_TRACE();
+ return (size_t)vaddr - ctx->vtop_offset;
+}
+
+static inline void
+caam_jr_op_ending(struct caam_jr_op_ctx *ctx)
+{
+ PMD_INIT_FUNC_TRACE();
+ /* report op status to sym->op and then free the ctx memory */
+ rte_mempool_put(ctx->ctx_pool, (void *)ctx);
+}
+
+static inline struct caam_jr_op_ctx *
+caam_jr_alloc_ctx(struct caam_jr_session *ses)
+{
+ struct caam_jr_op_ctx *ctx;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+ ret = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
+ if (!ctx || ret) {
+ CAAM_JR_DP_WARN("Alloc sec descriptor failed!");
+ return NULL;
+ }
+ /*
+ * Clear SG memory. There are 16 SG entries of 16 bytes each.
+ * One call to dcbz_64() clears 64 bytes, so it is called 4 times
+ * to clear all the SG entries. Since caam_jr_alloc_ctx() is called
+ * for each packet, dcbz_64() is used instead of the costlier memset().
+ */
+ dcbz_64(&ctx->sg[SG_CACHELINE_0]);
+ dcbz_64(&ctx->sg[SG_CACHELINE_1]);
+ dcbz_64(&ctx->sg[SG_CACHELINE_2]);
+ dcbz_64(&ctx->sg[SG_CACHELINE_3]);
+
+ ctx->ctx_pool = ses->ctx_pool;
+ ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
+
+ return ctx;
+}
+
+static
+void caam_jr_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ struct caam_jr_qp **qp = (struct caam_jr_qp **)
+ dev->data->queue_pairs;
+ int i;
+
+ PMD_INIT_FUNC_TRACE();
+ if (stats == NULL) {
+ CAAM_JR_ERR("Invalid stats ptr NULL");
+ return;
+ }
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ if (qp[i] == NULL) {
+ CAAM_JR_WARN("Uninitialised queue pair");
+ continue;
+ }
+
+ stats->enqueued_count += qp[i]->tx_pkts;
+ stats->dequeued_count += qp[i]->rx_pkts;
+ stats->enqueue_err_count += qp[i]->tx_errs;
+ stats->dequeue_err_count += qp[i]->rx_errs;
+ CAAM_JR_INFO("extra stats:\n\tRX Poll ERR = %" PRIu64
+ "\n\tTX Ring Full = %" PRIu64,
+ qp[i]->rx_poll_err,
+ qp[i]->tx_ring_full);
+ }
+}
+
+static
+void caam_jr_stats_reset(struct rte_cryptodev *dev)
+{
+ int i;
+ struct caam_jr_qp **qp = (struct caam_jr_qp **)
+ (dev->data->queue_pairs);
+
+ PMD_INIT_FUNC_TRACE();
+ for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+ if (qp[i] == NULL) {
+ CAAM_JR_WARN("Uninitialised queue pair");
+ continue;
+ }
+ qp[i]->rx_pkts = 0;
+ qp[i]->rx_errs = 0;
+ qp[i]->rx_poll_err = 0;
+ qp[i]->tx_pkts = 0;
+ qp[i]->tx_errs = 0;
+ qp[i]->tx_ring_full = 0;
+ }
+}
+
+static inline int
+is_cipher_only(struct caam_jr_session *ses)
+{
+ PMD_INIT_FUNC_TRACE();
+ return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
+ (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
+}
+
+static inline int
+is_auth_only(struct caam_jr_session *ses)
+{
+ PMD_INIT_FUNC_TRACE();
+ return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
+ (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
+}
+
+static inline int
+is_aead(struct caam_jr_session *ses)
+{
+ PMD_INIT_FUNC_TRACE();
+ return ((ses->cipher_alg == 0) &&
+ (ses->auth_alg == 0) &&
+ (ses->aead_alg != 0));
+}
+
+static inline int
+is_auth_cipher(struct caam_jr_session *ses)
+{
+ PMD_INIT_FUNC_TRACE();
+ return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
+ (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
+ (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
+}
+
+static inline int
+is_proto_ipsec(struct caam_jr_session *ses)
+{
+ PMD_INIT_FUNC_TRACE();
+ return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
+}
+
+static inline int
+is_encode(struct caam_jr_session *ses)
+{
+ PMD_INIT_FUNC_TRACE();
+ return ses->dir == DIR_ENC;
+}
+
+static inline int
+is_decode(struct caam_jr_session *ses)
+{
+ PMD_INIT_FUNC_TRACE();
+ return ses->dir == DIR_DEC;
+}
+
+static inline void
+caam_auth_alg(struct caam_jr_session *ses, struct alginfo *alginfo_a)
+{
+ PMD_INIT_FUNC_TRACE();
+ switch (ses->auth_alg) {
+ case RTE_CRYPTO_AUTH_NULL:
+ ses->digest_length = 0;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ default:
+ CAAM_JR_DEBUG("unsupported auth alg %u", ses->auth_alg);
+ }
+}
+
+static inline void
+caam_cipher_alg(struct caam_jr_session *ses, struct alginfo *alginfo_c)
+{
+ PMD_INIT_FUNC_TRACE();
+ switch (ses->cipher_alg) {
+ case RTE_CRYPTO_CIPHER_NULL:
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ alginfo_c->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
+ alginfo_c->algmode = OP_ALG_AAI_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ alginfo_c->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
+ alginfo_c->algmode = OP_ALG_AAI_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ alginfo_c->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
+ alginfo_c->algmode = OP_ALG_AAI_CTR;
+ break;
+ default:
+ CAAM_JR_DEBUG("unsupported cipher alg %d", ses->cipher_alg);
+ }
+}
+
+static inline void
+caam_aead_alg(struct caam_jr_session *ses, struct alginfo *alginfo)
+{
+ PMD_INIT_FUNC_TRACE();
+ switch (ses->aead_alg) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ alginfo->algtype = OP_ALG_ALGSEL_AES;
+ alginfo->algmode = OP_ALG_AAI_GCM;
+ break;
+ default:
+ CAAM_JR_DEBUG("unsupported AEAD alg %d", ses->aead_alg);
+ }
+}
+
+/* prepare command block of the session */
+static int
+caam_jr_prep_cdb(struct caam_jr_session *ses)
+{
+ struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
+ int32_t shared_desc_len = 0;
+ struct sec_cdb *cdb;
+ int err;
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ int swap = false;
+#else
+ int swap = true;
+#endif
+
+ PMD_INIT_FUNC_TRACE();
+ if (ses->cdb)
+ caam_jr_dma_free(ses->cdb);
+
+ cdb = caam_jr_dma_mem_alloc(L1_CACHE_BYTES, sizeof(struct sec_cdb));
+ if (!cdb) {
+ CAAM_JR_ERR("failed to allocate memory for cdb\n");
+ return -1;
+ }
+
+ ses->cdb = cdb;
+
+ memset(cdb, 0, sizeof(struct sec_cdb));
+
+ if (is_cipher_only(ses)) {
+ caam_cipher_alg(ses, &alginfo_c);
+ if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
+ CAAM_JR_ERR("not supported cipher alg");
+ rte_free(cdb);
+ return -ENOTSUP;
+ }
+
+ alginfo_c.key = (size_t)ses->cipher_key.data;
+ alginfo_c.keylen = ses->cipher_key.length;
+ alginfo_c.key_enc_flags = 0;
+ alginfo_c.key_type = RTA_DATA_IMM;
+
+ shared_desc_len = cnstr_shdsc_blkcipher(
+ cdb->sh_desc, true,
+ swap, &alginfo_c,
+ NULL,
+ ses->iv.length,
+ ses->dir);
+ } else if (is_auth_only(ses)) {
+ caam_auth_alg(ses, &alginfo_a);
+ if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
+ CAAM_JR_ERR("not supported auth alg");
+ rte_free(cdb);
+ return -ENOTSUP;
+ }
+
+ alginfo_a.key = (size_t)ses->auth_key.data;
+ alginfo_a.keylen = ses->auth_key.length;
+ alginfo_a.key_enc_flags = 0;
+ alginfo_a.key_type = RTA_DATA_IMM;
+
+ shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
+ swap, &alginfo_a,
+ !ses->dir,
+ ses->digest_length);
+ } else if (is_aead(ses)) {
+ caam_aead_alg(ses, &alginfo);
+ if (alginfo.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
+ CAAM_JR_ERR("not supported aead alg");
+ rte_free(cdb);
+ return -ENOTSUP;
+ }
+ alginfo.key = (size_t)ses->aead_key.data;
+ alginfo.keylen = ses->aead_key.length;
+ alginfo.key_enc_flags = 0;
+ alginfo.key_type = RTA_DATA_IMM;
+
+ if (ses->dir == DIR_ENC)
+ shared_desc_len = cnstr_shdsc_gcm_encap(
+ cdb->sh_desc, true, swap,
+ &alginfo,
+ ses->iv.length,
+ ses->digest_length);
+ else
+ shared_desc_len = cnstr_shdsc_gcm_decap(
+ cdb->sh_desc, true, swap,
+ &alginfo,
+ ses->iv.length,
+ ses->digest_length);
+ } else {
+ caam_cipher_alg(ses, &alginfo_c);
+ if (alginfo_c.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
+ CAAM_JR_ERR("not supported cipher alg");
+ rte_free(cdb);
+ return -ENOTSUP;
+ }
+
+ alginfo_c.key = (size_t)ses->cipher_key.data;
+ alginfo_c.keylen = ses->cipher_key.length;
+ alginfo_c.key_enc_flags = 0;
+ alginfo_c.key_type = RTA_DATA_IMM;
+
+ caam_auth_alg(ses, &alginfo_a);
+ if (alginfo_a.algtype == (unsigned int)CAAM_JR_ALG_UNSUPPORT) {
+ CAAM_JR_ERR("not supported auth alg");
+ rte_free(cdb);
+ return -ENOTSUP;
+ }
+
+ alginfo_a.key = (size_t)ses->auth_key.data;
+ alginfo_a.keylen = ses->auth_key.length;
+ alginfo_a.key_enc_flags = 0;
+ alginfo_a.key_type = RTA_DATA_IMM;
+
+ cdb->sh_desc[0] = alginfo_c.keylen;
+ cdb->sh_desc[1] = alginfo_a.keylen;
+ err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+ MIN_JOB_DESC_SIZE,
+ (unsigned int *)cdb->sh_desc,
+ &cdb->sh_desc[2], 2);
+
+ if (err < 0) {
+ CAAM_JR_ERR("Crypto: Incorrect key lengths");
+ rte_free(cdb);
+ return err;
+ }
+ if (cdb->sh_desc[2] & 1)
+ alginfo_c.key_type = RTA_DATA_IMM;
+ else {
+ alginfo_c.key = (size_t)caam_jr_mem_vtop(
+ (void *)(size_t)alginfo_c.key);
+ alginfo_c.key_type = RTA_DATA_PTR;
+ }
+ if (cdb->sh_desc[2] & (1<<1))
+ alginfo_a.key_type = RTA_DATA_IMM;
+ else {
+ alginfo_a.key = (size_t)caam_jr_mem_vtop(
+ (void *)(size_t)alginfo_a.key);
+ alginfo_a.key_type = RTA_DATA_PTR;
+ }
+ cdb->sh_desc[0] = 0;
+ cdb->sh_desc[1] = 0;
+ cdb->sh_desc[2] = 0;
+ if (is_proto_ipsec(ses)) {
+ if (ses->dir == DIR_ENC) {
+ shared_desc_len = cnstr_shdsc_ipsec_new_encap(
+ cdb->sh_desc,
+ true, swap, SHR_SERIAL,
+ &ses->encap_pdb,
+ (uint8_t *)&ses->ip4_hdr,
+ &alginfo_c, &alginfo_a);
+ } else if (ses->dir == DIR_DEC) {
+ shared_desc_len = cnstr_shdsc_ipsec_new_decap(
+ cdb->sh_desc,
+ true, swap, SHR_SERIAL,
+ &ses->decap_pdb,
+ &alginfo_c, &alginfo_a);
+ }
+ } else {
+ /* Auth_only_len is set as 0 here and it will be
+ * overwritten in fd for each packet.
+ */
+ shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
+ true, swap, &alginfo_c, &alginfo_a,
+ ses->iv.length, 0,
+ ses->digest_length, ses->dir);
+ }
+ }
+
+ if (shared_desc_len < 0) {
+ CAAM_JR_ERR("error in preparing command block");
+ return shared_desc_len;
+ }
+
+#if CAAM_JR_DBG
+ SEC_DUMP_DESC(cdb->sh_desc);
+#endif
+
+ cdb->sh_hdr.hi.field.idlen = shared_desc_len;
+
+ return 0;
+}
+
+/* @brief Poll the HW for already processed jobs in the JR
+ * and silently discard the available jobs or notify them to UA
+ * with indicated error code.
+ *
+ * @param [in,out] job_ring The job ring to poll.
+ * @param [in] do_notify Can be #TRUE or #FALSE. Indicates if
+ * descriptors are to be discarded
+ * or notified to UA with given error_code.
+ * @param [out] notified_descs Number of notified descriptors. Can be NULL
+ * if do_notify is #FALSE
+ */
+static void
+hw_flush_job_ring(struct sec_job_ring_t *job_ring,
+ uint32_t do_notify,
+ uint32_t *notified_descs)
+{
+ int32_t jobs_no_to_discard = 0;
+ int32_t discarded_descs_no = 0;
+
+ PMD_INIT_FUNC_TRACE();
+ CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Flushing jr notify desc=[%d]",
+ job_ring, job_ring->pidx, job_ring->cidx, do_notify);
+
+ jobs_no_to_discard = hw_get_no_finished_jobs(job_ring);
+
+ /* Discard all jobs */
+ CAAM_JR_DEBUG("Jr[%p] pi[%d] ci[%d].Discarding %d descs",
+ job_ring, job_ring->pidx, job_ring->cidx,
+ jobs_no_to_discard);
+
+ while (jobs_no_to_discard > discarded_descs_no) {
+ discarded_descs_no++;
+ /* Now increment the consumer index for the current job ring,
+ * AFTER saving the job in a temporary location.
+ */
+ job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
+ SEC_JOB_RING_SIZE);
+
+ hw_remove_entries(job_ring, 1);
+ }
+
+ if (do_notify == true) {
+ ASSERT(notified_descs != NULL);
+ *notified_descs = discarded_descs_no;
+ }
+}
+
+/* @brief Poll the HW for already processed jobs in the JR
+ * and notify the available jobs to UA.
+ *
+ * @param [in] job_ring The job ring to poll.
+ * @param [in] limit The maximum number of completed jobs to notify.
+ *
+ * @retval >=0 number of jobs notified to UA
+ * @retval -1 on error
+ */
+static int
+hw_poll_job_ring(struct sec_job_ring_t *job_ring,
+ struct rte_crypto_op **ops, int32_t limit,
+ struct caam_jr_qp *jr_qp)
+{
+ int32_t jobs_no_to_notify = 0; /* the number of done jobs to notify*/
+ int32_t number_of_jobs_available = 0;
+ int32_t notified_descs_no = 0;
+ uint32_t sec_error_code = 0;
+ struct job_descriptor *current_desc;
+ phys_addr_t current_desc_addr;
+ phys_addr_t *temp_addr;
+ struct caam_jr_op_ctx *ctx;
+
+ PMD_INIT_FUNC_TRACE();
+ /* TODO check for ops have memory*/
+ /* check here if any JR error that cannot be written
+ * in the output status word has occurred
+ */
+ if (JR_REG_JRINT_JRE_EXTRACT(GET_JR_REG(JRINT, job_ring))) {
+ CAAM_JR_INFO("err received");
+ sec_error_code = JR_REG_JRINT_ERR_TYPE_EXTRACT(
+ GET_JR_REG(JRINT, job_ring));
+ if (unlikely(sec_error_code)) {
+ hw_job_ring_error_print(job_ring, sec_error_code);
+ return -1;
+ }
+ }
+ /* compute the number of jobs available in the job ring based on the
+ * producer and consumer index values.
+ */
+ number_of_jobs_available = hw_get_no_finished_jobs(job_ring);
+ /* Compute the number of notifications that need to be raised to UA:
+ * if limit > total number of done jobs, notify all done jobs;
+ * otherwise notify only 'limit' jobs.
+ */
+ jobs_no_to_notify = (limit > number_of_jobs_available) ?
+ number_of_jobs_available : limit;
+ CAAM_JR_DP_DEBUG(
+ "Jr[%p] pi[%d] ci[%d].limit =%d Available=%d.Jobs to notify=%d",
+ job_ring, job_ring->pidx, job_ring->cidx,
+ limit, number_of_jobs_available, jobs_no_to_notify);
+
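+ /* Read barrier: make sure the output ring entries written by the
+ * hardware are observed before their status and descriptor pointer
+ * are consumed below.
+ */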
+ rte_smp_rmb();
+
+ while (jobs_no_to_notify > notified_descs_no) {
+ static uint64_t false_alarm;
+ static uint64_t real_poll;
+
+ /* Get job status here */
+ sec_error_code = job_ring->output_ring[job_ring->cidx].status;
+ /* Get completed descriptor */
+ temp_addr = &(job_ring->output_ring[job_ring->cidx].desc);
+ current_desc_addr = (phys_addr_t)sec_read_addr(temp_addr);
+
+ real_poll++;
+ /* todo check if it is false alarm no desc present */
+ if (!current_desc_addr) {
+ false_alarm++;
+ printf("false alarm %" PRIu64 "real %" PRIu64
+ " sec_err =0x%x cidx Index =0%d\n",
+ false_alarm, real_poll,
+ sec_error_code, job_ring->cidx);
+ rte_panic("CAAM JR descriptor NULL");
+ return notified_descs_no;
+ }
+ current_desc = (struct job_descriptor *)
+ caam_jr_dma_ptov(current_desc_addr);
+ /* now increment the consumer index for the current job ring,
+ * AFTER saving job in temporary location!
+ */
+ job_ring->cidx = SEC_CIRCULAR_COUNTER(job_ring->cidx,
+ SEC_JOB_RING_SIZE);
+ /* Signal that the job has been processed and the slot is free*/
+ hw_remove_entries(job_ring, 1);
+ /*TODO for multiple ops, packets*/
+ ctx = container_of(current_desc, struct caam_jr_op_ctx, jobdes);
+ if (unlikely(sec_error_code)) {
+ CAAM_JR_ERR("desc at cidx %d generated error 0x%x\n",
+ job_ring->cidx, sec_error_code);
+ hw_handle_job_ring_error(job_ring, sec_error_code);
+ /* TODO: improve with exact error codes */
+ ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ jr_qp->rx_errs++;
+ } else {
+ ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+#if CAAM_JR_DBG
+ if (ctx->op->sym->m_dst) {
+ rte_hexdump(stdout, "PROCESSED",
+ rte_pktmbuf_mtod(ctx->op->sym->m_dst, void *),
+ rte_pktmbuf_data_len(ctx->op->sym->m_dst));
+ } else {
+ rte_hexdump(stdout, "PROCESSED",
+ rte_pktmbuf_mtod(ctx->op->sym->m_src, void *),
+ rte_pktmbuf_data_len(ctx->op->sym->m_src));
+ }
+#endif
+ }
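+ /* For protocol (IPsec) offload sessions the hardware returns the
+ * complete encapsulated/decapsulated packet, so recover the new
+ * mbuf lengths from the total-length field of the resulting IPv4
+ * header.
+ */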
+ if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+ struct ip *ip4_hdr;
+
+ if (ctx->op->sym->m_dst) {
+ /*TODO check for ip header or other*/
+ ip4_hdr = (struct ip *)
+ rte_pktmbuf_mtod(ctx->op->sym->m_dst, char*);
+ ctx->op->sym->m_dst->pkt_len =
+ rte_be_to_cpu_16(ip4_hdr->ip_len);
+ ctx->op->sym->m_dst->data_len =
+ rte_be_to_cpu_16(ip4_hdr->ip_len);
+ } else {
+ ip4_hdr = (struct ip *)
+ rte_pktmbuf_mtod(ctx->op->sym->m_src, char*);
+ ctx->op->sym->m_src->pkt_len =
+ rte_be_to_cpu_16(ip4_hdr->ip_len);
+ ctx->op->sym->m_src->data_len =
+ rte_be_to_cpu_16(ip4_hdr->ip_len);
+ }
+ }
+ *ops = ctx->op;
+ caam_jr_op_ending(ctx);
+ ops++;
+ notified_descs_no++;
+ }
+ return notified_descs_no;
+}
+
+static uint16_t
+caam_jr_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
+ struct sec_job_ring_t *ring = jr_qp->ring;
+ int num_rx;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+ CAAM_JR_DP_DEBUG("Jr[%p]Polling. limit[%d]", ring, nb_ops);
+
+ /* Poll the job ring and notify the caller of at most nb_ops
+ * completed jobs.
+ */
+
+ /* Run hw poll job ring */
+ num_rx = hw_poll_job_ring(ring, ops, nb_ops, jr_qp);
+ if (num_rx < 0) {
+ CAAM_JR_ERR("Error polling SEC engine (%d)", num_rx);
+ return 0;
+ }
+
+ CAAM_JR_DP_DEBUG("Jr[%p].Jobs notified[%d]. ", ring, num_rx);
+
+ if (ring->jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
+ if (num_rx < nb_ops) {
+ ret = caam_jr_enable_irqs(ring->irq_fd);
+ SEC_ASSERT(ret == 0, ret,
+ "Failed to enable irqs for job ring %p", ring);
+ }
+ } else if (ring->jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
+
+ /* Always enable IRQ generation when in pure IRQ mode */
+ ret = caam_jr_enable_irqs(ring->irq_fd);
+ SEC_ASSERT(ret == 0, ret,
+ "Failed to enable irqs for job ring %p", ring);
+ }
+
+ jr_qp->rx_pkts += num_rx;
+
+ return num_rx;
+}
+
+/**
+ * packet looks like:
+ * |<----data_len------->|
+ * |ip_header|ah_header|icv|payload|
+ * ^
+ * |
+ * mbuf->pkt.data
+ */
+static inline struct caam_jr_op_ctx *
+build_auth_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct rte_mbuf *mbuf = sym->m_src;
+ struct caam_jr_op_ctx *ctx;
+ struct sec4_sg_entry *sg;
+ int length;
+ struct sec_cdb *cdb;
+ uint64_t sdesc_offset;
+ struct sec_job_descriptor_t *jobdescr;
+ uint8_t extra_segs;
+
+ PMD_INIT_FUNC_TRACE();
+ if (is_decode(ses))
+ extra_segs = 2;
+ else
+ extra_segs = 1;
+
+ if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
+ CAAM_JR_DP_ERR("Auth: Max sec segs supported is %d",
+ MAX_SG_ENTRIES);
+ return NULL;
+ }
+
+ ctx = caam_jr_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ ctx->op = op;
+
+ cdb = ses->cdb;
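+ /* Byte offset of the shared descriptor within the CDB, used below to
+ * point the job descriptor directly at it.
+ */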
+ sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
+
+ jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
+
+ SEC_JD_INIT(jobdescr);
+ SEC_JD_SET_SD(jobdescr,
+ (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
+ cdb->sh_hdr.hi.field.idlen);
+
+ /* output */
+ SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
+ 0, ses->digest_length);
+
+ /*input */
+ sg = &ctx->sg[0];
+ length = sym->auth.data.length;
+ sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf) + sym->auth.data.offset);
+ sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ sg++;
+ sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
+ sg->len = cpu_to_caam32(mbuf->data_len);
+ mbuf = mbuf->next;
+ }
+
+ if (is_decode(ses)) {
+ /* digest verification case */
+ sg++;
+ /* hash result or digest, save digest first */
+ rte_memcpy(ctx->digest, sym->auth.digest.data,
+ ses->digest_length);
+#if CAAM_JR_DBG
+ rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
+#endif
+ sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
+ sg->len = cpu_to_caam32(ses->digest_length);
+ length += ses->digest_length;
+ } else {
+ length -= ses->digest_length;
+ }
+
+ /* last element*/
+ sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+
+ SEC_JD_SET_IN_PTR(jobdescr,
+ (uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0, length);
+ /* enabling sg list */
+ (jobdescr)->seq_in.command.word |= 0x01000000;
+
+ return ctx;
+}
+
+static inline struct caam_jr_op_ctx *
+build_auth_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct caam_jr_op_ctx *ctx;
+ struct sec4_sg_entry *sg;
+ rte_iova_t start_addr;
+ struct sec_cdb *cdb;
+ uint64_t sdesc_offset;
+ struct sec_job_descriptor_t *jobdescr;
+
+ PMD_INIT_FUNC_TRACE();
+ ctx = caam_jr_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ ctx->op = op;
+
+ cdb = ses->cdb;
+ sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
+
+ start_addr = rte_pktmbuf_iova(sym->m_src);
+
+ jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
+
+ SEC_JD_INIT(jobdescr);
+ SEC_JD_SET_SD(jobdescr,
+ (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
+ cdb->sh_hdr.hi.field.idlen);
+
+ /* output */
+ SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)sym->auth.digest.phys_addr,
+ 0, ses->digest_length);
+
+ /*input */
+ if (is_decode(ses)) {
+ sg = &ctx->sg[0];
+ SEC_JD_SET_IN_PTR(jobdescr,
+ (uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
+ (sym->auth.data.length + ses->digest_length));
+ /* enabling sg list */
+ (jobdescr)->seq_in.command.word |= 0x01000000;
+
+ /* hash result or digest, save digest first */
+ rte_memcpy(ctx->digest, sym->auth.digest.data,
+ ses->digest_length);
+ sg->ptr = cpu_to_caam64(start_addr + sym->auth.data.offset);
+ sg->len = cpu_to_caam32(sym->auth.data.length);
+
+#if CAAM_JR_DBG
+ rte_hexdump(stdout, "ICV", ctx->digest, ses->digest_length);
+#endif
+ /* let's check digest by hw */
+ sg++;
+ sg->ptr = cpu_to_caam64(caam_jr_vtop_ctx(ctx, ctx->digest));
+ sg->len = cpu_to_caam32(ses->digest_length);
+ /* last element*/
+ sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+ } else {
+ SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)start_addr,
+ sym->auth.data.offset, sym->auth.data.length);
+ }
+ return ctx;
+}
+
+static inline struct caam_jr_op_ctx *
+build_cipher_only_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct rte_mbuf *mbuf = sym->m_src;
+ struct caam_jr_op_ctx *ctx;
+ struct sec4_sg_entry *sg, *in_sg;
+ int length;
+ struct sec_cdb *cdb;
+ uint64_t sdesc_offset;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+ struct sec_job_descriptor_t *jobdescr;
+ uint8_t reg_segs;
+
+ PMD_INIT_FUNC_TRACE();
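+ /* Estimate the worst-case number of scatter-gather entries needed
+ * for the input and output chains (the extra entries cover the IV
+ * and provide margin).
+ */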
+ if (sym->m_dst) {
+ mbuf = sym->m_dst;
+ reg_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
+ } else {
+ mbuf = sym->m_src;
+ reg_segs = mbuf->nb_segs * 2 + 2;
+ }
+
+ if (reg_segs > MAX_SG_ENTRIES) {
+ CAAM_JR_DP_ERR("Cipher: Max sec segs supported is %d",
+ MAX_SG_ENTRIES);
+ return NULL;
+ }
+
+ ctx = caam_jr_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ ctx->op = op;
+ cdb = ses->cdb;
+ sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
+
+ jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
+
+ SEC_JD_INIT(jobdescr);
+ SEC_JD_SET_SD(jobdescr,
+ (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
+ cdb->sh_hdr.hi.field.idlen);
+
+#if CAAM_JR_DBG
+ CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
+ sym->m_src->data_off, sym->cipher.data.offset,
+ sym->cipher.data.length, ses->iv.length);
+#endif
+ /* output */
+ if (sym->m_dst)
+ mbuf = sym->m_dst;
+ else
+ mbuf = sym->m_src;
+
+ sg = &ctx->sg[0];
+ length = sym->cipher.data.length;
+
+ sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
+ + sym->cipher.data.offset);
+ sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset);
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ sg++;
+ sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
+ sg->len = cpu_to_caam32(mbuf->data_len);
+ mbuf = mbuf->next;
+ }
+ /* last element*/
+ sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+
+ SEC_JD_SET_OUT_PTR(jobdescr,
+ (uint64_t)caam_jr_vtop_ctx(ctx, &ctx->sg[0]), 0,
+ length);
+ /*enabling sg bit */
+ (jobdescr)->seq_out.command.word |= 0x01000000;
+
+ /*input */
+ sg++;
+ mbuf = sym->m_src;
+ in_sg = sg;
+
+ length = sym->cipher.data.length + ses->iv.length;
+
+ /* IV */
+ sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
+ sg->len = cpu_to_caam32(ses->iv.length);
+
+ /* 1st seg */
+ sg++;
+ sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
+ + sym->cipher.data.offset);
+ sg->len = cpu_to_caam32(mbuf->data_len - sym->cipher.data.offset);
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ sg++;
+ sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
+ sg->len = cpu_to_caam32(mbuf->data_len);
+ mbuf = mbuf->next;
+ }
+ /* last element*/
+ sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+
+
+ SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, in_sg), 0,
+ length);
+ /*enabling sg bit */
+ (jobdescr)->seq_in.command.word |= 0x01000000;
+
+ return ctx;
+}
+
+static inline struct caam_jr_op_ctx *
+build_cipher_only(struct rte_crypto_op *op, struct caam_jr_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct caam_jr_op_ctx *ctx;
+ struct sec4_sg_entry *sg;
+ rte_iova_t src_start_addr, dst_start_addr;
+ struct sec_cdb *cdb;
+ uint64_t sdesc_offset;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+ struct sec_job_descriptor_t *jobdescr;
+
+ PMD_INIT_FUNC_TRACE();
+ ctx = caam_jr_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ ctx->op = op;
+ cdb = ses->cdb;
+ sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
+
+ src_start_addr = rte_pktmbuf_iova(sym->m_src);
+ if (sym->m_dst)
+ dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
+ else
+ dst_start_addr = src_start_addr;
+
+ jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
+
+ SEC_JD_INIT(jobdescr);
+ SEC_JD_SET_SD(jobdescr,
+ (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
+ cdb->sh_hdr.hi.field.idlen);
+
+#if CAAM_JR_DBG
+ CAAM_JR_INFO("mbuf offset =%d, cipher offset = %d, length =%d+%d",
+ sym->m_src->data_off, sym->cipher.data.offset,
+ sym->cipher.data.length, ses->iv.length);
+#endif
+ /* output */
+ SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr,
+ sym->cipher.data.offset,
+ sym->cipher.data.length + ses->iv.length);
+
+ /*input */
+ sg = &ctx->sg[0];
+ SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_vtop_ctx(ctx, sg), 0,
+ sym->cipher.data.length + ses->iv.length);
+ /*enabling sg bit */
+ (jobdescr)->seq_in.command.word |= 0x01000000;
+
+ sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
+ sg->len = cpu_to_caam32(ses->iv.length);
+
+ sg = &ctx->sg[1];
+ sg->ptr = cpu_to_caam64(src_start_addr + sym->cipher.data.offset);
+ sg->len = cpu_to_caam32(sym->cipher.data.length);
+ /* last element*/
+ sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+
+ return ctx;
+}
+
+/* For decapsulation:
+ * Input:
+ * +----+----------------+--------------------------------+-----+
+ * | IV | Auth-only data | Authenticated & Encrypted data | ICV |
+ * +----+----------------+--------------------------------+-----+
+ * Output:
+ * +--------------------------------+
+ * | Decrypted & authenticated data |
+ * +--------------------------------+
+ */
+
+static inline struct caam_jr_op_ctx *
+build_cipher_auth_sg(struct rte_crypto_op *op, struct caam_jr_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct caam_jr_op_ctx *ctx;
+ struct sec4_sg_entry *sg, *out_sg, *in_sg;
+ struct rte_mbuf *mbuf;
+ uint32_t length = 0;
+ struct sec_cdb *cdb;
+ uint64_t sdesc_offset;
+ uint8_t req_segs;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+ struct sec_job_descriptor_t *jobdescr;
+ uint32_t auth_only_len;
+
+ PMD_INIT_FUNC_TRACE();
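+ /* Length of the data that is authenticated but not encrypted; it
+ * precedes the cipher region and is passed to the shared descriptor
+ * per packet through DPOVRD (see below).
+ */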
+ auth_only_len = op->sym->auth.data.length -
+ op->sym->cipher.data.length;
+
+ if (sym->m_dst) {
+ mbuf = sym->m_dst;
+ req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
+ } else {
+ mbuf = sym->m_src;
+ req_segs = mbuf->nb_segs * 2 + 3;
+ }
+
+ if (req_segs > MAX_SG_ENTRIES) {
+ CAAM_JR_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
+ MAX_SG_ENTRIES);
+ return NULL;
+ }
+
+ ctx = caam_jr_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ ctx->op = op;
+ cdb = ses->cdb;
+ sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
+
+ jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
+
+ SEC_JD_INIT(jobdescr);
+ SEC_JD_SET_SD(jobdescr,
+ (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
+ cdb->sh_hdr.hi.field.idlen);
+
+ /* output */
+ if (sym->m_dst)
+ mbuf = sym->m_dst;
+ else
+ mbuf = sym->m_src;
+
+ out_sg = &ctx->sg[0];
+ if (is_encode(ses))
+ length = sym->auth.data.length + ses->digest_length;
+ else
+ length = sym->auth.data.length;
+
+ sg = &ctx->sg[0];
+
+ /* 1st seg */
+ sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
+ + sym->auth.data.offset);
+ sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ sg++;
+ sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
+ sg->len = cpu_to_caam32(mbuf->data_len);
+ mbuf = mbuf->next;
+ }
+
+ if (is_encode(ses)) {
+ /* set auth output */
+ sg++;
+ sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
+ sg->len = cpu_to_caam32(ses->digest_length);
+ }
+ /* last element*/
+ sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+
+ SEC_JD_SET_OUT_PTR(jobdescr,
+ (uint64_t)caam_jr_dma_vtop(out_sg), 0, length);
+ /* set sg bit */
+ (jobdescr)->seq_out.command.word |= 0x01000000;
+
+ /* input */
+ sg++;
+ mbuf = sym->m_src;
+ in_sg = sg;
+ if (is_encode(ses))
+ length = ses->iv.length + sym->auth.data.length;
+ else
+ length = ses->iv.length + sym->auth.data.length
+ + ses->digest_length;
+
+ sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
+ sg->len = cpu_to_caam32(ses->iv.length);
+
+ sg++;
+ /* 1st seg */
+ sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf)
+ + sym->auth.data.offset);
+ sg->len = cpu_to_caam32(mbuf->data_len - sym->auth.data.offset);
+
+ /* Successive segs */
+ mbuf = mbuf->next;
+ while (mbuf) {
+ sg++;
+ sg->ptr = cpu_to_caam64(rte_pktmbuf_iova(mbuf));
+ sg->len = cpu_to_caam32(mbuf->data_len);
+ mbuf = mbuf->next;
+ }
+
+ if (is_decode(ses)) {
+ sg++;
+ rte_memcpy(ctx->digest, sym->auth.digest.data,
+ ses->digest_length);
+ sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
+ sg->len = cpu_to_caam32(ses->digest_length);
+ }
+ /* last element*/
+ sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+
+ SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(in_sg), 0,
+ length);
+ /* set sg bit */
+ (jobdescr)->seq_in.command.word |= 0x01000000;
+ /* Auth_only_len is set as 0 in descriptor and it is
+ * overwritten here in the jd which will update
+ * the DPOVRD reg.
+ */
+ if (auth_only_len)
+ /* set sg bit */
+ (jobdescr)->dpovrd = 0x80000000 | auth_only_len;
+
+ return ctx;
+}
+
+static inline struct caam_jr_op_ctx *
+build_cipher_auth(struct rte_crypto_op *op, struct caam_jr_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct caam_jr_op_ctx *ctx;
+ struct sec4_sg_entry *sg;
+ rte_iova_t src_start_addr, dst_start_addr;
+ uint32_t length = 0;
+ struct sec_cdb *cdb;
+ uint64_t sdesc_offset;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+ struct sec_job_descriptor_t *jobdescr;
+ uint32_t auth_only_len;
+
+ PMD_INIT_FUNC_TRACE();
+ auth_only_len = op->sym->auth.data.length -
+ op->sym->cipher.data.length;
+
+ src_start_addr = rte_pktmbuf_iova(sym->m_src);
+ if (sym->m_dst)
+ dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
+ else
+ dst_start_addr = src_start_addr;
+
+ ctx = caam_jr_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ ctx->op = op;
+ cdb = ses->cdb;
+ sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
+
+ jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
+
+ SEC_JD_INIT(jobdescr);
+ SEC_JD_SET_SD(jobdescr,
+ (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
+ cdb->sh_hdr.hi.field.idlen);
+
+ /* input */
+ sg = &ctx->sg[0];
+ if (is_encode(ses)) {
+ sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
+ sg->len = cpu_to_caam32(ses->iv.length);
+ length += ses->iv.length;
+
+ sg++;
+ sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
+ sg->len = cpu_to_caam32(sym->auth.data.length);
+ length += sym->auth.data.length;
+ /* last element*/
+ sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+ } else {
+ sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(IV_ptr));
+ sg->len = cpu_to_caam32(ses->iv.length);
+ length += ses->iv.length;
+
+ sg++;
+ sg->ptr = cpu_to_caam64(src_start_addr + sym->auth.data.offset);
+ sg->len = cpu_to_caam32(sym->auth.data.length);
+ length += sym->auth.data.length;
+
+ rte_memcpy(ctx->digest, sym->auth.digest.data,
+ ses->digest_length);
+ sg++;
+ sg->ptr = cpu_to_caam64(caam_jr_dma_vtop(ctx->digest));
+ sg->len = cpu_to_caam32(ses->digest_length);
+ length += ses->digest_length;
+ /* last element*/
+ sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+ }
+
+ SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)caam_jr_dma_vtop(&ctx->sg[0]), 0,
+ length);
+ /* set sg bit */
+ (jobdescr)->seq_in.command.word |= 0x01000000;
+
+ /* output */
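+ /* The output scatter-gather chain starts at a fixed offset so it
+ * cannot overlap the input chain built from ctx->sg[0] above.
+ */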
+ sg = &ctx->sg[6];
+
+ sg->ptr = cpu_to_caam64(dst_start_addr + sym->cipher.data.offset);
+ sg->len = cpu_to_caam32(sym->cipher.data.length);
+ length = sym->cipher.data.length;
+
+ if (is_encode(ses)) {
+ /* set auth output */
+ sg++;
+ sg->ptr = cpu_to_caam64(sym->auth.digest.phys_addr);
+ sg->len = cpu_to_caam32(ses->digest_length);
+ length += ses->digest_length;
+ }
+ /* last element*/
+ sg->len |= cpu_to_caam32(SEC4_SG_LEN_FIN);
+
+ SEC_JD_SET_OUT_PTR(jobdescr,
+ (uint64_t)caam_jr_dma_vtop(&ctx->sg[6]), 0, length);
+ /* set sg bit */
+ (jobdescr)->seq_out.command.word |= 0x01000000;
+
+ /* Auth_only_len is set as 0 in descriptor and it is
+ * overwritten here in the jd which will update
+ * the DPOVRD reg.
+ */
+ if (auth_only_len)
+ /* set sg bit */
+ (jobdescr)->dpovrd = 0x80000000 | auth_only_len;
+
+ return ctx;
+}
+
+static inline struct caam_jr_op_ctx *
+build_proto(struct rte_crypto_op *op, struct caam_jr_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct caam_jr_op_ctx *ctx = NULL;
+ phys_addr_t src_start_addr, dst_start_addr;
+ struct sec_cdb *cdb;
+ uint64_t sdesc_offset;
+ struct sec_job_descriptor_t *jobdescr;
+
+ PMD_INIT_FUNC_TRACE();
+ ctx = caam_jr_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+ ctx->op = op;
+
+ src_start_addr = rte_pktmbuf_iova(sym->m_src);
+ if (sym->m_dst)
+ dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
+ else
+ dst_start_addr = src_start_addr;
+
+ cdb = ses->cdb;
+ sdesc_offset = (size_t) ((char *)&cdb->sh_desc - (char *)cdb);
+
+ jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
+
+ SEC_JD_INIT(jobdescr);
+ SEC_JD_SET_SD(jobdescr,
+ (phys_addr_t)(caam_jr_dma_vtop(cdb)) + sdesc_offset,
+ cdb->sh_hdr.hi.field.idlen);
+
+ /* output */
+ SEC_JD_SET_OUT_PTR(jobdescr, (uint64_t)dst_start_addr, 0,
+ sym->m_src->buf_len - sym->m_src->data_off);
+ /* input */
+ SEC_JD_SET_IN_PTR(jobdescr, (uint64_t)src_start_addr, 0,
+ sym->m_src->pkt_len);
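+ /* After protocol offload the original L4 type is no longer valid */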
+ sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
+
+ return ctx;
+}
+
+static int
+caam_jr_enqueue_op(struct rte_crypto_op *op, struct caam_jr_qp *qp)
+{
+ struct sec_job_ring_t *ring = qp->ring;
+ struct caam_jr_session *ses;
+ struct caam_jr_op_ctx *ctx = NULL;
+ struct sec_job_descriptor_t *jobdescr __rte_unused;
+
+ PMD_INIT_FUNC_TRACE();
+ switch (op->sess_type) {
+ case RTE_CRYPTO_OP_WITH_SESSION:
+ ses = (struct caam_jr_session *)
+ get_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id);
+ break;
+ case RTE_CRYPTO_OP_SECURITY_SESSION:
+ ses = (struct caam_jr_session *)
+ get_sec_session_private_data(
+ op->sym->sec_session);
+ break;
+ default:
+ CAAM_JR_DP_ERR("sessionless crypto op not supported");
+ qp->tx_errs++;
+ return -1;
+ }
+
+ if (unlikely(!ses->qp || ses->qp != qp)) {
+ CAAM_JR_DP_DEBUG("Old:sess->qp=%p New qp = %p\n", ses->qp, qp);
+ ses->qp = qp;
+ caam_jr_prep_cdb(ses);
+ }
+
+ if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
+ if (is_auth_cipher(ses))
+ ctx = build_cipher_auth(op, ses);
+ else if (is_aead(ses))
+ goto err1;
+ else if (is_auth_only(ses))
+ ctx = build_auth_only(op, ses);
+ else if (is_cipher_only(ses))
+ ctx = build_cipher_only(op, ses);
+ else if (is_proto_ipsec(ses))
+ ctx = build_proto(op, ses);
+ } else {
+ if (is_auth_cipher(ses))
+ ctx = build_cipher_auth_sg(op, ses);
+ else if (is_aead(ses))
+ goto err1;
+ else if (is_auth_only(ses))
+ ctx = build_auth_only_sg(op, ses);
+ else if (is_cipher_only(ses))
+ ctx = build_cipher_only_sg(op, ses);
+ }
+err1:
+ if (unlikely(!ctx)) {
+ qp->tx_errs++;
+ CAAM_JR_ERR("not supported sec op");
+ return -1;
+ }
+#if CAAM_JR_DBG
+ if (is_decode(ses))
+ rte_hexdump(stdout, "DECODE",
+ rte_pktmbuf_mtod(op->sym->m_src, void *),
+ rte_pktmbuf_data_len(op->sym->m_src));
+ else
+ rte_hexdump(stdout, "ENCODE",
+ rte_pktmbuf_mtod(op->sym->m_src, void *),
+ rte_pktmbuf_data_len(op->sym->m_src));
+
+ printf("\n JD before conversion\n");
+ for (int i = 0; i < 12; i++)
+ printf("\n 0x%08x", ctx->jobdes.desc[i]);
+#endif
+
+ CAAM_JR_DP_DEBUG("Jr[%p] pi[%d] ci[%d].Before sending desc",
+ ring, ring->pidx, ring->cidx);
+
+ /* todo - do we want to retry */
+ if (SEC_JOB_RING_IS_FULL(ring->pidx, ring->cidx,
+ SEC_JOB_RING_SIZE, SEC_JOB_RING_SIZE)) {
+ CAAM_JR_DP_DEBUG("Ring FULL Jr[%p] pi[%d] ci[%d].Size = %d",
+ ring, ring->pidx, ring->cidx, SEC_JOB_RING_SIZE);
+ caam_jr_op_ending(ctx);
+ qp->tx_ring_full++;
+ return -EBUSY;
+ }
+
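+ /* The job descriptor is built in CPU byte order; if CAAM uses a
+ * different byte order, convert every descriptor word before it is
+ * handed to the hardware.
+ */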
+#if CORE_BYTE_ORDER != CAAM_BYTE_ORDER
+ jobdescr = (struct sec_job_descriptor_t *) ctx->jobdes.desc;
+
+ jobdescr->deschdr.command.word =
+ cpu_to_caam32(jobdescr->deschdr.command.word);
+ jobdescr->sd_ptr = cpu_to_caam64(jobdescr->sd_ptr);
+ jobdescr->seq_out.command.word =
+ cpu_to_caam32(jobdescr->seq_out.command.word);
+ jobdescr->seq_out_ptr = cpu_to_caam64(jobdescr->seq_out_ptr);
+ jobdescr->out_ext_length = cpu_to_caam32(jobdescr->out_ext_length);
+ jobdescr->seq_in.command.word =
+ cpu_to_caam32(jobdescr->seq_in.command.word);
+ jobdescr->seq_in_ptr = cpu_to_caam64(jobdescr->seq_in_ptr);
+ jobdescr->in_ext_length = cpu_to_caam32(jobdescr->in_ext_length);
+ jobdescr->load_dpovrd.command.word =
+ cpu_to_caam32(jobdescr->load_dpovrd.command.word);
+ jobdescr->dpovrd = cpu_to_caam32(jobdescr->dpovrd);
+#endif
+
+ /* Set ptr in input ring to current descriptor */
+ sec_write_addr(&ring->input_ring[ring->pidx],
+ (phys_addr_t)caam_jr_vtop_ctx(ctx, ctx->jobdes.desc));
+ rte_smp_wmb();
+
+ /* Notify HW that a new job is enqueued */
+ hw_enqueue_desc_on_job_ring(ring);
+
+ /* increment the producer index for the current job ring */
+ ring->pidx = SEC_CIRCULAR_COUNTER(ring->pidx, SEC_JOB_RING_SIZE);
+
+ return 0;
+}
+
+static uint16_t
+caam_jr_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ /* Function to transmit the frames to the given device and queue pair */
+ uint32_t loop;
+ int32_t ret;
+ struct caam_jr_qp *jr_qp = (struct caam_jr_qp *)qp;
+ uint16_t num_tx = 0;
+
+ PMD_INIT_FUNC_TRACE();
+ /* Prepare each packet which is to be sent */
+ for (loop = 0; loop < nb_ops; loop++) {
+ ret = caam_jr_enqueue_op(ops[loop], jr_qp);
+ if (!ret)
+ num_tx++;
+ }
+
+ jr_qp->tx_pkts += num_tx;
+
+ return num_tx;
+}
+
+/* Release queue pair */
+static int
+caam_jr_queue_pair_release(struct rte_cryptodev *dev,
+ uint16_t qp_id)
+{
+ struct sec_job_ring_t *internals;
+ struct caam_jr_qp *qp = NULL;
+
+ PMD_INIT_FUNC_TRACE();
+ CAAM_JR_DEBUG("dev =%p, queue =%d", dev, qp_id);
+
+ internals = dev->data->dev_private;
+ if (qp_id >= internals->max_nb_queue_pairs) {
+ CAAM_JR_ERR("Max supported qpid %d",
+ internals->max_nb_queue_pairs);
+ return -EINVAL;
+ }
+
+ qp = &internals->qps[qp_id];
+ qp->ring = NULL;
+ dev->data->queue_pairs[qp_id] = NULL;
+
+ return 0;
+}
+
+/* Setup a queue pair */
+static int
+caam_jr_queue_pair_setup(
+ struct rte_cryptodev *dev, uint16_t qp_id,
+ __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
+ __rte_unused int socket_id,
+ __rte_unused struct rte_mempool *session_pool)
+{
+ struct sec_job_ring_t *internals;
+ struct caam_jr_qp *qp = NULL;
+
+ PMD_INIT_FUNC_TRACE();
+ CAAM_JR_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
+
+ internals = dev->data->dev_private;
+ if (qp_id >= internals->max_nb_queue_pairs) {
+ CAAM_JR_ERR("Max supported qpid %d",
+ internals->max_nb_queue_pairs);
+ return -EINVAL;
+ }
+
+ qp = &internals->qps[qp_id];
+ qp->ring = internals;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ return 0;
+}
+
+/* Return the number of allocated queue pairs */
+static uint32_t
+caam_jr_queue_pair_count(struct rte_cryptodev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return dev->data->nb_queue_pairs;
+}
+
+/* Returns the size of the caam_jr session structure */
+static unsigned int
+caam_jr_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return sizeof(struct caam_jr_session);
+}
+
+static int
+caam_jr_cipher_init(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ struct caam_jr_session *session)
+{
+ PMD_INIT_FUNC_TRACE();
+ session->cipher_alg = xform->cipher.algo;
+ session->iv.length = xform->cipher.iv.length;
+ session->iv.offset = xform->cipher.iv.offset;
+ session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
+ CAAM_JR_ERR("No Memory for cipher key\n");
+ return -ENOMEM;
+ }
+ session->cipher_key.length = xform->cipher.key.length;
+
+ memcpy(session->cipher_key.data, xform->cipher.key.data,
+ xform->cipher.key.length);
+ session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
+ return 0;
+}
+
+static int
+caam_jr_auth_init(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ struct caam_jr_session *session)
+{
+ PMD_INIT_FUNC_TRACE();
+ session->auth_alg = xform->auth.algo;
+ session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
+ CAAM_JR_ERR("No Memory for auth key\n");
+ return -ENOMEM;
+ }
+ session->auth_key.length = xform->auth.key.length;
+ session->digest_length = xform->auth.digest_length;
+
+ memcpy(session->auth_key.data, xform->auth.key.data,
+ xform->auth.key.length);
+ session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
+ DIR_ENC : DIR_DEC;
+
+ return 0;
+}
+
+static int
+caam_jr_aead_init(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ struct caam_jr_session *session)
+{
+ PMD_INIT_FUNC_TRACE();
+ session->aead_alg = xform->aead.algo;
+ session->iv.length = xform->aead.iv.length;
+ session->iv.offset = xform->aead.iv.offset;
+ session->auth_only_len = xform->aead.aad_length;
+ session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
+ CAAM_JR_ERR("No Memory for aead key\n");
+ return -ENOMEM;
+ }
+ session->aead_key.length = xform->aead.key.length;
+ session->digest_length = xform->aead.digest_length;
+
+ memcpy(session->aead_key.data, xform->aead.key.data,
+ xform->aead.key.length);
+ session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
+ return 0;
+}
+
+static int
+caam_jr_set_session_parameters(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ struct sec_job_ring_t *internals = dev->data->dev_private;
+ struct caam_jr_session *session = sess;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(sess == NULL)) {
+ CAAM_JR_ERR("invalid session struct");
+ return -EINVAL;
+ }
+
+ /* Default IV length = 0 */
+ session->iv.length = 0;
+
+ /* Cipher Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
+ session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+ caam_jr_cipher_init(dev, xform, session);
+
+ /* Authentication Only */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next == NULL) {
+ session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
+ caam_jr_auth_init(dev, xform, session);
+
+ /* Cipher then Authenticate */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ caam_jr_cipher_init(dev, xform, session);
+ caam_jr_auth_init(dev, xform->next, session);
+ } else {
+ CAAM_JR_ERR("Not supported: Auth then Cipher");
+ goto err1;
+ }
+
+ /* Authenticate then Cipher */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+ caam_jr_auth_init(dev, xform, session);
+ caam_jr_cipher_init(dev, xform->next, session);
+ } else {
+ CAAM_JR_ERR("Not supported: Auth then Cipher");
+ goto err1;
+ }
+
+ /* AEAD operation for AES-GCM kind of Algorithms */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
+ xform->next == NULL) {
+ caam_jr_aead_init(dev, xform, session);
+
+ } else {
+ CAAM_JR_ERR("Invalid crypto type");
+ return -EINVAL;
+ }
+ session->ctx_pool = internals->ctx_pool;
+
+ return 0;
+
+err1:
+ rte_free(session->cipher_key.data);
+ rte_free(session->auth_key.data);
+ memset(session, 0, sizeof(struct caam_jr_session));
+
+ return -EINVAL;
+}
+
+static int
+caam_jr_sym_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CAAM_JR_ERR("Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ memset(sess_private_data, 0, sizeof(struct caam_jr_session));
+ ret = caam_jr_set_session_parameters(dev, xform, sess_private_data);
+ if (ret != 0) {
+ CAAM_JR_ERR("failed to configure session parameters");
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
+
+ return 0;
+}
+
+/* Clear the memory of session so it doesn't leave key material behind */
+static void
+caam_jr_sym_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_sym_session_private_data(sess, index);
+ struct caam_jr_session *s = (struct caam_jr_session *)sess_priv;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (sess_priv) {
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ rte_free(s->cipher_key.data);
+ rte_free(s->auth_key.data);
+ memset(s, 0, sizeof(struct caam_jr_session));
+ set_sym_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+static int
+caam_jr_set_ipsec_session(struct rte_cryptodev *dev,
+ struct rte_security_session_conf *conf,
+ void *sess)
+{
+ struct sec_job_ring_t *internals = dev->data->dev_private;
+ struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
+ struct rte_crypto_auth_xform *auth_xform;
+ struct rte_crypto_cipher_xform *cipher_xform;
+ struct caam_jr_session *session = (struct caam_jr_session *)sess;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+ cipher_xform = &conf->crypto_xform->cipher;
+ auth_xform = &conf->crypto_xform->next->auth;
+ } else {
+ auth_xform = &conf->crypto_xform->auth;
+ cipher_xform = &conf->crypto_xform->next->cipher;
+ }
+ session->proto_alg = conf->protocol;
+ session->cipher_key.data = rte_zmalloc(NULL,
+ cipher_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL &&
+ cipher_xform->key.length > 0) {
+ CAAM_JR_ERR("No Memory for cipher key\n");
+ return -ENOMEM;
+ }
+
+ session->cipher_key.length = cipher_xform->key.length;
+ session->auth_key.data = rte_zmalloc(NULL,
+ auth_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL &&
+ auth_xform->key.length > 0) {
+ CAAM_JR_ERR("No Memory for auth key\n");
+ rte_free(session->cipher_key.data);
+ return -ENOMEM;
+ }
+ session->auth_key.length = auth_xform->key.length;
+ memcpy(session->cipher_key.data, cipher_xform->key.data,
+ cipher_xform->key.length);
+ memcpy(session->auth_key.data, auth_xform->key.data,
+ auth_xform->key.length);
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ CAAM_JR_ERR("Crypto: Unsupported auth alg %u\n",
+ auth_xform->algo);
+ goto out;
+ default:
+ CAAM_JR_ERR("Crypto: Undefined Auth specified %u\n",
+ auth_xform->algo);
+ goto out;
+ }
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+ break;
+ case RTE_CRYPTO_CIPHER_NULL:
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ CAAM_JR_ERR("Crypto: Unsupported Cipher alg %u\n",
+ cipher_xform->algo);
+ goto out;
+ default:
+ CAAM_JR_ERR("Crypto: Undefined Cipher specified %u\n",
+ cipher_xform->algo);
+ goto out;
+ }
+
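+ /* For egress, build the outer tunnel IPv4 header template and the
+ * encapsulation PDB; the outer header is carried inline in the PDB
+ * (PDBOPTS_ESP_OIHI_PDB_INL) and its checksum is precomputed in
+ * software.
+ */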
+ if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+ memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
+ sizeof(session->ip4_hdr));
+ session->ip4_hdr.ip_v = IPVERSION;
+ session->ip4_hdr.ip_hl = 5;
+ session->ip4_hdr.ip_len = rte_cpu_to_be_16(
+ sizeof(session->ip4_hdr));
+ session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
+ session->ip4_hdr.ip_id = 0;
+ session->ip4_hdr.ip_off = 0;
+ session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
+ session->ip4_hdr.ip_p = (ipsec_xform->proto ==
+ RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
+ : IPPROTO_AH;
+ session->ip4_hdr.ip_sum = 0;
+ session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
+ session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
+ session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
+ (void *)&session->ip4_hdr,
+ sizeof(struct ip));
+
+ session->encap_pdb.options =
+ (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
+ PDBOPTS_ESP_OIHI_PDB_INL |
+ PDBOPTS_ESP_IVSRC |
+ PDBHMO_ESP_ENCAP_DTTL;
+ session->encap_pdb.spi = ipsec_xform->spi;
+ session->encap_pdb.ip_hdr_len = sizeof(struct ip);
+
+ session->dir = DIR_ENC;
+ } else if (ipsec_xform->direction ==
+ RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+ memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
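+ /* For ingress only the outer IP header length needs to be
+ * programmed; it is encoded in the upper bits of the decapsulation
+ * PDB options word.
+ */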
+ session->decap_pdb.options = sizeof(struct ip) << 16;
+ session->dir = DIR_DEC;
+ } else
+ goto out;
+ session->ctx_pool = internals->ctx_pool;
+
+ return 0;
+out:
+ rte_free(session->auth_key.data);
+ rte_free(session->cipher_key.data);
+ memset(session, 0, sizeof(struct caam_jr_session));
+ return -1;
+}
+
+static int
+caam_jr_security_session_create(void *dev,
+ struct rte_security_session_conf *conf,
+ struct rte_security_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CAAM_JR_ERR("Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ switch (conf->protocol) {
+ case RTE_SECURITY_PROTOCOL_IPSEC:
+ ret = caam_jr_set_ipsec_session(cdev, conf,
+ sess_private_data);
+ break;
+ case RTE_SECURITY_PROTOCOL_MACSEC:
+ return -ENOTSUP;
+ default:
+ return -EINVAL;
+ }
+ if (ret != 0) {
+ CAAM_JR_ERR("failed to configure session parameters");
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sec_session_private_data(sess, sess_private_data);
+
+ return ret;
+}
+
+/* Clear the memory of session so it doesn't leave key material behind */
+static int
+caam_jr_security_session_destroy(void *dev __rte_unused,
+ struct rte_security_session *sess)
+{
+ PMD_INIT_FUNC_TRACE();
+ void *sess_priv = get_sec_session_private_data(sess);
+
+ struct caam_jr_session *s = (struct caam_jr_session *)sess_priv;
+
+ if (sess_priv) {
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ rte_free(s->cipher_key.data);
+ rte_free(s->auth_key.data);
+ memset(s, 0, sizeof(struct caam_jr_session));
+ set_sec_session_private_data(sess, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+ return 0;
+}
+
+
+static int
+caam_jr_dev_configure(struct rte_cryptodev *dev,
+ struct rte_cryptodev_config *config __rte_unused)
+{
+ char str[20];
+ struct sec_job_ring_t *internals;
+
+ PMD_INIT_FUNC_TRACE();
+
+ internals = dev->data->dev_private;
+ sprintf(str, "ctx_pool_%d", dev->data->dev_id);
+ if (!internals->ctx_pool) {
+ internals->ctx_pool = rte_mempool_create((const char *)str,
+ CTX_POOL_NUM_BUFS,
+ sizeof(struct caam_jr_op_ctx),
+ CTX_POOL_CACHE_SIZE, 0,
+ NULL, NULL, NULL, NULL,
+ SOCKET_ID_ANY, 0);
+ if (!internals->ctx_pool) {
+ CAAM_JR_ERR("%s create failed\n", str);
+ return -ENOMEM;
+ }
+ } else
+ CAAM_JR_INFO("mempool already created for dev_id : %d",
+ dev->data->dev_id);
+
+ return 0;
+}
+
+static int
+caam_jr_dev_start(struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+ return 0;
+}
+
+static void
+caam_jr_dev_stop(struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static int
+caam_jr_dev_close(struct rte_cryptodev *dev)
+{
+ struct sec_job_ring_t *internals;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (dev == NULL)
+ return -ENODEV;
+
+ internals = dev->data->dev_private;
+ rte_mempool_free(internals->ctx_pool);
+ internals->ctx_pool = NULL;
+
+ return 0;
+}
+
+static void
+caam_jr_dev_infos_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct sec_job_ring_t *internals = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+ if (info != NULL) {
+ info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = caam_jr_get_cryptodev_capabilities();
+ info->sym.max_nb_sessions = internals->max_nb_sessions;
+ info->driver_id = cryptodev_driver_id;
+ }
+}
+
+static struct rte_cryptodev_ops caam_jr_ops = {
+ .dev_configure = caam_jr_dev_configure,
+ .dev_start = caam_jr_dev_start,
+ .dev_stop = caam_jr_dev_stop,
+ .dev_close = caam_jr_dev_close,
+ .dev_infos_get = caam_jr_dev_infos_get,
+ .stats_get = caam_jr_stats_get,
+ .stats_reset = caam_jr_stats_reset,
+ .queue_pair_setup = caam_jr_queue_pair_setup,
+ .queue_pair_release = caam_jr_queue_pair_release,
+ .queue_pair_count = caam_jr_queue_pair_count,
+ .sym_session_get_size = caam_jr_sym_session_get_size,
+ .sym_session_configure = caam_jr_sym_session_configure,
+ .sym_session_clear = caam_jr_sym_session_clear
+};
+
+static struct rte_security_ops caam_jr_security_ops = {
+ .session_create = caam_jr_security_session_create,
+ .session_update = NULL,
+ .session_stats_get = NULL,
+ .session_destroy = caam_jr_security_session_destroy,
+ .set_pkt_metadata = NULL,
+ .capabilities_get = caam_jr_get_security_capabilities
+};
+
+/* @brief Flush job rings of any processed descs.
+ * The processed descs are silently dropped,
+ * WITHOUT being notified to UA.
+ */
+static void
+close_job_ring(struct sec_job_ring_t *job_ring)
+{
+ PMD_INIT_FUNC_TRACE();
+ if (job_ring->irq_fd) {
+ /* Producer index is frozen. If consumer index is not equal
+ * with producer index, then we have descs to flush.
+ */
+ while (job_ring->pidx != job_ring->cidx)
+ hw_flush_job_ring(job_ring, false, NULL);
+
+ /* free the uio job ring */
+ free_job_ring(job_ring->irq_fd);
+ job_ring->irq_fd = 0;
+ caam_jr_dma_free(job_ring->input_ring);
+ caam_jr_dma_free(job_ring->output_ring);
+ g_job_rings_no--;
+ }
+}
+
+/** @brief Release the software and hardware resources tied to a job ring.
+ * @param [in] job_ring The job ring
+ *
+ * @retval 0 for success
+ * @retval -1 for error
+ */
+static int
+shutdown_job_ring(struct sec_job_ring_t *job_ring)
+{
+ int ret = 0;
+
+ PMD_INIT_FUNC_TRACE();
+ ASSERT(job_ring != NULL);
+ ret = hw_shutdown_job_ring(job_ring);
+ SEC_ASSERT(ret == 0, ret,
+ "Failed to shutdown hardware job ring %p",
+ job_ring);
+
+ if (job_ring->coalescing_en)
+ hw_job_ring_disable_coalescing(job_ring);
+
+ if (job_ring->jr_mode != SEC_NOTIFICATION_TYPE_POLL) {
+ ret = caam_jr_disable_irqs(job_ring->irq_fd);
+ SEC_ASSERT(ret == 0, ret,
+ "Failed to disable irqs for job ring %p",
+ job_ring);
+ }
+
+ return ret;
+}
+
+/*
+ * @brief Release the resources used by the SEC user space driver.
+ *
+ * Reset and release SEC's job rings indicated by the User Application at
+ * init_job_ring() and free any memory allocated internally.
+ * Call once during application tear down.
+ *
+ * @note In case there are any descriptors in-flight (descriptors received by
+ * SEC driver for processing and for which no response was yet provided to UA),
+ * the descriptors are discarded without any notifications to User Application.
+ *
+ * @retval 0 for a successful execution
+ * @retval -1 if SEC driver release is in progress
+ */
+static int
+caam_jr_dev_uninit(struct rte_cryptodev *dev)
+{
+ struct sec_job_ring_t *internals;
+
+ PMD_INIT_FUNC_TRACE();
+ if (dev == NULL)
+ return -ENODEV;
+
+ internals = dev->data->dev_private;
+ rte_free(dev->security_ctx);
+
+ /* If any descriptors in flight , poll and wait
+ * until all descriptors are received and silently discarded.
+ */
+ if (internals) {
+ shutdown_job_ring(internals);
+ close_job_ring(internals);
+ rte_mempool_free(internals->ctx_pool);
+ }
+
+ CAAM_JR_INFO("Closing crypto device %s", dev->data->name);
+
+ /* last caam jr instance */
+ if (g_job_rings_no == 0)
+ g_driver_state = SEC_DRIVER_STATE_IDLE;
+
+ return SEC_SUCCESS;
+}
+
+/* @brief Initialize the software and hardware resources tied to a job ring.
+ * @param [in] reg_base_addr The job ring base address register
+ * @param [in] irq_id The job ring interrupt identification number
+ *
+ * The notification mode (#SEC_NOTIFICATION_TYPE_POLL, #SEC_NOTIFICATION_TYPE_IRQ
+ * or #SEC_NOTIFICATION_TYPE_NAPI), the NAPI work mode and the interrupt
+ * coalescing parameters are currently fixed inside this function.
+ *
+ * @retval job_ring_handle for successful job ring configuration
+ * @retval NULL on error
+ *
+ */
+static void *
+init_job_ring(void *reg_base_addr, uint32_t irq_id)
+{
+ struct sec_job_ring_t *job_ring = NULL;
+ int i, ret = 0;
+ int jr_mode = SEC_NOTIFICATION_TYPE_POLL;
+ int napi_mode = 0;
+ int irq_coalescing_timer = 0;
+ int irq_coalescing_count = 0;
+
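+ /* An irq_fd of 0 marks an unused slot in the global job ring table
+ * (close_job_ring() resets it to 0 when the ring is released).
+ */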
+ for (i = 0; i < MAX_SEC_JOB_RINGS; i++) {
+ if (g_job_rings[i].irq_fd == 0) {
+ job_ring = &g_job_rings[i];
+ g_job_rings_no++;
+ break;
+ }
+ }
+ if (job_ring == NULL) {
+ CAAM_JR_ERR("No free job ring\n");
+ return NULL;
+ }
+
+ job_ring->register_base_addr = reg_base_addr;
+ job_ring->jr_mode = jr_mode;
+ job_ring->napi_mode = 0;
+ job_ring->irq_fd = irq_id;
+
+ /* Allocate mem for input and output ring */
+
+ /* Allocate memory for input ring */
+ job_ring->input_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
+ SEC_DMA_MEM_INPUT_RING_SIZE);
+ memset(job_ring->input_ring, 0, SEC_DMA_MEM_INPUT_RING_SIZE);
+
+ /* Allocate memory for output ring */
+ job_ring->output_ring = caam_jr_dma_mem_alloc(L1_CACHE_BYTES,
+ SEC_DMA_MEM_OUTPUT_RING_SIZE);
+ memset(job_ring->output_ring, 0, SEC_DMA_MEM_OUTPUT_RING_SIZE);
+
+ /* Reset job ring in SEC hw and configure job ring registers */
+ ret = hw_reset_job_ring(job_ring);
+ if (ret != 0) {
+ CAAM_JR_ERR("Failed to reset hardware job ring");
+ goto cleanup;
+ }
+
+ if (jr_mode == SEC_NOTIFICATION_TYPE_NAPI) {
+ /* When SEC US driver works in NAPI mode, the UA can select
+ * if the driver starts with IRQs on or off.
+ */
+ if (napi_mode == SEC_STARTUP_INTERRUPT_MODE) {
+ CAAM_JR_INFO("Enabling DONE IRQ generationon job ring - %p",
+ job_ring);
+ ret = caam_jr_enable_irqs(job_ring->irq_fd);
+ if (ret != 0) {
+ CAAM_JR_ERR("Failed to enable irqs for job ring");
+ goto cleanup;
+ }
+ }
+ } else if (jr_mode == SEC_NOTIFICATION_TYPE_IRQ) {
+ /* When SEC US driver works in pure interrupt mode,
+ * IRQ's are always enabled.
+ */
+ CAAM_JR_INFO("Enabling DONE IRQ generation on job ring - %p",
+ job_ring);
+ ret = caam_jr_enable_irqs(job_ring->irq_fd);
+ if (ret != 0) {
+ CAAM_JR_ERR("Failed to enable irqs for job ring");
+ goto cleanup;
+ }
+ }
+ if (irq_coalescing_timer || irq_coalescing_count) {
+ hw_job_ring_set_coalescing_param(job_ring,
+ irq_coalescing_timer,
+ irq_coalescing_count);
+
+ hw_job_ring_enable_coalescing(job_ring);
+ job_ring->coalescing_en = 1;
+ }
+
+ job_ring->jr_state = SEC_JOB_RING_STATE_STARTED;
+ job_ring->max_nb_queue_pairs = RTE_CAAM_MAX_NB_SEC_QPS;
+ job_ring->max_nb_sessions = RTE_CAAM_JR_PMD_MAX_NB_SESSIONS;
+
+ return job_ring;
+cleanup:
+ caam_jr_dma_free(job_ring->output_ring);
+ caam_jr_dma_free(job_ring->input_ring);
+ return NULL;
+}
+
+
+static int
+caam_jr_dev_init(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_cryptodev_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct rte_security_ctx *security_instance;
+ struct uio_job_ring *job_ring;
+ char str[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Validate driver state */
+ if (g_driver_state == SEC_DRIVER_STATE_IDLE) {
+ g_job_rings_max = sec_configure();
+ if (!g_job_rings_max) {
+ CAAM_JR_ERR("No job ring detected on UIO !!!!");
+ return -1;
+ }
+ /* Update driver state */
+ g_driver_state = SEC_DRIVER_STATE_STARTED;
+ }
+
+ if (g_job_rings_no >= g_job_rings_max) {
+ CAAM_JR_ERR("No more job rings available max=%d!!!!",
+ g_job_rings_max);
+ return -1;
+ }
+
+ job_ring = config_job_ring();
+ if (job_ring == NULL) {
+ CAAM_JR_ERR("failed to create job ring");
+ goto init_error;
+ }
+
+ snprintf(str, sizeof(str), "caam_jr%d", job_ring->jr_id);
+
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ CAAM_JR_ERR("failed to create cryptodev vdev");
+ goto cleanup;
+ }
+ /*TODO free it during teardown*/
+ dev->data->dev_private = init_job_ring(job_ring->register_base_addr,
+ job_ring->uio_fd);
+
+ if (!dev->data->dev_private) {
+ CAAM_JR_ERR("Ring memory allocation failed\n");
+ goto cleanup2;
+ }
+
+ dev->driver_id = cryptodev_driver_id;
+ dev->dev_ops = &caam_jr_ops;
+
+ /* register rx/tx burst functions for data path */
+ dev->dequeue_burst = caam_jr_dequeue_burst;
+ dev->enqueue_burst = caam_jr_enqueue_burst;
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_SECURITY |
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+
+ /* For secondary processes, we don't initialise any further as primary
+ * has already done this work. Only check we don't need a different
+ * RX function
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ CAAM_JR_WARN("Device already init by primary process");
+ return 0;
+ }
+
+ /*TODO free it during teardown*/
+ security_instance = rte_malloc("caam_jr",
+ sizeof(struct rte_security_ctx), 0);
+ if (security_instance == NULL) {
+ CAAM_JR_ERR("memory allocation failed\n");
+ /* TODO: error handling */
+ goto cleanup2;
+ }
+
+ security_instance->device = (void *)dev;
+ security_instance->ops = &caam_jr_security_ops;
+ security_instance->sess_cnt = 0;
+ dev->security_ctx = security_instance;
+
+ RTE_LOG(INFO, PMD, "%s cryptodev init\n", dev->data->name);
+
+ return 0;
+
+cleanup2:
+ caam_jr_dev_uninit(dev);
+ rte_cryptodev_pmd_release_device(dev);
+cleanup:
+ free_job_ring(job_ring->uio_fd);
+init_error:
+ CAAM_JR_ERR("driver %s: cryptodev_caam_jr_create failed",
+ init_params->name);
+
+ return -ENXIO;
+}
+
+/** Initialise CAAM JR crypto device */
+static int
+cryptodev_caam_jr_probe(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct sec_job_ring_t),
+ rte_socket_id(),
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
+ };
+ const char *name;
+ const char *input_args;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ input_args = rte_vdev_device_args(vdev);
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
+
+ /* if sec device version is not configured */
+ if (!rta_get_sec_era()) {
+ const struct device_node *caam_node;
+
+ for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
+ const uint32_t *prop = of_get_property(caam_node,
+ "fsl,sec-era",
+ NULL);
+ if (prop) {
+ rta_set_sec_era(
+ INTL_SEC_ERA(cpu_to_caam32(*prop)));
+ break;
+ }
+ }
+ }
+#ifdef RTE_LIBRTE_PMD_CAAM_JR_BE
+ if (rta_get_sec_era() > RTA_SEC_ERA_8) {
+ RTE_LOG(ERR, PMD,
+ "CAAM is compiled in BE mode for device with sec era > 8???\n");
+ return -EINVAL;
+ }
+#endif
+
+ return caam_jr_dev_init(name, vdev, &init_params);
+}
+
+/** Uninitialise CAAM JR crypto device */
+static int
+cryptodev_caam_jr_remove(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev *cryptodev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ caam_jr_dev_uninit(cryptodev);
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_vdev_driver cryptodev_caam_jr_drv = {
+ .probe = cryptodev_caam_jr_probe,
+ .remove = cryptodev_caam_jr_remove
+};
+
+static struct cryptodev_driver caam_jr_crypto_drv;
+
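+/*
+ * Example instantiation from the EAL command line (illustrative only; the
+ * device name is whatever string CRYPTODEV_NAME_CAAM_JR_PMD expands to):
+ *   --vdev "<caam_jr_pmd_name>,max_nb_queue_pairs=4,socket_id=0"
+ */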
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CAAM_JR_PMD, cryptodev_caam_jr_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CAAM_JR_PMD,
+ "max_nb_queue_pairs=<int>"
+ "socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(caam_jr_crypto_drv, cryptodev_caam_jr_drv.driver,
+ cryptodev_driver_id);
+
+RTE_INIT(caam_jr_init_log)
+{
+ caam_jr_logtype = rte_log_register("pmd.crypto.caam");
+ if (caam_jr_logtype >= 0)
+ rte_log_set_level(caam_jr_logtype, RTE_LOG_NOTICE);
+}
diff --git a/drivers/crypto/caam_jr/caam_jr_capabilities.c b/drivers/crypto/caam_jr/caam_jr_capabilities.c
new file mode 100644
index 00000000..c51593c4
--- /dev/null
+++ b/drivers/crypto/caam_jr/caam_jr_capabilities.c
@@ -0,0 +1,266 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017-2018 NXP
+ */
+
+#include <caam_jr_capabilities.h>
+
+static const struct rte_cryptodev_capabilities caam_jr_capabilities[] = {
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 16,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 20,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 28,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 32,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 48,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
+ { /* AES GCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 240,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_security_capability caam_jr_security_cap[] = {
+ { /* IPsec Lookaside Protocol offload ESP Tunnel Egress */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+ .options = { 0 }
+ },
+ .crypto_capabilities = caam_jr_capabilities
+ },
+ { /* IPsec Lookaside Protocol offload ESP Tunnel Ingress */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+ .options = { 0 }
+ },
+ .crypto_capabilities = caam_jr_capabilities
+ },
+ {
+ .action = RTE_SECURITY_ACTION_TYPE_NONE
+ }
+};
+
+const struct rte_cryptodev_capabilities *
+caam_jr_get_cryptodev_capabilities(void)
+{
+ return caam_jr_capabilities;
+}
+
+const struct rte_security_capability *
+caam_jr_get_security_capabilities(void *device __rte_unused)
+{
+ return caam_jr_security_cap;
+}
diff --git a/drivers/crypto/caam_jr/caam_jr_capabilities.h b/drivers/crypto/caam_jr/caam_jr_capabilities.h
new file mode 100644
index 00000000..c1e3f305
--- /dev/null
+++ b/drivers/crypto/caam_jr/caam_jr_capabilities.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef CAAM_JR_CAPABILITIES_H
+#define CAAM_JR_CAPABILITIES_H
+
+#include <rte_cryptodev.h>
+#include <rte_security.h>
+
+/* Get cryptodev capabilities */
+const struct rte_cryptodev_capabilities *
+caam_jr_get_cryptodev_capabilities(void);
+/* Get security capabilities */
+const struct rte_security_capability *
+caam_jr_get_security_capabilities(void *device);
+
+#endif
diff --git a/drivers/crypto/caam_jr/caam_jr_config.h b/drivers/crypto/caam_jr/caam_jr_config.h
new file mode 100644
index 00000000..041187a8
--- /dev/null
+++ b/drivers/crypto/caam_jr/caam_jr_config.h
@@ -0,0 +1,207 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef CAAM_JR_CONFIG_H
+#define CAAM_JR_CONFIG_H
+
+#include <rte_byteorder.h>
+
+#include <compat.h>
+
+#ifdef RTE_LIBRTE_PMD_CAAM_JR_BE
+#define CAAM_BYTE_ORDER __BIG_ENDIAN
+#else
+#define CAAM_BYTE_ORDER __LITTLE_ENDIAN
+#endif
+
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+#define CORE_BYTE_ORDER __BIG_ENDIAN
+#else
+#define CORE_BYTE_ORDER __LITTLE_ENDIAN
+#endif
+
+#if CORE_BYTE_ORDER != CAAM_BYTE_ORDER
+
+#define cpu_to_caam64 rte_cpu_to_be_64
+#define cpu_to_caam32 rte_cpu_to_be_32
+#else
+#define cpu_to_caam64
+#define cpu_to_caam32
+
+#endif
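+
+/*
+ * Illustrative sketch (not part of this patch): every word the core places
+ * in CAAM-owned memory is expected to pass through cpu_to_caam32() or
+ * cpu_to_caam64() so that it lands in the device byte order. When core and
+ * CAAM byte orders match, the macros above are no-ops.
+ */
+#if 0 /* example only */
+static inline void example_store_caam_word(uint32_t *slot, uint32_t val)
+{
+ *slot = cpu_to_caam32(val); /* value stored in CAAM byte order */
+}
+#endif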
+
+/*
+ * SEC is configured to start work in polling mode,
+ * when configured for NAPI notification style.
+ */
+#define SEC_STARTUP_POLLING_MODE 0
+/*
+ * SEC is configured to start work in interrupt mode,
+ * when configured for NAPI notification style.
+ */
+#define SEC_STARTUP_INTERRUPT_MODE 1
+
+/*
+ * SEC driver will use NAPI model to receive notifications
+ * for processed packets from SEC engine hardware:
+ * - IRQ for low traffic
+ * - polling for high traffic.
+ */
+#define SEC_NOTIFICATION_TYPE_NAPI 0
+/*
+ * SEC driver will use ONLY interrupts to receive notifications
+ * for processed packets from SEC engine hardware.
+ */
+#define SEC_NOTIFICATION_TYPE_IRQ 1
+/*
+ * SEC driver will use ONLY polling to receive notifications
+ * for processed packets from SEC engine hardware.
+ */
+#define SEC_NOTIFICATION_TYPE_POLL 2
+
+/*
+ * SEC USER SPACE DRIVER related configuration.
+ */
+
+/*
+ * Determines how SEC user space driver will receive notifications
+ * for processed packets from SEC engine.
+ * Valid values are: #SEC_NOTIFICATION_TYPE_POLL, #SEC_NOTIFICATION_TYPE_IRQ
+ * and #SEC_NOTIFICATION_TYPE_NAPI.
+ */
+#define SEC_NOTIFICATION_TYPE SEC_NOTIFICATION_TYPE_POLL
+
+/* Maximum number of job rings supported by SEC hardware */
+#define MAX_SEC_JOB_RINGS 4
+
+/* Maximum number of QP per job ring */
+#define RTE_CAAM_MAX_NB_SEC_QPS 1
+
+/*
+ * Size of cryptographic context that is used directly in communicating
+ * with SEC device. SEC device works only with physical addresses. This
+ * is the maximum size for a SEC descriptor ( = 64 words).
+ */
+#define SEC_CRYPTO_DESCRIPTOR_SIZE 256
+
+/*
+ * Size of job descriptor submitted to SEC device for each packet to
+ * be processed.
+ * Job descriptor contains 3 DMA address pointers:
+ * - to shared descriptor, to input buffer and to output buffer.
+ * The job descriptor contains other SEC specific commands as well:
+ * - HEADER command, SEQ IN PTR command, SEQ OUT PTR command and opaque data
+ * each measuring 4 bytes.
+ * Job descriptor size, depending on physical address representation:
+ * - 32 bit - size is 28 bytes - cacheline-aligned size is 64 bytes
+ * - 36 bit - size is 40 bytes - cacheline-aligned size is 64 bytes
+ * @note: Job descriptor must be cacheline-aligned to ensure efficient
+ * memory access.
+ * @note: If other format is used for job descriptor, then the size must be
+ * revised.
+ */
+#define SEC_JOB_DESCRIPTOR_SIZE 64
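+
+/*
+ * Worked example (illustrative): with 32 bit pointers the job descriptor
+ * occupies 3 * 4 (pointers) + 4 * 4 (HEADER, SEQ IN PTR, SEQ OUT PTR and
+ * opaque data) = 28 bytes; with 36 bit physical addresses carried in 8 byte
+ * fields it occupies 3 * 8 + 4 * 4 = 40 bytes. Both round up to a single
+ * 64 byte cacheline, which is the value defined above.
+ */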
+
+/*
+ * Size of one entry in the input ring of a job ring.
+ * Input ring contains pointers to job descriptors.
+ * The memory used for an input ring and output ring must be physically
+ * contiguous.
+ */
+#define SEC_JOB_INPUT_RING_ENTRY_SIZE sizeof(dma_addr_t)
+
+/*
+ * Size of one entry in the output ring of a job ring.
+ * Output ring entry is a pointer to a job descriptor followed by a 4 byte
+ * status word.
+ * The memory used for an input ring and output ring must be physically
+ * contiguous.
+ * @note If the optional SEQ OUT indication in output ring entries is
+ * also desired, then 4 more bytes must be added to the size.
+ */
+#define SEC_JOB_OUTPUT_RING_ENTRY_SIZE (SEC_JOB_INPUT_RING_ENTRY_SIZE + 4)
+
+/*
+ * DMA memory required for an input ring of a job ring.
+ */
+#define SEC_DMA_MEM_INPUT_RING_SIZE ((SEC_JOB_INPUT_RING_ENTRY_SIZE) * \
+ (SEC_JOB_RING_SIZE))
+
+/*
+ * DMA memory required for an output ring of a job ring.
+ * Required extra 4 byte for status word per each entry.
+ */
+#define SEC_DMA_MEM_OUTPUT_RING_SIZE ((SEC_JOB_OUTPUT_RING_ENTRY_SIZE) * \
+ (SEC_JOB_RING_SIZE))
+
+/* DMA memory required for a job ring, including both input and output rings. */
+#define SEC_DMA_MEM_JOB_RING_SIZE ((SEC_DMA_MEM_INPUT_RING_SIZE) + \
+ (SEC_DMA_MEM_OUTPUT_RING_SIZE))
+
+/*
+ * When calling sec_init() UA will provide an area of virtual memory
+ * of size #SEC_DMA_MEMORY_SIZE to be used internally by the driver
+ * to allocate data (like SEC descriptors) that needs to be passed to
+ * SEC device in physical addressing and later on retrieved from SEC device.
+ * At initialization the UA provides specialized ptov/vtop functions/macros to
+ * translate addresses allocated from this memory area.
+ */
+#define SEC_DMA_MEMORY_SIZE ((SEC_DMA_MEM_JOB_RING_SIZE) * \
+ (MAX_SEC_JOB_RINGS))
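+
+/*
+ * Worked example (illustrative, assuming an 8 byte dma_addr_t and the
+ * defaults in this file): with SEC_JOB_RING_SIZE = 512 the input ring takes
+ * 512 * 8 = 4096 bytes, the output ring 512 * (8 + 4) = 6144 bytes, so one
+ * job ring needs 10240 bytes and MAX_SEC_JOB_RINGS = 4 rings need 40960
+ * bytes of DMA-able memory in total.
+ */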
+
+#define L1_CACHE_BYTES 64
+
+/* SEC JOB RING related configuration. */
+
+/*
+ * Configure the size of the JOB RING.
+ * The maximum size of the ring supported by the hardware is 1024.
+ * However, the number of packets in flight in a 1 ms interval can be
+ * estimated from the traffic rate (Mbps) and the packet size.
+ * A packet size of 64 bytes was assumed here.
+ *
+ * @note Round up to nearest power of 2 for optimized update
+ * of producer/consumer indexes of each job ring
+ */
+#define SEC_JOB_RING_SIZE 512
+
+/*
+ * Interrupt coalescing related configuration.
+ * NOTE: hardware interrupt coalescing is not
+ * supported on SEC version 3.1.
+ * SEC version 4.4 does support interrupt
+ * coalescing.
+ */
+
+#if SEC_NOTIFICATION_TYPE != SEC_NOTIFICATION_TYPE_POLL
+
+#define SEC_INT_COALESCING_ENABLE 1
+/*
+ * Interrupt Coalescing Descriptor Count Threshold.
+ * While interrupt coalescing is enabled (ICEN=1), this value determines
+ * how many Descriptors are completed before raising an interrupt.
+ *
+ * Valid values for this field are from 0 to 255.
+ * Note that a value of 1 functionally defeats the advantages of interrupt
+ * coalescing since the threshold value is reached each time that a
+ * Job Descriptor is completed. A value of 0 is treated in the same
+ * manner as a value of 1.
+ */
+#define SEC_INTERRUPT_COALESCING_DESCRIPTOR_COUNT_THRESH 10
+
+/*
+ * Interrupt Coalescing Timer Threshold.
+ * While interrupt coalescing is enabled (ICEN=1), this value determines the
+ * maximum amount of time after processing a Descriptor before raising an
+ * interrupt.
+ * The threshold value is represented in units equal to 64 CAAM interface
+ * clocks. Valid values for this field are from 1 to 65535.
+ * A value of 0 results in behavior identical to that when interrupt
+ * coalescing is disabled.
+ */
+#define SEC_INTERRUPT_COALESCING_TIMER_THRESH 100
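+
+/*
+ * Worked example (illustrative): with the two thresholds above an interrupt
+ * is raised after 10 completed descriptors, or after 100 * 64 = 6400 CAAM
+ * interface clocks have elapsed since a descriptor completed, whichever
+ * happens first.
+ */
+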
+#endif /* SEC_NOTIFICATION_TYPE_POLL */
+
+#endif /* CAAM_JR_CONFIG_H */
diff --git a/drivers/crypto/caam_jr/caam_jr_desc.h b/drivers/crypto/caam_jr/caam_jr_desc.h
new file mode 100644
index 00000000..6683ea83
--- /dev/null
+++ b/drivers/crypto/caam_jr/caam_jr_desc.h
@@ -0,0 +1,285 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef CAAM_JR_DESC_H
+#define CAAM_JR_DESC_H
+
+#define CMD_HDR_CTYPE_SD 0x16
+#define CMD_HDR_CTYPE_JD 0x17
+
+/* The maximum size of a SEC descriptor, in WORDs (32 bits). */
+#define MAX_DESC_SIZE_WORDS 64
+
+/*
+ * Macros manipulating descriptors
+ */
+/* Macro for setting the SD pointer in a JD. Common for all protocols
+ * supported by the SEC driver.
+ */
+#define SEC_JD_SET_SD(descriptor, ptr, len) { \
+ (descriptor)->sd_ptr = (ptr); \
+ (descriptor)->deschdr.command.jd.shr_desc_len = (len); \
+}
+
+/* Macro for setting a pointer to the job which this descriptor processes.
+ * It eases the lookup procedure for identifying the descriptor that has
+ * completed.
+ */
+#define SEC_JD_SET_JOB_PTR(descriptor, ptr) \
+ ((descriptor)->job_ptr = (ptr))
+
+/* Macro for setting up a JD. The structure of the JD is common across all
+ * supported protocols, thus its structure is identical.
+ */
+#define SEC_JD_INIT(descriptor) ({ \
+ /* CTYPE = job descriptor \
+ * RSMS, DNR = 0
+ * ONE = 1
+ * Start Index = 0
+ * ZRO,TD, MTD = 0
+ * SHR = 1 (there's a shared descriptor referenced
+ * by this job descriptor, pointer in next word)
+ * REO = 1 (execute job descr. first, shared descriptor
+ * after)
+ * SHARE = DEFER
+ * Descriptor Length = 0 ( to be completed @ runtime ) */ \
+ (descriptor)->deschdr.command.word = 0xB0801C0D; \
+ /*
+ * CTYPE = SEQ OUT command
+ * Scatter Gather Flag = 0 (can be updated @ runtime)
+ * PRE = 0
+ * EXT = 1 (data length is in next word, following the command)
+ * RTO = 0 */ \
+ (descriptor)->seq_out.command.word = 0xF8400000; /**/ \
+ /*
+ * CTYPE = SEQ IN command
+ * Scatter Gather Flag = 0 (can be updated @ runtime)
+ * PRE = 0
+ * EXT = 1 ( data length is in next word, following the
+ * command)
+ * RTO = 0 */ \
+ (descriptor)->seq_in.command.word = 0xF0400000; /**/ \
+ /*
+ * In order to be compatible with QI scenarios, the DPOVRD value
+ * loaded must be formatted like this:
+ * DPOVRD_EN (1b) | Res | DPOVRD Value (right aligned). */ \
+ (descriptor)->load_dpovrd.command.word = 0x16870004; \
+ /* By default, DPOVRD mechanism is disabled, thus the value to be
+ * LOAD-ed through the above descriptor command will be
+ * 0x0000_0000. */ \
+ (descriptor)->dpovrd = 0x00000000; \
+})
+
+/* Macro for setting the pointer to the input buffer in the JD, according to
+ * the parameters set by the user in the ::sec_packet_t structure.
+ */
+#define SEC_JD_SET_IN_PTR(descriptor, phys_addr, offset, length) { \
+ (descriptor)->seq_in_ptr = (phys_addr) + (offset); \
+ (descriptor)->in_ext_length = (length); \
+}
+
+/* Macro for setting the pointer to the output buffer in the JD, according to
+ * the parameters set by the user in the ::sec_packet_t structure.
+ */
+#define SEC_JD_SET_OUT_PTR(descriptor, phys_addr, offset, length) { \
+ (descriptor)->seq_out_ptr = (phys_addr) + (offset); \
+ (descriptor)->out_ext_length = (length); \
+}
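+
+/*
+ * Illustrative sketch (not part of this patch) of how the macros above are
+ * meant to be combined when building a job descriptor before enqueue; the
+ * function and argument names are hypothetical.
+ */
+#if 0 /* example only */
+static inline void
+example_build_job_desc(struct sec_job_descriptor_t *jobdescr,
+ dma_addr_t cdb_iova, unsigned int shared_desc_len,
+ dma_addr_t src_iova, dma_addr_t dst_iova, uint32_t data_len)
+{
+ SEC_JD_INIT(jobdescr);
+ SEC_JD_SET_SD(jobdescr, cdb_iova, shared_desc_len);
+ SEC_JD_SET_IN_PTR(jobdescr, src_iova, 0, data_len);
+ SEC_JD_SET_OUT_PTR(jobdescr, dst_iova, 0, data_len);
+}
+#endif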
+
+/* Macro for setting the Scatter-Gather flag in the SEQ IN command. Used in
+ * case the input buffer is split in multiple buffers, according to the user
+ * specification.
+ */
+#define SEC_JD_SET_SG_IN(descriptor) \
+ ((descriptor)->seq_in.command.field.sgf = 1)
+
+/* Macro for setting the Scatter-Gather flag in the SEQ OUT command. Used in
+ * case the output buffer is split in multiple buffers, according to the user
+ * specification.
+ */
+#define SEC_JD_SET_SG_OUT(descriptor) \
+ ((descriptor)->seq_out.command.field.sgf = 1)
+
+#define SEC_JD_SET_DPOVRD(descriptor)
+
+/* Macro for retrieving a descriptor's length. Works for both SD and JD. */
+#define SEC_GET_DESC_LEN(descriptor) \
+ (((struct descriptor_header_s *)(descriptor))->command.sd.ctype == \
+ CMD_HDR_CTYPE_SD ? ((struct descriptor_header_s *) \
+ (descriptor))->command.sd.desclen : \
+ ((struct descriptor_header_s *)(descriptor))->command.jd.desclen)
+
+/* Helper macro for dumping the hex representation of a descriptor */
+#define SEC_DUMP_DESC(descriptor) { \
+ int __i; \
+ CAAM_JR_INFO("Des@ 0x%08x\n", (uint32_t)((uint32_t *)(descriptor)));\
+ for (__i = 0; \
+ __i < SEC_GET_DESC_LEN(descriptor); \
+ __i++) { \
+ printf("0x%08x: 0x%08x\n", \
+ (uint32_t)(((uint32_t *)(descriptor)) + __i), \
+ *(((uint32_t *)(descriptor)) + __i)); \
+ } \
+}
+/* Union describing a descriptor header.
+ */
+struct descriptor_header_s {
+ union {
+ uint32_t word;
+ struct {
+ /* 4 */ unsigned int ctype:5;
+ /* 5 */ unsigned int res1:2;
+ /* 7 */ unsigned int dnr:1;
+ /* 8 */ unsigned int one:1;
+ /* 9 */ unsigned int res2:1;
+ /* 10 */ unsigned int start_idx:6;
+ /* 16 */ unsigned int res3:2;
+ /* 18 */ unsigned int cif:1;
+ /* 19 */ unsigned int sc:1;
+ /* 20 */ unsigned int pd:1;
+ /* 21 */ unsigned int res4:1;
+ /* 22 */ unsigned int share:2;
+ /* 24 */ unsigned int res5:2;
+ /* 26 */ unsigned int desclen:6;
+ } sd;
+ struct {
+ /* TODO: only the struct members below have been corrected;
+ * all others also need to be reversed, please verify.
+ */
+ /* 0 */ unsigned int desclen:7;
+ /* 7 */ unsigned int res4:1;
+ /* 8 */ unsigned int share:3;
+ /* 11 */ unsigned int reo:1;
+ /* 12 */ unsigned int shr:1;
+ /* 13 */ unsigned int mtd:1;
+ /* 14 */ unsigned int td:1;
+ /* 15 */ unsigned int zero:1;
+ /* 16 */ unsigned int shr_desc_len:6;
+ /* 22 */ unsigned int res2:1;
+ /* 23 */ unsigned int one:1;
+ /* 24 */ unsigned int dnr:1;
+ /* 25 */ unsigned int rsms:1;
+ /* 26 */ unsigned int res1:1;
+ /* 27 */ unsigned int ctype:5;
+ } jd;
+ } __rte_packed command;
+} __rte_packed;
+
+/* Union describing a KEY command in a descriptor.
+ */
+struct key_command_s {
+ union {
+ uint32_t word;
+ struct {
+ unsigned int ctype:5;
+ unsigned int cls:2;
+ unsigned int sgf:1;
+ unsigned int imm:1;
+ unsigned int enc:1;
+ unsigned int nwb:1;
+ unsigned int ekt:1;
+ unsigned int kdest:4;
+ unsigned int tk:1;
+ unsigned int rsvd1:5;
+ unsigned int length:10;
+ } __rte_packed field;
+ } __rte_packed command;
+} __rte_packed;
+
+/* Union describing a PROTOCOL command
+ * in a descriptor.
+ */
+struct protocol_operation_command_s {
+ union {
+ uint32_t word;
+ struct {
+ unsigned int ctype:5;
+ unsigned int optype:3;
+ unsigned char protid;
+ unsigned short protinfo;
+ } __rte_packed field;
+ } __rte_packed command;
+} __rte_packed;
+
+/* Union describing a SEQIN command in a
+ * descriptor.
+ */
+struct seq_in_command_s {
+ union {
+ uint32_t word;
+ struct {
+ unsigned int ctype:5;
+ unsigned int res1:1;
+ unsigned int inl:1;
+ unsigned int sgf:1;
+ unsigned int pre:1;
+ unsigned int ext:1;
+ unsigned int rto:1;
+ unsigned int rjd:1;
+ unsigned int res2:4;
+ unsigned int length:16;
+ } field;
+ } __rte_packed command;
+} __rte_packed;
+
+/* Union describing a SEQOUT command in a
+ * descriptor.
+ */
+struct seq_out_command_s {
+ union {
+ uint32_t word;
+ struct {
+ unsigned int ctype:5;
+ unsigned int res1:2;
+ unsigned int sgf:1;
+ unsigned int pre:1;
+ unsigned int ext:1;
+ unsigned int rto:1;
+ unsigned int res2:5;
+ unsigned int length:16;
+ } field;
+ } __rte_packed command;
+} __rte_packed;
+
+struct load_command_s {
+ union {
+ uint32_t word;
+ struct {
+ unsigned int ctype:5;
+ unsigned int class:2;
+ unsigned int sgf:1;
+ unsigned int imm:1;
+ unsigned int dst:7;
+ unsigned char offset;
+ unsigned char length;
+ } fields;
+ } __rte_packed command;
+} __rte_packed;
+
+/* Structure encompassing a general shared descriptor of maximum
+ * size (64 WORDs). Usually, other specific shared descriptor structures
+ * will be type-cast to this one.
+ */
+struct sec_sd_t {
+ uint32_t rsvd[MAX_DESC_SIZE_WORDS];
+} __attribute__((packed, aligned(64)));
+
+/* Structure encompassing a job descriptor which processes
+ * a single packet from a context. The job descriptor references
+ * a shared descriptor from a SEC context.
+ */
+struct sec_job_descriptor_t {
+ struct descriptor_header_s deschdr;
+ dma_addr_t sd_ptr;
+ struct seq_out_command_s seq_out;
+ dma_addr_t seq_out_ptr;
+ uint32_t out_ext_length;
+ struct seq_in_command_s seq_in;
+ dma_addr_t seq_in_ptr;
+ uint32_t in_ext_length;
+ struct load_command_s load_dpovrd;
+ uint32_t dpovrd;
+} __attribute__((packed, aligned(64)));
+
+#endif
diff --git a/drivers/crypto/caam_jr/caam_jr_hw.c b/drivers/crypto/caam_jr/caam_jr_hw.c
new file mode 100644
index 00000000..4a2b0899
--- /dev/null
+++ b/drivers/crypto/caam_jr/caam_jr_hw.c
@@ -0,0 +1,367 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017-2018 NXP
+ */
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <rte_common.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_crypto.h>
+#include <rte_security.h>
+
+#include <caam_jr_config.h>
+#include <caam_jr_hw_specific.h>
+#include <caam_jr_pvt.h>
+#include <caam_jr_log.h>
+
+/* RTA header files */
+#include <hw/desc/common.h>
+#include <hw/desc/algo.h>
+#include <hw/desc/ipsec.h>
+
+/* Used to retry resetting a job ring in SEC hardware. */
+#define SEC_TIMEOUT 100000
+
+/* @brief Process Jump Halt Condition related errors
+ *
+ * @param [in] error_code The error code in the descriptor status word
+ */
+static inline void
+hw_handle_jmp_halt_cond_err(union hw_error_code error_code)
+{
+ CAAM_JR_DEBUG("JMP: %d, Descriptor Index: 0x%x, Condition: 0x%x",
+ error_code.error_desc.jmp_halt_cond_src.jmp,
+ error_code.error_desc.jmp_halt_cond_src.desc_idx,
+ error_code.error_desc.jmp_halt_cond_src.cond);
+ (void)error_code;
+}
+
+/* @brief Process DECO related errors
+ *
+ * @param [in] error_code The error code in the descriptor status word
+ */
+static inline void
+hw_handle_deco_err(union hw_error_code error_code)
+{
+ CAAM_JR_DEBUG("JMP: %d, Descriptor Index: 0x%x",
+ error_code.error_desc.deco_src.jmp,
+ error_code.error_desc.deco_src.desc_idx);
+
+ switch (error_code.error_desc.deco_src.desc_err) {
+ case SEC_HW_ERR_DECO_HFN_THRESHOLD:
+ CAAM_JR_DEBUG(" Warning: Descriptor completed normally,"
+ "but 3GPP HFN matches or exceeds the Threshold ");
+ break;
+ default:
+ CAAM_JR_DEBUG("Error 0x%04x not implemented",
+ error_code.error_desc.deco_src.desc_err);
+ break;
+ }
+}
+
+/* @brief Process Jump Halt User Status related errors
+ *
+ * @param [in] error_code The error code in the descriptor status word
+ */
+static inline void
+hw_handle_jmp_halt_user_err(union hw_error_code error_code __rte_unused)
+{
+ CAAM_JR_DEBUG(" Not implemented");
+}
+
+/* @brief Process CCB related errors
+ *
+ * @param [in] error_code The error code in the descriptor status word
+ */
+static inline void
+hw_handle_ccb_err(union hw_error_code hw_error_code __rte_unused)
+{
+ CAAM_JR_DEBUG(" Not implemented");
+}
+
+/* @brief Process Job Ring related errors
+ *
+ * @param [in] error_code The error code in the descriptor status word
+ */
+static inline void
+hw_handle_jr_err(union hw_error_code hw_error_code __rte_unused)
+{
+ CAAM_JR_DEBUG(" Not implemented");
+}
+
+int
+hw_reset_job_ring(struct sec_job_ring_t *job_ring)
+{
+ int ret = 0;
+
+ ASSERT(job_ring->register_base_addr != NULL);
+
+ /* First reset the job ring in hw */
+ ret = hw_shutdown_job_ring(job_ring);
+ SEC_ASSERT(ret == 0, ret, "Failed resetting job ring in hardware");
+
+ /* In order to have the HW JR in a workable state
+ * after a reset, I need to re-write the input
+ * queue size, input start address, output queue
+ * size and output start address
+ */
+ /* Write the JR input queue size to the HW register */
+ hw_set_input_ring_size(job_ring, SEC_JOB_RING_SIZE);
+
+ /* Write the JR output queue size to the HW register */
+ hw_set_output_ring_size(job_ring, SEC_JOB_RING_SIZE);
+
+ /* Write the JR input queue start address */
+ hw_set_input_ring_start_addr(job_ring,
+ caam_jr_dma_vtop(job_ring->input_ring));
+ CAAM_JR_DEBUG(" Set input ring base address to : Virtual: 0x%" PRIx64
+ ",Physical: 0x%" PRIx64 ", Read from HW: 0x%" PRIx64,
+ (uint64_t)(uintptr_t)job_ring->input_ring,
+ caam_jr_dma_vtop(job_ring->input_ring),
+ hw_get_inp_queue_base(job_ring));
+
+ /* Write the JR output queue start address */
+ hw_set_output_ring_start_addr(job_ring,
+ caam_jr_dma_vtop(job_ring->output_ring));
+ CAAM_JR_DEBUG(" Set output ring base address to: Virtual: 0x%" PRIx64
+ ",Physical: 0x%" PRIx64 ", Read from HW: 0x%" PRIx64,
+ (uint64_t)(uintptr_t)job_ring->output_ring,
+ caam_jr_dma_vtop(job_ring->output_ring),
+ hw_get_out_queue_base(job_ring));
+ return ret;
+}
+
+int
+hw_shutdown_job_ring(struct sec_job_ring_t *job_ring)
+{
+ unsigned int timeout = SEC_TIMEOUT;
+ uint32_t tmp = 0;
+ int usleep_interval = 10;
+
+ if (job_ring->register_base_addr == NULL) {
+ CAAM_JR_ERR("Jr[%p] has reg base addr as NULL.driver not init",
+ job_ring);
+ return 0;
+ }
+
+ CAAM_JR_INFO("Resetting Job ring %p", job_ring);
+
+ /*
+ * Mask interrupts since we are going to poll
+ * for reset completion status
+ * Also, at POR, interrupts are ENABLED on a JR, thus
+ * this is the point where I can disable them without
+ * changing the code logic too much
+ */
+ caam_jr_disable_irqs(job_ring->irq_fd);
+
+ /* initiate flush (required prior to reset) */
+ SET_JR_REG(JRCR, job_ring, JR_REG_JRCR_VAL_RESET);
+
+ /* dummy read */
+ tmp = GET_JR_REG(JRCR, job_ring);
+
+ do {
+ tmp = GET_JR_REG(JRINT, job_ring);
+ usleep(usleep_interval);
+ } while (((tmp & JRINT_ERR_HALT_MASK) ==
+ JRINT_ERR_HALT_INPROGRESS) && --timeout);
+
+ CAAM_JR_INFO("JRINT is %x", tmp);
+ if ((tmp & JRINT_ERR_HALT_MASK) != JRINT_ERR_HALT_COMPLETE ||
+ timeout == 0) {
+ CAAM_JR_ERR("0x%x, %d", tmp, timeout);
+ /* unmask interrupts */
+ if (job_ring->jr_mode != SEC_NOTIFICATION_TYPE_POLL)
+ caam_jr_enable_irqs(job_ring->irq_fd);
+ return -1;
+ }
+
+ /* Initiate reset */
+ timeout = SEC_TIMEOUT;
+ SET_JR_REG(JRCR, job_ring, JR_REG_JRCR_VAL_RESET);
+
+ do {
+ tmp = GET_JR_REG(JRCR, job_ring);
+ usleep(usleep_interval);
+ } while ((tmp & JR_REG_JRCR_VAL_RESET) && --timeout);
+
+ CAAM_JR_DEBUG("JRCR is %x", tmp);
+ if (timeout == 0) {
+ CAAM_JR_ERR("Failed to reset hw job ring %p", job_ring);
+ /* unmask interrupts */
+ if (job_ring->jr_mode != SEC_NOTIFICATION_TYPE_POLL)
+ caam_jr_enable_irqs(job_ring->irq_fd);
+ return -1;
+ }
+ /* unmask interrupts */
+ if (job_ring->jr_mode != SEC_NOTIFICATION_TYPE_POLL)
+ caam_jr_enable_irqs(job_ring->irq_fd);
+ return 0;
+
+}
+
+void
+hw_handle_job_ring_error(struct sec_job_ring_t *job_ring __rte_unused,
+ uint32_t error_code)
+{
+ union hw_error_code hw_err_code;
+
+ hw_err_code.error = error_code;
+ switch (hw_err_code.error_desc.value.ssrc) {
+ case SEC_HW_ERR_SSRC_NO_SRC:
+ ASSERT(hw_err_code.error_desc.no_status_src.res == 0);
+ CAAM_JR_ERR("No Status Source ");
+ break;
+ case SEC_HW_ERR_SSRC_CCB_ERR:
+ CAAM_JR_ERR("CCB Status Source");
+ hw_handle_ccb_err(hw_err_code);
+ break;
+ case SEC_HW_ERR_SSRC_JMP_HALT_U:
+ CAAM_JR_ERR("Jump Halt User Status Source");
+ hw_handle_jmp_halt_user_err(hw_err_code);
+ break;
+ case SEC_HW_ERR_SSRC_DECO:
+ CAAM_JR_ERR("DECO Status Source");
+ hw_handle_deco_err(hw_err_code);
+ break;
+ case SEC_HW_ERR_SSRC_JR:
+ CAAM_JR_ERR("Job Ring Status Source");
+ hw_handle_jr_err(hw_err_code);
+ break;
+ case SEC_HW_ERR_SSRC_JMP_HALT_COND:
+ CAAM_JR_ERR("Jump Halt Condition Codes");
+ hw_handle_jmp_halt_cond_err(hw_err_code);
+ break;
+ default:
+ ASSERT(0);
+ CAAM_JR_ERR("Unknown SSRC");
+ break;
+ }
+}
+
+void
+hw_job_ring_error_print(struct sec_job_ring_t *job_ring, int code)
+{
+ switch (code) {
+ case JRINT_ERR_WRITE_STATUS:
+ CAAM_JR_ERR("Error writing status to Output Ring ");
+ break;
+ case JRINT_ERR_BAD_INPUT_BASE:
+ CAAM_JR_ERR(
+ "Bad Input Ring Base (%p) (not on a 4-byte boundary) ",
+ (void *)job_ring);
+ break;
+ case JRINT_ERR_BAD_OUTPUT_BASE:
+ CAAM_JR_ERR(
+ "Bad Output Ring Base (%p) (not on a 4-byte boundary) ",
+ (void *)job_ring);
+ break;
+ case JRINT_ERR_WRITE_2_IRBA:
+ CAAM_JR_ERR(
+ "Invalid write to Input Ring Base Address Register ");
+ break;
+ case JRINT_ERR_WRITE_2_ORBA:
+ CAAM_JR_ERR(
+ "Invalid write to Output Ring Base Address Register ");
+ break;
+ case JRINT_ERR_RES_B4_HALT:
+ CAAM_JR_ERR(
+ "Job Ring [%p] released before Job Ring is halted",
+ (void *)job_ring);
+ break;
+ case JRINT_ERR_REM_TOO_MANY:
+ CAAM_JR_ERR("Removed too many jobs from job ring [%p]",
+ (void *)job_ring);
+ break;
+ case JRINT_ERR_ADD_TOO_MANY:
+ CAAM_JR_ERR("Added too many jobs on job ring [%p]", job_ring);
+ break;
+ default:
+ CAAM_JR_ERR(" Unknown SEC JR Error :%d",
+ code);
+ break;
+ }
+}
+
+int
+hw_job_ring_set_coalescing_param(struct sec_job_ring_t *job_ring,
+ uint16_t irq_coalescing_timer,
+ uint8_t irq_coalescing_count)
+{
+ uint32_t reg_val = 0;
+
+ ASSERT(job_ring != NULL);
+ if (job_ring->register_base_addr == NULL) {
+ CAAM_JR_ERR("Jr[%p] has reg base addr as NULL.driver not init",
+ job_ring);
+ return -1;
+ }
+ /* Set descriptor count coalescing */
+ reg_val |= (irq_coalescing_count << JR_REG_JRCFG_LO_ICDCT_SHIFT);
+
+ /* Set coalescing timer value */
+ reg_val |= (irq_coalescing_timer << JR_REG_JRCFG_LO_ICTT_SHIFT);
+
+ /* Update parameters in HW */
+ SET_JR_REG_LO(JRCFG, job_ring, reg_val);
+ CAAM_JR_DEBUG("Set coalescing params on jr %p timer:%d, desc count: %d",
+ job_ring, irq_coalescing_timer, irq_coalescing_count);
+
+ return 0;
+}
+
+int
+hw_job_ring_enable_coalescing(struct sec_job_ring_t *job_ring)
+{
+ uint32_t reg_val = 0;
+
+ ASSERT(job_ring != NULL);
+ if (job_ring->register_base_addr == NULL) {
+ CAAM_JR_ERR("Jr[%p] has reg base addr as NULL.driver not init",
+ job_ring);
+ return -1;
+ }
+
+ /* Get the current value of the register */
+ reg_val = GET_JR_REG_LO(JRCFG, job_ring);
+
+ /* Enable coalescing */
+ reg_val |= JR_REG_JRCFG_LO_ICEN_EN;
+
+ /* Write in hw */
+ SET_JR_REG_LO(JRCFG, job_ring, reg_val);
+
+ CAAM_JR_DEBUG("Enabled coalescing on jr %p ",
+ job_ring);
+
+ return 0;
+}
+
+int
+hw_job_ring_disable_coalescing(struct sec_job_ring_t *job_ring)
+{
+ uint32_t reg_val = 0;
+
+ ASSERT(job_ring != NULL);
+
+ if (job_ring->register_base_addr == NULL) {
+ CAAM_JR_ERR("Jr[%p] has reg base addr as NULL.driver not init",
+ job_ring);
+ return -1;
+ }
+
+ /* Get the current value of the register */
+ reg_val = GET_JR_REG_LO(JRCFG, job_ring);
+
+ /* Disable coalescing */
+ reg_val &= ~JR_REG_JRCFG_LO_ICEN_EN;
+
+ /* Write in hw */
+ SET_JR_REG_LO(JRCFG, job_ring, reg_val);
+ CAAM_JR_DEBUG("Disabled coalescing on jr %p ", job_ring);
+
+ return 0;
+}
diff --git a/drivers/crypto/caam_jr/caam_jr_hw_specific.h b/drivers/crypto/caam_jr/caam_jr_hw_specific.h
new file mode 100644
index 00000000..5f58a585
--- /dev/null
+++ b/drivers/crypto/caam_jr/caam_jr_hw_specific.h
@@ -0,0 +1,503 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017 NXP
+ */
+
+#ifndef CAAM_JR_HW_SPECIFIC_H
+#define CAAM_JR_HW_SPECIFIC_H
+
+#include <caam_jr_config.h>
+
+/*
+ * Offset to the registers of a job ring.
+ * Is different for each job ring.
+ */
+#define CHAN_BASE(jr) ((size_t)(jr)->register_base_addr)
+
+#define SEC_JOB_RING_IS_FULL(pi, ci, ring_max_size, ring_threshold) \
+ ((((pi) + 1 + ((ring_max_size) - (ring_threshold))) & \
+ (ring_max_size - 1)) == ((ci)))
+
+#define SEC_CIRCULAR_COUNTER(x, max) (((x) + 1) & (max - 1))
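+
+/*
+ * Illustrative sketch (not part of this patch): because the ring size is a
+ * power of two, the full check and the index advance above reduce to mask
+ * operations. The producer/consumer index handling shown here is
+ * hypothetical.
+ */
+#if 0 /* example only */
+static inline int example_claim_input_slot(uint32_t *pidx, uint32_t cidx)
+{
+ if (SEC_JOB_RING_IS_FULL(*pidx, cidx, SEC_JOB_RING_SIZE,
+ SEC_JOB_RING_SIZE))
+ return -1; /* no free slot in the input ring */
+ *pidx = SEC_CIRCULAR_COUNTER(*pidx, SEC_JOB_RING_SIZE);
+ return 0;
+}
+#endif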
+
+/*
+ * Assert that cond is true. If !cond is true, display str and the vararg list
+ * in a printf-like syntax. also, if !cond is true, return altRet.
+ *
+ * \param cond A boolean expression to be asserted true
+ * \param altRet The value to be returned if cond doesn't hold true
+ * \param str A quoted char string
+ *
+ * E.g.:
+ * SEC_ASSERT(ret > 0, 0, "ERROR initializing app: code = %d\n", ret);
+ */
+#define SEC_ASSERT(cond, altRet, ...) do {\
+ if (unlikely(!(cond))) {\
+ CAAM_JR_ERR(__VA_ARGS__); \
+ return altRet; \
+ } \
+} while (0)
+
+#define SEC_DP_ASSERT(cond, altRet, ...) do {\
+ if (unlikely(!(cond))) {\
+ CAAM_JR_DP_ERR(__VA_ARGS__); \
+ return altRet; \
+ } \
+} while (0)
+
+#define ASSERT(x)
+
+/*
+ * Constants representing various job ring registers
+ */
+#if CAAM_BYTE_ORDER == __BIG_ENDIAN
+#define JR_REG_IRBA_OFFSET 0x0000
+#define JR_REG_IRBA_OFFSET_LO 0x0004
+#else
+#define JR_REG_IRBA_OFFSET 0x0004
+#define JR_REG_IRBA_OFFSET_LO 0x0000
+#endif
+
+#define JR_REG_IRSR_OFFSET 0x000C
+#define JR_REG_IRSA_OFFSET 0x0014
+#define JR_REG_IRJA_OFFSET 0x001C
+
+#if CAAM_BYTE_ORDER == __BIG_ENDIAN
+#define JR_REG_ORBA_OFFSET 0x0020
+#define JR_REG_ORBA_OFFSET_LO 0x0024
+#else
+#define JR_REG_ORBA_OFFSET 0x0024
+#define JR_REG_ORBA_OFFSET_LO 0x0020
+#endif
+
+#define JR_REG_ORSR_OFFSET 0x002C
+#define JR_REG_ORJR_OFFSET 0x0034
+#define JR_REG_ORSFR_OFFSET 0x003C
+#define JR_REG_JROSR_OFFSET 0x0044
+#define JR_REG_JRINT_OFFSET 0x004C
+
+#define JR_REG_JRCFG_OFFSET 0x0050
+#define JR_REG_JRCFG_OFFSET_LO 0x0054
+
+#define JR_REG_IRRI_OFFSET 0x005C
+#define JR_REG_ORWI_OFFSET 0x0064
+#define JR_REG_JRCR_OFFSET 0x006C
+
+/*
+ * Constants for error handling on job ring
+ */
+#define JR_REG_JRINT_ERR_TYPE_SHIFT 8
+#define JR_REG_JRINT_ERR_ORWI_SHIFT 16
+#define JR_REG_JRINIT_JRE_SHIFT 1
+
+#define JRINT_JRE (1 << JR_REG_JRINIT_JRE_SHIFT)
+#define JRINT_ERR_WRITE_STATUS (1 << JR_REG_JRINT_ERR_TYPE_SHIFT)
+#define JRINT_ERR_BAD_INPUT_BASE (3 << JR_REG_JRINT_ERR_TYPE_SHIFT)
+#define JRINT_ERR_BAD_OUTPUT_BASE (4 << JR_REG_JRINT_ERR_TYPE_SHIFT)
+#define JRINT_ERR_WRITE_2_IRBA (5 << JR_REG_JRINT_ERR_TYPE_SHIFT)
+#define JRINT_ERR_WRITE_2_ORBA (6 << JR_REG_JRINT_ERR_TYPE_SHIFT)
+#define JRINT_ERR_RES_B4_HALT (7 << JR_REG_JRINT_ERR_TYPE_SHIFT)
+#define JRINT_ERR_REM_TOO_MANY (8 << JR_REG_JRINT_ERR_TYPE_SHIFT)
+#define JRINT_ERR_ADD_TOO_MANY (9 << JR_REG_JRINT_ERR_TYPE_SHIFT)
+#define JRINT_ERR_HALT_MASK 0x0C
+#define JRINT_ERR_HALT_INPROGRESS 0x04
+#define JRINT_ERR_HALT_COMPLETE 0x08
+
+#define JR_REG_JRCR_VAL_RESET 0x00000001
+
+#define JR_REG_JRCFG_LO_ICTT_SHIFT 0x10
+#define JR_REG_JRCFG_LO_ICDCT_SHIFT 0x08
+#define JR_REG_JRCFG_LO_ICEN_EN 0x02
+
+/*
+ * Constants for Descriptor Processing errors
+ */
+#define SEC_HW_ERR_SSRC_NO_SRC 0x00
+#define SEC_HW_ERR_SSRC_CCB_ERR 0x02
+#define SEC_HW_ERR_SSRC_JMP_HALT_U 0x03
+#define SEC_HW_ERR_SSRC_DECO 0x04
+#define SEC_HW_ERR_SSRC_JR 0x06
+#define SEC_HW_ERR_SSRC_JMP_HALT_COND 0x07
+
+#define SEC_HW_ERR_DECO_HFN_THRESHOLD 0xF1
+#define SEC_HW_ERR_CCB_ICV_CHECK_FAIL 0x0A
+
+/*
+ * Constants for descriptors
+ */
+/* Return higher 32 bits of physical address */
+#define PHYS_ADDR_HI(phys_addr) \
+ (uint32_t)(((uint64_t)phys_addr) >> 32)
+
+/* Return lower 32 bits of physical address */
+#define PHYS_ADDR_LO(phys_addr) \
+ (uint32_t)(((uint64_t)phys_addr) & 0xFFFFFFFF)
+
+/*
+ * Macros for extracting error codes for the job ring
+ */
+#define JR_REG_JRINT_ERR_TYPE_EXTRACT(value) ((value) & 0x00000F00)
+#define JR_REG_JRINT_ERR_ORWI_EXTRACT(value) \
+ (((value) & 0x3FFF0000) >> JR_REG_JRINT_ERR_ORWI_SHIFT)
+#define JR_REG_JRINT_JRE_EXTRACT(value) ((value) & JRINT_JRE)
+
+/*
+ * Macros for managing the job ring
+ */
+/* Read pointer to job ring input ring start address */
+#if defined(RTE_ARCH_ARM64)
+#define hw_get_inp_queue_base(jr) ((((dma_addr_t)GET_JR_REG(IRBA, \
+ (jr))) << 32) | \
+ (GET_JR_REG_LO(IRBA, (jr))))
+
+/* Read pointer to job ring output ring start address */
+#define hw_get_out_queue_base(jr) (((dma_addr_t)(GET_JR_REG(ORBA, \
+ (jr))) << 32) | \
+ (GET_JR_REG_LO(ORBA, (jr))))
+#else
+#define hw_get_inp_queue_base(jr) ((dma_addr_t)(GET_JR_REG_LO(IRBA, (jr))))
+
+#define hw_get_out_queue_base(jr) ((dma_addr_t)(GET_JR_REG_LO(ORBA, (jr))))
+#endif
+
+/*
+ * IRJA - Input Ring Jobs Added Register shows
+ * how many new jobs were added to the Input Ring.
+ */
+#define hw_enqueue_desc_on_job_ring(job_ring) SET_JR_REG(IRJA, (job_ring), 1)
+
+#define hw_set_input_ring_size(job_ring, size) SET_JR_REG(IRSR, job_ring, \
+ (size))
+
+#define hw_set_output_ring_size(job_ring, size) SET_JR_REG(ORSR, job_ring, \
+ (size))
+
+#if defined(RTE_ARCH_ARM64)
+#define hw_set_input_ring_start_addr(job_ring, start_addr) \
+{ \
+ SET_JR_REG(IRBA, job_ring, PHYS_ADDR_HI(start_addr)); \
+ SET_JR_REG_LO(IRBA, job_ring, PHYS_ADDR_LO(start_addr));\
+}
+
+#define hw_set_output_ring_start_addr(job_ring, start_addr) \
+{ \
+ SET_JR_REG(ORBA, job_ring, PHYS_ADDR_HI(start_addr)); \
+ SET_JR_REG_LO(ORBA, job_ring, PHYS_ADDR_LO(start_addr));\
+}
+
+#else
+#define hw_set_input_ring_start_addr(job_ring, start_addr) \
+{ \
+ SET_JR_REG(IRBA, job_ring, 0); \
+ SET_JR_REG_LO(IRBA, job_ring, PHYS_ADDR_LO(start_addr));\
+}
+
+#define hw_set_output_ring_start_addr(job_ring, start_addr) \
+{ \
+ SET_JR_REG(ORBA, job_ring, 0); \
+ SET_JR_REG_LO(ORBA, job_ring, PHYS_ADDR_LO(start_addr));\
+}
+#endif
+
+/* ORJR - Output Ring Jobs Removed Register shows how many jobs were
+ * removed from the Output Ring for processing by software. This is done after
+ * the software has processed the entries.
+ */
+#define hw_remove_entries(jr, no_entries) SET_JR_REG(ORJR, (jr), (no_entries))
+
+/* IRSA - Input Ring Slots Available register holds the number of entries in
+ * the Job Ring's input ring. Once jobs are enqueued, the value returned
+ * is decremented by the hardware by the number of jobs added.
+ */
+#define hw_get_available_slots(jr) GET_JR_REG(IRSA, jr)
+
+/* ORSFR - Output Ring Slots Full register holds the number of jobs which were
+ * processed by the SEC and can be retrieved by the software. Once a job has
+ * been processed by software, the user will call hw_remove_entries() in order
+ * to notify the SEC that the entry was processed.
+ */
+#define hw_get_no_finished_jobs(jr) GET_JR_REG(ORSFR, jr)
+
+/*
+ * Macros for manipulating JR registers
+ */
+#if CORE_BYTE_ORDER == CAAM_BYTE_ORDER
+#define sec_read_32(addr) (*(volatile unsigned int *)(addr))
+#define sec_write_32(addr, val) (*(volatile unsigned int *)(addr) = (val))
+
+#else
+#define sec_read_32(addr) rte_bswap32((*(volatile unsigned int *)(addr)))
+#define sec_write_32(addr, val) \
+ (*(volatile unsigned int *)(addr) = rte_bswap32(val))
+#endif
+
+#if CAAM_BYTE_ORDER == __LITTLE_ENDIAN
+#define sec_read_64(addr) (((u64)sec_read_32((u32 *)(addr) + 1) << 32) | \
+ (sec_read_32((u32 *)(addr))))
+
+#define sec_write_64(addr, val) { \
+ sec_write_32((u32 *)(addr) + 1, (u32)((val) >> 32)); \
+ sec_write_32((u32 *)(addr), (u32)(val)); \
+}
+#else /* CAAM_BYTE_ORDER == __BIG_ENDIAN */
+#define sec_read_64(addr) (((u64)sec_read_32((u32 *)(addr)) << 32) | \
+ (sec_read_32((u32 *)(addr) + 1)))
+
+#define sec_write_64(addr, val) { \
+ sec_write_32((u32 *)(addr), (u32)((val) >> 32)); \
+ sec_write_32((u32 *)(addr) + 1, (u32)(val)); \
+}
+#endif
+
+#if defined(RTE_ARCH_ARM64)
+#define sec_read_addr(a) sec_read_64((a))
+#define sec_write_addr(a, v) sec_write_64((a), (v))
+#else
+#define sec_read_addr(a) sec_read_32((a))
+#define sec_write_addr(a, v) sec_write_32((a), (v))
+#endif
+
+#define JR_REG(name, jr) (CHAN_BASE(jr) + JR_REG_##name##_OFFSET)
+#define JR_REG_LO(name, jr) (CHAN_BASE(jr) + JR_REG_##name##_OFFSET_LO)
+
+#define GET_JR_REG(name, jr) (sec_read_32(JR_REG(name, (jr))))
+#define GET_JR_REG_LO(name, jr) (sec_read_32(JR_REG_LO(name, (jr))))
+
+#define SET_JR_REG(name, jr, value) \
+ (sec_write_32(JR_REG(name, (jr)), value))
+#define SET_JR_REG_LO(name, jr, value) \
+ (sec_write_32(JR_REG_LO(name, (jr)), value))
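+
+/*
+ * Illustrative sketch (not part of this patch): GET_JR_REG(IRSA, jr) expands
+ * to a 32 bit read from register_base_addr + JR_REG_IRSA_OFFSET, byte
+ * swapped when core and CAAM endianness differ; SET_JR_REG() is the matching
+ * write (hw_enqueue_desc_on_job_ring() above is exactly such a write).
+ */
+#if 0 /* example only */
+static inline void example_kick_one_job(struct sec_job_ring_t *job_ring)
+{
+ if (GET_JR_REG(IRSA, job_ring) > 0)
+ SET_JR_REG(IRJA, job_ring, 1); /* one more job added */
+}
+#endif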
+
+/* Lists the possible states for a job ring. */
+typedef enum sec_job_ring_state_e {
+ SEC_JOB_RING_STATE_STARTED, /* Job ring is initialized */
+ SEC_JOB_RING_STATE_RESET, /* Job ring reset is in progress */
+} sec_job_ring_state_t;
+
+/* code or cmd block to caam */
+struct sec_cdb {
+ struct {
+ union {
+ uint32_t word;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint16_t rsvd63_48;
+ unsigned int rsvd47_39:9;
+ unsigned int idlen:7;
+#else
+ unsigned int idlen:7;
+ unsigned int rsvd47_39:9;
+ uint16_t rsvd63_48;
+#endif
+ } field;
+ } __rte_packed hi;
+
+ union {
+ uint32_t word;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ unsigned int rsvd31_30:2;
+ unsigned int fsgt:1;
+ unsigned int lng:1;
+ unsigned int offset:2;
+ unsigned int abs:1;
+ unsigned int add_buf:1;
+ uint8_t pool_id;
+ uint16_t pool_buffer_size;
+#else
+ uint16_t pool_buffer_size;
+ uint8_t pool_id;
+ unsigned int add_buf:1;
+ unsigned int abs:1;
+ unsigned int offset:2;
+ unsigned int lng:1;
+ unsigned int fsgt:1;
+ unsigned int rsvd31_30:2;
+#endif
+ } field;
+ } __rte_packed lo;
+ } __rte_packed sh_hdr;
+
+ uint32_t sh_desc[SEC_JOB_DESCRIPTOR_SIZE];
+};
+
+struct caam_jr_qp {
+ struct sec_job_ring_t *ring;
+ uint64_t rx_pkts;
+ uint64_t rx_errs;
+ uint64_t rx_poll_err;
+ uint64_t tx_pkts;
+ uint64_t tx_errs;
+ uint64_t tx_ring_full;
+};
+
+struct sec_job_ring_t {
+ /* TODO: Add wrapper macro to make it obvious this is the consumer index
+ * on the output ring
+ */
+ uint32_t cidx; /* Consumer index for job ring (jobs array).
+ * @note: cidx and pidx are accessed from
+ * different threads. Place the cidx and pidx
+ * inside the structure so that they lay on
+ * different cachelines, to avoid false sharing
+ * between threads when the threads run on
+ * different cores!
+ */
+ /* TODO: Add wrapper macro to make it obvious this is the producer index
+ * on the input ring
+ */
+ uint32_t pidx; /* Producer index for job ring (jobs array) */
+
+ phys_addr_t *input_ring; /* Ring of input descriptors submitted to SEC.
+ * Size of array is power of 2 to allow fast
+ * update of producer/consumer indexes with
+ * bitwise operations.
+ */
+
+ struct sec_outring_entry *output_ring;
+ /* Ring of output descriptors received from SEC.
+ * Size of array is power of 2 to allow fast
+ * update of producer/consumer indexes with
+ * bitwise operations.
+ */
+
+ uint32_t irq_fd; /* The file descriptor used for polling from
+ * user space for interrupt notifications
+ */
+ uint32_t jr_mode; /* Model used by SEC Driver to receive
+ * notifications from SEC. Can be one
+ * of the three: #SEC_NOTIFICATION_TYPE_NAPI
+ * #SEC_NOTIFICATION_TYPE_IRQ or
+ * #SEC_NOTIFICATION_TYPE_POLL
+ */
+ uint32_t napi_mode; /* Job ring mode if NAPI mode is chosen
+ * Used only when jr_mode is set to
+ * #SEC_NOTIFICATION_TYPE_NAPI
+ */
+ void *register_base_addr; /* Base address for SEC's
+ * register memory for this job ring.
+ */
+ uint8_t coalescing_en; /* notifies if coalescing is
+ * enabled for the job ring
+ */
+ sec_job_ring_state_t jr_state; /* The state of this job ring */
+
+ struct rte_mempool *ctx_pool; /* per dev mempool for caam_jr_op_ctx */
+ unsigned int max_nb_queue_pairs;
+ unsigned int max_nb_sessions;
+ struct caam_jr_qp qps[RTE_CAAM_MAX_NB_SEC_QPS]; /* i/o queue for sec */
+};
+
+/* Union describing the possible error codes that
+ * can be set in the descriptor status word
+ */
+union hw_error_code {
+ uint32_t error;
+ union {
+ struct {
+ uint32_t ssrc:4;
+ uint32_t ssed_val:28;
+ } __rte_packed value;
+ struct {
+ uint32_t ssrc:4;
+ uint32_t res:28;
+ } __rte_packed no_status_src;
+ struct {
+ uint32_t ssrc:4;
+ uint32_t jmp:1;
+ uint32_t res:11;
+ uint32_t desc_idx:8;
+ uint32_t cha_id:4;
+ uint32_t err_id:4;
+ } __rte_packed ccb_status_src;
+ struct {
+ uint32_t ssrc:4;
+ uint32_t jmp:1;
+ uint32_t res:11;
+ uint32_t desc_idx:8;
+ uint32_t offset:8;
+ } __rte_packed jmp_halt_user_src;
+ struct {
+ uint32_t ssrc:4;
+ uint32_t jmp:1;
+ uint32_t res:11;
+ uint32_t desc_idx:8;
+ uint32_t desc_err:8;
+ } __rte_packed deco_src;
+ struct {
+ uint32_t ssrc:4;
+ uint32_t res:17;
+ uint32_t naddr:3;
+ uint32_t desc_err:8;
+ } __rte_packed jr_src;
+ struct {
+ uint32_t ssrc:4;
+ uint32_t jmp:1;
+ uint32_t res:11;
+ uint32_t desc_idx:8;
+ uint32_t cond:8;
+ } __rte_packed jmp_halt_cond_src;
+ } __rte_packed error_desc;
+} __rte_packed;
+
+/* @brief Initialize a job ring/channel in SEC device.
+ * Write configuration register/s to properly initialize a job ring.
+ *
+ * @param [in] job_ring The job ring
+ *
+ * @retval 0 for success
+ * @retval other for error
+ */
+int hw_reset_job_ring(struct sec_job_ring_t *job_ring);
+
+/* @brief Reset a job ring/channel in SEC device.
+ * Write configuration register/s to reset a job ring.
+ *
+ * @param [in] job_ring The job ring
+ *
+ * @retval 0 for success
+ * @retval -1 in case job ring reset failed
+ */
+int hw_shutdown_job_ring(struct sec_job_ring_t *job_ring);
+
+/* @brief Handle a job ring/channel error in SEC device.
+ * Identify the error type and clear error bits if required.
+ *
+ * @param [in] job_ring The job ring
+ * @param [in] sec_error_code The job ring's error code
+ */
+void hw_handle_job_ring_error(struct sec_job_ring_t *job_ring,
+ uint32_t sec_error_code);
+
+/* @brief Handle a job ring error in the device.
+ * Identify the error type and print an
+ * explanatory message.
+ *
+ * @param [in] job_ring The job ring
+ *
+ */
+void hw_job_ring_error_print(struct sec_job_ring_t *job_ring, int code);
+
+/* @brief Set interrupt coalescing parameters on the Job Ring.
+ * @param [in] job_ring The job ring
+ * @param [in] irq_coalescing_timer Interrupt coalescing timer threshold.
+ * This value determines the maximum
+ * amount of time after processing a
+ * descriptor before raising an interrupt.
+ * @param [in] irq_coalescing_count Interrupt coalescing descriptor count
+ * threshold.
+ */
+int hw_job_ring_set_coalescing_param(struct sec_job_ring_t *job_ring,
+ uint16_t irq_coalescing_timer,
+ uint8_t irq_coalescing_count);
+
+/* @brief Enable interrupt coalescing on a job ring
+ * @param [in] job_ring The job ring
+ */
+int hw_job_ring_enable_coalescing(struct sec_job_ring_t *job_ring);
+
+/* @brief Disable interrupt coalescing on a job ring
+ * @param [in] job_ring The job ring
+ */
+int hw_job_ring_disable_coalescing(struct sec_job_ring_t *job_ring);
+
+#endif /* CAAM_JR_HW_SPECIFIC_H */
diff --git a/drivers/crypto/caam_jr/caam_jr_log.h b/drivers/crypto/caam_jr/caam_jr_log.h
new file mode 100644
index 00000000..106ff07a
--- /dev/null
+++ b/drivers/crypto/caam_jr/caam_jr_log.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef _CAAM_JR_LOG_H_
+#define _CAAM_JR_LOG_H_
+
+#include <rte_log.h>
+
+extern int caam_jr_logtype;
+
+#define CAAM_JR_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, caam_jr_logtype, "caam_jr: " \
+ fmt "\n", ##args)
+
+#define CAAM_JR_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, caam_jr_logtype, "caam_jr: %s(): " \
+ fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() CAAM_JR_DEBUG(" >>")
+
+#define CAAM_JR_INFO(fmt, args...) \
+ CAAM_JR_LOG(INFO, fmt, ## args)
+#define CAAM_JR_ERR(fmt, args...) \
+ CAAM_JR_LOG(ERR, fmt, ## args)
+#define CAAM_JR_WARN(fmt, args...) \
+ CAAM_JR_LOG(WARNING, fmt, ## args)
+
+/* DP Logs, toggled out at compile time if level lower than current level */
+#define CAAM_JR_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt "\n", ## args)
+
+#define CAAM_JR_DP_DEBUG(fmt, args...) \
+ CAAM_JR_DP_LOG(DEBUG, fmt, ## args)
+#define CAAM_JR_DP_INFO(fmt, args...) \
+ CAAM_JR_DP_LOG(INFO, fmt, ## args)
+#define CAAM_JR_DP_WARN(fmt, args...) \
+ CAAM_JR_DP_LOG(WARNING, fmt, ## args)
+#define CAAM_JR_DP_ERR(fmt, args...) \
+ CAAM_JR_DP_LOG(ERR, fmt, ## args)
+
+#endif /* _CAAM_JR_LOG_H_ */
diff --git a/drivers/crypto/caam_jr/caam_jr_pvt.h b/drivers/crypto/caam_jr/caam_jr_pvt.h
new file mode 100644
index 00000000..9f1adabc
--- /dev/null
+++ b/drivers/crypto/caam_jr/caam_jr_pvt.h
@@ -0,0 +1,291 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef CAAM_JR_PVT_H
+#define CAAM_JR_PVT_H
+
+#include <hw/desc/ipsec.h>
+
+/* NXP CAAM JR PMD device name */
+
+#define CAAM_JR_ALG_UNSUPPORT (-1)
+
+/* Minimum job descriptor consists of a one-word job descriptor HEADER and
+ * a pointer to the shared descriptor.
+ */
+#define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ)
+#define CAAM_JOB_DESC_SIZE 13
+
+/* CTX_POOL_NUM_BUFS is set as per the ipsec-secgw application */
+#define CTX_POOL_NUM_BUFS 32000
+#define CTX_POOL_CACHE_SIZE 512
+
+#define DIR_ENC 1
+#define DIR_DEC 0
+
+#define JR_MAX_NB_MAX_DIGEST 32
+
+#define RTE_CAAM_JR_PMD_MAX_NB_SESSIONS 2048
+
+
+/* Return codes for SEC user space driver APIs */
+enum sec_return_code_e {
+ SEC_SUCCESS = 0, /* Operation executed successfully.*/
+ SEC_INVALID_INPUT_PARAM, /* API received an invalid input
+ * parameter
+ */
+ SEC_OUT_OF_MEMORY, /* Memory allocation failed. */
+ SEC_DESCRIPTOR_IN_FLIGHT, /* API function indicates there are
+ * descriptors in flight
+ * for SEC to process.
+ */
+ SEC_LAST_DESCRIPTOR_IN_FLIGHT, /* API function indicates there is one
+ * last descriptor in flight
+ * for SEC to process.
+ */
+ SEC_PROCESSING_ERROR, /* Indicates a SEC processing error
+ * occurred on a Job Ring which requires
+ * a SEC user space driver shutdown. Can
+ * be returned from sec_poll_job_ring().
+ * Then the only other API that can be
+ * called after this error is
+ * sec_release().
+ */
+ SEC_DESC_PROCESSING_ERROR, /* Indicates a SEC descriptor processing
+ * error occurred on a Job Ring. Can be
+ * returned from sec_poll_job_ring().
+ * The driver was able to reset job ring
+ * and job ring can be used like in a
+ * normal case.
+ */
+ SEC_JR_IS_FULL, /* Job Ring is full. There is no more
+ * room in the JR for new descriptors.
+ * This can happen if the descriptor RX
+ * rate is higher than SEC's capacity.
+ */
+ SEC_DRIVER_RELEASE_IN_PROGRESS, /* SEC driver shutdown is in progress,
+ * descriptor processing or polling is
+ * still allowed.
+ */
+ SEC_DRIVER_ALREADY_INITIALIZED, /* SEC driver is already initialized.*/
+ SEC_DRIVER_NOT_INITIALIZED, /* SEC driver is NOT initialized. */
+ SEC_JOB_RING_RESET_IN_PROGRESS, /* Job ring is resetting due to a
+ * per-descriptor SEC processing error
+ * ::SEC_DESC_PROCESSING_ERROR. Reset is
+ * finished when sec_poll_job_ring()
+ * returns. Then the job ring can be used
+ * again.
+ */
+ SEC_RESET_ENGINE_FAILED, /* Resetting of SEC Engine by SEC Kernel
+ * Driver Failed
+ */
+ SEC_ENABLE_IRQS_FAILED, /* Enabling of IRQs in SEC Kernel Driver
+ * Failed
+ */
+ SEC_DISABLE_IRQS_FAILED, /* Disabling of IRQs in SEC Kernel
+ * Driver Failed
+ */
+ /* END OF VALID VALUES */
+
+ SEC_RETURN_CODE_MAX_VALUE, /* Invalid value for return code. It is
+ * used to mark the end of the return
+ * code values. @note ALL new return
+ * code values MUST be added before
+ * ::SEC_RETURN_CODE_MAX_VALUE!
+ */
+};
+
+enum caam_jr_op_type {
+ CAAM_JR_NONE, /* No Cipher operations*/
+ CAAM_JR_CIPHER,/* CIPHER operations */
+ CAAM_JR_AUTH, /* Authentication Operations */
+ CAAM_JR_AEAD, /* Authenticated Encryption with associated data */
+ CAAM_JR_IPSEC, /* IPSEC protocol operations*/
+ CAAM_JR_PDCP, /* PDCP protocol operations*/
+ CAAM_JR_PKC, /* Public Key Cryptographic Operations */
+ CAAM_JR_MAX
+};
+
+struct caam_jr_session {
+ uint8_t dir; /* Operation Direction */
+ enum rte_crypto_cipher_algorithm cipher_alg; /* Cipher Algorithm*/
+ enum rte_crypto_auth_algorithm auth_alg; /* Authentication Algorithm*/
+ enum rte_crypto_aead_algorithm aead_alg; /* AEAD Algorithm*/
+ enum rte_security_session_protocol proto_alg; /* Security Algorithm*/
+ union {
+ struct {
+ uint8_t *data; /* pointer to key data */
+ size_t length; /* key length in bytes */
+ } aead_key;
+ struct {
+ struct {
+ uint8_t *data; /* pointer to key data */
+ size_t length; /* key length in bytes */
+ } cipher_key;
+ struct {
+ uint8_t *data; /* pointer to key data */
+ size_t length; /* key length in bytes */
+ } auth_key;
+ };
+ };
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv; /* Initialisation vector parameters */
+ uint16_t auth_only_len; /* Length of data for Auth only */
+ uint32_t digest_length;
+ struct ipsec_encap_pdb encap_pdb;
+ struct ip ip4_hdr;
+ struct ipsec_decap_pdb decap_pdb;
+ struct caam_jr_qp *qp;
+ struct sec_cdb *cdb; /* cmd block associated with qp */
+ struct rte_mempool *ctx_pool; /* session mempool for caam_jr_op_ctx */
+};
+
+/*
+ * 16-byte hardware scatter/gather table
+ */
+
+#define SEC4_SG_LEN_EXT 0x80000000 /* Entry points to table */
+#define SEC4_SG_LEN_FIN 0x40000000 /* Last ent in table */
+#define SEC4_SG_BPID_MASK 0x000000ff
+#define SEC4_SG_BPID_SHIFT 16
+#define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */
+#define SEC4_SG_OFFSET_MASK 0x00001fff
+
+struct sec4_sg_entry {
+ uint64_t ptr;
+ uint32_t len;
+ uint32_t bpid_offset;
+};
+
+#define MAX_SG_ENTRIES 16
+#define SG_CACHELINE_0 0
+#define SG_CACHELINE_1 4
+#define SG_CACHELINE_2 8
+#define SG_CACHELINE_3 12
+
+/* Structure encompassing a job descriptor which is to be processed
+ * by SEC. User should also initialise this structure with the callback
+ * function pointer which will be called by the driver after receiving a processed
+ * descriptor from SEC. User data is also passed in this data structure which
+ * will be sent as an argument to the user callback function.
+ */
+struct job_descriptor {
+ uint32_t desc[CAAM_JOB_DESC_SIZE];
+};
+
+struct caam_jr_op_ctx {
+ struct job_descriptor jobdes;
+ /* sg[0] output, sg[1] input, others are possible sub frames */
+ struct sec4_sg_entry sg[MAX_SG_ENTRIES];
+ struct rte_crypto_op *op;
+ struct rte_mempool *ctx_pool; /* mempool pointer for caam_jr_op_ctx */
+ int64_t vtop_offset;
+ uint8_t digest[JR_MAX_NB_MAX_DIGEST];
+};
+
+/**
+ * Checksum
+ *
+ * @param buffer buffer over which to calculate the checksum
+ * @param len buffer length
+ *
+ * @return checksum value in host cpu order
+ */
+static inline uint16_t
+calc_chksum(void *buffer, int len)
+{
+ uint16_t *buf = (uint16_t *)buffer;
+ uint32_t sum = 0;
+ uint16_t result;
+
+ for (sum = 0; len > 1; len -= 2)
+ sum += *buf++;
+
+ if (len == 1)
+ sum += *(unsigned char *)buf;
+
+ sum = (sum >> 16) + (sum & 0xFFFF);
+ sum += (sum >> 16);
+ result = ~sum;
+
+ return result;
+}
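+
+/*
+ * Illustrative sketch (not part of this patch): a typical use is filling in
+ * the checksum field of a template IPv4 header before it is attached to a
+ * protocol descriptor; the function name is hypothetical.
+ */
+#if 0 /* example only */
+static inline void example_fill_ip4_checksum(struct ip *ip4_hdr)
+{
+ ip4_hdr->ip_sum = 0;
+ ip4_hdr->ip_sum = calc_chksum(ip4_hdr, sizeof(struct ip));
+}
+#endif
+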
+struct uio_job_ring {
+ uint32_t jr_id;
+ uint32_t uio_fd;
+ void *register_base_addr;
+ int map_size;
+ int uio_minor_number;
+};
+
+int sec_cleanup(void);
+int sec_configure(void);
+struct uio_job_ring *config_job_ring(void);
+void free_job_ring(uint32_t uio_fd);
+
+/* For DMA memory allocation of specified length and alignment */
+static inline void *
+caam_jr_dma_mem_alloc(size_t align, size_t len)
+{
+ return rte_malloc("mem_alloc", len, align);
+}
+
+/* For freeing dma memory */
+static inline void
+caam_jr_dma_free(void *ptr)
+{
+ rte_free(ptr);
+}
+
+static inline rte_iova_t
+caam_jr_mem_vtop(void *vaddr)
+{
+ const struct rte_memseg *ms;
+
+ ms = rte_mem_virt2memseg(vaddr, NULL);
+ if (ms)
+ return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
+ return (size_t)NULL;
+}
+
+static inline void *
+caam_jr_dma_ptov(rte_iova_t paddr)
+{
+ return rte_mem_iova2virt(paddr);
+}
+
+/* Virtual to physical address conversion */
+static inline rte_iova_t caam_jr_dma_vtop(void *ptr)
+{
+ return caam_jr_mem_vtop(ptr);
+}
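+
+/*
+ * Illustrative sketch (not part of this patch): SEC only understands
+ * physical (IOVA) addresses, so a buffer allocated with the helper above is
+ * translated with caam_jr_dma_vtop() before being programmed into a
+ * descriptor. SEC_CRYPTO_DESCRIPTOR_SIZE comes from caam_jr_config.h; the
+ * function name is hypothetical.
+ */
+#if 0 /* example only */
+static inline rte_iova_t example_alloc_shared_desc(void **vaddr)
+{
+ *vaddr = caam_jr_dma_mem_alloc(64, SEC_CRYPTO_DESCRIPTOR_SIZE);
+ return caam_jr_dma_vtop(*vaddr);
+}
+#endif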
+
+/** @brief Request to SEC kernel driver to enable interrupts for
+ * descriptor finished processing
+ * Use UIO to communicate with SEC kernel driver: write command
+ * value that indicates an IRQ enable action into UIO file descriptor
+ * of this job ring.
+ *
+ * @param [in] uio_fd Job Ring UIO File descriptor
+ * @retval 0 for success
+ * @retval -1 value for error
+ */
+uint32_t caam_jr_enable_irqs(uint32_t uio_fd);
+
+/** @brief Request to SEC kernel driver to disable interrupts for descriptor
+ * finished processing
+ * Use UIO to communicate with SEC kernel driver: write command
+ * value that indicates an IRQ disable action into UIO file descriptor
+ * of this job ring.
+ *
+ * @param [in] uio_fd UIO File descriptor
+ * @retval 0 for success
+ * @retval -1 value for error
+ *
+ */
+uint32_t caam_jr_disable_irqs(uint32_t uio_fd);
+
+#endif
diff --git a/drivers/crypto/caam_jr/caam_jr_uio.c b/drivers/crypto/caam_jr/caam_jr_uio.c
new file mode 100644
index 00000000..c07d9db0
--- /dev/null
+++ b/drivers/crypto/caam_jr/caam_jr_uio.c
@@ -0,0 +1,501 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2017-2018 NXP
+ */
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <dirent.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <errno.h>
+#include <fcntl.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_crypto.h>
+#include <rte_security.h>
+
+#include <caam_jr_config.h>
+#include <caam_jr_hw_specific.h>
+#include <caam_jr_pvt.h>
+#include <caam_jr_log.h>
+
+/* RTA header files */
+#include <hw/desc/common.h>
+#include <hw/desc/algo.h>
+#include <hw/desc/ipsec.h>
+
+/* Prefix path to sysfs directory where UIO device attributes are exported.
+ * Path for UIO device X is /sys/class/uio/uioX
+ */
+#define SEC_UIO_DEVICE_SYS_ATTR_PATH "/sys/class/uio"
+
+/* Subfolder in sysfs where mapping attributes are exported
+ * for each UIO device. Path for mapping Y for device X is:
+ * /sys/class/uio/uioX/maps/mapY
+ */
+#define SEC_UIO_DEVICE_SYS_MAP_ATTR "maps/map"
+
+/* Name of UIO device file prefix. Each UIO device will have a device file
+ * /dev/uioX, where X is the minor device number.
+ */
+#define SEC_UIO_DEVICE_FILE_NAME "/dev/uio"
+
+/*
+ * Name of UIO device. Each user space SEC job ring will have a corresponding
+ * UIO device with the name sec-channelX, where X is the job ring id.
+ * Maximum length is #SEC_UIO_MAX_DEVICE_NAME_LENGTH.
+ *
+ * @note Must be kept in synch with SEC kernel driver
+ * define #SEC_UIO_DEVICE_NAME !
+ */
+#define SEC_UIO_DEVICE_NAME "fsl-jr"
+
+/* Maximum length for the name of an UIO device file.
+ * Device file name format is: /dev/uioX.
+ */
+#define SEC_UIO_MAX_DEVICE_FILE_NAME_LENGTH 30
+
+/* Maximum length for the name of an attribute file for an UIO device.
+ * Attribute files are exported in sysfs and have the name formatted as:
+ * /sys/class/uio/uioX/<attribute_file_name>
+ */
+#define SEC_UIO_MAX_ATTR_FILE_NAME 100
+
+/* Command that is used by SEC user space driver and SEC kernel driver
+ * to signal a request from the former to the later to disable job DONE
+ * and error IRQs on a certain job ring.
+ * The configuration is done at SEC Controller's level.
+ * @note Need to be kept in synch with #SEC_UIO_DISABLE_IRQ_CMD from
+ * linux/drivers/crypto/talitos.c !
+ */
+#define SEC_UIO_DISABLE_IRQ_CMD 0
+
+/* Command that is used by SEC user space driver and SEC kernel driver
+ * to signal a request from the former to the later to enable job DONE
+ * and error IRQs on a certain job ring.
+ * The configuration is done at SEC Controller's level.
+ * @note Need to be kept in synch with #SEC_UIO_ENABLE_IRQ_CMD from
+ * linux/drivers/crypto/talitos.c !
+ */
+#define SEC_UIO_ENABLE_IRQ_CMD 1
+
+/** Command that is used by SEC user space driver and SEC kernel driver
+ * to signal a request from the former to the later to do a SEC engine reset.
+ * @note Need to be kept in synch with #SEC_UIO_RESET_SEC_ENGINE_CMD from
+ * linux/drivers/crypto/talitos.c !
+ */
+#define SEC_UIO_RESET_SEC_ENGINE_CMD 3
+
+/* The id for the mapping used to export SEC's registers to
+ * user space through UIO devices.
+ */
+#define SEC_UIO_MAP_ID 0
+
+static struct uio_job_ring g_uio_job_ring[MAX_SEC_JOB_RINGS];
+static int g_uio_jr_num;
+
+/** @brief Checks if a file name contains a certain substring.
+ * If so, it extracts the number following the substring.
+ * This function assumes a filename format of: [text][number].
+ * @param [in] filename File name
+ * @param [in] match String to match in file name
+ * @param [out] number The number extracted from filename
+ *
+ * @retval true if file name matches the criteria
+ * @retval false if file name does not match the criteria
+ */
+static bool
+file_name_match_extract(const char filename[], const char match[], int *number)
+{
+ char *substr = NULL;
+
+ substr = strstr(filename, match);
+ if (substr == NULL)
+ return false;
+
+ /* substring <match> was found in <filename>;
+ * read the number following the <match> substring in <filename>
+ */
+ if (sscanf(filename + strlen(match), "%d", number) <= 0)
+ return false;
+
+ return true;
+}
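+
+/* For example (illustrative): file_name_match_extract("uio12", "uio", &n)
+ * returns true and sets n to 12, while a name that does not contain "uio"
+ * (or carries no trailing number) returns false and leaves n untouched.
+ */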
+
+/** @brief Reads first line from a file.
+ * Composes file name as: root/subdir/filename
+ *
+ * @param [in] root Root path
+ * @param [in] subdir Subdirectory name
+ * @param [in] filename File name
+ * @param [out] line The first line read from file.
+ *
+ * @retval 0 for success
+ * @retval other value for error
+ */
+static int
+file_read_first_line(const char root[], const char subdir[],
+ const char filename[], char *line)
+{
+ char absolute_file_name[SEC_UIO_MAX_ATTR_FILE_NAME];
+ int fd = 0, ret = 0;
+
+ /*compose the file name: root/subdir/filename */
+ memset(absolute_file_name, 0, sizeof(absolute_file_name));
+ snprintf(absolute_file_name, SEC_UIO_MAX_ATTR_FILE_NAME,
+ "%s/%s/%s", root, subdir, filename);
+
+ fd = open(absolute_file_name, O_RDONLY);
+ SEC_ASSERT(fd > 0, fd, "Error opening file %s",
+ absolute_file_name);
+
+ /* read UIO device name from first line in file */
+ ret = read(fd, line, SEC_UIO_MAX_DEVICE_FILE_NAME_LENGTH);
+ close(fd);
+
+ /* NUL-terminate the string */
+ line[SEC_UIO_MAX_DEVICE_FILE_NAME_LENGTH - 1] = '\0';
+
+ if (ret <= 0) {
+ CAAM_JR_ERR("Error reading from file %s", absolute_file_name);
+ return ret;
+ }
+
+ return 0;
+}
+
+/** @brief Uses UIO control to send commands to SEC kernel driver.
+ * The mechanism is to write a command word into the file descriptor
+ * that the user-space driver obtained for each user-space SEC job ring.
+ * Both user-space driver and kernel driver must have the same understanding
+ * about the command codes.
+ *
+ * @param [in] uio_fd The UIO file descriptor
+ * @param [in] uio_command Command word
+ *
+ * @retval Result of write operation on the job ring's UIO file descriptor.
+ * Should be sizeof(int) for successful operations.
+ * Other values can be returned and used, if desired to add special
+ * meaning to return values, but this has to be programmed in SEC
+ * kernel driver as well. No special return values are used.
+ */
+static int
+sec_uio_send_command(uint32_t uio_fd, int32_t uio_command)
+{
+ int ret;
+
+ /* Use UIO file descriptor we have for this job ring.
+ * Writing a command code to this file descriptor will make the
+ * SEC kernel driver execute the desired command.
+ */
+ ret = write(uio_fd, &uio_command, sizeof(int));
+ return ret;
+}
+
+/** @brief Request to SEC kernel driver to enable interrupts for
+ * descriptor finished processing
+ * Use UIO to communicate with SEC kernel driver: write command
+ * value that indicates an IRQ enable action into UIO file descriptor
+ * of this job ring.
+ *
+ * @param [in] uio_fd Job Ring UIO File descriptor
+ * @retval 0 for success
+ * @retval -1 value for error
+ */
+uint32_t
+caam_jr_enable_irqs(uint32_t uio_fd)
+{
+ int ret;
+
+ /* Use UIO file descriptor we have for this job ring.
+ * Writing a command code to this file descriptor will make the
+ * SEC kernel driver enable DONE and Error IRQs for this job ring,
+ * at Controller level.
+ */
+ ret = sec_uio_send_command(uio_fd, SEC_UIO_ENABLE_IRQ_CMD);
+ SEC_ASSERT(ret == sizeof(int), -1,
+ "Failed to request SEC engine to enable job done and "
+ "error IRQs through UIO control. UIO FD %d. Reset SEC driver!",
+ uio_fd);
+ CAAM_JR_DEBUG("Enabled IRQs on jr with uio_fd %d", uio_fd);
+ return 0;
+}
+
+
+/** @brief Request to SEC kernel driver to disable interrupts for descriptor
+ * finished processing
+ * Use UIO to communicate with SEC kernel driver: write command
+ * value that indicates an IRQ disable action into UIO file descriptor
+ * of this job ring.
+ *
+ * @param [in] uio_fd UIO File descriptor
+ * @retval 0 for success
+ * @retval -1 value for error
+ *
+ */
+uint32_t
+caam_jr_disable_irqs(uint32_t uio_fd)
+{
+ int ret;
+
+ /* Use UIO file descriptor we have for this job ring.
+ * Writing a command code to this file descriptor will make the
+ * SEC kernel driver disable IRQs for this job ring,
+ * at Controller level.
+ */
+
+ ret = sec_uio_send_command(uio_fd, SEC_UIO_DISABLE_IRQ_CMD);
+ SEC_ASSERT(ret == sizeof(int), -1,
+ "Failed to request SEC engine to disable job done and "
+ "IRQs through UIO control. UIO_FD %d Reset SEC driver!",
+ uio_fd);
+ CAAM_JR_DEBUG("Disabled IRQs on jr with uio_fd %d", uio_fd);
+ return 0;
+}
+
+/** @brief Maps register range assigned for a job ring.
+ *
+ * @param [in] uio_device_fd UIO device file descriptor
+ * @param [in] uio_device_id UIO device id
+ * @param [in] uio_map_id UIO allows a maximum of 5 different mappings for
+ *		each device. Maps start with id 0.
+ * @param [out] map_size Map size.
+ * @retval NULL if failed to map registers
+ * @retval Virtual address for mapped register address range
+ */
+static void *
+uio_map_registers(int uio_device_fd, int uio_device_id,
+ int uio_map_id, int *map_size)
+{
+ void *mapped_address = NULL;
+ unsigned int uio_map_size = 0;
+ char uio_sys_root[SEC_UIO_MAX_ATTR_FILE_NAME];
+ char uio_sys_map_subdir[SEC_UIO_MAX_ATTR_FILE_NAME];
+ char uio_map_size_str[32];
+ int ret = 0;
+
+ /* compose the file name: root/subdir/filename */
+ memset(uio_sys_root, 0, sizeof(uio_sys_root));
+ memset(uio_sys_map_subdir, 0, sizeof(uio_sys_map_subdir));
+ memset(uio_map_size_str, 0, sizeof(uio_map_size_str));
+
+ /* Compose string: /sys/class/uio/uioX */
+ sprintf(uio_sys_root, "%s/%s%d", SEC_UIO_DEVICE_SYS_ATTR_PATH,
+ "uio", uio_device_id);
+ /* Compose string: maps/mapY */
+ sprintf(uio_sys_map_subdir, "%s%d", SEC_UIO_DEVICE_SYS_MAP_ATTR,
+ uio_map_id);
+
+ /* Read first (and only) line from file
+ * /sys/class/uio/uioX/maps/mapY/size
+ */
+ ret = file_read_first_line(uio_sys_root, uio_sys_map_subdir,
+ "size", uio_map_size_str);
+ SEC_ASSERT(ret == 0, NULL, "file_read_first_line() failed");
+
+ /* Read mapping size, expressed in hex (base 16) */
+ uio_map_size = strtol(uio_map_size_str, NULL, 16);
+
+ /* Map the region in user space */
+ mapped_address = mmap(0, /*dynamically choose virtual address */
+ uio_map_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, uio_device_fd, 0);
+ /* offset = 0 because UIO device has only one mapping
+ * for the entire SEC register memory
+ */
+ if (mapped_address == MAP_FAILED) {
+ CAAM_JR_ERR(
+ "Failed to map registers! errno = %d job ring fd = %d,"
+ "uio device id = %d, uio map id = %d", errno,
+ uio_device_fd, uio_device_id, uio_map_id);
+ return NULL;
+ }
+
+ /*
+ * Save the map size to use it later on for munmap-ing.
+ */
+ *map_size = uio_map_size;
+
+ CAAM_JR_INFO("UIO dev[%d] mapped region [id =%d] size 0x%x at %p",
+ uio_device_id, uio_map_id, uio_map_size, mapped_address);
+
+ return mapped_address;
+}
+
+void
+free_job_ring(uint32_t uio_fd)
+{
+ struct uio_job_ring *job_ring = NULL;
+ int i;
+
+ if (!uio_fd)
+ return;
+
+ for (i = 0; i < MAX_SEC_JOB_RINGS; i++) {
+ if (g_uio_job_ring[i].uio_fd == uio_fd) {
+ job_ring = &g_uio_job_ring[i];
+ break;
+ }
+ }
+
+ if (job_ring == NULL) {
+ CAAM_JR_ERR("JR not available for fd = %x\n", uio_fd);
+ return;
+ }
+
+ /* Close device file */
+ CAAM_JR_INFO("Closed device file for job ring %d , fd = %d",
+ job_ring->jr_id, job_ring->uio_fd);
+ close(job_ring->uio_fd);
+ g_uio_jr_num--;
+ job_ring->uio_fd = 0;
+ if (job_ring->register_base_addr == NULL)
+ return;
+
+ /* Unmap the PCI memory resource of device */
+ if (munmap(job_ring->register_base_addr, job_ring->map_size)) {
+ CAAM_JR_INFO("cannot munmap(%p, 0x%lx): %s",
+ job_ring->register_base_addr,
+ (unsigned long)job_ring->map_size, strerror(errno));
+ } else
+ CAAM_JR_DEBUG(" JR UIO memory unmapped at %p",
+ job_ring->register_base_addr);
+ job_ring->register_base_addr = NULL;
+}
+
+struct uio_job_ring *
+config_job_ring(void)
+{
+ char uio_device_file_name[32];
+ struct uio_job_ring *job_ring = NULL;
+ int i;
+
+ for (i = 0; i < MAX_SEC_JOB_RINGS; i++) {
+ if (g_uio_job_ring[i].uio_fd == 0) {
+ job_ring = &g_uio_job_ring[i];
+ g_uio_jr_num++;
+ break;
+ }
+ }
+
+ if (job_ring == NULL) {
+ CAAM_JR_ERR("No free job ring\n");
+ return NULL;
+ }
+
+ /* Find UIO device created by SEC kernel driver for this job ring. */
+ memset(uio_device_file_name, 0, sizeof(uio_device_file_name));
+
+ sprintf(uio_device_file_name, "%s%d", SEC_UIO_DEVICE_FILE_NAME,
+ job_ring->uio_minor_number);
+
+ /* Open device file */
+ job_ring->uio_fd = open(uio_device_file_name, O_RDWR);
+ SEC_ASSERT(job_ring->uio_fd > 0, NULL,
+ "Failed to open UIO device file for job ring %d",
+ job_ring->jr_id);
+
+ CAAM_JR_INFO("Open device(%s) file for job ring=%d , uio_fd = %d",
+ uio_device_file_name, job_ring->jr_id, job_ring->uio_fd);
+
+ ASSERT(job_ring->register_base_addr == NULL);
+ job_ring->register_base_addr = uio_map_registers(
+ job_ring->uio_fd, job_ring->uio_minor_number,
+ SEC_UIO_MAP_ID, &job_ring->map_size);
+
+ SEC_ASSERT(job_ring->register_base_addr != NULL, NULL,
+ "Failed to map SEC registers");
+ return job_ring;
+}
+
+int
+sec_configure(void)
+{
+ char uio_name[32];
+ int config_jr_no = 0, jr_id = -1;
+ int uio_minor_number = -1;
+ int ret;
+ DIR *d = NULL;
+ struct dirent *dir;
+
+ d = opendir(SEC_UIO_DEVICE_SYS_ATTR_PATH);
+ if (d == NULL) {
+ printf("\nError opening directory '%s': %s\n",
+ SEC_UIO_DEVICE_SYS_ATTR_PATH, strerror(errno));
+ return -1;
+ }
+
+ /* Iterate through all subdirs */
+ while ((dir = readdir(d)) != NULL) {
+ if (!strncmp(dir->d_name, ".", 1) ||
+ !strncmp(dir->d_name, "..", 2))
+ continue;
+
+ if (file_name_match_extract
+ (dir->d_name, "uio", &uio_minor_number)) {
+ /*
+ * Open file uioX/name and read first line which contains
+ * the name for the device. Based on the name check if this
+ * UIO device is UIO device for job ring with id jr_id.
+ */
+ memset(uio_name, 0, sizeof(uio_name));
+ ret = file_read_first_line(SEC_UIO_DEVICE_SYS_ATTR_PATH,
+ dir->d_name, "name", uio_name);
+ CAAM_JR_INFO("sec device uio name: %s", uio_name);
+ SEC_ASSERT(ret == 0, -1, "file_read_first_line failed");
+
+ if (file_name_match_extract(uio_name,
+ SEC_UIO_DEVICE_NAME,
+ &jr_id)) {
+ g_uio_job_ring[config_jr_no].jr_id = jr_id;
+ g_uio_job_ring[config_jr_no].uio_minor_number =
+ uio_minor_number;
+ CAAM_JR_INFO("Detected logical JRID:%d", jr_id);
+ config_jr_no++;
+
+ /* todo find the actual ring id
+ * OF_FULLNAME=/soc/crypto@1700000/jr@20000
+ */
+ }
+ }
+ }
+ closedir(d);
+
+ if (config_jr_no == 0) {
+ CAAM_JR_ERR("! No SEC Job Rings assigned for userspace usage!");
+ return 0;
+ }
+ CAAM_JR_INFO("Total JR detected =%d", config_jr_no);
+ return config_jr_no;
+}
+
+int
+sec_cleanup(void)
+{
+ int i;
+ struct uio_job_ring *job_ring;
+
+ for (i = 0; i < g_uio_jr_num; i++) {
+ job_ring = &g_uio_job_ring[i];
+ /* munmap SEC's register memory */
+ if (job_ring->register_base_addr) {
+ munmap(job_ring->register_base_addr,
+ job_ring->map_size);
+ job_ring->register_base_addr = NULL;
+ }
+ /* The fd is closed only after shutdown, since UIO
+ * commands must be sent through this fd.
+ */
+ if (job_ring->uio_fd != 0) {
+ CAAM_JR_INFO(
+ "Closed device file for job ring %d , fd = %d",
+ job_ring->jr_id, job_ring->uio_fd);
+ close(job_ring->uio_fd);
+ }
+ }
+ return 0;
+}
diff --git a/drivers/crypto/caam_jr/meson.build b/drivers/crypto/caam_jr/meson.build
new file mode 100644
index 00000000..99b71aef
--- /dev/null
+++ b/drivers/crypto/caam_jr/meson.build
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['bus_vdev', 'bus_dpaa', 'security']
+sources = files('caam_jr_capabilities.c',
+ 'caam_jr_hw.c',
+ 'caam_jr_uio.c',
+ 'caam_jr.c')
+
+allow_experimental_apis = true
+
+includes += include_directories('../dpaa2_sec/')
+includes += include_directories('../../bus/dpaa/include/')
diff --git a/drivers/crypto/caam_jr/rte_pmd_caam_jr_version.map b/drivers/crypto/caam_jr/rte_pmd_caam_jr_version.map
new file mode 100644
index 00000000..521e51f4
--- /dev/null
+++ b/drivers/crypto/caam_jr/rte_pmd_caam_jr_version.map
@@ -0,0 +1,4 @@
+DPDK_18.11 {
+
+ local: *;
+};
diff --git a/drivers/crypto/dpaa2_sec/Makefile b/drivers/crypto/dpaa2_sec/Makefile
index da3d8f84..f537f76a 100644
--- a/drivers/crypto/dpaa2_sec/Makefile
+++ b/drivers/crypto/dpaa2_sec/Makefile
@@ -4,13 +4,6 @@
#
include $(RTE_SDK)/mk/rte.vars.mk
-
-ifneq ($(MAKECMDGOALS),clean)
-ifneq ($(CONFIG_RTE_LIBRTE_SECURITY),y)
-$(error "RTE_LIBRTE_SECURITY is required to build RTE_LIBRTE_PMD_DPAA2_SEC")
-endif
-endif
-
#
# library name
#
@@ -20,7 +13,6 @@ LIB = librte_pmd_dpaa2_sec.a
CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-CFLAGS += -D _GNU_SOURCE
ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
ifeq ($(shell test $(GCC_VERSION) -gt 70 && echo 1), 1)
@@ -41,7 +33,7 @@ CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
EXPORT_MAP := rte_pmd_dpaa2_sec_version.map
# library version
-LIBABIVER := 1
+LIBABIVER := 2
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec_dpseci.c
@@ -51,5 +43,6 @@ LDLIBS += -lrte_bus_fslmc
LDLIBS += -lrte_mempool_dpaa2
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_common_dpaax
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 2a3c61c6..6095c602 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP
+ * Copyright 2016-2018 NXP
*
*/
@@ -10,7 +10,6 @@
#include <rte_mbuf.h>
#include <rte_cryptodev.h>
-#include <rte_security_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
@@ -24,10 +23,12 @@
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
+#include <fsl_dpopr.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>
#include "dpaa2_sec_priv.h"
+#include "dpaa2_sec_event.h"
#include "dpaa2_sec_logs.h"
/* Required types */
@@ -35,6 +36,7 @@ typedef uint64_t dma_addr_t;
/* RTA header files */
#include <hw/desc/ipsec.h>
+#include <hw/desc/pdcp.h>
#include <hw/desc/algo.h>
/* Minimum job descriptor consists of a oneword job descriptor HEADER and
@@ -62,11 +64,87 @@ static uint8_t cryptodev_driver_id;
int dpaa2_logtype_sec;
static inline int
+build_proto_compound_fd(dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct qbman_fle *fle, *ip_fle, *op_fle;
+ struct sec_flow_context *flc;
+ struct rte_mbuf *src_mbuf = sym_op->m_src;
+ struct rte_mbuf *dst_mbuf = sym_op->m_dst;
+ int retval;
+
+ if (!dst_mbuf)
+ dst_mbuf = src_mbuf;
+
+ /* Save the shared descriptor */
+ flc = &priv->flc_desc[0].flc;
+
+ /* we are using the first FLE entry to store Mbuf */
+ retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ if (retval) {
+ DPAA2_SEC_ERR("Memory alloc failed");
+ return -1;
+ }
+ memset(fle, 0, FLE_POOL_BUF_SIZE);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+ op_fle = fle + 1;
+ ip_fle = fle + 2;
+
+ if (likely(bpid < MAX_BPID)) {
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FLE_BPID(op_fle, bpid);
+ DPAA2_SET_FLE_BPID(ip_fle, bpid);
+ } else {
+ DPAA2_SET_FD_IVP(fd);
+ DPAA2_SET_FLE_IVP(op_fle);
+ DPAA2_SET_FLE_IVP(ip_fle);
+ }
+
+ /* Configure FD as a FRAME LIST */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc);
+
+ /* Configure Output FLE with dst mbuf data */
+ DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
+ DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
+ DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);
+
+ /* Configure Input FLE with src mbuf data */
+ DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
+ DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
+ DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);
+
+ DPAA2_SET_FD_LEN(fd, ip_fle->length);
+ DPAA2_SET_FLE_FIN(ip_fle);
+
+#ifdef ENABLE_HFN_OVERRIDE
+ if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
+ /*enable HFN override override */
+ DPAA2_SET_FLE_INTERNAL_JD(ip_fle, sess->pdcp.hfn_ovd);
+ DPAA2_SET_FLE_INTERNAL_JD(op_fle, sess->pdcp.hfn_ovd);
+ DPAA2_SET_FD_INTERNAL_JD(fd, sess->pdcp.hfn_ovd);
+ }
+#endif
+
+ return 0;
+
+}
+
+static inline int
build_proto_fd(dpaa2_sec_session *sess,
struct rte_crypto_op *op,
struct qbman_fd *fd, uint16_t bpid)
{
struct rte_crypto_sym_op *sym_op = op->sym;
+ if (sym_op->m_dst)
+ return build_proto_compound_fd(sess, op, fd, bpid);
+
struct ctxt_priv *priv = sess->ctxt;
struct sec_flow_context *flc;
struct rte_mbuf *mbuf = sym_op->m_src;
@@ -1124,6 +1202,9 @@ build_sec_fd(struct rte_crypto_op *op,
case DPAA2_SEC_IPSEC:
ret = build_proto_fd(sess, op, fd, bpid);
break;
+ case DPAA2_SEC_PDCP:
+ ret = build_proto_compound_fd(sess, op, fd, bpid);
+ break;
case DPAA2_SEC_HASH_CIPHER:
default:
DPAA2_SEC_ERR("error: Unsupported session");
@@ -1145,6 +1226,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
struct qbman_swp *swp;
uint16_t num_tx = 0;
+ uint32_t flags[MAX_TX_RING_SLOTS] = {0};
/*todo - need to support multiple buffer pools */
uint16_t bpid;
struct rte_mempool *mb_pool;
@@ -1172,9 +1254,19 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
swp = DPAA2_PER_LCORE_PORTAL;
while (nb_ops) {
- frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;
+ frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
+ dpaa2_eqcr_size : nb_ops;
for (loop = 0; loop < frames_to_send; loop++) {
+ if ((*ops)->sym->m_src->seqn) {
+ uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;
+
+ flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
+ DPAA2_PER_LCORE_DQRR_SIZE--;
+ DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
+ (*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
+ }
+
/*Clear the unused FD fields before sending*/
memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
mb_pool = (*ops)->sym->m_src->pool;
@@ -1191,7 +1283,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
while (loop < frames_to_send) {
loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
&fd_arr[loop],
- NULL,
+ &flags[loop],
frames_to_send - loop);
}
@@ -1216,6 +1308,9 @@ sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
+ diff = len - mbuf->pkt_len;
+ mbuf->pkt_len += diff;
+ mbuf->data_len += diff;
op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
mbuf->buf_iova = op->sym->aead.digest.phys_addr;
op->sym->aead.digest.phys_addr = 0L;
@@ -1226,9 +1321,6 @@ sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
else
mbuf->data_off += SEC_FLC_DHR_INBOUND;
- diff = len - mbuf->pkt_len;
- mbuf->pkt_len += diff;
- mbuf->data_len += diff;
return op;
}
@@ -1273,6 +1365,16 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
} else
dst = src;
+ if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+ dpaa2_sec_session *sess = (dpaa2_sec_session *)
+ get_sec_session_private_data(op->sym->sec_session);
+ if (sess->ctxt_type == DPAA2_SEC_IPSEC) {
+ uint16_t len = DPAA2_GET_FD_LEN(fd);
+ dst->pkt_len = len;
+ dst->data_len = len;
+ }
+ }
+
DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
(void *)dst,
@@ -1321,8 +1423,8 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
qbman_pull_desc_clear(&pulldesc);
qbman_pull_desc_set_numframes(&pulldesc,
- (nb_ops > DPAA2_DQRR_RING_SIZE) ?
- DPAA2_DQRR_RING_SIZE : nb_ops);
+ (nb_ops > dpaa2_dqrr_size) ?
+ dpaa2_dqrr_size : nb_ops);
qbman_pull_desc_set_fq(&pulldesc, fqid);
qbman_pull_desc_set_storage(&pulldesc, dq_storage,
(dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
@@ -2099,6 +2201,7 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
return -1;
}
+ memset(session, 0, sizeof(dpaa2_sec_session));
/* Default IV length = 0 */
session->iv.length = 0;
@@ -2139,107 +2242,127 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
}
static int
-dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
- struct rte_security_session_conf *conf,
- void *sess)
+dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
+ dpaa2_sec_session *session,
+ struct alginfo *aeaddata)
{
- struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
- struct rte_crypto_auth_xform *auth_xform;
- struct rte_crypto_cipher_xform *cipher_xform;
- dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
- struct ctxt_priv *priv;
- struct ipsec_encap_pdb encap_pdb;
- struct ipsec_decap_pdb decap_pdb;
- struct alginfo authdata, cipherdata;
- int bufsize;
- struct sec_flow_context *flc;
-
PMD_INIT_FUNC_TRACE();
- if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
- cipher_xform = &conf->crypto_xform->cipher;
- auth_xform = &conf->crypto_xform->next->auth;
- } else {
- auth_xform = &conf->crypto_xform->auth;
- cipher_xform = &conf->crypto_xform->next->cipher;
+ session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
+ DPAA2_SEC_ERR("No Memory for aead key");
+ return -1;
}
- priv = (struct ctxt_priv *)rte_zmalloc(NULL,
- sizeof(struct ctxt_priv) +
- sizeof(struct sec_flc_desc),
- RTE_CACHE_LINE_SIZE);
+ memcpy(session->aead_key.data, aead_xform->key.data,
+ aead_xform->key.length);
- if (priv == NULL) {
- DPAA2_SEC_ERR("No memory for priv CTXT");
- return -ENOMEM;
- }
+ session->digest_length = aead_xform->digest_length;
+ session->aead_key.length = aead_xform->key.length;
- flc = &priv->flc_desc[0].flc;
+ aeaddata->key = (size_t)session->aead_key.data;
+ aeaddata->keylen = session->aead_key.length;
+ aeaddata->key_enc_flags = 0;
+ aeaddata->key_type = RTA_DATA_IMM;
- session->ctxt_type = DPAA2_SEC_IPSEC;
- session->cipher_key.data = rte_zmalloc(NULL,
- cipher_xform->key.length,
- RTE_CACHE_LINE_SIZE);
- if (session->cipher_key.data == NULL &&
- cipher_xform->key.length > 0) {
- DPAA2_SEC_ERR("No Memory for cipher key");
- rte_free(priv);
- return -ENOMEM;
+ switch (aead_xform->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ aeaddata->algtype = OP_ALG_ALGSEL_AES;
+ aeaddata->algmode = OP_ALG_AAI_GCM;
+ session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ aeaddata->algtype = OP_ALG_ALGSEL_AES;
+ aeaddata->algmode = OP_ALG_AAI_CCM;
+ session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
+ break;
+ default:
+ DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
+ aead_xform->algo);
+ return -1;
}
+ session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
- session->cipher_key.length = cipher_xform->key.length;
- session->auth_key.data = rte_zmalloc(NULL,
- auth_xform->key.length,
- RTE_CACHE_LINE_SIZE);
- if (session->auth_key.data == NULL &&
- auth_xform->key.length > 0) {
- DPAA2_SEC_ERR("No Memory for auth key");
- rte_free(session->cipher_key.data);
- rte_free(priv);
- return -ENOMEM;
+ return 0;
+}
+
+static int
+dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
+ struct rte_crypto_auth_xform *auth_xform,
+ dpaa2_sec_session *session,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ if (cipher_xform) {
+ session->cipher_key.data = rte_zmalloc(NULL,
+ cipher_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL &&
+ cipher_xform->key.length > 0) {
+ DPAA2_SEC_ERR("No Memory for cipher key");
+ return -ENOMEM;
+ }
+
+ session->cipher_key.length = cipher_xform->key.length;
+ memcpy(session->cipher_key.data, cipher_xform->key.data,
+ cipher_xform->key.length);
+ session->cipher_alg = cipher_xform->algo;
+ } else {
+ session->cipher_key.data = NULL;
+ session->cipher_key.length = 0;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
+ }
+
+ if (auth_xform) {
+ session->auth_key.data = rte_zmalloc(NULL,
+ auth_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL &&
+ auth_xform->key.length > 0) {
+ DPAA2_SEC_ERR("No Memory for auth key");
+ return -ENOMEM;
+ }
+ session->auth_key.length = auth_xform->key.length;
+ memcpy(session->auth_key.data, auth_xform->key.data,
+ auth_xform->key.length);
+ session->auth_alg = auth_xform->algo;
+ } else {
+ session->auth_key.data = NULL;
+ session->auth_key.length = 0;
+ session->auth_alg = RTE_CRYPTO_AUTH_NULL;
}
- session->auth_key.length = auth_xform->key.length;
- memcpy(session->cipher_key.data, cipher_xform->key.data,
- cipher_xform->key.length);
- memcpy(session->auth_key.data, auth_xform->key.data,
- auth_xform->key.length);
- authdata.key = (size_t)session->auth_key.data;
- authdata.keylen = session->auth_key.length;
- authdata.key_enc_flags = 0;
- authdata.key_type = RTA_DATA_IMM;
- switch (auth_xform->algo) {
+ authdata->key = (size_t)session->auth_key.data;
+ authdata->keylen = session->auth_key.length;
+ authdata->key_enc_flags = 0;
+ authdata->key_type = RTA_DATA_IMM;
+ switch (session->auth_alg) {
case RTE_CRYPTO_AUTH_SHA1_HMAC:
- authdata.algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
- authdata.algmode = OP_ALG_AAI_HMAC;
- session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
+ authdata->algmode = OP_ALG_AAI_HMAC;
break;
case RTE_CRYPTO_AUTH_MD5_HMAC:
- authdata.algtype = OP_PCL_IPSEC_HMAC_MD5_96;
- authdata.algmode = OP_ALG_AAI_HMAC;
- session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+ authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
+ authdata->algmode = OP_ALG_AAI_HMAC;
break;
case RTE_CRYPTO_AUTH_SHA256_HMAC:
- authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
- authdata.algmode = OP_ALG_AAI_HMAC;
- session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
+ authdata->algmode = OP_ALG_AAI_HMAC;
break;
case RTE_CRYPTO_AUTH_SHA384_HMAC:
- authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
- authdata.algmode = OP_ALG_AAI_HMAC;
- session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+ authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
+ authdata->algmode = OP_ALG_AAI_HMAC;
break;
case RTE_CRYPTO_AUTH_SHA512_HMAC:
- authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
- authdata.algmode = OP_ALG_AAI_HMAC;
- session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+ authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
+ authdata->algmode = OP_ALG_AAI_HMAC;
break;
case RTE_CRYPTO_AUTH_AES_CMAC:
- authdata.algtype = OP_PCL_IPSEC_AES_CMAC_96;
- session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
+ authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
break;
case RTE_CRYPTO_AUTH_NULL:
- authdata.algtype = OP_PCL_IPSEC_HMAC_NULL;
- session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+ authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
break;
case RTE_CRYPTO_AUTH_SHA224_HMAC:
case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
@@ -2255,50 +2378,119 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
- auth_xform->algo);
- goto out;
+ session->auth_alg);
+ return -1;
default:
DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
- auth_xform->algo);
- goto out;
+ session->auth_alg);
+ return -1;
}
- cipherdata.key = (size_t)session->cipher_key.data;
- cipherdata.keylen = session->cipher_key.length;
- cipherdata.key_enc_flags = 0;
- cipherdata.key_type = RTA_DATA_IMM;
+ cipherdata->key = (size_t)session->cipher_key.data;
+ cipherdata->keylen = session->cipher_key.length;
+ cipherdata->key_enc_flags = 0;
+ cipherdata->key_type = RTA_DATA_IMM;
- switch (cipher_xform->algo) {
+ switch (session->cipher_alg) {
case RTE_CRYPTO_CIPHER_AES_CBC:
- cipherdata.algtype = OP_PCL_IPSEC_AES_CBC;
- cipherdata.algmode = OP_ALG_AAI_CBC;
- session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+ cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
+ cipherdata->algmode = OP_ALG_AAI_CBC;
break;
case RTE_CRYPTO_CIPHER_3DES_CBC:
- cipherdata.algtype = OP_PCL_IPSEC_3DES;
- cipherdata.algmode = OP_ALG_AAI_CBC;
- session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+ cipherdata->algtype = OP_PCL_IPSEC_3DES;
+ cipherdata->algmode = OP_ALG_AAI_CBC;
break;
case RTE_CRYPTO_CIPHER_AES_CTR:
- cipherdata.algtype = OP_PCL_IPSEC_AES_CTR;
- cipherdata.algmode = OP_ALG_AAI_CTR;
- session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+ cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
+ cipherdata->algmode = OP_ALG_AAI_CTR;
break;
case RTE_CRYPTO_CIPHER_NULL:
- cipherdata.algtype = OP_PCL_IPSEC_NULL;
+ cipherdata->algtype = OP_PCL_IPSEC_NULL;
break;
case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_ECB:
case RTE_CRYPTO_CIPHER_KASUMI_F8:
DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
- cipher_xform->algo);
- goto out;
+ session->cipher_alg);
+ return -1;
default:
DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
- cipher_xform->algo);
+ session->cipher_alg);
+ return -1;
+ }
+
+ return 0;
+}
+
+#ifdef RTE_LIBRTE_SECURITY_TEST
+static uint8_t aes_cbc_iv[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
+#endif
+
+static int
+dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
+ struct rte_security_session_conf *conf,
+ void *sess)
+{
+ struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
+ struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ struct rte_crypto_auth_xform *auth_xform = NULL;
+ struct rte_crypto_aead_xform *aead_xform = NULL;
+ dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
+ struct ctxt_priv *priv;
+ struct ipsec_encap_pdb encap_pdb;
+ struct ipsec_decap_pdb decap_pdb;
+ struct alginfo authdata, cipherdata;
+ int bufsize;
+ struct sec_flow_context *flc;
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
+ int ret = -1;
+
+ PMD_INIT_FUNC_TRACE();
+
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) +
+ sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+
+ if (priv == NULL) {
+ DPAA2_SEC_ERR("No memory for priv CTXT");
+ return -ENOMEM;
+ }
+
+ priv->fle_pool = dev_priv->fle_pool;
+ flc = &priv->flc_desc[0].flc;
+
+ memset(session, 0, sizeof(dpaa2_sec_session));
+
+ if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ cipher_xform = &conf->crypto_xform->cipher;
+ if (conf->crypto_xform->next)
+ auth_xform = &conf->crypto_xform->next->auth;
+ ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
+ session, &cipherdata, &authdata);
+ } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ auth_xform = &conf->crypto_xform->auth;
+ if (conf->crypto_xform->next)
+ cipher_xform = &conf->crypto_xform->next->cipher;
+ ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
+ session, &cipherdata, &authdata);
+ } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ aead_xform = &conf->crypto_xform->aead;
+ ret = dpaa2_sec_ipsec_aead_init(aead_xform,
+ session, &cipherdata);
+ } else {
+ DPAA2_SEC_ERR("XFORM not specified");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (ret) {
+ DPAA2_SEC_ERR("Failed to process xform");
goto out;
}
+ session->ctxt_type = DPAA2_SEC_IPSEC;
if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
struct ip ip4_hdr;
@@ -2310,7 +2502,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
ip4_hdr.ip_id = 0;
ip4_hdr.ip_off = 0;
ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
- ip4_hdr.ip_p = 0x32;
+ ip4_hdr.ip_p = IPPROTO_ESP;
ip4_hdr.ip_sum = 0;
ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
@@ -2322,13 +2514,14 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
PDBOPTS_ESP_OIHI_PDB_INL |
PDBOPTS_ESP_IVSRC |
- PDBHMO_ESP_ENCAP_DTTL;
+ PDBHMO_ESP_ENCAP_DTTL |
+ PDBHMO_ESP_SNR;
encap_pdb.spi = ipsec_xform->spi;
encap_pdb.ip_hdr_len = sizeof(struct ip);
session->dir = DIR_ENC;
bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
- 1, 0, &encap_pdb,
+ 1, 0, SHR_SERIAL, &encap_pdb,
(uint8_t *)&ip4_hdr,
&cipherdata, &authdata);
} else if (ipsec_xform->direction ==
@@ -2338,7 +2531,8 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
decap_pdb.options = sizeof(struct ip) << 16;
session->dir = DIR_DEC;
bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
- 1, 0, &decap_pdb, &cipherdata, &authdata);
+ 1, 0, SHR_SERIAL,
+ &decap_pdb, &cipherdata, &authdata);
} else
goto out;
@@ -2372,6 +2566,244 @@ out:
rte_free(session->auth_key.data);
rte_free(session->cipher_key.data);
rte_free(priv);
+ return ret;
+}
+
+static int
+dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
+ struct rte_security_session_conf *conf,
+ void *sess)
+{
+ struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
+ struct rte_crypto_sym_xform *xform = conf->crypto_xform;
+ struct rte_crypto_auth_xform *auth_xform = NULL;
+ struct rte_crypto_cipher_xform *cipher_xform;
+ dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
+ struct ctxt_priv *priv;
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
+ struct alginfo authdata, cipherdata;
+ int bufsize = -1;
+ struct sec_flow_context *flc;
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ int swap = true;
+#else
+ int swap = false;
+#endif
+
+ PMD_INIT_FUNC_TRACE();
+
+ memset(session, 0, sizeof(dpaa2_sec_session));
+
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) +
+ sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+
+ if (priv == NULL) {
+ DPAA2_SEC_ERR("No memory for priv CTXT");
+ return -ENOMEM;
+ }
+
+ priv->fle_pool = dev_priv->fle_pool;
+ flc = &priv->flc_desc[0].flc;
+
+ /* find xfrm types */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
+ cipher_xform = &xform->cipher;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ session->ext_params.aead_ctxt.auth_cipher_text = true;
+ cipher_xform = &xform->cipher;
+ auth_xform = &xform->next->auth;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ session->ext_params.aead_ctxt.auth_cipher_text = false;
+ cipher_xform = &xform->next->cipher;
+ auth_xform = &xform->auth;
+ } else {
+ DPAA2_SEC_ERR("Invalid crypto type");
+ return -EINVAL;
+ }
+
+ session->ctxt_type = DPAA2_SEC_PDCP;
+ if (cipher_xform) {
+ session->cipher_key.data = rte_zmalloc(NULL,
+ cipher_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL &&
+ cipher_xform->key.length > 0) {
+ DPAA2_SEC_ERR("No Memory for cipher key");
+ rte_free(priv);
+ return -ENOMEM;
+ }
+ session->cipher_key.length = cipher_xform->key.length;
+ memcpy(session->cipher_key.data, cipher_xform->key.data,
+ cipher_xform->key.length);
+ session->dir =
+ (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+ session->cipher_alg = cipher_xform->algo;
+ } else {
+ session->cipher_key.data = NULL;
+ session->cipher_key.length = 0;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
+ session->dir = DIR_ENC;
+ }
+
+ session->pdcp.domain = pdcp_xform->domain;
+ session->pdcp.bearer = pdcp_xform->bearer;
+ session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
+ session->pdcp.sn_size = pdcp_xform->sn_size;
+#ifdef ENABLE_HFN_OVERRIDE
+ session->pdcp.hfn_ovd = pdcp_xform->hfn_ovd;
+#endif
+ session->pdcp.hfn = pdcp_xform->hfn;
+ session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
+
+ cipherdata.key = (size_t)session->cipher_key.data;
+ cipherdata.keylen = session->cipher_key.length;
+ cipherdata.key_enc_flags = 0;
+ cipherdata.key_type = RTA_DATA_IMM;
+
+ switch (session->cipher_alg) {
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
+ break;
+ case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+ cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
+ break;
+ case RTE_CRYPTO_CIPHER_NULL:
+ cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
+ break;
+ default:
+ DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
+ session->cipher_alg);
+ goto out;
+ }
+
+ /* Auth is only applicable for control mode operation. */
+ if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
+ if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5) {
+ DPAA2_SEC_ERR(
+ "PDCP Seq Num size should be 5 bits for cmode");
+ goto out;
+ }
+ if (auth_xform) {
+ session->auth_key.data = rte_zmalloc(NULL,
+ auth_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL &&
+ auth_xform->key.length > 0) {
+ DPAA2_SEC_ERR("No Memory for auth key");
+ rte_free(session->cipher_key.data);
+ rte_free(priv);
+ return -ENOMEM;
+ }
+ session->auth_key.length = auth_xform->key.length;
+ memcpy(session->auth_key.data, auth_xform->key.data,
+ auth_xform->key.length);
+ session->auth_alg = auth_xform->algo;
+ } else {
+ session->auth_key.data = NULL;
+ session->auth_key.length = 0;
+ session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+ }
+ authdata.key = (size_t)session->auth_key.data;
+ authdata.keylen = session->auth_key.length;
+ authdata.key_enc_flags = 0;
+ authdata.key_type = RTA_DATA_IMM;
+
+ switch (session->auth_alg) {
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ authdata.algtype = PDCP_AUTH_TYPE_SNOW;
+ break;
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ authdata.algtype = PDCP_AUTH_TYPE_ZUC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ authdata.algtype = PDCP_AUTH_TYPE_AES;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ authdata.algtype = PDCP_AUTH_TYPE_NULL;
+ break;
+ default:
+ DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
+ session->auth_alg);
+ goto out;
+ }
+
+ if (session->dir == DIR_ENC)
+ bufsize = cnstr_shdsc_pdcp_c_plane_encap(
+ priv->flc_desc[0].desc, 1, swap,
+ pdcp_xform->hfn,
+ pdcp_xform->bearer,
+ pdcp_xform->pkt_dir,
+ pdcp_xform->hfn_threshold,
+ &cipherdata, &authdata,
+ 0);
+ else if (session->dir == DIR_DEC)
+ bufsize = cnstr_shdsc_pdcp_c_plane_decap(
+ priv->flc_desc[0].desc, 1, swap,
+ pdcp_xform->hfn,
+ pdcp_xform->bearer,
+ pdcp_xform->pkt_dir,
+ pdcp_xform->hfn_threshold,
+ &cipherdata, &authdata,
+ 0);
+ } else {
+ if (session->dir == DIR_ENC)
+ bufsize = cnstr_shdsc_pdcp_u_plane_encap(
+ priv->flc_desc[0].desc, 1, swap,
+ (enum pdcp_sn_size)pdcp_xform->sn_size,
+ pdcp_xform->hfn,
+ pdcp_xform->bearer,
+ pdcp_xform->pkt_dir,
+ pdcp_xform->hfn_threshold,
+ &cipherdata, 0);
+ else if (session->dir == DIR_DEC)
+ bufsize = cnstr_shdsc_pdcp_u_plane_decap(
+ priv->flc_desc[0].desc, 1, swap,
+ (enum pdcp_sn_size)pdcp_xform->sn_size,
+ pdcp_xform->hfn,
+ pdcp_xform->bearer,
+ pdcp_xform->pkt_dir,
+ pdcp_xform->hfn_threshold,
+ &cipherdata, 0);
+ }
+
+ if (bufsize < 0) {
+ DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+ goto out;
+ }
+
+ /* Enable the stashing control bit */
+ DPAA2_SET_FLC_RSC(flc);
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (size_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq) | 0x14);
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (size_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+
+ flc->word1_sdl = (uint8_t)bufsize;
+
+ /* Set EWS bit i.e. enable write-safe */
+ DPAA2_SET_FLC_EWS(flc);
+ /* Set BS = 1 i.e reuse input buffers as output buffers */
+ DPAA2_SET_FLC_REUSE_BS(flc);
+ /* Set FF = 10; reuse input buffers if they provide sufficient space */
+ DPAA2_SET_FLC_REUSE_FF(flc);
+
+ session->ctxt = priv;
+
+ return 0;
+out:
+ rte_free(session->auth_key.data);
+ rte_free(session->cipher_key.data);
+ rte_free(priv);
return -1;
}
@@ -2397,6 +2829,10 @@ dpaa2_sec_security_session_create(void *dev,
break;
case RTE_SECURITY_PROTOCOL_MACSEC:
return -ENOTSUP;
+ case RTE_SECURITY_PROTOCOL_PDCP:
+ ret = dpaa2_sec_set_pdcp_session(cdev, conf,
+ sess_private_data);
+ break;
default:
return -EINVAL;
}
@@ -2686,6 +3122,129 @@ void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
}
}
+static void __attribute__((hot))
+dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
+ struct rte_event *ev)
+{
+ /* Prefetching mbuf */
+ rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
+
+ /* Prefetching ipsec crypto_op stored in priv data of mbuf */
+ rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
+
+ ev->flow_id = rxq->ev.flow_id;
+ ev->sub_event_type = rxq->ev.sub_event_type;
+ ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
+ ev->op = RTE_EVENT_OP_NEW;
+ ev->sched_type = rxq->ev.sched_type;
+ ev->queue_id = rxq->ev.queue_id;
+ ev->priority = rxq->ev.priority;
+ ev->event_ptr = sec_fd_to_mbuf(fd, ((struct rte_cryptodev *)
+ (rxq->dev))->driver_id);
+
+ qbman_swp_dqrr_consume(swp, dq);
+}
+static void
+dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
+ struct rte_event *ev)
+{
+ uint8_t dqrr_index;
+ struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
+ /* Prefetching mbuf */
+ rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
+
+ /* Prefetching ipsec crypto_op stored in priv data of mbuf */
+ rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
+
+ ev->flow_id = rxq->ev.flow_id;
+ ev->sub_event_type = rxq->ev.sub_event_type;
+ ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
+ ev->op = RTE_EVENT_OP_NEW;
+ ev->sched_type = rxq->ev.sched_type;
+ ev->queue_id = rxq->ev.queue_id;
+ ev->priority = rxq->ev.priority;
+
+ ev->event_ptr = sec_fd_to_mbuf(fd, ((struct rte_cryptodev *)
+ (rxq->dev))->driver_id);
+ dqrr_index = qbman_get_dqrr_idx(dq);
+ crypto_op->sym->m_src->seqn = dqrr_index + 1;
+ DPAA2_PER_LCORE_DQRR_SIZE++;
+ DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
+ DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
+}
+
+int
+dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
+ int qp_id,
+ uint16_t dpcon_id,
+ const struct rte_event *event)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
+ struct dpseci_rx_queue_cfg cfg;
+ int ret;
+
+ if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
+ qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
+ else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
+ qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
+ else
+ return -EINVAL;
+
+ memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
+ cfg.options = DPSECI_QUEUE_OPT_DEST;
+ cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
+ cfg.dest_cfg.dest_id = dpcon_id;
+ cfg.dest_cfg.priority = event->priority;
+
+ cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
+ cfg.user_ctx = (size_t)(qp);
+ if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
+ cfg.order_preservation_en = 1;
+ }
+ ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
+ qp_id, &cfg);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
+ return ret;
+ }
+
+ memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
+
+ return 0;
+}
+
+int
+dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
+ int qp_id)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ struct dpseci_rx_queue_cfg cfg;
+ int ret;
+
+ memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
+ cfg.options = DPSECI_QUEUE_OPT_DEST;
+ cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
+
+ ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
+ qp_id, &cfg);
+ if (ret)
+ RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
+
+ return ret;
+}
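+
+/*
+ * Illustrative only: these hooks are meant to be driven by the DPAA2 event
+ * device glue rather than called directly by applications. A caller holding
+ * a configured cryptodev, a DPCON object id and a populated rte_event
+ * (cryptodev, dpcon_id and ev below are placeholders) would wire queue
+ * pair 0 roughly as:
+ *
+ *	ret = dpaa2_sec_eventq_attach(cryptodev, 0, dpcon_id, &ev);
+ *	...
+ *	ret = dpaa2_sec_eventq_detach(cryptodev, 0);
+ */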
+
static struct rte_cryptodev_ops crypto_ops = {
.dev_configure = dpaa2_sec_dev_configure,
.dev_start = dpaa2_sec_dev_start,
@@ -2708,7 +3267,7 @@ dpaa2_sec_capabilities_get(void *device __rte_unused)
return dpaa2_sec_security_cap;
}
-struct rte_security_ops dpaa2_sec_security_ops = {
+static const struct rte_security_ops dpaa2_sec_security_ops = {
.session_create = dpaa2_sec_security_session_create,
.session_update = NULL,
.session_stats_get = NULL,
@@ -2843,7 +3402,7 @@ init_error:
}
static int
-cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
+cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
struct rte_dpaa2_device *dpaa2_dev)
{
struct rte_cryptodev *cryptodev;
@@ -2871,7 +3430,6 @@ cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
dpaa2_dev->cryptodev = cryptodev;
cryptodev->device = &dpaa2_dev->device;
- cryptodev->device->driver = &dpaa2_drv->driver;
/* init user callbacks */
TAILQ_INIT(&(cryptodev->link_intr_cbs));
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_event.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_event.h
new file mode 100644
index 00000000..97709942
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_event.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ *
+ */
+
+#ifndef _DPAA2_SEC_EVENT_H_
+#define _DPAA2_SEC_EVENT_H_
+
+int
+dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
+ int qp_id,
+ uint16_t dpcon_id,
+ const struct rte_event *event);
+
+int dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
+ int qp_id);
+
+#endif /* _DPAA2_SEC_EVENT_H_ */
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index d015be1e..51751103 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -8,6 +8,8 @@
#ifndef _RTE_DPAA2_SEC_PMD_PRIVATE_H_
#define _RTE_DPAA2_SEC_PMD_PRIVATE_H_
+#include <rte_security_driver.h>
+
#define CRYPTODEV_NAME_DPAA2_SEC_PMD crypto_dpaa2_sec
/**< NXP DPAA2 - SEC PMD device name */
@@ -135,6 +137,19 @@ struct dpaa2_sec_aead_ctxt {
uint8_t auth_cipher_text; /**< Authenticate/cipher ordering */
};
+/*
+ * PDCP session context, filled from the user-supplied PDCP xform parameters
+ */
+struct dpaa2_pdcp_ctxt {
+ enum rte_security_pdcp_domain domain; /*!< Data/Control mode*/
+ int8_t bearer; /*!< PDCP bearer ID */
+ int8_t pkt_dir;/*!< PDCP Frame Direction 0:UL 1:DL*/
+ int8_t hfn_ovd;/*!< Overwrite HFN per packet*/
+ uint32_t hfn; /*!< Hyper Frame Number */
+ uint32_t hfn_threshold; /*!< HFN Threshold for key renegotiation */
+ uint8_t sn_size; /*!< Sequence number size, 7/12/15 */
+};
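+
+/*
+ * Illustrative sketch only (not driver code): the fields above are derived
+ * from the PDCP xform supplied through the rte_security API. Assuming the
+ * generic API, a control-plane PDCP session would be requested roughly as
+ * follows; sec_ctx, sess_pool and cipher_auth_xform (a chained cipher+auth
+ * sym xform) are placeholders:
+ *
+ *	struct rte_security_session_conf conf = {
+ *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ *		.protocol = RTE_SECURITY_PROTOCOL_PDCP,
+ *		.pdcp = {
+ *			.domain = RTE_SECURITY_PDCP_MODE_CONTROL,
+ *			.bearer = 0x3,
+ *			.pkt_dir = 0,
+ *			.sn_size = RTE_SECURITY_PDCP_SN_SIZE_5,
+ *			.hfn = 1,
+ *			.hfn_threshold = 0x70c0a,
+ *		},
+ *		.crypto_xform = cipher_auth_xform,
+ *	};
+ *	sess = rte_security_session_create(sec_ctx, &conf, sess_pool);
+ */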
+
typedef struct dpaa2_sec_session_entry {
void *ctxt;
uint8_t ctxt_type;
@@ -158,15 +173,20 @@ typedef struct dpaa2_sec_session_entry {
} auth_key;
};
};
- struct {
- uint16_t length; /**< IV length in bytes */
- uint16_t offset; /**< IV offset in bytes */
- } iv;
- uint16_t digest_length;
- uint8_t status;
union {
- struct dpaa2_sec_aead_ctxt aead_ctxt;
- } ext_params;
+ struct {
+ struct {
+ uint16_t length; /**< IV length in bytes */
+ uint16_t offset; /**< IV offset in bytes */
+ } iv;
+ uint16_t digest_length;
+ uint8_t status;
+ union {
+ struct dpaa2_sec_aead_ctxt aead_ctxt;
+ } ext_params;
+ };
+ struct dpaa2_pdcp_ctxt pdcp;
+ };
} dpaa2_sec_session;
static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
@@ -390,6 +410,162 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
+static const struct rte_cryptodev_capabilities dpaa2_pdcp_capabilities[] = {
+ { /* SNOW 3G (UIA2) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* SNOW 3G (UEA2) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* NULL (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, },
+ }, },
+ },
+ { /* NULL (CIPHER) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ }
+ }, },
+ }, }
+ },
+ { /* ZUC (EEA3) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_ZUC_EEA3,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* ZUC (EIA3) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_ZUC_EIA3,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
static const struct rte_security_capability dpaa2_sec_security_cap[] = {
{ /* IPsec Lookaside Protocol offload ESP Transport Egress */
.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
@@ -413,6 +589,24 @@ static const struct rte_security_capability dpaa2_sec_security_cap[] = {
},
.crypto_capabilities = dpaa2_sec_capabilities
},
+ { /* PDCP Lookaside Protocol offload Data */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_PDCP,
+ .pdcp = {
+ .domain = RTE_SECURITY_PDCP_MODE_DATA,
+ .capa_flags = 0
+ },
+ .crypto_capabilities = dpaa2_pdcp_capabilities
+ },
+ { /* PDCP Lookaside Protocol offload Control */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_PDCP,
+ .pdcp = {
+ .domain = RTE_SECURITY_PDCP_MODE_CONTROL,
+ .capa_flags = 0
+ },
+ .crypto_capabilities = dpaa2_pdcp_capabilities
+ },
{
.action = RTE_SECURITY_ACTION_TYPE_NONE
}
diff --git a/drivers/crypto/dpaa2_sec/hw/desc.h b/drivers/crypto/dpaa2_sec/hw/desc.h
index e9255832..5d99dd8a 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc.h
@@ -588,7 +588,7 @@
#define OP_PCLID_TLS10_PRF (0x09 << OP_PCLID_SHIFT)
#define OP_PCLID_TLS11_PRF (0x0a << OP_PCLID_SHIFT)
#define OP_PCLID_TLS12_PRF (0x0b << OP_PCLID_SHIFT)
-#define OP_PCLID_DTLS10_PRF (0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS_PRF (0x0c << OP_PCLID_SHIFT)
#define OP_PCLID_PUBLICKEYPAIR (0x14 << OP_PCLID_SHIFT)
#define OP_PCLID_DSASIGN (0x15 << OP_PCLID_SHIFT)
#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
@@ -612,7 +612,7 @@
#define OP_PCLID_TLS10 (0x09 << OP_PCLID_SHIFT)
#define OP_PCLID_TLS11 (0x0a << OP_PCLID_SHIFT)
#define OP_PCLID_TLS12 (0x0b << OP_PCLID_SHIFT)
-#define OP_PCLID_DTLS10 (0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS (0x0c << OP_PCLID_SHIFT)
#define OP_PCLID_BLOB (0x0d << OP_PCLID_SHIFT)
#define OP_PCLID_IPSEC_NEW (0x11 << OP_PCLID_SHIFT)
#define OP_PCLID_3G_DCRC (0x31 << OP_PCLID_SHIFT)
@@ -665,643 +665,179 @@
#define OP_PCL_SRTP_HMAC_SHA1_160 0x0007
-/* For SSL 3.0 - OP_PCLID_SSL30 */
-#define OP_PCL_SSL30_AES_128_CBC_SHA 0x002f
-#define OP_PCL_SSL30_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_SSL30_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_SSL30_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_SSL30_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_SSL30_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_SSL30_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_SSL30_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_SSL30_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_SSL30_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_SSL30_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_SSL30_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_SSL30_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_SSL30_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_SSL30_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_SSL30_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_SSL30_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_SSL30_AES_256_CBC_SHA 0x0035
-#define OP_PCL_SSL30_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_SSL30_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_SSL30_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_SSL30_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_SSL30_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_SSL30_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_SSL30_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_SSL30_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_SSL30_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_SSL30_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_SSL30_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_SSL30_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_SSL30_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_SSL30_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_SSL30_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_SSL30_AES_256_CBC_SHA_17 0xc022
-
-#define OP_PCL_SSL30_AES_128_GCM_SHA256_1 0x009C
-#define OP_PCL_SSL30_AES_256_GCM_SHA384_1 0x009D
-#define OP_PCL_SSL30_AES_128_GCM_SHA256_2 0x009E
-#define OP_PCL_SSL30_AES_256_GCM_SHA384_2 0x009F
-#define OP_PCL_SSL30_AES_128_GCM_SHA256_3 0x00A0
-#define OP_PCL_SSL30_AES_256_GCM_SHA384_3 0x00A1
-#define OP_PCL_SSL30_AES_128_GCM_SHA256_4 0x00A2
-#define OP_PCL_SSL30_AES_256_GCM_SHA384_4 0x00A3
-#define OP_PCL_SSL30_AES_128_GCM_SHA256_5 0x00A4
-#define OP_PCL_SSL30_AES_256_GCM_SHA384_5 0x00A5
-#define OP_PCL_SSL30_AES_128_GCM_SHA256_6 0x00A6
-
-#define OP_PCL_TLS_DH_ANON_AES_256_GCM_SHA384 0x00A7
-#define OP_PCL_TLS_PSK_AES_128_GCM_SHA256 0x00A8
-#define OP_PCL_TLS_PSK_AES_256_GCM_SHA384 0x00A9
-#define OP_PCL_TLS_DHE_PSK_AES_128_GCM_SHA256 0x00AA
-#define OP_PCL_TLS_DHE_PSK_AES_256_GCM_SHA384 0x00AB
-#define OP_PCL_TLS_RSA_PSK_AES_128_GCM_SHA256 0x00AC
-#define OP_PCL_TLS_RSA_PSK_AES_256_GCM_SHA384 0x00AD
-#define OP_PCL_TLS_PSK_AES_128_CBC_SHA256 0x00AE
-#define OP_PCL_TLS_PSK_AES_256_CBC_SHA384 0x00AF
-#define OP_PCL_TLS_DHE_PSK_AES_128_CBC_SHA256 0x00B2
-#define OP_PCL_TLS_DHE_PSK_AES_256_CBC_SHA384 0x00B3
-#define OP_PCL_TLS_RSA_PSK_AES_128_CBC_SHA256 0x00B6
-#define OP_PCL_TLS_RSA_PSK_AES_256_CBC_SHA384 0x00B7
-
-#define OP_PCL_SSL30_3DES_EDE_CBC_MD5 0x0023
-
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_SSL30_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_SSL30_DES_CBC_MD5 0x0022
-
-#define OP_PCL_SSL30_DES40_CBC_SHA 0x0008
-#define OP_PCL_SSL30_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_SSL30_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_SSL30_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_SSL30_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_SSL30_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_SSL30_DES40_CBC_SHA_7 0x0026
-
-#define OP_PCL_SSL30_DES_CBC_SHA 0x001e
-#define OP_PCL_SSL30_DES_CBC_SHA_2 0x0009
-#define OP_PCL_SSL30_DES_CBC_SHA_3 0x000c
-#define OP_PCL_SSL30_DES_CBC_SHA_4 0x000f
-#define OP_PCL_SSL30_DES_CBC_SHA_5 0x0012
-#define OP_PCL_SSL30_DES_CBC_SHA_6 0x0015
-#define OP_PCL_SSL30_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_SSL30_RC4_128_MD5 0x0024
-#define OP_PCL_SSL30_RC4_128_MD5_2 0x0004
-#define OP_PCL_SSL30_RC4_128_MD5_3 0x0018
-
-#define OP_PCL_SSL30_RC4_40_MD5 0x002b
-#define OP_PCL_SSL30_RC4_40_MD5_2 0x0003
-#define OP_PCL_SSL30_RC4_40_MD5_3 0x0017
-
-#define OP_PCL_SSL30_RC4_128_SHA 0x0020
-#define OP_PCL_SSL30_RC4_128_SHA_2 0x008a
-#define OP_PCL_SSL30_RC4_128_SHA_3 0x008e
-#define OP_PCL_SSL30_RC4_128_SHA_4 0x0092
-#define OP_PCL_SSL30_RC4_128_SHA_5 0x0005
-#define OP_PCL_SSL30_RC4_128_SHA_6 0xc002
-#define OP_PCL_SSL30_RC4_128_SHA_7 0xc007
-#define OP_PCL_SSL30_RC4_128_SHA_8 0xc00c
-#define OP_PCL_SSL30_RC4_128_SHA_9 0xc011
-#define OP_PCL_SSL30_RC4_128_SHA_10 0xc016
-
-#define OP_PCL_SSL30_RC4_40_SHA 0x0028
-
-/* For TLS 1.0 - OP_PCLID_TLS10 */
-#define OP_PCL_TLS10_AES_128_CBC_SHA 0x002f
-#define OP_PCL_TLS10_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_TLS10_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_TLS10_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_TLS10_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_TLS10_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_TLS10_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_TLS10_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_TLS10_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_TLS10_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_TLS10_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_TLS10_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_TLS10_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_TLS10_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_TLS10_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_TLS10_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_TLS10_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_TLS10_AES_256_CBC_SHA 0x0035
-#define OP_PCL_TLS10_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_TLS10_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_TLS10_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_TLS10_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_TLS10_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_TLS10_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_TLS10_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_TLS10_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_TLS10_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_TLS10_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_TLS10_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_TLS10_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_TLS10_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_TLS10_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_TLS10_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_TLS10_AES_256_CBC_SHA_17 0xc022
-
-#define OP_PCL_TLS_ECDHE_ECDSA_AES_128_CBC_SHA256 0xC023
-#define OP_PCL_TLS_ECDHE_ECDSA_AES_256_CBC_SHA384 0xC024
-#define OP_PCL_TLS_ECDH_ECDSA_AES_128_CBC_SHA256 0xC025
-#define OP_PCL_TLS_ECDH_ECDSA_AES_256_CBC_SHA384 0xC026
-#define OP_PCL_TLS_ECDHE_RSA_AES_128_CBC_SHA256 0xC027
-#define OP_PCL_TLS_ECDHE_RSA_AES_256_CBC_SHA384 0xC028
-#define OP_PCL_TLS_ECDH_RSA_AES_128_CBC_SHA256 0xC029
-#define OP_PCL_TLS_ECDH_RSA_AES_256_CBC_SHA384 0xC02A
-#define OP_PCL_TLS_ECDHE_ECDSA_AES_128_GCM_SHA256 0xC02B
-#define OP_PCL_TLS_ECDHE_ECDSA_AES_256_GCM_SHA384 0xC02C
-#define OP_PCL_TLS_ECDH_ECDSA_AES_128_GCM_SHA256 0xC02D
-#define OP_PCL_TLS_ECDH_ECDSA_AES_256_GCM_SHA384 0xC02E
-#define OP_PCL_TLS_ECDHE_RSA_AES_128_GCM_SHA256 0xC02F
-#define OP_PCL_TLS_ECDHE_RSA_AES_256_GCM_SHA384 0xC030
-#define OP_PCL_TLS_ECDH_RSA_AES_128_GCM_SHA256 0xC031
-#define OP_PCL_TLS_ECDH_RSA_AES_256_GCM_SHA384 0xC032
-#define OP_PCL_TLS_ECDHE_PSK_RC4_128_SHA 0xC033
-#define OP_PCL_TLS_ECDHE_PSK_3DES_EDE_CBC_SHA 0xC034
-#define OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA 0xC035
-#define OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA 0xC036
-#define OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA256 0xC037
-#define OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA384 0xC038
-
-/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0x0023 */
-
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_TLS10_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_TLS10_DES_CBC_MD5 0x0022
-
-#define OP_PCL_TLS10_DES40_CBC_SHA 0x0008
-#define OP_PCL_TLS10_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_TLS10_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_TLS10_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_TLS10_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_TLS10_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_TLS10_DES40_CBC_SHA_7 0x0026
-
-#define OP_PCL_TLS10_DES_CBC_SHA 0x001e
-#define OP_PCL_TLS10_DES_CBC_SHA_2 0x0009
-#define OP_PCL_TLS10_DES_CBC_SHA_3 0x000c
-#define OP_PCL_TLS10_DES_CBC_SHA_4 0x000f
-#define OP_PCL_TLS10_DES_CBC_SHA_5 0x0012
-#define OP_PCL_TLS10_DES_CBC_SHA_6 0x0015
-#define OP_PCL_TLS10_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_TLS10_RC4_128_MD5 0x0024
-#define OP_PCL_TLS10_RC4_128_MD5_2 0x0004
-#define OP_PCL_TLS10_RC4_128_MD5_3 0x0018
-
-#define OP_PCL_TLS10_RC4_40_MD5 0x002b
-#define OP_PCL_TLS10_RC4_40_MD5_2 0x0003
-#define OP_PCL_TLS10_RC4_40_MD5_3 0x0017
-
-#define OP_PCL_TLS10_RC4_128_SHA 0x0020
-#define OP_PCL_TLS10_RC4_128_SHA_2 0x008a
-#define OP_PCL_TLS10_RC4_128_SHA_3 0x008e
-#define OP_PCL_TLS10_RC4_128_SHA_4 0x0092
-#define OP_PCL_TLS10_RC4_128_SHA_5 0x0005
-#define OP_PCL_TLS10_RC4_128_SHA_6 0xc002
-#define OP_PCL_TLS10_RC4_128_SHA_7 0xc007
-#define OP_PCL_TLS10_RC4_128_SHA_8 0xc00c
-#define OP_PCL_TLS10_RC4_128_SHA_9 0xc011
-#define OP_PCL_TLS10_RC4_128_SHA_10 0xc016
-
-#define OP_PCL_TLS10_RC4_40_SHA 0x0028
-
-#define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0xff23
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA160 0xff30
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA224 0xff34
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA256 0xff36
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA384 0xff33
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA512 0xff35
-#define OP_PCL_TLS10_AES_128_CBC_SHA160 0xff80
-#define OP_PCL_TLS10_AES_128_CBC_SHA224 0xff84
-#define OP_PCL_TLS10_AES_128_CBC_SHA256 0xff86
-#define OP_PCL_TLS10_AES_128_CBC_SHA384 0xff83
-#define OP_PCL_TLS10_AES_128_CBC_SHA512 0xff85
-#define OP_PCL_TLS10_AES_192_CBC_SHA160 0xff20
-#define OP_PCL_TLS10_AES_192_CBC_SHA224 0xff24
-#define OP_PCL_TLS10_AES_192_CBC_SHA256 0xff26
-#define OP_PCL_TLS10_AES_192_CBC_SHA384 0xff23
-#define OP_PCL_TLS10_AES_192_CBC_SHA512 0xff25
-#define OP_PCL_TLS10_AES_256_CBC_SHA160 0xff60
-#define OP_PCL_TLS10_AES_256_CBC_SHA224 0xff64
-#define OP_PCL_TLS10_AES_256_CBC_SHA256 0xff66
-#define OP_PCL_TLS10_AES_256_CBC_SHA384 0xff63
-#define OP_PCL_TLS10_AES_256_CBC_SHA512 0xff65
-
-#define OP_PCL_TLS_PVT_AES_192_CBC_SHA160 0xff90
-#define OP_PCL_TLS_PVT_AES_192_CBC_SHA384 0xff93
-#define OP_PCL_TLS_PVT_AES_192_CBC_SHA224 0xff94
-#define OP_PCL_TLS_PVT_AES_192_CBC_SHA512 0xff95
-#define OP_PCL_TLS_PVT_AES_192_CBC_SHA256 0xff96
-#define OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FE 0xfffe
-#define OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FF 0xffff
-
-/* For TLS 1.1 - OP_PCLID_TLS11 */
-#define OP_PCL_TLS11_AES_128_CBC_SHA 0x002f
-#define OP_PCL_TLS11_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_TLS11_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_TLS11_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_TLS11_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_TLS11_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_TLS11_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_TLS11_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_TLS11_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_TLS11_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_TLS11_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_TLS11_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_TLS11_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_TLS11_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_TLS11_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_TLS11_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_TLS11_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_TLS11_AES_256_CBC_SHA 0x0035
-#define OP_PCL_TLS11_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_TLS11_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_TLS11_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_TLS11_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_TLS11_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_TLS11_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_TLS11_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_TLS11_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_TLS11_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_TLS11_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_TLS11_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_TLS11_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_TLS11_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_TLS11_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_TLS11_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_TLS11_AES_256_CBC_SHA_17 0xc022
-
-/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0x0023 */
-
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_TLS11_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_TLS11_DES_CBC_MD5 0x0022
-
-#define OP_PCL_TLS11_DES40_CBC_SHA 0x0008
-#define OP_PCL_TLS11_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_TLS11_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_TLS11_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_TLS11_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_TLS11_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_TLS11_DES40_CBC_SHA_7 0x0026
-
-#define OP_PCL_TLS11_DES_CBC_SHA 0x001e
-#define OP_PCL_TLS11_DES_CBC_SHA_2 0x0009
-#define OP_PCL_TLS11_DES_CBC_SHA_3 0x000c
-#define OP_PCL_TLS11_DES_CBC_SHA_4 0x000f
-#define OP_PCL_TLS11_DES_CBC_SHA_5 0x0012
-#define OP_PCL_TLS11_DES_CBC_SHA_6 0x0015
-#define OP_PCL_TLS11_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_TLS11_RC4_128_MD5 0x0024
-#define OP_PCL_TLS11_RC4_128_MD5_2 0x0004
-#define OP_PCL_TLS11_RC4_128_MD5_3 0x0018
-
-#define OP_PCL_TLS11_RC4_40_MD5 0x002b
-#define OP_PCL_TLS11_RC4_40_MD5_2 0x0003
-#define OP_PCL_TLS11_RC4_40_MD5_3 0x0017
-
-#define OP_PCL_TLS11_RC4_128_SHA 0x0020
-#define OP_PCL_TLS11_RC4_128_SHA_2 0x008a
-#define OP_PCL_TLS11_RC4_128_SHA_3 0x008e
-#define OP_PCL_TLS11_RC4_128_SHA_4 0x0092
-#define OP_PCL_TLS11_RC4_128_SHA_5 0x0005
-#define OP_PCL_TLS11_RC4_128_SHA_6 0xc002
-#define OP_PCL_TLS11_RC4_128_SHA_7 0xc007
-#define OP_PCL_TLS11_RC4_128_SHA_8 0xc00c
-#define OP_PCL_TLS11_RC4_128_SHA_9 0xc011
-#define OP_PCL_TLS11_RC4_128_SHA_10 0xc016
-
-#define OP_PCL_TLS11_RC4_40_SHA 0x0028
-
-#define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0xff23
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA160 0xff30
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA224 0xff34
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA256 0xff36
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA384 0xff33
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA512 0xff35
-#define OP_PCL_TLS11_AES_128_CBC_SHA160 0xff80
-#define OP_PCL_TLS11_AES_128_CBC_SHA224 0xff84
-#define OP_PCL_TLS11_AES_128_CBC_SHA256 0xff86
-#define OP_PCL_TLS11_AES_128_CBC_SHA384 0xff83
-#define OP_PCL_TLS11_AES_128_CBC_SHA512 0xff85
-#define OP_PCL_TLS11_AES_192_CBC_SHA160 0xff20
-#define OP_PCL_TLS11_AES_192_CBC_SHA224 0xff24
-#define OP_PCL_TLS11_AES_192_CBC_SHA256 0xff26
-#define OP_PCL_TLS11_AES_192_CBC_SHA384 0xff23
-#define OP_PCL_TLS11_AES_192_CBC_SHA512 0xff25
-#define OP_PCL_TLS11_AES_256_CBC_SHA160 0xff60
-#define OP_PCL_TLS11_AES_256_CBC_SHA224 0xff64
-#define OP_PCL_TLS11_AES_256_CBC_SHA256 0xff66
-#define OP_PCL_TLS11_AES_256_CBC_SHA384 0xff63
-#define OP_PCL_TLS11_AES_256_CBC_SHA512 0xff65
-
-
-/* For TLS 1.2 - OP_PCLID_TLS12 */
-#define OP_PCL_TLS12_AES_128_CBC_SHA 0x002f
-#define OP_PCL_TLS12_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_TLS12_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_TLS12_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_TLS12_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_TLS12_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_TLS12_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_TLS12_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_TLS12_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_TLS12_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_TLS12_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_TLS12_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_TLS12_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_TLS12_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_TLS12_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_TLS12_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_TLS12_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_TLS12_AES_256_CBC_SHA 0x0035
-#define OP_PCL_TLS12_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_TLS12_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_TLS12_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_TLS12_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_TLS12_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_TLS12_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_TLS12_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_TLS12_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_TLS12_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_TLS12_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_TLS12_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_TLS12_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_TLS12_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_TLS12_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_TLS12_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_TLS12_AES_256_CBC_SHA_17 0xc022
-
-/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0x0023 */
-
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_TLS12_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_TLS12_DES_CBC_MD5 0x0022
-
-#define OP_PCL_TLS12_DES40_CBC_SHA 0x0008
-#define OP_PCL_TLS12_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_TLS12_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_TLS12_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_TLS12_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_TLS12_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_TLS12_DES40_CBC_SHA_7 0x0026
-
-#define OP_PCL_TLS12_DES_CBC_SHA 0x001e
-#define OP_PCL_TLS12_DES_CBC_SHA_2 0x0009
-#define OP_PCL_TLS12_DES_CBC_SHA_3 0x000c
-#define OP_PCL_TLS12_DES_CBC_SHA_4 0x000f
-#define OP_PCL_TLS12_DES_CBC_SHA_5 0x0012
-#define OP_PCL_TLS12_DES_CBC_SHA_6 0x0015
-#define OP_PCL_TLS12_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_TLS12_RC4_128_MD5 0x0024
-#define OP_PCL_TLS12_RC4_128_MD5_2 0x0004
-#define OP_PCL_TLS12_RC4_128_MD5_3 0x0018
-
-#define OP_PCL_TLS12_RC4_40_MD5 0x002b
-#define OP_PCL_TLS12_RC4_40_MD5_2 0x0003
-#define OP_PCL_TLS12_RC4_40_MD5_3 0x0017
-
-#define OP_PCL_TLS12_RC4_128_SHA 0x0020
-#define OP_PCL_TLS12_RC4_128_SHA_2 0x008a
-#define OP_PCL_TLS12_RC4_128_SHA_3 0x008e
-#define OP_PCL_TLS12_RC4_128_SHA_4 0x0092
-#define OP_PCL_TLS12_RC4_128_SHA_5 0x0005
-#define OP_PCL_TLS12_RC4_128_SHA_6 0xc002
-#define OP_PCL_TLS12_RC4_128_SHA_7 0xc007
-#define OP_PCL_TLS12_RC4_128_SHA_8 0xc00c
-#define OP_PCL_TLS12_RC4_128_SHA_9 0xc011
-#define OP_PCL_TLS12_RC4_128_SHA_10 0xc016
-
-#define OP_PCL_TLS12_RC4_40_SHA 0x0028
-
-/* #define OP_PCL_TLS12_AES_128_CBC_SHA256 0x003c */
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_2 0x003e
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_3 0x003f
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_4 0x0040
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_5 0x0067
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_6 0x006c
-
-/* #define OP_PCL_TLS12_AES_256_CBC_SHA256 0x003d */
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_2 0x0068
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_3 0x0069
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_4 0x006a
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_5 0x006b
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_6 0x006d
-
-/* AEAD_AES_xxx_CCM/GCM remain to be defined... */
-
-#define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0xff23
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA160 0xff30
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA224 0xff34
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA256 0xff36
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA384 0xff33
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA512 0xff35
-#define OP_PCL_TLS12_AES_128_CBC_SHA160 0xff80
-#define OP_PCL_TLS12_AES_128_CBC_SHA224 0xff84
-#define OP_PCL_TLS12_AES_128_CBC_SHA256 0xff86
-#define OP_PCL_TLS12_AES_128_CBC_SHA384 0xff83
-#define OP_PCL_TLS12_AES_128_CBC_SHA512 0xff85
-#define OP_PCL_TLS12_AES_192_CBC_SHA160 0xff20
-#define OP_PCL_TLS12_AES_192_CBC_SHA224 0xff24
-#define OP_PCL_TLS12_AES_192_CBC_SHA256 0xff26
-#define OP_PCL_TLS12_AES_192_CBC_SHA384 0xff23
-#define OP_PCL_TLS12_AES_192_CBC_SHA512 0xff25
-#define OP_PCL_TLS12_AES_256_CBC_SHA160 0xff60
-#define OP_PCL_TLS12_AES_256_CBC_SHA224 0xff64
-#define OP_PCL_TLS12_AES_256_CBC_SHA256 0xff66
-#define OP_PCL_TLS12_AES_256_CBC_SHA384 0xff63
-#define OP_PCL_TLS12_AES_256_CBC_SHA512 0xff65
-
-/* For DTLS - OP_PCLID_DTLS */
-
-#define OP_PCL_DTLS_AES_128_CBC_SHA 0x002f
-#define OP_PCL_DTLS_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_DTLS_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_DTLS_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_DTLS_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_DTLS_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_DTLS_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_DTLS_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_DTLS_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_DTLS_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_DTLS_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_DTLS_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_DTLS_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_DTLS_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_DTLS_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_DTLS_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_DTLS_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_DTLS_AES_256_CBC_SHA 0x0035
-#define OP_PCL_DTLS_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_DTLS_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_DTLS_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_DTLS_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_DTLS_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_DTLS_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_DTLS_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_DTLS_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_DTLS_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_DTLS_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_DTLS_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_DTLS_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_DTLS_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_DTLS_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_DTLS_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_DTLS_AES_256_CBC_SHA_17 0xc022
-
-/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0x0023 */
-
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_DTLS_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_DTLS_DES_CBC_MD5 0x0022
-
-#define OP_PCL_DTLS_DES40_CBC_SHA 0x0008
-#define OP_PCL_DTLS_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_DTLS_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_DTLS_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_DTLS_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_DTLS_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_DTLS_DES40_CBC_SHA_7 0x0026
-
-
-#define OP_PCL_DTLS_DES_CBC_SHA 0x001e
-#define OP_PCL_DTLS_DES_CBC_SHA_2 0x0009
-#define OP_PCL_DTLS_DES_CBC_SHA_3 0x000c
-#define OP_PCL_DTLS_DES_CBC_SHA_4 0x000f
-#define OP_PCL_DTLS_DES_CBC_SHA_5 0x0012
-#define OP_PCL_DTLS_DES_CBC_SHA_6 0x0015
-#define OP_PCL_DTLS_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0xff23
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA160 0xff30
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA224 0xff34
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA256 0xff36
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA384 0xff33
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA512 0xff35
-#define OP_PCL_DTLS_AES_128_CBC_SHA160 0xff80
-#define OP_PCL_DTLS_AES_128_CBC_SHA224 0xff84
-#define OP_PCL_DTLS_AES_128_CBC_SHA256 0xff86
-#define OP_PCL_DTLS_AES_128_CBC_SHA384 0xff83
-#define OP_PCL_DTLS_AES_128_CBC_SHA512 0xff85
-#define OP_PCL_DTLS_AES_192_CBC_SHA160 0xff20
-#define OP_PCL_DTLS_AES_192_CBC_SHA224 0xff24
-#define OP_PCL_DTLS_AES_192_CBC_SHA256 0xff26
-#define OP_PCL_DTLS_AES_192_CBC_SHA384 0xff23
-#define OP_PCL_DTLS_AES_192_CBC_SHA512 0xff25
-#define OP_PCL_DTLS_AES_256_CBC_SHA160 0xff60
-#define OP_PCL_DTLS_AES_256_CBC_SHA224 0xff64
-#define OP_PCL_DTLS_AES_256_CBC_SHA256 0xff66
-#define OP_PCL_DTLS_AES_256_CBC_SHA384 0xff63
-#define OP_PCL_DTLS_AES_256_CBC_SHA512 0xff65
+/*
+ * For SSL/TLS/DTLS - OP_PCL_TLS
+ * For more details see IANA TLS Cipher Suite registry:
+ * https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml
+ * Note: for private/internal use (reserved by IANA) - OP_PCL_PVT_TLS
+ */
+#define OP_PCL_TLS_RSA_EXPORT_WITH_RC4_40_MD5 0x0003
+#define OP_PCL_TLS_RSA_WITH_RC4_128_MD5 0x0004
+#define OP_PCL_TLS_RSA_WITH_RC4_128_SHA 0x0005
+#define OP_PCL_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS_RSA_WITH_DES_CBC_SHA 0x0009
+#define OP_PCL_TLS_RSA_WITH_3DES_EDE_CBC_SHA 0x000a
+#define OP_PCL_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA 0x000b
+#define OP_PCL_TLS_DH_DSS_WITH_DES_CBC_SHA 0x000c
+#define OP_PCL_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA 0x000d
+#define OP_PCL_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA 0x000e
+#define OP_PCL_TLS_DH_RSA_WITH_DES_CBC_SHA 0x000f
+#define OP_PCL_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA 0x0010
+#define OP_PCL_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA 0x0011
+#define OP_PCL_TLS_DHE_DSS_WITH_DES_CBC_SHA 0x0012
+#define OP_PCL_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA 0x0013
+#define OP_PCL_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA 0x0014
+#define OP_PCL_TLS_DHE_RSA_WITH_DES_CBC_SHA 0x0015
+#define OP_PCL_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA 0x0016
+#define OP_PCL_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 0x0017
+#define OP_PCL_TLS_DH_anon_WITH_RC4_128_MD5 0x0018
+#define OP_PCL_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA 0x0019
+#define OP_PCL_TLS_DH_anon_WITH_DES_CBC_SHA 0x001a
+#define OP_PCL_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA 0x001b
+#define OP_PCL_TLS_KRB5_WITH_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS_KRB5_WITH_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS_KRB5_WITH_RC4_128_SHA 0x0020
+#define OP_PCL_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 0x0023
+#define OP_PCL_TLS_KRB5_WITH_DES_CBC_MD5 0x0022
+#define OP_PCL_TLS_KRB5_WITH_RC4_128_MD5 0x0024
+#define OP_PCL_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA 0x0026
+#define OP_PCL_TLS_KRB5_EXPORT_WITH_RC4_40_SHA 0x0028
+#define OP_PCL_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 0x0029
+#define OP_PCL_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 0x002b
+#define OP_PCL_TLS_RSA_WITH_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS_DH_DSS_WITH_AES_128_CBC_SHA 0x0030
+#define OP_PCL_TLS_DH_RSA_WITH_AES_128_CBC_SHA 0x0031
+#define OP_PCL_TLS_DHE_DSS_WITH_AES_128_CBC_SHA 0x0032
+#define OP_PCL_TLS_DHE_RSA_WITH_AES_128_CBC_SHA 0x0033
+#define OP_PCL_TLS_DH_anon_WITH_AES_128_CBC_SHA 0x0034
+#define OP_PCL_TLS_RSA_WITH_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS_DH_DSS_WITH_AES_256_CBC_SHA 0x0036
+#define OP_PCL_TLS_DH_RSA_WITH_AES_256_CBC_SHA 0x0037
+#define OP_PCL_TLS_DHE_DSS_WITH_AES_256_CBC_SHA 0x0038
+#define OP_PCL_TLS_DHE_RSA_WITH_AES_256_CBC_SHA 0x0039
+#define OP_PCL_TLS_DH_anon_WITH_AES_256_CBC_SHA 0x003a
+#define OP_PCL_TLS_RSA_WITH_AES_128_CBC_SHA256 0x003c
+#define OP_PCL_TLS_RSA_WITH_AES_256_CBC_SHA256 0x003d
+#define OP_PCL_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 0x003e
+#define OP_PCL_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 0x003f
+#define OP_PCL_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 0x0040
+#define OP_PCL_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 0x0067
+#define OP_PCL_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 0x0068
+#define OP_PCL_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 0x0069
+#define OP_PCL_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 0x006a
+#define OP_PCL_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 0x006b
+#define OP_PCL_TLS_DH_anon_WITH_AES_128_CBC_SHA256 0x006c
+#define OP_PCL_TLS_DH_anon_WITH_AES_256_CBC_SHA256 0x006d
+#define OP_PCL_TLS_PSK_WITH_RC4_128_SHA 0x008a
+#define OP_PCL_TLS_PSK_WITH_3DES_EDE_CBC_SHA 0x008b
+#define OP_PCL_TLS_PSK_WITH_AES_128_CBC_SHA 0x008c
+#define OP_PCL_TLS_PSK_WITH_AES_256_CBC_SHA 0x008d
+#define OP_PCL_TLS_DHE_PSK_WITH_RC4_128_SHA 0x008e
+#define OP_PCL_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA 0x008f
+#define OP_PCL_TLS_DHE_PSK_WITH_AES_128_CBC_SHA 0x0090
+#define OP_PCL_TLS_DHE_PSK_WITH_AES_256_CBC_SHA 0x0091
+#define OP_PCL_TLS_RSA_PSK_WITH_RC4_128_SHA 0x0092
+#define OP_PCL_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA 0x0093
+#define OP_PCL_TLS_RSA_PSK_WITH_AES_128_CBC_SHA 0x0094
+#define OP_PCL_TLS_RSA_PSK_WITH_AES_256_CBC_SHA 0x0095
+#define OP_PCL_TLS_RSA_WITH_AES_128_GCM_SHA256 0x009c
+#define OP_PCL_TLS_RSA_WITH_AES_256_GCM_SHA384 0x009d
+#define OP_PCL_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 0x009e
+#define OP_PCL_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 0x009f
+#define OP_PCL_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 0x00a0
+#define OP_PCL_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 0x00a1
+#define OP_PCL_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 0x00a2
+#define OP_PCL_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 0x00a3
+#define OP_PCL_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 0x00a4
+#define OP_PCL_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 0x00a5
+#define OP_PCL_TLS_DH_anon_WITH_AES_128_GCM_SHA256 0x00a6
+#define OP_PCL_TLS_DH_anon_WITH_AES_256_GCM_SHA384 0x00a7
+#define OP_PCL_TLS_PSK_WITH_AES_128_GCM_SHA256 0x00a8
+#define OP_PCL_TLS_PSK_WITH_AES_256_GCM_SHA384 0x00a9
+#define OP_PCL_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 0x00aa
+#define OP_PCL_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 0x00ab
+#define OP_PCL_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 0x00ac
+#define OP_PCL_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 0x00ad
+#define OP_PCL_TLS_PSK_WITH_AES_128_CBC_SHA256 0x00ae
+#define OP_PCL_TLS_PSK_WITH_AES_256_CBC_SHA384 0x00af
+#define OP_PCL_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 0x00b2
+#define OP_PCL_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 0x00b3
+#define OP_PCL_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 0x00b6
+#define OP_PCL_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 0x00b7
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_RC4_128_SHA 0xc002
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA 0xc003
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA 0xc004
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA 0xc005
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA 0xc007
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA 0xc008
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA 0xc009
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA 0xc00a
+#define OP_PCL_TLS_ECDH_RSA_WITH_RC4_128_SHA 0xc00c
+#define OP_PCL_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA 0xc00d
+#define OP_PCL_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA 0xc00e
+#define OP_PCL_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA 0xc00f
+#define OP_PCL_TLS_ECDHE_RSA_WITH_RC4_128_SHA 0xc011
+#define OP_PCL_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA 0xc012
+#define OP_PCL_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA 0xc013
+#define OP_PCL_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA 0xc014
+#define OP_PCL_TLS_ECDH_anon_WITH_RC4_128_SHA 0xc016
+#define OP_PCL_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA 0xc017
+#define OP_PCL_TLS_ECDH_anon_WITH_AES_128_CBC_SHA 0xc018
+#define OP_PCL_TLS_ECDH_anon_WITH_AES_256_CBC_SHA 0xc019
+#define OP_PCL_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA 0xc01a
+#define OP_PCL_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA 0xc01b
+#define OP_PCL_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA 0xc01c
+#define OP_PCL_TLS_SRP_SHA_WITH_AES_128_CBC_SHA 0xc01d
+#define OP_PCL_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA 0xc01e
+#define OP_PCL_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA 0xc01f
+#define OP_PCL_TLS_SRP_SHA_WITH_AES_256_CBC_SHA 0xc020
+#define OP_PCL_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA 0xc021
+#define OP_PCL_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA 0xc022
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 0xc023
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 0xc024
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 0xc025
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 0xc026
+#define OP_PCL_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 0xc027
+#define OP_PCL_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 0xc028
+#define OP_PCL_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 0xc029
+#define OP_PCL_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 0xc02a
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 0xc02b
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 0xc02c
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 0xc02d
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 0xc02e
+#define OP_PCL_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 0xc02f
+#define OP_PCL_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 0xc030
+#define OP_PCL_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 0xc031
+#define OP_PCL_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 0xc032
+#define OP_PCL_TLS_ECDHE_PSK_WITH_RC4_128_SHA 0xc033
+#define OP_PCL_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA 0xc034
+#define OP_PCL_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA 0xc035
+#define OP_PCL_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA 0xc036
+#define OP_PCL_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 0xc037
+#define OP_PCL_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 0xc038
+#define OP_PCL_PVT_TLS_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_PVT_TLS_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_PVT_TLS_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_PVT_TLS_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_PVT_TLS_AES_256_CBC_SHA512 0xff65
+#define OP_PCL_PVT_TLS_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_PVT_TLS_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_PVT_TLS_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_PVT_TLS_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_PVT_TLS_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_PVT_TLS_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_PVT_TLS_AES_192_CBC_SHA160 0xff90
+#define OP_PCL_PVT_TLS_AES_192_CBC_SHA384 0xff93
+#define OP_PCL_PVT_TLS_AES_192_CBC_SHA224 0xff94
+#define OP_PCL_PVT_TLS_AES_192_CBC_SHA512 0xff95
+#define OP_PCL_PVT_TLS_AES_192_CBC_SHA256 0xff96
+#define OP_PCL_PVT_TLS_MASTER_SECRET_PRF_FE 0xfffe
+#define OP_PCL_PVT_TLS_MASTER_SECRET_PRF_FF 0xffff
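/*
 * Illustrative sketch (not part of the patch): these protinfo values are the
 * IANA cipher-suite numbers and would typically be supplied as the protinfo
 * halfword of an RTA PROTOCOL command together with an OP_PCLID_TLS10/11/12
 * or OP_PCLID_DTLS class id. The program pointer 'p' and the suite chosen
 * below are assumptions for illustration only.
 */
PROTOCOL(p, OP_TYPE_ENCAP_PROTOCOL, OP_PCLID_TLS12,
	 OP_PCL_TLS_RSA_WITH_AES_128_CBC_SHA256);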
/* 802.16 WiMAX protinfos */
#define OP_PCL_WIMAX_OFDM 0x0201
@@ -1332,7 +868,7 @@
#define OP_PCL_LTE_MIXED_AUTH_SHIFT 0
#define OP_PCL_LTE_MIXED_AUTH_MASK (3 << OP_PCL_LTE_MIXED_AUTH_SHIFT)
#define OP_PCL_LTE_MIXED_ENC_SHIFT 8
-#define OP_PCL_LTE_MIXED_ENC_MASK (3 < OP_PCL_LTE_MIXED_ENC_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_MASK (3 << OP_PCL_LTE_MIXED_ENC_SHIFT)
#define OP_PCL_LTE_MIXED_AUTH_NULL (OP_PCL_LTE_NULL << \
OP_PCL_LTE_MIXED_AUTH_SHIFT)
#define OP_PCL_LTE_MIXED_AUTH_SNOW (OP_PCL_LTE_SNOW << \
diff --git a/drivers/crypto/dpaa2_sec/hw/desc/algo.h b/drivers/crypto/dpaa2_sec/hw/desc/algo.h
index 91f3e067..febcb6d0 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc/algo.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc/algo.h
@@ -410,6 +410,35 @@ cnstr_shdsc_kasumi_f9(uint32_t *descbuf, bool ps, bool swap,
}
/**
+ * cnstr_shdsc_crc - CRC32 Accelerator (IEEE 802 CRC32 protocol mode)
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_crc(uint32_t *descbuf, bool swap)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_CRC,
+ OP_ALG_AAI_802 | OP_ALG_AAI_DOC,
+ OP_ALG_AS_FINALIZE, 0, DIR_ENC);
+ SEQFIFOLOAD(p, MSG2, 0, VLF | LAST2);
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ return PROGRAM_FINALIZE(p);
+}
+
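/*
 * Illustrative sketch (not part of the patch): a minimal host-side use of
 * cnstr_shdsc_crc(). The 64-word buffer matches the maximum SEC shared
 * descriptor size; 'swap' is true here on the assumption that core and SEC
 * endianness differ.
 */
uint32_t crc_shdesc[64];
int crc_desc_len;

crc_desc_len = cnstr_shdsc_crc(crc_shdesc, true);
if (crc_desc_len < 0)
	return crc_desc_len;	/* descriptor construction failed */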
+/**
* cnstr_shdsc_gcm_encap - AES-GCM encap as a shared descriptor
* @descbuf: pointer to descriptor-under-construction buffer
* @ps: if 36/40bit addressing is desired, this parameter must be true
@@ -614,33 +643,4 @@ cnstr_shdsc_gcm_decap(uint32_t *descbuf, bool ps, bool swap,
return PROGRAM_FINALIZE(p);
}
-/**
- * cnstr_shdsc_crc - CRC32 Accelerator (IEEE 802 CRC32 protocol mode)
- * @descbuf: pointer to descriptor-under-construction buffer
- * @swap: must be true when core endianness doesn't match SEC endianness
- *
- * Return: size of descriptor written in words or negative number on error
- */
-static inline int
-cnstr_shdsc_crc(uint32_t *descbuf, bool swap)
-{
- struct program prg;
- struct program *p = &prg;
-
- PROGRAM_CNTXT_INIT(p, descbuf, 0);
- if (swap)
- PROGRAM_SET_BSWAP(p);
-
- SHR_HDR(p, SHR_ALWAYS, 1, 0);
-
- MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
- ALG_OPERATION(p, OP_ALG_ALGSEL_CRC,
- OP_ALG_AAI_802 | OP_ALG_AAI_DOC,
- OP_ALG_AS_FINALIZE, 0, DIR_ENC);
- SEQFIFOLOAD(p, MSG2, 0, VLF | LAST2);
- SEQSTORE(p, CONTEXT2, 0, 4, 0);
-
- return PROGRAM_FINALIZE(p);
-}
-
#endif /* __DESC_ALGO_H__ */
diff --git a/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h b/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
index 35cc02a6..d256a391 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
@@ -522,44 +522,133 @@ enum ipsec_icv_size {
/*
* IPSec ESP Datapath Protocol Override Register (DPOVRD)
+ * IPSEC_N_* defines are for IPsec new mode.
*/
-#define IPSEC_DECO_DPOVRD_USE 0x80
+/**
+ * IPSEC_DPOVRD_USE - DPOVRD will override values specified in the PDB
+ */
+#define IPSEC_DPOVRD_USE BIT(31)
-struct ipsec_deco_dpovrd {
- uint8_t ovrd_ecn;
- uint8_t ip_hdr_len;
- uint8_t nh_offset;
- union {
- uint8_t next_header; /* next header if encap */
- uint8_t rsvd; /* reserved if decap */
- };
-};
+/**
+ * IPSEC_DPOVRD_ECN_SHIFT - Explicit Congestion Notification
+ *
+ * If the MSB of this 4-bit field is set, its 2 LSBs replace the ECN bits
+ * in the IP header.
+ */
+#define IPSEC_DPOVRD_ECN_SHIFT 24
-struct ipsec_new_encap_deco_dpovrd {
-#define IPSEC_NEW_ENCAP_DECO_DPOVRD_USE 0x8000
- uint16_t ovrd_ip_hdr_len; /* OVRD + outer IP header material
- * length
- */
-#define IPSEC_NEW_ENCAP_OIMIF 0x80
- uint8_t oimif_aoipho; /* OIMIF + actual outer IP header
- * offset
- */
- uint8_t rsvd;
-};
+/**
+ * IPSEC_DPOVRD_ECN_MASK - See IPSEC_DPOVRD_ECN_SHIFT
+ */
+#define IPSEC_DPOVRD_ECN_MASK (0xf << IPSEC_DPOVRD_ECN_SHIFT)
-struct ipsec_new_decap_deco_dpovrd {
- uint8_t ovrd;
- uint8_t aoipho_hi; /* upper nibble of actual outer IP
- * header
- */
- uint16_t aoipho_lo_ip_hdr_len; /* lower nibble of actual outer IP
- * header + outer IP header material
- */
-};
+/**
+ * IPSEC_DPOVRD_IP_HDR_LEN_SHIFT - The length (in bytes) of the portion of the
+ * IP header that is not encrypted
+ */
+#define IPSEC_DPOVRD_IP_HDR_LEN_SHIFT 16
+
+/**
+ * IPSEC_DPOVRD_IP_HDR_LEN_MASK - See IPSEC_DPOVRD_IP_HDR_LEN_SHIFT
+ */
+#define IPSEC_DPOVRD_IP_HDR_LEN_MASK (0xff << IPSEC_DPOVRD_IP_HDR_LEN_SHIFT)
+
+/**
+ * IPSEC_DPOVRD_NH_OFFSET_SHIFT - The location of the next header field within
+ * the IP header of the transport mode packet
+ *
+ * Encap:
+ * ESP_Trailer_NH <-- IP_Hdr[DPOVRD[NH_OFFSET]]
+ * IP_Hdr[DPOVRD[NH_OFFSET]] <-- DPOVRD[NH]
+ * Decap:
+ * IP_Hdr[DPOVRD[NH_OFFSET]] <-- ESP_Trailer_NH
+ */
+#define IPSEC_DPOVRD_NH_OFFSET_SHIFT 8
+
+/**
+ * IPSEC_DPOVRD_NH_OFFSET_MASK - See IPSEC_DPOVRD_NH_OFFSET_SHIFT
+ */
+#define IPSEC_DPOVRD_NH_OFFSET_MASK (0xff << IPSEC_DPOVRD_NH_OFFSET_SHIFT)
+
+/**
+ * IPSEC_DPOVRD_NH_MASK - See IPSEC_DPOVRD_NH_OFFSET_SHIFT
+ * Valid only for encapsulation.
+ */
+#define IPSEC_DPOVRD_NH_MASK 0xff
+
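/*
 * Illustrative sketch (not part of the patch): composing the per-frame DPOVRD
 * word for legacy (non new-mode) transport-mode override out of the fields
 * above. The per-packet values below are assumptions chosen for illustration.
 */
uint8_t ip_hdr_len = 20;	/* e.g. IPv4 header without options */
uint8_t nh_offset = 9;		/* offset of the IPv4 protocol field */
uint8_t next_header = 50;	/* e.g. ESP */
uint32_t dpovrd;

dpovrd = IPSEC_DPOVRD_USE |
	 (ip_hdr_len << IPSEC_DPOVRD_IP_HDR_LEN_SHIFT) |
	 (nh_offset << IPSEC_DPOVRD_NH_OFFSET_SHIFT) |
	 (next_header & IPSEC_DPOVRD_NH_MASK);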
+/**
+ * IPSEC_N_ENCAP_DPOVRD_OIM_LEN_SHIFT - Outer IP header Material length (encap)
+ * Valid only if L2_COPY is not set.
+ */
+#define IPSEC_N_ENCAP_DPOVRD_OIM_LEN_SHIFT 16
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_OIM_LEN_MASK - See IPSEC_N_ENCAP_DPOVRD_OIM_LEN_SHIFT
+ */
+#define IPSEC_N_ENCAP_DPOVRD_OIM_LEN_MASK \
+ (0xfff << IPSEC_N_ENCAP_DPOVRD_OIM_LEN_SHIFT)
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_L2_LEN_SHIFT - L2 header length
+ * Valid only if L2_COPY is set.
+ */
+#define IPSEC_N_ENCAP_DPOVRD_L2_LEN_SHIFT 16
-static inline void
-__gen_auth_key(struct program *program, struct alginfo *authdata)
+/**
+ * IPSEC_N_ENCAP_DPOVRD_L2_LEN_MASK - See IPSEC_N_ENCAP_DPOVRD_L2_LEN_SHIFT
+ */
+#define IPSEC_N_ENCAP_DPOVRD_L2_LEN_MASK \
+ (0xff << IPSEC_N_ENCAP_DPOVRD_L2_LEN_SHIFT)
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_OIMIF - Outer IP header Material in Input Frame
+ */
+#define IPSEC_N_ENCAP_DPOVRD_OIMIF BIT(15)
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_L2_COPY - L2 header present in input frame
+ *
+ * Note: For Era <= 8, this bit is reserved (not used) by HW.
+ */
+#define IPSEC_N_ENCAP_DPOVRD_L2_COPY BIT(14)
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_AOIPHO_SHIFT - Actual Outer IP Header Offset (encap)
+ */
+#define IPSEC_N_ENCAP_DPOVRD_AOIPHO_SHIFT 8
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_AOIPHO_MASK - See IPSEC_N_ENCAP_DPOVRD_AOIPHO_SHIFT
+ */
+#define IPSEC_N_ENCAP_DPOVRD_AOIPHO_MASK \
+ (0x3c << IPSEC_N_ENCAP_DPOVRD_AOIPHO_SHIFT)
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_NH_MASK - Next Header
+ *
+ * Used in the Next Header field of the encapsulated payload.
+ */
+#define IPSEC_N_ENCAP_DPOVRD_NH_MASK 0xff
+
+/**
+ * IPSEC_N_DECAP_DPOVRD_AOIPHO_SHIFT - Actual Outer IP Header Offset (decap)
+ */
+#define IPSEC_N_DECAP_DPOVRD_AOIPHO_SHIFT 12
+
+/**
+ * IPSEC_N_DECAP_DPOVRD_AOIPHO_MASK - See IPSEC_N_DECAP_DPOVRD_AOIPHO_SHIFT
+ */
+#define IPSEC_N_DECAP_DPOVRD_AOIPHO_MASK \
+ (0xff << IPSEC_N_DECAP_DPOVRD_AOIPHO_SHIFT)
+
+/**
+ * IPSEC_N_DECAP_DPOVRD_OIM_LEN_MASK - Outer IP header Material length (decap)
+ */
+#define IPSEC_N_DECAP_DPOVRD_OIM_LEN_MASK 0xfff
+
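/*
 * Illustrative sketch (not part of the patch): for new-mode encapsulation
 * with an L2 header present in the input frame, the descriptor logic added
 * further below expects DPOVRD to carry the L2_COPY flag plus the L2 header
 * length. l2_hdr_len is an assumed per-session value.
 */
uint8_t l2_hdr_len = 14;	/* e.g. Ethernet header, no VLAN */
uint32_t n_enc_dpovrd;

n_enc_dpovrd = IPSEC_N_ENCAP_DPOVRD_L2_COPY |
	       (l2_hdr_len << IPSEC_N_ENCAP_DPOVRD_L2_LEN_SHIFT);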
+static inline void __gen_auth_key(struct program *program,
+ struct alginfo *authdata)
{
uint32_t dkp_protid;
@@ -603,6 +692,7 @@ __gen_auth_key(struct program *program, struct alginfo *authdata)
* @descbuf: pointer to buffer used for descriptor construction
* @ps: if 36/40bit addressing is desired, this parameter must be true
* @swap: if true, perform descriptor byte swapping on a 4-byte boundary
+ * @share: sharing type of shared descriptor
* @pdb: pointer to the PDB to be used with this descriptor
* This structure will be copied inline to the descriptor under
* construction. No error checking will be made. Refer to the
@@ -621,6 +711,7 @@ __gen_auth_key(struct program *program, struct alginfo *authdata)
*/
static inline int
cnstr_shdsc_ipsec_encap(uint32_t *descbuf, bool ps, bool swap,
+ enum rta_share_type share,
struct ipsec_encap_pdb *pdb,
struct alginfo *cipherdata,
struct alginfo *authdata)
@@ -638,7 +729,7 @@ cnstr_shdsc_ipsec_encap(uint32_t *descbuf, bool ps, bool swap,
PROGRAM_SET_BSWAP(p);
if (ps)
PROGRAM_SET_36BIT_ADDR(p);
- phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ phdr = SHR_HDR(p, share, hdr, 0);
__rta_copy_ipsec_encap_pdb(p, pdb, cipherdata->algtype);
COPY_DATA(p, pdb->ip_hdr, pdb->ip_hdr_len);
SET_LABEL(p, hdr);
@@ -669,6 +760,7 @@ cnstr_shdsc_ipsec_encap(uint32_t *descbuf, bool ps, bool swap,
* @descbuf: pointer to buffer used for descriptor construction
* @ps: if 36/40bit addressing is desired, this parameter must be true
* @swap: if true, perform descriptor byte swapping on a 4-byte boundary
+ * @share: sharing type of shared descriptor
* @pdb: pointer to the PDB to be used with this descriptor
* This structure will be copied inline to the descriptor under
* construction. No error checking will be made. Refer to the
@@ -687,6 +779,7 @@ cnstr_shdsc_ipsec_encap(uint32_t *descbuf, bool ps, bool swap,
*/
static inline int
cnstr_shdsc_ipsec_decap(uint32_t *descbuf, bool ps, bool swap,
+ enum rta_share_type share,
struct ipsec_decap_pdb *pdb,
struct alginfo *cipherdata,
struct alginfo *authdata)
@@ -704,7 +797,7 @@ cnstr_shdsc_ipsec_decap(uint32_t *descbuf, bool ps, bool swap,
PROGRAM_SET_BSWAP(p);
if (ps)
PROGRAM_SET_36BIT_ADDR(p);
- phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ phdr = SHR_HDR(p, share, hdr, 0);
__rta_copy_ipsec_decap_pdb(p, pdb, cipherdata->algtype);
SET_LABEL(p, hdr);
pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, BOTH|SHRD);
@@ -1040,7 +1133,7 @@ cnstr_shdsc_ipsec_decap_des_aes_xcbc(uint32_t *descbuf,
* layers to determine whether Outer IP Header and/or keys can be inlined or
* not. To be used as first parameter of rta_inline_query().
*/
-#define IPSEC_NEW_ENC_BASE_DESC_LEN (5 * CAAM_CMD_SZ + \
+#define IPSEC_NEW_ENC_BASE_DESC_LEN (12 * CAAM_CMD_SZ + \
sizeof(struct ipsec_encap_pdb))
/**
@@ -1052,7 +1145,7 @@ cnstr_shdsc_ipsec_decap_des_aes_xcbc(uint32_t *descbuf,
* layers to determine whether Outer IP Header and/or key can be inlined or
* not. To be used as first parameter of rta_inline_query().
*/
-#define IPSEC_NEW_NULL_ENC_BASE_DESC_LEN (4 * CAAM_CMD_SZ + \
+#define IPSEC_NEW_NULL_ENC_BASE_DESC_LEN (11 * CAAM_CMD_SZ + \
sizeof(struct ipsec_encap_pdb))
/**
@@ -1061,6 +1154,7 @@ cnstr_shdsc_ipsec_decap_des_aes_xcbc(uint32_t *descbuf,
* @descbuf: pointer to buffer used for descriptor construction
* @ps: if 36/40bit addressing is desired, this parameter must be true
* @swap: must be true when core endianness doesn't match SEC endianness
+ * @share: sharing type of shared descriptor
* @pdb: pointer to the PDB to be used with this descriptor
* This structure will be copied inline to the descriptor under
* construction. No error checking will be made. Refer to the
@@ -1080,11 +1174,21 @@ cnstr_shdsc_ipsec_decap_des_aes_xcbc(uint32_t *descbuf,
* compute MDHA on the fly in HW.
* Valid algorithm values - one of OP_PCL_IPSEC_*
*
+ * Note: L2 header copy functionality is implemented assuming that bits 14
+ * (currently reserved) and 16-23 (part of Outer IP Header Material Length)
+ * in DPOVRD register are not used (which is usually the case when L3 header
+ * is provided in PDB).
+ * When DPOVRD[14] is set, frame starts with an L2 header; in this case, the
+ * L2 header length is found at DPOVRD[23:16]. SEC uses this length to copy
+ * the header and then it deletes DPOVRD[23:16] (so there is no side effect
+ * when later running IPsec protocol).
+ *
* Return: size of descriptor written in words or negative number on error
*/
static inline int
cnstr_shdsc_ipsec_new_encap(uint32_t *descbuf, bool ps,
bool swap,
+ enum rta_share_type share,
struct ipsec_encap_pdb *pdb,
uint8_t *opt_ip_hdr,
struct alginfo *cipherdata,
@@ -1097,6 +1201,8 @@ cnstr_shdsc_ipsec_new_encap(uint32_t *descbuf, bool ps,
REFERENCE(pkeyjmp);
LABEL(hdr);
REFERENCE(phdr);
+ LABEL(l2copy);
+ REFERENCE(pl2copy);
if (rta_sec_era < RTA_SEC_ERA_8) {
pr_err("IPsec new mode encap: available only for Era %d or above\n",
@@ -1109,7 +1215,7 @@ cnstr_shdsc_ipsec_new_encap(uint32_t *descbuf, bool ps,
PROGRAM_SET_BSWAP(p);
if (ps)
PROGRAM_SET_36BIT_ADDR(p);
- phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ phdr = SHR_HDR(p, share, hdr, 0);
__rta_copy_ipsec_encap_pdb(p, pdb, cipherdata->algtype);
@@ -1128,6 +1234,16 @@ cnstr_shdsc_ipsec_new_encap(uint32_t *descbuf, bool ps,
}
SET_LABEL(p, hdr);
+ MATHB(p, DPOVRD, AND, IPSEC_N_ENCAP_DPOVRD_L2_COPY, NONE, 4, IMMED2);
+ pl2copy = JUMP(p, l2copy, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+ MATHI(p, DPOVRD, RSHIFT, IPSEC_N_ENCAP_DPOVRD_L2_LEN_SHIFT, VSEQOUTSZ,
+ 1, 0);
+ MATHB(p, DPOVRD, AND, ~IPSEC_N_ENCAP_DPOVRD_L2_LEN_MASK, DPOVRD, 4,
+ IMMED2);
+ /* TODO: CLASS2 corresponds to AUX=2'b10; add more intuitive defines */
+ SEQFIFOSTORE(p, METADATA, 0, 0, CLASS2 | VLF);
+ SET_LABEL(p, l2copy);
+
pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
if (authdata->keylen)
__gen_auth_key(p, authdata);
@@ -1138,6 +1254,7 @@ cnstr_shdsc_ipsec_new_encap(uint32_t *descbuf, bool ps,
PROTOCOL(p, OP_TYPE_ENCAP_PROTOCOL,
OP_PCLID_IPSEC_NEW,
(uint16_t)(cipherdata->algtype | authdata->algtype));
+ PATCH_JUMP(p, pl2copy, l2copy);
PATCH_JUMP(p, pkeyjmp, keyjmp);
PATCH_HDR(p, phdr, hdr);
return PROGRAM_FINALIZE(p);
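/*
 * Illustrative sketch (not part of the patch): callers that previously
 * relied on the hard-coded SHR_SERIAL header keep the old behaviour by
 * passing it through the new 'share' argument. descbuf, ps, swap,
 * encap_pdb, opt_ip_hdr, cipherdata and authdata are assumed caller state.
 */
shdesc_len = cnstr_shdsc_ipsec_new_encap(descbuf, ps, swap, SHR_SERIAL,
					 &encap_pdb, opt_ip_hdr,
					 &cipherdata, &authdata);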
@@ -1171,6 +1288,7 @@ cnstr_shdsc_ipsec_new_encap(uint32_t *descbuf, bool ps,
* @descbuf: pointer to buffer used for descriptor construction
* @ps: if 36/40bit addressing is desired, this parameter must be true
* @swap: must be true when core endianness doesn't match SEC endianness
+ * @share: sharing type of shared descriptor
* @pdb: pointer to the PDB to be used with this descriptor
* This structure will be copied inline to the descriptor under
* construction. No error checking will be made. Refer to the
@@ -1188,6 +1306,7 @@ cnstr_shdsc_ipsec_new_encap(uint32_t *descbuf, bool ps,
static inline int
cnstr_shdsc_ipsec_new_decap(uint32_t *descbuf, bool ps,
bool swap,
+ enum rta_share_type share,
struct ipsec_decap_pdb *pdb,
struct alginfo *cipherdata,
struct alginfo *authdata)
@@ -1211,7 +1330,7 @@ cnstr_shdsc_ipsec_new_decap(uint32_t *descbuf, bool ps,
PROGRAM_SET_BSWAP(p);
if (ps)
PROGRAM_SET_36BIT_ADDR(p);
- phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ phdr = SHR_HDR(p, share, hdr, 0);
__rta_copy_ipsec_decap_pdb(p, pdb, cipherdata->algtype);
SET_LABEL(p, hdr);
pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
diff --git a/drivers/crypto/dpaa2_sec/hw/desc/pdcp.h b/drivers/crypto/dpaa2_sec/hw/desc/pdcp.h
new file mode 100644
index 00000000..719ef605
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/hw/desc/pdcp.h
@@ -0,0 +1,2796 @@
+/*
+ * Copyright 2008-2013 Freescale Semiconductor, Inc.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause or GPL-2.0+
+ */
+
+#ifndef __DESC_PDCP_H__
+#define __DESC_PDCP_H__
+
+#include "hw/rta.h"
+#include "common.h"
+
+/**
+ * DOC: PDCP Shared Descriptor Constructors
+ *
+ * Shared descriptors for PDCP protocol.
+ */
+
+/**
+ * PDCP_NULL_MAX_FRAME_LEN - The maximum frame length supported by the
+ * PDCP NULL protocol.
+ */
+#define PDCP_NULL_MAX_FRAME_LEN 0x00002FFF
+
+/**
+ * PDCP_MAC_I_LEN - The length of the MAC-I for PDCP protocol operation
+ */
+#define PDCP_MAC_I_LEN 0x00000004
+
+/**
+ * PDCP_MAX_FRAME_LEN_STATUS - The status returned in the FD status/command
+ * field in case the input frame is larger than
+ * PDCP_NULL_MAX_FRAME_LEN.
+ */
+#define PDCP_MAX_FRAME_LEN_STATUS 0xF1
+
+/**
+ * PDCP_C_PLANE_SN_MASK - This mask is used in the PDCP descriptors for
+ * extracting the sequence number (SN) from the PDCP
+ * Control Plane header. For the PDCP Control Plane,
+ * the SN length is fixed at 5 bits, as opposed to the
+ * PDCP Data Plane (7/12/15 bits).
+ */
+#define PDCP_C_PLANE_SN_MASK 0x1F000000
+#define PDCP_C_PLANE_SN_MASK_BE 0x0000001F
+
+/**
+ * PDCP_U_PLANE_15BIT_SN_MASK - This mask is used in the PDCP descriptors for
+ * extracting the sequence number (SN) from the
+ * PDCP User Plane header when the 15-bit SN length
+ * is used (as opposed to the shorter 7-bit and
+ * 12-bit Data Plane SN lengths).
+ */
+#define PDCP_U_PLANE_15BIT_SN_MASK 0xFF7F0000
+#define PDCP_U_PLANE_15BIT_SN_MASK_BE 0x00007FFF
+
+/**
+ * PDCP_BEARER_MASK - This mask is used for masking out the bearer for PDCP
+ * processing with SNOW f9 in LTE.
+ *
+ * The value on which this mask is applied is formatted as below:
+ * Count-C (32 bit) | Bearer (5 bit) | Direction (1 bit) | 0 (26 bits)
+ *
+ * Applying this mask is done for creating the upper 64 bits of the IV needed
+ * for SNOW f9.
+ *
+ * The lower 32 bits of the mask are used for masking the direction for AES
+ * CMAC IV.
+ */
+#define PDCP_BEARER_MASK 0x00000004FFFFFFFFull
+#define PDCP_BEARER_MASK_BE 0xFFFFFFFF04000000ull
+
+/**
+ * PDCP_DIR_MASK - This mask is used for masking out the direction for PDCP
+ * processing with SNOW f9 in LTE.
+ *
+ * The value on which this mask is applied is formatted as below:
+ * Bearer (5 bit) | Direction (1 bit) | 0 (26 bits)
+ *
+ * Applying this mask is done for creating the lower 32 bits of the IV needed
+ * for SNOW f9.
+ *
+ * The upper 32 bits of the mask are used for masking the direction for AES
+ * CMAC IV.
+ */
+#define PDCP_DIR_MASK 0x00000000000000F8ull
+#define PDCP_DIR_MASK_BE 0xF800000000000000ull
+
+/**
+ * PDCP_NULL_INT_MAC_I_VAL - The value of the PDCP PDU MAC-I in case NULL
+ * integrity is used.
+ */
+
+#define PDCP_NULL_INT_MAC_I_VAL 0x00000000
+
+/**
+ * PDCP_NULL_INT_ICV_CHECK_FAILED_STATUS - The status used to report ICV check
+ * failed in case of NULL integrity
+ * Control Plane processing.
+ */
+#define PDCP_NULL_INT_ICV_CHECK_FAILED_STATUS 0x0A
+/**
+ * PDCP_DPOVRD_HFN_OV_EN - Value to be used in the FD status/cmd field to
+ * indicate the HFN override mechanism is active for the
+ * frame.
+ */
+#define PDCP_DPOVRD_HFN_OV_EN 0x80000000
+
+/**
+ * PDCP_P4080REV2_HFN_OV_BUFLEN - The length in bytes of the supplementary space
+ * that must be provided by the user at the
+ * beginning of the input frame buffer for
+ * P4080 REV 2.
+ *
+ * The format of the frame buffer is the following:
+ *
+ * |<---PDCP_P4080REV2_HFN_OV_BUFLEN-->|
+ * //===================================||============||==============\\
+ * || PDCP_DPOVRD_HFN_OV_EN | HFN value || PDCP Header|| PDCP Payload ||
+ * \\===================================||============||==============//
+ *
+ * If the HFN override mechanism is not desired, then the MSB of the first
+ * 4 bytes must be set to 0.
+ */
+#define PDCP_P4080REV2_HFN_OV_BUFLEN 4
+
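/*
 * Illustrative sketch (not part of the patch): on P4080 rev 2 the caller
 * prepends the 4-byte override word described above. frame_buf is assumed to
 * point at the start of the input frame buffer; byte-order handling is
 * deliberately left out of this sketch.
 */
uint32_t hfn_value = 0x100;	/* example HFN to force for this frame */

*(uint32_t *)frame_buf = PDCP_DPOVRD_HFN_OV_EN | hfn_value;
/* PDCP header and payload follow at frame_buf + PDCP_P4080REV2_HFN_OV_BUFLEN */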
+/**
+ * enum cipher_type_pdcp - Type selectors for cipher types in PDCP protocol OP
+ * instructions.
+ * @PDCP_CIPHER_TYPE_NULL: NULL
+ * @PDCP_CIPHER_TYPE_SNOW: SNOW F8
+ * @PDCP_CIPHER_TYPE_AES: AES
+ * @PDCP_CIPHER_TYPE_ZUC: ZUCE (ZUC encryption)
+ * @PDCP_CIPHER_TYPE_INVALID: invalid option
+ */
+enum cipher_type_pdcp {
+ PDCP_CIPHER_TYPE_NULL,
+ PDCP_CIPHER_TYPE_SNOW,
+ PDCP_CIPHER_TYPE_AES,
+ PDCP_CIPHER_TYPE_ZUC,
+ PDCP_CIPHER_TYPE_INVALID
+};
+
+/**
+ * enum auth_type_pdcp - Type selectors for integrity types in PDCP protocol OP
+ * instructions.
+ * @PDCP_AUTH_TYPE_NULL: NULL
+ * @PDCP_AUTH_TYPE_SNOW: SNOW F9
+ * @PDCP_AUTH_TYPE_AES: AES CMAC
+ * @PDCP_AUTH_TYPE_ZUC: ZUCA (ZUC integrity)
+ * @PDCP_AUTH_TYPE_INVALID: invalid option
+ */
+enum auth_type_pdcp {
+ PDCP_AUTH_TYPE_NULL,
+ PDCP_AUTH_TYPE_SNOW,
+ PDCP_AUTH_TYPE_AES,
+ PDCP_AUTH_TYPE_ZUC,
+ PDCP_AUTH_TYPE_INVALID
+};
+
+/**
+ * enum pdcp_dir - Type selectors for direction for PDCP protocol
+ * @PDCP_DIR_UPLINK: uplink direction
+ * @PDCP_DIR_DOWNLINK: downlink direction
+ * @PDCP_DIR_INVALID: invalid option
+ */
+enum pdcp_dir {
+ PDCP_DIR_UPLINK = 0,
+ PDCP_DIR_DOWNLINK = 1,
+ PDCP_DIR_INVALID
+};
+
+/**
+ * enum pdcp_plane - PDCP domain selectors
+ * @PDCP_CONTROL_PLANE: Control Plane
+ * @PDCP_DATA_PLANE: Data Plane
+ * @PDCP_SHORT_MAC: Short MAC
+ */
+enum pdcp_plane {
+ PDCP_CONTROL_PLANE,
+ PDCP_DATA_PLANE,
+ PDCP_SHORT_MAC
+};
+
+/**
+ * enum pdcp_sn_size - Sequence Number Size selectors for PDCP protocol
+ * @PDCP_SN_SIZE_5: 5bit sequence number
+ * @PDCP_SN_SIZE_7: 7bit sequence number
+ * @PDCP_SN_SIZE_12: 12bit sequence number
+ * @PDCP_SN_SIZE_15: 15bit sequence number
+ */
+enum pdcp_sn_size {
+ PDCP_SN_SIZE_5 = 5,
+ PDCP_SN_SIZE_7 = 7,
+ PDCP_SN_SIZE_12 = 12,
+ PDCP_SN_SIZE_15 = 15
+};
+
+/*
+ * PDCP Control Plane Protocol Data Blocks
+ */
+#define PDCP_C_PLANE_PDB_HFN_SHIFT 5
+#define PDCP_C_PLANE_PDB_BEARER_SHIFT 27
+#define PDCP_C_PLANE_PDB_DIR_SHIFT 26
+#define PDCP_C_PLANE_PDB_HFN_THR_SHIFT 5
+
+#define PDCP_U_PLANE_PDB_OPT_SHORT_SN 0x2
+#define PDCP_U_PLANE_PDB_OPT_15B_SN 0x4
+#define PDCP_U_PLANE_PDB_SHORT_SN_HFN_SHIFT 7
+#define PDCP_U_PLANE_PDB_LONG_SN_HFN_SHIFT 12
+#define PDCP_U_PLANE_PDB_15BIT_SN_HFN_SHIFT 15
+#define PDCP_U_PLANE_PDB_BEARER_SHIFT 27
+#define PDCP_U_PLANE_PDB_DIR_SHIFT 26
+#define PDCP_U_PLANE_PDB_SHORT_SN_HFN_THR_SHIFT 7
+#define PDCP_U_PLANE_PDB_LONG_SN_HFN_THR_SHIFT 12
+#define PDCP_U_PLANE_PDB_15BIT_SN_HFN_THR_SHIFT 15
+
+struct pdcp_pdb {
+ union {
+ uint32_t opt;
+ uint32_t rsvd;
+ } opt_res;
+ uint32_t hfn_res; /* HyperFrame number (27, 25 or 21 bits),
+ * left aligned & right-padded with zeros.
+ */
+ uint32_t bearer_dir_res;/* Bearer(5 bits), packet direction (1 bit),
+ * left aligned & right-padded with zeros.
+ */
+ uint32_t hfn_thr_res; /* HyperFrame number threshold (27, 25 or 21
+ * bits), left aligned & right-padded with
+ * zeros.
+ */
+};
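/*
 * Illustrative sketch (not part of the patch): filling a Control Plane PDB
 * (5-bit SN) with the shift values defined above. hfn, bearer, dir and
 * hfn_threshold are assumed session parameters with example values.
 */
struct pdcp_pdb pdb;
uint32_t hfn = 0, hfn_threshold = 0xFF00;
uint8_t bearer = 3, dir = PDCP_DIR_UPLINK;

memset(&pdb, 0, sizeof(pdb));
pdb.hfn_res = hfn << PDCP_C_PLANE_PDB_HFN_SHIFT;
pdb.bearer_dir_res = (bearer << PDCP_C_PLANE_PDB_BEARER_SHIFT) |
		     (dir << PDCP_C_PLANE_PDB_DIR_SHIFT);
pdb.hfn_thr_res = hfn_threshold << PDCP_C_PLANE_PDB_HFN_THR_SHIFT;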
+
+/*
+ * PDCP internal PDB types
+ */
+enum pdb_type_e {
+ PDCP_PDB_TYPE_NO_PDB,
+ PDCP_PDB_TYPE_FULL_PDB,
+ PDCP_PDB_TYPE_REDUCED_PDB,
+ PDCP_PDB_TYPE_INVALID
+};
+
+/*
+ * Function for appending the portion of a PDCP Control Plane shared descriptor
+ * which performs NULL encryption and integrity (i.e. copies the input frame
+ * to the output frame, appending 32 bits of zeros at the end (MAC-I for
+ * NULL integrity).
+ */
+static inline int
+pdcp_insert_cplane_null_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata __maybe_unused,
+ struct alginfo *authdata __maybe_unused,
+ unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ LABEL(local_offset);
+ REFERENCE(move_cmd_read_descbuf);
+ REFERENCE(move_cmd_write_descbuf);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, ADD, ZERO, VSEQINSZ, 4, 0);
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ } else {
+ MATHB(p, SEQINSZ, ADD, ONE, VSEQINSZ, 4, 0);
+ MATHB(p, VSEQINSZ, SUB, ONE, VSEQINSZ, 4, 0);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ MATHB(p, VSEQINSZ, SUB, ONE, MATH0, 4, 0);
+ } else {
+ MATHB(p, VSEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQINSZ, 4,
+ IMMED2);
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ MATHB(p, VSEQOUTSZ, SUB, ONE, MATH0, 4, 0);
+ }
+
+ MATHB(p, MATH0, ADD, ONE, MATH0, 4, 0);
+
+ /*
+ * Since MOVELEN is available only starting with
+ * SEC ERA 3, use poor man's MOVELEN: create a MOVE
+ * command dynamically by OR-ing the command template
+ * into the M0 register (which holds the length) and MOVE the
+ * result into the descriptor buffer. Care must be taken
+ * wrt. the location of the command because of SEC
+ * pipelining. The actual MOVEs are written at the end
+ * of the descriptor due to calculations needed on the
+ * offset in the descriptor for the MOVE command.
+ */
+ move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH0, 0, 6,
+ IMMED);
+ move_cmd_write_descbuf = MOVE(p, MATH0, 0, DESCBUF, 0, 8,
+ WAITCOMP | IMMED);
+ }
+ MATHB(p, VSEQINSZ, SUB, PDCP_NULL_MAX_FRAME_LEN, NONE, 4,
+ IMMED2);
+ JUMP(p, PDCP_MAX_FRAME_LEN_STATUS, HALT_STATUS, ALL_FALSE, MATH_N);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, VSEQINSZ, ADD, ZERO, MATH0, 4, 0);
+ else
+ MATHB(p, VSEQOUTSZ, ADD, ZERO, MATH0, 4, 0);
+ }
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MOVE(p, AB1, 0, OFIFO, 0, MATH0, 0);
+ } else {
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+ /* Placeholder for MOVE command with length from M0 register */
+ MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MATHB(p, MATH1, XOR, MATH1, MATH0, 8, 0);
+ MOVE(p, MATH0, 0, OFIFO, 0, 4, IMMED);
+ }
+
+ if (rta_sec_era < RTA_SEC_ERA_3) {
+ PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
+ PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
+ }
+
+ return 0;
+}
+
+static inline int
+insert_copy_frame_op(struct program *p,
+ struct alginfo *cipherdata __maybe_unused,
+ unsigned int dir __maybe_unused)
+{
+ LABEL(local_offset);
+ REFERENCE(move_cmd_read_descbuf);
+ REFERENCE(move_cmd_write_descbuf);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, ADD, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, ADD, ZERO, VSEQOUTSZ, 4, 0);
+ } else {
+ MATHB(p, SEQINSZ, ADD, ONE, VSEQINSZ, 4, 0);
+ MATHB(p, VSEQINSZ, SUB, ONE, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, ADD, ONE, VSEQOUTSZ, 4, 0);
+ MATHB(p, VSEQOUTSZ, SUB, ONE, VSEQOUTSZ, 4, 0);
+ MATHB(p, VSEQINSZ, SUB, ONE, MATH0, 4, 0);
+ MATHB(p, MATH0, ADD, ONE, MATH0, 4, 0);
+
+ /*
+ * Since MOVELEN is available only starting with
+ * SEC ERA 3, use poor man's MOVELEN: create a MOVE
+ * command dynamically by OR-ing the command template
+ * into the M0 register (which holds the length) and MOVE the
+ * result into the descriptor buffer. Care must be taken
+ * wrt. the location of the command because of SEC
+ * pipelining. The actual MOVEs are written at the end
+ * of the descriptor due to calculations needed on the
+ * offset in the descriptor for the MOVE command.
+ */
+ move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH0, 0, 6,
+ IMMED);
+ move_cmd_write_descbuf = MOVE(p, MATH0, 0, DESCBUF, 0, 8,
+ WAITCOMP | IMMED);
+ }
+ MATHB(p, SEQINSZ, SUB, PDCP_NULL_MAX_FRAME_LEN, NONE, 4,
+ IFB | IMMED2);
+ JUMP(p, PDCP_MAX_FRAME_LEN_STATUS, HALT_STATUS, ALL_FALSE, MATH_N);
+
+ if (rta_sec_era > RTA_SEC_ERA_2)
+ MATHB(p, VSEQINSZ, ADD, ZERO, MATH0, 4, 0);
+
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MOVE(p, AB1, 0, OFIFO, 0, MATH0, 0);
+ } else {
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+
+ /* Placeholder for MOVE command with length from M0 register */
+ MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
+
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ if (rta_sec_era < RTA_SEC_ERA_3) {
+ PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
+ PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
+ }
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_int_only_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata __maybe_unused,
+ struct alginfo *authdata, unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ LABEL(local_offset);
+ REFERENCE(move_cmd_read_descbuf);
+ REFERENCE(move_cmd_write_descbuf);
+
+ switch (authdata->algtype) {
+ case PDCP_AUTH_TYPE_SNOW:
+ /* Insert Auth Key */
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+
+ if (rta_sec_era > RTA_SEC_ERA_2 ||
+ (rta_sec_era == RTA_SEC_ERA_2 &&
+ era_2_sw_hfn_ovrd == 0)) {
+ SEQINPTR(p, 0, 1, RTO);
+ } else {
+ SEQINPTR(p, 0, 5, RTO);
+ SEQFIFOLOAD(p, SKIP, 4, 0);
+ }
+
+ if (swap == false) {
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+
+ MOVEB(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+
+ MATHB(p, MATH2, AND, PDCP_BEARER_MASK, MATH2, 8,
+ IMMED2);
+ MOVEB(p, DESCBUF, 0x0C, MATH3, 0, 4, WAITCOMP | IMMED);
+ MATHB(p, MATH3, AND, PDCP_DIR_MASK, MATH3, 8, IMMED2);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVEB(p, MATH2, 0, CONTEXT2, 0, 0x0C, WAITCOMP | IMMED);
+ } else {
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+
+ MOVE(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH2, AND, PDCP_BEARER_MASK_BE, MATH2, 8,
+ IMMED2);
+
+ MOVE(p, DESCBUF, 0x0C, MATH3, 0, 4, WAITCOMP | IMMED);
+ MATHB(p, MATH3, AND, PDCP_DIR_MASK_BE, MATH3, 8,
+ IMMED2);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVE(p, MATH2, 0, CONTEXT2, 0, 0x0C, WAITCOMP | IMMED);
+ }
+
+ if (dir == OP_TYPE_DECAP_PROTOCOL) {
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, MATH1, 4,
+ IMMED2);
+ } else {
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4,
+ 0);
+ } else {
+ MATHB(p, SEQINSZ, ADD, ONE, MATH1, 4,
+ 0);
+ MATHB(p, MATH1, SUB, ONE, MATH1, 4,
+ 0);
+ }
+ }
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
+ } else {
+ MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
+ MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
+
+ /*
+ * Since MOVELEN is available only starting with
+ * SEC ERA 3, use poor man's MOVELEN: create a MOVE
+ * command dynamically by OR-ing the command template
+ * into the M1 register (which holds the length) and MOVE the
+ * result into the descriptor buffer. Care must be taken
+ * wrt. the location of the command because of SEC
+ * pipelining. The actual MOVEs are written at the end
+ * of the descriptor due to calculations needed on the
+ * offset in the descriptor for the MOVE command.
+ */
+ move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH1, 0, 6,
+ IMMED);
+ move_cmd_write_descbuf = MOVE(p, MATH1, 0, DESCBUF, 0,
+ 8, WAITCOMP | IMMED);
+ }
+
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F9, OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_ENC);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ SEQFIFOLOAD(p, MSGINSNOOP, 0,
+ VLF | LAST1 | LAST2 | FLUSH1);
+ MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
+ } else {
+ SEQFIFOLOAD(p, MSGINSNOOP, 0,
+ VLF | LAST1 | LAST2 | FLUSH1);
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+ /*
+ * Placeholder for MOVE command with length from M1
+ * register
+ */
+ MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+
+ if (dir == OP_TYPE_DECAP_PROTOCOL)
+ SEQFIFOLOAD(p, ICV2, 4, LAST2);
+ else
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ break;
+
+ case PDCP_AUTH_TYPE_AES:
+ /* Insert Auth Key */
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ if (rta_sec_era > RTA_SEC_ERA_2 ||
+ (rta_sec_era == RTA_SEC_ERA_2 &&
+ era_2_sw_hfn_ovrd == 0)) {
+ SEQINPTR(p, 0, 1, RTO);
+ } else {
+ SEQINPTR(p, 0, 5, RTO);
+ SEQFIFOLOAD(p, SKIP, 4, 0);
+ }
+
+ if (swap == false) {
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+
+ MOVEB(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVEB(p, MATH2, 0, IFIFOAB1, 0, 8, IMMED);
+ } else {
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+
+ MOVE(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVE(p, MATH2, 0, IFIFOAB1, 0, 8, IMMED);
+ }
+
+ if (dir == OP_TYPE_DECAP_PROTOCOL) {
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, MATH1, 4,
+ IMMED2);
+ } else {
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4,
+ 0);
+ } else {
+ MATHB(p, SEQINSZ, ADD, ONE, MATH1, 4,
+ 0);
+ MATHB(p, MATH1, SUB, ONE, MATH1, 4,
+ 0);
+ }
+ }
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
+ } else {
+ MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
+ MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
+
+ /*
+ * Since MOVELEN is available only starting with
+ * SEC ERA 3, use poor man's MOVELEN: create a MOVE
+ * command dynamically by OR-ing the command template
+ * into the M1 register (which holds the length) and MOVE the
+ * result into the descriptor buffer. Care must be taken
+ * wrt. the location of the command because of SEC
+ * pipelining. The actual MOVEs are written at the end
+ * of the descriptor due to calculations needed on the
+ * offset in the descriptor for the MOVE command.
+ */
+ move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH1, 0, 6,
+ IMMED);
+ move_cmd_write_descbuf = MOVE(p, MATH1, 0, DESCBUF, 0,
+ 8, WAITCOMP | IMMED);
+ }
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CMAC,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_ENC);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MOVE(p, AB2, 0, OFIFO, 0, MATH1, 0);
+ SEQFIFOLOAD(p, MSGINSNOOP, 0,
+ VLF | LAST1 | LAST2 | FLUSH1);
+ } else {
+ SEQFIFOLOAD(p, MSGINSNOOP, 0,
+ VLF | LAST1 | LAST2 | FLUSH1);
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+
+ /*
+ * Placeholder for MOVE command with length from
+ * M1 register
+ */
+ MOVE(p, IFIFOAB2, 0, OFIFO, 0, 0, IMMED);
+
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+
+ if (dir == OP_TYPE_DECAP_PROTOCOL)
+ SEQFIFOLOAD(p, ICV1, 4, LAST1 | FLUSH1);
+ else
+ SEQSTORE(p, CONTEXT1, 0, 4, 0);
+
+ break;
+
+ case PDCP_AUTH_TYPE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+ /* Insert Auth Key */
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ SEQINPTR(p, 0, 1, RTO);
+ if (swap == false) {
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+
+ MOVEB(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVEB(p, MATH2, 0, CONTEXT2, 0, 8, IMMED);
+
+ } else {
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+
+ MOVE(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVE(p, MATH2, 0, CONTEXT2, 0, 8, IMMED);
+ }
+ if (dir == OP_TYPE_DECAP_PROTOCOL)
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, MATH1, 4,
+ IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
+
+ MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCA,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_ENC);
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
+ MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
+
+ if (dir == OP_TYPE_DECAP_PROTOCOL)
+ SEQFIFOLOAD(p, ICV2, 4, LAST2);
+ else
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ break;
+
+ default:
+ pr_err("%s: Invalid integrity algorithm selected: %d\n",
+ "pdcp_insert_cplane_int_only_op", authdata->algtype);
+ return -EINVAL;
+ }
+
+ if (rta_sec_era < RTA_SEC_ERA_3) {
+ PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
+ PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
+ }
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_enc_only_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata __maybe_unused,
+ unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ /* Insert Cipher Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ (uint16_t)cipherdata->algtype << 8);
+ return 0;
+ }
+
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ if (swap == false)
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ else
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+ SEQSTORE(p, MATH0, 7, 1, 0);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVE(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+
+ switch (cipherdata->algtype) {
+ case PDCP_CIPHER_TYPE_SNOW:
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 8, WAITCOMP | IMMED);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ } else {
+ MATHB(p, SEQINSZ, SUB, ONE, MATH1, 4, 0);
+ MATHB(p, MATH1, ADD, ONE, VSEQINSZ, 4, 0);
+ }
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ DIR_ENC : DIR_DEC);
+ break;
+
+ case PDCP_CIPHER_TYPE_AES:
+ MOVE(p, MATH2, 0, CONTEXT1, 0x10, 0x10, WAITCOMP | IMMED);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ } else {
+ MATHB(p, SEQINSZ, SUB, ONE, MATH1, 4, 0);
+ MATHB(p, MATH1, ADD, ONE, VSEQINSZ, 4, 0);
+ }
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CTR,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ DIR_ENC : DIR_DEC);
+ break;
+
+ case PDCP_CIPHER_TYPE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 0x08, IMMED);
+ MOVE(p, MATH2, 0, CONTEXT1, 0x08, 0x08, WAITCOMP | IMMED);
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCE,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ DIR_ENC : DIR_DEC);
+ break;
+
+ default:
+ pr_err("%s: Invalid encrypt algorithm selected: %d\n",
+ "pdcp_insert_cplane_enc_only_op", cipherdata->algtype);
+ return -EINVAL;
+ }
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ SEQFIFOLOAD(p, MSG1, 0, VLF);
+ FIFOLOAD(p, MSG1, PDCP_NULL_INT_MAC_I_VAL, 4,
+ LAST1 | FLUSH1 | IMMED);
+ } else {
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+ MOVE(p, OFIFO, 0, MATH1, 4, PDCP_MAC_I_LEN, WAITCOMP | IMMED);
+ MATHB(p, MATH1, XOR, PDCP_NULL_INT_MAC_I_VAL, NONE, 4, IMMED2);
+ JUMP(p, PDCP_NULL_INT_ICV_CHECK_FAILED_STATUS,
+ HALT_STATUS, ALL_FALSE, MATH_Z);
+ }
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_acc_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ /* Insert Auth Key */
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ /* Insert Cipher Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL, (uint16_t)cipherdata->algtype);
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_snow_aes_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ LABEL(back_to_sd_offset);
+ LABEL(end_desc);
+ LABEL(local_offset);
+ LABEL(jump_to_beginning);
+ LABEL(fifo_load_mac_i_offset);
+ REFERENCE(seqin_ptr_read);
+ REFERENCE(seqin_ptr_write);
+ REFERENCE(seq_out_read);
+ REFERENCE(jump_back_to_sd_cmd);
+ REFERENCE(move_mac_i_to_desc_buf);
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+
+ return 0;
+ }
+
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ if (swap == false)
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ else
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVE(p, DESCBUF, 4, MATH2, 0, 0x08, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ SEQSTORE(p, MATH0, 7, 1, 0);
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ if (rta_sec_era > RTA_SEC_ERA_2 ||
+ (rta_sec_era == RTA_SEC_ERA_2 &&
+ era_2_sw_hfn_ovrd == 0)) {
+ SEQINPTR(p, 0, 1, RTO);
+ } else {
+ SEQINPTR(p, 0, 5, RTO);
+ SEQFIFOLOAD(p, SKIP, 4, 0);
+ }
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ MOVE(p, MATH2, 0, IFIFOAB1, 0, 0x08, IMMED);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
+ MATHB(p, VSEQINSZ, ADD, PDCP_MAC_I_LEN - 1, VSEQOUTSZ,
+ 4, IMMED2);
+ } else {
+ MATHB(p, SEQINSZ, SUB, MATH3, VSEQINSZ, 4, 0);
+ MATHB(p, VSEQINSZ, ADD, PDCP_MAC_I_LEN - 1, VSEQOUTSZ,
+ 4, IMMED2);
+ /*
+ * Note: Although the calculations below might seem a
+ * little off, the logic is the following:
+ *
+ * - SEQ IN PTR RTO below needs the full length of the
+ * frame; in case of P4080_REV_2_HFN_OV_WORKAROUND,
+ * this means the length of the frame to be processed
+ * + 4 bytes (the HFN override flag and value).
+ * The length of the frame to be processed minus 1
+ * byte is in the VSIL register (because
+ * VSIL = SIL + 3, since 1 byte, the header, has
+ * already been written by the SEQ STORE above). So for
+ * calculating the length to use in RTO, one is added
+ * to the VSIL value in order to obtain the total
+ * frame length. This helps in case of P4080, which
+ * can have the value 0 as an operand in a MATH
+ * command only as SRC1. When the HFN override
+ * workaround is not enabled, the length of the
+ * frame is given by the SIL register; the
+ * calculation is similar to the one in the SEC 4.2
+ * and SEC 5.3 cases.
+ */
+ if (era_2_sw_hfn_ovrd)
+ MATHB(p, VSEQOUTSZ, ADD, ONE, MATH1, 4,
+ 0);
+ else
+ MATHB(p, SEQINSZ, ADD, MATH3, MATH1, 4,
+ 0);
+ }
+ /*
+ * Placeholder for filling the length in
+ * SEQIN PTR RTO below
+ */
+ seqin_ptr_read = MOVE(p, DESCBUF, 0, MATH1, 0, 6, IMMED);
+ seqin_ptr_write = MOVE(p, MATH1, 0, DESCBUF, 0, 8,
+ WAITCOMP | IMMED);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CMAC,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_DEC);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+ MOVE(p, CONTEXT1, 0, MATH3, 0, 4, WAITCOMP | IMMED);
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ LOAD(p, CLRW_CLR_C1KEY |
+ CLRW_CLR_C1CTX |
+ CLRW_CLR_C1ICV |
+ CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1MODE,
+ CLRW, 0, 4, IMMED);
+ else
+ LOAD(p, CLRW_RESET_CLS1_CHA |
+ CLRW_CLR_C1KEY |
+ CLRW_CLR_C1CTX |
+ CLRW_CLR_C1ICV |
+ CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1MODE,
+ CLRW, 0, 4, IMMED);
+
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ LOAD(p, CCTRL_RESET_CHA_ALL, CCTRL, 0, 4, IMMED);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ SET_LABEL(p, local_offset);
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
+ SEQINPTR(p, 0, 0, RTO);
+
+ if (rta_sec_era == RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
+ SEQFIFOLOAD(p, SKIP, 5, 0);
+ MATHB(p, SEQINSZ, ADD, ONE, SEQINSZ, 4, 0);
+ }
+
+ MATHB(p, SEQINSZ, SUB, ONE, VSEQINSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_ENC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ if (rta_sec_era > RTA_SEC_ERA_2 ||
+ (rta_sec_era == RTA_SEC_ERA_2 &&
+ era_2_sw_hfn_ovrd == 0))
+ SEQFIFOLOAD(p, SKIP, 1, 0);
+
+ SEQFIFOLOAD(p, MSG1, 0, VLF);
+ MOVE(p, MATH3, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ PATCH_MOVE(p, seqin_ptr_read, local_offset);
+ PATCH_MOVE(p, seqin_ptr_write, local_offset);
+ } else {
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
+
+ if (rta_sec_era >= RTA_SEC_ERA_5)
+ MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
+
+ if (rta_sec_era > RTA_SEC_ERA_2)
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ else
+ MATHB(p, SEQINSZ, SUB, MATH3, VSEQINSZ, 4, 0);
+
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+/*
+ * TODO: To be changed when proper support is added in RTA (currently a
+ * command that is also written by RTA can't be loaded or patched).
+ */
+ if (p->ps)
+ WORD(p, 0x168B0004);
+ else
+ WORD(p, 0x16880404);
+
+ jump_back_to_sd_cmd = JUMP(p, 0, LOCAL_JUMP, ALL_TRUE, 0);
+ /*
+ * Placeholder for command reading the SEQ OUT command in
+ * JD. Done for rereading the decrypted data and performing
+ * the integrity check
+ */
+/*
+ * TODO: RTA currently doesn't support patching the length of a MOVE command.
+ * Thus, it is inserted as a raw word, as per the PS setting.
+ */
+ if (p->ps)
+ seq_out_read = MOVE(p, DESCBUF, 0, MATH1, 0, 20,
+ WAITCOMP | IMMED);
+ else
+ seq_out_read = MOVE(p, DESCBUF, 0, MATH1, 0, 16,
+ WAITCOMP | IMMED);
+
+ MATHB(p, MATH1, XOR, CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR, MATH1, 4,
+ IMMED2);
+ /* Placeholder for overwriting the SEQ IN with SEQ OUT */
+/*
+ * TODO: RTA currently doesn't support patching the length of a MOVE command.
+ * Thus, it is inserted as a raw word, as per the PS setting.
+ */
+ if (p->ps)
+ MOVE(p, MATH1, 0, DESCBUF, 0, 24, IMMED);
+ else
+ MOVE(p, MATH1, 0, DESCBUF, 0, 20, IMMED);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ if (rta_sec_era >= RTA_SEC_ERA_4)
+ MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
+ else
+ MOVE(p, CONTEXT1, 0, MATH3, 0, 8, IMMED);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_DEC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ move_mac_i_to_desc_buf = MOVE(p, OFIFO, 0, DESCBUF, 0,
+ 4, WAITCOMP | IMMED);
+ else
+ MOVE(p, OFIFO, 0, MATH3, 0, 4, IMMED);
+
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ LOAD(p, CCTRL_RESET_CHA_ALL, CCTRL, 0, 4, IMMED);
+ else
+ LOAD(p, CLRW_RESET_CLS1_CHA |
+ CLRW_CLR_C1KEY |
+ CLRW_CLR_C1CTX |
+ CLRW_CLR_C1ICV |
+ CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1MODE,
+ CLRW, 0, 4, IMMED);
+
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ /*
+ * Placeholder for jump in SD for executing the new SEQ IN PTR
+ * command (which is actually the old SEQ OUT PTR command
+ * copied over from the JD).
+ */
+ SET_LABEL(p, jump_to_beginning);
+ JUMP(p, 1 - jump_to_beginning, LOCAL_JUMP, ALL_TRUE, 0);
+ SET_LABEL(p, back_to_sd_offset);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CMAC,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_ENABLE,
+ DIR_DEC);
+
+ /* Read the # of bytes written in the output buffer + 1 (HDR) */
+ MATHB(p, VSEQOUTSZ, ADD, ONE, VSEQINSZ, 4, 0);
+
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ MOVE(p, MATH3, 0, IFIFOAB1, 0, 8, IMMED);
+ else
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 8, IMMED);
+
+ if (rta_sec_era == RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd)
+ SEQFIFOLOAD(p, SKIP, 4, 0);
+
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+
+ if (rta_sec_era >= RTA_SEC_ERA_4) {
+ LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
+ NFIFOENTRY_DEST_CLASS1 |
+ NFIFOENTRY_DTYPE_ICV |
+ NFIFOENTRY_LC1 |
+ NFIFOENTRY_FC1 | 4, NFIFO_SZL, 0, 4, IMMED);
+ MOVE(p, MATH3, 0, ALTSOURCE, 0, 4, IMMED);
+ } else {
+ SET_LABEL(p, fifo_load_mac_i_offset);
+ FIFOLOAD(p, ICV1, fifo_load_mac_i_offset, 4,
+ LAST1 | FLUSH1 | IMMED);
+ }
+
+ SET_LABEL(p, end_desc);
+
+ if (!p->ps) {
+ PATCH_MOVE(p, seq_out_read, end_desc + 1);
+ PATCH_JUMP(p, jump_back_to_sd_cmd,
+ back_to_sd_offset + jump_back_to_sd_cmd - 5);
+
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ PATCH_MOVE(p, move_mac_i_to_desc_buf,
+ fifo_load_mac_i_offset + 1);
+ } else {
+ PATCH_MOVE(p, seq_out_read, end_desc + 2);
+ PATCH_JUMP(p, jump_back_to_sd_cmd,
+ back_to_sd_offset + jump_back_to_sd_cmd - 5);
+
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ PATCH_MOVE(p, move_mac_i_to_desc_buf,
+ fifo_load_mac_i_offset + 1);
+ }
+ }
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_aes_snow_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+
+ return 0;
+ }
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, SUB, ONE, VSEQINSZ, 4, 0);
+
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MOVE(p, MATH0, 7, IFIFOAB2, 0, 1, IMMED);
+ if (swap == false)
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ else
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+
+ SEQSTORE(p, MATH0, 7, 1, 0);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVE(p, DESCBUF, 4, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH1, 8, 0);
+ MOVE(p, MATH1, 0, CONTEXT1, 16, 8, IMMED);
+ MOVE(p, MATH1, 0, CONTEXT2, 0, 4, IMMED);
+ if (swap == false) {
+ MATHB(p, MATH1, AND, lower_32_bits(PDCP_BEARER_MASK), MATH2, 4,
+ IMMED2);
+ MATHB(p, MATH1, AND, upper_32_bits(PDCP_DIR_MASK), MATH3, 4,
+ IMMED2);
+ } else {
+ MATHB(p, MATH1, AND, lower_32_bits(PDCP_BEARER_MASK_BE), MATH2,
+ 4, IMMED2);
+ MATHB(p, MATH1, AND, upper_32_bits(PDCP_DIR_MASK_BE), MATH3,
+ 4, IMMED2);
+ }
+ MATHB(p, MATH3, SHLD, MATH3, MATH3, 8, 0);
+ MOVE(p, MATH2, 4, OFIFO, 0, 12, IMMED);
+ MOVE(p, OFIFO, 0, CONTEXT2, 4, 12, IMMED);
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+ } else {
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, MATH1, 4, IMMED2);
+
+ MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
+ MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
+ }
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ else
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F9,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_DEC);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CTR,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ? DIR_ENC : DIR_DEC);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST2);
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ } else {
+ SEQFIFOLOAD(p, MSGOUTSNOOP, 0, VLF | LAST2);
+ SEQFIFOLOAD(p, MSG1, 4, LAST1 | FLUSH1);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CLASS1 | NOP | NIFP);
+
+ if (rta_sec_era >= RTA_SEC_ERA_6)
+ LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
+
+ MOVE(p, OFIFO, 0, MATH0, 0, 4, WAITCOMP | IMMED);
+
+ NFIFOADD(p, IFIFO, ICV2, 4, LAST2);
+
+ if (rta_sec_era <= RTA_SEC_ERA_2) {
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+ MOVE(p, MATH0, 0, IFIFOAB2, 0, 4, WAITCOMP | IMMED);
+ } else {
+ MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
+ }
+ }
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_snow_zuc_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ LABEL(keyjump);
+ REFERENCE(pkeyjump);
+
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+
+ pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF | BOTH);
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ SET_LABEL(p, keyjump);
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+ return 0;
+ }
+
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MOVE(p, MATH0, 7, IFIFOAB2, 0, 1, IMMED);
+ if (swap == false)
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ else
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVE(p, DESCBUF, 4, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
+ MOVE(p, MATH2, 0, CONTEXT2, 0, 8, WAITCOMP | IMMED);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ SEQSTORE(p, MATH0, 7, 1, 0);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST2);
+ } else {
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ SEQFIFOLOAD(p, MSGOUTSNOOP, 0, VLF | LAST1 | FLUSH1);
+ }
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCA,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_ENC);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ? DIR_ENC : DIR_DEC);
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ } else {
+ /* Save ICV */
+ MOVE(p, OFIFO, 0, MATH0, 0, 4, IMMED);
+ LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
+ NFIFOENTRY_DEST_CLASS2 |
+ NFIFOENTRY_DTYPE_ICV |
+ NFIFOENTRY_LC2 | 4, NFIFO_SZL, 0, 4, IMMED);
+ MOVE(p, MATH0, 0, ALTSOURCE, 0, 4, WAITCOMP | IMMED);
+ }
+
+ /* Reset ZUCA mode and done interrupt */
+ LOAD(p, CLRW_CLR_C2MODE, CLRW, 0, 4, IMMED);
+ LOAD(p, CIRQ_ZADI, ICTRL, 0, 4, IMMED);
+
+ PATCH_JUMP(p, pkeyjump, keyjump);
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_aes_zuc_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ LABEL(keyjump);
+ REFERENCE(pkeyjump);
+
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+
+ pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF | BOTH);
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+
+ return 0;
+ }
+
+ SET_LABEL(p, keyjump);
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MOVE(p, MATH0, 7, IFIFOAB2, 0, 1, IMMED);
+ if (swap == false)
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ else
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVE(p, DESCBUF, 4, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVE(p, MATH2, 0, CONTEXT1, 16, 8, IMMED);
+ MOVE(p, MATH2, 0, CONTEXT2, 0, 8, WAITCOMP | IMMED);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ SEQSTORE(p, MATH0, 7, 1, 0);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST2);
+ } else {
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ SEQFIFOLOAD(p, MSGOUTSNOOP, 0, VLF | LAST1 | FLUSH1);
+ }
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCA,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_ENC);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CTR,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ? DIR_ENC : DIR_DEC);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ } else {
+ /* Save ICV */
+ MOVE(p, OFIFO, 0, MATH0, 0, 4, IMMED);
+
+ LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
+ NFIFOENTRY_DEST_CLASS2 |
+ NFIFOENTRY_DTYPE_ICV |
+ NFIFOENTRY_LC2 | 4, NFIFO_SZL, 0, 4, IMMED);
+ MOVE(p, MATH0, 0, ALTSOURCE, 0, 4, WAITCOMP | IMMED);
+ }
+
+ /* Reset ZUCA mode and done interrupt */
+ LOAD(p, CLRW_CLR_C2MODE, CLRW, 0, 4, IMMED);
+ LOAD(p, CIRQ_ZADI, ICTRL, 0, 4, IMMED);
+
+ PATCH_JUMP(p, pkeyjump, keyjump);
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_zuc_snow_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ LABEL(keyjump);
+ REFERENCE(pkeyjump);
+
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+
+ pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF | BOTH);
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+
+ return 0;
+ }
+
+ SET_LABEL(p, keyjump);
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MOVE(p, MATH0, 7, IFIFOAB2, 0, 1, IMMED);
+ if (swap == false)
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ else
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVE(p, DESCBUF, 4, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH1, 8, 0);
+ MOVE(p, MATH1, 0, CONTEXT1, 0, 8, IMMED);
+ MOVE(p, MATH1, 0, CONTEXT2, 0, 4, IMMED);
+ if (swap == false) {
+ MATHB(p, MATH1, AND, lower_32_bits(PDCP_BEARER_MASK), MATH2,
+ 4, IMMED2);
+ MATHB(p, MATH1, AND, upper_32_bits(PDCP_DIR_MASK), MATH3,
+ 4, IMMED2);
+ } else {
+ MATHB(p, MATH1, AND, lower_32_bits(PDCP_BEARER_MASK_BE), MATH2,
+ 4, IMMED2);
+ MATHB(p, MATH1, AND, upper_32_bits(PDCP_DIR_MASK_BE), MATH3,
+ 4, IMMED2);
+ }
+ MATHB(p, MATH3, SHLD, MATH3, MATH3, 8, 0);
+ MOVE(p, MATH2, 4, OFIFO, 0, 12, IMMED);
+ MOVE(p, OFIFO, 0, CONTEXT2, 4, 12, IMMED);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ } else {
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+ MATHB(p, VSEQOUTSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ }
+
+ SEQSTORE(p, MATH0, 7, 1, 0);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST2);
+ } else {
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ SEQFIFOLOAD(p, MSGOUTSNOOP, 0, VLF | LAST2);
+ }
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F9,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_DEC);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCE,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ? DIR_ENC : DIR_DEC);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ } else {
+ SEQFIFOLOAD(p, MSG1, 4, LAST1 | FLUSH1);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CLASS1 | NOP | NIFP);
+
+ if (rta_sec_era >= RTA_SEC_ERA_6)
+ /*
+ * For SEC ERA 6, there's a problem with the OFIFO
+ * pointer, and thus it needs to be reset here before
+ * moving to M0.
+ */
+ LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
+
+ /* Put ICV to M0 before sending it to C2 for comparison. */
+ MOVE(p, OFIFO, 0, MATH0, 0, 4, WAITCOMP | IMMED);
+
+ LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
+ NFIFOENTRY_DEST_CLASS2 |
+ NFIFOENTRY_DTYPE_ICV |
+ NFIFOENTRY_LC2 | 4, NFIFO_SZL, 0, 4, IMMED);
+ MOVE(p, MATH0, 0, ALTSOURCE, 0, 4, IMMED);
+ }
+
+ PATCH_JUMP(p, pkeyjump, keyjump);
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_zuc_aes_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+ return 0;
+ }
+
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ if (swap == false)
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ else
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVE(p, DESCBUF, 4, MATH2, 0, 0x08, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ SEQSTORE(p, MATH0, 7, 1, 0);
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ MOVE(p, MATH2, 0, IFIFOAB1, 0, 0x08, IMMED);
+ MOVE(p, MATH0, 7, IFIFOAB1, 0, 1, IMMED);
+
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, VSEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CMAC,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_DEC);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+ MOVE(p, CONTEXT1, 0, MATH3, 0, 4, WAITCOMP | IMMED);
+ LOAD(p, CLRW_RESET_CLS1_CHA |
+ CLRW_CLR_C1KEY |
+ CLRW_CLR_C1CTX |
+ CLRW_CLR_C1ICV |
+ CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1MODE,
+ CLRW, 0, 4, IMMED);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
+ SEQINPTR(p, 0, PDCP_NULL_MAX_FRAME_LEN, RTO);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCE,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_ENC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ SEQFIFOLOAD(p, SKIP, 1, 0);
+
+ SEQFIFOLOAD(p, MSG1, 0, VLF);
+ MOVE(p, MATH3, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ } else {
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
+
+ MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
+
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCE,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_DEC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+
+ MOVE(p, OFIFO, 0, MATH3, 0, 4, IMMED);
+
+ LOAD(p, CLRW_RESET_CLS1_CHA |
+ CLRW_CLR_C1KEY |
+ CLRW_CLR_C1CTX |
+ CLRW_CLR_C1ICV |
+ CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1MODE,
+ CLRW, 0, 4, IMMED);
+
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+
+ SEQINPTR(p, 0, 0, SOP);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CMAC,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_ENABLE,
+ DIR_DEC);
+
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 8, IMMED);
+
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+
+ LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
+ NFIFOENTRY_DEST_CLASS1 |
+ NFIFOENTRY_DTYPE_ICV |
+ NFIFOENTRY_LC1 |
+ NFIFOENTRY_FC1 | 4, NFIFO_SZL, 0, 4, IMMED);
+ MOVE(p, MATH3, 0, ALTSOURCE, 0, 4, IMMED);
+ }
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_uplane_15bit_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ unsigned int dir)
+{
+ int op;
+ /* Insert Cipher Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_USER,
+ (uint16_t)cipherdata->algtype);
+ return 0;
+ }
+
+ SEQLOAD(p, MATH0, 6, 2, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ if (swap == false)
+ MATHB(p, MATH0, AND, PDCP_U_PLANE_15BIT_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ else
+ MATHB(p, MATH0, AND, PDCP_U_PLANE_15BIT_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+ SEQSTORE(p, MATH0, 6, 2, 0);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVE(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+
+ MATHB(p, SEQINSZ, SUB, MATH3, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, MATH3, VSEQOUTSZ, 4, 0);
+
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ op = dir == OP_TYPE_ENCAP_PROTOCOL ? DIR_ENC : DIR_DEC;
+ switch (cipherdata->algtype) {
+ case PDCP_CIPHER_TYPE_SNOW:
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 8, WAITCOMP | IMMED);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ op);
+ break;
+
+ case PDCP_CIPHER_TYPE_AES:
+ MOVE(p, MATH2, 0, CONTEXT1, 0x10, 0x10, WAITCOMP | IMMED);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CTR,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ op);
+ break;
+
+ case PDCP_CIPHER_TYPE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 0x08, IMMED);
+ MOVE(p, MATH2, 0, CONTEXT1, 0x08, 0x08, WAITCOMP | IMMED);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCE,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ op);
+ break;
+
+ default:
+ pr_err("%s: Invalid encrypt algorithm selected: %d\n",
+ "pdcp_insert_uplane_15bit_op", cipherdata->algtype);
+ return -EINVAL;
+ }
+
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+
+ return 0;
+}
+
+/*
+ * Function for inserting the snippet of code responsible for creating
+ * the HFN override code via either DPOVRD or via the input frame.
+ */
+static inline int
+insert_hfn_ov_op(struct program *p,
+ uint32_t shift,
+ enum pdb_type_e pdb_type,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ uint32_t imm = PDCP_DPOVRD_HFN_OV_EN;
+ uint16_t hfn_pdb_offset;
+
+ if (rta_sec_era == RTA_SEC_ERA_2 && !era_2_sw_hfn_ovrd)
+ return 0;
+
+ switch (pdb_type) {
+ case PDCP_PDB_TYPE_NO_PDB:
+ /*
+ * If there is no PDB, then the HFN override mechanism does not
+ * make any sense; thus, in this case, the function returns
+ * without adding anything to the descriptor buffer.
+ */
+ return 0;
+
+ case PDCP_PDB_TYPE_REDUCED_PDB:
+ hfn_pdb_offset = 4;
+ break;
+
+ case PDCP_PDB_TYPE_FULL_PDB:
+ hfn_pdb_offset = 8;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, DPOVRD, AND, imm, NONE, 8, IFB | IMMED2);
+ } else {
+ SEQLOAD(p, MATH0, 4, 4, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MATHB(p, MATH0, AND, imm, NONE, 8, IFB | IMMED2);
+ SEQSTORE(p, MATH0, 4, 4, 0);
+ }
+
+ if (rta_sec_era >= RTA_SEC_ERA_8)
+ JUMP(p, 6, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+ else
+ JUMP(p, 5, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+
+ if (rta_sec_era > RTA_SEC_ERA_2)
+ MATHB(p, DPOVRD, LSHIFT, shift, MATH0, 4, IMMED2);
+ else
+ MATHB(p, MATH0, LSHIFT, shift, MATH0, 4, IMMED2);
+
+ MATHB(p, MATH0, SHLD, MATH0, MATH0, 8, 0);
+ MOVE(p, MATH0, 0, DESCBUF, hfn_pdb_offset, 4, IMMED);
+
+ if (rta_sec_era >= RTA_SEC_ERA_8)
+ /*
+ * For ERA8, DPOVRD could be handled by the PROTOCOL command
+ * itself. For now, this is not done. Thus, clear DPOVRD here
+ * to alleviate any side-effects.
+ */
+ MATHB(p, DPOVRD, AND, ZERO, DPOVRD, 4, STL);
+
+ return 0;
+}
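+
+/*
+ * Usage sketch for the HFN override snippet above (values are hypothetical).
+ * For SEC ERA > 2, a caller that sets in the job descriptor
+ *
+ *   DPOVRD = PDCP_DPOVRD_HFN_OV_EN | new_hfn
+ *
+ * causes the snippet to shift new_hfn left by the SN size and overwrite the
+ * HFN word of the PDB (offset 4 for a reduced PDB, 8 for a full PDB); if the
+ * enable bit is clear, the jump above skips the override and the PDB HFN is
+ * used unchanged. On SEC ERA 2 with era_2_sw_hfn_ovrd, the same flag/value
+ * word is instead expected in-band at the start of the input frame.
+ */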
+
+/*
+ * PDCP Control PDB creation function
+ */
+static inline enum pdb_type_e
+cnstr_pdcp_c_plane_pdb(struct program *p,
+ uint32_t hfn,
+ unsigned char bearer,
+ unsigned char direction,
+ uint32_t hfn_threshold,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct pdcp_pdb pdb;
+ enum pdb_type_e
+ pdb_mask[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID] = {
+ { /* NULL */
+ PDCP_PDB_TYPE_NO_PDB, /* NULL */
+ PDCP_PDB_TYPE_FULL_PDB, /* SNOW f9 */
+ PDCP_PDB_TYPE_FULL_PDB, /* AES CMAC */
+ PDCP_PDB_TYPE_FULL_PDB /* ZUC-I */
+ },
+ { /* SNOW f8 */
+ PDCP_PDB_TYPE_FULL_PDB, /* NULL */
+ PDCP_PDB_TYPE_FULL_PDB, /* SNOW f9 */
+ PDCP_PDB_TYPE_REDUCED_PDB, /* AES CMAC */
+ PDCP_PDB_TYPE_REDUCED_PDB /* ZUC-I */
+ },
+ { /* AES CTR */
+ PDCP_PDB_TYPE_FULL_PDB, /* NULL */
+ PDCP_PDB_TYPE_REDUCED_PDB, /* SNOW f9 */
+ PDCP_PDB_TYPE_FULL_PDB, /* AES CMAC */
+ PDCP_PDB_TYPE_REDUCED_PDB /* ZUC-I */
+ },
+ { /* ZUC-E */
+ PDCP_PDB_TYPE_FULL_PDB, /* NULL */
+ PDCP_PDB_TYPE_REDUCED_PDB, /* SNOW f9 */
+ PDCP_PDB_TYPE_REDUCED_PDB, /* AES CMAC */
+ PDCP_PDB_TYPE_FULL_PDB /* ZUC-I */
+ },
+ };
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ memset(&pdb, 0x00, sizeof(struct pdcp_pdb));
+
+ /* This is a HW issue. Bit 2 should be set to zero,
+ * but it does not work this way. Override here.
+ */
+ pdb.opt_res.rsvd = 0x00000002;
+
+ /* Copy relevant information from user to PDB */
+ pdb.hfn_res = hfn << PDCP_C_PLANE_PDB_HFN_SHIFT;
+ pdb.bearer_dir_res = (uint32_t)
+ ((bearer << PDCP_C_PLANE_PDB_BEARER_SHIFT) |
+ (direction << PDCP_C_PLANE_PDB_DIR_SHIFT));
+ pdb.hfn_thr_res =
+ hfn_threshold << PDCP_C_PLANE_PDB_HFN_THR_SHIFT;
+
+ /* copy PDB in descriptor*/
+ __rta_out32(p, pdb.opt_res.opt);
+ __rta_out32(p, pdb.hfn_res);
+ __rta_out32(p, pdb.bearer_dir_res);
+ __rta_out32(p, pdb.hfn_thr_res);
+
+ return PDCP_PDB_TYPE_FULL_PDB;
+ }
+
+ switch (pdb_mask[cipherdata->algtype][authdata->algtype]) {
+ case PDCP_PDB_TYPE_NO_PDB:
+ break;
+
+ case PDCP_PDB_TYPE_REDUCED_PDB:
+ __rta_out32(p, (hfn << PDCP_C_PLANE_PDB_HFN_SHIFT));
+ __rta_out32(p,
+ (uint32_t)((bearer <<
+ PDCP_C_PLANE_PDB_BEARER_SHIFT) |
+ (direction <<
+ PDCP_C_PLANE_PDB_DIR_SHIFT)));
+ break;
+
+ case PDCP_PDB_TYPE_FULL_PDB:
+ memset(&pdb, 0x00, sizeof(struct pdcp_pdb));
+
+ /* This is a HW issue. Bit 2 should be set to zero,
+ * but it does not work this way. Override here.
+ */
+ pdb.opt_res.rsvd = 0x00000002;
+
+ /* Copy relevant information from user to PDB */
+ pdb.hfn_res = hfn << PDCP_C_PLANE_PDB_HFN_SHIFT;
+ pdb.bearer_dir_res = (uint32_t)
+ ((bearer << PDCP_C_PLANE_PDB_BEARER_SHIFT) |
+ (direction << PDCP_C_PLANE_PDB_DIR_SHIFT));
+ pdb.hfn_thr_res =
+ hfn_threshold << PDCP_C_PLANE_PDB_HFN_THR_SHIFT;
+
+ /* copy PDB in descriptor*/
+ __rta_out32(p, pdb.opt_res.opt);
+ __rta_out32(p, pdb.hfn_res);
+ __rta_out32(p, pdb.bearer_dir_res);
+ __rta_out32(p, pdb.hfn_thr_res);
+
+ break;
+
+ default:
+ return PDCP_PDB_TYPE_INVALID;
+ }
+
+ return pdb_mask[cipherdata->algtype][authdata->algtype];
+}
+
+/*
+ * PDCP UPlane PDB creation function
+ */
+static inline int
+cnstr_pdcp_u_plane_pdb(struct program *p,
+ enum pdcp_sn_size sn_size,
+ uint32_t hfn, unsigned short bearer,
+ unsigned short direction,
+ uint32_t hfn_threshold)
+{
+ struct pdcp_pdb pdb;
+ /* Read options from user */
+ /* Depending on sequence number length, the HFN and HFN threshold
+ * have different lengths.
+ */
+ memset(&pdb, 0x00, sizeof(struct pdcp_pdb));
+
+ switch (sn_size) {
+ case PDCP_SN_SIZE_7:
+ pdb.opt_res.opt |= PDCP_U_PLANE_PDB_OPT_SHORT_SN;
+ pdb.hfn_res = hfn << PDCP_U_PLANE_PDB_SHORT_SN_HFN_SHIFT;
+ pdb.hfn_thr_res =
+ hfn_threshold<<PDCP_U_PLANE_PDB_SHORT_SN_HFN_THR_SHIFT;
+ break;
+
+ case PDCP_SN_SIZE_12:
+ pdb.opt_res.opt &= (uint32_t)(~PDCP_U_PLANE_PDB_OPT_SHORT_SN);
+ pdb.hfn_res = hfn << PDCP_U_PLANE_PDB_LONG_SN_HFN_SHIFT;
+ pdb.hfn_thr_res =
+ hfn_threshold<<PDCP_U_PLANE_PDB_LONG_SN_HFN_THR_SHIFT;
+ break;
+
+ case PDCP_SN_SIZE_15:
+ pdb.opt_res.opt = (uint32_t)(PDCP_U_PLANE_PDB_OPT_15B_SN);
+ pdb.hfn_res = hfn << PDCP_U_PLANE_PDB_15BIT_SN_HFN_SHIFT;
+ pdb.hfn_thr_res =
+ hfn_threshold<<PDCP_U_PLANE_PDB_15BIT_SN_HFN_THR_SHIFT;
+ break;
+
+ default:
+ pr_err("Invalid Sequence Number Size setting in PDB\n");
+ return -EINVAL;
+ }
+
+ pdb.bearer_dir_res = (uint32_t)
+ ((bearer << PDCP_U_PLANE_PDB_BEARER_SHIFT) |
+ (direction << PDCP_U_PLANE_PDB_DIR_SHIFT));
+
+ /* copy PDB in descriptor*/
+ __rta_out32(p, pdb.opt_res.opt);
+ __rta_out32(p, pdb.hfn_res);
+ __rta_out32(p, pdb.bearer_dir_res);
+ __rta_out32(p, pdb.hfn_thr_res);
+
+ return 0;
+}
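+
+/*
+ * Worked example (hypothetical values): for PDCP_SN_SIZE_12 the long-SN
+ * layout is used, so a call such as
+ *
+ *   cnstr_pdcp_u_plane_pdb(p, PDCP_SN_SIZE_12, 0x1, 0x3, 1, 0xFF);
+ *
+ * emits opt = 0 (short-SN option cleared), hfn_res = 0x1 << 12 = 0x1000,
+ * bearer_dir_res = (0x3 << 27) | (1 << 26) = 0x1C000000 and
+ * hfn_thr_res = 0xFF << 12 = 0xFF000.
+ */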
+
+/**
+ * cnstr_shdsc_pdcp_c_plane_encap - Function for creating a PDCP Control Plane
+ * encapsulation descriptor.
+ * @descbuf: pointer to buffer for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @hfn: starting Hyper Frame Number to be used together with the SN from the
+ * PDCP frames.
+ * @bearer: radio bearer ID
+ * @direction: the direction of the PDCP frame (UL/DL)
+ * @hfn_threshold: HFN value that once reached triggers a warning from SEC that
+ * keys should be renegotiated at the earliest convenience.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values are those from cipher_type_pdcp enum.
+ * @authdata: pointer to authentication transform definitions
+ * Valid algorithm values are those from auth_type_pdcp enum.
+ * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
+ * this descriptor. Note: Can only be used for
+ * SEC ERA 2.
+ * Return: size of descriptor written in words or negative number on error.
+ *         The returned value can be used for reclaiming the space that
+ *         wasn't used for the descriptor.
+ *
+ * Note: descbuf must be large enough to contain a full 256-byte-long
+ * descriptor; after the function returns, by subtracting the actual number of
+ * bytes used, the user can reuse the remaining buffer space for other purposes.
+ */
+static inline int
+cnstr_shdsc_pdcp_c_plane_encap(uint32_t *descbuf,
+ bool ps,
+ bool swap,
+ uint32_t hfn,
+ unsigned char bearer,
+ unsigned char direction,
+ uint32_t hfn_threshold,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ static int
+ (*pdcp_cp_fp[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID])
+ (struct program*, bool swap, struct alginfo *,
+ struct alginfo *, unsigned int,
+ unsigned char __maybe_unused) = {
+ { /* NULL */
+ pdcp_insert_cplane_null_op, /* NULL */
+ pdcp_insert_cplane_int_only_op, /* SNOW f9 */
+ pdcp_insert_cplane_int_only_op, /* AES CMAC */
+ pdcp_insert_cplane_int_only_op /* ZUC-I */
+ },
+ { /* SNOW f8 */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_cplane_acc_op, /* SNOW f9 */
+ pdcp_insert_cplane_snow_aes_op, /* AES CMAC */
+ pdcp_insert_cplane_snow_zuc_op /* ZUC-I */
+ },
+ { /* AES CTR */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_cplane_aes_snow_op, /* SNOW f9 */
+ pdcp_insert_cplane_acc_op, /* AES CMAC */
+ pdcp_insert_cplane_aes_zuc_op /* ZUC-I */
+ },
+ { /* ZUC-E */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_cplane_zuc_snow_op, /* SNOW f9 */
+ pdcp_insert_cplane_zuc_aes_op, /* AES CMAC */
+ pdcp_insert_cplane_acc_op /* ZUC-I */
+ },
+ };
+ static enum rta_share_type
+ desc_share[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID] = {
+ { /* NULL */
+ SHR_WAIT, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_ALWAYS, /* AES CMAC */
+ SHR_ALWAYS /* ZUC-I */
+ },
+ { /* SNOW f8 */
+ SHR_ALWAYS, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_WAIT, /* AES CMAC */
+ SHR_WAIT /* ZUC-I */
+ },
+ { /* AES CTR */
+ SHR_ALWAYS, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_ALWAYS, /* AES CMAC */
+ SHR_WAIT /* ZUC-I */
+ },
+ { /* ZUC-E */
+ SHR_ALWAYS, /* NULL */
+ SHR_WAIT, /* SNOW f9 */
+ SHR_WAIT, /* AES CMAC */
+ SHR_ALWAYS /* ZUC-I */
+ },
+ };
+ enum pdb_type_e pdb_type;
+ struct program prg;
+ struct program *p = &prg;
+ int err;
+ LABEL(pdb_end);
+
+ if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
+ pr_err("Cannot select SW HFN override for other era than 2");
+ return -EINVAL;
+ }
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, desc_share[cipherdata->algtype][authdata->algtype], 0, 0);
+
+ pdb_type = cnstr_pdcp_c_plane_pdb(p,
+ hfn,
+ bearer,
+ direction,
+ hfn_threshold,
+ cipherdata,
+ authdata);
+
+ SET_LABEL(p, pdb_end);
+
+ err = insert_hfn_ov_op(p, PDCP_SN_SIZE_5, pdb_type,
+ era_2_sw_hfn_ovrd);
+ if (err)
+ return err;
+
+ err = pdcp_cp_fp[cipherdata->algtype][authdata->algtype](p,
+ swap,
+ cipherdata,
+ authdata,
+ OP_TYPE_ENCAP_PROTOCOL,
+ era_2_sw_hfn_ovrd);
+ if (err)
+ return err;
+
+ PATCH_HDR(p, 0, pdb_end);
+
+ return PROGRAM_FINALIZE(p);
+}
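+
+/*
+ * Caller-side sketch (hypothetical key buffers and parameter values; the
+ * alginfo field names are the ones used throughout this file, and aes_key /
+ * snow_key stand for the addresses of the key material). shared_desc is
+ * sized for the full 256-byte descriptor mentioned in the note above:
+ *
+ *   uint32_t shared_desc[64];
+ *   struct alginfo cipher = {
+ *           .algtype = PDCP_CIPHER_TYPE_AES,
+ *           .key = (uintptr_t)aes_key,
+ *           .keylen = 16,
+ *           .key_enc_flags = 0,
+ *   };
+ *   struct alginfo auth = {
+ *           .algtype = PDCP_AUTH_TYPE_SNOW,
+ *           .key = (uintptr_t)snow_key,
+ *           .keylen = 16,
+ *           .key_enc_flags = 0,
+ *   };
+ *   int len = cnstr_shdsc_pdcp_c_plane_encap(shared_desc, ps, swap, hfn,
+ *                                            bearer, direction,
+ *                                            hfn_threshold, &cipher,
+ *                                            &auth, 0);
+ *
+ * A negative len indicates an error; otherwise len is the descriptor size in
+ * words, and the remaining buffer space can be reused by the caller.
+ */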
+
+/**
+ * cnstr_shdsc_pdcp_c_plane_decap - Function for creating a PDCP Control Plane
+ * decapsulation descriptor.
+ * @descbuf: pointer to buffer for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @hfn: starting Hyper Frame Number to be used together with the SN from the
+ * PDCP frames.
+ * @bearer: radio bearer ID
+ * @direction: the direction of the PDCP frame (UL/DL)
+ * @hfn_threshold: HFN value that once reached triggers a warning from SEC that
+ * keys should be renegotiated at the earliest convenience.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values are those from cipher_type_pdcp enum.
+ * @authdata: pointer to authentication transform definitions
+ * Valid algorithm values are those from auth_type_pdcp enum.
+ * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
+ * this descriptor. Note: Can only be used for
+ * SEC ERA 2.
+ *
+ * Return: size of descriptor written in words or negative number on error.
+ *         The returned value can be used for reclaiming the space that
+ *         wasn't used for the descriptor.
+ *
+ * Note: descbuf must be large enough to contain a full 256-byte-long
+ * descriptor; after the function returns, by subtracting the actual number of
+ * bytes used, the user can reuse the remaining buffer space for other purposes.
+ */
+static inline int
+cnstr_shdsc_pdcp_c_plane_decap(uint32_t *descbuf,
+ bool ps,
+ bool swap,
+ uint32_t hfn,
+ unsigned char bearer,
+ unsigned char direction,
+ uint32_t hfn_threshold,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ static int
+ (*pdcp_cp_fp[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID])
+ (struct program*, bool swap, struct alginfo *,
+ struct alginfo *, unsigned int, unsigned char) = {
+ { /* NULL */
+ pdcp_insert_cplane_null_op, /* NULL */
+ pdcp_insert_cplane_int_only_op, /* SNOW f9 */
+ pdcp_insert_cplane_int_only_op, /* AES CMAC */
+ pdcp_insert_cplane_int_only_op /* ZUC-I */
+ },
+ { /* SNOW f8 */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_cplane_acc_op, /* SNOW f9 */
+ pdcp_insert_cplane_snow_aes_op, /* AES CMAC */
+ pdcp_insert_cplane_snow_zuc_op /* ZUC-I */
+ },
+ { /* AES CTR */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_cplane_aes_snow_op, /* SNOW f9 */
+ pdcp_insert_cplane_acc_op, /* AES CMAC */
+ pdcp_insert_cplane_aes_zuc_op /* ZUC-I */
+ },
+ { /* ZUC-E */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_cplane_zuc_snow_op, /* SNOW f9 */
+ pdcp_insert_cplane_zuc_aes_op, /* AES CMAC */
+ pdcp_insert_cplane_acc_op /* ZUC-I */
+ },
+ };
+ static enum rta_share_type
+ desc_share[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID] = {
+ { /* NULL */
+ SHR_WAIT, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_ALWAYS, /* AES CMAC */
+ SHR_ALWAYS /* ZUC-I */
+ },
+ { /* SNOW f8 */
+ SHR_ALWAYS, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_WAIT, /* AES CMAC */
+ SHR_WAIT /* ZUC-I */
+ },
+ { /* AES CTR */
+ SHR_ALWAYS, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_ALWAYS, /* AES CMAC */
+ SHR_WAIT /* ZUC-I */
+ },
+ { /* ZUC-E */
+ SHR_ALWAYS, /* NULL */
+ SHR_WAIT, /* SNOW f9 */
+ SHR_WAIT, /* AES CMAC */
+ SHR_ALWAYS /* ZUC-I */
+ },
+ };
+ enum pdb_type_e pdb_type;
+ struct program prg;
+ struct program *p = &prg;
+ int err;
+ LABEL(pdb_end);
+
+ if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
+ pr_err("Cannot select SW HFN override for other era than 2");
+ return -EINVAL;
+ }
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, desc_share[cipherdata->algtype][authdata->algtype], 0, 0);
+
+ pdb_type = cnstr_pdcp_c_plane_pdb(p,
+ hfn,
+ bearer,
+ direction,
+ hfn_threshold,
+ cipherdata,
+ authdata);
+
+ SET_LABEL(p, pdb_end);
+
+ err = insert_hfn_ov_op(p, PDCP_SN_SIZE_5, pdb_type,
+ era_2_sw_hfn_ovrd);
+ if (err)
+ return err;
+
+ err = pdcp_cp_fp[cipherdata->algtype][authdata->algtype](p,
+ swap,
+ cipherdata,
+ authdata,
+ OP_TYPE_DECAP_PROTOCOL,
+ era_2_sw_hfn_ovrd);
+ if (err)
+ return err;
+
+ PATCH_HDR(p, 0, pdb_end);
+
+ return PROGRAM_FINALIZE(p);
+}
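
[Illustration, not part of the patch] A minimal usage sketch of the control-plane
decapsulation constructor above, assuming only the API visible in this header. The
key buffers, HFN, bearer, direction and threshold values are placeholders, and the
alginfo fields are filled the same way the dpaa_sec changes later in this patch
fill them (algtype/key/keylen/key_enc_flags/key_type):

/* Hedged sketch: SNOW f8 ciphering + AES CMAC integrity, placeholder values. */
static int example_build_cplane_decap(void)
{
	uint32_t descbuf[64];            /* room for a full 256-byte descriptor */
	uint8_t cipher_key[16] = { 0 };  /* hypothetical 128-bit key */
	uint8_t auth_key[16] = { 0 };    /* hypothetical 128-bit key */
	struct alginfo cipherdata = {
		.algtype = PDCP_CIPHER_TYPE_SNOW,
		.key = (size_t)cipher_key,
		.keylen = sizeof(cipher_key),
		.key_enc_flags = 0,
		.key_type = RTA_DATA_IMM,
	};
	struct alginfo authdata = {
		.algtype = PDCP_AUTH_TYPE_AES,
		.key = (size_t)auth_key,
		.keylen = sizeof(auth_key),
		.key_enc_flags = 0,
		.key_type = RTA_DATA_IMM,
	};
	int len;

	len = cnstr_shdsc_pdcp_c_plane_decap(descbuf,
					     true,    /* ps: 36/40-bit addressing */
					     true,    /* swap: core/SEC endianness differ */
					     0,       /* hfn */
					     0x3,     /* bearer (placeholder) */
					     0,       /* direction (placeholder) */
					     0x70C0A, /* hfn_threshold (placeholder) */
					     &cipherdata, &authdata,
					     0 /* era_2_sw_hfn_ovrd: not SEC ERA 2 */);
	if (len < 0)
		return len;  /* descriptor construction failed */
	/* the first 'len' words of descbuf now hold the shared descriptor */
	return 0;
}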
+
+/**
+ * cnstr_shdsc_pdcp_u_plane_encap - Function for creating a PDCP User Plane
+ * encapsulation descriptor.
+ * @descbuf: pointer to buffer for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @sn_size: selects Sequence Number Size: 7/12/15 bits
+ * @hfn: starting Hyper Frame Number to be used together with the SN from the
+ * PDCP frames.
+ * @bearer: radio bearer ID
+ * @direction: the direction of the PDCP frame (UL/DL)
+ * @hfn_threshold: HFN value that once reached triggers a warning from SEC that
+ * keys should be renegotiated at the earliest convenience.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values are those from cipher_type_pdcp enum.
+ * @era_2_sw_hfn_ovrd: set to a non-zero value if the software HFN override
+ *                     mechanism is desired for this descriptor. Note: can
+ *                     only be used for SEC ERA 2.
+ *
+ * Return: size of the descriptor written, in words, or a negative number on
+ *         error. The return value can be used to reclaim the buffer space
+ *         that was not used for the descriptor.
+ *
+ * Note: descbuf must be large enough to contain a full 256-byte descriptor;
+ * after the function returns, the caller can subtract the number of bytes
+ * actually used and reuse the remaining buffer space for other purposes.
+ */
+static inline int
+cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
+ bool ps,
+ bool swap,
+ enum pdcp_sn_size sn_size,
+ uint32_t hfn,
+ unsigned short bearer,
+ unsigned short direction,
+ uint32_t hfn_threshold,
+ struct alginfo *cipherdata,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ struct program prg;
+ struct program *p = &prg;
+ int err;
+ LABEL(pdb_end);
+
+ if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
+		pr_err("Cannot select SW HFN override for other era than 2");
+ return -EINVAL;
+ }
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, SHR_ALWAYS, 0, 0);
+ if (cnstr_pdcp_u_plane_pdb(p, sn_size, hfn, bearer, direction,
+ hfn_threshold)) {
+ pr_err("Error creating PDCP UPlane PDB\n");
+ return -EINVAL;
+ }
+ SET_LABEL(p, pdb_end);
+
+ err = insert_hfn_ov_op(p, sn_size, PDCP_PDB_TYPE_FULL_PDB,
+ era_2_sw_hfn_ovrd);
+ if (err)
+ return err;
+
+ switch (sn_size) {
+ case PDCP_SN_SIZE_7:
+ case PDCP_SN_SIZE_12:
+ switch (cipherdata->algtype) {
+ case PDCP_CIPHER_TYPE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
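+			/* fall through if SEC ERA >= 5 */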
+ case PDCP_CIPHER_TYPE_AES:
+ case PDCP_CIPHER_TYPE_SNOW:
+ /* Insert Cipher Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags,
+ (uint64_t)cipherdata->key, cipherdata->keylen,
+ INLINE_KEY(cipherdata));
+ PROTOCOL(p, OP_TYPE_ENCAP_PROTOCOL,
+ OP_PCLID_LTE_PDCP_USER,
+ (uint16_t)cipherdata->algtype);
+ break;
+ case PDCP_CIPHER_TYPE_NULL:
+ insert_copy_frame_op(p,
+ cipherdata,
+ OP_TYPE_ENCAP_PROTOCOL);
+ break;
+ default:
+ pr_err("%s: Invalid encrypt algorithm selected: %d\n",
+			       "cnstr_shdsc_pdcp_u_plane_encap",
+ cipherdata->algtype);
+ return -EINVAL;
+ }
+ break;
+
+ case PDCP_SN_SIZE_15:
+ switch (cipherdata->algtype) {
+ case PDCP_CIPHER_TYPE_NULL:
+ insert_copy_frame_op(p,
+ cipherdata,
+ OP_TYPE_ENCAP_PROTOCOL);
+ break;
+
+ default:
+ err = pdcp_insert_uplane_15bit_op(p, swap, cipherdata,
+ OP_TYPE_ENCAP_PROTOCOL);
+ if (err)
+ return err;
+ break;
+ }
+ break;
+
+ case PDCP_SN_SIZE_5:
+ default:
+ pr_err("Invalid SN size selected\n");
+ return -ENOTSUP;
+ }
+
+ PATCH_HDR(p, 0, pdb_end);
+ return PROGRAM_FINALIZE(p);
+}
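
[Illustration, not part of the patch] A corresponding sketch for the user-plane
encapsulation constructor above. With a 12-bit SN and a SNOW f8 cipher it takes
the KEY + PROTOCOL branch of the switch; all values are placeholders:

/* Hedged sketch: 12-bit SN, SNOW f8 ciphering only, placeholder values. */
static int example_build_uplane_encap(void)
{
	uint32_t descbuf[64];       /* room for a full 256-byte descriptor */
	uint8_t key[16] = { 0 };    /* hypothetical key material */
	struct alginfo cipherdata = {
		.algtype = PDCP_CIPHER_TYPE_SNOW,
		.key = (size_t)key,
		.keylen = sizeof(key),
		.key_enc_flags = 0,
		.key_type = RTA_DATA_IMM,
	};
	int len;

	len = cnstr_shdsc_pdcp_u_plane_encap(descbuf,
					     true,            /* ps */
					     true,            /* swap */
					     PDCP_SN_SIZE_12, /* sn_size */
					     0,               /* hfn */
					     0x3,             /* bearer (placeholder) */
					     1,               /* direction (placeholder) */
					     0xFF00,          /* hfn_threshold (placeholder) */
					     &cipherdata,
					     0 /* era_2_sw_hfn_ovrd */);
	return len < 0 ? len : 0;
}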
+
+/**
+ * cnstr_shdsc_pdcp_u_plane_decap - Function for creating a PDCP User Plane
+ * decapsulation descriptor.
+ * @descbuf: pointer to buffer for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @sn_size: selects Sequence Number Size: 7/12/15 bits
+ * @hfn: starting Hyper Frame Number to be used together with the SN from the
+ * PDCP frames.
+ * @bearer: radio bearer ID
+ * @direction: the direction of the PDCP frame (UL/DL)
+ * @hfn_threshold: HFN value that once reached triggers a warning from SEC that
+ * keys should be renegotiated at the earliest convenience.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values are those from cipher_type_pdcp enum.
+ * @era_2_sw_hfn_ovrd: set to a non-zero value if the software HFN override
+ *                     mechanism is desired for this descriptor. Note: can
+ *                     only be used for SEC ERA 2.
+ *
+ * Return: size of the descriptor written, in words, or a negative number on
+ *         error. The return value can be used to reclaim the buffer space
+ *         that was not used for the descriptor.
+ *
+ * Note: descbuf must be large enough to contain a full 256-byte descriptor;
+ * after the function returns, the caller can subtract the number of bytes
+ * actually used and reuse the remaining buffer space for other purposes.
+ */
+static inline int
+cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
+ bool ps,
+ bool swap,
+ enum pdcp_sn_size sn_size,
+ uint32_t hfn,
+ unsigned short bearer,
+ unsigned short direction,
+ uint32_t hfn_threshold,
+ struct alginfo *cipherdata,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ struct program prg;
+ struct program *p = &prg;
+ int err;
+ LABEL(pdb_end);
+
+ if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
+ pr_err("Cannot select SW HFN override for other era than 2");
+ return -EINVAL;
+ }
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, SHR_ALWAYS, 0, 0);
+ if (cnstr_pdcp_u_plane_pdb(p, sn_size, hfn, bearer, direction,
+ hfn_threshold)) {
+ pr_err("Error creating PDCP UPlane PDB\n");
+ return -EINVAL;
+ }
+ SET_LABEL(p, pdb_end);
+
+ err = insert_hfn_ov_op(p, sn_size, PDCP_PDB_TYPE_FULL_PDB,
+ era_2_sw_hfn_ovrd);
+ if (err)
+ return err;
+
+ switch (sn_size) {
+ case PDCP_SN_SIZE_7:
+ case PDCP_SN_SIZE_12:
+ switch (cipherdata->algtype) {
+ case PDCP_CIPHER_TYPE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
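+			/* fall through if SEC ERA >= 5 */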
+ case PDCP_CIPHER_TYPE_AES:
+ case PDCP_CIPHER_TYPE_SNOW:
+ /* Insert Cipher Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags,
+ cipherdata->key, cipherdata->keylen,
+ INLINE_KEY(cipherdata));
+ PROTOCOL(p, OP_TYPE_DECAP_PROTOCOL,
+ OP_PCLID_LTE_PDCP_USER,
+ (uint16_t)cipherdata->algtype);
+ break;
+ case PDCP_CIPHER_TYPE_NULL:
+ insert_copy_frame_op(p,
+ cipherdata,
+ OP_TYPE_DECAP_PROTOCOL);
+ break;
+ default:
+ pr_err("%s: Invalid encrypt algorithm selected: %d\n",
+			       "cnstr_shdsc_pdcp_u_plane_decap",
+ cipherdata->algtype);
+ return -EINVAL;
+ }
+ break;
+
+ case PDCP_SN_SIZE_15:
+ switch (cipherdata->algtype) {
+ case PDCP_CIPHER_TYPE_NULL:
+ insert_copy_frame_op(p,
+ cipherdata,
+ OP_TYPE_DECAP_PROTOCOL);
+ break;
+
+ default:
+ err = pdcp_insert_uplane_15bit_op(p, swap, cipherdata,
+ OP_TYPE_DECAP_PROTOCOL);
+ if (err)
+ return err;
+ break;
+ }
+ break;
+
+ case PDCP_SN_SIZE_5:
+ default:
+ pr_err("Invalid SN size selected\n");
+ return -ENOTSUP;
+ }
+
+ PATCH_HDR(p, 0, pdb_end);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_pdcp_short_mac - Function for creating a PDCP Short MAC
+ * descriptor.
+ * @descbuf: pointer to buffer for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @authdata: pointer to authentication transform definitions
+ * Valid algorithm values are those from auth_type_pdcp enum.
+ *
+ * Return: size of the descriptor written, in words, or a negative number on
+ *         error. The return value can be used to reclaim the buffer space
+ *         that was not used for the descriptor.
+ *
+ * Note: descbuf must be large enough to contain a full 256-byte descriptor;
+ * after the function returns, the caller can subtract the number of bytes
+ * actually used and reuse the remaining buffer space for other purposes.
+ */
+static inline int
+cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf,
+ bool ps,
+ bool swap,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint32_t iv[3] = {0, 0, 0};
+ LABEL(local_offset);
+ REFERENCE(move_cmd_read_descbuf);
+ REFERENCE(move_cmd_write_descbuf);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
+ } else {
+ MATHB(p, SEQINSZ, ADD, ONE, MATH1, 4, 0);
+ MATHB(p, MATH1, SUB, ONE, MATH1, 4, 0);
+ MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
+ MOVE(p, MATH1, 0, MATH0, 0, 8, IMMED);
+
+		/*
+		 * Since MOVELEN is available only starting with
+		 * SEC ERA 3, use a poor man's MOVELEN: build the MOVE
+		 * command dynamically by combining the command read
+		 * back from the descriptor buffer with the length held
+		 * in the M1 register, then MOVE the patched command
+		 * into the descriptor buffer. Care must be taken with
+		 * the location of the command because of SEC
+		 * pipelining. The actual MOVEs are placed at the end
+		 * of the descriptor because the offset of the MOVE
+		 * command within the descriptor must be computed first.
+		 */
+ move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH0, 0, 6,
+ IMMED);
+ move_cmd_write_descbuf = MOVE(p, MATH0, 0, DESCBUF, 0, 8,
+ WAITCOMP | IMMED);
+ }
+ MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
+
+ switch (authdata->algtype) {
+ case PDCP_AUTH_TYPE_NULL:
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
+ } else {
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+
+ /* Placeholder for MOVE command with length from M1
+ * register
+ */
+ MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
+
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+
+ LOAD(p, (uintptr_t)iv, MATH0, 0, 8, IMMED | COPY);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | LAST2 | FLUSH1);
+ SEQSTORE(p, MATH0, 0, 4, 0);
+
+ break;
+
+ case PDCP_AUTH_TYPE_SNOW:
+ iv[0] = 0xFFFFFFFF;
+ iv[1] = swap ? swab32(0x04000000) : 0x04000000;
+ iv[2] = swap ? swab32(0xF8000000) : 0xF8000000;
+
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ LOAD(p, (uintptr_t)&iv, CONTEXT2, 0, 12, IMMED | COPY);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F9,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_ENC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
+ } else {
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+
+ /* Placeholder for MOVE command with length from M1
+ * register
+ */
+ MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
+
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ break;
+
+ case PDCP_AUTH_TYPE_AES:
+ iv[0] = 0xFFFFFFFF;
+ iv[1] = swap ? swab32(0xFC000000) : 0xFC000000;
+ iv[2] = 0x00000000; /* unused */
+
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ LOAD(p, (uintptr_t)&iv, MATH0, 0, 8, IMMED | COPY);
+ MOVE(p, MATH0, 0, IFIFOAB1, 0, 8, IMMED);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CMAC,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_ENC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MOVE(p, AB2, 0, OFIFO, 0, MATH1, 0);
+ } else {
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+
+ /* Placeholder for MOVE command with length from M1
+ * register
+ */
+ MOVE(p, IFIFOAB2, 0, OFIFO, 0, 0, IMMED);
+
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
+ SEQSTORE(p, CONTEXT1, 0, 4, 0);
+
+ break;
+
+ case PDCP_AUTH_TYPE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+ iv[0] = 0xFFFFFFFF;
+ iv[1] = swap ? swab32(0xFC000000) : 0xFC000000;
+ iv[2] = 0x00000000; /* unused */
+
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ LOAD(p, (uintptr_t)&iv, CONTEXT2, 0, 12, IMMED | COPY);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCA,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_ENC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ break;
+
+ default:
+ pr_err("%s: Invalid integrity algorithm selected: %d\n",
+ "cnstr_shdsc_pdcp_short_mac", authdata->algtype);
+ return -EINVAL;
+ }
+
+ if (rta_sec_era < RTA_SEC_ERA_3) {
+ PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
+ PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
+ }
+
+ return PROGRAM_FINALIZE(p);
+}
+
+#endif /* __DESC_PDCP_H__ */
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h
index d9a5b0e5..cf8dfb91 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h
@@ -14,178 +14,176 @@ static inline int
__rta_ssl_proto(uint16_t protoinfo)
{
switch (protoinfo) {
- case OP_PCL_SSL30_RC4_40_MD5_2:
- case OP_PCL_SSL30_RC4_128_MD5_2:
- case OP_PCL_SSL30_RC4_128_SHA_5:
- case OP_PCL_SSL30_RC4_40_MD5_3:
- case OP_PCL_SSL30_RC4_128_MD5_3:
- case OP_PCL_SSL30_RC4_128_SHA:
- case OP_PCL_SSL30_RC4_128_MD5:
- case OP_PCL_SSL30_RC4_40_SHA:
- case OP_PCL_SSL30_RC4_40_MD5:
- case OP_PCL_SSL30_RC4_128_SHA_2:
- case OP_PCL_SSL30_RC4_128_SHA_3:
- case OP_PCL_SSL30_RC4_128_SHA_4:
- case OP_PCL_SSL30_RC4_128_SHA_6:
- case OP_PCL_SSL30_RC4_128_SHA_7:
- case OP_PCL_SSL30_RC4_128_SHA_8:
- case OP_PCL_SSL30_RC4_128_SHA_9:
- case OP_PCL_SSL30_RC4_128_SHA_10:
- case OP_PCL_TLS_ECDHE_PSK_RC4_128_SHA:
+ case OP_PCL_TLS_RSA_EXPORT_WITH_RC4_40_MD5:
+ case OP_PCL_TLS_RSA_WITH_RC4_128_MD5:
+ case OP_PCL_TLS_RSA_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5:
+ case OP_PCL_TLS_DH_anon_WITH_RC4_128_MD5:
+ case OP_PCL_TLS_KRB5_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_KRB5_WITH_RC4_128_MD5:
+ case OP_PCL_TLS_KRB5_EXPORT_WITH_RC4_40_SHA:
+ case OP_PCL_TLS_KRB5_EXPORT_WITH_RC4_40_MD5:
+ case OP_PCL_TLS_PSK_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_DHE_PSK_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_RSA_PSK_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_ECDH_RSA_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_ECDH_anon_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_WITH_RC4_128_SHA:
if (rta_sec_era == RTA_SEC_ERA_7)
return -EINVAL;
/* fall through if not Era 7 */
- case OP_PCL_SSL30_DES40_CBC_SHA:
- case OP_PCL_SSL30_DES_CBC_SHA_2:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_5:
- case OP_PCL_SSL30_DES40_CBC_SHA_2:
- case OP_PCL_SSL30_DES_CBC_SHA_3:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_6:
- case OP_PCL_SSL30_DES40_CBC_SHA_3:
- case OP_PCL_SSL30_DES_CBC_SHA_4:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_7:
- case OP_PCL_SSL30_DES40_CBC_SHA_4:
- case OP_PCL_SSL30_DES_CBC_SHA_5:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_8:
- case OP_PCL_SSL30_DES40_CBC_SHA_5:
- case OP_PCL_SSL30_DES_CBC_SHA_6:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_9:
- case OP_PCL_SSL30_DES40_CBC_SHA_6:
- case OP_PCL_SSL30_DES_CBC_SHA_7:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_10:
- case OP_PCL_SSL30_DES_CBC_SHA:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA:
- case OP_PCL_SSL30_DES_CBC_MD5:
- case OP_PCL_SSL30_3DES_EDE_CBC_MD5:
- case OP_PCL_SSL30_DES40_CBC_SHA_7:
- case OP_PCL_SSL30_DES40_CBC_MD5:
- case OP_PCL_SSL30_AES_128_CBC_SHA:
- case OP_PCL_SSL30_AES_128_CBC_SHA_2:
- case OP_PCL_SSL30_AES_128_CBC_SHA_3:
- case OP_PCL_SSL30_AES_128_CBC_SHA_4:
- case OP_PCL_SSL30_AES_128_CBC_SHA_5:
- case OP_PCL_SSL30_AES_128_CBC_SHA_6:
- case OP_PCL_SSL30_AES_256_CBC_SHA:
- case OP_PCL_SSL30_AES_256_CBC_SHA_2:
- case OP_PCL_SSL30_AES_256_CBC_SHA_3:
- case OP_PCL_SSL30_AES_256_CBC_SHA_4:
- case OP_PCL_SSL30_AES_256_CBC_SHA_5:
- case OP_PCL_SSL30_AES_256_CBC_SHA_6:
- case OP_PCL_TLS12_AES_128_CBC_SHA256_2:
- case OP_PCL_TLS12_AES_128_CBC_SHA256_3:
- case OP_PCL_TLS12_AES_128_CBC_SHA256_4:
- case OP_PCL_TLS12_AES_128_CBC_SHA256_5:
- case OP_PCL_TLS12_AES_256_CBC_SHA256_2:
- case OP_PCL_TLS12_AES_256_CBC_SHA256_3:
- case OP_PCL_TLS12_AES_256_CBC_SHA256_4:
- case OP_PCL_TLS12_AES_256_CBC_SHA256_5:
- case OP_PCL_TLS12_AES_128_CBC_SHA256_6:
- case OP_PCL_TLS12_AES_256_CBC_SHA256_6:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_2:
- case OP_PCL_SSL30_AES_128_CBC_SHA_7:
- case OP_PCL_SSL30_AES_256_CBC_SHA_7:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_3:
- case OP_PCL_SSL30_AES_128_CBC_SHA_8:
- case OP_PCL_SSL30_AES_256_CBC_SHA_8:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_4:
- case OP_PCL_SSL30_AES_128_CBC_SHA_9:
- case OP_PCL_SSL30_AES_256_CBC_SHA_9:
- case OP_PCL_SSL30_AES_128_GCM_SHA256_1:
- case OP_PCL_SSL30_AES_256_GCM_SHA384_1:
- case OP_PCL_SSL30_AES_128_GCM_SHA256_2:
- case OP_PCL_SSL30_AES_256_GCM_SHA384_2:
- case OP_PCL_SSL30_AES_128_GCM_SHA256_3:
- case OP_PCL_SSL30_AES_256_GCM_SHA384_3:
- case OP_PCL_SSL30_AES_128_GCM_SHA256_4:
- case OP_PCL_SSL30_AES_256_GCM_SHA384_4:
- case OP_PCL_SSL30_AES_128_GCM_SHA256_5:
- case OP_PCL_SSL30_AES_256_GCM_SHA384_5:
- case OP_PCL_SSL30_AES_128_GCM_SHA256_6:
- case OP_PCL_TLS_DH_ANON_AES_256_GCM_SHA384:
- case OP_PCL_TLS_PSK_AES_128_GCM_SHA256:
- case OP_PCL_TLS_PSK_AES_256_GCM_SHA384:
- case OP_PCL_TLS_DHE_PSK_AES_128_GCM_SHA256:
- case OP_PCL_TLS_DHE_PSK_AES_256_GCM_SHA384:
- case OP_PCL_TLS_RSA_PSK_AES_128_GCM_SHA256:
- case OP_PCL_TLS_RSA_PSK_AES_256_GCM_SHA384:
- case OP_PCL_TLS_PSK_AES_128_CBC_SHA256:
- case OP_PCL_TLS_PSK_AES_256_CBC_SHA384:
- case OP_PCL_TLS_DHE_PSK_AES_128_CBC_SHA256:
- case OP_PCL_TLS_DHE_PSK_AES_256_CBC_SHA384:
- case OP_PCL_TLS_RSA_PSK_AES_128_CBC_SHA256:
- case OP_PCL_TLS_RSA_PSK_AES_256_CBC_SHA384:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_11:
- case OP_PCL_SSL30_AES_128_CBC_SHA_10:
- case OP_PCL_SSL30_AES_256_CBC_SHA_10:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_12:
- case OP_PCL_SSL30_AES_128_CBC_SHA_11:
- case OP_PCL_SSL30_AES_256_CBC_SHA_11:
- case OP_PCL_SSL30_AES_128_CBC_SHA_12:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_13:
- case OP_PCL_SSL30_AES_256_CBC_SHA_12:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_14:
- case OP_PCL_SSL30_AES_128_CBC_SHA_13:
- case OP_PCL_SSL30_AES_256_CBC_SHA_13:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_15:
- case OP_PCL_SSL30_AES_128_CBC_SHA_14:
- case OP_PCL_SSL30_AES_256_CBC_SHA_14:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_16:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_17:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_18:
- case OP_PCL_SSL30_AES_128_CBC_SHA_15:
- case OP_PCL_SSL30_AES_128_CBC_SHA_16:
- case OP_PCL_SSL30_AES_128_CBC_SHA_17:
- case OP_PCL_SSL30_AES_256_CBC_SHA_15:
- case OP_PCL_SSL30_AES_256_CBC_SHA_16:
- case OP_PCL_SSL30_AES_256_CBC_SHA_17:
- case OP_PCL_TLS_ECDHE_ECDSA_AES_128_CBC_SHA256:
- case OP_PCL_TLS_ECDHE_ECDSA_AES_256_CBC_SHA384:
- case OP_PCL_TLS_ECDH_ECDSA_AES_128_CBC_SHA256:
- case OP_PCL_TLS_ECDH_ECDSA_AES_256_CBC_SHA384:
- case OP_PCL_TLS_ECDHE_RSA_AES_128_CBC_SHA256:
- case OP_PCL_TLS_ECDHE_RSA_AES_256_CBC_SHA384:
- case OP_PCL_TLS_ECDH_RSA_AES_128_CBC_SHA256:
- case OP_PCL_TLS_ECDH_RSA_AES_256_CBC_SHA384:
- case OP_PCL_TLS_ECDHE_ECDSA_AES_128_GCM_SHA256:
- case OP_PCL_TLS_ECDHE_ECDSA_AES_256_GCM_SHA384:
- case OP_PCL_TLS_ECDH_ECDSA_AES_128_GCM_SHA256:
- case OP_PCL_TLS_ECDH_ECDSA_AES_256_GCM_SHA384:
- case OP_PCL_TLS_ECDHE_RSA_AES_128_GCM_SHA256:
- case OP_PCL_TLS_ECDHE_RSA_AES_256_GCM_SHA384:
- case OP_PCL_TLS_ECDH_RSA_AES_128_GCM_SHA256:
- case OP_PCL_TLS_ECDH_RSA_AES_256_GCM_SHA384:
- case OP_PCL_TLS_ECDHE_PSK_3DES_EDE_CBC_SHA:
- case OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA:
- case OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA:
- case OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA256:
- case OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA384:
- case OP_PCL_TLS12_3DES_EDE_CBC_MD5:
- case OP_PCL_TLS12_3DES_EDE_CBC_SHA160:
- case OP_PCL_TLS12_3DES_EDE_CBC_SHA224:
- case OP_PCL_TLS12_3DES_EDE_CBC_SHA256:
- case OP_PCL_TLS12_3DES_EDE_CBC_SHA384:
- case OP_PCL_TLS12_3DES_EDE_CBC_SHA512:
- case OP_PCL_TLS12_AES_128_CBC_SHA160:
- case OP_PCL_TLS12_AES_128_CBC_SHA224:
- case OP_PCL_TLS12_AES_128_CBC_SHA256:
- case OP_PCL_TLS12_AES_128_CBC_SHA384:
- case OP_PCL_TLS12_AES_128_CBC_SHA512:
- case OP_PCL_TLS12_AES_192_CBC_SHA160:
- case OP_PCL_TLS12_AES_192_CBC_SHA224:
- case OP_PCL_TLS12_AES_192_CBC_SHA256:
- case OP_PCL_TLS12_AES_192_CBC_SHA512:
- case OP_PCL_TLS12_AES_256_CBC_SHA160:
- case OP_PCL_TLS12_AES_256_CBC_SHA224:
- case OP_PCL_TLS12_AES_256_CBC_SHA256:
- case OP_PCL_TLS12_AES_256_CBC_SHA384:
- case OP_PCL_TLS12_AES_256_CBC_SHA512:
- case OP_PCL_TLS_PVT_AES_192_CBC_SHA160:
- case OP_PCL_TLS_PVT_AES_192_CBC_SHA384:
- case OP_PCL_TLS_PVT_AES_192_CBC_SHA224:
- case OP_PCL_TLS_PVT_AES_192_CBC_SHA512:
- case OP_PCL_TLS_PVT_AES_192_CBC_SHA256:
- case OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FE:
- case OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FF:
+ case OP_PCL_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA:
+ case OP_PCL_TLS_RSA_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_RSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA:
+ case OP_PCL_TLS_DH_DSS_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA:
+ case OP_PCL_TLS_DH_RSA_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA:
+ case OP_PCL_TLS_DHE_DSS_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA:
+ case OP_PCL_TLS_DHE_RSA_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA:
+ case OP_PCL_TLS_DH_anon_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_KRB5_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_KRB5_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_KRB5_WITH_DES_CBC_MD5:
+ case OP_PCL_TLS_KRB5_WITH_3DES_EDE_CBC_MD5:
+ case OP_PCL_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA:
+ case OP_PCL_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5:
+ case OP_PCL_TLS_RSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_DH_DSS_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_DH_RSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_DHE_DSS_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_DHE_RSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_DH_anon_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_RSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DH_DSS_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DH_RSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DHE_DSS_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DHE_RSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DH_anon_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DH_DSS_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DH_RSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DH_DSS_WITH_AES_256_CBC_SHA256:
+ case OP_PCL_TLS_DH_RSA_WITH_AES_256_CBC_SHA256:
+ case OP_PCL_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256:
+ case OP_PCL_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256:
+ case OP_PCL_TLS_DH_anon_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DH_anon_WITH_AES_256_CBC_SHA256:
+ case OP_PCL_TLS_PSK_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_PSK_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_PSK_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_DHE_PSK_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_DHE_PSK_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_RSA_PSK_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_RSA_PSK_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_RSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_RSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DH_RSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DH_RSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DH_DSS_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DH_DSS_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DH_anon_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DH_anon_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_PSK_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_PSK_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_PSK_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_PSK_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDH_anon_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDH_anon_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_RSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_RSA_WITH_AES_256_CBC_SHA256:
+ case OP_PCL_PVT_TLS_3DES_EDE_CBC_MD5:
+ case OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA160:
+ case OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA224:
+ case OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA256:
+ case OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA384:
+ case OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA512:
+ case OP_PCL_PVT_TLS_AES_128_CBC_SHA160:
+ case OP_PCL_PVT_TLS_AES_128_CBC_SHA224:
+ case OP_PCL_PVT_TLS_AES_128_CBC_SHA256:
+ case OP_PCL_PVT_TLS_AES_128_CBC_SHA384:
+ case OP_PCL_PVT_TLS_AES_128_CBC_SHA512:
+ case OP_PCL_PVT_TLS_AES_192_CBC_SHA160:
+ case OP_PCL_PVT_TLS_AES_192_CBC_SHA224:
+ case OP_PCL_PVT_TLS_AES_192_CBC_SHA256:
+ case OP_PCL_PVT_TLS_AES_192_CBC_SHA512:
+ case OP_PCL_PVT_TLS_AES_256_CBC_SHA160:
+ case OP_PCL_PVT_TLS_AES_256_CBC_SHA224:
+ case OP_PCL_PVT_TLS_AES_256_CBC_SHA384:
+ case OP_PCL_PVT_TLS_AES_256_CBC_SHA512:
+ case OP_PCL_PVT_TLS_AES_256_CBC_SHA256:
+ case OP_PCL_PVT_TLS_AES_192_CBC_SHA384:
+ case OP_PCL_PVT_TLS_MASTER_SECRET_PRF_FE:
+ case OP_PCL_PVT_TLS_MASTER_SECRET_PRF_FF:
return 0;
}
@@ -323,6 +321,12 @@ static const uint32_t proto_blob_flags[] = {
OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM,
OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM
};
@@ -556,7 +560,7 @@ static const struct proto_map proto_table[] = {
{OP_TYPE_UNI_PROTOCOL, OP_PCLID_TLS10_PRF, __rta_ssl_proto},
{OP_TYPE_UNI_PROTOCOL, OP_PCLID_TLS11_PRF, __rta_ssl_proto},
{OP_TYPE_UNI_PROTOCOL, OP_PCLID_TLS12_PRF, __rta_ssl_proto},
- {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DTLS10_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DTLS_PRF, __rta_ssl_proto},
{OP_TYPE_UNI_PROTOCOL, OP_PCLID_IKEV1_PRF, __rta_ike_proto},
{OP_TYPE_UNI_PROTOCOL, OP_PCLID_IKEV2_PRF, __rta_ike_proto},
{OP_TYPE_UNI_PROTOCOL, OP_PCLID_PUBLICKEYPAIR, __rta_dlc_proto},
@@ -568,7 +572,7 @@ static const struct proto_map proto_table[] = {
{OP_TYPE_DECAP_PROTOCOL, OP_PCLID_TLS10, __rta_ssl_proto},
{OP_TYPE_DECAP_PROTOCOL, OP_PCLID_TLS11, __rta_ssl_proto},
{OP_TYPE_DECAP_PROTOCOL, OP_PCLID_TLS12, __rta_ssl_proto},
- {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_DTLS10, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_DTLS, __rta_ssl_proto},
{OP_TYPE_DECAP_PROTOCOL, OP_PCLID_MACSEC, __rta_macsec_proto},
{OP_TYPE_DECAP_PROTOCOL, OP_PCLID_WIFI, __rta_wifi_proto},
{OP_TYPE_DECAP_PROTOCOL, OP_PCLID_WIMAX, __rta_wimax_proto},
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h b/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h
index 6e666108..5357187f 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h
@@ -497,6 +497,28 @@ __rta_out64(struct program *program, bool is_ext, uint64_t val)
}
}
+static inline void __rta_out_be64(struct program *program, bool is_ext,
+ uint64_t val)
+{
+ if (is_ext) {
+ __rta_out_be32(program, upper_32_bits(val));
+ __rta_out_be32(program, lower_32_bits(val));
+ } else {
+ __rta_out_be32(program, lower_32_bits(val));
+ }
+}
+
+static inline void __rta_out_le64(struct program *program, bool is_ext,
+ uint64_t val)
+{
+ if (is_ext) {
+ __rta_out_le32(program, lower_32_bits(val));
+ __rta_out_le32(program, upper_32_bits(val));
+ } else {
+ __rta_out_le32(program, lower_32_bits(val));
+ }
+}
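
[Illustration, not part of the patch] The two helpers above differ only in the
word order used when an extended (64-bit) immediate is emitted: __rta_out_be64()
writes the upper 32 bits first, __rta_out_le64() the lower 32 bits first, and
both emit just the lower word when is_ext is false. A standalone sketch of that
split, assuming the usual upper_32_bits()/lower_32_bits() semantics:

#include <stdint.h>
#include <stdio.h>

/* Hedged sketch of the 64-bit split done by __rta_out_be64()/__rta_out_le64(). */
int main(void)
{
	uint64_t val = 0x1122334455667788ULL;
	uint32_t hi = (uint32_t)(val >> 32); /* upper_32_bits(val) */
	uint32_t lo = (uint32_t)val;         /* lower_32_bits(val) */

	printf("BE extended:  %08x %08x\n", hi, lo); /* upper word emitted first */
	printf("LE extended:  %08x %08x\n", lo, hi); /* lower word emitted first */
	printf("not extended: %08x\n", lo);          /* only the lower word */
	return 0;
}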
+
static inline unsigned int
rta_word(struct program *program, uint32_t val)
{
diff --git a/drivers/crypto/dpaa2_sec/mc/dpseci.c b/drivers/crypto/dpaa2_sec/mc/dpseci.c
index de8ca970..87e0defd 100644
--- a/drivers/crypto/dpaa2_sec/mc/dpseci.c
+++ b/drivers/crypto/dpaa2_sec/mc/dpseci.c
@@ -6,6 +6,7 @@
*/
#include <fsl_mc_sys.h>
#include <fsl_mc_cmd.h>
+#include <fsl_dpopr.h>
#include <fsl_dpseci.h>
#include <fsl_dpseci_cmd.h>
@@ -116,11 +117,13 @@ int dpseci_create(struct fsl_mc_io *mc_io,
cmd_flags,
dprc_token);
cmd_params = (struct dpseci_cmd_create *)cmd.params;
- for (i = 0; i < DPSECI_PRIO_NUM; i++)
+ for (i = 0; i < 8; i++)
cmd_params->priorities[i] = cfg->priorities[i];
+ for (i = 0; i < 8; i++)
+ cmd_params->priorities2[i] = cfg->priorities[8 + i];
cmd_params->num_tx_queues = cfg->num_tx_queues;
cmd_params->num_rx_queues = cfg->num_rx_queues;
- cmd_params->options = cfg->options;
+ cmd_params->options = cpu_to_le32(cfg->options);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -302,7 +305,7 @@ int dpseci_get_attributes(struct fsl_mc_io *mc_io,
/* retrieve response parameters */
rsp_params = (struct dpseci_rsp_get_attr *)cmd.params;
attr->id = le32_to_cpu(rsp_params->id);
- attr->options = rsp_params->options;
+ attr->options = le32_to_cpu(rsp_params->options);
attr->num_tx_queues = rsp_params->num_tx_queues;
attr->num_rx_queues = rsp_params->num_rx_queues;
@@ -490,6 +493,8 @@ int dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
attr->arc4_acc_num = rsp_params->arc4_acc_num;
attr->des_acc_num = rsp_params->des_acc_num;
attr->aes_acc_num = rsp_params->aes_acc_num;
+ attr->ccha_acc_num = rsp_params->ccha_acc_num;
+ attr->ptha_acc_num = rsp_params->ptha_acc_num;
return 0;
}
@@ -569,6 +574,113 @@ int dpseci_get_api_version(struct fsl_mc_io *mc_io,
return 0;
}
+/**
+ * dpseci_set_opr() - Set Order Restoration configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @index: The queue index
+ * @options: Configuration mode options
+ * can be OPR_OPT_CREATE or OPR_OPT_RETIRE
+ * @cfg: Configuration options for the OPR
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_set_opr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t index,
+ uint8_t options,
+ struct opr_cfg *cfg)
+{
+ struct dpseci_cmd_set_opr *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_OPR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_set_opr *)cmd.params;
+ cmd_params->index = index;
+ cmd_params->options = options;
+ cmd_params->oloe = cfg->oloe;
+ cmd_params->oeane = cfg->oeane;
+ cmd_params->olws = cfg->olws;
+ cmd_params->oa = cfg->oa;
+ cmd_params->oprrws = cfg->oprrws;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_get_opr() - Retrieve Order Restoration config and query.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @index: The queue index
+ * @cfg: Returned OPR configuration
+ * @qry: Returned OPR query
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_opr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t index,
+ struct opr_cfg *cfg,
+ struct opr_qry *qry)
+{
+ struct dpseci_rsp_get_opr *rsp_params;
+ struct dpseci_cmd_get_opr *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_get_opr *)cmd.params;
+ cmd_params->index = index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpseci_rsp_get_opr *)cmd.params;
+ cfg->oloe = rsp_params->oloe;
+ cfg->oeane = rsp_params->oeane;
+ cfg->olws = rsp_params->olws;
+ cfg->oa = rsp_params->oa;
+ cfg->oprrws = rsp_params->oprrws;
+ qry->rip = dpseci_get_field(rsp_params->flags, RIP);
+ qry->enable = dpseci_get_field(rsp_params->flags, OPR_ENABLE);
+ qry->nesn = le16_to_cpu(rsp_params->nesn);
+ qry->ndsn = le16_to_cpu(rsp_params->ndsn);
+ qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
+ qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, TSEQ_NLIS);
+ qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
+ qry->hseq_nlis = dpseci_get_field(rsp_params->hseq_nlis, HSEQ_NLIS);
+ qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
+ qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
+ qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
+ qry->opr_id = le16_to_cpu(rsp_params->opr_id);
+
+ return 0;
+}
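
[Illustration, not part of the patch] A hedged sketch of how the two OPR calls
above might be driven from a PMD: create an order-restoration point on one Rx
queue index, then read its state back. The opr_cfg values are placeholders, the
cmd_flags are left at 0 (any MC_CMD_FLAG_ value could be used), and
OPR_OPT_CREATE is the option named in the kernel-doc above:

/* Hedged sketch: create and query an OPR on queue index 0, placeholder values. */
static int example_opr(struct fsl_mc_io *mc_io, uint16_t token)
{
	struct opr_cfg cfg = { .oloe = 0, .oeane = 0, .olws = 0,
			       .oa = 0, .oprrws = 3 /* placeholder */ };
	struct opr_qry qry;
	int err;

	err = dpseci_set_opr(mc_io, 0 /* cmd_flags */, token,
			     0 /* queue index */, OPR_OPT_CREATE, &cfg);
	if (err)
		return err;

	err = dpseci_get_opr(mc_io, 0 /* cmd_flags */, token,
			     0 /* queue index */, &cfg, &qry);
	if (err)
		return err;

	return qry.enable ? 0 : -1;	/* expect the OPR to be marked enabled */
}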
+
+/**
+ * dpseci_set_congestion_notification() - Set congestion group
+ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @cfg: congestion notification configuration
+ *
+ * Return: '0' on success, error code otherwise
+ */
int dpseci_set_congestion_notification(
struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
@@ -604,6 +716,16 @@ int dpseci_set_congestion_notification(
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpseci_get_congestion_notification() - Get congestion group
+ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @cfg: congestion notification configuration
+ *
+ * Return: '0' on success, error code otherwise
+ */
int dpseci_get_congestion_notification(
struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
diff --git a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
index 12ac005a..279e8f4d 100644
--- a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
+++ b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
@@ -20,7 +20,7 @@ struct fsl_mc_io;
/**
* Maximum number of Tx/Rx priorities per DPSECI object
*/
-#define DPSECI_PRIO_NUM 8
+#define DPSECI_MAX_QUEUE_NUM 16
/**
* All queues considered; see dpseci_set_rx_queue()
@@ -58,7 +58,7 @@ struct dpseci_cfg {
uint32_t options;
uint8_t num_tx_queues;
uint8_t num_rx_queues;
- uint8_t priorities[DPSECI_PRIO_NUM];
+ uint8_t priorities[DPSECI_MAX_QUEUE_NUM];
};
int dpseci_create(struct fsl_mc_io *mc_io,
@@ -259,6 +259,10 @@ int dpseci_get_tx_queue(struct fsl_mc_io *mc_io,
* implemented in this version of SEC.
* @aes_acc_num: The number of copies of the AES module that are
* implemented in this version of SEC.
+ * @ccha_acc_num: The number of copies of the ChaCha20 module that are
+ * implemented in this version of SEC.
+ * @ptha_acc_num: The number of copies of the Poly1305 module that are
+ * implemented in this version of SEC.
**/
struct dpseci_sec_attr {
@@ -279,6 +283,8 @@ struct dpseci_sec_attr {
uint8_t arc4_acc_num;
uint8_t des_acc_num;
uint8_t aes_acc_num;
+ uint8_t ccha_acc_num;
+ uint8_t ptha_acc_num;
};
int dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
@@ -316,6 +322,21 @@ int dpseci_get_api_version(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t *major_ver,
uint16_t *minor_ver);
+
+int dpseci_set_opr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t index,
+ uint8_t options,
+ struct opr_cfg *cfg);
+
+int dpseci_get_opr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t index,
+ struct opr_cfg *cfg,
+ struct opr_qry *qry);
+
/**
* enum dpseci_congestion_unit - DPSECI congestion units
* @DPSECI_CONGESTION_UNIT_BYTES: bytes units
diff --git a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
index 26cef0f7..af3518a0 100644
--- a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
+++ b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
@@ -9,22 +9,25 @@
/* DPSECI Version */
#define DPSECI_VER_MAJOR 5
-#define DPSECI_VER_MINOR 1
+#define DPSECI_VER_MINOR 3
/* Command versioning */
#define DPSECI_CMD_BASE_VERSION 1
#define DPSECI_CMD_BASE_VERSION_V2 2
+#define DPSECI_CMD_BASE_VERSION_V3 3
#define DPSECI_CMD_ID_OFFSET 4
#define DPSECI_CMD_V1(id) \
((id << DPSECI_CMD_ID_OFFSET) | DPSECI_CMD_BASE_VERSION)
#define DPSECI_CMD_V2(id) \
((id << DPSECI_CMD_ID_OFFSET) | DPSECI_CMD_BASE_VERSION_V2)
+#define DPSECI_CMD_V3(id) \
+ ((id << DPSECI_CMD_ID_OFFSET) | DPSECI_CMD_BASE_VERSION_V3)
/* Command IDs */
#define DPSECI_CMDID_CLOSE DPSECI_CMD_V1(0x800)
#define DPSECI_CMDID_OPEN DPSECI_CMD_V1(0x809)
-#define DPSECI_CMDID_CREATE DPSECI_CMD_V2(0x909)
+#define DPSECI_CMDID_CREATE DPSECI_CMD_V3(0x909)
#define DPSECI_CMDID_DESTROY DPSECI_CMD_V1(0x989)
#define DPSECI_CMDID_GET_API_VERSION DPSECI_CMD_V1(0xa09)
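
[Illustration, not part of the patch] The command IDs encode both the opcode and
the command version in one value: the opcode is shifted left by
DPSECI_CMD_ID_OFFSET (4) and the low nibble carries the base version, so bumping
CREATE to V3 and GET_SEC_ATTR to V2 only changes that nibble. A small standalone
sketch of the arithmetic, using the values visible above (SKETCH_* names are
local to the sketch to avoid clashing with the real macros):

#include <assert.h>

/* Hedged sketch of the DPSECI_CMD_Vn(id) encoding. */
#define SKETCH_CMD_ID_OFFSET	4
#define SKETCH_CMD_V(id, ver)	(((id) << SKETCH_CMD_ID_OFFSET) | (ver))

int main(void)
{
	/* DPSECI_CMDID_CREATE moves from V2 to V3: 0x909 -> 0x9093 */
	assert(SKETCH_CMD_V(0x909, 3) == 0x9093);
	/* DPSECI_CMDID_GET_SEC_ATTR moves from V1 to V2: 0x198 -> 0x1982 */
	assert(SKETCH_CMD_V(0x198, 2) == 0x1982);
	return 0;
}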
@@ -37,9 +40,10 @@
#define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194)
#define DPSECI_CMDID_GET_RX_QUEUE DPSECI_CMD_V1(0x196)
#define DPSECI_CMDID_GET_TX_QUEUE DPSECI_CMD_V1(0x197)
-#define DPSECI_CMDID_GET_SEC_ATTR DPSECI_CMD_V1(0x198)
+#define DPSECI_CMDID_GET_SEC_ATTR DPSECI_CMD_V2(0x198)
#define DPSECI_CMDID_GET_SEC_COUNTERS DPSECI_CMD_V1(0x199)
-
+#define DPSECI_CMDID_SET_OPR DPSECI_CMD_V1(0x19A)
+#define DPSECI_CMDID_GET_OPR DPSECI_CMD_V1(0x19B)
#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x170)
#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x171)
@@ -63,6 +67,8 @@ struct dpseci_cmd_create {
uint8_t num_rx_queues;
uint8_t pad[6];
uint32_t options;
+ uint32_t pad2;
+ uint8_t priorities2[8];
};
struct dpseci_cmd_destroy {
@@ -152,6 +158,8 @@ struct dpseci_rsp_get_sec_attr {
uint8_t arc4_acc_num;
uint8_t des_acc_num;
uint8_t aes_acc_num;
+ uint8_t ccha_acc_num;
+ uint8_t ptha_acc_num;
};
struct dpseci_rsp_get_sec_counters {
@@ -169,6 +177,63 @@ struct dpseci_rsp_get_api_version {
uint16_t minor;
};
+struct dpseci_cmd_set_opr {
+ uint16_t pad0;
+ uint8_t index;
+ uint8_t options;
+ uint8_t pad1[7];
+ uint8_t oloe;
+ uint8_t oeane;
+ uint8_t olws;
+ uint8_t oa;
+ uint8_t oprrws;
+};
+
+struct dpseci_cmd_get_opr {
+ uint16_t pad;
+ uint8_t index;
+};
+
+#define DPSECI_RIP_SHIFT 0
+#define DPSECI_RIP_SIZE 1
+#define DPSECI_OPR_ENABLE_SHIFT 1
+#define DPSECI_OPR_ENABLE_SIZE 1
+#define DPSECI_TSEQ_NLIS_SHIFT 0
+#define DPSECI_TSEQ_NLIS_SIZE 1
+#define DPSECI_HSEQ_NLIS_SHIFT 0
+#define DPSECI_HSEQ_NLIS_SIZE 1
+
+struct dpseci_rsp_get_opr {
+ uint64_t pad0;
+ /* from LSB: rip:1 enable:1 */
+ uint8_t flags;
+ uint16_t pad1;
+ uint8_t oloe;
+ uint8_t oeane;
+ uint8_t olws;
+ uint8_t oa;
+ uint8_t oprrws;
+ uint16_t nesn;
+ uint16_t pad8;
+ uint16_t ndsn;
+ uint16_t pad2;
+ uint16_t ea_tseq;
+ /* only the LSB */
+ uint8_t tseq_nlis;
+ uint8_t pad3;
+ uint16_t ea_hseq;
+ /* only the LSB */
+ uint8_t hseq_nlis;
+ uint8_t pad4;
+ uint16_t ea_hptr;
+ uint16_t pad5;
+ uint16_t ea_tptr;
+ uint16_t pad6;
+ uint16_t opr_vid;
+ uint16_t pad7;
+ uint16_t opr_id;
+};
+
#define DPSECI_DEST_TYPE_SHIFT 0
#define DPSECI_DEST_TYPE_SIZE 4
#define DPSECI_CG_UNITS_SHIFT 4
diff --git a/drivers/crypto/dpaa2_sec/meson.build b/drivers/crypto/dpaa2_sec/meson.build
index 01afc587..8fa4827e 100644
--- a/drivers/crypto/dpaa2_sec/meson.build
+++ b/drivers/crypto/dpaa2_sec/meson.build
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2018 NXP
+version = 2
+
if host_machine.system() != 'linux'
build = false
endif
diff --git a/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map b/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map
index 8591cc0b..0bfb986d 100644
--- a/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map
+++ b/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map
@@ -2,3 +2,11 @@ DPDK_17.05 {
local: *;
};
+
+DPDK_18.11 {
+ global:
+
+ dpaa2_sec_eventq_attach;
+ dpaa2_sec_eventq_detach;
+
+} DPDK_17.05;
diff --git a/drivers/crypto/dpaa_sec/Makefile b/drivers/crypto/dpaa_sec/Makefile
index 9be44704..5ce95c23 100644
--- a/drivers/crypto/dpaa_sec/Makefile
+++ b/drivers/crypto/dpaa_sec/Makefile
@@ -11,7 +11,6 @@ LIB = librte_pmd_dpaa_sec.a
# build flags
CFLAGS += -DALLOW_EXPERIMENTAL_API
-CFLAGS += -D _GNU_SOURCE
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
@@ -38,5 +37,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += dpaa_sec.c
LDLIBS += -lrte_bus_dpaa
LDLIBS += -lrte_mempool_dpaa
+LDLIBS += -lrte_common_dpaax
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index f571050b..d83e7454 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -23,6 +23,7 @@
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
+#include <rte_spinlock.h>
#include <fsl_usd.h>
#include <fsl_qman.h>
@@ -106,6 +107,12 @@ dpaa_mem_vtop(void *vaddr)
static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
+ void *va;
+
+ va = (void *)dpaax_iova_table_get_va(paddr);
+ if (likely(va))
+ return va;
+
return rte_mem_iova2virt(paddr);
}
@@ -274,6 +281,9 @@ caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
switch (ses->auth_alg) {
case RTE_CRYPTO_AUTH_NULL:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_NULL : 0;
ses->digest_length = 0;
break;
case RTE_CRYPTO_AUTH_MD5_HMAC:
@@ -322,6 +332,9 @@ caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
switch (ses->cipher_alg) {
case RTE_CRYPTO_CIPHER_NULL:
+ alginfo_c->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_NULL : 0;
break;
case RTE_CRYPTO_CIPHER_AES_CBC:
alginfo_c->algtype =
@@ -359,6 +372,87 @@ caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
}
}
+/* prepare ipsec proto command block of the session */
+static int
+dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
+{
+ struct alginfo cipherdata = {0}, authdata = {0};
+ struct sec_cdb *cdb = &ses->cdb;
+ int32_t shared_desc_len = 0;
+ int err;
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ int swap = false;
+#else
+ int swap = true;
+#endif
+
+ caam_cipher_alg(ses, &cipherdata);
+ if (cipherdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
+ DPAA_SEC_ERR("not supported cipher alg");
+ return -ENOTSUP;
+ }
+
+ cipherdata.key = (size_t)ses->cipher_key.data;
+ cipherdata.keylen = ses->cipher_key.length;
+ cipherdata.key_enc_flags = 0;
+ cipherdata.key_type = RTA_DATA_IMM;
+
+ caam_auth_alg(ses, &authdata);
+ if (authdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
+ DPAA_SEC_ERR("not supported auth alg");
+ return -ENOTSUP;
+ }
+
+ authdata.key = (size_t)ses->auth_key.data;
+ authdata.keylen = ses->auth_key.length;
+ authdata.key_enc_flags = 0;
+ authdata.key_type = RTA_DATA_IMM;
+
+ cdb->sh_desc[0] = cipherdata.keylen;
+ cdb->sh_desc[1] = authdata.keylen;
+ err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+ MIN_JOB_DESC_SIZE,
+ (unsigned int *)cdb->sh_desc,
+ &cdb->sh_desc[2], 2);
+
+ if (err < 0) {
+ DPAA_SEC_ERR("Crypto: Incorrect key lengths");
+ return err;
+ }
+ if (cdb->sh_desc[2] & 1)
+ cipherdata.key_type = RTA_DATA_IMM;
+ else {
+ cipherdata.key = (size_t)dpaa_mem_vtop(
+ (void *)(size_t)cipherdata.key);
+ cipherdata.key_type = RTA_DATA_PTR;
+ }
+ if (cdb->sh_desc[2] & (1<<1))
+ authdata.key_type = RTA_DATA_IMM;
+ else {
+ authdata.key = (size_t)dpaa_mem_vtop(
+ (void *)(size_t)authdata.key);
+ authdata.key_type = RTA_DATA_PTR;
+ }
+
+ cdb->sh_desc[0] = 0;
+ cdb->sh_desc[1] = 0;
+ cdb->sh_desc[2] = 0;
+ if (ses->dir == DIR_ENC) {
+ shared_desc_len = cnstr_shdsc_ipsec_new_encap(
+ cdb->sh_desc,
+ true, swap, SHR_SERIAL,
+ &ses->encap_pdb,
+ (uint8_t *)&ses->ip4_hdr,
+ &cipherdata, &authdata);
+ } else if (ses->dir == DIR_DEC) {
+ shared_desc_len = cnstr_shdsc_ipsec_new_decap(
+ cdb->sh_desc,
+ true, swap, SHR_SERIAL,
+ &ses->decap_pdb,
+ &cipherdata, &authdata);
+ }
+ return shared_desc_len;
+}
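
[Illustration, not part of the patch] The inline-key handling above is easy to
miss: the cipher and auth key lengths are staged in sh_desc[0]/sh_desc[1],
rta_inline_query() reports in sh_desc[2] which keys still fit inline in the
shared descriptor, and the code then keeps a key immediate (RTA_DATA_IMM) or
converts it to a pointer (RTA_DATA_PTR) based on bit 0 (cipher) and bit 1 (auth)
of that word. A standalone restatement of just that decision, with the bit
layout taken from the checks above:

#include <stdbool.h>
#include <stdint.h>

/* Hedged sketch: interpret the mask left by rta_inline_query() in sh_desc[2]. */
static void select_key_types(uint32_t inline_mask,
			     bool *cipher_key_inline, bool *auth_key_inline)
{
	*cipher_key_inline = inline_mask & 0x1;       /* bit 0: cipher key fits inline */
	*auth_key_inline = inline_mask & (1u << 1);   /* bit 1: auth key fits inline */
	/* a key that does not fit inline is passed by physical address instead
	 * (RTA_DATA_PTR after dpaa_mem_vtop()), as done in the function above
	 */
}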
/* prepare command block of the session */
static int
@@ -376,7 +470,9 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
memset(cdb, 0, sizeof(struct sec_cdb));
- if (is_cipher_only(ses)) {
+ if (is_proto_ipsec(ses)) {
+ shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
+ } else if (is_cipher_only(ses)) {
caam_cipher_alg(ses, &alginfo_c);
if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
DPAA_SEC_ERR("not supported cipher alg");
@@ -484,28 +580,13 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
cdb->sh_desc[0] = 0;
cdb->sh_desc[1] = 0;
cdb->sh_desc[2] = 0;
- if (is_proto_ipsec(ses)) {
- if (ses->dir == DIR_ENC) {
- shared_desc_len = cnstr_shdsc_ipsec_new_encap(
- cdb->sh_desc,
- true, swap, &ses->encap_pdb,
- (uint8_t *)&ses->ip4_hdr,
- &alginfo_c, &alginfo_a);
- } else if (ses->dir == DIR_DEC) {
- shared_desc_len = cnstr_shdsc_ipsec_new_decap(
- cdb->sh_desc,
- true, swap, &ses->decap_pdb,
- &alginfo_c, &alginfo_a);
- }
- } else {
- /* Auth_only_len is set as 0 here and it will be
- * overwritten in fd for each packet.
- */
- shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
- true, swap, &alginfo_c, &alginfo_a,
- ses->iv.length, 0,
- ses->digest_length, ses->dir);
- }
+ /* Auth_only_len is set as 0 here and it will be
+ * overwritten in fd for each packet.
+ */
+ shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
+ true, swap, &alginfo_c, &alginfo_a,
+ ses->iv.length, 0,
+ ses->digest_length, ses->dir);
}
if (shared_desc_len < 0) {
@@ -1445,20 +1526,26 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
nb_ops = loop;
goto send_pkts;
}
- if (unlikely(!ses->qp || ses->qp != qp)) {
- DPAA_SEC_DP_ERR("sess->qp - %p qp %p",
- ses->qp, qp);
+ if (unlikely(!ses->qp)) {
if (dpaa_sec_attach_sess_q(qp, ses)) {
frames_to_send = loop;
nb_ops = loop;
goto send_pkts;
}
+ } else if (unlikely(ses->qp != qp)) {
+ DPAA_SEC_DP_ERR("Old:sess->qp = %p"
+ " New qp = %p\n", ses->qp, qp);
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
}
auth_only_len = op->sym->auth.data.length -
op->sym->cipher.data.length;
if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
- if (is_auth_only(ses)) {
+ if (is_proto_ipsec(ses)) {
+ cf = build_proto(op, ses);
+ } else if (is_auth_only(ses)) {
cf = build_auth_only(op, ses);
} else if (is_cipher_only(ses)) {
cf = build_cipher_only(op, ses);
@@ -1467,8 +1554,6 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
auth_only_len = ses->auth_only_len;
} else if (is_auth_cipher(ses)) {
cf = build_cipher_auth(op, ses);
- } else if (is_proto_ipsec(ses)) {
- cf = build_proto(op, ses);
} else {
DPAA_SEC_DP_ERR("not supported ops");
frames_to_send = loop;
@@ -1760,6 +1845,7 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
DPAA_SEC_ERR("invalid session struct");
return -EINVAL;
}
+ memset(session, 0, sizeof(dpaa_sec_session));
/* Default IV length = 0 */
session->iv.length = 0;
@@ -1807,7 +1893,9 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
return -EINVAL;
}
session->ctx_pool = internals->ctx_pool;
+ rte_spinlock_lock(&internals->lock);
session->inq = dpaa_sec_attach_rxq(internals);
+ rte_spinlock_unlock(&internals->lock);
if (session->inq == NULL) {
DPAA_SEC_ERR("unable to attach sec queue");
goto err1;
@@ -1888,111 +1976,86 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
{
struct dpaa_sec_dev_private *internals = dev->data->dev_private;
struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
- struct rte_crypto_auth_xform *auth_xform;
- struct rte_crypto_cipher_xform *cipher_xform;
+ struct rte_crypto_auth_xform *auth_xform = NULL;
+ struct rte_crypto_cipher_xform *cipher_xform = NULL;
dpaa_sec_session *session = (dpaa_sec_session *)sess;
PMD_INIT_FUNC_TRACE();
+ memset(session, 0, sizeof(dpaa_sec_session));
if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
cipher_xform = &conf->crypto_xform->cipher;
- auth_xform = &conf->crypto_xform->next->auth;
+ if (conf->crypto_xform->next)
+ auth_xform = &conf->crypto_xform->next->auth;
} else {
auth_xform = &conf->crypto_xform->auth;
- cipher_xform = &conf->crypto_xform->next->cipher;
+ if (conf->crypto_xform->next)
+ cipher_xform = &conf->crypto_xform->next->cipher;
}
session->proto_alg = conf->protocol;
- session->cipher_key.data = rte_zmalloc(NULL,
- cipher_xform->key.length,
- RTE_CACHE_LINE_SIZE);
- if (session->cipher_key.data == NULL &&
- cipher_xform->key.length > 0) {
- DPAA_SEC_ERR("No Memory for cipher key");
- return -ENOMEM;
- }
- session->cipher_key.length = cipher_xform->key.length;
- session->auth_key.data = rte_zmalloc(NULL,
- auth_xform->key.length,
- RTE_CACHE_LINE_SIZE);
- if (session->auth_key.data == NULL &&
- auth_xform->key.length > 0) {
- DPAA_SEC_ERR("No Memory for auth key");
- rte_free(session->cipher_key.data);
- return -ENOMEM;
+ if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
+ session->cipher_key.data = rte_zmalloc(NULL,
+ cipher_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL &&
+ cipher_xform->key.length > 0) {
+ DPAA_SEC_ERR("No Memory for cipher key");
+ return -ENOMEM;
+ }
+ memcpy(session->cipher_key.data, cipher_xform->key.data,
+ cipher_xform->key.length);
+ session->cipher_key.length = cipher_xform->key.length;
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ break;
+ default:
+ DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
+ cipher_xform->algo);
+ goto out;
+ }
+ session->cipher_alg = cipher_xform->algo;
+ } else {
+ session->cipher_key.data = NULL;
+ session->cipher_key.length = 0;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
}
- session->auth_key.length = auth_xform->key.length;
- memcpy(session->cipher_key.data, cipher_xform->key.data,
- cipher_xform->key.length);
- memcpy(session->auth_key.data, auth_xform->key.data,
- auth_xform->key.length);
- switch (auth_xform->algo) {
- case RTE_CRYPTO_AUTH_SHA1_HMAC:
- session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
- break;
- case RTE_CRYPTO_AUTH_MD5_HMAC:
- session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
- break;
- case RTE_CRYPTO_AUTH_SHA256_HMAC:
- session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
- break;
- case RTE_CRYPTO_AUTH_SHA384_HMAC:
- session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
- break;
- case RTE_CRYPTO_AUTH_SHA512_HMAC:
- session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
- break;
- case RTE_CRYPTO_AUTH_AES_CMAC:
- session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
- break;
- case RTE_CRYPTO_AUTH_NULL:
+ if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
+ session->auth_key.data = rte_zmalloc(NULL,
+ auth_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL &&
+ auth_xform->key.length > 0) {
+ DPAA_SEC_ERR("No Memory for auth key");
+ rte_free(session->cipher_key.data);
+ return -ENOMEM;
+ }
+ memcpy(session->auth_key.data, auth_xform->key.data,
+ auth_xform->key.length);
+ session->auth_key.length = auth_xform->key.length;
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ break;
+ default:
+ DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
+ auth_xform->algo);
+ goto out;
+ }
+ session->auth_alg = auth_xform->algo;
+ } else {
+ session->auth_key.data = NULL;
+ session->auth_key.length = 0;
session->auth_alg = RTE_CRYPTO_AUTH_NULL;
- break;
- case RTE_CRYPTO_AUTH_SHA224_HMAC:
- case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
- case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
- case RTE_CRYPTO_AUTH_SHA1:
- case RTE_CRYPTO_AUTH_SHA256:
- case RTE_CRYPTO_AUTH_SHA512:
- case RTE_CRYPTO_AUTH_SHA224:
- case RTE_CRYPTO_AUTH_SHA384:
- case RTE_CRYPTO_AUTH_MD5:
- case RTE_CRYPTO_AUTH_AES_GMAC:
- case RTE_CRYPTO_AUTH_KASUMI_F9:
- case RTE_CRYPTO_AUTH_AES_CBC_MAC:
- case RTE_CRYPTO_AUTH_ZUC_EIA3:
- DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
- auth_xform->algo);
- goto out;
- default:
- DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
- auth_xform->algo);
- goto out;
- }
-
- switch (cipher_xform->algo) {
- case RTE_CRYPTO_CIPHER_AES_CBC:
- session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
- break;
- case RTE_CRYPTO_CIPHER_3DES_CBC:
- session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
- break;
- case RTE_CRYPTO_CIPHER_AES_CTR:
- session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
- break;
- case RTE_CRYPTO_CIPHER_NULL:
- case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
- case RTE_CRYPTO_CIPHER_3DES_ECB:
- case RTE_CRYPTO_CIPHER_AES_ECB:
- case RTE_CRYPTO_CIPHER_KASUMI_F8:
- DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
- cipher_xform->algo);
- goto out;
- default:
- DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
- cipher_xform->algo);
- goto out;
}
if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
@@ -2020,7 +2083,8 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
PDBOPTS_ESP_OIHI_PDB_INL |
PDBOPTS_ESP_IVSRC |
- PDBHMO_ESP_ENCAP_DTTL;
+ PDBHMO_ESP_ENCAP_DTTL |
+ PDBHMO_ESP_SNR;
session->encap_pdb.spi = ipsec_xform->spi;
session->encap_pdb.ip_hdr_len = sizeof(struct ip);
@@ -2033,7 +2097,9 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
} else
goto out;
session->ctx_pool = internals->ctx_pool;
+ rte_spinlock_lock(&internals->lock);
session->inq = dpaa_sec_attach_rxq(internals);
+ rte_spinlock_unlock(&internals->lock);
if (session->inq == NULL) {
DPAA_SEC_ERR("unable to attach sec queue");
goto out;
@@ -2204,7 +2270,7 @@ dpaa_sec_capabilities_get(void *device __rte_unused)
return dpaa_sec_security_cap;
}
-struct rte_security_ops dpaa_sec_security_ops = {
+static const struct rte_security_ops dpaa_sec_security_ops = {
.session_create = dpaa_sec_security_session_create,
.session_update = NULL,
.session_stats_get = NULL,
@@ -2284,6 +2350,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
security_instance->sess_cnt = 0;
cryptodev->security_ctx = security_instance;
+ rte_spinlock_init(&internals->lock);
for (i = 0; i < internals->max_nb_queue_pairs; i++) {
/* init qman fq for queue pair */
qp = &internals->qps[i];
@@ -2316,7 +2383,7 @@ init_error:
}
static int
-cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
+cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
struct rte_dpaa_device *dpaa_dev)
{
struct rte_cryptodev *cryptodev;
@@ -2344,7 +2411,6 @@ cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
dpaa_dev->crypto_dev = cryptodev;
cryptodev->device = &dpaa_dev->device;
- cryptodev->device->driver = &dpaa_drv->driver;
/* init user callbacks */
TAILQ_INIT(&(cryptodev->link_intr_cbs));
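With the reworked dpaa_sec IPsec session setup above, the crypto_xform chain no longer has to carry both a cipher and an auth transform: a NULL "next" pointer now makes the missing half fall back to the NULL algorithm. Below is a minimal sketch of a cipher-only egress chain that the new code accepts; the key buffer, key length and IV layout are placeholders for illustration, not values taken from the patch.

    #include <stdint.h>
    #include <rte_crypto_sym.h>

    /* Illustrative only: a cipher-only transform chain (next == NULL).
     * The reworked session setup treats the missing auth half as
     * RTE_CRYPTO_AUTH_NULL instead of dereferencing a NULL next pointer.
     */
    static uint8_t example_key[16];     /* placeholder key material */

    static struct rte_crypto_sym_xform cipher_only_xform = {
        .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
        .next = NULL,
        .cipher = {
            .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
            .algo = RTE_CRYPTO_CIPHER_AES_CBC,
            .key = { .data = example_key, .length = sizeof(example_key) },
            .iv = { .offset = 0, .length = 16 },    /* placeholder layout */
        },
    };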
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index ac6c00a6..f4b87844 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -137,7 +137,7 @@ struct dpaa_sec_qp {
int tx_errs;
};
-#define RTE_DPAA_MAX_NB_SEC_QPS 8
+#define RTE_DPAA_MAX_NB_SEC_QPS 2
#define RTE_DPAA_MAX_RX_QUEUE RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS
#define DPAA_MAX_DEQUEUE_NUM_FRAMES 63
@@ -150,6 +150,7 @@ struct dpaa_sec_dev_private {
unsigned char inq_attach[RTE_DPAA_MAX_RX_QUEUE];
unsigned int max_nb_queue_pairs;
unsigned int max_nb_sessions;
+ rte_spinlock_t lock;
};
#define MAX_SG_ENTRIES 16
diff --git a/drivers/crypto/kasumi/meson.build b/drivers/crypto/kasumi/meson.build
new file mode 100644
index 00000000..a09b0e25
--- /dev/null
+++ b/drivers/crypto/kasumi/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+lib = cc.find_library('libsso_kasumi', required: false)
+if not lib.found()
+ build = false
+else
+ ext_deps += lib
+endif
+
+sources = files('rte_kasumi_pmd.c', 'rte_kasumi_pmd_ops.c')
+deps += ['bus_vdev']
diff --git a/drivers/crypto/meson.build b/drivers/crypto/meson.build
index d64ca418..bf1bd928 100644
--- a/drivers/crypto/meson.build
+++ b/drivers/crypto/meson.build
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-drivers = ['ccp', 'dpaa_sec', 'dpaa2_sec', 'mvsam',
- 'null', 'openssl', 'qat', 'virtio']
+drivers = ['aesni_gcm', 'aesni_mb', 'caam_jr', 'ccp', 'dpaa_sec', 'dpaa2_sec',
+ 'kasumi', 'mvsam', 'null', 'octeontx', 'openssl', 'qat', 'scheduler',
+ 'virtio', 'zuc']
std_deps = ['cryptodev'] # cryptodev pulls in all other needed deps
config_flag_fmt = 'RTE_LIBRTE_@0@_PMD'
diff --git a/drivers/crypto/mvsam/Makefile b/drivers/crypto/mvsam/Makefile
index c3dc72c1..2b4d036c 100644
--- a/drivers/crypto/mvsam/Makefile
+++ b/drivers/crypto/mvsam/Makefile
@@ -19,6 +19,7 @@ LIB = librte_pmd_mvsam_crypto.a
# build flags
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(RTE_SDK)/drivers/common/mvep
CFLAGS += -I$(LIBMUSDK_PATH)/include
CFLAGS += -DMVCONF_TYPES_PUBLIC
CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC
@@ -31,9 +32,9 @@ EXPORT_MAP := rte_pmd_mvsam_version.map
# external library dependencies
LDLIBS += -L$(LIBMUSDK_PATH)/lib -lmusdk
-LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_kvargs
LDLIBS += -lrte_cryptodev
-LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_bus_vdev -lrte_common_mvep
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) += rte_mrvl_pmd.c
diff --git a/drivers/crypto/mvsam/meson.build b/drivers/crypto/mvsam/meson.build
index 3c8ea3cf..f1c87966 100644
--- a/drivers/crypto/mvsam/meson.build
+++ b/drivers/crypto/mvsam/meson.build
@@ -18,4 +18,4 @@ endif
sources = files('rte_mrvl_pmd.c', 'rte_mrvl_pmd_ops.c')
-deps += ['bus_vdev']
+deps += ['bus_vdev', 'common_mvep']
diff --git a/drivers/crypto/mvsam/rte_mrvl_pmd.c b/drivers/crypto/mvsam/rte_mrvl_pmd.c
index 73eff757..c2ae82a2 100644
--- a/drivers/crypto/mvsam/rte_mrvl_pmd.c
+++ b/drivers/crypto/mvsam/rte_mrvl_pmd.c
@@ -11,11 +11,11 @@
#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
+#include <rte_kvargs.h>
+#include <rte_mvep_common.h>
#include "rte_mrvl_pmd_private.h"
-#define MRVL_MUSDK_DMA_MEMSIZE 41943040
-
#define MRVL_PMD_MAX_NB_SESS_ARG ("max_nb_sessions")
#define MRVL_PMD_DEFAULT_MAX_NB_SESSIONS 2048
@@ -68,6 +68,9 @@ __rte_aligned(32);
*/
static const
struct cipher_params_mapping cipher_map[RTE_CRYPTO_CIPHER_LIST_END] = {
+ [RTE_CRYPTO_CIPHER_NULL] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_NONE },
[RTE_CRYPTO_CIPHER_3DES_CBC] = {
.supported = ALGO_SUPPORTED,
.cipher_alg = SAM_CIPHER_3DES,
@@ -93,6 +96,11 @@ struct cipher_params_mapping cipher_map[RTE_CRYPTO_CIPHER_LIST_END] = {
.cipher_alg = SAM_CIPHER_AES,
.cipher_mode = SAM_CIPHER_CTR,
.max_key_len = BITS2BYTES(256) },
+ [RTE_CRYPTO_CIPHER_AES_ECB] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_AES,
+ .cipher_mode = SAM_CIPHER_ECB,
+ .max_key_len = BITS2BYTES(256) },
};
/**
@@ -100,6 +108,9 @@ struct cipher_params_mapping cipher_map[RTE_CRYPTO_CIPHER_LIST_END] = {
*/
static const
struct auth_params_mapping auth_map[RTE_CRYPTO_AUTH_LIST_END] = {
+ [RTE_CRYPTO_AUTH_NULL] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_NONE },
[RTE_CRYPTO_AUTH_MD5_HMAC] = {
.supported = ALGO_SUPPORTED,
.auth_alg = SAM_AUTH_HMAC_MD5 },
@@ -112,6 +123,9 @@ struct auth_params_mapping auth_map[RTE_CRYPTO_AUTH_LIST_END] = {
[RTE_CRYPTO_AUTH_SHA1] = {
.supported = ALGO_SUPPORTED,
.auth_alg = SAM_AUTH_HASH_SHA1 },
+ [RTE_CRYPTO_AUTH_SHA224_HMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HMAC_SHA2_224 },
[RTE_CRYPTO_AUTH_SHA224] = {
.supported = ALGO_SUPPORTED,
.auth_alg = SAM_AUTH_HASH_SHA2_224 },
@@ -210,7 +224,7 @@ mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess,
{
/* Make sure we've got proper struct */
if (cipher_xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
- MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
+ MRVL_LOG(ERR, "Wrong xform struct provided!");
return -EINVAL;
}
@@ -218,7 +232,7 @@ mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess,
if ((cipher_xform->cipher.algo > RTE_DIM(cipher_map)) ||
(cipher_map[cipher_xform->cipher.algo].supported
!= ALGO_SUPPORTED)) {
- MRVL_CRYPTO_LOG_ERR("Cipher algorithm not supported!");
+ MRVL_LOG(ERR, "Cipher algorithm not supported!");
return -EINVAL;
}
@@ -238,7 +252,7 @@ mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess,
/* Get max key length. */
if (cipher_xform->cipher.key.length >
cipher_map[cipher_xform->cipher.algo].max_key_len) {
- MRVL_CRYPTO_LOG_ERR("Wrong key length!");
+ MRVL_LOG(ERR, "Wrong key length!");
return -EINVAL;
}
@@ -261,14 +275,14 @@ mrvl_crypto_set_auth_session_parameters(struct mrvl_crypto_session *sess,
{
/* Make sure we've got proper struct */
if (auth_xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
- MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
+ MRVL_LOG(ERR, "Wrong xform struct provided!");
return -EINVAL;
}
/* See if map data is present and valid */
if ((auth_xform->auth.algo > RTE_DIM(auth_map)) ||
(auth_map[auth_xform->auth.algo].supported != ALGO_SUPPORTED)) {
- MRVL_CRYPTO_LOG_ERR("Auth algorithm not supported!");
+ MRVL_LOG(ERR, "Auth algorithm not supported!");
return -EINVAL;
}
@@ -300,7 +314,7 @@ mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess,
{
/* Make sure we've got proper struct */
if (aead_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
- MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
+ MRVL_LOG(ERR, "Wrong xform struct provided!");
return -EINVAL;
}
@@ -308,7 +322,7 @@ mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess,
if ((aead_xform->aead.algo > RTE_DIM(aead_map)) ||
(aead_map[aead_xform->aead.algo].supported
!= ALGO_SUPPORTED)) {
- MRVL_CRYPTO_LOG_ERR("AEAD algorithm not supported!");
+ MRVL_LOG(ERR, "AEAD algorithm not supported!");
return -EINVAL;
}
@@ -326,7 +340,7 @@ mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess,
/* Get max key length. */
if (aead_xform->aead.key.length >
aead_map[aead_xform->aead.algo].max_key_len) {
- MRVL_CRYPTO_LOG_ERR("Wrong key length!");
+ MRVL_LOG(ERR, "Wrong key length!");
return -EINVAL;
}
@@ -391,21 +405,21 @@ mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
if ((cipher_xform != NULL) &&
(mrvl_crypto_set_cipher_session_parameters(
sess, cipher_xform) < 0)) {
- MRVL_CRYPTO_LOG_ERR("Invalid/unsupported cipher parameters");
+ MRVL_LOG(ERR, "Invalid/unsupported cipher parameters!");
return -EINVAL;
}
if ((auth_xform != NULL) &&
(mrvl_crypto_set_auth_session_parameters(
sess, auth_xform) < 0)) {
- MRVL_CRYPTO_LOG_ERR("Invalid/unsupported auth parameters");
+ MRVL_LOG(ERR, "Invalid/unsupported auth parameters!");
return -EINVAL;
}
if ((aead_xform != NULL) &&
(mrvl_crypto_set_aead_session_parameters(
sess, aead_xform) < 0)) {
- MRVL_CRYPTO_LOG_ERR("Invalid/unsupported aead parameters");
+ MRVL_LOG(ERR, "Invalid/unsupported aead parameters!");
return -EINVAL;
}
@@ -437,12 +451,14 @@ mrvl_request_prepare(struct sam_cio_op_params *request,
struct rte_crypto_op *op)
{
struct mrvl_crypto_session *sess;
- struct rte_mbuf *dst_mbuf;
+ struct rte_mbuf *src_mbuf, *dst_mbuf;
+ uint16_t segments_nb;
uint8_t *digest;
+ int i;
if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
- MRVL_CRYPTO_LOG_ERR("MRVL CRYPTO PMD only supports session "
- "oriented requests, op (%p) is sessionless.",
+ MRVL_LOG(ERR, "MRVL CRYPTO PMD only supports session "
+ "oriented requests, op (%p) is sessionless!",
op);
return -EINVAL;
}
@@ -450,39 +466,56 @@ mrvl_request_prepare(struct sam_cio_op_params *request,
sess = (struct mrvl_crypto_session *)get_sym_session_private_data(
op->sym->session, cryptodev_driver_id);
if (unlikely(sess == NULL)) {
- MRVL_CRYPTO_LOG_ERR("Session was not created for this device");
+ MRVL_LOG(ERR, "Session was not created for this device!");
return -EINVAL;
}
- /*
+ request->sa = sess->sam_sess;
+ request->cookie = op;
+
+ src_mbuf = op->sym->m_src;
+ segments_nb = src_mbuf->nb_segs;
+ /* The following conditions must be met:
+ * - A destination buffer is required when the source buffer is segmented
+ * - A segmented destination buffer is not supported
+ */
+ if ((segments_nb > 1) && (!op->sym->m_dst)) {
+ MRVL_LOG(ERR, "op->sym->m_dst = NULL!");
+ return -1;
+ }
+ /* For the non-SG case:
* If application delivered us null dst buffer, it means it expects
* us to deliver the result in src buffer.
*/
dst_mbuf = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
- request->sa = sess->sam_sess;
- request->cookie = op;
-
- /* Single buffers only, sorry. */
- request->num_bufs = 1;
- request->src = src_bd;
- src_bd->vaddr = rte_pktmbuf_mtod(op->sym->m_src, void *);
- src_bd->paddr = rte_pktmbuf_iova(op->sym->m_src);
- src_bd->len = rte_pktmbuf_data_len(op->sym->m_src);
-
- /* Empty source. */
- if (rte_pktmbuf_data_len(op->sym->m_src) == 0) {
- /* EIP does not support 0 length buffers. */
- MRVL_CRYPTO_LOG_ERR("Buffer length == 0 not supported!");
+ if (!rte_pktmbuf_is_contiguous(dst_mbuf)) {
+ MRVL_LOG(ERR, "Segmented destination buffer not supported!");
return -1;
}
+ request->num_bufs = segments_nb;
+ for (i = 0; i < segments_nb; i++) {
+ /* Empty source. */
+ if (rte_pktmbuf_data_len(src_mbuf) == 0) {
+ /* EIP does not support 0 length buffers. */
+ MRVL_LOG(ERR, "Buffer length == 0 not supported!");
+ return -1;
+ }
+ src_bd[i].vaddr = rte_pktmbuf_mtod(src_mbuf, void *);
+ src_bd[i].paddr = rte_pktmbuf_iova(src_mbuf);
+ src_bd[i].len = rte_pktmbuf_data_len(src_mbuf);
+
+ src_mbuf = src_mbuf->next;
+ }
+ request->src = src_bd;
+
/* Empty destination. */
if (rte_pktmbuf_data_len(dst_mbuf) == 0) {
/* Make dst buffer fit at least source data. */
if (rte_pktmbuf_append(dst_mbuf,
rte_pktmbuf_data_len(op->sym->m_src)) == NULL) {
- MRVL_CRYPTO_LOG_ERR("Unable to set big enough dst buffer!");
+ MRVL_LOG(ERR, "Unable to set big enough dst buffer!");
return -1;
}
}
@@ -527,7 +560,7 @@ mrvl_request_prepare(struct sam_cio_op_params *request,
/*
* EIP supports only scenarios where ICV(digest buffer) is placed at
- * auth_icv_offset. Any other placement means risking errors.
+ * auth_icv_offset.
*/
if (sess->sam_sess_params.dir == SAM_DIR_ENCRYPT) {
/*
@@ -536,17 +569,36 @@ mrvl_request_prepare(struct sam_cio_op_params *request,
*/
if (rte_pktmbuf_mtod_offset(
dst_mbuf, uint8_t *,
- request->auth_icv_offset) == digest) {
+ request->auth_icv_offset) == digest)
return 0;
- }
} else {/* sess->sam_sess_params.dir == SAM_DIR_DECRYPT */
/*
* EIP will look for digest at auth_icv_offset
- * offset in SRC buffer.
+ * offset in SRC buffer. It must be placed in the last
+ * segment, and the offset must be adjusted to reach the
+ * digest within that last segment.
*/
- if (rte_pktmbuf_mtod_offset(
- op->sym->m_src, uint8_t *,
- request->auth_icv_offset) == digest) {
+ struct rte_mbuf *last_seg = op->sym->m_src;
+ uint32_t d_offset = request->auth_icv_offset;
+ u32 d_size = sess->sam_sess_params.u.basic.auth_icv_len;
+ unsigned char *d_ptr;
+
+ /* Find the last segment and the offset for the last segment */
+ while ((last_seg->next != NULL) &&
+ (d_offset >= last_seg->data_len)) {
+ d_offset -= last_seg->data_len;
+ last_seg = last_seg->next;
+ }
+
+ if (rte_pktmbuf_mtod_offset(last_seg, uint8_t *,
+ d_offset) == digest)
+ return 0;
+
+ /* copy digest to last segment */
+ if (last_seg->buf_len >= (d_size + d_offset)) {
+ d_ptr = (unsigned char *)last_seg->buf_addr +
+ d_offset;
+ rte_memcpy(d_ptr, digest, d_size);
return 0;
}
}
@@ -582,11 +634,10 @@ mrvl_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
int ret;
struct sam_cio_op_params requests[nb_ops];
/*
- * DPDK uses single fragment buffers, so we can KISS descriptors.
* SAM does not store bd pointers, so on-stack scope will be enough.
*/
- struct sam_buf_info src_bd[nb_ops];
- struct sam_buf_info dst_bd[nb_ops];
+ struct mrvl_crypto_src_table src_bd[nb_ops];
+ struct sam_buf_info dst_bd[nb_ops];
struct mrvl_crypto_qp *qp = (struct mrvl_crypto_qp *)queue_pair;
if (nb_ops == 0)
@@ -594,15 +645,17 @@ mrvl_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
/* Prepare the burst. */
memset(&requests, 0, sizeof(requests));
+ memset(&src_bd, 0, sizeof(src_bd));
/* Iterate through */
for (; iter_ops < nb_ops; ++iter_ops) {
+ /* store the op id for debug */
+ src_bd[iter_ops].iter_ops = iter_ops;
if (mrvl_request_prepare(&requests[iter_ops],
- &src_bd[iter_ops],
+ src_bd[iter_ops].src_bd,
&dst_bd[iter_ops],
ops[iter_ops]) < 0) {
- MRVL_CRYPTO_LOG_ERR(
- "Error while parameters preparation!");
+ MRVL_LOG(ERR, "Error while preparing parameters!");
qp->stats.enqueue_err_count++;
ops[iter_ops]->status = RTE_CRYPTO_OP_STATUS_ERROR;
@@ -680,12 +733,12 @@ mrvl_crypto_pmd_dequeue_burst(void *queue_pair,
ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
break;
case SAM_CIO_ERR_ICV:
- MRVL_CRYPTO_LOG_DBG("CIO returned SAM_CIO_ERR_ICV.");
+ MRVL_LOG(DEBUG, "CIO returned SAM_CIO_ERR_ICV.");
ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
break;
default:
- MRVL_CRYPTO_LOG_DBG(
- "CIO returned Error: %d", results[i].status);
+ MRVL_LOG(DEBUG,
+ "CIO returned Error: %d.", results[i].status);
ops[i]->status = RTE_CRYPTO_OP_STATUS_ERROR;
break;
}
@@ -711,12 +764,12 @@ cryptodev_mrvl_crypto_create(const char *name,
struct rte_cryptodev *dev;
struct mrvl_crypto_private *internals;
struct sam_init_params sam_params;
- int ret;
+ int ret = -EINVAL;
dev = rte_cryptodev_pmd_create(name, &vdev->device,
&init_params->common);
if (dev == NULL) {
- MRVL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
+ MRVL_LOG(ERR, "Failed to create cryptodev vdev!");
goto init_error;
}
@@ -729,7 +782,9 @@ cryptodev_mrvl_crypto_create(const char *name,
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
- RTE_CRYPTODEV_FF_HW_ACCELERATED;
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
/* Set vector instructions mode supported */
internals = dev->data->dev_private;
@@ -737,29 +792,26 @@ cryptodev_mrvl_crypto_create(const char *name,
internals->max_nb_qpairs = init_params->common.max_nb_queue_pairs;
internals->max_nb_sessions = init_params->max_nb_sessions;
- /*
- * ret == -EEXIST is correct, it means DMA
- * has been already initialized.
- */
- ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
- if (ret < 0) {
- if (ret != -EEXIST)
- return ret;
-
- MRVL_CRYPTO_LOG_INFO(
- "DMA memory has been already initialized by a different driver.");
- }
+ ret = rte_mvep_init(MVEP_MOD_T_SAM, NULL);
+ if (ret)
+ goto init_error;
sam_params.max_num_sessions = internals->max_nb_sessions;
- return sam_init(&sam_params);
+ /* sam_set_debug_flags(3); */
+
+ ret = sam_init(&sam_params);
+ if (ret)
+ goto init_error;
+
+ return 0;
init_error:
- MRVL_CRYPTO_LOG_ERR(
- "driver %s: %s failed", init_params->common.name, __func__);
+ MRVL_LOG(ERR,
+ "Driver %s: %s failed!", init_params->common.name, __func__);
cryptodev_mrvl_crypto_uninit(vdev);
- return -EFAULT;
+ return ret;
}
/** Parse integer from integer argument */
@@ -771,7 +823,7 @@ parse_integer_arg(const char *key __rte_unused,
*i = atoi(value);
if (*i < 0) {
- MRVL_CRYPTO_LOG_ERR("Argument has to be positive.\n");
+ MRVL_LOG(ERR, "Argument has to be positive!");
return -EINVAL;
}
@@ -786,9 +838,8 @@ parse_name_arg(const char *key __rte_unused,
struct rte_cryptodev_pmd_init_params *params = extra_args;
if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
- MRVL_CRYPTO_LOG_ERR("Invalid name %s, should be less than "
- "%u bytes.\n", value,
- RTE_CRYPTODEV_NAME_MAX_LEN - 1);
+ MRVL_LOG(ERR, "Invalid name %s, should be less than %u bytes!",
+ value, RTE_CRYPTODEV_NAME_MAX_LEN - 1);
return -EINVAL;
}
@@ -864,7 +915,7 @@ cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev)
.private_data_size =
sizeof(struct mrvl_crypto_private),
.max_nb_queue_pairs =
- sam_get_num_inst() * SAM_HW_RING_NUM,
+ sam_get_num_inst() * sam_get_num_cios(0),
.socket_id = rte_socket_id()
},
.max_nb_sessions = MRVL_PMD_DEFAULT_MAX_NB_SESSIONS
@@ -880,9 +931,8 @@ cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev)
ret = mrvl_pmd_parse_input_args(&init_params, args);
if (ret) {
- RTE_LOG(ERR, PMD,
- "Failed to parse initialisation arguments[%s]\n",
- args);
+ MRVL_LOG(ERR, "Failed to parse initialisation arguments[%s]!",
+ args);
return -EINVAL;
}
@@ -904,11 +954,11 @@ cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev)
if (name == NULL)
return -EINVAL;
- RTE_LOG(INFO, PMD,
- "Closing Marvell crypto device %s on numa socket %u\n",
- name, rte_socket_id());
+ MRVL_LOG(INFO, "Closing Marvell crypto device %s on numa socket %u.",
+ name, rte_socket_id());
sam_deinit();
+ rte_mvep_deinit(MVEP_MOD_T_SAM);
cryptodev = rte_cryptodev_pmd_get_named_dev(name);
if (cryptodev == NULL)
@@ -935,3 +985,8 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MRVL_PMD,
"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv.driver,
cryptodev_driver_id);
+
+RTE_INIT(crypto_mrvl_init_log)
+{
+ mrvl_logtype_driver = rte_log_register("pmd.crypto.mvsam");
+}
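The scatter-gather support added to mrvl_request_prepare() above accepts a chained source mbuf but still requires a contiguous destination buffer. Below is a minimal caller-side sketch of that arrangement; the mempool, segment sizes and the helper itself are assumptions for illustration and are not part of the patch.

    #include <rte_mbuf.h>
    #include <rte_crypto.h>

    /* Illustrative only: chain two source segments and supply a linear dst,
     * as now required when op->sym->m_src has more than one segment.
     */
    static int
    example_prepare_sgl_op(struct rte_crypto_op *op, struct rte_mempool *mbuf_pool)
    {
        struct rte_mbuf *seg0 = rte_pktmbuf_alloc(mbuf_pool);
        struct rte_mbuf *seg1 = rte_pktmbuf_alloc(mbuf_pool);
        struct rte_mbuf *dst = rte_pktmbuf_alloc(mbuf_pool);

        if (!seg0 || !seg1 || !dst)
            return -1;

        rte_pktmbuf_append(seg0, 64);   /* placeholder payload sizes */
        rte_pktmbuf_append(seg1, 64);
        rte_pktmbuf_append(dst, 128);   /* dst must hold the whole result */

        if (rte_pktmbuf_chain(seg0, seg1) < 0)
            return -1;

        op->sym->m_src = seg0;  /* segmented input is allowed */
        op->sym->m_dst = dst;   /* but the output buffer must stay contiguous */
        return 0;
    }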
diff --git a/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c b/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c
index c045562c..9956f051 100644
--- a/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c
+++ b/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c
@@ -30,9 +30,9 @@ static const struct rte_cryptodev_capabilities
.increment = 1
},
.digest_size = {
- .min = 16,
+ .min = 12,
.max = 16,
- .increment = 0
+ .increment = 4
},
}, }
}, }
@@ -50,9 +50,9 @@ static const struct rte_cryptodev_capabilities
.increment = 0
},
.digest_size = {
- .min = 16,
+ .min = 12,
.max = 16,
- .increment = 0
+ .increment = 4
},
}, }
}, }
@@ -70,9 +70,9 @@ static const struct rte_cryptodev_capabilities
.increment = 1
},
.digest_size = {
- .min = 20,
+ .min = 12,
.max = 20,
- .increment = 0
+ .increment = 4
},
}, }
}, }
@@ -90,8 +90,29 @@ static const struct rte_cryptodev_capabilities
.increment = 0
},
.digest_size = {
- .min = 20,
+ .min = 12,
.max = 20,
+ .increment = 4
+ },
+ }, }
+ }, }
+ },
+ {
+ /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 28,
+ .max = 28,
.increment = 0
},
}, }
@@ -110,9 +131,9 @@ static const struct rte_cryptodev_capabilities
.increment = 0
},
.digest_size = {
- .min = 28,
+ .min = 12,
.max = 28,
- .increment = 0
+ .increment = 4
},
}, }
}, }
@@ -130,9 +151,9 @@ static const struct rte_cryptodev_capabilities
.increment = 1
},
.digest_size = {
- .min = 32,
+ .min = 12,
.max = 32,
- .increment = 0
+ .increment = 4
},
}, }
}, }
@@ -150,9 +171,9 @@ static const struct rte_cryptodev_capabilities
.increment = 0
},
.digest_size = {
- .min = 32,
+ .min = 12,
.max = 32,
- .increment = 0
+ .increment = 4
},
}, }
}, }
@@ -170,9 +191,9 @@ static const struct rte_cryptodev_capabilities
.increment = 1
},
.digest_size = {
- .min = 48,
+ .min = 12,
.max = 48,
- .increment = 0
+ .increment = 4
},
}, }
}, }
@@ -190,9 +211,9 @@ static const struct rte_cryptodev_capabilities
.increment = 0
},
.digest_size = {
- .min = 48,
+ .min = 12,
.max = 48,
- .increment = 0
+ .increment = 4
},
}, }
}, }
@@ -210,9 +231,9 @@ static const struct rte_cryptodev_capabilities
.increment = 1
},
.digest_size = {
- .min = 64,
- .max = 64,
- .increment = 0
+ .min = 12,
+ .max = 48,
+ .increment = 4
},
}, }
}, }
@@ -230,8 +251,8 @@ static const struct rte_cryptodev_capabilities
.increment = 0
},
.digest_size = {
- .min = 64,
- .max = 64,
+ .min = 12,
+ .max = 48,
.increment = 0
},
}, }
@@ -277,6 +298,26 @@ static const struct rte_cryptodev_capabilities
}, }
}, }
},
+ { /* AES ECB */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_ECB,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
{ /* AES GCM */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
@@ -372,6 +413,71 @@ static const struct rte_cryptodev_capabilities
}, }
}, }
},
+ { /* 3DES ECB */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_ECB,
+ .block_size = 8,
+ .key_size = {
+ .min = 24,
+ .max = 24,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* NULL (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ }
+ }, },
+ }, },
+ },
+ { /* NULL (CIPHER) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ }
+ }, },
+ }, }
+ },
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
@@ -551,7 +657,7 @@ mrvl_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
*/
int num = sam_get_num_inst();
if (num == 0) {
- MRVL_CRYPTO_LOG_ERR("No crypto engines detected.\n");
+ MRVL_LOG(ERR, "No crypto engines detected!");
return -1;
}
@@ -635,7 +741,7 @@ mrvl_crypto_pmd_sym_session_configure(__rte_unused struct rte_cryptodev *dev,
int ret;
if (sess == NULL) {
- MRVL_CRYPTO_LOG_ERR("Invalid session struct.");
+ MRVL_LOG(ERR, "Invalid session struct!");
return -EINVAL;
}
@@ -646,7 +752,7 @@ mrvl_crypto_pmd_sym_session_configure(__rte_unused struct rte_cryptodev *dev,
ret = mrvl_crypto_set_session_parameters(sess_private_data, xform);
if (ret != 0) {
- MRVL_CRYPTO_LOG_ERR("Failed to configure session parameters.");
+ MRVL_LOG(ERR, "Failed to configure session parameters!");
/* Return session to mempool */
rte_mempool_put(mp, sess_private_data);
@@ -658,7 +764,7 @@ mrvl_crypto_pmd_sym_session_configure(__rte_unused struct rte_cryptodev *dev,
mrvl_sess = (struct mrvl_crypto_session *)sess_private_data;
if (sam_session_create(&mrvl_sess->sam_sess_params,
&mrvl_sess->sam_sess) < 0) {
- MRVL_CRYPTO_LOG_DBG("Failed to create session!");
+ MRVL_LOG(DEBUG, "Failed to create session!");
return -EIO;
}
@@ -686,7 +792,7 @@ mrvl_crypto_pmd_sym_session_clear(struct rte_cryptodev *dev,
if (mrvl_sess->sam_sess &&
sam_session_destroy(mrvl_sess->sam_sess) < 0) {
- MRVL_CRYPTO_LOG_INFO("Error while destroying session!");
+ MRVL_LOG(ERR, "Error while destroying session!");
}
memset(sess, 0, sizeof(struct mrvl_crypto_session));
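The relaxed digest_size ranges above (a 12-byte minimum in 4-byte steps for most HMAC entries) let applications request truncated MACs from the mvsam PMD. Below is a minimal auth-transform sketch asking for a 12-byte SHA1-HMAC digest; the key buffer is a placeholder and not taken from the patch.

    #include <stdint.h>
    #include <rte_crypto_sym.h>

    static uint8_t hmac_key[20];    /* placeholder key material */

    /* Illustrative only: request a truncated 12-byte SHA1-HMAC digest,
     * which the widened capability range now advertises.
     */
    static struct rte_crypto_sym_xform auth_trunc_xform = {
        .type = RTE_CRYPTO_SYM_XFORM_AUTH,
        .next = NULL,
        .auth = {
            .op = RTE_CRYPTO_AUTH_OP_GENERATE,
            .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
            .key = { .data = hmac_key, .length = sizeof(hmac_key) },
            .digest_length = 12,
        },
    };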
diff --git a/drivers/crypto/mvsam/rte_mrvl_pmd_private.h b/drivers/crypto/mvsam/rte_mrvl_pmd_private.h
index c16d95b4..6f8cf562 100644
--- a/drivers/crypto/mvsam/rte_mrvl_pmd_private.h
+++ b/drivers/crypto/mvsam/rte_mrvl_pmd_private.h
@@ -12,32 +12,21 @@
#define CRYPTODEV_NAME_MRVL_PMD crypto_mvsam
/**< Marvell PMD device name */
-#define MRVL_CRYPTO_LOG_ERR(fmt, args...) \
- RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \
- __func__, __LINE__, ## args)
-
-#ifdef RTE_LIBRTE_PMD_MRVL_CRYPTO_DEBUG
-#define MRVL_CRYPTO_LOG_INFO(fmt, args...) \
- RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \
- __func__, __LINE__, ## args)
-
-#define MRVL_CRYPTO_LOG_DBG(fmt, args...) \
- RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \
- __func__, __LINE__, ## args)
-
-#else
-#define MRVL_CRYPTO_LOG_INFO(fmt, args...)
-#define MRVL_CRYPTO_LOG_DBG(fmt, args...)
-#endif
+/** MRVL PMD LOGTYPE DRIVER */
+int mrvl_logtype_driver;
+
+#define MRVL_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, mrvl_logtype_driver, \
+ "%s() line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
/**
* Handy bits->bytes conversion macro.
*/
#define BITS2BYTES(x) ((x) >> 3)
+#define MRVL_MAX_SEGMENTS 16
+
/** The operation order mode enumerator. */
enum mrvl_crypto_chain_order {
MRVL_CRYPTO_CHAIN_CIPHER_ONLY,
@@ -84,6 +73,11 @@ struct mrvl_crypto_session {
uint16_t cipher_iv_offset;
} __rte_cache_aligned;
+struct mrvl_crypto_src_table {
+ uint16_t iter_ops;
+ struct sam_buf_info src_bd[MRVL_MAX_SEGMENTS];
+} __rte_cache_aligned;
+
/** Set and validate MRVL crypto session parameters */
extern int
mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
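The new mrvl_crypto_src_table above holds up to MRVL_MAX_SEGMENTS source descriptors per operation. A caller-side bound check along the lines of the sketch below is one way to keep a chained mbuf within that table; this guard is an illustrative assumption and is not itself part of the patch.

    #include <rte_mbuf.h>

    /* Illustrative only: reject sources with more segments than the
     * per-op descriptor table can hold.
     */
    static inline int
    example_check_nb_segs(const struct rte_mbuf *m_src)
    {
        return (m_src->nb_segs <= MRVL_MAX_SEGMENTS) ? 0 : -1;
    }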
diff --git a/drivers/crypto/null/null_crypto_pmd_ops.c b/drivers/crypto/null/null_crypto_pmd_ops.c
index bb2b6e14..2bdcd019 100644
--- a/drivers/crypto/null/null_crypto_pmd_ops.c
+++ b/drivers/crypto/null/null_crypto_pmd_ops.c
@@ -308,7 +308,7 @@ null_crypto_pmd_sym_session_clear(struct rte_cryptodev *dev,
}
}
-struct rte_cryptodev_ops pmd_ops = {
+static struct rte_cryptodev_ops pmd_ops = {
.dev_configure = null_crypto_pmd_config,
.dev_start = null_crypto_pmd_start,
.dev_stop = null_crypto_pmd_stop,
diff --git a/drivers/crypto/octeontx/Makefile b/drivers/crypto/octeontx/Makefile
new file mode 100644
index 00000000..2e78e69b
--- /dev/null
+++ b/drivers/crypto/octeontx/Makefile
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_octeontx_crypto.a
+
+# library version
+LIBABIVER := 1
+
+# build flags
+CFLAGS += $(WERROR_FLAGS)
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_pci -lrte_bus_pci
+LDLIBS += -lrte_common_cpt
+
+VPATH += $(RTE_SDK)/drivers/crypto/octeontx
+
+CFLAGS += -O3 -DCPT_MODEL=CRYPTO_OCTEONTX
+CFLAGS += -I$(RTE_SDK)/drivers/common/cpt
+
+# PMD code
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += otx_cryptodev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += otx_cryptodev_capabilities.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += otx_cryptodev_hw_access.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += otx_cryptodev_mbox.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += otx_cryptodev_ops.c
+
+# export include files
+SYMLINK-y-include +=
+
+# versioning export map
+EXPORT_MAP := rte_pmd_octeontx_crypto_version.map
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += lib/librte_cryptodev
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += lib/librte_mempool
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_CRYPTO) += lib/librte_malloc
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/octeontx/meson.build b/drivers/crypto/octeontx/meson.build
new file mode 100644
index 00000000..6511b402
--- /dev/null
+++ b/drivers/crypto/octeontx/meson.build
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Cavium, Inc
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['bus_pci']
+deps += ['common_cpt']
+name = 'octeontx_crypto'
+
+sources = files('otx_cryptodev.c',
+ 'otx_cryptodev_capabilities.c',
+ 'otx_cryptodev_hw_access.c',
+ 'otx_cryptodev_mbox.c',
+ 'otx_cryptodev_ops.c')
+
+includes += include_directories('../../common/cpt')
+cflags += '-DCPT_MODEL=CRYPTO_OCTEONTX'
diff --git a/drivers/crypto/octeontx/otx_cryptodev.c b/drivers/crypto/octeontx/otx_cryptodev.c
new file mode 100644
index 00000000..269f0456
--- /dev/null
+++ b/drivers/crypto/octeontx/otx_cryptodev.c
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#include <rte_bus_pci.h>
+#include <rte_common.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_log.h>
+#include <rte_pci.h>
+
+/* CPT common headers */
+#include "cpt_pmd_logs.h"
+
+#include "otx_cryptodev.h"
+#include "otx_cryptodev_ops.h"
+
+static int otx_cryptodev_logtype;
+
+static struct rte_pci_id pci_id_cpt_table[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CPT_81XX_PCI_VF_DEVICE_ID),
+ },
+ /* sentinel */
+ {
+ .device_id = 0
+ },
+};
+
+static void
+otx_cpt_logtype_init(void)
+{
+ cpt_logtype = otx_cryptodev_logtype;
+}
+
+static int
+otx_cpt_pci_probe(struct rte_pci_driver *pci_drv,
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_cryptodev *cryptodev;
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ int retval;
+
+ if (pci_drv == NULL)
+ return -ENODEV;
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ cryptodev = rte_cryptodev_pmd_allocate(name, rte_socket_id());
+ if (cryptodev == NULL)
+ return -ENOMEM;
+
+ cryptodev->device = &pci_dev->device;
+ cryptodev->device->driver = &pci_drv->driver;
+ cryptodev->driver_id = otx_cryptodev_driver_id;
+
+ /* init user callbacks */
+ TAILQ_INIT(&(cryptodev->link_intr_cbs));
+
+ /* init logtype used in common */
+ otx_cpt_logtype_init();
+
+ /* Invoke PMD device initialization function */
+ retval = otx_cpt_dev_create(cryptodev);
+ if (retval == 0)
+ return 0;
+
+ CPT_LOG_ERR("[DRV %s]: Failed to create device "
+ "(vendor_id: 0x%x device_id: 0x%x",
+ pci_drv->driver.name,
+ (unsigned int) pci_dev->id.vendor_id,
+ (unsigned int) pci_dev->id.device_id);
+
+ cryptodev->attached = RTE_CRYPTODEV_DETACHED;
+
+ return -ENXIO;
+}
+
+static int
+otx_cpt_pci_remove(struct rte_pci_device *pci_dev)
+{
+ struct rte_cryptodev *cryptodev;
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ if (pci_dev->driver == NULL)
+ return -ENODEV;
+
+ /* free crypto device */
+ rte_cryptodev_pmd_release_device(cryptodev);
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(cryptodev->data->dev_private);
+
+ cryptodev->device->driver = NULL;
+ cryptodev->device = NULL;
+ cryptodev->data = NULL;
+
+ /* free metapool memory */
+ cleanup_global_resources();
+
+ return 0;
+}
+
+static struct rte_pci_driver otx_cryptodev_pmd = {
+ .id_table = pci_id_cpt_table,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = otx_cpt_pci_probe,
+ .remove = otx_cpt_pci_remove,
+};
+
+static struct cryptodev_driver otx_cryptodev_drv;
+
+RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_OCTEONTX_PMD, otx_cryptodev_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(CRYPTODEV_NAME_OCTEONTX_PMD, pci_id_cpt_table);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(otx_cryptodev_drv, otx_cryptodev_pmd.driver,
+ otx_cryptodev_driver_id);
+
+RTE_INIT(otx_cpt_init_log)
+{
+ /* Bus level logs */
+ otx_cryptodev_logtype = rte_log_register("pmd.crypto.octeontx");
+ if (otx_cryptodev_logtype >= 0)
+ rte_log_set_level(otx_cryptodev_logtype, RTE_LOG_NOTICE);
+}
diff --git a/drivers/crypto/octeontx/otx_cryptodev.h b/drivers/crypto/octeontx/otx_cryptodev.h
new file mode 100644
index 00000000..6c2871d7
--- /dev/null
+++ b/drivers/crypto/octeontx/otx_cryptodev.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _OTX_CRYPTODEV_H_
+#define _OTX_CRYPTODEV_H_
+
+/* Cavium OCTEON TX crypto PMD device name */
+#define CRYPTODEV_NAME_OCTEONTX_PMD crypto_octeontx
+
+/* Device ID */
+#define PCI_VENDOR_ID_CAVIUM 0x177d
+#define CPT_81XX_PCI_VF_DEVICE_ID 0xa041
+
+/*
+ * Crypto device driver ID
+ */
+uint8_t otx_cryptodev_driver_id;
+
+#endif /* _OTX_CRYPTODEV_H_ */
diff --git a/drivers/crypto/octeontx/otx_cryptodev_capabilities.c b/drivers/crypto/octeontx/otx_cryptodev_capabilities.c
new file mode 100644
index 00000000..946571cf
--- /dev/null
+++ b/drivers/crypto/octeontx/otx_cryptodev_capabilities.c
@@ -0,0 +1,604 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#include <rte_cryptodev.h>
+
+#include "otx_cryptodev_capabilities.h"
+
+static const struct rte_cryptodev_capabilities otx_capabilities[] = {
+ /* Symmetric capabilities */
+ { /* NULL (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ }, },
+ }, },
+ },
+ { /* AES GMAC (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* KASUMI (F9) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_KASUMI_F9,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* MD5 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 16,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 8,
+ .max = 64,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 16,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* SHA1 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 20,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 20,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* SHA224 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 28,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 28,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* SHA256 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 32,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 32,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* SHA384 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 48,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 48,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* SHA512 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512,
+ .block_size = 128,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
+ { /* SNOW 3G (UIA2) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* ZUC (EIA3) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_ZUC_EIA3,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* NULL (CIPHER) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ }
+ }, },
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 24,
+ .max = 24,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 8
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES ECB */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_ECB,
+ .block_size = 8,
+ .key_size = {
+ .min = 24,
+ .max = 24,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 16,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* AES XTS */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_XTS,
+ .block_size = 16,
+ .key_size = {
+ .min = 32,
+ .max = 64,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* KASUMI (F8) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* SNOW 3G (UEA2) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* ZUC (EEA3) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_ZUC_EEA3,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 1024,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ /* End of symmetric capabilities */
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+const struct rte_cryptodev_capabilities *
+otx_get_capabilities(void)
+{
+ return otx_capabilities;
+}
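The table returned by otx_get_capabilities() is exposed through the standard cryptodev capability API. Below is a minimal sketch of checking whether a device advertises SHA1-HMAC with a 64-byte key and a 20-byte digest; the dev_id parameter and the helper name are assumptions for illustration.

    #include <rte_cryptodev.h>

    /* Illustrative only: query the advertised capability table for SHA1-HMAC. */
    static int
    example_supports_sha1_hmac(uint8_t dev_id)
    {
        struct rte_cryptodev_sym_capability_idx idx = {
            .type = RTE_CRYPTO_SYM_XFORM_AUTH,
            .algo.auth = RTE_CRYPTO_AUTH_SHA1_HMAC,
        };
        const struct rte_cryptodev_symmetric_capability *cap;

        cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
        if (cap == NULL)
            return 0;

        /* key 64 bytes, digest 20 bytes, no IV */
        return rte_cryptodev_sym_capability_check_auth(cap, 64, 20, 0) == 0;
    }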
diff --git a/drivers/crypto/octeontx/otx_cryptodev_capabilities.h b/drivers/crypto/octeontx/otx_cryptodev_capabilities.h
new file mode 100644
index 00000000..fc62821b
--- /dev/null
+++ b/drivers/crypto/octeontx/otx_cryptodev_capabilities.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _OTX_CRYPTODEV_CAPABILITIES_H_
+#define _OTX_CRYPTODEV_CAPABILITIES_H_
+
+#include <rte_cryptodev.h>
+
+/*
+ * Get capabilities list for the device
+ *
+ */
+const struct rte_cryptodev_capabilities *
+otx_get_capabilities(void);
+
+#endif /* _OTX_CRYPTODEV_CAPABILITIES_H_ */
diff --git a/drivers/crypto/octeontx/otx_cryptodev_hw_access.c b/drivers/crypto/octeontx/otx_cryptodev_hw_access.c
new file mode 100644
index 00000000..5e705a83
--- /dev/null
+++ b/drivers/crypto/octeontx/otx_cryptodev_hw_access.c
@@ -0,0 +1,598 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+#include <assert.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_memzone.h>
+
+#include "otx_cryptodev_hw_access.h"
+#include "otx_cryptodev_mbox.h"
+
+#include "cpt_pmd_logs.h"
+#include "cpt_hw_types.h"
+
+/*
+ * VF HAL functions
+ * Access its own BAR0/4 registers by passing VF number as 0.
+ * OS/PCI maps them accordingly.
+ */
+
+static int
+otx_cpt_vf_init(struct cpt_vf *cptvf)
+{
+ int ret = 0;
+
+ /* Check ready with PF */
+ /* Gets chip ID / device ID from PF if ready */
+ ret = otx_cpt_check_pf_ready(cptvf);
+ if (ret) {
+ CPT_LOG_ERR("%s: PF not responding to READY msg",
+ cptvf->dev_name);
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ CPT_LOG_DP_DEBUG("%s: %s done", cptvf->dev_name, __func__);
+
+exit:
+ return ret;
+}
+
+/*
+ * Read Interrupt status of the VF
+ *
+ * @param cptvf cptvf structure
+ */
+static uint64_t
+otx_cpt_read_vf_misc_intr_status(struct cpt_vf *cptvf)
+{
+ return CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf), CPTX_VQX_MISC_INT(0, 0));
+}
+
+/*
+ * Clear mailbox interrupt of the VF
+ *
+ * @param cptvf cptvf structure
+ */
+static void
+otx_cpt_clear_mbox_intr(struct cpt_vf *cptvf)
+{
+ cptx_vqx_misc_int_t vqx_misc_int;
+
+ vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0));
+ /* W1C for the VF */
+ vqx_misc_int.s.mbox = 1;
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
+}
+
+/*
+ * Clear instruction NCB read error interrupt of the VF
+ *
+ * @param cptvf cptvf structure
+ */
+static void
+otx_cpt_clear_irde_intr(struct cpt_vf *cptvf)
+{
+ cptx_vqx_misc_int_t vqx_misc_int;
+
+ vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0));
+ /* W1C for the VF */
+ vqx_misc_int.s.irde = 1;
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
+}
+
+/*
+ * Clear NCB result write response error interrupt of the VF
+ *
+ * @param cptvf cptvf structure
+ */
+static void
+otx_cpt_clear_nwrp_intr(struct cpt_vf *cptvf)
+{
+ cptx_vqx_misc_int_t vqx_misc_int;
+
+ vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0));
+ /* W1C for the VF */
+ vqx_misc_int.s.nwrp = 1;
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
+}
+
+/*
+ * Clear swerr interrupt of the VF
+ *
+ * @param cptvf cptvf structure
+ */
+static void
+otx_cpt_clear_swerr_intr(struct cpt_vf *cptvf)
+{
+ cptx_vqx_misc_int_t vqx_misc_int;
+
+ vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0));
+ /* W1C for the VF */
+ vqx_misc_int.s.swerr = 1;
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
+}
+
+/*
+ * Clear hwerr interrupt of the VF
+ *
+ * @param cptvf cptvf structure
+ */
+static void
+otx_cpt_clear_hwerr_intr(struct cpt_vf *cptvf)
+{
+ cptx_vqx_misc_int_t vqx_misc_int;
+
+ vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0));
+ /* W1C for the VF */
+ vqx_misc_int.s.hwerr = 1;
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
+}
+
+/*
+ * Clear translation fault interrupt of the VF
+ *
+ * @param cptvf cptvf structure
+ */
+static void
+otx_cpt_clear_fault_intr(struct cpt_vf *cptvf)
+{
+ cptx_vqx_misc_int_t vqx_misc_int;
+
+ vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0));
+ /* W1C for the VF */
+ vqx_misc_int.s.fault = 1;
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
+}
+
+/*
+ * Clear doorbell overflow interrupt of the VF
+ *
+ * @param cptvf cptvf structure
+ */
+static void
+otx_cpt_clear_dovf_intr(struct cpt_vf *cptvf)
+{
+ cptx_vqx_misc_int_t vqx_misc_int;
+
+ vqx_misc_int.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0));
+ /* W1C for the VF */
+ vqx_misc_int.s.dovf = 1;
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u);
+}
+
+/* Write to VQX_CTL register
+ */
+static void
+otx_cpt_write_vq_ctl(struct cpt_vf *cptvf, bool val)
+{
+ cptx_vqx_ctl_t vqx_ctl;
+
+ vqx_ctl.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_CTL(0, 0));
+ vqx_ctl.s.ena = val;
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_CTL(0, 0), vqx_ctl.u);
+}
+
+/* Write to VQX_INPROG register
+ */
+static void
+otx_cpt_write_vq_inprog(struct cpt_vf *cptvf, uint8_t val)
+{
+ cptx_vqx_inprog_t vqx_inprg;
+
+ vqx_inprg.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_INPROG(0, 0));
+ vqx_inprg.s.inflight = val;
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_INPROG(0, 0), vqx_inprg.u);
+}
+
+/* Write to VQX_DONE_WAIT NUMWAIT register
+ */
+static void
+otx_cpt_write_vq_done_numwait(struct cpt_vf *cptvf, uint32_t val)
+{
+ cptx_vqx_done_wait_t vqx_dwait;
+
+ vqx_dwait.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_DONE_WAIT(0, 0));
+ vqx_dwait.s.num_wait = val;
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_DONE_WAIT(0, 0), vqx_dwait.u);
+}
+
+/* Write to VQX_DONE_WAIT TIME_WAIT register
+ */
+static void
+otx_cpt_write_vq_done_timewait(struct cpt_vf *cptvf, uint16_t val)
+{
+ cptx_vqx_done_wait_t vqx_dwait;
+
+ vqx_dwait.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_DONE_WAIT(0, 0));
+ vqx_dwait.s.time_wait = val;
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_DONE_WAIT(0, 0), vqx_dwait.u);
+}
+
+/* Write to VQX_SADDR register
+ */
+static void
+otx_cpt_write_vq_saddr(struct cpt_vf *cptvf, uint64_t val)
+{
+ cptx_vqx_saddr_t vqx_saddr;
+
+ vqx_saddr.u = val;
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_SADDR(0, 0), vqx_saddr.u);
+}
+
+static void
+otx_cpt_vfvq_init(struct cpt_vf *cptvf)
+{
+ uint64_t base_addr = 0;
+
+ /* Disable the VQ */
+ otx_cpt_write_vq_ctl(cptvf, 0);
+
+ /* Reset the doorbell */
+ otx_cpt_write_vq_doorbell(cptvf, 0);
+ /* Clear inflight */
+ otx_cpt_write_vq_inprog(cptvf, 0);
+
+ /* Write VQ SADDR */
+ base_addr = (uint64_t)(cptvf->cqueue.chead[0].dma_addr);
+ otx_cpt_write_vq_saddr(cptvf, base_addr);
+
+ /* Configure timerhold / coalescence */
+ otx_cpt_write_vq_done_timewait(cptvf, CPT_TIMER_THOLD);
+ otx_cpt_write_vq_done_numwait(cptvf, CPT_COUNT_THOLD);
+
+ /* Enable the VQ */
+ otx_cpt_write_vq_ctl(cptvf, 1);
+}
+
+static int
+cpt_vq_init(struct cpt_vf *cptvf, uint8_t group)
+{
+ int err;
+
+ /* Convey VQ LEN to PF */
+ err = otx_cpt_send_vq_size_msg(cptvf);
+ if (err) {
+ CPT_LOG_ERR("%s: PF not responding to QLEN msg",
+ cptvf->dev_name);
+ err = -EBUSY;
+ goto cleanup;
+ }
+
+ /* CPT VF device initialization */
+ otx_cpt_vfvq_init(cptvf);
+
+ /* Send msg to PF to assign current Q to required group */
+ cptvf->vfgrp = group;
+ err = otx_cpt_send_vf_grp_msg(cptvf, group);
+ if (err) {
+ CPT_LOG_ERR("%s: PF not responding to VF_GRP msg",
+ cptvf->dev_name);
+ err = -EBUSY;
+ goto cleanup;
+ }
+
+ CPT_LOG_DP_DEBUG("%s: %s done", cptvf->dev_name, __func__);
+ return 0;
+
+cleanup:
+ return err;
+}
+
+void
+otx_cpt_poll_misc(struct cpt_vf *cptvf)
+{
+ uint64_t intr;
+
+ intr = otx_cpt_read_vf_misc_intr_status(cptvf);
+
+ if (!intr)
+ return;
+
+ /* Check for MISC interrupt types */
+ if (likely(intr & CPT_VF_INTR_MBOX_MASK)) {
+ CPT_LOG_DP_DEBUG("%s: Mailbox interrupt 0x%lx on CPT VF %d",
+ cptvf->dev_name, (unsigned int long)intr, cptvf->vfid);
+ otx_cpt_handle_mbox_intr(cptvf);
+ otx_cpt_clear_mbox_intr(cptvf);
+ } else if (unlikely(intr & CPT_VF_INTR_IRDE_MASK)) {
+ otx_cpt_clear_irde_intr(cptvf);
+ CPT_LOG_DP_DEBUG("%s: Instruction NCB read error interrupt "
+ "0x%lx on CPT VF %d", cptvf->dev_name,
+ (unsigned int long)intr, cptvf->vfid);
+ } else if (unlikely(intr & CPT_VF_INTR_NWRP_MASK)) {
+ otx_cpt_clear_nwrp_intr(cptvf);
+ CPT_LOG_DP_DEBUG("%s: NCB response write error interrupt 0x%lx"
+ " on CPT VF %d", cptvf->dev_name,
+ (unsigned int long)intr, cptvf->vfid);
+ } else if (unlikely(intr & CPT_VF_INTR_SWERR_MASK)) {
+ otx_cpt_clear_swerr_intr(cptvf);
+ CPT_LOG_DP_DEBUG("%s: Software error interrupt 0x%lx on CPT VF "
+ "%d", cptvf->dev_name, (unsigned int long)intr,
+ cptvf->vfid);
+ } else if (unlikely(intr & CPT_VF_INTR_HWERR_MASK)) {
+ otx_cpt_clear_hwerr_intr(cptvf);
+ CPT_LOG_DP_DEBUG("%s: Hardware error interrupt 0x%lx on CPT VF "
+ "%d", cptvf->dev_name, (unsigned int long)intr,
+ cptvf->vfid);
+ } else if (unlikely(intr & CPT_VF_INTR_FAULT_MASK)) {
+ otx_cpt_clear_fault_intr(cptvf);
+ CPT_LOG_DP_DEBUG("%s: Translation fault interrupt 0x%lx on CPT VF "
+ "%d", cptvf->dev_name, (unsigned int long)intr,
+ cptvf->vfid);
+ } else if (unlikely(intr & CPT_VF_INTR_DOVF_MASK)) {
+ otx_cpt_clear_dovf_intr(cptvf);
+ CPT_LOG_DP_DEBUG("%s: Doorbell overflow interrupt 0x%lx on CPT VF "
+ "%d", cptvf->dev_name, (unsigned int long)intr,
+ cptvf->vfid);
+ } else
+ CPT_LOG_DP_ERR("%s: Unhandled interrupt 0x%lx in CPT VF %d",
+ cptvf->dev_name, (unsigned int long)intr,
+ cptvf->vfid);
+}
+
+int
+otx_cpt_hw_init(struct cpt_vf *cptvf, void *pdev, void *reg_base, char *name)
+{
+ memset(cptvf, 0, sizeof(struct cpt_vf));
+
+ /* Bar0 base address */
+ cptvf->reg_base = reg_base;
+ strncpy(cptvf->dev_name, name, 32);
+
+ cptvf->pdev = pdev;
+
+ /* To clear if there are any pending mbox msgs */
+ otx_cpt_poll_misc(cptvf);
+
+ if (otx_cpt_vf_init(cptvf)) {
+ CPT_LOG_ERR("Failed to initialize CPT VF device");
+ return -1;
+ }
+
+ return 0;
+}
+
+int
+otx_cpt_deinit_device(void *dev)
+{
+ struct cpt_vf *cptvf = (struct cpt_vf *)dev;
+
+ /* Do misc work one last time */
+ otx_cpt_poll_misc(cptvf);
+
+ return 0;
+}
+
+int
+otx_cpt_get_resource(void *dev, uint8_t group, struct cpt_instance **instance)
+{
+ int ret = -ENOENT, len, qlen, i;
+ int chunk_len, chunks, chunk_size;
+ struct cpt_vf *cptvf = (struct cpt_vf *)dev;
+ struct cpt_instance *cpt_instance;
+ struct command_chunk *chunk_head = NULL, *chunk_prev = NULL;
+ struct command_chunk *chunk = NULL;
+ uint8_t *mem;
+ const struct rte_memzone *rz;
+ uint64_t dma_addr = 0, alloc_len, used_len;
+ uint64_t *next_ptr;
+ uint64_t pg_sz = sysconf(_SC_PAGESIZE);
+
+ CPT_LOG_DP_DEBUG("Initializing cpt resource %s", cptvf->dev_name);
+
+ cpt_instance = &cptvf->instance;
+
+ memset(&cptvf->cqueue, 0, sizeof(cptvf->cqueue));
+ memset(&cptvf->pqueue, 0, sizeof(cptvf->pqueue));
+
+ /* Chunks are fixed-size buffers */
+ chunks = DEFAULT_CMD_QCHUNKS;
+ chunk_len = DEFAULT_CMD_QCHUNK_SIZE;
+
+ qlen = chunks * chunk_len;
+ /* Chunk size includes 8 bytes of next chunk ptr */
+ chunk_size = chunk_len * CPT_INST_SIZE + CPT_NEXT_CHUNK_PTR_SIZE;
+
+ /* For command chunk structures */
+ len = chunks * RTE_ALIGN(sizeof(struct command_chunk), 8);
+
+ /* For pending queue */
+ len += qlen * RTE_ALIGN(sizeof(struct rid), 8);
+
+ /* So that instruction queues start page-size aligned */
+ len = RTE_ALIGN(len, pg_sz);
+
+ /* For Instruction queues */
+ len += chunks * RTE_ALIGN(chunk_size, 128);
+
+ /* Wastage after instruction queues */
+ len = RTE_ALIGN(len, pg_sz);
+
+ rz = rte_memzone_reserve_aligned(cptvf->dev_name, len, cptvf->node,
+ RTE_MEMZONE_SIZE_HINT_ONLY |
+ RTE_MEMZONE_256MB,
+ RTE_CACHE_LINE_SIZE);
+ if (!rz) {
+ ret = rte_errno;
+ goto cleanup;
+ }
+
+ mem = rz->addr;
+ dma_addr = rz->phys_addr;
+ alloc_len = len;
+
+ memset(mem, 0, len);
+
+ cpt_instance->rsvd = (uintptr_t)rz;
+
+ /* Pending queue setup */
+ cptvf->pqueue.rid_queue = (struct rid *)mem;
+ cptvf->pqueue.enq_tail = 0;
+ cptvf->pqueue.deq_head = 0;
+ cptvf->pqueue.pending_count = 0;
+
+ mem += qlen * RTE_ALIGN(sizeof(struct rid), 8);
+ len -= qlen * RTE_ALIGN(sizeof(struct rid), 8);
+ dma_addr += qlen * RTE_ALIGN(sizeof(struct rid), 8);
+
+ /* Alignment wastage */
+ used_len = alloc_len - len;
+ mem += RTE_ALIGN(used_len, pg_sz) - used_len;
+ len -= RTE_ALIGN(used_len, pg_sz) - used_len;
+ dma_addr += RTE_ALIGN(used_len, pg_sz) - used_len;
+
+ /* Init instruction queues */
+ chunk_head = &cptvf->cqueue.chead[0];
+ i = qlen;
+
+ chunk_prev = NULL;
+ for (i = 0; i < DEFAULT_CMD_QCHUNKS; i++) {
+ int csize;
+
+ chunk = &cptvf->cqueue.chead[i];
+ chunk->head = mem;
+ chunk->dma_addr = dma_addr;
+
+ csize = RTE_ALIGN(chunk_size, 128);
+ mem += csize;
+ dma_addr += csize;
+ len -= csize;
+
+ if (chunk_prev) {
+ next_ptr = (uint64_t *)(chunk_prev->head +
+ chunk_size - 8);
+ *next_ptr = (uint64_t)chunk->dma_addr;
+ }
+ chunk_prev = chunk;
+ }
+ /* Circular loop */
+ next_ptr = (uint64_t *)(chunk_prev->head + chunk_size - 8);
+ *next_ptr = (uint64_t)chunk_head->dma_addr;
+
+ assert(!len);
+
+ /* This is used for CPT(0)_PF_Q(0..15)_CTL.size config */
+ cptvf->qsize = chunk_size / 8;
+ cptvf->cqueue.qhead = chunk_head->head;
+ cptvf->cqueue.idx = 0;
+ cptvf->cqueue.cchunk = 0;
+
+ if (cpt_vq_init(cptvf, group)) {
+ CPT_LOG_ERR("Failed to initialize CPT VQ of device %s",
+ cptvf->dev_name);
+ ret = -EBUSY;
+ goto cleanup;
+ }
+
+ *instance = cpt_instance;
+
+ CPT_LOG_DP_DEBUG("Crypto device (%s) initialized", cptvf->dev_name);
+
+ return 0;
+cleanup:
+ rte_memzone_free(rz);
+ *instance = NULL;
+ return ret;
+}
+
+int
+otx_cpt_put_resource(struct cpt_instance *instance)
+{
+ struct cpt_vf *cptvf = (struct cpt_vf *)instance;
+ struct rte_memzone *rz;
+
+ if (!cptvf) {
+ CPT_LOG_ERR("Invalid CPTVF handle");
+ return -EINVAL;
+ }
+
+ CPT_LOG_DP_DEBUG("Releasing cpt device %s", cptvf->dev_name);
+
+ rz = (struct rte_memzone *)instance->rsvd;
+ rte_memzone_free(rz);
+ return 0;
+}
+
+int
+otx_cpt_start_device(void *dev)
+{
+ int rc;
+ struct cpt_vf *cptvf = (struct cpt_vf *)dev;
+
+ rc = otx_cpt_send_vf_up(cptvf);
+ if (rc) {
+ CPT_LOG_ERR("Failed to mark CPT VF device %s UP, rc = %d",
+ cptvf->dev_name, rc);
+ return -EFAULT;
+ }
+
+ if ((cptvf->vftype != SE_TYPE) && (cptvf->vftype != AE_TYPE)) {
+ CPT_LOG_ERR("Fatal error, unexpected vf type %u, for CPT VF "
+ "device %s", cptvf->vftype, cptvf->dev_name);
+ return -ENOENT;
+ }
+
+ return 0;
+}
+
+void
+otx_cpt_stop_device(void *dev)
+{
+ int rc;
+ uint32_t pending, retries = 5;
+ struct cpt_vf *cptvf = (struct cpt_vf *)dev;
+
+ /* Wait for pending entries to complete */
+ pending = otx_cpt_read_vq_doorbell(cptvf);
+ while (pending) {
+ CPT_LOG_DP_DEBUG("%s: Waiting for pending %u cmds to complete",
+ cptvf->dev_name, pending);
+ sleep(1);
+ pending = otx_cpt_read_vq_doorbell(cptvf);
+ retries--;
+ if (!retries)
+ break;
+ }
+
+ if (!retries && pending) {
+ CPT_LOG_ERR("%s: Timeout waiting for commands(%u)",
+ cptvf->dev_name, pending);
+ return;
+ }
+
+ rc = otx_cpt_send_vf_down(cptvf);
+ if (rc) {
+ CPT_LOG_ERR("Failed to bring down vf %s, rc %d",
+ cptvf->dev_name, rc);
+ return;
+ }
+}
diff --git a/drivers/crypto/octeontx/otx_cryptodev_hw_access.h b/drivers/crypto/octeontx/otx_cryptodev_hw_access.h
new file mode 100644
index 00000000..82b15eea
--- /dev/null
+++ b/drivers/crypto/octeontx/otx_cryptodev_hw_access.h
@@ -0,0 +1,320 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+#ifndef _OTX_CRYPTODEV_HW_ACCESS_H_
+#define _OTX_CRYPTODEV_HW_ACCESS_H_
+
+#include <stdbool.h>
+
+#include <rte_branch_prediction.h>
+#include <rte_cycles.h>
+#include <rte_io.h>
+#include <rte_memory.h>
+#include <rte_prefetch.h>
+
+#include "cpt_common.h"
+#include "cpt_hw_types.h"
+#include "cpt_mcode_defines.h"
+#include "cpt_pmd_logs.h"
+
+#define CPT_INTR_POLL_INTERVAL_MS (50)
+
+/* Default command queue length */
+#define DEFAULT_CMD_QCHUNKS 2
+#define DEFAULT_CMD_QCHUNK_SIZE 1023
+#define DEFAULT_CMD_QLEN \
+ (DEFAULT_CMD_QCHUNK_SIZE * DEFAULT_CMD_QCHUNKS)
+
+#define CPT_CSR_REG_BASE(cpt) ((cpt)->reg_base)
+
+/* Read hw register */
+#define CPT_READ_CSR(__hw_addr, __offset) \
+ rte_read64_relaxed((uint8_t *)__hw_addr + __offset)
+
+/* Write hw register */
+#define CPT_WRITE_CSR(__hw_addr, __offset, __val) \
+ rte_write64_relaxed((__val), ((uint8_t *)__hw_addr + __offset))
+
+/* cpt instance */
+struct cpt_instance {
+ uint32_t queue_id;
+ uintptr_t rsvd;
+};
+
+struct command_chunk {
+ /** 128-byte aligned real_vaddr */
+ uint8_t *head;
+ /** 128-byte aligned real_dma_addr */
+ phys_addr_t dma_addr;
+};
+
+/**
+ * Command queue structure
+ */
+struct command_queue {
+ /** Command queue host write idx */
+ uint32_t idx;
+ /** Command queue chunk */
+ uint32_t cchunk;
+ /** Command queue head; instructions are inserted here */
+ uint8_t *qhead;
+ /** Command chunk list head */
+ struct command_chunk chead[DEFAULT_CMD_QCHUNKS];
+};
+
+/**
+ * CPT VF device structure
+ */
+struct cpt_vf {
+ /** CPT instance */
+ struct cpt_instance instance;
+ /** Register start address */
+ uint8_t *reg_base;
+ /** Command queue information */
+ struct command_queue cqueue;
+ /** Pending queue information */
+ struct pending_queue pqueue;
+ /** Meta information per vf */
+ struct cptvf_meta_info meta_info;
+
+ /** Below fields are accessed only in control path */
+
+ /** Env specific pdev representing the pci dev */
+ void *pdev;
+ /** Calculated queue size */
+ uint32_t qsize;
+ /** Device index (0...CPT_MAX_VQ_NUM) */
+ uint8_t vfid;
+ /** VF type of cpt_vf_type_t (SE_TYPE(2) or AE_TYPE(1)) */
+ uint8_t vftype;
+ /** VF group (0 - 8) */
+ uint8_t vfgrp;
+ /** Operating node: Bits (46:44) in BAR0 address */
+ uint8_t node;
+
+ /** VF-PF mailbox communication */
+
+ /** Flag if acked */
+ bool pf_acked;
+ /** Flag if not acked */
+ bool pf_nacked;
+
+ /** Device name */
+ char dev_name[32];
+} __rte_cache_aligned;
+
+/*
+ * CPT register map for 81xx
+ */
+
+/* VF registers */
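+/*
+ * In the macros below, (a) selects the CPT block and (b) the virtual
+ * queue; each VQ's register window is 0x100000 bytes apart.
+ */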
+#define CPTX_VQX_CTL(a, b) (0x0000100ll + 0x1000000000ll * \
+ ((a) & 0x0) + 0x100000ll * (b))
+#define CPTX_VQX_SADDR(a, b) (0x0000200ll + 0x1000000000ll * \
+ ((a) & 0x0) + 0x100000ll * (b))
+#define CPTX_VQX_DONE_WAIT(a, b) (0x0000400ll + 0x1000000000ll * \
+ ((a) & 0x0) + 0x100000ll * (b))
+#define CPTX_VQX_INPROG(a, b) (0x0000410ll + 0x1000000000ll * \
+ ((a) & 0x0) + 0x100000ll * (b))
+#define CPTX_VQX_DONE(a, b) (0x0000420ll + 0x1000000000ll * \
+ ((a) & 0x1) + 0x100000ll * (b))
+#define CPTX_VQX_DONE_ACK(a, b) (0x0000440ll + 0x1000000000ll * \
+ ((a) & 0x1) + 0x100000ll * (b))
+#define CPTX_VQX_DONE_INT_W1S(a, b) (0x0000460ll + 0x1000000000ll * \
+ ((a) & 0x1) + 0x100000ll * (b))
+#define CPTX_VQX_DONE_INT_W1C(a, b) (0x0000468ll + 0x1000000000ll * \
+ ((a) & 0x1) + 0x100000ll * (b))
+#define CPTX_VQX_DONE_ENA_W1S(a, b) (0x0000470ll + 0x1000000000ll * \
+ ((a) & 0x1) + 0x100000ll * (b))
+#define CPTX_VQX_DONE_ENA_W1C(a, b) (0x0000478ll + 0x1000000000ll * \
+ ((a) & 0x1) + 0x100000ll * (b))
+#define CPTX_VQX_MISC_INT(a, b) (0x0000500ll + 0x1000000000ll * \
+ ((a) & 0x1) + 0x100000ll * (b))
+#define CPTX_VQX_MISC_INT_W1S(a, b) (0x0000508ll + 0x1000000000ll * \
+ ((a) & 0x1) + 0x100000ll * (b))
+#define CPTX_VQX_MISC_ENA_W1S(a, b) (0x0000510ll + 0x1000000000ll * \
+ ((a) & 0x1) + 0x100000ll * (b))
+#define CPTX_VQX_MISC_ENA_W1C(a, b) (0x0000518ll + 0x1000000000ll * \
+ ((a) & 0x1) + 0x100000ll * (b))
+#define CPTX_VQX_DOORBELL(a, b) (0x0000600ll + 0x1000000000ll * \
+ ((a) & 0x1) + 0x100000ll * (b))
+#define CPTX_VFX_PF_MBOXX(a, b, c) (0x0001000ll + 0x1000000000ll * \
+ ((a) & 0x1) + 0x100000ll * (b) + \
+ 8ll * ((c) & 0x1))
+
+/* VF HAL functions */
+
+void
+otx_cpt_poll_misc(struct cpt_vf *cptvf);
+
+int
+otx_cpt_hw_init(struct cpt_vf *cptvf, void *pdev, void *reg_base, char *name);
+
+int
+otx_cpt_deinit_device(void *dev);
+
+int
+otx_cpt_get_resource(void *dev, uint8_t group, struct cpt_instance **instance);
+
+int
+otx_cpt_put_resource(struct cpt_instance *instance);
+
+int
+otx_cpt_start_device(void *cptvf);
+
+void
+otx_cpt_stop_device(void *cptvf);
+
+/* Write to VQX_DOORBELL register */
+static __rte_always_inline void
+otx_cpt_write_vq_doorbell(struct cpt_vf *cptvf, uint32_t val)
+{
+ cptx_vqx_doorbell_t vqx_dbell;
+
+ vqx_dbell.u = 0;
+ vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_DOORBELL(0, 0), vqx_dbell.u);
+}
+
+static __rte_always_inline uint32_t
+otx_cpt_read_vq_doorbell(struct cpt_vf *cptvf)
+{
+ cptx_vqx_doorbell_t vqx_dbell;
+
+ vqx_dbell.u = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VQX_DOORBELL(0, 0));
+ return vqx_dbell.s.dbell_cnt;
+}
+
+static __rte_always_inline void
+otx_cpt_ring_dbell(struct cpt_instance *instance, uint16_t count)
+{
+ struct cpt_vf *cptvf = (struct cpt_vf *)instance;
+ /* Memory barrier to flush pending writes */
+ rte_smp_wmb();
+ otx_cpt_write_vq_doorbell(cptvf, count);
+}
+
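+/* Return a pointer to the next free instruction slot in the current chunk */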
+static __rte_always_inline void *
+get_cpt_inst(struct command_queue *cqueue)
+{
+ CPT_LOG_DP_DEBUG("CPT queue idx %u\n", cqueue->idx);
+ return &cqueue->qhead[cqueue->idx * CPT_INST_SIZE];
+}
+
+static __rte_always_inline void
+fill_cpt_inst(struct cpt_instance *instance, void *req)
+{
+ struct command_queue *cqueue;
+ cpt_inst_s_t *cpt_ist_p;
+ struct cpt_vf *cptvf = (struct cpt_vf *)instance;
+ struct cpt_request_info *user_req = (struct cpt_request_info *)req;
+ cqueue = &cptvf->cqueue;
+ cpt_ist_p = get_cpt_inst(cqueue);
+ rte_prefetch_non_temporal(cpt_ist_p);
+
+ /* EI0, EI1, EI2, EI3 are already prepared */
+ /* HW W0 */
+ cpt_ist_p->u[0] = 0;
+ /* HW W1 */
+ cpt_ist_p->s8x.res_addr = user_req->comp_baddr;
+ /* HW W2 */
+ cpt_ist_p->u[2] = 0;
+ /* HW W3 */
+ cpt_ist_p->s8x.wq_ptr = 0;
+
+ /* MC EI0 */
+ cpt_ist_p->s8x.ei0 = user_req->ist.ei0;
+ /* MC EI1 */
+ cpt_ist_p->s8x.ei1 = user_req->ist.ei1;
+ /* MC EI2 */
+ cpt_ist_p->s8x.ei2 = user_req->ist.ei2;
+ /* MC EI3 */
+ cpt_ist_p->s8x.ei3 = user_req->ist.ei3;
+}
+
+static __rte_always_inline void
+mark_cpt_inst(struct cpt_instance *instance)
+{
+ struct cpt_vf *cptvf = (struct cpt_vf *)instance;
+ struct command_queue *queue = &cptvf->cqueue;
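+ /*
+  * Only DEFAULT_CMD_QCHUNK_SIZE instructions fit in a chunk; the last
+  * 8 bytes hold the next-chunk pointer. Once a chunk fills up, advance
+  * to the next chunk of the circular chunk list.
+  */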
+ if (unlikely(++queue->idx >= DEFAULT_CMD_QCHUNK_SIZE)) {
+ uint32_t cchunk = queue->cchunk;
+ MOD_INC(cchunk, DEFAULT_CMD_QCHUNKS);
+ queue->qhead = queue->chead[cchunk].head;
+ queue->idx = 0;
+ queue->cchunk = cchunk;
+ }
+}
+
+static __rte_always_inline uint8_t
+check_nb_command_id(struct cpt_request_info *user_req,
+ struct cpt_instance *instance)
+{
+ uint8_t ret = ERR_REQ_PENDING;
+ struct cpt_vf *cptvf = (struct cpt_vf *)instance;
+ volatile cpt_res_s_t *cptres;
+
+ cptres = (volatile cpt_res_s_t *)user_req->completion_addr;
+
+ if (unlikely(cptres->s8x.compcode == CPT_8X_COMP_E_NOTDONE)) {
+ /*
+ * Wait for some time for this command to get completed
+ * before timing out
+ */
+ if (rte_get_timer_cycles() < user_req->time_out)
+ return ret;
+ /*
+ * TODO: See if alternate caddr can be used to not loop
+ * longer than needed.
+ */
+ if ((cptres->s8x.compcode == CPT_8X_COMP_E_NOTDONE) &&
+ (user_req->extra_time < TIME_IN_RESET_COUNT)) {
+ user_req->extra_time++;
+ return ret;
+ }
+
+ if (cptres->s8x.compcode != CPT_8X_COMP_E_NOTDONE)
+ goto complete;
+
+ ret = ERR_REQ_TIMEOUT;
+ CPT_LOG_DP_ERR("Request %p timedout", user_req);
+ otx_cpt_poll_misc(cptvf);
+ goto exit;
+ }
+
+complete:
+ if (likely(cptres->s8x.compcode == CPT_8X_COMP_E_GOOD)) {
+ ret = 0; /* success */
+ if (unlikely((uint8_t)*user_req->alternate_caddr)) {
+ ret = (uint8_t)*user_req->alternate_caddr;
+ CPT_LOG_DP_ERR("Request %p : failed with microcode"
+ " error, MC completion code : 0x%x", user_req,
+ ret);
+ }
+ CPT_LOG_DP_DEBUG("MC status %.8x\n",
+ *((volatile uint32_t *)user_req->alternate_caddr));
+ CPT_LOG_DP_DEBUG("HW status %.8x\n",
+ *((volatile uint32_t *)user_req->completion_addr));
+ } else if ((cptres->s8x.compcode == CPT_8X_COMP_E_SWERR) ||
+ (cptres->s8x.compcode == CPT_8X_COMP_E_FAULT)) {
+ ret = (uint8_t)*user_req->alternate_caddr;
+ if (!ret)
+ ret = ERR_BAD_ALT_CCODE;
+ CPT_LOG_DP_DEBUG("Request %p : failed with %s : err code :%x",
+ user_req,
+ (cptres->s8x.compcode == CPT_8X_COMP_E_FAULT) ?
+ "DMA Fault" : "Software error", ret);
+ } else {
+ CPT_LOG_DP_ERR("Request %p : unexpected completion code %d",
+ user_req, cptres->s8x.compcode);
+ ret = (uint8_t)*user_req->alternate_caddr;
+ }
+
+exit:
+ return ret;
+}
+
+#endif /* _OTX_CRYPTODEV_HW_ACCESS_H_ */
diff --git a/drivers/crypto/octeontx/otx_cryptodev_mbox.c b/drivers/crypto/octeontx/otx_cryptodev_mbox.c
new file mode 100644
index 00000000..a8e51a8e
--- /dev/null
+++ b/drivers/crypto/octeontx/otx_cryptodev_mbox.c
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#include <unistd.h>
+
+#include "otx_cryptodev_hw_access.h"
+#include "otx_cryptodev_mbox.h"
+
+void
+otx_cpt_handle_mbox_intr(struct cpt_vf *cptvf)
+{
+ struct cpt_mbox mbx = {0, 0};
+
+ /*
+ * MBOX[0] contains msg
+ * MBOX[1] contains data
+ */
+ mbx.msg = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VFX_PF_MBOXX(0, 0, 0));
+ mbx.data = CPT_READ_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VFX_PF_MBOXX(0, 0, 1));
+
+ CPT_LOG_DP_DEBUG("%s: Mailbox msg 0x%lx from PF",
+ cptvf->dev_name, (unsigned int long)mbx.msg);
+ switch (mbx.msg) {
+ case OTX_CPT_MSG_READY:
+ {
+ otx_cpt_chipid_vfid_t cid;
+
+ cid.u64 = mbx.data;
+ cptvf->pf_acked = true;
+ cptvf->vfid = cid.s.vfid;
+ CPT_LOG_DP_DEBUG("%s: Received VFID %d chip_id %d",
+ cptvf->dev_name,
+ cptvf->vfid, cid.s.chip_id);
+ }
+ break;
+ case OTX_CPT_MSG_QBIND_GRP:
+ cptvf->pf_acked = true;
+ cptvf->vftype = mbx.data;
+ CPT_LOG_DP_DEBUG("%s: VF %d type %s group %d",
+ cptvf->dev_name, cptvf->vfid,
+ ((mbx.data == SE_TYPE) ? "SE" : "AE"),
+ cptvf->vfgrp);
+ break;
+ case OTX_CPT_MBOX_MSG_TYPE_ACK:
+ cptvf->pf_acked = true;
+ break;
+ case OTX_CPT_MBOX_MSG_TYPE_NACK:
+ cptvf->pf_nacked = true;
+ break;
+ default:
+ CPT_LOG_DP_DEBUG("%s: Invalid msg from PF, msg 0x%lx",
+ cptvf->dev_name, (unsigned int long)mbx.msg);
+ break;
+ }
+}
+
+/* Send a mailbox message to PF
+ * @vf: vf from which this message to be sent
+ * @mbx: Message to be sent
+ */
+static void
+otx_cpt_send_msg_to_pf(struct cpt_vf *cptvf, struct cpt_mbox *mbx)
+{
+ /* Writing mbox(1) causes interrupt */
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VFX_PF_MBOXX(0, 0, 0), mbx->msg);
+ CPT_WRITE_CSR(CPT_CSR_REG_BASE(cptvf),
+ CPTX_VFX_PF_MBOXX(0, 0, 1), mbx->data);
+}
+
+static int32_t
+otx_cpt_send_msg_to_pf_timeout(struct cpt_vf *cptvf, struct cpt_mbox *mbx)
+{
+ int timeout = OTX_CPT_MBOX_MSG_TIMEOUT;
+ int sleep_ms = 10;
+
+ cptvf->pf_acked = false;
+ cptvf->pf_nacked = false;
+
+ otx_cpt_send_msg_to_pf(cptvf, mbx);
+
+ /* Wait for previous message to be acked, timeout 2sec */
+ while (!cptvf->pf_acked) {
+ if (cptvf->pf_nacked)
+ return -EINVAL;
+ usleep(sleep_ms * 1000);
+ otx_cpt_poll_misc(cptvf);
+ if (cptvf->pf_acked)
+ break;
+ timeout -= sleep_ms;
+ if (!timeout) {
+ CPT_LOG_ERR("%s: PF didn't ack mbox msg %lx(vfid %u)",
+ cptvf->dev_name,
+ (unsigned int long)(mbx->msg & 0xFF),
+ cptvf->vfid);
+ return -EBUSY;
+ }
+ }
+ return 0;
+}
+
+int
+otx_cpt_check_pf_ready(struct cpt_vf *cptvf)
+{
+ struct cpt_mbox mbx = {0, 0};
+
+ mbx.msg = OTX_CPT_MSG_READY;
+ if (otx_cpt_send_msg_to_pf_timeout(cptvf, &mbx)) {
+ CPT_LOG_ERR("%s: PF didn't respond to READY msg",
+ cptvf->dev_name);
+ return 1;
+ }
+ return 0;
+}
+
+int
+otx_cpt_send_vq_size_msg(struct cpt_vf *cptvf)
+{
+ struct cpt_mbox mbx = {0, 0};
+
+ mbx.msg = OTX_CPT_MSG_QLEN;
+
+ mbx.data = cptvf->qsize;
+ if (otx_cpt_send_msg_to_pf_timeout(cptvf, &mbx)) {
+ CPT_LOG_ERR("%s: PF didn't respond to vq_size msg",
+ cptvf->dev_name);
+ return 1;
+ }
+ return 0;
+}
+
+int
+otx_cpt_send_vf_grp_msg(struct cpt_vf *cptvf, uint32_t group)
+{
+ struct cpt_mbox mbx = {0, 0};
+
+ mbx.msg = OTX_CPT_MSG_QBIND_GRP;
+
+ /* Convey group of the VF */
+ mbx.data = group;
+ if (otx_cpt_send_msg_to_pf_timeout(cptvf, &mbx)) {
+ CPT_LOG_ERR("%s: PF didn't respond to vf_type msg",
+ cptvf->dev_name);
+ return 1;
+ }
+ return 0;
+}
+
+int
+otx_cpt_send_vf_up(struct cpt_vf *cptvf)
+{
+ struct cpt_mbox mbx = {0, 0};
+
+ mbx.msg = OTX_CPT_MSG_VF_UP;
+ if (otx_cpt_send_msg_to_pf_timeout(cptvf, &mbx)) {
+ CPT_LOG_ERR("%s: PF didn't respond to UP msg",
+ cptvf->dev_name);
+ return 1;
+ }
+ return 0;
+}
+
+int
+otx_cpt_send_vf_down(struct cpt_vf *cptvf)
+{
+ struct cpt_mbox mbx = {0, 0};
+
+ mbx.msg = OTX_CPT_MSG_VF_DOWN;
+ if (otx_cpt_send_msg_to_pf_timeout(cptvf, &mbx)) {
+ CPT_LOG_ERR("%s: PF didn't respond to DOWN msg",
+ cptvf->dev_name);
+ return 1;
+ }
+ return 0;
+}
diff --git a/drivers/crypto/octeontx/otx_cryptodev_mbox.h b/drivers/crypto/octeontx/otx_cryptodev_mbox.h
new file mode 100644
index 00000000..b05d1c50
--- /dev/null
+++ b/drivers/crypto/octeontx/otx_cryptodev_mbox.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _OTX_CRYPTODEV_MBOX_H_
+#define _OTX_CRYPTODEV_MBOX_H_
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+
+#include "cpt_common.h"
+#include "cpt_pmd_logs.h"
+
+#include "otx_cryptodev_hw_access.h"
+
+#define OTX_CPT_MBOX_MSG_TIMEOUT 2000 /* In milliseconds */
+
+#define OTX_CPT_MBOX_MSG_TYPE_REQ 0
+#define OTX_CPT_MBOX_MSG_TYPE_ACK 1
+#define OTX_CPT_MBOX_MSG_TYPE_NACK 2
+#define OTX_CPT_MBOX_MSG_TYPE_NOP 3
+
+/* CPT mailbox structure */
+struct cpt_mbox {
+ /** Message type MBOX[0] */
+ uint64_t msg;
+ /** Data MBOX[1] */
+ uint64_t data;
+};
+
+typedef enum {
+ OTX_CPT_MSG_VF_UP = 1,
+ OTX_CPT_MSG_VF_DOWN,
+ OTX_CPT_MSG_READY,
+ OTX_CPT_MSG_QLEN,
+ OTX_CPT_MSG_QBIND_GRP,
+ OTX_CPT_MSG_VQ_PRIORITY,
+ OTX_CPT_MSG_PF_TYPE,
+} otx_cpt_mbox_opcode_t;
+
+typedef union {
+ uint64_t u64;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint32_t chip_id;
+ uint8_t vfid;
+ uint8_t reserved[3];
+#else
+ uint8_t reserved[3];
+ uint8_t vfid;
+ uint32_t chip_id;
+#endif
+ } s;
+} otx_cpt_chipid_vfid_t;
+
+/* Poll handler to handle mailbox messages from PF */
+void
+otx_cpt_handle_mbox_intr(struct cpt_vf *cptvf);
+
+/*
+ * Checks whether the VF is able to communicate with the PF
+ * and also gets the CPT number this VF is associated with.
+ */
+int
+otx_cpt_check_pf_ready(struct cpt_vf *cptvf);
+
+/*
+ * Communicate VQs size to PF to program CPT(0)_PF_Q(0-15)_CTL of the VF.
+ * Must be ACKed.
+ */
+int
+otx_cpt_send_vq_size_msg(struct cpt_vf *cptvf);
+
+/*
+ * Communicate the VF group required to PF and get the VQ bound to that group
+ */
+int
+otx_cpt_send_vf_grp_msg(struct cpt_vf *cptvf, uint32_t group);
+
+/*
+ * Communicate to PF that VF is UP and running
+ */
+int
+otx_cpt_send_vf_up(struct cpt_vf *cptvf);
+
+/*
+ * Communicate to PF that VF is going DOWN
+ */
+int
+otx_cpt_send_vf_down(struct cpt_vf *cptvf);
+
+#endif /* _OTX_CRYPTODEV_MBOX_H_ */
diff --git a/drivers/crypto/octeontx/otx_cryptodev_ops.c b/drivers/crypto/octeontx/otx_cryptodev_ops.c
new file mode 100644
index 00000000..23f96591
--- /dev/null
+++ b/drivers/crypto/octeontx/otx_cryptodev_ops.c
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#include <rte_alarm.h>
+#include <rte_bus_pci.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_malloc.h>
+
+#include "cpt_pmd_logs.h"
+#include "cpt_pmd_ops_helper.h"
+#include "cpt_ucode.h"
+#include "cpt_request_mgr.h"
+
+#include "otx_cryptodev.h"
+#include "otx_cryptodev_capabilities.h"
+#include "otx_cryptodev_hw_access.h"
+#include "otx_cryptodev_ops.h"
+
+static int otx_cryptodev_probe_count;
+static rte_spinlock_t otx_probe_count_lock = RTE_SPINLOCK_INITIALIZER;
+
+static struct rte_mempool *otx_cpt_meta_pool;
+static int otx_cpt_op_mlen;
+static int otx_cpt_op_sb_mlen;
+
+/* Forward declarations */
+
+static int
+otx_cpt_que_pair_release(struct rte_cryptodev *dev, uint16_t que_pair_id);
+
+/*
+ * Initializes global variables used by fast-path code
+ *
+ * @return
+ * - 0 on success, errcode on error
+ */
+static int
+init_global_resources(void)
+{
+ /* Get meta len for scatter gather mode */
+ otx_cpt_op_mlen = cpt_pmd_ops_helper_get_mlen_sg_mode();
+
+ /* Extra 4B saved for future considerations */
+ otx_cpt_op_mlen += 4 * sizeof(uint64_t);
+
+ otx_cpt_meta_pool = rte_mempool_create("cpt_metabuf-pool", 4096 * 16,
+ otx_cpt_op_mlen, 512, 0,
+ NULL, NULL, NULL, NULL,
+ SOCKET_ID_ANY, 0);
+ if (!otx_cpt_meta_pool) {
+ CPT_LOG_ERR("cpt metabuf pool not created");
+ return -ENOMEM;
+ }
+
+ /* Get meta len for direct mode */
+ otx_cpt_op_sb_mlen = cpt_pmd_ops_helper_get_mlen_direct_mode();
+
+ /* Extra 4B saved for future considerations */
+ otx_cpt_op_sb_mlen += 4 * sizeof(uint64_t);
+
+ return 0;
+}
+
+void
+cleanup_global_resources(void)
+{
+ /* Take lock */
+ rte_spinlock_lock(&otx_probe_count_lock);
+
+ /* Decrement the cryptodev count */
+ otx_cryptodev_probe_count--;
+
+ /* Free buffers */
+ if (otx_cpt_meta_pool && otx_cryptodev_probe_count == 0)
+ rte_mempool_free(otx_cpt_meta_pool);
+
+ /* Release lock */
+ rte_spinlock_unlock(&otx_probe_count_lock);
+}
+
+/* Alarm routines */
+
+static void
+otx_cpt_alarm_cb(void *arg)
+{
+ struct cpt_vf *cptvf = arg;
+ otx_cpt_poll_misc(cptvf);
+ rte_eal_alarm_set(CPT_INTR_POLL_INTERVAL_MS * 1000,
+ otx_cpt_alarm_cb, cptvf);
+}
+
+static int
+otx_cpt_periodic_alarm_start(void *arg)
+{
+ return rte_eal_alarm_set(CPT_INTR_POLL_INTERVAL_MS * 1000,
+ otx_cpt_alarm_cb, arg);
+}
+
+static int
+otx_cpt_periodic_alarm_stop(void *arg)
+{
+ return rte_eal_alarm_cancel(otx_cpt_alarm_cb, arg);
+}
+
+/* PMD ops */
+
+static int
+otx_cpt_dev_config(struct rte_cryptodev *dev __rte_unused,
+ struct rte_cryptodev_config *config __rte_unused)
+{
+ CPT_PMD_INIT_FUNC_TRACE();
+ return 0;
+}
+
+static int
+otx_cpt_dev_start(struct rte_cryptodev *c_dev)
+{
+ void *cptvf = c_dev->data->dev_private;
+
+ CPT_PMD_INIT_FUNC_TRACE();
+
+ return otx_cpt_start_device(cptvf);
+}
+
+static void
+otx_cpt_dev_stop(struct rte_cryptodev *c_dev)
+{
+ void *cptvf = c_dev->data->dev_private;
+
+ CPT_PMD_INIT_FUNC_TRACE();
+
+ otx_cpt_stop_device(cptvf);
+}
+
+static int
+otx_cpt_dev_close(struct rte_cryptodev *c_dev)
+{
+ void *cptvf = c_dev->data->dev_private;
+ int i, ret;
+
+ CPT_PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < c_dev->data->nb_queue_pairs; i++) {
+ ret = otx_cpt_que_pair_release(c_dev, i);
+ if (ret)
+ return ret;
+ }
+
+ otx_cpt_periodic_alarm_stop(cptvf);
+ otx_cpt_deinit_device(cptvf);
+
+ return 0;
+}
+
+static void
+otx_cpt_dev_info_get(struct rte_cryptodev *dev, struct rte_cryptodev_info *info)
+{
+ CPT_PMD_INIT_FUNC_TRACE();
+ if (info != NULL) {
+ info->max_nb_queue_pairs = CPT_NUM_QS_PER_VF;
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = otx_get_capabilities();
+ info->sym.max_nb_sessions = 0;
+ info->driver_id = otx_cryptodev_driver_id;
+ info->min_mbuf_headroom_req = OTX_CPT_MIN_HEADROOM_REQ;
+ info->min_mbuf_tailroom_req = OTX_CPT_MIN_TAILROOM_REQ;
+ }
+}
+
+static void
+otx_cpt_stats_get(struct rte_cryptodev *dev __rte_unused,
+ struct rte_cryptodev_stats *stats __rte_unused)
+{
+ CPT_PMD_INIT_FUNC_TRACE();
+}
+
+static void
+otx_cpt_stats_reset(struct rte_cryptodev *dev __rte_unused)
+{
+ CPT_PMD_INIT_FUNC_TRACE();
+}
+
+static int
+otx_cpt_que_pair_setup(struct rte_cryptodev *dev,
+ uint16_t que_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id __rte_unused,
+ struct rte_mempool *session_pool __rte_unused)
+{
+ void *cptvf = dev->data->dev_private;
+ struct cpt_instance *instance = NULL;
+ struct rte_pci_device *pci_dev;
+ int ret = -1;
+
+ CPT_PMD_INIT_FUNC_TRACE();
+
+ if (dev->data->queue_pairs[que_pair_id] != NULL) {
+ ret = otx_cpt_que_pair_release(dev, que_pair_id);
+ if (ret)
+ return ret;
+ }
+
+ if (qp_conf->nb_descriptors > DEFAULT_CMD_QLEN) {
+ CPT_LOG_INFO("Number of descriptors too big %d, using default "
+ "queue length of %d", qp_conf->nb_descriptors,
+ DEFAULT_CMD_QLEN);
+ }
+
+ pci_dev = RTE_DEV_TO_PCI(dev->device);
+
+ if (pci_dev->mem_resource[0].addr == NULL) {
+ CPT_LOG_ERR("PCI mem address null");
+ return -EIO;
+ }
+
+ ret = otx_cpt_get_resource(cptvf, 0, &instance);
+ if (ret != 0) {
+ CPT_LOG_ERR("Error getting instance handle from device %s : "
+ "ret = %d", dev->data->name, ret);
+ return ret;
+ }
+
+ instance->queue_id = que_pair_id;
+ dev->data->queue_pairs[que_pair_id] = instance;
+
+ return 0;
+}
+
+static int
+otx_cpt_que_pair_release(struct rte_cryptodev *dev, uint16_t que_pair_id)
+{
+ struct cpt_instance *instance = dev->data->queue_pairs[que_pair_id];
+ int ret;
+
+ CPT_PMD_INIT_FUNC_TRACE();
+
+ ret = otx_cpt_put_resource(instance);
+ if (ret != 0) {
+ CPT_LOG_ERR("Error putting instance handle of device %s : "
+ "ret = %d", dev->data->name, ret);
+ return ret;
+ }
+
+ dev->data->queue_pairs[que_pair_id] = NULL;
+
+ return 0;
+}
+
+static unsigned int
+otx_cpt_get_session_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return cpt_get_session_size();
+}
+
+static void
+otx_cpt_session_init(void *sym_sess, uint8_t driver_id)
+{
+ struct rte_cryptodev_sym_session *sess = sym_sess;
+ struct cpt_sess_misc *cpt_sess =
+ (struct cpt_sess_misc *) get_sym_session_private_data(sess, driver_id);
+
+ CPT_PMD_INIT_FUNC_TRACE();
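+ /* The hardware session context follows cpt_sess_misc within the same
+  * mempool object; record its IOVA for the engine.
+  */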
+ cpt_sess->ctx_dma_addr = rte_mempool_virt2iova(cpt_sess) +
+ sizeof(struct cpt_sess_misc);
+}
+
+static int
+otx_cpt_session_cfg(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ struct rte_crypto_sym_xform *chain;
+ void *sess_private_data = NULL;
+
+ CPT_PMD_INIT_FUNC_TRACE();
+
+ if (cpt_is_algo_supported(xform))
+ goto err;
+
+ if (unlikely(sess == NULL)) {
+ CPT_LOG_ERR("invalid session struct");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CPT_LOG_ERR("Could not allocate sess_private_data");
+ return -ENOMEM;
+ }
+
+ chain = xform;
+ while (chain) {
+ switch (chain->type) {
+ case RTE_CRYPTO_SYM_XFORM_AEAD:
+ if (fill_sess_aead(chain, sess_private_data))
+ goto err;
+ break;
+ case RTE_CRYPTO_SYM_XFORM_CIPHER:
+ if (fill_sess_cipher(chain, sess_private_data))
+ goto err;
+ break;
+ case RTE_CRYPTO_SYM_XFORM_AUTH:
+ if (chain->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+ if (fill_sess_gmac(chain, sess_private_data))
+ goto err;
+ } else {
+ if (fill_sess_auth(chain, sess_private_data))
+ goto err;
+ }
+ break;
+ default:
+ CPT_LOG_ERR("Invalid crypto xform type");
+ break;
+ }
+ chain = chain->next;
+ }
+ set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
+ otx_cpt_session_init(sess, dev->driver_id);
+ return 0;
+
+err:
+ if (sess_private_data)
+ rte_mempool_put(mempool, sess_private_data);
+ return -EPERM;
+}
+
+static void
+otx_cpt_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ void *sess_priv = get_sym_session_private_data(sess, dev->driver_id);
+
+ CPT_PMD_INIT_FUNC_TRACE();
+ if (sess_priv) {
+ memset(sess_priv, 0, otx_cpt_get_session_size(dev));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_sym_session_private_data(sess, dev->driver_id, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+static uint16_t
+otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct cpt_instance *instance = (struct cpt_instance *)qptr;
+ uint16_t count = 0;
+ int ret;
+ struct cpt_vf *cptvf = (struct cpt_vf *)instance;
+ struct pending_queue *pqueue = &cptvf->pqueue;
+
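+ /* Enqueue at most as many ops as there are free pending queue slots */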
+ count = DEFAULT_CMD_QLEN - pqueue->pending_count;
+ if (nb_ops > count)
+ nb_ops = count;
+
+ count = 0;
+ while (likely(count < nb_ops)) {
+ ret = cpt_pmd_crypto_operation(instance, ops[count], pqueue,
+ otx_cryptodev_driver_id);
+ if (unlikely(ret))
+ break;
+ count++;
+ }
+ otx_cpt_ring_dbell(instance, count);
+ return count;
+}
+
+static uint16_t
+otx_cpt_pkt_dequeue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+ struct cpt_instance *instance = (struct cpt_instance *)qptr;
+ struct cpt_vf *cptvf = (struct cpt_vf *)instance;
+ struct pending_queue *pqueue = &cptvf->pqueue;
+ uint16_t nb_completed, i = 0;
+ uint8_t compcode[nb_ops];
+
+ nb_completed = cpt_dequeue_burst(instance, nb_ops,
+ (void **)ops, compcode, pqueue);
+ while (likely(i < nb_completed)) {
+ struct rte_crypto_op *cop;
+ void *metabuf;
+ uintptr_t *rsp;
+ uint8_t status;
+
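+ /*
+  * Each completed entry is a response descriptor whose words are:
+  * [0] metabuf, [1] crypto op, [2] auth data pointer and [3] auth
+  * data length used for verification.
+  */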
+ rsp = (void *)ops[i];
+ status = compcode[i];
+ if (likely((i + 1) < nb_completed))
+ rte_prefetch0(ops[i+1]);
+ metabuf = (void *)rsp[0];
+ cop = (void *)rsp[1];
+
+ ops[i] = cop;
+
+ if (likely(status == 0)) {
+ if (likely(!rsp[2]))
+ cop->status =
+ RTE_CRYPTO_OP_STATUS_SUCCESS;
+ else
+ compl_auth_verify(cop, (uint8_t *)rsp[2],
+ rsp[3]);
+ } else if (status == ERR_GC_ICV_MISCOMPARE) {
+ /* auth data mismatch */
+ cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+ free_op_meta(metabuf, cptvf->meta_info.cptvf_meta_pool);
+ i++;
+ }
+ return nb_completed;
+}
+
+static struct rte_cryptodev_ops cptvf_ops = {
+ /* Device related operations */
+ .dev_configure = otx_cpt_dev_config,
+ .dev_start = otx_cpt_dev_start,
+ .dev_stop = otx_cpt_dev_stop,
+ .dev_close = otx_cpt_dev_close,
+ .dev_infos_get = otx_cpt_dev_info_get,
+
+ .stats_get = otx_cpt_stats_get,
+ .stats_reset = otx_cpt_stats_reset,
+ .queue_pair_setup = otx_cpt_que_pair_setup,
+ .queue_pair_release = otx_cpt_que_pair_release,
+ .queue_pair_count = NULL,
+
+ /* Crypto related operations */
+ .sym_session_get_size = otx_cpt_get_session_size,
+ .sym_session_configure = otx_cpt_session_cfg,
+ .sym_session_clear = otx_cpt_session_clear
+};
+
+static void
+otx_cpt_common_vars_init(struct cpt_vf *cptvf)
+{
+ cptvf->meta_info.cptvf_meta_pool = otx_cpt_meta_pool;
+ cptvf->meta_info.cptvf_op_mlen = otx_cpt_op_mlen;
+ cptvf->meta_info.cptvf_op_sb_mlen = otx_cpt_op_sb_mlen;
+}
+
+int
+otx_cpt_dev_create(struct rte_cryptodev *c_dev)
+{
+ struct rte_pci_device *pdev = RTE_DEV_TO_PCI(c_dev->device);
+ struct cpt_vf *cptvf = NULL;
+ void *reg_base;
+ char dev_name[32];
+ int ret;
+
+ if (pdev->mem_resource[0].phys_addr == 0ULL)
+ return -EIO;
+
+ /* for secondary processes, we don't initialise any further as primary
+ * has already done this work.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ cptvf = rte_zmalloc_socket("otx_cryptodev_private_mem",
+ sizeof(struct cpt_vf), RTE_CACHE_LINE_SIZE,
+ rte_socket_id());
+
+ if (cptvf == NULL) {
+ CPT_LOG_ERR("Cannot allocate memory for device private data");
+ return -ENOMEM;
+ }
+
+ snprintf(dev_name, 32, "%02x:%02x.%x",
+ pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
+
+ reg_base = pdev->mem_resource[0].addr;
+ if (!reg_base) {
+ CPT_LOG_ERR("Failed to map BAR0 of %s", dev_name);
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ ret = otx_cpt_hw_init(cptvf, pdev, reg_base, dev_name);
+ if (ret) {
+ CPT_LOG_ERR("Failed to init cptvf %s", dev_name);
+ ret = -EIO;
+ goto fail;
+ }
+
+ /* Start off timer for mailbox interrupts */
+ otx_cpt_periodic_alarm_start(cptvf);
+
+ rte_spinlock_lock(&otx_probe_count_lock);
+ if (!otx_cryptodev_probe_count) {
+ ret = init_global_resources();
+ if (ret) {
+ rte_spinlock_unlock(&otx_probe_count_lock);
+ goto init_fail;
+ }
+ }
+ otx_cryptodev_probe_count++;
+ rte_spinlock_unlock(&otx_probe_count_lock);
+
+ /* Initialize data path variables used by common code */
+ otx_cpt_common_vars_init(cptvf);
+
+ c_dev->dev_ops = &cptvf_ops;
+
+ c_dev->enqueue_burst = otx_cpt_pkt_enqueue;
+ c_dev->dequeue_burst = otx_cpt_pkt_dequeue;
+
+ c_dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+ RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT;
+
+ /* Save dev private data */
+ c_dev->data->dev_private = cptvf;
+
+ return 0;
+
+init_fail:
+ otx_cpt_periodic_alarm_stop(cptvf);
+ otx_cpt_deinit_device(cptvf);
+
+fail:
+ if (cptvf) {
+ /* Free private data allocated */
+ rte_free(cptvf);
+ }
+
+ return ret;
+}
diff --git a/drivers/crypto/octeontx/otx_cryptodev_ops.h b/drivers/crypto/octeontx/otx_cryptodev_ops.h
new file mode 100644
index 00000000..b3efecf0
--- /dev/null
+++ b/drivers/crypto/octeontx/otx_cryptodev_ops.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Cavium, Inc
+ */
+
+#ifndef _OTX_CRYPTODEV_OPS_H_
+#define _OTX_CRYPTODEV_OPS_H_
+
+#define OTX_CPT_MIN_HEADROOM_REQ (24)
+#define OTX_CPT_MIN_TAILROOM_REQ (8)
+#define CPT_NUM_QS_PER_VF (1)
+
+void
+cleanup_global_resources(void);
+
+int
+otx_cpt_dev_create(struct rte_cryptodev *c_dev);
+
+#endif /* _OTX_CRYPTODEV_OPS_H_ */
diff --git a/drivers/crypto/octeontx/rte_pmd_octeontx_crypto_version.map b/drivers/crypto/octeontx/rte_pmd_octeontx_crypto_version.map
new file mode 100644
index 00000000..521e51f4
--- /dev/null
+++ b/drivers/crypto/octeontx/rte_pmd_octeontx_crypto_version.map
@@ -0,0 +1,4 @@
+DPDK_18.11 {
+
+ local: *;
+};
diff --git a/drivers/crypto/openssl/compat.h b/drivers/crypto/openssl/compat.h
index 45f9a33d..eecb7d36 100644
--- a/drivers/crypto/openssl/compat.h
+++ b/drivers/crypto/openssl/compat.h
@@ -7,101 +7,190 @@
#if (OPENSSL_VERSION_NUMBER < 0x10100000L)
-#define set_rsa_params(rsa, p, q, ret) \
- do {rsa->p = p; rsa->q = q; ret = 0; } while (0)
-
-#define set_rsa_crt_params(rsa, dmp1, dmq1, iqmp, ret) \
- do { \
- rsa->dmp1 = dmp1; \
- rsa->dmq1 = dmq1; \
- rsa->iqmp = iqmp; \
- ret = 0; \
- } while (0)
-
-#define set_rsa_keys(rsa, n, e, d, ret) \
- do { \
- rsa->n = n; rsa->e = e; rsa->d = d; ret = 0; \
- } while (0)
-
-#define set_dh_params(dh, p, g, ret) \
- do { \
- dh->p = p; \
- dh->q = NULL; \
- dh->g = g; \
- ret = 0; \
- } while (0)
-
-#define set_dh_priv_key(dh, priv_key, ret) \
- do { dh->priv_key = priv_key; ret = 0; } while (0)
-
-#define set_dsa_params(dsa, p, q, g, ret) \
- do { dsa->p = p; dsa->q = q; dsa->g = g; ret = 0; } while (0)
-
-#define get_dh_pub_key(dh, pub_key) \
- (pub_key = dh->pub_key)
-
-#define get_dh_priv_key(dh, priv_key) \
- (priv_key = dh->priv_key)
-
-#define set_dsa_sign(sign, r, s) \
- do { sign->r = r; sign->s = s; } while (0)
-
-#define get_dsa_sign(sign, r, s) \
- do { r = sign->r; s = sign->s; } while (0)
-
-#define set_dsa_keys(dsa, pub, priv, ret) \
- do { dsa->pub_key = pub; dsa->priv_key = priv; ret = 0; } while (0)
-
-#define set_dsa_pub_key(dsa, pub_key) \
- (dsa->pub_key = pub_key)
-
-#define get_dsa_priv_key(dsa, priv_key) \
- (priv_key = dsa->priv_key)
+static __rte_always_inline int
+set_rsa_params(RSA *rsa, BIGNUM *p, BIGNUM *q)
+{
+ rsa->p = p;
+ rsa->q = q;
+ return 0;
+}
+
+static __rte_always_inline int
+set_rsa_crt_params(RSA *rsa, BIGNUM *dmp1, BIGNUM *dmq1, BIGNUM *iqmp)
+{
+ rsa->dmp1 = dmp1;
+ rsa->dmq1 = dmq1;
+ rsa->iqmp = iqmp;
+ return 0;
+}
+
+static __rte_always_inline int
+set_rsa_keys(RSA *rsa, BIGNUM *n, BIGNUM *e, BIGNUM *d)
+{
+ rsa->n = n;
+ rsa->e = e;
+ rsa->d = d;
+ return 0;
+}
+
+static __rte_always_inline int
+set_dh_params(DH *dh, BIGNUM *p, BIGNUM *g)
+{
+ dh->p = p;
+ dh->q = NULL;
+ dh->g = g;
+ return 0;
+}
+
+static __rte_always_inline int
+set_dh_priv_key(DH *dh, BIGNUM *priv_key)
+{
+ dh->priv_key = priv_key;
+ return 0;
+}
+
+static __rte_always_inline int
+set_dsa_params(DSA *dsa, BIGNUM *p, BIGNUM *q, BIGNUM *g)
+{
+ dsa->p = p;
+ dsa->q = q;
+ dsa->g = g;
+ return 0;
+}
+
+static __rte_always_inline void
+get_dh_pub_key(DH *dh, const BIGNUM **pub_key)
+{
+ *pub_key = dh->pub_key;
+}
+
+static __rte_always_inline void
+get_dh_priv_key(DH *dh, const BIGNUM **priv_key)
+{
+ *priv_key = dh->priv_key;
+}
+
+static __rte_always_inline void
+set_dsa_sign(DSA_SIG *sign, BIGNUM *r, BIGNUM *s)
+{
+ sign->r = r;
+ sign->s = s;
+}
+
+static __rte_always_inline void
+get_dsa_sign(DSA_SIG *sign, const BIGNUM **r, const BIGNUM **s)
+{
+ *r = sign->r;
+ *s = sign->s;
+}
+
+static __rte_always_inline int
+set_dsa_keys(DSA *dsa, BIGNUM *pub, BIGNUM *priv)
+{
+ dsa->pub_key = pub;
+ dsa->priv_key = priv;
+ return 0;
+}
+
+static __rte_always_inline void
+set_dsa_pub_key(DSA *dsa, BIGNUM *pub)
+{
+ dsa->pub_key = pub;
+}
+
+static __rte_always_inline void
+get_dsa_priv_key(DSA *dsa, BIGNUM **priv_key)
+{
+ *priv_key = dsa->priv_key;
+}
#else
-#define set_rsa_params(rsa, p, q, ret) \
- (ret = !RSA_set0_factors(rsa, p, q))
+static __rte_always_inline int
+set_rsa_params(RSA *rsa, BIGNUM *p, BIGNUM *q)
+{
+ return !(RSA_set0_factors(rsa, p, q));
+}
-#define set_rsa_crt_params(rsa, dmp1, dmq1, iqmp, ret) \
- (ret = !RSA_set0_crt_params(rsa, dmp1, dmq1, iqmp))
+static __rte_always_inline int
+set_rsa_crt_params(RSA *rsa, BIGNUM *dmp1, BIGNUM *dmq1, BIGNUM *iqmp)
+{
+ return !(RSA_set0_crt_params(rsa, dmp1, dmq1, iqmp));
+}
/* n, e must be non-null, d can be NULL */
-#define set_rsa_keys(rsa, n, e, d, ret) \
- (ret = !RSA_set0_key(rsa, n, e, d))
-
-#define set_dh_params(dh, p, g, ret) \
- (ret = !DH_set0_pqg(dh, p, NULL, g))
-
-#define set_dh_priv_key(dh, priv_key, ret) \
- (ret = !DH_set0_key(dh, NULL, priv_key))
-
-#define get_dh_pub_key(dh, pub_key) \
- (DH_get0_key(dh_key, &pub_key, NULL))
-
-#define get_dh_priv_key(dh, priv_key) \
- (DH_get0_key(dh_key, NULL, &priv_key))
-
-#define set_dsa_params(dsa, p, q, g, ret) \
- (ret = !DSA_set0_pqg(dsa, p, q, g))
-
-#define set_dsa_priv_key(dsa, priv_key) \
- (DSA_set0_key(dsa, NULL, priv_key))
-
-#define set_dsa_sign(sign, r, s) \
- (DSA_SIG_set0(sign, r, s))
-
-#define get_dsa_sign(sign, r, s) \
- (DSA_SIG_get0(sign, &r, &s))
-
-#define set_dsa_keys(dsa, pub, priv, ret) \
- (ret = !DSA_set0_key(dsa, pub, priv))
-
-#define set_dsa_pub_key(dsa, pub_key) \
- (DSA_set0_key(dsa, pub_key, NULL))
-#define get_dsa_priv_key(dsa, priv_key) \
- (DSA_get0_key(dsa, NULL, &priv_key))
+static __rte_always_inline int
+set_rsa_keys(RSA *rsa, BIGNUM *n, BIGNUM *e, BIGNUM *d)
+{
+ return !(RSA_set0_key(rsa, n, e, d));
+}
+
+static __rte_always_inline int
+set_dh_params(DH *dh, BIGNUM *p, BIGNUM *g)
+{
+ return !(DH_set0_pqg(dh, p, NULL, g));
+}
+
+static __rte_always_inline int
+set_dh_priv_key(DH *dh, BIGNUM *priv_key)
+{
+ return !(DH_set0_key(dh, NULL, priv_key));
+}
+
+static __rte_always_inline void
+get_dh_pub_key(DH *dh_key, const BIGNUM **pub_key)
+{
+ DH_get0_key(dh_key, pub_key, NULL);
+}
+
+static __rte_always_inline void
+get_dh_priv_key(DH *dh_key, const BIGNUM **priv_key)
+{
+ DH_get0_key(dh_key, NULL, priv_key);
+}
+
+static __rte_always_inline int
+set_dsa_params(DSA *dsa, BIGNUM *p, BIGNUM *q, BIGNUM *g)
+{
+ return !(DSA_set0_pqg(dsa, p, q, g));
+}
+
+static __rte_always_inline void
+set_dsa_priv_key(DSA *dsa, BIGNUM *priv_key)
+{
+ DSA_set0_key(dsa, NULL, priv_key);
+}
+
+static __rte_always_inline void
+set_dsa_sign(DSA_SIG *sign, BIGNUM *r, BIGNUM *s)
+{
+ DSA_SIG_set0(sign, r, s);
+}
+
+static __rte_always_inline void
+get_dsa_sign(DSA_SIG *sign, const BIGNUM **r, const BIGNUM **s)
+{
+ DSA_SIG_get0(sign, r, s);
+}
+
+static __rte_always_inline int
+set_dsa_keys(DSA *dsa, BIGNUM *pub, BIGNUM *priv)
+{
+ return !(DSA_set0_key(dsa, pub, priv));
+}
+
+static __rte_always_inline void
+set_dsa_pub_key(DSA *dsa, BIGNUM *pub_key)
+{
+ DSA_set0_key(dsa, pub_key, NULL);
+}
+
+static __rte_always_inline void
+get_dsa_priv_key(DSA *dsa, const BIGNUM **priv_key)
+{
+ DSA_get0_key(dsa, NULL, priv_key);
+}
#endif /* version < 10100000 */
diff --git a/drivers/crypto/openssl/rte_openssl_pmd.c b/drivers/crypto/openssl/rte_openssl_pmd.c
index 7d263aba..003116dc 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd.c
@@ -1509,15 +1509,7 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op,
srclen = op->sym->auth.data.length;
- if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
- dst = qp->temp_digest;
- else {
- dst = op->sym->auth.digest.data;
- if (dst == NULL)
- dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
- op->sym->auth.data.offset +
- op->sym->auth.data.length);
- }
+ dst = qp->temp_digest;
switch (sess->auth.mode) {
case OPENSSL_AUTH_AS_AUTH:
@@ -1540,6 +1532,15 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op,
sess->auth.digest_length) != 0) {
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
}
+ } else {
+ uint8_t *auth_dst;
+
+ auth_dst = op->sym->auth.digest.data;
+ if (auth_dst == NULL)
+ auth_dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->auth.data.offset +
+ op->sym->auth.data.length);
+ memcpy(auth_dst, dst, sess->auth.digest_length);
}
if (status != 0)
@@ -1564,7 +1565,7 @@ process_openssl_dsa_sign_op(struct rte_crypto_op *cop,
cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
} else {
const BIGNUM *r = NULL, *s = NULL;
- get_dsa_sign(sign, r, s);
+ get_dsa_sign(sign, &r, &s);
op->r.length = BN_bn2bin(r, op->r.data);
op->s.length = BN_bn2bin(s, op->s.data);
@@ -1666,7 +1667,7 @@ process_openssl_dh_op(struct rte_crypto_op *cop,
cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
return -1;
}
- set_dh_priv_key(dh_key, priv_key, ret);
+ ret = set_dh_priv_key(dh_key, priv_key);
if (ret) {
OPENSSL_LOG(ERR, "Failed to set private key\n");
cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
@@ -1715,7 +1716,7 @@ process_openssl_dh_op(struct rte_crypto_op *cop,
cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
return -1;
}
- set_dh_priv_key(dh_key, priv_key, ret);
+ ret = set_dh_priv_key(dh_key, priv_key);
if (ret) {
OPENSSL_LOG(ERR, "Failed to set private key\n");
cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
@@ -1743,7 +1744,7 @@ process_openssl_dh_op(struct rte_crypto_op *cop,
__func__, __LINE__);
/* get the generated keys */
- get_dh_pub_key(dh_key, pub_key);
+ get_dh_pub_key(dh_key, &pub_key);
/* output public key */
op->pub_key.length = BN_bn2bin(pub_key,
@@ -1758,7 +1759,7 @@ process_openssl_dh_op(struct rte_crypto_op *cop,
__func__, __LINE__);
/* get the generated keys */
- get_dh_priv_key(dh_key, priv_key);
+ get_dh_priv_key(dh_key, &priv_key);
/* provide generated private key back to user */
op->priv_key.length = BN_bn2bin(priv_key,
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
index de228439..c2b029ec 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd_ops.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
@@ -26,9 +26,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 16,
+ .min = 1,
.max = 16,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -68,9 +68,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 20,
+ .min = 1,
.max = 20,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -110,9 +110,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 28,
+ .min = 1,
.max = 28,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -131,9 +131,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.increment = 0
},
.digest_size = {
- .min = 28,
+ .min = 1,
.max = 28,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -152,9 +152,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 32,
+ .min = 1,
.max = 32,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -194,9 +194,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 48,
+ .min = 1,
.max = 48,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -236,9 +236,9 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -875,14 +875,14 @@ static int openssl_set_asym_session_parameters(
RSA_free(rsa);
goto err_rsa;
}
- set_rsa_params(rsa, p, q, ret);
+ ret = set_rsa_params(rsa, p, q);
if (ret) {
OPENSSL_LOG(ERR,
"failed to set rsa params\n");
RSA_free(rsa);
goto err_rsa;
}
- set_rsa_crt_params(rsa, dmp1, dmq1, iqmp, ret);
+ ret = set_rsa_crt_params(rsa, dmp1, dmq1, iqmp);
if (ret) {
OPENSSL_LOG(ERR,
"failed to set crt params\n");
@@ -896,7 +896,7 @@ static int openssl_set_asym_session_parameters(
}
}
- set_rsa_keys(rsa, n, e, d, ret);
+ ret = set_rsa_keys(rsa, n, e, d);
if (ret) {
OPENSSL_LOG(ERR, "Failed to load rsa keys\n");
RSA_free(rsa);
@@ -1005,7 +1005,7 @@ err_rsa:
"failed to allocate resources\n");
goto err_dh;
}
- set_dh_params(dh, p, g, ret);
+ ret = set_dh_params(dh, p, g);
if (ret) {
DH_free(dh);
goto err_dh;
@@ -1087,7 +1087,7 @@ err_dh:
goto err_dsa;
}
- set_dsa_params(dsa, p, q, g, ret);
+ ret = set_dsa_params(dsa, p, q, g);
if (ret) {
DSA_free(dsa);
OPENSSL_LOG(ERR, "Failed to dsa params\n");
@@ -1101,7 +1101,7 @@ err_dh:
* both versions
*/
/* just set dummy public for very 1st call */
- set_dsa_keys(dsa, pub_key, priv_key, ret);
+ ret = set_dsa_keys(dsa, pub_key, priv_key);
if (ret) {
DSA_free(dsa);
OPENSSL_LOG(ERR, "Failed to set keys\n");
diff --git a/drivers/crypto/qat/qat_sym_capabilities.h b/drivers/crypto/qat/qat_sym_capabilities.h
index eea08bc7..7cba87d6 100644
--- a/drivers/crypto/qat/qat_sym_capabilities.h
+++ b/drivers/crypto/qat/qat_sym_capabilities.h
@@ -154,6 +154,26 @@
}, } \
}, } \
}, \
+ { /* AES CMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_AES_CMAC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 12, \
+ .max = 16, \
+ .increment = 4 \
+ } \
+ }, } \
+ }, } \
+ }, \
{ /* AES CCM */ \
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
{.sym = { \
diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c
index 96f442e8..c3f70040 100644
--- a/drivers/crypto/qat/qat_sym_pmd.c
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -290,6 +290,7 @@ qat_sym_dev_create(struct qat_pci_device *qat_pci_dev)
internals->qat_dev_capabilities = qat_gen1_sym_capabilities;
break;
case QAT_GEN2:
+ case QAT_GEN3:
internals->qat_dev_capabilities = qat_gen2_sym_capabilities;
break;
default:
diff --git a/drivers/crypto/qat/qat_sym_pmd.h b/drivers/crypto/qat/qat_sym_pmd.h
index d3432854..5563d5be 100644
--- a/drivers/crypto/qat/qat_sym_pmd.h
+++ b/drivers/crypto/qat/qat_sym_pmd.h
@@ -12,7 +12,7 @@
#include "qat_sym_capabilities.h"
#include "qat_device.h"
-/**< Intel(R) QAT Symmetric Crypto PMD device name */
+/** Intel(R) QAT Symmetric Crypto PMD driver name */
#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat
extern uint8_t cryptodev_qat_driver_id;
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
index 1d58220a..8196e233 100644
--- a/drivers/crypto/qat/qat_sym_session.c
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -498,6 +498,7 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
struct qat_sym_dev_private *internals = dev->data->dev_private;
uint8_t *key_data = auth_xform->key.data;
uint8_t key_length = auth_xform->key.length;
+ session->aes_cmac = 0;
switch (auth_xform->algo) {
case RTE_CRYPTO_AUTH_SHA1_HMAC:
@@ -518,6 +519,10 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
break;
+ case RTE_CRYPTO_AUTH_AES_CMAC:
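+ /* CMAC reuses the XCBC-MAC hardware path; the aes_cmac flag selects
+  * CMAC-specific precomputes later on.
+  */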
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
+ session->aes_cmac = 1;
+ break;
case RTE_CRYPTO_AUTH_AES_GMAC:
if (qat_sym_validate_aes_key(auth_xform->key.length,
&session->qat_cipher_alg) != 0) {
@@ -555,7 +560,6 @@ qat_sym_session_configure_auth(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_SHA224:
case RTE_CRYPTO_AUTH_SHA384:
case RTE_CRYPTO_AUTH_MD5:
- case RTE_CRYPTO_AUTH_AES_CMAC:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
auth_xform->algo);
@@ -817,6 +821,8 @@ static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg)
return ICP_QAT_HW_SHA512_STATE1_SZ;
case ICP_QAT_HW_AUTH_ALGO_MD5:
return ICP_QAT_HW_MD5_STATE1_SZ;
+ case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
+ return ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
/* return maximum digest size in this case */
return ICP_QAT_HW_SHA512_STATE1_SZ;
@@ -843,6 +849,8 @@ static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
return SHA512_CBLOCK;
case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
return 16;
+ case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
+ return ICP_QAT_HW_AES_BLK_SZ;
case ICP_QAT_HW_AUTH_ALGO_MD5:
return MD5_CBLOCK;
case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
@@ -991,11 +999,28 @@ static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
#define HMAC_OPAD_VALUE 0x5c
#define HASH_XCBC_PRECOMP_KEY_NUM 3
+static const uint8_t AES_CMAC_SEED[ICP_QAT_HW_AES_128_KEY_SZ];
+
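+/*
+ * CMAC subkey derivation (RFC 4493): left shift the base key by one bit
+ * and, if its most significant bit was set, XOR the constant Rb (0x87)
+ * into the last byte.
+ */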
+static void aes_cmac_key_derive(uint8_t *base, uint8_t *derived)
+{
+ int i;
+
+ derived[0] = base[0] << 1;
+ for (i = 1; i < ICP_QAT_HW_AES_BLK_SZ ; i++) {
+ derived[i] = base[i] << 1;
+ derived[i - 1] |= base[i] >> 7;
+ }
+
+ if (base[0] & 0x80)
+ derived[ICP_QAT_HW_AES_BLK_SZ - 1] ^= QAT_AES_CMAC_CONST_RB;
+}
+
static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
const uint8_t *auth_key,
uint16_t auth_keylen,
uint8_t *p_state_buf,
- uint16_t *p_state_len)
+ uint16_t *p_state_len,
+ uint8_t aes_cmac)
{
int block_size;
uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
@@ -1003,47 +1028,91 @@ static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
int i;
if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) {
- static uint8_t qat_aes_xcbc_key_seed[
- ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
- 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
- 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
- 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
- 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
- 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
- };
- uint8_t *in = NULL;
- uint8_t *out = p_state_buf;
- int x;
- AES_KEY enc_key;
+ /* CMAC */
+ if (aes_cmac) {
+ AES_KEY enc_key;
+ uint8_t *in = NULL;
+ uint8_t k0[ICP_QAT_HW_AES_128_KEY_SZ];
+ uint8_t *k1, *k2;
- in = rte_zmalloc("working mem for key",
- ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
- if (in == NULL) {
- QAT_LOG(ERR, "Failed to alloc memory");
- return -ENOMEM;
- }
+ auth_keylen = ICP_QAT_HW_AES_128_KEY_SZ;
+
+ in = rte_zmalloc("AES CMAC K1",
+ ICP_QAT_HW_AES_128_KEY_SZ, 16);
+
+ if (in == NULL) {
+ QAT_LOG(ERR, "Failed to alloc memory");
+ return -ENOMEM;
+ }
+
+ rte_memcpy(in, AES_CMAC_SEED,
+ ICP_QAT_HW_AES_128_KEY_SZ);
+ rte_memcpy(p_state_buf, auth_key, auth_keylen);
- rte_memcpy(in, qat_aes_xcbc_key_seed,
- ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
- for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
&enc_key) != 0) {
- rte_free(in -
- (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
- memset(out -
- (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
- 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+ rte_free(in);
return -EFAULT;
}
- AES_encrypt(in, out, &enc_key);
- in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
- out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+
+ AES_encrypt(in, k0, &enc_key);
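+ /* K0 = AES-128(key, 0^128); 'in' holds the all-zero AES_CMAC_SEED block */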
+
+ k1 = p_state_buf + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
+ k2 = k1 + ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
+
+ aes_cmac_key_derive(k0, k1);
+ aes_cmac_key_derive(k1, k2);
+
+ memset(k0, 0, ICP_QAT_HW_AES_128_KEY_SZ);
+ *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
+ rte_free(in);
+ return 0;
+ } else {
+ static uint8_t qat_aes_xcbc_key_seed[
+ ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = {
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03,
+ };
+
+ uint8_t *in = NULL;
+ uint8_t *out = p_state_buf;
+ int x;
+ AES_KEY enc_key;
+
+ in = rte_zmalloc("working mem for key",
+ ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
+ if (in == NULL) {
+ QAT_LOG(ERR, "Failed to alloc memory");
+ return -ENOMEM;
+ }
+
+ rte_memcpy(in, qat_aes_xcbc_key_seed,
+ ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+ for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
+ if (AES_set_encrypt_key(auth_key,
+ auth_keylen << 3,
+ &enc_key) != 0) {
+ rte_free(in -
+ (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ));
+ memset(out -
+ (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ),
+ 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
+ return -EFAULT;
+ }
+ AES_encrypt(in, out, &enc_key);
+ in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+ out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ;
+ }
+ *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
+ rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
+ return 0;
}
- *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ;
- rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ);
- return 0;
+
} else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
(hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
uint8_t *in = NULL;
@@ -1417,7 +1486,9 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
- || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3)
+ || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3
+ || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC
+ )
hash->auth_counter.counter = 0;
else
hash->auth_counter.counter = rte_bswap32(
@@ -1430,40 +1501,45 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
*/
switch (cdesc->qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1,
- authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, authkey,
+ authkeylen, cdesc->cd_cur_ptr, &state1_size,
+ cdesc->aes_cmac)) {
QAT_LOG(ERR, "(SHA)precompute failed");
return -EFAULT;
}
state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA224:
- if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224,
- authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, authkey,
+ authkeylen, cdesc->cd_cur_ptr, &state1_size,
+ cdesc->aes_cmac)) {
QAT_LOG(ERR, "(SHA)precompute failed");
return -EFAULT;
}
state2_size = ICP_QAT_HW_SHA224_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256,
- authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, authkey,
+ authkeylen, cdesc->cd_cur_ptr, &state1_size,
+ cdesc->aes_cmac)) {
QAT_LOG(ERR, "(SHA)precompute failed");
return -EFAULT;
}
state2_size = ICP_QAT_HW_SHA256_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_SHA384:
- if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384,
- authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, authkey,
+ authkeylen, cdesc->cd_cur_ptr, &state1_size,
+ cdesc->aes_cmac)) {
QAT_LOG(ERR, "(SHA)precompute failed");
return -EFAULT;
}
state2_size = ICP_QAT_HW_SHA384_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512,
- authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) {
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, authkey,
+ authkeylen, cdesc->cd_cur_ptr, &state1_size,
+ cdesc->aes_cmac)) {
QAT_LOG(ERR, "(SHA)precompute failed");
return -EFAULT;
}
@@ -1471,10 +1547,16 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
break;
case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC:
state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ;
+
+ if (cdesc->aes_cmac)
+ memset(cdesc->cd_cur_ptr, 0, state1_size);
if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC,
authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
- &state2_size)) {
- QAT_LOG(ERR, "(XCBC)precompute failed");
+ &state2_size, cdesc->aes_cmac)) {
+ cdesc->aes_cmac ? QAT_LOG(ERR,
+ "(CMAC)precompute failed")
+ : QAT_LOG(ERR,
+ "(XCBC)precompute failed");
return -EFAULT;
}
break;
@@ -1482,9 +1564,9 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
- if (qat_sym_do_precomputes(cdesc->qat_hash_alg,
- authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
- &state2_size)) {
+ if (qat_sym_do_precomputes(cdesc->qat_hash_alg, authkey,
+ authkeylen, cdesc->cd_cur_ptr + state1_size,
+ &state2_size, cdesc->aes_cmac)) {
QAT_LOG(ERR, "(GCM)precompute failed");
return -EFAULT;
}
@@ -1542,9 +1624,9 @@ int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
break;
case ICP_QAT_HW_AUTH_ALGO_MD5:
- if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
- authkey, authkeylen, cdesc->cd_cur_ptr,
- &state1_size)) {
+ if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, authkey,
+ authkeylen, cdesc->cd_cur_ptr, &state1_size,
+ cdesc->aes_cmac)) {
QAT_LOG(ERR, "(MD5)precompute failed");
return -EFAULT;
}
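
For reference, the CMAC branch above stores K0 = AES(key, 0^128) in state1 and then derives the two CMAC subkeys, K1 from K0 and K2 from K1, before zeroing K0 so only the derived state stays in the content descriptor. The body of aes_cmac_key_derive() is not part of this hunk; a minimal sketch of the standard RFC 4493 derivation it is assumed to perform (helper name and signature taken from the call sites above, constant from QAT_AES_CMAC_CONST_RB below) is:

/*
 * Sketch only: RFC 4493 subkey derivation as aes_cmac_key_derive(base,
 * derived) is assumed to implement it; not the PMD's actual code.
 */
#include <stdint.h>

#define AES_CMAC_BLOCK_SZ      16
#define QAT_AES_CMAC_CONST_RB  0x87 /* Rb for 128-bit blocks (RFC 4493) */

static void
aes_cmac_key_derive_sketch(const uint8_t *base, uint8_t *derived)
{
	int i;

	/* derived = base << 1, carrying bits across the 16-byte block */
	for (i = 0; i < AES_CMAC_BLOCK_SZ - 1; i++)
		derived[i] = (uint8_t)((base[i] << 1) | (base[i + 1] >> 7));
	derived[AES_CMAC_BLOCK_SZ - 1] =
		(uint8_t)(base[AES_CMAC_BLOCK_SZ - 1] << 1);

	/* if the MSB of base was set, fold in the Rb constant */
	if (base[0] & 0x80)
		derived[AES_CMAC_BLOCK_SZ - 1] ^= QAT_AES_CMAC_CONST_RB;
}
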
diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h
index e8f51e5b..43e25ceb 100644
--- a/drivers/crypto/qat/qat_sym_session.h
+++ b/drivers/crypto/qat/qat_sym_session.h
@@ -36,6 +36,8 @@
ICP_QAT_HW_CIPHER_KEY_CONVERT, \
ICP_QAT_HW_CIPHER_DECRYPT)
+#define QAT_AES_CMAC_CONST_RB 0x87
+
enum qat_sym_proto_flag {
QAT_CRYPTO_PROTO_FLAG_NONE = 0,
QAT_CRYPTO_PROTO_FLAG_CCM = 1,
@@ -75,6 +77,7 @@ struct qat_sym_session {
uint16_t digest_length;
rte_spinlock_t lock; /* protects this struct */
enum qat_device_gen min_qat_dev_gen;
+ uint8_t aes_cmac;
};
int
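
The new aes_cmac session flag lets the XCBC-MAC hardware algorithm be reused for AES-CMAC. A hedged sketch of the auth transform an application would submit to reach this path (key contents and digest length are illustrative; the mapping from RTE_CRYPTO_AUTH_AES_CMAC to the flag is inferred from the hunks above):

#include <rte_crypto_sym.h>

static uint8_t cmac_key[16]; /* 128-bit MAC key, filled in by the app */

struct rte_crypto_sym_xform cmac_auth_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.next = NULL,
	.auth = {
		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
		.algo = RTE_CRYPTO_AUTH_AES_CMAC,
		.key = { .data = cmac_key, .length = sizeof(cmac_key) },
		.digest_length = 16,
	},
};
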
diff --git a/drivers/crypto/scheduler/meson.build b/drivers/crypto/scheduler/meson.build
new file mode 100644
index 00000000..c5ba2d68
--- /dev/null
+++ b/drivers/crypto/scheduler/meson.build
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Luca Boccassi <bluca@debian.org>
+
+deps += ['bus_vdev', 'reorder']
+name = 'crypto_scheduler'
+sources = files(
+ 'rte_cryptodev_scheduler.c',
+ 'scheduler_failover.c',
+ 'scheduler_multicore.c',
+ 'scheduler_pkt_size_distr.c',
+ 'scheduler_pmd.c',
+ 'scheduler_pmd_ops.c',
+ 'scheduler_roundrobin.c',
+)
+
+headers = files(
+ 'rte_cryptodev_scheduler.h',
+ 'rte_cryptodev_scheduler_operations.h',
+)
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
index 6e4919c4..a2142860 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
@@ -174,7 +174,7 @@ rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id)
return -ENOTSUP;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -233,7 +233,7 @@ rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id)
return -ENOTSUP;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -290,7 +290,7 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
return -ENOTSUP;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -308,28 +308,28 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id,
switch (mode) {
case CDEV_SCHED_MODE_ROUNDROBIN:
if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
- roundrobin_scheduler) < 0) {
+ crypto_scheduler_roundrobin) < 0) {
CR_SCHED_LOG(ERR, "Failed to load scheduler");
return -1;
}
break;
case CDEV_SCHED_MODE_PKT_SIZE_DISTR:
if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
- pkt_size_based_distr_scheduler) < 0) {
+ crypto_scheduler_pkt_size_based_distr) < 0) {
CR_SCHED_LOG(ERR, "Failed to load scheduler");
return -1;
}
break;
case CDEV_SCHED_MODE_FAILOVER:
if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
- failover_scheduler) < 0) {
+ crypto_scheduler_failover) < 0) {
CR_SCHED_LOG(ERR, "Failed to load scheduler");
return -1;
}
break;
case CDEV_SCHED_MODE_MULTICORE:
if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id,
- multicore_scheduler) < 0) {
+ crypto_scheduler_multicore) < 0) {
CR_SCHED_LOG(ERR, "Failed to load scheduler");
return -1;
}
@@ -353,7 +353,7 @@ rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id)
return -ENOTSUP;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -375,7 +375,7 @@ rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id,
return -ENOTSUP;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -403,7 +403,7 @@ rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id)
return -ENOTSUP;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -425,7 +425,7 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
return -ENOTSUP;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -498,7 +498,7 @@ rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves)
return -ENOTSUP;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
@@ -566,7 +566,7 @@ rte_cryptodev_scheduler_option_get(uint8_t scheduler_id,
return -EINVAL;
}
- if (dev->driver_id != cryptodev_driver_id) {
+ if (dev->driver_id != cryptodev_scheduler_driver_id) {
CR_SCHED_LOG(ERR, "Operation not supported");
return -ENOTSUP;
}
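
The renamed cryptodev_scheduler_driver_id is what the guard above compares against, so these calls only succeed on a scheduler vdev. A minimal usage sketch (slave device ids are placeholders) of attaching two slaves and selecting round-robin mode, which after this change loads crypto_scheduler_roundrobin:

#include <rte_cryptodev.h>
#include <rte_cryptodev_scheduler.h>

static int
setup_scheduler(uint8_t scheduler_id)
{
	/* attach two previously probed crypto devices as slaves */
	if (rte_cryptodev_scheduler_slave_attach(scheduler_id, 1) < 0 ||
	    rte_cryptodev_scheduler_slave_attach(scheduler_id, 2) < 0)
		return -1;

	/* round-robin mode resolves to crypto_scheduler_roundrobin */
	return rte_cryptodev_scheduler_mode_set(scheduler_id,
			CDEV_SCHED_MODE_ROUNDROBIN);
}
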
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
index 3faea409..9a72a90a 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
@@ -270,13 +270,13 @@ struct rte_cryptodev_scheduler {
};
/** Round-robin mode scheduler */
-extern struct rte_cryptodev_scheduler *roundrobin_scheduler;
+extern struct rte_cryptodev_scheduler *crypto_scheduler_roundrobin;
/** Packet-size based distribution mode scheduler */
-extern struct rte_cryptodev_scheduler *pkt_size_based_distr_scheduler;
+extern struct rte_cryptodev_scheduler *crypto_scheduler_pkt_size_based_distr;
/** Fail-over mode scheduler */
-extern struct rte_cryptodev_scheduler *failover_scheduler;
+extern struct rte_cryptodev_scheduler *crypto_scheduler_failover;
/** multi-core mode scheduler */
-extern struct rte_cryptodev_scheduler *multicore_scheduler;
+extern struct rte_cryptodev_scheduler *crypto_scheduler_multicore;
#ifdef __cplusplus
}
diff --git a/drivers/crypto/scheduler/scheduler_failover.c b/drivers/crypto/scheduler/scheduler_failover.c
index ddfb5b81..3a023b8a 100644
--- a/drivers/crypto/scheduler/scheduler_failover.c
+++ b/drivers/crypto/scheduler/scheduler_failover.c
@@ -197,7 +197,7 @@ scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
return 0;
}
-struct rte_cryptodev_scheduler_ops scheduler_fo_ops = {
+static struct rte_cryptodev_scheduler_ops scheduler_fo_ops = {
slave_attach,
slave_detach,
scheduler_start,
@@ -208,7 +208,7 @@ struct rte_cryptodev_scheduler_ops scheduler_fo_ops = {
NULL /*option_get */
};
-struct rte_cryptodev_scheduler fo_scheduler = {
+static struct rte_cryptodev_scheduler fo_scheduler = {
.name = "failover-scheduler",
.description = "scheduler which enqueues to the primary slave, "
"and only then enqueues to the secondary slave "
@@ -217,4 +217,4 @@ struct rte_cryptodev_scheduler fo_scheduler = {
.ops = &scheduler_fo_ops
};
-struct rte_cryptodev_scheduler *failover_scheduler = &fo_scheduler;
+struct rte_cryptodev_scheduler *crypto_scheduler_failover = &fo_scheduler;
diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c
index d410e69d..7808e9a3 100644
--- a/drivers/crypto/scheduler/scheduler_multicore.c
+++ b/drivers/crypto/scheduler/scheduler_multicore.c
@@ -392,7 +392,7 @@ exit:
return -1;
}
-struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
+static struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
slave_attach,
slave_detach,
scheduler_start,
@@ -403,11 +403,11 @@ struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
NULL /* option_get */
};
-struct rte_cryptodev_scheduler mc_scheduler = {
+static struct rte_cryptodev_scheduler mc_scheduler = {
.name = "multicore-scheduler",
.description = "scheduler which will run burst across multiple cpu cores",
.mode = CDEV_SCHED_MODE_MULTICORE,
.ops = &scheduler_mc_ops
};
-struct rte_cryptodev_scheduler *multicore_scheduler = &mc_scheduler;
+struct rte_cryptodev_scheduler *crypto_scheduler_multicore = &mc_scheduler;
diff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
index 74129b66..45c8dceb 100644
--- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
+++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
@@ -398,7 +398,7 @@ scheduler_option_get(struct rte_cryptodev *dev, uint32_t option_type,
return 0;
}
-struct rte_cryptodev_scheduler_ops scheduler_ps_ops = {
+static struct rte_cryptodev_scheduler_ops scheduler_ps_ops = {
slave_attach,
slave_detach,
scheduler_start,
@@ -409,7 +409,7 @@ struct rte_cryptodev_scheduler_ops scheduler_ps_ops = {
scheduler_option_get
};
-struct rte_cryptodev_scheduler psd_scheduler = {
+static struct rte_cryptodev_scheduler psd_scheduler = {
.name = "packet-size-based-scheduler",
.description = "scheduler which will distribute crypto op "
"burst based on the packet size",
@@ -417,4 +417,4 @@ struct rte_cryptodev_scheduler psd_scheduler = {
.ops = &scheduler_ps_ops
};
-struct rte_cryptodev_scheduler *pkt_size_based_distr_scheduler = &psd_scheduler;
+struct rte_cryptodev_scheduler *crypto_scheduler_pkt_size_based_distr = &psd_scheduler;
diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c
index a9221a94..20198ccb 100644
--- a/drivers/crypto/scheduler/scheduler_pmd.c
+++ b/drivers/crypto/scheduler/scheduler_pmd.c
@@ -14,7 +14,7 @@
#include "rte_cryptodev_scheduler.h"
#include "scheduler_pmd_private.h"
-uint8_t cryptodev_driver_id;
+uint8_t cryptodev_scheduler_driver_id;
struct scheduler_init_params {
struct rte_cryptodev_pmd_init_params def_p;
@@ -38,7 +38,7 @@ struct scheduler_init_params {
#define RTE_CRYPTODEV_VDEV_COREMASK ("coremask")
#define RTE_CRYPTODEV_VDEV_CORELIST ("corelist")
-const char *scheduler_valid_params[] = {
+static const char * const scheduler_valid_params[] = {
RTE_CRYPTODEV_VDEV_NAME,
RTE_CRYPTODEV_VDEV_SLAVE,
RTE_CRYPTODEV_VDEV_MODE,
@@ -91,7 +91,7 @@ cryptodev_scheduler_create(const char *name,
return -EFAULT;
}
- dev->driver_id = cryptodev_driver_id;
+ dev->driver_id = cryptodev_scheduler_driver_id;
dev->dev_ops = rte_crypto_scheduler_pmd_ops;
sched_ctx = dev->data->dev_private;
@@ -569,4 +569,4 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SCHEDULER_PMD,
"slave=<name>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(scheduler_crypto_drv,
cryptodev_scheduler_pmd_drv.driver,
- cryptodev_driver_id);
+ cryptodev_scheduler_driver_id);
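
The parameters registered above can also drive the PMD from the EAL command line; an illustrative invocation (slave names and the mode spelling are assumptions, depending on which crypto vdevs exist in the run) would be:

	--vdev 'crypto_scheduler,slave=crypto_aesni_mb0,slave=crypto_aesni_mb1,mode=round-robin'
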
diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c b/drivers/crypto/scheduler/scheduler_pmd_ops.c
index 778071ca..939105aa 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_ops.c
+++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c
@@ -522,7 +522,7 @@ scheduler_pmd_sym_session_clear(struct rte_cryptodev *dev,
}
}
-struct rte_cryptodev_ops scheduler_pmd_ops = {
+static struct rte_cryptodev_ops scheduler_pmd_ops = {
.dev_configure = scheduler_pmd_config,
.dev_start = scheduler_pmd_start,
.dev_stop = scheduler_pmd_stop,
diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
index d5e602a2..3ed480c1 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -63,7 +63,7 @@ struct scheduler_qp_ctx {
} __rte_cache_aligned;
-extern uint8_t cryptodev_driver_id;
+extern uint8_t cryptodev_scheduler_driver_id;
static __rte_always_inline uint16_t
get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
diff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c
index c7082a64..9b891d97 100644
--- a/drivers/crypto/scheduler/scheduler_roundrobin.c
+++ b/drivers/crypto/scheduler/scheduler_roundrobin.c
@@ -190,7 +190,7 @@ scheduler_create_private_ctx(__rte_unused struct rte_cryptodev *dev)
return 0;
}
-struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
+static struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
slave_attach,
slave_detach,
scheduler_start,
@@ -201,7 +201,7 @@ struct rte_cryptodev_scheduler_ops scheduler_rr_ops = {
NULL /* option_get */
};
-struct rte_cryptodev_scheduler scheduler = {
+static struct rte_cryptodev_scheduler scheduler = {
.name = "roundrobin-scheduler",
.description = "scheduler which will round robin burst across "
"slave crypto devices",
@@ -209,4 +209,4 @@ struct rte_cryptodev_scheduler scheduler = {
.ops = &scheduler_rr_ops
};
-struct rte_cryptodev_scheduler *roundrobin_scheduler = &scheduler;
+struct rte_cryptodev_scheduler *crypto_scheduler_roundrobin = &scheduler;
diff --git a/drivers/crypto/zuc/meson.build b/drivers/crypto/zuc/meson.build
new file mode 100644
index 00000000..b8ca7107
--- /dev/null
+++ b/drivers/crypto/zuc/meson.build
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Intel Corporation
+
+lib = cc.find_library('libsso_zuc', required: false)
+if not lib.found()
+ build = false
+else
+ ext_deps += lib
+endif
+
+sources = files('rte_zuc_pmd.c', 'rte_zuc_pmd_ops.c')
+deps += ['bus_vdev']