author     Luca Boccassi <luca.boccassi@gmail.com>	2018-11-01 11:59:50 +0000
committer  Luca Boccassi <luca.boccassi@gmail.com>	2018-11-01 12:00:19 +0000
commit     8d01b9cd70a67cdafd5b965a70420c3bd7fb3f82 (patch)
tree       208e3bc33c220854d89d010e3abf720a2e62e546 /drivers/crypto/dpaa2_sec
parent     b63264c8342e6a1b6971c79550d2af2024b6a4de (diff)

New upstream version 18.11-rc1 (upstream/18.11-rc1)

Change-Id: Iaa71986dd6332e878d8f4bf493101b2bbc6313bb
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/crypto/dpaa2_sec')
-rw-r--r--  drivers/crypto/dpaa2_sec/Makefile                      |   11
-rw-r--r--  drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c            |  788
-rw-r--r--  drivers/crypto/dpaa2_sec/dpaa2_sec_event.h             |   18
-rw-r--r--  drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h              |  210
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/desc.h                     |  816
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/desc/algo.h                |   58
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/desc/ipsec.h               |  195
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/desc/pdcp.h                | 2796
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h         |  346
-rw-r--r--  drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h     |   22
-rw-r--r--  drivers/crypto/dpaa2_sec/mc/dpseci.c                   |  128
-rw-r--r--  drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h               |   25
-rw-r--r--  drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h           |   73
-rw-r--r--  drivers/crypto/dpaa2_sec/meson.build                   |    2
-rw-r--r--  drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map |    8
15 files changed, 4477 insertions(+), 1019 deletions(-)
diff --git a/drivers/crypto/dpaa2_sec/Makefile b/drivers/crypto/dpaa2_sec/Makefile
index da3d8f84..f537f76a 100644
--- a/drivers/crypto/dpaa2_sec/Makefile
+++ b/drivers/crypto/dpaa2_sec/Makefile
@@ -4,13 +4,6 @@
#
include $(RTE_SDK)/mk/rte.vars.mk
-
-ifneq ($(MAKECMDGOALS),clean)
-ifneq ($(CONFIG_RTE_LIBRTE_SECURITY),y)
-$(error "RTE_LIBRTE_SECURITY is required to build RTE_LIBRTE_PMD_DPAA2_SEC")
-endif
-endif
-
#
# library name
#
@@ -20,7 +13,6 @@ LIB = librte_pmd_dpaa2_sec.a
CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-CFLAGS += -D _GNU_SOURCE
ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
ifeq ($(shell test $(GCC_VERSION) -gt 70 && echo 1), 1)
@@ -41,7 +33,7 @@ CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
EXPORT_MAP := rte_pmd_dpaa2_sec_version.map
# library version
-LIBABIVER := 1
+LIBABIVER := 2
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec_dpseci.c
@@ -51,5 +43,6 @@ LDLIBS += -lrte_bus_fslmc
LDLIBS += -lrte_mempool_dpaa2
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_common_dpaax
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 2a3c61c6..6095c602 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016 NXP
+ * Copyright 2016-2018 NXP
*
*/
@@ -10,7 +10,6 @@
#include <rte_mbuf.h>
#include <rte_cryptodev.h>
-#include <rte_security_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
@@ -24,10 +23,12 @@
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>
+#include <fsl_dpopr.h>
#include <fsl_dpseci.h>
#include <fsl_mc_sys.h>
#include "dpaa2_sec_priv.h"
+#include "dpaa2_sec_event.h"
#include "dpaa2_sec_logs.h"
/* Required types */
@@ -35,6 +36,7 @@ typedef uint64_t dma_addr_t;
/* RTA header files */
#include <hw/desc/ipsec.h>
+#include <hw/desc/pdcp.h>
#include <hw/desc/algo.h>
/* Minimum job descriptor consists of a oneword job descriptor HEADER and
@@ -62,11 +64,87 @@ static uint8_t cryptodev_driver_id;
int dpaa2_logtype_sec;
static inline int
+build_proto_compound_fd(dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct qbman_fle *fle, *ip_fle, *op_fle;
+ struct sec_flow_context *flc;
+ struct rte_mbuf *src_mbuf = sym_op->m_src;
+ struct rte_mbuf *dst_mbuf = sym_op->m_dst;
+ int retval;
+
+ if (!dst_mbuf)
+ dst_mbuf = src_mbuf;
+
+ /* Save the shared descriptor */
+ flc = &priv->flc_desc[0].flc;
+
+ /* we are using the first FLE entry to store Mbuf */
+ retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ if (retval) {
+ DPAA2_SEC_ERR("Memory alloc failed");
+ return -1;
+ }
+ memset(fle, 0, FLE_POOL_BUF_SIZE);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
+
+ op_fle = fle + 1;
+ ip_fle = fle + 2;
+
+ if (likely(bpid < MAX_BPID)) {
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FLE_BPID(op_fle, bpid);
+ DPAA2_SET_FLE_BPID(ip_fle, bpid);
+ } else {
+ DPAA2_SET_FD_IVP(fd);
+ DPAA2_SET_FLE_IVP(op_fle);
+ DPAA2_SET_FLE_IVP(ip_fle);
+ }
+
+ /* Configure FD as a FRAME LIST */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(op_fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc);
+
+ /* Configure Output FLE with dst mbuf data */
+ DPAA2_SET_FLE_ADDR(op_fle, DPAA2_MBUF_VADDR_TO_IOVA(dst_mbuf));
+ DPAA2_SET_FLE_OFFSET(op_fle, dst_mbuf->data_off);
+ DPAA2_SET_FLE_LEN(op_fle, dst_mbuf->buf_len);
+
+ /* Configure Input FLE with src mbuf data */
+ DPAA2_SET_FLE_ADDR(ip_fle, DPAA2_MBUF_VADDR_TO_IOVA(src_mbuf));
+ DPAA2_SET_FLE_OFFSET(ip_fle, src_mbuf->data_off);
+ DPAA2_SET_FLE_LEN(ip_fle, src_mbuf->pkt_len);
+
+ DPAA2_SET_FD_LEN(fd, ip_fle->length);
+ DPAA2_SET_FLE_FIN(ip_fle);
+
+#ifdef ENABLE_HFN_OVERRIDE
+ if (sess->ctxt_type == DPAA2_SEC_PDCP && sess->pdcp.hfn_ovd) {
+ /* enable HFN override */
+ DPAA2_SET_FLE_INTERNAL_JD(ip_fle, sess->pdcp.hfn_ovd);
+ DPAA2_SET_FLE_INTERNAL_JD(op_fle, sess->pdcp.hfn_ovd);
+ DPAA2_SET_FD_INTERNAL_JD(fd, sess->pdcp.hfn_ovd);
+ }
+#endif
+
+ return 0;
+
+}
+
+static inline int
build_proto_fd(dpaa2_sec_session *sess,
struct rte_crypto_op *op,
struct qbman_fd *fd, uint16_t bpid)
{
struct rte_crypto_sym_op *sym_op = op->sym;
+ if (sym_op->m_dst)
+ return build_proto_compound_fd(sess, op, fd, bpid);
+
struct ctxt_priv *priv = sess->ctxt;
struct sec_flow_context *flc;
struct rte_mbuf *mbuf = sym_op->m_src;
@@ -1124,6 +1202,9 @@ build_sec_fd(struct rte_crypto_op *op,
case DPAA2_SEC_IPSEC:
ret = build_proto_fd(sess, op, fd, bpid);
break;
+ case DPAA2_SEC_PDCP:
+ ret = build_proto_compound_fd(sess, op, fd, bpid);
+ break;
case DPAA2_SEC_HASH_CIPHER:
default:
DPAA2_SEC_ERR("error: Unsupported session");
@@ -1145,6 +1226,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
struct qbman_swp *swp;
uint16_t num_tx = 0;
+ uint32_t flags[MAX_TX_RING_SLOTS] = {0};
/*todo - need to support multiple buffer pools */
uint16_t bpid;
struct rte_mempool *mb_pool;
@@ -1172,9 +1254,19 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
swp = DPAA2_PER_LCORE_PORTAL;
while (nb_ops) {
- frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;
+ frames_to_send = (nb_ops > dpaa2_eqcr_size) ?
+ dpaa2_eqcr_size : nb_ops;
for (loop = 0; loop < frames_to_send; loop++) {
+ if ((*ops)->sym->m_src->seqn) {
+ uint8_t dqrr_index = (*ops)->sym->m_src->seqn - 1;
+
+ flags[loop] = QBMAN_ENQUEUE_FLAG_DCA | dqrr_index;
+ DPAA2_PER_LCORE_DQRR_SIZE--;
+ DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
+ (*ops)->sym->m_src->seqn = DPAA2_INVALID_MBUF_SEQN;
+ }
+
/*Clear the unused FD fields before sending*/
memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
mb_pool = (*ops)->sym->m_src->pool;
@@ -1191,7 +1283,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
while (loop < frames_to_send) {
loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
&fd_arr[loop],
- NULL,
+ &flags[loop],
frames_to_send - loop);
}
@@ -1216,6 +1308,9 @@ sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
+ diff = len - mbuf->pkt_len;
+ mbuf->pkt_len += diff;
+ mbuf->data_len += diff;
op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
mbuf->buf_iova = op->sym->aead.digest.phys_addr;
op->sym->aead.digest.phys_addr = 0L;
@@ -1226,9 +1321,6 @@ sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
else
mbuf->data_off += SEC_FLC_DHR_INBOUND;
- diff = len - mbuf->pkt_len;
- mbuf->pkt_len += diff;
- mbuf->data_len += diff;
return op;
}
@@ -1273,6 +1365,16 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
} else
dst = src;
+ if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
+ dpaa2_sec_session *sess = (dpaa2_sec_session *)
+ get_sec_session_private_data(op->sym->sec_session);
+ if (sess->ctxt_type == DPAA2_SEC_IPSEC) {
+ uint16_t len = DPAA2_GET_FD_LEN(fd);
+ dst->pkt_len = len;
+ dst->data_len = len;
+ }
+ }
+
DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
" fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
(void *)dst,
@@ -1321,8 +1423,8 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
qbman_pull_desc_clear(&pulldesc);
qbman_pull_desc_set_numframes(&pulldesc,
- (nb_ops > DPAA2_DQRR_RING_SIZE) ?
- DPAA2_DQRR_RING_SIZE : nb_ops);
+ (nb_ops > dpaa2_dqrr_size) ?
+ dpaa2_dqrr_size : nb_ops);
qbman_pull_desc_set_fq(&pulldesc, fqid);
qbman_pull_desc_set_storage(&pulldesc, dq_storage,
(dma_addr_t)DPAA2_VADDR_TO_IOVA(dq_storage),
@@ -2099,6 +2201,7 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
return -1;
}
+ memset(session, 0, sizeof(dpaa2_sec_session));
/* Default IV length = 0 */
session->iv.length = 0;
@@ -2139,107 +2242,127 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
}
static int
-dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
- struct rte_security_session_conf *conf,
- void *sess)
+dpaa2_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
+ dpaa2_sec_session *session,
+ struct alginfo *aeaddata)
{
- struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
- struct rte_crypto_auth_xform *auth_xform;
- struct rte_crypto_cipher_xform *cipher_xform;
- dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
- struct ctxt_priv *priv;
- struct ipsec_encap_pdb encap_pdb;
- struct ipsec_decap_pdb decap_pdb;
- struct alginfo authdata, cipherdata;
- int bufsize;
- struct sec_flow_context *flc;
-
PMD_INIT_FUNC_TRACE();
- if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
- cipher_xform = &conf->crypto_xform->cipher;
- auth_xform = &conf->crypto_xform->next->auth;
- } else {
- auth_xform = &conf->crypto_xform->auth;
- cipher_xform = &conf->crypto_xform->next->cipher;
+ session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
+ DPAA2_SEC_ERR("No Memory for aead key");
+ return -1;
}
- priv = (struct ctxt_priv *)rte_zmalloc(NULL,
- sizeof(struct ctxt_priv) +
- sizeof(struct sec_flc_desc),
- RTE_CACHE_LINE_SIZE);
+ memcpy(session->aead_key.data, aead_xform->key.data,
+ aead_xform->key.length);
- if (priv == NULL) {
- DPAA2_SEC_ERR("No memory for priv CTXT");
- return -ENOMEM;
- }
+ session->digest_length = aead_xform->digest_length;
+ session->aead_key.length = aead_xform->key.length;
- flc = &priv->flc_desc[0].flc;
+ aeaddata->key = (size_t)session->aead_key.data;
+ aeaddata->keylen = session->aead_key.length;
+ aeaddata->key_enc_flags = 0;
+ aeaddata->key_type = RTA_DATA_IMM;
- session->ctxt_type = DPAA2_SEC_IPSEC;
- session->cipher_key.data = rte_zmalloc(NULL,
- cipher_xform->key.length,
- RTE_CACHE_LINE_SIZE);
- if (session->cipher_key.data == NULL &&
- cipher_xform->key.length > 0) {
- DPAA2_SEC_ERR("No Memory for cipher key");
- rte_free(priv);
- return -ENOMEM;
+ switch (aead_xform->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ aeaddata->algtype = OP_ALG_ALGSEL_AES;
+ aeaddata->algmode = OP_ALG_AAI_GCM;
+ session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ aeaddata->algtype = OP_ALG_ALGSEL_AES;
+ aeaddata->algmode = OP_ALG_AAI_CCM;
+ session->aead_alg = RTE_CRYPTO_AEAD_AES_CCM;
+ break;
+ default:
+ DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
+ aead_xform->algo);
+ return -1;
}
+ session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
- session->cipher_key.length = cipher_xform->key.length;
- session->auth_key.data = rte_zmalloc(NULL,
- auth_xform->key.length,
- RTE_CACHE_LINE_SIZE);
- if (session->auth_key.data == NULL &&
- auth_xform->key.length > 0) {
- DPAA2_SEC_ERR("No Memory for auth key");
- rte_free(session->cipher_key.data);
- rte_free(priv);
- return -ENOMEM;
+ return 0;
+}
+
+static int
+dpaa2_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
+ struct rte_crypto_auth_xform *auth_xform,
+ dpaa2_sec_session *session,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ if (cipher_xform) {
+ session->cipher_key.data = rte_zmalloc(NULL,
+ cipher_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL &&
+ cipher_xform->key.length > 0) {
+ DPAA2_SEC_ERR("No Memory for cipher key");
+ return -ENOMEM;
+ }
+
+ session->cipher_key.length = cipher_xform->key.length;
+ memcpy(session->cipher_key.data, cipher_xform->key.data,
+ cipher_xform->key.length);
+ session->cipher_alg = cipher_xform->algo;
+ } else {
+ session->cipher_key.data = NULL;
+ session->cipher_key.length = 0;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
+ }
+
+ if (auth_xform) {
+ session->auth_key.data = rte_zmalloc(NULL,
+ auth_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL &&
+ auth_xform->key.length > 0) {
+ DPAA2_SEC_ERR("No Memory for auth key");
+ return -ENOMEM;
+ }
+ session->auth_key.length = auth_xform->key.length;
+ memcpy(session->auth_key.data, auth_xform->key.data,
+ auth_xform->key.length);
+ session->auth_alg = auth_xform->algo;
+ } else {
+ session->auth_key.data = NULL;
+ session->auth_key.length = 0;
+ session->auth_alg = RTE_CRYPTO_AUTH_NULL;
}
- session->auth_key.length = auth_xform->key.length;
- memcpy(session->cipher_key.data, cipher_xform->key.data,
- cipher_xform->key.length);
- memcpy(session->auth_key.data, auth_xform->key.data,
- auth_xform->key.length);
- authdata.key = (size_t)session->auth_key.data;
- authdata.keylen = session->auth_key.length;
- authdata.key_enc_flags = 0;
- authdata.key_type = RTA_DATA_IMM;
- switch (auth_xform->algo) {
+ authdata->key = (size_t)session->auth_key.data;
+ authdata->keylen = session->auth_key.length;
+ authdata->key_enc_flags = 0;
+ authdata->key_type = RTA_DATA_IMM;
+ switch (session->auth_alg) {
case RTE_CRYPTO_AUTH_SHA1_HMAC:
- authdata.algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
- authdata.algmode = OP_ALG_AAI_HMAC;
- session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ authdata->algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
+ authdata->algmode = OP_ALG_AAI_HMAC;
break;
case RTE_CRYPTO_AUTH_MD5_HMAC:
- authdata.algtype = OP_PCL_IPSEC_HMAC_MD5_96;
- authdata.algmode = OP_ALG_AAI_HMAC;
- session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+ authdata->algtype = OP_PCL_IPSEC_HMAC_MD5_96;
+ authdata->algmode = OP_ALG_AAI_HMAC;
break;
case RTE_CRYPTO_AUTH_SHA256_HMAC:
- authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
- authdata.algmode = OP_ALG_AAI_HMAC;
- session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
+ authdata->algmode = OP_ALG_AAI_HMAC;
break;
case RTE_CRYPTO_AUTH_SHA384_HMAC:
- authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
- authdata.algmode = OP_ALG_AAI_HMAC;
- session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+ authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
+ authdata->algmode = OP_ALG_AAI_HMAC;
break;
case RTE_CRYPTO_AUTH_SHA512_HMAC:
- authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
- authdata.algmode = OP_ALG_AAI_HMAC;
- session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+ authdata->algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
+ authdata->algmode = OP_ALG_AAI_HMAC;
break;
case RTE_CRYPTO_AUTH_AES_CMAC:
- authdata.algtype = OP_PCL_IPSEC_AES_CMAC_96;
- session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
+ authdata->algtype = OP_PCL_IPSEC_AES_CMAC_96;
break;
case RTE_CRYPTO_AUTH_NULL:
- authdata.algtype = OP_PCL_IPSEC_HMAC_NULL;
- session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+ authdata->algtype = OP_PCL_IPSEC_HMAC_NULL;
break;
case RTE_CRYPTO_AUTH_SHA224_HMAC:
case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
@@ -2255,50 +2378,119 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
- auth_xform->algo);
- goto out;
+ session->auth_alg);
+ return -1;
default:
DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
- auth_xform->algo);
- goto out;
+ session->auth_alg);
+ return -1;
}
- cipherdata.key = (size_t)session->cipher_key.data;
- cipherdata.keylen = session->cipher_key.length;
- cipherdata.key_enc_flags = 0;
- cipherdata.key_type = RTA_DATA_IMM;
+ cipherdata->key = (size_t)session->cipher_key.data;
+ cipherdata->keylen = session->cipher_key.length;
+ cipherdata->key_enc_flags = 0;
+ cipherdata->key_type = RTA_DATA_IMM;
- switch (cipher_xform->algo) {
+ switch (session->cipher_alg) {
case RTE_CRYPTO_CIPHER_AES_CBC:
- cipherdata.algtype = OP_PCL_IPSEC_AES_CBC;
- cipherdata.algmode = OP_ALG_AAI_CBC;
- session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+ cipherdata->algtype = OP_PCL_IPSEC_AES_CBC;
+ cipherdata->algmode = OP_ALG_AAI_CBC;
break;
case RTE_CRYPTO_CIPHER_3DES_CBC:
- cipherdata.algtype = OP_PCL_IPSEC_3DES;
- cipherdata.algmode = OP_ALG_AAI_CBC;
- session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+ cipherdata->algtype = OP_PCL_IPSEC_3DES;
+ cipherdata->algmode = OP_ALG_AAI_CBC;
break;
case RTE_CRYPTO_CIPHER_AES_CTR:
- cipherdata.algtype = OP_PCL_IPSEC_AES_CTR;
- cipherdata.algmode = OP_ALG_AAI_CTR;
- session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+ cipherdata->algtype = OP_PCL_IPSEC_AES_CTR;
+ cipherdata->algmode = OP_ALG_AAI_CTR;
break;
case RTE_CRYPTO_CIPHER_NULL:
- cipherdata.algtype = OP_PCL_IPSEC_NULL;
+ cipherdata->algtype = OP_PCL_IPSEC_NULL;
break;
case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_ECB:
case RTE_CRYPTO_CIPHER_KASUMI_F8:
DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
- cipher_xform->algo);
- goto out;
+ session->cipher_alg);
+ return -1;
default:
DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
- cipher_xform->algo);
+ session->cipher_alg);
+ return -1;
+ }
+
+ return 0;
+}
+
+#ifdef RTE_LIBRTE_SECURITY_TEST
+static uint8_t aes_cbc_iv[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f };
+#endif
+
+static int
+dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
+ struct rte_security_session_conf *conf,
+ void *sess)
+{
+ struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
+ struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ struct rte_crypto_auth_xform *auth_xform = NULL;
+ struct rte_crypto_aead_xform *aead_xform = NULL;
+ dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
+ struct ctxt_priv *priv;
+ struct ipsec_encap_pdb encap_pdb;
+ struct ipsec_decap_pdb decap_pdb;
+ struct alginfo authdata, cipherdata;
+ int bufsize;
+ struct sec_flow_context *flc;
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
+ int ret = -1;
+
+ PMD_INIT_FUNC_TRACE();
+
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) +
+ sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+
+ if (priv == NULL) {
+ DPAA2_SEC_ERR("No memory for priv CTXT");
+ return -ENOMEM;
+ }
+
+ priv->fle_pool = dev_priv->fle_pool;
+ flc = &priv->flc_desc[0].flc;
+
+ memset(session, 0, sizeof(dpaa2_sec_session));
+
+ if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ cipher_xform = &conf->crypto_xform->cipher;
+ if (conf->crypto_xform->next)
+ auth_xform = &conf->crypto_xform->next->auth;
+ ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
+ session, &cipherdata, &authdata);
+ } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ auth_xform = &conf->crypto_xform->auth;
+ if (conf->crypto_xform->next)
+ cipher_xform = &conf->crypto_xform->next->cipher;
+ ret = dpaa2_sec_ipsec_proto_init(cipher_xform, auth_xform,
+ session, &cipherdata, &authdata);
+ } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ aead_xform = &conf->crypto_xform->aead;
+ ret = dpaa2_sec_ipsec_aead_init(aead_xform,
+ session, &cipherdata);
+ } else {
+ DPAA2_SEC_ERR("XFORM not specified");
+ ret = -EINVAL;
+ goto out;
+ }
+ if (ret) {
+ DPAA2_SEC_ERR("Failed to process xform");
goto out;
}
+ session->ctxt_type = DPAA2_SEC_IPSEC;
if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
struct ip ip4_hdr;
@@ -2310,7 +2502,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
ip4_hdr.ip_id = 0;
ip4_hdr.ip_off = 0;
ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
- ip4_hdr.ip_p = 0x32;
+ ip4_hdr.ip_p = IPPROTO_ESP;
ip4_hdr.ip_sum = 0;
ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
@@ -2322,13 +2514,14 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
PDBOPTS_ESP_OIHI_PDB_INL |
PDBOPTS_ESP_IVSRC |
- PDBHMO_ESP_ENCAP_DTTL;
+ PDBHMO_ESP_ENCAP_DTTL |
+ PDBHMO_ESP_SNR;
encap_pdb.spi = ipsec_xform->spi;
encap_pdb.ip_hdr_len = sizeof(struct ip);
session->dir = DIR_ENC;
bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
- 1, 0, &encap_pdb,
+ 1, 0, SHR_SERIAL, &encap_pdb,
(uint8_t *)&ip4_hdr,
&cipherdata, &authdata);
} else if (ipsec_xform->direction ==
@@ -2338,7 +2531,8 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
decap_pdb.options = sizeof(struct ip) << 16;
session->dir = DIR_DEC;
bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
- 1, 0, &decap_pdb, &cipherdata, &authdata);
+ 1, 0, SHR_SERIAL,
+ &decap_pdb, &cipherdata, &authdata);
} else
goto out;
@@ -2372,6 +2566,244 @@ out:
rte_free(session->auth_key.data);
rte_free(session->cipher_key.data);
rte_free(priv);
+ return ret;
+}
+
+static int
+dpaa2_sec_set_pdcp_session(struct rte_cryptodev *dev,
+ struct rte_security_session_conf *conf,
+ void *sess)
+{
+ struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
+ struct rte_crypto_sym_xform *xform = conf->crypto_xform;
+ struct rte_crypto_auth_xform *auth_xform = NULL;
+ struct rte_crypto_cipher_xform *cipher_xform;
+ dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
+ struct ctxt_priv *priv;
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
+ struct alginfo authdata, cipherdata;
+ int bufsize = -1;
+ struct sec_flow_context *flc;
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ int swap = true;
+#else
+ int swap = false;
+#endif
+
+ PMD_INIT_FUNC_TRACE();
+
+ memset(session, 0, sizeof(dpaa2_sec_session));
+
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) +
+ sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+
+ if (priv == NULL) {
+ DPAA2_SEC_ERR("No memory for priv CTXT");
+ return -ENOMEM;
+ }
+
+ priv->fle_pool = dev_priv->fle_pool;
+ flc = &priv->flc_desc[0].flc;
+
+ /* find xfrm types */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
+ cipher_xform = &xform->cipher;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ session->ext_params.aead_ctxt.auth_cipher_text = true;
+ cipher_xform = &xform->cipher;
+ auth_xform = &xform->next->auth;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ session->ext_params.aead_ctxt.auth_cipher_text = false;
+ cipher_xform = &xform->next->cipher;
+ auth_xform = &xform->auth;
+ } else {
+ DPAA2_SEC_ERR("Invalid crypto type");
+ return -EINVAL;
+ }
+
+ session->ctxt_type = DPAA2_SEC_PDCP;
+ if (cipher_xform) {
+ session->cipher_key.data = rte_zmalloc(NULL,
+ cipher_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL &&
+ cipher_xform->key.length > 0) {
+ DPAA2_SEC_ERR("No Memory for cipher key");
+ rte_free(priv);
+ return -ENOMEM;
+ }
+ session->cipher_key.length = cipher_xform->key.length;
+ memcpy(session->cipher_key.data, cipher_xform->key.data,
+ cipher_xform->key.length);
+ session->dir =
+ (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+ session->cipher_alg = cipher_xform->algo;
+ } else {
+ session->cipher_key.data = NULL;
+ session->cipher_key.length = 0;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
+ session->dir = DIR_ENC;
+ }
+
+ session->pdcp.domain = pdcp_xform->domain;
+ session->pdcp.bearer = pdcp_xform->bearer;
+ session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
+ session->pdcp.sn_size = pdcp_xform->sn_size;
+#ifdef ENABLE_HFN_OVERRIDE
+ session->pdcp.hfn_ovd = pdcp_xform->hfn_ovd;
+#endif
+ session->pdcp.hfn = pdcp_xform->hfn;
+ session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
+
+ cipherdata.key = (size_t)session->cipher_key.data;
+ cipherdata.keylen = session->cipher_key.length;
+ cipherdata.key_enc_flags = 0;
+ cipherdata.key_type = RTA_DATA_IMM;
+
+ switch (session->cipher_alg) {
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ cipherdata.algtype = PDCP_CIPHER_TYPE_SNOW;
+ break;
+ case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+ cipherdata.algtype = PDCP_CIPHER_TYPE_ZUC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ cipherdata.algtype = PDCP_CIPHER_TYPE_AES;
+ break;
+ case RTE_CRYPTO_CIPHER_NULL:
+ cipherdata.algtype = PDCP_CIPHER_TYPE_NULL;
+ break;
+ default:
+ DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
+ session->cipher_alg);
+ goto out;
+ }
+
+ /* Auth is only applicable for control mode operation. */
+ if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
+ if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5) {
+ DPAA2_SEC_ERR(
+ "PDCP Seq Num size should be 5 bits for cmode");
+ goto out;
+ }
+ if (auth_xform) {
+ session->auth_key.data = rte_zmalloc(NULL,
+ auth_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL &&
+ auth_xform->key.length > 0) {
+ DPAA2_SEC_ERR("No Memory for auth key");
+ rte_free(session->cipher_key.data);
+ rte_free(priv);
+ return -ENOMEM;
+ }
+ session->auth_key.length = auth_xform->key.length;
+ memcpy(session->auth_key.data, auth_xform->key.data,
+ auth_xform->key.length);
+ session->auth_alg = auth_xform->algo;
+ } else {
+ session->auth_key.data = NULL;
+ session->auth_key.length = 0;
+ session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+ }
+ authdata.key = (size_t)session->auth_key.data;
+ authdata.keylen = session->auth_key.length;
+ authdata.key_enc_flags = 0;
+ authdata.key_type = RTA_DATA_IMM;
+
+ switch (session->auth_alg) {
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ authdata.algtype = PDCP_AUTH_TYPE_SNOW;
+ break;
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ authdata.algtype = PDCP_AUTH_TYPE_ZUC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ authdata.algtype = PDCP_AUTH_TYPE_AES;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ authdata.algtype = PDCP_AUTH_TYPE_NULL;
+ break;
+ default:
+ DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
+ session->auth_alg);
+ goto out;
+ }
+
+ if (session->dir == DIR_ENC)
+ bufsize = cnstr_shdsc_pdcp_c_plane_encap(
+ priv->flc_desc[0].desc, 1, swap,
+ pdcp_xform->hfn,
+ pdcp_xform->bearer,
+ pdcp_xform->pkt_dir,
+ pdcp_xform->hfn_threshold,
+ &cipherdata, &authdata,
+ 0);
+ else if (session->dir == DIR_DEC)
+ bufsize = cnstr_shdsc_pdcp_c_plane_decap(
+ priv->flc_desc[0].desc, 1, swap,
+ pdcp_xform->hfn,
+ pdcp_xform->bearer,
+ pdcp_xform->pkt_dir,
+ pdcp_xform->hfn_threshold,
+ &cipherdata, &authdata,
+ 0);
+ } else {
+ if (session->dir == DIR_ENC)
+ bufsize = cnstr_shdsc_pdcp_u_plane_encap(
+ priv->flc_desc[0].desc, 1, swap,
+ (enum pdcp_sn_size)pdcp_xform->sn_size,
+ pdcp_xform->hfn,
+ pdcp_xform->bearer,
+ pdcp_xform->pkt_dir,
+ pdcp_xform->hfn_threshold,
+ &cipherdata, 0);
+ else if (session->dir == DIR_DEC)
+ bufsize = cnstr_shdsc_pdcp_u_plane_decap(
+ priv->flc_desc[0].desc, 1, swap,
+ (enum pdcp_sn_size)pdcp_xform->sn_size,
+ pdcp_xform->hfn,
+ pdcp_xform->bearer,
+ pdcp_xform->pkt_dir,
+ pdcp_xform->hfn_threshold,
+ &cipherdata, 0);
+ }
+
+ if (bufsize < 0) {
+ DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+ goto out;
+ }
+
+ /* Enable the stashing control bit */
+ DPAA2_SET_FLC_RSC(flc);
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (size_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq) | 0x14);
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (size_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+
+ flc->word1_sdl = (uint8_t)bufsize;
+
+ /* Set EWS bit i.e. enable write-safe */
+ DPAA2_SET_FLC_EWS(flc);
+ /* Set BS = 1 i.e reuse input buffers as output buffers */
+ DPAA2_SET_FLC_REUSE_BS(flc);
+ /* Set FF = 10; reuse input buffers if they provide sufficient space */
+ DPAA2_SET_FLC_REUSE_FF(flc);
+
+ session->ctxt = priv;
+
+ return 0;
+out:
+ rte_free(session->auth_key.data);
+ rte_free(session->cipher_key.data);
+ rte_free(priv);
return -1;
}
@@ -2397,6 +2829,10 @@ dpaa2_sec_security_session_create(void *dev,
break;
case RTE_SECURITY_PROTOCOL_MACSEC:
return -ENOTSUP;
+ case RTE_SECURITY_PROTOCOL_PDCP:
+ ret = dpaa2_sec_set_pdcp_session(cdev, conf,
+ sess_private_data);
+ break;
default:
return -EINVAL;
}
@@ -2686,6 +3122,129 @@ void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
}
}
+static void __attribute__((hot))
+dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
+ struct rte_event *ev)
+{
+ /* Prefetching mbuf */
+ rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
+
+ /* Prefetching ipsec crypto_op stored in priv data of mbuf */
+ rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
+
+ ev->flow_id = rxq->ev.flow_id;
+ ev->sub_event_type = rxq->ev.sub_event_type;
+ ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
+ ev->op = RTE_EVENT_OP_NEW;
+ ev->sched_type = rxq->ev.sched_type;
+ ev->queue_id = rxq->ev.queue_id;
+ ev->priority = rxq->ev.priority;
+ ev->event_ptr = sec_fd_to_mbuf(fd, ((struct rte_cryptodev *)
+ (rxq->dev))->driver_id);
+
+ qbman_swp_dqrr_consume(swp, dq);
+}
+static void
+dpaa2_sec_process_atomic_event(struct qbman_swp *swp __attribute__((unused)),
+ const struct qbman_fd *fd,
+ const struct qbman_result *dq,
+ struct dpaa2_queue *rxq,
+ struct rte_event *ev)
+{
+ uint8_t dqrr_index;
+ struct rte_crypto_op *crypto_op = (struct rte_crypto_op *)ev->event_ptr;
+ /* Prefetching mbuf */
+ rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size));
+
+ /* Prefetching ipsec crypto_op stored in priv data of mbuf */
+ rte_prefetch0((void *)(size_t)(DPAA2_GET_FD_ADDR(fd)-64));
+
+ ev->flow_id = rxq->ev.flow_id;
+ ev->sub_event_type = rxq->ev.sub_event_type;
+ ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
+ ev->op = RTE_EVENT_OP_NEW;
+ ev->sched_type = rxq->ev.sched_type;
+ ev->queue_id = rxq->ev.queue_id;
+ ev->priority = rxq->ev.priority;
+
+ ev->event_ptr = sec_fd_to_mbuf(fd, ((struct rte_cryptodev *)
+ (rxq->dev))->driver_id);
+ dqrr_index = qbman_get_dqrr_idx(dq);
+ crypto_op->sym->m_src->seqn = dqrr_index + 1;
+ DPAA2_PER_LCORE_DQRR_SIZE++;
+ DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
+ DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = crypto_op->sym->m_src;
+}
+
+int
+dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
+ int qp_id,
+ uint16_t dpcon_id,
+ const struct rte_event *event)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ struct dpaa2_sec_qp *qp = dev->data->queue_pairs[qp_id];
+ struct dpseci_rx_queue_cfg cfg;
+ int ret;
+
+ if (event->sched_type == RTE_SCHED_TYPE_PARALLEL)
+ qp->rx_vq.cb = dpaa2_sec_process_parallel_event;
+ else if (event->sched_type == RTE_SCHED_TYPE_ATOMIC)
+ qp->rx_vq.cb = dpaa2_sec_process_atomic_event;
+ else
+ return -EINVAL;
+
+ memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
+ cfg.options = DPSECI_QUEUE_OPT_DEST;
+ cfg.dest_cfg.dest_type = DPSECI_DEST_DPCON;
+ cfg.dest_cfg.dest_id = dpcon_id;
+ cfg.dest_cfg.priority = event->priority;
+
+ cfg.options |= DPSECI_QUEUE_OPT_USER_CTX;
+ cfg.user_ctx = (size_t)(qp);
+ if (event->sched_type == RTE_SCHED_TYPE_ATOMIC) {
+ cfg.options |= DPSECI_QUEUE_OPT_ORDER_PRESERVATION;
+ cfg.order_preservation_en = 1;
+ }
+ ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
+ qp_id, &cfg);
+ if (ret) {
+ RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
+ return ret;
+ }
+
+ memcpy(&qp->rx_vq.ev, event, sizeof(struct rte_event));
+
+ return 0;
+}
+
+int
+dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
+ int qp_id)
+{
+ struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
+ struct fsl_mc_io *dpseci = (struct fsl_mc_io *)priv->hw;
+ struct dpseci_rx_queue_cfg cfg;
+ int ret;
+
+ memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
+ cfg.options = DPSECI_QUEUE_OPT_DEST;
+ cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
+
+ ret = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
+ qp_id, &cfg);
+ if (ret)
+ RTE_LOG(ERR, PMD, "Error in dpseci_set_queue: ret: %d\n", ret);
+
+ return ret;
+}
+
static struct rte_cryptodev_ops crypto_ops = {
.dev_configure = dpaa2_sec_dev_configure,
.dev_start = dpaa2_sec_dev_start,
@@ -2708,7 +3267,7 @@ dpaa2_sec_capabilities_get(void *device __rte_unused)
return dpaa2_sec_security_cap;
}
-struct rte_security_ops dpaa2_sec_security_ops = {
+static const struct rte_security_ops dpaa2_sec_security_ops = {
.session_create = dpaa2_sec_security_session_create,
.session_update = NULL,
.session_stats_get = NULL,
@@ -2843,7 +3402,7 @@ init_error:
}
static int
-cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
+cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv __rte_unused,
struct rte_dpaa2_device *dpaa2_dev)
{
struct rte_cryptodev *cryptodev;
@@ -2871,7 +3430,6 @@ cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
dpaa2_dev->cryptodev = cryptodev;
cryptodev->device = &dpaa2_dev->device;
- cryptodev->device->driver = &dpaa2_drv->driver;
/* init user callbacks */
TAILQ_INIT(&(cryptodev->link_intr_cbs));
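
[Note, not part of this patch] The new dpaa2_sec_set_pdcp_session() path above is reached through the generic rte_security session API rather than a PMD-specific call. A minimal application-side sketch follows, assuming the DPDK 18.11 rte_security/cryptodev headers; the device ID, mempool, key material and PDCP parameters are placeholders, and only the rte_security_pdcp_xform fields this driver actually consumes (domain, bearer, pkt_dir, sn_size, hfn, hfn_threshold) are filled in.

#include <rte_cryptodev.h>
#include <rte_security.h>

/* Hypothetical helper: build a PDCP data-plane (u-plane) lookaside session
 * on the dpaa2_sec device 'cdev_id'.  Values are illustrative only. */
static struct rte_security_session *
create_pdcp_session(uint8_t cdev_id, struct rte_mempool *sess_mp,
                    uint8_t *key, uint16_t key_len)
{
    struct rte_security_ctx *ctx =
        (struct rte_security_ctx *)rte_cryptodev_get_sec_ctx(cdev_id);

    /* Cipher-only transform; a control-plane session would chain an
     * auth transform as well (see dpaa2_sec_set_pdcp_session above). */
    struct rte_crypto_sym_xform cipher = {
        .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
        .cipher = {
            .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
            .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
            .key = { .data = key, .length = key_len },
        },
    };

    struct rte_security_session_conf conf = {
        .action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
        .protocol = RTE_SECURITY_PROTOCOL_PDCP,
        .pdcp = {
            .domain = RTE_SECURITY_PDCP_MODE_DATA,
            .bearer = 0x3,                       /* placeholder bearer ID */
            .pkt_dir = 0,                        /* 0: UL, 1: DL */
            .sn_size = RTE_SECURITY_PDCP_SN_SIZE_12,
            .hfn = 0x1,
            .hfn_threshold = 0xfffff,
        },
        .crypto_xform = &cipher,
    };

    return rte_security_session_create(ctx, &conf, sess_mp);
}

Enqueue/dequeue then proceeds through the normal rte_cryptodev_enqueue_burst()/dequeue_burst() path, which ends up in build_proto_compound_fd() for DPAA2_SEC_PDCP sessions.
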
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_event.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_event.h
new file mode 100644
index 00000000..97709942
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_event.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2018 NXP
+ *
+ */
+
+#ifndef _DPAA2_SEC_EVENT_H_
+#define _DPAA2_SEC_EVENT_H_
+
+int
+dpaa2_sec_eventq_attach(const struct rte_cryptodev *dev,
+ int qp_id,
+ uint16_t dpcon_id,
+ const struct rte_event *event);
+
+int dpaa2_sec_eventq_detach(const struct rte_cryptodev *dev,
+ int qp_id);
+
+#endif /* _DPAA2_SEC_EVENT_H_ */
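
[Note, not part of this patch] dpaa2_sec_event.h exposes the two PMD-level hooks used to steer SEC completions to an event queue. A minimal sketch of calling them directly is shown below; the dpcon ID, queue pair index and rte_event settings are placeholders, and in practice this wiring would normally be done by event/crypto adapter glue rather than by hand.

#include <rte_cryptodev.h>
#include <rte_eventdev.h>
#include "dpaa2_sec_event.h"

/* Hypothetical wiring: deliver completions of queue pair 0 of 'dev'
 * to event queue 1 with parallel scheduling. */
static int
bind_sec_qp_to_event_queue(const struct rte_cryptodev *dev, uint16_t dpcon_id)
{
    struct rte_event ev = {
        .queue_id = 1,
        .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
        .sched_type = RTE_SCHED_TYPE_PARALLEL,
    };

    /* Installs dpaa2_sec_process_parallel_event as the Rx callback and
     * points the DPSECI Rx queue at the given DPCON object. */
    return dpaa2_sec_eventq_attach(dev, 0 /* qp_id */, dpcon_id, &ev);
}

/* Tear-down is the mirror call: dpaa2_sec_eventq_detach(dev, 0); */
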
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index d015be1e..51751103 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -8,6 +8,8 @@
#ifndef _RTE_DPAA2_SEC_PMD_PRIVATE_H_
#define _RTE_DPAA2_SEC_PMD_PRIVATE_H_
+#include <rte_security_driver.h>
+
#define CRYPTODEV_NAME_DPAA2_SEC_PMD crypto_dpaa2_sec
/**< NXP DPAA2 - SEC PMD device name */
@@ -135,6 +137,19 @@ struct dpaa2_sec_aead_ctxt {
uint8_t auth_cipher_text; /**< Authenticate/cipher ordering */
};
+/*
+ * The structure is to be filled by user for PDCP Protocol
+ */
+struct dpaa2_pdcp_ctxt {
+ enum rte_security_pdcp_domain domain; /*!< Data/Control mode*/
+ int8_t bearer; /*!< PDCP bearer ID */
+ int8_t pkt_dir;/*!< PDCP Frame Direction 0:UL 1:DL*/
+ int8_t hfn_ovd;/*!< Overwrite HFN per packet*/
+ uint32_t hfn; /*!< Hyper Frame Number */
+ uint32_t hfn_threshold; /*!< HFN Threshold for key renegotiation */
+ uint8_t sn_size; /*!< Sequence number size, 7/12/15 */
+};
+
typedef struct dpaa2_sec_session_entry {
void *ctxt;
uint8_t ctxt_type;
@@ -158,15 +173,20 @@ typedef struct dpaa2_sec_session_entry {
} auth_key;
};
};
- struct {
- uint16_t length; /**< IV length in bytes */
- uint16_t offset; /**< IV offset in bytes */
- } iv;
- uint16_t digest_length;
- uint8_t status;
union {
- struct dpaa2_sec_aead_ctxt aead_ctxt;
- } ext_params;
+ struct {
+ struct {
+ uint16_t length; /**< IV length in bytes */
+ uint16_t offset; /**< IV offset in bytes */
+ } iv;
+ uint16_t digest_length;
+ uint8_t status;
+ union {
+ struct dpaa2_sec_aead_ctxt aead_ctxt;
+ } ext_params;
+ };
+ struct dpaa2_pdcp_ctxt pdcp;
+ };
} dpaa2_sec_session;
static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
@@ -390,6 +410,162 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
+static const struct rte_cryptodev_capabilities dpaa2_pdcp_capabilities[] = {
+ { /* SNOW 3G (UIA2) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* SNOW 3G (UEA2) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* NULL (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .iv_size = { 0 }
+ }, },
+ }, },
+ },
+ { /* NULL (CIPHER) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_NULL,
+ .block_size = 1,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ }
+ }, },
+ }, }
+ },
+ { /* ZUC (EEA3) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_ZUC_EEA3,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* ZUC (EIA3) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_ZUC_EIA3,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 4,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
static const struct rte_security_capability dpaa2_sec_security_cap[] = {
{ /* IPsec Lookaside Protocol offload ESP Transport Egress */
.action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
@@ -413,6 +589,24 @@ static const struct rte_security_capability dpaa2_sec_security_cap[] = {
},
.crypto_capabilities = dpaa2_sec_capabilities
},
+ { /* PDCP Lookaside Protocol offload Data */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_PDCP,
+ .pdcp = {
+ .domain = RTE_SECURITY_PDCP_MODE_DATA,
+ .capa_flags = 0
+ },
+ .crypto_capabilities = dpaa2_pdcp_capabilities
+ },
+ { /* PDCP Lookaside Protocol offload Control */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_PDCP,
+ .pdcp = {
+ .domain = RTE_SECURITY_PDCP_MODE_CONTROL,
+ .capa_flags = 0
+ },
+ .crypto_capabilities = dpaa2_pdcp_capabilities
+ },
{
.action = RTE_SECURITY_ACTION_TYPE_NONE
}
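
[Note, not part of this patch] The two new entries in dpaa2_sec_security_cap advertise PDCP lookaside offload for the data and control domains. A short sketch of how an application could probe for them follows, assuming the DPDK 18.11 rte_security capability API; it simply walks the array returned by the device, which (as shown above) is terminated by an RTE_SECURITY_ACTION_TYPE_NONE entry.

#include <rte_cryptodev.h>
#include <rte_security.h>

/* Hypothetical check: return 1 if 'cdev_id' advertises PDCP lookaside
 * offload for 'domain' (RTE_SECURITY_PDCP_MODE_DATA or _CONTROL). */
static int
dev_supports_pdcp(uint8_t cdev_id, enum rte_security_pdcp_domain domain)
{
    struct rte_security_ctx *ctx =
        (struct rte_security_ctx *)rte_cryptodev_get_sec_ctx(cdev_id);
    const struct rte_security_capability *cap;

    if (ctx == NULL)
        return 0;

    for (cap = rte_security_capabilities_get(ctx);
         cap != NULL && cap->action != RTE_SECURITY_ACTION_TYPE_NONE;
         cap++) {
        if (cap->action == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL &&
            cap->protocol == RTE_SECURITY_PROTOCOL_PDCP &&
            cap->pdcp.domain == domain)
            return 1;
    }
    return 0;
}
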
diff --git a/drivers/crypto/dpaa2_sec/hw/desc.h b/drivers/crypto/dpaa2_sec/hw/desc.h
index e9255832..5d99dd8a 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc.h
@@ -588,7 +588,7 @@
#define OP_PCLID_TLS10_PRF (0x09 << OP_PCLID_SHIFT)
#define OP_PCLID_TLS11_PRF (0x0a << OP_PCLID_SHIFT)
#define OP_PCLID_TLS12_PRF (0x0b << OP_PCLID_SHIFT)
-#define OP_PCLID_DTLS10_PRF (0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS_PRF (0x0c << OP_PCLID_SHIFT)
#define OP_PCLID_PUBLICKEYPAIR (0x14 << OP_PCLID_SHIFT)
#define OP_PCLID_DSASIGN (0x15 << OP_PCLID_SHIFT)
#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
@@ -612,7 +612,7 @@
#define OP_PCLID_TLS10 (0x09 << OP_PCLID_SHIFT)
#define OP_PCLID_TLS11 (0x0a << OP_PCLID_SHIFT)
#define OP_PCLID_TLS12 (0x0b << OP_PCLID_SHIFT)
-#define OP_PCLID_DTLS10 (0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS (0x0c << OP_PCLID_SHIFT)
#define OP_PCLID_BLOB (0x0d << OP_PCLID_SHIFT)
#define OP_PCLID_IPSEC_NEW (0x11 << OP_PCLID_SHIFT)
#define OP_PCLID_3G_DCRC (0x31 << OP_PCLID_SHIFT)
@@ -665,643 +665,179 @@
#define OP_PCL_SRTP_HMAC_SHA1_160 0x0007
-/* For SSL 3.0 - OP_PCLID_SSL30 */
-#define OP_PCL_SSL30_AES_128_CBC_SHA 0x002f
-#define OP_PCL_SSL30_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_SSL30_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_SSL30_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_SSL30_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_SSL30_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_SSL30_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_SSL30_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_SSL30_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_SSL30_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_SSL30_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_SSL30_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_SSL30_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_SSL30_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_SSL30_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_SSL30_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_SSL30_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_SSL30_AES_256_CBC_SHA 0x0035
-#define OP_PCL_SSL30_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_SSL30_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_SSL30_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_SSL30_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_SSL30_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_SSL30_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_SSL30_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_SSL30_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_SSL30_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_SSL30_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_SSL30_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_SSL30_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_SSL30_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_SSL30_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_SSL30_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_SSL30_AES_256_CBC_SHA_17 0xc022
-
-#define OP_PCL_SSL30_AES_128_GCM_SHA256_1 0x009C
-#define OP_PCL_SSL30_AES_256_GCM_SHA384_1 0x009D
-#define OP_PCL_SSL30_AES_128_GCM_SHA256_2 0x009E
-#define OP_PCL_SSL30_AES_256_GCM_SHA384_2 0x009F
-#define OP_PCL_SSL30_AES_128_GCM_SHA256_3 0x00A0
-#define OP_PCL_SSL30_AES_256_GCM_SHA384_3 0x00A1
-#define OP_PCL_SSL30_AES_128_GCM_SHA256_4 0x00A2
-#define OP_PCL_SSL30_AES_256_GCM_SHA384_4 0x00A3
-#define OP_PCL_SSL30_AES_128_GCM_SHA256_5 0x00A4
-#define OP_PCL_SSL30_AES_256_GCM_SHA384_5 0x00A5
-#define OP_PCL_SSL30_AES_128_GCM_SHA256_6 0x00A6
-
-#define OP_PCL_TLS_DH_ANON_AES_256_GCM_SHA384 0x00A7
-#define OP_PCL_TLS_PSK_AES_128_GCM_SHA256 0x00A8
-#define OP_PCL_TLS_PSK_AES_256_GCM_SHA384 0x00A9
-#define OP_PCL_TLS_DHE_PSK_AES_128_GCM_SHA256 0x00AA
-#define OP_PCL_TLS_DHE_PSK_AES_256_GCM_SHA384 0x00AB
-#define OP_PCL_TLS_RSA_PSK_AES_128_GCM_SHA256 0x00AC
-#define OP_PCL_TLS_RSA_PSK_AES_256_GCM_SHA384 0x00AD
-#define OP_PCL_TLS_PSK_AES_128_CBC_SHA256 0x00AE
-#define OP_PCL_TLS_PSK_AES_256_CBC_SHA384 0x00AF
-#define OP_PCL_TLS_DHE_PSK_AES_128_CBC_SHA256 0x00B2
-#define OP_PCL_TLS_DHE_PSK_AES_256_CBC_SHA384 0x00B3
-#define OP_PCL_TLS_RSA_PSK_AES_128_CBC_SHA256 0x00B6
-#define OP_PCL_TLS_RSA_PSK_AES_256_CBC_SHA384 0x00B7
-
-#define OP_PCL_SSL30_3DES_EDE_CBC_MD5 0x0023
-
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_SSL30_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_SSL30_DES_CBC_MD5 0x0022
-
-#define OP_PCL_SSL30_DES40_CBC_SHA 0x0008
-#define OP_PCL_SSL30_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_SSL30_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_SSL30_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_SSL30_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_SSL30_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_SSL30_DES40_CBC_SHA_7 0x0026
-
-#define OP_PCL_SSL30_DES_CBC_SHA 0x001e
-#define OP_PCL_SSL30_DES_CBC_SHA_2 0x0009
-#define OP_PCL_SSL30_DES_CBC_SHA_3 0x000c
-#define OP_PCL_SSL30_DES_CBC_SHA_4 0x000f
-#define OP_PCL_SSL30_DES_CBC_SHA_5 0x0012
-#define OP_PCL_SSL30_DES_CBC_SHA_6 0x0015
-#define OP_PCL_SSL30_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_SSL30_RC4_128_MD5 0x0024
-#define OP_PCL_SSL30_RC4_128_MD5_2 0x0004
-#define OP_PCL_SSL30_RC4_128_MD5_3 0x0018
-
-#define OP_PCL_SSL30_RC4_40_MD5 0x002b
-#define OP_PCL_SSL30_RC4_40_MD5_2 0x0003
-#define OP_PCL_SSL30_RC4_40_MD5_3 0x0017
-
-#define OP_PCL_SSL30_RC4_128_SHA 0x0020
-#define OP_PCL_SSL30_RC4_128_SHA_2 0x008a
-#define OP_PCL_SSL30_RC4_128_SHA_3 0x008e
-#define OP_PCL_SSL30_RC4_128_SHA_4 0x0092
-#define OP_PCL_SSL30_RC4_128_SHA_5 0x0005
-#define OP_PCL_SSL30_RC4_128_SHA_6 0xc002
-#define OP_PCL_SSL30_RC4_128_SHA_7 0xc007
-#define OP_PCL_SSL30_RC4_128_SHA_8 0xc00c
-#define OP_PCL_SSL30_RC4_128_SHA_9 0xc011
-#define OP_PCL_SSL30_RC4_128_SHA_10 0xc016
-
-#define OP_PCL_SSL30_RC4_40_SHA 0x0028
-
-/* For TLS 1.0 - OP_PCLID_TLS10 */
-#define OP_PCL_TLS10_AES_128_CBC_SHA 0x002f
-#define OP_PCL_TLS10_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_TLS10_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_TLS10_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_TLS10_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_TLS10_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_TLS10_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_TLS10_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_TLS10_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_TLS10_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_TLS10_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_TLS10_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_TLS10_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_TLS10_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_TLS10_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_TLS10_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_TLS10_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_TLS10_AES_256_CBC_SHA 0x0035
-#define OP_PCL_TLS10_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_TLS10_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_TLS10_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_TLS10_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_TLS10_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_TLS10_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_TLS10_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_TLS10_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_TLS10_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_TLS10_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_TLS10_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_TLS10_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_TLS10_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_TLS10_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_TLS10_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_TLS10_AES_256_CBC_SHA_17 0xc022
-
-#define OP_PCL_TLS_ECDHE_ECDSA_AES_128_CBC_SHA256 0xC023
-#define OP_PCL_TLS_ECDHE_ECDSA_AES_256_CBC_SHA384 0xC024
-#define OP_PCL_TLS_ECDH_ECDSA_AES_128_CBC_SHA256 0xC025
-#define OP_PCL_TLS_ECDH_ECDSA_AES_256_CBC_SHA384 0xC026
-#define OP_PCL_TLS_ECDHE_RSA_AES_128_CBC_SHA256 0xC027
-#define OP_PCL_TLS_ECDHE_RSA_AES_256_CBC_SHA384 0xC028
-#define OP_PCL_TLS_ECDH_RSA_AES_128_CBC_SHA256 0xC029
-#define OP_PCL_TLS_ECDH_RSA_AES_256_CBC_SHA384 0xC02A
-#define OP_PCL_TLS_ECDHE_ECDSA_AES_128_GCM_SHA256 0xC02B
-#define OP_PCL_TLS_ECDHE_ECDSA_AES_256_GCM_SHA384 0xC02C
-#define OP_PCL_TLS_ECDH_ECDSA_AES_128_GCM_SHA256 0xC02D
-#define OP_PCL_TLS_ECDH_ECDSA_AES_256_GCM_SHA384 0xC02E
-#define OP_PCL_TLS_ECDHE_RSA_AES_128_GCM_SHA256 0xC02F
-#define OP_PCL_TLS_ECDHE_RSA_AES_256_GCM_SHA384 0xC030
-#define OP_PCL_TLS_ECDH_RSA_AES_128_GCM_SHA256 0xC031
-#define OP_PCL_TLS_ECDH_RSA_AES_256_GCM_SHA384 0xC032
-#define OP_PCL_TLS_ECDHE_PSK_RC4_128_SHA 0xC033
-#define OP_PCL_TLS_ECDHE_PSK_3DES_EDE_CBC_SHA 0xC034
-#define OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA 0xC035
-#define OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA 0xC036
-#define OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA256 0xC037
-#define OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA384 0xC038
-
-/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0x0023 */
-
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_TLS10_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_TLS10_DES_CBC_MD5 0x0022
-
-#define OP_PCL_TLS10_DES40_CBC_SHA 0x0008
-#define OP_PCL_TLS10_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_TLS10_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_TLS10_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_TLS10_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_TLS10_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_TLS10_DES40_CBC_SHA_7 0x0026
-
-#define OP_PCL_TLS10_DES_CBC_SHA 0x001e
-#define OP_PCL_TLS10_DES_CBC_SHA_2 0x0009
-#define OP_PCL_TLS10_DES_CBC_SHA_3 0x000c
-#define OP_PCL_TLS10_DES_CBC_SHA_4 0x000f
-#define OP_PCL_TLS10_DES_CBC_SHA_5 0x0012
-#define OP_PCL_TLS10_DES_CBC_SHA_6 0x0015
-#define OP_PCL_TLS10_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_TLS10_RC4_128_MD5 0x0024
-#define OP_PCL_TLS10_RC4_128_MD5_2 0x0004
-#define OP_PCL_TLS10_RC4_128_MD5_3 0x0018
-
-#define OP_PCL_TLS10_RC4_40_MD5 0x002b
-#define OP_PCL_TLS10_RC4_40_MD5_2 0x0003
-#define OP_PCL_TLS10_RC4_40_MD5_3 0x0017
-
-#define OP_PCL_TLS10_RC4_128_SHA 0x0020
-#define OP_PCL_TLS10_RC4_128_SHA_2 0x008a
-#define OP_PCL_TLS10_RC4_128_SHA_3 0x008e
-#define OP_PCL_TLS10_RC4_128_SHA_4 0x0092
-#define OP_PCL_TLS10_RC4_128_SHA_5 0x0005
-#define OP_PCL_TLS10_RC4_128_SHA_6 0xc002
-#define OP_PCL_TLS10_RC4_128_SHA_7 0xc007
-#define OP_PCL_TLS10_RC4_128_SHA_8 0xc00c
-#define OP_PCL_TLS10_RC4_128_SHA_9 0xc011
-#define OP_PCL_TLS10_RC4_128_SHA_10 0xc016
-
-#define OP_PCL_TLS10_RC4_40_SHA 0x0028
-
-#define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0xff23
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA160 0xff30
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA224 0xff34
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA256 0xff36
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA384 0xff33
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA512 0xff35
-#define OP_PCL_TLS10_AES_128_CBC_SHA160 0xff80
-#define OP_PCL_TLS10_AES_128_CBC_SHA224 0xff84
-#define OP_PCL_TLS10_AES_128_CBC_SHA256 0xff86
-#define OP_PCL_TLS10_AES_128_CBC_SHA384 0xff83
-#define OP_PCL_TLS10_AES_128_CBC_SHA512 0xff85
-#define OP_PCL_TLS10_AES_192_CBC_SHA160 0xff20
-#define OP_PCL_TLS10_AES_192_CBC_SHA224 0xff24
-#define OP_PCL_TLS10_AES_192_CBC_SHA256 0xff26
-#define OP_PCL_TLS10_AES_192_CBC_SHA384 0xff23
-#define OP_PCL_TLS10_AES_192_CBC_SHA512 0xff25
-#define OP_PCL_TLS10_AES_256_CBC_SHA160 0xff60
-#define OP_PCL_TLS10_AES_256_CBC_SHA224 0xff64
-#define OP_PCL_TLS10_AES_256_CBC_SHA256 0xff66
-#define OP_PCL_TLS10_AES_256_CBC_SHA384 0xff63
-#define OP_PCL_TLS10_AES_256_CBC_SHA512 0xff65
-
-#define OP_PCL_TLS_PVT_AES_192_CBC_SHA160 0xff90
-#define OP_PCL_TLS_PVT_AES_192_CBC_SHA384 0xff93
-#define OP_PCL_TLS_PVT_AES_192_CBC_SHA224 0xff94
-#define OP_PCL_TLS_PVT_AES_192_CBC_SHA512 0xff95
-#define OP_PCL_TLS_PVT_AES_192_CBC_SHA256 0xff96
-#define OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FE 0xfffe
-#define OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FF 0xffff
-
-/* For TLS 1.1 - OP_PCLID_TLS11 */
-#define OP_PCL_TLS11_AES_128_CBC_SHA 0x002f
-#define OP_PCL_TLS11_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_TLS11_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_TLS11_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_TLS11_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_TLS11_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_TLS11_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_TLS11_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_TLS11_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_TLS11_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_TLS11_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_TLS11_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_TLS11_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_TLS11_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_TLS11_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_TLS11_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_TLS11_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_TLS11_AES_256_CBC_SHA 0x0035
-#define OP_PCL_TLS11_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_TLS11_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_TLS11_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_TLS11_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_TLS11_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_TLS11_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_TLS11_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_TLS11_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_TLS11_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_TLS11_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_TLS11_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_TLS11_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_TLS11_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_TLS11_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_TLS11_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_TLS11_AES_256_CBC_SHA_17 0xc022
-
-/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0x0023 */
-
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_TLS11_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_TLS11_DES_CBC_MD5 0x0022
-
-#define OP_PCL_TLS11_DES40_CBC_SHA 0x0008
-#define OP_PCL_TLS11_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_TLS11_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_TLS11_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_TLS11_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_TLS11_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_TLS11_DES40_CBC_SHA_7 0x0026
-
-#define OP_PCL_TLS11_DES_CBC_SHA 0x001e
-#define OP_PCL_TLS11_DES_CBC_SHA_2 0x0009
-#define OP_PCL_TLS11_DES_CBC_SHA_3 0x000c
-#define OP_PCL_TLS11_DES_CBC_SHA_4 0x000f
-#define OP_PCL_TLS11_DES_CBC_SHA_5 0x0012
-#define OP_PCL_TLS11_DES_CBC_SHA_6 0x0015
-#define OP_PCL_TLS11_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_TLS11_RC4_128_MD5 0x0024
-#define OP_PCL_TLS11_RC4_128_MD5_2 0x0004
-#define OP_PCL_TLS11_RC4_128_MD5_3 0x0018
-
-#define OP_PCL_TLS11_RC4_40_MD5 0x002b
-#define OP_PCL_TLS11_RC4_40_MD5_2 0x0003
-#define OP_PCL_TLS11_RC4_40_MD5_3 0x0017
-
-#define OP_PCL_TLS11_RC4_128_SHA 0x0020
-#define OP_PCL_TLS11_RC4_128_SHA_2 0x008a
-#define OP_PCL_TLS11_RC4_128_SHA_3 0x008e
-#define OP_PCL_TLS11_RC4_128_SHA_4 0x0092
-#define OP_PCL_TLS11_RC4_128_SHA_5 0x0005
-#define OP_PCL_TLS11_RC4_128_SHA_6 0xc002
-#define OP_PCL_TLS11_RC4_128_SHA_7 0xc007
-#define OP_PCL_TLS11_RC4_128_SHA_8 0xc00c
-#define OP_PCL_TLS11_RC4_128_SHA_9 0xc011
-#define OP_PCL_TLS11_RC4_128_SHA_10 0xc016
-
-#define OP_PCL_TLS11_RC4_40_SHA 0x0028
-
-#define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0xff23
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA160 0xff30
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA224 0xff34
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA256 0xff36
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA384 0xff33
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA512 0xff35
-#define OP_PCL_TLS11_AES_128_CBC_SHA160 0xff80
-#define OP_PCL_TLS11_AES_128_CBC_SHA224 0xff84
-#define OP_PCL_TLS11_AES_128_CBC_SHA256 0xff86
-#define OP_PCL_TLS11_AES_128_CBC_SHA384 0xff83
-#define OP_PCL_TLS11_AES_128_CBC_SHA512 0xff85
-#define OP_PCL_TLS11_AES_192_CBC_SHA160 0xff20
-#define OP_PCL_TLS11_AES_192_CBC_SHA224 0xff24
-#define OP_PCL_TLS11_AES_192_CBC_SHA256 0xff26
-#define OP_PCL_TLS11_AES_192_CBC_SHA384 0xff23
-#define OP_PCL_TLS11_AES_192_CBC_SHA512 0xff25
-#define OP_PCL_TLS11_AES_256_CBC_SHA160 0xff60
-#define OP_PCL_TLS11_AES_256_CBC_SHA224 0xff64
-#define OP_PCL_TLS11_AES_256_CBC_SHA256 0xff66
-#define OP_PCL_TLS11_AES_256_CBC_SHA384 0xff63
-#define OP_PCL_TLS11_AES_256_CBC_SHA512 0xff65
-
-
-/* For TLS 1.2 - OP_PCLID_TLS12 */
-#define OP_PCL_TLS12_AES_128_CBC_SHA 0x002f
-#define OP_PCL_TLS12_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_TLS12_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_TLS12_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_TLS12_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_TLS12_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_TLS12_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_TLS12_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_TLS12_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_TLS12_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_TLS12_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_TLS12_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_TLS12_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_TLS12_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_TLS12_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_TLS12_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_TLS12_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_TLS12_AES_256_CBC_SHA 0x0035
-#define OP_PCL_TLS12_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_TLS12_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_TLS12_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_TLS12_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_TLS12_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_TLS12_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_TLS12_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_TLS12_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_TLS12_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_TLS12_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_TLS12_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_TLS12_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_TLS12_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_TLS12_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_TLS12_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_TLS12_AES_256_CBC_SHA_17 0xc022
-
-/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0x0023 */
-
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_TLS12_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_TLS12_DES_CBC_MD5 0x0022
-
-#define OP_PCL_TLS12_DES40_CBC_SHA 0x0008
-#define OP_PCL_TLS12_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_TLS12_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_TLS12_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_TLS12_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_TLS12_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_TLS12_DES40_CBC_SHA_7 0x0026
-
-#define OP_PCL_TLS12_DES_CBC_SHA 0x001e
-#define OP_PCL_TLS12_DES_CBC_SHA_2 0x0009
-#define OP_PCL_TLS12_DES_CBC_SHA_3 0x000c
-#define OP_PCL_TLS12_DES_CBC_SHA_4 0x000f
-#define OP_PCL_TLS12_DES_CBC_SHA_5 0x0012
-#define OP_PCL_TLS12_DES_CBC_SHA_6 0x0015
-#define OP_PCL_TLS12_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_TLS12_RC4_128_MD5 0x0024
-#define OP_PCL_TLS12_RC4_128_MD5_2 0x0004
-#define OP_PCL_TLS12_RC4_128_MD5_3 0x0018
-
-#define OP_PCL_TLS12_RC4_40_MD5 0x002b
-#define OP_PCL_TLS12_RC4_40_MD5_2 0x0003
-#define OP_PCL_TLS12_RC4_40_MD5_3 0x0017
-
-#define OP_PCL_TLS12_RC4_128_SHA 0x0020
-#define OP_PCL_TLS12_RC4_128_SHA_2 0x008a
-#define OP_PCL_TLS12_RC4_128_SHA_3 0x008e
-#define OP_PCL_TLS12_RC4_128_SHA_4 0x0092
-#define OP_PCL_TLS12_RC4_128_SHA_5 0x0005
-#define OP_PCL_TLS12_RC4_128_SHA_6 0xc002
-#define OP_PCL_TLS12_RC4_128_SHA_7 0xc007
-#define OP_PCL_TLS12_RC4_128_SHA_8 0xc00c
-#define OP_PCL_TLS12_RC4_128_SHA_9 0xc011
-#define OP_PCL_TLS12_RC4_128_SHA_10 0xc016
-
-#define OP_PCL_TLS12_RC4_40_SHA 0x0028
-
-/* #define OP_PCL_TLS12_AES_128_CBC_SHA256 0x003c */
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_2 0x003e
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_3 0x003f
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_4 0x0040
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_5 0x0067
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_6 0x006c
-
-/* #define OP_PCL_TLS12_AES_256_CBC_SHA256 0x003d */
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_2 0x0068
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_3 0x0069
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_4 0x006a
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_5 0x006b
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_6 0x006d
-
-/* AEAD_AES_xxx_CCM/GCM remain to be defined... */
-
-#define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0xff23
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA160 0xff30
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA224 0xff34
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA256 0xff36
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA384 0xff33
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA512 0xff35
-#define OP_PCL_TLS12_AES_128_CBC_SHA160 0xff80
-#define OP_PCL_TLS12_AES_128_CBC_SHA224 0xff84
-#define OP_PCL_TLS12_AES_128_CBC_SHA256 0xff86
-#define OP_PCL_TLS12_AES_128_CBC_SHA384 0xff83
-#define OP_PCL_TLS12_AES_128_CBC_SHA512 0xff85
-#define OP_PCL_TLS12_AES_192_CBC_SHA160 0xff20
-#define OP_PCL_TLS12_AES_192_CBC_SHA224 0xff24
-#define OP_PCL_TLS12_AES_192_CBC_SHA256 0xff26
-#define OP_PCL_TLS12_AES_192_CBC_SHA384 0xff23
-#define OP_PCL_TLS12_AES_192_CBC_SHA512 0xff25
-#define OP_PCL_TLS12_AES_256_CBC_SHA160 0xff60
-#define OP_PCL_TLS12_AES_256_CBC_SHA224 0xff64
-#define OP_PCL_TLS12_AES_256_CBC_SHA256 0xff66
-#define OP_PCL_TLS12_AES_256_CBC_SHA384 0xff63
-#define OP_PCL_TLS12_AES_256_CBC_SHA512 0xff65
-
-/* For DTLS - OP_PCLID_DTLS */
-
-#define OP_PCL_DTLS_AES_128_CBC_SHA 0x002f
-#define OP_PCL_DTLS_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_DTLS_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_DTLS_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_DTLS_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_DTLS_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_DTLS_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_DTLS_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_DTLS_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_DTLS_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_DTLS_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_DTLS_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_DTLS_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_DTLS_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_DTLS_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_DTLS_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_DTLS_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_DTLS_AES_256_CBC_SHA 0x0035
-#define OP_PCL_DTLS_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_DTLS_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_DTLS_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_DTLS_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_DTLS_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_DTLS_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_DTLS_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_DTLS_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_DTLS_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_DTLS_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_DTLS_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_DTLS_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_DTLS_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_DTLS_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_DTLS_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_DTLS_AES_256_CBC_SHA_17 0xc022
-
-/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0x0023 */
-
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_DTLS_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_DTLS_DES_CBC_MD5 0x0022
-
-#define OP_PCL_DTLS_DES40_CBC_SHA 0x0008
-#define OP_PCL_DTLS_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_DTLS_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_DTLS_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_DTLS_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_DTLS_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_DTLS_DES40_CBC_SHA_7 0x0026
-
-
-#define OP_PCL_DTLS_DES_CBC_SHA 0x001e
-#define OP_PCL_DTLS_DES_CBC_SHA_2 0x0009
-#define OP_PCL_DTLS_DES_CBC_SHA_3 0x000c
-#define OP_PCL_DTLS_DES_CBC_SHA_4 0x000f
-#define OP_PCL_DTLS_DES_CBC_SHA_5 0x0012
-#define OP_PCL_DTLS_DES_CBC_SHA_6 0x0015
-#define OP_PCL_DTLS_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0xff23
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA160 0xff30
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA224 0xff34
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA256 0xff36
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA384 0xff33
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA512 0xff35
-#define OP_PCL_DTLS_AES_128_CBC_SHA160 0xff80
-#define OP_PCL_DTLS_AES_128_CBC_SHA224 0xff84
-#define OP_PCL_DTLS_AES_128_CBC_SHA256 0xff86
-#define OP_PCL_DTLS_AES_128_CBC_SHA384 0xff83
-#define OP_PCL_DTLS_AES_128_CBC_SHA512 0xff85
-#define OP_PCL_DTLS_AES_192_CBC_SHA160 0xff20
-#define OP_PCL_DTLS_AES_192_CBC_SHA224 0xff24
-#define OP_PCL_DTLS_AES_192_CBC_SHA256 0xff26
-#define OP_PCL_DTLS_AES_192_CBC_SHA384 0xff23
-#define OP_PCL_DTLS_AES_192_CBC_SHA512 0xff25
-#define OP_PCL_DTLS_AES_256_CBC_SHA160 0xff60
-#define OP_PCL_DTLS_AES_256_CBC_SHA224 0xff64
-#define OP_PCL_DTLS_AES_256_CBC_SHA256 0xff66
-#define OP_PCL_DTLS_AES_256_CBC_SHA384 0xff63
-#define OP_PCL_DTLS_AES_256_CBC_SHA512 0xff65
+/*
+ * For SSL/TLS/DTLS - OP_PCL_TLS
+ * For more details see IANA TLS Cipher Suite registry:
+ * https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml
+ * Note: for private/internal use (reserved by IANA) - OP_PCL_PVT_TLS
+ */
+#define OP_PCL_TLS_RSA_EXPORT_WITH_RC4_40_MD5 0x0003
+#define OP_PCL_TLS_RSA_WITH_RC4_128_MD5 0x0004
+#define OP_PCL_TLS_RSA_WITH_RC4_128_SHA 0x0005
+#define OP_PCL_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS_RSA_WITH_DES_CBC_SHA 0x0009
+#define OP_PCL_TLS_RSA_WITH_3DES_EDE_CBC_SHA 0x000a
+#define OP_PCL_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA 0x000b
+#define OP_PCL_TLS_DH_DSS_WITH_DES_CBC_SHA 0x000c
+#define OP_PCL_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA 0x000d
+#define OP_PCL_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA 0x000e
+#define OP_PCL_TLS_DH_RSA_WITH_DES_CBC_SHA 0x000f
+#define OP_PCL_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA 0x0010
+#define OP_PCL_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA 0x0011
+#define OP_PCL_TLS_DHE_DSS_WITH_DES_CBC_SHA 0x0012
+#define OP_PCL_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA 0x0013
+#define OP_PCL_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA 0x0014
+#define OP_PCL_TLS_DHE_RSA_WITH_DES_CBC_SHA 0x0015
+#define OP_PCL_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA 0x0016
+#define OP_PCL_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 0x0017
+#define OP_PCL_TLS_DH_anon_WITH_RC4_128_MD5 0x0018
+#define OP_PCL_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA 0x0019
+#define OP_PCL_TLS_DH_anon_WITH_DES_CBC_SHA 0x001a
+#define OP_PCL_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA 0x001b
+#define OP_PCL_TLS_KRB5_WITH_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS_KRB5_WITH_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS_KRB5_WITH_RC4_128_SHA 0x0020
+#define OP_PCL_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 0x0023
+#define OP_PCL_TLS_KRB5_WITH_DES_CBC_MD5 0x0022
+#define OP_PCL_TLS_KRB5_WITH_RC4_128_MD5 0x0024
+#define OP_PCL_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA 0x0026
+#define OP_PCL_TLS_KRB5_EXPORT_WITH_RC4_40_SHA 0x0028
+#define OP_PCL_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 0x0029
+#define OP_PCL_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 0x002b
+#define OP_PCL_TLS_RSA_WITH_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS_DH_DSS_WITH_AES_128_CBC_SHA 0x0030
+#define OP_PCL_TLS_DH_RSA_WITH_AES_128_CBC_SHA 0x0031
+#define OP_PCL_TLS_DHE_DSS_WITH_AES_128_CBC_SHA 0x0032
+#define OP_PCL_TLS_DHE_RSA_WITH_AES_128_CBC_SHA 0x0033
+#define OP_PCL_TLS_DH_anon_WITH_AES_128_CBC_SHA 0x0034
+#define OP_PCL_TLS_RSA_WITH_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS_DH_DSS_WITH_AES_256_CBC_SHA 0x0036
+#define OP_PCL_TLS_DH_RSA_WITH_AES_256_CBC_SHA 0x0037
+#define OP_PCL_TLS_DHE_DSS_WITH_AES_256_CBC_SHA 0x0038
+#define OP_PCL_TLS_DHE_RSA_WITH_AES_256_CBC_SHA 0x0039
+#define OP_PCL_TLS_DH_anon_WITH_AES_256_CBC_SHA 0x003a
+#define OP_PCL_TLS_RSA_WITH_AES_128_CBC_SHA256 0x003c
+#define OP_PCL_TLS_RSA_WITH_AES_256_CBC_SHA256 0x003d
+#define OP_PCL_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 0x003e
+#define OP_PCL_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 0x003f
+#define OP_PCL_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 0x0040
+#define OP_PCL_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 0x0067
+#define OP_PCL_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 0x0068
+#define OP_PCL_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 0x0069
+#define OP_PCL_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 0x006a
+#define OP_PCL_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 0x006b
+#define OP_PCL_TLS_DH_anon_WITH_AES_128_CBC_SHA256 0x006c
+#define OP_PCL_TLS_DH_anon_WITH_AES_256_CBC_SHA256 0x006d
+#define OP_PCL_TLS_PSK_WITH_RC4_128_SHA 0x008a
+#define OP_PCL_TLS_PSK_WITH_3DES_EDE_CBC_SHA 0x008b
+#define OP_PCL_TLS_PSK_WITH_AES_128_CBC_SHA 0x008c
+#define OP_PCL_TLS_PSK_WITH_AES_256_CBC_SHA 0x008d
+#define OP_PCL_TLS_DHE_PSK_WITH_RC4_128_SHA 0x008e
+#define OP_PCL_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA 0x008f
+#define OP_PCL_TLS_DHE_PSK_WITH_AES_128_CBC_SHA 0x0090
+#define OP_PCL_TLS_DHE_PSK_WITH_AES_256_CBC_SHA 0x0091
+#define OP_PCL_TLS_RSA_PSK_WITH_RC4_128_SHA 0x0092
+#define OP_PCL_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA 0x0093
+#define OP_PCL_TLS_RSA_PSK_WITH_AES_128_CBC_SHA 0x0094
+#define OP_PCL_TLS_RSA_PSK_WITH_AES_256_CBC_SHA 0x0095
+#define OP_PCL_TLS_RSA_WITH_AES_128_GCM_SHA256 0x009c
+#define OP_PCL_TLS_RSA_WITH_AES_256_GCM_SHA384 0x009d
+#define OP_PCL_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 0x009e
+#define OP_PCL_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 0x009f
+#define OP_PCL_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 0x00a0
+#define OP_PCL_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 0x00a1
+#define OP_PCL_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 0x00a2
+#define OP_PCL_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 0x00a3
+#define OP_PCL_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 0x00a4
+#define OP_PCL_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 0x00a5
+#define OP_PCL_TLS_DH_anon_WITH_AES_128_GCM_SHA256 0x00a6
+#define OP_PCL_TLS_DH_anon_WITH_AES_256_GCM_SHA384 0x00a7
+#define OP_PCL_TLS_PSK_WITH_AES_128_GCM_SHA256 0x00a8
+#define OP_PCL_TLS_PSK_WITH_AES_256_GCM_SHA384 0x00a9
+#define OP_PCL_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 0x00aa
+#define OP_PCL_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 0x00ab
+#define OP_PCL_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 0x00ac
+#define OP_PCL_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 0x00ad
+#define OP_PCL_TLS_PSK_WITH_AES_128_CBC_SHA256 0x00ae
+#define OP_PCL_TLS_PSK_WITH_AES_256_CBC_SHA384 0x00af
+#define OP_PCL_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 0x00b2
+#define OP_PCL_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 0x00b3
+#define OP_PCL_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 0x00b6
+#define OP_PCL_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 0x00b7
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_RC4_128_SHA 0xc002
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA 0xc003
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA 0xc004
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA 0xc005
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA 0xc007
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA 0xc008
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA 0xc009
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA 0xc00a
+#define OP_PCL_TLS_ECDH_RSA_WITH_RC4_128_SHA 0xc00c
+#define OP_PCL_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA 0xc00d
+#define OP_PCL_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA 0xc00e
+#define OP_PCL_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA 0xc00f
+#define OP_PCL_TLS_ECDHE_RSA_WITH_RC4_128_SHA 0xc011
+#define OP_PCL_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA 0xc012
+#define OP_PCL_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA 0xc013
+#define OP_PCL_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA 0xc014
+#define OP_PCL_TLS_ECDH_anon_WITH_RC4_128_SHA 0xc016
+#define OP_PCL_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA 0xc017
+#define OP_PCL_TLS_ECDH_anon_WITH_AES_128_CBC_SHA 0xc018
+#define OP_PCL_TLS_ECDH_anon_WITH_AES_256_CBC_SHA 0xc019
+#define OP_PCL_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA 0xc01a
+#define OP_PCL_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA 0xc01b
+#define OP_PCL_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA 0xc01c
+#define OP_PCL_TLS_SRP_SHA_WITH_AES_128_CBC_SHA 0xc01d
+#define OP_PCL_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA 0xc01e
+#define OP_PCL_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA 0xc01f
+#define OP_PCL_TLS_SRP_SHA_WITH_AES_256_CBC_SHA 0xc020
+#define OP_PCL_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA 0xc021
+#define OP_PCL_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA 0xc022
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 0xc023
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 0xc024
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 0xc025
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 0xc026
+#define OP_PCL_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 0xc027
+#define OP_PCL_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 0xc028
+#define OP_PCL_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 0xc029
+#define OP_PCL_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 0xc02a
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 0xc02b
+#define OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 0xc02c
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 0xc02d
+#define OP_PCL_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 0xc02e
+#define OP_PCL_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 0xc02f
+#define OP_PCL_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 0xc030
+#define OP_PCL_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 0xc031
+#define OP_PCL_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 0xc032
+#define OP_PCL_TLS_ECDHE_PSK_WITH_RC4_128_SHA 0xc033
+#define OP_PCL_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA 0xc034
+#define OP_PCL_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA 0xc035
+#define OP_PCL_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA 0xc036
+#define OP_PCL_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 0xc037
+#define OP_PCL_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 0xc038
+#define OP_PCL_PVT_TLS_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_PVT_TLS_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_PVT_TLS_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_PVT_TLS_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_PVT_TLS_AES_256_CBC_SHA512 0xff65
+#define OP_PCL_PVT_TLS_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_PVT_TLS_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_PVT_TLS_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_PVT_TLS_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_PVT_TLS_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_PVT_TLS_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_PVT_TLS_AES_192_CBC_SHA160 0xff90
+#define OP_PCL_PVT_TLS_AES_192_CBC_SHA384 0xff93
+#define OP_PCL_PVT_TLS_AES_192_CBC_SHA224 0xff94
+#define OP_PCL_PVT_TLS_AES_192_CBC_SHA512 0xff95
+#define OP_PCL_PVT_TLS_AES_192_CBC_SHA256 0xff96
+#define OP_PCL_PVT_TLS_MASTER_SECRET_PRF_FE 0xfffe
+#define OP_PCL_PVT_TLS_MASTER_SECRET_PRF_FF 0xffff
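These OP_PCL_TLS_* values form the 16-bit protinfo half of a protocol OPERATION command, replacing the per-version lists removed above. A hedged sketch of pairing one of them with a TLS protocol ID; OP_PCLID_TLS12 is referenced elsewhere in this header, and the program p plus the surrounding descriptor setup are assumed:

    /* Sketch only: TLS 1.2 with the AES-128-CBC-SHA256 cipher suite */
    PROTOCOL(p, OP_TYPE_ENCAP_PROTOCOL, OP_PCLID_TLS12,
             OP_PCL_TLS_RSA_WITH_AES_128_CBC_SHA256);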
/* 802.16 WiMAX protinfos */
#define OP_PCL_WIMAX_OFDM 0x0201
@@ -1332,7 +868,7 @@
#define OP_PCL_LTE_MIXED_AUTH_SHIFT 0
#define OP_PCL_LTE_MIXED_AUTH_MASK (3 << OP_PCL_LTE_MIXED_AUTH_SHIFT)
#define OP_PCL_LTE_MIXED_ENC_SHIFT 8
-#define OP_PCL_LTE_MIXED_ENC_MASK (3 < OP_PCL_LTE_MIXED_ENC_SHIFT)
+#define OP_PCL_LTE_MIXED_ENC_MASK (3 << OP_PCL_LTE_MIXED_ENC_SHIFT)
#define OP_PCL_LTE_MIXED_AUTH_NULL (OP_PCL_LTE_NULL << \
OP_PCL_LTE_MIXED_AUTH_SHIFT)
#define OP_PCL_LTE_MIXED_AUTH_SNOW (OP_PCL_LTE_SNOW << \
diff --git a/drivers/crypto/dpaa2_sec/hw/desc/algo.h b/drivers/crypto/dpaa2_sec/hw/desc/algo.h
index 91f3e067..febcb6d0 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc/algo.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc/algo.h
@@ -410,6 +410,35 @@ cnstr_shdsc_kasumi_f9(uint32_t *descbuf, bool ps, bool swap,
}
/**
+ * cnstr_shdsc_crc - CRC32 Accelerator (IEEE 802 CRC32 protocol mode)
+ * @descbuf: pointer to descriptor-under-construction buffer
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ *
+ * Return: size of descriptor written in words or negative number on error
+ */
+static inline int
+cnstr_shdsc_crc(uint32_t *descbuf, bool swap)
+{
+ struct program prg;
+ struct program *p = &prg;
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_CRC,
+ OP_ALG_AAI_802 | OP_ALG_AAI_DOC,
+ OP_ALG_AS_FINALIZE, 0, DIR_ENC);
+ SEQFIFOLOAD(p, MSG2, 0, VLF | LAST2);
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ return PROGRAM_FINALIZE(p);
+}
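A hedged usage sketch for the relocated constructor; the buffer size and error handling are assumptions, not part of the patch:

    uint32_t crc_desc[64];   /* assumed large enough for this descriptor */
    int desc_len;

    /* swap is true when core endianness does not match SEC endianness */
    desc_len = cnstr_shdsc_crc(crc_desc, true);
    if (desc_len < 0)
            return desc_len;   /* handle construction failure */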
+
+/**
* cnstr_shdsc_gcm_encap - AES-GCM encap as a shared descriptor
* @descbuf: pointer to descriptor-under-construction buffer
* @ps: if 36/40bit addressing is desired, this parameter must be true
@@ -614,33 +643,4 @@ cnstr_shdsc_gcm_decap(uint32_t *descbuf, bool ps, bool swap,
return PROGRAM_FINALIZE(p);
}
-/**
- * cnstr_shdsc_crc - CRC32 Accelerator (IEEE 802 CRC32 protocol mode)
- * @descbuf: pointer to descriptor-under-construction buffer
- * @swap: must be true when core endianness doesn't match SEC endianness
- *
- * Return: size of descriptor written in words or negative number on error
- */
-static inline int
-cnstr_shdsc_crc(uint32_t *descbuf, bool swap)
-{
- struct program prg;
- struct program *p = &prg;
-
- PROGRAM_CNTXT_INIT(p, descbuf, 0);
- if (swap)
- PROGRAM_SET_BSWAP(p);
-
- SHR_HDR(p, SHR_ALWAYS, 1, 0);
-
- MATHB(p, SEQINSZ, SUB, MATH2, VSEQINSZ, 4, 0);
- ALG_OPERATION(p, OP_ALG_ALGSEL_CRC,
- OP_ALG_AAI_802 | OP_ALG_AAI_DOC,
- OP_ALG_AS_FINALIZE, 0, DIR_ENC);
- SEQFIFOLOAD(p, MSG2, 0, VLF | LAST2);
- SEQSTORE(p, CONTEXT2, 0, 4, 0);
-
- return PROGRAM_FINALIZE(p);
-}
-
#endif /* __DESC_ALGO_H__ */
diff --git a/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h b/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
index 35cc02a6..d256a391 100644
--- a/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
+++ b/drivers/crypto/dpaa2_sec/hw/desc/ipsec.h
@@ -522,44 +522,133 @@ enum ipsec_icv_size {
/*
* IPSec ESP Datapath Protocol Override Register (DPOVRD)
+ * IPSEC_N_* defines are for IPsec new mode.
*/
-#define IPSEC_DECO_DPOVRD_USE 0x80
+/**
+ * IPSEC_DPOVRD_USE - DPOVRD will override values specified in the PDB
+ */
+#define IPSEC_DPOVRD_USE BIT(31)
-struct ipsec_deco_dpovrd {
- uint8_t ovrd_ecn;
- uint8_t ip_hdr_len;
- uint8_t nh_offset;
- union {
- uint8_t next_header; /* next header if encap */
- uint8_t rsvd; /* reserved if decap */
- };
-};
+/**
+ * IPSEC_DPOVRD_ECN_SHIFT - Explicit Congestion Notification
+ *
+ * If set, MSB of the 4 bits indicates that the 2 LSBs will replace the ECN bits
+ * in the IP header.
+ */
+#define IPSEC_DPOVRD_ECN_SHIFT 24
-struct ipsec_new_encap_deco_dpovrd {
-#define IPSEC_NEW_ENCAP_DECO_DPOVRD_USE 0x8000
- uint16_t ovrd_ip_hdr_len; /* OVRD + outer IP header material
- * length
- */
-#define IPSEC_NEW_ENCAP_OIMIF 0x80
- uint8_t oimif_aoipho; /* OIMIF + actual outer IP header
- * offset
- */
- uint8_t rsvd;
-};
+/**
+ * IPSEC_DPOVRD_ECN_MASK - See IPSEC_DPOVRD_ECN_SHIFT
+ */
+#define IPSEC_DPOVRD_ECN_MASK (0xf << IPSEC_DPOVRD_ECN_SHIFT)
-struct ipsec_new_decap_deco_dpovrd {
- uint8_t ovrd;
- uint8_t aoipho_hi; /* upper nibble of actual outer IP
- * header
- */
- uint16_t aoipho_lo_ip_hdr_len; /* lower nibble of actual outer IP
- * header + outer IP header material
- */
-};
+/**
+ * IPSEC_DPOVRD_IP_HDR_LEN_SHIFT - The length (in bytes) of the portion of the
+ * IP header that is not encrypted
+ */
+#define IPSEC_DPOVRD_IP_HDR_LEN_SHIFT 16
+
+/**
+ * IPSEC_DPOVRD_IP_HDR_LEN_MASK - See IPSEC_DPOVRD_IP_HDR_LEN_SHIFT
+ */
+#define IPSEC_DPOVRD_IP_HDR_LEN_MASK (0xff << IPSEC_DPOVRD_IP_HDR_LEN_SHIFT)
+
+/**
+ * IPSEC_DPOVRD_NH_OFFSET_SHIFT - The location of the next header field within
+ * the IP header of the transport mode packet
+ *
+ * Encap:
+ * ESP_Trailer_NH <-- IP_Hdr[DPOVRD[NH_OFFSET]]
+ * IP_Hdr[DPOVRD[NH_OFFSET]] <-- DPOVRD[NH]
+ * Decap:
+ * IP_Hdr[DPOVRD[NH_OFFSET]] <-- ESP_Trailer_NH
+ */
+#define IPSEC_DPOVRD_NH_OFFSET_SHIFT 8
+
+/**
+ * IPSEC_DPOVRD_NH_OFFSET_MASK - See IPSEC_DPOVRD_NH_OFFSET_SHIFT
+ */
+#define IPSEC_DPOVRD_NH_OFFSET_MASK (0xff << IPSEC_DPOVRD_NH_OFFSET_SHIFT)
+
+/**
+ * IPSEC_DPOVRD_NH_MASK - See IPSEC_DPOVRD_NH_OFFSET_SHIFT
+ * Valid only for encapsulation.
+ */
+#define IPSEC_DPOVRD_NH_MASK 0xff
+
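Put together, a caller overrides the legacy (non-"new mode") transport handling by composing DPOVRD from these fields. A hedged sketch; the 20-byte header length, offset 9 and next-header value 0x06 (TCP) are illustrative only:

    uint32_t dpovrd;

    dpovrd  = IPSEC_DPOVRD_USE;                     /* honour DPOVRD over the PDB */
    dpovrd |= 20 << IPSEC_DPOVRD_IP_HDR_LEN_SHIFT;  /* unencrypted IP header bytes */
    dpovrd |= 9 << IPSEC_DPOVRD_NH_OFFSET_SHIFT;    /* next header field offset */
    dpovrd |= 0x06 & IPSEC_DPOVRD_NH_MASK;          /* next header value (encap only) */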
+/**
+ * IPSEC_N_ENCAP_DPOVRD_OIM_LEN_SHIFT - Outer IP header Material length (encap)
+ * Valid only if L2_COPY is not set.
+ */
+#define IPSEC_N_ENCAP_DPOVRD_OIM_LEN_SHIFT 16
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_OIM_LEN_MASK - See IPSEC_N_ENCAP_DPOVRD_OIM_LEN_SHIFT
+ */
+#define IPSEC_N_ENCAP_DPOVRD_OIM_LEN_MASK \
+ (0xfff << IPSEC_N_ENCAP_DPOVRD_OIM_LEN_SHIFT)
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_L2_LEN_SHIFT - L2 header length
+ * Valid only if L2_COPY is set.
+ */
+#define IPSEC_N_ENCAP_DPOVRD_L2_LEN_SHIFT 16
-static inline void
-__gen_auth_key(struct program *program, struct alginfo *authdata)
+/**
+ * IPSEC_N_ENCAP_DPOVRD_L2_LEN_MASK - See IPSEC_N_ENCAP_DPOVRD_L2_LEN_SHIFT
+ */
+#define IPSEC_N_ENCAP_DPOVRD_L2_LEN_MASK \
+ (0xff << IPSEC_N_ENCAP_DPOVRD_L2_LEN_SHIFT)
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_OIMIF - Outer IP header Material in Input Frame
+ */
+#define IPSEC_N_ENCAP_DPOVRD_OIMIF BIT(15)
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_L2_COPY - L2 header present in input frame
+ *
+ * Note: For Era <= 8, this bit is reserved (not used) by HW.
+ */
+#define IPSEC_N_ENCAP_DPOVRD_L2_COPY BIT(14)
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_AOIPHO_SHIFT - Actual Outer IP Header Offset (encap)
+ */
+#define IPSEC_N_ENCAP_DPOVRD_AOIPHO_SHIFT 8
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_AOIPHO_MASK - See IPSEC_N_ENCAP_DPOVRD_AOIPHO_SHIFT
+ */
+#define IPSEC_N_ENCAP_DPOVRD_AOIPHO_MASK \
+ (0x3c << IPSEC_N_ENCAP_DPOVRD_AOIPHO_SHIFT)
+
+/**
+ * IPSEC_N_ENCAP_DPOVRD_NH_MASK - Next Header
+ *
+ * Used in the Next Header field of the encapsulated payload.
+ */
+#define IPSEC_N_ENCAP_DPOVRD_NH_MASK 0xff
+
+/**
+ * IPSEC_N_DECAP_DPOVRD_AOIPHO_SHIFT - Actual Outer IP Header Offset (decap)
+ */
+#define IPSEC_N_DECAP_DPOVRD_AOIPHO_SHIFT 12
+
+/**
+ * IPSEC_N_DECAP_DPOVRD_AOIPHO_MASK - See IPSEC_N_DECAP_DPOVRD_AOIPHO_SHIFT
+ */
+#define IPSEC_N_DECAP_DPOVRD_AOIPHO_MASK \
+ (0xff << IPSEC_N_DECAP_DPOVRD_AOIPHO_SHIFT)
+
+/**
+ * IPSEC_N_DECAP_DPOVRD_OIM_LEN_MASK - Outer IP header Material length (decap)
+ */
+#define IPSEC_N_DECAP_DPOVRD_OIM_LEN_MASK 0xfff
+
+static inline void __gen_auth_key(struct program *program,
+ struct alginfo *authdata)
{
uint32_t dkp_protid;
@@ -603,6 +692,7 @@ __gen_auth_key(struct program *program, struct alginfo *authdata)
* @descbuf: pointer to buffer used for descriptor construction
* @ps: if 36/40bit addressing is desired, this parameter must be true
* @swap: if true, perform descriptor byte swapping on a 4-byte boundary
+ * @share: sharing type of shared descriptor
* @pdb: pointer to the PDB to be used with this descriptor
* This structure will be copied inline to the descriptor under
* construction. No error checking will be made. Refer to the
@@ -621,6 +711,7 @@ __gen_auth_key(struct program *program, struct alginfo *authdata)
*/
static inline int
cnstr_shdsc_ipsec_encap(uint32_t *descbuf, bool ps, bool swap,
+ enum rta_share_type share,
struct ipsec_encap_pdb *pdb,
struct alginfo *cipherdata,
struct alginfo *authdata)
@@ -638,7 +729,7 @@ cnstr_shdsc_ipsec_encap(uint32_t *descbuf, bool ps, bool swap,
PROGRAM_SET_BSWAP(p);
if (ps)
PROGRAM_SET_36BIT_ADDR(p);
- phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ phdr = SHR_HDR(p, share, hdr, 0);
__rta_copy_ipsec_encap_pdb(p, pdb, cipherdata->algtype);
COPY_DATA(p, pdb->ip_hdr, pdb->ip_hdr_len);
SET_LABEL(p, hdr);
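The added share parameter flows straight into SHR_HDR, so existing callers keep the previous behaviour by passing SHR_SERIAL. A hedged call sketch, with descbuf, the PDB and the alginfo structures assumed to be prepared as before:

    desc_len = cnstr_shdsc_ipsec_encap(descbuf, true /* ps */, swap,
                                       SHR_SERIAL, &encap_pdb,
                                       &cipherdata, &authdata);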
@@ -669,6 +760,7 @@ cnstr_shdsc_ipsec_encap(uint32_t *descbuf, bool ps, bool swap,
* @descbuf: pointer to buffer used for descriptor construction
* @ps: if 36/40bit addressing is desired, this parameter must be true
* @swap: if true, perform descriptor byte swapping on a 4-byte boundary
+ * @share: sharing type of shared descriptor
* @pdb: pointer to the PDB to be used with this descriptor
* This structure will be copied inline to the descriptor under
* construction. No error checking will be made. Refer to the
@@ -687,6 +779,7 @@ cnstr_shdsc_ipsec_encap(uint32_t *descbuf, bool ps, bool swap,
*/
static inline int
cnstr_shdsc_ipsec_decap(uint32_t *descbuf, bool ps, bool swap,
+ enum rta_share_type share,
struct ipsec_decap_pdb *pdb,
struct alginfo *cipherdata,
struct alginfo *authdata)
@@ -704,7 +797,7 @@ cnstr_shdsc_ipsec_decap(uint32_t *descbuf, bool ps, bool swap,
PROGRAM_SET_BSWAP(p);
if (ps)
PROGRAM_SET_36BIT_ADDR(p);
- phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ phdr = SHR_HDR(p, share, hdr, 0);
__rta_copy_ipsec_decap_pdb(p, pdb, cipherdata->algtype);
SET_LABEL(p, hdr);
pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, BOTH|SHRD);
@@ -1040,7 +1133,7 @@ cnstr_shdsc_ipsec_decap_des_aes_xcbc(uint32_t *descbuf,
* layers to determine whether Outer IP Header and/or keys can be inlined or
* not. To be used as first parameter of rta_inline_query().
*/
-#define IPSEC_NEW_ENC_BASE_DESC_LEN (5 * CAAM_CMD_SZ + \
+#define IPSEC_NEW_ENC_BASE_DESC_LEN (12 * CAAM_CMD_SZ + \
sizeof(struct ipsec_encap_pdb))
/**
@@ -1052,7 +1145,7 @@ cnstr_shdsc_ipsec_decap_des_aes_xcbc(uint32_t *descbuf,
* layers to determine whether Outer IP Header and/or key can be inlined or
* not. To be used as first parameter of rta_inline_query().
*/
-#define IPSEC_NEW_NULL_ENC_BASE_DESC_LEN (4 * CAAM_CMD_SZ + \
+#define IPSEC_NEW_NULL_ENC_BASE_DESC_LEN (11 * CAAM_CMD_SZ + \
sizeof(struct ipsec_encap_pdb))
/**
@@ -1061,6 +1154,7 @@ cnstr_shdsc_ipsec_decap_des_aes_xcbc(uint32_t *descbuf,
* @descbuf: pointer to buffer used for descriptor construction
* @ps: if 36/40bit addressing is desired, this parameter must be true
* @swap: must be true when core endianness doesn't match SEC endianness
+ * @share: sharing type of shared descriptor
* @pdb: pointer to the PDB to be used with this descriptor
* This structure will be copied inline to the descriptor under
* construction. No error checking will be made. Refer to the
@@ -1080,11 +1174,21 @@ cnstr_shdsc_ipsec_decap_des_aes_xcbc(uint32_t *descbuf,
* compute MDHA on the fly in HW.
* Valid algorithm values - one of OP_PCL_IPSEC_*
*
+ * Note: L2 header copy functionality is implemented assuming that bits 14
+ * (currently reserved) and 16-23 (part of Outer IP Header Material Length)
+ * in DPOVRD register are not used (which is usually the case when L3 header
+ * is provided in PDB).
+ * When DPOVRD[14] is set, the frame starts with an L2 header; in this case, the
+ * L2 header length is found at DPOVRD[23:16]. SEC uses this length to copy
+ * the header and then it deletes DPOVRD[23:16] (so there is no side effect
+ * when later running IPsec protocol).
+ *
* Return: size of descriptor written in words or negative number on error
*/
static inline int
cnstr_shdsc_ipsec_new_encap(uint32_t *descbuf, bool ps,
bool swap,
+ enum rta_share_type share,
struct ipsec_encap_pdb *pdb,
uint8_t *opt_ip_hdr,
struct alginfo *cipherdata,
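The L2-copy note in the hunk above maps onto two DPOVRD fields that the descriptor checks: bit 14 requests the copy and bits 23:16 carry the L2 header length. A hedged sketch, assuming a 14-byte Ethernet header precedes the IP packet:

    uint32_t dpovrd = IPSEC_N_ENCAP_DPOVRD_L2_COPY |
                      (14 << IPSEC_N_ENCAP_DPOVRD_L2_LEN_SHIFT);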
@@ -1097,6 +1201,8 @@ cnstr_shdsc_ipsec_new_encap(uint32_t *descbuf, bool ps,
REFERENCE(pkeyjmp);
LABEL(hdr);
REFERENCE(phdr);
+ LABEL(l2copy);
+ REFERENCE(pl2copy);
if (rta_sec_era < RTA_SEC_ERA_8) {
pr_err("IPsec new mode encap: available only for Era %d or above\n",
@@ -1109,7 +1215,7 @@ cnstr_shdsc_ipsec_new_encap(uint32_t *descbuf, bool ps,
PROGRAM_SET_BSWAP(p);
if (ps)
PROGRAM_SET_36BIT_ADDR(p);
- phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ phdr = SHR_HDR(p, share, hdr, 0);
__rta_copy_ipsec_encap_pdb(p, pdb, cipherdata->algtype);
@@ -1128,6 +1234,16 @@ cnstr_shdsc_ipsec_new_encap(uint32_t *descbuf, bool ps,
}
SET_LABEL(p, hdr);
+ MATHB(p, DPOVRD, AND, IPSEC_N_ENCAP_DPOVRD_L2_COPY, NONE, 4, IMMED2);
+ pl2copy = JUMP(p, l2copy, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+ MATHI(p, DPOVRD, RSHIFT, IPSEC_N_ENCAP_DPOVRD_L2_LEN_SHIFT, VSEQOUTSZ,
+ 1, 0);
+ MATHB(p, DPOVRD, AND, ~IPSEC_N_ENCAP_DPOVRD_L2_LEN_MASK, DPOVRD, 4,
+ IMMED2);
+ /* TODO: CLASS2 corresponds to AUX=2'b10; add more intuitive defines */
+ SEQFIFOSTORE(p, METADATA, 0, 0, CLASS2 | VLF);
+ SET_LABEL(p, l2copy);
+
pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
if (authdata->keylen)
__gen_auth_key(p, authdata);
@@ -1138,6 +1254,7 @@ cnstr_shdsc_ipsec_new_encap(uint32_t *descbuf, bool ps,
PROTOCOL(p, OP_TYPE_ENCAP_PROTOCOL,
OP_PCLID_IPSEC_NEW,
(uint16_t)(cipherdata->algtype | authdata->algtype));
+ PATCH_JUMP(p, pl2copy, l2copy);
PATCH_JUMP(p, pkeyjmp, keyjmp);
PATCH_HDR(p, phdr, hdr);
return PROGRAM_FINALIZE(p);
@@ -1171,6 +1288,7 @@ cnstr_shdsc_ipsec_new_encap(uint32_t *descbuf, bool ps,
* @descbuf: pointer to buffer used for descriptor construction
* @ps: if 36/40bit addressing is desired, this parameter must be true
* @swap: must be true when core endianness doesn't match SEC endianness
+ * @share: sharing type of shared descriptor
* @pdb: pointer to the PDB to be used with this descriptor
* This structure will be copied inline to the descriptor under
* construction. No error checking will be made. Refer to the
@@ -1188,6 +1306,7 @@ cnstr_shdsc_ipsec_new_encap(uint32_t *descbuf, bool ps,
static inline int
cnstr_shdsc_ipsec_new_decap(uint32_t *descbuf, bool ps,
bool swap,
+ enum rta_share_type share,
struct ipsec_decap_pdb *pdb,
struct alginfo *cipherdata,
struct alginfo *authdata)
@@ -1211,7 +1330,7 @@ cnstr_shdsc_ipsec_new_decap(uint32_t *descbuf, bool ps,
PROGRAM_SET_BSWAP(p);
if (ps)
PROGRAM_SET_36BIT_ADDR(p);
- phdr = SHR_HDR(p, SHR_SERIAL, hdr, 0);
+ phdr = SHR_HDR(p, share, hdr, 0);
__rta_copy_ipsec_decap_pdb(p, pdb, cipherdata->algtype);
SET_LABEL(p, hdr);
pkeyjmp = JUMP(p, keyjmp, LOCAL_JUMP, ALL_TRUE, SHRD);
diff --git a/drivers/crypto/dpaa2_sec/hw/desc/pdcp.h b/drivers/crypto/dpaa2_sec/hw/desc/pdcp.h
new file mode 100644
index 00000000..719ef605
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/hw/desc/pdcp.h
@@ -0,0 +1,2796 @@
+/*
+ * Copyright 2008-2013 Freescale Semiconductor, Inc.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause or GPL-2.0+
+ */
+
+#ifndef __DESC_PDCP_H__
+#define __DESC_PDCP_H__
+
+#include "hw/rta.h"
+#include "common.h"
+
+/**
+ * DOC: PDCP Shared Descriptor Constructors
+ *
+ * Shared descriptors for PDCP protocol.
+ */
+
+/**
+ * PDCP_NULL_MAX_FRAME_LEN - The maximum frame length that is supported by
+ * PDCP NULL protocol.
+ */
+#define PDCP_NULL_MAX_FRAME_LEN 0x00002FFF
+
+/**
+ * PDCP_MAC_I_LEN - The length of the MAC-I for PDCP protocol operation
+ */
+#define PDCP_MAC_I_LEN 0x00000004
+
+/**
+ * PDCP_MAX_FRAME_LEN_STATUS - The status returned in FD status/command field in
+ * case the input frame is larger than
+ * PDCP_NULL_MAX_FRAME_LEN.
+ */
+#define PDCP_MAX_FRAME_LEN_STATUS 0xF1
+
+/**
+ * PDCP_C_PLANE_SN_MASK - This mask is used in the PDCP descriptors for
+ * extracting the sequence number (SN) from the PDCP
+ * Control Plane header. For PDCP Control Plane, the SN
+ * is constant (5 bits) as opposed to PDCP Data Plane
+ * (7/12/15 bits).
+ */
+#define PDCP_C_PLANE_SN_MASK 0x1F000000
+#define PDCP_C_PLANE_SN_MASK_BE 0x0000001F
+
+/**
+ * PDCP_U_PLANE_15BIT_SN_MASK - This mask is used in the PDCP descriptors for
+ * extracting the sequence number (SN) from the
+ * PDCP User Plane header. For PDCP Control Plane,
+ * the SN is constant (5 bits) as opposed to PDCP
+ * Data Plane (7/12/15 bits).
+ */
+#define PDCP_U_PLANE_15BIT_SN_MASK 0xFF7F0000
+#define PDCP_U_PLANE_15BIT_SN_MASK_BE 0x00007FFF
+
+/**
+ * PDCP_BEARER_MASK - This mask is used for masking out the bearer for PDCP
+ * processing with SNOW f9 in LTE.
+ *
+ * The value on which this mask is applied is formatted as below:
+ * Count-C (32 bit) | Bearer (5 bit) | Direction (1 bit) | 0 (26 bits)
+ *
+ * Applying this mask is done for creating the upper 64 bits of the IV needed
+ * for SNOW f9.
+ *
+ * The lower 32 bits of the mask are used for masking the direction for AES
+ * CMAC IV.
+ */
+#define PDCP_BEARER_MASK 0x00000004FFFFFFFFull
+#define PDCP_BEARER_MASK_BE 0xFFFFFFFF04000000ull
+
+/**
+ * PDCP_DIR_MASK - This mask is used for masking out the direction for PDCP
+ * processing with SNOW f9 in LTE.
+ *
+ * The value on which this mask is applied is formatted as below:
+ * Bearer (5 bit) | Direction (1 bit) | 0 (26 bits)
+ *
+ * Applying this mask is done for creating the lower 32 bits of the IV needed
+ * for SNOW f9.
+ *
+ * The upper 32 bits of the mask are used for masking the direction for AES
+ * CMAC IV.
+ */
+#define PDCP_DIR_MASK 0x00000000000000F8ull
+#define PDCP_DIR_MASK_BE 0xF800000000000000ull
+
+/**
+ * PDCP_NULL_INT_MAC_I_VAL - The value of the PDCP PDU MAC-I in case NULL
+ * integrity is used.
+ */
+
+#define PDCP_NULL_INT_MAC_I_VAL 0x00000000
+
+/**
+ * PDCP_NULL_INT_ICV_CHECK_FAILED_STATUS - The status used to report ICV check
+ * failed in case of NULL integrity
+ * Control Plane processing.
+ */
+#define PDCP_NULL_INT_ICV_CHECK_FAILED_STATUS 0x0A
+/**
+ * PDCP_DPOVRD_HFN_OV_EN - Value to be used in the FD status/cmd field to
+ * indicate the HFN override mechanism is active for the
+ * frame.
+ */
+#define PDCP_DPOVRD_HFN_OV_EN 0x80000000
+
+/**
+ * PDCP_P4080REV2_HFN_OV_BUFLEN - The length in bytes of the supplementary space
+ * that must be provided by the user at the
+ * beginning of the input frame buffer for
+ * P4080 REV 2.
+ *
+ * The format of the frame buffer is the following:
+ *
+ * |<---PDCP_P4080REV2_HFN_OV_BUFLEN-->|
+ * //===================================||============||==============\\
+ * || PDCP_DPOVRD_HFN_OV_EN | HFN value || PDCP Header|| PDCP Payload ||
+ * \\===================================||============||==============//
+ *
+ * If HFN override mechanism is not desired, then the MSB of the first 4 bytes
+ * must be set to 0b.
+ */
+#define PDCP_P4080REV2_HFN_OV_BUFLEN 4
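In practice the caller prepends a single 32-bit word to the frame on P4080 rev 2. A hedged sketch, where buf and hfn are assumed to be caller-provided and byte-order handling is omitted:

    uint32_t hfn_ov_word = PDCP_DPOVRD_HFN_OV_EN | hfn;

    memcpy(buf, &hfn_ov_word, PDCP_P4080REV2_HFN_OV_BUFLEN);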
+
+/**
+ * enum cipher_type_pdcp - Type selectors for cipher types in PDCP protocol OP
+ * instructions.
+ * @PDCP_CIPHER_TYPE_NULL: NULL
+ * @PDCP_CIPHER_TYPE_SNOW: SNOW F8
+ * @PDCP_CIPHER_TYPE_AES: AES
+ * @PDCP_CIPHER_TYPE_ZUC: ZUCE
+ * @PDCP_CIPHER_TYPE_INVALID: invalid option
+ */
+enum cipher_type_pdcp {
+ PDCP_CIPHER_TYPE_NULL,
+ PDCP_CIPHER_TYPE_SNOW,
+ PDCP_CIPHER_TYPE_AES,
+ PDCP_CIPHER_TYPE_ZUC,
+ PDCP_CIPHER_TYPE_INVALID
+};
+
+/**
+ * enum auth_type_pdcp - Type selectors for integrity types in PDCP protocol OP
+ * instructions.
+ * @PDCP_AUTH_TYPE_NULL: NULL
+ * @PDCP_AUTH_TYPE_SNOW: SNOW F9
+ * @PDCP_AUTH_TYPE_AES: AES CMAC
+ * @PDCP_AUTH_TYPE_ZUC: ZUCA
+ * @PDCP_AUTH_TYPE_INVALID: invalid option
+ */
+enum auth_type_pdcp {
+ PDCP_AUTH_TYPE_NULL,
+ PDCP_AUTH_TYPE_SNOW,
+ PDCP_AUTH_TYPE_AES,
+ PDCP_AUTH_TYPE_ZUC,
+ PDCP_AUTH_TYPE_INVALID
+};
+
+/**
+ * enum pdcp_dir - Type selectors for direction for PDCP protocol
+ * @PDCP_DIR_UPLINK: uplink direction
+ * @PDCP_DIR_DOWNLINK: downlink direction
+ * @PDCP_DIR_INVALID: invalid option
+ */
+enum pdcp_dir {
+ PDCP_DIR_UPLINK = 0,
+ PDCP_DIR_DOWNLINK = 1,
+ PDCP_DIR_INVALID
+};
+
+/**
+ * enum pdcp_plane - PDCP domain selectors
+ * @PDCP_CONTROL_PLANE: Control Plane
+ * @PDCP_DATA_PLANE: Data Plane
+ * @PDCP_SHORT_MAC: Short MAC
+ */
+enum pdcp_plane {
+ PDCP_CONTROL_PLANE,
+ PDCP_DATA_PLANE,
+ PDCP_SHORT_MAC
+};
+
+/**
+ * enum pdcp_sn_size - Sequence Number Size selectors for PDCP protocol
+ * @PDCP_SN_SIZE_5: 5bit sequence number
+ * @PDCP_SN_SIZE_7: 7bit sequence number
+ * @PDCP_SN_SIZE_12: 12bit sequence number
+ * @PDCP_SN_SIZE_15: 15bit sequence number
+ */
+enum pdcp_sn_size {
+ PDCP_SN_SIZE_5 = 5,
+ PDCP_SN_SIZE_7 = 7,
+ PDCP_SN_SIZE_12 = 12,
+ PDCP_SN_SIZE_15 = 15
+};
+
+/*
+ * PDCP Control Plane Protocol Data Blocks
+ */
+#define PDCP_C_PLANE_PDB_HFN_SHIFT 5
+#define PDCP_C_PLANE_PDB_BEARER_SHIFT 27
+#define PDCP_C_PLANE_PDB_DIR_SHIFT 26
+#define PDCP_C_PLANE_PDB_HFN_THR_SHIFT 5
+
+#define PDCP_U_PLANE_PDB_OPT_SHORT_SN 0x2
+#define PDCP_U_PLANE_PDB_OPT_15B_SN 0x4
+#define PDCP_U_PLANE_PDB_SHORT_SN_HFN_SHIFT 7
+#define PDCP_U_PLANE_PDB_LONG_SN_HFN_SHIFT 12
+#define PDCP_U_PLANE_PDB_15BIT_SN_HFN_SHIFT 15
+#define PDCP_U_PLANE_PDB_BEARER_SHIFT 27
+#define PDCP_U_PLANE_PDB_DIR_SHIFT 26
+#define PDCP_U_PLANE_PDB_SHORT_SN_HFN_THR_SHIFT 7
+#define PDCP_U_PLANE_PDB_LONG_SN_HFN_THR_SHIFT 12
+#define PDCP_U_PLANE_PDB_15BIT_SN_HFN_THR_SHIFT 15
+
+struct pdcp_pdb {
+ union {
+ uint32_t opt;
+ uint32_t rsvd;
+ } opt_res;
+ uint32_t hfn_res; /* HyperFrame number (27, 25 or 21 bits),
+ * left aligned & right-padded with zeros.
+ */
+ uint32_t bearer_dir_res;/* Bearer(5 bits), packet direction (1 bit),
+ * left aligned & right-padded with zeros.
+ */
+ uint32_t hfn_thr_res; /* HyperFrame number threshold (27, 25 or 21
+ * bits), left aligned & right-padded with
+ * zeros.
+ */
+};
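The *_SHIFT values above left-align the HFN, bearer, direction and HFN threshold inside these words. A hedged sketch of filling a Control Plane PDB, with hfn, bearer, dir and hfn_threshold assumed to be caller inputs:

    struct pdcp_pdb pdb;

    memset(&pdb, 0, sizeof(pdb));
    pdb.hfn_res = hfn << PDCP_C_PLANE_PDB_HFN_SHIFT;
    pdb.bearer_dir_res = (bearer << PDCP_C_PLANE_PDB_BEARER_SHIFT) |
                         (dir << PDCP_C_PLANE_PDB_DIR_SHIFT);
    pdb.hfn_thr_res = hfn_threshold << PDCP_C_PLANE_PDB_HFN_THR_SHIFT;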
+
+/*
+ * PDCP internal PDB types
+ */
+enum pdb_type_e {
+ PDCP_PDB_TYPE_NO_PDB,
+ PDCP_PDB_TYPE_FULL_PDB,
+ PDCP_PDB_TYPE_REDUCED_PDB,
+ PDCP_PDB_TYPE_INVALID
+};
+
+/*
+ * Function for appending the portion of a PDCP Control Plane shared descriptor
+ * which performs NULL encryption and integrity (i.e. copies the input frame
+ * to the output frame, appending 32 bits of zeros at the end (MAC-I for
+ * NULL integrity).
+ */
+static inline int
+pdcp_insert_cplane_null_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata __maybe_unused,
+ struct alginfo *authdata __maybe_unused,
+ unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ LABEL(local_offset);
+ REFERENCE(move_cmd_read_descbuf);
+ REFERENCE(move_cmd_write_descbuf);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, ADD, ZERO, VSEQINSZ, 4, 0);
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ } else {
+ MATHB(p, SEQINSZ, ADD, ONE, VSEQINSZ, 4, 0);
+ MATHB(p, VSEQINSZ, SUB, ONE, VSEQINSZ, 4, 0);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ MATHB(p, VSEQINSZ, SUB, ONE, MATH0, 4, 0);
+ } else {
+ MATHB(p, VSEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQINSZ, 4,
+ IMMED2);
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ MATHB(p, VSEQOUTSZ, SUB, ONE, MATH0, 4, 0);
+ }
+
+ MATHB(p, MATH0, ADD, ONE, MATH0, 4, 0);
+
+ /*
+ * Since MOVELEN is available only starting with
+ * SEC ERA 3, use poor man's MOVELEN: create a MOVE
+ * command dynamically by writing the length from M1 by
+ * OR-ing the command in the M1 register and MOVE the
+ * result into the descriptor buffer. Care must be taken
+ * wrt. the location of the command because of SEC
+ * pipelining. The actual MOVEs are written at the end
+ * of the descriptor due to calculations needed on the
+ * offset in the descriptor for the MOVE command.
+ */
+ move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH0, 0, 6,
+ IMMED);
+ move_cmd_write_descbuf = MOVE(p, MATH0, 0, DESCBUF, 0, 8,
+ WAITCOMP | IMMED);
+ }
+ MATHB(p, VSEQINSZ, SUB, PDCP_NULL_MAX_FRAME_LEN, NONE, 4,
+ IMMED2);
+ JUMP(p, PDCP_MAX_FRAME_LEN_STATUS, HALT_STATUS, ALL_FALSE, MATH_N);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, VSEQINSZ, ADD, ZERO, MATH0, 4, 0);
+ else
+ MATHB(p, VSEQOUTSZ, ADD, ZERO, MATH0, 4, 0);
+ }
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MOVE(p, AB1, 0, OFIFO, 0, MATH0, 0);
+ } else {
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+ /* Placeholder for MOVE command with length from M1 register */
+ MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MATHB(p, MATH1, XOR, MATH1, MATH0, 8, 0);
+ MOVE(p, MATH0, 0, OFIFO, 0, 4, IMMED);
+ }
+
+ if (rta_sec_era < RTA_SEC_ERA_3) {
+ PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
+ PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
+ }
+
+ return 0;
+}
+
+static inline int
+insert_copy_frame_op(struct program *p,
+ struct alginfo *cipherdata __maybe_unused,
+ unsigned int dir __maybe_unused)
+{
+ LABEL(local_offset);
+ REFERENCE(move_cmd_read_descbuf);
+ REFERENCE(move_cmd_write_descbuf);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, ADD, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, ADD, ZERO, VSEQOUTSZ, 4, 0);
+ } else {
+ MATHB(p, SEQINSZ, ADD, ONE, VSEQINSZ, 4, 0);
+ MATHB(p, VSEQINSZ, SUB, ONE, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, ADD, ONE, VSEQOUTSZ, 4, 0);
+ MATHB(p, VSEQOUTSZ, SUB, ONE, VSEQOUTSZ, 4, 0);
+ MATHB(p, VSEQINSZ, SUB, ONE, MATH0, 4, 0);
+ MATHB(p, MATH0, ADD, ONE, MATH0, 4, 0);
+
+ /*
+ * Since MOVELEN is available only starting with
+ * SEC ERA 3, use poor man's MOVELEN: create a MOVE
+ * command dynamically by writing the length from M1 by
+ * OR-ing the command in the M1 register and MOVE the
+ * result into the descriptor buffer. Care must be taken
+ * wrt. the location of the command because of SEC
+ * pipelining. The actual MOVEs are written at the end
+ * of the descriptor due to calculations needed on the
+ * offset in the descriptor for the MOVE command.
+ */
+ move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH0, 0, 6,
+ IMMED);
+ move_cmd_write_descbuf = MOVE(p, MATH0, 0, DESCBUF, 0, 8,
+ WAITCOMP | IMMED);
+ }
+ MATHB(p, SEQINSZ, SUB, PDCP_NULL_MAX_FRAME_LEN, NONE, 4,
+ IFB | IMMED2);
+ JUMP(p, PDCP_MAX_FRAME_LEN_STATUS, HALT_STATUS, ALL_FALSE, MATH_N);
+
+ if (rta_sec_era > RTA_SEC_ERA_2)
+ MATHB(p, VSEQINSZ, ADD, ZERO, MATH0, 4, 0);
+
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MOVE(p, AB1, 0, OFIFO, 0, MATH0, 0);
+ } else {
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+
+ /* Placeholder for MOVE command with length from M0 register */
+ MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
+
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ if (rta_sec_era < RTA_SEC_ERA_3) {
+ PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
+ PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
+ }
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_int_only_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata __maybe_unused,
+ struct alginfo *authdata, unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ LABEL(local_offset);
+ REFERENCE(move_cmd_read_descbuf);
+ REFERENCE(move_cmd_write_descbuf);
+
+ switch (authdata->algtype) {
+ case PDCP_AUTH_TYPE_SNOW:
+ /* Insert Auth Key */
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+
+ if (rta_sec_era > RTA_SEC_ERA_2 ||
+ (rta_sec_era == RTA_SEC_ERA_2 &&
+ era_2_sw_hfn_ovrd == 0)) {
+ SEQINPTR(p, 0, 1, RTO);
+ } else {
+ SEQINPTR(p, 0, 5, RTO);
+ SEQFIFOLOAD(p, SKIP, 4, 0);
+ }
+
+ if (swap == false) {
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+
+ MOVEB(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+
+ MATHB(p, MATH2, AND, PDCP_BEARER_MASK, MATH2, 8,
+ IMMED2);
+ MOVEB(p, DESCBUF, 0x0C, MATH3, 0, 4, WAITCOMP | IMMED);
+ MATHB(p, MATH3, AND, PDCP_DIR_MASK, MATH3, 8, IMMED2);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVEB(p, MATH2, 0, CONTEXT2, 0, 0x0C, WAITCOMP | IMMED);
+ } else {
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+
+ MOVE(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH2, AND, PDCP_BEARER_MASK_BE, MATH2, 8,
+ IMMED2);
+
+ MOVE(p, DESCBUF, 0x0C, MATH3, 0, 4, WAITCOMP | IMMED);
+ MATHB(p, MATH3, AND, PDCP_DIR_MASK_BE, MATH3, 8,
+ IMMED2);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVE(p, MATH2, 0, CONTEXT2, 0, 0x0C, WAITCOMP | IMMED);
+ }
+
+ if (dir == OP_TYPE_DECAP_PROTOCOL) {
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, MATH1, 4,
+ IMMED2);
+ } else {
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4,
+ 0);
+ } else {
+ MATHB(p, SEQINSZ, ADD, ONE, MATH1, 4,
+ 0);
+ MATHB(p, MATH1, SUB, ONE, MATH1, 4,
+ 0);
+ }
+ }
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
+ } else {
+ MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
+ MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
+
+ /*
+ * Since MOVELEN is available only starting with
+ * SEC ERA 3, use poor man's MOVELEN: create a MOVE
+ * command dynamically by writing the length from M1 by
+ * OR-ing the command in the M1 register and MOVE the
+ * result into the descriptor buffer. Care must be taken
+ * wrt. the location of the command because of SEC
+ * pipelining. The actual MOVEs are written at the end
+ * of the descriptor due to calculations needed on the
+ * offset in the descriptor for the MOVE command.
+ */
+ move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH1, 0, 6,
+ IMMED);
+ move_cmd_write_descbuf = MOVE(p, MATH1, 0, DESCBUF, 0,
+ 8, WAITCOMP | IMMED);
+ }
+
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F9, OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_ENC);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ SEQFIFOLOAD(p, MSGINSNOOP, 0,
+ VLF | LAST1 | LAST2 | FLUSH1);
+ MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
+ } else {
+ SEQFIFOLOAD(p, MSGINSNOOP, 0,
+ VLF | LAST1 | LAST2 | FLUSH1);
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+ /*
+ * Placeholder for MOVE command with length from M1
+ * register
+ */
+ MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+
+ if (dir == OP_TYPE_DECAP_PROTOCOL)
+ SEQFIFOLOAD(p, ICV2, 4, LAST2);
+ else
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ break;
+
+ case PDCP_AUTH_TYPE_AES:
+ /* Insert Auth Key */
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ if (rta_sec_era > RTA_SEC_ERA_2 ||
+ (rta_sec_era == RTA_SEC_ERA_2 &&
+ era_2_sw_hfn_ovrd == 0)) {
+ SEQINPTR(p, 0, 1, RTO);
+ } else {
+ SEQINPTR(p, 0, 5, RTO);
+ SEQFIFOLOAD(p, SKIP, 4, 0);
+ }
+
+ if (swap == false) {
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+
+ MOVEB(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVEB(p, MATH2, 0, IFIFOAB1, 0, 8, IMMED);
+ } else {
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+
+ MOVE(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVE(p, MATH2, 0, IFIFOAB1, 0, 8, IMMED);
+ }
+
+ if (dir == OP_TYPE_DECAP_PROTOCOL) {
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, MATH1, 4,
+ IMMED2);
+ } else {
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4,
+ 0);
+ } else {
+ MATHB(p, SEQINSZ, ADD, ONE, MATH1, 4,
+ 0);
+ MATHB(p, MATH1, SUB, ONE, MATH1, 4,
+ 0);
+ }
+ }
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
+ } else {
+ MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
+ MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
+
+ /*
+ * Since MOVELEN is available only starting with
+ * SEC ERA 3, use poor man's MOVELEN: create a MOVE
+ * command dynamically by writing the length from M1 by
+ * OR-ing the command in the M1 register and MOVE the
+ * result into the descriptor buffer. Care must be taken
+ * wrt. the location of the command because of SEC
+ * pipelining. The actual MOVEs are written at the end
+ * of the descriptor due to calculations needed on the
+ * offset in the descriptor for the MOVE command.
+ */
+ move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH1, 0, 6,
+ IMMED);
+ move_cmd_write_descbuf = MOVE(p, MATH1, 0, DESCBUF, 0,
+ 8, WAITCOMP | IMMED);
+ }
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CMAC,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_ENC);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MOVE(p, AB2, 0, OFIFO, 0, MATH1, 0);
+ SEQFIFOLOAD(p, MSGINSNOOP, 0,
+ VLF | LAST1 | LAST2 | FLUSH1);
+ } else {
+ SEQFIFOLOAD(p, MSGINSNOOP, 0,
+ VLF | LAST1 | LAST2 | FLUSH1);
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+
+ /*
+ * Placeholder for MOVE command with length from
+ * M1 register
+ */
+ MOVE(p, IFIFOAB2, 0, OFIFO, 0, 0, IMMED);
+
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+
+ if (dir == OP_TYPE_DECAP_PROTOCOL)
+ SEQFIFOLOAD(p, ICV1, 4, LAST1 | FLUSH1);
+ else
+ SEQSTORE(p, CONTEXT1, 0, 4, 0);
+
+ break;
+
+ case PDCP_AUTH_TYPE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+ /* Insert Auth Key */
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ SEQINPTR(p, 0, 1, RTO);
+ if (swap == false) {
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+
+ MOVEB(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVEB(p, MATH2, 0, CONTEXT2, 0, 8, IMMED);
+
+ } else {
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+
+ MOVE(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVE(p, MATH2, 0, CONTEXT2, 0, 8, IMMED);
+ }
+ if (dir == OP_TYPE_DECAP_PROTOCOL)
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, MATH1, 4,
+ IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
+
+ MATHB(p, MATH1, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, MATH1, SUB, ZERO, VSEQOUTSZ, 4, 0);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCA,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_ENC);
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
+ MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
+
+ if (dir == OP_TYPE_DECAP_PROTOCOL)
+ SEQFIFOLOAD(p, ICV2, 4, LAST2);
+ else
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ break;
+
+ default:
+ pr_err("%s: Invalid integrity algorithm selected: %d\n",
+ "pdcp_insert_cplane_int_only_op", authdata->algtype);
+ return -EINVAL;
+ }
+
+ if (rta_sec_era < RTA_SEC_ERA_3) {
+ PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
+ PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
+ }
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_enc_only_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata __maybe_unused,
+ unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ /* Insert Cipher Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ (uint16_t)cipherdata->algtype << 8);
+ return 0;
+ }
+
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ if (swap == false)
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ else
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+ SEQSTORE(p, MATH0, 7, 1, 0);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVE(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+
+ switch (cipherdata->algtype) {
+ case PDCP_CIPHER_TYPE_SNOW:
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 8, WAITCOMP | IMMED);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ } else {
+ MATHB(p, SEQINSZ, SUB, ONE, MATH1, 4, 0);
+ MATHB(p, MATH1, ADD, ONE, VSEQINSZ, 4, 0);
+ }
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL, ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ DIR_ENC : DIR_DEC);
+ break;
+
+ case PDCP_CIPHER_TYPE_AES:
+ MOVE(p, MATH2, 0, CONTEXT1, 0x10, 0x10, WAITCOMP | IMMED);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ } else {
+ MATHB(p, SEQINSZ, SUB, ONE, MATH1, 4, 0);
+ MATHB(p, MATH1, ADD, ONE, VSEQINSZ, 4, 0);
+ }
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CTR,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ DIR_ENC : DIR_DEC);
+ break;
+
+ case PDCP_CIPHER_TYPE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 0x08, IMMED);
+ MOVE(p, MATH2, 0, CONTEXT1, 0x08, 0x08, WAITCOMP | IMMED);
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4,
+ IMMED2);
+
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCE,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ DIR_ENC : DIR_DEC);
+ break;
+
+ default:
+ pr_err("%s: Invalid encrypt algorithm selected: %d\n",
+ "pdcp_insert_cplane_enc_only_op", cipherdata->algtype);
+ return -EINVAL;
+ }
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ SEQFIFOLOAD(p, MSG1, 0, VLF);
+ FIFOLOAD(p, MSG1, PDCP_NULL_INT_MAC_I_VAL, 4,
+ LAST1 | FLUSH1 | IMMED);
+ } else {
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+ MOVE(p, OFIFO, 0, MATH1, 4, PDCP_MAC_I_LEN, WAITCOMP | IMMED);
+ MATHB(p, MATH1, XOR, PDCP_NULL_INT_MAC_I_VAL, NONE, 4, IMMED2);
+ JUMP(p, PDCP_NULL_INT_ICV_CHECK_FAILED_STATUS,
+ HALT_STATUS, ALL_FALSE, MATH_Z);
+ }
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_acc_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ unsigned char era_2_hfn_ovrd __maybe_unused)
+{
+ /* Insert Auth Key */
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ /* Insert Cipher Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL, (uint16_t)cipherdata->algtype);
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_snow_aes_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ LABEL(back_to_sd_offset);
+ LABEL(end_desc);
+ LABEL(local_offset);
+ LABEL(jump_to_beginning);
+ LABEL(fifo_load_mac_i_offset);
+ REFERENCE(seqin_ptr_read);
+ REFERENCE(seqin_ptr_write);
+ REFERENCE(seq_out_read);
+ REFERENCE(jump_back_to_sd_cmd);
+ REFERENCE(move_mac_i_to_desc_buf);
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+
+ return 0;
+ }
+
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ if (swap == false)
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ else
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVE(p, DESCBUF, 4, MATH2, 0, 0x08, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ SEQSTORE(p, MATH0, 7, 1, 0);
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ if (rta_sec_era > RTA_SEC_ERA_2 ||
+ (rta_sec_era == RTA_SEC_ERA_2 &&
+ era_2_sw_hfn_ovrd == 0)) {
+ SEQINPTR(p, 0, 1, RTO);
+ } else {
+ SEQINPTR(p, 0, 5, RTO);
+ SEQFIFOLOAD(p, SKIP, 4, 0);
+ }
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ MOVE(p, MATH2, 0, IFIFOAB1, 0, 0x08, IMMED);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
+ MATHB(p, VSEQINSZ, ADD, PDCP_MAC_I_LEN - 1, VSEQOUTSZ,
+ 4, IMMED2);
+ } else {
+ MATHB(p, SEQINSZ, SUB, MATH3, VSEQINSZ, 4, 0);
+ MATHB(p, VSEQINSZ, ADD, PDCP_MAC_I_LEN - 1, VSEQOUTSZ,
+ 4, IMMED2);
+			/*
+			 * Note: Although the calculations below might seem a
+			 * little off, the logic is the following:
+			 *
+			 * - SEQ IN PTR RTO below needs the full length of the
+			 *   frame; in case of P4080_REV_2_HFN_OV_WORKAROUND,
+			 *   this means the length of the frame to be processed
+			 *   + 4 bytes (the HFN override flag and value).
+			 *   The length of the frame to be processed minus 1
+			 *   byte is in the VSIL register (because
+			 *   VSIL = SIL + 3, due to the 1-byte header having
+			 *   already been handled by the SEQ STORE above). So,
+			 *   for calculating the length to use in RTO, one is
+			 *   added to the VSIL value in order to obtain the
+			 *   total frame length. This helps in the case of
+			 *   P4080, which can have the value 0 as an operand
+			 *   in a MATH command only as SRC1.
+			 * - When the HFN override workaround is not enabled,
+			 *   the length of the frame is given by the SIL
+			 *   register; the calculation is similar to the one
+			 *   in the SEC 4.2 and SEC 5.3 cases.
+			 */
+ if (era_2_sw_hfn_ovrd)
+ MATHB(p, VSEQOUTSZ, ADD, ONE, MATH1, 4,
+ 0);
+ else
+ MATHB(p, SEQINSZ, ADD, MATH3, MATH1, 4,
+ 0);
+ }
+ /*
+ * Placeholder for filling the length in
+ * SEQIN PTR RTO below
+ */
+ seqin_ptr_read = MOVE(p, DESCBUF, 0, MATH1, 0, 6, IMMED);
+ seqin_ptr_write = MOVE(p, MATH1, 0, DESCBUF, 0, 8,
+ WAITCOMP | IMMED);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CMAC,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_DEC);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+ MOVE(p, CONTEXT1, 0, MATH3, 0, 4, WAITCOMP | IMMED);
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ LOAD(p, CLRW_CLR_C1KEY |
+ CLRW_CLR_C1CTX |
+ CLRW_CLR_C1ICV |
+ CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1MODE,
+ CLRW, 0, 4, IMMED);
+ else
+ LOAD(p, CLRW_RESET_CLS1_CHA |
+ CLRW_CLR_C1KEY |
+ CLRW_CLR_C1CTX |
+ CLRW_CLR_C1ICV |
+ CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1MODE,
+ CLRW, 0, 4, IMMED);
+
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ LOAD(p, CCTRL_RESET_CHA_ALL, CCTRL, 0, 4, IMMED);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ SET_LABEL(p, local_offset);
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
+ SEQINPTR(p, 0, 0, RTO);
+
+ if (rta_sec_era == RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
+ SEQFIFOLOAD(p, SKIP, 5, 0);
+ MATHB(p, SEQINSZ, ADD, ONE, SEQINSZ, 4, 0);
+ }
+
+ MATHB(p, SEQINSZ, SUB, ONE, VSEQINSZ, 4, 0);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_ENC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ if (rta_sec_era > RTA_SEC_ERA_2 ||
+ (rta_sec_era == RTA_SEC_ERA_2 &&
+ era_2_sw_hfn_ovrd == 0))
+ SEQFIFOLOAD(p, SKIP, 1, 0);
+
+ SEQFIFOLOAD(p, MSG1, 0, VLF);
+ MOVE(p, MATH3, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ PATCH_MOVE(p, seqin_ptr_read, local_offset);
+ PATCH_MOVE(p, seqin_ptr_write, local_offset);
+ } else {
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
+
+ if (rta_sec_era >= RTA_SEC_ERA_5)
+ MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
+
+ if (rta_sec_era > RTA_SEC_ERA_2)
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ else
+ MATHB(p, SEQINSZ, SUB, MATH3, VSEQINSZ, 4, 0);
+
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+/*
+ * TODO: To be changed when proper support is added in RTA: currently a
+ * command that is also written by RTA can neither be loaded nor patched.
+ */
+ if (p->ps)
+ WORD(p, 0x168B0004);
+ else
+ WORD(p, 0x16880404);
+
+ jump_back_to_sd_cmd = JUMP(p, 0, LOCAL_JUMP, ALL_TRUE, 0);
+ /*
+ * Placeholder for command reading the SEQ OUT command in
+ * JD. Done for rereading the decrypted data and performing
+ * the integrity check
+ */
+/*
+ * TODO: RTA currently doesn't support patching the length of a MOVE command.
+ * Thus, it is inserted as a raw word, as per the PS setting.
+ */
+ if (p->ps)
+ seq_out_read = MOVE(p, DESCBUF, 0, MATH1, 0, 20,
+ WAITCOMP | IMMED);
+ else
+ seq_out_read = MOVE(p, DESCBUF, 0, MATH1, 0, 16,
+ WAITCOMP | IMMED);
+
+ MATHB(p, MATH1, XOR, CMD_SEQ_IN_PTR ^ CMD_SEQ_OUT_PTR, MATH1, 4,
+ IMMED2);
+ /* Placeholder for overwriting the SEQ IN with SEQ OUT */
+/*
+ * TODO: RTA currently doesn't support patching the length of a MOVE command.
+ * Thus, it is inserted as a raw word, as per the PS setting.
+ */
+ if (p->ps)
+ MOVE(p, MATH1, 0, DESCBUF, 0, 24, IMMED);
+ else
+ MOVE(p, MATH1, 0, DESCBUF, 0, 20, IMMED);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ if (rta_sec_era >= RTA_SEC_ERA_4)
+ MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
+ else
+ MOVE(p, CONTEXT1, 0, MATH3, 0, 8, IMMED);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_DEC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ move_mac_i_to_desc_buf = MOVE(p, OFIFO, 0, DESCBUF, 0,
+ 4, WAITCOMP | IMMED);
+ else
+ MOVE(p, OFIFO, 0, MATH3, 0, 4, IMMED);
+
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ LOAD(p, CCTRL_RESET_CHA_ALL, CCTRL, 0, 4, IMMED);
+ else
+ LOAD(p, CLRW_RESET_CLS1_CHA |
+ CLRW_CLR_C1KEY |
+ CLRW_CLR_C1CTX |
+ CLRW_CLR_C1ICV |
+ CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1MODE,
+ CLRW, 0, 4, IMMED);
+
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+		/*
+		 * Placeholder for the jump in the SD for executing the new
+		 * SEQ IN PTR command (which is actually the old SEQ OUT PTR
+		 * command copied over from the JD).
+		 */
+ SET_LABEL(p, jump_to_beginning);
+ JUMP(p, 1 - jump_to_beginning, LOCAL_JUMP, ALL_TRUE, 0);
+ SET_LABEL(p, back_to_sd_offset);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CMAC,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_ENABLE,
+ DIR_DEC);
+
+ /* Read the # of bytes written in the output buffer + 1 (HDR) */
+ MATHB(p, VSEQOUTSZ, ADD, ONE, VSEQINSZ, 4, 0);
+
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ MOVE(p, MATH3, 0, IFIFOAB1, 0, 8, IMMED);
+ else
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 8, IMMED);
+
+ if (rta_sec_era == RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd)
+ SEQFIFOLOAD(p, SKIP, 4, 0);
+
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+
+ if (rta_sec_era >= RTA_SEC_ERA_4) {
+ LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
+ NFIFOENTRY_DEST_CLASS1 |
+ NFIFOENTRY_DTYPE_ICV |
+ NFIFOENTRY_LC1 |
+ NFIFOENTRY_FC1 | 4, NFIFO_SZL, 0, 4, IMMED);
+ MOVE(p, MATH3, 0, ALTSOURCE, 0, 4, IMMED);
+ } else {
+ SET_LABEL(p, fifo_load_mac_i_offset);
+ FIFOLOAD(p, ICV1, fifo_load_mac_i_offset, 4,
+ LAST1 | FLUSH1 | IMMED);
+ }
+
+ SET_LABEL(p, end_desc);
+
+ if (!p->ps) {
+ PATCH_MOVE(p, seq_out_read, end_desc + 1);
+ PATCH_JUMP(p, jump_back_to_sd_cmd,
+ back_to_sd_offset + jump_back_to_sd_cmd - 5);
+
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ PATCH_MOVE(p, move_mac_i_to_desc_buf,
+ fifo_load_mac_i_offset + 1);
+ } else {
+ PATCH_MOVE(p, seq_out_read, end_desc + 2);
+ PATCH_JUMP(p, jump_back_to_sd_cmd,
+ back_to_sd_offset + jump_back_to_sd_cmd - 5);
+
+ if (rta_sec_era <= RTA_SEC_ERA_3)
+ PATCH_MOVE(p, move_mac_i_to_desc_buf,
+ fifo_load_mac_i_offset + 1);
+ }
+ }
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_aes_snow_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+
+ return 0;
+ }
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, SUB, ONE, VSEQINSZ, 4, 0);
+
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MOVE(p, MATH0, 7, IFIFOAB2, 0, 1, IMMED);
+ if (swap == false)
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ else
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+
+ SEQSTORE(p, MATH0, 7, 1, 0);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVE(p, DESCBUF, 4, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH1, 8, 0);
+ MOVE(p, MATH1, 0, CONTEXT1, 16, 8, IMMED);
+ MOVE(p, MATH1, 0, CONTEXT2, 0, 4, IMMED);
+ if (swap == false) {
+ MATHB(p, MATH1, AND, lower_32_bits(PDCP_BEARER_MASK), MATH2, 4,
+ IMMED2);
+ MATHB(p, MATH1, AND, upper_32_bits(PDCP_DIR_MASK), MATH3, 4,
+ IMMED2);
+ } else {
+ MATHB(p, MATH1, AND, lower_32_bits(PDCP_BEARER_MASK_BE), MATH2,
+ 4, IMMED2);
+ MATHB(p, MATH1, AND, upper_32_bits(PDCP_DIR_MASK_BE), MATH3,
+ 4, IMMED2);
+ }
+ MATHB(p, MATH3, SHLD, MATH3, MATH3, 8, 0);
+ MOVE(p, MATH2, 4, OFIFO, 0, 12, IMMED);
+ MOVE(p, OFIFO, 0, CONTEXT2, 4, 12, IMMED);
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+ } else {
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, MATH1, 4, IMMED2);
+
+ MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
+ MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
+ }
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ else
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F9,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_DEC);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CTR,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ? DIR_ENC : DIR_DEC);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST2);
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ } else {
+ SEQFIFOLOAD(p, MSGOUTSNOOP, 0, VLF | LAST2);
+ SEQFIFOLOAD(p, MSG1, 4, LAST1 | FLUSH1);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CLASS1 | NOP | NIFP);
+
+ if (rta_sec_era >= RTA_SEC_ERA_6)
+ LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
+
+ MOVE(p, OFIFO, 0, MATH0, 0, 4, WAITCOMP | IMMED);
+
+ NFIFOADD(p, IFIFO, ICV2, 4, LAST2);
+
+ if (rta_sec_era <= RTA_SEC_ERA_2) {
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+ MOVE(p, MATH0, 0, IFIFOAB2, 0, 4, WAITCOMP | IMMED);
+ } else {
+ MOVE(p, MATH0, 0, IFIFO, 0, 4, WAITCOMP | IMMED);
+ }
+ }
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_snow_zuc_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ LABEL(keyjump);
+ REFERENCE(pkeyjump);
+
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+
+ pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF | BOTH);
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ SET_LABEL(p, keyjump);
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+ return 0;
+ }
+
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MOVE(p, MATH0, 7, IFIFOAB2, 0, 1, IMMED);
+ if (swap == false)
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ else
+		MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVE(p, DESCBUF, 4, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
+ MOVE(p, MATH2, 0, CONTEXT2, 0, 8, WAITCOMP | IMMED);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ SEQSTORE(p, MATH0, 7, 1, 0);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST2);
+ } else {
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ SEQFIFOLOAD(p, MSGOUTSNOOP, 0, VLF | LAST1 | FLUSH1);
+ }
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCA,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_ENC);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ? DIR_ENC : DIR_DEC);
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ } else {
+ /* Save ICV */
+ MOVE(p, OFIFO, 0, MATH0, 0, 4, IMMED);
+ LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
+ NFIFOENTRY_DEST_CLASS2 |
+ NFIFOENTRY_DTYPE_ICV |
+ NFIFOENTRY_LC2 | 4, NFIFO_SZL, 0, 4, IMMED);
+ MOVE(p, MATH0, 0, ALTSOURCE, 0, 4, WAITCOMP | IMMED);
+ }
+
+ /* Reset ZUCA mode and done interrupt */
+ LOAD(p, CLRW_CLR_C2MODE, CLRW, 0, 4, IMMED);
+ LOAD(p, CIRQ_ZADI, ICTRL, 0, 4, IMMED);
+
+ PATCH_JUMP(p, pkeyjump, keyjump);
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_aes_zuc_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ LABEL(keyjump);
+ REFERENCE(pkeyjump);
+
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+
+ pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF | BOTH);
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+
+ return 0;
+ }
+
+ SET_LABEL(p, keyjump);
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MOVE(p, MATH0, 7, IFIFOAB2, 0, 1, IMMED);
+ if (swap == false)
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ else
+		MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVE(p, DESCBUF, 4, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ MOVE(p, MATH2, 0, CONTEXT1, 16, 8, IMMED);
+ MOVE(p, MATH2, 0, CONTEXT2, 0, 8, WAITCOMP | IMMED);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL)
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+ else
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ SEQSTORE(p, MATH0, 7, 1, 0);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST2);
+ } else {
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ SEQFIFOLOAD(p, MSGOUTSNOOP, 0, VLF | LAST1 | FLUSH1);
+ }
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCA,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_ENC);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CTR,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ? DIR_ENC : DIR_DEC);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ } else {
+ /* Save ICV */
+ MOVE(p, OFIFO, 0, MATH0, 0, 4, IMMED);
+
+ LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
+ NFIFOENTRY_DEST_CLASS2 |
+ NFIFOENTRY_DTYPE_ICV |
+ NFIFOENTRY_LC2 | 4, NFIFO_SZL, 0, 4, IMMED);
+ MOVE(p, MATH0, 0, ALTSOURCE, 0, 4, WAITCOMP | IMMED);
+ }
+
+ /* Reset ZUCA mode and done interrupt */
+ LOAD(p, CLRW_CLR_C2MODE, CLRW, 0, 4, IMMED);
+ LOAD(p, CIRQ_ZADI, ICTRL, 0, 4, IMMED);
+
+ PATCH_JUMP(p, pkeyjump, keyjump);
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_zuc_snow_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ LABEL(keyjump);
+ REFERENCE(pkeyjump);
+
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+
+ pkeyjump = JUMP(p, keyjump, LOCAL_JUMP, ALL_TRUE, SHRD | SELF | BOTH);
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key, authdata->keylen,
+ INLINE_KEY(authdata));
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+
+ return 0;
+ }
+
+ SET_LABEL(p, keyjump);
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MOVE(p, MATH0, 7, IFIFOAB2, 0, 1, IMMED);
+ if (swap == false)
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ else
+		MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVE(p, DESCBUF, 4, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH1, 8, 0);
+ MOVE(p, MATH1, 0, CONTEXT1, 0, 8, IMMED);
+ MOVE(p, MATH1, 0, CONTEXT2, 0, 4, IMMED);
+ if (swap == false) {
+ MATHB(p, MATH1, AND, lower_32_bits(PDCP_BEARER_MASK), MATH2,
+ 4, IMMED2);
+ MATHB(p, MATH1, AND, upper_32_bits(PDCP_DIR_MASK), MATH3,
+ 4, IMMED2);
+ } else {
+ MATHB(p, MATH1, AND, lower_32_bits(PDCP_BEARER_MASK_BE), MATH2,
+ 4, IMMED2);
+ MATHB(p, MATH1, AND, upper_32_bits(PDCP_DIR_MASK_BE), MATH3,
+ 4, IMMED2);
+ }
+ MATHB(p, MATH3, SHLD, MATH3, MATH3, 8, 0);
+ MOVE(p, MATH2, 4, OFIFO, 0, 12, IMMED);
+ MOVE(p, OFIFO, 0, CONTEXT2, 4, 12, IMMED);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MATHB(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ } else {
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+ MATHB(p, VSEQOUTSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ }
+
+ SEQSTORE(p, MATH0, 7, 1, 0);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST2);
+ } else {
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ SEQFIFOLOAD(p, MSGOUTSNOOP, 0, VLF | LAST2);
+ }
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F9,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ dir == OP_TYPE_ENCAP_PROTOCOL ?
+ ICV_CHECK_DISABLE : ICV_CHECK_ENABLE,
+ DIR_DEC);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCE,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ dir == OP_TYPE_ENCAP_PROTOCOL ? DIR_ENC : DIR_DEC);
+
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ } else {
+ SEQFIFOLOAD(p, MSG1, 4, LAST1 | FLUSH1);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CLASS1 | NOP | NIFP);
+
+ if (rta_sec_era >= RTA_SEC_ERA_6)
+ /*
+ * For SEC ERA 6, there's a problem with the OFIFO
+ * pointer, and thus it needs to be reset here before
+ * moving to M0.
+ */
+ LOAD(p, 0, DCTRL, 0, LDLEN_RST_CHA_OFIFO_PTR, IMMED);
+
+ /* Put ICV to M0 before sending it to C2 for comparison. */
+ MOVE(p, OFIFO, 0, MATH0, 0, 4, WAITCOMP | IMMED);
+
+ LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
+ NFIFOENTRY_DEST_CLASS2 |
+ NFIFOENTRY_DTYPE_ICV |
+ NFIFOENTRY_LC2 | 4, NFIFO_SZL, 0, 4, IMMED);
+ MOVE(p, MATH0, 0, ALTSOURCE, 0, 4, IMMED);
+ }
+
+ PATCH_JUMP(p, pkeyjump, keyjump);
+ return 0;
+}
+
+static inline int
+pdcp_insert_cplane_zuc_aes_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned int dir,
+ unsigned char era_2_sw_hfn_ovrd __maybe_unused)
+{
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_CTRL_MIXED,
+ ((uint16_t)cipherdata->algtype << 8) |
+ (uint16_t)authdata->algtype);
+ return 0;
+ }
+
+ SEQLOAD(p, MATH0, 7, 1, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ if (swap == false)
+ MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ else
+		MATHB(p, MATH0, AND, PDCP_C_PLANE_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVE(p, DESCBUF, 4, MATH2, 0, 0x08, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+ SEQSTORE(p, MATH0, 7, 1, 0);
+ if (dir == OP_TYPE_ENCAP_PROTOCOL) {
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ MOVE(p, MATH2, 0, IFIFOAB1, 0, 0x08, IMMED);
+ MOVE(p, MATH0, 7, IFIFOAB1, 0, 1, IMMED);
+
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, VSEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CMAC,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_DEC);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+ MOVE(p, CONTEXT1, 0, MATH3, 0, 4, WAITCOMP | IMMED);
+ LOAD(p, CLRW_RESET_CLS1_CHA |
+ CLRW_CLR_C1KEY |
+ CLRW_CLR_C1CTX |
+ CLRW_CLR_C1ICV |
+ CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1MODE,
+ CLRW, 0, 4, IMMED);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
+ SEQINPTR(p, 0, PDCP_NULL_MAX_FRAME_LEN, RTO);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCE,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_ENC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ SEQFIFOLOAD(p, SKIP, 1, 0);
+
+ SEQFIFOLOAD(p, MSG1, 0, VLF);
+ MOVE(p, MATH3, 0, IFIFOAB1, 0, 4, LAST1 | FLUSH1 | IMMED);
+ } else {
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 8, IMMED);
+
+ MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
+
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+
+ MATHB(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
+
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ MOVE(p, CONTEXT1, 0, CONTEXT2, 0, 8, IMMED);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCE,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_DEC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF | CONT);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+
+ MOVE(p, OFIFO, 0, MATH3, 0, 4, IMMED);
+
+ LOAD(p, CLRW_RESET_CLS1_CHA |
+ CLRW_CLR_C1KEY |
+ CLRW_CLR_C1CTX |
+ CLRW_CLR_C1ICV |
+ CLRW_CLR_C1DATAS |
+ CLRW_CLR_C1MODE,
+ CLRW, 0, 4, IMMED);
+
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+
+ SEQINPTR(p, 0, 0, SOP);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CMAC,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_ENABLE,
+ DIR_DEC);
+
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+
+ MOVE(p, CONTEXT2, 0, IFIFOAB1, 0, 8, IMMED);
+
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+
+ LOAD(p, NFIFOENTRY_STYPE_ALTSOURCE |
+ NFIFOENTRY_DEST_CLASS1 |
+ NFIFOENTRY_DTYPE_ICV |
+ NFIFOENTRY_LC1 |
+ NFIFOENTRY_FC1 | 4, NFIFO_SZL, 0, 4, IMMED);
+ MOVE(p, MATH3, 0, ALTSOURCE, 0, 4, IMMED);
+ }
+
+ return 0;
+}
+
+static inline int
+pdcp_insert_uplane_15bit_op(struct program *p,
+ bool swap __maybe_unused,
+ struct alginfo *cipherdata,
+ unsigned int dir)
+{
+ int op;
+ /* Insert Cipher Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags, cipherdata->key,
+ cipherdata->keylen, INLINE_KEY(cipherdata));
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ PROTOCOL(p, dir, OP_PCLID_LTE_PDCP_USER,
+ (uint16_t)cipherdata->algtype);
+ return 0;
+ }
+
+ SEQLOAD(p, MATH0, 6, 2, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ if (swap == false)
+ MATHB(p, MATH0, AND, PDCP_U_PLANE_15BIT_SN_MASK, MATH1, 8,
+ IFB | IMMED2);
+ else
+ MATHB(p, MATH0, AND, PDCP_U_PLANE_15BIT_SN_MASK_BE, MATH1, 8,
+ IFB | IMMED2);
+ SEQSTORE(p, MATH0, 6, 2, 0);
+ MATHB(p, MATH1, SHLD, MATH1, MATH1, 8, 0);
+ MOVE(p, DESCBUF, 8, MATH2, 0, 8, WAITCOMP | IMMED);
+ MATHB(p, MATH1, OR, MATH2, MATH2, 8, 0);
+
+ MATHB(p, SEQINSZ, SUB, MATH3, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, MATH3, VSEQOUTSZ, 4, 0);
+
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ op = dir == OP_TYPE_ENCAP_PROTOCOL ? DIR_ENC : DIR_DEC;
+ switch (cipherdata->algtype) {
+ case PDCP_CIPHER_TYPE_SNOW:
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 8, WAITCOMP | IMMED);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F8,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ op);
+ break;
+
+ case PDCP_CIPHER_TYPE_AES:
+ MOVE(p, MATH2, 0, CONTEXT1, 0x10, 0x10, WAITCOMP | IMMED);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CTR,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ op);
+ break;
+
+ case PDCP_CIPHER_TYPE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+ MOVE(p, MATH2, 0, CONTEXT1, 0, 0x08, IMMED);
+ MOVE(p, MATH2, 0, CONTEXT1, 0x08, 0x08, WAITCOMP | IMMED);
+
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCE,
+ OP_ALG_AAI_F8,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ op);
+ break;
+
+ default:
+ pr_err("%s: Invalid encrypt algorithm selected: %d\n",
+ "pdcp_insert_uplane_15bit_op", cipherdata->algtype);
+ return -EINVAL;
+ }
+
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | FLUSH1);
+
+ return 0;
+}
+
+/*
+ * Function for inserting the snippet of code that handles the HFN override,
+ * taking the override value either from DPOVRD or from the input frame.
+ */
+static inline int
+insert_hfn_ov_op(struct program *p,
+ uint32_t shift,
+ enum pdb_type_e pdb_type,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ uint32_t imm = PDCP_DPOVRD_HFN_OV_EN;
+ uint16_t hfn_pdb_offset;
+
+ if (rta_sec_era == RTA_SEC_ERA_2 && !era_2_sw_hfn_ovrd)
+ return 0;
+
+ switch (pdb_type) {
+ case PDCP_PDB_TYPE_NO_PDB:
+		/*
+		 * If there is no PDB, the HFN override mechanism does not
+		 * make any sense; in this case there is nothing to insert,
+		 * so simply return success.
+		 */
+ return 0;
+
+ case PDCP_PDB_TYPE_REDUCED_PDB:
+ hfn_pdb_offset = 4;
+ break;
+
+ case PDCP_PDB_TYPE_FULL_PDB:
+ hfn_pdb_offset = 8;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, DPOVRD, AND, imm, NONE, 8, IFB | IMMED2);
+ } else {
+ SEQLOAD(p, MATH0, 4, 4, 0);
+ JUMP(p, 1, LOCAL_JUMP, ALL_TRUE, CALM);
+ MATHB(p, MATH0, AND, imm, NONE, 8, IFB | IMMED2);
+ SEQSTORE(p, MATH0, 4, 4, 0);
+ }
+
+ if (rta_sec_era >= RTA_SEC_ERA_8)
+ JUMP(p, 6, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+ else
+ JUMP(p, 5, LOCAL_JUMP, ALL_TRUE, MATH_Z);
+
+ if (rta_sec_era > RTA_SEC_ERA_2)
+ MATHB(p, DPOVRD, LSHIFT, shift, MATH0, 4, IMMED2);
+ else
+ MATHB(p, MATH0, LSHIFT, shift, MATH0, 4, IMMED2);
+
+ MATHB(p, MATH0, SHLD, MATH0, MATH0, 8, 0);
+ MOVE(p, MATH0, 0, DESCBUF, hfn_pdb_offset, 4, IMMED);
+
+ if (rta_sec_era >= RTA_SEC_ERA_8)
+ /*
+ * For ERA8, DPOVRD could be handled by the PROTOCOL command
+ * itself. For now, this is not done. Thus, clear DPOVRD here
+ * to alleviate any side-effects.
+ */
+ MATHB(p, DPOVRD, AND, ZERO, DPOVRD, 4, STL);
+
+ return 0;
+}
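+
+/*
+ * For reference, the snippet emitted by insert_hfn_ov_op() above behaves
+ * roughly as follows (illustrative pseudo-code of the descriptor logic, not
+ * literal SEC commands; register usage is simplified):
+ *
+ *	ovrd = (SEC ERA > 2) ? DPOVRD : first 4 bytes of the input frame;
+ *	if (ovrd & PDCP_DPOVRD_HFN_OV_EN) {
+ *		hfn = ovrd << shift;           // align the override HFN
+ *		PDB[hfn_pdb_offset] = hfn;     // patch the HFN word in the PDB
+ *	}
+ *	// on SEC ERA >= 8, DPOVRD is additionally cleared afterwards
+ */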
+
+/*
+ * PDCP Control PDB creation function
+ */
+static inline enum pdb_type_e
+cnstr_pdcp_c_plane_pdb(struct program *p,
+ uint32_t hfn,
+ unsigned char bearer,
+ unsigned char direction,
+ uint32_t hfn_threshold,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata)
+{
+ struct pdcp_pdb pdb;
+ enum pdb_type_e
+ pdb_mask[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID] = {
+ { /* NULL */
+ PDCP_PDB_TYPE_NO_PDB, /* NULL */
+ PDCP_PDB_TYPE_FULL_PDB, /* SNOW f9 */
+ PDCP_PDB_TYPE_FULL_PDB, /* AES CMAC */
+ PDCP_PDB_TYPE_FULL_PDB /* ZUC-I */
+ },
+ { /* SNOW f8 */
+ PDCP_PDB_TYPE_FULL_PDB, /* NULL */
+ PDCP_PDB_TYPE_FULL_PDB, /* SNOW f9 */
+ PDCP_PDB_TYPE_REDUCED_PDB, /* AES CMAC */
+ PDCP_PDB_TYPE_REDUCED_PDB /* ZUC-I */
+ },
+ { /* AES CTR */
+ PDCP_PDB_TYPE_FULL_PDB, /* NULL */
+ PDCP_PDB_TYPE_REDUCED_PDB, /* SNOW f9 */
+ PDCP_PDB_TYPE_FULL_PDB, /* AES CMAC */
+ PDCP_PDB_TYPE_REDUCED_PDB /* ZUC-I */
+ },
+ { /* ZUC-E */
+ PDCP_PDB_TYPE_FULL_PDB, /* NULL */
+ PDCP_PDB_TYPE_REDUCED_PDB, /* SNOW f9 */
+ PDCP_PDB_TYPE_REDUCED_PDB, /* AES CMAC */
+ PDCP_PDB_TYPE_FULL_PDB /* ZUC-I */
+ },
+ };
+
+ if (rta_sec_era >= RTA_SEC_ERA_8) {
+ memset(&pdb, 0x00, sizeof(struct pdcp_pdb));
+
+ /* This is a HW issue. Bit 2 should be set to zero,
+ * but it does not work this way. Override here.
+ */
+ pdb.opt_res.rsvd = 0x00000002;
+
+ /* Copy relevant information from user to PDB */
+ pdb.hfn_res = hfn << PDCP_C_PLANE_PDB_HFN_SHIFT;
+ pdb.bearer_dir_res = (uint32_t)
+ ((bearer << PDCP_C_PLANE_PDB_BEARER_SHIFT) |
+ (direction << PDCP_C_PLANE_PDB_DIR_SHIFT));
+ pdb.hfn_thr_res =
+ hfn_threshold << PDCP_C_PLANE_PDB_HFN_THR_SHIFT;
+
+		/* Copy the PDB into the descriptor */
+ __rta_out32(p, pdb.opt_res.opt);
+ __rta_out32(p, pdb.hfn_res);
+ __rta_out32(p, pdb.bearer_dir_res);
+ __rta_out32(p, pdb.hfn_thr_res);
+
+ return PDCP_PDB_TYPE_FULL_PDB;
+ }
+
+ switch (pdb_mask[cipherdata->algtype][authdata->algtype]) {
+ case PDCP_PDB_TYPE_NO_PDB:
+ break;
+
+ case PDCP_PDB_TYPE_REDUCED_PDB:
+ __rta_out32(p, (hfn << PDCP_C_PLANE_PDB_HFN_SHIFT));
+ __rta_out32(p,
+ (uint32_t)((bearer <<
+ PDCP_C_PLANE_PDB_BEARER_SHIFT) |
+ (direction <<
+ PDCP_C_PLANE_PDB_DIR_SHIFT)));
+ break;
+
+ case PDCP_PDB_TYPE_FULL_PDB:
+ memset(&pdb, 0x00, sizeof(struct pdcp_pdb));
+
+ /* This is a HW issue. Bit 2 should be set to zero,
+ * but it does not work this way. Override here.
+ */
+ pdb.opt_res.rsvd = 0x00000002;
+
+ /* Copy relevant information from user to PDB */
+ pdb.hfn_res = hfn << PDCP_C_PLANE_PDB_HFN_SHIFT;
+ pdb.bearer_dir_res = (uint32_t)
+ ((bearer << PDCP_C_PLANE_PDB_BEARER_SHIFT) |
+ (direction << PDCP_C_PLANE_PDB_DIR_SHIFT));
+ pdb.hfn_thr_res =
+ hfn_threshold << PDCP_C_PLANE_PDB_HFN_THR_SHIFT;
+
+		/* Copy the PDB into the descriptor */
+ __rta_out32(p, pdb.opt_res.opt);
+ __rta_out32(p, pdb.hfn_res);
+ __rta_out32(p, pdb.bearer_dir_res);
+ __rta_out32(p, pdb.hfn_thr_res);
+
+ break;
+
+ default:
+ return PDCP_PDB_TYPE_INVALID;
+ }
+
+ return pdb_mask[cipherdata->algtype][authdata->algtype];
+}
+
+/*
+ * PDCP UPlane PDB creation function
+ */
+static inline int
+cnstr_pdcp_u_plane_pdb(struct program *p,
+ enum pdcp_sn_size sn_size,
+ uint32_t hfn, unsigned short bearer,
+ unsigned short direction,
+ uint32_t hfn_threshold)
+{
+ struct pdcp_pdb pdb;
+	/*
+	 * Read options from user. Depending on the sequence number length,
+	 * the HFN and HFN threshold have different lengths.
+	 */
+ memset(&pdb, 0x00, sizeof(struct pdcp_pdb));
+
+ switch (sn_size) {
+ case PDCP_SN_SIZE_7:
+ pdb.opt_res.opt |= PDCP_U_PLANE_PDB_OPT_SHORT_SN;
+ pdb.hfn_res = hfn << PDCP_U_PLANE_PDB_SHORT_SN_HFN_SHIFT;
+ pdb.hfn_thr_res =
+ hfn_threshold<<PDCP_U_PLANE_PDB_SHORT_SN_HFN_THR_SHIFT;
+ break;
+
+ case PDCP_SN_SIZE_12:
+ pdb.opt_res.opt &= (uint32_t)(~PDCP_U_PLANE_PDB_OPT_SHORT_SN);
+ pdb.hfn_res = hfn << PDCP_U_PLANE_PDB_LONG_SN_HFN_SHIFT;
+ pdb.hfn_thr_res =
+ hfn_threshold<<PDCP_U_PLANE_PDB_LONG_SN_HFN_THR_SHIFT;
+ break;
+
+ case PDCP_SN_SIZE_15:
+ pdb.opt_res.opt = (uint32_t)(PDCP_U_PLANE_PDB_OPT_15B_SN);
+ pdb.hfn_res = hfn << PDCP_U_PLANE_PDB_15BIT_SN_HFN_SHIFT;
+ pdb.hfn_thr_res =
+ hfn_threshold<<PDCP_U_PLANE_PDB_15BIT_SN_HFN_THR_SHIFT;
+ break;
+
+ default:
+ pr_err("Invalid Sequence Number Size setting in PDB\n");
+ return -EINVAL;
+ }
+
+ pdb.bearer_dir_res = (uint32_t)
+ ((bearer << PDCP_U_PLANE_PDB_BEARER_SHIFT) |
+ (direction << PDCP_U_PLANE_PDB_DIR_SHIFT));
+
+	/* Copy the PDB into the descriptor */
+ __rta_out32(p, pdb.opt_res.opt);
+ __rta_out32(p, pdb.hfn_res);
+ __rta_out32(p, pdb.bearer_dir_res);
+ __rta_out32(p, pdb.hfn_thr_res);
+
+ return 0;
+}
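+
+/*
+ * For reference, the user-plane PDB emitted above consists of four 32-bit
+ * words written into the descriptor (mirroring struct pdcp_pdb):
+ *
+ *	word 0: options (sequence number size selection)
+ *	word 1: HFN, shifted according to the selected SN size
+ *	word 2: bearer and direction
+ *	word 3: HFN threshold, shifted according to the selected SN size
+ */
+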
+/**
+ * cnstr_shdsc_pdcp_c_plane_encap - Function for creating a PDCP Control Plane
+ * encapsulation descriptor.
+ * @descbuf: pointer to buffer for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @hfn: starting Hyper Frame Number to be used together with the SN from the
+ * PDCP frames.
+ * @bearer: radio bearer ID
+ * @direction: the direction of the PDCP frame (UL/DL)
+ * @hfn_threshold: HFN value that once reached triggers a warning from SEC that
+ * keys should be renegotiated at the earliest convenience.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values are those from cipher_type_pdcp enum.
+ * @authdata: pointer to authentication transform definitions
+ * Valid algorithm values are those from auth_type_pdcp enum.
+ * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
+ * this descriptor. Note: Can only be used for
+ * SEC ERA 2.
+ *
+ * Return: size of descriptor written in words, or a negative number on error.
+ *         The returned size can be used by the caller to reclaim the
+ *         descriptor buffer space that was not used.
+ *
+ * Note: descbuf must be large enough to contain a full 256 byte long
+ * descriptor; after the function returns, by subtracting the actual number of
+ * bytes used, the user can reuse the remaining buffer space for other purposes.
+ */
+static inline int
+cnstr_shdsc_pdcp_c_plane_encap(uint32_t *descbuf,
+ bool ps,
+ bool swap,
+ uint32_t hfn,
+ unsigned char bearer,
+ unsigned char direction,
+ uint32_t hfn_threshold,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ static int
+ (*pdcp_cp_fp[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID])
+ (struct program*, bool swap, struct alginfo *,
+ struct alginfo *, unsigned int,
+ unsigned char __maybe_unused) = {
+ { /* NULL */
+ pdcp_insert_cplane_null_op, /* NULL */
+ pdcp_insert_cplane_int_only_op, /* SNOW f9 */
+ pdcp_insert_cplane_int_only_op, /* AES CMAC */
+ pdcp_insert_cplane_int_only_op /* ZUC-I */
+ },
+ { /* SNOW f8 */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_cplane_acc_op, /* SNOW f9 */
+ pdcp_insert_cplane_snow_aes_op, /* AES CMAC */
+ pdcp_insert_cplane_snow_zuc_op /* ZUC-I */
+ },
+ { /* AES CTR */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_cplane_aes_snow_op, /* SNOW f9 */
+ pdcp_insert_cplane_acc_op, /* AES CMAC */
+ pdcp_insert_cplane_aes_zuc_op /* ZUC-I */
+ },
+ { /* ZUC-E */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_cplane_zuc_snow_op, /* SNOW f9 */
+ pdcp_insert_cplane_zuc_aes_op, /* AES CMAC */
+ pdcp_insert_cplane_acc_op /* ZUC-I */
+ },
+ };
+ static enum rta_share_type
+ desc_share[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID] = {
+ { /* NULL */
+ SHR_WAIT, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_ALWAYS, /* AES CMAC */
+ SHR_ALWAYS /* ZUC-I */
+ },
+ { /* SNOW f8 */
+ SHR_ALWAYS, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_WAIT, /* AES CMAC */
+ SHR_WAIT /* ZUC-I */
+ },
+ { /* AES CTR */
+ SHR_ALWAYS, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_ALWAYS, /* AES CMAC */
+ SHR_WAIT /* ZUC-I */
+ },
+ { /* ZUC-E */
+ SHR_ALWAYS, /* NULL */
+ SHR_WAIT, /* SNOW f9 */
+ SHR_WAIT, /* AES CMAC */
+ SHR_ALWAYS /* ZUC-I */
+ },
+ };
+ enum pdb_type_e pdb_type;
+ struct program prg;
+ struct program *p = &prg;
+ int err;
+ LABEL(pdb_end);
+
+ if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
+		pr_err("Cannot select SW HFN override for eras other than 2\n");
+ return -EINVAL;
+ }
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, desc_share[cipherdata->algtype][authdata->algtype], 0, 0);
+
+ pdb_type = cnstr_pdcp_c_plane_pdb(p,
+ hfn,
+ bearer,
+ direction,
+ hfn_threshold,
+ cipherdata,
+ authdata);
+
+ SET_LABEL(p, pdb_end);
+
+ err = insert_hfn_ov_op(p, PDCP_SN_SIZE_5, pdb_type,
+ era_2_sw_hfn_ovrd);
+ if (err)
+ return err;
+
+ err = pdcp_cp_fp[cipherdata->algtype][authdata->algtype](p,
+ swap,
+ cipherdata,
+ authdata,
+ OP_TYPE_ENCAP_PROTOCOL,
+ era_2_sw_hfn_ovrd);
+ if (err)
+ return err;
+
+ PATCH_HDR(p, 0, pdb_end);
+
+ return PROGRAM_FINALIZE(p);
+}
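+
+/*
+ * Illustrative usage sketch (not part of the original header): build a
+ * control-plane encapsulation shared descriptor for SNOW f8 ciphering with
+ * AES CMAC integrity. The key buffers, key lengths and the HFN, bearer,
+ * direction and threshold values below are placeholder assumptions.
+ *
+ *	uint32_t shdesc[64];	// room for a full 256-byte descriptor
+ *	struct alginfo cipher = {
+ *		.algtype = PDCP_CIPHER_TYPE_SNOW,
+ *		.key = (uint64_t)(size_t)snow_key,	// placeholder key
+ *		.keylen = 16,
+ *		.key_enc_flags = 0,
+ *	};
+ *	struct alginfo auth = {
+ *		.algtype = PDCP_AUTH_TYPE_AES,
+ *		.key = (uint64_t)(size_t)aes_key,	// placeholder key
+ *		.keylen = 16,
+ *		.key_enc_flags = 0,
+ *	};
+ *	int len = cnstr_shdsc_pdcp_c_plane_encap(shdesc,
+ *			true,		// ps: 36/40-bit addressing
+ *			true,		// swap: core/SEC endianness differ
+ *			0,		// starting HFN
+ *			3,		// bearer ID
+ *			0,		// direction (UL/DL encoding)
+ *			0x00FF,		// HFN threshold
+ *			&cipher, &auth,
+ *			0);		// no ERA 2 SW HFN override
+ *	if (len < 0)
+ *		// handle the error; otherwise len words were written
+ */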
+
+/**
+ * cnstr_shdsc_pdcp_c_plane_decap - Function for creating a PDCP Control Plane
+ * decapsulation descriptor.
+ * @descbuf: pointer to buffer for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @hfn: starting Hyper Frame Number to be used together with the SN from the
+ * PDCP frames.
+ * @bearer: radio bearer ID
+ * @direction: the direction of the PDCP frame (UL/DL)
+ * @hfn_threshold: HFN value that once reached triggers a warning from SEC that
+ * keys should be renegotiated at the earliest convenience.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values are those from cipher_type_pdcp enum.
+ * @authdata: pointer to authentication transform definitions
+ * Valid algorithm values are those from auth_type_pdcp enum.
+ * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
+ * this descriptor. Note: Can only be used for
+ * SEC ERA 2.
+ *
+ * Return: size of descriptor written in words, or a negative number on error.
+ *         The returned size can be used by the caller to reclaim the
+ *         descriptor buffer space that was not used.
+ *
+ * Note: descbuf must be large enough to contain a full 256 byte long
+ * descriptor; after the function returns, by subtracting the actual number of
+ * bytes used, the user can reuse the remaining buffer space for other purposes.
+ */
+static inline int
+cnstr_shdsc_pdcp_c_plane_decap(uint32_t *descbuf,
+ bool ps,
+ bool swap,
+ uint32_t hfn,
+ unsigned char bearer,
+ unsigned char direction,
+ uint32_t hfn_threshold,
+ struct alginfo *cipherdata,
+ struct alginfo *authdata,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ static int
+ (*pdcp_cp_fp[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID])
+ (struct program*, bool swap, struct alginfo *,
+ struct alginfo *, unsigned int, unsigned char) = {
+ { /* NULL */
+ pdcp_insert_cplane_null_op, /* NULL */
+ pdcp_insert_cplane_int_only_op, /* SNOW f9 */
+ pdcp_insert_cplane_int_only_op, /* AES CMAC */
+ pdcp_insert_cplane_int_only_op /* ZUC-I */
+ },
+ { /* SNOW f8 */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_cplane_acc_op, /* SNOW f9 */
+ pdcp_insert_cplane_snow_aes_op, /* AES CMAC */
+ pdcp_insert_cplane_snow_zuc_op /* ZUC-I */
+ },
+ { /* AES CTR */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_cplane_aes_snow_op, /* SNOW f9 */
+ pdcp_insert_cplane_acc_op, /* AES CMAC */
+ pdcp_insert_cplane_aes_zuc_op /* ZUC-I */
+ },
+ { /* ZUC-E */
+ pdcp_insert_cplane_enc_only_op, /* NULL */
+ pdcp_insert_cplane_zuc_snow_op, /* SNOW f9 */
+ pdcp_insert_cplane_zuc_aes_op, /* AES CMAC */
+ pdcp_insert_cplane_acc_op /* ZUC-I */
+ },
+ };
+ static enum rta_share_type
+ desc_share[PDCP_CIPHER_TYPE_INVALID][PDCP_AUTH_TYPE_INVALID] = {
+ { /* NULL */
+ SHR_WAIT, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_ALWAYS, /* AES CMAC */
+ SHR_ALWAYS /* ZUC-I */
+ },
+ { /* SNOW f8 */
+ SHR_ALWAYS, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_WAIT, /* AES CMAC */
+ SHR_WAIT /* ZUC-I */
+ },
+ { /* AES CTR */
+ SHR_ALWAYS, /* NULL */
+ SHR_ALWAYS, /* SNOW f9 */
+ SHR_ALWAYS, /* AES CMAC */
+ SHR_WAIT /* ZUC-I */
+ },
+ { /* ZUC-E */
+ SHR_ALWAYS, /* NULL */
+ SHR_WAIT, /* SNOW f9 */
+ SHR_WAIT, /* AES CMAC */
+ SHR_ALWAYS /* ZUC-I */
+ },
+ };
+ enum pdb_type_e pdb_type;
+ struct program prg;
+ struct program *p = &prg;
+ int err;
+ LABEL(pdb_end);
+
+ if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
+		pr_err("Cannot select SW HFN override for eras other than 2\n");
+ return -EINVAL;
+ }
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, desc_share[cipherdata->algtype][authdata->algtype], 0, 0);
+
+ pdb_type = cnstr_pdcp_c_plane_pdb(p,
+ hfn,
+ bearer,
+ direction,
+ hfn_threshold,
+ cipherdata,
+ authdata);
+
+ SET_LABEL(p, pdb_end);
+
+ err = insert_hfn_ov_op(p, PDCP_SN_SIZE_5, pdb_type,
+ era_2_sw_hfn_ovrd);
+ if (err)
+ return err;
+
+ err = pdcp_cp_fp[cipherdata->algtype][authdata->algtype](p,
+ swap,
+ cipherdata,
+ authdata,
+ OP_TYPE_DECAP_PROTOCOL,
+ era_2_sw_hfn_ovrd);
+ if (err)
+ return err;
+
+ PATCH_HDR(p, 0, pdb_end);
+
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_pdcp_u_plane_encap - Function for creating a PDCP User Plane
+ * encapsulation descriptor.
+ * @descbuf: pointer to buffer for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @sn_size: selects Sequence Number Size: 7/12/15 bits
+ * @hfn: starting Hyper Frame Number to be used together with the SN from the
+ * PDCP frames.
+ * @bearer: radio bearer ID
+ * @direction: the direction of the PDCP frame (UL/DL)
+ * @hfn_threshold: HFN value that once reached triggers a warning from SEC that
+ * keys should be renegotiated at the earliest convenience.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values are those from cipher_type_pdcp enum.
+ * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
+ * this descriptor. Note: Can only be used for
+ * SEC ERA 2.
+ *
+ * Return: size of descriptor written in words, or a negative number on error.
+ *         The returned size can be used by the caller to reclaim the
+ *         descriptor buffer space that was not used.
+ *
+ * Note: descbuf must be large enough to contain a full 256 byte long
+ * descriptor; after the function returns, by subtracting the actual number of
+ * bytes used, the user can reuse the remaining buffer space for other purposes.
+ */
+static inline int
+cnstr_shdsc_pdcp_u_plane_encap(uint32_t *descbuf,
+ bool ps,
+ bool swap,
+ enum pdcp_sn_size sn_size,
+ uint32_t hfn,
+ unsigned short bearer,
+ unsigned short direction,
+ uint32_t hfn_threshold,
+ struct alginfo *cipherdata,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ struct program prg;
+ struct program *p = &prg;
+ int err;
+ LABEL(pdb_end);
+
+ if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
+		pr_err("Cannot select SW HFN override for eras other than 2\n");
+ return -EINVAL;
+ }
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, SHR_ALWAYS, 0, 0);
+ if (cnstr_pdcp_u_plane_pdb(p, sn_size, hfn, bearer, direction,
+ hfn_threshold)) {
+ pr_err("Error creating PDCP UPlane PDB\n");
+ return -EINVAL;
+ }
+ SET_LABEL(p, pdb_end);
+
+ err = insert_hfn_ov_op(p, sn_size, PDCP_PDB_TYPE_FULL_PDB,
+ era_2_sw_hfn_ovrd);
+ if (err)
+ return err;
+
+ switch (sn_size) {
+ case PDCP_SN_SIZE_7:
+ case PDCP_SN_SIZE_12:
+ switch (cipherdata->algtype) {
+ case PDCP_CIPHER_TYPE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
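+			/* fall through */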
+ case PDCP_CIPHER_TYPE_AES:
+ case PDCP_CIPHER_TYPE_SNOW:
+ /* Insert Cipher Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags,
+ (uint64_t)cipherdata->key, cipherdata->keylen,
+ INLINE_KEY(cipherdata));
+ PROTOCOL(p, OP_TYPE_ENCAP_PROTOCOL,
+ OP_PCLID_LTE_PDCP_USER,
+ (uint16_t)cipherdata->algtype);
+ break;
+ case PDCP_CIPHER_TYPE_NULL:
+ insert_copy_frame_op(p,
+ cipherdata,
+ OP_TYPE_ENCAP_PROTOCOL);
+ break;
+ default:
+ pr_err("%s: Invalid encrypt algorithm selected: %d\n",
+			       "cnstr_shdsc_pdcp_u_plane_encap",
+ cipherdata->algtype);
+ return -EINVAL;
+ }
+ break;
+
+ case PDCP_SN_SIZE_15:
+ switch (cipherdata->algtype) {
+ case PDCP_CIPHER_TYPE_NULL:
+ insert_copy_frame_op(p,
+ cipherdata,
+ OP_TYPE_ENCAP_PROTOCOL);
+ break;
+
+ default:
+ err = pdcp_insert_uplane_15bit_op(p, swap, cipherdata,
+ OP_TYPE_ENCAP_PROTOCOL);
+ if (err)
+ return err;
+ break;
+ }
+ break;
+
+ case PDCP_SN_SIZE_5:
+ default:
+ pr_err("Invalid SN size selected\n");
+ return -ENOTSUP;
+ }
+
+ PATCH_HDR(p, 0, pdb_end);
+ return PROGRAM_FINALIZE(p);
+}
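+
+/*
+ * Illustrative usage sketch (not part of the original header): build a
+ * user-plane encapsulation shared descriptor with AES-CTR ciphering and a
+ * 12-bit sequence number. The key buffer, key length and the HFN, bearer,
+ * direction and threshold values below are placeholder assumptions.
+ *
+ *	uint32_t shdesc[64];
+ *	struct alginfo cipher = {
+ *		.algtype = PDCP_CIPHER_TYPE_AES,
+ *		.key = (uint64_t)(size_t)aes_key,	// placeholder key
+ *		.keylen = 16,
+ *		.key_enc_flags = 0,
+ *	};
+ *	int len = cnstr_shdsc_pdcp_u_plane_encap(shdesc,
+ *			true,		// ps
+ *			true,		// swap
+ *			PDCP_SN_SIZE_12,
+ *			0,		// starting HFN
+ *			5,		// bearer ID
+ *			1,		// direction (UL/DL encoding)
+ *			0x00FF,		// HFN threshold
+ *			&cipher,
+ *			0);		// no ERA 2 SW HFN override
+ */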
+
+/**
+ * cnstr_shdsc_pdcp_u_plane_decap - Function for creating a PDCP User Plane
+ * decapsulation descriptor.
+ * @descbuf: pointer to buffer for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @sn_size: selects Sequence Number Size: 7/12/15 bits
+ * @hfn: starting Hyper Frame Number to be used together with the SN from the
+ * PDCP frames.
+ * @bearer: radio bearer ID
+ * @direction: the direction of the PDCP frame (UL/DL)
+ * @hfn_threshold: HFN value that once reached triggers a warning from SEC that
+ * keys should be renegotiated at the earliest convenience.
+ * @cipherdata: pointer to block cipher transform definitions
+ * Valid algorithm values are those from cipher_type_pdcp enum.
+ * @era_2_sw_hfn_ovrd: if software HFN override mechanism is desired for
+ * this descriptor. Note: Can only be used for
+ * SEC ERA 2.
+ *
+ * Return: size of descriptor written in words, or a negative number on error.
+ *         The returned size can be used by the caller to reclaim the
+ *         descriptor buffer space that was not used.
+ *
+ * Note: descbuf must be large enough to contain a full 256 byte long
+ * descriptor; after the function returns, by subtracting the actual number of
+ * bytes used, the user can reuse the remaining buffer space for other purposes.
+ */
+static inline int
+cnstr_shdsc_pdcp_u_plane_decap(uint32_t *descbuf,
+ bool ps,
+ bool swap,
+ enum pdcp_sn_size sn_size,
+ uint32_t hfn,
+ unsigned short bearer,
+ unsigned short direction,
+ uint32_t hfn_threshold,
+ struct alginfo *cipherdata,
+ unsigned char era_2_sw_hfn_ovrd)
+{
+ struct program prg;
+ struct program *p = &prg;
+ int err;
+ LABEL(pdb_end);
+
+ if (rta_sec_era != RTA_SEC_ERA_2 && era_2_sw_hfn_ovrd) {
+		pr_err("Cannot select SW HFN override for eras other than 2\n");
+ return -EINVAL;
+ }
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, SHR_ALWAYS, 0, 0);
+ if (cnstr_pdcp_u_plane_pdb(p, sn_size, hfn, bearer, direction,
+ hfn_threshold)) {
+ pr_err("Error creating PDCP UPlane PDB\n");
+ return -EINVAL;
+ }
+ SET_LABEL(p, pdb_end);
+
+ err = insert_hfn_ov_op(p, sn_size, PDCP_PDB_TYPE_FULL_PDB,
+ era_2_sw_hfn_ovrd);
+ if (err)
+ return err;
+
+ switch (sn_size) {
+ case PDCP_SN_SIZE_7:
+ case PDCP_SN_SIZE_12:
+ switch (cipherdata->algtype) {
+ case PDCP_CIPHER_TYPE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
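+			/* fall through */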
+ case PDCP_CIPHER_TYPE_AES:
+ case PDCP_CIPHER_TYPE_SNOW:
+ /* Insert Cipher Key */
+ KEY(p, KEY1, cipherdata->key_enc_flags,
+ cipherdata->key, cipherdata->keylen,
+ INLINE_KEY(cipherdata));
+ PROTOCOL(p, OP_TYPE_DECAP_PROTOCOL,
+ OP_PCLID_LTE_PDCP_USER,
+ (uint16_t)cipherdata->algtype);
+ break;
+ case PDCP_CIPHER_TYPE_NULL:
+ insert_copy_frame_op(p,
+ cipherdata,
+ OP_TYPE_DECAP_PROTOCOL);
+ break;
+ default:
+ pr_err("%s: Invalid encrypt algorithm selected: %d\n",
+			       "cnstr_shdsc_pdcp_u_plane_decap",
+ cipherdata->algtype);
+ return -EINVAL;
+ }
+ break;
+
+ case PDCP_SN_SIZE_15:
+ switch (cipherdata->algtype) {
+ case PDCP_CIPHER_TYPE_NULL:
+ insert_copy_frame_op(p,
+ cipherdata,
+ OP_TYPE_DECAP_PROTOCOL);
+ break;
+
+ default:
+ err = pdcp_insert_uplane_15bit_op(p, swap, cipherdata,
+ OP_TYPE_DECAP_PROTOCOL);
+ if (err)
+ return err;
+ break;
+ }
+ break;
+
+ case PDCP_SN_SIZE_5:
+ default:
+ pr_err("Invalid SN size selected\n");
+ return -ENOTSUP;
+ }
+
+ PATCH_HDR(p, 0, pdb_end);
+ return PROGRAM_FINALIZE(p);
+}
+
+/**
+ * cnstr_shdsc_pdcp_short_mac - Function for creating a PDCP Short MAC
+ * descriptor.
+ * @descbuf: pointer to buffer for descriptor construction
+ * @ps: if 36/40bit addressing is desired, this parameter must be true
+ * @swap: must be true when core endianness doesn't match SEC endianness
+ * @authdata: pointer to authentication transform definitions
+ * Valid algorithm values are those from auth_type_pdcp enum.
+ *
+ * Return: size of descriptor written in words, or a negative number on error.
+ *         The returned size can be used by the caller to reclaim the
+ *         descriptor buffer space that was not used.
+ *
+ * Note: descbuf must be large enough to contain a full 256 byte long
+ * descriptor; after the function returns, by subtracting the actual number of
+ * bytes used, the user can reuse the remaining buffer space for other purposes.
+ */
+static inline int
+cnstr_shdsc_pdcp_short_mac(uint32_t *descbuf,
+ bool ps,
+ bool swap,
+ struct alginfo *authdata)
+{
+ struct program prg;
+ struct program *p = &prg;
+ uint32_t iv[3] = {0, 0, 0};
+ LABEL(local_offset);
+ REFERENCE(move_cmd_read_descbuf);
+ REFERENCE(move_cmd_write_descbuf);
+
+ PROGRAM_CNTXT_INIT(p, descbuf, 0);
+ if (swap)
+ PROGRAM_SET_BSWAP(p);
+ if (ps)
+ PROGRAM_SET_36BIT_ADDR(p);
+
+ SHR_HDR(p, SHR_ALWAYS, 1, 0);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MATHB(p, SEQINSZ, SUB, ZERO, VSEQINSZ, 4, 0);
+ MATHB(p, SEQINSZ, SUB, ZERO, MATH1, 4, 0);
+ } else {
+ MATHB(p, SEQINSZ, ADD, ONE, MATH1, 4, 0);
+ MATHB(p, MATH1, SUB, ONE, MATH1, 4, 0);
+ MATHB(p, ZERO, ADD, MATH1, VSEQINSZ, 4, 0);
+ MOVE(p, MATH1, 0, MATH0, 0, 8, IMMED);
+
+ /*
+ * Since MOVELEN is available only starting with
+ * SEC ERA 3, use poor man's MOVELEN: create a MOVE
+ * command dynamically by writing the length from M1 by
+ * OR-ing the command in the M1 register and MOVE the
+ * result into the descriptor buffer. Care must be taken
+ * wrt. the location of the command because of SEC
+ * pipelining. The actual MOVEs are written at the end
+ * of the descriptor due to calculations needed on the
+ * offset in the descriptor for the MOVE command.
+ */
+ move_cmd_read_descbuf = MOVE(p, DESCBUF, 0, MATH0, 0, 6,
+ IMMED);
+ move_cmd_write_descbuf = MOVE(p, MATH0, 0, DESCBUF, 0, 8,
+ WAITCOMP | IMMED);
+ }
+ MATHB(p, ZERO, ADD, MATH1, VSEQOUTSZ, 4, 0);
+
+ switch (authdata->algtype) {
+ case PDCP_AUTH_TYPE_NULL:
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
+ } else {
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+
+ /* Placeholder for MOVE command with length from M1
+ * register
+ */
+ MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
+
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+
+ LOAD(p, (uintptr_t)iv, MATH0, 0, 8, IMMED | COPY);
+ SEQFIFOLOAD(p, MSG1, 0, VLF | LAST1 | LAST2 | FLUSH1);
+ SEQSTORE(p, MATH0, 0, 4, 0);
+
+ break;
+
+ case PDCP_AUTH_TYPE_SNOW:
+ iv[0] = 0xFFFFFFFF;
+ iv[1] = swap ? swab32(0x04000000) : 0x04000000;
+ iv[2] = swap ? swab32(0xF8000000) : 0xF8000000;
+
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ LOAD(p, (uintptr_t)&iv, CONTEXT2, 0, 12, IMMED | COPY);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_SNOW_F9,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_ENC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
+ } else {
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+
+ /* Placeholder for MOVE command with length from M1
+ * register
+ */
+ MOVE(p, IFIFOAB1, 0, OFIFO, 0, 0, IMMED);
+
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ break;
+
+ case PDCP_AUTH_TYPE_AES:
+ iv[0] = 0xFFFFFFFF;
+ iv[1] = swap ? swab32(0xFC000000) : 0xFC000000;
+ iv[2] = 0x00000000; /* unused */
+
+ KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ LOAD(p, (uintptr_t)&iv, MATH0, 0, 8, IMMED | COPY);
+ MOVE(p, MATH0, 0, IFIFOAB1, 0, 8, IMMED);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_AES,
+ OP_ALG_AAI_CMAC,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_ENC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+
+ if (rta_sec_era > RTA_SEC_ERA_2) {
+ MOVE(p, AB2, 0, OFIFO, 0, MATH1, 0);
+ } else {
+ SET_LABEL(p, local_offset);
+
+ /* Shut off automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_DISABLE_AUTO_NFIFO, 0, IMMED);
+
+ /* Placeholder for MOVE command with length from M1
+ * register
+ */
+ MOVE(p, IFIFOAB2, 0, OFIFO, 0, 0, IMMED);
+
+ /* Enable automatic Info FIFO entries */
+ LOAD(p, 0, DCTRL, LDOFF_ENABLE_AUTO_NFIFO, 0, IMMED);
+ }
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
+ SEQSTORE(p, CONTEXT1, 0, 4, 0);
+
+ break;
+
+ case PDCP_AUTH_TYPE_ZUC:
+ if (rta_sec_era < RTA_SEC_ERA_5) {
+ pr_err("Invalid era for selected algorithm\n");
+ return -ENOTSUP;
+ }
+ iv[0] = 0xFFFFFFFF;
+ iv[1] = swap ? swab32(0xFC000000) : 0xFC000000;
+ iv[2] = 0x00000000; /* unused */
+
+ KEY(p, KEY2, authdata->key_enc_flags, authdata->key,
+ authdata->keylen, INLINE_KEY(authdata));
+ LOAD(p, (uintptr_t)&iv, CONTEXT2, 0, 12, IMMED | COPY);
+ ALG_OPERATION(p, OP_ALG_ALGSEL_ZUCA,
+ OP_ALG_AAI_F9,
+ OP_ALG_AS_INITFINAL,
+ ICV_CHECK_DISABLE,
+ DIR_ENC);
+ SEQFIFOSTORE(p, MSG, 0, 0, VLF);
+ MOVE(p, AB1, 0, OFIFO, 0, MATH1, 0);
+ SEQFIFOLOAD(p, MSGINSNOOP, 0, VLF | LAST1 | LAST2 | FLUSH1);
+ SEQSTORE(p, CONTEXT2, 0, 4, 0);
+
+ break;
+
+ default:
+ pr_err("%s: Invalid integrity algorithm selected: %d\n",
+ "cnstr_shdsc_pdcp_short_mac", authdata->algtype);
+ return -EINVAL;
+ }
+
+ if (rta_sec_era < RTA_SEC_ERA_3) {
+ PATCH_MOVE(p, move_cmd_read_descbuf, local_offset);
+ PATCH_MOVE(p, move_cmd_write_descbuf, local_offset);
+ }
+
+ return PROGRAM_FINALIZE(p);
+}
+
+#endif /* __DESC_PDCP_H__ */
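
A minimal usage sketch for cnstr_shdsc_pdcp_short_mac() above, assuming SNOW f9 integrity, a key small enough to inline (RTA_DATA_IMM) and the driver-internal include path; it is an illustration, not part of the patch:

#include <stdint.h>
#include <stdbool.h>
#include "hw/desc/pdcp.h"	/* assumed include path inside this driver */

static int
build_short_mac_desc(uint32_t *descbuf,	/* at least 64 words (256 bytes) */
		     const uint8_t *key, uint32_t keylen)
{
	struct alginfo authdata = {
		.algtype = PDCP_AUTH_TYPE_SNOW,
		.key = (size_t)key,		/* key address */
		.keylen = keylen,		/* 16 bytes for SNOW f9 */
		.key_enc_flags = 0,
		.key_type = RTA_DATA_IMM,	/* assumption: inline the key */
	};

	/* ps = false: 32-bit pointers; swap = true: core/SEC endianness differ */
	return cnstr_shdsc_pdcp_short_mac(descbuf, false, true, &authdata);
}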
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h b/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h
index d9a5b0e5..cf8dfb91 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/protocol_cmd.h
@@ -14,178 +14,176 @@ static inline int
__rta_ssl_proto(uint16_t protoinfo)
{
switch (protoinfo) {
- case OP_PCL_SSL30_RC4_40_MD5_2:
- case OP_PCL_SSL30_RC4_128_MD5_2:
- case OP_PCL_SSL30_RC4_128_SHA_5:
- case OP_PCL_SSL30_RC4_40_MD5_3:
- case OP_PCL_SSL30_RC4_128_MD5_3:
- case OP_PCL_SSL30_RC4_128_SHA:
- case OP_PCL_SSL30_RC4_128_MD5:
- case OP_PCL_SSL30_RC4_40_SHA:
- case OP_PCL_SSL30_RC4_40_MD5:
- case OP_PCL_SSL30_RC4_128_SHA_2:
- case OP_PCL_SSL30_RC4_128_SHA_3:
- case OP_PCL_SSL30_RC4_128_SHA_4:
- case OP_PCL_SSL30_RC4_128_SHA_6:
- case OP_PCL_SSL30_RC4_128_SHA_7:
- case OP_PCL_SSL30_RC4_128_SHA_8:
- case OP_PCL_SSL30_RC4_128_SHA_9:
- case OP_PCL_SSL30_RC4_128_SHA_10:
- case OP_PCL_TLS_ECDHE_PSK_RC4_128_SHA:
+ case OP_PCL_TLS_RSA_EXPORT_WITH_RC4_40_MD5:
+ case OP_PCL_TLS_RSA_WITH_RC4_128_MD5:
+ case OP_PCL_TLS_RSA_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5:
+ case OP_PCL_TLS_DH_anon_WITH_RC4_128_MD5:
+ case OP_PCL_TLS_KRB5_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_KRB5_WITH_RC4_128_MD5:
+ case OP_PCL_TLS_KRB5_EXPORT_WITH_RC4_40_SHA:
+ case OP_PCL_TLS_KRB5_EXPORT_WITH_RC4_40_MD5:
+ case OP_PCL_TLS_PSK_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_DHE_PSK_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_RSA_PSK_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_ECDH_RSA_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_ECDH_anon_WITH_RC4_128_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_WITH_RC4_128_SHA:
if (rta_sec_era == RTA_SEC_ERA_7)
return -EINVAL;
/* fall through if not Era 7 */
- case OP_PCL_SSL30_DES40_CBC_SHA:
- case OP_PCL_SSL30_DES_CBC_SHA_2:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_5:
- case OP_PCL_SSL30_DES40_CBC_SHA_2:
- case OP_PCL_SSL30_DES_CBC_SHA_3:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_6:
- case OP_PCL_SSL30_DES40_CBC_SHA_3:
- case OP_PCL_SSL30_DES_CBC_SHA_4:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_7:
- case OP_PCL_SSL30_DES40_CBC_SHA_4:
- case OP_PCL_SSL30_DES_CBC_SHA_5:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_8:
- case OP_PCL_SSL30_DES40_CBC_SHA_5:
- case OP_PCL_SSL30_DES_CBC_SHA_6:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_9:
- case OP_PCL_SSL30_DES40_CBC_SHA_6:
- case OP_PCL_SSL30_DES_CBC_SHA_7:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_10:
- case OP_PCL_SSL30_DES_CBC_SHA:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA:
- case OP_PCL_SSL30_DES_CBC_MD5:
- case OP_PCL_SSL30_3DES_EDE_CBC_MD5:
- case OP_PCL_SSL30_DES40_CBC_SHA_7:
- case OP_PCL_SSL30_DES40_CBC_MD5:
- case OP_PCL_SSL30_AES_128_CBC_SHA:
- case OP_PCL_SSL30_AES_128_CBC_SHA_2:
- case OP_PCL_SSL30_AES_128_CBC_SHA_3:
- case OP_PCL_SSL30_AES_128_CBC_SHA_4:
- case OP_PCL_SSL30_AES_128_CBC_SHA_5:
- case OP_PCL_SSL30_AES_128_CBC_SHA_6:
- case OP_PCL_SSL30_AES_256_CBC_SHA:
- case OP_PCL_SSL30_AES_256_CBC_SHA_2:
- case OP_PCL_SSL30_AES_256_CBC_SHA_3:
- case OP_PCL_SSL30_AES_256_CBC_SHA_4:
- case OP_PCL_SSL30_AES_256_CBC_SHA_5:
- case OP_PCL_SSL30_AES_256_CBC_SHA_6:
- case OP_PCL_TLS12_AES_128_CBC_SHA256_2:
- case OP_PCL_TLS12_AES_128_CBC_SHA256_3:
- case OP_PCL_TLS12_AES_128_CBC_SHA256_4:
- case OP_PCL_TLS12_AES_128_CBC_SHA256_5:
- case OP_PCL_TLS12_AES_256_CBC_SHA256_2:
- case OP_PCL_TLS12_AES_256_CBC_SHA256_3:
- case OP_PCL_TLS12_AES_256_CBC_SHA256_4:
- case OP_PCL_TLS12_AES_256_CBC_SHA256_5:
- case OP_PCL_TLS12_AES_128_CBC_SHA256_6:
- case OP_PCL_TLS12_AES_256_CBC_SHA256_6:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_2:
- case OP_PCL_SSL30_AES_128_CBC_SHA_7:
- case OP_PCL_SSL30_AES_256_CBC_SHA_7:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_3:
- case OP_PCL_SSL30_AES_128_CBC_SHA_8:
- case OP_PCL_SSL30_AES_256_CBC_SHA_8:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_4:
- case OP_PCL_SSL30_AES_128_CBC_SHA_9:
- case OP_PCL_SSL30_AES_256_CBC_SHA_9:
- case OP_PCL_SSL30_AES_128_GCM_SHA256_1:
- case OP_PCL_SSL30_AES_256_GCM_SHA384_1:
- case OP_PCL_SSL30_AES_128_GCM_SHA256_2:
- case OP_PCL_SSL30_AES_256_GCM_SHA384_2:
- case OP_PCL_SSL30_AES_128_GCM_SHA256_3:
- case OP_PCL_SSL30_AES_256_GCM_SHA384_3:
- case OP_PCL_SSL30_AES_128_GCM_SHA256_4:
- case OP_PCL_SSL30_AES_256_GCM_SHA384_4:
- case OP_PCL_SSL30_AES_128_GCM_SHA256_5:
- case OP_PCL_SSL30_AES_256_GCM_SHA384_5:
- case OP_PCL_SSL30_AES_128_GCM_SHA256_6:
- case OP_PCL_TLS_DH_ANON_AES_256_GCM_SHA384:
- case OP_PCL_TLS_PSK_AES_128_GCM_SHA256:
- case OP_PCL_TLS_PSK_AES_256_GCM_SHA384:
- case OP_PCL_TLS_DHE_PSK_AES_128_GCM_SHA256:
- case OP_PCL_TLS_DHE_PSK_AES_256_GCM_SHA384:
- case OP_PCL_TLS_RSA_PSK_AES_128_GCM_SHA256:
- case OP_PCL_TLS_RSA_PSK_AES_256_GCM_SHA384:
- case OP_PCL_TLS_PSK_AES_128_CBC_SHA256:
- case OP_PCL_TLS_PSK_AES_256_CBC_SHA384:
- case OP_PCL_TLS_DHE_PSK_AES_128_CBC_SHA256:
- case OP_PCL_TLS_DHE_PSK_AES_256_CBC_SHA384:
- case OP_PCL_TLS_RSA_PSK_AES_128_CBC_SHA256:
- case OP_PCL_TLS_RSA_PSK_AES_256_CBC_SHA384:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_11:
- case OP_PCL_SSL30_AES_128_CBC_SHA_10:
- case OP_PCL_SSL30_AES_256_CBC_SHA_10:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_12:
- case OP_PCL_SSL30_AES_128_CBC_SHA_11:
- case OP_PCL_SSL30_AES_256_CBC_SHA_11:
- case OP_PCL_SSL30_AES_128_CBC_SHA_12:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_13:
- case OP_PCL_SSL30_AES_256_CBC_SHA_12:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_14:
- case OP_PCL_SSL30_AES_128_CBC_SHA_13:
- case OP_PCL_SSL30_AES_256_CBC_SHA_13:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_15:
- case OP_PCL_SSL30_AES_128_CBC_SHA_14:
- case OP_PCL_SSL30_AES_256_CBC_SHA_14:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_16:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_17:
- case OP_PCL_SSL30_3DES_EDE_CBC_SHA_18:
- case OP_PCL_SSL30_AES_128_CBC_SHA_15:
- case OP_PCL_SSL30_AES_128_CBC_SHA_16:
- case OP_PCL_SSL30_AES_128_CBC_SHA_17:
- case OP_PCL_SSL30_AES_256_CBC_SHA_15:
- case OP_PCL_SSL30_AES_256_CBC_SHA_16:
- case OP_PCL_SSL30_AES_256_CBC_SHA_17:
- case OP_PCL_TLS_ECDHE_ECDSA_AES_128_CBC_SHA256:
- case OP_PCL_TLS_ECDHE_ECDSA_AES_256_CBC_SHA384:
- case OP_PCL_TLS_ECDH_ECDSA_AES_128_CBC_SHA256:
- case OP_PCL_TLS_ECDH_ECDSA_AES_256_CBC_SHA384:
- case OP_PCL_TLS_ECDHE_RSA_AES_128_CBC_SHA256:
- case OP_PCL_TLS_ECDHE_RSA_AES_256_CBC_SHA384:
- case OP_PCL_TLS_ECDH_RSA_AES_128_CBC_SHA256:
- case OP_PCL_TLS_ECDH_RSA_AES_256_CBC_SHA384:
- case OP_PCL_TLS_ECDHE_ECDSA_AES_128_GCM_SHA256:
- case OP_PCL_TLS_ECDHE_ECDSA_AES_256_GCM_SHA384:
- case OP_PCL_TLS_ECDH_ECDSA_AES_128_GCM_SHA256:
- case OP_PCL_TLS_ECDH_ECDSA_AES_256_GCM_SHA384:
- case OP_PCL_TLS_ECDHE_RSA_AES_128_GCM_SHA256:
- case OP_PCL_TLS_ECDHE_RSA_AES_256_GCM_SHA384:
- case OP_PCL_TLS_ECDH_RSA_AES_128_GCM_SHA256:
- case OP_PCL_TLS_ECDH_RSA_AES_256_GCM_SHA384:
- case OP_PCL_TLS_ECDHE_PSK_3DES_EDE_CBC_SHA:
- case OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA:
- case OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA:
- case OP_PCL_TLS_ECDHE_PSK_AES_128_CBC_SHA256:
- case OP_PCL_TLS_ECDHE_PSK_AES_256_CBC_SHA384:
- case OP_PCL_TLS12_3DES_EDE_CBC_MD5:
- case OP_PCL_TLS12_3DES_EDE_CBC_SHA160:
- case OP_PCL_TLS12_3DES_EDE_CBC_SHA224:
- case OP_PCL_TLS12_3DES_EDE_CBC_SHA256:
- case OP_PCL_TLS12_3DES_EDE_CBC_SHA384:
- case OP_PCL_TLS12_3DES_EDE_CBC_SHA512:
- case OP_PCL_TLS12_AES_128_CBC_SHA160:
- case OP_PCL_TLS12_AES_128_CBC_SHA224:
- case OP_PCL_TLS12_AES_128_CBC_SHA256:
- case OP_PCL_TLS12_AES_128_CBC_SHA384:
- case OP_PCL_TLS12_AES_128_CBC_SHA512:
- case OP_PCL_TLS12_AES_192_CBC_SHA160:
- case OP_PCL_TLS12_AES_192_CBC_SHA224:
- case OP_PCL_TLS12_AES_192_CBC_SHA256:
- case OP_PCL_TLS12_AES_192_CBC_SHA512:
- case OP_PCL_TLS12_AES_256_CBC_SHA160:
- case OP_PCL_TLS12_AES_256_CBC_SHA224:
- case OP_PCL_TLS12_AES_256_CBC_SHA256:
- case OP_PCL_TLS12_AES_256_CBC_SHA384:
- case OP_PCL_TLS12_AES_256_CBC_SHA512:
- case OP_PCL_TLS_PVT_AES_192_CBC_SHA160:
- case OP_PCL_TLS_PVT_AES_192_CBC_SHA384:
- case OP_PCL_TLS_PVT_AES_192_CBC_SHA224:
- case OP_PCL_TLS_PVT_AES_192_CBC_SHA512:
- case OP_PCL_TLS_PVT_AES_192_CBC_SHA256:
- case OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FE:
- case OP_PCL_TLS_PVT_MASTER_SECRET_PRF_FF:
+ case OP_PCL_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA:
+ case OP_PCL_TLS_RSA_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_RSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA:
+ case OP_PCL_TLS_DH_DSS_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA:
+ case OP_PCL_TLS_DH_RSA_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA:
+ case OP_PCL_TLS_DHE_DSS_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA:
+ case OP_PCL_TLS_DHE_RSA_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA:
+ case OP_PCL_TLS_DH_anon_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_KRB5_WITH_DES_CBC_SHA:
+ case OP_PCL_TLS_KRB5_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_KRB5_WITH_DES_CBC_MD5:
+ case OP_PCL_TLS_KRB5_WITH_3DES_EDE_CBC_MD5:
+ case OP_PCL_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA:
+ case OP_PCL_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5:
+ case OP_PCL_TLS_RSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_DH_DSS_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_DH_RSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_DHE_DSS_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_DHE_RSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_DH_anon_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_RSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DH_DSS_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DH_RSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DHE_DSS_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DHE_RSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DH_anon_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DH_DSS_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DH_RSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DH_DSS_WITH_AES_256_CBC_SHA256:
+ case OP_PCL_TLS_DH_RSA_WITH_AES_256_CBC_SHA256:
+ case OP_PCL_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256:
+ case OP_PCL_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256:
+ case OP_PCL_TLS_DH_anon_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DH_anon_WITH_AES_256_CBC_SHA256:
+ case OP_PCL_TLS_PSK_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_PSK_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_PSK_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_DHE_PSK_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_DHE_PSK_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_RSA_PSK_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_RSA_PSK_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_RSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_RSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DH_RSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DH_RSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DH_DSS_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DH_DSS_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DH_anon_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DH_anon_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_PSK_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_PSK_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_PSK_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_PSK_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDH_anon_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDH_anon_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256:
+ case OP_PCL_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384:
+ case OP_PCL_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA:
+ case OP_PCL_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384:
+ case OP_PCL_TLS_RSA_WITH_AES_128_CBC_SHA256:
+ case OP_PCL_TLS_RSA_WITH_AES_256_CBC_SHA256:
+ case OP_PCL_PVT_TLS_3DES_EDE_CBC_MD5:
+ case OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA160:
+ case OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA224:
+ case OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA256:
+ case OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA384:
+ case OP_PCL_PVT_TLS_3DES_EDE_CBC_SHA512:
+ case OP_PCL_PVT_TLS_AES_128_CBC_SHA160:
+ case OP_PCL_PVT_TLS_AES_128_CBC_SHA224:
+ case OP_PCL_PVT_TLS_AES_128_CBC_SHA256:
+ case OP_PCL_PVT_TLS_AES_128_CBC_SHA384:
+ case OP_PCL_PVT_TLS_AES_128_CBC_SHA512:
+ case OP_PCL_PVT_TLS_AES_192_CBC_SHA160:
+ case OP_PCL_PVT_TLS_AES_192_CBC_SHA224:
+ case OP_PCL_PVT_TLS_AES_192_CBC_SHA256:
+ case OP_PCL_PVT_TLS_AES_192_CBC_SHA512:
+ case OP_PCL_PVT_TLS_AES_256_CBC_SHA160:
+ case OP_PCL_PVT_TLS_AES_256_CBC_SHA224:
+ case OP_PCL_PVT_TLS_AES_256_CBC_SHA384:
+ case OP_PCL_PVT_TLS_AES_256_CBC_SHA512:
+ case OP_PCL_PVT_TLS_AES_256_CBC_SHA256:
+ case OP_PCL_PVT_TLS_AES_192_CBC_SHA384:
+ case OP_PCL_PVT_TLS_MASTER_SECRET_PRF_FE:
+ case OP_PCL_PVT_TLS_MASTER_SECRET_PRF_FF:
return 0;
}
@@ -323,6 +321,12 @@ static const uint32_t proto_blob_flags[] = {
OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM,
OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
+ OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM,
+ OP_PCL_BLOB_FORMAT_MASK | OP_PCL_BLOB_BLACK | OP_PCL_BLOB_TKEK |
OP_PCL_BLOB_EKT | OP_PCL_BLOB_REG_MASK | OP_PCL_BLOB_SEC_MEM
};
@@ -556,7 +560,7 @@ static const struct proto_map proto_table[] = {
{OP_TYPE_UNI_PROTOCOL, OP_PCLID_TLS10_PRF, __rta_ssl_proto},
{OP_TYPE_UNI_PROTOCOL, OP_PCLID_TLS11_PRF, __rta_ssl_proto},
{OP_TYPE_UNI_PROTOCOL, OP_PCLID_TLS12_PRF, __rta_ssl_proto},
- {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DTLS10_PRF, __rta_ssl_proto},
+ {OP_TYPE_UNI_PROTOCOL, OP_PCLID_DTLS_PRF, __rta_ssl_proto},
{OP_TYPE_UNI_PROTOCOL, OP_PCLID_IKEV1_PRF, __rta_ike_proto},
{OP_TYPE_UNI_PROTOCOL, OP_PCLID_IKEV2_PRF, __rta_ike_proto},
{OP_TYPE_UNI_PROTOCOL, OP_PCLID_PUBLICKEYPAIR, __rta_dlc_proto},
@@ -568,7 +572,7 @@ static const struct proto_map proto_table[] = {
{OP_TYPE_DECAP_PROTOCOL, OP_PCLID_TLS10, __rta_ssl_proto},
{OP_TYPE_DECAP_PROTOCOL, OP_PCLID_TLS11, __rta_ssl_proto},
{OP_TYPE_DECAP_PROTOCOL, OP_PCLID_TLS12, __rta_ssl_proto},
- {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_DTLS10, __rta_ssl_proto},
+ {OP_TYPE_DECAP_PROTOCOL, OP_PCLID_DTLS, __rta_ssl_proto},
{OP_TYPE_DECAP_PROTOCOL, OP_PCLID_MACSEC, __rta_macsec_proto},
{OP_TYPE_DECAP_PROTOCOL, OP_PCLID_WIFI, __rta_wifi_proto},
{OP_TYPE_DECAP_PROTOCOL, OP_PCLID_WIMAX, __rta_wimax_proto},
diff --git a/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h b/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h
index 6e666108..5357187f 100644
--- a/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h
+++ b/drivers/crypto/dpaa2_sec/hw/rta/sec_run_time_asm.h
@@ -497,6 +497,28 @@ __rta_out64(struct program *program, bool is_ext, uint64_t val)
}
}
+static inline void __rta_out_be64(struct program *program, bool is_ext,
+ uint64_t val)
+{
+ if (is_ext) {
+ __rta_out_be32(program, upper_32_bits(val));
+ __rta_out_be32(program, lower_32_bits(val));
+ } else {
+ __rta_out_be32(program, lower_32_bits(val));
+ }
+}
+
+static inline void __rta_out_le64(struct program *program, bool is_ext,
+ uint64_t val)
+{
+ if (is_ext) {
+ __rta_out_le32(program, lower_32_bits(val));
+ __rta_out_le32(program, upper_32_bits(val));
+ } else {
+ __rta_out_le32(program, lower_32_bits(val));
+ }
+}
+
static inline unsigned int
rta_word(struct program *program, uint32_t val)
{
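
The two helpers added above complement __rta_out64() by emitting the 32-bit halves in a fixed byte order instead of following the program's swap setting. A hedged one-liner showing the intended call pattern (p and key_addr are hypothetical; the assumption is that p->ps carries the 36/40-bit addressing flag set by PROGRAM_SET_36BIT_ADDR):

/* Emit a pointer in big-endian word order; with extended addressing off,
 * only the lower 32 bits are written, as in __rta_out64(). */
__rta_out_be64(p, p->ps, (uint64_t)(uintptr_t)key_addr);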
diff --git a/drivers/crypto/dpaa2_sec/mc/dpseci.c b/drivers/crypto/dpaa2_sec/mc/dpseci.c
index de8ca970..87e0defd 100644
--- a/drivers/crypto/dpaa2_sec/mc/dpseci.c
+++ b/drivers/crypto/dpaa2_sec/mc/dpseci.c
@@ -6,6 +6,7 @@
*/
#include <fsl_mc_sys.h>
#include <fsl_mc_cmd.h>
+#include <fsl_dpopr.h>
#include <fsl_dpseci.h>
#include <fsl_dpseci_cmd.h>
@@ -116,11 +117,13 @@ int dpseci_create(struct fsl_mc_io *mc_io,
cmd_flags,
dprc_token);
cmd_params = (struct dpseci_cmd_create *)cmd.params;
- for (i = 0; i < DPSECI_PRIO_NUM; i++)
+ for (i = 0; i < 8; i++)
cmd_params->priorities[i] = cfg->priorities[i];
+ for (i = 0; i < 8; i++)
+ cmd_params->priorities2[i] = cfg->priorities[8 + i];
cmd_params->num_tx_queues = cfg->num_tx_queues;
cmd_params->num_rx_queues = cfg->num_rx_queues;
- cmd_params->options = cfg->options;
+ cmd_params->options = cpu_to_le32(cfg->options);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
@@ -302,7 +305,7 @@ int dpseci_get_attributes(struct fsl_mc_io *mc_io,
/* retrieve response parameters */
rsp_params = (struct dpseci_rsp_get_attr *)cmd.params;
attr->id = le32_to_cpu(rsp_params->id);
- attr->options = rsp_params->options;
+ attr->options = le32_to_cpu(rsp_params->options);
attr->num_tx_queues = rsp_params->num_tx_queues;
attr->num_rx_queues = rsp_params->num_rx_queues;
@@ -490,6 +493,8 @@ int dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
attr->arc4_acc_num = rsp_params->arc4_acc_num;
attr->des_acc_num = rsp_params->des_acc_num;
attr->aes_acc_num = rsp_params->aes_acc_num;
+ attr->ccha_acc_num = rsp_params->ccha_acc_num;
+ attr->ptha_acc_num = rsp_params->ptha_acc_num;
return 0;
}
@@ -569,6 +574,113 @@ int dpseci_get_api_version(struct fsl_mc_io *mc_io,
return 0;
}
+/**
+ * dpseci_set_opr() - Set Order Restoration configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @index: The queue index
+ * @options: Configuration mode options
+ * can be OPR_OPT_CREATE or OPR_OPT_RETIRE
+ * @cfg: Configuration options for the OPR
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_set_opr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t index,
+ uint8_t options,
+ struct opr_cfg *cfg)
+{
+ struct dpseci_cmd_set_opr *cmd_params;
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_OPR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_set_opr *)cmd.params;
+ cmd_params->index = index;
+ cmd_params->options = options;
+ cmd_params->oloe = cfg->oloe;
+ cmd_params->oeane = cfg->oeane;
+ cmd_params->olws = cfg->olws;
+ cmd_params->oa = cfg->oa;
+ cmd_params->oprrws = cfg->oprrws;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_get_opr() - Retrieve Order Restoration configuration and state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @index: The queue index
+ * @cfg: Returned OPR configuration
+ * @qry: Returned OPR query
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_opr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t index,
+ struct opr_cfg *cfg,
+ struct opr_qry *qry)
+{
+ struct dpseci_rsp_get_opr *rsp_params;
+ struct dpseci_cmd_get_opr *cmd_params;
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_OPR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_get_opr *)cmd.params;
+ cmd_params->index = index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpseci_rsp_get_opr *)cmd.params;
+ cfg->oloe = rsp_params->oloe;
+ cfg->oeane = rsp_params->oeane;
+ cfg->olws = rsp_params->olws;
+ cfg->oa = rsp_params->oa;
+ cfg->oprrws = rsp_params->oprrws;
+ qry->rip = dpseci_get_field(rsp_params->flags, RIP);
+ qry->enable = dpseci_get_field(rsp_params->flags, OPR_ENABLE);
+ qry->nesn = le16_to_cpu(rsp_params->nesn);
+ qry->ndsn = le16_to_cpu(rsp_params->ndsn);
+ qry->ea_tseq = le16_to_cpu(rsp_params->ea_tseq);
+ qry->tseq_nlis = dpseci_get_field(rsp_params->tseq_nlis, TSEQ_NLIS);
+ qry->ea_hseq = le16_to_cpu(rsp_params->ea_hseq);
+ qry->hseq_nlis = dpseci_get_field(rsp_params->hseq_nlis, HSEQ_NLIS);
+ qry->ea_hptr = le16_to_cpu(rsp_params->ea_hptr);
+ qry->ea_tptr = le16_to_cpu(rsp_params->ea_tptr);
+ qry->opr_vid = le16_to_cpu(rsp_params->opr_vid);
+ qry->opr_id = le16_to_cpu(rsp_params->opr_id);
+
+ return 0;
+}
+
+/**
+ * dpseci_set_congestion_notification() - Set congestion group
+ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @cfg: congestion notification configuration
+ *
+ * Return: '0' on success, error code otherwise
+ */
int dpseci_set_congestion_notification(
struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
@@ -604,6 +716,16 @@ int dpseci_set_congestion_notification(
return mc_send_command(mc_io, &cmd);
}
+/**
+ * dpseci_get_congestion_notification() - Get congestion group
+ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @cfg: congestion notification configuration
+ *
+ * Return: '0' on success, error code otherwise
+ */
int dpseci_get_congestion_notification(
struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
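
A minimal sketch of the new order-restoration calls above (illustration only; mc_io and token come from an earlier dpseci_open(), CMD_PRI_LOW is the usual MC command-flag value, and the opr_cfg field values are arbitrary examples):

struct opr_cfg cfg = {
	.oprrws = 3,	/* example restoration-window size code */
	.oa = 0,
	.olws = 0,
	.oeane = 0,
	.oloe = 0,
};
struct opr_qry qry;
int err;

/* Create an order restoration point on queue index 0 */
err = dpseci_set_opr(mc_io, CMD_PRI_LOW, token, 0, OPR_OPT_CREATE, &cfg);
if (!err)
	/* Read back the configuration and the current sequence-number state */
	err = dpseci_get_opr(mc_io, CMD_PRI_LOW, token, 0, &cfg, &qry);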
diff --git a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
index 12ac005a..279e8f4d 100644
--- a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
+++ b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
@@ -20,7 +20,7 @@ struct fsl_mc_io;
/**
* Maximum number of Tx/Rx priorities per DPSECI object
*/
-#define DPSECI_PRIO_NUM 8
+#define DPSECI_MAX_QUEUE_NUM 16
/**
* All queues considered; see dpseci_set_rx_queue()
@@ -58,7 +58,7 @@ struct dpseci_cfg {
uint32_t options;
uint8_t num_tx_queues;
uint8_t num_rx_queues;
- uint8_t priorities[DPSECI_PRIO_NUM];
+ uint8_t priorities[DPSECI_MAX_QUEUE_NUM];
};
int dpseci_create(struct fsl_mc_io *mc_io,
@@ -259,6 +259,10 @@ int dpseci_get_tx_queue(struct fsl_mc_io *mc_io,
* implemented in this version of SEC.
* @aes_acc_num: The number of copies of the AES module that are
* implemented in this version of SEC.
+ * @ccha_acc_num: The number of copies of the ChaCha20 module that are
+ * implemented in this version of SEC.
+ * @ptha_acc_num: The number of copies of the Poly1305 module that are
+ * implemented in this version of SEC.
**/
struct dpseci_sec_attr {
@@ -279,6 +283,8 @@ struct dpseci_sec_attr {
uint8_t arc4_acc_num;
uint8_t des_acc_num;
uint8_t aes_acc_num;
+ uint8_t ccha_acc_num;
+ uint8_t ptha_acc_num;
};
int dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
@@ -316,6 +322,21 @@ int dpseci_get_api_version(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t *major_ver,
uint16_t *minor_ver);
+
+int dpseci_set_opr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t index,
+ uint8_t options,
+ struct opr_cfg *cfg);
+
+int dpseci_get_opr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t index,
+ struct opr_cfg *cfg,
+ struct opr_qry *qry);
+
/**
* enum dpseci_congestion_unit - DPSECI congestion units
* @DPSECI_CONGESTION_UNIT_BYTES: bytes units
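
The new ccha_acc_num and ptha_acc_num fields are returned through the existing attribute query; a short sketch, assuming the usual dpseci_get_sec_attr() call sequence (illustration only, error handling elided):

struct dpseci_sec_attr sec_attr = { 0 };

if (dpseci_get_sec_attr(mc_io, CMD_PRI_LOW, token, &sec_attr) == 0)
	printf("ChaCha20 engines: %u, Poly1305 engines: %u\n",
	       sec_attr.ccha_acc_num, sec_attr.ptha_acc_num);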
diff --git a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
index 26cef0f7..af3518a0 100644
--- a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
+++ b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
@@ -9,22 +9,25 @@
/* DPSECI Version */
#define DPSECI_VER_MAJOR 5
-#define DPSECI_VER_MINOR 1
+#define DPSECI_VER_MINOR 3
/* Command versioning */
#define DPSECI_CMD_BASE_VERSION 1
#define DPSECI_CMD_BASE_VERSION_V2 2
+#define DPSECI_CMD_BASE_VERSION_V3 3
#define DPSECI_CMD_ID_OFFSET 4
#define DPSECI_CMD_V1(id) \
((id << DPSECI_CMD_ID_OFFSET) | DPSECI_CMD_BASE_VERSION)
#define DPSECI_CMD_V2(id) \
((id << DPSECI_CMD_ID_OFFSET) | DPSECI_CMD_BASE_VERSION_V2)
+#define DPSECI_CMD_V3(id) \
+ ((id << DPSECI_CMD_ID_OFFSET) | DPSECI_CMD_BASE_VERSION_V3)
/* Command IDs */
#define DPSECI_CMDID_CLOSE DPSECI_CMD_V1(0x800)
#define DPSECI_CMDID_OPEN DPSECI_CMD_V1(0x809)
-#define DPSECI_CMDID_CREATE DPSECI_CMD_V2(0x909)
+#define DPSECI_CMDID_CREATE DPSECI_CMD_V3(0x909)
#define DPSECI_CMDID_DESTROY DPSECI_CMD_V1(0x989)
#define DPSECI_CMDID_GET_API_VERSION DPSECI_CMD_V1(0xa09)
@@ -37,9 +40,10 @@
#define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194)
#define DPSECI_CMDID_GET_RX_QUEUE DPSECI_CMD_V1(0x196)
#define DPSECI_CMDID_GET_TX_QUEUE DPSECI_CMD_V1(0x197)
-#define DPSECI_CMDID_GET_SEC_ATTR DPSECI_CMD_V1(0x198)
+#define DPSECI_CMDID_GET_SEC_ATTR DPSECI_CMD_V2(0x198)
#define DPSECI_CMDID_GET_SEC_COUNTERS DPSECI_CMD_V1(0x199)
-
+#define DPSECI_CMDID_SET_OPR DPSECI_CMD_V1(0x19A)
+#define DPSECI_CMDID_GET_OPR DPSECI_CMD_V1(0x19B)
#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x170)
#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x171)
@@ -63,6 +67,8 @@ struct dpseci_cmd_create {
uint8_t num_rx_queues;
uint8_t pad[6];
uint32_t options;
+ uint32_t pad2;
+ uint8_t priorities2[8];
};
struct dpseci_cmd_destroy {
@@ -152,6 +158,8 @@ struct dpseci_rsp_get_sec_attr {
uint8_t arc4_acc_num;
uint8_t des_acc_num;
uint8_t aes_acc_num;
+ uint8_t ccha_acc_num;
+ uint8_t ptha_acc_num;
};
struct dpseci_rsp_get_sec_counters {
@@ -169,6 +177,63 @@ struct dpseci_rsp_get_api_version {
uint16_t minor;
};
+struct dpseci_cmd_set_opr {
+ uint16_t pad0;
+ uint8_t index;
+ uint8_t options;
+ uint8_t pad1[7];
+ uint8_t oloe;
+ uint8_t oeane;
+ uint8_t olws;
+ uint8_t oa;
+ uint8_t oprrws;
+};
+
+struct dpseci_cmd_get_opr {
+ uint16_t pad;
+ uint8_t index;
+};
+
+#define DPSECI_RIP_SHIFT 0
+#define DPSECI_RIP_SIZE 1
+#define DPSECI_OPR_ENABLE_SHIFT 1
+#define DPSECI_OPR_ENABLE_SIZE 1
+#define DPSECI_TSEQ_NLIS_SHIFT 0
+#define DPSECI_TSEQ_NLIS_SIZE 1
+#define DPSECI_HSEQ_NLIS_SHIFT 0
+#define DPSECI_HSEQ_NLIS_SIZE 1
+
+struct dpseci_rsp_get_opr {
+ uint64_t pad0;
+ /* from LSB: rip:1 enable:1 */
+ uint8_t flags;
+ uint16_t pad1;
+ uint8_t oloe;
+ uint8_t oeane;
+ uint8_t olws;
+ uint8_t oa;
+ uint8_t oprrws;
+ uint16_t nesn;
+ uint16_t pad8;
+ uint16_t ndsn;
+ uint16_t pad2;
+ uint16_t ea_tseq;
+ /* only the LSB */
+ uint8_t tseq_nlis;
+ uint8_t pad3;
+ uint16_t ea_hseq;
+ /* only the LSB */
+ uint8_t hseq_nlis;
+ uint8_t pad4;
+ uint16_t ea_hptr;
+ uint16_t pad5;
+ uint16_t ea_tptr;
+ uint16_t pad6;
+ uint16_t opr_vid;
+ uint16_t pad7;
+ uint16_t opr_id;
+};
+
#define DPSECI_DEST_TYPE_SHIFT 0
#define DPSECI_DEST_TYPE_SIZE 4
#define DPSECI_CG_UNITS_SHIFT 4
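
For reference, the DPSECI_CMD_Vn() macros above keep the command ID in the upper bits and the command version in the low nibble, so the bumped create command encodes as follows (worked out from the definitions in this header):

/* DPSECI_CMDID_CREATE == (0x909 << DPSECI_CMD_ID_OFFSET) | 3 == 0x9093 */
_Static_assert(DPSECI_CMDID_CREATE ==
	       ((0x909 << DPSECI_CMD_ID_OFFSET) | DPSECI_CMD_BASE_VERSION_V3),
	       "DPSECI create command: ID 0x909, version 3");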
diff --git a/drivers/crypto/dpaa2_sec/meson.build b/drivers/crypto/dpaa2_sec/meson.build
index 01afc587..8fa4827e 100644
--- a/drivers/crypto/dpaa2_sec/meson.build
+++ b/drivers/crypto/dpaa2_sec/meson.build
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright 2018 NXP
+version = 2
+
if host_machine.system() != 'linux'
build = false
endif
diff --git a/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map b/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map
index 8591cc0b..0bfb986d 100644
--- a/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map
+++ b/drivers/crypto/dpaa2_sec/rte_pmd_dpaa2_sec_version.map
@@ -2,3 +2,11 @@ DPDK_17.05 {
local: *;
};
+
+DPDK_18.11 {
+ global:
+
+ dpaa2_sec_eventq_attach;
+ dpaa2_sec_eventq_detach;
+
+} DPDK_17.05;