Diffstat (limited to 'drivers/crypto/dpaa_sec/dpaa_sec.c')
-rw-r--r--  drivers/crypto/dpaa_sec/dpaa_sec.c | 314
1 file changed, 190 insertions(+), 124 deletions(-)
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index f571050b..d83e7454 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -23,6 +23,7 @@
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
+#include <rte_spinlock.h>
#include <fsl_usd.h>
#include <fsl_qman.h>
@@ -106,6 +107,12 @@ dpaa_mem_vtop(void *vaddr)
static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
+ void *va;
+
+ va = (void *)dpaax_iova_table_get_va(paddr);
+ if (likely(va))
+ return va;
+
return rte_mem_iova2virt(paddr);
}
@@ -274,6 +281,9 @@ caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
switch (ses->auth_alg) {
case RTE_CRYPTO_AUTH_NULL:
+ alginfo_a->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_HMAC_NULL : 0;
ses->digest_length = 0;
break;
case RTE_CRYPTO_AUTH_MD5_HMAC:
@@ -322,6 +332,9 @@ caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
switch (ses->cipher_alg) {
case RTE_CRYPTO_CIPHER_NULL:
+ alginfo_c->algtype =
+ (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
+ OP_PCL_IPSEC_NULL : 0;
break;
case RTE_CRYPTO_CIPHER_AES_CBC:
alginfo_c->algtype =
@@ -359,6 +372,87 @@ caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
}
}
+/* prepare ipsec proto command block of the session */
+static int
+dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
+{
+ struct alginfo cipherdata = {0}, authdata = {0};
+ struct sec_cdb *cdb = &ses->cdb;
+ int32_t shared_desc_len = 0;
+ int err;
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ int swap = false;
+#else
+ int swap = true;
+#endif
+
+ caam_cipher_alg(ses, &cipherdata);
+ if (cipherdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
+ DPAA_SEC_ERR("not supported cipher alg");
+ return -ENOTSUP;
+ }
+
+ cipherdata.key = (size_t)ses->cipher_key.data;
+ cipherdata.keylen = ses->cipher_key.length;
+ cipherdata.key_enc_flags = 0;
+ cipherdata.key_type = RTA_DATA_IMM;
+
+ caam_auth_alg(ses, &authdata);
+ if (authdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
+ DPAA_SEC_ERR("not supported auth alg");
+ return -ENOTSUP;
+ }
+
+ authdata.key = (size_t)ses->auth_key.data;
+ authdata.keylen = ses->auth_key.length;
+ authdata.key_enc_flags = 0;
+ authdata.key_type = RTA_DATA_IMM;
+
+ cdb->sh_desc[0] = cipherdata.keylen;
+ cdb->sh_desc[1] = authdata.keylen;
+ err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+ MIN_JOB_DESC_SIZE,
+ (unsigned int *)cdb->sh_desc,
+ &cdb->sh_desc[2], 2);
+
+ if (err < 0) {
+ DPAA_SEC_ERR("Crypto: Incorrect key lengths");
+ return err;
+ }
+ if (cdb->sh_desc[2] & 1)
+ cipherdata.key_type = RTA_DATA_IMM;
+ else {
+ cipherdata.key = (size_t)dpaa_mem_vtop(
+ (void *)(size_t)cipherdata.key);
+ cipherdata.key_type = RTA_DATA_PTR;
+ }
+ if (cdb->sh_desc[2] & (1<<1))
+ authdata.key_type = RTA_DATA_IMM;
+ else {
+ authdata.key = (size_t)dpaa_mem_vtop(
+ (void *)(size_t)authdata.key);
+ authdata.key_type = RTA_DATA_PTR;
+ }
+
+ cdb->sh_desc[0] = 0;
+ cdb->sh_desc[1] = 0;
+ cdb->sh_desc[2] = 0;
+ if (ses->dir == DIR_ENC) {
+ shared_desc_len = cnstr_shdsc_ipsec_new_encap(
+ cdb->sh_desc,
+ true, swap, SHR_SERIAL,
+ &ses->encap_pdb,
+ (uint8_t *)&ses->ip4_hdr,
+ &cipherdata, &authdata);
+ } else if (ses->dir == DIR_DEC) {
+ shared_desc_len = cnstr_shdsc_ipsec_new_decap(
+ cdb->sh_desc,
+ true, swap, SHR_SERIAL,
+ &ses->decap_pdb,
+ &cipherdata, &authdata);
+ }
+ return shared_desc_len;
+}
/* prepare command block of the session */
static int
@@ -376,7 +470,9 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
memset(cdb, 0, sizeof(struct sec_cdb));
- if (is_cipher_only(ses)) {
+ if (is_proto_ipsec(ses)) {
+ shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
+ } else if (is_cipher_only(ses)) {
caam_cipher_alg(ses, &alginfo_c);
if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
DPAA_SEC_ERR("not supported cipher alg");
@@ -484,28 +580,13 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
cdb->sh_desc[0] = 0;
cdb->sh_desc[1] = 0;
cdb->sh_desc[2] = 0;
- if (is_proto_ipsec(ses)) {
- if (ses->dir == DIR_ENC) {
- shared_desc_len = cnstr_shdsc_ipsec_new_encap(
- cdb->sh_desc,
- true, swap, &ses->encap_pdb,
- (uint8_t *)&ses->ip4_hdr,
- &alginfo_c, &alginfo_a);
- } else if (ses->dir == DIR_DEC) {
- shared_desc_len = cnstr_shdsc_ipsec_new_decap(
- cdb->sh_desc,
- true, swap, &ses->decap_pdb,
- &alginfo_c, &alginfo_a);
- }
- } else {
- /* Auth_only_len is set as 0 here and it will be
- * overwritten in fd for each packet.
- */
- shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
- true, swap, &alginfo_c, &alginfo_a,
- ses->iv.length, 0,
- ses->digest_length, ses->dir);
- }
+ /* Auth_only_len is set as 0 here and it will be
+ * overwritten in fd for each packet.
+ */
+ shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
+ true, swap, &alginfo_c, &alginfo_a,
+ ses->iv.length, 0,
+ ses->digest_length, ses->dir);
}
if (shared_desc_len < 0) {
@@ -1445,20 +1526,26 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
nb_ops = loop;
goto send_pkts;
}
- if (unlikely(!ses->qp || ses->qp != qp)) {
- DPAA_SEC_DP_ERR("sess->qp - %p qp %p",
- ses->qp, qp);
+ if (unlikely(!ses->qp)) {
if (dpaa_sec_attach_sess_q(qp, ses)) {
frames_to_send = loop;
nb_ops = loop;
goto send_pkts;
}
+ } else if (unlikely(ses->qp != qp)) {
+ DPAA_SEC_DP_ERR("Old:sess->qp = %p"
+ " New qp = %p\n", ses->qp, qp);
+ frames_to_send = loop;
+ nb_ops = loop;
+ goto send_pkts;
}
auth_only_len = op->sym->auth.data.length -
op->sym->cipher.data.length;
if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
- if (is_auth_only(ses)) {
+ if (is_proto_ipsec(ses)) {
+ cf = build_proto(op, ses);
+ } else if (is_auth_only(ses)) {
cf = build_auth_only(op, ses);
} else if (is_cipher_only(ses)) {
cf = build_cipher_only(op, ses);
@@ -1467,8 +1554,6 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
auth_only_len = ses->auth_only_len;
} else if (is_auth_cipher(ses)) {
cf = build_cipher_auth(op, ses);
- } else if (is_proto_ipsec(ses)) {
- cf = build_proto(op, ses);
} else {
DPAA_SEC_DP_ERR("not supported ops");
frames_to_send = loop;
@@ -1760,6 +1845,7 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
DPAA_SEC_ERR("invalid session struct");
return -EINVAL;
}
+ memset(session, 0, sizeof(dpaa_sec_session));
/* Default IV length = 0 */
session->iv.length = 0;
@@ -1807,7 +1893,9 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
return -EINVAL;
}
session->ctx_pool = internals->ctx_pool;
+ rte_spinlock_lock(&internals->lock);
session->inq = dpaa_sec_attach_rxq(internals);
+ rte_spinlock_unlock(&internals->lock);
if (session->inq == NULL) {
DPAA_SEC_ERR("unable to attach sec queue");
goto err1;
@@ -1888,111 +1976,86 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
{
struct dpaa_sec_dev_private *internals = dev->data->dev_private;
struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
- struct rte_crypto_auth_xform *auth_xform;
- struct rte_crypto_cipher_xform *cipher_xform;
+ struct rte_crypto_auth_xform *auth_xform = NULL;
+ struct rte_crypto_cipher_xform *cipher_xform = NULL;
dpaa_sec_session *session = (dpaa_sec_session *)sess;
PMD_INIT_FUNC_TRACE();
+ memset(session, 0, sizeof(dpaa_sec_session));
if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
cipher_xform = &conf->crypto_xform->cipher;
- auth_xform = &conf->crypto_xform->next->auth;
+ if (conf->crypto_xform->next)
+ auth_xform = &conf->crypto_xform->next->auth;
} else {
auth_xform = &conf->crypto_xform->auth;
- cipher_xform = &conf->crypto_xform->next->cipher;
+ if (conf->crypto_xform->next)
+ cipher_xform = &conf->crypto_xform->next->cipher;
}
session->proto_alg = conf->protocol;
- session->cipher_key.data = rte_zmalloc(NULL,
- cipher_xform->key.length,
- RTE_CACHE_LINE_SIZE);
- if (session->cipher_key.data == NULL &&
- cipher_xform->key.length > 0) {
- DPAA_SEC_ERR("No Memory for cipher key");
- return -ENOMEM;
- }
- session->cipher_key.length = cipher_xform->key.length;
- session->auth_key.data = rte_zmalloc(NULL,
- auth_xform->key.length,
- RTE_CACHE_LINE_SIZE);
- if (session->auth_key.data == NULL &&
- auth_xform->key.length > 0) {
- DPAA_SEC_ERR("No Memory for auth key");
- rte_free(session->cipher_key.data);
- return -ENOMEM;
+ if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
+ session->cipher_key.data = rte_zmalloc(NULL,
+ cipher_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL &&
+ cipher_xform->key.length > 0) {
+ DPAA_SEC_ERR("No Memory for cipher key");
+ return -ENOMEM;
+ }
+ memcpy(session->cipher_key.data, cipher_xform->key.data,
+ cipher_xform->key.length);
+ session->cipher_key.length = cipher_xform->key.length;
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ break;
+ default:
+ DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
+ cipher_xform->algo);
+ goto out;
+ }
+ session->cipher_alg = cipher_xform->algo;
+ } else {
+ session->cipher_key.data = NULL;
+ session->cipher_key.length = 0;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
}
- session->auth_key.length = auth_xform->key.length;
- memcpy(session->cipher_key.data, cipher_xform->key.data,
- cipher_xform->key.length);
- memcpy(session->auth_key.data, auth_xform->key.data,
- auth_xform->key.length);
- switch (auth_xform->algo) {
- case RTE_CRYPTO_AUTH_SHA1_HMAC:
- session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
- break;
- case RTE_CRYPTO_AUTH_MD5_HMAC:
- session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
- break;
- case RTE_CRYPTO_AUTH_SHA256_HMAC:
- session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
- break;
- case RTE_CRYPTO_AUTH_SHA384_HMAC:
- session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
- break;
- case RTE_CRYPTO_AUTH_SHA512_HMAC:
- session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
- break;
- case RTE_CRYPTO_AUTH_AES_CMAC:
- session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
- break;
- case RTE_CRYPTO_AUTH_NULL:
+ if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
+ session->auth_key.data = rte_zmalloc(NULL,
+ auth_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL &&
+ auth_xform->key.length > 0) {
+ DPAA_SEC_ERR("No Memory for auth key");
+ rte_free(session->cipher_key.data);
+ return -ENOMEM;
+ }
+ memcpy(session->auth_key.data, auth_xform->key.data,
+ auth_xform->key.length);
+ session->auth_key.length = auth_xform->key.length;
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ break;
+ default:
+ DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
+ auth_xform->algo);
+ goto out;
+ }
+ session->auth_alg = auth_xform->algo;
+ } else {
+ session->auth_key.data = NULL;
+ session->auth_key.length = 0;
session->auth_alg = RTE_CRYPTO_AUTH_NULL;
- break;
- case RTE_CRYPTO_AUTH_SHA224_HMAC:
- case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
- case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
- case RTE_CRYPTO_AUTH_SHA1:
- case RTE_CRYPTO_AUTH_SHA256:
- case RTE_CRYPTO_AUTH_SHA512:
- case RTE_CRYPTO_AUTH_SHA224:
- case RTE_CRYPTO_AUTH_SHA384:
- case RTE_CRYPTO_AUTH_MD5:
- case RTE_CRYPTO_AUTH_AES_GMAC:
- case RTE_CRYPTO_AUTH_KASUMI_F9:
- case RTE_CRYPTO_AUTH_AES_CBC_MAC:
- case RTE_CRYPTO_AUTH_ZUC_EIA3:
- DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
- auth_xform->algo);
- goto out;
- default:
- DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
- auth_xform->algo);
- goto out;
- }
-
- switch (cipher_xform->algo) {
- case RTE_CRYPTO_CIPHER_AES_CBC:
- session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
- break;
- case RTE_CRYPTO_CIPHER_3DES_CBC:
- session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
- break;
- case RTE_CRYPTO_CIPHER_AES_CTR:
- session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
- break;
- case RTE_CRYPTO_CIPHER_NULL:
- case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
- case RTE_CRYPTO_CIPHER_3DES_ECB:
- case RTE_CRYPTO_CIPHER_AES_ECB:
- case RTE_CRYPTO_CIPHER_KASUMI_F8:
- DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
- cipher_xform->algo);
- goto out;
- default:
- DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
- cipher_xform->algo);
- goto out;
}
if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
@@ -2020,7 +2083,8 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
PDBOPTS_ESP_OIHI_PDB_INL |
PDBOPTS_ESP_IVSRC |
- PDBHMO_ESP_ENCAP_DTTL;
+ PDBHMO_ESP_ENCAP_DTTL |
+ PDBHMO_ESP_SNR;
session->encap_pdb.spi = ipsec_xform->spi;
session->encap_pdb.ip_hdr_len = sizeof(struct ip);
@@ -2033,7 +2097,9 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
} else
goto out;
session->ctx_pool = internals->ctx_pool;
+ rte_spinlock_lock(&internals->lock);
session->inq = dpaa_sec_attach_rxq(internals);
+ rte_spinlock_unlock(&internals->lock);
if (session->inq == NULL) {
DPAA_SEC_ERR("unable to attach sec queue");
goto out;
@@ -2204,7 +2270,7 @@ dpaa_sec_capabilities_get(void *device __rte_unused)
return dpaa_sec_security_cap;
}
-struct rte_security_ops dpaa_sec_security_ops = {
+static const struct rte_security_ops dpaa_sec_security_ops = {
.session_create = dpaa_sec_security_session_create,
.session_update = NULL,
.session_stats_get = NULL,
@@ -2284,6 +2350,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
security_instance->sess_cnt = 0;
cryptodev->security_ctx = security_instance;
+ rte_spinlock_init(&internals->lock);
for (i = 0; i < internals->max_nb_queue_pairs; i++) {
/* init qman fq for queue pair */
qp = &internals->qps[i];
@@ -2316,7 +2383,7 @@ init_error:
}
static int
-cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
+cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
struct rte_dpaa_device *dpaa_dev)
{
struct rte_cryptodev *cryptodev;
@@ -2344,7 +2411,6 @@ cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
dpaa_dev->crypto_dev = cryptodev;
cryptodev->device = &dpaa_dev->device;
- cryptodev->device->driver = &dpaa_drv->driver;
/* init user callbacks */
TAILQ_INIT(&(cryptodev->link_intr_cbs));