author    Luca Boccassi <luca.boccassi@gmail.com>  2017-08-16 18:42:05 +0100
committer Luca Boccassi <luca.boccassi@gmail.com>  2017-08-16 18:46:04 +0100
commit    f239aed5e674965691846e8ce3f187dd47523689 (patch)
tree      a153a3125c6e183c73871a8ecaa4b285fed5fbd5 /drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
parent    bf7567fd2a5b0b28ab724046143c24561d38d015 (diff)
New upstream version 17.08
Change-Id: I288b50990f52646089d6b1f3aaa6ba2f091a51d7
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c')
-rw-r--r--  drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 588
1 file changed, 472 insertions(+), 116 deletions(-)
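
A recurring change in this import: the per-operation rte_zmalloc()/rte_free() of frame-list entries (FLEs) in each build_*_fd() helper is replaced by a per-device mempool created once at init time. A minimal sketch of that pattern, assuming the rte_mempool API of DPDK 17.08 (the pool name and the fle_pool_setup()/fle_alloc() wrappers here are illustrative, not the driver's own code):

/*
 * Sketch only: one pool per device, drawn from on the hot path.
 * Constant values mirror the FLE_POOL_* defines added in the diff below.
 */
#include <string.h>
#include <rte_mempool.h>

#define FLE_POOL_NUM_BUFS   32000
#define FLE_POOL_BUF_SIZE   256
#define FLE_POOL_CACHE_SIZE 512

static struct rte_mempool *fle_pool;

/* Done once at device init; replaces per-op rte_zmalloc(). */
static int
fle_pool_setup(void)
{
	fle_pool = rte_mempool_create("fle_pool_demo", FLE_POOL_NUM_BUFS,
				      FLE_POOL_BUF_SIZE, FLE_POOL_CACHE_SIZE,
				      0, NULL, NULL, NULL, NULL,
				      SOCKET_ID_ANY, 0);
	return fle_pool != NULL ? 0 : -1;
}

/* Hot path: O(1) get from the per-lcore cache; put back on completion. */
static void *
fle_alloc(void)
{
	void *fle;

	if (rte_mempool_get(fle_pool, &fle))
		return NULL;
	memset(fle, 0, FLE_POOL_BUF_SIZE);	/* pool objects are not zeroed */
	return fle;
}
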
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 4e01fe81..e0f6cfca 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -2,7 +2,7 @@
* BSD LICENSE
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright (c) 2016 NXP. All rights reserved.
+ * Copyright 2016 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -69,10 +69,158 @@
#define FSL_MC_DPSECI_DEVID 3
#define NO_PREFETCH 0
-#define TDES_CBC_IV_LEN 8
-#define AES_CBC_IV_LEN 16
+/* FLE_POOL_NUM_BUFS is set as per the ipsec-secgw application */
+#define FLE_POOL_NUM_BUFS 32000
+#define FLE_POOL_BUF_SIZE 256
+#define FLE_POOL_CACHE_SIZE 512
+
enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
+static uint8_t cryptodev_driver_id;
+
+static inline int
+build_authenc_gcm_fd(dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct qbman_fle *fle, *sge;
+ struct sec_flow_context *flc;
+ uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
+ int icv_len = sess->digest_length, retval;
+ uint8_t *old_icv;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* TODO we are using the first FLE entry to store Mbuf and session ctxt.
+ * Currently we do not know which FLE has the mbuf stored.
+ * So while retrieving we can go back 1 FLE from the FD-ADDR
+ * to get the MBUF Addr from the previous FLE.
+ * We can have a better approach to use the inline Mbuf
+ */
+ retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ if (retval) {
+ RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+ return -1;
+ }
+ memset(fle, 0, FLE_POOL_BUF_SIZE);
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+ DPAA2_FLE_SAVE_CTXT(fle, priv);
+ fle = fle + 1;
+ sge = fle + 2;
+ if (likely(bpid < MAX_BPID)) {
+ DPAA2_SET_FD_BPID(fd, bpid);
+ DPAA2_SET_FLE_BPID(fle, bpid);
+ DPAA2_SET_FLE_BPID(fle + 1, bpid);
+ DPAA2_SET_FLE_BPID(sge, bpid);
+ DPAA2_SET_FLE_BPID(sge + 1, bpid);
+ DPAA2_SET_FLE_BPID(sge + 2, bpid);
+ DPAA2_SET_FLE_BPID(sge + 3, bpid);
+ } else {
+ DPAA2_SET_FD_IVP(fd);
+ DPAA2_SET_FLE_IVP(fle);
+ DPAA2_SET_FLE_IVP((fle + 1));
+ DPAA2_SET_FLE_IVP(sge);
+ DPAA2_SET_FLE_IVP((sge + 1));
+ DPAA2_SET_FLE_IVP((sge + 2));
+ DPAA2_SET_FLE_IVP((sge + 3));
+ }
+
+ /* Save the shared descriptor */
+ flc = &priv->flc_desc[0].flc;
+ /* Configure FD as a FRAME LIST */
+ DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
+ DPAA2_SET_FD_COMPOUND_FMT(fd);
+ DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
+
+ PMD_TX_LOG(DEBUG, "auth_off: 0x%x/length %d, digest-len=%d\n"
+ "iv-len=%d data_off: 0x%x\n",
+ sym_op->aead.data.offset,
+ sym_op->aead.data.length,
+ sym_op->aead.digest.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
+
+ /* Configure Output FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+ if (auth_only_len)
+ DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+ fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->aead.data.length + icv_len + auth_only_len) :
+ sym_op->aead.data.length + auth_only_len;
+
+ DPAA2_SET_FLE_SG_EXT(fle);
+
+ /* Configure Output SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
+ sym_op->m_src->data_off - auth_only_len);
+ sge->length = sym_op->aead.data.length + auth_only_len;
+
+ if (sess->dir == DIR_ENC) {
+ sge++;
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_VADDR_TO_IOVA(sym_op->aead.digest.data));
+ sge->length = sess->digest_length;
+ DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
+ sess->iv.length + auth_only_len));
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ sge++;
+ fle++;
+
+ /* Configure Input FLE with Scatter/Gather Entry */
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
+ DPAA2_SET_FLE_SG_EXT(fle);
+ DPAA2_SET_FLE_FIN(fle);
+ fle->length = (sess->dir == DIR_ENC) ?
+ (sym_op->aead.data.length + sess->iv.length + auth_only_len) :
+ (sym_op->aead.data.length + sess->iv.length + auth_only_len +
+ sess->digest_length);
+
+ /* Configure Input SGE for Encap/Decap */
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(IV_ptr));
+ sge->length = sess->iv.length;
+ sge++;
+ if (auth_only_len) {
+ DPAA2_SET_FLE_ADDR(sge,
+ DPAA2_VADDR_TO_IOVA(sym_op->aead.aad.data));
+ sge->length = auth_only_len;
+ DPAA2_SET_FLE_BPID(sge, bpid);
+ sge++;
+ }
+
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
+ sym_op->m_src->data_off);
+ sge->length = sym_op->aead.data.length;
+ if (sess->dir == DIR_DEC) {
+ sge++;
+ old_icv = (uint8_t *)(sge + 1);
+ memcpy(old_icv, sym_op->aead.digest.data,
+ sess->digest_length);
+ memset(sym_op->aead.digest.data, 0, sess->digest_length);
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
+ sge->length = sess->digest_length;
+ DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
+ sess->digest_length +
+ sess->iv.length +
+ auth_only_len));
+ }
+ DPAA2_SET_FLE_FIN(sge);
+
+ if (auth_only_len) {
+ DPAA2_SET_FLE_INTERNAL_JD(fle, auth_only_len);
+ DPAA2_SET_FD_INTERNAL_JD(fd, auth_only_len);
+ }
+
+ return 0;
+}
+
static inline int
build_authenc_fd(dpaa2_sec_session *sess,
struct rte_crypto_op *op,
@@ -84,9 +232,10 @@ build_authenc_fd(dpaa2_sec_session *sess,
struct sec_flow_context *flc;
uint32_t auth_only_len = sym_op->auth.data.length -
sym_op->cipher.data.length;
- int icv_len = sym_op->auth.digest.length;
+ int icv_len = sess->digest_length, retval;
uint8_t *old_icv;
- uint32_t mem_len = (7 * sizeof(struct qbman_fle)) + icv_len;
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
PMD_INIT_FUNC_TRACE();
@@ -96,12 +245,14 @@ build_authenc_fd(dpaa2_sec_session *sess,
* to get the MBUF Addr from the previous FLE.
* We can have a better approach to use the inline Mbuf
*/
- fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
- if (!fle) {
+ retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ if (retval) {
RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
return -1;
}
+ memset(fle, 0, FLE_POOL_BUF_SIZE);
DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+ DPAA2_FLE_SAVE_CTXT(fle, priv);
fle = fle + 1;
sge = fle + 2;
if (likely(bpid < MAX_BPID)) {
@@ -133,10 +284,10 @@ build_authenc_fd(dpaa2_sec_session *sess,
"cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
sym_op->auth.data.offset,
sym_op->auth.data.length,
- sym_op->auth.digest.length,
+ sess->digest_length,
sym_op->cipher.data.offset,
sym_op->cipher.data.length,
- sym_op->cipher.iv.length,
+ sess->iv.length,
sym_op->m_src->data_off);
/* Configure Output FLE with Scatter/Gather Entry */
@@ -159,9 +310,9 @@ build_authenc_fd(dpaa2_sec_session *sess,
sge++;
DPAA2_SET_FLE_ADDR(sge,
DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
- sge->length = sym_op->auth.digest.length;
+ sge->length = sess->digest_length;
DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
- sym_op->cipher.iv.length));
+ sess->iv.length));
}
DPAA2_SET_FLE_FIN(sge);
@@ -173,13 +324,13 @@ build_authenc_fd(dpaa2_sec_session *sess,
DPAA2_SET_FLE_SG_EXT(fle);
DPAA2_SET_FLE_FIN(fle);
fle->length = (sess->dir == DIR_ENC) ?
- (sym_op->auth.data.length + sym_op->cipher.iv.length) :
- (sym_op->auth.data.length + sym_op->cipher.iv.length +
- sym_op->auth.digest.length);
+ (sym_op->auth.data.length + sess->iv.length) :
+ (sym_op->auth.data.length + sess->iv.length +
+ sess->digest_length);
/* Configure Input SGE for Encap/Decap */
- DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(sym_op->cipher.iv.data));
- sge->length = sym_op->cipher.iv.length;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+ sge->length = sess->iv.length;
sge++;
DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
@@ -190,13 +341,13 @@ build_authenc_fd(dpaa2_sec_session *sess,
sge++;
old_icv = (uint8_t *)(sge + 1);
memcpy(old_icv, sym_op->auth.digest.data,
- sym_op->auth.digest.length);
- memset(sym_op->auth.digest.data, 0, sym_op->auth.digest.length);
+ sess->digest_length);
+ memset(sym_op->auth.digest.data, 0, sess->digest_length);
DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
- sge->length = sym_op->auth.digest.length;
+ sge->length = sess->digest_length;
DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
- sym_op->auth.digest.length +
- sym_op->cipher.iv.length));
+ sess->digest_length +
+ sess->iv.length));
}
DPAA2_SET_FLE_FIN(sge);
if (auth_only_len) {
@@ -212,21 +363,19 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
{
struct rte_crypto_sym_op *sym_op = op->sym;
struct qbman_fle *fle, *sge;
- uint32_t mem_len = (sess->dir == DIR_ENC) ?
- (3 * sizeof(struct qbman_fle)) :
- (5 * sizeof(struct qbman_fle) +
- sym_op->auth.digest.length);
struct sec_flow_context *flc;
struct ctxt_priv *priv = sess->ctxt;
uint8_t *old_digest;
+ int retval;
PMD_INIT_FUNC_TRACE();
- fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
- if (!fle) {
- RTE_LOG(ERR, PMD, "Memory alloc failed for FLE\n");
+ retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ if (retval) {
+ RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
return -1;
}
+ memset(fle, 0, FLE_POOL_BUF_SIZE);
/* TODO we are using the first FLE entry to store Mbuf.
* Currently we do not know which FLE has the mbuf stored.
* So while retrieving we can go back 1 FLE from the FD-ADDR
@@ -234,6 +383,7 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
* We can have a better approach to use the inline Mbuf
*/
DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+ DPAA2_FLE_SAVE_CTXT(fle, priv);
fle = fle + 1;
if (likely(bpid < MAX_BPID)) {
@@ -249,7 +399,7 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sym_op->auth.digest.data));
- fle->length = sym_op->auth.digest.length;
+ fle->length = sess->digest_length;
DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
DPAA2_SET_FD_COMPOUND_FMT(fd);
@@ -280,17 +430,17 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
sym_op->m_src->data_off);
DPAA2_SET_FD_LEN(fd, sym_op->auth.data.length +
- sym_op->auth.digest.length);
+ sess->digest_length);
sge->length = sym_op->auth.data.length;
sge++;
old_digest = (uint8_t *)(sge + 1);
rte_memcpy(old_digest, sym_op->auth.digest.data,
- sym_op->auth.digest.length);
- memset(sym_op->auth.digest.data, 0, sym_op->auth.digest.length);
+ sess->digest_length);
+ memset(sym_op->auth.digest.data, 0, sess->digest_length);
DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
- sge->length = sym_op->auth.digest.length;
+ sge->length = sess->digest_length;
fle->length = sym_op->auth.data.length +
- sym_op->auth.digest.length;
+ sess->digest_length;
DPAA2_SET_FLE_FIN(sge);
}
DPAA2_SET_FLE_FIN(fle);
@@ -304,18 +454,20 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
{
struct rte_crypto_sym_op *sym_op = op->sym;
struct qbman_fle *fle, *sge;
- uint32_t mem_len = (5 * sizeof(struct qbman_fle));
+ int retval;
struct sec_flow_context *flc;
struct ctxt_priv *priv = sess->ctxt;
+ uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->iv.offset);
PMD_INIT_FUNC_TRACE();
- /* todo - we can use some mempool to avoid malloc here */
- fle = rte_zmalloc(NULL, mem_len, RTE_CACHE_LINE_SIZE);
- if (!fle) {
+ retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
+ if (retval) {
RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
return -1;
}
+ memset(fle, 0, FLE_POOL_BUF_SIZE);
/* TODO we are using the first FLE entry to store Mbuf.
* Currently we do not know which FLE has the mbuf stored.
* So while retrieving we can go back 1 FLE from the FD-ADDR
@@ -323,6 +475,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
* We can have a better approach to use the inline Mbuf
*/
DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
+ DPAA2_FLE_SAVE_CTXT(fle, priv);
fle = fle + 1;
sge = fle + 2;
@@ -343,21 +496,21 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
flc = &priv->flc_desc[0].flc;
DPAA2_SET_FD_ADDR(fd, DPAA2_VADDR_TO_IOVA(fle));
DPAA2_SET_FD_LEN(fd, sym_op->cipher.data.length +
- sym_op->cipher.iv.length);
+ sess->iv.length);
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
PMD_TX_LOG(DEBUG, "cipher_off: 0x%x/length %d,ivlen=%d data_off: 0x%x",
sym_op->cipher.data.offset,
sym_op->cipher.data.length,
- sym_op->cipher.iv.length,
+ sess->iv.length,
sym_op->m_src->data_off);
DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
sym_op->m_src->data_off);
- fle->length = sym_op->cipher.data.length + sym_op->cipher.iv.length;
+ fle->length = sym_op->cipher.data.length + sess->iv.length;
PMD_TX_LOG(DEBUG, "1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
flc, fle, fle->addr_hi, fle->addr_lo, fle->length);
@@ -365,12 +518,12 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
fle++;
DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
- fle->length = sym_op->cipher.data.length + sym_op->cipher.iv.length;
+ fle->length = sym_op->cipher.data.length + sess->iv.length;
DPAA2_SET_FLE_SG_EXT(fle);
- DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(sym_op->cipher.iv.data));
- sge->length = sym_op->cipher.iv.length;
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(iv_ptr));
+ sge->length = sess->iv.length;
sge++;
DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
@@ -406,6 +559,9 @@ build_sec_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
case DPAA2_SEC_AUTH:
ret = build_auth_fd(sess, op, fd, bpid);
break;
+ case DPAA2_SEC_AEAD:
+ ret = build_authenc_gcm_fd(sess, op, fd, bpid);
+ break;
case DPAA2_SEC_CIPHER_HASH:
ret = build_authenc_fd(sess, op, fd, bpid);
break;
@@ -437,7 +593,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
if (unlikely(nb_ops == 0))
return 0;
- if (ops[0]->sym->sess_type != RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+ if (ops[0]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n");
return 0;
}
@@ -463,7 +619,9 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
/*Clear the unused FD fields before sending*/
memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
sess = (dpaa2_sec_session *)
- (*ops)->sym->session->_private;
+ get_session_private_data(
+ (*ops)->sym->session,
+ cryptodev_driver_id);
mb_pool = (*ops)->sym->m_src->pool;
bpid = mempool_to_bpid(mb_pool);
ret = build_sec_fd(sess, *ops, &fd_arr[loop], bpid);
@@ -495,6 +653,7 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
{
struct qbman_fle *fle;
struct rte_crypto_op *op;
+ struct ctxt_priv *priv;
fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
@@ -510,7 +669,7 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
if (unlikely(DPAA2_GET_FD_IVP(fd))) {
/* TODO complete it. */
- RTE_LOG(ERR, PMD, "error: Non inline buffer - WHAT to DO?");
+ RTE_LOG(ERR, PMD, "error: Non inline buffer - WHAT to DO?\n");
return NULL;
}
op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR(
@@ -530,7 +689,8 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
DPAA2_GET_FD_LEN(fd));
/* free the fle memory */
- rte_free(fle - 1);
+ priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1);
+ rte_mempool_put(priv->fle_pool, (void *)(fle - 1));
return op;
}
@@ -571,8 +731,8 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
/*Issue a volatile dequeue command. */
while (1) {
if (qbman_swp_pull(swp, &pulldesc)) {
- RTE_LOG(WARNING, PMD, "SEC VDQ command is not issued."
- "QBMAN is busy\n");
+ RTE_LOG(WARNING, PMD,
+ "SEC VDQ command is not issued : QBMAN busy\n");
/* Portal was busy, try again */
continue;
}
@@ -656,7 +816,8 @@ dpaa2_sec_queue_pair_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
static int
dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
- __rte_unused int socket_id)
+ __rte_unused int socket_id,
+ __rte_unused struct rte_mempool *session_pool)
{
struct dpaa2_sec_dev_private *priv = dev->data->dev_private;
struct dpaa2_sec_qp *qp;
@@ -747,19 +908,12 @@ dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
return sizeof(dpaa2_sec_session);
}
-static void
-dpaa2_sec_session_initialize(struct rte_mempool *mp __rte_unused,
- void *sess __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
-}
-
static int
dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
dpaa2_sec_session *session)
{
- struct dpaa2_sec_cipher_ctxt *ctxt = &session->ext_params.cipher_ctxt;
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo cipherdata;
int bufsize, i;
struct ctxt_priv *priv;
@@ -772,16 +926,18 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
+ RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
return -1;
}
+ priv->fle_pool = dev_priv->fle_pool;
+
flc = &priv->flc_desc[0].flc;
session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for cipher key");
+ RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
rte_free(priv);
return -1;
}
@@ -794,23 +950,27 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
cipherdata.key_enc_flags = 0;
cipherdata.key_type = RTA_DATA_IMM;
+ /* Set IV parameters */
+ session->iv.offset = xform->cipher.iv.offset;
+ session->iv.length = xform->cipher.iv.length;
+
switch (xform->cipher.algo) {
case RTE_CRYPTO_CIPHER_AES_CBC:
cipherdata.algtype = OP_ALG_ALGSEL_AES;
cipherdata.algmode = OP_ALG_AAI_CBC;
session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
- ctxt->iv.length = AES_CBC_IV_LEN;
break;
case RTE_CRYPTO_CIPHER_3DES_CBC:
cipherdata.algtype = OP_ALG_ALGSEL_3DES;
cipherdata.algmode = OP_ALG_AAI_CBC;
session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
- ctxt->iv.length = TDES_CBC_IV_LEN;
break;
case RTE_CRYPTO_CIPHER_AES_CTR:
+ cipherdata.algtype = OP_ALG_ALGSEL_AES;
+ cipherdata.algmode = OP_ALG_AAI_CTR;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+ break;
case RTE_CRYPTO_CIPHER_3DES_CTR:
- case RTE_CRYPTO_CIPHER_AES_GCM:
- case RTE_CRYPTO_CIPHER_AES_CCM:
case RTE_CRYPTO_CIPHER_AES_ECB:
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_XTS:
@@ -820,7 +980,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
case RTE_CRYPTO_CIPHER_ZUC_EEA3:
case RTE_CRYPTO_CIPHER_NULL:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
xform->cipher.algo);
goto error_out;
default:
@@ -832,8 +992,8 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
DIR_ENC : DIR_DEC;
bufsize = cnstr_shdsc_blkcipher(priv->flc_desc[0].desc, 1, 0,
- &cipherdata, NULL, ctxt->iv.length,
- session->dir);
+ &cipherdata, NULL, session->iv.length,
+ session->dir);
if (bufsize < 0) {
RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
goto error_out;
@@ -868,9 +1028,9 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
dpaa2_sec_session *session)
{
- struct dpaa2_sec_auth_ctxt *ctxt = &session->ext_params.auth_ctxt;
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo authdata;
- unsigned int bufsize;
+ unsigned int bufsize, i;
struct ctxt_priv *priv;
struct sec_flow_context *flc;
@@ -882,16 +1042,17 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
sizeof(struct sec_flc_desc),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
+ RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
return -1;
}
+ priv->fle_pool = dev_priv->fle_pool;
flc = &priv->flc_desc[DESC_INITFINAL].flc;
session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for auth key");
+ RTE_LOG(ERR, PMD, "No Memory for auth key\n");
rte_free(priv);
return -1;
}
@@ -904,6 +1065,8 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
authdata.key_enc_flags = 0;
authdata.key_type = RTA_DATA_IMM;
+ session->digest_length = xform->auth.digest_length;
+
switch (xform->auth.algo) {
case RTE_CRYPTO_AUTH_SHA1_HMAC:
authdata.algtype = OP_ALG_ALGSEL_SHA1;
@@ -936,7 +1099,6 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
session->auth_alg = RTE_CRYPTO_AUTH_SHA224_HMAC;
break;
case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
- case RTE_CRYPTO_AUTH_AES_GCM:
case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
case RTE_CRYPTO_AUTH_NULL:
case RTE_CRYPTO_AUTH_SHA1:
@@ -945,13 +1107,12 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_SHA224:
case RTE_CRYPTO_AUTH_SHA384:
case RTE_CRYPTO_AUTH_MD5:
- case RTE_CRYPTO_AUTH_AES_CCM:
case RTE_CRYPTO_AUTH_AES_GMAC:
case RTE_CRYPTO_AUTH_KASUMI_F9:
case RTE_CRYPTO_AUTH_AES_CMAC:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u",
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
xform->auth.algo);
goto error_out;
default:
@@ -964,7 +1125,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
1, 0, &authdata, !session->dir,
- ctxt->trunc_len);
+ session->digest_length);
flc->word1_sdl = (uint8_t)bufsize;
flc->word2_rflc_31_0 = lower_32_bits(
@@ -974,6 +1135,10 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
(uint64_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
session->ctxt = priv;
+ for (i = 0; i < bufsize; i++)
+ PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+ i, priv->flc_desc[DESC_INITFINAL].desc[i]);
+
return 0;
@@ -989,8 +1154,129 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
dpaa2_sec_session *session)
{
struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
+ struct alginfo aeaddata;
+ unsigned int bufsize, i;
+ struct ctxt_priv *priv;
+ struct sec_flow_context *flc;
+ struct rte_crypto_aead_xform *aead_xform = &xform->aead;
+ int err;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Set IV parameters */
+ session->iv.offset = aead_xform->iv.offset;
+ session->iv.length = aead_xform->iv.length;
+ session->ctxt_type = DPAA2_SEC_AEAD;
+
+ /* For SEC AEAD only one descriptor is required */
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+ if (priv == NULL) {
+ RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
+ return -1;
+ }
+
+ priv->fle_pool = dev_priv->fle_pool;
+ flc = &priv->flc_desc[0].flc;
+
+ session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
+ RTE_LOG(ERR, PMD, "No Memory for aead key\n");
+ rte_free(priv);
+ return -1;
+ }
+ memcpy(session->aead_key.data, aead_xform->key.data,
+ aead_xform->key.length);
+
+ session->digest_length = aead_xform->digest_length;
+ session->aead_key.length = aead_xform->key.length;
+ ctxt->auth_only_len = aead_xform->aad_length;
+
+ aeaddata.key = (uint64_t)session->aead_key.data;
+ aeaddata.keylen = session->aead_key.length;
+ aeaddata.key_enc_flags = 0;
+ aeaddata.key_type = RTA_DATA_IMM;
+
+ switch (aead_xform->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ aeaddata.algtype = OP_ALG_ALGSEL_AES;
+ aeaddata.algmode = OP_ALG_AAI_GCM;
+ session->cipher_alg = RTE_CRYPTO_AEAD_AES_GCM;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported AEAD alg %u\n",
+ aead_xform->algo);
+ goto error_out;
+ default:
+ RTE_LOG(ERR, PMD, "Crypto: Undefined AEAD specified %u\n",
+ aead_xform->algo);
+ goto error_out;
+ }
+ session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
+ priv->flc_desc[0].desc[0] = aeaddata.keylen;
+ err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+ MIN_JOB_DESC_SIZE,
+ (unsigned int *)priv->flc_desc[0].desc,
+ &priv->flc_desc[0].desc[1], 1);
+
+ if (err < 0) {
+ PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
+ goto error_out;
+ }
+ if (priv->flc_desc[0].desc[1] & 1) {
+ aeaddata.key_type = RTA_DATA_IMM;
+ } else {
+ aeaddata.key = DPAA2_VADDR_TO_IOVA(aeaddata.key);
+ aeaddata.key_type = RTA_DATA_PTR;
+ }
+ priv->flc_desc[0].desc[0] = 0;
+ priv->flc_desc[0].desc[1] = 0;
+
+ if (session->dir == DIR_ENC)
+ bufsize = cnstr_shdsc_gcm_encap(
+ priv->flc_desc[0].desc, 1, 0,
+ &aeaddata, session->iv.length,
+ session->digest_length);
+ else
+ bufsize = cnstr_shdsc_gcm_decap(
+ priv->flc_desc[0].desc, 1, 0,
+ &aeaddata, session->iv.length,
+ session->digest_length);
+ flc->word1_sdl = (uint8_t)bufsize;
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+ session->ctxt = priv;
+ for (i = 0; i < bufsize; i++)
+ PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+ i, priv->flc_desc[0].desc[i]);
+
+ return 0;
+
+error_out:
+ rte_free(session->aead_key.data);
+ rte_free(priv);
+ return -1;
+}
+
+
+static int
+dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ dpaa2_sec_session *session)
+{
+ struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
+ struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo authdata, cipherdata;
- unsigned int bufsize;
+ unsigned int bufsize, i;
struct ctxt_priv *priv;
struct sec_flow_context *flc;
struct rte_crypto_cipher_xform *cipher_xform;
@@ -1012,21 +1298,27 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
(cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
DPAA2_SEC_HASH_CIPHER : DPAA2_SEC_CIPHER_HASH;
}
+
+ /* Set IV parameters */
+ session->iv.offset = cipher_xform->iv.offset;
+ session->iv.length = cipher_xform->iv.length;
+
/* For SEC AEAD only one descriptor is required */
priv = (struct ctxt_priv *)rte_zmalloc(NULL,
sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for priv CTXT");
+ RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
return -1;
}
+ priv->fle_pool = dev_priv->fle_pool;
flc = &priv->flc_desc[0].flc;
session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for cipher key");
+ RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
rte_free(priv);
return -1;
}
@@ -1034,7 +1326,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for auth key");
+ RTE_LOG(ERR, PMD, "No Memory for auth key\n");
rte_free(session->cipher_key.data);
rte_free(priv);
return -1;
@@ -1045,12 +1337,13 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
memcpy(session->auth_key.data, auth_xform->key.data,
auth_xform->key.length);
- ctxt->trunc_len = auth_xform->digest_length;
authdata.key = (uint64_t)session->auth_key.data;
authdata.keylen = session->auth_key.length;
authdata.key_enc_flags = 0;
authdata.key_type = RTA_DATA_IMM;
+ session->digest_length = auth_xform->digest_length;
+
switch (auth_xform->algo) {
case RTE_CRYPTO_AUTH_SHA1_HMAC:
authdata.algtype = OP_ALG_ALGSEL_SHA1;
@@ -1083,7 +1376,6 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
break;
case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
- case RTE_CRYPTO_AUTH_AES_GCM:
case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
case RTE_CRYPTO_AUTH_NULL:
case RTE_CRYPTO_AUTH_SHA1:
@@ -1092,13 +1384,12 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_SHA224:
case RTE_CRYPTO_AUTH_SHA384:
case RTE_CRYPTO_AUTH_MD5:
- case RTE_CRYPTO_AUTH_AES_CCM:
case RTE_CRYPTO_AUTH_AES_GMAC:
case RTE_CRYPTO_AUTH_KASUMI_F9:
case RTE_CRYPTO_AUTH_AES_CMAC:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u",
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
auth_xform->algo);
goto error_out;
default:
@@ -1116,23 +1407,23 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
cipherdata.algtype = OP_ALG_ALGSEL_AES;
cipherdata.algmode = OP_ALG_AAI_CBC;
session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
- ctxt->iv.length = AES_CBC_IV_LEN;
break;
case RTE_CRYPTO_CIPHER_3DES_CBC:
cipherdata.algtype = OP_ALG_ALGSEL_3DES;
cipherdata.algmode = OP_ALG_AAI_CBC;
session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
- ctxt->iv.length = TDES_CBC_IV_LEN;
break;
- case RTE_CRYPTO_CIPHER_AES_GCM:
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ cipherdata.algtype = OP_ALG_ALGSEL_AES;
+ cipherdata.algmode = OP_ALG_AAI_CTR;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+ break;
case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
case RTE_CRYPTO_CIPHER_NULL:
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_ECB:
- case RTE_CRYPTO_CIPHER_AES_CTR:
- case RTE_CRYPTO_CIPHER_AES_CCM:
case RTE_CRYPTO_CIPHER_KASUMI_F8:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u",
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
cipher_xform->algo);
goto error_out;
default:
@@ -1151,7 +1442,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
&priv->flc_desc[0].desc[2], 2);
if (err < 0) {
- PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths");
+ PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
goto error_out;
}
if (priv->flc_desc[0].desc[2] & 1) {
@@ -1173,12 +1464,12 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
if (session->ctxt_type == DPAA2_SEC_CIPHER_HASH) {
bufsize = cnstr_shdsc_authenc(priv->flc_desc[0].desc, 1,
0, &cipherdata, &authdata,
- ctxt->iv.length,
+ session->iv.length,
ctxt->auth_only_len,
- ctxt->trunc_len,
+ session->digest_length,
session->dir);
} else {
- RTE_LOG(ERR, PMD, "Hash before cipher not supported");
+ RTE_LOG(ERR, PMD, "Hash before cipher not supported\n");
goto error_out;
}
@@ -1190,6 +1481,9 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
(uint64_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
session->ctxt = priv;
+ for (i = 0; i < bufsize; i++)
+ PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+ i, priv->flc_desc[0].desc[i]);
return 0;
@@ -1200,8 +1494,8 @@ error_out:
return -1;
}
-static void *
-dpaa2_sec_session_configure(struct rte_cryptodev *dev,
+static int
+dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform, void *sess)
{
dpaa2_sec_session *session = sess;
@@ -1209,9 +1503,13 @@ dpaa2_sec_session_configure(struct rte_cryptodev *dev,
PMD_INIT_FUNC_TRACE();
if (unlikely(sess == NULL)) {
- RTE_LOG(ERR, PMD, "invalid session struct");
- return NULL;
+ RTE_LOG(ERR, PMD, "invalid session struct\n");
+ return -1;
}
+
+ /* Default IV length = 0 */
+ session->iv.length = 0;
+
/* Cipher Only */
if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
session->ctxt_type = DPAA2_SEC_CIPHER;
@@ -1227,33 +1525,76 @@ dpaa2_sec_session_configure(struct rte_cryptodev *dev,
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
session->ext_params.aead_ctxt.auth_cipher_text = true;
- dpaa2_sec_aead_init(dev, xform, session);
+ dpaa2_sec_aead_chain_init(dev, xform, session);
/* Authenticate then Cipher */
} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
session->ext_params.aead_ctxt.auth_cipher_text = false;
+ dpaa2_sec_aead_chain_init(dev, xform, session);
+
+ /* AEAD operation for AES-GCM kind of Algorithms */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
+ xform->next == NULL) {
dpaa2_sec_aead_init(dev, xform, session);
+
} else {
- RTE_LOG(ERR, PMD, "Invalid crypto type");
- return NULL;
+ RTE_LOG(ERR, PMD, "Invalid crypto type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+dpaa2_sec_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
}
- return session;
+ ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "DPAA2 PMD: failed to configure "
+ "session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
}
/** Clear the memory of session so it doesn't leave key material behind */
static void
-dpaa2_sec_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+dpaa2_sec_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
{
PMD_INIT_FUNC_TRACE();
- dpaa2_sec_session *s = (dpaa2_sec_session *)sess;
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+ dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
- if (s) {
+ if (sess_priv) {
rte_free(s->ctxt);
rte_free(s->cipher_key.data);
rte_free(s->auth_key.data);
memset(sess, 0, sizeof(dpaa2_sec_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
}
}
@@ -1263,7 +1604,7 @@ dpaa2_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
{
PMD_INIT_FUNC_TRACE();
- return -ENOTSUP;
+ return 0;
}
static int
@@ -1366,7 +1707,7 @@ dpaa2_sec_dev_close(struct rte_cryptodev *dev)
/*Free the allocated memory for ethernet private data and dpseci*/
priv->hw = NULL;
- free(dpseci);
+ rte_free(dpseci);
return 0;
}
@@ -1383,7 +1724,7 @@ dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev,
info->feature_flags = dev->feature_flags;
info->capabilities = dpaa2_sec_capabilities;
info->sym.max_nb_sessions = internals->max_nb_sessions;
- info->dev_type = RTE_CRYPTODEV_DPAA2_SEC_PMD;
+ info->driver_id = cryptodev_driver_id;
}
}
@@ -1475,15 +1816,17 @@ static struct rte_cryptodev_ops crypto_ops = {
.queue_pair_stop = dpaa2_sec_queue_pair_stop,
.queue_pair_count = dpaa2_sec_queue_pair_count,
.session_get_size = dpaa2_sec_session_get_size,
- .session_initialize = dpaa2_sec_session_initialize,
.session_configure = dpaa2_sec_session_configure,
.session_clear = dpaa2_sec_session_clear,
};
static int
-dpaa2_sec_uninit(const struct rte_cryptodev_driver *crypto_drv __rte_unused,
- struct rte_cryptodev *dev)
+dpaa2_sec_uninit(const struct rte_cryptodev *dev)
{
+ struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
+
+ rte_mempool_free(internals->fle_pool);
+
PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n",
dev->data->name, rte_socket_id());
@@ -1500,6 +1843,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
uint16_t token;
struct dpseci_attr attr;
int retcode, hw_id;
+ char str[20];
PMD_INIT_FUNC_TRACE();
dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
@@ -1509,7 +1853,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
}
hw_id = dpaa2_dev->object_id;
- cryptodev->dev_type = RTE_CRYPTODEV_DPAA2_SEC_PMD;
+ cryptodev->driver_id = cryptodev_driver_id;
cryptodev->dev_ops = &crypto_ops;
cryptodev->enqueue_burst = dpaa2_sec_enqueue_burst;
@@ -1560,6 +1904,18 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
internals->hw = dpseci;
internals->token = token;
+ sprintf(str, "fle_pool_%d", cryptodev->data->dev_id);
+ internals->fle_pool = rte_mempool_create((const char *)str,
+ FLE_POOL_NUM_BUFS,
+ FLE_POOL_BUF_SIZE,
+ FLE_POOL_CACHE_SIZE, 0,
+ NULL, NULL, NULL, NULL,
+ SOCKET_ID_ANY, 0);
+ if (!internals->fle_pool) {
+ RTE_LOG(ERR, PMD, "%s create failed\n", str);
+ goto init_error;
+ }
+
PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
return 0;
@@ -1599,7 +1955,7 @@ cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
dpaa2_dev->cryptodev = cryptodev;
cryptodev->device = &dpaa2_dev->device;
- cryptodev->driver = (struct rte_cryptodev_driver *)dpaa2_drv;
+ cryptodev->device->driver = &dpaa2_drv->driver;
/* init user callbacks */
TAILQ_INIT(&(cryptodev->link_intr_cbs));
@@ -1627,7 +1983,7 @@ cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
if (cryptodev == NULL)
return -ENODEV;
- ret = dpaa2_sec_uninit(NULL, cryptodev);
+ ret = dpaa2_sec_uninit(cryptodev);
if (ret)
return ret;
@@ -1638,7 +1994,6 @@ cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
rte_free(cryptodev->data->dev_private);
cryptodev->device = NULL;
- cryptodev->driver = NULL;
cryptodev->data = NULL;
return 0;
@@ -1653,4 +2008,5 @@ static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
.remove = cryptodev_dpaa2_sec_remove,
};
-RTE_PMD_REGISTER_DPAA2(dpaa2_sec_pmd, rte_dpaa2_sec_driver);
+RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(rte_dpaa2_sec_driver, cryptodev_driver_id);
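
For reference, the driver-side changes above track two cryptodev API changes in 17.08: sessions are now backed by a caller-supplied mempool and addressed through get/set_session_private_data(), and the cipher/AEAD IV moved out of rte_crypto_sym_op into the op's private area, reached via rte_crypto_op_ctod_offset() at the offset recorded in session->iv. A hedged application-side sketch of that flow (IV_OFFSET, the pool handles, and enqueue_one() are illustrative assumptions, not part of this driver):

#include <string.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>

#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
		   sizeof(struct rte_crypto_sym_op))

static int
enqueue_one(uint8_t dev_id, struct rte_mempool *sess_pool,
	    struct rte_mempool *op_pool, struct rte_crypto_sym_xform *xform,
	    const uint8_t *iv, uint16_t iv_len)
{
	struct rte_cryptodev_sym_session *sess;
	struct rte_crypto_op *op;

	/* The session records where to find the IV inside each op. */
	xform->cipher.iv.offset = IV_OFFSET;
	xform->cipher.iv.length = iv_len;

	/* Two-step 17.08 scheme: allocate from the session mempool, then
	 * let the PMD attach its private data (dpaa2_sec_session here).
	 */
	sess = rte_cryptodev_sym_session_create(sess_pool);
	if (sess == NULL)
		return -1;
	if (rte_cryptodev_sym_session_init(dev_id, sess, xform, sess_pool))
		return -1;

	op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
	if (op == NULL)
		return -1;
	rte_crypto_op_attach_sym_session(op, sess);

	/* Per-op IV lives in the op private area (m_src setup omitted). */
	memcpy(rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET),
	       iv, iv_len);

	return rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) == 1 ? 0 : -1;
}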