Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c        |  2
-rw-r--r--  drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h          |  1
-rw-r--r--  drivers/crypto/dpaa_sec/dpaa_sec.c                 |  8
-rw-r--r--  drivers/crypto/qat/qat_adf/qat_algs_build_desc.c   | 10
-rw-r--r--  drivers/crypto/qat/qat_crypto.c                    | 28
-rw-r--r--  drivers/crypto/qat/qat_qp.c                        | 10
-rw-r--r--  drivers/crypto/scheduler/rte_cryptodev_scheduler.c |  8
7 files changed, 46 insertions(+), 21 deletions(-)
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 9c64c5d9..67fb6e24 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -1309,7 +1309,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
case RTE_CRYPTO_AEAD_AES_GCM:
aeaddata.algtype = OP_ALG_ALGSEL_AES;
aeaddata.algmode = OP_ALG_AAI_GCM;
- session->cipher_alg = RTE_CRYPTO_AEAD_AES_GCM;
+ session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
break;
case RTE_CRYPTO_AEAD_AES_CCM:
RTE_LOG(ERR, PMD, "Crypto: Unsupported AEAD alg %u\n",
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index 14e71df5..8e583803 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -169,6 +169,7 @@ typedef struct dpaa2_sec_session_entry {
uint8_t dir; /*!< Operation Direction */
enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
+ enum rte_crypto_aead_algorithm aead_alg; /*!< AEAD Algorithm*/
union {
struct {
uint8_t *data; /**< pointer to key data */
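
The two dpaa2_sec hunks above are one fix: the AEAD algorithm was being stored in the cipher-typed cipher_alg field, so the session entry gains a dedicated aead_alg field. Storing an enum rte_crypto_aead_algorithm value in an enum rte_crypto_cipher_algorithm field compiles, but the two enum types overlap numerically, so later comparisons against cipher values can silently match the wrong algorithm. A minimal sketch of the aliasing, using hypothetical enums rather than the DPDK definitions:

#include <stdio.h>

/* Hypothetical enums mirroring the shape of the rte_crypto ones:
 * two distinct C types whose integer values overlap.
 */
enum cipher_alg { CIPHER_NULL, CIPHER_AES_CBC, CIPHER_AES_CTR };
enum aead_alg   { AEAD_AES_CCM, AEAD_AES_GCM };

int main(void)
{
	/* The bug pattern: an AEAD value lands in a cipher-typed field.
	 * C accepts the implicit conversion (some compilers warn).
	 */
	enum cipher_alg stored = AEAD_AES_GCM;

	/* AEAD_AES_GCM == 1 == CIPHER_AES_CBC here, so a later check
	 * against the cipher enum matches the wrong algorithm.
	 */
	if (stored == CIPHER_AES_CBC)
		printf("AEAD value misread as AES-CBC\n");
	return 0;
}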
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 16155b1a..438dd3bc 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -121,7 +121,7 @@ dpaa_mem_vtop(void *vaddr)
for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
if (vaddr_64 >= memseg[i].addr_64 &&
vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
- paddr = memseg[i].phys_addr +
+ paddr = memseg[i].iova +
(vaddr_64 - memseg[i].addr_64);
return (rte_iova_t)paddr;
@@ -137,10 +137,10 @@ dpaa_mem_ptov(rte_iova_t paddr)
int i;
for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
- if (paddr >= memseg[i].phys_addr &&
- (char *)paddr < (char *)memseg[i].phys_addr + memseg[i].len)
+ if (paddr >= memseg[i].iova &&
+ (char *)paddr < (char *)memseg[i].iova + memseg[i].len)
return (void *)(memseg[i].addr_64 +
- (paddr - memseg[i].phys_addr));
+ (paddr - memseg[i].iova));
}
return NULL;
}
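
The dpaa_sec change is a rename from the deprecated memseg phys_addr field to iova; the translation logic itself is unchanged: scan for the segment whose virtual range contains the address, then apply that segment's virtual-to-IOVA offset. A standalone sketch of the lookup, with a hypothetical struct memseg standing in for DPDK's rte_memseg:

#include <stdint.h>

/* Hypothetical stand-in for rte_memseg: one contiguous mapping with
 * a virtual base, an IOVA base, and a length.
 */
struct memseg {
	uint64_t addr_64;	/* virtual base address */
	uint64_t iova;		/* IOVA (bus/physical) base */
	uint64_t len;		/* segment length in bytes */
};

#define MAX_MEMSEG 256		/* stands in for RTE_MAX_MEMSEG */

/* Translate a virtual address to an IOVA the way dpaa_mem_vtop()
 * does; returns 0 when no segment contains the address.
 */
static uint64_t mem_vtop(const struct memseg *seg, const void *vaddr)
{
	uint64_t v = (uint64_t)(uintptr_t)vaddr;
	int i;

	for (i = 0; i < MAX_MEMSEG && seg[i].addr_64 != 0; i++) {
		if (v >= seg[i].addr_64 &&
		    v < seg[i].addr_64 + seg[i].len)
			return seg[i].iova + (v - seg[i].addr_64);
	}
	return 0;
}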
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index db6c9a32..26f854c2 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -359,6 +359,11 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
in = rte_zmalloc("working mem for key",
ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16);
+ if (in == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory");
+ return -ENOMEM;
+ }
+
rte_memcpy(in, qat_aes_xcbc_key_seed,
ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ);
for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) {
@@ -389,6 +394,11 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
ICP_QAT_HW_GALOIS_E_CTR0_SZ);
in = rte_zmalloc("working mem for key",
ICP_QAT_HW_GALOIS_H_SZ, 16);
+ if (in == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc memory");
+ return -ENOMEM;
+ }
+
memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ);
if (AES_set_encrypt_key(auth_key, auth_keylen << 3,
&enc_key) != 0) {
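
Both precompute paths in qat_algs_build_desc.c now check the rte_zmalloc() return value; before this, a failed allocation flowed straight into rte_memcpy()/memset() and a NULL dereference. The same pattern sketched with plain calloc() in place of the DPDK allocator (STATE_SZ and precompute() are hypothetical names):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define STATE_SZ 64	/* hypothetical working-buffer size */

static int precompute(const uint8_t *key, size_t keylen)
{
	uint8_t *in = calloc(1, STATE_SZ);	/* zeroed, like rte_zmalloc */

	if (in == NULL)
		return -ENOMEM;	/* fail cleanly instead of dereferencing */

	memcpy(in, key, keylen < STATE_SZ ? keylen : STATE_SZ);
	/* ... derive the precomputed state from 'in' ... */
	free(in);
	return 0;
}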
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index a572967c..acd979d2 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -69,6 +69,10 @@
#include "adf_transport_access_macros.h"
#define BYTE_LENGTH 8
+/* bpi is only used for partial blocks of DES and AES
+ * so AES block len can be assumed as max len for iv, src and dst
+ */
+#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
static int
qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
@@ -121,16 +125,16 @@ bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
{
EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
int encrypted_ivlen;
- uint8_t encrypted_iv[16];
- int i;
+ uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+ uint8_t *encr = encrypted_iv;
/* ECB method: encrypt the IV, then XOR this with plaintext */
if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
<= 0)
goto cipher_encrypt_err;
- for (i = 0; i < srclen; i++)
- *(dst+i) = *(src+i)^(encrypted_iv[i]);
+ for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+ *dst = *src ^ *encr;
return 0;
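
bpi_cipher_encrypt() performs a single-block CFB-style step: ECB-encrypt the IV, then XOR the result over the trailing partial block (BPI being the DOCSIS Baseline Privacy handling of DES/AES tails). The rewrite sizes the stack buffer with BPI_MAX_ENCR_IV_LEN rather than a bare 16 and walks pointers instead of indexing. The XOR step in isolation, as a sketch that takes the encrypted IV as an input (the driver produces it with EVP_EncryptUpdate()):

#include <stdint.h>

#define BPI_MAX_ENCR_IV_LEN 16	/* AES block size bounds both DES and AES */

/* XOR at most one block of src into dst, using the ECB-encrypted IV
 * as the keystream; srclen must not exceed BPI_MAX_ENCR_IV_LEN.
 */
static void bpi_xor_block(const uint8_t *encrypted_iv,
		const uint8_t *src, uint8_t *dst, int srclen)
{
	const uint8_t *encr = encrypted_iv;

	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
		*dst = *src ^ *encr;
}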
@@ -150,16 +154,16 @@ bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
{
EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
int encrypted_ivlen;
- uint8_t encrypted_iv[16];
- int i;
+ uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+ uint8_t *encr = encrypted_iv;
/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
<= 0)
goto cipher_decrypt_err;
- for (i = 0; i < srclen; i++)
- *(dst+i) = *(src+i)^(encrypted_iv[i]);
+ for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+ *dst = *src ^ *encr;
return 0;
@@ -844,7 +848,7 @@ static inline uint32_t
qat_bpicipher_preprocess(struct qat_session *ctx,
struct rte_crypto_op *op)
{
- uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+ int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
struct rte_crypto_sym_op *sym_op = op->sym;
uint8_t last_block_len = block_len > 0 ?
sym_op->cipher.data.length % block_len : 0;
@@ -899,7 +903,7 @@ static inline uint32_t
qat_bpicipher_postprocess(struct qat_session *ctx,
struct rte_crypto_op *op)
{
- uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+ int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
struct rte_crypto_sym_op *sym_op = op->sym;
uint8_t last_block_len = block_len > 0 ?
sym_op->cipher.data.length % block_len : 0;
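
Changing block_len from uint8_t to int in both preprocess and postprocess fixes a signedness bug: qat_cipher_get_block_size() returns an int and signals an unknown algorithm with a negative value, but assigning that to a uint8_t wraps it to a large positive number, so the block_len > 0 guard passed and the modulo ran with a bogus block size. The truncation in miniature:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for qat_cipher_get_block_size(): negative means error. */
static int get_block_size(int known_alg)
{
	return known_alg ? 16 : -1;
}

int main(void)
{
	uint8_t bad = get_block_size(0);	/* -1 wraps to 255 */
	int good = get_block_size(0);		/* stays -1 */

	printf("uint8_t: %u (so '> 0' passes), int: %d\n",
	       (unsigned)bad, good);
	return 0;
}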
@@ -1367,7 +1371,9 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
}
min_ofs = auth_ofs;
- auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
+ if (likely(ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL))
+ auth_param->auth_res_addr =
+ op->sym->auth.digest.phys_addr;
}
diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c
index ced3aa6a..c02971ee 100644
--- a/drivers/crypto/qat/qat_qp.c
+++ b/drivers/crypto/qat/qat_qp.c
@@ -180,6 +180,11 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
RTE_CACHE_LINE_SIZE);
+ if (qp->op_cookies == NULL) {
+ PMD_DRV_LOG(ERR, "Failed to alloc mem for cookie");
+ rte_free(qp);
+ return -ENOMEM;
+ }
qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
qp->inflights16 = 0;
@@ -221,7 +226,7 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
for (i = 0; i < qp->nb_descriptors; i++) {
if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
- return -EFAULT;
+ goto create_err;
}
struct qat_crypto_op_cookie *sql_cookie =
@@ -246,6 +251,9 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
return 0;
create_err:
+ if (qp->op_cookie_pool)
+ rte_mempool_free(qp->op_cookie_pool);
+ rte_free(qp->op_cookies);
rte_free(qp);
return -EFAULT;
}
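
qat_crypto_sym_qp_setup() now releases everything it acquired on every failure path: the new op_cookies allocation is checked, the mid-loop rte_mempool_get() failure jumps to create_err instead of returning -EFAULT directly, and create_err frees the cookie mempool, the cookie array, and the qp itself. This is the usual C acquire-in-order / release-in-reverse goto pattern; a self-contained sketch with hypothetical resources in place of the DPDK ones:

#include <errno.h>
#include <stdlib.h>

struct qp {
	void **cookies;
	void *pool;
};

/* Hypothetical setup routine: acquire in order, release in reverse
 * order from a single error label so no partial path can leak.
 */
static int qp_setup(struct qp **out, int n)
{
	struct qp *qp = calloc(1, sizeof(*qp));

	if (qp == NULL)
		return -ENOMEM;

	qp->cookies = calloc(n, sizeof(*qp->cookies));
	if (qp->cookies == NULL)
		goto create_err;

	qp->pool = malloc(64);		/* stands in for the mempool */
	if (qp->pool == NULL)
		goto create_err;

	*out = qp;
	return 0;

create_err:
	free(qp->pool);			/* free(NULL) is a no-op */
	free(qp->cookies);
	free(qp);
	return -EFAULT;
}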
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
index df8634ad..822ce27c 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
@@ -467,8 +467,8 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
RTE_CRYPTODEV_NAME_MAX_LEN);
return -EINVAL;
}
- strncpy(sched_ctx->name, scheduler->name,
- RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN);
+ snprintf(sched_ctx->name, sizeof(sched_ctx->name), "%s",
+ scheduler->name);
if (strlen(scheduler->description) >
RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1) {
@@ -477,8 +477,8 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1);
return -EINVAL;
}
- strncpy(sched_ctx->description, scheduler->description,
- RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN);
+ snprintf(sched_ctx->description, sizeof(sched_ctx->description), "%s",
+ scheduler->description);
/* load scheduler instance operations functions */
sched_ctx->ops.config_queue_pair = scheduler->ops->config_queue_pair;
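
The scheduler fix swaps strncpy() for snprintf(). strncpy() does not write a terminating NUL when the source is at least as long as the bound, so the copy's termination silently depended on the length checks above it; snprintf() with sizeof(dst) always NUL-terminates, truncating if necessary. The difference in miniature:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char dst1[4], dst2[4];
	const char *src = "abcd";	/* exactly fills a 4-byte buffer */

	strncpy(dst1, src, sizeof(dst1));	 /* copies 4 bytes, no NUL */
	snprintf(dst2, sizeof(dst2), "%s", src); /* "abc" plus NUL */

	printf("snprintf result: \"%s\"\n", dst2);
	/* printing dst1 with %s would read past the end of the buffer */
	return 0;
}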