author    Luca Boccassi <luca.boccassi@gmail.com>    2017-08-16 18:42:05 +0100
committer Luca Boccassi <luca.boccassi@gmail.com>    2017-08-16 18:46:04 +0100
commit    f239aed5e674965691846e8ce3f187dd47523689 (patch)
tree      a153a3125c6e183c73871a8ecaa4b285fed5fbd5 /drivers/crypto/qat
parent    bf7567fd2a5b0b28ab724046143c24561d38d015 (diff)
New upstream version 17.08
Change-Id: I288b50990f52646089d6b1f3aaa6ba2f091a51d7
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/crypto/qat')
-rw-r--r--  drivers/crypto/qat/qat_adf/qat_algs.h              31
-rw-r--r--  drivers/crypto/qat/qat_adf/qat_algs_build_desc.c   23
-rw-r--r--  drivers/crypto/qat/qat_crypto.c                   629
-rw-r--r--  drivers/crypto/qat/qat_crypto.h                    47
-rw-r--r--  drivers/crypto/qat/qat_crypto_capabilities.h      102
-rw-r--r--  drivers/crypto/qat/qat_qp.c                        15
-rw-r--r--  drivers/crypto/qat/rte_qat_cryptodev.c             55
7 files changed, 586 insertions, 316 deletions
diff --git a/drivers/crypto/qat/qat_adf/qat_algs.h b/drivers/crypto/qat/qat_adf/qat_algs.h
index 5c63406b..2c8e03c0 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs.h
+++ b/drivers/crypto/qat/qat_adf/qat_algs.h
@@ -17,7 +17,7 @@
* qat-linux@intel.com
*
* BSD LICENSE
- * Copyright(c) 2015-2016 Intel Corporation.
+ * Copyright(c) 2015-2017 Intel Corporation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -51,6 +51,7 @@
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
+#include "../qat_crypto.h"
/*
* Key Modifier (KM) value used in KASUMI algorithm in F9 mode to XOR
@@ -127,15 +128,17 @@ struct qat_session {
struct icp_qat_fw_la_bulk_req fw_req;
uint8_t aad_len;
struct qat_crypto_instance *inst;
+ struct {
+ uint16_t offset;
+ uint16_t length;
+ } cipher_iv;
+ struct {
+ uint16_t offset;
+ uint16_t length;
+ } auth_iv;
+ uint16_t digest_length;
rte_spinlock_t lock; /* protects this struct */
-};
-
-struct qat_alg_ablkcipher_cd {
- struct icp_qat_hw_cipher_algo_blk *cd;
- phys_addr_t cd_paddr;
- struct icp_qat_fw_la_bulk_req fw_req;
- struct qat_crypto_instance *inst;
- rte_spinlock_t lock; /* protects this struct */
+ enum qat_device_gen min_qat_dev_gen;
};
int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg);
@@ -147,21 +150,13 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cd,
int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
uint8_t *authkey,
uint32_t authkeylen,
- uint32_t add_auth_data_length,
+ uint32_t aad_length,
uint32_t digestsize,
unsigned int operation);
void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
enum qat_crypto_proto_flag proto_flags);
-void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_cd *cd,
- int alg, const uint8_t *key,
- unsigned int keylen);
-
-void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cd,
- int alg, const uint8_t *key,
- unsigned int keylen);
-
int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
int qat_alg_validate_aes_docsisbpi_key(int key_len,
enum icp_qat_hw_cipher_algo *alg);
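[Editor's note] The struct qat_session changes above replace the old embedded cipher-descriptor struct with per-session {offset, length} pairs for the cipher and auth IVs: in the 17.08 API the IV bytes live inside the rte_crypto_op itself and the xform only records where they sit. A minimal application-side sketch, assuming an app-chosen IV_OFFSET right after the symmetric op (the PMD later reads the same bytes through rte_crypto_op_ctod_offset(), as the qat_crypto.c hunks further down show):

#include <string.h>

#include <rte_crypto.h>
#include <rte_crypto_sym.h>

/* Illustrative choice: IV stored immediately after the symmetric op. */
#define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

static void
set_op_cipher_iv(struct rte_crypto_op *op, const uint8_t *iv, uint16_t iv_len)
{
	/* Same offset the session stores in cipher_iv.offset. */
	uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);

	memcpy(iv_ptr, iv, iv_len);
}

The cipher xform passed at session setup would then carry iv.offset = IV_OFFSET and iv.length = iv_len, which qat_crypto_sym_configure_session_cipher() copies into session->cipher_iv.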
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index 154e1ddd..2d16c9e2 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -17,7 +17,7 @@
* qat-linux@intel.com
*
* BSD LICENSE
- * Copyright(c) 2015-2016 Intel Corporation.
+ * Copyright(c) 2015-2017 Intel Corporation.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -121,6 +121,9 @@ static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_NULL:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
/* return maximum state1 size in this case */
return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
@@ -603,6 +606,7 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
cipher_cd_ctrl->cipher_state_sz =
ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
+ cdesc->min_qat_dev_gen = QAT_GEN2;
} else {
total_key_size = cipherkeylen;
cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
@@ -661,7 +665,7 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
uint8_t *authkey,
uint32_t authkeylen,
- uint32_t add_auth_data_length,
+ uint32_t aad_length,
uint32_t digestsize,
unsigned int operation)
{
@@ -810,13 +814,14 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
* in big-endian format. This field is 8 bytes
*/
auth_param->u2.aad_sz =
- RTE_ALIGN_CEIL(add_auth_data_length, 16);
+ RTE_ALIGN_CEIL(aad_length, 16);
auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3;
aad_len = (uint32_t *)(cdesc->cd_cur_ptr +
ICP_QAT_HW_GALOIS_128_STATE1_SZ +
ICP_QAT_HW_GALOIS_H_SZ);
- *aad_len = rte_bswap32(add_auth_data_length);
+ *aad_len = rte_bswap32(aad_length);
+ cdesc->aad_len = aad_length;
break;
case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
@@ -837,8 +842,7 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ);
cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) +
authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
- auth_param->hash_state_sz =
- RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
+ auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
break;
case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
hash->auth_config.config =
@@ -854,8 +858,8 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
cdesc->cd_cur_ptr += state1_size + state2_size
+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
- auth_param->hash_state_sz =
- RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
+ auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
+ cdesc->min_qat_dev_gen = QAT_GEN2;
break;
case ICP_QAT_HW_AUTH_ALGO_MD5:
@@ -868,6 +872,9 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
state2_size = ICP_QAT_HW_MD5_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_NULL:
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_NULL);
+ state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
break;
case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
state1_size = qat_hash_get_state1_size(
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 386aa453..1f52cabf 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -60,6 +60,7 @@
#include <rte_spinlock.h>
#include <rte_hexdump.h>
#include <rte_crypto_sym.h>
+#include <rte_cryptodev_pci.h>
#include <openssl/evp.h>
#include "qat_logs.h"
@@ -170,16 +171,19 @@ cipher_decrypt_err:
/** Creates a context in either AES or DES in ECB mode
* Depends on openssl libcrypto
*/
-static void *
+static int
bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
enum rte_crypto_cipher_operation direction __rte_unused,
- uint8_t *key)
+ uint8_t *key, void **ctx)
{
const EVP_CIPHER *algo = NULL;
- EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
+ int ret;
+ *ctx = EVP_CIPHER_CTX_new();
- if (ctx == NULL)
+ if (*ctx == NULL) {
+ ret = -ENOMEM;
goto ctx_init_err;
+ }
if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
algo = EVP_des_ecb();
@@ -187,15 +191,17 @@ bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
algo = EVP_aes_128_ecb();
/* IV will be ECB encrypted whether direction is encrypt or decrypt*/
- if (EVP_EncryptInit_ex(ctx, algo, NULL, key, 0) != 1)
+ if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
+ ret = -EINVAL;
goto ctx_init_err;
+ }
- return ctx;
+ return 0;
ctx_init_err:
- if (ctx != NULL)
- EVP_CIPHER_CTX_free(ctx);
- return NULL;
+ if (*ctx != NULL)
+ EVP_CIPHER_CTX_free(*ctx);
+ return ret;
}
/** Frees a context previously created
@@ -213,25 +219,25 @@ adf_modulo(uint32_t data, uint32_t shift);
static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
- struct qat_crypto_op_cookie *qat_op_cookie);
+ struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp);
-void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
- void *session)
+void
+qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
{
- struct qat_session *sess = session;
- phys_addr_t cd_paddr;
-
PMD_INIT_FUNC_TRACE();
- if (sess) {
- if (sess->bpi_ctx) {
- bpi_cipher_ctx_free(sess->bpi_ctx);
- sess->bpi_ctx = NULL;
- }
- cd_paddr = sess->cd_paddr;
- memset(sess, 0, qat_crypto_sym_get_session_private_size(dev));
- sess->cd_paddr = cd_paddr;
- } else
- PMD_DRV_LOG(ERR, "NULL session");
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+ struct qat_session *s = (struct qat_session *)sess_priv;
+
+ if (sess_priv) {
+ if (s->bpi_ctx)
+ bpi_cipher_ctx_free(s->bpi_ctx);
+ memset(s, 0, qat_crypto_sym_get_session_private_size(dev));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
}
static int
@@ -245,6 +251,14 @@ qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
return ICP_QAT_FW_LA_CMD_AUTH;
+ /* AEAD */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ else
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ }
+
if (xform->next == NULL)
return -1;
@@ -286,38 +300,37 @@ qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
return NULL;
}
-void *
+
+int
qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform, void *session_private)
+ struct rte_crypto_sym_xform *xform,
+ struct qat_session *session)
{
- struct qat_session *session = session_private;
struct qat_pmd_private *internals = dev->data->dev_private;
struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ int ret;
/* Get cipher xform from crypto xform chain */
cipher_xform = qat_get_cipher_xform(xform);
+ session->cipher_iv.offset = cipher_xform->iv.offset;
+ session->cipher_iv.length = cipher_xform->iv.length;
+
switch (cipher_xform->algo) {
case RTE_CRYPTO_CIPHER_AES_CBC:
if (qat_alg_validate_aes_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
break;
- case RTE_CRYPTO_CIPHER_AES_GCM:
- if (qat_alg_validate_aes_key(cipher_xform->key.length,
- &session->qat_cipher_alg) != 0) {
- PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
- goto error_out;
- }
- session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
- break;
case RTE_CRYPTO_CIPHER_AES_CTR:
if (qat_alg_validate_aes_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid AES cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
@@ -326,6 +339,7 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
if (qat_alg_validate_snow3g_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
@@ -337,6 +351,7 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
if (qat_alg_validate_kasumi_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE;
@@ -345,6 +360,7 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
if (qat_alg_validate_3des_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
@@ -353,6 +369,7 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
if (qat_alg_validate_des_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
@@ -361,38 +378,43 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
if (qat_alg_validate_3des_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
break;
case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
- session->bpi_ctx = bpi_cipher_ctx_init(
+ ret = bpi_cipher_ctx_init(
cipher_xform->algo,
cipher_xform->op,
- cipher_xform->key.data);
- if (session->bpi_ctx == NULL) {
+ cipher_xform->key.data,
+ &session->bpi_ctx);
+ if (ret != 0) {
PMD_DRV_LOG(ERR, "failed to create DES BPI ctx");
goto error_out;
}
if (qat_alg_validate_des_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
break;
case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
- session->bpi_ctx = bpi_cipher_ctx_init(
+ ret = bpi_cipher_ctx_init(
cipher_xform->algo,
cipher_xform->op,
- cipher_xform->key.data);
- if (session->bpi_ctx == NULL) {
+ cipher_xform->key.data,
+ &session->bpi_ctx);
+ if (ret != 0) {
PMD_DRV_LOG(ERR, "failed to create AES BPI ctx");
goto error_out;
}
if (qat_alg_validate_aes_docsisbpi_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid AES DOCSISBPI key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
@@ -403,27 +425,30 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
PMD_DRV_LOG(ERR, "%s not supported on this device",
rte_crypto_cipher_algorithm_strings
[cipher_xform->algo]);
+ ret = -ENOTSUP;
goto error_out;
}
if (qat_alg_validate_zuc_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) {
PMD_DRV_LOG(ERR, "Invalid ZUC cipher key size");
+ ret = -EINVAL;
goto error_out;
}
session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
break;
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_ECB:
- case RTE_CRYPTO_CIPHER_AES_CCM:
case RTE_CRYPTO_CIPHER_AES_F8:
case RTE_CRYPTO_CIPHER_AES_XTS:
case RTE_CRYPTO_CIPHER_ARC4:
PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
cipher_xform->algo);
+ ret = -ENOTSUP;
goto error_out;
default:
PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u\n",
cipher_xform->algo);
+ ret = -EINVAL;
goto error_out;
}
@@ -434,50 +459,119 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
if (qat_alg_aead_session_create_content_desc_cipher(session,
cipher_xform->key.data,
- cipher_xform->key.length))
+ cipher_xform->key.length)) {
+ ret = -EINVAL;
goto error_out;
+ }
- return session;
+ return 0;
error_out:
if (session->bpi_ctx) {
bpi_cipher_ctx_free(session->bpi_ctx);
session->bpi_ctx = NULL;
}
- return NULL;
+ return ret;
}
-
-void *
+int
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = qat_crypto_set_session_parameters(dev, xform, sess_private_data);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "Crypto QAT PMD: failed to configure "
+ "session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+int
+qat_crypto_set_session_parameters(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform, void *session_private)
{
struct qat_session *session = session_private;
+ int ret;
int qat_cmd_id;
PMD_INIT_FUNC_TRACE();
+ /* Set context descriptor physical address */
+ session->cd_paddr = rte_mempool_virt2phy(NULL, session) +
+ offsetof(struct qat_session, cd);
+
+ session->min_qat_dev_gen = QAT_GEN1;
+
/* Get requested QAT command id */
qat_cmd_id = qat_get_cmd_id(xform);
if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
PMD_DRV_LOG(ERR, "Unsupported xform chain requested");
- goto error_out;
+ return -ENOTSUP;
}
session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
switch (session->qat_cmd) {
case ICP_QAT_FW_LA_CMD_CIPHER:
- session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
+ ret = qat_crypto_sym_configure_session_cipher(dev, xform, session);
+ if (ret < 0)
+ return ret;
break;
case ICP_QAT_FW_LA_CMD_AUTH:
- session = qat_crypto_sym_configure_session_auth(dev, xform, session);
+ ret = qat_crypto_sym_configure_session_auth(dev, xform, session);
+ if (ret < 0)
+ return ret;
break;
case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
- session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
- session = qat_crypto_sym_configure_session_auth(dev, xform, session);
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ ret = qat_crypto_sym_configure_session_aead(xform,
+ session);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = qat_crypto_sym_configure_session_cipher(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ ret = qat_crypto_sym_configure_session_auth(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ }
break;
case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
- session = qat_crypto_sym_configure_session_auth(dev, xform, session);
- session = qat_crypto_sym_configure_session_cipher(dev, xform, session);
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ ret = qat_crypto_sym_configure_session_aead(xform,
+ session);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = qat_crypto_sym_configure_session_auth(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ ret = qat_crypto_sym_configure_session_cipher(dev,
+ xform, session);
+ if (ret < 0)
+ return ret;
+ }
break;
case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
case ICP_QAT_FW_LA_CMD_TRNG_TEST:
@@ -490,30 +584,26 @@ qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
case ICP_QAT_FW_LA_CMD_DELIMITER:
PMD_DRV_LOG(ERR, "Unsupported Service %u",
session->qat_cmd);
- goto error_out;
+ return -ENOTSUP;
default:
PMD_DRV_LOG(ERR, "Unsupported Service %u",
session->qat_cmd);
- goto error_out;
+ return -ENOTSUP;
}
- return session;
-
-error_out:
- return NULL;
+ return 0;
}
-struct qat_session *
+int
qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
- struct qat_session *session_private)
+ struct qat_session *session)
{
-
- struct qat_session *session = session_private;
struct rte_crypto_auth_xform *auth_xform = NULL;
- struct rte_crypto_cipher_xform *cipher_xform = NULL;
struct qat_pmd_private *internals = dev->data->dev_private;
auth_xform = qat_get_auth_xform(xform);
+ uint8_t *key_data = auth_xform->key.data;
+ uint8_t key_length = auth_xform->key.length;
switch (auth_xform->algo) {
case RTE_CRYPTO_AUTH_SHA1_HMAC:
@@ -534,11 +624,15 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
break;
- case RTE_CRYPTO_AUTH_AES_GCM:
- session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
- break;
case RTE_CRYPTO_AUTH_AES_GMAC:
+ if (qat_alg_validate_aes_key(auth_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid AES key size");
+ return -EINVAL;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+
break;
case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
@@ -557,7 +651,7 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
PMD_DRV_LOG(ERR, "%s not supported on this device",
rte_crypto_auth_algorithm_strings
[auth_xform->algo]);
- goto error_out;
+ return -ENOTSUP;
}
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
break;
@@ -567,42 +661,149 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_SHA224:
case RTE_CRYPTO_AUTH_SHA384:
case RTE_CRYPTO_AUTH_MD5:
- case RTE_CRYPTO_AUTH_AES_CCM:
case RTE_CRYPTO_AUTH_AES_CMAC:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
auth_xform->algo);
- goto error_out;
+ return -ENOTSUP;
default:
PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
auth_xform->algo);
- goto error_out;
+ return -EINVAL;
}
- cipher_xform = qat_get_cipher_xform(xform);
- if ((session->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) ||
- (session->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) {
+ session->auth_iv.offset = auth_xform->iv.offset;
+ session->auth_iv.length = auth_xform->iv.length;
+
+ if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+ if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
+ session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ /*
+ * It needs to create cipher desc content first,
+ * then authentication
+ */
+ if (qat_alg_aead_session_create_content_desc_cipher(session,
+ auth_xform->key.data,
+ auth_xform->key.length))
+ return -EINVAL;
+
+ if (qat_alg_aead_session_create_content_desc_auth(session,
+ key_data,
+ key_length,
+ 0,
+ auth_xform->digest_length,
+ auth_xform->op))
+ return -EINVAL;
+ } else {
+ session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+ /*
+ * It needs to create authentication desc content first,
+ * then cipher
+ */
+ if (qat_alg_aead_session_create_content_desc_auth(session,
+ key_data,
+ key_length,
+ 0,
+ auth_xform->digest_length,
+ auth_xform->op))
+ return -EINVAL;
+
+ if (qat_alg_aead_session_create_content_desc_cipher(session,
+ auth_xform->key.data,
+ auth_xform->key.length))
+ return -EINVAL;
+ }
+ /* Restore to authentication only */
+ session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
+ } else {
if (qat_alg_aead_session_create_content_desc_auth(session,
- cipher_xform->key.data,
- cipher_xform->key.length,
- auth_xform->add_auth_data_length,
+ key_data,
+ key_length,
+ 0,
auth_xform->digest_length,
auth_xform->op))
- goto error_out;
+ return -EINVAL;
+ }
+
+ session->digest_length = auth_xform->digest_length;
+ return 0;
+}
+
+int
+qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
+ struct qat_session *session)
+{
+ struct rte_crypto_aead_xform *aead_xform = &xform->aead;
+
+ /*
+ * Store AEAD IV parameters as cipher IV,
+ * to avoid unnecessary memory usage
+ */
+ session->cipher_iv.offset = xform->aead.iv.offset;
+ session->cipher_iv.length = xform->aead.iv.length;
+
+ switch (aead_xform->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ if (qat_alg_validate_aes_key(aead_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid AES key size");
+ return -EINVAL;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported AEAD alg %u",
+ aead_xform->algo);
+ return -ENOTSUP;
+ default:
+ PMD_DRV_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
+ aead_xform->algo);
+ return -EINVAL;
+ }
+
+ if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+ session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+ /*
+ * It needs to create cipher desc content first,
+ * then authentication
+ */
+ if (qat_alg_aead_session_create_content_desc_cipher(session,
+ aead_xform->key.data,
+ aead_xform->key.length))
+ return -EINVAL;
+
+ if (qat_alg_aead_session_create_content_desc_auth(session,
+ aead_xform->key.data,
+ aead_xform->key.length,
+ aead_xform->aad_length,
+ aead_xform->digest_length,
+ RTE_CRYPTO_AUTH_OP_GENERATE))
+ return -EINVAL;
} else {
+ session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+ /*
+ * It needs to create authentication desc content first,
+ * then cipher
+ */
if (qat_alg_aead_session_create_content_desc_auth(session,
- auth_xform->key.data,
- auth_xform->key.length,
- auth_xform->add_auth_data_length,
- auth_xform->digest_length,
- auth_xform->op))
- goto error_out;
+ aead_xform->key.data,
+ aead_xform->key.length,
+ aead_xform->aad_length,
+ aead_xform->digest_length,
+ RTE_CRYPTO_AUTH_OP_VERIFY))
+ return -EINVAL;
+
+ if (qat_alg_aead_session_create_content_desc_cipher(session,
+ aead_xform->key.data,
+ aead_xform->key.length))
+ return -EINVAL;
}
- return session;
-error_out:
- return NULL;
+ session->digest_length = aead_xform->digest_length;
+ return 0;
}
unsigned qat_crypto_sym_get_session_private_size(
@@ -617,7 +818,8 @@ qat_bpicipher_preprocess(struct qat_session *ctx,
{
uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
struct rte_crypto_sym_op *sym_op = op->sym;
- uint8_t last_block_len = sym_op->cipher.data.length % block_len;
+ uint8_t last_block_len = block_len > 0 ?
+ sym_op->cipher.data.length % block_len : 0;
if (last_block_len &&
ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
@@ -641,7 +843,8 @@ qat_bpicipher_preprocess(struct qat_session *ctx,
iv = last_block - block_len;
else
/* runt block, i.e. less than one full block */
- iv = sym_op->cipher.iv.data;
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ctx->cipher_iv.offset);
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
@@ -670,7 +873,8 @@ qat_bpicipher_postprocess(struct qat_session *ctx,
{
uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
struct rte_crypto_sym_op *sym_op = op->sym;
- uint8_t last_block_len = sym_op->cipher.data.length % block_len;
+ uint8_t last_block_len = block_len > 0 ?
+ sym_op->cipher.data.length % block_len : 0;
if (last_block_len > 0 &&
ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
@@ -696,7 +900,8 @@ qat_bpicipher_postprocess(struct qat_session *ctx,
iv = dst - block_len;
else
/* runt block, i.e. less than one full block */
- iv = sym_op->cipher.iv.data;
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ctx->cipher_iv.offset);
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
rte_hexdump(stdout, "BPI: src before post-process:", last_block,
@@ -752,12 +957,12 @@ qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
while (nb_ops_sent != nb_ops_possible) {
ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail,
- tmp_qp->op_cookies[tail / queue->msg_size]);
+ tmp_qp->op_cookies[tail / queue->msg_size], tmp_qp);
if (ret != 0) {
tmp_qp->stats.enqueue_err_count++;
/*
* This message cannot be enqueued,
- * decrease number of ops that wasnt sent
+ * decrease number of ops that wasn't sent
*/
rte_atomic16_sub(&tmp_qp->inflights16,
nb_ops_possible - nb_ops_sent);
@@ -808,7 +1013,10 @@ qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
} else {
struct qat_session *sess = (struct qat_session *)
- (rx_op->sym->session->_private);
+ get_session_private_data(
+ rx_op->sym->session,
+ cryptodev_qat_driver_id);
+
if (sess->bpi_ctx)
qat_bpicipher_postprocess(sess, rx_op);
rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
@@ -882,23 +1090,44 @@ qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
return 0;
}
+static inline void
+set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
+ struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ struct rte_crypto_op *op,
+ struct icp_qat_fw_la_bulk_req *qat_req)
+{
+ /* copy IV into request if it fits */
+ if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
+ rte_memcpy(cipher_param->u.cipher_IV_array,
+ rte_crypto_op_ctod_offset(op, uint8_t *,
+ iv_offset),
+ iv_length);
+ } else {
+ ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+ cipher_param->u.s.cipher_IV_ptr =
+ rte_crypto_op_ctophys_offset(op,
+ iv_offset);
+ }
+}
+
static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
- struct qat_crypto_op_cookie *qat_op_cookie)
+ struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp)
{
int ret = 0;
struct qat_session *ctx;
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
register struct icp_qat_fw_la_bulk_req *qat_req;
- uint8_t do_auth = 0, do_cipher = 0;
+ uint8_t do_auth = 0, do_cipher = 0, do_aead = 0;
uint32_t cipher_len = 0, cipher_ofs = 0;
uint32_t auth_len = 0, auth_ofs = 0;
uint32_t min_ofs = 0;
uint64_t src_buf_start = 0, dst_buf_start = 0;
uint8_t do_sgl = 0;
-
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto "
@@ -907,18 +1136,26 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
return -EINVAL;
}
#endif
- if (unlikely(op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS)) {
+ if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented"
" requests, op (%p) is sessionless.", op);
return -EINVAL;
}
- if (unlikely(op->sym->session->dev_type != RTE_CRYPTODEV_QAT_SYM_PMD)) {
+ ctx = (struct qat_session *)get_session_private_data(
+ op->sym->session, cryptodev_qat_driver_id);
+
+ if (unlikely(ctx == NULL)) {
PMD_DRV_LOG(ERR, "Session was not created for this device");
return -EINVAL;
}
- ctx = (struct qat_session *)op->sym->session->_private;
+ if (unlikely(ctx->min_qat_dev_gen > qp->qat_dev_gen)) {
+ PMD_DRV_LOG(ERR, "Session alg not supported on this device gen");
+ op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ return -EINVAL;
+ }
+
qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
@@ -926,9 +1163,15 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
- ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
- do_auth = 1;
- do_cipher = 1;
+ ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+ /* AES-GCM */
+ if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ do_aead = 1;
+ } else {
+ do_auth = 1;
+ do_cipher = 1;
+ }
} else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
do_auth = 1;
do_cipher = 0;
@@ -970,26 +1213,8 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
cipher_ofs = op->sym->cipher.data.offset;
}
- /* copy IV into request if it fits */
- /*
- * If IV length is zero do not copy anything but still
- * use request descriptor embedded IV
- *
- */
- if (op->sym->cipher.iv.length) {
- if (op->sym->cipher.iv.length <=
- sizeof(cipher_param->u.cipher_IV_array)) {
- rte_memcpy(cipher_param->u.cipher_IV_array,
- op->sym->cipher.iv.data,
- op->sym->cipher.iv.length);
- } else {
- ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_CIPH_IV_64BIT_PTR);
- cipher_param->u.s.cipher_IV_ptr =
- op->sym->cipher.iv.phys_addr;
- }
- }
+ set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
+ cipher_param, op, qat_req);
min_ofs = cipher_ofs;
}
@@ -1009,34 +1234,70 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
auth_ofs = op->sym->auth.data.offset >> 3;
auth_len = op->sym->auth.data.length >> 3;
- if (ctx->qat_hash_alg ==
- ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) {
- if (do_cipher) {
- auth_len = auth_len + auth_ofs + 1 -
- ICP_QAT_HW_KASUMI_BLK_SZ;
- auth_ofs = ICP_QAT_HW_KASUMI_BLK_SZ;
- } else {
- auth_len = auth_len + auth_ofs + 1;
- auth_ofs = 0;
- }
- }
+ auth_param->u1.aad_adr =
+ rte_crypto_op_ctophys_offset(op,
+ ctx->auth_iv.offset);
} else if (ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
- auth_ofs = op->sym->cipher.data.offset;
- auth_len = op->sym->cipher.data.length;
+ /* AES-GMAC */
+ set_cipher_iv(ctx->auth_iv.length,
+ ctx->auth_iv.offset,
+ cipher_param, op, qat_req);
+ auth_ofs = op->sym->auth.data.offset;
+ auth_len = op->sym->auth.data.length;
+
+ auth_param->u1.aad_adr = 0;
+ auth_param->u2.aad_sz = 0;
+
+ /*
+ * If len(iv)==12B fw computes J0
+ */
+ if (ctx->auth_iv.length == 12) {
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+
+ }
} else {
auth_ofs = op->sym->auth.data.offset;
auth_len = op->sym->auth.data.length;
+
}
min_ofs = auth_ofs;
auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
- auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
+ }
+ if (do_aead) {
+ if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+ ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ /*
+ * If len(iv)==12B fw computes J0
+ */
+ if (ctx->cipher_iv.length == 12) {
+ ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+ qat_req->comn_hdr.serv_specif_flags,
+ ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+ }
+
+ }
+
+ cipher_len = op->sym->aead.data.length;
+ cipher_ofs = op->sym->aead.data.offset;
+ auth_len = op->sym->aead.data.length;
+ auth_ofs = op->sym->aead.data.offset;
+
+ auth_param->u1.aad_adr = op->sym->aead.aad.phys_addr;
+ auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
+ set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
+ cipher_param, op, qat_req);
+ min_ofs = op->sym->aead.data.offset;
}
if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
@@ -1080,7 +1341,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
dst_buf_start = src_buf_start;
}
- if (do_cipher) {
+ if (do_cipher || do_aead) {
cipher_param->cipher_offset =
(uint32_t)rte_pktmbuf_mtophys_offset(
op->sym->m_src, cipher_ofs) - src_buf_start;
@@ -1089,7 +1350,8 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
cipher_param->cipher_offset = 0;
cipher_param->cipher_length = 0;
}
- if (do_auth) {
+
+ if (do_auth || do_aead) {
auth_param->auth_off = (uint32_t)rte_pktmbuf_mtophys_offset(
op->sym->m_src, auth_ofs) - src_buf_start;
auth_param->auth_len = auth_len;
@@ -1097,6 +1359,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
auth_param->auth_off = 0;
auth_param->auth_len = 0;
}
+
qat_req->comn_mid.dst_length =
qat_req->comn_mid.src_length =
(cipher_param->cipher_offset + cipher_param->cipher_length)
@@ -1142,48 +1405,38 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
qat_req->comn_mid.dest_data_addr = dst_buf_start;
}
- if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
- ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
- if (op->sym->cipher.iv.length == 12) {
- /*
- * For GCM a 12 bit IV is allowed,
- * but we need to inform the f/w
- */
- ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
- qat_req->comn_hdr.serv_specif_flags,
- ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
- }
- if (op->sym->cipher.data.length == 0) {
- /*
- * GMAC
- */
- qat_req->comn_mid.dest_data_addr =
- qat_req->comn_mid.src_data_addr =
- op->sym->auth.aad.phys_addr;
- qat_req->comn_mid.dst_length =
- qat_req->comn_mid.src_length =
- rte_pktmbuf_data_len(op->sym->m_src);
- cipher_param->cipher_length = 0;
- cipher_param->cipher_offset = 0;
- auth_param->u1.aad_adr = 0;
- auth_param->auth_len = op->sym->auth.aad.length;
- auth_param->auth_off = op->sym->auth.data.offset;
- auth_param->u2.aad_sz = 0;
- }
- }
-
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
rte_hexdump(stdout, "qat_req:", qat_req,
sizeof(struct icp_qat_fw_la_bulk_req));
rte_hexdump(stdout, "src_data:",
rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
rte_pktmbuf_data_len(op->sym->m_src));
- rte_hexdump(stdout, "iv:", op->sym->cipher.iv.data,
- op->sym->cipher.iv.length);
- rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
- op->sym->auth.digest.length);
- rte_hexdump(stdout, "aad:", op->sym->auth.aad.data,
- op->sym->auth.aad.length);
+ if (do_cipher) {
+ uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
+ uint8_t *,
+ ctx->cipher_iv.offset);
+ rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr,
+ ctx->cipher_iv.length);
+ }
+
+ if (do_auth) {
+ if (ctx->auth_iv.length) {
+ uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
+ uint8_t *,
+ ctx->auth_iv.offset);
+ rte_hexdump(stdout, "auth iv:", auth_iv_ptr,
+ ctx->auth_iv.length);
+ }
+ rte_hexdump(stdout, "digest:", op->sym->auth.digest.data,
+ ctx->digest_length);
+ }
+
+ if (do_aead) {
+ rte_hexdump(stdout, "digest:", op->sym->aead.digest.data,
+ ctx->digest_length);
+ rte_hexdump(stdout, "aad:", op->sym->aead.aad.data,
+ ctx->aad_len);
+ }
#endif
return 0;
}
@@ -1196,17 +1449,6 @@ static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
return data - mult;
}
-void qat_crypto_sym_session_init(struct rte_mempool *mp, void *sym_sess)
-{
- struct rte_cryptodev_sym_session *sess = sym_sess;
- struct qat_session *s = (void *)sess->_private;
-
- PMD_INIT_FUNC_TRACE();
- s->cd_paddr = rte_mempool_virt2phy(mp, sess) +
- offsetof(struct qat_session, cd) +
- offsetof(struct rte_cryptodev_sym_session, _private);
-}
-
int qat_dev_config(__rte_unused struct rte_cryptodev *dev,
__rte_unused struct rte_cryptodev_config *config)
{
@@ -1240,8 +1482,8 @@ int qat_dev_close(struct rte_cryptodev *dev)
return 0;
}
-void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
- struct rte_cryptodev_info *info)
+void qat_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
{
struct qat_pmd_private *internals = dev->data->dev_private;
@@ -1253,7 +1495,8 @@ void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
info->feature_flags = dev->feature_flags;
info->capabilities = internals->qat_dev_capabilities;
info->sym.max_nb_sessions = internals->max_nb_sessions;
- info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
+ info->driver_id = cryptodev_qat_driver_id;
+ info->pci_dev = RTE_DEV_TO_PCI(dev->device);
}
}
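[Editor's note] The session-handling hunks above move the PMD onto the generic 17.08 session model: the QAT private data now comes from a caller-supplied mempool and is attached to the rte_cryptodev_sym_session with set_session_private_data(). A minimal sketch of the matching application flow, assuming the standard two-step create/init API of that release (dev_id, xform and pool sizing are placeholders):

#include <rte_cryptodev.h>
#include <rte_mempool.h>

static struct rte_cryptodev_sym_session *
create_qat_session(uint8_t dev_id, struct rte_crypto_sym_xform *xform,
		struct rte_mempool *sess_pool)
{
	/* Allocate the generic session header from the mempool. */
	struct rte_cryptodev_sym_session *sess =
		rte_cryptodev_sym_session_create(sess_pool);

	if (sess == NULL)
		return NULL;

	/*
	 * Ask the PMD to fill its driver-private data; for QAT this lands in
	 * qat_crypto_sym_configure_session() above, which takes a private
	 * object from the same pool and attaches it with
	 * set_session_private_data().
	 */
	if (rte_cryptodev_sym_session_init(dev_id, sess, xform, sess_pool) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}

	return sess;
}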
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index b740d6b0..3f35a00e 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -39,6 +39,9 @@
#include "qat_crypto_capabilities.h"
+#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat
+/**< Intel QAT Symmetric Crypto PMD device name */
+
/*
* This macro rounds up a number to a be a multiple of
* the alignment when the alignment is a power of 2
@@ -47,6 +50,13 @@
(((num) + (align) - 1) & ~((align) - 1))
#define QAT_64_BTYE_ALIGN_MASK (~0x3f)
+struct qat_session;
+
+enum qat_device_gen {
+ QAT_GEN1 = 1,
+ QAT_GEN2,
+};
+
/**
* Structure associated with each queue.
*/
@@ -74,6 +84,7 @@ struct qat_qp {
struct rte_mempool *op_cookie_pool;
void **op_cookies;
uint32_t nb_descriptors;
+ enum qat_device_gen qat_dev_gen;
} __rte_cache_aligned;
/** private data structure for each QAT device */
@@ -82,9 +93,13 @@ struct qat_pmd_private {
/**< Max number of queue pairs supported by device */
unsigned max_nb_sessions;
/**< Max number of sessions supported by device */
+ enum qat_device_gen qat_dev_gen;
+ /**< QAT device generation */
const struct rte_cryptodev_capabilities *qat_dev_capabilities;
};
+extern uint8_t cryptodev_qat_driver_id;
+
int qat_dev_config(struct rte_cryptodev *dev,
struct rte_cryptodev_config *config);
int qat_dev_start(struct rte_cryptodev *dev);
@@ -98,7 +113,8 @@ void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev);
int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
- const struct rte_cryptodev_qp_conf *rx_conf, int socket_id);
+ const struct rte_cryptodev_qp_conf *rx_conf, int socket_id,
+ struct rte_mempool *session_pool);
int qat_crypto_sym_qp_release(struct rte_cryptodev *dev,
uint16_t queue_pair_id);
@@ -109,26 +125,35 @@ qat_pmd_session_mempool_create(struct rte_cryptodev *dev,
extern unsigned
qat_crypto_sym_get_session_private_size(struct rte_cryptodev *dev);
-extern void
-qat_crypto_sym_session_init(struct rte_mempool *mempool, void *priv_sess);
-
-extern void *
+extern int
qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool);
+
+
+int
+qat_crypto_set_session_parameters(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform, void *session_private);
-struct qat_session *
+int
+qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
+ struct qat_session *session);
+
+int
qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
- struct qat_session *session_private);
+ struct qat_session *session);
-void *
+int
qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
- struct rte_crypto_sym_xform *xform, void *session_private);
+ struct rte_crypto_sym_xform *xform,
+ struct qat_session *session);
extern void
-qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session);
-
+qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *session);
extern uint16_t
qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
diff --git a/drivers/crypto/qat/qat_crypto_capabilities.h b/drivers/crypto/qat/qat_crypto_capabilities.h
index 1294f247..70120072 100644
--- a/drivers/crypto/qat/qat_crypto_capabilities.h
+++ b/drivers/crypto/qat/qat_crypto_capabilities.h
@@ -34,7 +34,7 @@
#ifndef _QAT_CRYPTO_CAPABILITIES_H_
#define _QAT_CRYPTO_CAPABILITIES_H_
-#define QAT_BASE_CPM16_SYM_CAPABILITIES \
+#define QAT_BASE_GEN1_SYM_CAPABILITIES \
{ /* SHA1 HMAC */ \
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
{.sym = { \
@@ -43,16 +43,16 @@
.algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \
.block_size = 64, \
.key_size = { \
- .min = 64, \
+ .min = 1, \
.max = 64, \
- .increment = 0 \
+ .increment = 1 \
}, \
.digest_size = { \
.min = 20, \
.max = 20, \
.increment = 0 \
}, \
- .aad_size = { 0 } \
+ .iv_size = { 0 } \
}, } \
}, } \
}, \
@@ -64,16 +64,16 @@
.algo = RTE_CRYPTO_AUTH_SHA224_HMAC, \
.block_size = 64, \
.key_size = { \
- .min = 64, \
+ .min = 1, \
.max = 64, \
- .increment = 0 \
+ .increment = 1 \
}, \
.digest_size = { \
.min = 28, \
.max = 28, \
.increment = 0 \
}, \
- .aad_size = { 0 } \
+ .iv_size = { 0 } \
}, } \
}, } \
}, \
@@ -85,16 +85,16 @@
.algo = RTE_CRYPTO_AUTH_SHA256_HMAC, \
.block_size = 64, \
.key_size = { \
- .min = 64, \
+ .min = 1, \
.max = 64, \
- .increment = 0 \
+ .increment = 1 \
}, \
.digest_size = { \
.min = 32, \
.max = 32, \
.increment = 0 \
}, \
- .aad_size = { 0 } \
+ .iv_size = { 0 } \
}, } \
}, } \
}, \
@@ -104,18 +104,18 @@
.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
{.auth = { \
.algo = RTE_CRYPTO_AUTH_SHA384_HMAC, \
- .block_size = 64, \
+ .block_size = 128, \
.key_size = { \
- .min = 128, \
+ .min = 1, \
.max = 128, \
- .increment = 0 \
+ .increment = 1 \
}, \
.digest_size = { \
.min = 48, \
.max = 48, \
.increment = 0 \
- }, \
- .aad_size = { 0 } \
+ }, \
+ .iv_size = { 0 } \
}, } \
}, } \
}, \
@@ -127,16 +127,16 @@
.algo = RTE_CRYPTO_AUTH_SHA512_HMAC, \
.block_size = 128, \
.key_size = { \
- .min = 128, \
+ .min = 1, \
.max = 128, \
- .increment = 0 \
+ .increment = 1 \
}, \
.digest_size = { \
.min = 64, \
.max = 64, \
.increment = 0 \
}, \
- .aad_size = { 0 } \
+ .iv_size = { 0 } \
}, } \
}, } \
}, \
@@ -148,16 +148,16 @@
.algo = RTE_CRYPTO_AUTH_MD5_HMAC, \
.block_size = 64, \
.key_size = { \
- .min = 8, \
+ .min = 1, \
.max = 64, \
- .increment = 8 \
+ .increment = 1 \
}, \
.digest_size = { \
.min = 16, \
.max = 16, \
.increment = 0 \
}, \
- .aad_size = { 0 } \
+ .iv_size = { 0 } \
}, } \
}, } \
}, \
@@ -178,16 +178,17 @@
.max = 16, \
.increment = 0 \
}, \
- .aad_size = { 0 } \
+ .aad_size = { 0 }, \
+ .iv_size = { 0 } \
}, } \
}, } \
}, \
- { /* AES GCM (AUTH) */ \
+ { /* AES GCM */ \
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
{.sym = { \
- .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
- {.auth = { \
- .algo = RTE_CRYPTO_AUTH_AES_GCM, \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \
+ {.aead = { \
+ .algo = RTE_CRYPTO_AEAD_AES_GCM, \
.block_size = 16, \
.key_size = { \
.min = 16, \
@@ -203,7 +204,12 @@
.min = 0, \
.max = 240, \
.increment = 1 \
- } \
+ }, \
+ .iv_size = { \
+ .min = 12, \
+ .max = 12, \
+ .increment = 0 \
+ }, \
}, } \
}, } \
}, \
@@ -224,10 +230,10 @@
.max = 16, \
.increment = 4 \
}, \
- .aad_size = { \
- .min = 1, \
- .max = 65535, \
- .increment = 1 \
+ .iv_size = { \
+ .min = 12, \
+ .max = 12, \
+ .increment = 0 \
} \
}, } \
}, } \
@@ -249,7 +255,7 @@
.max = 4, \
.increment = 0 \
}, \
- .aad_size = { \
+ .iv_size = { \
.min = 16, \
.max = 16, \
.increment = 0 \
@@ -257,26 +263,6 @@
}, } \
}, } \
}, \
- { /* AES GCM (CIPHER) */ \
- .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
- {.sym = { \
- .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
- {.cipher = { \
- .algo = RTE_CRYPTO_CIPHER_AES_GCM, \
- .block_size = 16, \
- .key_size = { \
- .min = 16, \
- .max = 32, \
- .increment = 8 \
- }, \
- .iv_size = { \
- .min = 12, \
- .max = 12, \
- .increment = 0 \
- } \
- }, } \
- }, } \
- }, \
{ /* AES CBC */ \
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
{.sym = { \
@@ -374,7 +360,7 @@
.max = 0, \
.increment = 0 \
}, \
- .aad_size = { 0 } \
+ .iv_size = { 0 } \
}, }, \
}, }, \
}, \
@@ -435,11 +421,7 @@
.max = 4, \
.increment = 0 \
}, \
- .aad_size = { \
- .min = 8, \
- .max = 8, \
- .increment = 0 \
- } \
+ .iv_size = { 0 } \
}, } \
}, } \
}, \
@@ -524,7 +506,7 @@
}, } \
}
-#define QAT_EXTRA_CPM17_SYM_CAPABILITIES \
+#define QAT_EXTRA_GEN2_SYM_CAPABILITIES \
{ /* ZUC (EEA3) */ \
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
{.sym = { \
@@ -562,7 +544,7 @@
.max = 4, \
.increment = 0 \
}, \
- .aad_size = { \
+ .iv_size = { \
.min = 16, \
.max = 16, \
.increment = 0 \
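[Editor's note] The capability rework above folds the old split AES-GCM cipher and auth entries into a single AEAD capability with its own iv_size and aad_size ranges. A sketch of the corresponding application transform under the 17.08 AEAD API; key size, AAD/digest lengths and IV_OFFSET are illustrative values only:

#include <string.h>

#include <rte_crypto.h>
#include <rte_crypto_sym.h>

/* Illustrative: IV placed just after the symmetric op, as in the earlier sketch. */
#define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

static void
build_aes_gcm_xform(struct rte_crypto_sym_xform *xform, uint8_t *key)
{
	memset(xform, 0, sizeof(*xform));
	xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
	xform->next = NULL;
	xform->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	xform->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	xform->aead.key.data = key;
	xform->aead.key.length = 16;		/* AES-128 */
	xform->aead.iv.offset = IV_OFFSET;
	xform->aead.iv.length = 12;		/* 12-byte IV: firmware derives J0 */
	xform->aead.digest_length = 16;
	xform->aead.aad_length = 16;		/* within the 0..240 range advertised above */
}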
diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c
index a358ccd7..5048d214 100644
--- a/drivers/crypto/qat/qat_qp.c
+++ b/drivers/crypto/qat/qat_qp.c
@@ -36,6 +36,7 @@
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>
@@ -133,7 +134,7 @@ queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
const struct rte_cryptodev_qp_conf *qp_conf,
- int socket_id)
+ int socket_id, struct rte_mempool *session_pool __rte_unused)
{
struct qat_qp *qp;
struct rte_pci_device *pci_dev;
@@ -205,7 +206,7 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
adf_configure_queues(qp);
adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
- dev->driver->pci_drv.driver.name, dev->data->dev_id,
+ pci_dev->driver->driver.name, dev->data->dev_id,
queue_pair_id);
qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
@@ -242,6 +243,11 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
offsetof(struct qat_crypto_op_cookie,
qat_sgl_list_dst);
}
+
+ struct qat_pmd_private *internals
+ = dev->data->dev_private;
+ qp->qat_dev_gen = internals->qat_dev_gen;
+
dev->data->queue_pairs[queue_pair_id] = qp;
return 0;
@@ -355,11 +361,13 @@ qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
return -EINVAL;
}
+ pci_dev = RTE_DEV_TO_PCI(dev->device);
+
/*
* Allocate a memzone for the queue - create a unique name.
*/
snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
- dev->driver->pci_drv.driver.name, "qp_mem", dev->data->dev_id,
+ pci_dev->driver->driver.name, "qp_mem", dev->data->dev_id,
queue->hw_bundle_number, queue->hw_queue_number);
qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
socket_id);
@@ -408,7 +416,6 @@ qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
queue->queue_size);
- pci_dev = RTE_DEV_TO_PCI(dev->device);
io_addr = pci_dev->mem_resource[0].addr;
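[Editor's note] The qp_setup change above threads a session mempool through from the cryptodev framework (QAT itself marks it __rte_unused). On the application side the matching 17.08 call would look roughly like the sketch below; ring depth and socket choice are illustrative:

#include <rte_cryptodev.h>
#include <rte_lcore.h>

static int
setup_qat_qp(uint8_t dev_id, uint16_t qp_id, struct rte_mempool *sess_pool)
{
	struct rte_cryptodev_qp_conf qp_conf = {
		.nb_descriptors = 2048,		/* illustrative ring depth */
	};

	return rte_cryptodev_queue_pair_setup(dev_id, qp_id, &qp_conf,
			rte_socket_id(), sess_pool);
}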
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index 1bdd30da..7d56fca4 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -35,18 +35,21 @@
#include <rte_dev.h>
#include <rte_malloc.h>
#include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_pci.h>
#include "qat_crypto.h"
#include "qat_logs.h"
-static const struct rte_cryptodev_capabilities qat_cpm16_capabilities[] = {
- QAT_BASE_CPM16_SYM_CAPABILITIES,
+uint8_t cryptodev_qat_driver_id;
+
+static const struct rte_cryptodev_capabilities qat_gen1_capabilities[] = {
+ QAT_BASE_GEN1_SYM_CAPABILITIES,
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
-static const struct rte_cryptodev_capabilities qat_cpm17_capabilities[] = {
- QAT_BASE_CPM16_SYM_CAPABILITIES,
- QAT_EXTRA_CPM17_SYM_CAPABILITIES,
+static const struct rte_cryptodev_capabilities qat_gen2_capabilities[] = {
+ QAT_BASE_GEN1_SYM_CAPABILITIES,
+ QAT_EXTRA_GEN2_SYM_CAPABILITIES,
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
@@ -70,7 +73,6 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
/* Crypto related operations */
.session_get_size = qat_crypto_sym_get_session_private_size,
.session_configure = qat_crypto_sym_configure_session,
- .session_initialize = qat_crypto_sym_session_init,
.session_clear = qat_crypto_sym_clear_session
};
@@ -95,8 +97,7 @@ static const struct rte_pci_id pci_id_qat_map[] = {
};
static int
-crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_drv,
- struct rte_cryptodev *cryptodev)
+crypto_qat_dev_init(struct rte_cryptodev *cryptodev)
{
struct qat_pmd_private *internals;
@@ -106,7 +107,7 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
RTE_DEV_TO_PCI(cryptodev->device)->addr.devid,
RTE_DEV_TO_PCI(cryptodev->device)->addr.function);
- cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
+ cryptodev->driver_id = cryptodev_qat_driver_id;
cryptodev->dev_ops = &crypto_qat_ops;
cryptodev->enqueue_burst = qat_pmd_enqueue_op_burst;
@@ -121,12 +122,14 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
internals->max_nb_sessions = RTE_QAT_PMD_MAX_NB_SESSIONS;
switch (RTE_DEV_TO_PCI(cryptodev->device)->id.device_id) {
case 0x0443:
- internals->qat_dev_capabilities = qat_cpm16_capabilities;
+ internals->qat_dev_gen = QAT_GEN1;
+ internals->qat_dev_capabilities = qat_gen1_capabilities;
break;
case 0x37c9:
case 0x19e3:
case 0x6f55:
- internals->qat_dev_capabilities = qat_cpm17_capabilities;
+ internals->qat_dev_gen = QAT_GEN2;
+ internals->qat_dev_capabilities = qat_gen2_capabilities;
break;
default:
PMD_DRV_LOG(ERR,
@@ -147,17 +150,25 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
return 0;
}
-static struct rte_cryptodev_driver rte_qat_pmd = {
- .pci_drv = {
- .id_table = pci_id_qat_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
- .probe = rte_cryptodev_pci_probe,
- .remove = rte_cryptodev_pci_remove,
- },
- .cryptodev_init = crypto_qat_dev_init,
- .dev_private_size = sizeof(struct qat_pmd_private),
+static int crypto_qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_cryptodev_pci_generic_probe(pci_dev,
+ sizeof(struct qat_pmd_private), crypto_qat_dev_init);
+}
+
+static int crypto_qat_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_cryptodev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_qat_pmd = {
+ .id_table = pci_id_qat_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = crypto_qat_pci_probe,
+ .remove = crypto_qat_pci_remove
};
-RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_QAT_SYM_PMD, rte_qat_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_QAT_SYM_PMD, rte_qat_pmd);
RTE_PMD_REGISTER_PCI_TABLE(CRYPTODEV_NAME_QAT_SYM_PMD, pci_id_qat_map);
-
+RTE_PMD_REGISTER_CRYPTO_DRIVER(rte_qat_pmd, cryptodev_qat_driver_id);