Diffstat (limited to 'drivers/crypto/qat')
-rw-r--r-- | drivers/crypto/qat/Makefile                               |   7
-rw-r--r-- | drivers/crypto/qat/qat_adf/adf_transport_access_macros.h  |  11
-rw-r--r-- | drivers/crypto/qat/qat_adf/qat_algs.h                     |  31
-rw-r--r-- | drivers/crypto/qat/qat_adf/qat_algs_build_desc.c          | 176
-rw-r--r-- | drivers/crypto/qat/qat_crypto.c                           | 968
-rw-r--r-- | drivers/crypto/qat/qat_crypto.h                           |  12
-rw-r--r-- | drivers/crypto/qat/qat_crypto_capabilities.h              | 574
-rw-r--r-- | drivers/crypto/qat/qat_qp.c                               |  69
-rw-r--r-- | drivers/crypto/qat/rte_qat_cryptodev.c                    |  39
9 files changed, 1356 insertions, 531 deletions
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile
index 20a70d4a..7322ffe4 100644
--- a/drivers/crypto/qat/Makefile
+++ b/drivers/crypto/qat/Makefile
@@ -56,11 +56,4 @@ SYMLINK-y-include +=
 # versioning export map
 EXPORT_MAP := rte_pmd_qat_version.map
 
-# library dependencies
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += lib/librte_cryptodev
-
-
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h b/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
index 47f1c91a..d218f85d 100644
--- a/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
+++ b/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
@@ -47,14 +47,15 @@
 #ifndef ADF_TRANSPORT_ACCESS_MACROS_H
 #define ADF_TRANSPORT_ACCESS_MACROS_H
 
+#include <rte_io.h>
+
 /* CSR write macro */
-#define ADF_CSR_WR(csrAddr, csrOffset, val) \
-	(void)((*((volatile uint32_t *)(((uint8_t *)csrAddr) + csrOffset)) \
-			= (val)))
+#define ADF_CSR_WR(csrAddr, csrOffset, val) \
+	rte_write32(val, (((uint8_t *)csrAddr) + csrOffset))
 
 /* CSR read macro */
-#define ADF_CSR_RD(csrAddr, csrOffset) \
-	(*((volatile uint32_t *)(((uint8_t *)csrAddr) + csrOffset)))
+#define ADF_CSR_RD(csrAddr, csrOffset) \
+	rte_read32((((uint8_t *)csrAddr) + csrOffset))
 
 #define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
 #define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
diff --git a/drivers/crypto/qat/qat_adf/qat_algs.h b/drivers/crypto/qat/qat_adf/qat_algs.h
index dcc0df59..5c63406b 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs.h
+++ b/drivers/crypto/qat/qat_adf/qat_algs.h
@@ -47,6 +47,7 @@
 #ifndef _ICP_QAT_ALGS_H_
 #define _ICP_QAT_ALGS_H_
 #include <rte_memory.h>
+#include <rte_crypto.h>
 #include "icp_qat_hw.h"
 #include "icp_qat_fw.h"
 #include "icp_qat_fw_la.h"
@@ -79,13 +80,33 @@ struct qat_alg_buf {
 	uint64_t addr;
 } __rte_packed;
 
+enum qat_crypto_proto_flag {
+	QAT_CRYPTO_PROTO_FLAG_NONE = 0,
+	QAT_CRYPTO_PROTO_FLAG_CCM = 1,
+	QAT_CRYPTO_PROTO_FLAG_GCM = 2,
+	QAT_CRYPTO_PROTO_FLAG_SNOW3G = 3,
+	QAT_CRYPTO_PROTO_FLAG_ZUC = 4
+};
+
+/*
+ * Maximum number of SGL entries
+ */
+#define QAT_SGL_MAX_NUMBER 16
+
 struct qat_alg_buf_list {
 	uint64_t resrvd;
 	uint32_t num_bufs;
 	uint32_t num_mapped_bufs;
-	struct qat_alg_buf bufers[];
+	struct qat_alg_buf bufers[QAT_SGL_MAX_NUMBER];
 } __rte_packed __rte_cache_aligned;
 
+struct qat_crypto_op_cookie {
+	struct qat_alg_buf_list qat_sgl_list_src;
+	struct qat_alg_buf_list qat_sgl_list_dst;
+	phys_addr_t qat_sgl_src_phys_addr;
+	phys_addr_t qat_sgl_dst_phys_addr;
+};
+
 /* Common content descriptor */
 struct qat_alg_cd {
 	struct icp_qat_hw_cipher_algo_blk cipher;
@@ -99,6 +120,7 @@ struct qat_session {
 	enum icp_qat_hw_cipher_mode qat_mode;
 	enum icp_qat_hw_auth_algo qat_hash_alg;
 	enum icp_qat_hw_auth_op auth_op;
+	void *bpi_ctx;
 	struct qat_alg_cd cd;
 	uint8_t *cd_cur_ptr;
 	phys_addr_t cd_paddr;
@@ -130,7 +152,7 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
 						unsigned int operation);
 
 void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
-					uint16_t proto);
+					enum qat_crypto_proto_flag proto_flags);
 
 void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_cd *cd,
 					int alg, const uint8_t *key,
@@ -141,7 +163,12 @@ void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_cd *cd,
 					unsigned int keylen);
 
 int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int qat_alg_validate_aes_docsisbpi_key(int key_len,
+		enum icp_qat_hw_cipher_algo *alg);
 int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
 int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
 int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
+int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg);
+int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg);
 #endif
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index 8900668d..154e1ddd 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -54,12 +54,31 @@
 #include <rte_crypto_sym.h>
 #include "../qat_logs.h"
-#include "qat_algs.h"
 
 #include <openssl/sha.h>	/* Needed to calculate pre-compute values */
 #include <openssl/aes.h>	/* Needed to calculate pre-compute values */
 #include <openssl/md5.h>	/* Needed to calculate pre-compute values */
 
+#include "qat_algs.h"
+
+/* returns block size in bytes per cipher algo */
+int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg)
+{
+	switch (qat_cipher_alg) {
+	case ICP_QAT_HW_CIPHER_ALGO_DES:
+		return ICP_QAT_HW_DES_BLK_SZ;
+	case ICP_QAT_HW_CIPHER_ALGO_3DES:
+		return ICP_QAT_HW_3DES_BLK_SZ;
+	case ICP_QAT_HW_CIPHER_ALGO_AES128:
+	case ICP_QAT_HW_CIPHER_ALGO_AES192:
+	case ICP_QAT_HW_CIPHER_ALGO_AES256:
+		return ICP_QAT_HW_AES_BLK_SZ;
+	default:
+		PMD_DRV_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg);
+		return -EFAULT;
+	};
+	return -EFAULT;
+}
 
 /*
  * Returns size in bytes per hash algo for state1 size field in cd_ctrl
@@ -90,6 +109,9 @@ static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
 	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
 		return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ,
 						QAT_HW_DEFAULT_ALIGNMENT);
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ,
+						QAT_HW_DEFAULT_ALIGNMENT);
 	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
 		return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ,
 						QAT_HW_DEFAULT_ALIGNMENT);
@@ -422,7 +444,7 @@ static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg,
 }
 
 void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
-		uint16_t proto)
+		enum qat_crypto_proto_flag proto_flags)
 {
 	PMD_INIT_FUNC_TRACE();
 	header->hdr_flags =
@@ -435,10 +457,58 @@ void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
 					   ICP_QAT_FW_LA_PARTIAL_NONE);
 	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
 					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
-	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
-				ICP_QAT_FW_LA_NO_PROTO);
+
+	switch (proto_flags)	{
+	case QAT_CRYPTO_PROTO_FLAG_NONE:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_NO_PROTO);
+		break;
+	case QAT_CRYPTO_PROTO_FLAG_CCM:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_CCM_PROTO);
+		break;
+	case QAT_CRYPTO_PROTO_FLAG_GCM:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_GCM_PROTO);
+		break;
+	case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_SNOW_3G_PROTO);
+		break;
+	case QAT_CRYPTO_PROTO_FLAG_ZUC:
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
+			ICP_QAT_FW_LA_ZUC_3G_PROTO);
+		break;
+	}
+
 	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
 					   ICP_QAT_FW_LA_NO_UPDATE_STATE);
+	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
+}
+
+/*
+ * Snow3G and ZUC should never use this function
+ * and set its protocol flag in both cipher and auth part of content
+ * descriptor building function
+ */
+static enum qat_crypto_proto_flag
+qat_get_crypto_proto_flag(uint16_t flags)
+{
+	int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
+	enum qat_crypto_proto_flag qat_proto_flag =
+			QAT_CRYPTO_PROTO_FLAG_NONE;
+
+	switch (proto) {
+	case ICP_QAT_FW_LA_GCM_PROTO:
+		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
+		break;
+	case ICP_QAT_FW_LA_CCM_PROTO:
+		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
+		break;
+	}
+
+	return qat_proto_flag;
 }
 
 int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
@@ -453,8 +523,9 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
 	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
 	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
 	enum icp_qat_hw_cipher_convert key_convert;
+	enum qat_crypto_proto_flag qat_proto_flag =
+		QAT_CRYPTO_PROTO_FLAG_NONE;
 	uint32_t total_key_size;
-	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/SNOW 3G */
 	uint16_t cipher_offset, cd_size;
 	uint32_t wordIndex  = 0;
 	uint32_t *temp_key = NULL;
@@ -494,7 +565,9 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
 		 */
 		cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
 		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
-	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2)
+	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2
+		|| cdesc->qat_cipher_alg ==
+			ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3)
 		key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT;
 	else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT)
 		key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT;
@@ -506,7 +579,8 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
 					ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ;
 		cipher_cd_ctrl->cipher_state_sz =
 					ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3;
-		proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
+		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
+
 	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
 		total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3;
@@ -515,21 +589,34 @@ int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc,
 	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) {
 		total_key_size = ICP_QAT_HW_3DES_KEY_SZ;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3;
-		proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
+		qat_proto_flag =
+			qat_get_crypto_proto_flag(header->serv_specif_flags);
+	} else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) {
+		total_key_size = ICP_QAT_HW_DES_KEY_SZ;
+		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3;
+		qat_proto_flag =
+			qat_get_crypto_proto_flag(header->serv_specif_flags);
+	} else if (cdesc->qat_cipher_alg ==
+		ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
+		total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ +
+			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
+		cipher_cd_ctrl->cipher_state_sz =
+			ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3;
+		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
 	} else {
 		total_key_size = cipherkeylen;
 		cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3;
-		proto = ICP_QAT_FW_LA_PROTO_GET(header->serv_specif_flags);
+		qat_proto_flag =
+			qat_get_crypto_proto_flag(header->serv_specif_flags);
 	}
 	cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3;
 	cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
 	cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3;
 
 	header->service_cmd_id = cdesc->qat_cmd;
-	qat_alg_init_common_hdr(header, proto);
+	qat_alg_init_common_hdr(header, qat_proto_flag);
 
 	cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr;
-
 	cipher->cipher_config.val =
 	    ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode,
 					cdesc->qat_cipher_alg, key_convert,
@@ -590,12 +677,13 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
 		(struct icp_qat_fw_la_auth_req_params *)
 		((char *)&req_tmpl->serv_specif_rqpars +
 		sizeof(struct icp_qat_fw_la_cipher_req_params));
-	uint16_t proto = ICP_QAT_FW_LA_NO_PROTO;	/* no CCM/GCM/SNOW 3G */
 	uint16_t state1_size = 0, state2_size = 0;
 	uint16_t hash_offset, cd_size;
 	uint32_t *aad_len = NULL;
 	uint32_t wordIndex  = 0;
 	uint32_t *pTempKey;
+	enum qat_crypto_proto_flag qat_proto_flag =
+		QAT_CRYPTO_PROTO_FLAG_NONE;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -645,7 +733,8 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
 			cdesc->qat_hash_alg, digestsize);
 
 	if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
-		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9)
+		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
+		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3)
 		hash->auth_counter.counter = 0;
 	else
 		hash->auth_counter.counter = rte_bswap32(
@@ -708,7 +797,7 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
 	case ICP_QAT_HW_AUTH_ALGO_GALOIS_64:
-		proto = ICP_QAT_FW_LA_GCM_PROTO;
+		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
 		state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ;
 		if (qat_alg_do_precomputes(cdesc->qat_hash_alg,
 			authkey, authkeylen, cdesc->cd_cur_ptr + state1_size,
@@ -730,7 +819,7 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
 		*aad_len = rte_bswap32(add_auth_data_length);
 		break;
 	case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2:
-		proto = ICP_QAT_FW_LA_SNOW_3G_PROTO;
+		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G;
 		state1_size = qat_hash_get_state1_size(
 				ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2);
 		state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ;
@@ -751,6 +840,24 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
 		auth_param->hash_state_sz =
 				RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
 		break;
+	case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3:
+		hash->auth_config.config =
+			ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0,
+				cdesc->qat_hash_alg, digestsize);
+		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC;
+		state1_size = qat_hash_get_state1_size(
+				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3);
+		state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ;
+		memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size
+			+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ);
+
+		memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
+		cdesc->cd_cur_ptr += state1_size + state2_size
+			+ ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ;
+		auth_param->hash_state_sz =
+				RTE_ALIGN_CEIL(add_auth_data_length, 16) >> 3;
+
+		break;
 	case ICP_QAT_HW_AUTH_ALGO_MD5:
 		if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5,
 			authkey, authkeylen, cdesc->cd_cur_ptr,
@@ -788,7 +895,7 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
 	}
 
 	/* Request template setup */
-	qat_alg_init_common_hdr(header, proto);
+	qat_alg_init_common_hdr(header, qat_proto_flag);
 	header->service_cmd_id = cdesc->qat_cmd;
 
 	/* Auth CD config setup */
@@ -832,6 +939,19 @@ int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
 	return 0;
 }
 
+int qat_alg_validate_aes_docsisbpi_key(int key_len,
+		enum icp_qat_hw_cipher_algo *alg)
+{
+	switch (key_len) {
+	case ICP_QAT_HW_AES_128_KEY_SZ:
+		*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
 int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
 {
 	switch (key_len) {
@@ -856,6 +976,18 @@ int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
 	return 0;
 }
 
+int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+	switch (key_len) {
+	case ICP_QAT_HW_DES_KEY_SZ:
+		*alg = ICP_QAT_HW_CIPHER_ALGO_DES;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
 int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
 {
 	switch (key_len) {
@@ -868,3 +1000,15 @@ int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
 	}
 	return 0;
 }
+
+int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg)
+{
+	switch (key_len) {
+	case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ:
+		*alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index a4119fcd..386aa453 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -59,6 +59,8 @@
 #include <rte_string_fns.h>
 #include <rte_spinlock.h>
 #include <rte_hexdump.h>
+#include <rte_crypto_sym.h>
+#include <openssl/evp.h>
 
 #include "qat_logs.h"
 #include "qat_algs.h"
@@ -67,443 +69,151 @@
 
 #define BYTE_LENGTH    8
 
-static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = {
-	{	/* SHA1 HMAC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
-				.block_size = 64,
-				.key_size = {
-					.min = 64,
-					.max = 64,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 20,
-					.max = 20,
-					.increment = 0
-				},
-				.aad_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* SHA224 HMAC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
-				.block_size = 64,
-				.key_size = {
-					.min = 64,
-					.max = 64,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 28,
-					.max = 28,
-					.increment = 0
-				},
-				.aad_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* SHA256 HMAC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
-				.block_size = 64,
-				.key_size = {
-					.min = 64,
-					.max = 64,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 32,
-					.max = 32,
-					.increment = 0
-				},
-				.aad_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* SHA384 HMAC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
-				.block_size = 64,
-				.key_size = {
-					.min = 128,
-					.max = 128,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 48,
-					.max = 48,
-					.increment = 0
-				},
-				.aad_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* SHA512 HMAC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
-				.block_size = 128,
-				.key_size = {
-					.min = 128,
-					.max = 128,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 64,
-					.max = 64,
-					.increment = 0
-				},
-				.aad_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* MD5 HMAC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,
-				.block_size = 64,
-				.key_size = {
-					.min = 8,
-					.max = 64,
-					.increment = 8
-				},
-				.digest_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.aad_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* AES XCBC MAC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.aad_size = { 0 }
-			}, }
-		}, }
-	},
-	{	/* AES GCM (AUTH) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_AES_GCM,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 32,
-					.increment = 8
-				},
-				.digest_size = {
-					.min = 8,
-					.max = 16,
-					.increment = 4
-				},
-				.aad_size = {
-					.min = 8,
-					.max = 12,
-					.increment = 4
-				}
-			}, }
-		}, }
-	},
-	{	/* AES GMAC (AUTH) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 32,
-					.increment = 8
-				},
-				.digest_size = {
-					.min = 8,
-					.max = 16,
-					.increment = 4
-				},
-				.aad_size = {
-					.min = 1,
-					.max = 65535,
-					.increment = 1
-				}
-			}, }
-		}, }
-	},
-	{	/* SNOW 3G (UIA2) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 4,
-					.max = 4,
-					.increment = 0
-				},
-				.aad_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* AES GCM (CIPHER) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_AES_GCM,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 32,
-					.increment = 8
-				},
-				.iv_size = {
-					.min = 12,
-					.max = 12,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* AES CBC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_AES_CBC,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 32,
-					.increment = 8
-				},
-				.iv_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* SNOW 3G (UEA2) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.iv_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* AES CTR */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_AES_CTR,
-				.block_size = 16,
-				.key_size = {
-					.min = 16,
-					.max = 32,
-					.increment = 8
-				},
-				.iv_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* NULL (AUTH) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_NULL,
-				.block_size = 1,
-				.key_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				},
-				.aad_size = { 0 }
-			}, },
-		}, },
-	},
-	{	/* NULL (CIPHER) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_NULL,
-				.block_size = 1,
-				.key_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				},
-				.iv_size = {
-					.min = 0,
-					.max = 0,
-					.increment = 0
-				}
-			}, },
-		}, }
-	},
-	{	/* KASUMI (F8) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
-				.block_size = 8,
-				.key_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.iv_size = {
-					.min = 8,
-					.max = 8,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* KASUMI (F9) */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_KASUMI_F9,
-				.block_size = 8,
-				.key_size = {
-					.min = 16,
-					.max = 16,
-					.increment = 0
-				},
-				.digest_size = {
-					.min = 4,
-					.max = 4,
-					.increment = 0
-				},
-				.aad_size = {
-					.min = 8,
-					.max = 8,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* 3DES CBC */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
-				.block_size = 8,
-				.key_size = {
-					.min = 16,
-					.max = 24,
-					.increment = 8
-				},
-				.iv_size = {
-					.min = 8,
-					.max = 8,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	{	/* 3DES CTR */
-		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-		{.sym = {
-			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-			{.cipher = {
-				.algo = RTE_CRYPTO_CIPHER_3DES_CTR,
-				.block_size = 8,
-				.key_size = {
-					.min = 16,
-					.max = 24,
-					.increment = 8
-				},
-				.iv_size = {
-					.min = 8,
-					.max = 8,
-					.increment = 0
-				}
-			}, }
-		}, }
-	},
-	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
-};
+static int
+qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
+		struct qat_pmd_private *internals) {
+	int i = 0;
+	const struct rte_cryptodev_capabilities *capability;
+
+	while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
+			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+			continue;
+
+		if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
+			continue;
+
+		if (capability->sym.cipher.algo == algo)
+			return 1;
+	}
+	return 0;
+}
+
+static int
+qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
+		struct qat_pmd_private *internals) {
+	int i = 0;
+	const struct rte_cryptodev_capabilities *capability;
+
+	while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
+			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+			continue;
+
+		if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
+			continue;
+
+		if (capability->sym.auth.algo == algo)
+			return 1;
+	}
+	return 0;
+}
+
+/** Encrypt a single partial block
+ *  Depends on openssl libcrypto
+ *  Uses ECB+XOR to do CFB encryption, same result, more performant
+ */
+static inline int
+bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
+		uint8_t *iv, int ivlen, int srclen,
+		void *bpi_ctx)
+{
+	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+	int encrypted_ivlen;
+	uint8_t encrypted_iv[16];
+	int i;
+
+	/* ECB method: encrypt the IV, then XOR this with plaintext */
+	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+			<= 0)
+		goto cipher_encrypt_err;
+
+	for (i = 0; i < srclen; i++)
+		*(dst+i) = *(src+i)^(encrypted_iv[i]);
+
+	return 0;
+
+cipher_encrypt_err:
+	PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed");
+	return -EINVAL;
+}
+
+/** Decrypt a single partial block
+ *  Depends on openssl libcrypto
+ *  Uses ECB+XOR to do CFB encryption, same result, more performant
+ */
+static inline int
+bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
+		uint8_t *iv, int ivlen, int srclen,
+		void *bpi_ctx)
+{
+	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+	int encrypted_ivlen;
+	uint8_t encrypted_iv[16];
+	int i;
+
+	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
+	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+			<= 0)
+		goto cipher_decrypt_err;
+
+	for (i = 0; i < srclen; i++)
+		*(dst+i) = *(src+i)^(encrypted_iv[i]);
+
+	return 0;
+
+cipher_decrypt_err:
+	PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt for BPI IV failed");
+	return -EINVAL;
+}
+
+/** Creates a context in either AES or DES in ECB mode
+ *  Depends on openssl libcrypto
+ */
+static void *
+bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
+		enum rte_crypto_cipher_operation direction __rte_unused,
+		uint8_t *key)
+{
+	const EVP_CIPHER *algo = NULL;
+	EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new();
+
+	if (ctx == NULL)
+		goto ctx_init_err;
+
+	if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
+		algo = EVP_des_ecb();
+	else
+		algo = EVP_aes_128_ecb();
+
+	/* IV will be ECB encrypted whether direction is encrypt or decrypt*/
+	if (EVP_EncryptInit_ex(ctx, algo, NULL, key, 0) != 1)
+		goto ctx_init_err;
+
+	return ctx;
+
+ctx_init_err:
+	if (ctx != NULL)
+		EVP_CIPHER_CTX_free(ctx);
+	return NULL;
+}
+
+/** Frees a context previously created
+ *  Depends on openssl libcrypto
+ */
+static void
+bpi_cipher_ctx_free(void *bpi_ctx)
+{
+	if (bpi_ctx != NULL)
+		EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
+}
 
 static inline uint32_t
 adf_modulo(uint32_t data, uint32_t shift);
 
 static inline int
-qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg);
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
+		struct qat_crypto_op_cookie *qat_op_cookie);
 
 void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
@@ -512,7 +222,11 @@ void qat_crypto_sym_clear_session(struct rte_cryptodev *dev,
 	phys_addr_t cd_paddr;
 
 	PMD_INIT_FUNC_TRACE();
-	if (session) {
+	if (sess) {
+		if (sess->bpi_ctx) {
+			bpi_cipher_ctx_free(sess->bpi_ctx);
+			sess->bpi_ctx = NULL;
+		}
 		cd_paddr = sess->cd_paddr;
 		memset(sess, 0, qat_crypto_sym_get_session_private_size(dev));
 		sess->cd_paddr = cd_paddr;
@@ -576,10 +290,8 @@ void *
 qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform, void *session_private)
 {
-	struct qat_pmd_private *internals = dev->data->dev_private;
-
 	struct qat_session *session = session_private;
-
+	struct qat_pmd_private *internals = dev->data->dev_private;
 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
 
 	/* Get cipher xform from crypto xform chain */
@@ -637,6 +349,14 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
 		}
 		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
 		break;
+	case RTE_CRYPTO_CIPHER_DES_CBC:
+		if (qat_alg_validate_des_key(cipher_xform->key.length,
+				&session->qat_cipher_alg) != 0) {
+			PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
+			goto error_out;
+		}
+		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+		break;
 	case RTE_CRYPTO_CIPHER_3DES_CTR:
 		if (qat_alg_validate_3des_key(cipher_xform->key.length,
 				&session->qat_cipher_alg) != 0) {
@@ -645,13 +365,59 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
 		}
 		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
 		break;
+	case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
+		session->bpi_ctx = bpi_cipher_ctx_init(
+					cipher_xform->algo,
+					cipher_xform->op,
+					cipher_xform->key.data);
+		if (session->bpi_ctx == NULL) {
+			PMD_DRV_LOG(ERR, "failed to create DES BPI ctx");
+			goto error_out;
+		}
+		if (qat_alg_validate_des_key(cipher_xform->key.length,
+				&session->qat_cipher_alg) != 0) {
+			PMD_DRV_LOG(ERR, "Invalid DES cipher key size");
+			goto error_out;
+		}
+		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
+		session->bpi_ctx = bpi_cipher_ctx_init(
+					cipher_xform->algo,
+					cipher_xform->op,
+					cipher_xform->key.data);
+		if (session->bpi_ctx == NULL) {
+			PMD_DRV_LOG(ERR, "failed to create AES BPI ctx");
+			goto error_out;
+		}
+		if (qat_alg_validate_aes_docsisbpi_key(cipher_xform->key.length,
+				&session->qat_cipher_alg) != 0) {
+			PMD_DRV_LOG(ERR, "Invalid AES DOCSISBPI key size");
+			goto error_out;
+		}
+		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+		break;
+	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
+		if (!qat_is_cipher_alg_supported(
+				cipher_xform->algo, internals)) {
+			PMD_DRV_LOG(ERR, "%s not supported on this device",
+				rte_crypto_cipher_algorithm_strings
+					[cipher_xform->algo]);
+			goto error_out;
+		}
+		if (qat_alg_validate_zuc_key(cipher_xform->key.length,
+				&session->qat_cipher_alg) != 0) {
+			PMD_DRV_LOG(ERR, "Invalid ZUC cipher key size");
+			goto error_out;
+		}
+		session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE;
+		break;
 	case RTE_CRYPTO_CIPHER_3DES_ECB:
 	case RTE_CRYPTO_CIPHER_AES_ECB:
 	case RTE_CRYPTO_CIPHER_AES_CCM:
 	case RTE_CRYPTO_CIPHER_AES_F8:
 	case RTE_CRYPTO_CIPHER_AES_XTS:
 	case RTE_CRYPTO_CIPHER_ARC4:
-	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
 		PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u",
 				cipher_xform->algo);
 		goto error_out;
@@ -674,7 +440,10 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev,
 	return session;
 
 error_out:
-	rte_mempool_put(internals->sess_mp, session);
+	if (session->bpi_ctx) {
+		bpi_cipher_ctx_free(session->bpi_ctx);
+		session->bpi_ctx = NULL;
+	}
 	return NULL;
 }
 
@@ -683,12 +452,9 @@ void *
 qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
 		struct rte_crypto_sym_xform *xform, void *session_private)
 {
-	struct qat_pmd_private *internals = dev->data->dev_private;
-
 	struct qat_session *session = session_private;
-
 	int qat_cmd_id;
-
 	PMD_INIT_FUNC_TRACE();
 
 	/* Get requested QAT command id */
@@ -730,10 +496,10 @@ qat_crypto_sym_configure_session(struct rte_cryptodev *dev,
 				session->qat_cmd);
 		goto error_out;
 	}
+
 	return session;
 
 error_out:
-	rte_mempool_put(internals->sess_mp, session);
 	return NULL;
 }
 
@@ -743,10 +509,10 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
 				struct qat_session *session_private)
 {
 
-	struct qat_pmd_private *internals = dev->data->dev_private;
 	struct qat_session *session = session_private;
 	struct rte_crypto_auth_xform *auth_xform = NULL;
 	struct rte_crypto_cipher_xform *cipher_xform = NULL;
+	struct qat_pmd_private *internals = dev->data->dev_private;
 	auth_xform = qat_get_auth_xform(xform);
 
 	switch (auth_xform->algo) {
@@ -786,6 +552,15 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
 	case RTE_CRYPTO_AUTH_KASUMI_F9:
 		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
 		break;
+	case RTE_CRYPTO_AUTH_ZUC_EIA3:
+		if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
+			PMD_DRV_LOG(ERR, "%s not supported on this device",
+				rte_crypto_auth_algorithm_strings
+					[auth_xform->algo]);
+			goto error_out;
+		}
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
+		break;
 	case RTE_CRYPTO_AUTH_SHA1:
 	case RTE_CRYPTO_AUTH_SHA256:
 	case RTE_CRYPTO_AUTH_SHA512:
@@ -795,7 +570,6 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
 	case RTE_CRYPTO_AUTH_AES_CCM:
 	case RTE_CRYPTO_AUTH_AES_CMAC:
 	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
-	case RTE_CRYPTO_AUTH_ZUC_EIA3:
 		PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u",
 				auth_xform->algo);
 		goto error_out;
@@ -828,8 +602,6 @@ qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev,
 	return session;
 
 error_out:
-	if (internals->sess_mp != NULL)
-		rte_mempool_put(internals->sess_mp, session);
 	return NULL;
 }
 
@@ -839,6 +611,112 @@ unsigned qat_crypto_sym_get_session_private_size(
 	return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8);
 }
 
+static inline uint32_t
+qat_bpicipher_preprocess(struct qat_session *ctx,
+				struct rte_crypto_op *op)
+{
+	uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint8_t last_block_len = sym_op->cipher.data.length % block_len;
+
+	if (last_block_len &&
+			ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+
+		/* Decrypt last block */
+		uint8_t *last_block, *dst, *iv;
+		uint32_t last_block_offset = sym_op->cipher.data.offset +
+				sym_op->cipher.data.length - last_block_len;
+		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+				uint8_t *, last_block_offset);
+
+		if (unlikely(sym_op->m_dst != NULL))
+			/* out-of-place operation (OOP) */
+			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+						uint8_t *, last_block_offset);
+		else
+			dst = last_block;
+
+		if (last_block_len < sym_op->cipher.data.length)
+			/* use previous block ciphertext as IV */
+			iv = last_block - block_len;
+		else
+			/* runt block, i.e. less than one full block */
+			iv = sym_op->cipher.iv.data;
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+		rte_hexdump(stdout, "BPI: src before pre-process:", last_block,
+			last_block_len);
+		if (sym_op->m_dst != NULL)
+			rte_hexdump(stdout, "BPI: dst before pre-process:", dst,
+				last_block_len);
+#endif
+		bpi_cipher_decrypt(last_block, dst, iv, block_len,
+				last_block_len, ctx->bpi_ctx);
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
+		rte_hexdump(stdout, "BPI: src after pre-process:", last_block,
+			last_block_len);
+		if (sym_op->m_dst != NULL)
+			rte_hexdump(stdout, "BPI: dst after pre-process:", dst,
+				last_block_len);
+#endif
+	}
+
+	return sym_op->cipher.data.length - last_block_len;
+}
+
+static inline uint32_t
+qat_bpicipher_postprocess(struct qat_session *ctx,
+				struct rte_crypto_op *op)
+{
+	uint8_t block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint8_t last_block_len = sym_op->cipher.data.length % block_len;
+
+	if (last_block_len > 0 &&
+			ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+
+		/* Encrypt last block */
+		uint8_t *last_block, *dst, *iv;
+		uint32_t last_block_offset;
+
+		last_block_offset = sym_op->cipher.data.offset +
+				sym_op->cipher.data.length - last_block_len;
+		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+				uint8_t *, last_block_offset);
+
+		if (unlikely(sym_op->m_dst != NULL))
+			/* out-of-place operation (OOP) */
+			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+						uint8_t *, last_block_offset);
+		else
+			dst = last_block;
+
+		if (last_block_len < sym_op->cipher.data.length)
+			/* use previous block ciphertext as IV */
+			iv = dst - block_len;
+		else
+			/* runt block, i.e. less than one full block */
+			iv = sym_op->cipher.iv.data;
+
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+		rte_hexdump(stdout, "BPI: src before post-process:", last_block,
+			last_block_len);
+		if (sym_op->m_dst != NULL)
+			rte_hexdump(stdout, "BPI: dst before post-process:",
+					dst, last_block_len);
+#endif
+		bpi_cipher_encrypt(last_block, dst, iv, block_len,
+				last_block_len, ctx->bpi_ctx);
+#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
+		rte_hexdump(stdout, "BPI: src after post-process:", last_block,
+			last_block_len);
+		if (sym_op->m_dst != NULL)
+			rte_hexdump(stdout, "BPI: dst after post-process:", dst,
+				last_block_len);
+#endif
+	}
+	return sym_op->cipher.data.length - last_block_len;
+}
 
 uint16_t
 qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
@@ -873,9 +751,16 @@ qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
 	}
 
 	while (nb_ops_sent != nb_ops_possible) {
-		ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail);
+		ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail,
+				tmp_qp->op_cookies[tail / queue->msg_size]);
 		if (ret != 0) {
 			tmp_qp->stats.enqueue_err_count++;
+			/*
+			 * This message cannot be enqueued,
+			 * decrease number of ops that wasnt sent
+			 */
+			rte_atomic16_sub(&tmp_qp->inflights16,
+					nb_ops_possible - nb_ops_sent);
 			if (nb_ops_sent == 0)
 				return 0;
 			goto kick_tail;
@@ -914,15 +799,21 @@ qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
 		rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
-				sizeof(struct icp_qat_fw_comn_resp));
+			sizeof(struct icp_qat_fw_comn_resp));
+
 #endif
 		if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
 				ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
 					resp_msg->comn_hdr.comn_status)) {
 			rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
 		} else {
+			struct qat_session *sess = (struct qat_session *)
+						(rx_op->sym->session->_private);
+			if (sess->bpi_ctx)
+				qat_bpicipher_postprocess(sess, rx_op);
 			rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 		}
+
 		*(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
 		queue->head = adf_modulo(queue->head +
 				queue->msg_size,
@@ -945,8 +836,57 @@ qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
 }
 
 static inline int
-qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
+qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
+		struct qat_alg_buf_list *list, uint32_t data_len)
 {
+	int nr = 1;
+
+	uint32_t buf_len = rte_pktmbuf_mtophys(buf) -
+			buff_start + rte_pktmbuf_data_len(buf);
+
+	list->bufers[0].addr = buff_start;
+	list->bufers[0].resrvd = 0;
+	list->bufers[0].len = buf_len;
+
+	if (data_len <= buf_len) {
+		list->num_bufs = nr;
+		list->bufers[0].len = data_len;
+		return 0;
+	}
+
+	buf = buf->next;
+	while (buf) {
+		if (unlikely(nr == QAT_SGL_MAX_NUMBER)) {
+			PMD_DRV_LOG(ERR, "QAT PMD exceeded size of QAT SGL"
+					" entry(%u)",
+					QAT_SGL_MAX_NUMBER);
+			return -EINVAL;
+		}
+
+		list->bufers[nr].len = rte_pktmbuf_data_len(buf);
+		list->bufers[nr].resrvd = 0;
+		list->bufers[nr].addr = rte_pktmbuf_mtophys(buf);
+
+		buf_len += list->bufers[nr].len;
+		buf = buf->next;
+
+		if (buf_len > data_len) {
+			list->bufers[nr].len -=
+				buf_len - data_len;
+			buf = NULL;
+		}
+		++nr;
+	}
+	list->num_bufs = nr;
+
+	return 0;
+}
+
+static inline int
+qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
+		struct qat_crypto_op_cookie *qat_op_cookie)
+{
+	int ret = 0;
 	struct qat_session *ctx;
 	struct icp_qat_fw_la_cipher_req_params *cipher_param;
 	struct icp_qat_fw_la_auth_req_params *auth_param;
@@ -955,8 +895,8 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
 	uint32_t cipher_len = 0, cipher_ofs = 0;
 	uint32_t auth_len = 0, auth_ofs = 0;
 	uint32_t min_ofs = 0;
-	uint32_t digest_appended = 1;
 	uint64_t src_buf_start = 0, dst_buf_start = 0;
+	uint8_t do_sgl = 0;
 
 
 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
@@ -1001,37 +941,54 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
 
 		if (ctx->qat_cipher_alg ==
 					 ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 ||
-			ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) {
+			ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI ||
+			ctx->qat_cipher_alg ==
+				ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) {
 
 			if (unlikely(
 				(cipher_param->cipher_length % BYTE_LENGTH != 0)
 				 || (cipher_param->cipher_offset
 							% BYTE_LENGTH != 0))) {
 				PMD_DRV_LOG(ERR,
-		  "SNOW3G/KASUMI in QAT PMD only supports byte aligned values");
+		  "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values");
 				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 				return -EINVAL;
 			}
 			cipher_len = op->sym->cipher.data.length >> 3;
 			cipher_ofs = op->sym->cipher.data.offset >> 3;
 
+		} else if (ctx->bpi_ctx) {
+			/* DOCSIS - only send complete blocks to device
+			 * Process any partial block using CFB mode.
+			 * Even if 0 complete blocks, still send this to device
+			 * to get into rx queue for post-process and dequeuing
+			 */
+			cipher_len = qat_bpicipher_preprocess(ctx, op);
+			cipher_ofs = op->sym->cipher.data.offset;
 		} else {
 			cipher_len = op->sym->cipher.data.length;
 			cipher_ofs = op->sym->cipher.data.offset;
 		}
 
 		/* copy IV into request if it fits */
-		if (op->sym->cipher.iv.length && (op->sym->cipher.iv.length <=
-				sizeof(cipher_param->u.cipher_IV_array))) {
-			rte_memcpy(cipher_param->u.cipher_IV_array,
-					op->sym->cipher.iv.data,
-					op->sym->cipher.iv.length);
-		} else {
-			ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
-					qat_req->comn_hdr.serv_specif_flags,
-					ICP_QAT_FW_CIPH_IV_64BIT_PTR);
-			cipher_param->u.s.cipher_IV_ptr =
-					op->sym->cipher.iv.phys_addr;
+		/*
+		 * If IV length is zero do not copy anything but still
+		 * use request descriptor embedded IV
+		 *
+		 */
+		if (op->sym->cipher.iv.length) {
+			if (op->sym->cipher.iv.length <=
+					sizeof(cipher_param->u.cipher_IV_array)) {
+				rte_memcpy(cipher_param->u.cipher_IV_array,
+						op->sym->cipher.iv.data,
+						op->sym->cipher.iv.length);
+			} else {
+				ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+					qat_req->comn_hdr.serv_specif_flags,
+					ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+				cipher_param->u.s.cipher_IV_ptr =
+						op->sym->cipher.iv.phys_addr;
+			}
 		}
 		min_ofs = cipher_ofs;
 	}
@@ -1039,11 +996,13 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
 	if (do_auth) {
 
 		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
-			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9) {
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
+			ctx->qat_hash_alg ==
+				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
 			if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0)
 				|| (auth_param->auth_len % BYTE_LENGTH != 0))) {
 				PMD_DRV_LOG(ERR,
-		"For SNOW3G/KASUMI, QAT PMD only supports byte aligned values");
+		"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
 				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 				return -EINVAL;
 			}
@@ -1062,29 +1021,34 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
 			}
 
+		} else if (ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+				ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+			auth_ofs = op->sym->cipher.data.offset;
+			auth_len = op->sym->cipher.data.length;
 		} else {
 			auth_ofs = op->sym->auth.data.offset;
 			auth_len = op->sym->auth.data.length;
 		}
 		min_ofs = auth_ofs;
 
-		if (op->sym->auth.digest.phys_addr) {
-			ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(
-					qat_req->comn_hdr.serv_specif_flags,
-					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
-			auth_param->auth_res_addr =
-					op->sym->auth.digest.phys_addr;
-			digest_appended = 0;
-		}
+		auth_param->auth_res_addr = op->sym->auth.digest.phys_addr;
 
 		auth_param->u1.aad_adr = op->sym->auth.aad.phys_addr;
 
 	}
 
+	if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
+		do_sgl = 1;
+
 	/* adjust for chain case */
 	if (do_cipher && do_auth)
 		min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
 
+	if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
+		min_ofs = 0;
+
 	if (unlikely(op->sym->m_dst != NULL)) {
 		/* Out-of-place operation (OOP)
 		 * Don't align DMA start. DMA the minimum data-set
@@ -1094,6 +1058,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
 			rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs);
 		dst_buf_start =
 			rte_pktmbuf_mtophys_offset(op->sym->m_dst, min_ofs);
+
 	} else {
 		/* In-place operation
 		 * Start DMA at nearest aligned address below min_ofs
@@ -1139,16 +1104,43 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
 		(cipher_param->cipher_offset + cipher_param->cipher_length)
 		: (auth_param->auth_off + auth_param->auth_len);
 
-	if (do_auth && digest_appended) {
-		if (ctx->auth_op == ICP_QAT_HW_AUTH_GENERATE)
-			qat_req->comn_mid.dst_length
-					+= op->sym->auth.digest.length;
-		else
-			qat_req->comn_mid.src_length
-					+= op->sym->auth.digest.length;
+	if (do_sgl) {
+
+		ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
+				QAT_COMN_PTR_TYPE_SGL);
+		ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start,
+				&qat_op_cookie->qat_sgl_list_src,
+				qat_req->comn_mid.src_length);
+		if (ret) {
+			PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array");
+			return ret;
+		}
+
+		if (likely(op->sym->m_dst == NULL))
+			qat_req->comn_mid.dest_data_addr =
+				qat_req->comn_mid.src_data_addr =
+				qat_op_cookie->qat_sgl_src_phys_addr;
+		else {
+			ret = qat_sgl_fill_array(op->sym->m_dst,
+					dst_buf_start,
+					&qat_op_cookie->qat_sgl_list_dst,
+						qat_req->comn_mid.dst_length);
+
+			if (ret) {
+				PMD_DRV_LOG(ERR, "QAT PMD Cannot "
+						"fill sgl array");
+				return ret;
+			}
+
+			qat_req->comn_mid.src_data_addr =
+				qat_op_cookie->qat_sgl_src_phys_addr;
+			qat_req->comn_mid.dest_data_addr =
+					qat_op_cookie->qat_sgl_dst_phys_addr;
+		}
+	} else {
+		qat_req->comn_mid.src_data_addr = src_buf_start;
+		qat_req->comn_mid.dest_data_addr = dst_buf_start;
 	}
-	qat_req->comn_mid.src_data_addr = src_buf_start;
-	qat_req->comn_mid.dest_data_addr = dst_buf_start;
 
 	if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
 			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
@@ -1180,7 +1172,6 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg)
 		}
 	}
 
-
 #ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX
 	rte_hexdump(stdout, "qat_req:", qat_req,
 			sizeof(struct icp_qat_fw_la_bulk_req));
@@ -1216,10 +1207,11 @@ void qat_crypto_sym_session_init(struct rte_mempool *mp, void *sym_sess)
 			offsetof(struct rte_cryptodev_sym_session, _private);
 }
 
-int qat_dev_config(__rte_unused struct rte_cryptodev *dev)
+int qat_dev_config(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused struct rte_cryptodev_config *config)
 {
 	PMD_INIT_FUNC_TRACE();
-	return -ENOTSUP;
+	return 0;
 }
 
 int qat_dev_start(__rte_unused struct rte_cryptodev *dev)
@@ -1259,7 +1251,7 @@ void qat_dev_info_get(__rte_unused struct rte_cryptodev *dev,
 				ADF_NUM_SYM_QPS_PER_BUNDLE *
 				ADF_NUM_BUNDLES_PER_DEV;
 		info->feature_flags = dev->feature_flags;
-		info->capabilities = qat_pmd_capabilities;
+		info->capabilities = internals->qat_dev_capabilities;
 		info->sym.max_nb_sessions = internals->max_nb_sessions;
 		info->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
 	}
@@ -1283,9 +1275,9 @@ void qat_crypto_sym_stats_get(struct rte_cryptodev *dev,
 		}
 
 		stats->enqueued_count += qp[i]->stats.enqueued_count;
-		stats->dequeued_count += qp[i]->stats.enqueued_count;
+		stats->dequeued_count += qp[i]->stats.dequeued_count;
 		stats->enqueue_err_count += qp[i]->stats.enqueue_err_count;
-		stats->dequeue_err_count += qp[i]->stats.enqueue_err_count;
+		stats->dequeue_err_count += qp[i]->stats.dequeue_err_count;
 	}
 }
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 6b844881..b740d6b0 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -37,6 +37,8 @@
 #include <rte_cryptodev_pmd.h>
 #include <rte_memzone.h>
 
+#include "qat_crypto_capabilities.h"
+
 /*
  * This macro rounds up a number to a be a multiple of
  * the alignment when the alignment is a power of 2
@@ -69,20 +71,22 @@ struct qat_qp {
 	struct qat_queue	tx_q;
 	struct qat_queue	rx_q;
 	struct rte_cryptodev_stats stats;
+	struct rte_mempool *op_cookie_pool;
+	void **op_cookies;
+	uint32_t nb_descriptors;
 } __rte_cache_aligned;
 
 /** private data structure for each QAT device */
 struct qat_pmd_private {
-	char sess_mp_name[RTE_MEMPOOL_NAMESIZE];
-	struct rte_mempool *sess_mp;
-
 	unsigned max_nb_queue_pairs;
 	/**< Max number of queue pairs supported by device */
 	unsigned max_nb_sessions;
 	/**< Max number of sessions supported by device */
+	const struct rte_cryptodev_capabilities *qat_dev_capabilities;
 };
 
-int qat_dev_config(struct rte_cryptodev *dev);
+int qat_dev_config(struct rte_cryptodev *dev,
+		struct rte_cryptodev_config *config);
 int qat_dev_start(struct rte_cryptodev *dev);
 void qat_dev_stop(struct rte_cryptodev *dev);
 int qat_dev_close(struct rte_cryptodev *dev);
diff --git a/drivers/crypto/qat/qat_crypto_capabilities.h b/drivers/crypto/qat/qat_crypto_capabilities.h
new file mode 100644
index 00000000..1294f247
--- /dev/null
+++ b/drivers/crypto/qat/qat_crypto_capabilities.h
@@ -0,0 +1,574 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _QAT_CRYPTO_CAPABILITIES_H_
+#define _QAT_CRYPTO_CAPABILITIES_H_
+
+#define QAT_BASE_CPM16_SYM_CAPABILITIES					\
+	{	/* SHA1 HMAC */						\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,	\
+				.block_size = 64,			\
+				.key_size = {				\
+					.min = 64,			\
+					.max = 64,			\
+					.increment = 0			\
+				},					\
+				.digest_size = {			\
+					.min = 20,			\
+					.max = 20,			\
+					.increment = 0			\
+				},					\
+				.aad_size = { 0 }			\
+			}, }						\
+		}, }							\
+	},								\
+	{	/* SHA224 HMAC */					\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_SHA224_HMAC,	\
+				.block_size = 64,			\
+				.key_size = {				\
+					.min = 64,			\
+					.max = 64,			\
+					.increment = 0			\
+				},					\
+				.digest_size = {			\
+					.min = 28,			\
+					.max = 28,			\
+					.increment = 0			\
+				},					\
+				.aad_size = { 0 }			\
+			}, }						\
+		}, }							\
+	},								\
+	{	/* SHA256 HMAC */					\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_SHA256_HMAC,	\
+				.block_size = 64,			\
+				.key_size = {				\
+					.min = 64,			\
+					.max = 64,			\
+					.increment = 0			\
+				},					\
+				.digest_size = {			\
+					.min = 32,			\
+					.max = 32,			\
+					.increment = 0			\
+				},					\
+				.aad_size = { 0 }			\
+			}, }						\
+		}, }							\
+	},								\
+	{	/* SHA384 HMAC */					\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_SHA384_HMAC,	\
+				.block_size = 64,			\
+				.key_size = {				\
+					.min = 128,			\
+					.max = 128,			\
+					.increment = 0			\
+				},					\
+				.digest_size = {			\
+					.min = 48,			\
+					.max = 48,			\
+					.increment = 0			\
+				},					\
+				.aad_size = { 0 }			\
+			}, }						\
+		}, }							\
+	},								\
+	{	/* SHA512 HMAC */					\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_SHA512_HMAC,	\
+				.block_size = 128,			\
+				.key_size = {				\
+					.min = 128,			\
+					.max = 128,			\
+					.increment = 0			\
+				},					\
+				.digest_size = {			\
+					.min = 64,			\
+					.max = 64,			\
+					.increment = 0			\
+				},					\
+				.aad_size = { 0 }			\
+			}, }						\
+		}, }							\
+	},								\
+	{	/* MD5 HMAC */						\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_MD5_HMAC,	\
+				.block_size = 64,			\
+				.key_size = {				\
+					.min = 8,			\
+					.max = 64,			\
+					.increment = 8			\
+				},					\
+				.digest_size = {			\
+					.min = 16,			\
+					.max = 16,			\
+					.increment = 0			\
+				},					\
+				.aad_size = { 0 }			\
+			}, }						\
+		}, }							\
+	},								\
+	{	/* AES XCBC MAC */					\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC,	\
+				.block_size = 16,			\
+				.key_size = {				\
+					.min = 16,			\
+					.max = 16,			\
+					.increment = 0			\
+				},					\
+				.digest_size = {			\
+					.min = 16,			\
+					.max = 16,			\
+					.increment = 0			\
+				},					\
+				.aad_size = { 0 }			\
+			}, }						\
+		}, }							\
+	},								\
+	{	/* AES GCM (AUTH) */					\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_AES_GCM,	\
+				.block_size = 16,			\
+				.key_size = {				\
+					.min = 16,			\
+					.max = 32,			\
+					.increment = 8			\
+				},					\
+				.digest_size = {			\
+					.min = 8,			\
+					.max = 16,			\
+					.increment = 4			\
+				},					\
+				.aad_size = {				\
+					.min = 0,			\
+					.max = 240,			\
+					.increment = 1			\
+				}					\
+			}, }						\
+		}, }							\
+	},								\
+	{	/* AES GMAC (AUTH) */					\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,	\
+				.block_size = 16,			\
+				.key_size = {				\
+					.min = 16,			\
+					.max = 32,			\
+					.increment = 8			\
+				},					\
+				.digest_size = {			\
+					.min = 8,			\
+					.max = 16,			\
+					.increment = 4			\
+				},					\
+				.aad_size = {				\
+					.min = 1,			\
+					.max = 65535,			\
+					.increment = 1			\
+				}					\
+			}, }						\
+		}, }							\
+	},								\
+	{	/* SNOW 3G (UIA2) */					\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2,	\
+				.block_size = 16,			\
+				.key_size = {				\
+					.min = 16,			\
+					.max = 16,			\
+					.increment = 0			\
+				},					\
+				.digest_size = {			\
+					.min = 4,			\
+					.max = 4,			\
+					.increment = 0			\
+				},					\
+				.aad_size = {				\
+					.min = 16,			\
+					.max = 16,			\
+					.increment = 0			\
+				}					\
+			}, }						\
+		}, }							\
+	},								\
+	{	/* AES GCM (CIPHER) */					\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
+			{.cipher = {					\
+				.algo = RTE_CRYPTO_CIPHER_AES_GCM,	\
+				.block_size = 16,			\
+				.key_size = {				\
+					.min = 16,			\
+					.max = 32,			\
+					.increment = 8			\
+				},					\
+				.iv_size = {				\
+					.min = 12,			\
+					.max = 12,			\
+					.increment = 0			\
+				}					\
+			}, }						\
+		}, }							\
+	},								\
+	{	/* AES CBC */						\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
+			{.cipher = {					\
+				.algo = RTE_CRYPTO_CIPHER_AES_CBC,	\
+				.block_size = 16,			\
+				.key_size = {				\
+					.min = 16,			\
+					.max = 32,			\
+					.increment = 8			\
+				},					\
+				.iv_size = {				\
+					.min = 16,			\
+					.max = 16,			\
+					.increment = 0			\
+				}					\
+			}, }						\
+		}, }							\
+	},								\
+	{	/* AES DOCSIS BPI */					\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
+			{.cipher = {					\
+				.algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,\
+				.block_size = 16,			\
+				.key_size = {				\
+					.min = 16,			\
+					.max = 16,			\
+					.increment = 0			\
+				},					\
+				.iv_size = {				\
+					.min = 16,			\
+					.max = 16,			\
+					.increment = 0			\
+				}					\
+			}, }						\
+		}, }							\
+	},								\
+	{	/* SNOW 3G (UEA2) */					\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
+			{.cipher = {					\
+				.algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2,	\
+				.block_size = 16,			\
+				.key_size = {				\
+					.min = 16,			\
+					.max = 16,			\
+					.increment = 0			\
+				},					\
+				.iv_size = {				\
+					.min = 16,			\
+					.max = 16,			\
+					.increment = 0			\
+				}					\
+			}, }						\
+		}, }							\
+	},								\
+	{	/* AES CTR */						\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,	\
+			{.cipher = {					\
+				.algo = RTE_CRYPTO_CIPHER_AES_CTR,	\
+				.block_size = 16,			\
+				.key_size = {				\
+					.min = 16,			\
+					.max = 32,			\
+					.increment = 8			\
+				},					\
+				.iv_size = {				\
+					.min = 16,			\
+					.max = 16,			\
+					.increment = 0			\
+				}					\
+			}, }						\
+		}, }							\
+	},								\
+	{	/* NULL (AUTH) */					\
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,			\
+		{.sym = {						\
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,	\
+			{.auth = {					\
+				.algo = RTE_CRYPTO_AUTH_NULL,		\
+				.block_size = 1,			\
+				.key_size = {				\
+					.min = 0,			\
+					.max = 0,			\
+					.increment = 0			\
+				},					\
+				.digest_size = {			\
+					.min = 0,			\
+					.max = 0,			\
+					.increment = 0			\
+				},					\
+				.aad_size = { 0 }			\
+			}, },						\
+		}, },							\
+	},								\
+	{	/* NULL (CIPHER) */					\
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_NULL, \ + .block_size = 1, \ + .key_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + }, \ + .iv_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + } \ + }, }, \ + }, } \ + }, \ + { /* KASUMI (F8) */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_KASUMI_F8, \ + .block_size = 8, \ + .key_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .iv_size = { \ + .min = 8, \ + .max = 8, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* KASUMI (F9) */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_KASUMI_F9, \ + .block_size = 8, \ + .key_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 4, \ + .max = 4, \ + .increment = 0 \ + }, \ + .aad_size = { \ + .min = 8, \ + .max = 8, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* 3DES CBC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_3DES_CBC, \ + .block_size = 8, \ + .key_size = { \ + .min = 16, \ + .max = 24, \ + .increment = 8 \ + }, \ + .iv_size = { \ + .min = 8, \ + .max = 8, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* 3DES CTR */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_3DES_CTR, \ + .block_size = 8, \ + .key_size = { \ + .min = 16, \ + .max = 24, \ + .increment = 8 \ + }, \ + .iv_size = { \ + .min = 8, \ + .max = 8, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* DES CBC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_DES_CBC, \ + .block_size = 8, \ + .key_size = { \ + .min = 8, \ + .max = 8, \ + .increment = 0 \ + }, \ + .iv_size = { \ + .min = 8, \ + .max = 8, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* DES DOCSISBPI */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,\ + .block_size = 8, \ + .key_size = { \ + .min = 8, \ + .max = 8, \ + .increment = 0 \ + }, \ + .iv_size = { \ + .min = 8, \ + .max = 8, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + } + +#define QAT_EXTRA_CPM17_SYM_CAPABILITIES \ + { /* ZUC (EEA3) */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_ZUC_EEA3, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .iv_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* ZUC (EIA3) */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_ZUC_EIA3, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 4, \ + .max = 4, \ + .increment = 0 \ + }, \ + .aad_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + } + +#endif /* _QAT_CRYPTO_CAPABILITIES_H_ */ diff --git 
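Applications never parse these macros directly; they reach the tables through the PMD's info_get callback and the generic capability helpers in rte_cryptodev.h. The following is a minimal sketch (not part of this patch) of how a caller might validate an algorithm against the table above, assuming the rte_cryptodev_sym_capability_*() helpers of this DPDK release; the wrapper function name and dev_id parameter are illustrative only.

#include <rte_cryptodev.h>

/* Hypothetical check: does dev_id advertise SHA1-HMAC with the fixed
 * 64-byte key and 20-byte digest listed in the table above? */
static int
qat_dev_supports_sha1_hmac(uint8_t dev_id)
{
	const struct rte_cryptodev_symmetric_capability *cap;
	const struct rte_cryptodev_sym_capability_idx idx = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.algo.auth = RTE_CRYPTO_AUTH_SHA1_HMAC,
	};

	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
	if (cap == NULL)
		return 0;	/* algorithm not advertised by this device */

	/* Returns 0 when key/digest/aad sizes fall inside the ranges. */
	return rte_cryptodev_sym_capability_check_auth(cap, 64, 20, 0) == 0;
}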
diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c
index 2e7188bd..a358ccd7 100644
--- a/drivers/crypto/qat/qat_qp.c
+++ b/drivers/crypto/qat/qat_qp.c
@@ -41,6 +41,7 @@
 
 #include "qat_logs.h"
 #include "qat_crypto.h"
+#include "qat_algs.h"
 #include "adf_transport_access_macros.h"
 
 #define ADF_MAX_SYM_DESC			4096
@@ -135,7 +136,10 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
 	int socket_id)
 {
 	struct qat_qp *qp;
+	struct rte_pci_device *pci_dev;
 	int ret;
+	char op_cookie_pool_name[RTE_RING_NAMESIZE];
+	uint32_t i;
 
 	PMD_INIT_FUNC_TRACE();
@@ -153,7 +157,9 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
 		return -EINVAL;
 	}
 
-	if (dev->pci_dev->mem_resource[0].addr == NULL) {
+	pci_dev = RTE_DEV_TO_PCI(dev->device);
+
+	if (pci_dev->mem_resource[0].addr == NULL) {
 		PMD_DRV_LOG(ERR, "Could not find VF config space "
 				"(UIO driver attached?).");
 		return -EINVAL;
@@ -166,7 +172,6 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
 				queue_pair_id);
 		return -EINVAL;
 	}
-
 	/* Allocate the queue pair data structure. */
 	qp = rte_zmalloc("qat PMD qp metadata",
 			sizeof(*qp), RTE_CACHE_LINE_SIZE);
@@ -174,7 +179,12 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
 		PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
 		return -ENOMEM;
 	}
-	qp->mmap_bar_addr = dev->pci_dev->mem_resource[0].addr;
+	qp->nb_descriptors = qp_conf->nb_descriptors;
+	qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer",
+			qp_conf->nb_descriptors * sizeof(*qp->op_cookies),
+			RTE_CACHE_LINE_SIZE);
+
+	qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
 	rte_atomic16_init(&qp->inflights16);
 
 	if (qat_tx_queue_create(dev, &(qp->tx_q),
@@ -191,8 +201,47 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
 		qat_queue_delete(&(qp->tx_q));
 		goto create_err;
 	}
+
 	adf_configure_queues(qp);
 	adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr);
+	snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu",
+		dev->driver->pci_drv.driver.name, dev->data->dev_id,
+		queue_pair_id);
+
+	qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name);
+	if (qp->op_cookie_pool == NULL)
+		qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name,
+				qp->nb_descriptors,
+				sizeof(struct qat_crypto_op_cookie), 64, 0,
+				NULL, NULL, NULL, NULL, socket_id,
+				0);
+	if (!qp->op_cookie_pool) {
+		PMD_DRV_LOG(ERR, "QAT PMD Cannot create"
+				" op mempool");
+		goto create_err;
+	}
+
+	for (i = 0; i < qp->nb_descriptors; i++) {
+		if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) {
+			PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie");
+			return -EFAULT;
+		}
+
+		struct qat_crypto_op_cookie *sql_cookie =
+				qp->op_cookies[i];
+
+		sql_cookie->qat_sgl_src_phys_addr =
+				rte_mempool_virt2phy(qp->op_cookie_pool,
+				sql_cookie) +
+				offsetof(struct qat_crypto_op_cookie,
+				qat_sgl_list_src);
+
+		sql_cookie->qat_sgl_dst_phys_addr =
+				rte_mempool_virt2phy(qp->op_cookie_pool,
+				sql_cookie) +
+				offsetof(struct qat_crypto_op_cookie,
+				qat_sgl_list_dst);
+	}
 
 	dev->data->queue_pairs[queue_pair_id] = qp;
 	return 0;
@@ -205,6 +254,7 @@ int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
 {
 	struct qat_qp *qp =
 			(struct qat_qp *)dev->data->queue_pairs[queue_pair_id];
+	uint32_t i;
 
 	PMD_INIT_FUNC_TRACE();
 	if (qp == NULL) {
@@ -221,6 +271,14 @@ int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
 	}
 
 	adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr);
+
+	for (i = 0; i < qp->nb_descriptors; i++)
+		rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]);
+
+	if (qp->op_cookie_pool)
+		rte_mempool_free(qp->op_cookie_pool);
+
+	rte_free(qp->op_cookies);
 	rte_free(qp);
 	dev->data->queue_pairs[queue_pair_id] = NULL;
 	return 0;
@@ -289,6 +347,7 @@ qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
 	void *io_addr;
 	const struct rte_memzone *qp_mz;
 	uint32_t queue_size_bytes = nb_desc*desc_size;
+	struct rte_pci_device *pci_dev;
 
 	PMD_INIT_FUNC_TRACE();
 	if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
@@ -349,7 +408,9 @@ qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
 	queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
 			queue->queue_size);
 
-	io_addr = dev->pci_dev->mem_resource[0].addr;
+	pci_dev = RTE_DEV_TO_PCI(dev->device);
+
+	io_addr = pci_dev->mem_resource[0].addr;
 
 	WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
 			queue->hw_queue_number, queue_base);
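The setup loop above resolves each cookie's two SGL tables to physical addresses once, at queue-pair creation, so the per-packet path never pays for a virt-to-phys translation. For context, here is a minimal sketch (illustrative, not part of this patch) of how an enqueue path could flatten a chained mbuf into such a table; it assumes the qat_alg_buf_list/QAT_SGL_MAX_NUMBER definitions from qat_algs.h (including its "bufers" spelling) and a len/resrvd/addr layout for qat_alg_buf, and the helper name qat_sgl_fill is hypothetical.

#include <rte_mbuf.h>
#include "qat_algs.h"

/* Hypothetical helper: copy a segmented mbuf into the flat QAT SGL
 * whose physical address was precomputed in the op cookie. */
static int
qat_sgl_fill(struct qat_alg_buf_list *list, struct rte_mbuf *buf,
		uint32_t data_len)
{
	uint32_t nr = 0;

	while (buf != NULL && data_len > 0) {
		uint32_t len = RTE_MIN(data_len, rte_pktmbuf_data_len(buf));

		if (nr == QAT_SGL_MAX_NUMBER)
			return -EINVAL;	/* chain exceeds the flat list */

		list->bufers[nr].len = len;
		list->bufers[nr].resrvd = 0;
		list->bufers[nr].addr = rte_pktmbuf_mtophys(buf);
		nr++;
		data_len -= len;
		buf = buf->next;
	}
	list->num_bufs = nr;
	return 0;
}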
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index 1e7ee61c..1bdd30da 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -39,6 +39,17 @@
 #include "qat_crypto.h"
 #include "qat_logs.h"
 
+static const struct rte_cryptodev_capabilities qat_cpm16_capabilities[] = {
+	QAT_BASE_CPM16_SYM_CAPABILITIES,
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_cryptodev_capabilities qat_cpm17_capabilities[] = {
+	QAT_BASE_CPM16_SYM_CAPABILITIES,
+	QAT_EXTRA_CPM17_SYM_CAPABILITIES,
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
 static struct rte_cryptodev_ops crypto_qat_ops = {
 
 		/* Device related operations */
@@ -67,7 +78,7 @@ static struct rte_cryptodev_ops crypto_qat_ops = {
  * The set of PCI devices this driver supports
  */
 
-static struct rte_pci_id pci_id_qat_map[] = {
+static const struct rte_pci_id pci_id_qat_map[] = {
 		{
 			RTE_PCI_DEVICE(0x8086, 0x0443),
 		},
@@ -77,6 +88,9 @@ static struct rte_pci_id pci_id_qat_map[] = {
 		{
 			RTE_PCI_DEVICE(0x8086, 0x19e3),
 		},
+		{
+			RTE_PCI_DEVICE(0x8086, 0x6f55),
+		},
 		{.device_id = 0},
 };
 
@@ -88,9 +102,9 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
 	PMD_INIT_FUNC_TRACE();
 	PMD_DRV_LOG(DEBUG, "Found crypto device at %02x:%02x.%x",
-		cryptodev->pci_dev->addr.bus,
-		cryptodev->pci_dev->addr.devid,
-		cryptodev->pci_dev->addr.function);
+		RTE_DEV_TO_PCI(cryptodev->device)->addr.bus,
+		RTE_DEV_TO_PCI(cryptodev->device)->addr.devid,
+		RTE_DEV_TO_PCI(cryptodev->device)->addr.function);
 
 	cryptodev->dev_type = RTE_CRYPTODEV_QAT_SYM_PMD;
 	cryptodev->dev_ops = &crypto_qat_ops;
@@ -100,10 +114,25 @@ crypto_qat_dev_init(__attribute__((unused)) struct rte_cryptodev_driver *crypto_
 	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
 			RTE_CRYPTODEV_FF_HW_ACCELERATED |
-			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
 
 	internals = cryptodev->data->dev_private;
 	internals->max_nb_sessions = RTE_QAT_PMD_MAX_NB_SESSIONS;
+	switch (RTE_DEV_TO_PCI(cryptodev->device)->id.device_id) {
+	case 0x0443:
+		internals->qat_dev_capabilities = qat_cpm16_capabilities;
+		break;
+	case 0x37c9:
+	case 0x19e3:
+	case 0x6f55:
+		internals->qat_dev_capabilities = qat_cpm17_capabilities;
+		break;
+	default:
+		PMD_DRV_LOG(ERR,
+			"Invalid dev_id, can't determine capabilities");
+		break;
+	}
 
 	/*
 	 * For secondary processes, we don't initialise any further as primary