author     Christian Ehrhardt <christian.ehrhardt@canonical.com>  2017-05-16 14:51:32 +0200
committer  Christian Ehrhardt <christian.ehrhardt@canonical.com>  2017-05-16 16:20:45 +0200
commit     7595afa4d30097c1177b69257118d8ad89a539be (patch)
tree       4bfeadc905c977e45e54a90c42330553b8942e4e /drivers/crypto/aesni_gcm
parent     ce3d555e43e3795b5d9507fcfc76b7a0a92fd0d6 (diff)
Imported Upstream version 17.05
Change-Id: Id1e419c5a214e4a18739663b91f0f9a549f1fdc6
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Diffstat (limited to 'drivers/crypto/aesni_gcm')
-rw-r--r--  drivers/crypto/aesni_gcm/Makefile                 |  15
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_ops.h          |  95
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_pmd.c          | 403
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c      |  54
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h  |  15
5 files changed, 264 insertions(+), 318 deletions(-)
diff --git a/drivers/crypto/aesni_gcm/Makefile b/drivers/crypto/aesni_gcm/Makefile
index 5898cae1..59a7c6a9 100644
--- a/drivers/crypto/aesni_gcm/Makefile
+++ b/drivers/crypto/aesni_gcm/Makefile
@@ -31,9 +31,6 @@
 include $(RTE_SDK)/mk/rte.vars.mk
 
 ifneq ($(MAKECMDGOALS),clean)
-ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),)
-$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable")
-endif
 endif
 
 # library name
@@ -50,10 +47,7 @@ LIBABIVER := 1
 EXPORT_MAP := rte_pmd_aesni_gcm_version.map
 
 # external library dependencies
-CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
-CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
-LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
-LDLIBS += -lcrypto
+LDLIBS += -lisal_crypto
 
 # library source files
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd.c
@@ -62,11 +56,4 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd_ops.c
 
 # export include files
 SYMLINK-y-include +=
 
-# library dependencies
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_mempool
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_ring
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_cryptodev
-
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
index c399068c..e9de6546 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -37,91 +37,26 @@
 #define LINUX
 #endif
 
-#include <gcm_defines.h>
-#include <aux_funcs.h>
+#include <isa-l_crypto/aes_gcm.h>
 
-/** Supported vector modes */
-enum aesni_gcm_vector_mode {
-	RTE_AESNI_GCM_NOT_SUPPORTED = 0,
-	RTE_AESNI_GCM_SSE,
-	RTE_AESNI_GCM_AVX,
-	RTE_AESNI_GCM_AVX2
-};
-
-typedef void (*aes_keyexp_128_enc_t)(void *key, void *enc_exp_keys);
+typedef void (*aesni_gcm_init_t)(struct gcm_data *my_ctx_data,
+		uint8_t *iv,
+		uint8_t const *aad,
+		uint64_t aad_len);
 
-typedef void (*aesni_gcm_t)(gcm_data *my_ctx_data, u8 *out, const u8 *in,
-		u64 plaintext_len, u8 *iv, const u8 *aad, u64 aad_len,
-		u8 *auth_tag, u64 auth_tag_len);
+typedef void (*aesni_gcm_update_t)(struct gcm_data *my_ctx_data,
+		uint8_t *out,
+		const uint8_t *in,
+		uint64_t plaintext_len);
 
-typedef void (*aesni_gcm_precomp_t)(gcm_data *my_ctx_data, u8 *hash_subkey);
+typedef void (*aesni_gcm_finalize_t)(struct gcm_data *my_ctx_data,
+		uint8_t *auth_tag,
+		uint64_t auth_tag_len);
 
-/** GCM library function pointer table */
 struct aesni_gcm_ops {
-	struct {
-		struct {
-			aes_keyexp_128_enc_t aes128_enc;
-			/**< AES128 enc key expansion */
-		} keyexp;
-		/**< Key expansion functions */
-	} aux; /**< Auxiliary functions */
-
-	struct {
-		aesni_gcm_t enc;	/**< GCM encode function pointer */
-		aesni_gcm_t dec;	/**< GCM decode function pointer */
-		aesni_gcm_precomp_t precomp;	/**< GCM pre-compute */
-	} gcm; /**< GCM functions */
+	aesni_gcm_init_t init;
+	aesni_gcm_update_t update;
+	aesni_gcm_finalize_t finalize;
 };
-
-static const struct aesni_gcm_ops gcm_ops[] = {
-	[RTE_AESNI_GCM_NOT_SUPPORTED] = {
-		.aux = {
-			.keyexp = {
-				NULL
-			}
-		},
-		.gcm = {
-			NULL
-		}
-	},
-	[RTE_AESNI_GCM_SSE] = {
-		.aux = {
-			.keyexp = {
-				aes_keyexp_128_enc_sse
-			}
-		},
-		.gcm = {
-			aesni_gcm_enc_sse,
-			aesni_gcm_dec_sse,
-			aesni_gcm_precomp_sse
-		}
-	},
-	[RTE_AESNI_GCM_AVX] = {
-		.aux = {
-			.keyexp = {
-				aes_keyexp_128_enc_avx,
-			}
-		},
-		.gcm = {
-			aesni_gcm_enc_avx_gen2,
-			aesni_gcm_dec_avx_gen2,
-			aesni_gcm_precomp_avx_gen2
-		}
-	},
-	[RTE_AESNI_GCM_AVX2] = {
-		.aux = {
-			.keyexp = {
-				aes_keyexp_128_enc_avx2,
-			}
-		},
-		.gcm = {
-			aesni_gcm_enc_avx_gen4,
-			aesni_gcm_dec_avx_gen4,
-			aesni_gcm_precomp_avx_gen4
-		}
-	}
-};
-
-
 #endif /* _AESNI_GCM_OPS_H_ */
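The aesni_gcm_ops.h rework above replaces the multi-buffer library's one-shot enc/dec entry points with ISA-L's three-stage init/update/finalize API. Below is a minimal, self-contained sketch of how one operation flows through such a function-pointer table; struct gcm_data and the my_* stubs are placeholders standing in for the ISA-L context and the aesni_gcm128_*/aesni_gcm256_* routines, not the real implementations.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct gcm_data { uint8_t state[64]; };	/* placeholder for ISA-L's context */

static void my_init(struct gcm_data *d, uint8_t *iv,
		const uint8_t *aad, uint64_t aad_len)
{
	(void)d; (void)iv; (void)aad; (void)aad_len;	/* would absorb the AAD */
}

static void my_update(struct gcm_data *d, uint8_t *out,
		const uint8_t *in, uint64_t len)
{
	(void)d;
	memcpy(out, in, len);	/* stand-in for the AES-GCM keystream pass */
}

static void my_finalize(struct gcm_data *d, uint8_t *tag, uint64_t tag_len)
{
	(void)d;
	memset(tag, 0xa5, tag_len);	/* stand-in for tag generation */
}

struct aesni_gcm_ops {
	void (*init)(struct gcm_data *, uint8_t *, const uint8_t *, uint64_t);
	void (*update)(struct gcm_data *, uint8_t *, const uint8_t *, uint64_t);
	void (*finalize)(struct gcm_data *, uint8_t *, uint64_t);
};

int main(void)
{
	struct aesni_gcm_ops ops = { my_init, my_update, my_finalize };
	struct gcm_data gdata;
	uint8_t iv[12] = {0}, aad[8] = {0}, tag[16];
	uint8_t in[32] = "thirty-two bytes of plaintext!!", out[32];

	/* One operation: init once, update once per segment, then finalize. */
	ops.init(&gdata, iv, aad, sizeof(aad));
	ops.update(&gdata, out, in, sizeof(in));
	ops.finalize(&gdata, tag, sizeof(tag));
	printf("tag[0] = 0x%02x\n", tag[0]);
	return 0;
}

The split into three stages is what makes the scatter-gather support in aesni_gcm_pmd.c (next file) possible: update() can be called once per mbuf segment.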
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index af3d60f0..101ef98b 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -30,8 +30,6 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <openssl/aes.h>
-
 #include <rte_common.h>
 #include <rte_config.h>
 #include <rte_hexdump.h>
@@ -44,133 +42,96 @@
 
 #include "aesni_gcm_pmd_private.h"
 
-/**
- * Global static parameter used to create a unique name for each AES-NI multi
- * buffer crypto device.
- */
-static unsigned unique_name_id;
-
-static inline int
-create_unique_device_name(char *name, size_t size)
-{
-	int ret;
-
-	if (name == NULL)
-		return -EINVAL;
-
-	ret = snprintf(name, size, "%s_%u", RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD),
-			unique_name_id++);
-	if (ret < 0)
-		return ret;
-	return 0;
-}
-
-static int
-aesni_gcm_calculate_hash_sub_key(uint8_t *hsubkey, unsigned hsubkey_length,
-		uint8_t *aeskey, unsigned aeskey_length)
-{
-	uint8_t key[aeskey_length] __rte_aligned(16);
-	AES_KEY enc_key;
-
-	if (hsubkey_length % 16 != 0 && aeskey_length % 16 != 0)
-		return -EFAULT;
-
-	memcpy(key, aeskey, aeskey_length);
-
-	if (AES_set_encrypt_key(key, aeskey_length << 3, &enc_key) != 0)
-		return -EFAULT;
-
-	AES_encrypt(hsubkey, hsubkey, &enc_key);
-
-	return 0;
-}
-
-/** Get xform chain order */
-static int
-aesni_gcm_get_mode(const struct rte_crypto_sym_xform *xform)
-{
-	/*
-	 * GCM only supports authenticated encryption or authenticated
-	 * decryption, all other options are invalid, so we must have exactly
-	 * 2 xform structs chained together
-	 */
-	if (xform->next == NULL || xform->next->next != NULL)
-		return -1;
-
-	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
-			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
-		return AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
-	}
-
-	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
-			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
-		return AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
-	}
+/** GCM encode functions pointer table */
+static const struct aesni_gcm_ops aesni_gcm_enc[] = {
+		[AESNI_GCM_KEY_128] = {
+				aesni_gcm128_init,
+				aesni_gcm128_enc_update,
+				aesni_gcm128_enc_finalize
+		},
+		[AESNI_GCM_KEY_256] = {
+				aesni_gcm256_init,
+				aesni_gcm256_enc_update,
+				aesni_gcm256_enc_finalize
+		}
+};
 
-	return -1;
-}
+/** GCM decode functions pointer table */
+static const struct aesni_gcm_ops aesni_gcm_dec[] = {
+		[AESNI_GCM_KEY_128] = {
+				aesni_gcm128_init,
+				aesni_gcm128_dec_update,
+				aesni_gcm128_dec_finalize
+		},
+		[AESNI_GCM_KEY_256] = {
+				aesni_gcm256_init,
+				aesni_gcm256_dec_update,
+				aesni_gcm256_dec_finalize
+		}
+};
 
 /** Parse crypto xform chain and set private session parameters */
 int
-aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
-		struct aesni_gcm_session *sess,
+aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
 		const struct rte_crypto_sym_xform *xform)
 {
-	const struct rte_crypto_sym_xform *auth_xform = NULL;
-	const struct rte_crypto_sym_xform *cipher_xform = NULL;
-
-	uint8_t hsubkey[16] __rte_aligned(16) = { 0 };
+	const struct rte_crypto_sym_xform *auth_xform;
+	const struct rte_crypto_sym_xform *cipher_xform;
 
-	/* Select Crypto operation - hash then cipher / cipher then hash */
-	switch (aesni_gcm_get_mode(xform)) {
-	case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
-		sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+	if (xform->next == NULL || xform->next->next != NULL) {
+		GCM_LOG_ERR("Two and only two chained xform required");
+		return -EINVAL;
+	}
 
-		cipher_xform = xform;
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
 		auth_xform = xform->next;
-		break;
-	case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
-		sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
-
+		cipher_xform = xform;
+	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
 		auth_xform = xform;
 		cipher_xform = xform->next;
-		break;
-	default:
-		GCM_LOG_ERR("Unsupported operation chain order parameter");
+	} else {
+		GCM_LOG_ERR("Cipher and auth xform required");
 		return -EINVAL;
 	}
 
-	/* We only support AES GCM */
-	if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_GCM &&
-			auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GCM)
+	if (!(cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_GCM &&
+		(auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GCM ||
+			auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC))) {
+		GCM_LOG_ERR("We only support AES GCM and AES GMAC");
 		return -EINVAL;
+	}
 
-	/* Select cipher direction */
-	if (sess->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION &&
-			cipher_xform->cipher.op !=
-					RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
-		GCM_LOG_ERR("xform chain (CIPHER/AUTH) and cipher operation "
-				"(DECRYPT) specified are an invalid selection");
-		return -EINVAL;
-	} else if (sess->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION &&
-			cipher_xform->cipher.op !=
-					RTE_CRYPTO_CIPHER_OP_DECRYPT) {
-		GCM_LOG_ERR("xform chain (AUTH/CIPHER) and cipher operation "
-				"(ENCRYPT) specified are an invalid selection");
+	/* Select Crypto operation */
+	if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
+			auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
+		sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+	else if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
+			auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
+		sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+	else {
+		GCM_LOG_ERR("Cipher/Auth operations: Encrypt/Generate or"
+				" Decrypt/Verify are valid only");
 		return -EINVAL;
 	}
 
-	/* Expand GCM AES128 key */
-	(*gcm_ops->aux.keyexp.aes128_enc)(cipher_xform->cipher.key.data,
-			sess->gdata.expanded_keys);
+	/* Check key length and calculate GCM pre-compute. */
+	switch (cipher_xform->cipher.key.length) {
+	case 16:
+		aesni_gcm128_pre(cipher_xform->cipher.key.data, &sess->gdata);
+		sess->key = AESNI_GCM_KEY_128;
 
-	/* Calculate hash sub key here */
-	aesni_gcm_calculate_hash_sub_key(hsubkey, sizeof(hsubkey),
-			cipher_xform->cipher.key.data,
-			cipher_xform->cipher.key.length);
+		break;
+	case 32:
+		aesni_gcm256_pre(cipher_xform->cipher.key.data, &sess->gdata);
+		sess->key = AESNI_GCM_KEY_256;
 
-	/* Calculate GCM pre-compute */
-	(*gcm_ops->gcm.precomp)(&sess->gdata, hsubkey);
+		break;
+	default:
+		GCM_LOG_ERR("Unsupported cipher key length");
+		return -EINVAL;
+	}
 
 	return 0;
 }
@@ -194,10 +155,10 @@ aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
 		return sess;
 
 	sess = (struct aesni_gcm_session *)
-		((struct rte_cryptodev_session *)_sess)->_private;
+		((struct rte_cryptodev_sym_session *)_sess)->_private;
 
-	if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
-			sess, op->xform) != 0)) {
+	if (unlikely(aesni_gcm_set_session_parameters(sess,
+			op->xform) != 0)) {
 		rte_mempool_put(qp->sess_mp, _sess);
 		sess = NULL;
 	}
@@ -217,19 +178,45 @@ aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
  *
 */
 static int
-process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op,
+process_gcm_crypto_op(struct rte_crypto_sym_op *op,
 		struct aesni_gcm_session *session)
 {
 	uint8_t *src, *dst;
-	struct rte_mbuf *m = op->m_src;
+	struct rte_mbuf *m_src = op->m_src;
+	uint32_t offset = op->cipher.data.offset;
+	uint32_t part_len, total_len, data_len;
+
+	RTE_ASSERT(m_src != NULL);
+
+	while (offset >= m_src->data_len) {
+		offset -= m_src->data_len;
+		m_src = m_src->next;
+
+		RTE_ASSERT(m_src != NULL);
+	}
+
+	data_len = m_src->data_len - offset;
+	part_len = (data_len < op->cipher.data.length) ? data_len :
+			op->cipher.data.length;
+
+	/* Destination buffer is required when segmented source buffer */
+	RTE_ASSERT((part_len == op->cipher.data.length) ||
+			((part_len != op->cipher.data.length) &&
+					(op->m_dst != NULL)));
+	/* Segmented destination buffer is not supported */
+	RTE_ASSERT((op->m_dst == NULL) ||
+			((op->m_dst != NULL) &&
+					rte_pktmbuf_is_contiguous(op->m_dst)));
+
 
-	src = rte_pktmbuf_mtod(m, uint8_t *) + op->cipher.data.offset;
 	dst = op->m_dst ?
			rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
					op->cipher.data.offset) :
-			rte_pktmbuf_mtod_offset(m, uint8_t *,
+			rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
					op->cipher.data.offset);
 
+	src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
+
 	/* sanity checks */
 	if (op->cipher.iv.length != 16 && op->cipher.iv.length != 12 &&
			op->cipher.iv.length != 0) {
@@ -246,48 +233,81 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op,
 		*iv_padd = rte_bswap32(1);
 	}
 
-	if (op->auth.aad.length != 12 && op->auth.aad.length != 8 &&
-			op->auth.aad.length != 0) {
-		GCM_LOG_ERR("iv");
-		return -1;
-	}
-
 	if (op->auth.digest.length != 16 &&
 			op->auth.digest.length != 12 &&
-			op->auth.digest.length != 8 &&
-			op->auth.digest.length != 0) {
-		GCM_LOG_ERR("iv");
+			op->auth.digest.length != 8) {
+		GCM_LOG_ERR("digest");
 		return -1;
 	}
 
 	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
 
-		(*qp->ops->gcm.enc)(&session->gdata, dst, src,
-				(uint64_t)op->cipher.data.length,
+		aesni_gcm_enc[session->key].init(&session->gdata,
 				op->cipher.iv.data,
 				op->auth.aad.data,
-				(uint64_t)op->auth.aad.length,
+				(uint64_t)op->auth.aad.length);
+
+		aesni_gcm_enc[session->key].update(&session->gdata, dst, src,
+				(uint64_t)part_len);
+		total_len = op->cipher.data.length - part_len;
+
+		while (total_len) {
+			dst += part_len;
+			m_src = m_src->next;
+
+			RTE_ASSERT(m_src != NULL);
+
+			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			part_len = (m_src->data_len < total_len) ?
+					m_src->data_len : total_len;
+
+			aesni_gcm_enc[session->key].update(&session->gdata,
+					dst, src,
+					(uint64_t)part_len);
+			total_len -= part_len;
+		}
+
+		aesni_gcm_enc[session->key].finalize(&session->gdata,
 				op->auth.digest.data,
 				(uint64_t)op->auth.digest.length);
-	} else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
-		uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(m,
+	} else { /* session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION */
+		uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(op->m_dst ?
+				op->m_dst : op->m_src,
 				op->auth.digest.length);
 
 		if (!auth_tag) {
-			GCM_LOG_ERR("iv");
+			GCM_LOG_ERR("auth_tag");
 			return -1;
 		}
 
-		(*qp->ops->gcm.dec)(&session->gdata, dst, src,
-				(uint64_t)op->cipher.data.length,
+		aesni_gcm_dec[session->key].init(&session->gdata,
 				op->cipher.iv.data,
 				op->auth.aad.data,
-				(uint64_t)op->auth.aad.length,
+				(uint64_t)op->auth.aad.length);
+
+		aesni_gcm_dec[session->key].update(&session->gdata, dst, src,
+				(uint64_t)part_len);
+		total_len = op->cipher.data.length - part_len;
+
+		while (total_len) {
+			dst += part_len;
+			m_src = m_src->next;
+
+			RTE_ASSERT(m_src != NULL);
+
+			src = rte_pktmbuf_mtod(m_src, uint8_t *);
+			part_len = (m_src->data_len < total_len) ?
+					m_src->data_len : total_len;
+
+			aesni_gcm_dec[session->key].update(&session->gdata,
					dst, src,
					(uint64_t)part_len);
+			total_len -= part_len;
+		}
+
+		aesni_gcm_dec[session->key].finalize(&session->gdata,
 				auth_tag,
 				(uint64_t)op->auth.digest.length);
-	} else {
-		GCM_LOG_ERR("iv");
-		return -1;
 	}
 
 	return 0;
 }
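The reworked process_gcm_crypto_op above gains scatter-gather support: it first skips whole mbuf segments covered by the data offset, then feeds the remainder of each segment to update() until the requested length is consumed. A self-contained model of that walk follows, with a plain linked list standing in for the rte_mbuf chain.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for an mbuf chain: just a length and a next pointer. */
struct seg { uint32_t data_len; struct seg *next; };

/* Walk a segment chain the way process_gcm_crypto_op does: consume whole
 * segments covered by `offset`, then hand the remainder of each segment
 * to update() until `length` bytes have been processed. */
static void walk(struct seg *s, uint32_t offset, uint32_t length)
{
	while (offset >= s->data_len) {	/* skip fully-offset segments */
		offset -= s->data_len;
		s = s->next;
		assert(s != NULL);
	}

	uint32_t data_len = s->data_len - offset;
	uint32_t part_len = (data_len < length) ? data_len : length;
	printf("update: %u bytes (first segment, offset %u)\n", part_len, offset);

	uint32_t total_len = length - part_len;
	while (total_len) {	/* continue across the remaining segments */
		s = s->next;
		assert(s != NULL);
		part_len = (s->data_len < total_len) ? s->data_len : total_len;
		printf("update: %u bytes\n", part_len);
		total_len -= part_len;
	}
}

int main(void)
{
	struct seg c = { 100, NULL }, b = { 50, &c }, a = { 64, &b };
	walk(&a, 70, 120);	/* starts 6 bytes into the second segment */
	return 0;
}

This prints an update of 44 bytes (the tail of the second segment) followed by 76 bytes from the third, matching how the driver splits one crypto op across segments while requiring a contiguous destination.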
@@ -355,67 +375,73 @@ handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
 		rte_mempool_put(qp->sess_mp, op->sym->session);
 		op->sym->session = NULL;
 	}
-
-	rte_ring_enqueue(qp->processed_pkts, (void *)op);
 }
 
 static uint16_t
-aesni_gcm_pmd_enqueue_burst(void *queue_pair,
+aesni_gcm_pmd_dequeue_burst(void *queue_pair,
 		struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct aesni_gcm_session *sess;
 	struct aesni_gcm_qp *qp = queue_pair;
 
-	int i, retval = 0;
+	int retval = 0;
+	unsigned int i, nb_dequeued;
+
+	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+			(void **)ops, nb_ops, NULL);
 
-	for (i = 0; i < nb_ops; i++) {
+	for (i = 0; i < nb_dequeued; i++) {
 
 		sess = aesni_gcm_get_session(qp, ops[i]->sym);
 		if (unlikely(sess == NULL)) {
 			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			qp->qp_stats.enqueue_err_count++;
+			qp->qp_stats.dequeue_err_count++;
 			break;
 		}
 
-		retval = process_gcm_crypto_op(qp, ops[i]->sym, sess);
+		retval = process_gcm_crypto_op(ops[i]->sym, sess);
 		if (retval < 0) {
 			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			qp->qp_stats.enqueue_err_count++;
+			qp->qp_stats.dequeue_err_count++;
 			break;
 		}
 
 		handle_completed_gcm_crypto_op(qp, ops[i]);
-
-		qp->qp_stats.enqueued_count++;
 	}
+
+	qp->qp_stats.dequeued_count += i;
+
 	return i;
 }
 
 static uint16_t
-aesni_gcm_pmd_dequeue_burst(void *queue_pair,
+aesni_gcm_pmd_enqueue_burst(void *queue_pair,
 		struct rte_crypto_op **ops, uint16_t nb_ops)
 {
 	struct aesni_gcm_qp *qp = queue_pair;
 
-	unsigned nb_dequeued;
+	unsigned int nb_enqueued;
 
-	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
-			(void **)ops, nb_ops);
-	qp->qp_stats.dequeued_count += nb_dequeued;
+	nb_enqueued = rte_ring_enqueue_burst(qp->processed_pkts,
+			(void **)ops, nb_ops, NULL);
+	qp->qp_stats.enqueued_count += nb_enqueued;
 
-	return nb_dequeued;
+	return nb_enqueued;
}
 
-static int aesni_gcm_remove(const char *name);
+static int aesni_gcm_remove(struct rte_vdev_device *vdev);
 
 static int
 aesni_gcm_create(const char *name,
+		struct rte_vdev_device *vdev,
 		struct rte_crypto_vdev_init_params *init_params)
 {
 	struct rte_cryptodev *dev;
-	char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
 	struct aesni_gcm_private *internals;
-	enum aesni_gcm_vector_mode vector_mode;
+
+	if (init_params->name[0] == '\0')
+		snprintf(init_params->name, sizeof(init_params->name),
+				"%s", name);
 
 	/* Check CPU for support for AES instruction set */
 	if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
@@ -423,27 +449,7 @@ aesni_gcm_create(const char *name,
 		return -EFAULT;
 	}
 
-	/* Check CPU for supported vector instruction set */
-	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
-		vector_mode = RTE_AESNI_GCM_AVX2;
-	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
-		vector_mode = RTE_AESNI_GCM_AVX;
-	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
-		vector_mode = RTE_AESNI_GCM_SSE;
-	else {
-		GCM_LOG_ERR("Vector instructions are not supported by CPU");
-		return -EFAULT;
-	}
-
-	/* create a unique device name */
-	if (create_unique_device_name(crypto_dev_name,
-			RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
-		GCM_LOG_ERR("failed to create unique cryptodev name");
-		return -EINVAL;
-	}
-
-
-	dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+	dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
 			sizeof(struct aesni_gcm_private), init_params->socket_id);
 	if (dev == NULL) {
 		GCM_LOG_ERR("failed to create cryptodev vdev");
@@ -459,63 +465,60 @@ aesni_gcm_create(const char *name,
 	dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
 			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
-			RTE_CRYPTODEV_FF_CPU_AESNI;
+			RTE_CRYPTODEV_FF_CPU_AESNI |
+			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
 
-	switch (vector_mode) {
-	case RTE_AESNI_GCM_SSE:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
-		break;
-	case RTE_AESNI_GCM_AVX:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
-		break;
-	case RTE_AESNI_GCM_AVX2:
-		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
-		break;
-	default:
-		break;
-	}
-
-	/* Set vector instructions mode supported */
 	internals = dev->data->dev_private;
 
-	internals->vector_mode = vector_mode;
-
 	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
 	internals->max_nb_sessions = init_params->max_nb_sessions;
 
 	return 0;
 
 init_error:
-	GCM_LOG_ERR("driver %s: create failed", name);
+	GCM_LOG_ERR("driver %s: create failed", init_params->name);
 
-	aesni_gcm_remove(crypto_dev_name);
+	aesni_gcm_remove(vdev);
 	return -EFAULT;
 }
 
 static int
-aesni_gcm_probe(const char *name, const char *input_args)
+aesni_gcm_probe(struct rte_vdev_device *vdev)
 {
 	struct rte_crypto_vdev_init_params init_params = {
 		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
 		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
-		rte_socket_id()
+		rte_socket_id(),
+		{0}
 	};
+	const char *name;
+	const char *input_args;
 
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+	input_args = rte_vdev_device_args(vdev);
 	rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
 
 	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
 			init_params.socket_id);
+	if (init_params.name[0] != '\0')
+		RTE_LOG(INFO, PMD, "  User defined name = %s\n",
+			init_params.name);
 	RTE_LOG(INFO, PMD, "  Max number of queue pairs = %d\n",
 			init_params.max_nb_queue_pairs);
 	RTE_LOG(INFO, PMD, "  Max number of sessions = %d\n",
 			init_params.max_nb_sessions);
 
-	return aesni_gcm_create(name, &init_params);
+	return aesni_gcm_create(name, vdev, &init_params);
 }
 
 static int
-aesni_gcm_remove(const char *name)
+aesni_gcm_remove(struct rte_vdev_device *vdev)
 {
+	const char *name;
+
+	name = rte_vdev_device_name(vdev);
 	if (name == NULL)
 		return -EINVAL;
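Note the enqueue/dequeue swap above: enqueue_burst now only places ops on the processed_pkts ring, and dequeue_burst drains the ring and performs the crypto processing on what it pulled out. A toy sketch of that split follows; the fixed-size array ring below is a stand-in for rte_ring, not the DPDK implementation.

#include <stdio.h>

/* Toy single-producer/single-consumer ring modelling the new split:
 * enqueue stores ops only, dequeue drains the ring and processes. */
#define RING_SZ 8
struct op { int id; };
static struct op *ring[RING_SZ];
static unsigned int head, tail;

static unsigned int enqueue_burst(struct op **ops, unsigned int n)
{
	unsigned int i;
	for (i = 0; i < n && head - tail < RING_SZ; i++)
		ring[head++ % RING_SZ] = ops[i];	/* store only, no crypto here */
	return i;
}

static unsigned int dequeue_burst(struct op **ops, unsigned int n)
{
	unsigned int i;
	for (i = 0; i < n && tail != head; i++) {
		ops[i] = ring[tail++ % RING_SZ];
		printf("processing op %d at dequeue time\n", ops[i]->id);
	}
	return i;
}

int main(void)
{
	struct op a = {1}, b = {2};
	struct op *in[] = { &a, &b }, *out[2];
	enqueue_burst(in, 2);
	printf("dequeued %u ops\n", dequeue_burst(out, 2));
	return 0;
}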
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
index c51f82a8..1fc047be 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
@@ -39,17 +39,17 @@
 #include "aesni_gcm_pmd_private.h"
 
 static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
-	{	/* AES GCM (AUTH) */
+	{	/* AES GMAC (AUTH) */
 		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 		{.sym = {
 			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
 			{.auth = {
-				.algo = RTE_CRYPTO_AUTH_AES_GCM,
+				.algo = RTE_CRYPTO_AUTH_AES_GMAC,
 				.block_size = 16,
 				.key_size = {
 					.min = 16,
-					.max = 16,
-					.increment = 0
+					.max = 32,
+					.increment = 16
 				},
 				.digest_size = {
 					.min = 8,
@@ -57,9 +57,34 @@ static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
 					.increment = 4
 				},
 				.aad_size = {
+					.min = 0,
+					.max = 65535,
+					.increment = 1
+				}
+			}, }
+		}, }
+	},
+	{	/* AES GCM (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 32,
+					.increment = 16
+				},
+				.digest_size = {
 					.min = 8,
-					.max = 12,
+					.max = 16,
 					.increment = 4
+				},
+				.aad_size = {
+					.min = 0,
+					.max = 65535,
+					.increment = 1
 				}
 			}, }
 		}, }
@@ -73,8 +98,8 @@ static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
 				.block_size = 16,
 				.key_size = {
 					.min = 16,
-					.max = 16,
-					.increment = 0
+					.max = 32,
+					.increment = 16
 				},
 				.iv_size = {
 					.min = 12,
@@ -89,7 +114,8 @@ static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
 
 /** Configure device */
 static int
-aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev)
+aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused struct rte_cryptodev_config *config)
 {
 	return 0;
 }
@@ -199,7 +225,7 @@ aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
 
 	r = rte_ring_lookup(qp->name);
 	if (r) {
-		if (r->prod.size >= ring_size) {
+		if (rte_ring_get_size(r) >= ring_size) {
 			GCM_LOG_INFO("Reusing existing ring %s for processed"
 					" packets", qp->name);
 			return r;
@@ -221,7 +247,6 @@ aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 		int socket_id)
 {
 	struct aesni_gcm_qp *qp = NULL;
-	struct aesni_gcm_private *internals = dev->data->dev_private;
 
 	/* Free memory prior to re-allocation if needed. */
 	if (dev->data->queue_pairs[qp_id] != NULL)
@@ -239,8 +264,6 @@ aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 	if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
 		goto qp_setup_cleanup;
 
-	qp->ops = &gcm_ops[internals->vector_mode];
-
 	qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
 			qp_conf->nb_descriptors, socket_id);
 	if (qp->processed_pkts == NULL)
@@ -291,18 +314,15 @@ aesni_gcm_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
 
 /** Configure a aesni gcm session from a crypto xform chain */
 static void *
-aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev,
+aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
 		struct rte_crypto_sym_xform *xform,	void *sess)
 {
-	struct aesni_gcm_private *internals = dev->data->dev_private;
-
 	if (unlikely(sess == NULL)) {
 		GCM_LOG_ERR("invalid session struct");
 		return NULL;
 	}
 
-	if (aesni_gcm_set_session_parameters(&gcm_ops[internals->vector_mode],
-			sess, xform) != 0) {
+	if (aesni_gcm_set_session_parameters(sess, xform) != 0) {
 		GCM_LOG_ERR("failed configure session parameters");
 		return NULL;
 	}
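The capability table above widens key_size from {16, 16, 0} to {16, 32, 16}, i.e. AES-128 and AES-256 are supported but AES-192 is not. A small sketch of how such a {min, max, increment} range is interpreted; range_ok is a hypothetical helper written for illustration, not a DPDK API.

#include <stdint.h>
#include <stdio.h>

/* Model of the {min, max, increment} ranges used in the capability table:
 * a value is valid if it lies in [min, max] and is reachable from min in
 * steps of increment (increment 0 means only min is allowed). */
struct range { uint16_t min, max, increment; };

static int range_ok(const struct range *r, uint16_t v)
{
	if (v < r->min || v > r->max)
		return 0;
	if (r->increment == 0)
		return v == r->min;
	return (v - r->min) % r->increment == 0;
}

int main(void)
{
	struct range key = { 16, 32, 16 };	/* AES-128 and AES-256 only */
	for (uint16_t k = 16; k <= 32; k += 8)
		printf("key %u bytes: %s\n", k,
				range_ok(&key, k) ? "ok" : "unsupported");
	return 0;
}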
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
index 9878d6e4..0496b447 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
@@ -58,8 +58,6 @@
 
 /** private data structure for each virtual AESNI GCM device */
 struct aesni_gcm_private {
-	enum aesni_gcm_vector_mode vector_mode;
-	/**< Vector mode */
 	unsigned max_nb_queue_pairs;
 	/**< Max number of queue pairs supported by device */
 	unsigned max_nb_sessions;
@@ -71,8 +69,6 @@ struct aesni_gcm_qp {
 	/**< Queue Pair Identifier */
 	char name[RTE_CRYPTODEV_NAME_LEN];
 	/**< Unique Queue Pair Name */
-	const struct aesni_gcm_ops *ops;
-	/**< Architecture dependent function pointer table of the gcm APIs */
 	struct rte_ring *processed_pkts;
 	/**< Ring for placing process packets */
 	struct rte_mempool *sess_mp;
@@ -87,10 +83,17 @@ enum aesni_gcm_operation {
 	AESNI_GCM_OP_AUTHENTICATED_DECRYPTION
 };
 
+enum aesni_gcm_key {
+	AESNI_GCM_KEY_128,
+	AESNI_GCM_KEY_256
+};
+
 /** AESNI GCM private session structure */
 struct aesni_gcm_session {
 	enum aesni_gcm_operation op;
 	/**< GCM operation type */
+	enum aesni_gcm_key key;
+	/**< GCM key type */
 	struct gcm_data gdata __rte_cache_aligned;
 	/**< GCM parameters */
 };
@@ -98,7 +101,6 @@ struct aesni_gcm_session {
 
 /**
  * Setup GCM session parameters
- * @param	ops	gcm ops function pointer table
  * @param	sess	aesni gcm session structure
  * @param	xform	crypto transform chain
  *
@@ -107,8 +109,7 @@ struct aesni_gcm_session {
  *  - On failure returns error code < 0
  */
 extern int
-aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *ops,
-		struct aesni_gcm_session *sess,
+aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
 		const struct rte_crypto_sym_xform *xform);
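The new aesni_gcm_key enum added above lets the session record which key-size code path (the 128-bit or 256-bit function tables) to use at processing time. A sketch of the key-length dispatch performed in aesni_gcm_set_session_parameters; select_key is a hypothetical stand-alone version written for illustration.

#include <stdint.h>
#include <stdio.h>

enum aesni_gcm_key { AESNI_GCM_KEY_128, AESNI_GCM_KEY_256 };

/* Key-length dispatch as done in aesni_gcm_set_session_parameters:
 * 16-byte keys select the 128-bit path, 32-byte keys the 256-bit one,
 * and anything else is rejected. */
static int select_key(uint16_t key_len, enum aesni_gcm_key *out)
{
	switch (key_len) {
	case 16:
		*out = AESNI_GCM_KEY_128;
		return 0;
	case 32:
		*out = AESNI_GCM_KEY_256;
		return 0;
	default:
		return -1;	/* e.g. AES-192 is unsupported */
	}
}

int main(void)
{
	enum aesni_gcm_key k;
	printf("24-byte key -> %d\n", select_key(24, &k));	/* -1 */
	printf("32-byte key -> %d\n", select_key(32, &k));	/*  0 */
	return 0;
}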