From f239aed5e674965691846e8ce3f187dd47523689 Mon Sep 17 00:00:00 2001
From: Luca Boccassi
Date: Wed, 16 Aug 2017 18:42:05 +0100
Subject: New upstream version 17.08

Change-Id: I288b50990f52646089d6b1f3aaa6ba2f091a51d7
Signed-off-by: Luca Boccassi
---
 drivers/crypto/aesni_gcm/Makefile                |   9 +-
 drivers/crypto/aesni_gcm/aesni_gcm_ops.h         |  97 +++++-
 drivers/crypto/aesni_gcm/aesni_gcm_pmd.c         | 416 ++++++++++++++---------
 drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c     |  94 ++---
 drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h |  46 ++-
 5 files changed, 440 insertions(+), 222 deletions(-)

diff --git a/drivers/crypto/aesni_gcm/Makefile b/drivers/crypto/aesni_gcm/Makefile
index 59a7c6a9..6fca5e1c 100644
--- a/drivers/crypto/aesni_gcm/Makefile
+++ b/drivers/crypto/aesni_gcm/Makefile
@@ -1,6 +1,6 @@
 # BSD LICENSE
 #
-# Copyright(c) 2016 Intel Corporation. All rights reserved.
+# Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions
@@ -31,6 +31,9 @@
 include $(RTE_SDK)/mk/rte.vars.mk
 
 ifneq ($(MAKECMDGOALS),clean)
+ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),)
+$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable")
+endif
 endif
 
 # library name
@@ -47,7 +50,9 @@ LIBABIVER := 1
 EXPORT_MAP := rte_pmd_aesni_gcm_version.map
 
 # external library dependencies
-LDLIBS += -lisal_crypto
+CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
+CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
+LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
 
 # library source files
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd.c
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
index e9de6546..d6a18efc 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -1,7 +1,7 @@
 /*-
  * BSD LICENSE
  *
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -37,26 +37,109 @@
 #define LINUX
 #endif
 
-#include <isa-l_crypto/aes_gcm.h>
+#include <gcm_defines.h>
+#include <aux_funcs.h>
 
-typedef void (*aesni_gcm_init_t)(struct gcm_data *my_ctx_data,
-		uint8_t *iv,
+/** Supported vector modes */
+enum aesni_gcm_vector_mode {
+	RTE_AESNI_GCM_NOT_SUPPORTED = 0,
+	RTE_AESNI_GCM_SSE,
+	RTE_AESNI_GCM_AVX,
+	RTE_AESNI_GCM_AVX2,
+	RTE_AESNI_GCM_VECTOR_NUM
+};
+
+enum aesni_gcm_key {
+	AESNI_GCM_KEY_128,
+	AESNI_GCM_KEY_192,
+	AESNI_GCM_KEY_256,
+	AESNI_GCM_KEY_NUM
+};
+
+
+typedef void (*aesni_gcm_t)(const struct gcm_key_data *gcm_key_data,
+		struct gcm_context_data *gcm_ctx_data, uint8_t *out,
+		const uint8_t *in, uint64_t plaintext_len, const uint8_t *iv,
+		const uint8_t *aad, uint64_t aad_len,
+		uint8_t *auth_tag, uint64_t auth_tag_len);
+
+typedef void (*aesni_gcm_precomp_t)(const void *key, struct gcm_key_data *gcm_data);
+
+typedef void (*aesni_gcm_init_t)(const struct gcm_key_data *gcm_key_data,
+		struct gcm_context_data *gcm_ctx_data,
+		const uint8_t *iv,
 		uint8_t const *aad,
 		uint64_t aad_len);
 
-typedef void (*aesni_gcm_update_t)(struct gcm_data *my_ctx_data,
+typedef void (*aesni_gcm_update_t)(const struct gcm_key_data *gcm_key_data,
+		struct gcm_context_data *gcm_ctx_data,
 		uint8_t *out,
 		const uint8_t *in,
 		uint64_t plaintext_len);
 
-typedef void (*aesni_gcm_finalize_t)(struct gcm_data *my_ctx_data,
+typedef void (*aesni_gcm_finalize_t)(const struct gcm_key_data *gcm_key_data,
+		struct gcm_context_data *gcm_ctx_data,
 		uint8_t *auth_tag,
 		uint64_t auth_tag_len);
 
+/** GCM library function pointer table */
 struct aesni_gcm_ops {
+	aesni_gcm_t enc;	/**< GCM encode function pointer */
+	aesni_gcm_t dec;	/**< GCM decode function pointer */
+	aesni_gcm_precomp_t precomp;	/**< GCM pre-compute */
 	aesni_gcm_init_t init;
-	aesni_gcm_update_t update;
+	aesni_gcm_update_t update_enc;
+	aesni_gcm_update_t update_dec;
 	aesni_gcm_finalize_t finalize;
 };
 
+#define AES_GCM_FN(keylen, arch) \
+aes_gcm_enc_##keylen##_##arch,\
+aes_gcm_dec_##keylen##_##arch,\
+aes_gcm_pre_##keylen##_##arch,\
+aes_gcm_init_##keylen##_##arch,\
+aes_gcm_enc_##keylen##_update_##arch,\
+aes_gcm_dec_##keylen##_update_##arch,\
+aes_gcm_enc_##keylen##_finalize_##arch,
+
+static const struct aesni_gcm_ops gcm_ops[RTE_AESNI_GCM_VECTOR_NUM][AESNI_GCM_KEY_NUM] = {
+	[RTE_AESNI_GCM_NOT_SUPPORTED] = {
+		[AESNI_GCM_KEY_128] = {NULL},
+		[AESNI_GCM_KEY_192] = {NULL},
+		[AESNI_GCM_KEY_256] = {NULL}
+	},
+	[RTE_AESNI_GCM_SSE] = {
+		[AESNI_GCM_KEY_128] = {
+			AES_GCM_FN(128, sse)
+		},
+		[AESNI_GCM_KEY_192] = {
+			AES_GCM_FN(192, sse)
+		},
+		[AESNI_GCM_KEY_256] = {
+			AES_GCM_FN(256, sse)
+		}
+	},
+	[RTE_AESNI_GCM_AVX] = {
+		[AESNI_GCM_KEY_128] = {
+			AES_GCM_FN(128, avx_gen2)
+		},
+		[AESNI_GCM_KEY_192] = {
+			AES_GCM_FN(192, avx_gen2)
+		},
+		[AESNI_GCM_KEY_256] = {
+			AES_GCM_FN(256, avx_gen2)
+		}
+	},
+	[RTE_AESNI_GCM_AVX2] = {
+		[AESNI_GCM_KEY_128] = {
+			AES_GCM_FN(128, avx_gen4)
+		},
+		[AESNI_GCM_KEY_192] = {
+			AES_GCM_FN(192, avx_gen4)
+		},
+		[AESNI_GCM_KEY_256] = {
+			AES_GCM_FN(256, avx_gen4)
+		}
+	}
+};
 #endif /* _AESNI_GCM_OPS_H_ */
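
The gcm_ops table above is indexed first by vector mode and then by key size, with AES_GCM_FN pasting together the IPSec_MB symbol names (aes_gcm_enc_128_sse, aes_gcm_pre_256_avx_gen4, and so on). As a minimal sketch of how such a table is consumed, the hypothetical helper below does a one-shot encrypt through the dispatch table; it is illustrative only, not code from the patch, and assumes the IPSec_MB headers are on the include path:

/* Hypothetical helper, not part of the driver: one-shot AES-GCM encryption
 * through the gcm_ops dispatch table declared in aesni_gcm_ops.h. */
#include <stdint.h>
#include <string.h>
#include "aesni_gcm_ops.h"

static int
gcm_encrypt_one_shot(enum aesni_gcm_vector_mode mode,
		enum aesni_gcm_key key_sz, const void *key,
		uint8_t *out, const uint8_t *in, uint64_t len,
		const uint8_t *iv, const uint8_t *aad, uint64_t aad_len,
		uint8_t *tag, uint64_t tag_len)
{
	const struct aesni_gcm_ops *ops = &gcm_ops[mode][key_sz];
	struct gcm_key_data key_data;
	struct gcm_context_data ctx;

	/* the RTE_AESNI_GCM_NOT_SUPPORTED row holds only NULL pointers */
	if (ops->enc == NULL)
		return -1;

	ops->precomp(key, &key_data);	/* expand key, precompute hash subkey */
	ops->enc(&key_data, &ctx, out, in, len, iv, aad, aad_len, tag, tag_len);

	memset(&key_data, 0, sizeof(key_data));	/* do not leave key material behind */
	return 0;
}
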
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 101ef98b..d9c91d06 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -1,7 +1,7 @@
 /*-
  * BSD LICENSE
  *
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -35,6 +35,7 @@
 #include <rte_hexdump.h>
 #include <rte_cryptodev.h>
 #include <rte_cryptodev_pmd.h>
+#include <rte_cryptodev_vdev.h>
 #include <rte_vdev.h>
 #include <rte_malloc.h>
 #include <rte_cpuflags.h>
@@ -42,133 +43,155 @@
 
 #include "aesni_gcm_pmd_private.h"
 
-/** GCM encode functions pointer table */
-static const struct aesni_gcm_ops aesni_gcm_enc[] = {
-		[AESNI_GCM_KEY_128] = {
-				aesni_gcm128_init,
-				aesni_gcm128_enc_update,
-				aesni_gcm128_enc_finalize
-		},
-		[AESNI_GCM_KEY_256] = {
-				aesni_gcm256_init,
-				aesni_gcm256_enc_update,
-				aesni_gcm256_enc_finalize
-		}
-};
-
-/** GCM decode functions pointer table */
-static const struct aesni_gcm_ops aesni_gcm_dec[] = {
-		[AESNI_GCM_KEY_128] = {
-				aesni_gcm128_init,
-				aesni_gcm128_dec_update,
-				aesni_gcm128_dec_finalize
-		},
-		[AESNI_GCM_KEY_256] = {
-				aesni_gcm256_init,
-				aesni_gcm256_dec_update,
-				aesni_gcm256_dec_finalize
-		}
-};
+static uint8_t cryptodev_driver_id;
 
 /** Parse crypto xform chain and set private session parameters */
 int
-aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
+		struct aesni_gcm_session *sess,
 		const struct rte_crypto_sym_xform *xform)
 {
 	const struct rte_crypto_sym_xform *auth_xform;
-	const struct rte_crypto_sym_xform *cipher_xform;
-
-	if (xform->next == NULL || xform->next->next != NULL) {
-		GCM_LOG_ERR("Two and only two chained xform required");
-		return -EINVAL;
-	}
+	const struct rte_crypto_sym_xform *aead_xform;
+	uint16_t digest_length;
+	uint8_t key_length;
+	uint8_t *key;
 
-	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
-			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
-		auth_xform = xform->next;
-		cipher_xform = xform;
-	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
-			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+	/* AES-GMAC */
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
 		auth_xform = xform;
-		cipher_xform = xform->next;
+		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) {
+			GCM_LOG_ERR("Only AES GMAC is supported as an "
+					"authentication only algorithm");
+			return -ENOTSUP;
+		}
+		/* Set IV parameters */
+		sess->iv.offset = auth_xform->auth.iv.offset;
+		sess->iv.length = auth_xform->auth.iv.length;
+
+		/* Select Crypto operation */
+		if (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
+			sess->op = AESNI_GMAC_OP_GENERATE;
+		else
+			sess->op = AESNI_GMAC_OP_VERIFY;
+
+		key_length = auth_xform->auth.key.length;
+		key = auth_xform->auth.key.data;
+		digest_length = auth_xform->auth.digest_length;
+
+	/* AES-GCM */
+	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		aead_xform = xform;
+
+		if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) {
+			GCM_LOG_ERR("The only combined operation "
+						"supported is AES GCM");
+			return -ENOTSUP;
+		}
+
+		/* Set IV parameters */
+		sess->iv.offset = aead_xform->aead.iv.offset;
+		sess->iv.length = aead_xform->aead.iv.length;
+
+		/* Select Crypto operation */
+		if (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+			sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+		else
+			sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+
+		key_length = aead_xform->aead.key.length;
+		key = aead_xform->aead.key.data;
+
+		sess->aad_length = aead_xform->aead.aad_length;
+		digest_length = aead_xform->aead.digest_length;
 	} else {
-		GCM_LOG_ERR("Cipher and auth xform required");
-		return -EINVAL;
+		GCM_LOG_ERR("Wrong xform type, has to be AEAD or authentication");
+		return -ENOTSUP;
 	}
 
-	if (!(cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_GCM &&
-			(auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GCM ||
-			auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC))) {
-		GCM_LOG_ERR("We only support AES GCM and AES GMAC");
-		return -EINVAL;
-	}
-
-	/* Select Crypto operation */
-	if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
-			auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
-		sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
-	else if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
-			auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
-		sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
-	else {
-		GCM_LOG_ERR("Cipher/Auth operations: Encrypt/Generate or"
-				" Decrypt/Verify are valid only");
+	/* IV check */
+	if (sess->iv.length != 16 && sess->iv.length != 12 &&
+			sess->iv.length != 0) {
+		GCM_LOG_ERR("Wrong IV length");
 		return -EINVAL;
 	}
 
 	/* Check key length and calculate GCM pre-compute. */
-	switch (cipher_xform->cipher.key.length) {
+	switch (key_length) {
 	case 16:
-		aesni_gcm128_pre(cipher_xform->cipher.key.data, &sess->gdata);
 		sess->key = AESNI_GCM_KEY_128;
-
+		break;
+	case 24:
+		sess->key = AESNI_GCM_KEY_192;
 		break;
 	case 32:
-		aesni_gcm256_pre(cipher_xform->cipher.key.data, &sess->gdata);
 		sess->key = AESNI_GCM_KEY_256;
-
 		break;
 	default:
-		GCM_LOG_ERR("Unsupported cipher key length");
+		GCM_LOG_ERR("Invalid key length");
+		return -EINVAL;
+	}
+
+	gcm_ops[sess->key].precomp(key, &sess->gdata_key);
+
+	/* Digest check */
+	if (digest_length != 16 &&
+			digest_length != 12 &&
+			digest_length != 8) {
+		GCM_LOG_ERR("digest");
 		return -EINVAL;
 	}
+	sess->digest_length = digest_length;
 
 	return 0;
 }
 
 /** Get gcm session */
 static struct aesni_gcm_session *
-aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
+aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op)
 {
 	struct aesni_gcm_session *sess = NULL;
-
-	if (op->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
-		if (unlikely(op->session->dev_type
-					!= RTE_CRYPTODEV_AESNI_GCM_PMD))
-			return sess;
-
-		sess = (struct aesni_gcm_session *)op->session->_private;
+	struct rte_crypto_sym_op *sym_op = op->sym;
+
+	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+		if (likely(sym_op->session != NULL))
+			sess = (struct aesni_gcm_session *)
+					get_session_private_data(
+					sym_op->session,
+					cryptodev_driver_id);
 	} else {
 		void *_sess;
+		void *_sess_private_data = NULL;
+
+		if (rte_mempool_get(qp->sess_mp, (void **)&_sess))
+			return NULL;
 
-		if (rte_mempool_get(qp->sess_mp, &_sess))
-			return sess;
+		if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+			return NULL;
 
-		sess = (struct aesni_gcm_session *)
-			((struct rte_cryptodev_sym_session *)_sess)->_private;
+		sess = (struct aesni_gcm_session *)_sess_private_data;
 
-		if (unlikely(aesni_gcm_set_session_parameters(sess,
-				op->xform) != 0)) {
+		if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
+				sess, sym_op->xform) != 0)) {
 			rte_mempool_put(qp->sess_mp, _sess);
+			rte_mempool_put(qp->sess_mp, _sess_private_data);
 			sess = NULL;
 		}
+		sym_op->session = (struct rte_cryptodev_sym_session *)_sess;
+		set_session_private_data(sym_op->session, cryptodev_driver_id,
+				_sess_private_data);
 	}
+
+	if (unlikely(sess == NULL))
+		op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+
 	return sess;
 }
 
 /**
- * Process a crypto operation and complete a JOB_AES_HMAC job structure for
- * submission to the multi buffer library for processing.
+ * Process a crypto operation, calling
+ * the GCM API from the multi buffer library.
  *
  * @param	qp		queue pair
  * @param	op		symmetric crypto operation
@@ -178,14 +201,27 @@ aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
  *
  */
 static int
-process_gcm_crypto_op(struct rte_crypto_sym_op *op,
+process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
 		struct aesni_gcm_session *session)
 {
 	uint8_t *src, *dst;
-	struct rte_mbuf *m_src = op->m_src;
-	uint32_t offset = op->cipher.data.offset;
+	uint8_t *iv_ptr;
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	struct rte_mbuf *m_src = sym_op->m_src;
+	uint32_t offset, data_offset, data_length;
 	uint32_t part_len, total_len, data_len;
 
+	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION ||
+			session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+		offset = sym_op->aead.data.offset;
+		data_offset = offset;
+		data_length = sym_op->aead.data.length;
+	} else {
+		offset = sym_op->auth.data.offset;
+		data_offset = offset;
+		data_length = sym_op->auth.data.length;
+	}
+
 	RTE_ASSERT(m_src != NULL);
 
 	while (offset >= m_src->data_len) {
@@ -196,60 +232,50 @@ process_gcm_crypto_op(struct rte_crypto_sym_op *op,
 	}
 
 	data_len = m_src->data_len - offset;
-	part_len = (data_len < op->cipher.data.length) ? data_len :
-			op->cipher.data.length;
+	part_len = (data_len < data_length) ? data_len :
+			data_length;
 
 	/* Destination buffer is required when segmented source buffer */
-	RTE_ASSERT((part_len == op->cipher.data.length) ||
-			((part_len != op->cipher.data.length) &&
-					(op->m_dst != NULL)));
+	RTE_ASSERT((part_len == data_length) ||
+			((part_len != data_length) &&
+					(sym_op->m_dst != NULL)));
 	/* Segmented destination buffer is not supported */
-	RTE_ASSERT((op->m_dst == NULL) ||
-			((op->m_dst != NULL) &&
-					rte_pktmbuf_is_contiguous(op->m_dst)));
-
-	dst = op->m_dst ?
-			rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
-					op->cipher.data.offset) :
-			rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
-					op->cipher.data.offset);
+	RTE_ASSERT((sym_op->m_dst == NULL) ||
+			((sym_op->m_dst != NULL) &&
+					rte_pktmbuf_is_contiguous(sym_op->m_dst)));
+
+	dst = sym_op->m_dst ?
+			rte_pktmbuf_mtod_offset(sym_op->m_dst, uint8_t *,
+					data_offset) :
+			rte_pktmbuf_mtod_offset(sym_op->m_src, uint8_t *,
+					data_offset);
 
 	src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
 
-	/* sanity checks */
-	if (op->cipher.iv.length != 16 && op->cipher.iv.length != 12 &&
-			op->cipher.iv.length != 0) {
-		GCM_LOG_ERR("iv");
-		return -1;
-	}
-
+	iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+				session->iv.offset);
 	/*
 	 * GCM working in 12B IV mode => 16B pre-counter block we need
 	 * to set BE LSB to 1, driver expects that 16B is allocated
 	 */
-	if (op->cipher.iv.length == 12) {
-		uint32_t *iv_padd = (uint32_t *)&op->cipher.iv.data[12];
+	if (session->iv.length == 12) {
+		uint32_t *iv_padd = (uint32_t *)&(iv_ptr[12]);
 		*iv_padd = rte_bswap32(1);
 	}
 
-	if (op->auth.digest.length != 16 &&
-			op->auth.digest.length != 12 &&
-			op->auth.digest.length != 8) {
-		GCM_LOG_ERR("digest");
-		return -1;
-	}
-
 	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
 
-		aesni_gcm_enc[session->key].init(&session->gdata,
-				op->cipher.iv.data,
-				op->auth.aad.data,
-				(uint64_t)op->auth.aad.length);
+		qp->ops[session->key].init(&session->gdata_key,
+				&qp->gdata_ctx,
+				iv_ptr,
+				sym_op->aead.aad.data,
+				(uint64_t)session->aad_length);
 
-		aesni_gcm_enc[session->key].update(&session->gdata, dst, src,
+		qp->ops[session->key].update_enc(&session->gdata_key,
+				&qp->gdata_ctx, dst, src,
 				(uint64_t)part_len);
-		total_len = op->cipher.data.length - part_len;
+		total_len = data_length - part_len;
 
 		while (total_len) {
 			dst += part_len;
@@ -261,33 +287,36 @@ process_gcm_crypto_op(struct rte_crypto_sym_op *op,
 			part_len = (m_src->data_len < total_len) ?
 					m_src->data_len : total_len;
 
-			aesni_gcm_enc[session->key].update(&session->gdata,
-					dst, src,
+			qp->ops[session->key].update_enc(&session->gdata_key,
+					&qp->gdata_ctx, dst, src,
 					(uint64_t)part_len);
 			total_len -= part_len;
 		}
 
-		aesni_gcm_enc[session->key].finalize(&session->gdata,
-				op->auth.digest.data,
-				(uint64_t)op->auth.digest.length);
-	} else { /* session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION */
-		uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(op->m_dst ?
-				op->m_dst : op->m_src,
-				op->auth.digest.length);
+		qp->ops[session->key].finalize(&session->gdata_key,
+				&qp->gdata_ctx,
+				sym_op->aead.digest.data,
+				(uint64_t)session->digest_length);
+	} else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+		uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(sym_op->m_dst ?
+				sym_op->m_dst : sym_op->m_src,
+				session->digest_length);
 
 		if (!auth_tag) {
 			GCM_LOG_ERR("auth_tag");
 			return -1;
 		}
 
-		aesni_gcm_dec[session->key].init(&session->gdata,
-				op->cipher.iv.data,
-				op->auth.aad.data,
-				(uint64_t)op->auth.aad.length);
+		qp->ops[session->key].init(&session->gdata_key,
+				&qp->gdata_ctx,
+				iv_ptr,
+				sym_op->aead.aad.data,
+				(uint64_t)session->aad_length);
 
-		aesni_gcm_dec[session->key].update(&session->gdata, dst, src,
+		qp->ops[session->key].update_dec(&session->gdata_key,
+				&qp->gdata_ctx, dst, src,
 				(uint64_t)part_len);
-		total_len = op->cipher.data.length - part_len;
+		total_len = data_length - part_len;
 
 		while (total_len) {
 			dst += part_len;
@@ -299,15 +328,47 @@ process_gcm_crypto_op(struct rte_crypto_sym_op *op,
 			part_len = (m_src->data_len < total_len) ?
 					m_src->data_len : total_len;
 
-			aesni_gcm_dec[session->key].update(&session->gdata,
+			qp->ops[session->key].update_dec(&session->gdata_key,
+					&qp->gdata_ctx,
 					dst, src,
 					(uint64_t)part_len);
 			total_len -= part_len;
 		}
 
-		aesni_gcm_dec[session->key].finalize(&session->gdata,
+		qp->ops[session->key].finalize(&session->gdata_key,
+				&qp->gdata_ctx,
 				auth_tag,
-				(uint64_t)op->auth.digest.length);
+				(uint64_t)session->digest_length);
+	} else if (session->op == AESNI_GMAC_OP_GENERATE) {
+		qp->ops[session->key].init(&session->gdata_key,
+				&qp->gdata_ctx,
+				iv_ptr,
+				src,
+				(uint64_t)data_length);
+		qp->ops[session->key].finalize(&session->gdata_key,
+				&qp->gdata_ctx,
+				sym_op->auth.digest.data,
+				(uint64_t)session->digest_length);
+	} else { /* AESNI_GMAC_OP_VERIFY */
+		uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(sym_op->m_dst ?
+				sym_op->m_dst : sym_op->m_src,
+				session->digest_length);
+
+		if (!auth_tag) {
+			GCM_LOG_ERR("auth_tag");
+			return -1;
+		}
+
+		qp->ops[session->key].init(&session->gdata_key,
+				&qp->gdata_ctx,
+				iv_ptr,
+				src,
+				(uint64_t)data_length);
+
+		qp->ops[session->key].finalize(&session->gdata_key,
+				&qp->gdata_ctx,
+				auth_tag,
+				(uint64_t)session->digest_length);
 	}
 
 	return 0;
@@ -324,34 +385,38 @@ process_gcm_crypto_op(struct rte_crypto_sym_op *op,
  * - Returns NULL on invalid job
  */
 static void
-post_process_gcm_crypto_op(struct rte_crypto_op *op)
+post_process_gcm_crypto_op(struct rte_crypto_op *op,
+		struct aesni_gcm_session *session)
 {
 	struct rte_mbuf *m = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
 
-	struct aesni_gcm_session *session =
-		(struct aesni_gcm_session *)op->sym->session->_private;
-
 	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 
 	/* Verify digest if required */
-	if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+	if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION ||
+			session->op == AESNI_GMAC_OP_VERIFY) {
+		uint8_t *digest;
 
 		uint8_t *tag = rte_pktmbuf_mtod_offset(m, uint8_t *,
-				m->data_len - op->sym->auth.digest.length);
+				m->data_len - session->digest_length);
+
+		if (session->op == AESNI_GMAC_OP_VERIFY)
+			digest = op->sym->auth.digest.data;
+		else
+			digest = op->sym->aead.digest.data;
 
 #ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
 		rte_hexdump(stdout, "auth tag (orig):",
-				op->sym->auth.digest.data, op->sym->auth.digest.length);
+				digest, session->digest_length);
 		rte_hexdump(stdout, "auth tag (calc):",
-				tag, op->sym->auth.digest.length);
+				tag, session->digest_length);
 #endif
 
-		if (memcmp(tag, op->sym->auth.digest.data,
-				op->sym->auth.digest.length) != 0)
+		if (memcmp(tag, digest, session->digest_length) != 0)
 			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
 
 		/* trim area used for digest from mbuf */
-		rte_pktmbuf_trim(m, op->sym->auth.digest.length);
+		rte_pktmbuf_trim(m, session->digest_length);
 	}
 }
 
@@ -359,6 +424,7 @@ post_process_gcm_crypto_op(struct rte_crypto_op *op)
  * Process a completed GCM request
  *
  * @param	qp		Queue Pair to process
+ * @param	op		Crypto operation
  * @param	job		JOB_AES_HMAC job
  *
  * @return
@@ -366,12 +432,17 @@ post_process_gcm_crypto_op(struct rte_crypto_op *op)
  */
 static void
 handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
-		struct rte_crypto_op *op)
+		struct rte_crypto_op *op,
+		struct aesni_gcm_session *sess)
 {
-	post_process_gcm_crypto_op(op);
+	post_process_gcm_crypto_op(op, sess);
 
 	/* Free session if a session-less crypto op */
-	if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+	if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+		memset(sess, 0, sizeof(struct aesni_gcm_session));
+
+		memset(op->sym->session, 0,
+				rte_cryptodev_get_header_session_size());
+		rte_mempool_put(qp->sess_mp, sess);
 		rte_mempool_put(qp->sess_mp, op->sym->session);
 		op->sym->session = NULL;
 	}
 
@@ -392,21 +463,21 @@ aesni_gcm_pmd_dequeue_burst(void *queue_pair,
 
 	for (i = 0; i < nb_dequeued; i++) {
 
-		sess = aesni_gcm_get_session(qp, ops[i]->sym);
+		sess = aesni_gcm_get_session(qp, ops[i]);
 		if (unlikely(sess == NULL)) {
 			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 			qp->qp_stats.dequeue_err_count++;
 			break;
 		}
 
-		retval = process_gcm_crypto_op(ops[i]->sym, sess);
+		retval = process_gcm_crypto_op(qp, ops[i], sess);
 		if (retval < 0) {
 			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
 			qp->qp_stats.dequeue_err_count++;
 			break;
 		}
 
-		handle_completed_gcm_crypto_op(qp, ops[i]);
+		handle_completed_gcm_crypto_op(qp, ops[i], sess);
 	}
 
 	qp->qp_stats.dequeued_count += i;
@@ -438,6 +509,7 @@ aesni_gcm_create(const char *name,
 {
 	struct rte_cryptodev *dev;
 	struct aesni_gcm_private *internals;
+	enum aesni_gcm_vector_mode vector_mode;
 
 	if (init_params->name[0] == '\0')
 		snprintf(init_params->name, sizeof(init_params->name),
@@ -449,14 +521,23 @@ aesni_gcm_create(const char *name,
 		return -EFAULT;
 	}
 
-	dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
-			sizeof(struct aesni_gcm_private), init_params->socket_id);
+	/* Check CPU for supported vector instruction set */
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+		vector_mode = RTE_AESNI_GCM_AVX2;
+	else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
+		vector_mode = RTE_AESNI_GCM_AVX;
+	else
+		vector_mode = RTE_AESNI_GCM_SSE;
+
+	dev = rte_cryptodev_vdev_pmd_init(init_params->name,
+			sizeof(struct aesni_gcm_private), init_params->socket_id,
+			vdev);
 	if (dev == NULL) {
 		GCM_LOG_ERR("failed to create cryptodev vdev");
 		goto init_error;
 	}
 
-	dev->dev_type = RTE_CRYPTODEV_AESNI_GCM_PMD;
+	dev->driver_id = cryptodev_driver_id;
 	dev->dev_ops = rte_aesni_gcm_pmd_ops;
 
 	/* register rx/tx burst functions for data path */
@@ -468,8 +549,24 @@ aesni_gcm_create(const char *name,
 			RTE_CRYPTODEV_FF_CPU_AESNI |
 			RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
 
+	switch (vector_mode) {
+	case RTE_AESNI_GCM_SSE:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
+		break;
+	case RTE_AESNI_GCM_AVX:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
+		break;
+	case RTE_AESNI_GCM_AVX2:
+		dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
+		break;
+	default:
+		break;
+	}
+
 	internals = dev->data->dev_private;
 
+	internals->vector_mode = vector_mode;
+
 	internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
 	internals->max_nb_sessions = init_params->max_nb_sessions;
 
@@ -498,7 +595,7 @@ aesni_gcm_probe(struct rte_vdev_device *vdev)
 	if (name == NULL)
 		return -EINVAL;
 	input_args = rte_vdev_device_args(vdev);
-	rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
+	rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
 
 	RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
 			init_params.socket_id);
@@ -539,3 +636,4 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
 	"max_nb_queue_pairs=<int> "
 	"max_nb_sessions=<int> "
 	"socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_pmd_drv, cryptodev_driver_id);
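
With the rework above, the PMD accepts a single AEAD xform (or an AES-GMAC auth-only xform) instead of the old chained cipher+auth pair, and sessions follow the split header/private-data model of DPDK 17.08. A hypothetical application-side sketch of creating a matching session follows; the helper name and the IV_OFFSET convention are assumptions in the style of the DPDK test code, not part of this patch:

/* Hypothetical sketch, assuming the DPDK 17.08 session API. */
#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>

/* IV is written into the op's private area, right after the sym op */
#define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

static struct rte_cryptodev_sym_session *
create_gcm_session(uint8_t dev_id, struct rte_mempool *sess_mp,
		uint8_t *key, uint16_t key_len)	/* 16, 24 or 32 bytes */
{
	struct rte_crypto_sym_xform xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
		.next = NULL,
		.aead = {
			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
			.algo = RTE_CRYPTO_AEAD_AES_GCM,
			.key = { .data = key, .length = key_len },
			.iv = { .offset = IV_OFFSET, .length = 12 },
			.digest_length = 16,
			.aad_length = 0,
		},
	};
	struct rte_cryptodev_sym_session *sess =
			rte_cryptodev_sym_session_create(sess_mp);

	if (sess == NULL)
		return NULL;

	/* ends up in the driver's aesni_gcm_pmd_session_configure() */
	if (rte_cryptodev_sym_session_init(dev_id, sess, &xform, sess_mp) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}
	return sess;
}
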
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
index 1fc047be..48400ac2 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
@@ -49,32 +49,32 @@ static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
 			.key_size = {
 				.min = 16,
 				.max = 32,
-				.increment = 16
+				.increment = 8
 			},
 			.digest_size = {
 				.min = 8,
 				.max = 16,
 				.increment = 4
 			},
-			.aad_size = {
-				.min = 0,
-				.max = 65535,
-				.increment = 1
+			.iv_size = {
+				.min = 12,
+				.max = 12,
+				.increment = 0
 			}
 		}, }
 	}, }
 },
-{	/* AES GCM (AUTH) */
+{	/* AES GCM */
 	.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
 	{.sym = {
-		.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
-		{.auth = {
-			.algo = RTE_CRYPTO_AUTH_AES_GCM,
+		.xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+		{.aead = {
+			.algo = RTE_CRYPTO_AEAD_AES_GCM,
 			.block_size = 16,
 			.key_size = {
 				.min = 16,
 				.max = 32,
-				.increment = 16
+				.increment = 8
 			},
 			.digest_size = {
 				.min = 8,
@@ -85,21 +85,6 @@ static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
 				.min = 0,
 				.max = 65535,
 				.increment = 1
-			}
-		}, }
-	}, }
-},
-{	/* AES GCM (CIPHER) */
-	.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
-	{.sym = {
-		.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
-		{.cipher = {
-			.algo = RTE_CRYPTO_CIPHER_AES_GCM,
-			.block_size = 16,
-			.key_size = {
-				.min = 16,
-				.max = 32,
-				.increment = 16
 			},
 			.iv_size = {
 				.min = 12,
@@ -181,9 +166,9 @@ aesni_gcm_pmd_info_get(struct rte_cryptodev *dev,
 	struct aesni_gcm_private *internals = dev->data->dev_private;
 
 	if (dev_info != NULL) {
-		dev_info->dev_type = dev->dev_type;
-		dev_info->feature_flags = dev->feature_flags;
-		dev_info->capabilities = aesni_gcm_pmd_capabilities;
+		dev_info->driver_id = dev->driver_id;
+		dev_info->feature_flags = dev->feature_flags;
+		dev_info->capabilities = aesni_gcm_pmd_capabilities;
 
 		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
 		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
@@ -244,9 +229,10 @@ aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
 static int
 aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 		const struct rte_cryptodev_qp_conf *qp_conf,
-		int socket_id)
+		int socket_id, struct rte_mempool *session_pool)
 {
 	struct aesni_gcm_qp *qp = NULL;
+	struct aesni_gcm_private *internals = dev->data->dev_private;
 
 	/* Free memory prior to re-allocation if needed. */
 	if (dev->data->queue_pairs[qp_id] != NULL)
@@ -264,12 +250,14 @@ aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
 	if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
 		goto qp_setup_cleanup;
 
+	qp->ops = (const struct aesni_gcm_ops *)gcm_ops[internals->vector_mode];
+
 	qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
 			qp_conf->nb_descriptors, socket_id);
 	if (qp->processed_pkts == NULL)
 		goto qp_setup_cleanup;
 
-	qp->sess_mp = dev->data->session_pool;
+	qp->sess_mp = session_pool;
 
 	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
 
@@ -313,29 +301,57 @@ aesni_gcm_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
 }
 
 /** Configure a aesni gcm session from a crypto xform chain */
-static void *
+static int
 aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev __rte_unused,
-		struct rte_crypto_sym_xform *xform, void *sess)
+		struct rte_crypto_sym_xform *xform,
+		struct rte_cryptodev_sym_session *sess,
+		struct rte_mempool *mempool)
 {
+	void *sess_private_data;
+	int ret;
+	struct aesni_gcm_private *internals = dev->data->dev_private;
+
 	if (unlikely(sess == NULL)) {
 		GCM_LOG_ERR("invalid session struct");
-		return NULL;
+		return -EINVAL;
 	}
 
-	if (aesni_gcm_set_session_parameters(sess, xform) != 0) {
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		CDEV_LOG_ERR(
+			"Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+
+	ret = aesni_gcm_set_session_parameters(gcm_ops[internals->vector_mode],
+			sess_private_data, xform);
+	if (ret != 0) {
 		GCM_LOG_ERR("failed configure session parameters");
-		return NULL;
+
+		/* Return session to mempool */
+		rte_mempool_put(mempool, sess_private_data);
+		return ret;
 	}
 
-	return sess;
+	set_session_private_data(sess, dev->driver_id,
+			sess_private_data);
+
+	return 0;
 }
 
 /** Clear the memory of session so it doesn't leave key material behind */
 static void
-aesni_gcm_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess)
+aesni_gcm_pmd_session_clear(struct rte_cryptodev *dev,
+		struct rte_cryptodev_sym_session *sess)
 {
-	if (sess)
-		memset(sess, 0, sizeof(struct aesni_gcm_session));
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_session_private_data(sess, index);
+
+	/* Zero out the whole structure */
+	if (sess_priv) {
+		memset(sess_priv, 0, sizeof(struct aesni_gcm_session));
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+		set_session_private_data(sess, index, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
+	}
 }
 
 struct rte_cryptodev_ops aesni_gcm_pmd_ops = {
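
The qp_setup change just above tracks the 17.08 API, where the application hands the session mempool to each queue pair instead of the driver reading dev->data->session_pool. A hypothetical caller-side sketch, assuming the five-argument rte_cryptodev_queue_pair_setup() of that release:

/* Hypothetical sketch: configure one queue pair on an already-configured
 * device; nb_descriptors sizes the qp's processed-packets ring. */
#include <rte_cryptodev.h>
#include <rte_lcore.h>

static int
setup_gcm_qp(uint8_t dev_id, uint16_t qp_id, struct rte_mempool *sess_mp)
{
	struct rte_cryptodev_qp_conf qp_conf = {
		.nb_descriptors = 2048,	/* illustrative ring size */
	};

	return rte_cryptodev_queue_pair_setup(dev_id, qp_id, &qp_conf,
			rte_socket_id(), sess_mp);
}
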
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
index 0496b447..7e155729 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
@@ -1,7 +1,7 @@
 /*-
  * BSD LICENSE
  *
- * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -35,6 +35,9 @@
 
 #include "aesni_gcm_ops.h"
 
+#define CRYPTODEV_NAME_AESNI_GCM_PMD	crypto_aesni_gcm
+/**< AES-NI GCM PMD device name */
+
 #define GCM_LOG_ERR(fmt, args...) \
 	RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",  \
 			RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), \
@@ -58,6 +61,8 @@
 /** private data structure for each virtual AESNI GCM device */
 struct aesni_gcm_private {
+	enum aesni_gcm_vector_mode vector_mode;
+	/**< Vector mode */
 	unsigned max_nb_queue_pairs;
 	/**< Max number of queue pairs supported by device */
 	unsigned max_nb_sessions;
@@ -65,36 +70,46 @@ struct aesni_gcm_private {
 };
 
 struct aesni_gcm_qp {
-	uint16_t id;
-	/**< Queue Pair Identifier */
-	char name[RTE_CRYPTODEV_NAME_LEN];
-	/**< Unique Queue Pair Name */
+	const struct aesni_gcm_ops *ops;
+	/**< Architecture dependent function pointer table of the gcm APIs */
 	struct rte_ring *processed_pkts;
 	/**< Ring for placing process packets */
+	struct gcm_context_data gdata_ctx; /* (16 * 5) + 8 = 88 B */
+	/**< GCM parameters */
+	struct rte_cryptodev_stats qp_stats; /* 8 * 4 = 32 B */
+	/**< Queue pair statistics */
 	struct rte_mempool *sess_mp;
 	/**< Session Mempool */
-	struct rte_cryptodev_stats qp_stats;
-	/**< Queue pair statistics */
+	uint16_t id;
+	/**< Queue Pair Identifier */
+	char name[RTE_CRYPTODEV_NAME_LEN];
+	/**< Unique Queue Pair Name */
 } __rte_cache_aligned;
 
 enum aesni_gcm_operation {
 	AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION,
-	AESNI_GCM_OP_AUTHENTICATED_DECRYPTION
-};
-
-enum aesni_gcm_key {
-	AESNI_GCM_KEY_128,
-	AESNI_GCM_KEY_256
+	AESNI_GCM_OP_AUTHENTICATED_DECRYPTION,
+	AESNI_GMAC_OP_GENERATE,
+	AESNI_GMAC_OP_VERIFY
 };
 
 /** AESNI GCM private session structure */
 struct aesni_gcm_session {
+	struct {
+		uint16_t length;
+		uint16_t offset;
+	} iv;
+	/**< IV parameters */
+	uint16_t aad_length;
+	/**< AAD length */
+	uint16_t digest_length;
+	/**< Digest length */
 	enum aesni_gcm_operation op;
 	/**< GCM operation type */
 	enum aesni_gcm_key key;
 	/**< GCM key type */
-	struct gcm_data gdata __rte_cache_aligned;
+	struct gcm_key_data gdata_key;
 	/**< GCM parameters */
 };
 
@@ -109,7 +124,8 @@ struct aesni_gcm_session {
 * - On failure returns error code < 0
 */
 extern int
-aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *ops,
+		struct aesni_gcm_session *sess,
 		const struct rte_crypto_sym_xform *xform);
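
For completeness, a hypothetical sketch of driving the reworked PMD end to end. As aesni_gcm_pmd_dequeue_burst() above shows, this driver performs the actual crypto at dequeue time, so the caller polls after enqueueing. Helper names, the IV_OFFSET definition and the surrounding pool/session setup are assumptions, not part of the patch; error handling is trimmed:

/* Hypothetical sketch, assuming the DPDK 17.08 op layout. */
#include <string.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>

#define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

static int
encrypt_one(uint8_t dev_id, struct rte_mempool *op_pool,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mbuf *m, uint32_t len,
		uint8_t *digest, const uint8_t iv[12])
{
	struct rte_crypto_op *op = rte_crypto_op_alloc(op_pool,
			RTE_CRYPTO_OP_TYPE_SYMMETRIC);
	struct rte_crypto_op *done = NULL;

	if (op == NULL)
		return -1;

	rte_crypto_op_attach_sym_session(op, sess);
	op->sym->m_src = m;
	op->sym->aead.data.offset = 0;
	op->sym->aead.data.length = len;
	op->sym->aead.digest.data = digest;	/* 16 B, per session config */
	/* IV goes at the offset announced in the session's xform */
	memcpy(rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET), iv, 12);

	if (rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) != 1)
		return -1;
	while (rte_cryptodev_dequeue_burst(dev_id, 0, &done, 1) == 0)
		;	/* crypto happens here, inside the dequeue call */

	return done->status == RTE_CRYPTO_OP_STATUS_SUCCESS ? 0 : -1;
}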