Diffstat (limited to 'drivers/crypto/aesni_gcm/aesni_gcm_pmd.c')
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_pmd.c  403
1 file changed, 203 insertions(+), 200 deletions(-)
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index af3d60f0..101ef98b 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -30,8 +30,6 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <openssl/aes.h>
-
#include <rte_common.h>
#include <rte_config.h>
#include <rte_hexdump.h>
@@ -44,133 +42,96 @@
#include "aesni_gcm_pmd_private.h"
-/**
- * Global static parameter used to create a unique name for each AES-NI multi
- * buffer crypto device.
- */
-static unsigned unique_name_id;
-
-static inline int
-create_unique_device_name(char *name, size_t size)
-{
- int ret;
-
- if (name == NULL)
- return -EINVAL;
-
- ret = snprintf(name, size, "%s_%u", RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD),
- unique_name_id++);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-static int
-aesni_gcm_calculate_hash_sub_key(uint8_t *hsubkey, unsigned hsubkey_length,
- uint8_t *aeskey, unsigned aeskey_length)
-{
- uint8_t key[aeskey_length] __rte_aligned(16);
- AES_KEY enc_key;
-
- if (hsubkey_length % 16 != 0 && aeskey_length % 16 != 0)
- return -EFAULT;
-
- memcpy(key, aeskey, aeskey_length);
-
- if (AES_set_encrypt_key(key, aeskey_length << 3, &enc_key) != 0)
- return -EFAULT;
-
- AES_encrypt(hsubkey, hsubkey, &enc_key);
-
- return 0;
-}
-
-/** Get xform chain order */
-static int
-aesni_gcm_get_mode(const struct rte_crypto_sym_xform *xform)
-{
- /*
- * GCM only supports authenticated encryption or authenticated
- * decryption, all other options are invalid, so we must have exactly
- * 2 xform structs chained together
- */
- if (xform->next == NULL || xform->next->next != NULL)
- return -1;
-
- if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
- return AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
- }
-
- if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
- xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
- return AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
- }
+/** GCM encode functions pointer table */
+static const struct aesni_gcm_ops aesni_gcm_enc[] = {
+ [AESNI_GCM_KEY_128] = {
+ aesni_gcm128_init,
+ aesni_gcm128_enc_update,
+ aesni_gcm128_enc_finalize
+ },
+ [AESNI_GCM_KEY_256] = {
+ aesni_gcm256_init,
+ aesni_gcm256_enc_update,
+ aesni_gcm256_enc_finalize
+ }
+};
- return -1;
-}
+/** GCM decode functions pointer table */
+static const struct aesni_gcm_ops aesni_gcm_dec[] = {
+ [AESNI_GCM_KEY_128] = {
+ aesni_gcm128_init,
+ aesni_gcm128_dec_update,
+ aesni_gcm128_dec_finalize
+ },
+ [AESNI_GCM_KEY_256] = {
+ aesni_gcm256_init,
+ aesni_gcm256_dec_update,
+ aesni_gcm256_dec_finalize
+ }
+};
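
The per-key-size dispatch replaces the old per-queue-pair ops pointer: each session records an AESNI_GCM_KEY_128 or AESNI_GCM_KEY_256 index, and the data path picks the matching init/update/finalize triple from these const tables. Below is a minimal standalone sketch of the same designated-initializer dispatch pattern; the demo_* names are hypothetical stand-ins, not part of the driver.

/* Standalone sketch of the key-size-indexed ops-table pattern above.
 * All demo_* names are hypothetical; only the dispatch shape mirrors
 * the patch. */
#include <stdio.h>

enum demo_key { DEMO_KEY_128, DEMO_KEY_256 };

struct demo_ops {
	void (*init)(const char *iv);
	void (*update)(const char *buf);
	void (*finalize)(void);
};

static void init128(const char *iv) { printf("init128  iv=%s\n", iv); }
static void upd128(const char *buf) { printf("update128 %s\n", buf); }
static void fin128(void)            { printf("finalize128\n"); }
static void init256(const char *iv) { printf("init256  iv=%s\n", iv); }
static void upd256(const char *buf) { printf("update256 %s\n", buf); }
static void fin256(void)            { printf("finalize256\n"); }

/* Designated initializers keep enum and table in sync even if the
 * enum values are reordered later. */
static const struct demo_ops demo_enc[] = {
	[DEMO_KEY_128] = { init128, upd128, fin128 },
	[DEMO_KEY_256] = { init256, upd256, fin256 },
};

int main(void)
{
	enum demo_key k = DEMO_KEY_256;	/* selected at session setup */

	demo_enc[k].init("iv0");
	demo_enc[k].update("payload");
	demo_enc[k].finalize();
	return 0;
}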
/** Parse crypto xform chain and set private session parameters */
int
-aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
- struct aesni_gcm_session *sess,
+aesni_gcm_set_session_parameters(struct aesni_gcm_session *sess,
const struct rte_crypto_sym_xform *xform)
{
- const struct rte_crypto_sym_xform *auth_xform = NULL;
- const struct rte_crypto_sym_xform *cipher_xform = NULL;
-
- uint8_t hsubkey[16] __rte_aligned(16) = { 0 };
+ const struct rte_crypto_sym_xform *auth_xform;
+ const struct rte_crypto_sym_xform *cipher_xform;
- /* Select Crypto operation - hash then cipher / cipher then hash */
- switch (aesni_gcm_get_mode(xform)) {
- case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
- sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+ if (xform->next == NULL || xform->next->next != NULL) {
+ GCM_LOG_ERR("Two and only two chained xform required");
+ return -EINVAL;
+ }
- cipher_xform = xform;
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
auth_xform = xform->next;
- break;
- case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
- sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
-
+ cipher_xform = xform;
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
auth_xform = xform;
cipher_xform = xform->next;
- break;
- default:
- GCM_LOG_ERR("Unsupported operation chain order parameter");
+ } else {
+ GCM_LOG_ERR("Cipher and auth xform required");
return -EINVAL;
}
- /* We only support AES GCM */
- if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_GCM &&
- auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GCM)
+ if (!(cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_AES_GCM &&
+ (auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GCM ||
+ auth_xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC))) {
+ GCM_LOG_ERR("We only support AES GCM and AES GMAC");
return -EINVAL;
+ }
- /* Select cipher direction */
- if (sess->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION &&
- cipher_xform->cipher.op !=
- RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
- GCM_LOG_ERR("xform chain (CIPHER/AUTH) and cipher operation "
- "(DECRYPT) specified are an invalid selection");
- return -EINVAL;
- } else if (sess->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION &&
- cipher_xform->cipher.op !=
- RTE_CRYPTO_CIPHER_OP_DECRYPT) {
- GCM_LOG_ERR("xform chain (AUTH/CIPHER) and cipher operation "
- "(ENCRYPT) specified are an invalid selection");
+ /* Select Crypto operation */
+ if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
+ auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+ else if (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
+ auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
+ sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+ else {
+ GCM_LOG_ERR("Cipher/Auth operations: Encrypt/Generate or"
+ " Decrypt/Verify are valid only");
return -EINVAL;
}
- /* Expand GCM AES128 key */
- (*gcm_ops->aux.keyexp.aes128_enc)(cipher_xform->cipher.key.data,
- sess->gdata.expanded_keys);
+ /* Check key length and calculate GCM pre-compute. */
+ switch (cipher_xform->cipher.key.length) {
+ case 16:
+ aesni_gcm128_pre(cipher_xform->cipher.key.data, &sess->gdata);
+ sess->key = AESNI_GCM_KEY_128;
- /* Calculate hash sub key here */
- aesni_gcm_calculate_hash_sub_key(hsubkey, sizeof(hsubkey),
- cipher_xform->cipher.key.data,
- cipher_xform->cipher.key.length);
+ break;
+ case 32:
+ aesni_gcm256_pre(cipher_xform->cipher.key.data, &sess->gdata);
+ sess->key = AESNI_GCM_KEY_256;
- /* Calculate GCM pre-compute */
- (*gcm_ops->gcm.precomp)(&sess->gdata, hsubkey);
+ break;
+ default:
+ GCM_LOG_ERR("Unsupported cipher key length");
+ return -EINVAL;
+ }
return 0;
}
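
The rewritten session setup folds the old aesni_gcm_get_mode() helper into the function itself: exactly two chained xforms are required, one cipher and one auth in either order, and the cipher/auth op pair then selects authenticated encryption or decryption. The following standalone sketch restates just the chain-shape check, with simplified demo_* stand-ins for the rte_crypto_sym_xform types.

/* Minimal standalone restatement of the two-xform validation above,
 * using simplified stand-in types (demo_*); the real structs live in
 * rte_crypto_sym.h. */
#include <stddef.h>
#include <stdio.h>

enum demo_xform_type { DEMO_XFORM_CIPHER, DEMO_XFORM_AUTH };

struct demo_xform {
	enum demo_xform_type type;
	struct demo_xform *next;
};

/* Returns 0 when the chain is exactly cipher->auth or auth->cipher. */
static int demo_validate_chain(const struct demo_xform *xf)
{
	if (xf->next == NULL || xf->next->next != NULL)
		return -1;	/* two and only two xforms required */
	if (xf->type == xf->next->type)
		return -1;	/* one cipher and one auth required */
	return 0;
}

int main(void)
{
	struct demo_xform auth = { DEMO_XFORM_AUTH, NULL };
	struct demo_xform cipher = { DEMO_XFORM_CIPHER, &auth };

	printf("cipher->auth chain valid: %s\n",
	       demo_validate_chain(&cipher) == 0 ? "yes" : "no");
	return 0;
}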
@@ -194,10 +155,10 @@ aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
return sess;
sess = (struct aesni_gcm_session *)
- ((struct rte_cryptodev_session *)_sess)->_private;
+ ((struct rte_cryptodev_sym_session *)_sess)->_private;
- if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
- sess, op->xform) != 0)) {
+ if (unlikely(aesni_gcm_set_session_parameters(sess,
+ op->xform) != 0)) {
rte_mempool_put(qp->sess_mp, _sess);
sess = NULL;
}
@@ -217,19 +178,45 @@ aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
*
*/
static int
-process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op,
+process_gcm_crypto_op(struct rte_crypto_sym_op *op,
struct aesni_gcm_session *session)
{
uint8_t *src, *dst;
- struct rte_mbuf *m = op->m_src;
+ struct rte_mbuf *m_src = op->m_src;
+ uint32_t offset = op->cipher.data.offset;
+ uint32_t part_len, total_len, data_len;
+
+ RTE_ASSERT(m_src != NULL);
+
+ while (offset >= m_src->data_len) {
+ offset -= m_src->data_len;
+ m_src = m_src->next;
+
+ RTE_ASSERT(m_src != NULL);
+ }
+
+ data_len = m_src->data_len - offset;
+ part_len = (data_len < op->cipher.data.length) ? data_len :
+ op->cipher.data.length;
+
+ /* Destination buffer is required when the source buffer is segmented */
+ RTE_ASSERT((part_len == op->cipher.data.length) ||
+ ((part_len != op->cipher.data.length) &&
+ (op->m_dst != NULL)));
+ /* Segmented destination buffer is not supported */
+ RTE_ASSERT((op->m_dst == NULL) ||
+ ((op->m_dst != NULL) &&
+ rte_pktmbuf_is_contiguous(op->m_dst)));
+
- src = rte_pktmbuf_mtod(m, uint8_t *) + op->cipher.data.offset;
dst = op->m_dst ?
rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
op->cipher.data.offset) :
- rte_pktmbuf_mtod_offset(m, uint8_t *,
+ rte_pktmbuf_mtod_offset(op->m_src, uint8_t *,
op->cipher.data.offset);
+ src = rte_pktmbuf_mtod_offset(m_src, uint8_t *, offset);
+
/* sanity checks */
if (op->cipher.iv.length != 16 && op->cipher.iv.length != 12 &&
op->cipher.iv.length != 0) {
@@ -246,48 +233,81 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op,
*iv_padd = rte_bswap32(1);
}
- if (op->auth.aad.length != 12 && op->auth.aad.length != 8 &&
- op->auth.aad.length != 0) {
- GCM_LOG_ERR("iv");
- return -1;
- }
-
if (op->auth.digest.length != 16 &&
op->auth.digest.length != 12 &&
- op->auth.digest.length != 8 &&
- op->auth.digest.length != 0) {
- GCM_LOG_ERR("iv");
+ op->auth.digest.length != 8) {
+ GCM_LOG_ERR("digest");
return -1;
}
if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
- (*qp->ops->gcm.enc)(&session->gdata, dst, src,
- (uint64_t)op->cipher.data.length,
+ aesni_gcm_enc[session->key].init(&session->gdata,
op->cipher.iv.data,
op->auth.aad.data,
- (uint64_t)op->auth.aad.length,
+ (uint64_t)op->auth.aad.length);
+
+ aesni_gcm_enc[session->key].update(&session->gdata, dst, src,
+ (uint64_t)part_len);
+ total_len = op->cipher.data.length - part_len;
+
+ while (total_len) {
+ dst += part_len;
+ m_src = m_src->next;
+
+ RTE_ASSERT(m_src != NULL);
+
+ src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ part_len = (m_src->data_len < total_len) ?
+ m_src->data_len : total_len;
+
+ aesni_gcm_enc[session->key].update(&session->gdata,
+ dst, src,
+ (uint64_t)part_len);
+ total_len -= part_len;
+ }
+
+ aesni_gcm_enc[session->key].finalize(&session->gdata,
op->auth.digest.data,
(uint64_t)op->auth.digest.length);
- } else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
- uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(m,
+ } else { /* session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION */
+ uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(op->m_dst ?
+ op->m_dst : op->m_src,
op->auth.digest.length);
if (!auth_tag) {
- GCM_LOG_ERR("iv");
+ GCM_LOG_ERR("auth_tag");
return -1;
}
- (*qp->ops->gcm.dec)(&session->gdata, dst, src,
- (uint64_t)op->cipher.data.length,
+ aesni_gcm_dec[session->key].init(&session->gdata,
op->cipher.iv.data,
op->auth.aad.data,
- (uint64_t)op->auth.aad.length,
+ (uint64_t)op->auth.aad.length);
+
+ aesni_gcm_dec[session->key].update(&session->gdata, dst, src,
+ (uint64_t)part_len);
+ total_len = op->cipher.data.length - part_len;
+
+ while (total_len) {
+ dst += part_len;
+ m_src = m_src->next;
+
+ RTE_ASSERT(m_src != NULL);
+
+ src = rte_pktmbuf_mtod(m_src, uint8_t *);
+ part_len = (m_src->data_len < total_len) ?
+ m_src->data_len : total_len;
+
+ aesni_gcm_dec[session->key].update(&session->gdata,
+ dst, src,
+ (uint64_t)part_len);
+ total_len -= part_len;
+ }
+
+ aesni_gcm_dec[session->key].finalize(&session->gdata,
auth_tag,
(uint64_t)op->auth.digest.length);
- } else {
- GCM_LOG_ERR("iv");
- return -1;
}
return 0;
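
The new scatter-gather path first skips whole source segments until op->cipher.data.offset lands inside one, then feeds each segment to the per-key update() hook until op->cipher.data.length bytes are consumed (the destination must stay contiguous). A standalone sketch of that walk, with a hypothetical demo_seg standing in for rte_mbuf:

/* Standalone sketch of the segment walk introduced above: skip whole
 * segments until the offset falls inside one, then consume the request
 * length segment by segment. demo_seg is a stand-in for rte_mbuf. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct demo_seg {
	uint32_t data_len;
	struct demo_seg *next;
};

static void demo_walk(struct demo_seg *s, uint32_t offset, uint32_t length)
{
	uint32_t part_len, total_len;

	while (offset >= s->data_len) {	/* skip segments before offset */
		offset -= s->data_len;
		s = s->next;
		assert(s != NULL);
	}

	part_len = s->data_len - offset;
	if (part_len > length)
		part_len = length;
	printf("update: %u bytes (first segment)\n", part_len);

	for (total_len = length - part_len; total_len != 0;
			total_len -= part_len) {
		s = s->next;
		assert(s != NULL);
		part_len = (s->data_len < total_len) ?
				s->data_len : total_len;
		printf("update: %u bytes\n", part_len);
	}
}

int main(void)
{
	struct demo_seg s2 = { 64, NULL };
	struct demo_seg s1 = { 64, &s2 };

	demo_walk(&s1, 48, 60);	/* 16 bytes from s1, 44 from s2 */
	return 0;
}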
@@ -355,67 +375,73 @@ handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
}
-
- rte_ring_enqueue(qp->processed_pkts, (void *)op);
}
static uint16_t
-aesni_gcm_pmd_enqueue_burst(void *queue_pair,
+aesni_gcm_pmd_dequeue_burst(void *queue_pair,
struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct aesni_gcm_session *sess;
struct aesni_gcm_qp *qp = queue_pair;
- int i, retval = 0;
+ int retval = 0;
+ unsigned int i, nb_dequeued;
+
+ nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+ (void **)ops, nb_ops, NULL);
- for (i = 0; i < nb_ops; i++) {
+ for (i = 0; i < nb_dequeued; i++) {
sess = aesni_gcm_get_session(qp, ops[i]->sym);
if (unlikely(sess == NULL)) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- qp->qp_stats.enqueue_err_count++;
+ qp->qp_stats.dequeue_err_count++;
break;
}
- retval = process_gcm_crypto_op(qp, ops[i]->sym, sess);
+ retval = process_gcm_crypto_op(ops[i]->sym, sess);
if (retval < 0) {
ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
- qp->qp_stats.enqueue_err_count++;
+ qp->qp_stats.dequeue_err_count++;
break;
}
handle_completed_gcm_crypto_op(qp, ops[i]);
-
- qp->qp_stats.enqueued_count++;
}
+
+ qp->qp_stats.dequeued_count += i;
+
return i;
}
static uint16_t
-aesni_gcm_pmd_dequeue_burst(void *queue_pair,
+aesni_gcm_pmd_enqueue_burst(void *queue_pair,
struct rte_crypto_op **ops, uint16_t nb_ops)
{
struct aesni_gcm_qp *qp = queue_pair;
- unsigned nb_dequeued;
+ unsigned int nb_enqueued;
- nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
- (void **)ops, nb_ops);
- qp->qp_stats.dequeued_count += nb_dequeued;
+ nb_enqueued = rte_ring_enqueue_burst(qp->processed_pkts,
+ (void **)ops, nb_ops, NULL);
+ qp->qp_stats.enqueued_count += nb_enqueued;
- return nb_dequeued;
+ return nb_enqueued;
}
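
Note the swap of roles relative to the old code: enqueue now only stages raw ops on the queue pair's processed_pkts ring, and the actual GCM processing happens when the application dequeues. The extra NULL argument matches the rte_ring burst API, which gained a free-space/available out-parameter around DPDK 17.05. A self-contained sketch of this stage-then-process-on-dequeue shape, with a toy single-producer/single-consumer ring standing in for rte_ring:

/* Standalone sketch of the deferred-processing queue pair above:
 * enqueue only stages pointers on a ring; the work happens at dequeue
 * time. demo_ring is a stand-in for rte_ring. */
#include <stdio.h>

#define RING_SZ 8	/* power of two so the indices wrap cleanly */

struct demo_ring {
	void *slot[RING_SZ];
	unsigned int head, tail;	/* single producer/consumer */
};

static unsigned int
demo_enqueue_burst(struct demo_ring *r, void **objs, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n && (r->head - r->tail) < RING_SZ; i++)
		r->slot[r->head++ % RING_SZ] = objs[i];
	return i;	/* may be < n when the ring fills */
}

static unsigned int
demo_dequeue_burst(struct demo_ring *r, void **objs, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n && r->tail != r->head; i++)
		objs[i] = r->slot[r->tail++ % RING_SZ];
	return i;
}

int main(void)
{
	struct demo_ring ring = { {0}, 0, 0 };
	int op = 42;
	void *in[1] = { &op }, *out[1];
	unsigned int n;

	demo_enqueue_burst(&ring, in, 1);	/* staged, untouched */
	n = demo_dequeue_burst(&ring, out, 1);	/* process here */
	printf("processed %u op(s): %d\n", n, *(int *)out[0]);
	return 0;
}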
-static int aesni_gcm_remove(const char *name);
+static int aesni_gcm_remove(struct rte_vdev_device *vdev);
static int
aesni_gcm_create(const char *name,
+ struct rte_vdev_device *vdev,
struct rte_crypto_vdev_init_params *init_params)
{
struct rte_cryptodev *dev;
- char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
struct aesni_gcm_private *internals;
- enum aesni_gcm_vector_mode vector_mode;
+
+ if (init_params->name[0] == '\0')
+ snprintf(init_params->name, sizeof(init_params->name),
+ "%s", name);
/* Check CPU for support for AES instruction set */
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
@@ -423,27 +449,7 @@ aesni_gcm_create(const char *name,
return -EFAULT;
}
- /* Check CPU for supported vector instruction set */
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
- vector_mode = RTE_AESNI_GCM_AVX2;
- else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
- vector_mode = RTE_AESNI_GCM_AVX;
- else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
- vector_mode = RTE_AESNI_GCM_SSE;
- else {
- GCM_LOG_ERR("Vector instructions are not supported by CPU");
- return -EFAULT;
- }
-
- /* create a unique device name */
- if (create_unique_device_name(crypto_dev_name,
- RTE_CRYPTODEV_NAME_MAX_LEN) != 0) {
- GCM_LOG_ERR("failed to create unique cryptodev name");
- return -EINVAL;
- }
-
-
- dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name,
+ dev = rte_cryptodev_pmd_virtual_dev_init(init_params->name,
sizeof(struct aesni_gcm_private), init_params->socket_id);
if (dev == NULL) {
GCM_LOG_ERR("failed to create cryptodev vdev");
@@ -459,63 +465,60 @@ aesni_gcm_create(const char *name,
dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
- RTE_CRYPTODEV_FF_CPU_AESNI;
+ RTE_CRYPTODEV_FF_CPU_AESNI |
+ RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
- switch (vector_mode) {
- case RTE_AESNI_GCM_SSE:
- dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
- break;
- case RTE_AESNI_GCM_AVX:
- dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX;
- break;
- case RTE_AESNI_GCM_AVX2:
- dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2;
- break;
- default:
- break;
- }
-
- /* Set vector instructions mode supported */
internals = dev->data->dev_private;
- internals->vector_mode = vector_mode;
-
internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
internals->max_nb_sessions = init_params->max_nb_sessions;
return 0;
init_error:
- GCM_LOG_ERR("driver %s: create failed", name);
+ GCM_LOG_ERR("driver %s: create failed", init_params->name);
- aesni_gcm_remove(crypto_dev_name);
+ aesni_gcm_remove(vdev);
return -EFAULT;
}
static int
-aesni_gcm_probe(const char *name, const char *input_args)
+aesni_gcm_probe(struct rte_vdev_device *vdev)
{
struct rte_crypto_vdev_init_params init_params = {
RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
- rte_socket_id()
+ rte_socket_id(),
+ {0}
};
+ const char *name;
+ const char *input_args;
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ input_args = rte_vdev_device_args(vdev);
rte_cryptodev_parse_vdev_init_params(&init_params, input_args);
RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
init_params.socket_id);
+ if (init_params.name[0] != '\0')
+ RTE_LOG(INFO, PMD, " User defined name = %s\n",
+ init_params.name);
RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
init_params.max_nb_queue_pairs);
RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
init_params.max_nb_sessions);
- return aesni_gcm_create(name, &init_params);
+ return aesni_gcm_create(name, vdev, &init_params);
}
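
With the move to rte_vdev_device, probe and remove receive an opaque device handle and derive the name and arguments from it instead of taking strings. A standalone sketch of that handle-based pattern, with demo_* stand-ins for the rte_vdev accessors:

/* Standalone sketch of the handle-based probe pattern above: the bus
 * passes an opaque device handle and the driver derives name/args from
 * it. demo_vdev and its accessors are stand-ins for the rte_vdev API. */
#include <stddef.h>
#include <stdio.h>

struct demo_vdev {
	const char *name;
	const char *args;
};

static const char *demo_vdev_name(const struct demo_vdev *d)
{
	return d->name;
}

static const char *demo_vdev_args(const struct demo_vdev *d)
{
	return d->args;
}

static int demo_probe(struct demo_vdev *dev)
{
	const char *name = demo_vdev_name(dev);

	if (name == NULL)
		return -1;	/* mirrors the -EINVAL check */
	printf("probing %s with args '%s'\n", name, demo_vdev_args(dev));
	return 0;
}

int main(void)
{
	struct demo_vdev dev = { "crypto_aesni_gcm0", "max_nb_sessions=128" };

	return demo_probe(&dev);
}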
static int
-aesni_gcm_remove(const char *name)
+aesni_gcm_remove(struct rte_vdev_device *vdev)
{
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;