Diffstat (limited to 'drivers/crypto/aesni_gcm')
-rw-r--r--   drivers/crypto/aesni_gcm/Makefile                         67
-rw-r--r--   drivers/crypto/aesni_gcm/aesni_gcm_ops.h                 127
-rw-r--r--   drivers/crypto/aesni_gcm/aesni_gcm_pmd.c                 524
-rw-r--r--   drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c             343
-rw-r--r--   drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h         120
-rw-r--r--   drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map     3
6 files changed, 1184 insertions(+), 0 deletions(-)
diff --git a/drivers/crypto/aesni_gcm/Makefile b/drivers/crypto/aesni_gcm/Makefile
new file mode 100644
index 00000000..aa2621bd
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/Makefile
@@ -0,0 +1,67 @@
+# BSD LICENSE
+#
+#   Copyright(c) 2016 Intel Corporation. All rights reserved.
+#
+#   Redistribution and use in source and binary forms, with or without
+#   modification, are permitted provided that the following conditions
+#   are met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in
+#       the documentation and/or other materials provided with the
+#       distribution.
+#     * Neither the name of Intel Corporation nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+#   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),)
+$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable")
+endif
+
+# library name
+LIB = librte_pmd_aesni_gcm.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_aesni_gcm_version.map
+
+# external library include paths
+CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
+CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
+LDLIBS += -lcrypto
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd_ops.c
+
+# export include files
+SYMLINK-y-include +=
+
+# library dependencies
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_eal
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_mbuf
+DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_cryptodev
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
new file mode 100644
index 00000000..c399068c
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -0,0 +1,127 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _AESNI_GCM_OPS_H_
+#define _AESNI_GCM_OPS_H_
+
+#ifndef LINUX
+#define LINUX
+#endif
+
+#include <gcm_defines.h>
+#include <aux_funcs.h>
+
+/** Supported vector modes */
+enum aesni_gcm_vector_mode {
+	RTE_AESNI_GCM_NOT_SUPPORTED = 0,
+	RTE_AESNI_GCM_SSE,
+	RTE_AESNI_GCM_AVX,
+	RTE_AESNI_GCM_AVX2
+};
+
+typedef void (*aes_keyexp_128_enc_t)(void *key, void *enc_exp_keys);
+
+typedef void (*aesni_gcm_t)(gcm_data *my_ctx_data, u8 *out, const u8 *in,
+		u64 plaintext_len, u8 *iv, const u8 *aad, u64 aad_len,
+		u8 *auth_tag, u64 auth_tag_len);
+
+typedef void (*aesni_gcm_precomp_t)(gcm_data *my_ctx_data, u8 *hash_subkey);
+
+/** GCM library function pointer table */
+struct aesni_gcm_ops {
+	struct {
+		struct {
+			aes_keyexp_128_enc_t aes128_enc;
+			/**< AES128 enc key expansion */
+		} keyexp;
+		/**< Key expansion functions */
+	} aux; /**< Auxiliary functions */
+
+	struct {
+		aesni_gcm_t enc;	/**< GCM encode function pointer */
+		aesni_gcm_t dec;	/**< GCM decode function pointer */
+		aesni_gcm_precomp_t precomp;	/**< GCM pre-compute */
+	} gcm; /**< GCM functions */
+};
+
+
+static const struct aesni_gcm_ops gcm_ops[] = {
+	[RTE_AESNI_GCM_NOT_SUPPORTED] = {
+		.aux = {
+			.keyexp = {
+				NULL
+			}
+		},
+		.gcm = {
+			NULL
+		}
+	},
+	[RTE_AESNI_GCM_SSE] = {
+		.aux = {
+			.keyexp = {
+				aes_keyexp_128_enc_sse
+			}
+		},
+		.gcm = {
+			aesni_gcm_enc_sse,
+			aesni_gcm_dec_sse,
+			aesni_gcm_precomp_sse
+		}
+	},
+	[RTE_AESNI_GCM_AVX] = {
+		.aux = {
+			.keyexp = {
+				aes_keyexp_128_enc_avx,
+			}
+		},
+		.gcm = {
+			aesni_gcm_enc_avx_gen2,
+			aesni_gcm_dec_avx_gen2,
+			aesni_gcm_precomp_avx_gen2
+		}
+	},
+	[RTE_AESNI_GCM_AVX2] = {
+		.aux = {
+			.keyexp = {
+				aes_keyexp_128_enc_avx2,
+			}
+		},
+		.gcm = {
+			aesni_gcm_enc_avx_gen4,
+			aesni_gcm_dec_avx_gen4,
+			aesni_gcm_precomp_avx_gen4
+		}
+	}
+};
+
+
+#endif /* _AESNI_GCM_OPS_H_ */
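The gcm_ops[] table above holds one implementation set per vector mode, indexed by enum aesni_gcm_vector_mode. As a minimal standalone sketch (the select_gcm_ops name is hypothetical), a caller would resolve an entry from CPU feature flags the same way aesni_gcm_create() does further down in this patch:

#include <rte_cpuflags.h>

#include "aesni_gcm_ops.h"

/* Pick the widest implementation the CPU supports; mirrors the probe
 * performed in aesni_gcm_create() below. Returns NULL when no usable
 * vector ISA is present. */
static const struct aesni_gcm_ops *
select_gcm_ops(void)
{
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
		return &gcm_ops[RTE_AESNI_GCM_AVX2];
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
		return &gcm_ops[RTE_AESNI_GCM_AVX];
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
		return &gcm_ops[RTE_AESNI_GCM_SSE];
	return NULL;
}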
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
new file mode 100644
index 00000000..2987ef6b
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -0,0 +1,524 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <openssl/aes.h>
+
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "aesni_gcm_pmd_private.h"
+
+/**
+ * Global static parameter used to create a unique name for each
+ * AES-NI GCM crypto device.
+ */
+static unsigned unique_name_id;
+
+static inline int
+create_unique_device_name(char *name, size_t size)
+{
+	int ret;
+
+	if (name == NULL)
+		return -EINVAL;
+
+	ret = snprintf(name, size, "%s_%u", CRYPTODEV_NAME_AESNI_GCM_PMD,
+			unique_name_id++);
+	if (ret < 0)
+		return ret;
+	return 0;
+}
+
+static int
+aesni_gcm_calculate_hash_sub_key(uint8_t *hsubkey, unsigned hsubkey_length,
+		uint8_t *aeskey, unsigned aeskey_length)
+{
+	uint8_t key[aeskey_length] __rte_aligned(16);
+	AES_KEY enc_key;
+
+	if (hsubkey_length % 16 != 0 || aeskey_length % 16 != 0)
+		return -EFAULT;
+
+	memcpy(key, aeskey, aeskey_length);
+
+	if (AES_set_encrypt_key(key, aeskey_length << 3, &enc_key) != 0)
+		return -EFAULT;
+
+	AES_encrypt(hsubkey, hsubkey, &enc_key);
+
+	return 0;
+}
+
+/** Get xform chain order */
+static int
+aesni_gcm_get_mode(const struct rte_crypto_sym_xform *xform)
+{
+	/*
+	 * GCM only supports authenticated encryption or authenticated
+	 * decryption, all other options are invalid, so we must have exactly
+	 * 2 xform structs chained together
+	 */
+	if (xform->next == NULL || xform->next->next != NULL)
+		return -1;
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+		return AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+	}
+
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+		return AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+	}
+
+	return -1;
+}
+
+/** Parse crypto xform chain and set private session parameters */
+int
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops,
+		struct aesni_gcm_session *sess,
+		const struct rte_crypto_sym_xform *xform)
+{
+	const struct rte_crypto_sym_xform *auth_xform = NULL;
+	const struct rte_crypto_sym_xform *cipher_xform = NULL;
+
+	uint8_t hsubkey[16] __rte_aligned(16) = { 0 };
+
+	/* Select Crypto operation - hash then cipher / cipher then hash */
+	switch (aesni_gcm_get_mode(xform)) {
+	case AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION:
+		sess->op = AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION;
+
+		cipher_xform = xform;
+		auth_xform = xform->next;
+		break;
+	case AESNI_GCM_OP_AUTHENTICATED_DECRYPTION:
+		sess->op = AESNI_GCM_OP_AUTHENTICATED_DECRYPTION;
+
+		auth_xform = xform;
+		cipher_xform = xform->next;
+		break;
+	default:
+		GCM_LOG_ERR("Unsupported operation chain order parameter");
+		return -EINVAL;
+	}
+
+	/* We only support AES GCM */
+	if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_GCM ||
+			auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GCM)
+		return -EINVAL;
+
+	/* Select cipher direction */
+	if (sess->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION &&
+			cipher_xform->cipher.op !=
+			RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+		GCM_LOG_ERR("xform chain (CIPHER/AUTH) and cipher operation "
+				"(DECRYPT) specified are an invalid selection");
+		return -EINVAL;
+	} else if (sess->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION &&
+			cipher_xform->cipher.op !=
+			RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+		GCM_LOG_ERR("xform chain (AUTH/CIPHER) and cipher operation "
+				"(ENCRYPT) specified are an invalid selection");
+		return -EINVAL;
+	}
+
+	/* Expand GCM AES128 key */
+	(*gcm_ops->aux.keyexp.aes128_enc)(cipher_xform->cipher.key.data,
+			sess->gdata.expanded_keys);
+
+	/* Calculate hash sub key here */
+	aesni_gcm_calculate_hash_sub_key(hsubkey, sizeof(hsubkey),
+			cipher_xform->cipher.key.data,
+			cipher_xform->cipher.key.length);
+
+	/* Calculate GCM pre-compute */
+	(*gcm_ops->gcm.precomp)(&sess->gdata, hsubkey);
+
+	return 0;
+}
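aesni_gcm_set_session_parameters() accepts exactly two chained xforms: cipher-then-auth for authenticated encryption, auth-then-cipher for decryption, both using the AES_GCM algorithms. A hedged sketch of a chain it would accept for encryption, with field names as in the 16.04-era rte_crypto_sym.h and key_data a hypothetical 16-byte buffer:

struct rte_crypto_sym_xform auth_xform = {
	.next = NULL,
	.type = RTE_CRYPTO_SYM_XFORM_AUTH,
	.auth = {
		.op = RTE_CRYPTO_AUTH_OP_GENERATE,
		.algo = RTE_CRYPTO_AUTH_AES_GCM,
		.digest_length = 16,		/* 8, 12 or 16 */
		.add_auth_data_length = 12,	/* 8 or 12 */
	},
};

struct rte_crypto_sym_xform cipher_xform = {
	.next = &auth_xform,			/* cipher first: encryption */
	.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
	.cipher = {
		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
		.algo = RTE_CRYPTO_CIPHER_AES_GCM,
		.key = { .data = key_data, .length = 16 }, /* AES-128 only */
	},
};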
+
+/** Get gcm session */
+static struct aesni_gcm_session *
+aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op)
+{
+	struct aesni_gcm_session *sess = NULL;
+
+	if (op->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) {
+		if (unlikely(op->session->dev_type
+				!= RTE_CRYPTODEV_AESNI_GCM_PMD))
+			return sess;
+
+		sess = (struct aesni_gcm_session *)op->session->_private;
+	} else {
+		void *_sess;
+
+		if (rte_mempool_get(qp->sess_mp, &_sess))
+			return sess;
+
+		sess = (struct aesni_gcm_session *)
+			((struct rte_cryptodev_session *)_sess)->_private;
+
+		if (unlikely(aesni_gcm_set_session_parameters(qp->ops,
+				sess, op->xform) != 0)) {
+			rte_mempool_put(qp->sess_mp, _sess);
+			sess = NULL;
+		}
+	}
+	return sess;
+}
+
+/**
+ * Process a crypto operation, calling the GCM library encrypt or decrypt
+ * function of the given session on the operation's mbuf data.
+ *
+ * @param	qp		queue pair
+ * @param	op		symmetric crypto operation
+ * @param	session		GCM session
+ *
+ * @return
+ * - 0 on success
+ * - -1 on invalid operation parameters
+ */
+static int
+process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_sym_op *op,
+		struct aesni_gcm_session *session)
+{
+	uint8_t *src, *dst;
+	struct rte_mbuf *m = op->m_src;
+
+	src = rte_pktmbuf_mtod(m, uint8_t *) + op->cipher.data.offset;
+	dst = op->m_dst ?
+			rte_pktmbuf_mtod_offset(op->m_dst, uint8_t *,
+					op->cipher.data.offset) :
+			rte_pktmbuf_mtod_offset(m, uint8_t *,
+					op->cipher.data.offset);
+
+	/* sanity checks */
+	if (op->cipher.iv.length != 16 && op->cipher.iv.length != 0) {
+		GCM_LOG_ERR("iv");
+		return -1;
+	}
+
+	if (op->auth.aad.length != 12 && op->auth.aad.length != 8 &&
+			op->auth.aad.length != 0) {
+		GCM_LOG_ERR("aad");
+		return -1;
+	}
+
+	if (op->auth.digest.length != 16 &&
+			op->auth.digest.length != 12 &&
+			op->auth.digest.length != 8 &&
+			op->auth.digest.length != 0) {
+		GCM_LOG_ERR("digest");
+		return -1;
+	}
+
+	if (session->op == AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION) {
+
+		(*qp->ops->gcm.enc)(&session->gdata, dst, src,
+				(uint64_t)op->cipher.data.length,
+				op->cipher.iv.data,
+				op->auth.aad.data,
+				(uint64_t)op->auth.aad.length,
+				op->auth.digest.data,
+				(uint64_t)op->auth.digest.length);
+	} else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+		uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(m,
+				op->auth.digest.length);
+
+		if (!auth_tag) {
+			GCM_LOG_ERR("failed to append auth tag to mbuf");
+			return -1;
+		}
+
+		(*qp->ops->gcm.dec)(&session->gdata, dst, src,
+				(uint64_t)op->cipher.data.length,
+				op->cipher.iv.data,
+				op->auth.aad.data,
+				(uint64_t)op->auth.aad.length,
+				auth_tag,
+				(uint64_t)op->auth.digest.length);
+	} else {
+		GCM_LOG_ERR("invalid session operation");
+		return -1;
+	}
+
+	return 0;
+}
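The sanity checks above pin down what an operation must carry: a 16-byte IV, an 8- or 12-byte AAD, and an 8-, 12- or 16-byte digest. A sketch of filling those fields for an op whose mbuf holds AAD, IV and plaintext back to back; the layout and the aad_len, plaintext_len and digest_buf names are hypothetical:

/* mbuf m: [ AAD | 16-byte IV | plaintext ] (assumed layout) */
op->sym->m_src = m;
op->sym->cipher.data.offset = aad_len + 16;	/* skip AAD and IV */
op->sym->cipher.data.length = plaintext_len;
op->sym->cipher.iv.data = rte_pktmbuf_mtod_offset(m, uint8_t *, aad_len);
op->sym->cipher.iv.length = 16;
op->sym->auth.aad.data = rte_pktmbuf_mtod(m, uint8_t *);
op->sym->auth.aad.length = aad_len;		/* 8 or 12 */
op->sym->auth.digest.data = digest_buf;		/* written on encryption */
op->sym->auth.digest.length = 16;		/* 8, 12 or 16 */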
+
+/**
+ * Post-process a completed GCM crypto operation: on authenticated
+ * decryption, verify the digest computed by the library against the digest
+ * supplied with the operation and set the operation status accordingly.
+ *
+ * @param	op	completed crypto operation
+ */
+static void
+post_process_gcm_crypto_op(struct rte_crypto_op *op)
+{
+	struct rte_mbuf *m = op->sym->m_dst ?
+			op->sym->m_dst : op->sym->m_src;
+
+	struct aesni_gcm_session *session =
+		(struct aesni_gcm_session *)op->sym->session->_private;
+
+	op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+	/* Verify digest if required */
+	if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
+
+		uint8_t *tag = rte_pktmbuf_mtod_offset(m, uint8_t *,
+				m->data_len - op->sym->auth.digest.length);
+
+#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
+		rte_hexdump(stdout, "auth tag (orig):",
+				op->sym->auth.digest.data,
+				op->sym->auth.digest.length);
+		rte_hexdump(stdout, "auth tag (calc):",
+				tag, op->sym->auth.digest.length);
+#endif
+
+		if (memcmp(tag, op->sym->auth.digest.data,
+				op->sym->auth.digest.length) != 0)
+			op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+		/* trim area used for digest from mbuf */
+		rte_pktmbuf_trim(m, op->sym->auth.digest.length);
+	}
+}
+
+/**
+ * Complete a GCM request: post-process the operation, release the session
+ * back to its mempool if the operation was session-less and place the
+ * operation on the queue pair's processed-packets ring.
+ *
+ * @param	qp	queue pair
+ * @param	op	completed crypto operation
+ */
+static void
+handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
+		struct rte_crypto_op *op)
+{
+	post_process_gcm_crypto_op(op);
+
+	/* Free session if a session-less crypto op */
+	if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) {
+		rte_mempool_put(qp->sess_mp, op->sym->session);
+		op->sym->session = NULL;
+	}
+
+	rte_ring_enqueue(qp->processed_pkts, (void *)op);
+}
+
+static uint16_t
+aesni_gcm_pmd_enqueue_burst(void *queue_pair,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct aesni_gcm_session *sess;
+	struct aesni_gcm_qp *qp = queue_pair;
+
+	int i, retval = 0;
+
+	for (i = 0; i < nb_ops; i++) {
+
+		sess = aesni_gcm_get_session(qp, ops[i]->sym);
+		if (unlikely(sess == NULL)) {
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			qp->qp_stats.enqueue_err_count++;
+			break;
+		}
+
+		retval = process_gcm_crypto_op(qp, ops[i]->sym, sess);
+		if (retval < 0) {
+			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+			qp->qp_stats.enqueue_err_count++;
+			break;
+		}
+
+		handle_completed_gcm_crypto_op(qp, ops[i]);
+
+		qp->qp_stats.enqueued_count++;
+	}
+	return i;
+}
+
+static uint16_t
+aesni_gcm_pmd_dequeue_burst(void *queue_pair,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct aesni_gcm_qp *qp = queue_pair;
+
+	unsigned nb_dequeued;
+
+	nb_dequeued = rte_ring_dequeue_burst(qp->processed_pkts,
+			(void **)ops, nb_ops);
+	qp->qp_stats.dequeued_count += nb_dequeued;
+
+	return nb_dequeued;
+}
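Because each operation is processed synchronously inside the enqueue call and parked on the processed_pkts ring, every successfully enqueued op is immediately dequeuable. A minimal sketch of the burst loop, with dev_id and queue pair 0 assumed:

uint16_t sent, done = 0;

sent = rte_cryptodev_enqueue_burst(dev_id, 0, ops, nb_ops);
while (done < sent)
	done += rte_cryptodev_dequeue_burst(dev_id, 0,
			ops + done, sent - done);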
name"); + return -EINVAL; + } + + + dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name, + sizeof(struct aesni_gcm_private), init_params->socket_id); + if (dev == NULL) { + GCM_LOG_ERR("failed to create cryptodev vdev"); + goto init_error; + } + + dev->dev_type = RTE_CRYPTODEV_AESNI_GCM_PMD; + dev->dev_ops = rte_aesni_gcm_pmd_ops; + + /* register rx/tx burst functions for data path */ + dev->dequeue_burst = aesni_gcm_pmd_dequeue_burst; + dev->enqueue_burst = aesni_gcm_pmd_enqueue_burst; + + dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | + RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | + RTE_CRYPTODEV_FF_CPU_AESNI; + + switch (vector_mode) { + case RTE_AESNI_GCM_SSE: + dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_SSE; + break; + case RTE_AESNI_GCM_AVX: + dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX; + break; + case RTE_AESNI_GCM_AVX2: + dev->feature_flags |= RTE_CRYPTODEV_FF_CPU_AVX2; + break; + default: + break; + } + + /* Set vector instructions mode supported */ + internals = dev->data->dev_private; + + internals->vector_mode = vector_mode; + + internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs; + internals->max_nb_sessions = init_params->max_nb_sessions; + + return 0; + +init_error: + GCM_LOG_ERR("driver %s: create failed", name); + + aesni_gcm_uninit(crypto_dev_name); + return -EFAULT; +} + +static int +aesni_gcm_init(const char *name, const char *input_args) +{ + struct rte_crypto_vdev_init_params init_params = { + RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS, + RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS, + rte_socket_id() + }; + + rte_cryptodev_parse_vdev_init_params(&init_params, input_args); + + RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name, + init_params.socket_id); + RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n", + init_params.max_nb_queue_pairs); + RTE_LOG(INFO, PMD, " Max number of sessions = %d\n", + init_params.max_nb_sessions); + + return aesni_gcm_create(name, &init_params); +} + +static int +aesni_gcm_uninit(const char *name) +{ + if (name == NULL) + return -EINVAL; + + GCM_LOG_INFO("Closing AESNI crypto device %s on numa socket %u\n", + name, rte_socket_id()); + + return 0; +} + +static struct rte_driver aesni_gcm_pmd_drv = { + .name = CRYPTODEV_NAME_AESNI_GCM_PMD, + .type = PMD_VDEV, + .init = aesni_gcm_init, + .uninit = aesni_gcm_uninit +}; + +PMD_REGISTER_DRIVER(aesni_gcm_pmd_drv); diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c new file mode 100644 index 00000000..e824d4b3 --- /dev/null +++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c @@ -0,0 +1,343 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
new file mode 100644
index 00000000..e824d4b3
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
@@ -0,0 +1,343 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "aesni_gcm_pmd_private.h"
+
+static const struct rte_cryptodev_capabilities aesni_gcm_pmd_capabilities[] = {
+	{	/* AES GCM (AUTH) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+			{.auth = {
+				.algo = RTE_CRYPTO_AUTH_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.digest_size = {
+					.min = 8,
+					.max = 16,
+					.increment = 4
+				},
+				.aad_size = {
+					.min = 8,
+					.max = 12,
+					.increment = 4
+				}
+			}, }
+		}, }
+	},
+	{	/* AES GCM (CIPHER) */
+		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+		{.sym = {
+			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+			{.cipher = {
+				.algo = RTE_CRYPTO_CIPHER_AES_GCM,
+				.block_size = 16,
+				.key_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				},
+				.iv_size = {
+					.min = 16,
+					.max = 16,
+					.increment = 0
+				}
+			}, }
+		}, }
+	},
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
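The capability table above is what rte_cryptodev_info_get() hands back to applications through the dev_infos_get callback below. A sketch that walks it to read the advertised digest-size range, with dev_id assumed:

struct rte_cryptodev_info info;
const struct rte_cryptodev_capabilities *cap;

rte_cryptodev_info_get(dev_id, &info);
for (cap = info.capabilities;
		cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) {
	if (cap->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH ||
			cap->sym.auth.algo != RTE_CRYPTO_AUTH_AES_GCM)
		continue;
	printf("AES-GCM digest: %u..%u bytes (step %u)\n",
			cap->sym.auth.digest_size.min,
			cap->sym.auth.digest_size.max,
			cap->sym.auth.digest_size.increment);
}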
+
+/** Configure device */
+static int
+aesni_gcm_pmd_config(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+/** Start device */
+static int
+aesni_gcm_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+/** Stop device */
+static void
+aesni_gcm_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/** Close device */
+static int
+aesni_gcm_pmd_close(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+
+/** Get device statistics */
+static void
+aesni_gcm_pmd_stats_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_stats *stats)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
+
+		stats->enqueued_count += qp->qp_stats.enqueued_count;
+		stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+		stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+	}
+}
+
+/** Reset device statistics */
+static void
+aesni_gcm_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct aesni_gcm_qp *qp = dev->data->queue_pairs[qp_id];
+
+		memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+	}
+}
+
+
+/** Get device info */
+static void
+aesni_gcm_pmd_info_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_info *dev_info)
+{
+	struct aesni_gcm_private *internals = dev->data->dev_private;
+
+	if (dev_info != NULL) {
+		dev_info->dev_type = dev->dev_type;
+		dev_info->feature_flags = dev->feature_flags;
+		dev_info->capabilities = aesni_gcm_pmd_capabilities;
+
+		dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+	}
+}
+
+/** Release queue pair */
+static int
+aesni_gcm_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+	if (dev->data->queue_pairs[qp_id] != NULL) {
+		rte_free(dev->data->queue_pairs[qp_id]);
+		dev->data->queue_pairs[qp_id] = NULL;
+	}
+	return 0;
+}
+
+/** Set a unique name for the queue pair based on its name, dev_id and qp_id */
+static int
+aesni_gcm_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+		struct aesni_gcm_qp *qp)
+{
+	unsigned n = snprintf(qp->name, sizeof(qp->name),
+			"aesni_gcm_pmd_%u_qp_%u",
+			dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+		return -1;
+
+	return 0;
+}
+
+/** Create a ring to place processed packets on */
+static struct rte_ring *
+aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp,
+		unsigned ring_size, int socket_id)
+{
+	struct rte_ring *r;
+
+	r = rte_ring_lookup(qp->name);
+	if (r) {
+		if (r->prod.size >= ring_size) {
+			GCM_LOG_INFO("Reusing existing ring %s for processed"
+					" packets", qp->name);
+			return r;
+		}
+
+		GCM_LOG_ERR("Unable to reuse existing ring %s for processed"
+				" packets", qp->name);
+		return NULL;
+	}
+
+	return rte_ring_create(qp->name, ring_size, socket_id,
+			RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+/** Setup a queue pair */
+static int
+aesni_gcm_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+		const struct rte_cryptodev_qp_conf *qp_conf,
+		int socket_id)
+{
+	struct aesni_gcm_qp *qp = NULL;
+	struct aesni_gcm_private *internals = dev->data->dev_private;
+
+	/* Free memory prior to re-allocation if needed. */
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		aesni_gcm_pmd_qp_release(dev, qp_id);
+
+	/* Allocate the queue pair data structure. */
+	qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
+			RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp == NULL)
+		return -ENOMEM;
+
+	qp->id = qp_id;
+	dev->data->queue_pairs[qp_id] = qp;
+
+	if (aesni_gcm_pmd_qp_set_unique_name(dev, qp))
+		goto qp_setup_cleanup;
+
+	qp->ops = &gcm_ops[internals->vector_mode];
+
+	qp->processed_pkts = aesni_gcm_pmd_qp_create_processed_pkts_ring(qp,
+			qp_conf->nb_descriptors, socket_id);
+	if (qp->processed_pkts == NULL)
+		goto qp_setup_cleanup;
+
+	qp->sess_mp = dev->data->session_pool;
+
+	memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+
+	return 0;
+
+qp_setup_cleanup:
+	if (qp)
+		rte_free(qp);
+
+	return -1;
+}
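qp_setup above allocates the queue pair and its completion ring; from the application side the sequence is the usual configure/setup/start dance. A sketch with arbitrarily chosen sizes, struct layout per the 16.04-era cryptodev API:

struct rte_cryptodev_config conf = {
	.socket_id = rte_socket_id(),
	.nb_queue_pairs = 1,
	.session_mp = { .nb_objs = 1024, .cache_size = 64 },
};
struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };

rte_cryptodev_configure(dev_id, &conf);
rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf, rte_socket_id());
rte_cryptodev_start(dev_id);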
GCM_LOG_ERR("failed configure session parameters"); + return NULL; + } + + return sess; +} + +/** Clear the memory of session so it doesn't leave key material behind */ +static void +aesni_gcm_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess) +{ + if (sess) + memset(sess, 0, sizeof(struct aesni_gcm_session)); +} + +struct rte_cryptodev_ops aesni_gcm_pmd_ops = { + .dev_configure = aesni_gcm_pmd_config, + .dev_start = aesni_gcm_pmd_start, + .dev_stop = aesni_gcm_pmd_stop, + .dev_close = aesni_gcm_pmd_close, + + .stats_get = aesni_gcm_pmd_stats_get, + .stats_reset = aesni_gcm_pmd_stats_reset, + + .dev_infos_get = aesni_gcm_pmd_info_get, + + .queue_pair_setup = aesni_gcm_pmd_qp_setup, + .queue_pair_release = aesni_gcm_pmd_qp_release, + .queue_pair_start = aesni_gcm_pmd_qp_start, + .queue_pair_stop = aesni_gcm_pmd_qp_stop, + .queue_pair_count = aesni_gcm_pmd_qp_count, + + .session_get_size = aesni_gcm_pmd_session_get_size, + .session_configure = aesni_gcm_pmd_session_configure, + .session_clear = aesni_gcm_pmd_session_clear +}; + +struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops; diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h new file mode 100644 index 00000000..a42f9414 --- /dev/null +++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h @@ -0,0 +1,120 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _RTE_AESNI_GCM_PMD_PRIVATE_H_ +#define _RTE_AESNI_GCM_PMD_PRIVATE_H_ + +#include "aesni_gcm_ops.h" + +#define GCM_LOG_ERR(fmt, args...) \ + RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ + CRYPTODEV_NAME_AESNI_GCM_PMD, \ + __func__, __LINE__, ## args) + +#ifdef RTE_LIBRTE_AESNI_MB_DEBUG +#define GCM_LOG_INFO(fmt, args...) \ + RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ + CRYPTODEV_NAME_AESNI_GCM_PMD, \ + __func__, __LINE__, ## args) + +#define GCM_LOG_DBG(fmt, args...) 
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
new file mode 100644
index 00000000..a42f9414
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
@@ -0,0 +1,120 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_AESNI_GCM_PMD_PRIVATE_H_
+#define _RTE_AESNI_GCM_PMD_PRIVATE_H_
+
+#include "aesni_gcm_ops.h"
+
+#define GCM_LOG_ERR(fmt, args...) \
+	RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			CRYPTODEV_NAME_AESNI_GCM_PMD, \
+			__func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_PMD_AESNI_GCM_DEBUG
+#define GCM_LOG_INFO(fmt, args...) \
+	RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			CRYPTODEV_NAME_AESNI_GCM_PMD, \
+			__func__, __LINE__, ## args)
+
+#define GCM_LOG_DBG(fmt, args...) \
+	RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			CRYPTODEV_NAME_AESNI_GCM_PMD, \
+			__func__, __LINE__, ## args)
+#else
+#define GCM_LOG_INFO(fmt, args...)
+#define GCM_LOG_DBG(fmt, args...)
+#endif
+
+
+/** private data structure for each virtual AESNI GCM device */
+struct aesni_gcm_private {
+	enum aesni_gcm_vector_mode vector_mode;
+	/**< Vector mode */
+	unsigned max_nb_queue_pairs;
+	/**< Max number of queue pairs supported by device */
+	unsigned max_nb_sessions;
+	/**< Max number of sessions supported by device */
+};
+
+struct aesni_gcm_qp {
+	uint16_t id;
+	/**< Queue Pair Identifier */
+	char name[RTE_CRYPTODEV_NAME_LEN];
+	/**< Unique Queue Pair Name */
+	const struct aesni_gcm_ops *ops;
+	/**< Architecture dependent function pointer table of the gcm APIs */
+	struct rte_ring *processed_pkts;
+	/**< Ring for placing processed packets */
+	struct rte_mempool *sess_mp;
+	/**< Session Mempool */
+	struct rte_cryptodev_stats qp_stats;
+	/**< Queue pair statistics */
+} __rte_cache_aligned;
+
+
+enum aesni_gcm_operation {
+	AESNI_GCM_OP_AUTHENTICATED_ENCRYPTION,
+	AESNI_GCM_OP_AUTHENTICATED_DECRYPTION
+};
+
+/** AESNI GCM private session structure */
+struct aesni_gcm_session {
+	enum aesni_gcm_operation op;
+	/**< GCM operation type */
+	struct gcm_data gdata __rte_cache_aligned;
+	/**< GCM parameters */
+};
+
+
+/**
+ * Setup GCM session parameters
+ *
+ * @param	ops	gcm ops function pointer table
+ * @param	sess	aesni gcm session structure
+ * @param	xform	crypto transform chain
+ *
+ * @return
+ * - On success returns 0
+ * - On failure returns error code < 0
+ */
+extern int
+aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *ops,
+		struct aesni_gcm_session *sess,
+		const struct rte_crypto_sym_xform *xform);
+
+
+/** Device specific operations function pointer structure */
+extern struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops;
+
+
+#endif /* _RTE_AESNI_GCM_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map b/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map
new file mode 100644
index 00000000..dc4d417b
--- /dev/null
+++ b/drivers/crypto/aesni_gcm/rte_pmd_aesni_gcm_version.map
@@ -0,0 +1,3 @@
+DPDK_16.04 {
+	local: *;
+};
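End to end, the status codes set in this patch are all an application needs to consume results: AUTH_FAILED marks a GCM tag mismatch on decryption, any other non-SUCCESS value is a parameter or processing error. A closing sketch, with done, drops and errs as hypothetical counters:

for (i = 0; i < done; i++) {
	switch (ops[i]->status) {
	case RTE_CRYPTO_OP_STATUS_SUCCESS:
		break;			/* payload and tag are good */
	case RTE_CRYPTO_OP_STATUS_AUTH_FAILED:
		drops++;		/* GCM tag mismatch: discard */
		break;
	default:
		errs++;			/* bad parameters, etc. */
	}
}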