From 97f17497d162afdb82c8704bf097f0fee3724b2e Mon Sep 17 00:00:00 2001
From: "C.J. Collier"
Date: Tue, 14 Jun 2016 07:50:17 -0700
Subject: Imported Upstream version 16.04

Change-Id: I77eadcd8538a9122e4773cbe55b24033dc451757
Signed-off-by: C.J. Collier
---
 lib/librte_cryptodev/Makefile                  |   62 ++
 lib/librte_cryptodev/rte_crypto.h              |  415 +++++++++
 lib/librte_cryptodev/rte_crypto_sym.h          |  662 ++++++++++++++
 lib/librte_cryptodev/rte_cryptodev.c           | 1162 ++++++++++++++++++++++++
 lib/librte_cryptodev/rte_cryptodev.h           |  896 ++++++++++++++++++
 lib/librte_cryptodev/rte_cryptodev_pmd.h       |  543 +++++++++++
 lib/librte_cryptodev/rte_cryptodev_version.map |   34 +
 7 files changed, 3774 insertions(+)
 create mode 100644 lib/librte_cryptodev/Makefile
 create mode 100644 lib/librte_cryptodev/rte_crypto.h
 create mode 100644 lib/librte_cryptodev/rte_crypto_sym.h
 create mode 100644 lib/librte_cryptodev/rte_cryptodev.c
 create mode 100644 lib/librte_cryptodev/rte_cryptodev.h
 create mode 100644 lib/librte_cryptodev/rte_cryptodev_pmd.h
 create mode 100644 lib/librte_cryptodev/rte_cryptodev_version.map

diff --git a/lib/librte_cryptodev/Makefile b/lib/librte_cryptodev/Makefile
new file mode 100644
index 00000000..314a0466
--- /dev/null
+++ b/lib/librte_cryptodev/Makefile
@@ -0,0 +1,62 @@
+# BSD LICENSE
+#
+# Copyright(c) 2015 Intel Corporation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+#   * Redistributions of source code must retain the above copyright
+#     notice, this list of conditions and the following disclaimer.
+#   * Redistributions in binary form must reproduce the above copyright
+#     notice, this list of conditions and the following disclaimer in
+#     the documentation and/or other materials provided with the
+#     distribution.
+#   * Neither the name of Intel Corporation nor the names of its
+#     contributors may be used to endorse or promote products derived
+#     from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_cryptodev.a
+
+# library version
+LIBABIVER := 1
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# library source files
+SRCS-y += rte_cryptodev.c
+
+# export include files
+SYMLINK-y-include += rte_crypto.h
+SYMLINK-y-include += rte_crypto_sym.h
+SYMLINK-y-include += rte_cryptodev.h
+SYMLINK-y-include += rte_cryptodev_pmd.h
+
+# versioning export map
+EXPORT_MAP := rte_cryptodev_version.map
+
+# library dependencies
+DEPDIRS-y += lib/librte_eal
+DEPDIRS-y += lib/librte_mempool
+DEPDIRS-y += lib/librte_ring
+DEPDIRS-y += lib/librte_mbuf
+DEPDIRS-y += lib/librte_kvargs
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h
new file mode 100644
index 00000000..5bc3eaa7
--- /dev/null
+++ b/lib/librte_cryptodev/rte_crypto.h
@@ -0,0 +1,415 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTO_H_
+#define _RTE_CRYPTO_H_
+
+/**
+ * @file rte_crypto.h
+ *
+ * RTE Cryptography Common Definitions
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+#include "rte_crypto_sym.h"
+
+/** Crypto operation types */
+enum rte_crypto_op_type {
+	RTE_CRYPTO_OP_TYPE_UNDEFINED,
+	/**< Undefined operation type */
+	RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+	/**< Symmetric operation */
+};
+
+/** Status of crypto operation */
+enum rte_crypto_op_status {
+	RTE_CRYPTO_OP_STATUS_SUCCESS,
+	/**< Operation completed successfully */
+	RTE_CRYPTO_OP_STATUS_NOT_PROCESSED,
+	/**< Operation has not yet been processed by a crypto device */
+	RTE_CRYPTO_OP_STATUS_ENQUEUED,
+	/**< Operation is enqueued on device */
+	RTE_CRYPTO_OP_STATUS_AUTH_FAILED,
+	/**< Authentication verification failed */
+	RTE_CRYPTO_OP_STATUS_INVALID_SESSION,
+	/**<
+	 * Symmetric operation failed due to invalid session arguments, or if
+	 * in session-less mode, failed to allocate private operation material.
+	 */
+	RTE_CRYPTO_OP_STATUS_INVALID_ARGS,
+	/**< Operation failed due to invalid arguments in request */
+	RTE_CRYPTO_OP_STATUS_ERROR,
+	/**< Error while handling operation */
+};
+
+/**
+ * Cryptographic Operation.
+ *
+ * This structure contains data relating to performing cryptographic
+ * operations. It can hold any operation type which is supported by the
+ * cryptodev API; PMDs should check the type parameter to verify that the
+ * operation is a function supported by the device. Crypto operations are
+ * enqueued to and dequeued from crypto PMDs using
+ * rte_cryptodev_enqueue_burst() / rte_cryptodev_dequeue_burst().
+ */
+struct rte_crypto_op {
+	enum rte_crypto_op_type type;
+	/**< operation type */
+
+	enum rte_crypto_op_status status;
+	/**<
+	 * operation status - this is reset to
+	 * RTE_CRYPTO_OP_STATUS_NOT_PROCESSED on allocation from mempool and
+	 * will be set to RTE_CRYPTO_OP_STATUS_SUCCESS after crypto operation
+	 * is successfully processed by a crypto PMD
+	 */
+
+	struct rte_mempool *mempool;
+	/**< crypto operation mempool which operation is allocated from */
+
+	phys_addr_t phys_addr;
+	/**< physical address of crypto operation */
+
+	void *opaque_data;
+	/**< Opaque pointer for user data */
+
+	union {
+		struct rte_crypto_sym_op *sym;
+		/**< Symmetric operation parameters */
+	}; /**< operation specific parameters */
+} __rte_cache_aligned;
+
+/**
+ * Reset the fields of a crypto operation to their default values.
+ *
+ * @param	op	The crypto operation to be reset.
+ * @param	type	The crypto operation type.
+ */
+static inline void
+__rte_crypto_op_reset(struct rte_crypto_op *op, enum rte_crypto_op_type type)
+{
+	op->type = type;
+	op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+	switch (type) {
+	case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
+		/** Symmetric operation structure starts after the end of the
+		 * rte_crypto_op structure.
+		 */
+		op->sym = (struct rte_crypto_sym_op *)(op + 1);
+		op->type = type;
+
+		__rte_crypto_sym_op_reset(op->sym);
+		break;
+	default:
+		break;
+	}
+
+	op->opaque_data = NULL;
+}
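[A consumer-side sketch of how these status values are typically inspected after processing; this is illustrative only and not part of the imported file. Device 0, queue pair 0 and the burst size of 32 are assumptions:]

    struct rte_crypto_op *ops[32];
    uint16_t i, nb_deq;

    /* Collect up to 32 processed operations from dev 0, queue pair 0. */
    nb_deq = rte_cryptodev_dequeue_burst(0, 0, ops, 32);

    for (i = 0; i < nb_deq; i++) {
            if (ops[i]->status == RTE_CRYPTO_OP_STATUS_SUCCESS) {
                    /* cipher/auth result is now valid in the attached mbuf */
            } else if (ops[i]->status == RTE_CRYPTO_OP_STATUS_AUTH_FAILED) {
                    /* digest verification failed: drop the packet */
            }
            rte_crypto_op_free(ops[i]); /* return the op to its mempool */
    }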
+
+/**
+ * Private data structure belonging to a crypto symmetric operation pool.
+ */
+struct rte_crypto_op_pool_private {
+	enum rte_crypto_op_type type;
+	/**< Operation type of the crypto ops in this pool. */
+	uint16_t priv_size;
+	/**< Size of private area in each crypto operation. */
+};
+
+
+/**
+ * Returns the size of private data allocated with each rte_crypto_op object
+ * by the mempool
+ *
+ * @param	mempool	rte_crypto_op mempool
+ *
+ * @return	private data size
+ */
+static inline uint16_t
+__rte_crypto_op_get_priv_data_size(struct rte_mempool *mempool)
+{
+	struct rte_crypto_op_pool_private *priv =
+		(struct rte_crypto_op_pool_private *)
+				rte_mempool_get_priv(mempool);
+
+	return priv->priv_size;
+}
+
+
+/**
+ * Creates a crypto operation pool
+ *
+ * @param	name		pool name
+ * @param	type		crypto operation type, use
+ *				RTE_CRYPTO_OP_TYPE_UNDEFINED for a pool which
+ *				supports all operation types
+ * @param	nb_elts		number of elements in pool
+ * @param	cache_size	Number of elements to cache on lcore, see
+ *				*rte_mempool_create* for further details about
+ *				cache size
+ * @param	priv_size	Size of private data to allocate with each
+ *				operation
+ * @param	socket_id	Socket to allocate memory on
+ *
+ * @return
+ *  - On success pointer to mempool
+ *  - On failure NULL
+ */
+extern struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+		int socket_id);
+
+/**
+ * Bulk allocate raw elements from the mempool and return them as crypto
+ * operations
+ *
+ * @param	mempool	crypto operation mempool.
+ * @param	type	crypto operation type.
+ * @param	ops	Array to place allocated crypto operations
+ * @param	nb_ops	Number of crypto operations to allocate
+ *
+ * @returns
+ * - On success returns the number of ops allocated (nb_ops)
+ * - Returns 0 if the mempool cannot supply nb_ops elements
+ * - Returns -EINVAL if the pool type matches neither the requested type nor
+ *   RTE_CRYPTO_OP_TYPE_UNDEFINED
+ */
+static inline int
+__rte_crypto_op_raw_bulk_alloc(struct rte_mempool *mempool,
+		enum rte_crypto_op_type type,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct rte_crypto_op_pool_private *priv;
+
+	priv = (struct rte_crypto_op_pool_private *)
+			rte_mempool_get_priv(mempool);
+	if (unlikely(priv->type != type &&
+			priv->type != RTE_CRYPTO_OP_TYPE_UNDEFINED))
+		return -EINVAL;
+
+	if (rte_mempool_get_bulk(mempool, (void **)ops, nb_ops) == 0)
+		return nb_ops;
+
+	return 0;
+}
+
+/**
+ * Allocate a crypto operation from a mempool with default parameters set
+ *
+ * @param	mempool	crypto operation mempool
+ * @param	type	operation type to allocate
+ *
+ * @returns
+ * - On success returns a valid rte_crypto_op structure
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_op *
+rte_crypto_op_alloc(struct rte_mempool *mempool, enum rte_crypto_op_type type)
+{
+	struct rte_crypto_op *op = NULL;
+	int retval;
+
+	retval = __rte_crypto_op_raw_bulk_alloc(mempool, type, &op, 1);
+	if (unlikely(retval != 1))
+		return NULL;
+
+	__rte_crypto_op_reset(op, type);
+
+	return op;
+}
+
+
+/**
+ * Bulk allocate crypto operations from a mempool with default parameters set
+ *
+ * @param	mempool	crypto operation mempool
+ * @param	type	operation type to allocate
+ * @param	ops	Array to place allocated crypto operations
+ * @param	nb_ops	Number of crypto operations to allocate
+ *
+ * @returns
+ * - On success returns nb_ops; all requested operations are allocated and
+ *   reset
+ * - On failure returns 0; no operations are allocated
+ */
+static inline unsigned
+rte_crypto_op_bulk_alloc(struct rte_mempool *mempool,
+		enum rte_crypto_op_type type,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	int i;
+
+	if (unlikely(__rte_crypto_op_raw_bulk_alloc(mempool, type, ops, nb_ops)
+			!= nb_ops))
+		return 0;
+
+	for (i = 0; i < nb_ops; i++)
+		__rte_crypto_op_reset(ops[i], type);
+
+	return nb_ops;
+}
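[A sketch of the allocation path these helpers provide; the pool name and sizing below are illustrative assumptions, not values from the imported sources:]

    struct rte_mempool *op_pool;
    struct rte_crypto_op *op;

    /* Pool of 4096 symmetric ops, 128 cached per lcore, no private data. */
    op_pool = rte_crypto_op_pool_create("crypto_op_pool",
                    RTE_CRYPTO_OP_TYPE_SYMMETRIC, 4096, 128, 0,
                    rte_socket_id());
    if (op_pool == NULL)
            rte_exit(EXIT_FAILURE, "Cannot create crypto op pool\n");

    /* Ops come back reset: status is RTE_CRYPTO_OP_STATUS_NOT_PROCESSED and
     * op->sym points just past the rte_crypto_op structure. */
    op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
    if (op == NULL)
            rte_exit(EXIT_FAILURE, "Crypto op pool exhausted\n");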
+
+
+/**
+ * Returns a pointer to the private data of a crypto operation if
+ * that operation has enough capacity for the requested size.
+ *
+ * @param	op	crypto operation.
+ * @param	size	size of space requested in private data.
+ *
+ * @returns
+ * - if sufficient space available returns pointer to start of private data
+ * - if insufficient space returns NULL
+ */
+static inline void *
+__rte_crypto_op_get_priv_data(struct rte_crypto_op *op, uint32_t size)
+{
+	uint32_t priv_size;
+
+	if (likely(op->mempool != NULL)) {
+		priv_size = __rte_crypto_op_get_priv_data_size(op->mempool);
+
+		if (likely(priv_size >= size))
+			return (void *)((uint8_t *)(op + 1) +
+					sizeof(struct rte_crypto_sym_op));
+	}
+
+	return NULL;
+}
+
+/**
+ * Free crypto operation structure.
+ * If the operation has been allocated from a rte_mempool, then the operation
+ * will be returned to the mempool.
+ *
+ * @param	op	symmetric crypto operation
+ */
+static inline void
+rte_crypto_op_free(struct rte_crypto_op *op)
+{
+	if (op != NULL && op->mempool != NULL)
+		rte_mempool_put(op->mempool, op);
+}
+
+/**
+ * Allocate a symmetric crypto operation in the private data of an mbuf.
+ *
+ * @param	m	mbuf which is associated with the crypto operation, the
+ *			operation will be allocated in the private data of that
+ *			mbuf.
+ *
+ * @returns
+ * - On success returns a pointer to the crypto operation.
+ * - On failure returns NULL.
+ */
+static inline struct rte_crypto_op *
+rte_crypto_sym_op_alloc_from_mbuf_priv_data(struct rte_mbuf *m)
+{
+	if (unlikely(m == NULL))
+		return NULL;
+
+	/*
+	 * check that the mbuf's private data size is sufficient to contain a
+	 * crypto operation
+	 */
+	if (unlikely(m->priv_size < (sizeof(struct rte_crypto_op) +
+			sizeof(struct rte_crypto_sym_op))))
+		return NULL;
+
+	/* Private data starts immediately after the mbuf header in the mbuf. */
+	struct rte_crypto_op *op = (struct rte_crypto_op *)(m + 1);
+
+	__rte_crypto_op_reset(op, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+
+	op->mempool = NULL;
+	op->sym->m_src = m;
+
+	return op;
+}
+
+/**
+ * Allocate space for symmetric crypto xforms in the private data space of the
+ * crypto operation. This also defaults the crypto xform type and configures
+ * the chaining of the xforms in the crypto operation.
+ *
+ * @param	op		crypto operation, must be of type symmetric
+ * @param	nb_xforms	number of xforms to allocate and chain
+ *
+ * @return
+ * - On success returns pointer to first crypto xform in crypto operations
+ *   chain
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_sym_xform *
+rte_crypto_op_sym_xforms_alloc(struct rte_crypto_op *op, uint8_t nb_xforms)
+{
+	void *priv_data;
+	uint32_t size;
+
+	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC))
+		return NULL;
+
+	size = sizeof(struct rte_crypto_sym_xform) * nb_xforms;
+
+	priv_data = __rte_crypto_op_get_priv_data(op, size);
+	if (priv_data == NULL)
+		return NULL;
+
+	return __rte_crypto_sym_op_sym_xforms_alloc(op->sym, priv_data,
+			nb_xforms);
+}
+
+
+/**
+ * Attach a session to a crypto operation
+ *
+ * @param	op	crypto operation, must be of type symmetric
+ * @param	sess	cryptodev session
+ *
+ * @return
+ * - On success returns 0
+ * - Returns -1 if the operation is not of type symmetric
+ */
+static inline int
+rte_crypto_op_attach_sym_session(struct rte_crypto_op *op,
+		struct rte_cryptodev_sym_session *sess)
+{
+	if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC))
+		return -1;
+
+	return __rte_crypto_sym_op_attach_sym_session(op->sym, sess);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CRYPTO_H_ */
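[Since rte_crypto_sym_op_alloc_from_mbuf_priv_data() carves the operation out of the mbuf's private area, the mbuf pool has to be created with enough priv_size up front. A sketch, with illustrative pool parameters that are not taken from the imported sources:]

    uint16_t priv_size = sizeof(struct rte_crypto_op) +
                    sizeof(struct rte_crypto_sym_op);

    /* Reserve room for the op in every mbuf's private area at pool
     * creation time. */
    struct rte_mempool *pkt_pool = rte_pktmbuf_pool_create("pkt_pool",
                    8192, 256, priv_size, RTE_MBUF_DEFAULT_BUF_SIZE,
                    rte_socket_id());

    struct rte_mbuf *m = rte_pktmbuf_alloc(pkt_pool);
    struct rte_crypto_op *op = rte_crypto_sym_op_alloc_from_mbuf_priv_data(m);
    /* op->mempool is NULL here, so rte_crypto_op_free() is a no-op and the
     * operation's storage is released together with the mbuf. */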
diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
new file mode 100644
index 00000000..4ae9b9e8
--- /dev/null
+++ b/lib/librte_cryptodev/rte_crypto_sym.h
@@ -0,0 +1,662 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTO_SYM_H_
+#define _RTE_CRYPTO_SYM_H_
+
+/**
+ * @file rte_crypto_sym.h
+ *
+ * RTE Definitions for Symmetric Cryptography
+ *
+ * Defines symmetric cipher and authentication algorithms and modes, as well
+ * as supported symmetric crypto operation combinations.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h>
+
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+#include <rte_mempool.h>
+
+
+/** Symmetric Cipher Algorithms */
+enum rte_crypto_cipher_algorithm {
+	RTE_CRYPTO_CIPHER_NULL = 1,
+	/**< NULL cipher algorithm. No mode applies to the NULL algorithm. */
+
+	RTE_CRYPTO_CIPHER_3DES_CBC,
+	/**< Triple DES algorithm in CBC mode */
+	RTE_CRYPTO_CIPHER_3DES_CTR,
+	/**< Triple DES algorithm in CTR mode */
+	RTE_CRYPTO_CIPHER_3DES_ECB,
+	/**< Triple DES algorithm in ECB mode */
+
+	RTE_CRYPTO_CIPHER_AES_CBC,
+	/**< AES algorithm in CBC mode */
+	RTE_CRYPTO_CIPHER_AES_CCM,
+	/**< AES algorithm in CCM mode. When this cipher algorithm is used the
+	 * *RTE_CRYPTO_AUTH_AES_CCM* element of the
+	 * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
+	 * *rte_crypto_auth_xform* structure in the session context or in
+	 * the op_params of the crypto operation structure in the case of a
+	 * session-less crypto operation.
+	 */
+	RTE_CRYPTO_CIPHER_AES_CTR,
+	/**< AES algorithm in Counter mode */
+	RTE_CRYPTO_CIPHER_AES_ECB,
+	/**< AES algorithm in ECB mode */
+	RTE_CRYPTO_CIPHER_AES_F8,
+	/**< AES algorithm in F8 mode */
+	RTE_CRYPTO_CIPHER_AES_GCM,
+	/**< AES algorithm in GCM mode. When this cipher algorithm is used the
+	 * *RTE_CRYPTO_AUTH_AES_GCM* element of the
+	 * *rte_crypto_auth_algorithm* enum MUST be used to set up the related
+	 * *rte_crypto_auth_xform* structure in the session context or in
+	 * the op_params of the crypto operation structure in the case of a
+	 * session-less crypto operation.
+	 */
+	RTE_CRYPTO_CIPHER_AES_XTS,
+	/**< AES algorithm in XTS mode */
+
+	RTE_CRYPTO_CIPHER_ARC4,
+	/**< (A)RC4 cipher algorithm */
+
+	RTE_CRYPTO_CIPHER_KASUMI_F8,
+	/**< Kasumi algorithm in F8 mode */
+
+	RTE_CRYPTO_CIPHER_SNOW3G_UEA2,
+	/**< SNOW3G algorithm in UEA2 mode */
+
+	RTE_CRYPTO_CIPHER_ZUC_EEA3,
+	/**< ZUC algorithm in EEA3 mode */
+
+	RTE_CRYPTO_CIPHER_LIST_END
+};
+
+/** Symmetric Cipher Direction */
+enum rte_crypto_cipher_operation {
+	RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+	/**< Encrypt cipher operation */
+	RTE_CRYPTO_CIPHER_OP_DECRYPT
+	/**< Decrypt cipher operation */
+};
+
+/**
+ * Symmetric Cipher Setup Data.
+ *
+ * This structure contains data relating to Cipher (Encryption and Decryption)
+ * used to create a session.
+ */
+struct rte_crypto_cipher_xform {
+	enum rte_crypto_cipher_operation op;
+	/**< This parameter determines if the cipher operation is an encrypt or
+	 * a decrypt operation. For the RC4 algorithm and the F8/CTR modes,
+	 * only encrypt operations are valid.
+	 */
+	enum rte_crypto_cipher_algorithm algo;
+	/**< Cipher algorithm */
+
+	struct {
+		uint8_t *data;	/**< pointer to key data */
+		size_t length;	/**< key length in bytes */
+	} key;
+	/**< Cipher key
+	 *
+	 * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.data will
+	 * point to a concatenation of the AES encryption key followed by a
+	 * keymask. As per RFC3711, the keymask should be padded with trailing
+	 * bytes to match the length of the encryption key used.
+	 *
+	 * For AES-XTS mode of operation, two keys must be provided and
+	 * key.data must point to the two keys concatenated together (Key1 ||
+	 * Key2). The cipher key length will contain the total size of both
+	 * keys.
+	 *
+	 * Cipher key length is in bytes. For AES it can be 128 bits (16 bytes),
+	 * 192 bits (24 bytes) or 256 bits (32 bytes).
+	 *
+	 * For the CCM mode of operation, the only supported key length is 128
+	 * bits (16 bytes).
+	 *
+	 * For the RTE_CRYPTO_CIPHER_AES_F8 mode of operation, key.length
+	 * should be set to the combined length of the encryption key and the
+	 * keymask. Since the keymask and the encryption key are the same size,
+	 * key.length should be set to 2 x the AES encryption key length.
+	 *
+	 * For the AES-XTS mode of operation:
+	 *  - Two keys must be provided and key.length refers to the total
+	 *    length of the two keys.
+	 *  - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
+	 *  - Both keys must have the same size.
+	 */
+};
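[Filling this transform in for, e.g., AES-128-CBC encryption looks like the following sketch. It uses the rte_crypto_sym_xform container defined further down in this header; the key buffer is the application's own and the values are illustrative:]

    uint8_t cipher_key[16] = { /* 128-bit AES key */ };

    struct rte_crypto_sym_xform cipher_xform = {
            .next = NULL,
            .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
            .cipher = {
                    .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
                    .algo = RTE_CRYPTO_CIPHER_AES_CBC,
                    .key = {
                            .data = cipher_key,
                            .length = sizeof(cipher_key), /* 16 bytes */
                    },
            },
    };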
+/** Symmetric Authentication / Hash Algorithms */
+enum rte_crypto_auth_algorithm {
+	RTE_CRYPTO_AUTH_NULL = 1,
+	/**< NULL hash algorithm. */
+
+	RTE_CRYPTO_AUTH_AES_CBC_MAC,
+	/**< AES-CBC-MAC algorithm. Only 128-bit keys are supported. */
+	RTE_CRYPTO_AUTH_AES_CCM,
+	/**< AES algorithm in CCM mode. This is an authenticated cipher. When
+	 * this hash algorithm is used, the *RTE_CRYPTO_CIPHER_AES_CCM*
+	 * element of the *rte_crypto_cipher_algorithm* enum MUST be used to
+	 * set up the related rte_crypto_cipher_xform structure in the
+	 * session context, or the corresponding op_params parameter of the
+	 * crypto operation data structure MUST be set for a session-less
+	 * crypto operation.
+	 */
+	RTE_CRYPTO_AUTH_AES_CMAC,
+	/**< AES CMAC algorithm. */
+	RTE_CRYPTO_AUTH_AES_GCM,
+	/**< AES algorithm in GCM mode. When this hash algorithm
+	 * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
+	 * rte_crypto_cipher_algorithm enum MUST be used to set up the related
+	 * rte_crypto_cipher_xform structure in the session context, or the
+	 * corresponding op_params parameter of the crypto operation data
+	 * structure MUST be set for a session-less crypto operation.
+	 */
+	RTE_CRYPTO_AUTH_AES_GMAC,
+	/**< AES GMAC algorithm. When this hash algorithm
+	 * is used, the RTE_CRYPTO_CIPHER_AES_GCM element of the
+	 * rte_crypto_cipher_algorithm enum MUST be used to set up the related
+	 * rte_crypto_cipher_xform structure in the session context, or the
+	 * corresponding op_params parameter of the crypto operation data
+	 * structure MUST be set for a session-less crypto operation.
+	 */
+	RTE_CRYPTO_AUTH_AES_XCBC_MAC,
+	/**< AES XCBC algorithm. */
+
+	RTE_CRYPTO_AUTH_KASUMI_F9,
+	/**< Kasumi algorithm in F9 mode. */
+
+	RTE_CRYPTO_AUTH_MD5,
+	/**< MD5 algorithm */
+	RTE_CRYPTO_AUTH_MD5_HMAC,
+	/**< HMAC using MD5 algorithm */
+
+	RTE_CRYPTO_AUTH_SHA1,
+	/**< 160 bit SHA algorithm. */
+	RTE_CRYPTO_AUTH_SHA1_HMAC,
+	/**< HMAC using 160 bit SHA algorithm. */
+	RTE_CRYPTO_AUTH_SHA224,
+	/**< 224 bit SHA algorithm. */
+	RTE_CRYPTO_AUTH_SHA224_HMAC,
+	/**< HMAC using 224 bit SHA algorithm. */
+	RTE_CRYPTO_AUTH_SHA256,
+	/**< 256 bit SHA algorithm. */
+	RTE_CRYPTO_AUTH_SHA256_HMAC,
+	/**< HMAC using 256 bit SHA algorithm. */
+	RTE_CRYPTO_AUTH_SHA384,
+	/**< 384 bit SHA algorithm. */
+	RTE_CRYPTO_AUTH_SHA384_HMAC,
+	/**< HMAC using 384 bit SHA algorithm. */
+	RTE_CRYPTO_AUTH_SHA512,
+	/**< 512 bit SHA algorithm. */
+	RTE_CRYPTO_AUTH_SHA512_HMAC,
+	/**< HMAC using 512 bit SHA algorithm. */
+
+	RTE_CRYPTO_AUTH_SNOW3G_UIA2,
+	/**< SNOW3G algorithm in UIA2 mode. */
+
+	RTE_CRYPTO_AUTH_ZUC_EIA3,
+	/**< ZUC algorithm in EIA3 mode */
+
+	RTE_CRYPTO_AUTH_LIST_END
+};
+
+/** Symmetric Authentication / Hash Operations */
+enum rte_crypto_auth_operation {
+	RTE_CRYPTO_AUTH_OP_VERIFY,	/**< Verify authentication digest */
+	RTE_CRYPTO_AUTH_OP_GENERATE	/**< Generate authentication digest */
+};
+
+/**
+ * Authentication / Hash transform data.
+ *
+ * This structure contains data relating to an authentication/hash crypto
+ * transform. The fields op, algo and digest_length are common to all
+ * authentication transforms and MUST be set.
+ */
+struct rte_crypto_auth_xform {
+	enum rte_crypto_auth_operation op;
+	/**< Authentication operation type */
+	enum rte_crypto_auth_algorithm algo;
+	/**< Authentication algorithm selection */
+
+	struct {
+		uint8_t *data;	/**< pointer to key data */
+		size_t length;	/**< key length in bytes */
+	} key;
+	/**< Authentication key data.
+	 * The authentication key length MUST be less than or equal to the
+	 * block size of the algorithm. It is the caller's responsibility to
+	 * ensure that the key length is compliant with the standard being used
+	 * (for example RFC 2104, FIPS 198a).
+	 */
+
+	uint32_t digest_length;
+	/**< Length of the digest to be returned. If the verify option is set,
+	 * this specifies the length of the digest to be compared for the
+	 * session.
+	 *
+	 * If the value is less than the maximum length allowed by the hash,
+	 * the result shall be truncated. If the value is greater than the
+	 * maximum length allowed by the hash then an error will be generated
+	 * by *rte_cryptodev_sym_session_create* or by the
+	 * *rte_cryptodev_sym_enqueue_burst* if using session-less APIs.
+	 */
+
+	uint32_t add_auth_data_length;
+	/**< The length of the additional authenticated data (AAD) in bytes.
+	 * The maximum permitted value is 240 bytes, unless otherwise specified
+	 * below.
+	 *
+	 * This field must be specified when the hash algorithm is one of the
+	 * following:
+	 *
+	 * - For SNOW3G (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2), this is the
+	 *   length of the IV (which should be 16).
+	 *
+	 * - For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM). In this case, this is
+	 *   the length of the Additional Authenticated Data (called A, in NIST
+	 *   SP800-38D).
+	 *
+	 * - For CCM (@ref RTE_CRYPTO_AUTH_AES_CCM). In this case, this is
+	 *   the length of the associated data (called A, in NIST SP800-38C).
+	 *   Note that this does NOT include the length of any padding, or the
+	 *   18 bytes reserved at the start of the above field to store the
+	 *   block B0 and the encoded length. The maximum permitted value in
+	 *   this case is 222 bytes.
+	 *
+	 * @note
+	 * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of operation
+	 * this field is not used and should be set to 0. Instead the length
+	 * of the AAD data is specified in the message length to hash field of
+	 * the rte_crypto_sym_op_data structure.
+	 */
+};
+
+/** Crypto transformation types */
+enum rte_crypto_sym_xform_type {
+	RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED = 0,	/**< No xform specified */
+	RTE_CRYPTO_SYM_XFORM_AUTH,		/**< Authentication xform */
+	RTE_CRYPTO_SYM_XFORM_CIPHER		/**< Cipher xform */
+};
+
+/**
+ * Symmetric crypto transform structure.
+ *
+ * This is used to specify the crypto transforms required; multiple transforms
+ * can be chained together to specify a chain of transforms such as
+ * authentication then cipher, or cipher then authentication. Each transform
+ * structure can hold a single transform; the type field is used to specify
+ * which transform is contained within the union.
+ */
+struct rte_crypto_sym_xform {
+	struct rte_crypto_sym_xform *next;
+	/**< next xform in chain */
+	enum rte_crypto_sym_xform_type type;
+	/**< xform type */
+	union {
+		struct rte_crypto_auth_xform auth;
+		/**< Authentication / hash xform */
+		struct rte_crypto_cipher_xform cipher;
+		/**< Cipher xform */
+	};
+};
+
+/**
+ * Crypto operation session type. This is used to specify whether a crypto
+ * operation has a session structure attached for immutable parameters or if
+ * all operation information is included in the operation data structure.
+ */
+enum rte_crypto_sym_op_sess_type {
+	RTE_CRYPTO_SYM_OP_WITH_SESSION,	/**< Session based crypto operation */
+	RTE_CRYPTO_SYM_OP_SESSIONLESS	/**< Session-less crypto operation */
+};
+
+
+struct rte_cryptodev_sym_session;
+
+/**
+ * Symmetric Cryptographic Operation.
+ *
+ * This structure contains data relating to performing symmetric cryptographic
+ * processing on a referenced mbuf data buffer.
+ *
+ * When a symmetric crypto operation is enqueued with the device for processing
+ * it must have a valid *rte_mbuf* structure attached, via the m_src parameter,
+ * which contains the source data on which the crypto operation is to be
+ * performed.
+ */ +struct rte_crypto_sym_op { + struct rte_mbuf *m_src; /**< source mbuf */ + struct rte_mbuf *m_dst; /**< destination mbuf */ + + enum rte_crypto_sym_op_sess_type sess_type; + + union { + struct rte_cryptodev_sym_session *session; + /**< Handle for the initialised session context */ + struct rte_crypto_sym_xform *xform; + /**< Session-less API crypto operation parameters */ + }; + + struct { + struct { + uint32_t offset; + /**< Starting point for cipher processing, specified + * as number of bytes from start of data in the source + * buffer. The result of the cipher operation will be + * written back into the output buffer starting at + * this location. + * + * @note + * For Snow3G @ RTE_CRYPTO_CIPHER_SNOW3G_UEA2, + * this field should be in bits. + */ + + uint32_t length; + /**< The message length, in bytes, of the source buffer + * on which the cryptographic operation will be + * computed. This must be a multiple of the block size + * if a block cipher is being used. This is also the + * same as the result length. + * + * @note + * In the case of CCM @ref RTE_CRYPTO_AUTH_AES_CCM, + * this value should not include the length of the + * padding or the length of the MAC; the driver will + * compute the actual number of bytes over which the + * encryption will occur, which will include these + * values. + * + * @note + * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC, this + * field should be set to 0. + * + * @note + * For Snow3G @ RTE_CRYPTO_AUTH_SNOW3G_UEA2 + * this field should be in bits. + */ + } data; /**< Data offsets and length for ciphering */ + + struct { + uint8_t *data; + /**< Initialisation Vector or Counter. + * + * - For block ciphers in CBC or F8 mode, or for Kasumi + * in F8 mode, or for SNOW3G in UEA2 mode, this is the + * Initialisation Vector (IV) value. + * + * - For block ciphers in CTR mode, this is the counter. + * + * - For GCM mode, this is either the IV (if the length + * is 96 bits) or J0 (for other sizes), where J0 is as + * defined by NIST SP800-38D. Regardless of the IV + * length, a full 16 bytes needs to be allocated. + * + * - For CCM mode, the first byte is reserved, and the + * nonce should be written starting at &iv[1] (to allow + * space for the implementation to write in the flags + * in the first byte). Note that a full 16 bytes should + * be allocated, even though the length field will + * have a value less than this. + * + * - For AES-XTS, this is the 128bit tweak, i, from + * IEEE Std 1619-2007. + * + * For optimum performance, the data pointed to SHOULD + * be 8-byte aligned. + */ + phys_addr_t phys_addr; + uint16_t length; + /**< Length of valid IV data. + * + * - For block ciphers in CBC or F8 mode, or for Kasumi + * in F8 mode, or for SNOW3G in UEA2 mode, this is the + * length of the IV (which must be the same as the + * block length of the cipher). + * + * - For block ciphers in CTR mode, this is the length + * of the counter (which must be the same as the block + * length of the cipher). + * + * - For GCM mode, this is either 12 (for 96-bit IVs) + * or 16, in which case data points to J0. + * + * - For CCM mode, this is the length of the nonce, + * which can be in the range 7 to 13 inclusive. + */ + } iv; /**< Initialisation vector parameters */ + } cipher; + + struct { + struct { + uint32_t offset; + /**< Starting point for hash processing, specified as + * number of bytes from start of packet in source + * buffer. + * + * @note + * For CCM and GCM modes of operation, this field is + * ignored. 
The field @ref aad field + * should be set instead. + * + * @note For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) + * mode of operation, this field specifies the start + * of the AAD data in the source buffer. + * + * @note + * For Snow3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2 + * this field should be in bits. + */ + + uint32_t length; + /**< The message length, in bytes, of the source + * buffer that the hash will be computed on. + * + * @note + * For CCM and GCM modes of operation, this field is + * ignored. The field @ref aad field should be set + * instead. + * + * @note + * For AES-GMAC @ref RTE_CRYPTO_AUTH_AES_GMAC mode + * of operation, this field specifies the length of + * the AAD data in the source buffer. + * + * @note + * For Snow3G @ RTE_CRYPTO_AUTH_SNOW3G_UIA2 + * this field should be in bits. + */ + } data; /**< Data offsets and length for authentication */ + + struct { + uint8_t *data; + /**< If this member of this structure is set this is a + * pointer to the location where the digest result + * should be inserted (in the case of digest generation) + * or where the purported digest exists (in the case of + * digest verification). + * + * At session creation time, the client specified the + * digest result length with the digest_length member + * of the @ref rte_crypto_auth_xform structure. For + * physical crypto devices the caller must allocate at + * least digest_length of physically contiguous memory + * at this location. + * + * For digest generation, the digest result will + * overwrite any data at this location. + * + * @note + * For GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), for + * "digest result" read "authentication tag T". + * + * If this member is not set the digest result is + * understood to be in the destination buffer for + * digest generation, and in the source buffer for + * digest verification. The location of the digest + * result in this case is immediately following the + * region over which the digest is computed. + */ + phys_addr_t phys_addr; + /**< Physical address of digest */ + uint16_t length; + /**< Length of digest */ + } digest; /**< Digest parameters */ + + struct { + uint8_t *data; + /**< Pointer to Additional Authenticated Data (AAD) + * needed for authenticated cipher mechanisms (CCM and + * GCM), and to the IV for SNOW3G authentication + * (@ref RTE_CRYPTO_AUTH_SNOW3G_UIA2). For other + * authentication mechanisms this pointer is ignored. + * + * The length of the data pointed to by this field is + * set up for the session in the @ref + * rte_crypto_auth_xform structure as part of the @ref + * rte_cryptodev_sym_session_create function call. + * This length must not exceed 240 bytes. + * + * Specifically for CCM (@ref RTE_CRYPTO_AUTH_AES_CCM), + * the caller should setup this field as follows: + * + * - the nonce should be written starting at an offset + * of one byte into the array, leaving room for the + * implementation to write in the flags to the first + * byte. + * + * - the additional authentication data itself should + * be written starting at an offset of 18 bytes into + * the array, leaving room for the length encoding in + * the first two bytes of the second block. + * + * - the array should be big enough to hold the above + * fields, plus any padding to round this up to the + * nearest multiple of the block size (16 bytes). + * Padding will be added by the implementation. 
+			 *
+			 * Finally, for GCM (@ref RTE_CRYPTO_AUTH_AES_GCM), the
+			 * caller should setup this field as follows:
+			 *
+			 * - the AAD is written in starting at byte 0
+			 * - the array must be big enough to hold the AAD, plus
+			 *   any space to round this up to the nearest multiple
+			 *   of the block size (16 bytes).
+			 *
+			 * @note
+			 * For AES-GMAC (@ref RTE_CRYPTO_AUTH_AES_GMAC) mode of
+			 * operation, this field is not used and should be set
+			 * to 0. Instead the AAD data should be placed in the
+			 * source buffer.
+			 */
+			phys_addr_t phys_addr;	/**< physical address */
+			uint16_t length;
+			/**< Length of additional authenticated data */
+		} aad;
+		/**< Additional authentication parameters */
+	} auth;
+} __rte_cache_aligned;
+
+
+/**
+ * Reset the fields of a symmetric operation to their default values.
+ *
+ * @param	op	The crypto operation to be reset.
+ */
+static inline void
+__rte_crypto_sym_op_reset(struct rte_crypto_sym_op *op)
+{
+	memset(op, 0, sizeof(*op));
+
+	op->sess_type = RTE_CRYPTO_SYM_OP_SESSIONLESS;
+}
+
+
+/**
+ * Allocate space for symmetric crypto xforms in the private data space of the
+ * crypto operation. This also defaults the crypto xform type to
+ * RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED and configures the chaining of the xforms
+ * in the crypto operation.
+ *
+ * @return
+ * - On success returns pointer to first crypto xform in crypto operations
+ *   chain
+ * - On failure returns NULL
+ */
+static inline struct rte_crypto_sym_xform *
+__rte_crypto_sym_op_sym_xforms_alloc(struct rte_crypto_sym_op *sym_op,
+		void *priv_data, uint8_t nb_xforms)
+{
+	struct rte_crypto_sym_xform *xform;
+
+	sym_op->xform = xform = (struct rte_crypto_sym_xform *)priv_data;
+
+	do {
+		xform->type = RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED;
+		xform = xform->next = --nb_xforms > 0 ? xform + 1 : NULL;
+	} while (xform);
+
+	return sym_op->xform;
+}
+
+
+/**
+ * Attach a session to a symmetric crypto operation
+ *
+ * @param	sym_op	crypto operation
+ * @param	sess	cryptodev session
+ */
+static inline int
+__rte_crypto_sym_op_attach_sym_session(struct rte_crypto_sym_op *sym_op,
+		struct rte_cryptodev_sym_session *sess)
+{
+	sym_op->session = sess;
+	sym_op->sess_type = RTE_CRYPTO_SYM_OP_WITH_SESSION;
+
+	return 0;
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CRYPTO_SYM_H_ */
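[Pulling the pieces of this header together, staging an AES-CBC-then-HMAC-SHA1 operation against an established session might look like this sketch. It is illustrative only: op, m, sess and the rte_malloc()'d iv buffer are assumed to exist, and the offsets and digest size are examples:]

    uint32_t plen = rte_pktmbuf_data_len(m);                /* payload */
    uint8_t *digest = (uint8_t *)rte_pktmbuf_append(m, 20); /* SHA1 tag */

    op->sym->m_src = m;
    rte_crypto_op_attach_sym_session(op, sess);

    /* Cipher the whole payload; the IV sits in a caller-owned buffer. */
    op->sym->cipher.data.offset = 0;
    op->sym->cipher.data.length = plen;
    op->sym->cipher.iv.data = iv;
    op->sym->cipher.iv.phys_addr = rte_malloc_virt2phy(iv);
    op->sym->cipher.iv.length = 16;                  /* AES block size */

    /* Authenticate the same region; digest lands after the payload. */
    op->sym->auth.data.offset = 0;
    op->sym->auth.data.length = plen;
    op->sym->auth.digest.data = digest;
    op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m, plen);
    op->sym->auth.digest.length = 20;                /* HMAC-SHA1 */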
diff --git a/lib/librte_cryptodev/rte_cryptodev.c b/lib/librte_cryptodev/rte_cryptodev.c
new file mode 100644
index 00000000..aa4ea425
--- /dev/null
+++ b/lib/librte_cryptodev/rte_cryptodev.c
@@ -0,0 +1,1162 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Intel Corporation nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <netinet/in.h>
+
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_dev.h>
+#include <rte_interrupts.h>
+#include <rte_pci.h>
+#include <rte_memory.h>
+#include <rte_memcpy.h>
+#include <rte_memzone.h>
+#include <rte_launch.h>
+#include <rte_tailq.h>
+#include <rte_eal.h>
+#include <rte_per_lcore.h>
+#include <rte_lcore.h>
+#include <rte_atomic.h>
+#include <rte_branch_prediction.h>
+#include <rte_common.h>
+#include <rte_ring.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_errno.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+
+#include "rte_crypto.h"
+#include "rte_cryptodev.h"
+#include "rte_cryptodev_pmd.h"
+
+struct rte_cryptodev rte_crypto_devices[RTE_CRYPTO_MAX_DEVS];
+
+struct rte_cryptodev *rte_cryptodevs = &rte_crypto_devices[0];
+
+static struct rte_cryptodev_global cryptodev_globals = {
+	.devs			= &rte_crypto_devices[0],
+	.data			= { NULL },
+	.nb_devs		= 0,
+	.max_devs		= RTE_CRYPTO_MAX_DEVS
+};
+
+struct rte_cryptodev_global *rte_cryptodev_globals = &cryptodev_globals;
+
+/* spinlock for crypto device callbacks */
+static rte_spinlock_t rte_cryptodev_cb_lock = RTE_SPINLOCK_INITIALIZER;
+
+
+/**
+ * The user application callback description.
+ *
+ * It contains the callback address to be registered by the user application,
+ * the pointer to the parameters for the callback, and the event type.
+ */
+struct rte_cryptodev_callback {
+	TAILQ_ENTRY(rte_cryptodev_callback) next; /**< Callbacks list */
+	rte_cryptodev_cb_fn cb_fn;		/**< Callback address */
+	void *cb_arg;				/**< Parameter for callback */
+	enum rte_cryptodev_event_type event;	/**< Interrupt event type */
+	uint32_t active;			/**< Callback is executing */
+};
+
+
+const char *
+rte_cryptodev_get_feature_name(uint64_t flag)
+{
+	switch (flag) {
+	case RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO:
+		return "SYMMETRIC_CRYPTO";
+	case RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO:
+		return "ASYMMETRIC_CRYPTO";
+	case RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING:
+		return "SYM_OPERATION_CHAINING";
+	case RTE_CRYPTODEV_FF_CPU_SSE:
+		return "CPU_SSE";
+	case RTE_CRYPTODEV_FF_CPU_AVX:
+		return "CPU_AVX";
+	case RTE_CRYPTODEV_FF_CPU_AVX2:
+		return "CPU_AVX2";
+	case RTE_CRYPTODEV_FF_CPU_AESNI:
+		return "CPU_AESNI";
+	case RTE_CRYPTODEV_FF_HW_ACCELERATED:
+		return "HW_ACCELERATED";
+
+	default:
+		return NULL;
+	}
+}
+
+
+int
+rte_cryptodev_create_vdev(const char *name, const char *args)
+{
+	return rte_eal_vdev_init(name, args);
+}
+
+int
+rte_cryptodev_get_dev_id(const char *name) {
+	unsigned i;
+
+	if (name == NULL)
+		return -1;
+
+	for (i = 0; i < rte_cryptodev_globals->max_devs; i++)
+		if ((strcmp(rte_cryptodev_globals->devs[i].data->name, name)
+				== 0) &&
+				(rte_cryptodev_globals->devs[i].attached ==
+						RTE_CRYPTODEV_ATTACHED))
+			return i;
+
+	return -1;
+}
+
+uint8_t
+rte_cryptodev_count(void)
+{
+	return rte_cryptodev_globals->nb_devs;
+}
+
+uint8_t
+rte_cryptodev_count_devtype(enum rte_cryptodev_type type)
+{
+	uint8_t i, dev_count = 0;
+
+	for (i = 0; i < rte_cryptodev_globals->max_devs; i++)
+		if (rte_cryptodev_globals->devs[i].dev_type == type &&
+			rte_cryptodev_globals->devs[i].attached ==
+					RTE_CRYPTODEV_ATTACHED)
+			dev_count++;
+
+	return dev_count;
+}
+
+int
+rte_cryptodev_socket_id(uint8_t
dev_id) +{ + struct rte_cryptodev *dev; + + if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) + return -1; + + dev = rte_cryptodev_pmd_get_dev(dev_id); + + return dev->data->socket_id; +} + +static inline int +rte_cryptodev_data_alloc(uint8_t dev_id, struct rte_cryptodev_data **data, + int socket_id) +{ + char mz_name[RTE_CRYPTODEV_NAME_MAX_LEN]; + const struct rte_memzone *mz; + int n; + + /* generate memzone name */ + n = snprintf(mz_name, sizeof(mz_name), "rte_cryptodev_data_%u", dev_id); + if (n >= (int)sizeof(mz_name)) + return -EINVAL; + + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + mz = rte_memzone_reserve(mz_name, + sizeof(struct rte_cryptodev_data), + socket_id, 0); + } else + mz = rte_memzone_lookup(mz_name); + + if (mz == NULL) + return -ENOMEM; + + *data = mz->addr; + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + memset(*data, 0, sizeof(struct rte_cryptodev_data)); + + return 0; +} + +static uint8_t +rte_cryptodev_find_free_device_index(void) +{ + uint8_t dev_id; + + for (dev_id = 0; dev_id < RTE_CRYPTO_MAX_DEVS; dev_id++) { + if (rte_crypto_devices[dev_id].attached == + RTE_CRYPTODEV_DETACHED) + return dev_id; + } + return RTE_CRYPTO_MAX_DEVS; +} + +struct rte_cryptodev * +rte_cryptodev_pmd_allocate(const char *name, enum pmd_type type, int socket_id) +{ + struct rte_cryptodev *cryptodev; + uint8_t dev_id; + + if (rte_cryptodev_pmd_get_named_dev(name) != NULL) { + CDEV_LOG_ERR("Crypto device with name %s already " + "allocated!", name); + return NULL; + } + + dev_id = rte_cryptodev_find_free_device_index(); + if (dev_id == RTE_CRYPTO_MAX_DEVS) { + CDEV_LOG_ERR("Reached maximum number of crypto devices"); + return NULL; + } + + cryptodev = rte_cryptodev_pmd_get_dev(dev_id); + + if (cryptodev->data == NULL) { + struct rte_cryptodev_data *cryptodev_data = + cryptodev_globals.data[dev_id]; + + int retval = rte_cryptodev_data_alloc(dev_id, &cryptodev_data, + socket_id); + + if (retval < 0 || cryptodev_data == NULL) + return NULL; + + cryptodev->data = cryptodev_data; + + snprintf(cryptodev->data->name, RTE_CRYPTODEV_NAME_MAX_LEN, + "%s", name); + + cryptodev->data->dev_id = dev_id; + cryptodev->data->socket_id = socket_id; + cryptodev->data->dev_started = 0; + + cryptodev->attached = RTE_CRYPTODEV_ATTACHED; + cryptodev->pmd_type = type; + + cryptodev_globals.nb_devs++; + } + + return cryptodev; +} + +static inline int +rte_cryptodev_create_unique_device_name(char *name, size_t size, + struct rte_pci_device *pci_dev) +{ + int ret; + + if ((name == NULL) || (pci_dev == NULL)) + return -EINVAL; + + ret = snprintf(name, size, "%d:%d.%d", + pci_dev->addr.bus, pci_dev->addr.devid, + pci_dev->addr.function); + if (ret < 0) + return ret; + return 0; +} + +int +rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev) +{ + int ret; + + if (cryptodev == NULL) + return -EINVAL; + + ret = rte_cryptodev_close(cryptodev->data->dev_id); + if (ret < 0) + return ret; + + cryptodev->attached = RTE_CRYPTODEV_DETACHED; + cryptodev_globals.nb_devs--; + return 0; +} + +struct rte_cryptodev * +rte_cryptodev_pmd_virtual_dev_init(const char *name, size_t dev_private_size, + int socket_id) +{ + struct rte_cryptodev *cryptodev; + + /* allocate device structure */ + cryptodev = rte_cryptodev_pmd_allocate(name, PMD_VDEV, socket_id); + if (cryptodev == NULL) + return NULL; + + /* allocate private device structure */ + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + cryptodev->data->dev_private = + rte_zmalloc_socket("cryptodev device private", + dev_private_size, + RTE_CACHE_LINE_SIZE, + 
socket_id);
+
+		if (cryptodev->data->dev_private == NULL)
+			rte_panic("Cannot allocate memzone for private device"
+					" data");
+	}
+
+	/* initialise user call-back tail queue */
+	TAILQ_INIT(&(cryptodev->link_intr_cbs));
+
+	return cryptodev;
+}
+
+static int
+rte_cryptodev_init(struct rte_pci_driver *pci_drv,
+		struct rte_pci_device *pci_dev)
+{
+	struct rte_cryptodev_driver *cryptodrv;
+	struct rte_cryptodev *cryptodev;
+
+	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+	int retval;
+
+	cryptodrv = (struct rte_cryptodev_driver *)pci_drv;
+	if (cryptodrv == NULL)
+		return -ENODEV;
+
+	/* Create unique Crypto device name using PCI address */
+	rte_cryptodev_create_unique_device_name(cryptodev_name,
+			sizeof(cryptodev_name), pci_dev);
+
+	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, PMD_PDEV,
+			rte_socket_id());
+	if (cryptodev == NULL)
+		return -ENOMEM;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		cryptodev->data->dev_private =
+				rte_zmalloc_socket(
+						"cryptodev private structure",
+						cryptodrv->dev_private_size,
+						RTE_CACHE_LINE_SIZE,
+						rte_socket_id());
+
+		if (cryptodev->data->dev_private == NULL)
+			rte_panic("Cannot allocate memzone for private "
+					"device data");
+	}
+
+	cryptodev->pci_dev = pci_dev;
+	cryptodev->driver = cryptodrv;
+
+	/* init user callbacks */
+	TAILQ_INIT(&(cryptodev->link_intr_cbs));
+
+	/* Invoke PMD device initialization function */
+	retval = (*cryptodrv->cryptodev_init)(cryptodrv, cryptodev);
+	if (retval == 0)
+		return 0;
+
+	CDEV_LOG_ERR("driver %s: crypto_dev_init(vendor_id=0x%x device_id=0x%x)"
+			" failed", pci_drv->name,
+			(unsigned) pci_dev->id.vendor_id,
+			(unsigned) pci_dev->id.device_id);
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_free(cryptodev->data->dev_private);
+
+	cryptodev->attached = RTE_CRYPTODEV_DETACHED;
+	cryptodev_globals.nb_devs--;
+
+	return -ENXIO;
+}
+
+static int
+rte_cryptodev_uninit(struct rte_pci_device *pci_dev)
+{
+	const struct rte_cryptodev_driver *cryptodrv;
+	struct rte_cryptodev *cryptodev;
+	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	int ret;
+
+	if (pci_dev == NULL)
+		return -EINVAL;
+
+	/* Create unique device name using PCI address */
+	rte_cryptodev_create_unique_device_name(cryptodev_name,
+			sizeof(cryptodev_name), pci_dev);
+
+	cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	cryptodrv = (const struct rte_cryptodev_driver *)pci_dev->driver;
+	if (cryptodrv == NULL)
+		return -ENODEV;
+
+	/* Invoke PMD device uninit function */
+	if (*cryptodrv->cryptodev_uninit) {
+		ret = (*cryptodrv->cryptodev_uninit)(cryptodrv, cryptodev);
+		if (ret)
+			return ret;
+	}
+
+	/* free crypto device */
+	rte_cryptodev_pmd_release_device(cryptodev);
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		rte_free(cryptodev->data->dev_private);
+
+	cryptodev->pci_dev = NULL;
+	cryptodev->driver = NULL;
+	cryptodev->data = NULL;
+
+	return 0;
+}
+
+int
+rte_cryptodev_pmd_driver_register(struct rte_cryptodev_driver *cryptodrv,
+		enum pmd_type type)
+{
+	/* Call crypto device initialization directly if device is virtual */
+	if (type == PMD_VDEV)
+		return rte_cryptodev_init((struct rte_pci_driver *)cryptodrv,
+				NULL);
+
+	/*
+	 * Register PCI driver for physical device initialisation during
+	 * PCI probing
+	 */
+	cryptodrv->pci_drv.devinit = rte_cryptodev_init;
+	cryptodrv->pci_drv.devuninit = rte_cryptodev_uninit;
+
+	rte_eal_pci_register(&cryptodrv->pci_drv);
+
+	return 0;
+}
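[For reference, the usual call sequence from an application into the configuration entry points implemented below; the queue-pair and session-pool sizing values are illustrative assumptions:]

    struct rte_cryptodev_config conf = {
            .socket_id = rte_socket_id(),
            .nb_queue_pairs = 1,
            .session_mp = { .nb_objs = 2048, .cache_size = 64 },
    };
    struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };

    if (rte_cryptodev_configure(dev_id, &conf) < 0)
            rte_exit(EXIT_FAILURE, "Failed to configure cryptodev %u\n",
                            dev_id);

    if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
                    rte_socket_id()) < 0)
            rte_exit(EXIT_FAILURE, "Failed to set up queue pair 0\n");

    if (rte_cryptodev_start(dev_id) < 0)
            rte_exit(EXIT_FAILURE, "Failed to start cryptodev %u\n", dev_id);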
+
+
+uint16_t
+rte_cryptodev_queue_pair_count(uint8_t dev_id)
+{
+	struct rte_cryptodev *dev;
+
+	dev = &rte_crypto_devices[dev_id];
+	return dev->data->nb_queue_pairs;
+}
+
+static int
+rte_cryptodev_queue_pairs_config(struct rte_cryptodev *dev, uint16_t nb_qpairs,
+		int socket_id)
+{
+	struct rte_cryptodev_info dev_info;
+	void **qp;
+	unsigned i;
+
+	if ((dev == NULL) || (nb_qpairs < 1)) {
+		CDEV_LOG_ERR("invalid param: dev %p, nb_queues %u",
+					dev, nb_qpairs);
+		return -EINVAL;
+	}
+
+	CDEV_LOG_DEBUG("Setup %d queue pairs on device %u",
+			nb_qpairs, dev->data->dev_id);
+
+	memset(&dev_info, 0, sizeof(struct rte_cryptodev_info));
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+	(*dev->dev_ops->dev_infos_get)(dev, &dev_info);
+
+	if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
+		CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
+				nb_qpairs, dev->data->dev_id);
+		return -EINVAL;
+	}
+
+	if (dev->data->queue_pairs == NULL) { /* first time configuration */
+		dev->data->queue_pairs = rte_zmalloc_socket(
+				"cryptodev->queue_pairs",
+				sizeof(dev->data->queue_pairs[0]) * nb_qpairs,
+				RTE_CACHE_LINE_SIZE, socket_id);
+
+		if (dev->data->queue_pairs == NULL) {
+			dev->data->nb_queue_pairs = 0;
+			CDEV_LOG_ERR("failed to get memory for qp meta data, "
+							"nb_queues %u",
+							nb_qpairs);
+			return -(ENOMEM);
+		}
+	} else { /* re-configure */
+		int ret;
+		uint16_t old_nb_queues = dev->data->nb_queue_pairs;
+
+		qp = dev->data->queue_pairs;
+
+		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_release,
+				-ENOTSUP);
+
+		for (i = nb_qpairs; i < old_nb_queues; i++) {
+			ret = (*dev->dev_ops->queue_pair_release)(dev, i);
+			if (ret < 0)
+				return ret;
+		}
+
+		qp = rte_realloc(qp, sizeof(qp[0]) * nb_qpairs,
+				RTE_CACHE_LINE_SIZE);
+		if (qp == NULL) {
+			CDEV_LOG_ERR("failed to realloc qp meta data,"
+						" nb_queues %u", nb_qpairs);
+			return -(ENOMEM);
+		}
+
+		if (nb_qpairs > old_nb_queues) {
+			uint16_t new_qs = nb_qpairs - old_nb_queues;
+
+			memset(qp + old_nb_queues, 0,
+				sizeof(qp[0]) * new_qs);
+		}
+
+		dev->data->queue_pairs = qp;
+
+	}
+	dev->data->nb_queue_pairs = nb_qpairs;
+	return 0;
+}
+
+int
+rte_cryptodev_queue_pair_start(uint8_t dev_id, uint16_t queue_pair_id)
+{
+	struct rte_cryptodev *dev;
+
+	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_crypto_devices[dev_id];
+	if (queue_pair_id >= dev->data->nb_queue_pairs) {
+		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_start, -ENOTSUP);
+
+	return dev->dev_ops->queue_pair_start(dev, queue_pair_id);
+
+}
+
+int
+rte_cryptodev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id)
+{
+	struct rte_cryptodev *dev;
+
+	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_crypto_devices[dev_id];
+	if (queue_pair_id >= dev->data->nb_queue_pairs) {
+		CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
+		return -EINVAL;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_stop, -ENOTSUP);
+
+	return dev->dev_ops->queue_pair_stop(dev, queue_pair_id);
+
+}
+
+static int
+rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
+		unsigned nb_objs, unsigned obj_cache_size, int socket_id);
+
+int
+rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
+{
+	struct rte_cryptodev *dev;
+	int diag;
+
+	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+		return -EINVAL;
+	}
+
dev = &rte_crypto_devices[dev_id]; + + if (dev->data->dev_started) { + CDEV_LOG_ERR( + "device %d must be stopped to allow configuration", dev_id); + return -EBUSY; + } + + /* Setup new number of queue pairs and reconfigure device. */ + diag = rte_cryptodev_queue_pairs_config(dev, config->nb_queue_pairs, + config->socket_id); + if (diag != 0) { + CDEV_LOG_ERR("dev%d rte_crypto_dev_queue_pairs_config = %d", + dev_id, diag); + return diag; + } + + /* Setup Session mempool for device */ + return rte_cryptodev_sym_session_pool_create(dev, + config->session_mp.nb_objs, + config->session_mp.cache_size, + config->socket_id); +} + + +int +rte_cryptodev_start(uint8_t dev_id) +{ + struct rte_cryptodev *dev; + int diag; + + CDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id); + + if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { + CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); + return -EINVAL; + } + + dev = &rte_crypto_devices[dev_id]; + + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP); + + if (dev->data->dev_started != 0) { + CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started", + dev_id); + return 0; + } + + diag = (*dev->dev_ops->dev_start)(dev); + if (diag == 0) + dev->data->dev_started = 1; + else + return diag; + + return 0; +} + +void +rte_cryptodev_stop(uint8_t dev_id) +{ + struct rte_cryptodev *dev; + + if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { + CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); + return; + } + + dev = &rte_crypto_devices[dev_id]; + + RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop); + + if (dev->data->dev_started == 0) { + CDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped", + dev_id); + return; + } + + dev->data->dev_started = 0; + (*dev->dev_ops->dev_stop)(dev); +} + +int +rte_cryptodev_close(uint8_t dev_id) +{ + struct rte_cryptodev *dev; + int retval; + + if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { + CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); + return -1; + } + + dev = &rte_crypto_devices[dev_id]; + + /* Device must be stopped before it can be closed */ + if (dev->data->dev_started == 1) { + CDEV_LOG_ERR("Device %u must be stopped before closing", + dev_id); + return -EBUSY; + } + + /* We can't close the device if there are outstanding sessions in use */ + if (dev->data->session_pool != NULL) { + if (!rte_mempool_full(dev->data->session_pool)) { + CDEV_LOG_ERR("dev_id=%u close failed, session mempool " + "has sessions still in use, free " + "all sessions before calling close", + (unsigned)dev_id); + return -EBUSY; + } + } + + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP); + retval = (*dev->dev_ops->dev_close)(dev); + + if (retval < 0) + return retval; + + return 0; +} + +int +rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id, + const struct rte_cryptodev_qp_conf *qp_conf, int socket_id) +{ + struct rte_cryptodev *dev; + + if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) { + CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id); + return -EINVAL; + } + + dev = &rte_crypto_devices[dev_id]; + if (queue_pair_id >= dev->data->nb_queue_pairs) { + CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id); + return -EINVAL; + } + + if (dev->data->dev_started) { + CDEV_LOG_ERR( + "device %d must be stopped to allow configuration", dev_id); + return -EBUSY; + } + + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_pair_setup, -ENOTSUP); + + return (*dev->dev_ops->queue_pair_setup)(dev, queue_pair_id, qp_conf, + socket_id); +} + + +int +rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats) +{ + struct 
+	struct rte_cryptodev *dev;
+
+	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
+		return -ENODEV;
+	}
+
+	if (stats == NULL) {
+		CDEV_LOG_ERR("Invalid stats ptr");
+		return -EINVAL;
+	}
+
+	dev = &rte_crypto_devices[dev_id];
+	memset(stats, 0, sizeof(*stats));
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_get, -ENOTSUP);
+	(*dev->dev_ops->stats_get)(dev, stats);
+	return 0;
+}
+
+void
+rte_cryptodev_stats_reset(uint8_t dev_id)
+{
+	struct rte_cryptodev *dev;
+
+	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+		return;
+	}
+
+	dev = &rte_crypto_devices[dev_id];
+
+	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->stats_reset);
+	(*dev->dev_ops->stats_reset)(dev);
+}
+
+
+void
+rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
+{
+	struct rte_cryptodev *dev;
+
+	if (dev_id >= cryptodev_globals.nb_devs) {
+		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
+		return;
+	}
+
+	dev = &rte_crypto_devices[dev_id];
+
+	memset(dev_info, 0, sizeof(struct rte_cryptodev_info));
+
+	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
+	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
+
+	dev_info->pci_dev = dev->pci_dev;
+	if (dev->driver)
+		dev_info->driver_name = dev->driver->pci_drv.name;
+}
+
+
+int
+rte_cryptodev_callback_register(uint8_t dev_id,
+		enum rte_cryptodev_event_type event,
+		rte_cryptodev_cb_fn cb_fn, void *cb_arg)
+{
+	struct rte_cryptodev *dev;
+	struct rte_cryptodev_callback *user_cb;
+
+	if (!cb_fn)
+		return -EINVAL;
+
+	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_crypto_devices[dev_id];
+	rte_spinlock_lock(&rte_cryptodev_cb_lock);
+
+	TAILQ_FOREACH(user_cb, &(dev->link_intr_cbs), next) {
+		if (user_cb->cb_fn == cb_fn &&
+				user_cb->cb_arg == cb_arg &&
+				user_cb->event == event) {
+			break;
+		}
+	}
+
+	/* create a new callback. */
+	if (user_cb == NULL) {
+		user_cb = rte_zmalloc("INTR_USER_CALLBACK",
+				sizeof(struct rte_cryptodev_callback), 0);
+		if (user_cb != NULL) {
+			user_cb->cb_fn = cb_fn;
+			user_cb->cb_arg = cb_arg;
+			user_cb->event = event;
+			TAILQ_INSERT_TAIL(&(dev->link_intr_cbs), user_cb, next);
+		}
+	}
+
+	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
+	return (user_cb == NULL) ? -ENOMEM : 0;
+}
+
+int
+rte_cryptodev_callback_unregister(uint8_t dev_id,
+		enum rte_cryptodev_event_type event,
+		rte_cryptodev_cb_fn cb_fn, void *cb_arg)
+{
+	int ret;
+	struct rte_cryptodev *dev;
+	struct rte_cryptodev_callback *cb, *next;
+
+	if (!cb_fn)
+		return -EINVAL;
+
+	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+		CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
+		return -EINVAL;
+	}
+
+	dev = &rte_crypto_devices[dev_id];
+	rte_spinlock_lock(&rte_cryptodev_cb_lock);
+
+	ret = 0;
+	for (cb = TAILQ_FIRST(&dev->link_intr_cbs); cb != NULL; cb = next) {
+
+		next = TAILQ_NEXT(cb, next);
+
+		if (cb->cb_fn != cb_fn || cb->event != event ||
+				(cb->cb_arg != (void *)-1 &&
+				cb->cb_arg != cb_arg))
+			continue;
+
+		/*
+		 * if this callback is not executing right now,
+		 * then remove it.
+		 */
+		if (cb->active == 0) {
+			TAILQ_REMOVE(&(dev->link_intr_cbs), cb, next);
+			rte_free(cb);
+		} else {
+			ret = -EAGAIN;
+		}
+	}
+
+	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
+	return ret;
+}
+
+void
+rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
+	enum rte_cryptodev_event_type event)
+{
+	struct rte_cryptodev_callback *cb_lst;
+	struct rte_cryptodev_callback dev_cb;
+
+	rte_spinlock_lock(&rte_cryptodev_cb_lock);
+	TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
+		if (cb_lst->cb_fn == NULL || cb_lst->event != event)
+			continue;
+		dev_cb = *cb_lst;
+		cb_lst->active = 1;
+		rte_spinlock_unlock(&rte_cryptodev_cb_lock);
+		dev_cb.cb_fn(dev->data->dev_id, dev_cb.event,
+				dev_cb.cb_arg);
+		rte_spinlock_lock(&rte_cryptodev_cb_lock);
+		cb_lst->active = 0;
+	}
+	rte_spinlock_unlock(&rte_cryptodev_cb_lock);
+}
+
+
+static void
+rte_cryptodev_sym_session_init(struct rte_mempool *mp,
+		void *opaque_arg,
+		void *_sess,
+		__rte_unused unsigned i)
+{
+	struct rte_cryptodev_sym_session *sess = _sess;
+	struct rte_cryptodev *dev = opaque_arg;
+
+	memset(sess, 0, mp->elt_size);
+
+	sess->dev_id = dev->data->dev_id;
+	sess->dev_type = dev->dev_type;
+	sess->mp = mp;
+
+	if (dev->dev_ops->session_initialize)
+		(*dev->dev_ops->session_initialize)(mp, sess->_private);
+}
+
+static int
+rte_cryptodev_sym_session_pool_create(struct rte_cryptodev *dev,
+		unsigned nb_objs, unsigned obj_cache_size, int socket_id)
+{
+	char mp_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	unsigned priv_sess_size;
+
+	unsigned n = snprintf(mp_name, sizeof(mp_name), "cdev_%d_sess_mp",
+			dev->data->dev_id);
+	if (n >= sizeof(mp_name)) {
+		CDEV_LOG_ERR("Unable to create unique name for session mempool");
+		return -ENOMEM;
+	}
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_get_size, -ENOTSUP);
+	priv_sess_size = (*dev->dev_ops->session_get_size)(dev);
+	if (priv_sess_size == 0) {
+		CDEV_LOG_ERR("%s returned an invalid private session size",
+				dev->data->name);
+		return -ENOMEM;
+	}
+
+	unsigned elt_size = sizeof(struct rte_cryptodev_sym_session) +
+			priv_sess_size;
+
+	dev->data->session_pool = rte_mempool_lookup(mp_name);
+	if (dev->data->session_pool != NULL) {
+		if ((dev->data->session_pool->elt_size != elt_size) ||
+				(dev->data->session_pool->cache_size <
+				obj_cache_size) ||
+				(dev->data->session_pool->size < nb_objs)) {
+
+			CDEV_LOG_ERR("%s mempool already exists with different"
+					" initialization parameters", mp_name);
+			dev->data->session_pool = NULL;
+			return -ENOMEM;
+		}
+	} else {
+		dev->data->session_pool = rte_mempool_create(
+				mp_name, /* mempool name */
+				nb_objs, /* number of elements */
+				elt_size, /* element size */
+				obj_cache_size, /* cache size */
+				0, /* private data size */
+				NULL, /* obj initialization constructor */
+				NULL, /* obj initialization constructor arg */
+				rte_cryptodev_sym_session_init,
+				/* obj constructor */
+				dev, /* obj constructor arg */
+				socket_id, /* socket id */
+				0); /* flags */
+
+		if (dev->data->session_pool == NULL) {
+			CDEV_LOG_ERR("%s mempool allocation failed", mp_name);
+			return -ENOMEM;
+		}
+	}
+
+	CDEV_LOG_DEBUG("%s mempool created!", mp_name);
+	return 0;
+}
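+/*
+ * Editorial sketch (not part of the upstream patch): application-side
+ * session lifetime around the create/free pair defined below. The xform
+ * setup is elided and all names are illustrative:
+ *
+ *	struct rte_crypto_sym_xform xform = { ... };
+ *	struct rte_cryptodev_sym_session *sess;
+ *
+ *	sess = rte_cryptodev_sym_session_create(dev_id, &xform);
+ *	if (sess == NULL)
+ *		return -1;
+ *	... attach sess to crypto ops, enqueue, dequeue ...
+ *	rte_cryptodev_sym_session_free(dev_id, sess);
+ */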
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(uint8_t dev_id,
+		struct rte_crypto_sym_xform *xform)
+{
+	struct rte_cryptodev *dev;
+	struct rte_cryptodev_sym_session *sess;
+	void *_sess;
+
+	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
+		return NULL;
+	}
+
+	dev = &rte_crypto_devices[dev_id];
+
+	/* Allocate a session structure from the session pool */
+	if (rte_mempool_get(dev->data->session_pool, &_sess)) {
+		CDEV_LOG_ERR("Couldn't get object from session mempool");
+		return NULL;
+	}
+
+	sess = (struct rte_cryptodev_sym_session *)_sess;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_configure, NULL);
+	if (dev->dev_ops->session_configure(dev, xform, sess->_private) ==
+			NULL) {
+		CDEV_LOG_ERR("dev_id %d failed to configure session details",
+				dev_id);
+
+		/* Return session to mempool */
+		rte_mempool_put(sess->mp, _sess);
+		return NULL;
+	}
+
+	return sess;
+}
+
+struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_free(uint8_t dev_id,
+		struct rte_cryptodev_sym_session *sess)
+{
+	struct rte_cryptodev *dev;
+
+	if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
+		CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
+		return sess;
+	}
+
+	dev = &rte_crypto_devices[dev_id];
+
+	/* Check the session belongs to this device type */
+	if (sess->dev_type != dev->dev_type)
+		return sess;
+
+	/* Let device implementation clear session material */
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->session_clear, sess);
+	dev->dev_ops->session_clear(dev, (void *)sess->_private);
+
+	/* Return session to mempool */
+	rte_mempool_put(sess->mp, (void *)sess);
+
+	return NULL;
+}
+
+/** Initialise rte_crypto_op mempool element */
+static void
+rte_crypto_op_init(struct rte_mempool *mempool,
+		void *opaque_arg,
+		void *_op_data,
+		__rte_unused unsigned i)
+{
+	struct rte_crypto_op *op = _op_data;
+	enum rte_crypto_op_type type = *(enum rte_crypto_op_type *)opaque_arg;
+
+	memset(_op_data, 0, mempool->elt_size);
+
+	__rte_crypto_op_reset(op, type);
+
+	op->phys_addr = rte_mem_virt2phy(_op_data);
+	op->mempool = mempool;
+}
+
+
+struct rte_mempool *
+rte_crypto_op_pool_create(const char *name, enum rte_crypto_op_type type,
+		unsigned nb_elts, unsigned cache_size, uint16_t priv_size,
+		int socket_id)
+{
+	struct rte_crypto_op_pool_private *priv;
+
+	unsigned elt_size = sizeof(struct rte_crypto_op) +
+			sizeof(struct rte_crypto_sym_op) +
+			priv_size;
+
+	/* lookup mempool in case already allocated */
+	struct rte_mempool *mp = rte_mempool_lookup(name);
+
+	if (mp != NULL) {
+		priv = (struct rte_crypto_op_pool_private *)
+				rte_mempool_get_priv(mp);
+
+		if (mp->elt_size != elt_size ||
+				mp->cache_size < cache_size ||
+				mp->size < nb_elts ||
+				priv->priv_size < priv_size) {
+			mp = NULL;
+			CDEV_LOG_ERR("Mempool %s already exists but with "
+					"incompatible parameters", name);
+			return NULL;
+		}
+		return mp;
+	}
+
+	mp = rte_mempool_create(
+			name,
+			nb_elts,
+			elt_size,
+			cache_size,
+			sizeof(struct rte_crypto_op_pool_private),
+			NULL,
+			NULL,
+			rte_crypto_op_init,
+			&type,
+			socket_id,
+			0);
+
+	if (mp == NULL) {
+		CDEV_LOG_ERR("Failed to create mempool %s", name);
+		return NULL;
+	}
+
+	priv = (struct rte_crypto_op_pool_private *)
+			rte_mempool_get_priv(mp);
+
+	priv->priv_size = priv_size;
+	priv->type = type;
+
+	return mp;
+}
diff --git a/lib/librte_cryptodev/rte_cryptodev.h b/lib/librte_cryptodev/rte_cryptodev.h
new file mode 100644
index 00000000..d47f1e87
--- /dev/null
+++ b/lib/librte_cryptodev/rte_cryptodev.h
@@ -0,0 +1,896 @@
+/*-
+ *
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	  notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	  notice, this list of conditions and the following disclaimer in
+ *	  the documentation and/or other materials provided with the
+ *	  distribution.
+ *	* Neither the name of Intel Corporation nor the names of its
+ *	  contributors may be used to endorse or promote products derived
+ *	  from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTODEV_H_
+#define _RTE_CRYPTODEV_H_
+
+/**
+ * @file rte_cryptodev.h
+ *
+ * RTE Cryptographic Device APIs
+ *
+ * Defines RTE Crypto Device APIs for the provisioning of cipher and
+ * authentication operations.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "rte_kvargs.h"
+#include "rte_crypto.h"
+#include "rte_dev.h"
+
+#define CRYPTODEV_NAME_NULL_PMD		("cryptodev_null_pmd")
+/**< Null crypto PMD device name */
+#define CRYPTODEV_NAME_AESNI_MB_PMD	("cryptodev_aesni_mb_pmd")
+/**< AES-NI Multi buffer PMD device name */
+#define CRYPTODEV_NAME_AESNI_GCM_PMD	("cryptodev_aesni_gcm_pmd")
+/**< AES-NI GCM PMD device name */
+#define CRYPTODEV_NAME_QAT_SYM_PMD	("cryptodev_qat_sym_pmd")
+/**< Intel QAT Symmetric Crypto PMD device name */
+#define CRYPTODEV_NAME_SNOW3G_PMD	("cryptodev_snow3g_pmd")
+/**< SNOW 3G PMD device name */
+
+/** Crypto device type */
+enum rte_cryptodev_type {
+	RTE_CRYPTODEV_NULL_PMD = 1,	/**< Null crypto PMD */
+	RTE_CRYPTODEV_AESNI_GCM_PMD,	/**< AES-NI GCM PMD */
+	RTE_CRYPTODEV_AESNI_MB_PMD,	/**< AES-NI multi buffer PMD */
+	RTE_CRYPTODEV_QAT_SYM_PMD,	/**< QAT PMD Symmetric Crypto */
+	RTE_CRYPTODEV_SNOW3G_PMD,	/**< SNOW 3G PMD */
+};
+
+extern const char **rte_cyptodev_names;
+
+/* Logging Macros */
+
+#define CDEV_LOG_ERR(fmt, args...)					\
+		RTE_LOG(ERR, CRYPTODEV, "%s() line %u: " fmt "\n",	\
+				__func__, __LINE__, ## args)
+
+#define CDEV_PMD_LOG_ERR(dev, fmt, args...)				\
+		RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n",	\
+				dev, __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_CRYPTODEV_DEBUG
+#define CDEV_LOG_DEBUG(fmt, args...)					\
+		RTE_LOG(DEBUG, CRYPTODEV, "%s() line %u: " fmt "\n",	\
+				__func__, __LINE__, ## args)
+
+#define CDEV_PMD_TRACE(fmt, args...)					\
+		RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s: " fmt "\n",		\
+				dev, __func__, ## args)
+
+#else
+#define CDEV_LOG_DEBUG(fmt, args...)
+#define CDEV_PMD_TRACE(fmt, args...)
+#endif
+
+/**
+ * Symmetric Crypto Capability
+ */
+struct rte_cryptodev_symmetric_capability {
+	enum rte_crypto_sym_xform_type xform_type;
+	/**< Transform type : Authentication / Cipher */
+	union {
+		struct {
+			enum rte_crypto_auth_algorithm algo;
+			/**< authentication algorithm */
+			uint16_t block_size;
+			/**< algorithm block size */
+			struct {
+				uint16_t min;	/**< minimum key size */
+				uint16_t max;	/**< maximum key size */
+				uint16_t increment;
+				/**< if a range of sizes is supported,
+				 * this parameter is used to indicate
+				 * increments in byte size that are supported
+				 * between the minimum and maximum
+				 */
+			} key_size;
+			/**< auth key size range */
+			struct {
+				uint16_t min;	/**< minimum digest size */
+				uint16_t max;	/**< maximum digest size */
+				uint16_t increment;
+				/**< if a range of sizes is supported,
+				 * this parameter is used to indicate
+				 * increments in byte size that are supported
+				 * between the minimum and maximum
+				 */
+			} digest_size;
+			/**< digest size range */
+			struct {
+				uint16_t min;	/**< minimum aad size */
+				uint16_t max;	/**< maximum aad size */
+				uint16_t increment;
+				/**< if a range of sizes is supported,
+				 * this parameter is used to indicate
+				 * increments in byte size that are supported
+				 * between the minimum and maximum
+				 */
+			} aad_size;
+			/**< Additional authentication data size range */
+		} auth;
+		/**< Symmetric Authentication transform capabilities */
+		struct {
+			enum rte_crypto_cipher_algorithm algo;
+			/**< cipher algorithm */
+			uint16_t block_size;
+			/**< algorithm block size */
+			struct {
+				uint16_t min;	/**< minimum key size */
+				uint16_t max;	/**< maximum key size */
+				uint16_t increment;
+				/**< if a range of sizes is supported,
+				 * this parameter is used to indicate
+				 * increments in byte size that are supported
+				 * between the minimum and maximum
+				 */
+			} key_size;
+			/**< cipher key size range */
+			struct {
+				uint16_t min;	/**< minimum iv size */
+				uint16_t max;	/**< maximum iv size */
+				uint16_t increment;
+				/**< if a range of sizes is supported,
+				 * this parameter is used to indicate
+				 * increments in byte size that are supported
+				 * between the minimum and maximum
+				 */
+			} iv_size;
+			/**< Initialisation vector data size range */
+		} cipher;
+		/**< Symmetric Cipher transform capabilities */
+	};
+};
+
+/** Structure used to capture a capability of a crypto device */
+struct rte_cryptodev_capabilities {
+	enum rte_crypto_op_type op;
+	/**< Operation type */
+
+	union {
+		struct rte_cryptodev_symmetric_capability sym;
+		/**< Symmetric operation capability parameters */
+	};
+};
+
+/** Macro used at end of crypto PMD list */
+#define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
+	{ RTE_CRYPTO_OP_TYPE_UNDEFINED }
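+/*
+ * Editorial sketch (not part of the upstream patch): walking a device's
+ * capability array, which is terminated by an RTE_CRYPTO_OP_TYPE_UNDEFINED
+ * entry. The array comes from rte_cryptodev_info_get() (declared later in
+ * this file); the algorithm checked for is illustrative only:
+ *
+ *	const struct rte_cryptodev_capabilities *cap;
+ *	struct rte_cryptodev_info info;
+ *
+ *	rte_cryptodev_info_get(dev_id, &info);
+ *
+ *	for (cap = info.capabilities;
+ *			cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) {
+ *		if (cap->op == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
+ *				cap->sym.xform_type ==
+ *					RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ *				cap->sym.cipher.algo ==
+ *					RTE_CRYPTO_CIPHER_AES_CBC)
+ *			break;	/* device supports AES-CBC */
+ *	}
+ */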
+/**
+ * Crypto device supported feature flags
+ *
+ * Note:
+ * New features flags should be added to the end of the list
+ *
+ * Keep these flags synchronised with rte_cryptodev_get_feature_name()
+ */
+#define	RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO	(1ULL << 0)
+/**< Symmetric crypto operations are supported */
+#define	RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO	(1ULL << 1)
+/**< Asymmetric crypto operations are supported */
+#define	RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING	(1ULL << 2)
+/**< Chaining symmetric crypto operations are supported */
+#define	RTE_CRYPTODEV_FF_CPU_SSE		(1ULL << 3)
+/**< Utilises CPU SIMD SSE instructions */
+#define	RTE_CRYPTODEV_FF_CPU_AVX		(1ULL << 4)
+/**< Utilises CPU SIMD AVX instructions */
+#define	RTE_CRYPTODEV_FF_CPU_AVX2		(1ULL << 5)
+/**< Utilises CPU SIMD AVX2 instructions */
+#define	RTE_CRYPTODEV_FF_CPU_AESNI		(1ULL << 6)
+/**< Utilises CPU AES-NI instructions */
+#define	RTE_CRYPTODEV_FF_HW_ACCELERATED		(1ULL << 7)
+/**< Operations are off-loaded to an external hardware accelerator */
+
+
+/**
+ * Get the name of a crypto device feature flag
+ *
+ * @param	flag	The mask describing the flag.
+ *
+ * @return
+ *   The name of this flag, or NULL if it's not a valid feature flag.
+ */
+extern const char *
+rte_cryptodev_get_feature_name(uint64_t flag);
+
+/** Crypto device information */
+struct rte_cryptodev_info {
+	const char *driver_name;		/**< Driver name. */
+	enum rte_cryptodev_type dev_type;	/**< Device type */
+	struct rte_pci_device *pci_dev;		/**< PCI information. */
+
+	uint64_t feature_flags;			/**< Feature flags */
+
+	const struct rte_cryptodev_capabilities *capabilities;
+	/**< Array of devices supported capabilities */
+
+	unsigned max_nb_queue_pairs;
+	/**< Maximum number of queue pairs supported by device. */
+
+	struct {
+		unsigned max_nb_sessions;
+		/**< Maximum number of sessions supported by device. */
+	} sym;
+};
+
+#define RTE_CRYPTODEV_DETACHED  (0)
+#define RTE_CRYPTODEV_ATTACHED  (1)
+
+/** Definitions of Crypto device event types */
+enum rte_cryptodev_event_type {
+	RTE_CRYPTODEV_EVENT_UNKNOWN,	/**< unknown event type */
+	RTE_CRYPTODEV_EVENT_ERROR,	/**< error interrupt event */
+	RTE_CRYPTODEV_EVENT_MAX		/**< max value of this enum */
+};
+
+/** Crypto device queue pair configuration structure. */
+struct rte_cryptodev_qp_conf {
+	uint32_t nb_descriptors; /**< Number of descriptors per queue pair */
+};
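+/*
+ * Editorial sketch (not part of the upstream patch): sizing and creating a
+ * queue pair with the structure above, via rte_cryptodev_queue_pair_setup()
+ * (declared later in this file). The descriptor count is illustrative:
+ *
+ *	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
+ *
+ *	if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
+ *			SOCKET_ID_ANY) < 0)
+ *		rte_exit(EXIT_FAILURE, "qp setup failed");
+ */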
+/**
+ * Typedef for application callback function to be registered by application
+ * software for notification of device events
+ *
+ * @param	dev_id	Crypto device identifier
+ * @param	event	Crypto device event to register for notification of.
+ * @param	cb_arg	User specified parameter to be passed to the user's
+ *			callback function.
+ */
+typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
+		enum rte_cryptodev_event_type event, void *cb_arg);
+
+
+/** Crypto Device statistics */
+struct rte_cryptodev_stats {
+	uint64_t enqueued_count;
+	/**< Count of all operations enqueued */
+	uint64_t dequeued_count;
+	/**< Count of all operations dequeued */
+
+	uint64_t enqueue_err_count;
+	/**< Total error count on operations enqueued */
+	uint64_t dequeue_err_count;
+	/**< Total error count on operations dequeued */
+};
+
+#define RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS	8
+#define RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS	2048
+
+/**
+ * @internal
+ * Initialisation parameters for virtual crypto devices
+ */
+struct rte_crypto_vdev_init_params {
+	unsigned max_nb_queue_pairs;
+	unsigned max_nb_sessions;
+	uint8_t socket_id;
+};
+
+#define RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG	("max_nb_queue_pairs")
+#define RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG	("max_nb_sessions")
+#define RTE_CRYPTODEV_VDEV_SOCKET_ID		("socket_id")
+
+static const char *cryptodev_vdev_valid_params[] = {
+	RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
+	RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
+	RTE_CRYPTODEV_VDEV_SOCKET_ID
+};
+
+static inline uint8_t
+number_of_sockets(void)
+{
+	int sockets = 0;
+	int i;
+	const struct rte_memseg *ms = rte_eal_get_physmem_layout();
+
+	for (i = 0; ((i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL)); i++) {
+		if (sockets < ms[i].socket_id)
+			sockets = ms[i].socket_id;
+	}
+
+	/* Number of sockets = maximum socket_id + 1 */
+	return ++sockets;
+}
+
+/** Parse integer from integer argument */
+static inline int
+__rte_cryptodev_parse_integer_arg(const char *key __rte_unused,
+		const char *value, void *extra_args)
+{
+	int *i = (int *) extra_args;
+
+	*i = atoi(value);
+	if (*i < 0) {
+		CDEV_LOG_ERR("Argument has to be positive.");
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ * Parse virtual device initialisation parameters input arguments
+ * @internal
+ *
+ * @param	params		Initialisation parameters with defaults set.
+ * @param	input_args	Command line arguments
+ *
+ * @return
+ * 0 on successful parse
+ * <0 on failure to parse
+ */
+static inline int
+rte_cryptodev_parse_vdev_init_params(struct rte_crypto_vdev_init_params *params,
+		const char *input_args)
+{
+	struct rte_kvargs *kvlist;
+	int ret;
+
+	if (params == NULL)
+		return -EINVAL;
+
+	if (input_args) {
+		kvlist = rte_kvargs_parse(input_args,
+				cryptodev_vdev_valid_params);
+		if (kvlist == NULL)
+			return -1;
+
+		ret = rte_kvargs_process(kvlist,
+				RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG,
+				&__rte_cryptodev_parse_integer_arg,
+				&params->max_nb_queue_pairs);
+		if (ret < 0)
+			goto free_kvlist;
+
+		ret = rte_kvargs_process(kvlist,
+				RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG,
+				&__rte_cryptodev_parse_integer_arg,
+				&params->max_nb_sessions);
+		if (ret < 0)
+			goto free_kvlist;
+
+		ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SOCKET_ID,
+				&__rte_cryptodev_parse_integer_arg,
+				&params->socket_id);
+		if (ret < 0)
+			goto free_kvlist;
+
+		if (params->socket_id >= number_of_sockets()) {
+			CDEV_LOG_ERR("Invalid socket id specified to create "
+				"the virtual crypto device on");
+			ret = -EINVAL;
+			goto free_kvlist;
+		}
+
+		rte_kvargs_free(kvlist);
+	}
+
+	return 0;
+
+free_kvlist:
+	rte_kvargs_free(kvlist);
+	return ret;
+}
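+/*
+ * Editorial sketch (not part of the upstream patch): a virtual PMD's init
+ * routine would typically seed the defaults above and then parse the user's
+ * --vdev arguments with the function above; names are illustrative:
+ *
+ *	struct rte_crypto_vdev_init_params params = {
+ *		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ *		RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ *		rte_socket_id()
+ *	};
+ *
+ *	rte_cryptodev_parse_vdev_init_params(&params, input_args);
+ */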
+/**
+ * Create a virtual crypto device
+ *
+ * @param	name	Cryptodev PMD name of device to be created.
+ * @param	args	Options arguments for device.
+ *
+ * @return
+ * - On successful creation of the cryptodev the device index is returned,
+ *   which will be between 0 and rte_cryptodev_count().
+ * - In the case of a failure, returns -1.
+ */
+extern int
+rte_cryptodev_create_vdev(const char *name, const char *args);
+
+/**
+ * Get the device identifier for the named crypto device.
+ *
+ * @param	name	device name to select the device structure.
+ *
+ * @return
+ *   - Returns crypto device identifier on success.
+ *   - Return -1 on failure to find named crypto device.
+ */
+extern int
+rte_cryptodev_get_dev_id(const char *name);
+
+/**
+ * Get the total number of crypto devices that have been successfully
+ * initialised.
+ *
+ * @return
+ *   - The total number of usable crypto devices.
+ */
+extern uint8_t
+rte_cryptodev_count(void);
+
+extern uint8_t
+rte_cryptodev_count_devtype(enum rte_cryptodev_type type);
+
+/**
+ * Return the NUMA socket to which a device is connected
+ *
+ * @param	dev_id	The identifier of the device
+ * @return
+ *   The NUMA socket id to which the device is connected or
+ *   a default of zero if the socket could not be determined.
+ *   -1 if the dev_id value is out of range.
+ */
+extern int
+rte_cryptodev_socket_id(uint8_t dev_id);
+
+/** Crypto device configuration structure */
+struct rte_cryptodev_config {
+	int socket_id;			/**< Socket to allocate resources on */
+	uint16_t nb_queue_pairs;
+	/**< Number of queue pairs to configure on device */
+
+	struct {
+		uint32_t nb_objs;	/**< Number of objects in mempool */
+		uint32_t cache_size;	/**< l-core object cache size */
+	} session_mp;		/**< Session mempool configuration */
+};
+
+/**
+ * Configure a device.
+ *
+ * This function must be invoked first before any other function in the
+ * API. This function can also be re-invoked when a device is in the
+ * stopped state.
+ *
+ * @param	dev_id		The identifier of the device to configure.
+ * @param	config		The crypto device configuration structure.
+ *
+ * @return
+ *   - 0: Success, device configured.
+ *   - <0: Error code returned by the driver configuration function.
+ */
+extern int
+rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);
+
+/**
+ * Start a device.
+ *
+ * The device start step is the last one and consists of setting the
+ * configured offload features and starting the transmit and the receive
+ * units of the device.
+ * On success, all basic functions exported by the API (link status,
+ * receive/transmit, and so on) can be invoked.
+ *
+ * @param	dev_id	The identifier of the device.
+ * @return
+ *   - 0: Success, device started.
+ *   - <0: Error code of the driver device start function.
+ */
+extern int
+rte_cryptodev_start(uint8_t dev_id);
+
+/**
+ * Stop a device. The device can be restarted with a call to
+ * rte_cryptodev_start().
+ *
+ * @param	dev_id	The identifier of the device.
+ */
+extern void
+rte_cryptodev_stop(uint8_t dev_id);
+
+/**
+ * Close a device. The device cannot be restarted!
+ *
+ * @param	dev_id	The identifier of the device.
+ *
+ * @return
+ *  - 0 on successfully closing device
+ *  - <0 on failure to close device
+ */
+extern int
+rte_cryptodev_close(uint8_t dev_id);
+
+/**
+ * Allocate and set up a queue pair for a device.
+ *
+ * @param	dev_id		The identifier of the device.
+ * @param	queue_pair_id	The index of the queue pairs to set up. The
+ *				value must be in the range [0, nb_queue_pair
+ *				- 1] previously supplied to
+ *				rte_cryptodev_configure().
+ * @param	qp_conf		The pointer to the configuration data to be
+ *				used for the queue pair. NULL value is
+ *				allowed, in which case default configuration
+ *				will be used.
+ * @param	socket_id	The *socket_id* argument is the socket
+ *				identifier in case of NUMA. The value can be
+ *				*SOCKET_ID_ANY* if there is no NUMA constraint
+ *				for the DMA memory allocated for the queue
+ *				pair.
+ *
+ * @return
+ *   - 0: Success, queue pair correctly set up.
+ *   - <0: Queue pair configuration failed
+ */
+extern int
+rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
+		const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
+
+/**
+ * Start a specified queue pair of a device. It is used
+ * when deferred_start flag of the specified queue is true.
+ *
+ * @param	dev_id		The identifier of the device
+ * @param	queue_pair_id	The index of the queue pair to start. The value
+ *				must be in the range [0, nb_queue_pair - 1]
+ *				previously supplied to
+ *				rte_cryptodev_configure().
+ * @return
+ *   - 0: Success, the queue pair is started.
+ *   - -EINVAL: The dev_id or the queue_id is out of range.
+ *   - -ENOTSUP: The function is not supported by the PMD.
+ */
+extern int
+rte_cryptodev_queue_pair_start(uint8_t dev_id, uint16_t queue_pair_id);
+
+/**
+ * Stop specified queue pair of a device
+ *
+ * @param	dev_id		The identifier of the device
+ * @param	queue_pair_id	The index of the queue pair to stop. The value
+ *				must be in the range [0, nb_queue_pair - 1]
+ *				previously supplied to
+ *				rte_cryptodev_configure().
+ * @return
+ *   - 0: Success, the queue pair is stopped.
+ *   - -EINVAL: The dev_id or the queue_id is out of range.
+ *   - -ENOTSUP: The function is not supported by the PMD.
+ */
+extern int
+rte_cryptodev_queue_pair_stop(uint8_t dev_id, uint16_t queue_pair_id);
+
+/**
+ * Get the number of queue pairs on a specific crypto device
+ *
+ * @param	dev_id		Crypto device identifier.
+ * @return
+ *   - The number of configured queue pairs.
+ */
+extern uint16_t
+rte_cryptodev_queue_pair_count(uint8_t dev_id);
+
+
+/**
+ * Retrieve the general I/O statistics of a device.
+ *
+ * @param	dev_id		The identifier of the device.
+ * @param	stats		A pointer to a structure of type
+ *				*rte_cryptodev_stats* to be filled with the
+ *				values of device counters.
+ * @return
+ *   - Zero if successful.
+ *   - Non-zero otherwise.
+ */
+extern int
+rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats);
+
+/**
+ * Reset the general I/O statistics of a device.
+ *
+ * @param	dev_id		The identifier of the device.
+ */
+extern void
+rte_cryptodev_stats_reset(uint8_t dev_id);
+
+/**
+ * Retrieve the contextual information of a device.
+ *
+ * @param	dev_id		The identifier of the device.
+ * @param	dev_info	A pointer to a structure of type
+ *				*rte_cryptodev_info* to be filled with the
+ *				contextual information of the device.
+ */
+extern void
+rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
+
+
+/**
+ * Register a callback function for specific device id.
+ *
+ * @param	dev_id		Device id.
+ * @param	event		Event of interest.
+ * @param	cb_fn		User supplied callback function to be called.
+ * @param	cb_arg		Pointer to the parameters for the registered
+ *				callback.
+ *
+ * @return
+ *   - On success, zero.
+ *   - On failure, a negative value.
+ */
+extern int
+rte_cryptodev_callback_register(uint8_t dev_id,
+		enum rte_cryptodev_event_type event,
+		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
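+/*
+ * Editorial sketch (not part of the upstream patch): registering an
+ * error-event handler with the function above. The callback body is
+ * illustrative only:
+ *
+ *	static void
+ *	my_event_cb(uint8_t dev_id, enum rte_cryptodev_event_type event,
+ *			void *arg)
+ *	{
+ *		if (event == RTE_CRYPTODEV_EVENT_ERROR)
+ *			printf("crypto dev %u reported an error\n", dev_id);
+ *	}
+ *
+ *	rte_cryptodev_callback_register(dev_id, RTE_CRYPTODEV_EVENT_ERROR,
+ *			my_event_cb, NULL);
+ */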
+/**
+ * Unregister a callback function for specific device id.
+ *
+ * @param	dev_id		The device identifier.
+ * @param	event		Event of interest.
+ * @param	cb_fn		User supplied callback function to be called.
+ * @param	cb_arg		Pointer to the parameters for the registered
+ *				callback.
+ *
+ * @return
+ *   - On success, zero.
+ *   - On failure, a negative value.
+ */
+extern int
+rte_cryptodev_callback_unregister(uint8_t dev_id,
+		enum rte_cryptodev_event_type event,
+		rte_cryptodev_cb_fn cb_fn, void *cb_arg);
+
+
+typedef uint16_t (*dequeue_pkt_burst_t)(void *qp,
+		struct rte_crypto_op **ops, uint16_t nb_ops);
+/**< Dequeue processed packets from queue pair of a device. */
+
+typedef uint16_t (*enqueue_pkt_burst_t)(void *qp,
+		struct rte_crypto_op **ops, uint16_t nb_ops);
+/**< Enqueue packets for processing on queue pair of a device. */
+
+
+struct rte_cryptodev_callback;
+
+/** Structure to keep track of registered callbacks */
+TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
+
+/** The data structure associated with each crypto device. */
+struct rte_cryptodev {
+	dequeue_pkt_burst_t dequeue_burst;
+	/**< Pointer to PMD receive function. */
+	enqueue_pkt_burst_t enqueue_burst;
+	/**< Pointer to PMD transmit function. */
+
+	const struct rte_cryptodev_driver *driver;
+	/**< Driver for this device */
+	struct rte_cryptodev_data *data;
+	/**< Pointer to device data */
+	struct rte_cryptodev_ops *dev_ops;
+	/**< Functions exported by PMD */
+	uint64_t feature_flags;
+	/**< Supported features */
+	struct rte_pci_device *pci_dev;
+	/**< PCI info. supplied by probing */
+
+	enum rte_cryptodev_type dev_type;
+	/**< Crypto device type */
+	enum pmd_type pmd_type;
+	/**< PMD type - PDEV / VDEV */
+
+	struct rte_cryptodev_cb_list link_intr_cbs;
+	/**< User application callback for interrupts if present */
+
+	uint8_t attached : 1;
+	/**< Flag indicating the device is attached */
+} __rte_cache_aligned;
+
+
+#define RTE_CRYPTODEV_NAME_MAX_LEN	(64)
+/**< Max length of name of crypto PMD */
+
+/**
+ * The data part, with no function pointers, associated with each device.
+ *
+ * This structure is safe to place in shared memory to be common among
+ * different processes in a multi-process configuration.
+ */
+struct rte_cryptodev_data {
+	uint8_t dev_id;
+	/**< Device ID for this instance */
+	uint8_t socket_id;
+	/**< Socket ID where memory is allocated */
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	/**< Unique identifier name */
+
+	uint8_t dev_started : 1;
+	/**< Device state: STARTED(1)/STOPPED(0) */
+
+	struct rte_mempool *session_pool;
+	/**< Session memory pool */
+	void **queue_pairs;
+	/**< Array of pointers to queue pairs. */
+	uint16_t nb_queue_pairs;
+	/**< Number of device queue pairs. */
+
+	void *dev_private;
+	/**< PMD-specific private data */
+} __rte_cache_aligned;
+
+extern struct rte_cryptodev *rte_cryptodevs;
+
+/**
+ * Dequeue a burst of processed crypto operations from a queue on the crypto
+ * device. The dequeued operations are stored in *rte_crypto_op* structures
+ * whose pointers are supplied in the *ops* array.
+ *
+ * The rte_cryptodev_dequeue_burst() function returns the number of ops
+ * actually dequeued, which is the number of *rte_crypto_op* data structures
+ * effectively supplied into the *ops* array.
+ *
+ * A return value equal to *nb_ops* indicates that the queue contained
+ * at least *nb_ops* operations, and this is likely to signify that other
+ * processed operations remain in the device's output queue. Applications
+ * implementing a "retrieve as many processed operations as possible" policy
+ * can check this specific case and keep invoking the
+ * rte_cryptodev_dequeue_burst() function until a value less than
+ * *nb_ops* is returned.
+ *
+ * The rte_cryptodev_dequeue_burst() function does not provide any error
+ * notification to avoid the corresponding overhead.
+ *
+ * @param	dev_id		The symmetric crypto device identifier
+ * @param	qp_id		The index of the queue pair from which to
+ *				retrieve processed packets. The value must be
+ *				in the range [0, nb_queue_pair - 1] previously
+ *				supplied to rte_cryptodev_configure().
+ * @param	ops		The address of an array of pointers to
+ *				*rte_crypto_op* structures that must be
+ *				large enough to store *nb_ops* pointers in it.
+ * @param	nb_ops		The maximum number of operations to dequeue.
+ *
+ * @return
+ *   - The number of operations actually dequeued, which is the number
+ *   of pointers to *rte_crypto_op* structures effectively supplied to the
+ *   *ops* array.
+ */
+static inline uint16_t
+rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
+
+	nb_ops = (*dev->dequeue_burst)
+			(dev->data->queue_pairs[qp_id], ops, nb_ops);
+
+	return nb_ops;
+}
+
+/**
+ * Enqueue a burst of operations for processing on a crypto device.
+ *
+ * The rte_cryptodev_enqueue_burst() function is invoked to place
+ * crypto operations on the queue *qp_id* of the device designated by
+ * its *dev_id*.
+ *
+ * The *nb_ops* parameter is the number of operations to process which are
+ * supplied in the *ops* array of *rte_crypto_op* structures.
+ *
+ * The rte_cryptodev_enqueue_burst() function returns the number of
+ * operations it actually enqueued for processing. A return value equal to
+ * *nb_ops* means that all operations have been enqueued.
+ *
+ * @param	dev_id		The identifier of the device.
+ * @param	qp_id		The index of the queue pair which packets are
+ *				to be enqueued for processing. The value
+ *				must be in the range [0, nb_queue_pairs - 1]
+ *				previously supplied to
+ *				*rte_cryptodev_configure*.
+ * @param	ops		The address of an array of *nb_ops* pointers
+ *				to *rte_crypto_op* structures which contain
+ *				the crypto operations to be processed.
+ * @param	nb_ops		The number of operations to process.
+ *
+ * @return
+ * The number of operations actually enqueued on the crypto device. The return
+ * value can be less than the value of the *nb_ops* parameter when the
+ * crypto device's queue is full or if invalid parameters are specified in
+ * a *rte_crypto_op*.
+ */
+static inline uint16_t
+rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
+		struct rte_crypto_op **ops, uint16_t nb_ops)
+{
+	struct rte_cryptodev *dev = &rte_cryptodevs[dev_id];
+
+	return (*dev->enqueue_burst)(
+			dev->data->queue_pairs[qp_id], ops, nb_ops);
+}
+
+
+/** Cryptodev symmetric crypto session */
+struct rte_cryptodev_sym_session {
+	struct {
+		uint8_t dev_id;
+		/**< Device Id */
+		enum rte_cryptodev_type dev_type;
+		/**< Crypto device type session was created on */
+		struct rte_mempool *mp;
+		/**< Mempool session allocated from */
+	} __rte_aligned(8);
+	/**< Public symmetric session details */
+
+	char _private[0];
+	/**< Private session material */
+};
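+/*
+ * Editorial sketch (not part of the upstream patch): a typical enqueue/poll
+ * loop built on the two inline functions above. Error handling is elided
+ * and the names are illustrative:
+ *
+ *	uint16_t sent, done;
+ *
+ *	sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
+ *
+ *	do {
+ *		done = rte_cryptodev_dequeue_burst(dev_id, qp_id,
+ *				deq_ops, nb_ops);
+ *	} while (done == 0);
+ */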
+/**
+ * Initialise a session for symmetric cryptographic operations.
+ *
+ * This function is used by the client to initialize immutable
+ * parameters of symmetric cryptographic operation.
+ * To perform the operation the rte_cryptodev_enqueue_burst function is
+ * used. Each mbuf should contain a reference to the session
+ * pointer returned from this function contained within its crypto_op if a
+ * session-based operation is being provisioned. Memory to contain the session
+ * information is allocated from within mempool managed by the cryptodev.
+ *
+ * The rte_cryptodev_sym_session_free must be called to free allocated
+ * memory when the session is no longer required.
+ *
+ * @param	dev_id		The device identifier.
+ * @param	xform		Crypto transform chain.
+ *
+ * @return
+ *  Pointer to the created session or NULL
+ */
+extern struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_create(uint8_t dev_id,
+		struct rte_crypto_sym_xform *xform);
+
+/**
+ * Free the memory associated with a previously allocated session.
+ *
+ * @param	dev_id		The device identifier.
+ * @param	session		Session pointer previously allocated by
+ *				*rte_cryptodev_sym_session_create*.
+ *
+ * @return
+ *   NULL on successful freeing of session.
+ *   Session pointer on failure to free session.
+ */
+extern struct rte_cryptodev_sym_session *
+rte_cryptodev_sym_session_free(uint8_t dev_id,
+		struct rte_cryptodev_sym_session *session);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CRYPTODEV_H_ */
diff --git a/lib/librte_cryptodev/rte_cryptodev_pmd.h b/lib/librte_cryptodev/rte_cryptodev_pmd.h
new file mode 100644
index 00000000..7d049ea3
--- /dev/null
+++ b/lib/librte_cryptodev/rte_cryptodev_pmd.h
@@ -0,0 +1,543 @@
+/*-
+ *
+ * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *	* Redistributions of source code must retain the above copyright
+ *	  notice, this list of conditions and the following disclaimer.
+ *	* Redistributions in binary form must reproduce the above copyright
+ *	  notice, this list of conditions and the following disclaimer in
+ *	  the documentation and/or other materials provided with the
+ *	  distribution.
+ *	* Neither the name of Intel Corporation nor the names of its
+ *	  contributors may be used to endorse or promote products derived
+ *	  from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_CRYPTODEV_PMD_H_
+#define _RTE_CRYPTODEV_PMD_H_
+
+/** @file
+ * RTE Crypto PMD APIs
+ *
+ * @note
+ * These APIs are for use by crypto PMDs only and user applications should
+ * not call them directly.
+ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h>
+
+#include <rte_dev.h>
+#include <rte_pci.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#include <rte_log.h>
+
+#include "rte_crypto.h"
+#include "rte_cryptodev.h"
+
+
+#ifdef RTE_LIBRTE_CRYPTODEV_DEBUG
+#define RTE_PMD_DEBUG_TRACE(...) \
+	rte_pmd_debug_trace(__func__, __VA_ARGS__)
+#else
+#define RTE_PMD_DEBUG_TRACE(fmt, args...)
+#endif
+
+struct rte_cryptodev_session {
+	struct {
+		uint8_t dev_id;
+		enum rte_cryptodev_type type;
+		struct rte_mempool *mp;
+	} __rte_aligned(8);
+
+	char _private[0];
+};
+
+struct rte_cryptodev_driver;
+
+/**
+ * Initialisation function of a crypto driver invoked for each matching
+ * crypto PCI device detected during the PCI probing phase.
+ *
+ * @param	drv	The pointer to the [matching] crypto driver structure
+ *			supplied by the PMD when it registered itself.
+ * @param	dev	The dev pointer is the address of the *rte_cryptodev*
+ *			structure associated with the matching device and which
+ *			has been [automatically] allocated in the
+ *			*rte_crypto_devices* array.
+ *
+ * @return
+ *   - 0: Success, the device is properly initialised by the driver.
+ *        In particular, the driver MUST have set up the *dev_ops* pointer
+ *        of the *dev* structure.
+ *   - <0: Error code of the device initialisation failure.
+ */
+typedef int (*cryptodev_init_t)(struct rte_cryptodev_driver *drv,
+		struct rte_cryptodev *dev);
+
+/**
+ * Finalisation function of a driver invoked for each matching
+ * PCI device detected during the PCI closing phase.
+ *
+ * @param	drv	The pointer to the [matching] driver structure supplied
+ *			by the PMD when it registered itself.
+ * @param	dev	The dev pointer is the address of the *rte_cryptodev*
+ *			structure associated with the matching device and which
+ *			has been [automatically] allocated in the
+ *			*rte_crypto_devices* array.
+ *
+ * @return
+ *   - 0: Success, the device is properly finalised by the driver.
+ *        In particular, the driver MUST free the *dev_ops* pointer
+ *        of the *dev* structure.
+ *   - <0: Error code of the device initialisation failure.
+ */
+typedef int (*cryptodev_uninit_t)(const struct rte_cryptodev_driver *drv,
+		struct rte_cryptodev *dev);
+
+/**
+ * The structure associated with a PMD driver.
+ *
+ * Each driver acts as a PCI driver and is represented by a generic
+ * *crypto_driver* structure that holds:
+ *
+ * - An *rte_pci_driver* structure (which must be the first field).
+ *
+ * - The *cryptodev_init* function invoked for each matching PCI device.
+ *
+ * - The size of the private data to allocate for each matching device.
+ */
+struct rte_cryptodev_driver {
+	struct rte_pci_driver pci_drv;	/**< The PMD is also a PCI driver. */
+	unsigned dev_private_size;	/**< Size of device private data. */
+
+	cryptodev_init_t cryptodev_init;	/**< Device init function. */
+	cryptodev_uninit_t cryptodev_uninit;	/**< Device uninit function. */
+};
+
+
+/** Global structure used for maintaining state of allocated crypto devices */
+struct rte_cryptodev_global {
+	struct rte_cryptodev *devs;	/**< Device information array */
+	struct rte_cryptodev_data *data[RTE_CRYPTO_MAX_DEVS];
+	/**< Device private data */
+	uint8_t nb_devs;		/**< Number of devices found */
+	uint8_t max_devs;		/**< Max number of devices */
+};
+
+/** pointer to global crypto devices data structure. */
+extern struct rte_cryptodev_global *rte_cryptodev_globals;
+
+/**
+ * Get the rte_cryptodev structure device pointer for the device. Assumes a
+ * valid device index.
+ *
+ * @param	dev_id	Device ID value to select the device structure.
+ *
+ * @return
+ *   - The rte_cryptodev structure pointer for the given device ID.
+ */
+static inline struct rte_cryptodev *
+rte_cryptodev_pmd_get_dev(uint8_t dev_id)
+{
+	return &rte_cryptodev_globals->devs[dev_id];
+}
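+/*
+ * Editorial sketch (not part of the upstream patch): a physical-device PMD
+ * would embed rte_pci_driver in its driver object and register it via
+ * rte_cryptodev_pmd_driver_register() (declared later in this file). All
+ * "mypmd_" names are illustrative:
+ *
+ *	static struct rte_cryptodev_driver mypmd_drv = {
+ *		.pci_drv = {
+ *			.name = "mypmd",
+ *			.id_table = mypmd_pci_ids,
+ *			.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ *		},
+ *		.cryptodev_init = mypmd_dev_init,
+ *		.dev_private_size = sizeof(struct mypmd_private),
+ *	};
+ *
+ *	rte_cryptodev_pmd_driver_register(&mypmd_drv, PMD_PDEV);
+ */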
+/**
+ * Get the rte_cryptodev structure device pointer for the named device.
+ *
+ * @param	name	device name to select the device structure.
+ *
+ * @return
+ *   - The rte_cryptodev structure pointer for the given device ID.
+ */
+static inline struct rte_cryptodev *
+rte_cryptodev_pmd_get_named_dev(const char *name)
+{
+	struct rte_cryptodev *dev;
+	unsigned i;
+
+	if (name == NULL)
+		return NULL;
+
+	for (i = 0; i < rte_cryptodev_globals->max_devs; i++) {
+		dev = &rte_cryptodev_globals->devs[i];
+		if ((dev->attached == RTE_CRYPTODEV_ATTACHED) &&
+				(strcmp(dev->data->name, name) == 0))
+			return dev;
+	}
+
+	return NULL;
+}
+
+/**
+ * Validate that the crypto device index refers to a valid, attached device.
+ *
+ * @param	dev_id	Crypto device index.
+ *
+ * @return
+ *   - If the device index is valid (1) or not (0).
+ */
+static inline unsigned
+rte_cryptodev_pmd_is_valid_dev(uint8_t dev_id)
+{
+	struct rte_cryptodev *dev = NULL;
+
+	if (dev_id >= rte_cryptodev_globals->nb_devs)
+		return 0;
+
+	dev = rte_cryptodev_pmd_get_dev(dev_id);
+	if (dev->attached != RTE_CRYPTODEV_ATTACHED)
+		return 0;
+	else
+		return 1;
+}
+
+/**
+ * The pool of rte_cryptodev structures.
+ */
+extern struct rte_cryptodev *rte_cryptodevs;
+
+
+/**
+ * Definitions of all functions exported by a driver through the
+ * generic structure of type *crypto_dev_ops* supplied in the
+ * *rte_cryptodev* structure associated with a device.
+ */
+
+/**
+ * Function used to configure device.
+ *
+ * @param	dev	Crypto device pointer
+ *
+ * @return	Returns 0 on success
+ */
+typedef int (*cryptodev_configure_t)(struct rte_cryptodev *dev);
+
+/**
+ * Function used to start a configured device.
+ *
+ * @param	dev	Crypto device pointer
+ *
+ * @return	Returns 0 on success
+ */
+typedef int (*cryptodev_start_t)(struct rte_cryptodev *dev);
+
+/**
+ * Function used to stop a configured device.
+ *
+ * @param	dev	Crypto device pointer
+ */
+typedef void (*cryptodev_stop_t)(struct rte_cryptodev *dev);
+
+/**
+ * Function used to close a configured device.
+ *
+ * @param	dev	Crypto device pointer
+ * @return
+ * - 0 on success.
+ * - EAGAIN if can't close as device is busy
+ */
+typedef int (*cryptodev_close_t)(struct rte_cryptodev *dev);
+
+
+/**
+ * Function used to get statistics of a device.
+ *
+ * @param	dev	Crypto device pointer
+ * @param	stats	Pointer to crypto device stats structure to populate
+ */
+typedef void (*cryptodev_stats_get_t)(struct rte_cryptodev *dev,
+		struct rte_cryptodev_stats *stats);
+
+
+/**
+ * Function used to reset statistics of a device.
+ *
+ * @param	dev	Crypto device pointer
+ */
+typedef void (*cryptodev_stats_reset_t)(struct rte_cryptodev *dev);
+
+
+/**
+ * Function used to get specific information of a device.
+ *
+ * @param	dev	Crypto device pointer
+ */
+typedef void (*cryptodev_info_get_t)(struct rte_cryptodev *dev,
+		struct rte_cryptodev_info *dev_info);
+
+/**
+ * Start queue pair of a device.
+ *
+ * @param	dev	Crypto device pointer
+ * @param	qp_id	Queue Pair Index
+ *
+ * @return	Returns 0 on success.
+ */
+typedef int (*cryptodev_queue_pair_start_t)(struct rte_cryptodev *dev,
+		uint16_t qp_id);
+
+/**
+ * Stop queue pair of a device.
+ *
+ * @param	dev	Crypto device pointer
+ * @param	qp_id	Queue Pair Index
+ *
+ * @return	Returns 0 on success.
+ */
+typedef int (*cryptodev_queue_pair_stop_t)(struct rte_cryptodev *dev,
+		uint16_t qp_id);
+/**
+ * Setup a queue pair for a device.
+ *
+ * @param	dev		Crypto device pointer
+ * @param	qp_id		Queue Pair Index
+ * @param	qp_conf		Queue configuration structure
+ * @param	socket_id	Socket Index
+ *
+ * @return	Returns 0 on success.
+ */
+typedef int (*cryptodev_queue_pair_setup_t)(struct rte_cryptodev *dev,
+		uint16_t qp_id, const struct rte_cryptodev_qp_conf *qp_conf,
+		int socket_id);
+
+/**
+ * Release memory resources allocated by given queue pair.
+ *
+ * @param	dev	Crypto device pointer
+ * @param	qp_id	Queue Pair Index
+ *
+ * @return
+ * - 0 on success.
+ * - EAGAIN if can't close as device is busy
+ */
+typedef int (*cryptodev_queue_pair_release_t)(struct rte_cryptodev *dev,
+		uint16_t qp_id);
+
+/**
+ * Get number of available queue pairs of a device.
+ *
+ * @param	dev	Crypto device pointer
+ *
+ * @return	Returns number of queue pairs on success.
+ */
+typedef uint32_t (*cryptodev_queue_pair_count_t)(struct rte_cryptodev *dev);
+
+/**
+ * Create a session mempool to allocate sessions from
+ *
+ * @param	dev		Crypto device pointer
+ * @param	nb_objs		number of session objects in mempool
+ * @param	obj_cache_size	l-core object cache size, see *rte_ring_create*
+ * @param	socket_id	Socket Id to allocate mempool on.
+ *
+ * @return
+ * - 0 on success.
+ * - Negative value on failure.
+ */
+typedef int (*cryptodev_sym_create_session_pool_t)(
+		struct rte_cryptodev *dev, unsigned nb_objs,
+		unsigned obj_cache_size, int socket_id);
+
+
+/**
+ * Get the size of a cryptodev session
+ *
+ * @param	dev	Crypto device pointer
+ *
+ * @return
+ *  - On success returns the size of the session structure for device
+ *  - On failure returns 0
+ */
+typedef unsigned (*cryptodev_sym_get_session_private_size_t)(
+		struct rte_cryptodev *dev);
+
+/**
+ * Initialize the private data of a Crypto session mempool object.
+ *
+ * @param	mempool		Mempool the session object was allocated from
+ * @param	session_private	Pointer to the session's private data
+ */
+typedef void (*cryptodev_sym_initialize_session_t)(struct rte_mempool *mempool,
+		void *session_private);
+
+/**
+ * Configure a Crypto session on a device.
+ *
+ * @param	dev		Crypto device pointer
+ * @param	xform		Single or chain of crypto xforms
+ * @param	priv_sess	Pointer to cryptodev's private session structure
+ *
+ * @return
+ *  - Returns private session structure on success.
+ *  - Returns NULL on failure.
+ */
+typedef void * (*cryptodev_sym_configure_session_t)(struct rte_cryptodev *dev,
+		struct rte_crypto_sym_xform *xform, void *session_private);
+
+/**
+ * Free Crypto session.
+ * @param	session		Cryptodev session structure to free
+ */
+typedef void (*cryptodev_sym_free_session_t)(struct rte_cryptodev *dev,
+		void *session_private);
+
+
+/** Crypto device operations function pointer table */
+struct rte_cryptodev_ops {
+	cryptodev_configure_t dev_configure;	/**< Configure device. */
+	cryptodev_start_t dev_start;		/**< Start device. */
+	cryptodev_stop_t dev_stop;		/**< Stop device. */
+	cryptodev_close_t dev_close;		/**< Close device. */
+
+	cryptodev_info_get_t dev_infos_get;	/**< Get device info. */
+
+	cryptodev_stats_get_t stats_get;
+	/**< Get device statistics. */
+	cryptodev_stats_reset_t stats_reset;
+	/**< Reset device statistics. */
+
+	cryptodev_queue_pair_setup_t queue_pair_setup;
+	/**< Set up a device queue pair. */
+	cryptodev_queue_pair_release_t queue_pair_release;
+	/**< Release a queue pair. */
+	cryptodev_queue_pair_start_t queue_pair_start;
+	/**< Start a queue pair. */
+	cryptodev_queue_pair_stop_t queue_pair_stop;
+	/**< Stop a queue pair. */
+	cryptodev_queue_pair_count_t queue_pair_count;
+	/**< Get count of the queue pairs. */
+
+	cryptodev_sym_get_session_private_size_t session_get_size;
+	/**< Return private session size. */
+	cryptodev_sym_initialize_session_t session_initialize;
+	/**< Initialization function for private session data */
+	cryptodev_sym_configure_session_t session_configure;
+	/**< Configure a Crypto session. */
+	cryptodev_sym_free_session_t session_clear;
+	/**< Clear a Crypto sessions private data. */
+};
+
+
+/**
+ * Function for internal use by dummy drivers primarily, e.g. ring-based
+ * driver.
+ * Allocates a new cryptodev slot for a crypto device and returns the pointer
+ * to that slot for the driver to use.
+ *
+ * @param	name		Unique identifier name for each device
+ * @param	type		Device type of this Crypto device
+ * @param	socket_id	Socket to allocate resources on.
+ * @return
+ *   - Slot in the rte_dev_devices array for a new device;
+ */
+struct rte_cryptodev *
+rte_cryptodev_pmd_allocate(const char *name, enum pmd_type type, int socket_id);
+
+/**
+ * Creates a new virtual crypto device and returns the pointer
+ * to that device.
+ *
+ * @param	name			PMD type name
+ * @param	dev_private_size	Size of crypto PMDs private data
+ * @param	socket_id		Socket to allocate resources on.
+ *
+ * @return
+ *   - Cryptodev pointer if device is successfully created.
+ *   - NULL if device cannot be created.
+ */
+struct rte_cryptodev *
+rte_cryptodev_pmd_virtual_dev_init(const char *name, size_t dev_private_size,
+		int socket_id);
+
+
+/**
+ * Function for internal use by dummy drivers primarily, e.g. ring-based
+ * driver.
+ * Release the specified cryptodev device.
+ *
+ * @param cryptodev
+ * The *cryptodev* pointer is the address of the *rte_cryptodev* structure.
+ * @return
+ *   - 0 on success, negative on error
+ */
+extern int
+rte_cryptodev_pmd_release_device(struct rte_cryptodev *cryptodev);
+
+
+/**
+ * Register a Crypto [Poll Mode] driver.
+ *
+ * Function invoked by the initialization function of a Crypto driver
+ * to simultaneously register itself as Crypto Poll Mode Driver and to either:
+ *
+ * a - register itself as PCI driver if the crypto device is a physical
+ *     device, by invoking the rte_eal_pci_register() function to
+ *     register the *pci_drv* structure embedded in the *crypto_drv*
+ *     structure, after having stored the address of the
+ *     rte_cryptodev_init() function in the *devinit* field of the
+ *     *pci_drv* structure.
+ *
+ *     During the PCI probing phase, the rte_cryptodev_init()
+ *     function is invoked for each PCI [device] matching the
+ *     embedded PCI identifiers provided by the driver.
+ *
+ * b - complete the initialization sequence if the device is a virtual
+ *     device by calling the rte_cryptodev_init() directly passing a
+ *     NULL parameter for the rte_pci_device structure.
+ *
+ * @param crypto_drv	crypto_driver structure associated with the crypto
+ *			driver.
+ * @param type		pmd type
+ */
+extern int
+rte_cryptodev_pmd_driver_register(struct rte_cryptodev_driver *crypto_drv,
+		enum pmd_type type);
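+/*
+ * Editorial sketch (not part of the upstream patch): a PMD typically
+ * provides a static ops table and points dev->dev_ops at it from its init
+ * function. All "mypmd_" names are illustrative:
+ *
+ *	static struct rte_cryptodev_ops mypmd_ops = {
+ *		.dev_configure = mypmd_config,
+ *		.dev_start = mypmd_start,
+ *		.dev_stop = mypmd_stop,
+ *		.dev_close = mypmd_close,
+ *		.dev_infos_get = mypmd_info_get,
+ *		.queue_pair_setup = mypmd_qp_setup,
+ *		.queue_pair_release = mypmd_qp_release,
+ *		.session_get_size = mypmd_session_size,
+ *		.session_configure = mypmd_session_configure,
+ *		.session_clear = mypmd_session_clear,
+ *	};
+ */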
+/**
+ * Executes all the user application registered callbacks for the specific
+ * device.
+ *
+ * @param	dev	Pointer to cryptodev struct
+ * @param	event	Crypto device interrupt event type.
+ *
+ * @return
+ *  void
+ */
+void rte_cryptodev_pmd_callback_process(struct rte_cryptodev *dev,
+		enum rte_cryptodev_event_type event);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _RTE_CRYPTODEV_PMD_H_ */
diff --git a/lib/librte_cryptodev/rte_cryptodev_version.map b/lib/librte_cryptodev/rte_cryptodev_version.map
new file mode 100644
index 00000000..41004e1c
--- /dev/null
+++ b/lib/librte_cryptodev/rte_cryptodev_version.map
@@ -0,0 +1,34 @@
+DPDK_16.04 {
+	global:
+
+	rte_cryptodevs;
+	rte_cryptodev_callback_register;
+	rte_cryptodev_callback_unregister;
+	rte_cryptodev_close;
+	rte_cryptodev_count;
+	rte_cryptodev_count_devtype;
+	rte_cryptodev_configure;
+	rte_cryptodev_create_vdev;
+	rte_cryptodev_get_dev_id;
+	rte_cryptodev_get_feature_name;
+	rte_cryptodev_info_get;
+	rte_cryptodev_pmd_allocate;
+	rte_cryptodev_pmd_callback_process;
+	rte_cryptodev_pmd_driver_register;
+	rte_cryptodev_pmd_release_device;
+	rte_cryptodev_pmd_virtual_dev_init;
+	rte_cryptodev_sym_session_create;
+	rte_cryptodev_sym_session_free;
+	rte_cryptodev_socket_id;
+	rte_cryptodev_start;
+	rte_cryptodev_stats_get;
+	rte_cryptodev_stats_reset;
+	rte_cryptodev_stop;
+	rte_cryptodev_queue_pair_count;
+	rte_cryptodev_queue_pair_setup;
+	rte_cryptodev_queue_pair_start;
+	rte_cryptodev_queue_pair_stop;
+	rte_crypto_op_pool_create;
+
+	local: *;
+};
-- 
cgit 1.2.3-korg