author    Luca Boccassi <luca.boccassi@gmail.com>  2017-11-08 14:15:11 +0000
committer Luca Boccassi <luca.boccassi@gmail.com>  2017-11-08 14:45:54 +0000
commit    055c52583a2794da8ba1e85a48cce3832372b12f (patch)
tree      8ceb1cb78fbb46a0f341f8ee24feb3c6b5540013 /drivers/crypto
parent    f239aed5e674965691846e8ce3f187dd47523689 (diff)

New upstream version 17.11-rc3

Change-Id: I6a5baa40612fe0c20f30b5fa773a6cbbac63a685
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/Makefile | 15
-rw-r--r--  drivers/crypto/aesni_gcm/Makefile | 3
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_pmd.c | 97
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c | 2
-rw-r--r--  drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h | 7
-rw-r--r--  drivers/crypto/aesni_mb/Makefile | 5
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 196
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 51
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 12
-rw-r--r--  drivers/crypto/aesni_mb/rte_pmd_aesni_mb_version.map (renamed from drivers/crypto/aesni_mb/rte_pmd_aesni_version.map) | 0
-rw-r--r--  drivers/crypto/armv8/Makefile | 5
-rw-r--r--  drivers/crypto/armv8/rte_armv8_pmd.c | 66
-rw-r--r--  drivers/crypto/armv8/rte_armv8_pmd_ops.c | 2
-rw-r--r--  drivers/crypto/armv8/rte_armv8_pmd_private.h | 8
-rw-r--r--  drivers/crypto/armv8/rte_pmd_armv8_version.map (renamed from drivers/crypto/armv8/rte_armv8_pmd_version.map) | 0
-rw-r--r--  drivers/crypto/dpaa2_sec/Makefile | 15
-rw-r--r--  drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 497
-rw-r--r--  drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h | 62
-rw-r--r--  drivers/crypto/dpaa2_sec/mc/dpseci.c | 676
-rw-r--r--  drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h | 782
-rw-r--r--  drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h | 387
-rw-r--r--  drivers/crypto/dpaa_sec/Makefile | 73
-rw-r--r--  drivers/crypto/dpaa_sec/dpaa_sec.c | 1548
-rw-r--r--  drivers/crypto/dpaa_sec/dpaa_sec.h | 402
-rw-r--r--  drivers/crypto/dpaa_sec/dpaa_sec_log.h | 70
-rw-r--r--  drivers/crypto/dpaa_sec/rte_pmd_dpaa_sec_version.map | 4
-rw-r--r--  drivers/crypto/kasumi/Makefile | 3
-rw-r--r--  drivers/crypto/kasumi/rte_kasumi_pmd.c | 79
-rw-r--r--  drivers/crypto/kasumi/rte_kasumi_pmd_ops.c | 2
-rw-r--r--  drivers/crypto/kasumi/rte_kasumi_pmd_private.h | 7
-rw-r--r--  drivers/crypto/mrvl/Makefile | 66
-rw-r--r--  drivers/crypto/mrvl/rte_mrvl_compat.h | 50
-rw-r--r--  drivers/crypto/mrvl/rte_mrvl_pmd.c | 857
-rw-r--r--  drivers/crypto/mrvl/rte_mrvl_pmd_ops.c | 778
-rw-r--r--  drivers/crypto/mrvl/rte_mrvl_pmd_private.h | 123
-rw-r--r--  drivers/crypto/mrvl/rte_pmd_mrvl_version.map | 3
-rw-r--r--  drivers/crypto/null/Makefile | 3
-rw-r--r--  drivers/crypto/null/null_crypto_pmd.c | 80
-rw-r--r--  drivers/crypto/null/null_crypto_pmd_ops.c | 2
-rw-r--r--  drivers/crypto/null/null_crypto_pmd_private.h | 2
-rw-r--r--  drivers/crypto/openssl/Makefile | 3
-rw-r--r--  drivers/crypto/openssl/rte_openssl_pmd.c | 584
-rw-r--r--  drivers/crypto/openssl/rte_openssl_pmd_ops.c | 52
-rw-r--r--  drivers/crypto/openssl/rte_openssl_pmd_private.h | 10
-rw-r--r--  drivers/crypto/qat/Makefile | 3
-rw-r--r--  drivers/crypto/qat/qat_adf/icp_qat_hw.h | 20
-rw-r--r--  drivers/crypto/qat/qat_adf/qat_algs.h | 6
-rw-r--r--  drivers/crypto/qat/qat_adf/qat_algs_build_desc.c | 28
-rw-r--r--  drivers/crypto/qat/qat_crypto.c | 279
-rw-r--r--  drivers/crypto/qat/qat_crypto.h | 17
-rw-r--r--  drivers/crypto/qat/qat_crypto_capabilities.h | 54
-rw-r--r--  drivers/crypto/qat/qat_qp.c | 20
-rw-r--r--  drivers/crypto/qat/rte_qat_cryptodev.c | 63
-rw-r--r--  drivers/crypto/scheduler/Makefile | 3
-rw-r--r--  drivers/crypto/scheduler/scheduler_pmd.c | 52
-rw-r--r--  drivers/crypto/scheduler/scheduler_pmd_ops.c | 4
-rw-r--r--  drivers/crypto/snow3g/Makefile | 3
-rw-r--r--  drivers/crypto/snow3g/rte_snow3g_pmd.c | 71
-rw-r--r--  drivers/crypto/snow3g/rte_snow3g_pmd_ops.c | 2
-rw-r--r--  drivers/crypto/snow3g/rte_snow3g_pmd_private.h | 7
-rw-r--r--  drivers/crypto/zuc/Makefile | 3
-rw-r--r--  drivers/crypto/zuc/rte_zuc_pmd.c | 65
-rw-r--r--  drivers/crypto/zuc/rte_zuc_pmd_ops.c | 2
-rw-r--r--  drivers/crypto/zuc/rte_zuc_pmd_private.h | 7
64 files changed, 6613 insertions(+), 1785 deletions(-)
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 7a719b9d..645b6967 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -31,29 +31,18 @@
include $(RTE_SDK)/mk/rte.vars.mk
-core-libs := librte_eal librte_mbuf librte_mempool librte_ring librte_cryptodev
-
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm
-DEPDIRS-aesni_gcm = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb
-DEPDIRS-aesni_mb = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += armv8
-DEPDIRS-armv8 = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += openssl
-DEPDIRS-openssl = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat
-DEPDIRS-qat = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler
-DEPDIRS-scheduler = $(core-libs) librte_kvargs librte_reorder
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g
-DEPDIRS-snow3g = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += kasumi
-DEPDIRS-kasumi = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += zuc
-DEPDIRS-zuc = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += mrvl
DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
-DEPDIRS-null = $(core-libs)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec
-DEPDIRS-dpaa2_sec = $(core-libs)
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += dpaa_sec
include $(RTE_SDK)/mk/rte.subdir.mk
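
[Note: the two DIRS entries added above hook the new mrvl and dpaa_sec PMDs
into the build; both are gated on build-time options. A hypothetical
defconfig fragment enabling them, using the option names from the Makefile
above:

    CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO=y
    CONFIG_RTE_LIBRTE_PMD_DPAA_SEC=y
]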
diff --git a/drivers/crypto/aesni_gcm/Makefile b/drivers/crypto/aesni_gcm/Makefile
index 6fca5e1c..ddfec4c6 100644
--- a/drivers/crypto/aesni_gcm/Makefile
+++ b/drivers/crypto/aesni_gcm/Makefile
@@ -53,6 +53,9 @@ EXPORT_MAP := rte_pmd_aesni_gcm_version.map
CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm_pmd.c
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index d9c91d06..08dcacce 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -31,12 +31,10 @@
*/
#include <rte_common.h>
-#include <rte_config.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_cryptodev_vdev.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <rte_byteorder.h>
@@ -224,7 +222,7 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
RTE_ASSERT(m_src != NULL);
- while (offset >= m_src->data_len) {
+ while (offset >= m_src->data_len && data_length != 0) {
offset -= m_src->data_len;
m_src = m_src->next;
@@ -298,14 +296,7 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
sym_op->aead.digest.data,
(uint64_t)session->digest_length);
} else if (session->op == AESNI_GCM_OP_AUTHENTICATED_DECRYPTION) {
- uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(sym_op->m_dst ?
- sym_op->m_dst : sym_op->m_src,
- session->digest_length);
-
- if (!auth_tag) {
- GCM_LOG_ERR("auth_tag");
- return -1;
- }
+ uint8_t *auth_tag = qp->temp_digest;
qp->ops[session->key].init(&session->gdata_key,
&qp->gdata_ctx,
@@ -350,14 +341,7 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
sym_op->auth.digest.data,
(uint64_t)session->digest_length);
} else { /* AESNI_GMAC_OP_VERIFY */
- uint8_t *auth_tag = (uint8_t *)rte_pktmbuf_append(sym_op->m_dst ?
- sym_op->m_dst : sym_op->m_src,
- session->digest_length);
-
- if (!auth_tag) {
- GCM_LOG_ERR("auth_tag");
- return -1;
- }
+ uint8_t *auth_tag = qp->temp_digest;
qp->ops[session->key].init(&session->gdata_key,
&qp->gdata_ctx,
@@ -385,11 +369,10 @@ process_gcm_crypto_op(struct aesni_gcm_qp *qp, struct rte_crypto_op *op,
* - Returns NULL on invalid job
*/
static void
-post_process_gcm_crypto_op(struct rte_crypto_op *op,
+post_process_gcm_crypto_op(struct aesni_gcm_qp *qp,
+ struct rte_crypto_op *op,
struct aesni_gcm_session *session)
{
- struct rte_mbuf *m = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
-
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* Verify digest if required */
@@ -397,8 +380,7 @@ post_process_gcm_crypto_op(struct rte_crypto_op *op,
session->op == AESNI_GMAC_OP_VERIFY) {
uint8_t *digest;
- uint8_t *tag = rte_pktmbuf_mtod_offset(m, uint8_t *,
- m->data_len - session->digest_length);
+ uint8_t *tag = (uint8_t *)&qp->temp_digest;
if (session->op == AESNI_GMAC_OP_VERIFY)
digest = op->sym->auth.digest.data;
@@ -414,9 +396,6 @@ post_process_gcm_crypto_op(struct rte_crypto_op *op,
if (memcmp(tag, digest, session->digest_length) != 0)
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
- /* trim area used for digest from mbuf */
- rte_pktmbuf_trim(m, session->digest_length);
}
}
@@ -435,7 +414,7 @@ handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
struct rte_crypto_op *op,
struct aesni_gcm_session *sess)
{
- post_process_gcm_crypto_op(op, sess);
+ post_process_gcm_crypto_op(qp, op, sess);
/* Free session if a session-less crypto op */
if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
@@ -505,22 +484,24 @@ static int aesni_gcm_remove(struct rte_vdev_device *vdev);
static int
aesni_gcm_create(const char *name,
struct rte_vdev_device *vdev,
- struct rte_crypto_vdev_init_params *init_params)
+ struct rte_cryptodev_pmd_init_params *init_params)
{
struct rte_cryptodev *dev;
struct aesni_gcm_private *internals;
enum aesni_gcm_vector_mode vector_mode;
- if (init_params->name[0] == '\0')
- snprintf(init_params->name, sizeof(init_params->name),
- "%s", name);
-
/* Check CPU for support for AES instruction set */
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
GCM_LOG_ERR("AES instructions not supported by CPU");
return -EFAULT;
}
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ GCM_LOG_ERR("driver %s: create failed", init_params->name);
+ return -ENODEV;
+ }
+
/* Check CPU for supported vector instruction set */
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
vector_mode = RTE_AESNI_GCM_AVX2;
@@ -529,14 +510,6 @@ aesni_gcm_create(const char *name,
else
vector_mode = RTE_AESNI_GCM_SSE;
- dev = rte_cryptodev_vdev_pmd_init(init_params->name,
- sizeof(struct aesni_gcm_private), init_params->socket_id,
- vdev);
- if (dev == NULL) {
- GCM_LOG_ERR("failed to create cryptodev vdev");
- goto init_error;
- }
-
dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_aesni_gcm_pmd_ops;
@@ -571,22 +544,17 @@ aesni_gcm_create(const char *name,
internals->max_nb_sessions = init_params->max_nb_sessions;
return 0;
-
-init_error:
- GCM_LOG_ERR("driver %s: create failed", init_params->name);
-
- aesni_gcm_remove(vdev);
- return -EFAULT;
}
static int
aesni_gcm_probe(struct rte_vdev_device *vdev)
{
- struct rte_crypto_vdev_init_params init_params = {
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct aesni_gcm_private),
rte_socket_id(),
- {0}
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
};
const char *name;
const char *input_args;
@@ -595,17 +563,7 @@ aesni_gcm_probe(struct rte_vdev_device *vdev)
if (name == NULL)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
-
- RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
- init_params.socket_id);
- if (init_params.name[0] != '\0')
- RTE_LOG(INFO, PMD, " User defined name = %s\n",
- init_params.name);
- RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
- init_params.max_nb_queue_pairs);
- RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
- init_params.max_nb_sessions);
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
return aesni_gcm_create(name, vdev, &init_params);
}
@@ -613,16 +571,18 @@ aesni_gcm_probe(struct rte_vdev_device *vdev)
static int
aesni_gcm_remove(struct rte_vdev_device *vdev)
{
+ struct rte_cryptodev *cryptodev;
const char *name;
name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;
- GCM_LOG_INFO("Closing AESNI crypto device %s on numa socket %u\n",
- name, rte_socket_id());
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
- return 0;
+ return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_vdev_driver aesni_gcm_pmd_drv = {
@@ -630,10 +590,13 @@ static struct rte_vdev_driver aesni_gcm_pmd_drv = {
.remove = aesni_gcm_remove
};
+static struct cryptodev_driver aesni_gcm_crypto_drv;
+
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_pmd_drv, cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv,
+ cryptodev_driver_id);
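
[Note: the probe/remove rework above repeats in every software PMD in this
tree: per-driver vdev plumbing is replaced by the shared
rte_cryptodev_pmd_create()/rte_cryptodev_pmd_destroy() helpers. A condensed
sketch of the new lifecycle for a hypothetical "foo" PMD — struct foo_private
is invented; the helper calls are the ones visible in the diff:

    #include <errno.h>
    #include <rte_bus_vdev.h>
    #include <rte_cryptodev_pmd.h>
    #include <rte_lcore.h>

    struct foo_private { int dummy; };      /* placeholder private data */

    static int
    foo_probe(struct rte_vdev_device *vdev)
    {
            struct rte_cryptodev_pmd_init_params init_params = {
                    "",                          /* name, filled by parser */
                    sizeof(struct foo_private),  /* private data size */
                    rte_socket_id(),
                    RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
                    RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
            };
            const char *name = rte_vdev_device_name(vdev);

            if (name == NULL)
                    return -EINVAL;
            rte_cryptodev_pmd_parse_input_args(&init_params,
                            rte_vdev_device_args(vdev));
            /* allocates the rte_cryptodev plus private data in one call */
            if (rte_cryptodev_pmd_create(name, &vdev->device,
                            &init_params) == NULL)
                    return -ENODEV;
            return 0;
    }

    static int
    foo_remove(struct rte_vdev_device *vdev)
    {
            struct rte_cryptodev *dev = rte_cryptodev_pmd_get_named_dev(
                            rte_vdev_device_name(vdev));

            return dev ? rte_cryptodev_pmd_destroy(dev) : -ENODEV;
    }
]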
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
index 48400ac2..0f315f00 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c
@@ -195,7 +195,7 @@ aesni_gcm_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
"aesni_gcm_pmd_%u_qp_%u",
dev->data->dev_id, qp->id);
- if (n > sizeof(qp->name))
+ if (n >= sizeof(qp->name))
return -1;
return 0;
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
index 7e155729..1c8835b5 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h
@@ -58,6 +58,8 @@
#define GCM_LOG_DBG(fmt, args...)
#endif
+/* Maximum length for digest */
+#define DIGEST_LENGTH_MAX 16
/** private data structure for each virtual AESNI GCM device */
struct aesni_gcm_private {
@@ -84,6 +86,11 @@ struct aesni_gcm_qp {
/**< Queue Pair Identifier */
char name[RTE_CRYPTODEV_NAME_LEN];
/**< Unique Queue Pair Name */
+ uint8_t temp_digest[DIGEST_LENGTH_MAX];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
} __rte_cache_aligned;
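
[Note: the temp_digest field added above is the pattern this release applies
across the software PMDs (aesni_gcm, aesni_mb, armv8): instead of appending
scratch space to the mbuf and trimming it afterwards, the driver writes the
computed tag into a buffer owned by the queue pair and compares it there. A
minimal C sketch of the resulting verify step, with invented names:

    #include <stdint.h>
    #include <string.h>

    #define DIGEST_LENGTH_MAX 16    /* AES-GCM tag upper bound, as above */

    struct example_qp {
            /* scratch buffer owned by the queue pair, reused per op */
            uint8_t temp_digest[DIGEST_LENGTH_MAX];
    };

    /* The driver computes the tag into qp->temp_digest, then verifies it
     * against the digest the application supplied; no mbuf append/trim. */
    static int
    example_verify(const struct example_qp *qp,
                   const uint8_t *user_digest, size_t digest_len)
    {
            return memcmp(qp->temp_digest, user_digest, digest_len) ? -1 : 0;
    }
]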
diff --git a/drivers/crypto/aesni_mb/Makefile b/drivers/crypto/aesni_mb/Makefile
index 611d4123..a49f06f2 100644
--- a/drivers/crypto/aesni_mb/Makefile
+++ b/drivers/crypto/aesni_mb/Makefile
@@ -47,12 +47,15 @@ CFLAGS += $(WERROR_FLAGS)
LIBABIVER := 1
# versioning export map
-EXPORT_MAP := rte_pmd_aesni_version.map
+EXPORT_MAP := rte_pmd_aesni_mb_version.map
# external library dependencies
CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd.c
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 16e14512..70043897 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -30,12 +30,13 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <des.h>
+
#include <rte_common.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_cryptodev_vdev.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -188,6 +189,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
struct aesni_mb_session *sess,
const struct rte_crypto_sym_xform *xform)
{
+ uint8_t is_aes = 0;
aes_keyexp_t aes_keyexp_fn;
if (xform == NULL) {
@@ -217,45 +219,68 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
switch (xform->cipher.algo) {
case RTE_CRYPTO_CIPHER_AES_CBC:
sess->cipher.mode = CBC;
+ is_aes = 1;
break;
case RTE_CRYPTO_CIPHER_AES_CTR:
sess->cipher.mode = CNTR;
+ is_aes = 1;
break;
case RTE_CRYPTO_CIPHER_AES_DOCSISBPI:
sess->cipher.mode = DOCSIS_SEC_BPI;
+ is_aes = 1;
break;
- default:
- MB_LOG_ERR("Unsupported cipher mode parameter");
- return -ENOTSUP;
- }
-
- /* Check key length and choose key expansion function */
- switch (xform->cipher.key.length) {
- case AES_128_BYTES:
- sess->cipher.key_length_in_bytes = AES_128_BYTES;
- aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
- break;
- case AES_192_BYTES:
- sess->cipher.key_length_in_bytes = AES_192_BYTES;
- aes_keyexp_fn = mb_ops->aux.keyexp.aes192;
+ case RTE_CRYPTO_CIPHER_DES_CBC:
+ sess->cipher.mode = DES;
break;
- case AES_256_BYTES:
- sess->cipher.key_length_in_bytes = AES_256_BYTES;
- aes_keyexp_fn = mb_ops->aux.keyexp.aes256;
+ case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
+ sess->cipher.mode = DOCSIS_DES;
break;
default:
- MB_LOG_ERR("Invalid cipher key length");
- return -EINVAL;
+ MB_LOG_ERR("Unsupported cipher mode parameter");
+ return -ENOTSUP;
}
/* Set IV parameters */
sess->iv.offset = xform->cipher.iv.offset;
sess->iv.length = xform->cipher.iv.length;
- /* Expanded cipher keys */
- (*aes_keyexp_fn)(xform->cipher.key.data,
- sess->cipher.expanded_aes_keys.encode,
- sess->cipher.expanded_aes_keys.decode);
+ /* Check key length and choose key expansion function for AES */
+ if (is_aes) {
+ switch (xform->cipher.key.length) {
+ case AES_128_BYTES:
+ sess->cipher.key_length_in_bytes = AES_128_BYTES;
+ aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
+ break;
+ case AES_192_BYTES:
+ sess->cipher.key_length_in_bytes = AES_192_BYTES;
+ aes_keyexp_fn = mb_ops->aux.keyexp.aes192;
+ break;
+ case AES_256_BYTES:
+ sess->cipher.key_length_in_bytes = AES_256_BYTES;
+ aes_keyexp_fn = mb_ops->aux.keyexp.aes256;
+ break;
+ default:
+ MB_LOG_ERR("Invalid cipher key length");
+ return -EINVAL;
+ }
+
+ /* Expanded cipher keys */
+ (*aes_keyexp_fn)(xform->cipher.key.data,
+ sess->cipher.expanded_aes_keys.encode,
+ sess->cipher.expanded_aes_keys.decode);
+
+ } else {
+ if (xform->cipher.key.length != 8) {
+ MB_LOG_ERR("Invalid cipher key length");
+ return -EINVAL;
+ }
+ sess->cipher.key_length_in_bytes = 8;
+
+ des_key_schedule((uint64_t *)sess->cipher.expanded_aes_keys.encode,
+ xform->cipher.key.data);
+ des_key_schedule((uint64_t *)sess->cipher.expanded_aes_keys.decode,
+ xform->cipher.key.data);
+ }
return 0;
}
@@ -407,7 +432,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
*/
static inline int
set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
- struct rte_crypto_op *op)
+ struct rte_crypto_op *op, uint8_t *digest_idx)
{
struct rte_mbuf *m_src = op->sym->m_src, *m_dst;
struct aesni_mb_session *session;
@@ -466,19 +491,8 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
/* Set digest output location */
if (job->hash_alg != NULL_HASH &&
session->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
- job->auth_tag_output = (uint8_t *)rte_pktmbuf_append(m_dst,
- get_digest_byte_length(job->hash_alg));
-
- if (job->auth_tag_output == NULL) {
- MB_LOG_ERR("failed to allocate space in output mbuf "
- "for temp digest");
- op->status = RTE_CRYPTO_OP_STATUS_ERROR;
- return -1;
- }
-
- memset(job->auth_tag_output, 0,
- sizeof(get_digest_byte_length(job->hash_alg)));
-
+ job->auth_tag_output = qp->temp_digests[*digest_idx];
+ *digest_idx = (*digest_idx + 1) % MAX_JOBS;
} else {
job->auth_tag_output = op->sym->auth.digest.data;
}
@@ -507,22 +521,17 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
/* Set user data to be crypto operation data struct */
job->user_data = op;
- job->user_data2 = m_dst;
return 0;
}
static inline void
-verify_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op) {
- struct rte_mbuf *m_dst = (struct rte_mbuf *)job->user_data2;
-
+verify_digest(struct aesni_mb_qp *qp __rte_unused, JOB_AES_HMAC *job,
+ struct rte_crypto_op *op) {
/* Verify digest if required */
if (memcmp(job->auth_tag_output, op->sym->auth.digest.data,
job->auth_tag_output_len_in_bytes) != 0)
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
- /* trim area used for digest from mbuf */
- rte_pktmbuf_trim(m_dst, get_digest_byte_length(job->hash_alg));
}
/**
@@ -532,8 +541,7 @@ verify_digest(JOB_AES_HMAC *job, struct rte_crypto_op *op) {
* @param job JOB_AES_HMAC job to process
*
* @return
- * - Returns processed crypto operation which mbuf is trimmed of output digest
- * used in verification of supplied digest.
+ * - Returns processed crypto operation.
* - Returns NULL on invalid job
*/
static inline struct rte_crypto_op *
@@ -552,7 +560,7 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
if (job->hash_alg != NULL_HASH) {
if (sess->auth.operation ==
RTE_CRYPTO_AUTH_OP_VERIFY)
- verify_digest(job, op);
+ verify_digest(qp, job, op);
}
break;
default:
@@ -626,13 +634,16 @@ flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
}
static inline JOB_AES_HMAC *
-set_job_null_op(JOB_AES_HMAC *job)
+set_job_null_op(JOB_AES_HMAC *job, struct rte_crypto_op *op)
{
job->chain_order = HASH_CIPHER;
job->cipher_mode = NULL_CIPHER;
job->hash_alg = NULL_HASH;
job->cipher_direction = DECRYPT;
+ /* Set user data to be crypto operation data struct */
+ job->user_data = op;
+
return job;
}
@@ -650,6 +661,7 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
if (unlikely(nb_ops == 0))
return 0;
+ uint8_t digest_idx = qp->digest_idx;
do {
/* Get next operation to process from ingress queue */
retval = rte_ring_dequeue(qp->ingress_queue, (void **)&op);
@@ -667,10 +679,10 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
}
- retval = set_mb_job_params(job, qp, op);
+ retval = set_mb_job_params(job, qp, op, &digest_idx);
if (unlikely(retval != 0)) {
qp->stats.dequeue_err_count++;
- set_job_null_op(job);
+ set_job_null_op(job, op);
}
/* Submit job to multi-buffer for processing */
@@ -687,6 +699,8 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
} while (processed_jobs < nb_ops);
+ qp->digest_idx = digest_idx;
+
if (processed_jobs < 1)
processed_jobs += flush_mb_mgr(qp,
&ops[processed_jobs],
@@ -700,15 +714,23 @@ static int cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev);
static int
cryptodev_aesni_mb_create(const char *name,
struct rte_vdev_device *vdev,
- struct rte_crypto_vdev_init_params *init_params)
+ struct rte_cryptodev_pmd_init_params *init_params)
{
struct rte_cryptodev *dev;
struct aesni_mb_private *internals;
enum aesni_mb_vector_mode vector_mode;
- if (init_params->name[0] == '\0')
- snprintf(init_params->name, sizeof(init_params->name),
- "%s", name);
+ /* Check CPU for support for AES instruction set */
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
+ MB_LOG_ERR("AES instructions not supported by CPU");
+ return -EFAULT;
+ }
+
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ MB_LOG_ERR("failed to create cryptodev vdev");
+ return -ENODEV;
+ }
/* Check CPU for supported vector instruction set */
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
@@ -720,14 +742,6 @@ cryptodev_aesni_mb_create(const char *name,
else
vector_mode = RTE_AESNI_MB_SSE;
- dev = rte_cryptodev_vdev_pmd_init(init_params->name,
- sizeof(struct aesni_mb_private), init_params->socket_id,
- vdev);
- if (dev == NULL) {
- MB_LOG_ERR("failed to create cryptodev vdev");
- goto init_error;
- }
-
dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_aesni_mb_pmd_ops;
@@ -764,41 +778,33 @@ cryptodev_aesni_mb_create(const char *name,
internals->max_nb_sessions = init_params->max_nb_sessions;
return 0;
-init_error:
- MB_LOG_ERR("driver %s: cryptodev_aesni_create failed",
- init_params->name);
-
- cryptodev_aesni_mb_remove(vdev);
- return -EFAULT;
}
static int
cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
{
- struct rte_crypto_vdev_init_params init_params = {
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct aesni_mb_private),
rte_socket_id(),
- ""
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
};
- const char *name;
- const char *input_args;
+ const char *name, *args;
+ int retval;
name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;
- input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
-
- RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
- init_params.socket_id);
- if (init_params.name[0] != '\0')
- RTE_LOG(INFO, PMD, " User defined name = %s\n",
- init_params.name);
- RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
- init_params.max_nb_queue_pairs);
- RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
- init_params.max_nb_sessions);
+
+ args = rte_vdev_device_args(vdev);
+
+ retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
+ if (retval) {
+ MB_LOG_ERR("Failed to parse initialisation arguments[%s]\n",
+ args);
+ return -EINVAL;
+ }
return cryptodev_aesni_mb_create(name, vdev, &init_params);
}
@@ -806,16 +812,18 @@ cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
static int
cryptodev_aesni_mb_remove(struct rte_vdev_device *vdev)
{
+ struct rte_cryptodev *cryptodev;
const char *name;
name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;
- RTE_LOG(INFO, PMD, "Closing AESNI crypto device %s on numa socket %u\n",
- name, rte_socket_id());
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
- return 0;
+ return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
@@ -823,10 +831,14 @@ static struct rte_vdev_driver cryptodev_aesni_mb_pmd_drv = {
.remove = cryptodev_aesni_mb_remove
};
+static struct cryptodev_driver aesni_mb_crypto_drv;
+
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_aesni_mb_pmd_drv, cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_mb_crypto_drv,
+ cryptodev_aesni_mb_pmd_drv,
+ cryptodev_driver_id);
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index 692b354f..3b3ef0c0 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -247,6 +247,48 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
}, }
}, }
},
+ { /* DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* DES DOCSIS BPI */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
@@ -355,7 +397,7 @@ aesni_mb_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
"aesni_mb_pmd_%u_qp_%u",
dev->data->dev_id, qp->id);
- if (n > sizeof(qp->name))
+ if (n >= sizeof(qp->name))
return -1;
return 0;
@@ -373,7 +415,7 @@ aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
"%s_%s",
qp->name, str);
- if (n > sizeof(ring_name))
+ if (n >= sizeof(ring_name))
return NULL;
r = rte_ring_lookup(ring_name);
@@ -430,6 +472,11 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
memset(&qp->stats, 0, sizeof(qp->stats));
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "digest_mp_%u_%u", dev->data->dev_id, qp_id);
+
/* Initialise multi-buffer manager */
(*qp->op_fns->job.init_mgr)(&qp->mb_mgr);
return 0;
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index 6676948e..fe3bd730 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -61,6 +61,8 @@
#define HMAC_IPAD_VALUE (0x36)
#define HMAC_OPAD_VALUE (0x5C)
+/* Maximum length for digest (SHA-512 truncated needs 32 bytes) */
+#define DIGEST_LENGTH_MAX 32
static const unsigned auth_blocksize[] = {
[MD5] = 64,
[SHA1] = 64,
@@ -164,9 +166,17 @@ struct aesni_mb_qp {
/**< Session Mempool */
struct rte_cryptodev_stats stats;
/**< Queue pair statistics */
+ uint8_t digest_idx;
+ /**< Index of the next slot to be used in temp_digests,
+ * to store the digest for a given operation
+ */
+ uint8_t temp_digests[MAX_JOBS][DIGEST_LENGTH_MAX];
+ /**< Buffers used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
} __rte_cache_aligned;
-
/** AES-NI multi-buffer private session structure */
struct aesni_mb_session {
JOB_CHAIN_ORDER chain_order;
diff --git a/drivers/crypto/aesni_mb/rte_pmd_aesni_version.map b/drivers/crypto/aesni_mb/rte_pmd_aesni_mb_version.map
index ad607bbe..ad607bbe 100644
--- a/drivers/crypto/aesni_mb/rte_pmd_aesni_version.map
+++ b/drivers/crypto/aesni_mb/rte_pmd_aesni_mb_version.map
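
[Note: unlike aesni_gcm, the multi-buffer PMD keeps up to MAX_JOBS operations
in flight inside the job manager, so a single scratch digest per queue pair
is not enough; the qp instead rotates through an array (see temp_digests and
digest_idx above). A stripped-down sketch of that index handling, with
invented type names:

    #include <stdint.h>

    /* Assumption: the real MAX_JOBS value comes from the IPSec_MB headers */
    #define MAX_JOBS 128
    #define DIGEST_LENGTH_MAX 32    /* as in the private header above */

    struct example_mb_qp {
            uint8_t digest_idx;     /* next free scratch slot */
            uint8_t temp_digests[MAX_JOBS][DIGEST_LENGTH_MAX];
    };

    /* Each verify-mode job gets its own slot, so digests of jobs still
     * queued inside the multi-buffer manager are never overwritten. */
    static uint8_t *
    next_digest_slot(struct example_mb_qp *qp)
    {
            uint8_t *slot = qp->temp_digests[qp->digest_idx];

            qp->digest_idx = (qp->digest_idx + 1) % MAX_JOBS;
            return slot;
    }
]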
diff --git a/drivers/crypto/armv8/Makefile b/drivers/crypto/armv8/Makefile
index 86611fa2..79c260ff 100644
--- a/drivers/crypto/armv8/Makefile
+++ b/drivers/crypto/armv8/Makefile
@@ -51,12 +51,15 @@ CFLAGS += $(WERROR_FLAGS)
LIBABIVER := 1
# versioning export map
-EXPORT_MAP := rte_armv8_pmd_version.map
+EXPORT_MAP := rte_pmd_armv8_version.map
# external library dependencies
CFLAGS += -I$(ARMV8_CRYPTO_LIB_PATH)
CFLAGS += -I$(ARMV8_CRYPTO_LIB_PATH)/asm/include
LDLIBS += -L$(ARMV8_CRYPTO_LIB_PATH) -larmv8_crypto
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += rte_armv8_pmd.c
diff --git a/drivers/crypto/armv8/rte_armv8_pmd.c b/drivers/crypto/armv8/rte_armv8_pmd.c
index a5c39c9b..97719f27 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd.c
+++ b/drivers/crypto/armv8/rte_armv8_pmd.c
@@ -36,8 +36,7 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_cryptodev_vdev.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -575,8 +574,8 @@ get_session(struct armv8_crypto_qp *qp, struct rte_crypto_op *op)
/** Process cipher operation */
static inline void
-process_armv8_chained_op
- (struct rte_crypto_op *op, struct armv8_crypto_session *sess,
+process_armv8_chained_op(struct armv8_crypto_qp *qp, struct rte_crypto_op *op,
+ struct armv8_crypto_session *sess,
struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
{
crypto_func_t crypto_func;
@@ -633,8 +632,7 @@ process_armv8_chained_op
op->sym->auth.data.length);
}
} else {
- adst = (uint8_t *)rte_pktmbuf_append(m_asrc,
- sess->auth.digest_length);
+ adst = qp->temp_digest;
}
arg.cipher.iv = rte_crypto_op_ctod_offset(op, uint8_t *,
@@ -655,15 +653,12 @@ process_armv8_chained_op
sess->auth.digest_length) != 0) {
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
}
- /* Trim area used for digest from mbuf. */
- rte_pktmbuf_trim(m_asrc,
- sess->auth.digest_length);
}
}
/** Process crypto operation for mbuf */
static inline int
-process_op(const struct armv8_crypto_qp *qp, struct rte_crypto_op *op,
+process_op(struct armv8_crypto_qp *qp, struct rte_crypto_op *op,
struct armv8_crypto_session *sess)
{
struct rte_mbuf *msrc, *mdst;
@@ -676,7 +671,7 @@ process_op(const struct armv8_crypto_qp *qp, struct rte_crypto_op *op,
switch (sess->chain_order) {
case ARMV8_CRYPTO_CHAIN_CIPHER_AUTH:
case ARMV8_CRYPTO_CHAIN_AUTH_CIPHER: /* Fall through */
- process_armv8_chained_op(op, sess, msrc, mdst);
+ process_armv8_chained_op(qp, op, sess, msrc, mdst);
break;
default:
op->status = RTE_CRYPTO_OP_STATUS_ERROR;
@@ -763,7 +758,7 @@ armv8_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
static int
cryptodev_armv8_crypto_create(const char *name,
struct rte_vdev_device *vdev,
- struct rte_crypto_vdev_init_params *init_params)
+ struct rte_cryptodev_pmd_init_params *init_params)
{
struct rte_cryptodev *dev;
struct armv8_crypto_private *internals;
@@ -790,14 +785,7 @@ cryptodev_armv8_crypto_create(const char *name,
return -EFAULT;
}
- if (init_params->name[0] == '\0')
- snprintf(init_params->name, sizeof(init_params->name),
- "%s", name);
-
- dev = rte_cryptodev_vdev_pmd_init(init_params->name,
- sizeof(struct armv8_crypto_private),
- init_params->socket_id,
- vdev);
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
if (dev == NULL) {
ARMV8_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
@@ -836,11 +824,12 @@ init_error:
static int
cryptodev_armv8_crypto_init(struct rte_vdev_device *vdev)
{
- struct rte_crypto_vdev_init_params init_params = {
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct armv8_crypto_private),
rte_socket_id(),
- {0}
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
};
const char *name;
const char *input_args;
@@ -849,18 +838,7 @@ cryptodev_armv8_crypto_init(struct rte_vdev_device *vdev)
if (name == NULL)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
-
- RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
- init_params.socket_id);
- if (init_params.name[0] != '\0') {
- RTE_LOG(INFO, PMD, " User defined name = %s\n",
- init_params.name);
- }
- RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
- init_params.max_nb_queue_pairs);
- RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
- init_params.max_nb_sessions);
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
return cryptodev_armv8_crypto_create(name, vdev, &init_params);
}
@@ -869,6 +847,7 @@ cryptodev_armv8_crypto_init(struct rte_vdev_device *vdev)
static int
cryptodev_armv8_crypto_uninit(struct rte_vdev_device *vdev)
{
+ struct rte_cryptodev *cryptodev;
const char *name;
name = rte_vdev_device_name(vdev);
@@ -879,18 +858,25 @@ cryptodev_armv8_crypto_uninit(struct rte_vdev_device *vdev)
"Closing ARMv8 crypto device %s on numa socket %u\n",
name, rte_socket_id());
- return 0;
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
}
-static struct rte_vdev_driver armv8_crypto_drv = {
+static struct rte_vdev_driver armv8_crypto_pmd_drv = {
.probe = cryptodev_armv8_crypto_init,
.remove = cryptodev_armv8_crypto_uninit
};
-RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ARMV8_PMD, armv8_crypto_drv);
+static struct cryptodev_driver armv8_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ARMV8_PMD, armv8_crypto_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_ARMV8_PMD, cryptodev_armv8_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ARMV8_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(armv8_crypto_drv, cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(armv8_crypto_drv, armv8_crypto_pmd_drv,
+ cryptodev_driver_id);
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_ops.c b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
index 00297beb..63776b26 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd_ops.c
+++ b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
@@ -209,7 +209,7 @@ armv8_crypto_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
n = snprintf(qp->name, sizeof(qp->name), "armv8_crypto_pmd_%u_qp_%u",
dev->data->dev_id, qp->id);
- if (n > sizeof(qp->name))
+ if (n >= sizeof(qp->name))
return -1;
return 0;
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_private.h b/drivers/crypto/armv8/rte_armv8_pmd_private.h
index d02992a6..fa31f0a0 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd_private.h
+++ b/drivers/crypto/armv8/rte_armv8_pmd_private.h
@@ -69,6 +69,9 @@ do { \
#define NBBY 8 /* Number of bits in a byte */
#define BYTE_LENGTH(x) ((x) / NBBY) /* Number of bytes in x (round down) */
+/* Maximum length for digest (SHA-256 needs 32 bytes) */
+#define DIGEST_LENGTH_MAX 32
+
/** ARMv8 operation order mode enumerator */
enum armv8_crypto_chain_order {
ARMV8_CRYPTO_CHAIN_CIPHER_AUTH,
@@ -147,6 +150,11 @@ struct armv8_crypto_qp {
/**< Queue pair statistics */
char name[RTE_CRYPTODEV_NAME_LEN];
/**< Unique Queue Pair Name */
+ uint8_t temp_digest[DIGEST_LENGTH_MAX];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
} __rte_cache_aligned;
/** ARMv8 crypto private session structure */
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_version.map b/drivers/crypto/armv8/rte_pmd_armv8_version.map
index 1f84b68a..1f84b68a 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd_version.map
+++ b/drivers/crypto/armv8/rte_pmd_armv8_version.map
diff --git a/drivers/crypto/dpaa2_sec/Makefile b/drivers/crypto/dpaa2_sec/Makefile
index ae15c99f..a08f2717 100644
--- a/drivers/crypto/dpaa2_sec/Makefile
+++ b/drivers/crypto/dpaa2_sec/Makefile
@@ -31,6 +31,12 @@
include $(RTE_SDK)/mk/rte.vars.mk
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(CONFIG_RTE_LIBRTE_SECURITY),y)
+$(error "RTE_LIBRTE_SECURITY is required to build RTE_LIBRTE_PMD_DPAA2_SEC")
+endif
+endif
+
#
# library name
#
@@ -71,14 +77,9 @@ LIBABIVER := 1
SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec_dpseci.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += mc/dpseci.c
-# library dependencies
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += lib/librte_eal
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += lib/librte_mempool lib/librte_mbuf
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += lib/librte_cryptodev
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += drivers/bus/fslmc
-DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += drivers/mempool/dpaa2
-
LDLIBS += -lrte_bus_fslmc
LDLIBS += -lrte_mempool_dpaa2
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index e0f6cfca..9c64c5d9 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -36,6 +36,7 @@
#include <rte_mbuf.h>
#include <rte_cryptodev.h>
+#include <rte_security_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
@@ -73,12 +74,44 @@
#define FLE_POOL_NUM_BUFS 32000
#define FLE_POOL_BUF_SIZE 256
#define FLE_POOL_CACHE_SIZE 512
+#define SEC_FLC_DHR_OUTBOUND -114
+#define SEC_FLC_DHR_INBOUND 0
enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
static uint8_t cryptodev_driver_id;
static inline int
+build_proto_fd(dpaa2_sec_session *sess,
+ struct rte_crypto_op *op,
+ struct qbman_fd *fd, uint16_t bpid)
+{
+ struct rte_crypto_sym_op *sym_op = op->sym;
+ struct ctxt_priv *priv = sess->ctxt;
+ struct sec_flow_context *flc;
+ struct rte_mbuf *mbuf = sym_op->m_src;
+
+ if (likely(bpid < MAX_BPID))
+ DPAA2_SET_FD_BPID(fd, bpid);
+ else
+ DPAA2_SET_FD_IVP(fd);
+
+ /* Save the shared descriptor */
+ flc = &priv->flc_desc[0].flc;
+
+ DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
+ DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
+ DPAA2_SET_FD_FLC(fd, ((uint64_t)flc));
+
+ /* save physical address of mbuf */
+ op->sym->aead.digest.phys_addr = mbuf->buf_iova;
+ mbuf->buf_iova = (uint64_t)op;
+
+ return 0;
+}
+
+static inline int
build_authenc_gcm_fd(dpaa2_sec_session *sess,
struct rte_crypto_op *op,
struct qbman_fd *fd, uint16_t bpid)
@@ -90,11 +123,17 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
uint32_t auth_only_len = sess->ext_params.aead_ctxt.auth_only_len;
int icv_len = sess->digest_length, retval;
uint8_t *old_icv;
+ struct rte_mbuf *dst;
uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
sess->iv.offset);
PMD_INIT_FUNC_TRACE();
+ if (sym_op->m_dst)
+ dst = sym_op->m_dst;
+ else
+ dst = sym_op->m_src;
+
/* TODO we are using the first FLE entry to store Mbuf and session ctxt.
* Currently we donot know which FLE has the mbuf stored.
* So while retreiving we can go back 1 FLE from the FD -ADDR
@@ -155,9 +194,9 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
DPAA2_SET_FLE_SG_EXT(fle);
/* Configure Output SGE for Encap/Decap */
- DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
DPAA2_SET_FLE_OFFSET(sge, sym_op->aead.data.offset +
- sym_op->m_src->data_off - auth_only_len);
+ dst->data_off - auth_only_len);
sge->length = sym_op->aead.data.length + auth_only_len;
if (sess->dir == DIR_ENC) {
@@ -203,7 +242,6 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
old_icv = (uint8_t *)(sge + 1);
memcpy(old_icv, sym_op->aead.digest.data,
sess->digest_length);
- memset(sym_op->aead.digest.data, 0, sess->digest_length);
DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
sge->length = sess->digest_length;
DPAA2_SET_FD_LEN(fd, (sym_op->aead.data.length +
@@ -236,9 +274,15 @@ build_authenc_fd(dpaa2_sec_session *sess,
uint8_t *old_icv;
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
sess->iv.offset);
+ struct rte_mbuf *dst;
PMD_INIT_FUNC_TRACE();
+ if (sym_op->m_dst)
+ dst = sym_op->m_dst;
+ else
+ dst = sym_op->m_src;
+
/* we are using the first FLE entry to store Mbuf.
* Currently we donot know which FLE has the mbuf stored.
* So while retreiving we can go back 1 FLE from the FD -ADDR
@@ -301,9 +345,9 @@ build_authenc_fd(dpaa2_sec_session *sess,
DPAA2_SET_FLE_SG_EXT(fle);
/* Configure Output SGE for Encap/Decap */
- DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(dst));
DPAA2_SET_FLE_OFFSET(sge, sym_op->cipher.data.offset +
- sym_op->m_src->data_off);
+ dst->data_off);
sge->length = sym_op->cipher.data.length;
if (sess->dir == DIR_ENC) {
@@ -342,7 +386,6 @@ build_authenc_fd(dpaa2_sec_session *sess,
old_icv = (uint8_t *)(sge + 1);
memcpy(old_icv, sym_op->auth.digest.data,
sess->digest_length);
- memset(sym_op->auth.digest.data, 0, sess->digest_length);
DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_icv));
sge->length = sess->digest_length;
DPAA2_SET_FD_LEN(fd, (sym_op->auth.data.length +
@@ -436,7 +479,6 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
old_digest = (uint8_t *)(sge + 1);
rte_memcpy(old_digest, sym_op->auth.digest.data,
sess->digest_length);
- memset(sym_op->auth.digest.data, 0, sess->digest_length);
DPAA2_SET_FLE_ADDR(sge, DPAA2_VADDR_TO_IOVA(old_digest));
sge->length = sess->digest_length;
fle->length = sym_op->auth.data.length +
@@ -459,9 +501,15 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
struct ctxt_priv *priv = sess->ctxt;
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
sess->iv.offset);
+ struct rte_mbuf *dst;
PMD_INIT_FUNC_TRACE();
+ if (sym_op->m_dst)
+ dst = sym_op->m_dst;
+ else
+ dst = sym_op->m_src;
+
retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
if (retval) {
RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
@@ -506,9 +554,9 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
sess->iv.length,
sym_op->m_src->data_off);
- DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
+ DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
- sym_op->m_src->data_off);
+ dst->data_off);
fle->length = sym_op->cipher.data.length + sess->iv.length;
@@ -545,12 +593,29 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
}
static inline int
-build_sec_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
+build_sec_fd(struct rte_crypto_op *op,
struct qbman_fd *fd, uint16_t bpid)
{
int ret = -1;
+ dpaa2_sec_session *sess;
PMD_INIT_FUNC_TRACE();
+ /*
+ * Segmented buffer is not supported.
+ */
+ if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return -ENOTSUP;
+ }
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
+ sess = (dpaa2_sec_session *)get_session_private_data(
+ op->sym->session, cryptodev_driver_id);
+ else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
+ sess = (dpaa2_sec_session *)get_sec_session_private_data(
+ op->sym->sec_session);
+ else
+ return -1;
switch (sess->ctxt_type) {
case DPAA2_SEC_CIPHER:
@@ -565,6 +630,9 @@ build_sec_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
case DPAA2_SEC_CIPHER_HASH:
ret = build_authenc_fd(sess, op, fd, bpid);
break;
+ case DPAA2_SEC_IPSEC:
+ ret = build_proto_fd(sess, op, fd, bpid);
+ break;
case DPAA2_SEC_HASH_CIPHER:
default:
RTE_LOG(ERR, PMD, "error: Unsupported session\n");
@@ -588,12 +656,11 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
/*todo - need to support multiple buffer pools */
uint16_t bpid;
struct rte_mempool *mb_pool;
- dpaa2_sec_session *sess;
if (unlikely(nb_ops == 0))
return 0;
- if (ops[0]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
+ if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n");
return 0;
}
@@ -618,13 +685,9 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
for (loop = 0; loop < frames_to_send; loop++) {
/*Clear the unused FD fields before sending*/
memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
- sess = (dpaa2_sec_session *)
- get_session_private_data(
- (*ops)->sym->session,
- cryptodev_driver_id);
mb_pool = (*ops)->sym->m_src->pool;
bpid = mempool_to_bpid(mb_pool);
- ret = build_sec_fd(sess, *ops, &fd_arr[loop], bpid);
+ ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
if (ret) {
PMD_DRV_LOG(ERR, "error: Improper packet"
" contents for crypto operation\n");
@@ -634,7 +697,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
}
loop = 0;
while (loop < frames_to_send) {
- loop += qbman_swp_send_multiple(swp, &eqdesc,
+ loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
&fd_arr[loop],
frames_to_send - loop);
}
@@ -649,11 +712,44 @@ skip_tx:
}
static inline struct rte_crypto_op *
-sec_fd_to_mbuf(const struct qbman_fd *fd)
+sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
+{
+ struct rte_crypto_op *op;
+ uint16_t len = DPAA2_GET_FD_LEN(fd);
+ uint16_t diff = 0;
+ dpaa2_sec_session *sess_priv;
+
+ struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
+ DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
+
+ op = (struct rte_crypto_op *)mbuf->buf_iova;
+ mbuf->buf_iova = op->sym->aead.digest.phys_addr;
+ op->sym->aead.digest.phys_addr = 0L;
+
+ sess_priv = (dpaa2_sec_session *)get_sec_session_private_data(
+ op->sym->sec_session);
+ if (sess_priv->dir == DIR_ENC)
+ mbuf->data_off += SEC_FLC_DHR_OUTBOUND;
+ else
+ mbuf->data_off += SEC_FLC_DHR_INBOUND;
+ diff = len - mbuf->pkt_len;
+ mbuf->pkt_len += diff;
+ mbuf->data_len += diff;
+
+ return op;
+}
+
+static inline struct rte_crypto_op *
+sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
{
struct qbman_fle *fle;
struct rte_crypto_op *op;
struct ctxt_priv *priv;
+ struct rte_mbuf *dst, *src;
+
+ if (DPAA2_FD_GET_FORMAT(fd) == qbman_fd_single)
+ return sec_simple_fd_to_mbuf(fd, driver_id);
fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
@@ -676,10 +772,17 @@ sec_fd_to_mbuf(const struct qbman_fd *fd)
DPAA2_GET_FLE_ADDR((fle - 1)));
/* Prefeth op */
- rte_prefetch0(op->sym->m_src);
+ src = op->sym->m_src;
+ rte_prefetch0(src);
+
+ if (op->sym->m_dst) {
+ dst = op->sym->m_dst;
+ rte_prefetch0(dst);
+ } else
+ dst = src;
PMD_RX_LOG(DEBUG, "mbuf %p BMAN buf addr %p",
- (void *)op->sym->m_src, op->sym->m_src->buf_addr);
+ (void *)dst, dst->buf_addr);
PMD_RX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
(void *)DPAA2_GET_FD_ADDR(fd),
@@ -701,6 +804,8 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
{
/* Function is responsible to receive frames for a given device and VQ*/
struct dpaa2_sec_qp *dpaa2_qp = (struct dpaa2_sec_qp *)qp;
+ struct rte_cryptodev *dev =
+ (struct rte_cryptodev *)(dpaa2_qp->rx_vq.dev);
struct qbman_result *dq_storage;
uint32_t fqid = dpaa2_qp->rx_vq.fqid;
int ret, num_rx = 0;
@@ -747,13 +852,13 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
* Also seems like the SWP is shared between the Ethernet Driver
* and the SEC driver.
*/
- while (!qbman_check_command_complete(swp, dq_storage))
+ while (!qbman_check_command_complete(dq_storage))
;
/* Loop until the dq_storage is updated with
* new token by QBMAN
*/
- while (!qbman_result_has_new_result(swp, dq_storage))
+ while (!qbman_check_new_result(dq_storage))
;
/* Check whether Last Pull command is Expired and
* setting Condition for Loop termination
@@ -770,7 +875,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
}
fd = qbman_result_DQ_fd(dq_storage);
- ops[num_rx] = sec_fd_to_mbuf(fd);
+ ops[num_rx] = sec_fd_to_mbuf(fd, dev->driver_id);
if (unlikely(fd->simple.frc)) {
/* TODO Parse SEC errors */
@@ -1547,6 +1652,300 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
}
static int
+dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
+ struct rte_security_session_conf *conf,
+ void *sess)
+{
+ struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
+ struct rte_crypto_auth_xform *auth_xform;
+ struct rte_crypto_cipher_xform *cipher_xform;
+ dpaa2_sec_session *session = (dpaa2_sec_session *)sess;
+ struct ctxt_priv *priv;
+ struct ipsec_encap_pdb encap_pdb;
+ struct ipsec_decap_pdb decap_pdb;
+ struct alginfo authdata, cipherdata;
+ unsigned int bufsize;
+ struct sec_flow_context *flc;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+ cipher_xform = &conf->crypto_xform->cipher;
+ auth_xform = &conf->crypto_xform->next->auth;
+ } else {
+ auth_xform = &conf->crypto_xform->auth;
+ cipher_xform = &conf->crypto_xform->next->cipher;
+ }
+ priv = (struct ctxt_priv *)rte_zmalloc(NULL,
+ sizeof(struct ctxt_priv) +
+ sizeof(struct sec_flc_desc),
+ RTE_CACHE_LINE_SIZE);
+
+ if (priv == NULL) {
+ RTE_LOG(ERR, PMD, "\nNo memory for priv CTXT");
+ return -ENOMEM;
+ }
+
+ flc = &priv->flc_desc[0].flc;
+
+ session->ctxt_type = DPAA2_SEC_IPSEC;
+ session->cipher_key.data = rte_zmalloc(NULL,
+ cipher_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL &&
+ cipher_xform->key.length > 0) {
+ RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
+ rte_free(priv);
+ return -ENOMEM;
+ }
+
+ session->cipher_key.length = cipher_xform->key.length;
+ session->auth_key.data = rte_zmalloc(NULL,
+ auth_xform->key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL &&
+ auth_xform->key.length > 0) {
+ RTE_LOG(ERR, PMD, "No Memory for auth key\n");
+ rte_free(session->cipher_key.data);
+ rte_free(priv);
+ return -ENOMEM;
+ }
+ session->auth_key.length = auth_xform->key.length;
+ memcpy(session->cipher_key.data, cipher_xform->key.data,
+ cipher_xform->key.length);
+ memcpy(session->auth_key.data, auth_xform->key.data,
+ auth_xform->key.length);
+
+ authdata.key = (uint64_t)session->auth_key.data;
+ authdata.keylen = session->auth_key.length;
+ authdata.key_enc_flags = 0;
+ authdata.key_type = RTA_DATA_IMM;
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ authdata.algtype = OP_PCL_IPSEC_HMAC_SHA1_96;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ authdata.algtype = OP_PCL_IPSEC_HMAC_MD5_96;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_256_128;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_384_192;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ authdata.algtype = OP_PCL_IPSEC_HMAC_SHA2_512_256;
+ authdata.algmode = OP_ALG_AAI_HMAC;
+ session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ authdata.algtype = OP_PCL_IPSEC_AES_CMAC_96;
+ session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
+ break;
+ case RTE_CRYPTO_AUTH_NULL:
+ authdata.algtype = OP_PCL_IPSEC_HMAC_NULL;
+ session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+ case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+ case RTE_CRYPTO_AUTH_SHA1:
+ case RTE_CRYPTO_AUTH_SHA256:
+ case RTE_CRYPTO_AUTH_SHA512:
+ case RTE_CRYPTO_AUTH_SHA224:
+ case RTE_CRYPTO_AUTH_SHA384:
+ case RTE_CRYPTO_AUTH_MD5:
+ case RTE_CRYPTO_AUTH_AES_GMAC:
+ case RTE_CRYPTO_AUTH_KASUMI_F9:
+ case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+ case RTE_CRYPTO_AUTH_ZUC_EIA3:
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
+ auth_xform->algo);
+ goto out;
+ default:
+ RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
+ auth_xform->algo);
+ goto out;
+ }
+ cipherdata.key = (uint64_t)session->cipher_key.data;
+ cipherdata.keylen = session->cipher_key.length;
+ cipherdata.key_enc_flags = 0;
+ cipherdata.key_type = RTA_DATA_IMM;
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ cipherdata.algtype = OP_PCL_IPSEC_AES_CBC;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ cipherdata.algtype = OP_PCL_IPSEC_3DES;
+ cipherdata.algmode = OP_ALG_AAI_CBC;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ cipherdata.algtype = OP_PCL_IPSEC_AES_CTR;
+ cipherdata.algmode = OP_ALG_AAI_CTR;
+ session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
+ break;
+ case RTE_CRYPTO_CIPHER_NULL:
+ cipherdata.algtype = OP_PCL_IPSEC_NULL;
+ break;
+ case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+ case RTE_CRYPTO_CIPHER_3DES_ECB:
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ case RTE_CRYPTO_CIPHER_KASUMI_F8:
+ RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
+ cipher_xform->algo);
+ goto out;
+ default:
+ RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
+ cipher_xform->algo);
+ goto out;
+ }
+
+ if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
+ struct ip ip4_hdr;
+
+ flc->dhr = SEC_FLC_DHR_OUTBOUND;
+ ip4_hdr.ip_v = IPVERSION;
+ ip4_hdr.ip_hl = 5;
+ ip4_hdr.ip_len = rte_cpu_to_be_16(sizeof(ip4_hdr));
+ ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
+ ip4_hdr.ip_id = 0;
+ ip4_hdr.ip_off = 0;
+ ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
+ ip4_hdr.ip_p = 0x32;
+ ip4_hdr.ip_sum = 0;
+ ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
+ ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
+ ip4_hdr.ip_sum = calc_chksum((uint16_t *)(void *)&ip4_hdr,
+ sizeof(struct ip));
+
+ /* For Sec Proto only one descriptor is required. */
+ memset(&encap_pdb, 0, sizeof(struct ipsec_encap_pdb));
+ encap_pdb.options = (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
+ PDBOPTS_ESP_OIHI_PDB_INL |
+ PDBOPTS_ESP_IVSRC |
+ PDBHMO_ESP_ENCAP_DTTL;
+ encap_pdb.spi = ipsec_xform->spi;
+ encap_pdb.ip_hdr_len = sizeof(struct ip);
+
+ session->dir = DIR_ENC;
+ bufsize = cnstr_shdsc_ipsec_new_encap(priv->flc_desc[0].desc,
+ 1, 0, &encap_pdb,
+ (uint8_t *)&ip4_hdr,
+ &cipherdata, &authdata);
+ } else if (ipsec_xform->direction ==
+ RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
+ flc->dhr = SEC_FLC_DHR_INBOUND;
+ memset(&decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
+ decap_pdb.options = sizeof(struct ip) << 16;
+ session->dir = DIR_DEC;
+ bufsize = cnstr_shdsc_ipsec_new_decap(priv->flc_desc[0].desc,
+ 1, 0, &decap_pdb, &cipherdata, &authdata);
+ } else
+ goto out;
+ flc->word1_sdl = (uint8_t)bufsize;
+
+ /* Enable the stashing control bit */
+ DPAA2_SET_FLC_RSC(flc);
+ flc->word2_rflc_31_0 = lower_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq) | 0x14);
+ flc->word3_rflc_63_32 = upper_32_bits(
+ (uint64_t)&(((struct dpaa2_sec_qp *)
+ dev->data->queue_pairs[0])->rx_vq));
+
+ /* Set EWS bit i.e. enable write-safe */
+ DPAA2_SET_FLC_EWS(flc);
+ /* Set BS = 1, i.e. reuse input buffers as output buffers */
+ DPAA2_SET_FLC_REUSE_BS(flc);
+ /* Set FF = 10; reuse input buffers if they provide sufficient space */
+ DPAA2_SET_FLC_REUSE_FF(flc);
+
+ session->ctxt = priv;
+
+ return 0;
+out:
+ rte_free(session->auth_key.data);
+ rte_free(session->cipher_key.data);
+ rte_free(priv);
+ return -1;
+}
+
+static int
+dpaa2_sec_security_session_create(void *dev,
+ struct rte_security_session_conf *conf,
+ struct rte_security_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
+ int ret;
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ switch (conf->protocol) {
+ case RTE_SECURITY_PROTOCOL_IPSEC:
+ ret = dpaa2_sec_set_ipsec_session(cdev, conf,
+ sess_private_data);
+ break;
+ case RTE_SECURITY_PROTOCOL_MACSEC:
+ /* Return the object to the mempool before bailing out */
+ rte_mempool_put(mempool, sess_private_data);
+ return -ENOTSUP;
+ default:
+ rte_mempool_put(mempool, sess_private_data);
+ return -EINVAL;
+ }
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR,
+ "DPAA2 PMD: failed to configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_sec_session_private_data(sess, sess_private_data);
+
+ return ret;
+}
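
For illustration, a minimal application-side sketch of driving this path through the 17.11 rte_security API; the helper name, the placeholder SPI and the omitted tunnel/crypto parameters are assumptions, not part of this patch:

    #include <rte_cryptodev.h>
    #include <rte_security.h>

    /* Hypothetical helper: create an egress ESP tunnel session on crypto
     * device 'dev_id'. 'xform' (cipher+auth chain) and 'sess_mp' are
     * assumed to be prepared elsewhere; tunnel addresses are elided. */
    static struct rte_security_session *
    create_ipsec_egress_session(uint8_t dev_id,
                                struct rte_crypto_sym_xform *xform,
                                struct rte_mempool *sess_mp)
    {
            struct rte_security_ctx *ctx = (struct rte_security_ctx *)
                    rte_cryptodev_get_sec_ctx(dev_id);
            struct rte_security_session_conf conf = {
                    .action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
                    .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
                    .ipsec = {
                            .spi = 0x1234, /* placeholder SPI */
                            .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
                            .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
                            .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
                    },
                    .crypto_xform = xform,
            };

            /* Lands in dpaa2_sec_security_session_create() above. */
            return rte_security_session_create(ctx, &conf, sess_mp);
    }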
+
+/** Clear the memory of the session so it doesn't leave key material behind */
+static int
+dpaa2_sec_security_session_destroy(void *dev __rte_unused,
+ struct rte_security_session *sess)
+{
+ PMD_INIT_FUNC_TRACE();
+ void *sess_priv = get_sec_session_private_data(sess);
+
+ dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv;
+
+ if (sess_priv) {
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ rte_free(s->ctxt);
+ rte_free(s->cipher_key.data);
+ rte_free(s->auth_key.data);
+ memset(sess, 0, sizeof(dpaa2_sec_session));
+ set_sec_session_private_data(sess, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+ return 0;
+}
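
The teardown side is symmetric; a sketch of the matching application call, again illustrative only:

    /* Illustrative teardown: rte_security_session_destroy() routes to
     * dpaa2_sec_security_session_destroy() above, which wipes the keys
     * and returns the private data to its mempool. */
    static void
    destroy_ipsec_session(uint8_t dev_id, struct rte_security_session *sess)
    {
            struct rte_security_ctx *ctx = (struct rte_security_ctx *)
                    rte_cryptodev_get_sec_ctx(dev_id);

            rte_security_session_destroy(ctx, sess);
    }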
+
+static int
dpaa2_sec_session_configure(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
@@ -1820,11 +2219,28 @@ static struct rte_cryptodev_ops crypto_ops = {
.session_clear = dpaa2_sec_session_clear,
};
+static const struct rte_security_capability *
+dpaa2_sec_capabilities_get(void *device __rte_unused)
+{
+ return dpaa2_sec_security_cap;
+}
+
+struct rte_security_ops dpaa2_sec_security_ops = {
+ .session_create = dpaa2_sec_security_session_create,
+ .session_update = NULL,
+ .session_stats_get = NULL,
+ .session_destroy = dpaa2_sec_security_session_destroy,
+ .set_pkt_metadata = NULL,
+ .capabilities_get = dpaa2_sec_capabilities_get
+};
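
For context, the rte_security layer reaches these callbacks through the context registered in dpaa2_sec_dev_init() below; a simplified sketch of the dispatch (not the exact library source) is:

    /* Simplified view of how the library invokes the ops table above:
     * 'instance' is the rte_security_ctx whose .ops points at
     * dpaa2_sec_security_ops and whose .device is the cryptodev. */
    static inline int
    security_session_create_dispatch(struct rte_security_ctx *instance,
                                     struct rte_security_session_conf *conf,
                                     struct rte_security_session *sess,
                                     struct rte_mempool *mp)
    {
            if (instance->ops->session_create == NULL)
                    return -ENOTSUP;
            return instance->ops->session_create(instance->device, conf,
                                                 sess, mp);
    }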
+
static int
dpaa2_sec_uninit(const struct rte_cryptodev *dev)
{
struct dpaa2_sec_dev_private *internals = dev->data->dev_private;
+ rte_free(dev->security_ctx);
+
rte_mempool_free(internals->fle_pool);
PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n",
@@ -1839,6 +2255,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
struct dpaa2_sec_dev_private *internals;
struct rte_device *dev = cryptodev->device;
struct rte_dpaa2_device *dpaa2_dev;
+ struct rte_security_ctx *security_instance;
struct fsl_mc_io *dpseci;
uint16_t token;
struct dpseci_attr attr;
@@ -1860,7 +2277,8 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
cryptodev->dequeue_burst = dpaa2_sec_dequeue_burst;
cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
RTE_CRYPTODEV_FF_HW_ACCELERATED |
- RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_SECURITY;
internals = cryptodev->data->dev_private;
internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS;
@@ -1874,6 +2292,17 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
PMD_INIT_LOG(DEBUG, "Device already init by primary process");
return 0;
}
+
+ /* Initialize security_ctx only for the primary process */
+ security_instance = rte_malloc("rte_security_instances_ops",
+ sizeof(struct rte_security_ctx), 0);
+ if (security_instance == NULL)
+ return -ENOMEM;
+ security_instance->device = (void *)cryptodev;
+ security_instance->ops = &dpaa2_sec_security_ops;
+ security_instance->sess_cnt = 0;
+ cryptodev->security_ctx = security_instance;
+
 /* Open the rte device via MC and save the handle for further use */
dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
sizeof(struct fsl_mc_io), 0);
@@ -1987,20 +2416,11 @@ cryptodev_dpaa2_sec_remove(struct rte_dpaa2_device *dpaa2_dev)
if (ret)
return ret;
- /* free crypto device */
- rte_cryptodev_pmd_release_device(cryptodev);
-
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
- rte_free(cryptodev->data->dev_private);
-
- cryptodev->device = NULL;
- cryptodev->data = NULL;
-
- return 0;
+ return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
- .drv_type = DPAA2_MC_DPSECI_DEVID,
+ .drv_type = DPAA2_CRYPTO,
.driver = {
.name = "DPAA2 SEC PMD"
},
@@ -2008,5 +2428,8 @@ static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
.remove = cryptodev_dpaa2_sec_remove,
};
+static struct cryptodev_driver dpaa2_sec_crypto_drv;
+
RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
-RTE_PMD_REGISTER_CRYPTO_DRIVER(rte_dpaa2_sec_driver, cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, rte_dpaa2_sec_driver,
+ cryptodev_driver_id);
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index 3849a052..14e71df5 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -67,6 +67,11 @@ enum shr_desc_type {
#define DIR_ENC 1
#define DIR_DEC 0
+#define DPAA2_SET_FLC_EWS(flc) (flc->word1_bits23_16 |= 0x1)
+#define DPAA2_SET_FLC_RSC(flc) (flc->word1_bits31_24 |= 0x1)
+#define DPAA2_SET_FLC_REUSE_BS(flc) (flc->mode_bits |= 0x8000)
+#define DPAA2_SET_FLC_REUSE_FF(flc) (flc->mode_bits |= 0x2000)
+
/* SEC Flow Context Descriptor */
struct sec_flow_context {
/* word 0 */
@@ -411,4 +416,61 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
+
+static const struct rte_security_capability dpaa2_sec_security_cap[] = {
+ { /* IPsec Lookaside Protocol offload ESP Transport Egress */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
+ .options = { 0 }
+ },
+ .crypto_capabilities = dpaa2_sec_capabilities
+ },
+ { /* IPsec Lookaside Protocol offload ESP Tunnel Ingress */
+ .action = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
+ .direction = RTE_SECURITY_IPSEC_SA_DIR_INGRESS,
+ .options = { 0 }
+ },
+ .crypto_capabilities = dpaa2_sec_capabilities
+ },
+ {
+ .action = RTE_SECURITY_ACTION_TYPE_NONE
+ }
+};
+
+/**
+ * Compute the 16-bit ones' complement checksum (RFC 1071) over a buffer
+ *
+ * @param buffer buffer over which the checksum is computed
+ * @param len buffer length in bytes
+ *
+ * @return checksum value in host CPU order
+ */
+static inline uint16_t
+calc_chksum(void *buffer, int len)
+{
+ uint16_t *buf = (uint16_t *)buffer;
+ uint32_t sum = 0;
+ uint16_t result;
+
+ for (sum = 0; len > 1; len -= 2)
+ sum += *buf++;
+
+ if (len == 1)
+ sum += *(unsigned char *)buf;
+
+ sum = (sum >> 16) + (sum & 0xFFFF);
+ sum += (sum >> 16);
+ result = ~sum;
+
+ return result;
+}
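
As a sanity check, the routine above reproduces the classic RFC 1071 worked example; the small self-test below is illustrative and assumes a little-endian host (on a big-endian host the result's bytes swap):

    #include <assert.h>
    #include <stdint.h>

    static void
    test_calc_chksum(void)
    {
            /* RFC 1071 sample data: the 16-bit ones' complement sum of
             * these bytes is 0xddf2 in network order, so the checksum is
             * 0x220d; calc_chksum() folds host-order words, so a
             * little-endian host sees the byte-swapped value 0x0d22. */
            uint8_t data[] = { 0x00, 0x01, 0xf2, 0x03, 0xf4, 0xf5, 0xf6, 0xf7 };

            assert(calc_chksum(data, sizeof(data)) == 0x0d22);
    }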
+
#endif /* _RTE_DPAA2_SEC_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/dpaa2_sec/mc/dpseci.c b/drivers/crypto/dpaa2_sec/mc/dpseci.c
index 4a109620..2a216afc 100644
--- a/drivers/crypto/dpaa2_sec/mc/dpseci.c
+++ b/drivers/crypto/dpaa2_sec/mc/dpseci.c
@@ -37,18 +37,34 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
-
#include <fsl_mc_sys.h>
#include <fsl_mc_cmd.h>
#include <fsl_dpseci.h>
#include <fsl_dpseci_cmd.h>
-int
-dpseci_open(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- int dpseci_id,
- uint16_t *token)
+/**
+ * dpseci_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpseci_id: DPSECI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpseci_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpseci_id,
+ uint16_t *token)
{
+ struct dpseci_cmd_open *cmd_params;
struct mc_command cmd = { 0 };
int err;
@@ -56,23 +72,34 @@ dpseci_open(struct fsl_mc_io *mc_io,
cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
cmd_flags,
0);
- DPSECI_CMD_OPEN(cmd, dpseci_id);
+ cmd_params = (struct dpseci_cmd_open *)cmd.params;
+ cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
- /* send command to mc */
+ /* send command to mc */
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
- *token = MC_CMD_HDR_READ_TOKEN(cmd.header);
+ *token = mc_cmd_hdr_read_token(&cmd);
return 0;
}
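
Every management sequence is bracketed by this open/close pair; a minimal illustrative caller (error handling abbreviated, CMD_PRI_LOW from the MC command headers):

    /* Open a DPSECI object, query it, and close the control session.
     * 'mc_io' and 'dpseci_id' are assumed to come from the DPAA2 bus
     * scan. */
    static int
    probe_dpseci(struct fsl_mc_io *mc_io, int dpseci_id)
    {
            struct dpseci_attr attr;
            uint16_t token;
            int ret;

            ret = dpseci_open(mc_io, CMD_PRI_LOW, dpseci_id, &token);
            if (ret)
                    return ret;

            ret = dpseci_get_attributes(mc_io, CMD_PRI_LOW, token, &attr);

            /* Every token handed out by dpseci_open() must be closed. */
            dpseci_close(mc_io, CMD_PRI_LOW, token);
            return ret;
    }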
-int
-dpseci_close(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token)
+/**
+ * dpseci_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
{
struct mc_command cmd = { 0 };
@@ -81,478 +108,569 @@ dpseci_close(struct fsl_mc_io *mc_io,
cmd_flags,
token);
- /* send command to mc */
+ /* send command to mc */
return mc_send_command(mc_io, &cmd);
}
-int
-dpseci_create(struct fsl_mc_io *mc_io,
- uint16_t dprc_token,
- uint32_t cmd_flags,
- const struct dpseci_cfg *cfg,
- uint32_t *obj_id)
+/**
+ * dpseci_create() - Create the DPSECI object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: Returned object id
+ *
+ * Create the DPSECI object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ const struct dpseci_cfg *cfg,
+ uint32_t *obj_id)
{
+ struct dpseci_cmd_create *cmd_params;
struct mc_command cmd = { 0 };
- int err;
+ int err, i;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CREATE,
cmd_flags,
dprc_token);
- DPSECI_CMD_CREATE(cmd, cfg);
-
- /* send command to mc */
+ cmd_params = (struct dpseci_cmd_create *)cmd.params;
+ for (i = 0; i < DPSECI_PRIO_NUM; i++)
+ cmd_params->priorities[i] = cfg->priorities[i];
+ cmd_params->num_tx_queues = cfg->num_tx_queues;
+ cmd_params->num_rx_queues = cfg->num_rx_queues;
+ cmd_params->options = cfg->options;
+
+ /* send command to mc */
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
- CMD_CREATE_RSP_GET_OBJ_ID_PARAM0(cmd, *obj_id);
+ *obj_id = mc_cmd_read_object_id(&cmd);
return 0;
}
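
Creation and destruction both act through the parent container; a hedged round-trip sketch (the DPRC token is assumed to come from an already-open DPRC session):

    /* Illustrative create/destroy round trip within a parent container. */
    static int
    create_and_destroy_dpseci(struct fsl_mc_io *mc_io, uint16_t dprc_token,
                              const struct dpseci_cfg *cfg)
    {
            uint32_t obj_id;
            int ret;

            ret = dpseci_create(mc_io, dprc_token, CMD_PRI_LOW, cfg, &obj_id);
            if (ret)
                    return ret;

            /* All tokens to the object must be closed before destroy. */
            return dpseci_destroy(mc_io, dprc_token, CMD_PRI_LOW, obj_id);
    }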
-int
-dpseci_destroy(struct fsl_mc_io *mc_io,
- uint16_t dprc_token,
- uint32_t cmd_flags,
- uint32_t object_id)
+/**
+ * dpseci_destroy() - Destroy the DPSECI object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ * created this object;
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpseci_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id)
{
+ struct dpseci_cmd_destroy *cmd_params;
struct mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DESTROY,
cmd_flags,
dprc_token);
- /* set object id to destroy */
- CMD_DESTROY_SET_OBJ_ID_PARAM0(cmd, object_id);
- /* send command to mc */
- return mc_send_command(mc_io, &cmd);
-}
-
-int
-dpseci_enable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token)
-{
- struct mc_command cmd = { 0 };
+ cmd_params = (struct dpseci_cmd_destroy *)cmd.params;
+ cmd_params->dpseci_id = cpu_to_le32(object_id);
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
- cmd_flags,
- token);
-
- /* send command to mc */
+ /* send command to mc */
return mc_send_command(mc_io, &cmd);
}
-int
-dpseci_disable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
- cmd_flags,
- token);
-
- /* send command to mc */
- return mc_send_command(mc_io, &cmd);
-}
-
-int
-dpseci_is_enabled(struct fsl_mc_io *mc_io,
+/**
+ * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_enable(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
- uint16_t token,
- int *en)
-{
- struct mc_command cmd = { 0 };
- int err;
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
- cmd_flags,
- token);
-
- /* send command to mc */
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- DPSECI_RSP_IS_ENABLED(cmd, *en);
-
- return 0;
-}
-
-int
-dpseci_reset(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token)
+ uint16_t token)
{
struct mc_command cmd = { 0 };
/* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
cmd_flags,
token);
- /* send command to mc */
+ /* send command to mc */
return mc_send_command(mc_io, &cmd);
}
-int
-dpseci_get_irq(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- int *type,
- struct dpseci_irq_cfg *irq_cfg)
-{
- struct mc_command cmd = { 0 };
- int err;
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ,
- cmd_flags,
- token);
- DPSECI_CMD_GET_IRQ(cmd, irq_index);
-
- /* send command to mc */
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- DPSECI_RSP_GET_IRQ(cmd, *type, irq_cfg);
-
- return 0;
-}
-
-int
-dpseci_set_irq(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- struct dpseci_irq_cfg *irq_cfg)
+/**
+ * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
{
struct mc_command cmd = { 0 };
/* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ,
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
cmd_flags,
token);
- DPSECI_CMD_SET_IRQ(cmd, irq_index, irq_cfg);
- /* send command to mc */
+ /* send command to mc */
return mc_send_command(mc_io, &cmd);
}
-int
-dpseci_get_irq_enable(struct fsl_mc_io *mc_io,
+/**
+ * dpseci_is_enabled() - Check if the DPSECI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_is_enabled(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
- uint8_t irq_index,
- uint8_t *en)
+ int *en)
{
+ struct dpseci_rsp_is_enabled *rsp_params;
struct mc_command cmd = { 0 };
int err;
/* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_ENABLE,
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
cmd_flags,
token);
- DPSECI_CMD_GET_IRQ_ENABLE(cmd, irq_index);
- /* send command to mc */
+ /* send command to mc */
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
- DPSECI_RSP_GET_IRQ_ENABLE(cmd, *en);
+ rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
+ *en = dpseci_get_field(rsp_params->en, ENABLE);
return 0;
}
-int
-dpseci_set_irq_enable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint8_t en)
+/**
+ * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token)
{
struct mc_command cmd = { 0 };
/* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_ENABLE,
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET,
cmd_flags,
token);
- DPSECI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en);
- /* send command to mc */
+ /* send command to mc */
return mc_send_command(mc_io, &cmd);
}
-int
-dpseci_get_irq_mask(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t *mask)
+/**
+ * dpseci_get_attributes() - Retrieve DPSECI attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_attr *attr)
{
+ struct dpseci_rsp_get_attr *rsp_params;
struct mc_command cmd = { 0 };
int err;
/* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_MASK,
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
cmd_flags,
token);
- DPSECI_CMD_GET_IRQ_MASK(cmd, irq_index);
- /* send command to mc */
+ /* send command to mc */
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
- DPSECI_RSP_GET_IRQ_MASK(cmd, *mask);
+ rsp_params = (struct dpseci_rsp_get_attr *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->options = rsp_params->options;
+ attr->num_tx_queues = rsp_params->num_tx_queues;
+ attr->num_rx_queues = rsp_params->num_rx_queues;
return 0;
}
-int
-dpseci_set_irq_mask(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t mask)
+/**
+ * dpseci_set_rx_queue() - Set Rx queue configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of
+ * priorities configured at DPSECI creation; use
+ * DPSECI_ALL_QUEUES to configure all Rx queues identically.
+ * @cfg: Rx queue configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ const struct dpseci_rx_queue_cfg *cfg)
{
+ struct dpseci_cmd_set_rx_queue *cmd_params;
struct mc_command cmd = { 0 };
/* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_IRQ_MASK,
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
cmd_flags,
token);
- DPSECI_CMD_SET_IRQ_MASK(cmd, irq_index, mask);
-
- /* send command to mc */
+ cmd_params = (struct dpseci_cmd_set_rx_queue *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ cmd_params->queue = queue;
+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
+ cmd_params->options = cpu_to_le32(cfg->options);
+ dpseci_set_field(cmd_params->dest_type,
+ DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+ dpseci_set_field(cmd_params->order_preservation_en,
+ ORDER_PRESERVATION,
+ cfg->order_preservation_en);
+
+ /* send command to mc */
return mc_send_command(mc_io, &cmd);
}
-int
-dpseci_get_irq_status(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t *status)
+/**
+ * dpseci_get_rx_queue() - Retrieve Rx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of
+ * priorities configured at DPSECI creation
+ * @attr: Returned Rx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ struct dpseci_rx_queue_attr *attr)
{
+ struct dpseci_rsp_get_rx_queue *rsp_params;
+ struct dpseci_cmd_get_queue *cmd_params;
struct mc_command cmd = { 0 };
int err;
/* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_IRQ_STATUS,
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
cmd_flags,
token);
- DPSECI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status);
+ cmd_params = (struct dpseci_cmd_get_queue *)cmd.params;
+ cmd_params->queue = queue;
- /* send command to mc */
+ /* send command to mc */
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
- DPSECI_RSP_GET_IRQ_STATUS(cmd, *status);
+ rsp_params = (struct dpseci_rsp_get_rx_queue *)cmd.params;
+ attr->user_ctx = le64_to_cpu(rsp_params->user_ctx);
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+ attr->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
+ attr->dest_cfg.priority = rsp_params->dest_priority;
+ attr->dest_cfg.dest_type =
+ dpseci_get_field(rsp_params->dest_type,
+ DEST_TYPE);
+ attr->order_preservation_en =
+ dpseci_get_field(rsp_params->order_preservation_en,
+ ORDER_PRESERVATION);
return 0;
}
-int
-dpseci_clear_irq_status(struct fsl_mc_io *mc_io,
+/**
+ * dpseci_get_tx_queue() - Retrieve Tx queue attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of
+ * priorities configured at DPSECI creation
+ * @attr: Returned Tx queue attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
- uint8_t irq_index,
- uint32_t status)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLEAR_IRQ_STATUS,
- cmd_flags,
- token);
- DPSECI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status);
-
- /* send command to mc */
- return mc_send_command(mc_io, &cmd);
-}
-
-int
-dpseci_get_attributes(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- struct dpseci_attr *attr)
+ uint8_t queue,
+ struct dpseci_tx_queue_attr *attr)
{
+ struct dpseci_rsp_get_tx_queue *rsp_params;
+ struct dpseci_cmd_get_queue *cmd_params;
struct mc_command cmd = { 0 };
int err;
/* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
cmd_flags,
token);
+ cmd_params = (struct dpseci_cmd_get_queue *)cmd.params;
+ cmd_params->queue = queue;
- /* send command to mc */
+ /* send command to mc */
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
- DPSECI_RSP_GET_ATTR(cmd, attr);
+ rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+ attr->priority = rsp_params->priority;
return 0;
}
-int
-dpseci_set_rx_queue(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t queue,
- const struct dpseci_rx_queue_cfg *cfg)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
- cmd_flags,
- token);
- DPSECI_CMD_SET_RX_QUEUE(cmd, queue, cfg);
-
- /* send command to mc */
- return mc_send_command(mc_io, &cmd);
-}
-
-int
-dpseci_get_rx_queue(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t queue,
- struct dpseci_rx_queue_attr *attr)
+/**
+ * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @attr: Returned SEC attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_sec_attr *attr)
{
+ struct dpseci_rsp_get_sec_attr *rsp_params;
struct mc_command cmd = { 0 };
int err;
/* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
cmd_flags,
token);
- DPSECI_CMD_GET_RX_QUEUE(cmd, queue);
- /* send command to mc */
+ /* send command to mc */
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
- DPSECI_RSP_GET_RX_QUEUE(cmd, attr);
+ rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
+ attr->ip_id = le16_to_cpu(rsp_params->ip_id);
+ attr->major_rev = rsp_params->major_rev;
+ attr->minor_rev = rsp_params->minor_rev;
+ attr->era = rsp_params->era;
+ attr->deco_num = rsp_params->deco_num;
+ attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
+ attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
+ attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
+ attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
+ attr->crc_acc_num = rsp_params->crc_acc_num;
+ attr->pk_acc_num = rsp_params->pk_acc_num;
+ attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
+ attr->rng_acc_num = rsp_params->rng_acc_num;
+ attr->md_acc_num = rsp_params->md_acc_num;
+ attr->arc4_acc_num = rsp_params->arc4_acc_num;
+ attr->des_acc_num = rsp_params->des_acc_num;
+ attr->aes_acc_num = rsp_params->aes_acc_num;
return 0;
}
-int
-dpseci_get_tx_queue(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t queue,
- struct dpseci_tx_queue_attr *attr)
+/**
+ * dpseci_get_sec_counters() - Retrieve SEC accelerator counters.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @counters: Returned SEC counters
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_sec_counters(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_sec_counters *counters)
{
+ struct dpseci_rsp_get_sec_counters *rsp_params;
struct mc_command cmd = { 0 };
int err;
/* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
cmd_flags,
token);
- DPSECI_CMD_GET_TX_QUEUE(cmd, queue);
- /* send command to mc */
+ /* send command to mc */
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
- DPSECI_RSP_GET_TX_QUEUE(cmd, attr);
+ rsp_params = (struct dpseci_rsp_get_sec_counters *)cmd.params;
+ counters->dequeued_requests =
+ le64_to_cpu(rsp_params->dequeued_requests);
+ counters->ob_enc_requests = le64_to_cpu(rsp_params->ob_enc_requests);
+ counters->ib_dec_requests = le64_to_cpu(rsp_params->ib_dec_requests);
+ counters->ob_enc_bytes = le64_to_cpu(rsp_params->ob_enc_bytes);
+ counters->ob_prot_bytes = le64_to_cpu(rsp_params->ob_prot_bytes);
+ counters->ib_dec_bytes = le64_to_cpu(rsp_params->ib_dec_bytes);
+ counters->ib_valid_bytes = le64_to_cpu(rsp_params->ib_valid_bytes);
return 0;
}
-int
-dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- struct dpseci_sec_attr *attr)
+/**
+ * dpseci_get_api_version() - Get Data Path SEC Interface API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path sec API
+ * @minor_ver: Minor version of data path sec API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpseci_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver)
{
+ struct dpseci_rsp_get_api_version *rsp_params;
struct mc_command cmd = { 0 };
int err;
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
- cmd_flags,
- token);
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
- /* send command to mc */
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
- /* retrieve response parameters */
- DPSECI_RSP_GET_SEC_ATTR(cmd, attr);
+ rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
return 0;
}
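
A caller can gate optional features on the reported API version; an illustrative check (the 5.1 threshold is a made-up example, not a real requirement):

    /* Illustrative version gate for a hypothetical feature. */
    static int
    check_dpseci_api(struct fsl_mc_io *mc_io)
    {
            uint16_t major, minor;
            int ret;

            ret = dpseci_get_api_version(mc_io, CMD_PRI_LOW, &major, &minor);
            if (ret)
                    return ret;

            if (major < 5 || (major == 5 && minor < 1))
                    return -ENOTSUP; /* hypothetical minimum */
            return 0;
    }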
-int
-dpseci_get_sec_counters(struct fsl_mc_io *mc_io,
+int dpseci_set_congestion_notification(
+ struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
- struct dpseci_sec_counters *counters)
+ const struct dpseci_congestion_notification_cfg *cfg)
{
+ struct dpseci_cmd_set_congestion_notification *cmd_params;
struct mc_command cmd = { 0 };
- int err;
/* prepare command */
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_COUNTERS,
- cmd_flags,
- token);
-
- /* send command to mc */
- err = mc_send_command(mc_io, &cmd);
- if (err)
- return err;
-
- /* retrieve response parameters */
- DPSECI_RSP_GET_SEC_COUNTERS(cmd, counters);
-
- return 0;
+ cmd.header = mc_encode_cmd_header(
+ DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+
+ cmd_params =
+ (struct dpseci_cmd_set_congestion_notification *)cmd.params;
+ /* keep command fields little-endian, mirroring the _get path below */
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->dest_priority = cfg->dest_cfg.priority;
+ cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
+ cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
+ cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
+ cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
+ cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
+ dpseci_set_field(cmd_params->type_units,
+ DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+ dpseci_set_field(cmd_params->type_units,
+ CG_UNITS,
+ cfg->units);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
}
-int
-dpseci_get_api_version(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t *major_ver,
- uint16_t *minor_ver)
+int dpseci_get_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_congestion_notification_cfg *cfg)
{
+ struct dpseci_cmd_set_congestion_notification *rsp_params;
struct mc_command cmd = { 0 };
int err;
- cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
- cmd_flags,
- 0);
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(
+ DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+ /* send command to mc */
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
- DPSECI_RSP_GET_API_VERSION(cmd, *major_ver, *minor_ver);
+ rsp_params =
+ (struct dpseci_cmd_set_congestion_notification *)cmd.params;
+
+ cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
+ cfg->dest_cfg.priority = rsp_params->dest_priority;
+ cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
+ cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
+ cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
+ cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
+ cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
+ cfg->units = dpseci_get_field(rsp_params->type_units, CG_UNITS);
+ cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->type_units,
+ DEST_TYPE);
return 0;
}
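
Putting the two congestion calls to work, a hedged configuration sketch (thresholds, IOVA and context values are placeholders):

    /* Illustrative congestion setup: write a CSCN record to 'iova' when
     * more than 32 frames are queued and again when the backlog drops
     * below 16; no DPIO/DPCON notification is requested. */
    static int
    setup_congestion(struct fsl_mc_io *mc_io, uint16_t token, uint64_t iova)
    {
            struct dpseci_congestion_notification_cfg cfg = { 0 };

            cfg.units = DPSECI_CONGESTION_UNIT_FRAMES;
            cfg.threshold_entry = 32;
            cfg.threshold_exit = 16;
            cfg.message_iova = iova;
            cfg.message_ctx = 0;
            cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
                                    DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT;
            cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;

            return dpseci_set_congestion_notification(mc_io, CMD_PRI_LOW,
                                                      token, &cfg);
    }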
diff --git a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
index 6cc14a66..4acb595c 100644
--- a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
+++ b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
+ * Copyright 2016-2017 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -37,7 +37,6 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
-
#ifndef __FSL_DPSECI_H
#define __FSL_DPSECI_H
@@ -61,394 +60,89 @@ struct fsl_mc_io;
*/
#define DPSECI_ALL_QUEUES (uint8_t)(-1)
-/**
- * dpseci_open() - Open a control session for the specified object
- * This function can be used to open a control session for an
- * already created object; an object may have been declared in
- * the DPL or by calling the dpseci_create() function.
- * This function returns a unique authentication token,
- * associated with the specific object ID and the specific MC
- * portal; this token must be used in all subsequent commands for
- * this specific object.
- *
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param dpseci_id DPSECI unique ID
- * @param token Returned token; use in subsequent API calls
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_open(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- int dpseci_id,
- uint16_t *token);
+int dpseci_open(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ int dpseci_id,
+ uint16_t *token);
+
+int dpseci_close(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
/**
- * dpseci_close() - Close the control session of the object
- * After this function is called, no further operations are
- * allowed on the object without opening a new control session.
- *
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
+ * Enable Congestion Group support
*/
-int
-dpseci_close(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+#define DPSECI_OPT_HAS_CG 0x000020
/**
* struct dpseci_cfg - Structure representing DPSECI configuration
+ * @options: Any combination of the following options:
+ * DPSECI_OPT_HAS_CG
+ * DPSECI_OPT_HAS_OPR
+ * DPSECI_OPT_OPR_SHARED
+ * @num_tx_queues: number of queues towards the SEC
+ * @num_rx_queues: number of queues back from the SEC
+ * @priorities: Priorities for the SEC hardware processing;
+ * each entry in the array is the priority of the
+ * corresponding tx queue towards the SEC;
+ * valid priority values are 1-8
*/
struct dpseci_cfg {
- uint8_t num_tx_queues; /* num of queues towards the SEC */
- uint8_t num_rx_queues; /* num of queues back from the SEC */
+ uint32_t options;
+ uint8_t num_tx_queues;
+ uint8_t num_rx_queues;
uint8_t priorities[DPSECI_PRIO_NUM];
- /**< Priorities for the SEC hardware processing;
- * each place in the array is the priority of the tx queue
- * towards the SEC,
- * valid priorities are configured with values 1-8;
- */
};
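
An illustrative way to fill this structure before dpseci_create(); the queue counts and priorities are example values only:

    #include <string.h>

    /* Example configuration: two queue pairs, congestion groups enabled,
     * ascending Tx priorities (valid range 1-8). */
    static void
    fill_dpseci_cfg(struct dpseci_cfg *cfg)
    {
            int i;

            memset(cfg, 0, sizeof(*cfg));
            cfg->options = DPSECI_OPT_HAS_CG;
            cfg->num_tx_queues = 2;
            cfg->num_rx_queues = 2;
            for (i = 0; i < 2; i++)
                    cfg->priorities[i] = i + 1;
    }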
-/**
- * dpseci_create() - Create the DPSECI object
- * Create the DPSECI object, allocate required resources and
- * perform required initialization.
- *
- * The object can be created either by declaring it in the
- * DPL file, or by calling this function.
- *
- * The function accepts an authentication token of a parent
- * container that this object should be assigned to. The token
- * can be '0' so the object will be assigned to the default container.
- * The newly created object can be opened with the returned
- * object id and using the container's associated tokens and MC portals.
- *
- * @param mc_io Pointer to MC portal's I/O object
- * @param dprc_token Parent container token; '0' for default container
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param cfg Configuration structure
- * @param obj_id returned object id
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_create(struct fsl_mc_io *mc_io,
- uint16_t dprc_token,
- uint32_t cmd_flags,
- const struct dpseci_cfg *cfg,
- uint32_t *obj_id);
-
-/**
- * dpseci_destroy() - Destroy the DPSECI object and release all its resources.
- * The function accepts the authentication token of the parent container that
- * created the object (not the one that currently owns the object). The object
- * is searched within parent using the provided 'object_id'.
- * All tokens to the object must be closed before calling destroy.
- *
- * @param mc_io Pointer to MC portal's I/O object
- * @param dprc_token Parent container token; '0' for default container
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param object_id The object id; it must be a valid id within the
- * container that created this object;
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_destroy(struct fsl_mc_io *mc_io,
- uint16_t dprc_token,
- uint32_t cmd_flags,
- uint32_t object_id);
-
-/**
- * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames.
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_enable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
-
-/**
- * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames.
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_disable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
-
-/**
- * dpseci_is_enabled() - Check if the DPSECI is enabled.
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param en Returns '1' if object is enabled; '0' otherwise
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_is_enabled(struct fsl_mc_io *mc_io,
+int dpseci_create(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
uint32_t cmd_flags,
- uint16_t token,
- int *en);
+ const struct dpseci_cfg *cfg,
+ uint32_t *obj_id);
-/**
- * dpseci_reset() - Reset the DPSECI, returns the object to initial state.
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_reset(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token);
+int dpseci_destroy(struct fsl_mc_io *mc_io,
+ uint16_t dprc_token,
+ uint32_t cmd_flags,
+ uint32_t object_id);
-/**
- * struct dpseci_irq_cfg - IRQ configuration
- */
-struct dpseci_irq_cfg {
- uint64_t addr;
- /* Address that must be written to signal a message-based interrupt */
- uint32_t val;
- /* Value to write into irq_addr address */
- int irq_num;
- /* A user defined number associated with this IRQ */
-};
+int dpseci_enable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
-/**
- * dpseci_set_irq() - Set IRQ information for the DPSECI to trigger an interrupt
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param irq_index Identifies the interrupt index to configure
- * @param irq_cfg IRQ configuration
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_set_irq(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- struct dpseci_irq_cfg *irq_cfg);
+int dpseci_disable(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
-/**
- * dpseci_get_irq() - Get IRQ information from the DPSECI
- *
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param irq_index The interrupt index to configure
- * @param type Interrupt type: 0 represents message interrupt
- * type (both irq_addr and irq_val are valid)
- * @param irq_cfg IRQ attributes
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_get_irq(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- int *type,
- struct dpseci_irq_cfg *irq_cfg);
-
-/**
- * dpseci_set_irq_enable() - Set overall interrupt state.
- * Allows GPP software to control when interrupts are generated.
- * Each interrupt can have up to 32 causes. The enable/disable control's the
- * overall interrupt state. if the interrupt is disabled no causes will cause
- * an interrupt
- *
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param irq_index The interrupt index to configure
- * @param en Interrupt state - enable = 1, disable = 0
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_set_irq_enable(struct fsl_mc_io *mc_io,
+int dpseci_is_enabled(struct fsl_mc_io *mc_io,
uint32_t cmd_flags,
uint16_t token,
- uint8_t irq_index,
- uint8_t en);
+ int *en);
-/**
- * dpseci_get_irq_enable() - Get overall interrupt state
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param irq_index The interrupt index to configure
- * @param en Returned Interrupt state - enable = 1,
- * disable = 0
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_get_irq_enable(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint8_t *en);
-
-/**
- * dpseci_set_irq_mask() - Set interrupt mask.
- * Every interrupt can have up to 32 causes and the interrupt model supports
- * masking/unmasking each cause independently
- *
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param irq_index The interrupt index to configure
- * @param mask event mask to trigger interrupt;
- * each bit:
- * 0 = ignore event
- * 1 = consider event for asserting IRQ
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_set_irq_mask(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t mask);
-
-/**
- * dpseci_get_irq_mask() - Get interrupt mask.
- * Every interrupt can have up to 32 causes and the interrupt model supports
- * masking/unmasking each cause independently
- *
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param irq_index The interrupt index to configure
- * @param mask Returned event mask to trigger interrupt
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_get_irq_mask(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t *mask);
-
-/**
- * dpseci_get_irq_status() - Get the current status of any pending interrupts
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param irq_index The interrupt index to configure
- * @param status Returned interrupts status - one bit per cause:
- * 0 = no interrupt pending
- * 1 = interrupt pending
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_get_irq_status(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t *status);
-
-/**
- * dpseci_clear_irq_status() - Clear a pending interrupt's status
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param irq_index The interrupt index to configure
- * @param status bits to clear (W1C) - one bit per cause:
- * 0 = don't change
- * 1 = clear status bit
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_clear_irq_status(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t status);
+int dpseci_reset(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token);
/**
* struct dpseci_attr - Structure representing DPSECI attributes
- * @param id: DPSECI object ID
- * @param num_tx_queues: number of queues towards the SEC
- * @param num_rx_queues: number of queues back from the SEC
+ * @id: DPSECI object ID
+ * @num_tx_queues: number of queues towards the SEC
+ * @num_rx_queues: number of queues back from the SEC
+ * @options: Any combination of the following options:
+ * DPSECI_OPT_HAS_CG
+ * DPSECI_OPT_HAS_OPR
+ * DPSECI_OPT_OPR_SHARED
*/
struct dpseci_attr {
- int id; /* DPSECI object ID */
- uint8_t num_tx_queues; /* number of queues towards the SEC */
- uint8_t num_rx_queues; /* number of queues back from the SEC */
+ int id;
+ uint8_t num_tx_queues;
+ uint8_t num_rx_queues;
+ uint32_t options;
};
-/**
- * dpseci_get_attributes() - Retrieve DPSECI attributes.
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param attr Returned object's attributes
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_get_attributes(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- struct dpseci_attr *attr);
+int dpseci_get_attributes(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_attr *attr);
/**
* enum dpseci_dest - DPSECI destination types
@@ -471,16 +165,16 @@ enum dpseci_dest {
/**
* struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
+ * are 0-1 or 0-7, depending on the number of priorities in that
+ * channel; not relevant for 'DPSECI_DEST_NONE' option
*/
struct dpseci_dest_cfg {
- enum dpseci_dest dest_type; /* Destination type */
+ enum dpseci_dest dest_type;
int dest_id;
- /* Either DPIO ID or DPCON ID, depending on the destination type */
uint8_t priority;
- /* Priority selection within the DPIO or DPCON channel; valid values
- * are 0-1 or 0-7, depending on the number of priorities in that
- * channel; not relevant for 'DPSECI_DEST_NONE' option
- */
};
/**
@@ -504,243 +198,235 @@ struct dpseci_dest_cfg {
/**
* struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
+ * @options: Flags representing the suggested modifications to the queue;
+ * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
+ * @order_preservation_en: order preservation configuration for the rx queue;
+ * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame;
+ * valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained in 'options'
+ * @dest_cfg: Queue destination parameters;
+ * valid only if 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
*/
struct dpseci_rx_queue_cfg {
uint32_t options;
- /* Flags representing the suggested modifications to the queue;
- * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
- */
int order_preservation_en;
- /* order preservation configuration for the rx queue
- * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in
- * 'options'
- */
uint64_t user_ctx;
- /* User context value provided in the frame descriptor of each
- * dequeued frame;
- * valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained in 'options'
- */
struct dpseci_dest_cfg dest_cfg;
- /* Queue destination parameters;
- * valid only if 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
- */
};
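
An illustrative use of this structure, assuming the 'DPSECI_QUEUE_OPT_<X>' flags defined alongside it; the DPIO id and user context are placeholders:

    /* Steer all Rx queues to a DPIO at priority 0 and tag dequeued
     * frames with a caller-chosen cookie. */
    static int
    setup_rx_queues(struct fsl_mc_io *mc_io, uint16_t token, int dpio_id)
    {
            struct dpseci_rx_queue_cfg cfg = { 0 };

            cfg.options = DPSECI_QUEUE_OPT_DEST | DPSECI_QUEUE_OPT_USER_CTX;
            cfg.user_ctx = 0x1234; /* placeholder cookie */
            cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
            cfg.dest_cfg.dest_id = dpio_id;
            cfg.dest_cfg.priority = 0;

            return dpseci_set_rx_queue(mc_io, CMD_PRI_LOW, token,
                                       DPSECI_ALL_QUEUES, &cfg);
    }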
-/**
- * dpseci_set_rx_queue() - Set Rx queue configuration
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param queue Select the queue relative to number of
- * priorities configured at DPSECI creation; use
- * DPSECI_ALL_QUEUES to configure all Rx queues
- * identically.
- * @param cfg Rx queue configuration
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_set_rx_queue(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t queue,
- const struct dpseci_rx_queue_cfg *cfg);
+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ const struct dpseci_rx_queue_cfg *cfg);
/**
* struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame
+ * @order_preservation_en: Status of the order preservation configuration
+ * on the queue
+ * @dest_cfg: Queue destination configuration
+ * @fqid: Virtual FQID value to be used for dequeue operations
*/
struct dpseci_rx_queue_attr {
uint64_t user_ctx;
- /* User context value provided in the frame descriptor of
- * each dequeued frame
- */
int order_preservation_en;
- /* Status of the order preservation configuration on the queue */
- struct dpseci_dest_cfg dest_cfg;
- /* Queue destination configuration */
+ struct dpseci_dest_cfg dest_cfg;
uint32_t fqid;
- /* Virtual FQID value to be used for dequeue operations */
};
-/**
- * dpseci_get_rx_queue() - Retrieve Rx queue attributes.
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param queue Select the queue relative to number of
- * priorities configured at DPSECI creation
- * @param attr Returned Rx queue attributes
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_get_rx_queue(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t queue,
- struct dpseci_rx_queue_attr *attr);
+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ struct dpseci_rx_queue_attr *attr);
/**
* struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
+ * @fqid: Virtual FQID to be used for sending frames to SEC hardware
+ * @priority: SEC hardware processing priority for the queue
*/
struct dpseci_tx_queue_attr {
uint32_t fqid;
- /* Virtual FQID to be used for sending frames to SEC hardware */
uint8_t priority;
- /* SEC hardware processing priority for the queue */
};
-/**
- * dpseci_get_tx_queue() - Retrieve Tx queue attributes.
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param queue Select the queue relative to number of
- * priorities configured at DPSECI creation
- * @param attr Returned Tx queue attributes
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_get_tx_queue(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t queue,
- struct dpseci_tx_queue_attr *attr);
+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ uint8_t queue,
+ struct dpseci_tx_queue_attr *attr);
/**
* struct dpseci_sec_attr - Structure representing attributes of the SEC
- * hardware accelerator
- */
+ * hardware accelerator
+ * @ip_id: ID for SEC.
+ * @major_rev: Major revision number for SEC.
+ * @minor_rev: Minor revision number for SEC.
+ * @era: SEC Era.
+ * @deco_num: The number of copies of the DECO that are implemented
+ * in this version of SEC.
+ * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented
+ * in this version of SEC.
+ * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented
+ * in this version of SEC.
+ * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
+ * implemented in this version of SEC.
+ * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
+ * implemented in this version of SEC.
+ * @crc_acc_num: The number of copies of the CRC module that are
+ * implemented in this version of SEC.
+ * @pk_acc_num: The number of copies of the Public Key module that are
+ * implemented in this version of SEC.
+ * @kasumi_acc_num: The number of copies of the Kasumi module that are
+ * implemented in this version of SEC.
+ * @rng_acc_num: The number of copies of the Random Number Generator that
+ * are implemented in this version of SEC.
+ * @md_acc_num: The number of copies of the MDHA (Hashing module) that
+ * are implemented in this version of SEC.
+ * @arc4_acc_num: The number of copies of the ARC4 module that are
+ * implemented in this version of SEC.
+ * @des_acc_num: The number of copies of the DES module that are
+ * implemented in this version of SEC.
+ * @aes_acc_num: The number of copies of the AES module that are
+ * implemented in this version of SEC.
+ */
struct dpseci_sec_attr {
- uint16_t ip_id; /* ID for SEC */
- uint8_t major_rev; /* Major revision number for SEC */
- uint8_t minor_rev; /* Minor revision number for SEC */
- uint8_t era; /* SEC Era */
+ uint16_t ip_id;
+ uint8_t major_rev;
+ uint8_t minor_rev;
+ uint8_t era;
uint8_t deco_num;
- /* The number of copies of the DECO that are implemented in
- * this version of SEC
- */
uint8_t zuc_auth_acc_num;
- /* The number of copies of ZUCA that are implemented in this
- * version of SEC
- */
uint8_t zuc_enc_acc_num;
- /* The number of copies of ZUCE that are implemented in this
- * version of SEC
- */
uint8_t snow_f8_acc_num;
- /* The number of copies of the SNOW-f8 module that are
- * implemented in this version of SEC
- */
uint8_t snow_f9_acc_num;
- /* The number of copies of the SNOW-f9 module that are
- * implemented in this version of SEC
- */
uint8_t crc_acc_num;
- /* The number of copies of the CRC module that are implemented
- * in this version of SEC
- */
uint8_t pk_acc_num;
- /* The number of copies of the Public Key module that are
- * implemented in this version of SEC
- */
uint8_t kasumi_acc_num;
- /* The number of copies of the Kasumi module that are
- * implemented in this version of SEC
- */
uint8_t rng_acc_num;
- /* The number of copies of the Random Number Generator that are
- * implemented in this version of SEC
- */
uint8_t md_acc_num;
- /* The number of copies of the MDHA (Hashing module) that are
- * implemented in this version of SEC
- */
uint8_t arc4_acc_num;
- /* The number of copies of the ARC4 module that are implemented
- * in this version of SEC
- */
uint8_t des_acc_num;
- /* The number of copies of the DES module that are implemented
- * in this version of SEC
- */
uint8_t aes_acc_num;
- /* The number of copies of the AES module that are implemented
- * in this version of SEC
- */
};
-/**
- * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes.
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param attr Returned SEC attributes
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
- */
-int
-dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- struct dpseci_sec_attr *attr);
+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_sec_attr *attr);
/**
 * struct dpseci_sec_counters - Structure representing global SEC counters
 * (as opposed to per-DPSECI counters)
+ * @dequeued_requests: Number of Requests Dequeued
+ * @ob_enc_requests: Number of Outbound Encrypt Requests
+ * @ib_dec_requests: Number of Inbound Decrypt Requests
+ * @ob_enc_bytes: Number of Outbound Bytes Encrypted
+ * @ob_prot_bytes: Number of Outbound Bytes Protected
+ * @ib_dec_bytes: Number of Inbound Bytes Decrypted
+ * @ib_valid_bytes: Number of Inbound Bytes Validated
*/
struct dpseci_sec_counters {
- uint64_t dequeued_requests; /* Number of Requests Dequeued */
- uint64_t ob_enc_requests; /* Number of Outbound Encrypt Requests */
- uint64_t ib_dec_requests; /* Number of Inbound Decrypt Requests */
- uint64_t ob_enc_bytes; /* Number of Outbound Bytes Encrypted */
- uint64_t ob_prot_bytes; /* Number of Outbound Bytes Protected */
- uint64_t ib_dec_bytes; /* Number of Inbound Bytes Decrypted */
- uint64_t ib_valid_bytes; /* Number of Inbound Bytes Validated */
+ uint64_t dequeued_requests;
+ uint64_t ob_enc_requests;
+ uint64_t ib_dec_requests;
+ uint64_t ob_enc_bytes;
+ uint64_t ob_prot_bytes;
+ uint64_t ib_dec_bytes;
+ uint64_t ib_valid_bytes;
};
+int dpseci_get_sec_counters(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_sec_counters *counters);
+
+int dpseci_get_api_version(struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t *major_ver,
+ uint16_t *minor_ver);
/**
- * dpseci_get_sec_counters() - Retrieve SEC accelerator counters.
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param token Token of DPSECI object
- * @param counters Returned SEC counters
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
+ * enum dpseci_congestion_unit - DPSECI congestion units
+ * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
+ * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
*/
-int
-dpseci_get_sec_counters(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- struct dpseci_sec_counters *counters);
+enum dpseci_congestion_unit {
+ DPSECI_CONGESTION_UNIT_BYTES = 0,
+ DPSECI_CONGESTION_UNIT_FRAMES
+};
/**
- * dpseci_get_api_version() - Get Data Path SEC Interface API version
- * @param mc_io Pointer to MC portal's I/O object
- * @param cmd_flags Command flags; one or more of 'MC_CMD_FLAG_'
- * @param major_ver Major version of data path sec API
- * @param minor_ver Minor version of data path sec API
- *
- * @return:
- * - Return '0' on Success.
- * - Return Error code otherwise.
+ * CSCN message is written to message_iova once entering a
+ * congestion state (see 'threshold_entry')
+ */
+#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001
+/**
+ * CSCN message is written to message_iova once exiting a
+ * congestion state (see 'threshold_exit')
+ */
+#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002
+/**
+ * CSCN write will attempt to allocate into a cache (coherent write);
+ * valid only if 'DPSECI_CGN_MODE_WRITE_MEM_<X>' is selected
*/
-int
-dpseci_get_api_version(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t *major_ver,
- uint16_t *minor_ver);
+#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once entering a congestion state
+ * (see 'threshold_entry')
+ */
+#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once exiting a congestion state
+ * (see 'threshold_exit')
+ */
+#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' when the CSCN is written
+ * to the sw-portal's DQRR, the DQRI interrupt is asserted immediately
+ * (if enabled)
+ */
+#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020
+
+/**
+ * struct dpseci_congestion_notification_cfg - congestion notification
+ * configuration
+ * @units: units type
+ * @threshold_entry: above this threshold we enter a congestion state.
+ * set it to '0' to disable it
+ * @threshold_exit: below this threshold we exit the congestion state.
+ * @message_ctx: The context that will be part of the CSCN message
+ * @message_iova: I/O virtual address (must be in DMA-able memory),
+ * must be 16B aligned;
+ * @dest_cfg: CSCN can be sent to either the DPIO or the DPCON WQ channel
+ * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
+ * values
+ */
+struct dpseci_congestion_notification_cfg {
+ enum dpseci_congestion_unit units;
+ uint32_t threshold_entry;
+ uint32_t threshold_exit;
+ uint64_t message_ctx;
+ uint64_t message_iova;
+ struct dpseci_dest_cfg dest_cfg;
+ uint16_t notification_mode;
+};
+
+int dpseci_set_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ const struct dpseci_congestion_notification_cfg *cfg);
+
+int dpseci_get_congestion_notification(
+ struct fsl_mc_io *mc_io,
+ uint32_t cmd_flags,
+ uint16_t token,
+ struct dpseci_congestion_notification_cfg *cfg);
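
For illustration, a minimal sketch of enabling byte-based congestion notification on an open DPSECI object might look as follows; mc_io, token and cscn_iova are assumed to come from earlier open/setup code, and the threshold values are arbitrary:

	struct dpseci_congestion_notification_cfg cfg = {0};
	int err;

	cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
	cfg.threshold_entry = 64 * 1024;	/* enter congestion above 64 KiB */
	cfg.threshold_exit = 32 * 1024;		/* exit congestion below 32 KiB */
	cfg.message_iova = cscn_iova;		/* 16B-aligned, DMA-able buffer */
	cfg.message_ctx = 0;
	cfg.dest_cfg.dest_type = DPSECI_DEST_NONE;
	cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
				DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT;

	err = dpseci_set_congestion_notification(mc_io, 0 /* cmd_flags */,
						 token, &cfg);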
#endif /* __FSL_DPSECI_H */
diff --git a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
index 3f9f4743..a100a0ec 100644
--- a/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
+++ b/drivers/crypto/dpaa2_sec/mc/fsl_dpseci_cmd.h
@@ -5,7 +5,7 @@
* BSD LICENSE
*
* Copyright 2013-2016 Freescale Semiconductor Inc.
- * Copyright 2016 NXP.
+ * Copyright 2016-2017 NXP.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -37,220 +37,187 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
-
#ifndef _FSL_DPSECI_CMD_H
#define _FSL_DPSECI_CMD_H
/* DPSECI Version */
-#define DPSECI_VER_MAJOR 5
-#define DPSECI_VER_MINOR 0
-
-/* Command IDs */
-#define DPSECI_CMDID_CLOSE ((0x800 << 4) | (0x1))
-#define DPSECI_CMDID_OPEN ((0x809 << 4) | (0x1))
-#define DPSECI_CMDID_CREATE ((0x909 << 4) | (0x1))
-#define DPSECI_CMDID_DESTROY ((0x989 << 4) | (0x1))
-#define DPSECI_CMDID_GET_API_VERSION ((0xa09 << 4) | (0x1))
-
-#define DPSECI_CMDID_ENABLE ((0x002 << 4) | (0x1))
-#define DPSECI_CMDID_DISABLE ((0x003 << 4) | (0x1))
-#define DPSECI_CMDID_GET_ATTR ((0x004 << 4) | (0x1))
-#define DPSECI_CMDID_RESET ((0x005 << 4) | (0x1))
-#define DPSECI_CMDID_IS_ENABLED ((0x006 << 4) | (0x1))
-
-#define DPSECI_CMDID_SET_IRQ ((0x010 << 4) | (0x1))
-#define DPSECI_CMDID_GET_IRQ ((0x011 << 4) | (0x1))
-#define DPSECI_CMDID_SET_IRQ_ENABLE ((0x012 << 4) | (0x1))
-#define DPSECI_CMDID_GET_IRQ_ENABLE ((0x013 << 4) | (0x1))
-#define DPSECI_CMDID_SET_IRQ_MASK ((0x014 << 4) | (0x1))
-#define DPSECI_CMDID_GET_IRQ_MASK ((0x015 << 4) | (0x1))
-#define DPSECI_CMDID_GET_IRQ_STATUS ((0x016 << 4) | (0x1))
-#define DPSECI_CMDID_CLEAR_IRQ_STATUS ((0x017 << 4) | (0x1))
-
-#define DPSECI_CMDID_SET_RX_QUEUE ((0x194 << 4) | (0x1))
-#define DPSECI_CMDID_GET_RX_QUEUE ((0x196 << 4) | (0x1))
-#define DPSECI_CMDID_GET_TX_QUEUE ((0x197 << 4) | (0x1))
-#define DPSECI_CMDID_GET_SEC_ATTR ((0x198 << 4) | (0x1))
-#define DPSECI_CMDID_GET_SEC_COUNTERS ((0x199 << 4) | (0x1))
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_CMD_OPEN(cmd, dpseci_id) \
- MC_CMD_OP(cmd, 0, 0, 32, int, dpseci_id)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_CMD_CREATE(cmd, cfg) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->priorities[0]);\
- MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->priorities[1]);\
- MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->priorities[2]);\
- MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->priorities[3]);\
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->priorities[4]);\
- MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->priorities[5]);\
- MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->priorities[6]);\
- MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->priorities[7]);\
- MC_CMD_OP(cmd, 1, 0, 8, uint8_t, cfg->num_tx_queues);\
- MC_CMD_OP(cmd, 1, 8, 8, uint8_t, cfg->num_rx_queues);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_RSP_IS_ENABLED(cmd, en) \
- MC_RSP_OP(cmd, 0, 0, 1, int, en)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\
- MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\
- MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
- MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_CMD_GET_IRQ(cmd, irq_index) \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_RSP_GET_IRQ(cmd, type, irq_cfg) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \
- MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\
- MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \
- MC_RSP_OP(cmd, 2, 32, 32, int, type); \
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_CMD_SET_IRQ_ENABLE(cmd, irq_index, enable_state) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 8, uint8_t, enable_state); \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_RSP_GET_IRQ_ENABLE(cmd, enable_state) \
- MC_RSP_OP(cmd, 0, 0, 8, uint8_t, enable_state)
+#define DPSECI_VER_MAJOR 5
+#define DPSECI_VER_MINOR 1
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
-} while (0)
+/* Command versioning */
+#define DPSECI_CMD_BASE_VERSION 1
+#define DPSECI_CMD_BASE_VERSION_V2 2
+#define DPSECI_CMD_ID_OFFSET 4
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_CMD_GET_IRQ_MASK(cmd, irq_index) \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_RSP_GET_IRQ_MASK(cmd, mask) \
- MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_RSP_GET_IRQ_STATUS(cmd, status) \
- MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_RSP_GET_ATTR(cmd, attr) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 32, int, attr->id); \
- MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->num_tx_queues); \
- MC_RSP_OP(cmd, 1, 8, 8, uint8_t, attr->num_rx_queues); \
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_CMD_SET_RX_QUEUE(cmd, queue, cfg) \
-do { \
- MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \
- MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority); \
- MC_CMD_OP(cmd, 0, 40, 8, uint8_t, queue); \
- MC_CMD_OP(cmd, 0, 48, 4, enum dpseci_dest, cfg->dest_cfg.dest_type); \
- MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \
- MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\
- MC_CMD_OP(cmd, 2, 32, 1, int, cfg->order_preservation_en);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_CMD_GET_RX_QUEUE(cmd, queue) \
- MC_CMD_OP(cmd, 0, 40, 8, uint8_t, queue)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_RSP_GET_RX_QUEUE(cmd, attr) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id);\
- MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\
- MC_RSP_OP(cmd, 0, 48, 4, enum dpseci_dest, attr->dest_cfg.dest_type);\
- MC_RSP_OP(cmd, 1, 0, 8, uint64_t, attr->user_ctx);\
- MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->fqid);\
- MC_RSP_OP(cmd, 2, 32, 1, int, attr->order_preservation_en);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_CMD_GET_TX_QUEUE(cmd, queue) \
- MC_CMD_OP(cmd, 0, 40, 8, uint8_t, queue)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_RSP_GET_TX_QUEUE(cmd, attr) \
-do { \
- MC_RSP_OP(cmd, 0, 32, 32, uint32_t, attr->fqid);\
- MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->priority);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_RSP_GET_SEC_ATTR(cmd, attr) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 16, uint16_t, attr->ip_id);\
- MC_RSP_OP(cmd, 0, 16, 8, uint8_t, attr->major_rev);\
- MC_RSP_OP(cmd, 0, 24, 8, uint8_t, attr->minor_rev);\
- MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->era);\
- MC_RSP_OP(cmd, 1, 0, 8, uint8_t, attr->deco_num);\
- MC_RSP_OP(cmd, 1, 8, 8, uint8_t, attr->zuc_auth_acc_num);\
- MC_RSP_OP(cmd, 1, 16, 8, uint8_t, attr->zuc_enc_acc_num);\
- MC_RSP_OP(cmd, 1, 32, 8, uint8_t, attr->snow_f8_acc_num);\
- MC_RSP_OP(cmd, 1, 40, 8, uint8_t, attr->snow_f9_acc_num);\
- MC_RSP_OP(cmd, 1, 48, 8, uint8_t, attr->crc_acc_num);\
- MC_RSP_OP(cmd, 2, 0, 8, uint8_t, attr->pk_acc_num);\
- MC_RSP_OP(cmd, 2, 8, 8, uint8_t, attr->kasumi_acc_num);\
- MC_RSP_OP(cmd, 2, 16, 8, uint8_t, attr->rng_acc_num);\
- MC_RSP_OP(cmd, 2, 32, 8, uint8_t, attr->md_acc_num);\
- MC_RSP_OP(cmd, 2, 40, 8, uint8_t, attr->arc4_acc_num);\
- MC_RSP_OP(cmd, 2, 48, 8, uint8_t, attr->des_acc_num);\
- MC_RSP_OP(cmd, 2, 56, 8, uint8_t, attr->aes_acc_num);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_RSP_GET_SEC_COUNTERS(cmd, counters) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 64, uint64_t, counters->dequeued_requests);\
- MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counters->ob_enc_requests);\
- MC_RSP_OP(cmd, 2, 0, 64, uint64_t, counters->ib_dec_requests);\
- MC_RSP_OP(cmd, 3, 0, 64, uint64_t, counters->ob_enc_bytes);\
- MC_RSP_OP(cmd, 4, 0, 64, uint64_t, counters->ob_prot_bytes);\
- MC_RSP_OP(cmd, 5, 0, 64, uint64_t, counters->ib_dec_bytes);\
- MC_RSP_OP(cmd, 6, 0, 64, uint64_t, counters->ib_valid_bytes);\
-} while (0)
-
-/* cmd, param, offset, width, type, arg_name */
-#define DPSECI_RSP_GET_API_VERSION(cmd, major, minor) \
-do { \
- MC_RSP_OP(cmd, 0, 0, 16, uint16_t, major);\
- MC_RSP_OP(cmd, 0, 16, 16, uint16_t, minor);\
-} while (0)
+#define DPSECI_CMD_V1(id) \
+ ((id << DPSECI_CMD_ID_OFFSET) | DPSECI_CMD_BASE_VERSION)
+#define DPSECI_CMD_V2(id) \
+ ((id << DPSECI_CMD_ID_OFFSET) | DPSECI_CMD_BASE_VERSION_V2)
+/* Command IDs */
+#define DPSECI_CMDID_CLOSE DPSECI_CMD_V1(0x800)
+#define DPSECI_CMDID_OPEN DPSECI_CMD_V1(0x809)
+#define DPSECI_CMDID_CREATE DPSECI_CMD_V2(0x909)
+#define DPSECI_CMDID_DESTROY DPSECI_CMD_V1(0x989)
+#define DPSECI_CMDID_GET_API_VERSION DPSECI_CMD_V1(0xa09)
+
+#define DPSECI_CMDID_ENABLE DPSECI_CMD_V1(0x002)
+#define DPSECI_CMDID_DISABLE DPSECI_CMD_V1(0x003)
+#define DPSECI_CMDID_GET_ATTR DPSECI_CMD_V1(0x004)
+#define DPSECI_CMDID_RESET DPSECI_CMD_V1(0x005)
+#define DPSECI_CMDID_IS_ENABLED DPSECI_CMD_V1(0x006)
+
+#define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194)
+#define DPSECI_CMDID_GET_RX_QUEUE DPSECI_CMD_V1(0x196)
+#define DPSECI_CMDID_GET_TX_QUEUE DPSECI_CMD_V1(0x197)
+#define DPSECI_CMDID_GET_SEC_ATTR DPSECI_CMD_V1(0x198)
+#define DPSECI_CMDID_GET_SEC_COUNTERS DPSECI_CMD_V1(0x199)
+
+#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x170)
+#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x171)
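
As a quick sanity check of the encoding, each ID above is the raw command number shifted left by DPSECI_CMD_ID_OFFSET with the command version in the low nibble, for example:

	/*
	 * DPSECI_CMDID_GET_ATTR == DPSECI_CMD_V1(0x004)
	 *                       == (0x004 << 4) | 1 == 0x0041
	 * DPSECI_CMDID_CREATE   == DPSECI_CMD_V2(0x909)
	 *                       == (0x909 << 4) | 2 == 0x9092
	 */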
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPSECI_MASK(field) \
+ GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \
+ DPSECI_##field##_SHIFT)
+#define dpseci_set_field(var, field, val) \
+ ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
+#define dpseci_get_field(var, field) \
+ (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
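
A brief sketch of how these helpers pack and unpack sub-byte fields, using the DEST_TYPE and CG_UNITS shift/size pairs declared further down in this header:

	uint8_t type_units = 0;

	/* pack dest_type into bits 0..3 and units into bits 4..5 */
	dpseci_set_field(type_units, DEST_TYPE, DPSECI_DEST_DPIO);
	dpseci_set_field(type_units, CG_UNITS, DPSECI_CONGESTION_UNIT_FRAMES);

	/* ...and unpack them again */
	enum dpseci_dest dest = dpseci_get_field(type_units, DEST_TYPE);
	enum dpseci_congestion_unit units =
			dpseci_get_field(type_units, CG_UNITS);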
+
+#pragma pack(push, 1)
+struct dpseci_cmd_open {
+ uint32_t dpseci_id;
+};
+
+struct dpseci_cmd_create {
+ uint8_t priorities[8];
+ uint8_t num_tx_queues;
+ uint8_t num_rx_queues;
+ uint8_t pad[6];
+ uint32_t options;
+};
+
+struct dpseci_cmd_destroy {
+ uint32_t dpseci_id;
+};
+
+#define DPSECI_ENABLE_SHIFT 0
+#define DPSECI_ENABLE_SIZE 1
+
+struct dpseci_rsp_is_enabled {
+	/* only the LSB is valid */
+ uint8_t en;
+};
+
+struct dpseci_rsp_get_attr {
+ uint32_t id;
+ uint32_t pad;
+ uint8_t num_tx_queues;
+ uint8_t num_rx_queues;
+ uint8_t pad1[6];
+ uint32_t options;
+};
+
+#define DPSECI_DEST_TYPE_SHIFT 0
+#define DPSECI_DEST_TYPE_SIZE 4
+
+#define DPSECI_ORDER_PRESERVATION_SHIFT 0
+#define DPSECI_ORDER_PRESERVATION_SIZE 1
+
+struct dpseci_cmd_set_rx_queue {
+ uint32_t dest_id;
+ uint8_t dest_priority;
+ uint8_t queue;
+ /* from LSB: dest_type:4 */
+ uint8_t dest_type;
+ uint8_t pad;
+ uint64_t user_ctx;
+ uint32_t options;
+ /* only the LSB */
+ uint8_t order_preservation_en;
+};
+
+struct dpseci_cmd_get_queue {
+ uint8_t pad[5];
+ uint8_t queue;
+};
+
+struct dpseci_rsp_get_rx_queue {
+ uint32_t dest_id;
+ uint8_t dest_priority;
+ uint8_t pad1;
+ /* from LSB: dest_type:4 */
+ uint8_t dest_type;
+ uint8_t pad2;
+ uint64_t user_ctx;
+ uint32_t fqid;
+ /* only the LSB */
+	uint8_t order_preservation_en;
+};
+
+struct dpseci_rsp_get_tx_queue {
+ uint32_t pad;
+ uint32_t fqid;
+ uint8_t priority;
+};
+
+struct dpseci_rsp_get_sec_attr {
+ uint16_t ip_id;
+ uint8_t major_rev;
+ uint8_t minor_rev;
+ uint8_t era;
+ uint8_t pad1[3];
+ uint8_t deco_num;
+ uint8_t zuc_auth_acc_num;
+ uint8_t zuc_enc_acc_num;
+ uint8_t pad2;
+ uint8_t snow_f8_acc_num;
+ uint8_t snow_f9_acc_num;
+ uint8_t crc_acc_num;
+ uint8_t pad3;
+ uint8_t pk_acc_num;
+ uint8_t kasumi_acc_num;
+ uint8_t rng_acc_num;
+ uint8_t pad4;
+ uint8_t md_acc_num;
+ uint8_t arc4_acc_num;
+ uint8_t des_acc_num;
+ uint8_t aes_acc_num;
+};
+
+struct dpseci_rsp_get_sec_counters {
+ uint64_t dequeued_requests;
+ uint64_t ob_enc_requests;
+ uint64_t ib_dec_requests;
+ uint64_t ob_enc_bytes;
+ uint64_t ob_prot_bytes;
+ uint64_t ib_dec_bytes;
+ uint64_t ib_valid_bytes;
+};
+
+struct dpseci_rsp_get_api_version {
+ uint16_t major;
+ uint16_t minor;
+};
+
+#define DPSECI_DEST_TYPE_SHIFT 0
+#define DPSECI_DEST_TYPE_SIZE 4
+#define DPSECI_CG_UNITS_SHIFT 4
+#define DPSECI_CG_UNITS_SIZE 2
+
+struct dpseci_cmd_set_congestion_notification {
+ uint32_t dest_id;
+ uint16_t notification_mode;
+ uint8_t dest_priority;
+	/* from LSB: dest_type:4, units:2 */
+ uint8_t type_units;
+ uint64_t message_iova;
+ uint64_t message_ctx;
+ uint32_t threshold_entry;
+ uint32_t threshold_exit;
+};
+
+#pragma pack(pop)
#endif /* _FSL_DPSECI_CMD_H */
diff --git a/drivers/crypto/dpaa_sec/Makefile b/drivers/crypto/dpaa_sec/Makefile
new file mode 100644
index 00000000..17bc79c6
--- /dev/null
+++ b/drivers/crypto/dpaa_sec/Makefile
@@ -0,0 +1,73 @@
+# BSD LICENSE
+#
+# Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+# Copyright 2017 NXP.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Freescale Semiconductor, Inc nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_dpaa_sec.a
+
+# build flags
+CFLAGS += -D _GNU_SOURCE
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_INIT),y)
+CFLAGS += -O0 -g
+CFLAGS += "-Wno-error"
+else
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+endif
+
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa
+CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa_sec/
+# sharing the HW flib headers from the dpaa2_sec PMD
+CFLAGS += -I$(RTE_SDK)/drivers/crypto/dpaa2_sec/
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/common/include
+CFLAGS += -I$(RTE_SDK)/lib/librte_eal/linuxapp/eal
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+
+# versioning export map
+EXPORT_MAP := rte_pmd_dpaa_sec_version.map
+
+# library version
+LIBABIVER := 1
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += dpaa_sec.c
+
+# library dependencies
+
+LDLIBS += -lrte_bus_dpaa
+LDLIBS += -lrte_mempool_dpaa
+
+include $(RTE_SDK)/mk/rte.lib.mk
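
The PMD is only compiled when the corresponding config option is enabled; following the usual DPDK config convention, that means lines like the following in the target's defconfig (option names taken from the SRCS and debug checks above):

	CONFIG_RTE_LIBRTE_PMD_DPAA_SEC=y
	# optional: -O0 debug build with warnings not treated as errors
	CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_INIT=y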
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
new file mode 100644
index 00000000..16155b1a
--- /dev/null
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -0,0 +1,1548 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright 2017 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <sched.h>
+#include <net/if.h>
+
+#include <rte_byteorder.h>
+#include <rte_common.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+#include <rte_cycles.h>
+#include <rte_dev.h>
+#include <rte_kvargs.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+#include <rte_memcpy.h>
+#include <rte_string_fns.h>
+
+#include <fsl_usd.h>
+#include <fsl_qman.h>
+#include <of.h>
+
+/* RTA header files */
+#include <hw/desc/common.h>
+#include <hw/desc/algo.h>
+#include <hw/desc/ipsec.h>
+
+#include <rte_dpaa_bus.h>
+#include <dpaa_sec.h>
+#include <dpaa_sec_log.h>
+
+enum rta_sec_era rta_sec_era;
+
+static uint8_t cryptodev_driver_id;
+
+static __thread struct rte_crypto_op **dpaa_sec_ops;
+static __thread int dpaa_sec_op_nb;
+
+static inline void
+dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
+{
+ if (!ctx->fd_status) {
+ ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ } else {
+ PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
+ ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+
+	/* report op status to sym->op and then free the ctx memory */
+ rte_mempool_put(ctx->ctx_pool, (void *)ctx);
+}
+
+static inline struct dpaa_sec_op_ctx *
+dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
+{
+ struct dpaa_sec_op_ctx *ctx;
+ int retval;
+
+ retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
+ if (!ctx || retval) {
+ PMD_TX_LOG(ERR, "Alloc sec descriptor failed!");
+ return NULL;
+ }
+	/*
+	 * Clear the SG memory. There are 16 SG entries of 16 bytes each;
+	 * one call to dcbz_64() clears 64 bytes, so calling it 4 times
+	 * clears all the SG entries. dpaa_sec_alloc_ctx() is called for
+	 * each packet, and memset() is costlier than dcbz_64().
+	 */
+ dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
+ dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
+ dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
+ dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
+
+ ctx->ctx_pool = ses->ctx_pool;
+
+ return ctx;
+}
+
+static inline rte_iova_t
+dpaa_mem_vtop(void *vaddr)
+{
+ const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
+ uint64_t vaddr_64, paddr;
+ int i;
+
+ vaddr_64 = (uint64_t)vaddr;
+ for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
+ if (vaddr_64 >= memseg[i].addr_64 &&
+ vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
+ paddr = memseg[i].phys_addr +
+ (vaddr_64 - memseg[i].addr_64);
+
+ return (rte_iova_t)paddr;
+ }
+ }
+ return (rte_iova_t)(NULL);
+}
+
+static inline void *
+dpaa_mem_ptov(rte_iova_t paddr)
+{
+ const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
+ int i;
+
+ for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
+ if (paddr >= memseg[i].phys_addr &&
+ (char *)paddr < (char *)memseg[i].phys_addr + memseg[i].len)
+ return (void *)(memseg[i].addr_64 +
+ (paddr - memseg[i].phys_addr));
+ }
+ return NULL;
+}
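
Both helpers do a linear walk of the EAL memseg table, so they work for any EAL-managed buffer; a rough sketch of the intended round-trip (variable names hypothetical):

	void *va = rte_malloc(NULL, 64, 64);	/* any EAL-managed buffer */
	rte_iova_t pa = dpaa_mem_vtop(va);	/* VA -> bus address for SG/FD fields */
	void *back = dpaa_mem_ptov(pa);		/* back == va */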
+
+static void
+ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
+ struct qman_fq *fq,
+ const struct qm_mr_entry *msg)
+{
+ RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n",
+ fq->fqid, msg->ern.rc, msg->ern.seqnum);
+}
+
+/* Initialize the queue with the CAAM channel as the destination so that
+ * all packets on this queue are dispatched to CAAM.
+ */
+static int
+dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
+ uint32_t fqid_out)
+{
+ struct qm_mcc_initfq fq_opts;
+ uint32_t flags;
+ int ret = -1;
+
+ /* Clear FQ options */
+ memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
+
+ flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
+ QMAN_FQ_FLAG_TO_DCPORTAL;
+
+ ret = qman_create_fq(0, flags, fq_in);
+ if (unlikely(ret != 0)) {
+ PMD_INIT_LOG(ERR, "qman_create_fq failed");
+ return ret;
+ }
+
+ flags = QMAN_INITFQ_FLAG_SCHED;
+ fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
+ QM_INITFQ_WE_CONTEXTB;
+
+ qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
+ fq_opts.fqd.context_b = fqid_out;
+ fq_opts.fqd.dest.channel = qm_channel_caam;
+ fq_opts.fqd.dest.wq = 0;
+
+ fq_in->cb.ern = ern_sec_fq_handler;
+
+ ret = qman_init_fq(fq_in, flags, &fq_opts);
+ if (unlikely(ret != 0))
+ PMD_INIT_LOG(ERR, "qman_init_fq failed");
+
+ return ret;
+}
+
+/* frames are enqueued on in_fq and CAAM puts the crypto results on out_fq */
+static enum qman_cb_dqrr_result
+dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
+ struct qman_fq *fq __always_unused,
+ const struct qm_dqrr_entry *dqrr)
+{
+ const struct qm_fd *fd;
+ struct dpaa_sec_job *job;
+ struct dpaa_sec_op_ctx *ctx;
+
+ if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
+ return qman_cb_dqrr_defer;
+
+ if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
+ return qman_cb_dqrr_consume;
+
+ fd = &dqrr->fd;
+	/* the SG table is embedded in the op ctx:
+	 * sg[0] is for output,
+	 * sg[1] is for input
+	 */
+ job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
+ ctx = container_of(job, struct dpaa_sec_op_ctx, job);
+ ctx->fd_status = fd->status;
+ dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
+ dpaa_sec_op_ending(ctx);
+
+ return qman_cb_dqrr_consume;
+}
+
+/* CAAM results are put into this queue */
+static int
+dpaa_sec_init_tx(struct qman_fq *fq)
+{
+ int ret;
+ struct qm_mcc_initfq opts;
+ uint32_t flags;
+
+ flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
+ QMAN_FQ_FLAG_DYNAMIC_FQID;
+
+ ret = qman_create_fq(0, flags, fq);
+ if (unlikely(ret)) {
+ PMD_INIT_LOG(ERR, "qman_create_fq failed");
+ return ret;
+ }
+
+ memset(&opts, 0, sizeof(opts));
+ opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
+ QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
+
+ /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
+
+ fq->cb.dqrr = dqrr_out_fq_cb_rx;
+ fq->cb.ern = ern_sec_fq_handler;
+
+ ret = qman_init_fq(fq, 0, &opts);
+ if (unlikely(ret)) {
+ PMD_INIT_LOG(ERR, "unable to init caam source fq!");
+ return ret;
+ }
+
+ return ret;
+}
+
+static inline int is_cipher_only(dpaa_sec_session *ses)
+{
+ return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
+ (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
+}
+
+static inline int is_auth_only(dpaa_sec_session *ses)
+{
+ return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
+ (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
+}
+
+static inline int is_aead(dpaa_sec_session *ses)
+{
+ return ((ses->cipher_alg == 0) &&
+ (ses->auth_alg == 0) &&
+ (ses->aead_alg != 0));
+}
+
+static inline int is_auth_cipher(dpaa_sec_session *ses)
+{
+ return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
+ (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
+}
+
+static inline int is_encode(dpaa_sec_session *ses)
+{
+ return ses->dir == DIR_ENC;
+}
+
+static inline int is_decode(dpaa_sec_session *ses)
+{
+ return ses->dir == DIR_DEC;
+}
+
+static inline void
+caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
+{
+ switch (ses->auth_alg) {
+ case RTE_CRYPTO_AUTH_NULL:
+ ses->digest_length = 0;
+ break;
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ alginfo_a->algtype = OP_ALG_ALGSEL_MD5;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ alginfo_a->algtype = OP_ALG_ALGSEL_SHA1;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ alginfo_a->algtype = OP_ALG_ALGSEL_SHA224;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ alginfo_a->algtype = OP_ALG_ALGSEL_SHA256;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ alginfo_a->algtype = OP_ALG_ALGSEL_SHA384;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ alginfo_a->algtype = OP_ALG_ALGSEL_SHA512;
+ alginfo_a->algmode = OP_ALG_AAI_HMAC;
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg);
+ }
+}
+
+static inline void
+caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
+{
+ switch (ses->cipher_alg) {
+ case RTE_CRYPTO_CIPHER_NULL:
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ alginfo_c->algtype = OP_ALG_ALGSEL_AES;
+ alginfo_c->algmode = OP_ALG_AAI_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ alginfo_c->algtype = OP_ALG_ALGSEL_3DES;
+ alginfo_c->algmode = OP_ALG_AAI_CBC;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ alginfo_c->algtype = OP_ALG_ALGSEL_AES;
+ alginfo_c->algmode = OP_ALG_AAI_CTR;
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg);
+ }
+}
+
+static inline void
+caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
+{
+ switch (ses->aead_alg) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ alginfo->algtype = OP_ALG_ALGSEL_AES;
+ alginfo->algmode = OP_ALG_AAI_GCM;
+ break;
+ default:
+ PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg);
+ }
+}
+
+
+/* prepare command block of the session */
+static int
+dpaa_sec_prep_cdb(dpaa_sec_session *ses)
+{
+ struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
+ uint32_t shared_desc_len = 0;
+ struct sec_cdb *cdb = &ses->qp->cdb;
+ int err;
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ int swap = false;
+#else
+ int swap = true;
+#endif
+
+ memset(cdb, 0, sizeof(struct sec_cdb));
+
+ if (is_cipher_only(ses)) {
+ caam_cipher_alg(ses, &alginfo_c);
+ if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
+ PMD_TX_LOG(ERR, "not supported cipher alg\n");
+ return -ENOTSUP;
+ }
+
+ alginfo_c.key = (uint64_t)ses->cipher_key.data;
+ alginfo_c.keylen = ses->cipher_key.length;
+ alginfo_c.key_enc_flags = 0;
+ alginfo_c.key_type = RTA_DATA_IMM;
+
+ shared_desc_len = cnstr_shdsc_blkcipher(
+ cdb->sh_desc, true,
+ swap, &alginfo_c,
+ NULL,
+ ses->iv.length,
+ ses->dir);
+ } else if (is_auth_only(ses)) {
+ caam_auth_alg(ses, &alginfo_a);
+ if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
+ PMD_TX_LOG(ERR, "not supported auth alg\n");
+ return -ENOTSUP;
+ }
+
+ alginfo_a.key = (uint64_t)ses->auth_key.data;
+ alginfo_a.keylen = ses->auth_key.length;
+ alginfo_a.key_enc_flags = 0;
+ alginfo_a.key_type = RTA_DATA_IMM;
+
+ shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
+ swap, &alginfo_a,
+ !ses->dir,
+ ses->digest_length);
+ } else if (is_aead(ses)) {
+ caam_aead_alg(ses, &alginfo);
+ if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
+ PMD_TX_LOG(ERR, "not supported aead alg\n");
+ return -ENOTSUP;
+ }
+ alginfo.key = (uint64_t)ses->aead_key.data;
+ alginfo.keylen = ses->aead_key.length;
+ alginfo.key_enc_flags = 0;
+ alginfo.key_type = RTA_DATA_IMM;
+
+ if (ses->dir == DIR_ENC)
+ shared_desc_len = cnstr_shdsc_gcm_encap(
+ cdb->sh_desc, true, swap,
+ &alginfo,
+ ses->iv.length,
+ ses->digest_length);
+ else
+ shared_desc_len = cnstr_shdsc_gcm_decap(
+ cdb->sh_desc, true, swap,
+ &alginfo,
+ ses->iv.length,
+ ses->digest_length);
+ } else {
+ caam_cipher_alg(ses, &alginfo_c);
+ if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
+ PMD_TX_LOG(ERR, "not supported cipher alg\n");
+ return -ENOTSUP;
+ }
+
+ alginfo_c.key = (uint64_t)ses->cipher_key.data;
+ alginfo_c.keylen = ses->cipher_key.length;
+ alginfo_c.key_enc_flags = 0;
+ alginfo_c.key_type = RTA_DATA_IMM;
+
+ caam_auth_alg(ses, &alginfo_a);
+ if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
+ PMD_TX_LOG(ERR, "not supported auth alg\n");
+ return -ENOTSUP;
+ }
+
+ alginfo_a.key = (uint64_t)ses->auth_key.data;
+ alginfo_a.keylen = ses->auth_key.length;
+ alginfo_a.key_enc_flags = 0;
+ alginfo_a.key_type = RTA_DATA_IMM;
+
+ cdb->sh_desc[0] = alginfo_c.keylen;
+ cdb->sh_desc[1] = alginfo_a.keylen;
+ err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
+ MIN_JOB_DESC_SIZE,
+ (unsigned int *)cdb->sh_desc,
+ &cdb->sh_desc[2], 2);
+
+ if (err < 0) {
+ PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths");
+ return err;
+ }
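+		/* rta_inline_query() fills sh_desc[2] with a bitmask: bit 0
+		 * set means the cipher key fits inline in the descriptor,
+		 * bit 1 means the auth key does; keys that do not fit are
+		 * passed by pointer (RTA_DATA_PTR) below instead.
+		 */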
+ if (cdb->sh_desc[2] & 1)
+ alginfo_c.key_type = RTA_DATA_IMM;
+ else {
+ alginfo_c.key = (uint64_t)dpaa_mem_vtop(
+ (void *)alginfo_c.key);
+ alginfo_c.key_type = RTA_DATA_PTR;
+ }
+ if (cdb->sh_desc[2] & (1<<1))
+ alginfo_a.key_type = RTA_DATA_IMM;
+ else {
+ alginfo_a.key = (uint64_t)dpaa_mem_vtop(
+ (void *)alginfo_a.key);
+ alginfo_a.key_type = RTA_DATA_PTR;
+ }
+ cdb->sh_desc[0] = 0;
+ cdb->sh_desc[1] = 0;
+ cdb->sh_desc[2] = 0;
+
+		/* auth_only_len is set to 0 here and will be overwritten
+		 * in the FD for each packet.
+		 */
+ shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
+ true, swap, &alginfo_c, &alginfo_a,
+ ses->iv.length, 0,
+ ses->digest_length, ses->dir);
+ }
+ cdb->sh_hdr.hi.field.idlen = shared_desc_len;
+ cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
+ cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
+
+ return 0;
+}
+
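+/* Pull up to 'len' frames off 'fq' with a volatile dequeue command (VDQCR)
+ * and poll the portal until the command completes.
+ */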
+static inline unsigned int
+dpaa_volatile_deq(struct qman_fq *fq, unsigned int len, bool exact)
+{
+ unsigned int pkts = 0;
+ int ret;
+ struct qm_mcr_queryfq_np np;
+ enum qman_fq_state state;
+ uint32_t flags;
+ uint32_t vdqcr;
+
+ qman_query_fq_np(fq, &np);
+ if (np.frm_cnt) {
+ vdqcr = QM_VDQCR_NUMFRAMES_SET(len);
+ if (exact)
+ vdqcr |= QM_VDQCR_EXACT;
+ ret = qman_volatile_dequeue(fq, 0, vdqcr);
+ if (ret)
+ return 0;
+ do {
+ pkts += qman_poll_dqrr(len);
+ qman_fq_state(fq, &state, &flags);
+ } while (flags & QMAN_FQ_STATE_VDQCR);
+ }
+ return pkts;
+}
+
+/* the qp is lockless; it must be accessed by only one thread */
+static int
+dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
+{
+ struct qman_fq *fq;
+
+ fq = &qp->outq;
+ dpaa_sec_op_nb = 0;
+ dpaa_sec_ops = ops;
+
+ if (unlikely(nb_ops > DPAA_SEC_BURST))
+ nb_ops = DPAA_SEC_BURST;
+
+ return dpaa_volatile_deq(fq, nb_ops, 1);
+}
+
+/**
+ * packet looks like:
+ * |<----data_len------->|
+ * |ip_header|ah_header|icv|payload|
+ * ^
+ * |
+ * mbuf data pointer (rte_pktmbuf_mtod(mbuf))
+ */
+static inline struct dpaa_sec_job *
+build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct rte_mbuf *mbuf = sym->m_src;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg;
+ rte_iova_t start_addr;
+ uint8_t *old_digest;
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+ old_digest = ctx->digest;
+
+ start_addr = rte_pktmbuf_iova(mbuf);
+ /* output */
+ sg = &cf->sg[0];
+ qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
+ sg->length = ses->digest_length;
+ cpu_to_hw_sg(sg);
+
+ /* input */
+ sg = &cf->sg[1];
+ if (is_decode(ses)) {
+ /* need to extend the input to a compound frame */
+ sg->extension = 1;
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
+ sg->length = sym->auth.data.length + ses->digest_length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ sg = &cf->sg[2];
+ /* hash result or digest, save digest first */
+ rte_memcpy(old_digest, sym->auth.digest.data,
+ ses->digest_length);
+ qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
+ sg->length = sym->auth.data.length;
+ cpu_to_hw_sg(sg);
+
+ /* let's check digest by hw */
+ start_addr = dpaa_mem_vtop(old_digest);
+ sg++;
+ qm_sg_entry_set64(sg, start_addr);
+ sg->length = ses->digest_length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ } else {
+ qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
+ sg->length = sym->auth.data.length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ }
+
+ return cf;
+}
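
For reference, the compound-frame layout that build_auth_only() sets up for the decode path can be pictured roughly as:

	/*
	 * cf->sg[0] -> digest output buffer (ses->digest_length bytes)
	 * cf->sg[1] -> extension: input SG list starting at cf->sg[2]
	 *     cf->sg[2] -> auth region in the mbuf (sym->auth.data)
	 *     cf->sg[3] -> saved copy of the received digest (ctx->digest),
	 *                  which SEC checks against its own computation
	 */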
+
+static inline struct dpaa_sec_job *
+build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg;
+ rte_iova_t src_start_addr, dst_start_addr;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+
+ src_start_addr = rte_pktmbuf_iova(sym->m_src);
+
+ if (sym->m_dst)
+ dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
+ else
+ dst_start_addr = src_start_addr;
+
+ /* output */
+ sg = &cf->sg[0];
+ qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
+ sg->length = sym->cipher.data.length + ses->iv.length;
+ cpu_to_hw_sg(sg);
+
+ /* input */
+ sg = &cf->sg[1];
+
+ /* need to extend the input to a compound frame */
+ sg->extension = 1;
+ sg->final = 1;
+ sg->length = sym->cipher.data.length + ses->iv.length;
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
+ cpu_to_hw_sg(sg);
+
+ sg = &cf->sg[2];
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ cpu_to_hw_sg(sg);
+
+ sg++;
+ qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
+ sg->length = sym->cipher.data.length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ return cf;
+}
+
+static inline struct dpaa_sec_job *
+build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg;
+ uint32_t length = 0;
+ rte_iova_t src_start_addr, dst_start_addr;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+
+ src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
+
+ if (sym->m_dst)
+ dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
+ else
+ dst_start_addr = src_start_addr;
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+
+ /* input */
+ rte_prefetch0(cf->sg);
+ sg = &cf->sg[2];
+ qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+ if (is_encode(ses)) {
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+
+ sg++;
+ if (ses->auth_only_len) {
+ qm_sg_entry_set64(sg,
+ dpaa_mem_vtop(sym->aead.aad.data));
+ sg->length = ses->auth_only_len;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+ sg++;
+ }
+ qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
+ sg->length = sym->aead.data.length;
+ length += sg->length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ } else {
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+
+ sg++;
+ if (ses->auth_only_len) {
+ qm_sg_entry_set64(sg,
+ dpaa_mem_vtop(sym->aead.aad.data));
+ sg->length = ses->auth_only_len;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+ sg++;
+ }
+ qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
+ sg->length = sym->aead.data.length;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+
+ memcpy(ctx->digest, sym->aead.digest.data,
+ ses->digest_length);
+ sg++;
+
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ sg->length = ses->digest_length;
+ length += sg->length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ }
+ /* input compound frame */
+ cf->sg[1].length = length;
+ cf->sg[1].extension = 1;
+ cf->sg[1].final = 1;
+ cpu_to_hw_sg(&cf->sg[1]);
+
+ /* output */
+ sg++;
+ qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(sg,
+ dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
+ sg->length = sym->aead.data.length + ses->auth_only_len;
+ length = sg->length;
+ if (is_encode(ses)) {
+ cpu_to_hw_sg(sg);
+ /* set auth output */
+ sg++;
+ qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
+ sg->length = ses->digest_length;
+ length += sg->length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ /* output compound frame */
+ cf->sg[0].length = length;
+ cf->sg[0].extension = 1;
+ cpu_to_hw_sg(&cf->sg[0]);
+
+ return cf;
+}
+
+static inline struct dpaa_sec_job *
+build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
+{
+ struct rte_crypto_sym_op *sym = op->sym;
+ struct dpaa_sec_job *cf;
+ struct dpaa_sec_op_ctx *ctx;
+ struct qm_sg_entry *sg;
+ rte_iova_t src_start_addr, dst_start_addr;
+ uint32_t length = 0;
+ uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ses->iv.offset);
+
+ src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
+ if (sym->m_dst)
+ dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
+ else
+ dst_start_addr = src_start_addr;
+
+ ctx = dpaa_sec_alloc_ctx(ses);
+ if (!ctx)
+ return NULL;
+
+ cf = &ctx->job;
+ ctx->op = op;
+
+ /* input */
+ rte_prefetch0(cf->sg);
+ sg = &cf->sg[2];
+ qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
+ if (is_encode(ses)) {
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+
+ sg++;
+ qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
+ sg->length = sym->auth.data.length;
+ length += sg->length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ } else {
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
+ sg->length = ses->iv.length;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+
+ sg++;
+
+ qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
+ sg->length = sym->auth.data.length;
+ length += sg->length;
+ cpu_to_hw_sg(sg);
+
+ memcpy(ctx->digest, sym->auth.digest.data,
+ ses->digest_length);
+ sg++;
+
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
+ sg->length = ses->digest_length;
+ length += sg->length;
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+ }
+ /* input compound frame */
+ cf->sg[1].length = length;
+ cf->sg[1].extension = 1;
+ cf->sg[1].final = 1;
+ cpu_to_hw_sg(&cf->sg[1]);
+
+ /* output */
+ sg++;
+ qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
+ qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
+ sg->length = sym->cipher.data.length;
+ length = sg->length;
+ if (is_encode(ses)) {
+ cpu_to_hw_sg(sg);
+ /* set auth output */
+ sg++;
+ qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
+ sg->length = ses->digest_length;
+ length += sg->length;
+ }
+ sg->final = 1;
+ cpu_to_hw_sg(sg);
+
+ /* output compound frame */
+ cf->sg[0].length = length;
+ cf->sg[0].extension = 1;
+ cpu_to_hw_sg(&cf->sg[0]);
+
+ return cf;
+}
+
+static int
+dpaa_sec_enqueue_op(struct rte_crypto_op *op, struct dpaa_sec_qp *qp)
+{
+ struct dpaa_sec_job *cf;
+ dpaa_sec_session *ses;
+ struct qm_fd fd;
+ int ret;
+ uint32_t auth_only_len = op->sym->auth.data.length -
+ op->sym->cipher.data.length;
+
+ ses = (dpaa_sec_session *)get_session_private_data(op->sym->session,
+ cryptodev_driver_id);
+
+ if (unlikely(!qp->ses || qp->ses != ses)) {
+ qp->ses = ses;
+ ses->qp = qp;
+ ret = dpaa_sec_prep_cdb(ses);
+ if (ret)
+ return ret;
+ }
+
+	/* Segmented buffers are not supported. */
+ if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ return -ENOTSUP;
+ }
+ if (is_auth_only(ses)) {
+ cf = build_auth_only(op, ses);
+ } else if (is_cipher_only(ses)) {
+ cf = build_cipher_only(op, ses);
+ } else if (is_aead(ses)) {
+ cf = build_cipher_auth_gcm(op, ses);
+ auth_only_len = ses->auth_only_len;
+ } else if (is_auth_cipher(ses)) {
+ cf = build_cipher_auth(op, ses);
+ } else {
+ PMD_TX_LOG(ERR, "not supported sec op");
+ return -ENOTSUP;
+ }
+ if (unlikely(!cf))
+ return -ENOMEM;
+
+ memset(&fd, 0, sizeof(struct qm_fd));
+ qm_fd_addr_set64(&fd, dpaa_mem_vtop(cf->sg));
+ fd._format1 = qm_fd_compound;
+ fd.length29 = 2 * sizeof(struct qm_sg_entry);
+	/* auth_only_len is set to 0 in the descriptor and is overwritten
+	 * here in fd.cmd, which updates the DPOVRD register.
+	 */
+ if (auth_only_len)
+ fd.cmd = 0x80000000 | auth_only_len;
+ do {
+ ret = qman_enqueue(&qp->inq, &fd, 0);
+ } while (ret != 0);
+
+ return 0;
+}
+
+static uint16_t
+dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+	/* Transmit the frames to the given device and queue pair */
+ uint32_t loop;
+ int32_t ret;
+ struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
+ uint16_t num_tx = 0;
+
+ if (unlikely(nb_ops == 0))
+ return 0;
+
+	/* Prepare each packet to be sent */
+ for (loop = 0; loop < nb_ops; loop++) {
+ if (ops[loop]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
+ PMD_TX_LOG(ERR, "sessionless crypto op not supported");
+ return 0;
+ }
+ ret = dpaa_sec_enqueue_op(ops[loop], dpaa_qp);
+ if (!ret)
+ num_tx++;
+ }
+ dpaa_qp->tx_pkts += num_tx;
+ dpaa_qp->tx_errs += nb_ops - num_tx;
+
+ return num_tx;
+}
+
+static uint16_t
+dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ uint16_t num_rx;
+ struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
+
+ num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
+
+ dpaa_qp->rx_pkts += num_rx;
+ dpaa_qp->rx_errs += nb_ops - num_rx;
+
+ PMD_RX_LOG(DEBUG, "SEC Received %d Packets\n", num_rx);
+
+ return num_rx;
+}
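
From the application side these two handlers sit behind the generic cryptodev burst API; a minimal polling loop over this PMD might look like the sketch below (dev_id, qp_id and the populated ops array are assumed to exist):

	uint16_t sent, got;
	struct rte_crypto_op *deq_ops[DPAA_SEC_BURST];

	sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
	do {
		got = rte_cryptodev_dequeue_burst(dev_id, qp_id, deq_ops,
						  DPAA_SEC_BURST);
		/* inspect deq_ops[i]->status for each completed op */
	} while (got == 0);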
+
+/** Release queue pair */
+static int
+dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
+ uint16_t qp_id)
+{
+ struct dpaa_sec_dev_private *internals;
+ struct dpaa_sec_qp *qp = NULL;
+
+ PMD_INIT_FUNC_TRACE();
+
+ PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d", dev, qp_id);
+
+ internals = dev->data->dev_private;
+ if (qp_id >= internals->max_nb_queue_pairs) {
+ PMD_INIT_LOG(ERR, "Max supported qpid %d",
+ internals->max_nb_queue_pairs);
+ return -EINVAL;
+ }
+
+ qp = &internals->qps[qp_id];
+ qp->internals = NULL;
+ dev->data->queue_pairs[qp_id] = NULL;
+
+ return 0;
+}
+
+/** Setup a queue pair */
+static int
+dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
+ __rte_unused int socket_id,
+ __rte_unused struct rte_mempool *session_pool)
+{
+ struct dpaa_sec_dev_private *internals;
+ struct dpaa_sec_qp *qp = NULL;
+
+ PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
+ dev, qp_id, qp_conf);
+
+ internals = dev->data->dev_private;
+ if (qp_id >= internals->max_nb_queue_pairs) {
+ PMD_INIT_LOG(ERR, "Max supported qpid %d",
+ internals->max_nb_queue_pairs);
+ return -EINVAL;
+ }
+
+ qp = &internals->qps[qp_id];
+ qp->internals = internals;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ return 0;
+}
+
+/** Start queue pair */
+static int
+dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return 0;
+}
+
+/** Stop queue pair */
+static int
+dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return 0;
+}
+
+/** Return the number of allocated queue pairs */
+static uint32_t
+dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the session structure */
+static unsigned int
+dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return sizeof(dpaa_sec_session);
+}
+
+static int
+dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ dpaa_sec_session *session)
+{
+ session->cipher_alg = xform->cipher.algo;
+ session->iv.length = xform->cipher.iv.length;
+ session->iv.offset = xform->cipher.iv.offset;
+ session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
+ PMD_INIT_LOG(ERR, "No Memory for cipher key\n");
+ return -ENOMEM;
+ }
+ session->cipher_key.length = xform->cipher.key.length;
+
+ memcpy(session->cipher_key.data, xform->cipher.key.data,
+ xform->cipher.key.length);
+ session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
+ return 0;
+}
+
+static int
+dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ dpaa_sec_session *session)
+{
+ session->auth_alg = xform->auth.algo;
+ session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
+ PMD_INIT_LOG(ERR, "No Memory for auth key\n");
+ return -ENOMEM;
+ }
+ session->auth_key.length = xform->auth.key.length;
+ session->digest_length = xform->auth.digest_length;
+
+ memcpy(session->auth_key.data, xform->auth.key.data,
+ xform->auth.key.length);
+ session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
+ DIR_ENC : DIR_DEC;
+
+ return 0;
+}
+
+static int
+dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
+ struct rte_crypto_sym_xform *xform,
+ dpaa_sec_session *session)
+{
+ session->aead_alg = xform->aead.algo;
+ session->iv.length = xform->aead.iv.length;
+ session->iv.offset = xform->aead.iv.offset;
+ session->auth_only_len = xform->aead.aad_length;
+ session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
+ RTE_CACHE_LINE_SIZE);
+ if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
+ PMD_INIT_LOG(ERR, "No Memory for aead key\n");
+ return -ENOMEM;
+ }
+ session->aead_key.length = xform->aead.key.length;
+ session->digest_length = xform->aead.digest_length;
+
+ memcpy(session->aead_key.data, xform->aead.key.data,
+ xform->aead.key.length);
+ session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+ DIR_ENC : DIR_DEC;
+
+ return 0;
+}
+
+static int
+dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev, uint16_t qp_id, void *ses)
+{
+ dpaa_sec_session *sess = ses;
+ struct dpaa_sec_qp *qp;
+
+ PMD_INIT_FUNC_TRACE();
+
+ qp = dev->data->queue_pairs[qp_id];
+ if (qp->ses != NULL) {
+ PMD_INIT_LOG(ERR, "qp in-use by another session\n");
+ return -EBUSY;
+ }
+
+ qp->ses = sess;
+ sess->qp = qp;
+
+ return dpaa_sec_prep_cdb(sess);
+}
+
+static int
+dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev, uint16_t qp_id, void *ses)
+{
+ dpaa_sec_session *sess = ses;
+ struct dpaa_sec_qp *qp;
+
+ PMD_INIT_FUNC_TRACE();
+
+ qp = dev->data->queue_pairs[qp_id];
+ if (qp->ses != NULL) {
+ qp->ses = NULL;
+ sess->qp = NULL;
+ return 0;
+ }
+
+ PMD_DRV_LOG(ERR, "No session attached to qp");
+ return -EINVAL;
+}
+
+static int
+dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform, void *sess)
+{
+ struct dpaa_sec_dev_private *internals = dev->data->dev_private;
+ dpaa_sec_session *session = sess;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(sess == NULL)) {
+ RTE_LOG(ERR, PMD, "invalid session struct\n");
+ return -EINVAL;
+ }
+
+ /* Default IV length = 0 */
+ session->iv.length = 0;
+
+ /* Cipher Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
+ session->auth_alg = RTE_CRYPTO_AUTH_NULL;
+ dpaa_sec_cipher_init(dev, xform, session);
+
+ /* Authentication Only */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next == NULL) {
+ session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
+ dpaa_sec_auth_init(dev, xform, session);
+
+ /* Cipher then Authenticate */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ dpaa_sec_cipher_init(dev, xform, session);
+ dpaa_sec_auth_init(dev, xform->next, session);
+ } else {
+ PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
+ return -EINVAL;
+ }
+
+ /* Authenticate then Cipher */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+ dpaa_sec_auth_init(dev, xform, session);
+ dpaa_sec_cipher_init(dev, xform->next, session);
+ } else {
+ PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
+ return -EINVAL;
+ }
+
+ /* AEAD operation for AES-GCM kind of Algorithms */
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
+ xform->next == NULL) {
+ dpaa_sec_aead_init(dev, xform, session);
+
+ } else {
+ PMD_DRV_LOG(ERR, "Invalid crypto type");
+ return -EINVAL;
+ }
+ session->ctx_pool = internals->ctx_pool;
+
+ return 0;
+}
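
For illustration, the cipher-then-authenticate branch above corresponds to a chained xform that an application would build roughly like this (algorithms, key and digest lengths are hypothetical):

	struct rte_crypto_sym_xform auth_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.auth = {
			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
			.key = { .data = auth_key, .length = 20 },
			.digest_length = 20,
		},
	};
	struct rte_crypto_sym_xform cipher_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.next = &auth_xform,
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
			.key = { .data = cipher_key, .length = 16 },
			.iv = { .offset = IV_OFFSET, .length = 16 },
		},
	};

The chain head (cipher_xform here) is then handed to the session setup path, which walks xform->next exactly as dpaa_sec_set_session_parameters() does above.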
+
+static int
+dpaa_sec_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ void *sess_private_data;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CDEV_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
+ if (ret != 0) {
+ PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure "
+ "session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+/** Clear the memory of session so it doesn't leave key material behind */
+static void
+dpaa_sec_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ PMD_INIT_FUNC_TRACE();
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+ dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
+
+ if (sess_priv) {
+ rte_free(s->cipher_key.data);
+ rte_free(s->auth_key.data);
+ memset(s, 0, sizeof(dpaa_sec_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+static int
+dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
+ struct rte_cryptodev_config *config __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return 0;
+}
+
+static int
+dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+ return 0;
+}
+
+static void
+dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+}
+
+static int
+dpaa_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+ return 0;
+}
+
+static void
+dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct dpaa_sec_dev_private *internals = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+ if (info != NULL) {
+ info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
+ info->feature_flags = dev->feature_flags;
+ info->capabilities = dpaa_sec_capabilities;
+ info->sym.max_nb_sessions = internals->max_nb_sessions;
+ info->sym.max_nb_sessions_per_qp =
+ RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS / RTE_MAX_NB_SEC_QPS;
+ info->driver_id = cryptodev_driver_id;
+ }
+}
+
+static struct rte_cryptodev_ops crypto_ops = {
+ .dev_configure = dpaa_sec_dev_configure,
+ .dev_start = dpaa_sec_dev_start,
+ .dev_stop = dpaa_sec_dev_stop,
+ .dev_close = dpaa_sec_dev_close,
+ .dev_infos_get = dpaa_sec_dev_infos_get,
+ .queue_pair_setup = dpaa_sec_queue_pair_setup,
+ .queue_pair_release = dpaa_sec_queue_pair_release,
+ .queue_pair_start = dpaa_sec_queue_pair_start,
+ .queue_pair_stop = dpaa_sec_queue_pair_stop,
+ .queue_pair_count = dpaa_sec_queue_pair_count,
+ .session_get_size = dpaa_sec_session_get_size,
+ .session_configure = dpaa_sec_session_configure,
+ .session_clear = dpaa_sec_session_clear,
+ .qp_attach_session = dpaa_sec_qp_attach_sess,
+ .qp_detach_session = dpaa_sec_qp_detach_sess,
+};
+
+static int
+dpaa_sec_uninit(struct rte_cryptodev *dev)
+{
+	struct dpaa_sec_dev_private *internals;
+
+	if (dev == NULL)
+		return -ENODEV;
+
+	internals = dev->data->dev_private;
+	rte_mempool_free(internals->ctx_pool);
+	rte_free(internals);
+
+	PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u",
+ dev->data->name, rte_socket_id());
+
+ return 0;
+}
+
+static int
+dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
+{
+ struct dpaa_sec_dev_private *internals;
+ struct dpaa_sec_qp *qp;
+ uint32_t i;
+ int ret;
+ char str[20];
+
+ PMD_INIT_FUNC_TRACE();
+
+ cryptodev->driver_id = cryptodev_driver_id;
+ cryptodev->dev_ops = &crypto_ops;
+
+ cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
+ cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
+ cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+ internals = cryptodev->data->dev_private;
+ internals->max_nb_queue_pairs = RTE_MAX_NB_SEC_QPS;
+ internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
+
+ for (i = 0; i < internals->max_nb_queue_pairs; i++) {
+ /* init qman fq for queue pair */
+ qp = &internals->qps[i];
+ ret = dpaa_sec_init_tx(&qp->outq);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "config tx of queue pair %d", i);
+ goto init_error;
+ }
+ ret = dpaa_sec_init_rx(&qp->inq, dpaa_mem_vtop(&qp->cdb),
+ qman_fq_fqid(&qp->outq));
+ if (ret) {
+ PMD_INIT_LOG(ERR, "config rx of queue pair %d", i);
+ goto init_error;
+ }
+ }
+
+ sprintf(str, "ctx_pool_%d", cryptodev->data->dev_id);
+ internals->ctx_pool = rte_mempool_create((const char *)str,
+ CTX_POOL_NUM_BUFS,
+ CTX_POOL_BUF_SIZE,
+ CTX_POOL_CACHE_SIZE, 0,
+ NULL, NULL, NULL, NULL,
+ SOCKET_ID_ANY, 0);
+ if (!internals->ctx_pool) {
+ RTE_LOG(ERR, PMD, "%s create failed\n", str);
+ goto init_error;
+ }
+
+	PMD_INIT_LOG(DEBUG, "driver %s: created", cryptodev->data->name);
+ return 0;
+
+init_error:
+	PMD_INIT_LOG(ERR, "driver %s: create failed", cryptodev->data->name);
+
+ dpaa_sec_uninit(cryptodev);
+ return -EFAULT;
+}
+
+static int
+cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
+ struct rte_dpaa_device *dpaa_dev)
+{
+ struct rte_cryptodev *cryptodev;
+ char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ int retval;
+
+ sprintf(cryptodev_name, "dpaa_sec-%d", dpaa_dev->id.dev_id);
+
+ cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
+ if (cryptodev == NULL)
+ return -ENOMEM;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ cryptodev->data->dev_private = rte_zmalloc_socket(
+ "cryptodev private structure",
+ sizeof(struct dpaa_sec_dev_private),
+ RTE_CACHE_LINE_SIZE,
+ rte_socket_id());
+
+ if (cryptodev->data->dev_private == NULL)
+ rte_panic("Cannot allocate memzone for private "
+ "device data");
+ }
+
+ dpaa_dev->crypto_dev = cryptodev;
+ cryptodev->device = &dpaa_dev->device;
+ cryptodev->device->driver = &dpaa_drv->driver;
+
+ /* init user callbacks */
+ TAILQ_INIT(&(cryptodev->link_intr_cbs));
+
+ /* if sec device version is not configured */
+ if (!rta_get_sec_era()) {
+ const struct device_node *caam_node;
+
+ for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
+ const uint32_t *prop = of_get_property(caam_node,
+ "fsl,sec-era",
+ NULL);
+ if (prop) {
+ rta_set_sec_era(
+ INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
+ break;
+ }
+ }
+ }
+
+ /* Invoke PMD device initialization function */
+ retval = dpaa_sec_dev_init(cryptodev);
+ if (retval == 0)
+ return 0;
+
+ /* In case of error, cleanup is done */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ rte_free(cryptodev->data->dev_private);
+
+ rte_cryptodev_pmd_release_device(cryptodev);
+
+ return -ENXIO;
+}
+
+static int
+cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
+{
+ struct rte_cryptodev *cryptodev;
+ int ret;
+
+ cryptodev = dpaa_dev->crypto_dev;
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ ret = dpaa_sec_uninit(cryptodev);
+ if (ret)
+ return ret;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+static struct rte_dpaa_driver rte_dpaa_sec_driver = {
+ .drv_type = FSL_DPAA_CRYPTO,
+ .driver = {
+ .name = "DPAA SEC PMD"
+ },
+ .probe = cryptodev_dpaa_sec_probe,
+ .remove = cryptodev_dpaa_sec_remove,
+};
+
+static struct cryptodev_driver dpaa_sec_crypto_drv;
+
+RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver,
+ cryptodev_driver_id);
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
new file mode 100644
index 00000000..af3f2550
--- /dev/null
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -0,0 +1,402 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2016 NXP.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _DPAA_SEC_H_
+#define _DPAA_SEC_H_
+
+#define NUM_POOL_CHANNELS 4
+#define DPAA_SEC_BURST 32
+#define DPAA_SEC_ALG_UNSUPPORT (-1)
+#define TDES_CBC_IV_LEN 8
+#define AES_CBC_IV_LEN 16
+#define AES_CTR_IV_LEN 16
+#define AES_GCM_IV_LEN 12
+
+/* Minimum job descriptor consists of a one-word job descriptor HEADER and
+ * a pointer to the shared descriptor.
+ */
+#define MIN_JOB_DESC_SIZE (CAAM_CMD_SZ + CAAM_PTR_SZ)
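+/* With the usual 4-byte CAAM command word and 8-byte pointer that is
+ * 12 bytes (an assumption; the real sizes come from the RTA headers).
+ */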
+/* CTX_POOL_NUM_BUFS is set as per the ipsec-secgw application */
+#define CTX_POOL_NUM_BUFS 32000
+#define CTX_POOL_BUF_SIZE sizeof(struct dpaa_sec_op_ctx)
+#define CTX_POOL_CACHE_SIZE 512
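+
+/*
+ * Rough footprint sketch (an estimate, not a measured value): each
+ * dpaa_sec_op_ctx is dominated by its 16-entry sg table, i.e. roughly
+ * 300-350 bytes, so:
+ *	32000 bufs * ~320 B ~= 10 MB of context pool per device.
+ */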
+
+#define DIR_ENC 1
+#define DIR_DEC 0
+
+enum dpaa_sec_op_type {
+ DPAA_SEC_NONE, /*!< No Cipher operations*/
+ DPAA_SEC_CIPHER,/*!< CIPHER operations */
+ DPAA_SEC_AUTH, /*!< Authentication Operations */
+ DPAA_SEC_AEAD, /*!< Authenticated Encryption with associated data */
+ DPAA_SEC_IPSEC, /*!< IPSEC protocol operations*/
+ DPAA_SEC_PDCP, /*!< PDCP protocol operations*/
+ DPAA_SEC_PKC, /*!< Public Key Cryptographic Operations */
+ DPAA_SEC_MAX
+};
+
+typedef struct dpaa_sec_session_entry {
+ uint8_t dir; /*!< Operation Direction */
+ enum rte_crypto_cipher_algorithm cipher_alg; /*!< Cipher Algorithm*/
+ enum rte_crypto_auth_algorithm auth_alg; /*!< Authentication Algorithm*/
+	enum rte_crypto_aead_algorithm aead_alg; /*!< AEAD Algorithm */
+ union {
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } aead_key;
+ struct {
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } cipher_key;
+ struct {
+ uint8_t *data; /**< pointer to key data */
+ size_t length; /**< key length in bytes */
+ } auth_key;
+ };
+ };
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv; /**< Initialisation vector parameters */
+ uint16_t auth_only_len; /*!< Length of data for Auth only */
+ uint32_t digest_length;
+ struct dpaa_sec_qp *qp;
+ struct rte_mempool *ctx_pool; /* session mempool for dpaa_sec_op_ctx */
+} dpaa_sec_session;
+
+#define DPAA_SEC_MAX_DESC_SIZE 64
+/* code or cmd block to caam */
+struct sec_cdb {
+ struct {
+ union {
+ uint32_t word;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ uint16_t rsvd63_48;
+ unsigned int rsvd47_39:9;
+ unsigned int idlen:7;
+#else
+ unsigned int idlen:7;
+ unsigned int rsvd47_39:9;
+ uint16_t rsvd63_48;
+#endif
+ } field;
+ } __packed hi;
+
+ union {
+ uint32_t word;
+ struct {
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ unsigned int rsvd31_30:2;
+ unsigned int fsgt:1;
+ unsigned int lng:1;
+ unsigned int offset:2;
+ unsigned int abs:1;
+ unsigned int add_buf:1;
+ uint8_t pool_id;
+ uint16_t pool_buffer_size;
+#else
+ uint16_t pool_buffer_size;
+ uint8_t pool_id;
+ unsigned int add_buf:1;
+ unsigned int abs:1;
+ unsigned int offset:2;
+ unsigned int lng:1;
+ unsigned int fsgt:1;
+ unsigned int rsvd31_30:2;
+#endif
+ } field;
+ } __packed lo;
+ } __packed sh_hdr;
+
+ uint32_t sh_desc[DPAA_SEC_MAX_DESC_SIZE];
+};
+
+struct dpaa_sec_qp {
+ struct dpaa_sec_dev_private *internals;
+ struct sec_cdb cdb; /* cmd block associated with qp */
+ dpaa_sec_session *ses; /* session associated with qp */
+ struct qman_fq inq;
+ struct qman_fq outq;
+ int rx_pkts;
+ int rx_errs;
+ int tx_pkts;
+ int tx_errs;
+};
+
+#define RTE_MAX_NB_SEC_QPS RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS
+/* internal sec queue interface */
+struct dpaa_sec_dev_private {
+ void *sec_hw;
+ struct rte_mempool *ctx_pool; /* per dev mempool for dpaa_sec_op_ctx */
+ struct dpaa_sec_qp qps[RTE_MAX_NB_SEC_QPS]; /* i/o queue for sec */
+ unsigned int max_nb_queue_pairs;
+ unsigned int max_nb_sessions;
+};
+
+#define MAX_SG_ENTRIES 16
+#define SG_CACHELINE_0 0
+#define SG_CACHELINE_1 4
+#define SG_CACHELINE_2 8
+#define SG_CACHELINE_3 12
+struct dpaa_sec_job {
+	/* sg[0] output, sg[1] input, others are possible sub-frames */
+ struct qm_sg_entry sg[MAX_SG_ENTRIES];
+};
+
+#define DPAA_MAX_NB_MAX_DIGEST 32
+struct dpaa_sec_op_ctx {
+ struct dpaa_sec_job job;
+ struct rte_crypto_op *op;
+ struct rte_mempool *ctx_pool; /* mempool pointer for dpaa_sec_op_ctx */
+ uint32_t fd_status;
+ uint8_t digest[DPAA_MAX_NB_MAX_DIGEST];
+};
+
+static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA224 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 28,
+ .max = 28,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* AES GCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+			{.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 8,
+ .max = 16,
+ .increment = 4
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 240,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 12,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 16,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
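+
+/*
+ * Usage sketch (application side, not part of this header): entries in the
+ * table above are matched through the generic capability helpers, e.g.:
+ *
+ *	struct rte_cryptodev_sym_capability_idx idx = {
+ *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ *		.algo.cipher = RTE_CRYPTO_CIPHER_AES_CBC,
+ *	};
+ *	const struct rte_cryptodev_symmetric_capability *cap =
+ *		rte_cryptodev_sym_capability_get(dev_id, &idx);
+ */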
+
+#endif /* _DPAA_SEC_H_ */
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec_log.h b/drivers/crypto/dpaa_sec/dpaa_sec_log.h
new file mode 100644
index 00000000..dfe69757
--- /dev/null
+++ b/drivers/crypto/dpaa_sec/dpaa_sec_log.h
@@ -0,0 +1,70 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright NXP 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of NXP nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _DPAA_SEC_LOG_H_
+#define _DPAA_SEC_LOG_H_
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
+
+#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_INIT
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#else
+#define PMD_INIT_FUNC_TRACE() do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_RX
+#define PMD_RX_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_TX
+#define PMD_TX_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, "%s(): " fmt "\n", __func__, ## args)
+#else
+#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
+#endif
+
+#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_DRIVER
+#define PMD_DRV_LOG_RAW(level, fmt, args...) \
+ RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
+#else
+#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
+#endif
+
+#define PMD_DRV_LOG(level, fmt, args...) \
+ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+
+#endif /* _DPAA_SEC_LOG_H_ */
diff --git a/drivers/crypto/dpaa_sec/rte_pmd_dpaa_sec_version.map b/drivers/crypto/dpaa_sec/rte_pmd_dpaa_sec_version.map
new file mode 100644
index 00000000..a70bd197
--- /dev/null
+++ b/drivers/crypto/dpaa_sec/rte_pmd_dpaa_sec_version.map
@@ -0,0 +1,4 @@
+DPDK_17.11 {
+
+ local: *;
+};
diff --git a/drivers/crypto/kasumi/Makefile b/drivers/crypto/kasumi/Makefile
index b47cda0c..cf56b7a0 100644
--- a/drivers/crypto/kasumi/Makefile
+++ b/drivers/crypto/kasumi/Makefile
@@ -54,6 +54,9 @@ CFLAGS += -I$(LIBSSO_KASUMI_PATH)
CFLAGS += -I$(LIBSSO_KASUMI_PATH)/include
CFLAGS += -I$(LIBSSO_KASUMI_PATH)/build
LDLIBS += -L$(LIBSSO_KASUMI_PATH)/build -lsso_kasumi
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += rte_kasumi_pmd.c
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd.c b/drivers/crypto/kasumi/rte_kasumi_pmd.c
index 38cd8a9b..f5db5e32 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd.c
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd.c
@@ -31,12 +31,10 @@
*/
#include <rte_common.h>
-#include <rte_config.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_cryptodev_vdev.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
@@ -44,7 +42,6 @@
#define KASUMI_KEY_LENGTH 16
#define KASUMI_IV_LENGTH 8
-#define KASUMI_DIGEST_LENGTH 4
#define KASUMI_MAX_BURST 4
#define BYTE_LEN 8
@@ -261,7 +258,7 @@ process_kasumi_cipher_op_bit(struct rte_crypto_op *op,
/** Generate/verify hash from mbufs with same hash key. */
static int
-process_kasumi_hash_op(struct rte_crypto_op **ops,
+process_kasumi_hash_op(struct kasumi_qp *qp, struct rte_crypto_op **ops,
struct kasumi_session *session,
uint8_t num_ops)
{
@@ -287,8 +284,7 @@ process_kasumi_hash_op(struct rte_crypto_op **ops,
num_bytes = length_in_bits >> 3;
if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
- dst = (uint8_t *)rte_pktmbuf_append(ops[i]->sym->m_src,
- KASUMI_DIGEST_LENGTH);
+ dst = qp->temp_digest;
sso_kasumi_f9_1_buffer(&session->pKeySched_hash, src,
num_bytes, dst);
@@ -296,10 +292,6 @@ process_kasumi_hash_op(struct rte_crypto_op **ops,
if (memcmp(dst, ops[i]->sym->auth.digest.data,
KASUMI_DIGEST_LENGTH) != 0)
ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
- /* Trim area used for digest from mbuf. */
- rte_pktmbuf_trim(ops[i]->sym->m_src,
- KASUMI_DIGEST_LENGTH);
} else {
dst = ops[i]->sym->auth.digest.data;
@@ -327,16 +319,16 @@ process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
session, num_ops);
break;
case KASUMI_OP_ONLY_AUTH:
- processed_ops = process_kasumi_hash_op(ops, session,
+ processed_ops = process_kasumi_hash_op(qp, ops, session,
num_ops);
break;
case KASUMI_OP_CIPHER_AUTH:
processed_ops = process_kasumi_cipher_op(ops, session,
num_ops);
- process_kasumi_hash_op(ops, session, processed_ops);
+ process_kasumi_hash_op(qp, ops, session, processed_ops);
break;
case KASUMI_OP_AUTH_CIPHER:
- processed_ops = process_kasumi_hash_op(ops, session,
+ processed_ops = process_kasumi_hash_op(qp, ops, session,
num_ops);
process_kasumi_cipher_op(ops, session, processed_ops);
break;
@@ -384,15 +376,15 @@ process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session,
session);
break;
case KASUMI_OP_ONLY_AUTH:
- processed_op = process_kasumi_hash_op(&op, session, 1);
+ processed_op = process_kasumi_hash_op(qp, &op, session, 1);
break;
case KASUMI_OP_CIPHER_AUTH:
processed_op = process_kasumi_cipher_op_bit(op, session);
if (processed_op == 1)
- process_kasumi_hash_op(&op, session, 1);
+ process_kasumi_hash_op(qp, &op, session, 1);
break;
case KASUMI_OP_AUTH_CIPHER:
- processed_op = process_kasumi_hash_op(&op, session, 1);
+ processed_op = process_kasumi_hash_op(qp, &op, session, 1);
if (processed_op == 1)
process_kasumi_cipher_op_bit(op, session);
break;
@@ -559,15 +551,17 @@ static int cryptodev_kasumi_remove(struct rte_vdev_device *vdev);
static int
cryptodev_kasumi_create(const char *name,
struct rte_vdev_device *vdev,
- struct rte_crypto_vdev_init_params *init_params)
+ struct rte_cryptodev_pmd_init_params *init_params)
{
struct rte_cryptodev *dev;
struct kasumi_private *internals;
uint64_t cpu_flags = 0;
- if (init_params->name[0] == '\0')
- snprintf(init_params->name, sizeof(init_params->name),
- "%s", name);
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ KASUMI_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
/* Check CPU for supported vector instruction set */
if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX))
@@ -575,14 +569,6 @@ cryptodev_kasumi_create(const char *name,
else
cpu_flags |= RTE_CRYPTODEV_FF_CPU_SSE;
- dev = rte_cryptodev_vdev_pmd_init(init_params->name,
- sizeof(struct kasumi_private), init_params->socket_id,
- vdev);
- if (dev == NULL) {
- KASUMI_LOG_ERR("failed to create cryptodev vdev");
- goto init_error;
- }
-
dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_kasumi_pmd_ops;
@@ -611,11 +597,12 @@ init_error:
static int
cryptodev_kasumi_probe(struct rte_vdev_device *vdev)
{
- struct rte_crypto_vdev_init_params init_params = {
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct kasumi_private),
rte_socket_id(),
- {0}
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
};
const char *name;
const char *input_args;
@@ -625,17 +612,7 @@ cryptodev_kasumi_probe(struct rte_vdev_device *vdev)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
-
- RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
- init_params.socket_id);
- if (init_params.name[0] != '\0')
- RTE_LOG(INFO, PMD, " User defined name = %s\n",
- init_params.name);
- RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
- init_params.max_nb_queue_pairs);
- RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
- init_params.max_nb_sessions);
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
return cryptodev_kasumi_create(name, vdev, &init_params);
}
@@ -643,17 +620,18 @@ cryptodev_kasumi_probe(struct rte_vdev_device *vdev)
static int
cryptodev_kasumi_remove(struct rte_vdev_device *vdev)
{
+ struct rte_cryptodev *cryptodev;
const char *name;
name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;
- RTE_LOG(INFO, PMD, "Closing KASUMI crypto device %s"
- " on numa socket %u\n",
- name, rte_socket_id());
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
- return 0;
+ return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_vdev_driver cryptodev_kasumi_pmd_drv = {
@@ -661,10 +639,13 @@ static struct rte_vdev_driver cryptodev_kasumi_pmd_drv = {
.remove = cryptodev_kasumi_remove
};
+static struct cryptodev_driver kasumi_crypto_drv;
+
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_KASUMI_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_kasumi_pmd_drv, cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(kasumi_crypto_drv, cryptodev_kasumi_pmd_drv,
+ cryptodev_driver_id);
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
index 8033114b..4da12f37 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c
@@ -183,7 +183,7 @@ kasumi_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
"kasumi_pmd_%u_qp_%u",
dev->data->dev_id, qp->id);
- if (n > sizeof(qp->name))
+ if (n >= sizeof(qp->name))
return -1;
return 0;
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_private.h b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
index 0ce2a2e3..5f7044b8 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h
@@ -58,6 +58,8 @@
#define KASUMI_LOG_DBG(fmt, args...)
#endif
+#define KASUMI_DIGEST_LENGTH 4
+
/** private data structure for each virtual KASUMI device */
struct kasumi_private {
unsigned max_nb_queue_pairs;
@@ -78,6 +80,11 @@ struct kasumi_qp {
/**< Session Mempool */
struct rte_cryptodev_stats qp_stats;
/**< Queue pair statistics */
+ uint8_t temp_digest[KASUMI_DIGEST_LENGTH];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
} __rte_cache_aligned;
enum kasumi_operation {
diff --git a/drivers/crypto/mrvl/Makefile b/drivers/crypto/mrvl/Makefile
new file mode 100644
index 00000000..3532f7cf
--- /dev/null
+++ b/drivers/crypto/mrvl/Makefile
@@ -0,0 +1,66 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Marvell International Ltd.
+# Copyright(c) 2017 Semihalf.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(MAKECMDGOALS),config)
+ifeq ($(LIBMUSDK_PATH),)
+$(error "Please define LIBMUSDK_PATH environment variable")
+endif
+endif
+endif
+
+# library name
+LIB = librte_pmd_mrvl_crypto.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(LIBMUSDK_PATH)/include
+CFLAGS += -DMVCONF_ARCH_DMA_ADDR_T_64BIT
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_mrvl_pmd_version.map
+
+# external library dependencies
+LDLIBS += -L$(LIBMUSDK_PATH)/lib -lmusdk
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += rte_mrvl_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += rte_mrvl_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/mrvl/rte_mrvl_compat.h b/drivers/crypto/mrvl/rte_mrvl_compat.h
new file mode 100644
index 00000000..c29fa105
--- /dev/null
+++ b/drivers/crypto/mrvl/rte_mrvl_compat.h
@@ -0,0 +1,50 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_MRVL_COMPAT_H_
+#define _RTE_MRVL_COMPAT_H_
+
+/* Unfortunately, container_of is defined by both DPDK and MUSDK,
+ * so we keep only one definition.
+ *
+ * Note that it is not used in this PMD anyway.
+ */
+#ifdef container_of
+#undef container_of
+#endif
+#include "drivers/mv_sam.h"
+#include "drivers/mv_sam_cio.h"
+#include "drivers/mv_sam_session.h"
+
+#endif /* _RTE_MRVL_COMPAT_H_ */
diff --git a/drivers/crypto/mrvl/rte_mrvl_pmd.c b/drivers/crypto/mrvl/rte_mrvl_pmd.c
new file mode 100644
index 00000000..31f3fe58
--- /dev/null
+++ b/drivers/crypto/mrvl/rte_mrvl_pmd.c
@@ -0,0 +1,857 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_common.h>
+#include <rte_hexdump.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_bus_vdev.h>
+#include <rte_malloc.h>
+#include <rte_cpuflags.h>
+
+#include "rte_mrvl_pmd_private.h"
+
+#define MRVL_MUSDK_DMA_MEMSIZE 41943040
+
+static uint8_t cryptodev_driver_id;
+
+/**
+ * Flags whether a particular crypto algorithm is supported by the PMD/MUSDK.
+ *
+ * The idea is to have the Not Supported value as the default (0).
+ * This way only the proper map sizes need to be defined;
+ * non-initialized entries are not supported by default.
+ */
+enum algo_supported {
+ ALGO_NOT_SUPPORTED = 0,
+ ALGO_SUPPORTED = 1,
+};
+
+/** Map elements for cipher mapping.*/
+struct cipher_params_mapping {
+ enum algo_supported supported; /**< On/Off switch */
+ enum sam_cipher_alg cipher_alg; /**< Cipher algorithm */
+ enum sam_cipher_mode cipher_mode; /**< Cipher mode */
+ unsigned int max_key_len; /**< Maximum key length (in bytes)*/
+}
+/* We want to squeeze multiple maps into one cache line. */
+__rte_aligned(32);
+
+/** Map elements for auth mapping.*/
+struct auth_params_mapping {
+ enum algo_supported supported; /**< On/off switch */
+ enum sam_auth_alg auth_alg; /**< Auth algorithm */
+}
+/* We want to squeeze multiple maps into one cache line. */
+__rte_aligned(32);
+
+/**
+ * Map of supported cipher algorithms.
+ */
+static const
+struct cipher_params_mapping cipher_map[RTE_CRYPTO_CIPHER_LIST_END] = {
+ [RTE_CRYPTO_CIPHER_3DES_CBC] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_3DES,
+ .cipher_mode = SAM_CIPHER_CBC,
+ .max_key_len = BITS2BYTES(192) },
+ [RTE_CRYPTO_CIPHER_3DES_CTR] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_3DES,
+ .cipher_mode = SAM_CIPHER_CTR,
+ .max_key_len = BITS2BYTES(192) },
+ [RTE_CRYPTO_CIPHER_3DES_ECB] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_3DES,
+ .cipher_mode = SAM_CIPHER_ECB,
+ .max_key_len = BITS2BYTES(192) },
+ [RTE_CRYPTO_CIPHER_AES_CBC] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_AES,
+ .cipher_mode = SAM_CIPHER_CBC,
+ .max_key_len = BITS2BYTES(256) },
+ [RTE_CRYPTO_CIPHER_AES_CTR] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_AES,
+ .cipher_mode = SAM_CIPHER_CTR,
+ .max_key_len = BITS2BYTES(256) },
+};
+
+/**
+ * Map of supported auth algorithms.
+ */
+static const
+struct auth_params_mapping auth_map[RTE_CRYPTO_AUTH_LIST_END] = {
+ [RTE_CRYPTO_AUTH_MD5_HMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HMAC_MD5 },
+ [RTE_CRYPTO_AUTH_MD5] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HASH_MD5 },
+ [RTE_CRYPTO_AUTH_SHA1_HMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HMAC_SHA1 },
+ [RTE_CRYPTO_AUTH_SHA1] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HASH_SHA1 },
+ [RTE_CRYPTO_AUTH_SHA224] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HASH_SHA2_224 },
+ [RTE_CRYPTO_AUTH_SHA256_HMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HMAC_SHA2_256 },
+ [RTE_CRYPTO_AUTH_SHA256] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HASH_SHA2_256 },
+ [RTE_CRYPTO_AUTH_SHA384_HMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HMAC_SHA2_384 },
+ [RTE_CRYPTO_AUTH_SHA384] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HASH_SHA2_384 },
+ [RTE_CRYPTO_AUTH_SHA512_HMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HMAC_SHA2_512 },
+ [RTE_CRYPTO_AUTH_SHA512] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_HASH_SHA2_512 },
+ [RTE_CRYPTO_AUTH_AES_GMAC] = {
+ .supported = ALGO_SUPPORTED,
+ .auth_alg = SAM_AUTH_AES_GMAC },
+};
+
+/**
+ * Map of supported aead algorithms.
+ */
+static const
+struct cipher_params_mapping aead_map[RTE_CRYPTO_AEAD_LIST_END] = {
+ [RTE_CRYPTO_AEAD_AES_GCM] = {
+ .supported = ALGO_SUPPORTED,
+ .cipher_alg = SAM_CIPHER_AES,
+ .cipher_mode = SAM_CIPHER_GCM,
+ .max_key_len = BITS2BYTES(256) },
+};
+
+/*
+ *-----------------------------------------------------------------------------
+ * Forward declarations.
+ *-----------------------------------------------------------------------------
+ */
+static int cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev);
+
+/*
+ *-----------------------------------------------------------------------------
+ * Session Preparation.
+ *-----------------------------------------------------------------------------
+ */
+
+/**
+ * Get xform chain order.
+ *
+ * @param xform Pointer to configuration structure chain for crypto operations.
+ * @returns Order of crypto operations.
+ */
+static enum mrvl_crypto_chain_order
+mrvl_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+	/* Currently, Marvell supports at most 2 operations in a chain */
+ if (xform->next != NULL && xform->next->next != NULL)
+ return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
+
+ if (xform->next != NULL) {
+ if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
+ (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER))
+ return MRVL_CRYPTO_CHAIN_AUTH_CIPHER;
+
+ if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
+ (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH))
+ return MRVL_CRYPTO_CHAIN_CIPHER_AUTH;
+ } else {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return MRVL_CRYPTO_CHAIN_AUTH_ONLY;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return MRVL_CRYPTO_CHAIN_CIPHER_ONLY;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
+ return MRVL_CRYPTO_CHAIN_COMBINED;
+ }
+ return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
+}
+
+/**
+ * Set session parameters for cipher part.
+ *
+ * @param sess Crypto session pointer.
+ * @param cipher_xform Pointer to configuration structure for cipher operations.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess,
+ const struct rte_crypto_sym_xform *cipher_xform)
+{
+ /* Make sure we've got proper struct */
+ if (cipher_xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
+ return -EINVAL;
+ }
+
+ /* See if map data is present and valid */
+	if ((cipher_xform->cipher.algo >= RTE_DIM(cipher_map)) ||
+ (cipher_map[cipher_xform->cipher.algo].supported
+ != ALGO_SUPPORTED)) {
+ MRVL_CRYPTO_LOG_ERR("Cipher algorithm not supported!");
+ return -EINVAL;
+ }
+
+ sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
+
+ sess->sam_sess_params.dir =
+ (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
+ sess->sam_sess_params.cipher_alg =
+ cipher_map[cipher_xform->cipher.algo].cipher_alg;
+ sess->sam_sess_params.cipher_mode =
+ cipher_map[cipher_xform->cipher.algo].cipher_mode;
+
+ /* Assume IV will be passed together with data. */
+ sess->sam_sess_params.cipher_iv = NULL;
+
+	/* Check key length against the map's maximum. */
+ if (cipher_xform->cipher.key.length >
+ cipher_map[cipher_xform->cipher.algo].max_key_len) {
+ MRVL_CRYPTO_LOG_ERR("Wrong key length!");
+ return -EINVAL;
+ }
+
+ sess->sam_sess_params.cipher_key_len = cipher_xform->cipher.key.length;
+ sess->sam_sess_params.cipher_key = cipher_xform->cipher.key.data;
+
+ return 0;
+}
+
+/**
+ * Set session parameters for authentication part.
+ *
+ * @param sess Crypto session pointer.
+ * @param auth_xform Pointer to configuration structure for auth operations.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_crypto_set_auth_session_parameters(struct mrvl_crypto_session *sess,
+ const struct rte_crypto_sym_xform *auth_xform)
+{
+ /* Make sure we've got proper struct */
+ if (auth_xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
+ MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
+ return -EINVAL;
+ }
+
+ /* See if map data is present and valid */
+	if ((auth_xform->auth.algo >= RTE_DIM(auth_map)) ||
+ (auth_map[auth_xform->auth.algo].supported != ALGO_SUPPORTED)) {
+ MRVL_CRYPTO_LOG_ERR("Auth algorithm not supported!");
+ return -EINVAL;
+ }
+
+ sess->sam_sess_params.dir =
+ (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
+ SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
+ sess->sam_sess_params.auth_alg =
+ auth_map[auth_xform->auth.algo].auth_alg;
+ sess->sam_sess_params.u.basic.auth_icv_len =
+ auth_xform->auth.digest_length;
+ /* auth_key must be NULL if auth algorithm does not use HMAC */
+ sess->sam_sess_params.auth_key = auth_xform->auth.key.length ?
+ auth_xform->auth.key.data : NULL;
+ sess->sam_sess_params.auth_key_len = auth_xform->auth.key.length;
+
+ return 0;
+}
+
+/**
+ * Set session parameters for aead part.
+ *
+ * @param sess Crypto session pointer.
+ * @param aead_xform Pointer to configuration structure for aead operations.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess,
+ const struct rte_crypto_sym_xform *aead_xform)
+{
+ /* Make sure we've got proper struct */
+ if (aead_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
+ MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
+ return -EINVAL;
+ }
+
+ /* See if map data is present and valid */
+	if ((aead_xform->aead.algo >= RTE_DIM(aead_map)) ||
+ (aead_map[aead_xform->aead.algo].supported
+ != ALGO_SUPPORTED)) {
+ MRVL_CRYPTO_LOG_ERR("AEAD algorithm not supported!");
+ return -EINVAL;
+ }
+
+ sess->sam_sess_params.dir =
+ (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+ SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
+ sess->sam_sess_params.cipher_alg =
+ aead_map[aead_xform->aead.algo].cipher_alg;
+ sess->sam_sess_params.cipher_mode =
+ aead_map[aead_xform->aead.algo].cipher_mode;
+
+ /* Assume IV will be passed together with data. */
+ sess->sam_sess_params.cipher_iv = NULL;
+
+	/* Check key length against the map's maximum. */
+ if (aead_xform->aead.key.length >
+ aead_map[aead_xform->aead.algo].max_key_len) {
+ MRVL_CRYPTO_LOG_ERR("Wrong key length!");
+ return -EINVAL;
+ }
+
+ sess->sam_sess_params.cipher_key = aead_xform->aead.key.data;
+ sess->sam_sess_params.cipher_key_len = aead_xform->aead.key.length;
+
+ if (sess->sam_sess_params.cipher_mode == SAM_CIPHER_GCM)
+ sess->sam_sess_params.auth_alg = SAM_AUTH_AES_GCM;
+
+ sess->sam_sess_params.u.basic.auth_icv_len =
+ aead_xform->aead.digest_length;
+
+ sess->sam_sess_params.u.basic.auth_aad_len =
+ aead_xform->aead.aad_length;
+
+ return 0;
+}
+
+/**
+ * Parse crypto transform chain and setup session parameters.
+ *
+ * @param sess Pointer to crypto session.
+ * @param xform Pointer to configuration structure chain for crypto operations.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+int
+mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *aead_xform = NULL;
+
+ /* Filter out spurious/broken requests */
+ if (xform == NULL)
+ return -EINVAL;
+
+ sess->chain_order = mrvl_crypto_get_chain_order(xform);
+ switch (sess->chain_order) {
+ case MRVL_CRYPTO_CHAIN_CIPHER_AUTH:
+ cipher_xform = xform;
+ auth_xform = xform->next;
+ break;
+ case MRVL_CRYPTO_CHAIN_AUTH_CIPHER:
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ break;
+ case MRVL_CRYPTO_CHAIN_CIPHER_ONLY:
+ cipher_xform = xform;
+ break;
+ case MRVL_CRYPTO_CHAIN_AUTH_ONLY:
+ auth_xform = xform;
+ break;
+ case MRVL_CRYPTO_CHAIN_COMBINED:
+ aead_xform = xform;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((cipher_xform != NULL) &&
+ (mrvl_crypto_set_cipher_session_parameters(
+ sess, cipher_xform) < 0)) {
+ MRVL_CRYPTO_LOG_ERR("Invalid/unsupported cipher parameters");
+ return -EINVAL;
+ }
+
+ if ((auth_xform != NULL) &&
+ (mrvl_crypto_set_auth_session_parameters(
+ sess, auth_xform) < 0)) {
+ MRVL_CRYPTO_LOG_ERR("Invalid/unsupported auth parameters");
+ return -EINVAL;
+ }
+
+ if ((aead_xform != NULL) &&
+ (mrvl_crypto_set_aead_session_parameters(
+ sess, aead_xform) < 0)) {
+ MRVL_CRYPTO_LOG_ERR("Invalid/unsupported aead parameters");
+ return -EINVAL;
+ }
+
+ return 0;
+}
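+
+/*
+ * Illustrative sketch (not driver code): a combined (AEAD) session, as
+ * classified above, is requested through a single xform, roughly:
+ *
+ *	struct rte_crypto_sym_xform aead_xform = {
+ *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ *		.next = NULL,
+ *		.aead.algo = RTE_CRYPTO_AEAD_AES_GCM,
+ *		.aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
+ *	};
+ */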
+
+/*
+ *-----------------------------------------------------------------------------
+ * Process Operations
+ *-----------------------------------------------------------------------------
+ */
+
+/**
+ * Prepare a single request.
+ *
+ * This function translates a DPDK crypto request into one understandable
+ * by MUSDK's SAM. If this is the first request in a session, it also
+ * starts the session.
+ *
+ * @param request Pointer to a pre-allocated and zeroed request buffer [Out].
+ * @param src_bd Pointer to pre-allocated source descriptor [Out].
+ * @param dst_bd Pointer to pre-allocated destination descriptor [Out].
+ * @param op Pointer to DPDK crypto operation struct [In].
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static inline int
+mrvl_request_prepare(struct sam_cio_op_params *request,
+ struct sam_buf_info *src_bd,
+ struct sam_buf_info *dst_bd,
+ struct rte_crypto_op *op)
+{
+ struct mrvl_crypto_session *sess;
+ struct rte_mbuf *dst_mbuf;
+ uint8_t *digest;
+
+ if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ MRVL_CRYPTO_LOG_ERR("MRVL CRYPTO PMD only supports session "
+ "oriented requests, op (%p) is sessionless.",
+ op);
+ return -EINVAL;
+ }
+
+ sess = (struct mrvl_crypto_session *)get_session_private_data(
+ op->sym->session, cryptodev_driver_id);
+ if (unlikely(sess == NULL)) {
+ MRVL_CRYPTO_LOG_ERR("Session was not created for this device");
+ return -EINVAL;
+ }
+
+ /*
+	 * If the application delivered a NULL dst buffer, it expects
+	 * the result to be delivered in the src buffer.
+ */
+ dst_mbuf = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+
+ request->sa = sess->sam_sess;
+ request->cookie = op;
+
+ /* Single buffers only, sorry. */
+ request->num_bufs = 1;
+ request->src = src_bd;
+ src_bd->vaddr = rte_pktmbuf_mtod(op->sym->m_src, void *);
+ src_bd->paddr = rte_pktmbuf_iova(op->sym->m_src);
+ src_bd->len = rte_pktmbuf_data_len(op->sym->m_src);
+
+ /* Empty source. */
+ if (rte_pktmbuf_data_len(op->sym->m_src) == 0) {
+ /* EIP does not support 0 length buffers. */
+ MRVL_CRYPTO_LOG_ERR("Buffer length == 0 not supported!");
+ return -1;
+ }
+
+ /* Empty destination. */
+ if (rte_pktmbuf_data_len(dst_mbuf) == 0) {
+		/* Make the dst buffer fit at least the source data. */
+ if (rte_pktmbuf_append(dst_mbuf,
+ rte_pktmbuf_data_len(op->sym->m_src)) == NULL) {
+			MRVL_CRYPTO_LOG_ERR("Unable to grow dst buffer to fit source data!");
+ return -1;
+ }
+ }
+
+ request->dst = dst_bd;
+ dst_bd->vaddr = rte_pktmbuf_mtod(dst_mbuf, void *);
+ dst_bd->paddr = rte_pktmbuf_iova(dst_mbuf);
+
+ /*
+ * We can use all available space in dst_mbuf,
+ * not only what's used currently.
+ */
+ dst_bd->len = dst_mbuf->buf_len - rte_pktmbuf_headroom(dst_mbuf);
+
+ if (sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED) {
+ request->cipher_len = op->sym->aead.data.length;
+ request->cipher_offset = op->sym->aead.data.offset;
+ request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->cipher_iv_offset);
+
+ request->auth_aad = op->sym->aead.aad.data;
+ request->auth_offset = request->cipher_offset;
+ request->auth_len = request->cipher_len;
+ } else {
+ request->cipher_len = op->sym->cipher.data.length;
+ request->cipher_offset = op->sym->cipher.data.offset;
+ request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->cipher_iv_offset);
+
+ request->auth_offset = op->sym->auth.data.offset;
+ request->auth_len = op->sym->auth.data.length;
+ }
+
+ digest = sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED ?
+ op->sym->aead.digest.data : op->sym->auth.digest.data;
+ if (digest == NULL) {
+ /* No auth - no worry. */
+ return 0;
+ }
+
+ request->auth_icv_offset = request->auth_offset + request->auth_len;
+
+ /*
+	 * EIP supports only scenarios where the ICV (digest buffer) is placed
+	 * at auth_icv_offset. Any other placement risks errors.
+ */
+ if (sess->sam_sess_params.dir == SAM_DIR_ENCRYPT) {
+ /*
+ * This should be the most common case anyway,
+ * EIP will overwrite DST buffer at auth_icv_offset.
+ */
+ if (rte_pktmbuf_mtod_offset(
+ dst_mbuf, uint8_t *,
+ request->auth_icv_offset) == digest) {
+ return 0;
+ }
+	} else { /* sess->sam_sess_params.dir == SAM_DIR_DECRYPT */
+ /*
+ * EIP will look for digest at auth_icv_offset
+ * offset in SRC buffer.
+ */
+ if (rte_pktmbuf_mtod_offset(
+ op->sym->m_src, uint8_t *,
+ request->auth_icv_offset) == digest) {
+ return 0;
+ }
+ }
+
+ /*
+	 * If we landed here, it means the digest pointer is at a
+	 * different place than expected.
+ */
+ return -1;
+}
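+
+/*
+ * Placement sketch (an illustration, not driver code; dst_mbuf stands for
+ * the application's destination mbuf): to satisfy the checks above, the
+ * digest should sit directly after the authenticated data, e.g.:
+ *
+ *	op->sym->auth.digest.data = rte_pktmbuf_mtod_offset(dst_mbuf,
+ *		uint8_t *,
+ *		op->sym->auth.data.offset + op->sym->auth.data.length);
+ */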
+
+/*
+ *-----------------------------------------------------------------------------
+ * PMD Framework handlers
+ *-----------------------------------------------------------------------------
+ */
+
+/**
+ * Enqueue burst.
+ *
+ * @param queue_pair Pointer to queue pair.
+ * @param ops Pointer to ops requests array.
+ * @param nb_ops Number of elements in ops requests array.
+ * @returns Number of elements consumed from ops.
+ */
+static uint16_t
+mrvl_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ uint16_t iter_ops = 0;
+ uint16_t to_enq = 0;
+ uint16_t consumed = 0;
+ int ret;
+ struct sam_cio_op_params requests[nb_ops];
+ /*
+	 * DPDK uses single-fragment buffers, so the descriptors can be kept
+	 * simple. SAM does not store bd pointers, so on-stack scope is enough.
+ */
+ struct sam_buf_info src_bd[nb_ops];
+ struct sam_buf_info dst_bd[nb_ops];
+ struct mrvl_crypto_qp *qp = (struct mrvl_crypto_qp *)queue_pair;
+
+ if (nb_ops == 0)
+ return 0;
+
+ /* Prepare the burst. */
+ memset(&requests, 0, sizeof(requests));
+
+ /* Iterate through */
+ for (; iter_ops < nb_ops; ++iter_ops) {
+ if (mrvl_request_prepare(&requests[iter_ops],
+ &src_bd[iter_ops],
+ &dst_bd[iter_ops],
+ ops[iter_ops]) < 0) {
+			MRVL_CRYPTO_LOG_ERR(
+				"Error while preparing request parameters!");
+ qp->stats.enqueue_err_count++;
+ ops[iter_ops]->status = RTE_CRYPTO_OP_STATUS_ERROR;
+
+ /*
+			 * The number of handled ops is increased
+			 * (even if handling this op resulted in an error).
+ */
+ ++consumed;
+ break;
+ }
+
+ ops[iter_ops]->status =
+ RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ /* Increase the number of ops to enqueue. */
+ ++to_enq;
+ } /* for (; iter_ops < nb_ops;... */
+
+ if (to_enq > 0) {
+ /* Send the burst */
+ ret = sam_cio_enq(qp->cio, requests, &to_enq);
+ consumed += to_enq;
+ if (ret < 0) {
+ /*
+			 * Trust SAM that in this case the returned value will
+			 * eventually be correct (for now it is returned
+			 * unmodified).
+ */
+ qp->stats.enqueue_err_count += to_enq;
+ for (iter_ops = 0; iter_ops < to_enq; ++iter_ops)
+ ops[iter_ops]->status =
+ RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+ }
+
+ qp->stats.enqueued_count += to_enq;
+ return consumed;
+}
+
+/**
+ * Dequeue burst.
+ *
+ * @param queue_pair Pointer to queue pair.
+ * @param ops Pointer to ops requests array.
+ * @param nb_ops Number of elements in ops requests array.
+ * @returns Number of elements dequeued.
+ */
+static uint16_t
+mrvl_crypto_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ int ret;
+ struct mrvl_crypto_qp *qp = queue_pair;
+ struct sam_cio *cio = qp->cio;
+ struct sam_cio_op_result results[nb_ops];
+ uint16_t i;
+
+ ret = sam_cio_deq(cio, results, &nb_ops);
+ if (ret < 0) {
+ /* Count all dequeued as error. */
+ qp->stats.dequeue_err_count += nb_ops;
+
+		/* But act as if they were dequeued anyway. */
+ qp->stats.dequeued_count += nb_ops;
+
+ return 0;
+ }
+
+ /* Unpack and check results. */
+ for (i = 0; i < nb_ops; ++i) {
+ ops[i] = results[i].cookie;
+
+ switch (results[i].status) {
+ case SAM_CIO_OK:
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ break;
+ case SAM_CIO_ERR_ICV:
+ MRVL_CRYPTO_LOG_DBG("CIO returned SAM_CIO_ERR_ICV.");
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ break;
+ default:
+ MRVL_CRYPTO_LOG_DBG(
+ "CIO returned Error: %d", results[i].status);
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ break;
+ }
+ }
+
+ qp->stats.dequeued_count += nb_ops;
+ return nb_ops;
+}
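+
+/*
+ * For context (caller-side sketch, not part of this PMD): the two burst
+ * handlers above are reached through the generic cryptodev API, e.g.:
+ *
+ *	uint16_t n = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
+ *	...
+ *	n = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, nb_ops);
+ */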
+
+/**
+ * Create a new crypto device.
+ *
+ * @param name Driver name.
+ * @param vdev Pointer to device structure.
+ * @param init_params Pointer to initialization parameters.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+cryptodev_mrvl_crypto_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct rte_cryptodev_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct mrvl_crypto_private *internals;
+ struct sam_init_params sam_params;
+ int ret;
+
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
+ if (dev == NULL) {
+ MRVL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->driver_id = cryptodev_driver_id;
+ dev->dev_ops = rte_mrvl_crypto_pmd_ops;
+
+ /* Register rx/tx burst functions for data path. */
+ dev->enqueue_burst = mrvl_crypto_pmd_enqueue_burst;
+ dev->dequeue_burst = mrvl_crypto_pmd_dequeue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED;
+
+	/* Set up device private data. */
+ internals = dev->data->dev_private;
+
+ internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->max_nb_sessions;
+
+ /*
+ * ret == -EEXIST is correct, it means DMA
+	 * has already been initialized.
+ */
+ ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
+ if (ret < 0) {
+ if (ret != -EEXIST)
+ return ret;
+
+ MRVL_CRYPTO_LOG_INFO(
+			"DMA memory has already been initialized by a different driver.");
+ }
+
+ sam_params.max_num_sessions = internals->max_nb_sessions;
+
+ return sam_init(&sam_params);
+
+init_error:
+ MRVL_CRYPTO_LOG_ERR(
+ "driver %s: %s failed", init_params->name, __func__);
+
+ cryptodev_mrvl_crypto_uninit(vdev);
+ return -EFAULT;
+}
+
+/**
+ * Initialize the crypto device.
+ *
+ * @param vdev Pointer to device structure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = { };
+ const char *name, *args;
+ int ret;
+
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+ args = rte_vdev_device_args(vdev);
+
+ init_params.private_data_size = sizeof(struct mrvl_crypto_private);
+ init_params.max_nb_queue_pairs = sam_get_num_inst() * SAM_HW_RING_NUM;
+ init_params.max_nb_sessions =
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS;
+ init_params.socket_id = rte_socket_id();
+
+ ret = rte_cryptodev_pmd_parse_input_args(&init_params, args);
+ if (ret) {
+ RTE_LOG(ERR, PMD,
+			"Failed to parse initialisation arguments [%s]\n",
+ args);
+ return -EINVAL;
+ }
+
+ return cryptodev_mrvl_crypto_create(name, vdev, &init_params);
+}
+
+/**
+ * Uninitialize the crypto device
+ *
+ * @param vdev Pointer to device structure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev)
+{
+ struct rte_cryptodev *cryptodev;
+ const char *name = rte_vdev_device_name(vdev);
+
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD,
+ "Closing Marvell crypto device %s on numa socket %u\n",
+ name, rte_socket_id());
+
+ sam_deinit();
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+/**
+ * Basic driver handlers for use in the constructor.
+ */
+static struct rte_vdev_driver cryptodev_mrvl_pmd_drv = {
+ .probe = cryptodev_mrvl_crypto_init,
+ .remove = cryptodev_mrvl_crypto_uninit
+};
+
+static struct cryptodev_driver mrvl_crypto_drv;
+
+/* Register the driver in constructor. */
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_MRVL_PMD, cryptodev_mrvl_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MRVL_PMD,
+ "max_nb_queue_pairs=<int> "
+ "max_nb_sessions=<int> "
+ "socket_id=<int>");
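+/*
+ * Example (values are illustrative only) of instantiating the PMD
+ * through the EAL vdev arguments, using the parameters registered above:
+ *   --vdev "crypto_mrvl,max_nb_queue_pairs=2,max_nb_sessions=1024,socket_id=0"
+ */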
+RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv,
+ cryptodev_driver_id);
diff --git a/drivers/crypto/mrvl/rte_mrvl_pmd_ops.c b/drivers/crypto/mrvl/rte_mrvl_pmd_ops.c
new file mode 100644
index 00000000..434cf850
--- /dev/null
+++ b/drivers/crypto/mrvl/rte_mrvl_pmd_ops.c
@@ -0,0 +1,778 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_mrvl_pmd_private.h"
+
+/**
+ * Capabilities list to be used in reporting to DPDK.
+ */
+static const struct rte_cryptodev_capabilities
+ mrvl_crypto_pmd_capabilities[] = {
+ { /* MD5 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* MD5 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_MD5,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA1 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA1 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA1,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 20,
+ .max = 20,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA224 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA224,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 28,
+ .max = 28,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA256 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
+ .block_size = 64,
+ .key_size = {
+ .min = 1,
+ .max = 64,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA256 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA256,
+ .block_size = 64,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 32,
+ .max = 32,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA384 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA384 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA384,
+ .block_size = 128,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 48,
+ .max = 48,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA512 HMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
+ .block_size = 128,
+ .key_size = {
+ .min = 1,
+ .max = 128,
+ .increment = 1
+ },
+ .digest_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* SHA512 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_SHA512,
+ .block_size = 128,
+ .key_size = {
+ .min = 0,
+ .max = 0,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 64,
+ .max = 64,
+ .increment = 0
+ },
+ }, }
+ }, }
+ },
+ { /* AES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* AES GCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_GCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .aad_size = {
+ .min = 8,
+ .max = 12,
+ .increment = 4
+ },
+ .iv_size = {
+ .min = 12,
+ .max = 16,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* AES GMAC (AUTH) */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_GMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 65532,
+ .increment = 4
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 24,
+ .max = 24,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+ { /* 3DES CTR */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CTR,
+ .block_size = 8,
+ .key_size = {
+ .min = 24,
+ .max = 24,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
+
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
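+/*
+ * Note: this table is returned as-is by mrvl_crypto_pmd_info_get() below;
+ * applications retrieve it via rte_cryptodev_info_get().
+ */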
+
+/**
+ * Configure device (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param config Pointer to configuration structure.
+ * @returns 0. Always.
+ */
+static int
+mrvl_crypto_pmd_config(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused struct rte_cryptodev_config *config)
+{
+ return 0;
+}
+
+/**
+ * Start device (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @returns 0. Always.
+ */
+static int
+mrvl_crypto_pmd_start(__rte_unused struct rte_cryptodev *dev)
+{
+ return 0;
+}
+
+/**
+ * Stop device (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @returns 0. Always.
+ */
+static void
+mrvl_crypto_pmd_stop(__rte_unused struct rte_cryptodev *dev)
+{
+}
+
+/**
+ * Get device statistics (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param stats Pointer to statistics structure [out].
+ */
+static void
+mrvl_crypto_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct mrvl_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->stats.enqueued_count;
+ stats->dequeued_count += qp->stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->stats.dequeue_err_count;
+ }
+}
+
+/**
+ * Reset device statistics (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ */
+static void
+mrvl_crypto_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct mrvl_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+ }
+}
+
+/**
+ * Get device info (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param dev_info Pointer to the device info structure [out].
+ */
+static void
+mrvl_crypto_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct mrvl_crypto_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = mrvl_crypto_pmd_capabilities;
+ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ }
+}
+
+/**
+ * Release queue pair (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param qp_id ID of Queue Pair to release.
+ * @returns 0. Always.
+ */
+static int
+mrvl_crypto_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct mrvl_crypto_qp *qp =
+ (struct mrvl_crypto_qp *)dev->data->queue_pairs[qp_id];
+
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ sam_cio_flush(qp->cio);
+ sam_cio_deinit(qp->cio);
+ rte_free(dev->data->queue_pairs[qp_id]);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+
+ return 0;
+}
+
+/**
+ * Close device (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @returns 0. Always.
+ */
+static int
+mrvl_crypto_pmd_close(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++)
+ mrvl_crypto_pmd_qp_release(dev, qp_id);
+
+ return 0;
+}
+
+/**
+ * Setup a queue pair (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param qp_id ID of the Queue Pair.
+ * @param qp_conf Queue pair configuration (nb of descriptors).
+ * @param socket_id NUMA socket to allocate memory on.
+ * @returns 0 upon success, negative value otherwise.
+ */
+static int
+mrvl_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool)
+{
+ struct mrvl_crypto_qp *qp = NULL;
+ char match[RTE_CRYPTODEV_NAME_LEN];
+ unsigned int n;
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("MRVL Crypto PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL)
+ return -ENOMEM;
+
+	/* Free the old qp prior to setup, if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ mrvl_crypto_pmd_qp_release(dev, qp_id);
+
+ do { /* Error handling block */
+
+		/*
+		 * This extra check is necessary due to a bug in
+		 * the crypto library.
+		 */
+		int num = sam_get_num_inst();
+		if (num == 0) {
+			MRVL_CRYPTO_LOG_ERR("No crypto engines detected.");
+			break;
+		}
+
+ /*
+ * In case two crypto engines are enabled qps will
+ * be evenly spread among them. Even and odd qps will
+ * be handled by cio-0 and cio-1 respectively. qp-cio mapping
+ * will look as follows:
+ *
+ * qp: 0 1 2 3
+ * cio-x:y: cio-0:0, cio-1:0, cio-0:1, cio-1:1
+ *
+ * qp: 4 5 6 7
+ * cio-x:y: cio-0:2, cio-1:2, cio-0:3, cio-1:3
+ *
+ * In case just one engine is enabled mapping will look as
+ * follows:
+ * qp: 0 1 2 3
+ * cio-x:y: cio-0:0, cio-0:1, cio-0:2, cio-0:3
+ */
+ n = snprintf(match, sizeof(match), "cio-%u:%u",
+ qp_id % num, qp_id / num);
+
+ if (n >= sizeof(match))
+ break;
+
+ qp->cio_params.match = match;
+ qp->cio_params.size = qp_conf->nb_descriptors;
+
+ if (sam_cio_init(&qp->cio_params, &qp->cio) < 0)
+ break;
+
+ qp->sess_mp = session_pool;
+
+ memset(&qp->stats, 0, sizeof(qp->stats));
+ dev->data->queue_pairs[qp_id] = qp;
+ return 0;
+ } while (0);
+
+ rte_free(qp);
+ return -1;
+}
+
+/** Start queue pair (PMD ops callback) - not supported.
+ *
+ * @param dev Pointer to the device structure.
+ * @param queue_pair_id ID of the Queue Pair.
+ * @returns -ENOTSUP. Always.
+ */
+static int
+mrvl_crypto_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Stop queue pair (PMD ops callback) - not supported.
+ *
+ * @param dev Pointer to the device structure.
+ * @param queue_pair_id ID of the Queue Pair.
+ * @returns -ENOTSUP. Always.
+ */
+static int
+mrvl_crypto_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
+ __rte_unused uint16_t queue_pair_id)
+{
+ return -ENOTSUP;
+}
+
+/** Return the number of allocated queue pairs (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @returns Number of allocated queue pairs.
+ */
+static uint32_t
+mrvl_crypto_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the session structure (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure [Unused].
+ * @returns Size of Marvell crypto session.
+ */
+static unsigned
+mrvl_crypto_pmd_session_get_size(__rte_unused struct rte_cryptodev *dev)
+{
+ return sizeof(struct mrvl_crypto_session);
+}
+
+/** Configure the session from a crypto xform chain (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param xform Pointer to the crypto configuration structure.
+ * @param sess Pointer to the empty session structure.
+ * @returns 0 upon success, negative value otherwise.
+ */
+static int
+mrvl_crypto_pmd_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mp)
+{
+ struct mrvl_crypto_session *mrvl_sess;
+ void *sess_private_data;
+ int ret;
+
+ if (sess == NULL) {
+ MRVL_CRYPTO_LOG_ERR("Invalid session struct.");
+ return -EINVAL;
+ }
+
+ if (rte_mempool_get(mp, &sess_private_data)) {
+		MRVL_CRYPTO_LOG_ERR("Couldn't get object from session mempool.");
+ return -ENOMEM;
+ }
+
+ ret = mrvl_crypto_set_session_parameters(sess_private_data, xform);
+ if (ret != 0) {
+ MRVL_CRYPTO_LOG_ERR("Failed to configure session parameters.");
+
+ /* Return session to mempool */
+ rte_mempool_put(mp, sess_private_data);
+ return ret;
+ }
+
+ set_session_private_data(sess, dev->driver_id, sess_private_data);
+
+	mrvl_sess = (struct mrvl_crypto_session *)sess_private_data;
+	if (sam_session_create(&mrvl_sess->sam_sess_params,
+			&mrvl_sess->sam_sess) < 0) {
+		MRVL_CRYPTO_LOG_ERR("Failed to create session!");
+		/* Return the private data to the mempool on failure. */
+		rte_mempool_put(mp, sess_private_data);
+		return -EIO;
+	}
+
+ return 0;
+}
+
+/**
+ * Clear session memory so that no key material is left behind.
+ *
+ * @param dev Pointer to the device structure.
+ * @param sess Pointer to the session to clear.
+ */
+static void
+mrvl_crypto_pmd_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_session_private_data(sess, index);
+
+ /* Zero out the whole structure */
+ if (sess_priv) {
+ struct mrvl_crypto_session *mrvl_sess =
+ (struct mrvl_crypto_session *)sess_priv;
+
+ if (mrvl_sess->sam_sess &&
+ sam_session_destroy(mrvl_sess->sam_sess) < 0) {
+ MRVL_CRYPTO_LOG_INFO("Error while destroying session!");
+ }
+
+		memset(sess_priv, 0, sizeof(struct mrvl_crypto_session));
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+ set_session_private_data(sess, index, NULL);
+ rte_mempool_put(sess_mp, sess_priv);
+ }
+}
+
+/**
+ * PMD handlers for crypto ops.
+ */
+static struct rte_cryptodev_ops mrvl_crypto_pmd_ops = {
+ .dev_configure = mrvl_crypto_pmd_config,
+ .dev_start = mrvl_crypto_pmd_start,
+ .dev_stop = mrvl_crypto_pmd_stop,
+ .dev_close = mrvl_crypto_pmd_close,
+
+ .dev_infos_get = mrvl_crypto_pmd_info_get,
+
+ .stats_get = mrvl_crypto_pmd_stats_get,
+ .stats_reset = mrvl_crypto_pmd_stats_reset,
+
+ .queue_pair_setup = mrvl_crypto_pmd_qp_setup,
+ .queue_pair_release = mrvl_crypto_pmd_qp_release,
+ .queue_pair_start = mrvl_crypto_pmd_qp_start,
+ .queue_pair_stop = mrvl_crypto_pmd_qp_stop,
+ .queue_pair_count = mrvl_crypto_pmd_qp_count,
+
+ .session_get_size = mrvl_crypto_pmd_session_get_size,
+ .session_configure = mrvl_crypto_pmd_session_configure,
+ .session_clear = mrvl_crypto_pmd_session_clear
+};
+
+struct rte_cryptodev_ops *rte_mrvl_crypto_pmd_ops = &mrvl_crypto_pmd_ops;
diff --git a/drivers/crypto/mrvl/rte_mrvl_pmd_private.h b/drivers/crypto/mrvl/rte_mrvl_pmd_private.h
new file mode 100644
index 00000000..923faaf9
--- /dev/null
+++ b/drivers/crypto/mrvl/rte_mrvl_pmd_private.h
@@ -0,0 +1,123 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RTE_MRVL_PMD_PRIVATE_H_
+#define _RTE_MRVL_PMD_PRIVATE_H_
+
+#include "rte_mrvl_compat.h"
+
+#define CRYPTODEV_NAME_MRVL_PMD crypto_mrvl
+/**< Marvell PMD device name */
+
+#define MRVL_CRYPTO_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_PMD_MRVL_CRYPTO_DEBUG
+#define MRVL_CRYPTO_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \
+ __func__, __LINE__, ## args)
+
+#define MRVL_CRYPTO_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \
+ __func__, __LINE__, ## args)
+
+#else
+#define MRVL_CRYPTO_LOG_INFO(fmt, args...)
+#define MRVL_CRYPTO_LOG_DBG(fmt, args...)
+#endif
+
+/**
+ * Handy bits->bytes conversion macro.
+ */
+#define BITS2BYTES(x) ((x) >> 3)
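+/* For example, BITS2BYTES(256) == 32: a 256-bit key is 32 bytes long. */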
+
+/** The operation order mode enumerator. */
+enum mrvl_crypto_chain_order {
+ MRVL_CRYPTO_CHAIN_CIPHER_ONLY,
+ MRVL_CRYPTO_CHAIN_AUTH_ONLY,
+ MRVL_CRYPTO_CHAIN_CIPHER_AUTH,
+ MRVL_CRYPTO_CHAIN_AUTH_CIPHER,
+ MRVL_CRYPTO_CHAIN_COMBINED,
+ MRVL_CRYPTO_CHAIN_NOT_SUPPORTED,
+};
+
+/** Private data structure for each crypto device. */
+struct mrvl_crypto_private {
+ unsigned int max_nb_qpairs; /**< Max number of queue pairs */
+ unsigned int max_nb_sessions; /**< Max number of sessions */
+};
+
+/** MRVL crypto queue pair structure. */
+struct mrvl_crypto_qp {
+ /** SAM CIO (MUSDK Queue Pair equivalent).*/
+ struct sam_cio *cio;
+
+ /** Session Mempool. */
+ struct rte_mempool *sess_mp;
+
+ /** Queue pair statistics. */
+ struct rte_cryptodev_stats stats;
+
+ /** CIO initialization parameters.*/
+ struct sam_cio_params cio_params;
+} __rte_cache_aligned;
+
+/** MRVL crypto private session structure. */
+struct mrvl_crypto_session {
+ /** Crypto operations chain order. */
+ enum mrvl_crypto_chain_order chain_order;
+
+ /** Session initialization parameters. */
+ struct sam_session_params sam_sess_params;
+
+ /** SAM session pointer. */
+ struct sam_sa *sam_sess;
+
+ /** Cipher IV offset. */
+ uint16_t cipher_iv_offset;
+} __rte_cache_aligned;
+
+/** Set and validate MRVL crypto session parameters. */
+extern int
+mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
+/** Device-specific operations function pointer structure. */
+extern struct rte_cryptodev_ops *rte_mrvl_crypto_pmd_ops;
+
+#endif /* _RTE_MRVL_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/mrvl/rte_pmd_mrvl_version.map b/drivers/crypto/mrvl/rte_pmd_mrvl_version.map
new file mode 100644
index 00000000..a7530317
--- /dev/null
+++ b/drivers/crypto/mrvl/rte_pmd_mrvl_version.map
@@ -0,0 +1,3 @@
+DPDK_17.11 {
+ local: *;
+};
diff --git a/drivers/crypto/null/Makefile b/drivers/crypto/null/Makefile
index bc2724b3..49ada097 100644
--- a/drivers/crypto/null/Makefile
+++ b/drivers/crypto/null/Makefile
@@ -37,6 +37,9 @@ LIB = librte_pmd_null_crypto.a
# build flags
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
# library version
LIBABIVER := 1
diff --git a/drivers/crypto/null/null_crypto_pmd.c b/drivers/crypto/null/null_crypto_pmd.c
index 2c827257..f031d3b8 100644
--- a/drivers/crypto/null/null_crypto_pmd.c
+++ b/drivers/crypto/null/null_crypto_pmd.c
@@ -31,10 +31,8 @@
*/
#include <rte_common.h>
-#include <rte_config.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_cryptodev_vdev.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include "null_crypto_pmd_private.h"
@@ -183,28 +181,19 @@ null_crypto_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
return nb_dequeued;
}
-static int cryptodev_null_remove(const char *name);
-
/** Create crypto device */
static int
cryptodev_null_create(const char *name,
struct rte_vdev_device *vdev,
- struct rte_crypto_vdev_init_params *init_params)
+ struct rte_cryptodev_pmd_init_params *init_params)
{
struct rte_cryptodev *dev;
struct null_crypto_private *internals;
- if (init_params->name[0] == '\0')
- snprintf(init_params->name, sizeof(init_params->name),
- "%s", name);
-
- dev = rte_cryptodev_vdev_pmd_init(init_params->name,
- sizeof(struct null_crypto_private),
- init_params->socket_id,
- vdev);
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
if (dev == NULL) {
NULL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
- goto init_error;
+ return -EFAULT;
}
dev->driver_id = cryptodev_driver_id;
@@ -224,61 +213,53 @@ cryptodev_null_create(const char *name,
internals->max_nb_sessions = init_params->max_nb_sessions;
return 0;
-
-init_error:
- NULL_CRYPTO_LOG_ERR("driver %s: cryptodev_null_create failed",
- init_params->name);
- cryptodev_null_remove(init_params->name);
-
- return -EFAULT;
}
/** Initialise null crypto device */
static int
cryptodev_null_probe(struct rte_vdev_device *dev)
{
- struct rte_crypto_vdev_init_params init_params = {
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct null_crypto_private),
rte_socket_id(),
- {0}
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
};
- const char *name;
+ const char *name, *args;
+ int retval;
name = rte_vdev_device_name(dev);
if (name == NULL)
return -EINVAL;
- RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n",
- name, init_params.socket_id);
- if (init_params.name[0] != '\0')
- RTE_LOG(INFO, PMD, " User defined name = %s\n",
- init_params.name);
- RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
- init_params.max_nb_queue_pairs);
- RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
- init_params.max_nb_sessions);
+ args = rte_vdev_device_args(dev);
+
+ retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
+ if (retval) {
+ RTE_LOG(ERR, PMD,
+			"Failed to parse initialisation arguments [%s]\n", args);
+ return -EINVAL;
+ }
return cryptodev_null_create(name, dev, &init_params);
}
-/** Uninitialise null crypto device */
static int
-cryptodev_null_remove(const char *name)
+cryptodev_null_remove_dev(struct rte_vdev_device *vdev)
{
+ struct rte_cryptodev *cryptodev;
+ const char *name;
+
+ name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;
- RTE_LOG(INFO, PMD, "Closing null crypto device %s on numa socket %u\n",
- name, rte_socket_id());
-
- return 0;
-}
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
-static int
-cryptodev_null_remove_dev(struct rte_vdev_device *dev)
-{
- return cryptodev_null_remove(rte_vdev_device_name(dev));
+ return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_vdev_driver cryptodev_null_pmd_drv = {
@@ -286,10 +267,13 @@ static struct rte_vdev_driver cryptodev_null_pmd_drv = {
.remove = cryptodev_null_remove_dev,
};
+static struct cryptodev_driver null_crypto_drv;
+
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_NULL_PMD, cryptodev_null_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_NULL_PMD, cryptodev_null_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_NULL_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_null_pmd_drv, cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(null_crypto_drv, cryptodev_null_pmd_drv,
+ cryptodev_driver_id);
diff --git a/drivers/crypto/null/null_crypto_pmd_ops.c b/drivers/crypto/null/null_crypto_pmd_ops.c
index 76153203..c427050a 100644
--- a/drivers/crypto/null/null_crypto_pmd_ops.c
+++ b/drivers/crypto/null/null_crypto_pmd_ops.c
@@ -175,7 +175,7 @@ null_crypto_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
"null_crypto_pmd_%u_qp_%u",
dev->data->dev_id, qp->id);
- if (n > sizeof(qp->name))
+ if (n >= sizeof(qp->name))
return -1;
return 0;
diff --git a/drivers/crypto/null/null_crypto_pmd_private.h b/drivers/crypto/null/null_crypto_pmd_private.h
index 4d1c3c9e..7c7852fb 100644
--- a/drivers/crypto/null/null_crypto_pmd_private.h
+++ b/drivers/crypto/null/null_crypto_pmd_private.h
@@ -33,8 +33,6 @@
#ifndef _NULL_CRYPTO_PMD_PRIVATE_H_
#define _NULL_CRYPTO_PMD_PRIVATE_H_
-#include "rte_config.h"
-
#define CRYPTODEV_NAME_NULL_PMD crypto_null
/**< Null crypto PMD device name */
diff --git a/drivers/crypto/openssl/Makefile b/drivers/crypto/openssl/Makefile
index e5fdfb59..1a006432 100644
--- a/drivers/crypto/openssl/Makefile
+++ b/drivers/crypto/openssl/Makefile
@@ -45,6 +45,9 @@ EXPORT_MAP := rte_pmd_openssl_version.map
# external library dependencies
LDLIBS += -lcrypto
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += rte_openssl_pmd.c
diff --git a/drivers/crypto/openssl/rte_openssl_pmd.c b/drivers/crypto/openssl/rte_openssl_pmd.c
index 0bd5f98e..06e1a6de 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd.c
@@ -34,11 +34,11 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_cryptodev_vdev.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
+#include <openssl/hmac.h>
#include <openssl/evp.h>
#include "rte_openssl_pmd_private.h"
@@ -47,6 +47,25 @@
static uint8_t cryptodev_driver_id;
+#if (OPENSSL_VERSION_NUMBER < 0x10100000L)
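+/*
+ * Compatibility shims: OpenSSL releases older than 1.1.0 do not provide
+ * HMAC_CTX_new()/HMAC_CTX_free(), so emulate them here with the legacy
+ * HMAC_CTX_init()/HMAC_CTX_cleanup() calls.
+ */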
+static HMAC_CTX *HMAC_CTX_new(void)
+{
+ HMAC_CTX *ctx = OPENSSL_malloc(sizeof(*ctx));
+
+ if (ctx != NULL)
+ HMAC_CTX_init(ctx);
+ return ctx;
+}
+
+static void HMAC_CTX_free(HMAC_CTX *ctx)
+{
+ if (ctx != NULL) {
+ HMAC_CTX_cleanup(ctx);
+ OPENSSL_free(ctx);
+ }
+}
+#endif
+
static int cryptodev_openssl_remove(struct rte_vdev_device *vdev);
/*----------------------------------------------------------------------------*/
@@ -267,6 +286,21 @@ get_aead_algo(enum rte_crypto_aead_algorithm sess_algo, size_t keylen,
res = -EINVAL;
}
break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ switch (keylen) {
+ case 16:
+ *algo = EVP_aes_128_ccm();
+ break;
+ case 24:
+ *algo = EVP_aes_192_ccm();
+ break;
+ case 32:
+ *algo = EVP_aes_256_ccm();
+ break;
+ default:
+ res = -EINVAL;
+ }
+ break;
default:
res = -EINVAL;
break;
@@ -278,6 +312,125 @@ get_aead_algo(enum rte_crypto_aead_algorithm sess_algo, size_t keylen,
return res;
}
+/* Set session AEAD encryption parameters */
+static int
+openssl_set_sess_aead_enc_param(struct openssl_session *sess,
+ enum rte_crypto_aead_algorithm algo,
+ uint8_t tag_len, uint8_t *key)
+{
+ int iv_type = 0;
+ unsigned int do_ccm;
+
+ sess->cipher.direction = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ sess->auth.operation = RTE_CRYPTO_AUTH_OP_GENERATE;
+
+ /* Select AEAD algo */
+ switch (algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ iv_type = EVP_CTRL_GCM_SET_IVLEN;
+ if (tag_len != 16)
+ return -EINVAL;
+ do_ccm = 0;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ iv_type = EVP_CTRL_CCM_SET_IVLEN;
+ /* Digest size can be 4, 6, 8, 10, 12, 14 or 16 bytes */
+ if (tag_len < 4 || tag_len > 16 || (tag_len & 1) == 1)
+ return -EINVAL;
+ do_ccm = 1;
+ break;
+ default:
+ return -ENOTSUP;
+ }
+
+ sess->cipher.mode = OPENSSL_CIPHER_LIB;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+
+ if (get_aead_algo(algo, sess->cipher.key.length,
+ &sess->cipher.evp_algo) != 0)
+ return -EINVAL;
+
+ get_cipher_key(key, sess->cipher.key.length, sess->cipher.key.data);
+
+ sess->chain_order = OPENSSL_CHAIN_COMBINED;
+
+ if (EVP_EncryptInit_ex(sess->cipher.ctx, sess->cipher.evp_algo,
+ NULL, NULL, NULL) <= 0)
+ return -EINVAL;
+
+ if (EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, iv_type, sess->iv.length,
+ NULL) <= 0)
+ return -EINVAL;
+
+ if (do_ccm)
+ EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, EVP_CTRL_CCM_SET_TAG,
+ tag_len, NULL);
+
+ if (EVP_EncryptInit_ex(sess->cipher.ctx, NULL, NULL, key, NULL) <= 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Set session AEAD decryption parameters */
+static int
+openssl_set_sess_aead_dec_param(struct openssl_session *sess,
+ enum rte_crypto_aead_algorithm algo,
+ uint8_t tag_len, uint8_t *key)
+{
+ int iv_type = 0;
+ unsigned int do_ccm = 0;
+
+ sess->cipher.direction = RTE_CRYPTO_CIPHER_OP_DECRYPT;
+ sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
+
+ /* Select AEAD algo */
+ switch (algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ iv_type = EVP_CTRL_GCM_SET_IVLEN;
+ if (tag_len != 16)
+ return -EINVAL;
+ break;
+ case RTE_CRYPTO_AEAD_AES_CCM:
+ iv_type = EVP_CTRL_CCM_SET_IVLEN;
+ /* Digest size can be 4, 6, 8, 10, 12, 14 or 16 bytes */
+ if (tag_len < 4 || tag_len > 16 || (tag_len & 1) == 1)
+ return -EINVAL;
+ do_ccm = 1;
+ break;
+ default:
+ return -ENOTSUP;
+ }
+
+ sess->cipher.mode = OPENSSL_CIPHER_LIB;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+
+ if (get_aead_algo(algo, sess->cipher.key.length,
+ &sess->cipher.evp_algo) != 0)
+ return -EINVAL;
+
+ get_cipher_key(key, sess->cipher.key.length, sess->cipher.key.data);
+
+ sess->chain_order = OPENSSL_CHAIN_COMBINED;
+
+ if (EVP_DecryptInit_ex(sess->cipher.ctx, sess->cipher.evp_algo,
+ NULL, NULL, NULL) <= 0)
+ return -EINVAL;
+
+ if (EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, iv_type,
+ sess->iv.length, NULL) <= 0)
+ return -EINVAL;
+
+ if (do_ccm)
+ EVP_CIPHER_CTX_ctrl(sess->cipher.ctx, EVP_CTRL_CCM_SET_TAG,
+ tag_len, NULL);
+
+ if (EVP_DecryptInit_ex(sess->cipher.ctx, NULL, NULL, key, NULL) <= 0)
+ return -EINVAL;
+
+ return 0;
+}
+
/** Set session cipher parameters */
static int
openssl_set_session_cipher_parameters(struct openssl_session *sess,
@@ -307,6 +460,22 @@ openssl_set_session_cipher_parameters(struct openssl_session *sess,
get_cipher_key(xform->cipher.key.data, sess->cipher.key.length,
sess->cipher.key.data);
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ if (EVP_EncryptInit_ex(sess->cipher.ctx,
+ sess->cipher.evp_algo,
+ NULL, xform->cipher.key.data,
+ NULL) != 1) {
+ return -EINVAL;
+ }
+ } else if (sess->cipher.direction ==
+ RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+ if (EVP_DecryptInit_ex(sess->cipher.ctx,
+ sess->cipher.evp_algo,
+ NULL, xform->cipher.key.data,
+ NULL) != 1) {
+ return -EINVAL;
+ }
+ }
break;
@@ -319,6 +488,33 @@ openssl_set_session_cipher_parameters(struct openssl_session *sess,
sess->cipher.key.data) != 0)
return -EINVAL;
break;
+
+ case RTE_CRYPTO_CIPHER_DES_CBC:
+ sess->cipher.algo = xform->cipher.algo;
+ sess->cipher.ctx = EVP_CIPHER_CTX_new();
+ sess->cipher.evp_algo = EVP_des_cbc();
+
+ get_cipher_key(xform->cipher.key.data, sess->cipher.key.length,
+ sess->cipher.key.data);
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ if (EVP_EncryptInit_ex(sess->cipher.ctx,
+ sess->cipher.evp_algo,
+ NULL, xform->cipher.key.data,
+ NULL) != 1) {
+ return -EINVAL;
+ }
+ } else if (sess->cipher.direction ==
+ RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+ if (EVP_DecryptInit_ex(sess->cipher.ctx,
+ sess->cipher.evp_algo,
+ NULL, xform->cipher.key.data,
+ NULL) != 1) {
+ return -EINVAL;
+ }
+ }
+
+ break;
+
case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
sess->cipher.algo = xform->cipher.algo;
sess->chain_order = OPENSSL_CHAIN_CIPHER_BPI;
@@ -333,6 +529,23 @@ openssl_set_session_cipher_parameters(struct openssl_session *sess,
get_cipher_key(xform->cipher.key.data, sess->cipher.key.length,
sess->cipher.key.data);
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ if (EVP_EncryptInit_ex(sess->cipher.ctx,
+ sess->cipher.evp_algo,
+ NULL, xform->cipher.key.data,
+ NULL) != 1) {
+ return -EINVAL;
+ }
+ } else if (sess->cipher.direction ==
+ RTE_CRYPTO_CIPHER_OP_DECRYPT) {
+ if (EVP_DecryptInit_ex(sess->cipher.ctx,
+ sess->cipher.evp_algo,
+ NULL, xform->cipher.key.data,
+ NULL) != 1) {
+ return -EINVAL;
+ }
+ }
+
break;
default:
sess->cipher.algo = RTE_CRYPTO_CIPHER_NULL;
@@ -351,36 +564,31 @@ openssl_set_session_auth_parameters(struct openssl_session *sess,
sess->auth.operation = xform->auth.op;
sess->auth.algo = xform->auth.algo;
+ sess->auth.digest_length = xform->auth.digest_length;
+
/* Select auth algo */
switch (xform->auth.algo) {
case RTE_CRYPTO_AUTH_AES_GMAC:
- sess->chain_order = OPENSSL_CHAIN_COMBINED;
-
- /* Set IV parameters */
- sess->iv.offset = xform->auth.iv.offset;
- sess->iv.length = xform->auth.iv.length;
-
/*
* OpenSSL requires GMAC to be a GCM operation
* with no cipher data length
*/
- sess->cipher.mode = OPENSSL_CIPHER_LIB;
- if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_GENERATE)
- sess->cipher.direction = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
- else
- sess->cipher.direction = RTE_CRYPTO_CIPHER_OP_DECRYPT;
-
sess->cipher.key.length = xform->auth.key.length;
- sess->cipher.ctx = EVP_CIPHER_CTX_new();
- if (get_aead_algo(RTE_CRYPTO_AEAD_AES_GCM,
- sess->cipher.key.length,
- &sess->cipher.evp_algo) != 0)
- return -EINVAL;
-
- get_cipher_key(xform->auth.key.data, xform->auth.key.length,
- sess->cipher.key.data);
+ /* Set IV parameters */
+ sess->iv.offset = xform->auth.iv.offset;
+ sess->iv.length = xform->auth.iv.length;
+ if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_GENERATE)
+ return openssl_set_sess_aead_enc_param(sess,
+ RTE_CRYPTO_AEAD_AES_GCM,
+ xform->auth.digest_length,
+ xform->auth.key.data);
+ else
+ return openssl_set_sess_aead_dec_param(sess,
+ RTE_CRYPTO_AEAD_AES_GCM,
+ xform->auth.digest_length,
+ xform->auth.key.data);
break;
case RTE_CRYPTO_AUTH_MD5:
@@ -403,20 +611,22 @@ openssl_set_session_auth_parameters(struct openssl_session *sess,
case RTE_CRYPTO_AUTH_SHA384_HMAC:
case RTE_CRYPTO_AUTH_SHA512_HMAC:
sess->auth.mode = OPENSSL_AUTH_AS_HMAC;
- sess->auth.hmac.ctx = EVP_MD_CTX_create();
+ sess->auth.hmac.ctx = HMAC_CTX_new();
if (get_auth_algo(xform->auth.algo,
&sess->auth.hmac.evp_algo) != 0)
return -EINVAL;
- sess->auth.hmac.pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL,
- xform->auth.key.data, xform->auth.key.length);
+
+ if (HMAC_Init_ex(sess->auth.hmac.ctx,
+ xform->auth.key.data,
+ xform->auth.key.length,
+ sess->auth.hmac.evp_algo, NULL) != 1)
+ return -EINVAL;
break;
default:
return -ENOTSUP;
}
- sess->auth.digest_length = xform->auth.digest_length;
-
return 0;
}
@@ -425,43 +635,33 @@ static int
openssl_set_session_aead_parameters(struct openssl_session *sess,
const struct rte_crypto_sym_xform *xform)
{
- /* Select cipher direction */
- sess->cipher.direction = xform->cipher.op;
/* Select cipher key */
sess->cipher.key.length = xform->aead.key.length;
/* Set IV parameters */
- sess->iv.offset = xform->aead.iv.offset;
- sess->iv.length = xform->aead.iv.length;
-
- /* Select auth generate/verify */
- sess->auth.operation = xform->auth.op;
- sess->auth.algo = xform->auth.algo;
-
- /* Select auth algo */
- switch (xform->aead.algo) {
- case RTE_CRYPTO_AEAD_AES_GCM:
- sess->cipher.mode = OPENSSL_CIPHER_LIB;
- sess->aead_algo = xform->aead.algo;
- sess->cipher.ctx = EVP_CIPHER_CTX_new();
-
- if (get_aead_algo(sess->aead_algo, sess->cipher.key.length,
- &sess->cipher.evp_algo) != 0)
- return -EINVAL;
-
- get_cipher_key(xform->cipher.key.data, sess->cipher.key.length,
- sess->cipher.key.data);
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM)
+ /*
+ * For AES-CCM, the actual IV is placed
+ * one byte after the start of the IV field,
+ * according to the API.
+ */
+ sess->iv.offset = xform->aead.iv.offset + 1;
+ else
+ sess->iv.offset = xform->aead.iv.offset;
- sess->chain_order = OPENSSL_CHAIN_COMBINED;
- break;
- default:
- return -ENOTSUP;
- }
+ sess->iv.length = xform->aead.iv.length;
sess->auth.aad_length = xform->aead.aad_length;
sess->auth.digest_length = xform->aead.digest_length;
- return 0;
+ sess->aead_algo = xform->aead.algo;
+ /* Select cipher direction */
+ if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+ return openssl_set_sess_aead_enc_param(sess, xform->aead.algo,
+ xform->aead.digest_length, xform->aead.key.data);
+ else
+ return openssl_set_sess_aead_dec_param(sess, xform->aead.algo,
+ xform->aead.digest_length, xform->aead.key.data);
}
/** Parse crypto xform chain and set private session parameters */
@@ -547,7 +747,7 @@ openssl_reset_session(struct openssl_session *sess)
break;
case OPENSSL_AUTH_AS_HMAC:
EVP_PKEY_free(sess->auth.hmac.pkey);
- EVP_MD_CTX_destroy(sess->auth.hmac.ctx);
+ HMAC_CTX_free(sess->auth.hmac.ctx);
break;
default:
break;
@@ -693,12 +893,11 @@ process_openssl_decryption_update(struct rte_mbuf *mbuf_src, int offset,
/** Process standard openssl cipher encryption */
static int
process_openssl_cipher_encrypt(struct rte_mbuf *mbuf_src, uint8_t *dst,
- int offset, uint8_t *iv, uint8_t *key, int srclen,
- EVP_CIPHER_CTX *ctx, const EVP_CIPHER *algo)
+ int offset, uint8_t *iv, int srclen, EVP_CIPHER_CTX *ctx)
{
int totlen;
- if (EVP_EncryptInit_ex(ctx, algo, NULL, key, iv) <= 0)
+ if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
goto process_cipher_encrypt_err;
EVP_CIPHER_CTX_set_padding(ctx, 0);
@@ -743,12 +942,11 @@ process_cipher_encrypt_err:
/** Process standard openssl cipher decryption */
static int
process_openssl_cipher_decrypt(struct rte_mbuf *mbuf_src, uint8_t *dst,
- int offset, uint8_t *iv, uint8_t *key, int srclen,
- EVP_CIPHER_CTX *ctx, const EVP_CIPHER *algo)
+ int offset, uint8_t *iv, int srclen, EVP_CIPHER_CTX *ctx)
{
int totlen;
- if (EVP_DecryptInit_ex(ctx, algo, NULL, key, iv) <= 0)
+ if (EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
goto process_cipher_decrypt_err;
EVP_CIPHER_CTX_set_padding(ctx, 0);
@@ -823,23 +1021,16 @@ process_cipher_des3ctr_err:
return -EINVAL;
}
-/** Process auth/encription aes-gcm algorithm */
+/** Process AES-GCM encrypt algorithm */
static int
process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset,
- int srclen, uint8_t *aad, int aadlen, uint8_t *iv, int ivlen,
- uint8_t *key, uint8_t *dst, uint8_t *tag,
- EVP_CIPHER_CTX *ctx, const EVP_CIPHER *algo)
+ int srclen, uint8_t *aad, int aadlen, uint8_t *iv,
+ uint8_t *dst, uint8_t *tag, EVP_CIPHER_CTX *ctx)
{
int len = 0, unused = 0;
uint8_t empty[] = {};
- if (EVP_EncryptInit_ex(ctx, algo, NULL, NULL, NULL) <= 0)
- goto process_auth_encryption_gcm_err;
-
- if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_IVLEN, ivlen, NULL) <= 0)
- goto process_auth_encryption_gcm_err;
-
- if (EVP_EncryptInit_ex(ctx, NULL, NULL, key, iv) <= 0)
+ if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
goto process_auth_encryption_gcm_err;
if (aadlen > 0)
@@ -868,25 +1059,60 @@ process_auth_encryption_gcm_err:
return -EINVAL;
}
+/** Process AES-CCM encrypt algorithm */
+static int
+process_openssl_auth_encryption_ccm(struct rte_mbuf *mbuf_src, int offset,
+ int srclen, uint8_t *aad, int aadlen, uint8_t *iv,
+ uint8_t *dst, uint8_t *tag, uint8_t taglen, EVP_CIPHER_CTX *ctx)
+{
+ int len = 0;
+
+ if (EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
+ goto process_auth_encryption_ccm_err;
+
+ if (EVP_EncryptUpdate(ctx, NULL, &len, NULL, srclen) <= 0)
+ goto process_auth_encryption_ccm_err;
+
+ if (aadlen > 0)
+ /*
+ * For AES-CCM, the actual AAD is placed
+ * 18 bytes after the start of the AAD field,
+ * according to the API.
+ */
+ if (EVP_EncryptUpdate(ctx, NULL, &len, aad + 18, aadlen) <= 0)
+ goto process_auth_encryption_ccm_err;
+
+ if (srclen > 0)
+ if (process_openssl_encryption_update(mbuf_src, offset, &dst,
+ srclen, ctx))
+ goto process_auth_encryption_ccm_err;
+
+ if (EVP_EncryptFinal_ex(ctx, dst, &len) <= 0)
+ goto process_auth_encryption_ccm_err;
+
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_GET_TAG, taglen, tag) <= 0)
+ goto process_auth_encryption_ccm_err;
+
+ return 0;
+
+process_auth_encryption_ccm_err:
+ OPENSSL_LOG_ERR("Process openssl auth encryption ccm failed");
+ return -EINVAL;
+}
+
+/** Process AES-GCM decrypt algorithm */
static int
process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset,
- int srclen, uint8_t *aad, int aadlen, uint8_t *iv, int ivlen,
- uint8_t *key, uint8_t *dst, uint8_t *tag, EVP_CIPHER_CTX *ctx,
- const EVP_CIPHER *algo)
+ int srclen, uint8_t *aad, int aadlen, uint8_t *iv,
+ uint8_t *dst, uint8_t *tag, EVP_CIPHER_CTX *ctx)
{
int len = 0, unused = 0;
uint8_t empty[] = {};
- if (EVP_DecryptInit_ex(ctx, algo, NULL, NULL, NULL) <= 0)
- goto process_auth_decryption_gcm_err;
-
- if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_IVLEN, ivlen, NULL) <= 0)
- goto process_auth_decryption_gcm_err;
-
if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG, 16, tag) <= 0)
goto process_auth_decryption_gcm_err;
- if (EVP_DecryptInit_ex(ctx, NULL, NULL, key, iv) <= 0)
+ if (EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
goto process_auth_decryption_gcm_err;
if (aadlen > 0)
@@ -903,16 +1129,52 @@ process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset,
goto process_auth_decryption_gcm_err;
if (EVP_DecryptFinal_ex(ctx, dst, &len) <= 0)
- goto process_auth_decryption_gcm_final_err;
+ return -EFAULT;
return 0;
process_auth_decryption_gcm_err:
- OPENSSL_LOG_ERR("Process openssl auth description gcm failed");
+ OPENSSL_LOG_ERR("Process openssl auth decryption gcm failed");
return -EINVAL;
+}
-process_auth_decryption_gcm_final_err:
- return -EFAULT;
+/** Process AES-CCM decrypt algorithm */
+static int
+process_openssl_auth_decryption_ccm(struct rte_mbuf *mbuf_src, int offset,
+ int srclen, uint8_t *aad, int aadlen, uint8_t *iv,
+ uint8_t *dst, uint8_t *tag, uint8_t tag_len,
+ EVP_CIPHER_CTX *ctx)
+{
+ int len = 0;
+
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_SET_TAG, tag_len, tag) <= 0)
+ goto process_auth_decryption_ccm_err;
+
+ if (EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, iv) <= 0)
+ goto process_auth_decryption_ccm_err;
+
+ if (EVP_DecryptUpdate(ctx, NULL, &len, NULL, srclen) <= 0)
+ goto process_auth_decryption_ccm_err;
+
+ if (aadlen > 0)
+ /*
+ * For AES-CCM, the actual AAD is placed
+ * 18 bytes after the start of the AAD field,
+ * according to the API.
+ */
+ if (EVP_DecryptUpdate(ctx, NULL, &len, aad + 18, aadlen) <= 0)
+ goto process_auth_decryption_ccm_err;
+
+ if (srclen > 0)
+ if (process_openssl_decryption_update(mbuf_src, offset, &dst,
+ srclen, ctx))
+ return -EFAULT;
+
+ return 0;
+
+process_auth_decryption_ccm_err:
+ OPENSSL_LOG_ERR("Process openssl auth decryption ccm failed");
+ return -EINVAL;
}
/** Process standard openssl auth algorithms */
@@ -971,10 +1233,9 @@ process_auth_err:
/** Process standard openssl auth algorithms with hmac */
static int
process_openssl_auth_hmac(struct rte_mbuf *mbuf_src, uint8_t *dst, int offset,
- __rte_unused uint8_t *iv, EVP_PKEY *pkey,
- int srclen, EVP_MD_CTX *ctx, const EVP_MD *algo)
+ int srclen, HMAC_CTX *ctx)
{
- size_t dstlen;
+ unsigned int dstlen;
struct rte_mbuf *m;
int l, n = srclen;
uint8_t *src;
@@ -986,19 +1247,16 @@ process_openssl_auth_hmac(struct rte_mbuf *mbuf_src, uint8_t *dst, int offset,
if (m == 0)
goto process_auth_err;
- if (EVP_DigestSignInit(ctx, NULL, algo, NULL, pkey) <= 0)
- goto process_auth_err;
-
src = rte_pktmbuf_mtod_offset(m, uint8_t *, offset);
l = rte_pktmbuf_data_len(m) - offset;
if (srclen <= l) {
- if (EVP_DigestSignUpdate(ctx, (char *)src, srclen) <= 0)
+ if (HMAC_Update(ctx, (unsigned char *)src, srclen) != 1)
goto process_auth_err;
goto process_auth_final;
}
- if (EVP_DigestSignUpdate(ctx, (char *)src, l) <= 0)
+ if (HMAC_Update(ctx, (unsigned char *)src, l) != 1)
goto process_auth_err;
n -= l;
@@ -1006,13 +1264,16 @@ process_openssl_auth_hmac(struct rte_mbuf *mbuf_src, uint8_t *dst, int offset,
for (m = m->next; (m != NULL) && (n > 0); m = m->next) {
src = rte_pktmbuf_mtod(m, uint8_t *);
l = rte_pktmbuf_data_len(m) < n ? rte_pktmbuf_data_len(m) : n;
- if (EVP_DigestSignUpdate(ctx, (char *)src, l) <= 0)
+ if (HMAC_Update(ctx, (unsigned char *)src, l) != 1)
goto process_auth_err;
n -= l;
}
process_auth_final:
- if (EVP_DigestSignFinal(ctx, dst, &dstlen) <= 0)
+ if (HMAC_Final(ctx, dst, &dstlen) != 1)
+ goto process_auth_err;
+
+ if (unlikely(HMAC_Init_ex(ctx, NULL, 0, NULL, NULL) != 1))
goto process_auth_err;
return 0;
@@ -1032,8 +1293,9 @@ process_openssl_combined_op
{
/* cipher */
uint8_t *dst = NULL, *iv, *tag, *aad;
- int srclen, ivlen, aadlen, status = -1;
+ int srclen, aadlen, status = -1;
uint32_t offset;
+ uint8_t taglen;
/*
* Segmented destination buffer is not supported for
@@ -1046,7 +1308,6 @@ process_openssl_combined_op
iv = rte_crypto_op_ctod_offset(op, uint8_t *,
sess->iv.offset);
- ivlen = sess->iv.length;
if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC) {
srclen = 0;
offset = op->sym->auth.data.offset;
@@ -1070,18 +1331,34 @@ process_openssl_combined_op
offset + srclen);
}
- if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
- status = process_openssl_auth_encryption_gcm(
- mbuf_src, offset, srclen,
- aad, aadlen, iv, ivlen, sess->cipher.key.data,
- dst, tag, sess->cipher.ctx,
- sess->cipher.evp_algo);
- else
- status = process_openssl_auth_decryption_gcm(
- mbuf_src, offset, srclen,
- aad, aadlen, iv, ivlen, sess->cipher.key.data,
- dst, tag, sess->cipher.ctx,
- sess->cipher.evp_algo);
+ taglen = sess->auth.digest_length;
+
+ if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC ||
+ sess->aead_algo == RTE_CRYPTO_AEAD_AES_GCM)
+ status = process_openssl_auth_encryption_gcm(
+ mbuf_src, offset, srclen,
+ aad, aadlen, iv,
+ dst, tag, sess->cipher.ctx);
+ else
+ status = process_openssl_auth_encryption_ccm(
+ mbuf_src, offset, srclen,
+ aad, aadlen, iv,
+ dst, tag, taglen, sess->cipher.ctx);
+
+ } else {
+ if (sess->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC ||
+ sess->aead_algo == RTE_CRYPTO_AEAD_AES_GCM)
+ status = process_openssl_auth_decryption_gcm(
+ mbuf_src, offset, srclen,
+ aad, aadlen, iv,
+ dst, tag, sess->cipher.ctx);
+ else
+ status = process_openssl_auth_decryption_ccm(
+ mbuf_src, offset, srclen,
+ aad, aadlen, iv,
+ dst, tag, taglen, sess->cipher.ctx);
+ }
if (status != 0) {
if (status == (-EFAULT) &&
@@ -1122,15 +1399,11 @@ process_openssl_cipher_op
if (sess->cipher.direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
status = process_openssl_cipher_encrypt(mbuf_src, dst,
op->sym->cipher.data.offset, iv,
- sess->cipher.key.data, srclen,
- sess->cipher.ctx,
- sess->cipher.evp_algo);
+ srclen, sess->cipher.ctx);
else
status = process_openssl_cipher_decrypt(mbuf_src, dst,
op->sym->cipher.data.offset, iv,
- sess->cipher.key.data, srclen,
- sess->cipher.ctx,
- sess->cipher.evp_algo);
+ srclen, sess->cipher.ctx);
else
status = process_openssl_cipher_des3ctr(mbuf_src, dst,
op->sym->cipher.data.offset, iv,
@@ -1174,8 +1447,7 @@ process_openssl_docsis_bpi_op(struct rte_crypto_op *op,
/* Encrypt with the block aligned stream with CBC mode */
status = process_openssl_cipher_encrypt(mbuf_src, dst,
op->sym->cipher.data.offset, iv,
- sess->cipher.key.data, srclen,
- sess->cipher.ctx, sess->cipher.evp_algo);
+ srclen, sess->cipher.ctx);
if (last_block_len) {
/* Point at last block */
dst += srclen;
@@ -1225,9 +1497,7 @@ process_openssl_docsis_bpi_op(struct rte_crypto_op *op,
/* Decrypt with CBC mode */
status |= process_openssl_cipher_decrypt(mbuf_src, dst,
op->sym->cipher.data.offset, iv,
- sess->cipher.key.data, srclen,
- sess->cipher.ctx,
- sess->cipher.evp_algo);
+ srclen, sess->cipher.ctx);
}
}
@@ -1237,9 +1507,9 @@ process_openssl_docsis_bpi_op(struct rte_crypto_op *op,
/** Process auth operation */
static void
-process_openssl_auth_op
- (struct rte_crypto_op *op, struct openssl_session *sess,
- struct rte_mbuf *mbuf_src, struct rte_mbuf *mbuf_dst)
+process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op,
+ struct openssl_session *sess, struct rte_mbuf *mbuf_src,
+ struct rte_mbuf *mbuf_dst)
{
uint8_t *dst;
int srclen, status;
@@ -1247,8 +1517,7 @@ process_openssl_auth_op
srclen = op->sym->auth.data.length;
if (sess->auth.operation == RTE_CRYPTO_AUTH_OP_VERIFY)
- dst = (uint8_t *)rte_pktmbuf_append(mbuf_src,
- sess->auth.digest_length);
+ dst = qp->temp_digest;
else {
dst = op->sym->auth.digest.data;
if (dst == NULL)
@@ -1265,9 +1534,8 @@ process_openssl_auth_op
break;
case OPENSSL_AUTH_AS_HMAC:
status = process_openssl_auth_hmac(mbuf_src, dst,
- op->sym->auth.data.offset, NULL,
- sess->auth.hmac.pkey, srclen,
- sess->auth.hmac.ctx, sess->auth.hmac.evp_algo);
+ op->sym->auth.data.offset, srclen,
+ sess->auth.hmac.ctx);
break;
default:
status = -1;
@@ -1279,8 +1547,6 @@ process_openssl_auth_op
sess->auth.digest_length) != 0) {
op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
}
- /* Trim area used for digest from mbuf. */
- rte_pktmbuf_trim(mbuf_src, sess->auth.digest_length);
}
if (status != 0)
@@ -1289,7 +1555,7 @@ process_openssl_auth_op
/** Process crypto operation for mbuf */
static int
-process_op(const struct openssl_qp *qp, struct rte_crypto_op *op,
+process_op(struct openssl_qp *qp, struct rte_crypto_op *op,
struct openssl_session *sess)
{
struct rte_mbuf *msrc, *mdst;
@@ -1305,14 +1571,14 @@ process_op(const struct openssl_qp *qp, struct rte_crypto_op *op,
process_openssl_cipher_op(op, sess, msrc, mdst);
break;
case OPENSSL_CHAIN_ONLY_AUTH:
- process_openssl_auth_op(op, sess, msrc, mdst);
+ process_openssl_auth_op(qp, op, sess, msrc, mdst);
break;
case OPENSSL_CHAIN_CIPHER_AUTH:
process_openssl_cipher_op(op, sess, msrc, mdst);
- process_openssl_auth_op(op, sess, mdst, mdst);
+ process_openssl_auth_op(qp, op, sess, mdst, mdst);
break;
case OPENSSL_CHAIN_AUTH_CIPHER:
- process_openssl_auth_op(op, sess, msrc, mdst);
+ process_openssl_auth_op(qp, op, sess, msrc, mdst);
process_openssl_cipher_op(op, sess, msrc, mdst);
break;
case OPENSSL_CHAIN_COMBINED:
@@ -1401,19 +1667,12 @@ openssl_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
static int
cryptodev_openssl_create(const char *name,
struct rte_vdev_device *vdev,
- struct rte_crypto_vdev_init_params *init_params)
+ struct rte_cryptodev_pmd_init_params *init_params)
{
struct rte_cryptodev *dev;
struct openssl_private *internals;
- if (init_params->name[0] == '\0')
- snprintf(init_params->name, sizeof(init_params->name),
- "%s", name);
-
- dev = rte_cryptodev_vdev_pmd_init(init_params->name,
- sizeof(struct openssl_private),
- init_params->socket_id,
- vdev);
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
if (dev == NULL) {
OPENSSL_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
@@ -1451,11 +1710,12 @@ init_error:
static int
cryptodev_openssl_probe(struct rte_vdev_device *vdev)
{
- struct rte_crypto_vdev_init_params init_params = {
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct openssl_private),
rte_socket_id(),
- {0}
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
};
const char *name;
const char *input_args;
@@ -1465,17 +1725,7 @@ cryptodev_openssl_probe(struct rte_vdev_device *vdev)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
-
- RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
- init_params.socket_id);
- if (init_params.name[0] != '\0')
- RTE_LOG(INFO, PMD, " User defined name = %s\n",
- init_params.name);
- RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
- init_params.max_nb_queue_pairs);
- RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
- init_params.max_nb_sessions);
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
return cryptodev_openssl_create(name, vdev, &init_params);
}
@@ -1484,17 +1734,18 @@ cryptodev_openssl_probe(struct rte_vdev_device *vdev)
static int
cryptodev_openssl_remove(struct rte_vdev_device *vdev)
{
+ struct rte_cryptodev *cryptodev;
const char *name;
name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;
- RTE_LOG(INFO, PMD,
- "Closing OPENSSL crypto device %s on numa socket %u\n",
- name, rte_socket_id());
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
- return 0;
+ return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_vdev_driver cryptodev_openssl_pmd_drv = {
@@ -1502,10 +1753,13 @@ static struct rte_vdev_driver cryptodev_openssl_pmd_drv = {
.remove = cryptodev_openssl_remove
};
+static struct cryptodev_driver openssl_crypto_drv;
+
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_OPENSSL_PMD,
cryptodev_openssl_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_OPENSSL_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_openssl_pmd_drv, cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(openssl_crypto_drv, cryptodev_openssl_pmd_drv,
+ cryptodev_driver_id);
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
index 8cdd0b2e..c5722399 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd_ops.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd_ops.c
@@ -362,6 +362,36 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
}, }
}, }
},
+ { /* AES CCM */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
+ {.aead = {
+ .algo = RTE_CRYPTO_AEAD_AES_CCM,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 32,
+ .increment = 8
+ },
+ .digest_size = {
+ .min = 4,
+ .max = 16,
+ .increment = 2
+ },
+ .aad_size = {
+ .min = 0,
+ .max = 65535,
+ .increment = 1
+ },
+ .iv_size = {
+ .min = 7,
+ .max = 13,
+ .increment = 1
+ },
+ }, }
+ }, }
+ },
{ /* AES GMAC (AUTH) */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
@@ -427,6 +457,26 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = {
}, }
}, }
},
+ { /* DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
{ /* DES DOCSIS BPI */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
@@ -549,7 +599,7 @@ openssl_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
"openssl_pmd_%u_qp_%u",
dev->data->dev_id, qp->id);
- if (n > sizeof(qp->name))
+ if (n >= sizeof(qp->name))
return -1;
return 0;
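A note on the comparison above: C99 snprintf() returns the number of characters the formatted string would have contained, excluding the terminating NUL, so truncation occurred exactly when that value is greater than or equal to the buffer size; the previous test missed the boundary case n == sizeof(qp->name). A minimal standalone illustration (not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            char buf[8];
            /* "qp_12345" needs 8 characters plus a NUL, so it is truncated */
            int n = snprintf(buf, sizeof(buf), "qp_%u", 12345u);
            /* n == 8 == sizeof(buf): the old 'n > sizeof(buf)' is false,
             * while the corrected 'n >= sizeof(buf)' detects the truncation
             */
            return n >= (int)sizeof(buf);
    }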
diff --git a/drivers/crypto/openssl/rte_openssl_pmd_private.h b/drivers/crypto/openssl/rte_openssl_pmd_private.h
index b7f74752..26bf862c 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd_private.h
+++ b/drivers/crypto/openssl/rte_openssl_pmd_private.h
@@ -34,6 +34,7 @@
#define _OPENSSL_PMD_PRIVATE_H_
#include <openssl/evp.h>
+#include <openssl/hmac.h>
#include <openssl/des.h>
#define CRYPTODEV_NAME_OPENSSL_PMD crypto_openssl
@@ -59,6 +60,8 @@
#define OPENSSL_LOG_DBG(fmt, args...)
#endif
+/* Maximum length for digest (SHA-512 needs 64 bytes) */
+#define DIGEST_LENGTH_MAX 64
/** OPENSSL operation order mode enumerator */
enum openssl_chain_order {
@@ -103,6 +106,11 @@ struct openssl_qp {
/**< Session Mempool */
struct rte_cryptodev_stats stats;
/**< Queue pair statistics */
+ uint8_t temp_digest[DIGEST_LENGTH_MAX];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
} __rte_cache_aligned;
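The temp_digest buffer replaces the earlier approach of appending scratch space to the source mbuf while verifying a digest. A sketch of the verify path such a per-queue-pair buffer enables, where gen_digest is a hypothetical stand-in for the driver's digest computation:

    /* compute into per-qp scratch instead of growing the mbuf */
    gen_digest(qp->temp_digest, src, srclen);            /* hypothetical */
    if (memcmp(qp->temp_digest, op->sym->auth.digest.data, digest_len) != 0)
            op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;

This avoids an rte_pktmbuf_append()/rte_pktmbuf_trim() pair on every verify operation, which could also fail outright on an mbuf without tailroom.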
/** OPENSSL crypto private session structure */
@@ -164,7 +172,7 @@ struct openssl_session {
/**< pointer to EVP key */
const EVP_MD *evp_algo;
/**< pointer to EVP algorithm function */
- EVP_MD_CTX *ctx;
+ HMAC_CTX *ctx;
/**< pointer to HMAC context structure */
} hmac;
};
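Switching the session member from EVP_MD_CTX to HMAC_CTX lets the PMD key the context once at session setup and reuse it per operation. For reference, a minimal standalone HMAC-SHA256 sketch using this OpenSSL API (1.1.x allocation names):

    #include <stddef.h>
    #include <stdint.h>
    #include <openssl/evp.h>
    #include <openssl/hmac.h>

    static int
    hmac_sha256(const uint8_t *key, int key_len, const uint8_t *data,
                    size_t data_len, uint8_t *mac, unsigned int *mac_len)
    {
            HMAC_CTX *ctx = HMAC_CTX_new();

            if (ctx == NULL)
                    return -1;
            HMAC_Init_ex(ctx, key, key_len, EVP_sha256(), NULL);
            HMAC_Update(ctx, data, data_len);
            HMAC_Final(ctx, mac, mac_len);
            HMAC_CTX_free(ctx);
            return 0;
    }

Re-initializing with HMAC_Init_ex(ctx, NULL, 0, NULL, NULL) reuses the cached key schedule for the next message, which is what makes a per-session HMAC_CTX worthwhile.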
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile
index 7322ffe4..745d0197 100644
--- a/drivers/crypto/qat/Makefile
+++ b/drivers/crypto/qat/Makefile
@@ -43,6 +43,9 @@ CFLAGS += -O3
# external library include paths
CFLAGS += -I$(SRCDIR)/qat_adf
LDLIBS += -lcrypto
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_pci -lrte_bus_pci
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_crypto.c
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_hw.h b/drivers/crypto/qat/qat_adf/icp_qat_hw.h
index ebe245f6..d03688c7 100644
--- a/drivers/crypto/qat/qat_adf/icp_qat_hw.h
+++ b/drivers/crypto/qat/qat_adf/icp_qat_hw.h
@@ -301,6 +301,26 @@ enum icp_qat_hw_cipher_convert {
#define ICP_QAT_HW_CIPHER_MAX_KEY_SZ ICP_QAT_HW_AES_256_F8_KEY_SZ
+/* These defines describe the positions of the bit-fields
+ * in the flags byte of the CCM B0 block
+ */
+#define ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT 6
+#define ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT 3
+
+#define ICP_QAT_HW_CCM_BUILD_B0_FLAGS(Adata, t, q) \
+ ((((Adata) > 0 ? 1 : 0) << ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT) \
+ | ((((t) - 2) >> 1) << ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT) \
+ | ((q) - 1))
+
+#define ICP_QAT_HW_CCM_NQ_CONST 15
+#define ICP_QAT_HW_CCM_AAD_B0_LEN 16
+#define ICP_QAT_HW_CCM_AAD_LEN_INFO 2
+#define ICP_QAT_HW_CCM_AAD_DATA_OFFSET (ICP_QAT_HW_CCM_AAD_B0_LEN + \
+ ICP_QAT_HW_CCM_AAD_LEN_INFO)
+#define ICP_QAT_HW_CCM_AAD_ALIGNMENT 16
+#define ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE 4
+#define ICP_QAT_HW_CCM_NONCE_OFFSET 1
+
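Per the CCM specification (NIST SP 800-38C), the B0 flags byte packs the Adata indicator, the encoded tag length t (4..16, even) and the message-length-field size q, where n + q = 15 ties q (2..8) to the nonce length n (7..13). A worked evaluation of the macro above, assuming AAD present, a 16-byte tag and q = 2:

    /* illustrative only */
    uint8_t flags = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(1, 16, 2);
    /* = (1 << 6) | (((16 - 2) >> 1) << 3) | (2 - 1)
     * =  0x40    |  0x38                  |  0x01     = 0x79
     */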
struct icp_qat_hw_cipher_algo_blk {
struct icp_qat_hw_cipher_config cipher_config;
uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ];
diff --git a/drivers/crypto/qat/qat_adf/qat_algs.h b/drivers/crypto/qat/qat_adf/qat_algs.h
index 2c8e03c0..802ba95d 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs.h
+++ b/drivers/crypto/qat/qat_adf/qat_algs.h
@@ -104,8 +104,8 @@ struct qat_alg_buf_list {
struct qat_crypto_op_cookie {
struct qat_alg_buf_list qat_sgl_list_src;
struct qat_alg_buf_list qat_sgl_list_dst;
- phys_addr_t qat_sgl_src_phys_addr;
- phys_addr_t qat_sgl_dst_phys_addr;
+ rte_iova_t qat_sgl_src_phys_addr;
+ rte_iova_t qat_sgl_dst_phys_addr;
};
/* Common content descriptor */
@@ -124,7 +124,7 @@ struct qat_session {
void *bpi_ctx;
struct qat_alg_cd cd;
uint8_t *cd_cur_ptr;
- phys_addr_t cd_paddr;
+ rte_iova_t cd_paddr;
struct icp_qat_fw_la_bulk_req fw_req;
uint8_t aad_len;
struct qat_crypto_instance *inst;
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index 2d16c9e2..db6c9a32 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -124,6 +124,9 @@ static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg)
case ICP_QAT_HW_AUTH_ALGO_NULL:
return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ,
QAT_HW_DEFAULT_ALIGNMENT);
+ case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+ return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ,
+ QAT_HW_DEFAULT_ALIGNMENT);
case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
/* return maximum state1 size in this case */
return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ,
@@ -876,6 +879,31 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc,
ICP_QAT_HW_AUTH_ALGO_NULL);
state2_size = ICP_QAT_HW_NULL_STATE2_SZ;
break;
+ case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC:
+ qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
+ state1_size = qat_hash_get_state1_size(
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC);
+ state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ +
+ ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ;
+
+ if (aad_length > 0) {
+ aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN +
+ ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ auth_param->u2.aad_sz =
+ RTE_ALIGN_CEIL(aad_length,
+ ICP_QAT_HW_CCM_AAD_ALIGNMENT);
+ } else {
+ auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN;
+ }
+
+ cdesc->aad_len = aad_length;
+ hash->auth_counter.counter = 0;
+
+ hash_cd_ctrl->outer_prefix_sz = digestsize;
+ auth_param->hash_state_sz = digestsize;
+
+ memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen);
+ break;
case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9:
state1_size = qat_hash_get_state1_size(
ICP_QAT_HW_AUTH_ALGO_KASUMI_F9);
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 1f52cabf..a572967c 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -44,15 +44,12 @@
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_tailq.h>
-#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
@@ -60,7 +57,10 @@
#include <rte_spinlock.h>
#include <rte_hexdump.h>
#include <rte_crypto_sym.h>
-#include <rte_cryptodev_pci.h>
+#include <rte_byteorder.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+
#include <openssl/evp.h>
#include "qat_logs.h"
@@ -253,10 +253,21 @@ qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
/* AEAD */
if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ /* AES-GCM and AES-CCM work in opposite directions:
+ * GCM first encrypts and then generates the hash, whereas
+ * AES-CCM first generates the hash and then encrypts.
+ * A similar relation applies to decryption.
+ */
if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
- return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ else
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
else
- return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+ return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ else
+ return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
}
if (xform->next == NULL)
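The net effect of the selection above, which follows from GCM authenticating the ciphertext while CCM's CBC-MAC runs over the plaintext:

    GCM encrypt -> CIPHER_HASH   (encrypt, then hash the ciphertext)
    GCM decrypt -> HASH_CIPHER   (hash the ciphertext, then decrypt)
    CCM encrypt -> HASH_CIPHER   (MAC the plaintext, then encrypt)
    CCM decrypt -> CIPHER_HASH   (decrypt, then MAC the plaintext)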
@@ -516,7 +527,7 @@ qat_crypto_set_session_parameters(struct rte_cryptodev *dev,
PMD_INIT_FUNC_TRACE();
/* Set context descriptor physical address */
- session->cd_paddr = rte_mempool_virt2phy(NULL, session) +
+ session->cd_paddr = rte_mempool_virt2iova(session) +
offsetof(struct qat_session, cd);
session->min_qat_dev_gen = QAT_GEN1;
@@ -736,6 +747,7 @@ qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
struct qat_session *session)
{
struct rte_crypto_aead_xform *aead_xform = &xform->aead;
+ enum rte_crypto_auth_operation crypto_operation;
/*
* Store AEAD IV parameters as cipher IV,
@@ -755,21 +767,33 @@ qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
break;
case RTE_CRYPTO_AEAD_AES_CCM:
- PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported AEAD alg %u",
- aead_xform->algo);
- return -ENOTSUP;
+ if (qat_alg_validate_aes_key(aead_xform->key.length,
+ &session->qat_cipher_alg) != 0) {
+ PMD_DRV_LOG(ERR, "Invalid AES key size");
+ return -EINVAL;
+ }
+ session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+ session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC;
+ break;
default:
PMD_DRV_LOG(ERR, "Crypto: Undefined AEAD specified %u\n",
aead_xform->algo);
return -EINVAL;
}
- if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+ if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT &&
+ aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) ||
+ (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT &&
+ aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) {
session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
/*
* It needs to create cipher desc content first,
* then authentication
*/
+
+ crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
+ RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY;
+
if (qat_alg_aead_session_create_content_desc_cipher(session,
aead_xform->key.data,
aead_xform->key.length))
@@ -780,7 +804,7 @@ qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
aead_xform->key.length,
aead_xform->aad_length,
aead_xform->digest_length,
- RTE_CRYPTO_AUTH_OP_GENERATE))
+ crypto_operation))
return -EINVAL;
} else {
session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
@@ -788,12 +812,16 @@ qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform,
* It needs to create authentication desc content first,
* then cipher
*/
+
+ crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ?
+ RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE;
+
if (qat_alg_aead_session_create_content_desc_auth(session,
aead_xform->key.data,
aead_xform->key.length,
aead_xform->aad_length,
aead_xform->digest_length,
- RTE_CRYPTO_AUTH_OP_VERIFY))
+ crypto_operation))
return -EINVAL;
if (qat_alg_aead_session_create_content_desc_cipher(session,
@@ -923,6 +951,14 @@ qat_bpicipher_postprocess(struct qat_session *ctx,
return sym_op->cipher.data.length - last_block_len;
}
+static inline void txq_write_tail(struct qat_qp *qp, struct qat_queue *q)
+{
+ WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number,
+ q->hw_queue_number, q->tail);
+ q->nb_pending_requests = 0;
+ q->csr_tail = q->tail;
+}
+
uint16_t
qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
@@ -946,10 +982,10 @@ qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
tail = queue->tail;
/* Find how many can actually fit on the ring */
- overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_ops)
- - queue->max_inflights;
+ tmp_qp->inflights16 += nb_ops;
+ overflow = tmp_qp->inflights16 - queue->max_inflights;
if (overflow > 0) {
- rte_atomic16_sub(&tmp_qp->inflights16, overflow);
+ tmp_qp->inflights16 -= overflow;
nb_ops_possible = nb_ops - overflow;
if (nb_ops_possible == 0)
return 0;
@@ -964,8 +1000,7 @@ qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
* This message cannot be enqueued,
* decrease number of ops that wasn't sent
*/
- rte_atomic16_sub(&tmp_qp->inflights16,
- nb_ops_possible - nb_ops_sent);
+ tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent;
if (nb_ops_sent == 0)
return 0;
goto kick_tail;
@@ -976,26 +1011,59 @@ qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops,
cur_op++;
}
kick_tail:
- WRITE_CSR_RING_TAIL(tmp_qp->mmap_bar_addr, queue->hw_bundle_number,
- queue->hw_queue_number, tail);
queue->tail = tail;
tmp_qp->stats.enqueued_count += nb_ops_sent;
+ queue->nb_pending_requests += nb_ops_sent;
+ if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH ||
+ queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) {
+ txq_write_tail(tmp_qp, queue);
+ }
return nb_ops_sent;
}
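With txq_write_tail() factored out, the enqueue path no longer performs an MMIO tail write on every burst unconditionally. The resulting policy, using the thresholds added in qat_crypto.h: while fewer than QAT_CSR_TAIL_FORCE_WRITE_THRESH (256) ops are in flight, the tail CSR is still written on every burst to keep latency low; once the ring is busier than that, writes are coalesced until more than QAT_CSR_TAIL_WRITE_THRESH (32) requests have accumulated since the last write.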
+static inline
+void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q)
+{
+ uint32_t old_head, new_head;
+ uint32_t max_head;
+
+ old_head = q->csr_head;
+ new_head = q->head;
+ max_head = qp->nb_descriptors * q->msg_size;
+
+ /* write out free descriptors */
+ void *cur_desc = (uint8_t *)q->base_addr + old_head;
+
+ if (new_head < old_head) {
+ memset(cur_desc, ADF_RING_EMPTY_SIG, max_head - old_head);
+ memset(q->base_addr, ADF_RING_EMPTY_SIG, new_head);
+ } else {
+ memset(cur_desc, ADF_RING_EMPTY_SIG, new_head - old_head);
+ }
+ q->nb_processed_responses = 0;
+ q->csr_head = new_head;
+
+ /* write current head to CSR */
+ WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number,
+ q->hw_queue_number, new_head);
+}
+
uint16_t
qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
uint16_t nb_ops)
{
- struct qat_queue *queue;
+ struct qat_queue *rx_queue, *tx_queue;
struct qat_qp *tmp_qp = (struct qat_qp *)qp;
uint32_t msg_counter = 0;
struct rte_crypto_op *rx_op;
struct icp_qat_fw_comn_resp *resp_msg;
+ uint32_t head;
- queue = &(tmp_qp->rx_q);
+ rx_queue = &(tmp_qp->rx_q);
+ tx_queue = &(tmp_qp->tx_q);
+ head = rx_queue->head;
resp_msg = (struct icp_qat_fw_comn_resp *)
- ((uint8_t *)queue->base_addr + queue->head);
+ ((uint8_t *)rx_queue->base_addr + head);
while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG &&
msg_counter != nb_ops) {
@@ -1005,7 +1073,6 @@ qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX
rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg,
sizeof(struct icp_qat_fw_comn_resp));
-
#endif
if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
@@ -1022,23 +1089,26 @@ qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops,
rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
}
- *(uint32_t *)resp_msg = ADF_RING_EMPTY_SIG;
- queue->head = adf_modulo(queue->head +
- queue->msg_size,
- ADF_RING_SIZE_MODULO(queue->queue_size));
+ head = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo);
resp_msg = (struct icp_qat_fw_comn_resp *)
- ((uint8_t *)queue->base_addr +
- queue->head);
+ ((uint8_t *)rx_queue->base_addr + head);
*ops = rx_op;
ops++;
msg_counter++;
}
if (msg_counter > 0) {
- WRITE_CSR_RING_HEAD(tmp_qp->mmap_bar_addr,
- queue->hw_bundle_number,
- queue->hw_queue_number, queue->head);
- rte_atomic16_sub(&tmp_qp->inflights16, msg_counter);
+ rx_queue->head = head;
tmp_qp->stats.dequeued_count += msg_counter;
+ rx_queue->nb_processed_responses += msg_counter;
+ tmp_qp->inflights16 -= msg_counter;
+
+ if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH)
+ rxq_free_desc(tmp_qp, rx_queue);
+ }
+ /* also check if tail needs to be advanced */
+ if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH &&
+ tx_queue->tail != tx_queue->csr_tail) {
+ txq_write_tail(tmp_qp, tx_queue);
}
return msg_counter;
}
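The dequeue side mirrors this: the head CSR write is deferred until more than QAT_CSR_HEAD_WRITE_THRESH (32) responses have been processed, at which point rxq_free_desc() clears the consumed descriptors and publishes the new head; it also flushes a pending tail write once the inflight count falls to the force threshold. Note that rxq_free_desc() handles ring wrap-around: with, say, a 4096-byte ring, old csr_head 3968 and new head 128, it memsets [3968, 4096) and then [0, 128) with ADF_RING_EMPTY_SIG.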
@@ -1049,7 +1119,7 @@ qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
{
int nr = 1;
- uint32_t buf_len = rte_pktmbuf_mtophys(buf) -
+ uint32_t buf_len = rte_pktmbuf_iova(buf) -
buff_start + rte_pktmbuf_data_len(buf);
list->bufers[0].addr = buff_start;
@@ -1073,7 +1143,7 @@ qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start,
list->bufers[nr].len = rte_pktmbuf_data_len(buf);
list->bufers[nr].resrvd = 0;
- list->bufers[nr].addr = rte_pktmbuf_mtophys(buf);
+ list->bufers[nr].addr = rte_pktmbuf_iova(buf);
buf_len += list->bufers[nr].len;
buf = buf->next;
@@ -1112,6 +1182,29 @@ set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
}
}
+/** Setting the IV for CCM is a special case: byte 0 is set to q - 1,
+ * where q is the size in bytes of the message length field (q = 15 - nonce length)
+ */
+static inline void
+set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
+ struct icp_qat_fw_la_cipher_req_params *cipher_param,
+ struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
+{
+ rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ rte_crypto_op_ctod_offset(op, uint8_t *,
+ iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ iv_length);
+ *(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+ q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+ if (aad_len_field_sz)
+ rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
+ rte_crypto_op_ctod_offset(op, uint8_t *,
+ iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
+ iv_length);
+}
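The function above formats the QAT cipher IV as a CCM counter block: byte 0 carries q - 1 (the counter-block flags per NIST SP 800-38C), the nonce follows at offset 1, and the remaining q bytes hold the running block counter. For instance, with a 13-byte nonce, q = 15 - 13 = 2:

    byte 0       : 0x01     (q - 1)
    bytes 1..13  : nonce
    bytes 14..15 : block counter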
+
static inline int
qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp)
@@ -1156,6 +1249,8 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
return -EINVAL;
}
+
+
qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg;
rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req));
qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op;
@@ -1164,9 +1259,13 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER ||
ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
- /* AES-GCM */
+ /* AES-GCM or AES-CCM */
if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
- ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+ ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 ||
+ (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128
+ && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE
+ && ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) {
do_aead = 1;
} else {
do_auth = 1;
@@ -1273,6 +1372,11 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
}
if (do_aead) {
+ /*
+ * This address is used to set the AAD physical pointer; for CCM
+ * it may be redirected to the IV offset within the op
+ */
+ rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
if (ctx->qat_hash_alg ==
ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
ctx->qat_hash_alg ==
@@ -1286,6 +1390,87 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
}
+ set_cipher_iv(ctx->cipher_iv.length,
+ ctx->cipher_iv.offset,
+ cipher_param, op, qat_req);
+
+ } else if (ctx->qat_hash_alg ==
+ ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
+
+ /* In the case of AES-CCM this may point to user-selected memory
+ * or to the IV offset in the crypto_op
+ */
+ uint8_t *aad_data = op->sym->aead.aad.data;
+ /* This is the true AAD length; it does not include the 18 bytes
+ * of preceding data (B0 block plus the AAD length field)
+ */
+ uint8_t aad_ccm_real_len = 0;
+
+ uint8_t aad_len_field_sz = 0;
+ uint32_t msg_len_be =
+ rte_bswap32(op->sym->aead.data.length);
+
+ if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+ aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ aad_ccm_real_len = ctx->aad_len -
+ ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ICP_QAT_HW_CCM_AAD_LEN_INFO;
+ } else {
+ /*
+ * aad_len is not greater than 18, so there is no actual AAD
+ * data; use the IV following the op for the B0 block
+ */
+ aad_data = rte_crypto_op_ctod_offset(op, uint8_t *,
+ ctx->cipher_iv.offset);
+ aad_phys_addr_aead =
+ rte_crypto_op_ctophys_offset(op,
+ ctx->cipher_iv.offset);
+ }
+
+ uint8_t q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length;
+
+ aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(aad_len_field_sz,
+ ctx->digest_length, q);
+
+ if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET
+ + (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+ (uint8_t *)&msg_len_be,
+ ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+ } else {
+ memcpy(aad_data + ctx->cipher_iv.length +
+ ICP_QAT_HW_CCM_NONCE_OFFSET,
+ (uint8_t *)&msg_len_be
+ + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+ - q), q);
+ }
+
+ if (aad_len_field_sz > 0) {
+ *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
+ = rte_bswap16(aad_ccm_real_len);
+
+ if ((aad_ccm_real_len + aad_len_field_sz)
+ % ICP_QAT_HW_CCM_AAD_B0_LEN) {
+ uint8_t pad_len = 0;
+ uint8_t pad_idx = 0;
+
+ pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+ ((aad_ccm_real_len + aad_len_field_sz) %
+ ICP_QAT_HW_CCM_AAD_B0_LEN);
+ pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+ aad_ccm_real_len + aad_len_field_sz;
+ memset(&aad_data[pad_idx],
+ 0, pad_len);
+ }
+
+ }
+
+ set_cipher_iv_ccm(ctx->cipher_iv.length,
+ ctx->cipher_iv.offset,
+ cipher_param, op, q,
+ aad_len_field_sz);
+
}
cipher_len = op->sym->aead.data.length;
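Putting the construction above together, a worked layout of the AAD buffer, assuming a 13-byte nonce (q = 2), a 16-byte tag, 10 bytes of user AAD and a 256-byte message (so ctx->aad_len = 10 + 18 = 28, rounded up to an aad_sz of 32 at session setup):

    aad_data[0]      = 0x79          /* B0 flags: Adata=1, t=16, q=2 */
    aad_data[1..13]  = nonce         /* copied in by set_cipher_iv_ccm() */
    aad_data[14..15] = 0x01 0x00     /* message length 256, big endian */
    aad_data[16..17] = 0x00 0x0a     /* real AAD length 10, big endian */
    aad_data[18..27] = user AAD
    aad_data[28..31] = zero padding  /* pad to a 16-byte boundary */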
@@ -1293,10 +1478,8 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
auth_len = op->sym->aead.data.length;
auth_ofs = op->sym->aead.data.offset;
- auth_param->u1.aad_adr = op->sym->aead.aad.phys_addr;
+ auth_param->u1.aad_adr = aad_phys_addr_aead;
auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
- set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
- cipher_param, op, qat_req);
min_ofs = op->sym->aead.data.offset;
}
@@ -1316,26 +1499,26 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
* so as not to overwrite data in dest buffer
*/
src_buf_start =
- rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs);
+ rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
dst_buf_start =
- rte_pktmbuf_mtophys_offset(op->sym->m_dst, min_ofs);
+ rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
} else {
/* In-place operation
* Start DMA at nearest aligned address below min_ofs
*/
src_buf_start =
- rte_pktmbuf_mtophys_offset(op->sym->m_src, min_ofs)
+ rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
& QAT_64_BTYE_ALIGN_MASK;
- if (unlikely((rte_pktmbuf_mtophys(op->sym->m_src) -
+ if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
rte_pktmbuf_headroom(op->sym->m_src))
> src_buf_start)) {
/* alignment has pushed addr ahead of start of mbuf
* so revert and take the performance hit
*/
src_buf_start =
- rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ rte_pktmbuf_iova_offset(op->sym->m_src,
min_ofs);
}
dst_buf_start = src_buf_start;
@@ -1343,7 +1526,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
if (do_cipher || do_aead) {
cipher_param->cipher_offset =
- (uint32_t)rte_pktmbuf_mtophys_offset(
+ (uint32_t)rte_pktmbuf_iova_offset(
op->sym->m_src, cipher_ofs) - src_buf_start;
cipher_param->cipher_length = cipher_len;
} else {
@@ -1352,7 +1535,7 @@ qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg,
}
if (do_auth || do_aead) {
- auth_param->auth_off = (uint32_t)rte_pktmbuf_mtophys_offset(
+ auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset(
op->sym->m_src, auth_ofs) - src_buf_start;
auth_param->auth_len = auth_len;
} else {
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index 3f35a00e..c64d7756 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -50,6 +50,13 @@
(((num) + (align) - 1) & ~((align) - 1))
#define QAT_64_BTYE_ALIGN_MASK (~0x3f)
+#define QAT_CSR_HEAD_WRITE_THRESH 32U
+/* number of responses to accumulate before writing the head CSR */
+#define QAT_CSR_TAIL_WRITE_THRESH 32U
+/* number of requests to accumulate before writing the tail CSR */
+#define QAT_CSR_TAIL_FORCE_WRITE_THRESH 256U
+/* number of inflights below which tail writes are not coalesced */
+
struct qat_session;
enum qat_device_gen {
@@ -63,7 +70,7 @@ enum qat_device_gen {
struct qat_queue {
char memz_name[RTE_MEMZONE_NAMESIZE];
void *base_addr; /* Base address */
- phys_addr_t base_phys_addr; /* Queue physical address */
+ rte_iova_t base_phys_addr; /* Queue physical address */
uint32_t head; /* Shadow copy of the head */
uint32_t tail; /* Shadow copy of the tail */
uint32_t modulo;
@@ -73,11 +80,17 @@ struct qat_queue {
uint8_t hw_bundle_number;
uint8_t hw_queue_number;
/* HW queue aka ring offset on bundle */
+ uint32_t csr_head; /* last written head value */
+ uint32_t csr_tail; /* last written tail value */
+ uint16_t nb_processed_responses;
+ /* number of responses processed since last CSR head write */
+ uint16_t nb_pending_requests;
+ /* number of requests pending since last CSR tail write */
};
struct qat_qp {
void *mmap_bar_addr;
- rte_atomic16_t inflights16;
+ uint16_t inflights16;
struct qat_queue tx_q;
struct qat_queue rx_q;
struct rte_cryptodev_stats stats;
diff --git a/drivers/crypto/qat/qat_crypto_capabilities.h b/drivers/crypto/qat/qat_crypto_capabilities.h
index 70120072..89ba27d4 100644
--- a/drivers/crypto/qat/qat_crypto_capabilities.h
+++ b/drivers/crypto/qat/qat_crypto_capabilities.h
@@ -48,9 +48,9 @@
.increment = 1 \
}, \
.digest_size = { \
- .min = 20, \
+ .min = 1, \
.max = 20, \
- .increment = 0 \
+ .increment = 1 \
}, \
.iv_size = { 0 } \
}, } \
@@ -69,9 +69,9 @@
.increment = 1 \
}, \
.digest_size = { \
- .min = 28, \
+ .min = 1, \
.max = 28, \
- .increment = 0 \
+ .increment = 1 \
}, \
.iv_size = { 0 } \
}, } \
@@ -90,9 +90,9 @@
.increment = 1 \
}, \
.digest_size = { \
- .min = 32, \
+ .min = 1, \
.max = 32, \
- .increment = 0 \
+ .increment = 1 \
}, \
.iv_size = { 0 } \
}, } \
@@ -111,9 +111,9 @@
.increment = 1 \
}, \
.digest_size = { \
- .min = 48, \
+ .min = 1, \
.max = 48, \
- .increment = 0 \
+ .increment = 1 \
}, \
.iv_size = { 0 } \
}, } \
@@ -132,9 +132,9 @@
.increment = 1 \
}, \
.digest_size = { \
- .min = 64, \
+ .min = 1, \
.max = 64, \
- .increment = 0 \
+ .increment = 1 \
}, \
.iv_size = { 0 } \
}, } \
@@ -153,9 +153,9 @@
.increment = 1 \
}, \
.digest_size = { \
- .min = 16, \
+ .min = 1, \
.max = 16, \
- .increment = 0 \
+ .increment = 1 \
}, \
.iv_size = { 0 } \
}, } \
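These digest_size relaxations (minimum 1 byte in 1-byte increments, up to the full hash length) advertise support for truncated HMAC tags, which RFC 2104 permits and which protocols such as IPsec rely on (e.g. HMAC-SHA1-96).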
@@ -183,6 +183,36 @@
}, } \
}, } \
}, \
+ { /* AES CCM */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \
+ {.aead = { \
+ .algo = RTE_CRYPTO_AEAD_AES_CCM, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 4, \
+ .max = 16, \
+ .increment = 2 \
+ }, \
+ .aad_size = { \
+ .min = 0, \
+ .max = 224, \
+ .increment = 1 \
+ }, \
+ .iv_size = { \
+ .min = 7, \
+ .max = 13, \
+ .increment = 1 \
+ }, \
+ }, } \
+ }, } \
+ }, \
{ /* AES GCM */ \
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
{.sym = { \
diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c
index 5048d214..ced3aa6a 100644
--- a/drivers/crypto/qat/qat_qp.c
+++ b/drivers/crypto/qat/qat_qp.c
@@ -37,6 +37,7 @@
#include <rte_memzone.h>
#include <rte_cryptodev_pmd.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>
@@ -122,14 +123,9 @@ queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
break;
default:
memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
-}
-#ifdef RTE_LIBRTE_XEN_DOM0
- return rte_memzone_reserve_bounded(queue_name, queue_size,
- socket_id, 0, RTE_CACHE_LINE_SIZE, RTE_PGSIZE_2M);
-#else
+ }
return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id,
memzone_flags, queue_size);
-#endif
}
int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
@@ -186,7 +182,7 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
RTE_CACHE_LINE_SIZE);
qp->mmap_bar_addr = pci_dev->mem_resource[0].addr;
- rte_atomic16_init(&qp->inflights16);
+ qp->inflights16 = 0;
if (qat_tx_queue_create(dev, &(qp->tx_q),
queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) {
@@ -232,14 +228,12 @@ int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
qp->op_cookies[i];
sql_cookie->qat_sgl_src_phys_addr =
- rte_mempool_virt2phy(qp->op_cookie_pool,
- sql_cookie) +
+ rte_mempool_virt2iova(sql_cookie) +
offsetof(struct qat_crypto_op_cookie,
qat_sgl_list_src);
sql_cookie->qat_sgl_dst_phys_addr =
- rte_mempool_virt2phy(qp->op_cookie_pool,
- sql_cookie) +
+ rte_mempool_virt2iova(sql_cookie) +
offsetof(struct qat_crypto_op_cookie,
qat_sgl_list_dst);
}
@@ -269,7 +263,7 @@ int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
}
/* Don't free memory if there are still responses to be processed */
- if (rte_atomic16_read(&(qp->inflights16)) == 0) {
+ if (qp->inflights16 == 0) {
qat_queue_delete(&(qp->tx_q));
qat_queue_delete(&(qp->rx_q));
} else {
@@ -377,7 +371,7 @@ qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
}
queue->base_addr = (char *)qp_mz->addr;
- queue->base_phys_addr = qp_mz->phys_addr;
+ queue->base_phys_addr = qp_mz->iova;
if (qat_qp_check_queue_alignment(queue->base_phys_addr,
queue_size_bytes)) {
PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index 7d56fca4..4f8e4bfe 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2015-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,11 +31,12 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_malloc.h>
+#include <rte_pci.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_cryptodev_pci.h>
#include "qat_crypto.h"
#include "qat_logs.h"
@@ -97,15 +98,18 @@ static const struct rte_pci_id pci_id_qat_map[] = {
};
static int
-crypto_qat_dev_init(struct rte_cryptodev *cryptodev)
+crypto_qat_create(const char *name, struct rte_pci_device *pci_dev,
+ struct rte_cryptodev_pmd_init_params *init_params)
{
+ struct rte_cryptodev *cryptodev;
struct qat_pmd_private *internals;
PMD_INIT_FUNC_TRACE();
- PMD_DRV_LOG(DEBUG, "Found crypto device at %02x:%02x.%x",
- RTE_DEV_TO_PCI(cryptodev->device)->addr.bus,
- RTE_DEV_TO_PCI(cryptodev->device)->addr.devid,
- RTE_DEV_TO_PCI(cryptodev->device)->addr.function);
+
+ cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
+ init_params);
+ if (cryptodev == NULL)
+ return -ENODEV;
cryptodev->driver_id = cryptodev_qat_driver_id;
cryptodev->dev_ops = &crypto_qat_ops;
@@ -119,8 +123,8 @@ crypto_qat_dev_init(struct rte_cryptodev *cryptodev)
RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
internals = cryptodev->data->dev_private;
- internals->max_nb_sessions = RTE_QAT_PMD_MAX_NB_SESSIONS;
- switch (RTE_DEV_TO_PCI(cryptodev->device)->id.device_id) {
+ internals->max_nb_sessions = init_params->max_nb_sessions;
+ switch (pci_dev->id.device_id) {
case 0x0443:
internals->qat_dev_gen = QAT_GEN1;
internals->qat_dev_capabilities = qat_gen1_capabilities;
@@ -151,15 +155,43 @@ crypto_qat_dev_init(struct rte_cryptodev *cryptodev)
}
static int crypto_qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
- struct rte_pci_device *pci_dev)
+ struct rte_pci_device *pci_dev)
{
- return rte_cryptodev_pci_generic_probe(pci_dev,
- sizeof(struct qat_pmd_private), crypto_qat_dev_init);
+ struct rte_cryptodev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = rte_socket_id(),
+ .private_data_size = sizeof(struct qat_pmd_private),
+ .max_nb_sessions = RTE_QAT_PMD_MAX_NB_SESSIONS
+ };
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ PMD_DRV_LOG(DEBUG, "Found QAT device at %02x:%02x.%x",
+ pci_dev->addr.bus,
+ pci_dev->addr.devid,
+ pci_dev->addr.function);
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ return crypto_qat_create(name, pci_dev, &init_params);
}
static int crypto_qat_pci_remove(struct rte_pci_device *pci_dev)
{
- return rte_cryptodev_pci_generic_remove(pci_dev, NULL);
+ struct rte_cryptodev *cryptodev;
+ char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, cryptodev_name,
+ sizeof(cryptodev_name));
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ /* free crypto device */
+ return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_pci_driver rte_qat_pmd = {
@@ -169,6 +201,9 @@ static struct rte_pci_driver rte_qat_pmd = {
.remove = crypto_qat_pci_remove
};
+static struct cryptodev_driver qat_crypto_drv;
+
RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_QAT_SYM_PMD, rte_qat_pmd);
RTE_PMD_REGISTER_PCI_TABLE(CRYPTODEV_NAME_QAT_SYM_PMD, pci_id_qat_map);
-RTE_PMD_REGISTER_CRYPTO_DRIVER(rte_qat_pmd, cryptodev_qat_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv, rte_qat_pmd,
+ cryptodev_qat_driver_id);
diff --git a/drivers/crypto/scheduler/Makefile b/drivers/crypto/scheduler/Makefile
index b045410c..123b0f6d 100644
--- a/drivers/crypto/scheduler/Makefile
+++ b/drivers/crypto/scheduler/Makefile
@@ -36,6 +36,9 @@ LIB = librte_pmd_crypto_scheduler.a
# build flags
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev -lrte_kvargs -lrte_reorder
+LDLIBS += -lrte_bus_vdev
# library version
LIBABIVER := 1
diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c
index 400fc4f1..acdf6361 100644
--- a/drivers/crypto/scheduler/scheduler_pmd.c
+++ b/drivers/crypto/scheduler/scheduler_pmd.c
@@ -33,8 +33,7 @@
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_cryptodev_vdev.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include <rte_reorder.h>
@@ -45,7 +44,7 @@
uint8_t cryptodev_driver_id;
struct scheduler_init_params {
- struct rte_crypto_vdev_init_params def_p;
+ struct rte_cryptodev_pmd_init_params def_p;
uint32_t nb_slaves;
enum rte_cryptodev_scheduler_mode mode;
uint32_t enable_ordering;
@@ -107,21 +106,18 @@ cryptodev_scheduler_create(const char *name,
uint32_t i;
int ret;
- if (init_params->def_p.name[0] == '\0')
- snprintf(init_params->def_p.name,
- sizeof(init_params->def_p.name),
- "%s", name);
-
- dev = rte_cryptodev_vdev_pmd_init(init_params->def_p.name,
- sizeof(struct scheduler_ctx),
- init_params->def_p.socket_id,
- vdev);
+ dev = rte_cryptodev_pmd_create(name, &vdev->device,
+ &init_params->def_p);
if (dev == NULL) {
CS_LOG_ERR("driver %s: failed to create cryptodev vdev",
name);
return -EFAULT;
}
+ if (init_params->wcmask != 0)
+ RTE_LOG(INFO, PMD, " workers core mask = %"PRIx64"\n",
+ init_params->wcmask);
+
dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_crypto_scheduler_pmd_ops;
@@ -240,10 +236,7 @@ cryptodev_scheduler_remove(struct rte_vdev_device *vdev)
sched_ctx->slaves[i].dev_id);
}
- RTE_LOG(INFO, PMD, "Closing Crypto Scheduler device %s on numa "
- "socket %u\n", name, rte_socket_id());
-
- return 0;
+ return rte_cryptodev_pmd_destroy(dev);
}
/** Parse integer from integer argument */
@@ -304,7 +297,7 @@ static int
parse_name_arg(const char *key __rte_unused,
const char *value, void *extra_args)
{
- struct rte_crypto_vdev_init_params *params = extra_args;
+ struct rte_cryptodev_pmd_init_params *params = extra_args;
if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
CS_LOG_ERR("Invalid name %s, should be less than "
@@ -462,10 +455,11 @@ cryptodev_scheduler_probe(struct rte_vdev_device *vdev)
{
struct scheduler_init_params init_params = {
.def_p = {
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ "",
+ sizeof(struct scheduler_ctx),
rte_socket_id(),
- ""
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
},
.nb_slaves = 0,
.mode = CDEV_SCHED_MODE_NOT_SET,
@@ -481,19 +475,6 @@ cryptodev_scheduler_probe(struct rte_vdev_device *vdev)
scheduler_parse_init_params(&init_params,
rte_vdev_device_args(vdev));
- RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n",
- name,
- init_params.def_p.socket_id);
- RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
- init_params.def_p.max_nb_queue_pairs);
- RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
- init_params.def_p.max_nb_sessions);
- if (init_params.def_p.name[0] != '\0')
- RTE_LOG(INFO, PMD, " User defined name = %s\n",
- init_params.def_p.name);
- if (init_params.wcmask != 0)
- RTE_LOG(INFO, PMD, " workers core mask = %"PRIx64"\n",
- init_params.wcmask);
return cryptodev_scheduler_create(name,
vdev,
@@ -505,6 +486,8 @@ static struct rte_vdev_driver cryptodev_scheduler_pmd_drv = {
.remove = cryptodev_scheduler_remove
};
+static struct cryptodev_driver scheduler_crypto_drv;
+
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SCHEDULER_PMD,
cryptodev_scheduler_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SCHEDULER_PMD,
@@ -512,5 +495,6 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SCHEDULER_PMD,
"max_nb_sessions=<int> "
"socket_id=<int> "
"slave=<name>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_scheduler_pmd_drv,
+RTE_PMD_REGISTER_CRYPTO_DRIVER(scheduler_crypto_drv,
+ cryptodev_scheduler_pmd_drv,
cryptodev_driver_id);
diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c b/drivers/crypto/scheduler/scheduler_pmd_ops.c
index d3795346..d9b52352 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_ops.c
+++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c
@@ -31,13 +31,11 @@
*/
#include <string.h>
-#include <rte_config.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_cryptodev_vdev.h>
#include <rte_reorder.h>
#include "scheduler_pmd_private.h"
@@ -347,7 +345,7 @@ scheduler_pmd_info_get(struct rte_cryptodev *dev,
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
uint32_t max_nb_sessions = sched_ctx->nb_slaves ?
- UINT32_MAX : RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS;
+ UINT32_MAX : RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS;
uint32_t i;
if (!dev_info)
diff --git a/drivers/crypto/snow3g/Makefile b/drivers/crypto/snow3g/Makefile
index ecee80df..183c6ce9 100644
--- a/drivers/crypto/snow3g/Makefile
+++ b/drivers/crypto/snow3g/Makefile
@@ -54,6 +54,9 @@ CFLAGS += -I$(LIBSSO_SNOW3G_PATH)
CFLAGS += -I$(LIBSSO_SNOW3G_PATH)/include
CFLAGS += -I$(LIBSSO_SNOW3G_PATH)/build
LDLIBS += -L$(LIBSSO_SNOW3G_PATH)/build -lsso_snow3g
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += rte_snow3g_pmd.c
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c
index dad45068..4cc9a94f 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -31,19 +31,16 @@
*/
#include <rte_common.h>
-#include <rte_config.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_cryptodev_vdev.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include "rte_snow3g_pmd_private.h"
#define SNOW3G_IV_LENGTH 16
-#define SNOW3G_DIGEST_LENGTH 4
#define SNOW3G_MAX_BURST 8
#define BYTE_LEN 8
@@ -263,7 +260,7 @@ process_snow3g_cipher_op_bit(struct rte_crypto_op *op,
/** Generate/verify hash from mbufs with same hash key. */
static int
-process_snow3g_hash_op(struct rte_crypto_op **ops,
+process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops,
struct snow3g_session *session,
uint8_t num_ops)
{
@@ -289,8 +286,7 @@ process_snow3g_hash_op(struct rte_crypto_op **ops,
session->auth_iv_offset);
if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
- dst = (uint8_t *)rte_pktmbuf_append(ops[i]->sym->m_src,
- SNOW3G_DIGEST_LENGTH);
+ dst = qp->temp_digest;
sso_snow3g_f9_1_buffer(&session->pKeySched_hash,
iv, src,
@@ -299,10 +295,6 @@ process_snow3g_hash_op(struct rte_crypto_op **ops,
if (memcmp(dst, ops[i]->sym->auth.digest.data,
SNOW3G_DIGEST_LENGTH) != 0)
ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
- /* Trim area used for digest from mbuf. */
- rte_pktmbuf_trim(ops[i]->sym->m_src,
- SNOW3G_DIGEST_LENGTH);
} else {
dst = ops[i]->sym->auth.digest.data;
@@ -346,16 +338,16 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
session, num_ops);
break;
case SNOW3G_OP_ONLY_AUTH:
- processed_ops = process_snow3g_hash_op(ops, session,
+ processed_ops = process_snow3g_hash_op(qp, ops, session,
num_ops);
break;
case SNOW3G_OP_CIPHER_AUTH:
processed_ops = process_snow3g_cipher_op(ops, session,
num_ops);
- process_snow3g_hash_op(ops, session, processed_ops);
+ process_snow3g_hash_op(qp, ops, session, processed_ops);
break;
case SNOW3G_OP_AUTH_CIPHER:
- processed_ops = process_snow3g_hash_op(ops, session,
+ processed_ops = process_snow3g_hash_op(qp, ops, session,
num_ops);
process_snow3g_cipher_op(ops, session, processed_ops);
break;
@@ -403,15 +395,15 @@ process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session,
session);
break;
case SNOW3G_OP_ONLY_AUTH:
- processed_op = process_snow3g_hash_op(&op, session, 1);
+ processed_op = process_snow3g_hash_op(qp, &op, session, 1);
break;
case SNOW3G_OP_CIPHER_AUTH:
processed_op = process_snow3g_cipher_op_bit(op, session);
if (processed_op == 1)
- process_snow3g_hash_op(&op, session, 1);
+ process_snow3g_hash_op(qp, &op, session, 1);
break;
case SNOW3G_OP_AUTH_CIPHER:
- processed_op = process_snow3g_hash_op(&op, session, 1);
+ processed_op = process_snow3g_hash_op(qp, &op, session, 1);
if (processed_op == 1)
process_snow3g_cipher_op_bit(op, session);
break;
@@ -565,19 +557,13 @@ static int cryptodev_snow3g_remove(struct rte_vdev_device *vdev);
static int
cryptodev_snow3g_create(const char *name,
struct rte_vdev_device *vdev,
- struct rte_crypto_vdev_init_params *init_params)
+ struct rte_cryptodev_pmd_init_params *init_params)
{
struct rte_cryptodev *dev;
struct snow3g_private *internals;
uint64_t cpu_flags = RTE_CRYPTODEV_FF_CPU_SSE;
- if (init_params->name[0] == '\0')
- snprintf(init_params->name, sizeof(init_params->name),
- "%s", name);
-
- dev = rte_cryptodev_vdev_pmd_init(init_params->name,
- sizeof(struct snow3g_private), init_params->socket_id,
- vdev);
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
if (dev == NULL) {
SNOW3G_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
@@ -611,11 +597,12 @@ init_error:
static int
cryptodev_snow3g_probe(struct rte_vdev_device *vdev)
{
- struct rte_crypto_vdev_init_params init_params = {
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct snow3g_private),
rte_socket_id(),
- {0}
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
};
const char *name;
const char *input_args;
@@ -625,17 +612,7 @@ cryptodev_snow3g_probe(struct rte_vdev_device *vdev)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
-
- RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
- init_params.socket_id);
- if (init_params.name[0] != '\0')
- RTE_LOG(INFO, PMD, " User defined name = %s\n",
- init_params.name);
- RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
- init_params.max_nb_queue_pairs);
- RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
- init_params.max_nb_sessions);
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
return cryptodev_snow3g_create(name, vdev, &init_params);
}
@@ -643,17 +620,18 @@ cryptodev_snow3g_probe(struct rte_vdev_device *vdev)
static int
cryptodev_snow3g_remove(struct rte_vdev_device *vdev)
{
+ struct rte_cryptodev *cryptodev;
const char *name;
name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;
- RTE_LOG(INFO, PMD, "Closing SNOW 3G crypto device %s"
- " on numa socket %u\n",
- name, rte_socket_id());
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
- return 0;
+ return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_vdev_driver cryptodev_snow3g_pmd_drv = {
@@ -661,10 +639,13 @@ static struct rte_vdev_driver cryptodev_snow3g_pmd_drv = {
.remove = cryptodev_snow3g_remove
};
+static struct cryptodev_driver snow3g_crypto_drv;
+
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_snow3g_pmd_drv, cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(snow3g_crypto_drv, cryptodev_snow3g_pmd_drv,
+ cryptodev_driver_id);
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
index ae9569c8..157a2de3 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c
@@ -184,7 +184,7 @@ snow3g_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
"snow3g_pmd_%u_qp_%u",
dev->data->dev_id, qp->id);
- if (n > sizeof(qp->name))
+ if (n >= sizeof(qp->name))
return -1;
return 0;
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd_private.h b/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
index fba3cb86..7b9729f0 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd_private.h
@@ -58,6 +58,8 @@
#define SNOW3G_LOG_DBG(fmt, args...)
#endif
+#define SNOW3G_DIGEST_LENGTH 4
+
/** private data structure for each virtual SNOW 3G device */
struct snow3g_private {
unsigned max_nb_queue_pairs;
@@ -78,6 +80,11 @@ struct snow3g_qp {
/**< Session Mempool */
struct rte_cryptodev_stats qp_stats;
/**< Queue pair statistics */
+ uint8_t temp_digest[SNOW3G_DIGEST_LENGTH];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
} __rte_cache_aligned;
enum snow3g_operation {
diff --git a/drivers/crypto/zuc/Makefile b/drivers/crypto/zuc/Makefile
index f543b407..af77bc8a 100644
--- a/drivers/crypto/zuc/Makefile
+++ b/drivers/crypto/zuc/Makefile
@@ -54,6 +54,9 @@ CFLAGS += -I$(LIBSSO_ZUC_PATH)
CFLAGS += -I$(LIBSSO_ZUC_PATH)/include
CFLAGS += -I$(LIBSSO_ZUC_PATH)/build
LDLIBS += -L$(LIBSSO_ZUC_PATH)/build -lsso_zuc
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
# library source files
SRCS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += rte_zuc_pmd.c
diff --git a/drivers/crypto/zuc/rte_zuc_pmd.c b/drivers/crypto/zuc/rte_zuc_pmd.c
index b301711e..590224bf 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd.c
+++ b/drivers/crypto/zuc/rte_zuc_pmd.c
@@ -31,18 +31,15 @@
*/
#include <rte_common.h>
-#include <rte_config.h>
#include <rte_hexdump.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
-#include <rte_cryptodev_vdev.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_malloc.h>
#include <rte_cpuflags.h>
#include "rte_zuc_pmd_private.h"
-#define ZUC_DIGEST_LENGTH 4
#define ZUC_MAX_BURST 8
#define BYTE_LEN 8
@@ -258,7 +255,7 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
/** Generate/verify hash from mbufs with same hash key. */
static int
-process_zuc_hash_op(struct rte_crypto_op **ops,
+process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
struct zuc_session *session,
uint8_t num_ops)
{
@@ -285,8 +282,7 @@ process_zuc_hash_op(struct rte_crypto_op **ops,
session->auth_iv_offset);
if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
- dst = (uint32_t *)rte_pktmbuf_append(ops[i]->sym->m_src,
- ZUC_DIGEST_LENGTH);
+ dst = (uint32_t *)qp->temp_digest;
sso_zuc_eia3_1_buffer(session->pKey_hash,
iv, src,
@@ -295,10 +291,6 @@ process_zuc_hash_op(struct rte_crypto_op **ops,
if (memcmp(dst, ops[i]->sym->auth.digest.data,
ZUC_DIGEST_LENGTH) != 0)
ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
-
- /* Trim area used for digest from mbuf. */
- rte_pktmbuf_trim(ops[i]->sym->m_src,
- ZUC_DIGEST_LENGTH);
} else {
dst = (uint32_t *)ops[i]->sym->auth.digest.data;
@@ -327,16 +319,16 @@ process_ops(struct rte_crypto_op **ops, struct zuc_session *session,
session, num_ops);
break;
case ZUC_OP_ONLY_AUTH:
- processed_ops = process_zuc_hash_op(ops, session,
+ processed_ops = process_zuc_hash_op(qp, ops, session,
num_ops);
break;
case ZUC_OP_CIPHER_AUTH:
processed_ops = process_zuc_cipher_op(ops, session,
num_ops);
- process_zuc_hash_op(ops, session, processed_ops);
+ process_zuc_hash_op(qp, ops, session, processed_ops);
break;
case ZUC_OP_AUTH_CIPHER:
- processed_ops = process_zuc_hash_op(ops, session,
+ processed_ops = process_zuc_hash_op(qp, ops, session,
num_ops);
process_zuc_cipher_op(ops, session, processed_ops);
break;
@@ -469,19 +461,14 @@ static int cryptodev_zuc_remove(struct rte_vdev_device *vdev);
static int
cryptodev_zuc_create(const char *name,
struct rte_vdev_device *vdev,
- struct rte_crypto_vdev_init_params *init_params)
+ struct rte_cryptodev_pmd_init_params *init_params)
{
struct rte_cryptodev *dev;
struct zuc_private *internals;
uint64_t cpu_flags = RTE_CRYPTODEV_FF_CPU_SSE;
- if (init_params->name[0] == '\0')
- snprintf(init_params->name, sizeof(init_params->name),
- "%s", name);
- dev = rte_cryptodev_vdev_pmd_init(init_params->name,
- sizeof(struct zuc_private), init_params->socket_id,
- vdev);
+ dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
if (dev == NULL) {
ZUC_LOG_ERR("failed to create cryptodev vdev");
goto init_error;
@@ -515,11 +502,12 @@ init_error:
static int
cryptodev_zuc_probe(struct rte_vdev_device *vdev)
{
- struct rte_crypto_vdev_init_params init_params = {
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS,
+ struct rte_cryptodev_pmd_init_params init_params = {
+ "",
+ sizeof(struct zuc_private),
rte_socket_id(),
- {0}
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
};
const char *name;
const char *input_args;
@@ -529,17 +517,7 @@ cryptodev_zuc_probe(struct rte_vdev_device *vdev)
return -EINVAL;
input_args = rte_vdev_device_args(vdev);
- rte_cryptodev_vdev_parse_init_params(&init_params, input_args);
-
- RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
- init_params.socket_id);
- if (init_params.name[0] != '\0')
- RTE_LOG(INFO, PMD, " User defined name = %s\n",
- init_params.name);
- RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n",
- init_params.max_nb_queue_pairs);
- RTE_LOG(INFO, PMD, " Max number of sessions = %d\n",
- init_params.max_nb_sessions);
+ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
return cryptodev_zuc_create(name, vdev, &init_params);
}
@@ -547,17 +525,19 @@ cryptodev_zuc_probe(struct rte_vdev_device *vdev)
static int
cryptodev_zuc_remove(struct rte_vdev_device *vdev)
{
+
+ struct rte_cryptodev *cryptodev;
const char *name;
name = rte_vdev_device_name(vdev);
if (name == NULL)
return -EINVAL;
- RTE_LOG(INFO, PMD, "Closing ZUC crypto device %s"
- " on numa socket %u\n",
- name, rte_socket_id());
+ cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+ if (cryptodev == NULL)
+ return -ENODEV;
- return 0;
+ return rte_cryptodev_pmd_destroy(cryptodev);
}
static struct rte_vdev_driver cryptodev_zuc_pmd_drv = {
@@ -565,9 +545,12 @@ static struct rte_vdev_driver cryptodev_zuc_pmd_drv = {
.remove = cryptodev_zuc_remove
};
+static struct cryptodev_driver zuc_crypto_drv;
+
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ZUC_PMD, cryptodev_zuc_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ZUC_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(cryptodev_zuc_pmd_drv, cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(zuc_crypto_drv, cryptodev_zuc_pmd_drv,
+ cryptodev_driver_id);
diff --git a/drivers/crypto/zuc/rte_zuc_pmd_ops.c b/drivers/crypto/zuc/rte_zuc_pmd_ops.c
index 52c6aed8..243c0991 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd_ops.c
+++ b/drivers/crypto/zuc/rte_zuc_pmd_ops.c
@@ -184,7 +184,7 @@ zuc_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
"zuc_pmd_%u_qp_%u",
dev->data->dev_id, qp->id);
- if (n > sizeof(qp->name))
+ if (n >= sizeof(qp->name))
return -1;
return 0;
diff --git a/drivers/crypto/zuc/rte_zuc_pmd_private.h b/drivers/crypto/zuc/rte_zuc_pmd_private.h
index b706e0aa..a57b8cd0 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd_private.h
+++ b/drivers/crypto/zuc/rte_zuc_pmd_private.h
@@ -59,6 +59,8 @@
#endif
#define ZUC_IV_KEY_LENGTH 16
+#define ZUC_DIGEST_LENGTH 4
+
/** private data structure for each virtual ZUC device */
struct zuc_private {
unsigned max_nb_queue_pairs;
@@ -79,6 +81,11 @@ struct zuc_qp {
/**< Session Mempool */
struct rte_cryptodev_stats qp_stats;
/**< Queue pair statistics */
+ uint8_t temp_digest[ZUC_DIGEST_LENGTH];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
} __rte_cache_aligned;
enum zuc_operation {