From b63264c8342e6a1b6971c79550d2af2024b6a4de Mon Sep 17 00:00:00 2001 From: Luca Boccassi Date: Tue, 14 Aug 2018 18:52:30 +0100 Subject: New upstream version 18.08 Change-Id: I32fdf5e5016556d9c0a6d88ddaf1fc468961790a Signed-off-by: Luca Boccassi --- drivers/crypto/Makefile | 9 +- drivers/crypto/aesni_gcm/Makefile | 10 +- drivers/crypto/aesni_gcm/aesni_gcm_ops.h | 3 +- drivers/crypto/aesni_gcm/aesni_gcm_pmd.c | 53 +- drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c | 54 +- drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h | 36 +- drivers/crypto/aesni_mb/Makefile | 10 +- drivers/crypto/aesni_mb/aesni_mb_ops.h | 31 +- drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 178 +- drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 116 +- drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 78 +- drivers/crypto/armv8/rte_armv8_pmd.c | 15 +- drivers/crypto/armv8/rte_armv8_pmd_ops.c | 47 +- drivers/crypto/armv8/rte_armv8_pmd_private.h | 2 - drivers/crypto/ccp/Makefile | 35 + drivers/crypto/ccp/ccp_crypto.c | 2951 ++++++++++++++++++++ drivers/crypto/ccp/ccp_crypto.h | 388 +++ drivers/crypto/ccp/ccp_dev.c | 810 ++++++ drivers/crypto/ccp/ccp_dev.h | 495 ++++ drivers/crypto/ccp/ccp_pci.c | 236 ++ drivers/crypto/ccp/ccp_pci.h | 27 + drivers/crypto/ccp/ccp_pmd_ops.c | 833 ++++++ drivers/crypto/ccp/ccp_pmd_private.h | 107 + drivers/crypto/ccp/meson.build | 21 + drivers/crypto/ccp/rte_ccp_pmd.c | 397 +++ drivers/crypto/ccp/rte_pmd_ccp_version.map | 4 + drivers/crypto/dpaa2_sec/Makefile | 5 - drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 564 ++-- drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h | 62 +- drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h | 28 +- drivers/crypto/dpaa2_sec/meson.build | 14 + drivers/crypto/dpaa_sec/Makefile | 5 - drivers/crypto/dpaa_sec/dpaa_sec.c | 412 ++- drivers/crypto/dpaa_sec/dpaa_sec.h | 36 +- drivers/crypto/dpaa_sec/dpaa_sec_log.h | 65 +- drivers/crypto/dpaa_sec/meson.build | 13 + drivers/crypto/kasumi/rte_kasumi_pmd.c | 50 +- drivers/crypto/kasumi/rte_kasumi_pmd_ops.c | 53 +- drivers/crypto/kasumi/rte_kasumi_pmd_private.h | 28 +- drivers/crypto/meson.build | 4 +- drivers/crypto/mrvl/Makefile | 67 - drivers/crypto/mrvl/rte_mrvl_compat.h | 51 - drivers/crypto/mrvl/rte_mrvl_pmd.c | 857 ------ drivers/crypto/mrvl/rte_mrvl_pmd_ops.c | 778 ------ drivers/crypto/mrvl/rte_mrvl_pmd_private.h | 123 - drivers/crypto/mrvl/rte_pmd_mrvl_version.map | 3 - drivers/crypto/mvsam/Makefile | 42 + drivers/crypto/mvsam/meson.build | 21 + drivers/crypto/mvsam/rte_mrvl_compat.h | 23 + drivers/crypto/mvsam/rte_mrvl_pmd.c | 937 +++++++ drivers/crypto/mvsam/rte_mrvl_pmd_ops.c | 722 +++++ drivers/crypto/mvsam/rte_mrvl_pmd_private.h | 95 + drivers/crypto/mvsam/rte_pmd_mvsam_version.map | 3 + drivers/crypto/null/null_crypto_pmd.c | 28 +- drivers/crypto/null/null_crypto_pmd_ops.c | 68 +- drivers/crypto/null/null_crypto_pmd_private.h | 24 +- drivers/crypto/openssl/compat.h | 108 + drivers/crypto/openssl/rte_openssl_pmd.c | 527 +++- drivers/crypto/openssl/rte_openssl_pmd_ops.c | 581 +++- drivers/crypto/openssl/rte_openssl_pmd_private.h | 55 +- drivers/crypto/qat/Makefile | 35 - drivers/crypto/qat/README | 7 + drivers/crypto/qat/meson.build | 24 +- .../qat/qat_adf/adf_transport_access_macros.h | 176 -- drivers/crypto/qat/qat_adf/icp_qat_fw.h | 316 --- drivers/crypto/qat/qat_adf/icp_qat_fw_la.h | 404 --- drivers/crypto/qat/qat_adf/icp_qat_hw.h | 329 --- drivers/crypto/qat/qat_adf/qat_algs.h | 169 -- drivers/crypto/qat/qat_adf/qat_algs_build_desc.c | 1059 ------- drivers/crypto/qat/qat_crypto.c | 1696 ----------- 
drivers/crypto/qat/qat_crypto.h | 150 - drivers/crypto/qat/qat_crypto_capabilities.h | 557 ---- drivers/crypto/qat/qat_logs.h | 49 - drivers/crypto/qat/qat_qp.c | 470 ---- drivers/crypto/qat/qat_sym.c | 569 ++++ drivers/crypto/qat/qat_sym.h | 174 ++ drivers/crypto/qat/qat_sym_capabilities.h | 557 ++++ drivers/crypto/qat/qat_sym_pmd.c | 331 +++ drivers/crypto/qat/qat_sym_pmd.h | 41 + drivers/crypto/qat/qat_sym_session.c | 1725 ++++++++++++ drivers/crypto/qat/qat_sym_session.h | 145 + drivers/crypto/qat/rte_pmd_qat_version.map | 3 - drivers/crypto/qat/rte_qat_cryptodev.c | 180 -- drivers/crypto/scheduler/rte_cryptodev_scheduler.c | 101 +- drivers/crypto/scheduler/rte_cryptodev_scheduler.h | 3 +- drivers/crypto/scheduler/scheduler_failover.c | 4 +- drivers/crypto/scheduler/scheduler_multicore.c | 60 +- .../crypto/scheduler/scheduler_pkt_size_distr.c | 18 +- drivers/crypto/scheduler/scheduler_pmd.c | 182 +- drivers/crypto/scheduler/scheduler_pmd_ops.c | 101 +- drivers/crypto/scheduler/scheduler_pmd_private.h | 26 +- drivers/crypto/scheduler/scheduler_roundrobin.c | 2 +- drivers/crypto/snow3g/rte_snow3g_pmd.c | 42 +- drivers/crypto/snow3g/rte_snow3g_pmd_ops.c | 51 +- drivers/crypto/snow3g/rte_snow3g_pmd_private.h | 30 +- drivers/crypto/virtio/Makefile | 35 + drivers/crypto/virtio/meson.build | 8 + .../virtio/rte_pmd_virtio_crypto_version.map | 3 + drivers/crypto/virtio/virtio_crypto_algs.h | 28 + drivers/crypto/virtio/virtio_crypto_capabilities.h | 51 + drivers/crypto/virtio/virtio_cryptodev.c | 1505 ++++++++++ drivers/crypto/virtio/virtio_cryptodev.h | 64 + drivers/crypto/virtio/virtio_logs.h | 91 + drivers/crypto/virtio/virtio_pci.c | 462 +++ drivers/crypto/virtio/virtio_pci.h | 253 ++ drivers/crypto/virtio/virtio_ring.h | 137 + drivers/crypto/virtio/virtio_rxtx.c | 527 ++++ drivers/crypto/virtio/virtqueue.c | 43 + drivers/crypto/virtio/virtqueue.h | 171 ++ drivers/crypto/zuc/rte_zuc_pmd.c | 152 +- drivers/crypto/zuc/rte_zuc_pmd_ops.c | 52 +- drivers/crypto/zuc/rte_zuc_pmd_private.h | 29 +- 112 files changed, 17814 insertions(+), 8981 deletions(-) create mode 100644 drivers/crypto/ccp/Makefile create mode 100644 drivers/crypto/ccp/ccp_crypto.c create mode 100644 drivers/crypto/ccp/ccp_crypto.h create mode 100644 drivers/crypto/ccp/ccp_dev.c create mode 100644 drivers/crypto/ccp/ccp_dev.h create mode 100644 drivers/crypto/ccp/ccp_pci.c create mode 100644 drivers/crypto/ccp/ccp_pci.h create mode 100644 drivers/crypto/ccp/ccp_pmd_ops.c create mode 100644 drivers/crypto/ccp/ccp_pmd_private.h create mode 100644 drivers/crypto/ccp/meson.build create mode 100644 drivers/crypto/ccp/rte_ccp_pmd.c create mode 100644 drivers/crypto/ccp/rte_pmd_ccp_version.map create mode 100644 drivers/crypto/dpaa2_sec/meson.build create mode 100644 drivers/crypto/dpaa_sec/meson.build delete mode 100644 drivers/crypto/mrvl/Makefile delete mode 100644 drivers/crypto/mrvl/rte_mrvl_compat.h delete mode 100644 drivers/crypto/mrvl/rte_mrvl_pmd.c delete mode 100644 drivers/crypto/mrvl/rte_mrvl_pmd_ops.c delete mode 100644 drivers/crypto/mrvl/rte_mrvl_pmd_private.h delete mode 100644 drivers/crypto/mrvl/rte_pmd_mrvl_version.map create mode 100644 drivers/crypto/mvsam/Makefile create mode 100644 drivers/crypto/mvsam/meson.build create mode 100644 drivers/crypto/mvsam/rte_mrvl_compat.h create mode 100644 drivers/crypto/mvsam/rte_mrvl_pmd.c create mode 100644 drivers/crypto/mvsam/rte_mrvl_pmd_ops.c create mode 100644 drivers/crypto/mvsam/rte_mrvl_pmd_private.h create mode 100644 drivers/crypto/mvsam/rte_pmd_mvsam_version.map 
create mode 100644 drivers/crypto/openssl/compat.h delete mode 100644 drivers/crypto/qat/Makefile create mode 100644 drivers/crypto/qat/README delete mode 100644 drivers/crypto/qat/qat_adf/adf_transport_access_macros.h delete mode 100644 drivers/crypto/qat/qat_adf/icp_qat_fw.h delete mode 100644 drivers/crypto/qat/qat_adf/icp_qat_fw_la.h delete mode 100644 drivers/crypto/qat/qat_adf/icp_qat_hw.h delete mode 100644 drivers/crypto/qat/qat_adf/qat_algs.h delete mode 100644 drivers/crypto/qat/qat_adf/qat_algs_build_desc.c delete mode 100644 drivers/crypto/qat/qat_crypto.c delete mode 100644 drivers/crypto/qat/qat_crypto.h delete mode 100644 drivers/crypto/qat/qat_crypto_capabilities.h delete mode 100644 drivers/crypto/qat/qat_logs.h delete mode 100644 drivers/crypto/qat/qat_qp.c create mode 100644 drivers/crypto/qat/qat_sym.c create mode 100644 drivers/crypto/qat/qat_sym.h create mode 100644 drivers/crypto/qat/qat_sym_capabilities.h create mode 100644 drivers/crypto/qat/qat_sym_pmd.c create mode 100644 drivers/crypto/qat/qat_sym_pmd.h create mode 100644 drivers/crypto/qat/qat_sym_session.c create mode 100644 drivers/crypto/qat/qat_sym_session.h delete mode 100644 drivers/crypto/qat/rte_pmd_qat_version.map delete mode 100644 drivers/crypto/qat/rte_qat_cryptodev.c create mode 100644 drivers/crypto/virtio/Makefile create mode 100644 drivers/crypto/virtio/meson.build create mode 100644 drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map create mode 100644 drivers/crypto/virtio/virtio_crypto_algs.h create mode 100644 drivers/crypto/virtio/virtio_crypto_capabilities.h create mode 100644 drivers/crypto/virtio/virtio_cryptodev.c create mode 100644 drivers/crypto/virtio/virtio_cryptodev.h create mode 100644 drivers/crypto/virtio/virtio_logs.h create mode 100644 drivers/crypto/virtio/virtio_pci.c create mode 100644 drivers/crypto/virtio/virtio_pci.h create mode 100644 drivers/crypto/virtio/virtio_ring.h create mode 100644 drivers/crypto/virtio/virtio_rxtx.c create mode 100644 drivers/crypto/virtio/virtqueue.c create mode 100644 drivers/crypto/virtio/virtqueue.h diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 628bd142..c480cbd3 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -6,15 +6,20 @@ include $(RTE_SDK)/mk/rte.vars.mk DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb DIRS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += armv8 +DIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp DIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += openssl -DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat DIRS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g DIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += kasumi DIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += zuc -DIRS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += mrvl +DIRS-$(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) += mvsam DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null +ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy) DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec +endif +ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y) DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += dpaa_sec +endif +DIRS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio include $(RTE_SDK)/mk/rte.subdir.mk diff --git a/drivers/crypto/aesni_gcm/Makefile b/drivers/crypto/aesni_gcm/Makefile index d06c5444..0a5c1a87 100644 --- a/drivers/crypto/aesni_gcm/Makefile +++ b/drivers/crypto/aesni_gcm/Makefile @@ -3,12 +3,6 @@ include $(RTE_SDK)/mk/rte.vars.mk -ifneq ($(MAKECMDGOALS),clean)
-ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),) -$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable") -endif -endif - # library name LIB = librte_pmd_aesni_gcm.a @@ -23,9 +17,7 @@ LIBABIVER := 1 EXPORT_MAP := rte_pmd_aesni_gcm_version.map # external library dependencies -CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH) -CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include -LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB +LDLIBS += -lIPSec_MB LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_cryptodev LDLIBS += -lrte_bus_vdev diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h index 59e504ee..45061669 100644 --- a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h +++ b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h @@ -9,8 +9,7 @@ #define LINUX #endif -#include -#include +#include /** Supported vector modes */ enum aesni_gcm_vector_mode { diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c index 83e54480..752e0cd6 100644 --- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c +++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c @@ -31,8 +31,8 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops, if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { auth_xform = xform; if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_AES_GMAC) { - GCM_LOG_ERR("Only AES GMAC is supported as an " - "authentication only algorithm"); + AESNI_GCM_LOG(ERR, "Only AES GMAC is supported as an " + "authentication only algorithm"); return -ENOTSUP; } /* Set IV parameters */ @@ -54,7 +54,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops, aead_xform = xform; if (aead_xform->aead.algo != RTE_CRYPTO_AEAD_AES_GCM) { - GCM_LOG_ERR("The only combined operation " + AESNI_GCM_LOG(ERR, "The only combined operation " "supported is AES GCM"); return -ENOTSUP; } @@ -75,7 +75,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops, sess->aad_length = aead_xform->aead.aad_length; digest_length = aead_xform->aead.digest_length; } else { - GCM_LOG_ERR("Wrong xform type, has to be AEAD or authentication"); + AESNI_GCM_LOG(ERR, "Wrong xform type, has to be AEAD or authentication"); return -ENOTSUP; } @@ -83,7 +83,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops, /* IV check */ if (sess->iv.length != 16 && sess->iv.length != 12 && sess->iv.length != 0) { - GCM_LOG_ERR("Wrong IV length"); + AESNI_GCM_LOG(ERR, "Wrong IV length"); return -EINVAL; } @@ -99,7 +99,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops, sess->key = AESNI_GCM_KEY_256; break; default: - GCM_LOG_ERR("Invalid key length"); + AESNI_GCM_LOG(ERR, "Invalid key length"); return -EINVAL; } @@ -109,7 +109,7 @@ aesni_gcm_set_session_parameters(const struct aesni_gcm_ops *gcm_ops, if (digest_length != 16 && digest_length != 12 && digest_length != 8) { - GCM_LOG_ERR("digest"); + AESNI_GCM_LOG(ERR, "Invalid digest length"); return -EINVAL; } sess->digest_length = digest_length; @@ -127,7 +127,7 @@ aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op) if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { if (likely(sym_op->session != NULL)) sess = (struct aesni_gcm_session *) - get_session_private_data( + get_sym_session_private_data( sym_op->session, cryptodev_driver_id); } else { @@ -149,8 +149,8 @@ aesni_gcm_get_session(struct aesni_gcm_qp *qp, struct rte_crypto_op *op) sess = NULL; } sym_op->session = (struct rte_cryptodev_sym_session *)_sess; - 
set_session_private_data(sym_op->session, cryptodev_driver_id, - _sess_private_data); + set_sym_session_private_data(sym_op->session, + cryptodev_driver_id, _sess_private_data); } if (unlikely(sess == NULL)) @@ -352,7 +352,7 @@ post_process_gcm_crypto_op(struct aesni_gcm_qp *qp, session->op == AESNI_GMAC_OP_VERIFY) { uint8_t *digest; - uint8_t *tag = (uint8_t *)&qp->temp_digest; + uint8_t *tag = qp->temp_digest; if (session->op == AESNI_GMAC_OP_VERIFY) digest = op->sym->auth.digest.data; @@ -392,7 +392,7 @@ handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp, if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { memset(sess, 0, sizeof(struct aesni_gcm_session)); memset(op->sym->session, 0, - rte_cryptodev_get_header_session_size()); + rte_cryptodev_sym_get_header_session_size()); rte_mempool_put(qp->sess_mp, sess); rte_mempool_put(qp->sess_mp, op->sym->session); op->sym->session = NULL; @@ -464,13 +464,13 @@ aesni_gcm_create(const char *name, /* Check CPU for support for AES instruction set */ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) { - GCM_LOG_ERR("AES instructions not supported by CPU"); + AESNI_GCM_LOG(ERR, "AES instructions not supported by CPU"); return -EFAULT; } - dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params); if (dev == NULL) { - GCM_LOG_ERR("driver %s: create failed", init_params->name); + AESNI_GCM_LOG(ERR, "driver %s: create failed", + init_params->name); return -ENODEV; } @@ -492,7 +492,8 @@ aesni_gcm_create(const char *name, dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | RTE_CRYPTODEV_FF_CPU_AESNI | - RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER; + RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | + RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT; switch (vector_mode) { case RTE_AESNI_GCM_SSE: @@ -513,7 +514,13 @@ aesni_gcm_create(const char *name, internals->vector_mode = vector_mode; internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs; - internals->max_nb_sessions = init_params->max_nb_sessions; + +#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0) + AESNI_GCM_LOG(INFO, "IPSec Multi-buffer library version used: %s\n", + imb_get_version_str()); +#else + AESNI_GCM_LOG(INFO, "IPSec Multi-buffer library version used: 0.49.0\n"); +#endif return 0; } @@ -525,8 +532,7 @@ aesni_gcm_probe(struct rte_vdev_device *vdev) "", sizeof(struct aesni_gcm_private), rte_socket_id(), - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS, - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS + RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS }; const char *name; const char *input_args; @@ -568,7 +574,12 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_GCM_PMD, aesni_gcm_pmd_drv); RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_GCM_PMD, cryptodev_aesni_gcm_pmd); RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD, "max_nb_queue_pairs= " - "max_nb_sessions= " "socket_id="); -RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv, +RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv.driver, cryptodev_driver_id); + + +RTE_INIT(aesni_gcm_init_log) +{ + aesni_gcm_logtype_driver = rte_log_register("pmd.crypto.aesni_gcm"); +} diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c index 6f542137..b6b4dd02 100644 --- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c +++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_ops.c @@ -143,7 +143,8 @@ aesni_gcm_pmd_info_get(struct rte_cryptodev *dev, dev_info->capabilities = aesni_gcm_pmd_capabilities; dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs; 
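[Editor's sketch] The info_get hunk continues below by zeroing the reported session cap: from 18.08 on, dev_info->sym.max_nb_sessions == 0 advertises that a PMD imposes no limit on the number of sessions. A minimal application-side sketch of reading that field; dev_id and nb_wanted are hypothetical inputs, not names from this patch:

	#include <rte_common.h>
	#include <rte_cryptodev.h>

	/* Size a session pool against a PMD that may report "no limit".
	 * Assumes dev_id is a valid cryptodev id and nb_wanted is the
	 * application's desired session count (both hypothetical).
	 */
	static unsigned int
	session_budget(uint8_t dev_id, unsigned int nb_wanted)
	{
		struct rte_cryptodev_info info;

		rte_cryptodev_info_get(dev_id, &info);

		/* Since DPDK 18.08, 0 means the driver enforces no cap. */
		if (info.sym.max_nb_sessions == 0)
			return nb_wanted;

		return RTE_MIN(nb_wanted, info.sym.max_nb_sessions);
	}

This is why the max_nb_sessions vdev argument and the per-driver max_nb_sessions fields disappear from every PMD in the hunks that follow.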
- dev_info->sym.max_nb_sessions = internals->max_nb_sessions; + /* No limit of number of sessions */ + dev_info->sym.max_nb_sessions = 0; } } @@ -183,12 +184,11 @@ aesni_gcm_pmd_qp_create_processed_pkts_ring(struct aesni_gcm_qp *qp, r = rte_ring_lookup(qp->name); if (r) { if (rte_ring_get_size(r) >= ring_size) { - GCM_LOG_INFO("Reusing existing ring %s for processed" - " packets", qp->name); + AESNI_GCM_LOG(INFO, "Reusing existing ring %s for processed" + " packets", qp->name); return r; } - - GCM_LOG_ERR("Unable to reuse existing ring %s for processed" + AESNI_GCM_LOG(ERR, "Unable to reuse existing ring %s for processed" " packets", qp->name); return NULL; } @@ -242,22 +242,6 @@ qp_setup_cleanup: return -1; } -/** Start queue pair */ -static int -aesni_gcm_pmd_qp_start(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - -/** Stop queue pair */ -static int -aesni_gcm_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - /** Return the number of allocated queue pairs */ static uint32_t aesni_gcm_pmd_qp_count(struct rte_cryptodev *dev) @@ -267,14 +251,14 @@ aesni_gcm_pmd_qp_count(struct rte_cryptodev *dev) /** Returns the size of the aesni gcm session structure */ static unsigned -aesni_gcm_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused) +aesni_gcm_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) { return sizeof(struct aesni_gcm_session); } /** Configure a aesni gcm session from a crypto xform chain */ static int -aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, +aesni_gcm_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused, struct rte_crypto_sym_xform *xform, struct rte_cryptodev_sym_session *sess, struct rte_mempool *mempool) @@ -284,26 +268,26 @@ aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, struct aesni_gcm_private *internals = dev->data->dev_private; if (unlikely(sess == NULL)) { - GCM_LOG_ERR("invalid session struct"); + AESNI_GCM_LOG(ERR, "invalid session struct"); return -EINVAL; } if (rte_mempool_get(mempool, &sess_private_data)) { - CDEV_LOG_ERR( - "Couldn't get object from session mempool"); + AESNI_GCM_LOG(ERR, + "Couldn't get object from session mempool"); return -ENOMEM; } ret = aesni_gcm_set_session_parameters(gcm_ops[internals->vector_mode], sess_private_data, xform); if (ret != 0) { - GCM_LOG_ERR("failed configure session parameters"); + AESNI_GCM_LOG(ERR, "failed configure session parameters"); /* Return session to mempool */ rte_mempool_put(mempool, sess_private_data); return ret; } - set_session_private_data(sess, dev->driver_id, + set_sym_session_private_data(sess, dev->driver_id, sess_private_data); return 0; @@ -311,17 +295,17 @@ aesni_gcm_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, /** Clear the memory of session so it doesn't leave key material behind */ static void -aesni_gcm_pmd_session_clear(struct rte_cryptodev *dev, +aesni_gcm_pmd_sym_session_clear(struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess) { uint8_t index = dev->driver_id; - void *sess_priv = get_session_private_data(sess, index); + void *sess_priv = get_sym_session_private_data(sess, index); /* Zero out the whole structure */ if (sess_priv) { memset(sess_priv, 0, sizeof(struct aesni_gcm_session)); struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); - set_session_private_data(sess, index, NULL); + set_sym_session_private_data(sess, index, NULL); 
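[Editor's sketch] Throughout the patch the session-private-data helpers gain a sym_ infix: get_session_private_data() becomes get_sym_session_private_data(), set_session_private_data() becomes set_sym_session_private_data(), and rte_cryptodev_get_header_session_size() becomes rte_cryptodev_sym_get_header_session_size(). A condensed sketch of the attach/wipe pattern these hunks follow, assuming the 18.08 inline helpers from rte_cryptodev_pmd.h; struct my_session is a hypothetical stand-in for a driver's private session state:

	#include <stdint.h>
	#include <string.h>
	#include <rte_cryptodev_pmd.h>

	struct my_session { uint8_t key[32]; };	/* hypothetical driver state */

	static void
	attach_then_clear(struct rte_cryptodev_sym_session *sess,
			uint8_t driver_id, struct my_session *priv)
	{
		struct my_session *p;

		/* attach driver-private data under the renamed setter */
		set_sym_session_private_data(sess, driver_id, priv);

		/* in sym_session_clear: fetch, zero key material, detach */
		p = get_sym_session_private_data(sess, driver_id);
		if (p != NULL) {
			memset(p, 0, sizeof(*p));
			set_sym_session_private_data(sess, driver_id, NULL);
		}
	}

The same fetch, zero, detach sequence appears in every sym_session_clear implementation this patch touches, so that key material never outlives the session object in the mempool.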
rte_mempool_put(sess_mp, sess_priv); } } @@ -339,13 +323,11 @@ struct rte_cryptodev_ops aesni_gcm_pmd_ops = { .queue_pair_setup = aesni_gcm_pmd_qp_setup, .queue_pair_release = aesni_gcm_pmd_qp_release, - .queue_pair_start = aesni_gcm_pmd_qp_start, - .queue_pair_stop = aesni_gcm_pmd_qp_stop, .queue_pair_count = aesni_gcm_pmd_qp_count, - .session_get_size = aesni_gcm_pmd_session_get_size, - .session_configure = aesni_gcm_pmd_session_configure, - .session_clear = aesni_gcm_pmd_session_clear + .sym_session_get_size = aesni_gcm_pmd_sym_session_get_size, + .sym_session_configure = aesni_gcm_pmd_sym_session_configure, + .sym_session_clear = aesni_gcm_pmd_sym_session_clear }; struct rte_cryptodev_ops *rte_aesni_gcm_pmd_ops = &aesni_gcm_pmd_ops; diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h index 3d60583b..c13a12a5 100644 --- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h +++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd_private.h @@ -7,28 +7,24 @@ #include "aesni_gcm_ops.h" +/* + * IMB_VERSION_NUM macro was introduced in version Multi-buffer 0.50, + * so if macro is not defined, it means that the version is 0.49. + */ +#if !defined(IMB_VERSION_NUM) +#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) +#define IMB_VERSION_NUM IMB_VERSION(0, 49, 0) +#endif + #define CRYPTODEV_NAME_AESNI_GCM_PMD crypto_aesni_gcm /**< AES-NI GCM PMD device name */ -#define GCM_LOG_ERR(fmt, args...) \ - RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), \ - __func__, __LINE__, ## args) - -#ifdef RTE_LIBRTE_AESNI_MB_DEBUG -#define GCM_LOG_INFO(fmt, args...) \ - RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), \ - __func__, __LINE__, ## args) - -#define GCM_LOG_DBG(fmt, args...) \ - RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_AESNI_GCM_PMD), \ - __func__, __LINE__, ## args) -#else -#define GCM_LOG_INFO(fmt, args...) -#define GCM_LOG_DBG(fmt, args...) -#endif +/** AES-NI GCM PMD LOGTYPE DRIVER */ +int aesni_gcm_logtype_driver; +#define AESNI_GCM_LOG(level, fmt, ...) 
\ + rte_log(RTE_LOG_ ## level, aesni_gcm_logtype_driver, \ + "%s() line %u: "fmt "\n", __func__, __LINE__, \ + ## __VA_ARGS__) /* Maximum length for digest */ #define DIGEST_LENGTH_MAX 16 @@ -39,8 +35,6 @@ struct aesni_gcm_private { /**< Vector mode */ unsigned max_nb_queue_pairs; /**< Max number of queue pairs supported by device */ - unsigned max_nb_sessions; - /**< Max number of sessions supported by device */ }; struct aesni_gcm_qp { diff --git a/drivers/crypto/aesni_mb/Makefile b/drivers/crypto/aesni_mb/Makefile index d9f8fb98..806a95eb 100644 --- a/drivers/crypto/aesni_mb/Makefile +++ b/drivers/crypto/aesni_mb/Makefile @@ -3,12 +3,6 @@ include $(RTE_SDK)/mk/rte.vars.mk -ifneq ($(MAKECMDGOALS),clean) -ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),) -$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable") -endif -endif - # library name LIB = librte_pmd_aesni_mb.a @@ -23,9 +17,7 @@ LIBABIVER := 1 EXPORT_MAP := rte_pmd_aesni_mb_version.map # external library dependencies -CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH) -CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include -LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB +LDLIBS += -lIPSec_MB LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_cryptodev LDLIBS += -lrte_bus_vdev diff --git a/drivers/crypto/aesni_mb/aesni_mb_ops.h b/drivers/crypto/aesni_mb/aesni_mb_ops.h index 4d596e85..5a1cba6c 100644 --- a/drivers/crypto/aesni_mb/aesni_mb_ops.h +++ b/drivers/crypto/aesni_mb/aesni_mb_ops.h @@ -9,8 +9,7 @@ #define LINUX #endif -#include -#include +#include enum aesni_mb_vector_mode { RTE_AESNI_MB_NOT_SUPPORTED = 0, @@ -34,9 +33,12 @@ typedef void (*aes_keyexp_192_t) (const void *key, void *enc_exp_keys, void *dec_exp_keys); typedef void (*aes_keyexp_256_t) (const void *key, void *enc_exp_keys, void *dec_exp_keys); - typedef void (*aes_xcbc_expand_key_t) (const void *key, void *exp_k1, void *k2, void *k3); +typedef void (*aes_cmac_sub_key_gen_t) + (const void *exp_key, void *k2, void *k3); +typedef void (*aes_cmac_keyexp_t) + (const void *key, void *keyexp); /** Multi-buffer library function pointer table */ struct aesni_mb_op_fns { @@ -78,9 +80,12 @@ struct aesni_mb_op_fns { /**< AES192 key expansions */ aes_keyexp_256_t aes256; /**< AES256 key expansions */ - aes_xcbc_expand_key_t aes_xcbc; - /**< AES XCBC key expansions */ + /**< AES XCBC key epansions */ + aes_cmac_sub_key_gen_t aes_cmac_subkey; + /**< AES CMAC subkey expansions */ + aes_cmac_keyexp_t aes_cmac_expkey; + /**< AES CMAC key expansions */ } keyexp; /**< Key expansion functions */ } aux; @@ -123,7 +128,9 @@ static const struct aesni_mb_op_fns job_ops[] = { aes_keyexp_128_sse, aes_keyexp_192_sse, aes_keyexp_256_sse, - aes_xcbc_expand_key_sse + aes_xcbc_expand_key_sse, + aes_cmac_subkey_gen_sse, + aes_keyexp_128_enc_sse } } }, @@ -148,7 +155,9 @@ static const struct aesni_mb_op_fns job_ops[] = { aes_keyexp_128_avx, aes_keyexp_192_avx, aes_keyexp_256_avx, - aes_xcbc_expand_key_avx + aes_xcbc_expand_key_avx, + aes_cmac_subkey_gen_avx, + aes_keyexp_128_enc_avx } } }, @@ -173,7 +182,9 @@ static const struct aesni_mb_op_fns job_ops[] = { aes_keyexp_128_avx2, aes_keyexp_192_avx2, aes_keyexp_256_avx2, - aes_xcbc_expand_key_avx2 + aes_xcbc_expand_key_avx2, + aes_cmac_subkey_gen_avx2, + aes_keyexp_128_enc_avx2 } } }, @@ -198,7 +209,9 @@ static const struct aesni_mb_op_fns job_ops[] = { aes_keyexp_128_avx512, aes_keyexp_192_avx512, aes_keyexp_256_avx512, - aes_xcbc_expand_key_avx512 + aes_xcbc_expand_key_avx512, + aes_cmac_subkey_gen_avx512, + 
aes_keyexp_128_enc_avx512 } } } diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c index 636c6c37..93dc7a44 100644 --- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c +++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c @@ -2,7 +2,7 @@ * Copyright(c) 2015-2017 Intel Corporation */ -#include +#include #include #include @@ -108,7 +108,7 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops, } if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) { - MB_LOG_ERR("Crypto xform struct not of type auth"); + AESNI_MB_LOG(ERR, "Crypto xform struct not of type auth"); return -1; } @@ -124,6 +124,17 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops, return 0; } + if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) { + sess->auth.algo = AES_CMAC; + (*mb_ops->aux.keyexp.aes_cmac_expkey)(xform->auth.key.data, + sess->auth.cmac.expkey); + + (*mb_ops->aux.keyexp.aes_cmac_subkey)(sess->auth.cmac.expkey, + sess->auth.cmac.skey1, sess->auth.cmac.skey2); + return 0; + } + + switch (xform->auth.algo) { case RTE_CRYPTO_AUTH_MD5_HMAC: sess->auth.algo = MD5; @@ -150,7 +161,7 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops, hash_oneblock_fn = mb_ops->aux.one_block.sha512; break; default: - MB_LOG_ERR("Unsupported authentication algorithm selection"); + AESNI_MB_LOG(ERR, "Unsupported authentication algorithm selection"); return -ENOTSUP; } @@ -171,6 +182,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops, const struct rte_crypto_sym_xform *xform) { uint8_t is_aes = 0; + uint8_t is_3DES = 0; aes_keyexp_t aes_keyexp_fn; if (xform == NULL) { @@ -179,7 +191,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops, } if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) { - MB_LOG_ERR("Crypto xform struct not of type cipher"); + AESNI_MB_LOG(ERR, "Crypto xform struct not of type cipher"); return -EINVAL; } @@ -192,7 +204,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops, sess->cipher.direction = DECRYPT; break; default: - MB_LOG_ERR("Invalid cipher operation parameter"); + AESNI_MB_LOG(ERR, "Invalid cipher operation parameter"); return -EINVAL; } @@ -216,8 +228,12 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops, case RTE_CRYPTO_CIPHER_DES_DOCSISBPI: sess->cipher.mode = DOCSIS_DES; break; + case RTE_CRYPTO_CIPHER_3DES_CBC: + sess->cipher.mode = DES3; + is_3DES = 1; + break; default: - MB_LOG_ERR("Unsupported cipher mode parameter"); + AESNI_MB_LOG(ERR, "Unsupported cipher mode parameter"); return -ENOTSUP; } @@ -241,7 +257,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops, aes_keyexp_fn = mb_ops->aux.keyexp.aes256; break; default: - MB_LOG_ERR("Invalid cipher key length"); + AESNI_MB_LOG(ERR, "Invalid cipher key length"); return -EINVAL; } @@ -250,9 +266,52 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops, sess->cipher.expanded_aes_keys.encode, sess->cipher.expanded_aes_keys.decode); + } else if (is_3DES) { + uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0], + sess->cipher.exp_3des_keys.key[1], + sess->cipher.exp_3des_keys.key[2]}; + + switch (xform->cipher.key.length) { + case 24: + des_key_schedule(keys[0], xform->cipher.key.data); + des_key_schedule(keys[1], xform->cipher.key.data+8); + des_key_schedule(keys[2], xform->cipher.key.data+16); + + /* Initialize keys - 24 bytes: [K1-K2-K3] */ + sess->cipher.exp_3des_keys.ks_ptr[0] = 
keys[0]; + sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1]; + sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2]; + break; + case 16: + des_key_schedule(keys[0], xform->cipher.key.data); + des_key_schedule(keys[1], xform->cipher.key.data+8); + + /* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */ + sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0]; + sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1]; + sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0]; + break; + case 8: + des_key_schedule(keys[0], xform->cipher.key.data); + + /* Initialize keys - 8 bytes: [K1 = K2 = K3] */ + sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0]; + sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0]; + sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0]; + break; + default: + AESNI_MB_LOG(ERR, "Invalid cipher key length"); + return -EINVAL; + } + +#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0) + sess->cipher.key_length_in_bytes = 24; +#else + sess->cipher.key_length_in_bytes = 8; +#endif } else { if (xform->cipher.key.length != 8) { - MB_LOG_ERR("Invalid cipher key length"); + AESNI_MB_LOG(ERR, "Invalid cipher key length"); return -EINVAL; } sess->cipher.key_length_in_bytes = 8; @@ -283,7 +342,7 @@ aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops, sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY; break; default: - MB_LOG_ERR("Invalid aead operation parameter"); + AESNI_MB_LOG(ERR, "Invalid aead operation parameter"); return -EINVAL; } @@ -293,7 +352,7 @@ aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops, sess->auth.algo = AES_CCM; break; default: - MB_LOG_ERR("Unsupported aead mode parameter"); + AESNI_MB_LOG(ERR, "Unsupported aead mode parameter"); return -ENOTSUP; } @@ -309,7 +368,7 @@ aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops, aes_keyexp_fn = mb_ops->aux.keyexp.aes128; break; default: - MB_LOG_ERR("Invalid cipher key length"); + AESNI_MB_LOG(ERR, "Invalid cipher key length"); return -EINVAL; } @@ -338,16 +397,19 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops, sess->chain_order = HASH_CIPHER; auth_xform = xform; cipher_xform = xform->next; + sess->auth.digest_len = xform->auth.digest_length; break; case AESNI_MB_OP_CIPHER_HASH: sess->chain_order = CIPHER_HASH; auth_xform = xform->next; cipher_xform = xform; + sess->auth.digest_len = xform->auth.digest_length; break; case AESNI_MB_OP_HASH_ONLY: sess->chain_order = HASH_CIPHER; auth_xform = xform; cipher_xform = NULL; + sess->auth.digest_len = xform->auth.digest_length; break; case AESNI_MB_OP_CIPHER_ONLY: /* @@ -366,18 +428,18 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops, case AESNI_MB_OP_AEAD_CIPHER_HASH: sess->chain_order = CIPHER_HASH; sess->aead.aad_len = xform->aead.aad_length; - sess->aead.digest_len = xform->aead.digest_length; + sess->auth.digest_len = xform->aead.digest_length; aead_xform = xform; break; case AESNI_MB_OP_AEAD_HASH_CIPHER: sess->chain_order = HASH_CIPHER; sess->aead.aad_len = xform->aead.aad_length; - sess->aead.digest_len = xform->aead.digest_length; + sess->auth.digest_len = xform->aead.digest_length; aead_xform = xform; break; case AESNI_MB_OP_NOT_SUPPORTED: default: - MB_LOG_ERR("Unsupported operation chain order parameter"); + AESNI_MB_LOG(ERR, "Unsupported operation chain order parameter"); return -ENOTSUP; } @@ -386,14 +448,14 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops, ret = aesni_mb_set_session_auth_parameters(mb_ops, sess, auth_xform); if (ret != 0) { - MB_LOG_ERR("Invalid/unsupported 
authentication parameters"); + AESNI_MB_LOG(ERR, "Invalid/unsupported authentication parameters"); return ret; } ret = aesni_mb_set_session_cipher_parameters(mb_ops, sess, cipher_xform); if (ret != 0) { - MB_LOG_ERR("Invalid/unsupported cipher parameters"); + AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters"); return ret; } @@ -401,7 +463,7 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops, ret = aesni_mb_set_session_aead_parameters(mb_ops, sess, aead_xform); if (ret != 0) { - MB_LOG_ERR("Invalid/unsupported aead parameters"); + AESNI_MB_LOG(ERR, "Invalid/unsupported aead parameters"); return ret; } } @@ -444,7 +506,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op) if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { if (likely(op->sym->session != NULL)) sess = (struct aesni_mb_session *) - get_session_private_data( + get_sym_session_private_data( op->sym->session, cryptodev_driver_id); } else { @@ -466,8 +528,8 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op) sess = NULL; } op->sym->session = (struct rte_cryptodev_sym_session *)_sess; - set_session_private_data(op->sym->session, cryptodev_driver_id, - _sess_private_data); + set_sym_session_private_data(op->sym->session, + cryptodev_driver_id, _sess_private_data); } if (unlikely(sess == NULL)) @@ -510,22 +572,39 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp, job->cipher_mode = session->cipher.mode; job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes; - job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode; - job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.decode; + + if (job->cipher_mode == DES3) { + job->aes_enc_key_expanded = + session->cipher.exp_3des_keys.ks_ptr; + job->aes_dec_key_expanded = + session->cipher.exp_3des_keys.ks_ptr; + } else { + job->aes_enc_key_expanded = + session->cipher.expanded_aes_keys.encode; + job->aes_dec_key_expanded = + session->cipher.expanded_aes_keys.decode; + } + + /* Set authentication parameters */ job->hash_alg = session->auth.algo; if (job->hash_alg == AES_XCBC) { - job->_k1_expanded = session->auth.xcbc.k1_expanded; - job->_k2 = session->auth.xcbc.k2; - job->_k3 = session->auth.xcbc.k3; + job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded; + job->u.XCBC._k2 = session->auth.xcbc.k2; + job->u.XCBC._k3 = session->auth.xcbc.k3; } else if (job->hash_alg == AES_CCM) { job->u.CCM.aad = op->sym->aead.aad.data + 18; job->u.CCM.aad_len_in_bytes = session->aead.aad_len; + } else if (job->hash_alg == AES_CMAC) { + job->u.CMAC._key_expanded = session->auth.cmac.expkey; + job->u.CMAC._skey1 = session->auth.cmac.skey1; + job->u.CMAC._skey2 = session->auth.cmac.skey2; + } else { - job->hashed_auth_key_xor_ipad = session->auth.pads.inner; - job->hashed_auth_key_xor_opad = session->auth.pads.outer; + job->u.HMAC._hashed_auth_key_xor_ipad = session->auth.pads.inner; + job->u.HMAC._hashed_auth_key_xor_opad = session->auth.pads.outer; } /* Mutable crypto operation parameters */ @@ -536,7 +615,7 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp, char *odata = rte_pktmbuf_append(m_dst, rte_pktmbuf_data_len(op->sym->m_src)); if (odata == NULL) { - MB_LOG_ERR("failed to allocate space in destination " + AESNI_MB_LOG(ERR, "failed to allocate space in destination " "mbuf for source data"); op->status = RTE_CRYPTO_OP_STATUS_ERROR; return -1; @@ -568,11 +647,11 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp, * Multi-buffer library current only support returning a truncated * 
digest length as specified in the relevant IPsec RFCs */ - if (job->hash_alg != AES_CCM) + if (job->hash_alg != AES_CCM && job->hash_alg != AES_CMAC) job->auth_tag_output_len_in_bytes = get_truncated_digest_byte_length(job->hash_alg); else - job->auth_tag_output_len_in_bytes = session->aead.digest_len; + job->auth_tag_output_len_in_bytes = session->auth.digest_len; /* Set IV parameters */ @@ -639,7 +718,7 @@ static inline struct rte_crypto_op * post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job) { struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data; - struct aesni_mb_session *sess = get_session_private_data( + struct aesni_mb_session *sess = get_sym_session_private_data( op->sym->session, cryptodev_driver_id); @@ -663,7 +742,7 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job) if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { memset(sess, 0, sizeof(struct aesni_mb_session)); memset(op->sym->session, 0, - rte_cryptodev_get_header_session_size()); + rte_cryptodev_sym_get_header_session_size()); rte_mempool_put(qp->sess_mp, sess); rte_mempool_put(qp->sess_mp, op->sym->session); op->sym->session = NULL; @@ -702,7 +781,7 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job, if (processed_jobs == nb_ops) break; - job = (*qp->op_fns->job.get_completed_job)(&qp->mb_mgr); + job = (*qp->op_fns->job.get_completed_job)(qp->mb_mgr); } return processed_jobs; @@ -715,7 +794,7 @@ flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops, int processed_ops = 0; /* Flush the remaining jobs */ - JOB_AES_HMAC *job = (*qp->op_fns->job.flush_job)(&qp->mb_mgr); + JOB_AES_HMAC *job = (*qp->op_fns->job.flush_job)(qp->mb_mgr); if (job) processed_ops += handle_completed_jobs(qp, job, @@ -760,14 +839,14 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops, break; /* Get next free mb job struct from mb manager */ - job = (*qp->op_fns->job.get_next)(&qp->mb_mgr); + job = (*qp->op_fns->job.get_next)(qp->mb_mgr); if (unlikely(job == NULL)) { /* if no free mb job structs we need to flush mb_mgr */ processed_jobs += flush_mb_mgr(qp, &ops[processed_jobs], (nb_ops - processed_jobs) - 1); - job = (*qp->op_fns->job.get_next)(&qp->mb_mgr); + job = (*qp->op_fns->job.get_next)(qp->mb_mgr); } retval = set_mb_job_params(job, qp, op, &digest_idx); @@ -777,7 +856,7 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops, } /* Submit job to multi-buffer for processing */ - job = (*qp->op_fns->job.submit)(&qp->mb_mgr); + job = (*qp->op_fns->job.submit)(qp->mb_mgr); /* * If submit returns a processed job then handle it, @@ -813,13 +892,13 @@ cryptodev_aesni_mb_create(const char *name, /* Check CPU for support for AES instruction set */ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) { - MB_LOG_ERR("AES instructions not supported by CPU"); + AESNI_MB_LOG(ERR, "AES instructions not supported by CPU"); return -EFAULT; } dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params); if (dev == NULL) { - MB_LOG_ERR("failed to create cryptodev vdev"); + AESNI_MB_LOG(ERR, "failed to create cryptodev vdev"); return -ENODEV; } @@ -866,7 +945,13 @@ cryptodev_aesni_mb_create(const char *name, internals->vector_mode = vector_mode; internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs; - internals->max_nb_sessions = init_params->max_nb_sessions; + +#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0) + AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n", + imb_get_version_str()); +#else + AESNI_MB_LOG(INFO, "IPSec Multi-buffer 
library version used: 0.49.0\n"); +#endif return 0; } @@ -878,8 +963,7 @@ cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev) "", sizeof(struct aesni_mb_private), rte_socket_id(), - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS, - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS + RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS }; const char *name, *args; int retval; @@ -892,7 +976,7 @@ cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev) retval = rte_cryptodev_pmd_parse_input_args(&init_params, args); if (retval) { - MB_LOG_ERR("Failed to parse initialisation arguments[%s]\n", + AESNI_MB_LOG(ERR, "Failed to parse initialisation arguments[%s]", args); return -EINVAL; } @@ -928,8 +1012,12 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv); RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd); RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD, "max_nb_queue_pairs= " - "max_nb_sessions= " "socket_id="); RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_mb_crypto_drv, - cryptodev_aesni_mb_pmd_drv, + cryptodev_aesni_mb_pmd_drv.driver, cryptodev_driver_id); + +RTE_INIT(aesni_mb_init_log) +{ + aesni_mb_logtype_driver = rte_log_register("pmd.crypto.aesni_mb"); +} diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c index 9d685a09..ab26e5ae 100644 --- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c +++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c @@ -239,6 +239,26 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = { }, } }, } }, + { /* 3DES CBC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_3DES_CBC, + .block_size = 8, + .key_size = { + .min = 8, + .max = 24, + .increment = 8 + }, + .iv_size = { + .min = 8, + .max = 8, + .increment = 0 + } + }, } + }, } + }, { /* DES DOCSIS BPI */ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, {.sym = { @@ -289,8 +309,27 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = { }, } }, } }, - - + { /* AES CMAC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_AES_CMAC, + .block_size = 16, + .key_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .digest_size = { + .min = 12, + .max = 16, + .increment = 4 + }, + .iv_size = { 0 } + }, } + }, } + }, RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() }; @@ -368,7 +407,8 @@ aesni_mb_pmd_info_get(struct rte_cryptodev *dev, dev_info->feature_flags = dev->feature_flags; dev_info->capabilities = aesni_mb_pmd_capabilities; dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs; - dev_info->sym.max_nb_sessions = internals->max_nb_sessions; + /* No limit of number of sessions */ + dev_info->sym.max_nb_sessions = 0; } } @@ -383,6 +423,8 @@ aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id) r = rte_ring_lookup(qp->name); if (r) rte_ring_free(r); + if (qp->mb_mgr) + free_mb_mgr(qp->mb_mgr); rte_free(qp); dev->data->queue_pairs[qp_id] = NULL; } @@ -422,12 +464,12 @@ aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp, r = rte_ring_lookup(ring_name); if (r) { if (rte_ring_get_size(r) >= ring_size) { - MB_LOG_INFO("Reusing existing ring %s for processed ops", + AESNI_MB_LOG(INFO, "Reusing existing ring %s for processed ops", ring_name); return r; } - MB_LOG_ERR("Unable to reuse existing ring %s for processed ops", + AESNI_MB_LOG(ERR, "Unable to reuse existing ring %s for processed 
ops", ring_name); return NULL; } @@ -444,6 +486,7 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, { struct aesni_mb_qp *qp = NULL; struct aesni_mb_private *internals = dev->data->dev_private; + int ret = -1; /* Free memory prior to re-allocation if needed. */ if (dev->data->queue_pairs[qp_id] != NULL) @@ -462,12 +505,20 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, goto qp_setup_cleanup; + qp->mb_mgr = alloc_mb_mgr(0); + if (qp->mb_mgr == NULL) { + ret = -ENOMEM; + goto qp_setup_cleanup; + } + qp->op_fns = &job_ops[internals->vector_mode]; qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp, "ingress", qp_conf->nb_descriptors, socket_id); - if (qp->ingress_queue == NULL) + if (qp->ingress_queue == NULL) { + ret = -1; goto qp_setup_cleanup; + } qp->sess_mp = session_pool; @@ -479,30 +530,17 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, "digest_mp_%u_%u", dev->data->dev_id, qp_id); /* Initialise multi-buffer manager */ - (*qp->op_fns->job.init_mgr)(&qp->mb_mgr); + (*qp->op_fns->job.init_mgr)(qp->mb_mgr); return 0; qp_setup_cleanup: - if (qp) + if (qp) { + if (qp->mb_mgr == NULL) + free_mb_mgr(qp->mb_mgr); rte_free(qp); + } - return -1; -} - -/** Start queue pair */ -static int -aesni_mb_pmd_qp_start(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - -/** Stop queue pair */ -static int -aesni_mb_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; + return ret; } /** Return the number of allocated queue pairs */ @@ -514,14 +552,14 @@ aesni_mb_pmd_qp_count(struct rte_cryptodev *dev) /** Returns the size of the aesni multi-buffer session structure */ static unsigned -aesni_mb_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused) +aesni_mb_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) { return sizeof(struct aesni_mb_session); } /** Configure a aesni multi-buffer session from a crypto xform chain */ static int -aesni_mb_pmd_session_configure(struct rte_cryptodev *dev, +aesni_mb_pmd_sym_session_configure(struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform, struct rte_cryptodev_sym_session *sess, struct rte_mempool *mempool) @@ -531,27 +569,27 @@ aesni_mb_pmd_session_configure(struct rte_cryptodev *dev, int ret; if (unlikely(sess == NULL)) { - MB_LOG_ERR("invalid session struct"); + AESNI_MB_LOG(ERR, "invalid session struct"); return -EINVAL; } if (rte_mempool_get(mempool, &sess_private_data)) { - CDEV_LOG_ERR( - "Couldn't get object from session mempool"); + AESNI_MB_LOG(ERR, + "Couldn't get object from session mempool"); return -ENOMEM; } ret = aesni_mb_set_session_parameters(&job_ops[internals->vector_mode], sess_private_data, xform); if (ret != 0) { - MB_LOG_ERR("failed configure session parameters"); + AESNI_MB_LOG(ERR, "failed configure session parameters"); /* Return session to mempool */ rte_mempool_put(mempool, sess_private_data); return ret; } - set_session_private_data(sess, dev->driver_id, + set_sym_session_private_data(sess, dev->driver_id, sess_private_data); return 0; @@ -559,17 +597,17 @@ aesni_mb_pmd_session_configure(struct rte_cryptodev *dev, /** Clear the memory of session so it doesn't leave key material behind */ static void -aesni_mb_pmd_session_clear(struct rte_cryptodev *dev, +aesni_mb_pmd_sym_session_clear(struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess) { uint8_t index = dev->driver_id; - void *sess_priv = 
get_session_private_data(sess, index); + void *sess_priv = get_sym_session_private_data(sess, index); /* Zero out the whole structure */ if (sess_priv) { memset(sess_priv, 0, sizeof(struct aesni_mb_session)); struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); - set_session_private_data(sess, index, NULL); + set_sym_session_private_data(sess, index, NULL); rte_mempool_put(sess_mp, sess_priv); } } @@ -587,13 +625,11 @@ struct rte_cryptodev_ops aesni_mb_pmd_ops = { .queue_pair_setup = aesni_mb_pmd_qp_setup, .queue_pair_release = aesni_mb_pmd_qp_release, - .queue_pair_start = aesni_mb_pmd_qp_start, - .queue_pair_stop = aesni_mb_pmd_qp_stop, .queue_pair_count = aesni_mb_pmd_qp_count, - .session_get_size = aesni_mb_pmd_session_get_size, - .session_configure = aesni_mb_pmd_session_configure, - .session_clear = aesni_mb_pmd_session_clear + .sym_session_get_size = aesni_mb_pmd_sym_session_get_size, + .sym_session_configure = aesni_mb_pmd_sym_session_configure, + .sym_session_clear = aesni_mb_pmd_sym_session_clear }; struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops; diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h index 948e091c..70e9d18e 100644 --- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h +++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h @@ -7,28 +7,26 @@ #include "aesni_mb_ops.h" +/* + * IMB_VERSION_NUM macro was introduced in version Multi-buffer 0.50, + * so if macro is not defined, it means that the version is 0.49. + */ +#if !defined(IMB_VERSION_NUM) +#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c)) +#define IMB_VERSION_NUM IMB_VERSION(0, 49, 0) +#endif + #define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb /**< AES-NI Multi buffer PMD device name */ -#define MB_LOG_ERR(fmt, args...) \ - RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD), \ - __func__, __LINE__, ## args) - -#ifdef RTE_LIBRTE_AESNI_MB_DEBUG -#define MB_LOG_INFO(fmt, args...) \ - RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - CRYPTODEV_NAME_AESNI_MB_PMD, \ - __func__, __LINE__, ## args) - -#define MB_LOG_DBG(fmt, args...) \ - RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - CRYPTODEV_NAME_AESNI_MB_PMD, \ - __func__, __LINE__, ## args) -#else -#define MB_LOG_INFO(fmt, args...) -#define MB_LOG_DBG(fmt, args...) -#endif +/** AESNI_MB PMD LOGTYPE DRIVER */ +int aesni_mb_logtype_driver; + +#define AESNI_MB_LOG(level, fmt, ...) 
\ + rte_log(RTE_LOG_ ## level, aesni_mb_logtype_driver, \ + "%s() line %u: " fmt "\n", __func__, __LINE__, \ + ## __VA_ARGS__) + #define HMAC_IPAD_VALUE (0x36) #define HMAC_OPAD_VALUE (0x5C) @@ -66,8 +64,9 @@ static const unsigned auth_truncated_digest_byte_lengths[] = { [SHA_384] = 24, [SHA_512] = 32, [AES_XCBC] = 12, + [AES_CMAC] = 16, [AES_CCM] = 8, - [NULL_HASH] = 0 + [NULL_HASH] = 0 }; /** @@ -91,7 +90,8 @@ static const unsigned auth_digest_byte_lengths[] = { [SHA_384] = 48, [SHA_512] = 64, [AES_XCBC] = 16, - [NULL_HASH] = 0 + [AES_CMAC] = 16, + [NULL_HASH] = 0 }; /** @@ -122,8 +122,6 @@ struct aesni_mb_private { /**< CPU vector instruction set mode */ unsigned max_nb_queue_pairs; /**< Max number of queue pairs supported by device */ - unsigned max_nb_sessions; - /**< Max number of sessions supported by device */ }; /** AESNI Multi buffer queue pair */ @@ -134,7 +132,7 @@ struct aesni_mb_qp { /**< Unique Queue Pair Name */ const struct aesni_mb_op_fns *op_fns; /**< Vector mode dependent pointer table of the multi-buffer APIs */ - MB_MGR mb_mgr; + MB_MGR *mb_mgr; /**< Multi-buffer instance */ struct rte_ring *ingress_queue; /**< Ring for placing operations ready for processing */ @@ -171,12 +169,18 @@ struct aesni_mb_session { uint64_t key_length_in_bytes; - struct { - uint32_t encode[60] __rte_aligned(16); - /**< encode key */ - uint32_t decode[60] __rte_aligned(16); - /**< decode key */ - } expanded_aes_keys; + union { + struct { + uint32_t encode[60] __rte_aligned(16); + /**< encode key */ + uint32_t decode[60] __rte_aligned(16); + /**< decode key */ + } expanded_aes_keys; + struct { + const void *ks_ptr[3]; + uint64_t key[3][16]; + } exp_3des_keys; + }; /**< Expanded AES keys - Allocating space to * contain the maximum expanded key size which * is 240 bytes for 256 bit AES, calculate by: @@ -211,14 +215,24 @@ struct aesni_mb_session { uint8_t k3[16] __rte_aligned(16); /**< k3. */ } xcbc; + + struct { + uint32_t expkey[60] __rte_aligned(16); + /**< k1 (expanded key). */ + uint32_t skey1[4] __rte_aligned(16); + /**< k2. */ + uint32_t skey2[4] __rte_aligned(16); + /**< k3. 
*/ + } cmac; /**< Expanded XCBC authentication keys */ }; + /** digest size */ + uint16_t digest_len; + } auth; struct { /** AAD data length */ uint16_t aad_len; - /** digest size */ - uint16_t digest_len; } aead; } __rte_cache_aligned; diff --git a/drivers/crypto/armv8/rte_armv8_pmd.c b/drivers/crypto/armv8/rte_armv8_pmd.c index 59fffcf1..9d15fee5 100644 --- a/drivers/crypto/armv8/rte_armv8_pmd.c +++ b/drivers/crypto/armv8/rte_armv8_pmd.c @@ -502,7 +502,7 @@ get_session(struct armv8_crypto_qp *qp, struct rte_crypto_op *op) /* get existing session */ if (likely(op->sym->session != NULL)) { sess = (struct armv8_crypto_session *) - get_session_private_data( + get_sym_session_private_data( op->sym->session, cryptodev_driver_id); } @@ -526,8 +526,8 @@ get_session(struct armv8_crypto_qp *qp, struct rte_crypto_op *op) sess = NULL; } op->sym->session = (struct rte_cryptodev_sym_session *)_sess; - set_session_private_data(op->sym->session, cryptodev_driver_id, - _sess_private_data); + set_sym_session_private_data(op->sym->session, + cryptodev_driver_id, _sess_private_data); } if (unlikely(sess == NULL)) @@ -654,7 +654,7 @@ process_op(struct armv8_crypto_qp *qp, struct rte_crypto_op *op, if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { memset(sess, 0, sizeof(struct armv8_crypto_session)); memset(op->sym->session, 0, - rte_cryptodev_get_header_session_size()); + rte_cryptodev_sym_get_header_session_size()); rte_mempool_put(qp->sess_mp, sess); rte_mempool_put(qp->sess_mp, op->sym->session); op->sym->session = NULL; @@ -779,7 +779,6 @@ cryptodev_armv8_crypto_create(const char *name, internals = dev->data->dev_private; internals->max_nb_qpairs = init_params->max_nb_queue_pairs; - internals->max_nb_sessions = init_params->max_nb_sessions; return 0; @@ -800,8 +799,7 @@ cryptodev_armv8_crypto_init(struct rte_vdev_device *vdev) "", sizeof(struct armv8_crypto_private), rte_socket_id(), - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS, - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS + RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS }; const char *name; const char *input_args; @@ -848,7 +846,6 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ARMV8_PMD, armv8_crypto_pmd_drv); RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_ARMV8_PMD, cryptodev_armv8_pmd); RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ARMV8_PMD, "max_nb_queue_pairs= " - "max_nb_sessions= " "socket_id="); -RTE_PMD_REGISTER_CRYPTO_DRIVER(armv8_crypto_drv, armv8_crypto_pmd_drv, +RTE_PMD_REGISTER_CRYPTO_DRIVER(armv8_crypto_drv, armv8_crypto_pmd_drv.driver, cryptodev_driver_id); diff --git a/drivers/crypto/armv8/rte_armv8_pmd_ops.c b/drivers/crypto/armv8/rte_armv8_pmd_ops.c index 3817ad7b..ae03117e 100644 --- a/drivers/crypto/armv8/rte_armv8_pmd_ops.c +++ b/drivers/crypto/armv8/rte_armv8_pmd_ops.c @@ -27,9 +27,9 @@ static const struct rte_cryptodev_capabilities .increment = 1 }, .digest_size = { - .min = 20, + .min = 1, .max = 20, - .increment = 0 + .increment = 1 }, .iv_size = { 0 } }, } @@ -48,9 +48,9 @@ static const struct rte_cryptodev_capabilities .increment = 1 }, .digest_size = { - .min = 32, + .min = 1, .max = 32, - .increment = 0 + .increment = 1 }, .iv_size = { 0 } }, } @@ -154,7 +154,8 @@ armv8_crypto_pmd_info_get(struct rte_cryptodev *dev, dev_info->feature_flags = dev->feature_flags; dev_info->capabilities = armv8_crypto_pmd_capabilities; dev_info->max_nb_queue_pairs = internals->max_nb_qpairs; - dev_info->sym.max_nb_sessions = internals->max_nb_sessions; + /* No limit of number of sessions */ + dev_info->sym.max_nb_sessions = 0; } } @@ -257,22 +258,6 @@ 
qp_setup_cleanup: return -1; } -/** Start queue pair */ -static int -armv8_crypto_pmd_qp_start(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - -/** Stop queue pair */ -static int -armv8_crypto_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - /** Return the number of allocated queue pairs */ static uint32_t armv8_crypto_pmd_qp_count(struct rte_cryptodev *dev) @@ -282,14 +267,14 @@ armv8_crypto_pmd_qp_count(struct rte_cryptodev *dev) /** Returns the size of the session structure */ static unsigned -armv8_crypto_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused) +armv8_crypto_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) { return sizeof(struct armv8_crypto_session); } /** Configure the session from a crypto xform chain */ static int -armv8_crypto_pmd_session_configure(struct rte_cryptodev *dev, +armv8_crypto_pmd_sym_session_configure(struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform, struct rte_cryptodev_sym_session *sess, struct rte_mempool *mempool) @@ -317,7 +302,7 @@ armv8_crypto_pmd_session_configure(struct rte_cryptodev *dev, return ret; } - set_session_private_data(sess, dev->driver_id, + set_sym_session_private_data(sess, dev->driver_id, sess_private_data); return 0; @@ -325,17 +310,17 @@ armv8_crypto_pmd_session_configure(struct rte_cryptodev *dev, /** Clear the memory of session so it doesn't leave key material behind */ static void -armv8_crypto_pmd_session_clear(struct rte_cryptodev *dev, +armv8_crypto_pmd_sym_session_clear(struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess) { uint8_t index = dev->driver_id; - void *sess_priv = get_session_private_data(sess, index); + void *sess_priv = get_sym_session_private_data(sess, index); /* Zero out the whole structure */ if (sess_priv) { memset(sess_priv, 0, sizeof(struct armv8_crypto_session)); struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); - set_session_private_data(sess, index, NULL); + set_sym_session_private_data(sess, index, NULL); rte_mempool_put(sess_mp, sess_priv); } } @@ -353,13 +338,11 @@ struct rte_cryptodev_ops armv8_crypto_pmd_ops = { .queue_pair_setup = armv8_crypto_pmd_qp_setup, .queue_pair_release = armv8_crypto_pmd_qp_release, - .queue_pair_start = armv8_crypto_pmd_qp_start, - .queue_pair_stop = armv8_crypto_pmd_qp_stop, .queue_pair_count = armv8_crypto_pmd_qp_count, - .session_get_size = armv8_crypto_pmd_session_get_size, - .session_configure = armv8_crypto_pmd_session_configure, - .session_clear = armv8_crypto_pmd_session_clear + .sym_session_get_size = armv8_crypto_pmd_sym_session_get_size, + .sym_session_configure = armv8_crypto_pmd_sym_session_configure, + .sym_session_clear = armv8_crypto_pmd_sym_session_clear }; struct rte_cryptodev_ops *rte_armv8_crypto_pmd_ops = &armv8_crypto_pmd_ops; diff --git a/drivers/crypto/armv8/rte_armv8_pmd_private.h b/drivers/crypto/armv8/rte_armv8_pmd_private.h index b8966e93..7feb021d 100644 --- a/drivers/crypto/armv8/rte_armv8_pmd_private.h +++ b/drivers/crypto/armv8/rte_armv8_pmd_private.h @@ -106,8 +106,6 @@ typedef void (*crypto_key_sched_t)(uint8_t *, const uint8_t *); struct armv8_crypto_private { unsigned int max_nb_qpairs; /**< Max number of queue pairs */ - unsigned int max_nb_sessions; - /**< Max number of sessions */ }; /** ARMv8 crypto queue pair */ diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile new file mode 100644 index 00000000..f51d170f --- /dev/null 
+++ b/drivers/crypto/ccp/Makefile @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + +include $(RTE_SDK)/mk/rte.vars.mk + +# library name +LIB = librte_pmd_ccp.a + +# build flags +CFLAGS += -O3 +CFLAGS += -I$(SRCDIR) +CFLAGS += $(WERROR_FLAGS) + +# library version +LIBABIVER := 1 + +# external library include paths +LDLIBS += -lcrypto +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring +LDLIBS += -lrte_cryptodev +LDLIBS += -lrte_pci -lrte_bus_pci +LDLIBS += -lrte_bus_vdev +LDLIBS += -lrte_kvargs + +# versioning export map +EXPORT_MAP := rte_pmd_ccp_version.map + +# library source files +SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_crypto.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_dev.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pci.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c new file mode 100644 index 00000000..19ae9153 --- /dev/null +++ b/drivers/crypto/ccp/ccp_crypto.c @@ -0,0 +1,2951 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /*sub key apis*/ +#include /*sub key apis*/ + +#include +#include +#include +#include +#include +#include +#include + +#include "ccp_dev.h" +#include "ccp_crypto.h" +#include "ccp_pci.h" +#include "ccp_pmd_private.h" + +#include +#include +#include + +/* SHA initial context values */ +static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = { + SHA1_H4, SHA1_H3, + SHA1_H2, SHA1_H1, + SHA1_H0, 0x0U, + 0x0U, 0x0U, +}; + +uint32_t ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = { + SHA224_H7, SHA224_H6, + SHA224_H5, SHA224_H4, + SHA224_H3, SHA224_H2, + SHA224_H1, SHA224_H0, +}; + +uint32_t ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = { + SHA256_H7, SHA256_H6, + SHA256_H5, SHA256_H4, + SHA256_H3, SHA256_H2, + SHA256_H1, SHA256_H0, +}; + +uint64_t ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = { + SHA384_H7, SHA384_H6, + SHA384_H5, SHA384_H4, + SHA384_H3, SHA384_H2, + SHA384_H1, SHA384_H0, +}; + +uint64_t ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = { + SHA512_H7, SHA512_H6, + SHA512_H5, SHA512_H4, + SHA512_H3, SHA512_H2, + SHA512_H1, SHA512_H0, +}; + +#if defined(_MSC_VER) +#define SHA3_CONST(x) x +#else +#define SHA3_CONST(x) x##L +#endif + +/** 'Words' here refers to uint64_t */ +#define SHA3_KECCAK_SPONGE_WORDS \ + (((1600) / 8) / sizeof(uint64_t)) +typedef struct sha3_context_ { + uint64_t saved; + /** + * The portion of the input message that we + * didn't consume yet + */ + union { + uint64_t s[SHA3_KECCAK_SPONGE_WORDS]; + /* Keccak's state */ + uint8_t sb[SHA3_KECCAK_SPONGE_WORDS * 8]; + /**total 200 ctx size**/ + }; + unsigned int byteIndex; + /** + * 0..7--the next byte after the set one + * (starts from 0; 0--none are buffered) + */ + unsigned int wordIndex; + /** + * 0..24--the next word to integrate input + * (starts from 0) + */ + unsigned int capacityWords; + /** + * the double size of the hash output in + * words (e.g. 
16 for Keccak 512) + */ +} sha3_context; + +#ifndef SHA3_ROTL64 +#define SHA3_ROTL64(x, y) \ + (((x) << (y)) | ((x) >> ((sizeof(uint64_t)*8) - (y)))) +#endif + +static const uint64_t keccakf_rndc[24] = { + SHA3_CONST(0x0000000000000001UL), SHA3_CONST(0x0000000000008082UL), + SHA3_CONST(0x800000000000808aUL), SHA3_CONST(0x8000000080008000UL), + SHA3_CONST(0x000000000000808bUL), SHA3_CONST(0x0000000080000001UL), + SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008009UL), + SHA3_CONST(0x000000000000008aUL), SHA3_CONST(0x0000000000000088UL), + SHA3_CONST(0x0000000080008009UL), SHA3_CONST(0x000000008000000aUL), + SHA3_CONST(0x000000008000808bUL), SHA3_CONST(0x800000000000008bUL), + SHA3_CONST(0x8000000000008089UL), SHA3_CONST(0x8000000000008003UL), + SHA3_CONST(0x8000000000008002UL), SHA3_CONST(0x8000000000000080UL), + SHA3_CONST(0x000000000000800aUL), SHA3_CONST(0x800000008000000aUL), + SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008080UL), + SHA3_CONST(0x0000000080000001UL), SHA3_CONST(0x8000000080008008UL) +}; + +static const unsigned int keccakf_rotc[24] = { + 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, 27, 41, 56, 8, 25, 43, 62, + 18, 39, 61, 20, 44 +}; + +static const unsigned int keccakf_piln[24] = { + 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, 15, 23, 19, 13, 12, 2, 20, + 14, 22, 9, 6, 1 +}; + +static enum ccp_cmd_order +ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform) +{ + enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED; + + if (xform == NULL) + return res; + if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { + if (xform->next == NULL) + return CCP_CMD_AUTH; + else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) + return CCP_CMD_HASH_CIPHER; + } + if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { + if (xform->next == NULL) + return CCP_CMD_CIPHER; + else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) + return CCP_CMD_CIPHER_HASH; + } + if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) + return CCP_CMD_COMBINED; + return res; +} + +/* partial hash using openssl */ +static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out) +{ + SHA_CTX ctx; + + if (!SHA1_Init(&ctx)) + return -EFAULT; + SHA1_Transform(&ctx, data_in); + rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH); + return 0; +} + +static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out) +{ + SHA256_CTX ctx; + + if (!SHA224_Init(&ctx)) + return -EFAULT; + SHA256_Transform(&ctx, data_in); + rte_memcpy(data_out, &ctx, + SHA256_DIGEST_LENGTH); + return 0; +} + +static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out) +{ + SHA256_CTX ctx; + + if (!SHA256_Init(&ctx)) + return -EFAULT; + SHA256_Transform(&ctx, data_in); + rte_memcpy(data_out, &ctx, + SHA256_DIGEST_LENGTH); + return 0; +} + +static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out) +{ + SHA512_CTX ctx; + + if (!SHA384_Init(&ctx)) + return -EFAULT; + SHA512_Transform(&ctx, data_in); + rte_memcpy(data_out, &ctx, + SHA512_DIGEST_LENGTH); + return 0; +} + +static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out) +{ + SHA512_CTX ctx; + + if (!SHA512_Init(&ctx)) + return -EFAULT; + SHA512_Transform(&ctx, data_in); + rte_memcpy(data_out, &ctx, + SHA512_DIGEST_LENGTH); + return 0; +} + +static void +keccakf(uint64_t s[25]) +{ + int i, j, round; + uint64_t t, bc[5]; +#define KECCAK_ROUNDS 24 + + for (round = 0; round < KECCAK_ROUNDS; round++) { + + /* Theta */ + for (i = 0; i < 5; i++) + bc[i] = s[i] ^ s[i + 5] ^ s[i + 10] ^ s[i + 15] ^ + s[i + 20]; + + for (i = 0; i < 5; i++) { + t = bc[(i + 4) % 5] ^ 
SHA3_ROTL64(bc[(i + 1) % 5], 1); + for (j = 0; j < 25; j += 5) + s[j + i] ^= t; + } + + /* Rho Pi */ + t = s[1]; + for (i = 0; i < 24; i++) { + j = keccakf_piln[i]; + bc[0] = s[j]; + s[j] = SHA3_ROTL64(t, keccakf_rotc[i]); + t = bc[0]; + } + + /* Chi */ + for (j = 0; j < 25; j += 5) { + for (i = 0; i < 5; i++) + bc[i] = s[j + i]; + for (i = 0; i < 5; i++) + s[j + i] ^= (~bc[(i + 1) % 5]) & + bc[(i + 2) % 5]; + } + + /* Iota */ + s[0] ^= keccakf_rndc[round]; + } +} + +static void +sha3_Init224(void *priv) +{ + sha3_context *ctx = (sha3_context *) priv; + + memset(ctx, 0, sizeof(*ctx)); + ctx->capacityWords = 2 * 224 / (8 * sizeof(uint64_t)); +} + +static void +sha3_Init256(void *priv) +{ + sha3_context *ctx = (sha3_context *) priv; + + memset(ctx, 0, sizeof(*ctx)); + ctx->capacityWords = 2 * 256 / (8 * sizeof(uint64_t)); +} + +static void +sha3_Init384(void *priv) +{ + sha3_context *ctx = (sha3_context *) priv; + + memset(ctx, 0, sizeof(*ctx)); + ctx->capacityWords = 2 * 384 / (8 * sizeof(uint64_t)); +} + +static void +sha3_Init512(void *priv) +{ + sha3_context *ctx = (sha3_context *) priv; + + memset(ctx, 0, sizeof(*ctx)); + ctx->capacityWords = 2 * 512 / (8 * sizeof(uint64_t)); +} + + +/* This is simply the 'update' with the padding block. + * The padding block is 0x01 || 0x00* || 0x80. First 0x01 and last 0x80 + * bytes are always present, but they can be the same byte. + */ +static void +sha3_Update(void *priv, void const *bufIn, size_t len) +{ + sha3_context *ctx = (sha3_context *) priv; + unsigned int old_tail = (8 - ctx->byteIndex) & 7; + size_t words; + unsigned int tail; + size_t i; + const uint8_t *buf = bufIn; + + if (len < old_tail) { + while (len--) + ctx->saved |= (uint64_t) (*(buf++)) << + ((ctx->byteIndex++) * 8); + return; + } + + if (old_tail) { + len -= old_tail; + while (old_tail--) + ctx->saved |= (uint64_t) (*(buf++)) << + ((ctx->byteIndex++) * 8); + + ctx->s[ctx->wordIndex] ^= ctx->saved; + ctx->byteIndex = 0; + ctx->saved = 0; + if (++ctx->wordIndex == + (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) { + keccakf(ctx->s); + ctx->wordIndex = 0; + } + } + + words = len / sizeof(uint64_t); + tail = len - words * sizeof(uint64_t); + + for (i = 0; i < words; i++, buf += sizeof(uint64_t)) { + const uint64_t t = (uint64_t) (buf[0]) | + ((uint64_t) (buf[1]) << 8 * 1) | + ((uint64_t) (buf[2]) << 8 * 2) | + ((uint64_t) (buf[3]) << 8 * 3) | + ((uint64_t) (buf[4]) << 8 * 4) | + ((uint64_t) (buf[5]) << 8 * 5) | + ((uint64_t) (buf[6]) << 8 * 6) | + ((uint64_t) (buf[7]) << 8 * 7); + ctx->s[ctx->wordIndex] ^= t; + if (++ctx->wordIndex == + (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) { + keccakf(ctx->s); + ctx->wordIndex = 0; + } + } + + while (tail--) + ctx->saved |= (uint64_t) (*(buf++)) << ((ctx->byteIndex++) * 8); +} + +int partial_hash_sha3_224(uint8_t *data_in, uint8_t *data_out) +{ + sha3_context *ctx; + int i; + + ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0); + if (!ctx) { + CCP_LOG_ERR("sha3-ctx creation failed"); + return -ENOMEM; + } + sha3_Init224(ctx); + sha3_Update(ctx, data_in, SHA3_224_BLOCK_SIZE); + for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++) + *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1]; + rte_free(ctx); + + return 0; +} + +int partial_hash_sha3_256(uint8_t *data_in, uint8_t *data_out) +{ + sha3_context *ctx; + int i; + + ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0); + if (!ctx) { + CCP_LOG_ERR("sha3-ctx creation failed"); + return -ENOMEM; + } + sha3_Init256(ctx); + sha3_Update(ctx, data_in, SHA3_256_BLOCK_SIZE); + for (i = 0; i 
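/*
 * [Editor's note - illustrative, not part of the upstream patch.]
 * sha3_Update() above is a textbook Keccak sponge absorb: the rate is
 * (SHA3_KECCAK_SPONGE_WORDS - capacityWords) 64-bit words, and
 * keccakf() runs once per absorbed rate-sized block. Worked example
 * for SHA3-256, using only definitions from this file:
 *
 *	capacityWords = 2 * 256 / (8 * sizeof(uint64_t)) = 8
 *	rate          = 25 - 8 = 17 words = 136 bytes
 *
 * which is the SHA3-256 block length (SHA3_256_BLOCK_SIZE) that the
 * partial-hash helpers in this file feed in as a single block.
 */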
< CCP_SHA3_CTX_SIZE; i++, data_out++) + *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1]; + rte_free(ctx); + + return 0; +} + +int partial_hash_sha3_384(uint8_t *data_in, uint8_t *data_out) +{ + sha3_context *ctx; + int i; + + ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0); + if (!ctx) { + CCP_LOG_ERR("sha3-ctx creation failed"); + return -ENOMEM; + } + sha3_Init384(ctx); + sha3_Update(ctx, data_in, SHA3_384_BLOCK_SIZE); + for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++) + *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1]; + rte_free(ctx); + + return 0; +} + +int partial_hash_sha3_512(uint8_t *data_in, uint8_t *data_out) +{ + sha3_context *ctx; + int i; + + ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0); + if (!ctx) { + CCP_LOG_ERR("sha3-ctx creation failed"); + return -ENOMEM; + } + sha3_Init512(ctx); + sha3_Update(ctx, data_in, SHA3_512_BLOCK_SIZE); + for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++) + *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1]; + rte_free(ctx); + + return 0; +} + +static int generate_partial_hash(struct ccp_session *sess) +{ + + uint8_t ipad[sess->auth.block_size]; + uint8_t opad[sess->auth.block_size]; + uint8_t *ipad_t, *opad_t; + uint32_t *hash_value_be32, hash_temp32[8]; + uint64_t *hash_value_be64, hash_temp64[8]; + int i, count; + uint8_t *hash_value_sha3; + + opad_t = ipad_t = (uint8_t *)sess->auth.key; + + hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute); + hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute); + + /* considering key size is always equal to block size of algorithm */ + for (i = 0; i < sess->auth.block_size; i++) { + ipad[i] = (ipad_t[i] ^ HMAC_IPAD_VALUE); + opad[i] = (opad_t[i] ^ HMAC_OPAD_VALUE); + } + + switch (sess->auth.algo) { + case CCP_AUTH_ALGO_SHA1_HMAC: + count = SHA1_DIGEST_SIZE >> 2; + + if (partial_hash_sha1(ipad, (uint8_t *)hash_temp32)) + return -1; + for (i = 0; i < count; i++, hash_value_be32++) + *hash_value_be32 = hash_temp32[count - 1 - i]; + + hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute + + sess->auth.ctx_len); + if (partial_hash_sha1(opad, (uint8_t *)hash_temp32)) + return -1; + for (i = 0; i < count; i++, hash_value_be32++) + *hash_value_be32 = hash_temp32[count - 1 - i]; + return 0; + case CCP_AUTH_ALGO_SHA224_HMAC: + count = SHA256_DIGEST_SIZE >> 2; + + if (partial_hash_sha224(ipad, (uint8_t *)hash_temp32)) + return -1; + for (i = 0; i < count; i++, hash_value_be32++) + *hash_value_be32 = hash_temp32[count - 1 - i]; + + hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute + + sess->auth.ctx_len); + if (partial_hash_sha224(opad, (uint8_t *)hash_temp32)) + return -1; + for (i = 0; i < count; i++, hash_value_be32++) + *hash_value_be32 = hash_temp32[count - 1 - i]; + return 0; + case CCP_AUTH_ALGO_SHA3_224_HMAC: + hash_value_sha3 = sess->auth.pre_compute; + if (partial_hash_sha3_224(ipad, hash_value_sha3)) + return -1; + + hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute + + sess->auth.ctx_len); + if (partial_hash_sha3_224(opad, hash_value_sha3)) + return -1; + return 0; + case CCP_AUTH_ALGO_SHA256_HMAC: + count = SHA256_DIGEST_SIZE >> 2; + + if (partial_hash_sha256(ipad, (uint8_t *)hash_temp32)) + return -1; + for (i = 0; i < count; i++, hash_value_be32++) + *hash_value_be32 = hash_temp32[count - 1 - i]; + + hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute + + sess->auth.ctx_len); + if (partial_hash_sha256(opad, (uint8_t *)hash_temp32)) + return -1; + for (i = 0; i < count; i++, hash_value_be32++) + *hash_value_be32 = 
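/*
 * [Editor's note - illustrative, not part of the upstream patch.]
 * generate_partial_hash() implements the standard HMAC precompute:
 * with the key zero-padded to the block size, it stores the
 * compression-function states H(k ^ ipad) and H(k ^ opad) so the CCP
 * engine only ever hashes the message and the intermediate digest.
 * The partial_hash_sha*() helpers rely on SHAx_Transform() compressing
 * exactly one block with no length padding, which is what makes the
 * exported chaining value reusable. Host-side sketch of the inner
 * state with OpenSSL (key/klen are placeholders for this sketch):
 *
 *	SHA256_CTX c;
 *	uint8_t blk[SHA256_CBLOCK] = {0};	// 64-byte block
 *	memcpy(blk, key, klen);			// klen <= block size
 *	for (int i = 0; i < SHA256_CBLOCK; i++)
 *		blk[i] ^= 0x36;			// HMAC_IPAD_VALUE
 *	SHA256_Init(&c);
 *	SHA256_Transform(&c, blk);		// c.h[] == H(k ^ ipad)
 */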
hash_temp32[count - 1 - i]; + return 0; + case CCP_AUTH_ALGO_SHA3_256_HMAC: + hash_value_sha3 = sess->auth.pre_compute; + if (partial_hash_sha3_256(ipad, hash_value_sha3)) + return -1; + + hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute + + sess->auth.ctx_len); + if (partial_hash_sha3_256(opad, hash_value_sha3)) + return -1; + return 0; + case CCP_AUTH_ALGO_SHA384_HMAC: + count = SHA512_DIGEST_SIZE >> 3; + + if (partial_hash_sha384(ipad, (uint8_t *)hash_temp64)) + return -1; + for (i = 0; i < count; i++, hash_value_be64++) + *hash_value_be64 = hash_temp64[count - 1 - i]; + + hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute + + sess->auth.ctx_len); + if (partial_hash_sha384(opad, (uint8_t *)hash_temp64)) + return -1; + for (i = 0; i < count; i++, hash_value_be64++) + *hash_value_be64 = hash_temp64[count - 1 - i]; + return 0; + case CCP_AUTH_ALGO_SHA3_384_HMAC: + hash_value_sha3 = sess->auth.pre_compute; + if (partial_hash_sha3_384(ipad, hash_value_sha3)) + return -1; + + hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute + + sess->auth.ctx_len); + if (partial_hash_sha3_384(opad, hash_value_sha3)) + return -1; + return 0; + case CCP_AUTH_ALGO_SHA512_HMAC: + count = SHA512_DIGEST_SIZE >> 3; + + if (partial_hash_sha512(ipad, (uint8_t *)hash_temp64)) + return -1; + for (i = 0; i < count; i++, hash_value_be64++) + *hash_value_be64 = hash_temp64[count - 1 - i]; + + hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute + + sess->auth.ctx_len); + if (partial_hash_sha512(opad, (uint8_t *)hash_temp64)) + return -1; + for (i = 0; i < count; i++, hash_value_be64++) + *hash_value_be64 = hash_temp64[count - 1 - i]; + return 0; + case CCP_AUTH_ALGO_SHA3_512_HMAC: + hash_value_sha3 = sess->auth.pre_compute; + if (partial_hash_sha3_512(ipad, hash_value_sha3)) + return -1; + + hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute + + sess->auth.ctx_len); + if (partial_hash_sha3_512(opad, hash_value_sha3)) + return -1; + return 0; + default: + CCP_LOG_ERR("Invalid auth algo"); + return -1; + } +} + +/* prepare temporary keys K1 and K2 */ +static void prepare_key(unsigned char *k, unsigned char *l, int bl) +{ + int i; + /* Shift block to left, including carry */ + for (i = 0; i < bl; i++) { + k[i] = l[i] << 1; + if (i < bl - 1 && l[i + 1] & 0x80) + k[i] |= 1; + } + /* If MSB set fixup with R */ + if (l[0] & 0x80) + k[bl - 1] ^= bl == 16 ? 
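/*
 * [Editor's note - illustrative, not part of the upstream patch.]
 * The constant chosen just below is Rb from NIST SP 800-38B: 0x87 for
 * a 128-bit block cipher (AES), 0x1b for a 64-bit block. prepare_key()
 * is the GF(2^b) doubling step - shift the block left one bit and, if
 * the dropped MSB was set, XOR Rb into the last byte - and
 * generate_cmac_subkeys() below applies it twice:
 *
 *	K1 = double(E_k(0^128));	// E_k(0) via EVP_EncryptUpdate
 *	K2 = double(K1);
 *
 * Equivalent byte-wise doubling, matching prepare_key():
 *
 *	uint8_t carry = in[0] & 0x80;
 *	for (int i = 0; i < 15; i++)
 *		out[i] = (uint8_t)((in[i] << 1) | (in[i + 1] >> 7));
 *	out[15] = (uint8_t)(in[15] << 1);
 *	if (carry)
 *		out[15] ^= 0x87;	// Rb for AES
 */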
0x87 : 0x1b; +} + +/* subkeys K1 and K2 generation for CMAC */ +static int +generate_cmac_subkeys(struct ccp_session *sess) +{ + const EVP_CIPHER *algo; + EVP_CIPHER_CTX *ctx; + unsigned char *ccp_ctx; + size_t i; + int dstlen, totlen; + unsigned char zero_iv[AES_BLOCK_SIZE] = {0}; + unsigned char dst[2 * AES_BLOCK_SIZE] = {0}; + unsigned char k1[AES_BLOCK_SIZE] = {0}; + unsigned char k2[AES_BLOCK_SIZE] = {0}; + + if (sess->auth.ut.aes_type == CCP_AES_TYPE_128) + algo = EVP_aes_128_cbc(); + else if (sess->auth.ut.aes_type == CCP_AES_TYPE_192) + algo = EVP_aes_192_cbc(); + else if (sess->auth.ut.aes_type == CCP_AES_TYPE_256) + algo = EVP_aes_256_cbc(); + else { + CCP_LOG_ERR("Invalid CMAC type length"); + return -1; + } + + ctx = EVP_CIPHER_CTX_new(); + if (!ctx) { + CCP_LOG_ERR("ctx creation failed"); + return -1; + } + if (EVP_EncryptInit(ctx, algo, (unsigned char *)sess->auth.key, + (unsigned char *)zero_iv) <= 0) + goto key_generate_err; + if (EVP_CIPHER_CTX_set_padding(ctx, 0) <= 0) + goto key_generate_err; + if (EVP_EncryptUpdate(ctx, dst, &dstlen, zero_iv, + AES_BLOCK_SIZE) <= 0) + goto key_generate_err; + if (EVP_EncryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0) + goto key_generate_err; + + memset(sess->auth.pre_compute, 0, CCP_SB_BYTES * 2); + + ccp_ctx = (unsigned char *)(sess->auth.pre_compute + CCP_SB_BYTES - 1); + prepare_key(k1, dst, AES_BLOCK_SIZE); + for (i = 0; i < AES_BLOCK_SIZE; i++, ccp_ctx--) + *ccp_ctx = k1[i]; + + ccp_ctx = (unsigned char *)(sess->auth.pre_compute + + (2 * CCP_SB_BYTES) - 1); + prepare_key(k2, k1, AES_BLOCK_SIZE); + for (i = 0; i < AES_BLOCK_SIZE; i++, ccp_ctx--) + *ccp_ctx = k2[i]; + + EVP_CIPHER_CTX_free(ctx); + + return 0; + +key_generate_err: + CCP_LOG_ERR("CMAC Init failed"); + return -1; +} + +/* configure session */ +static int +ccp_configure_session_cipher(struct ccp_session *sess, + const struct rte_crypto_sym_xform *xform) +{ + const struct rte_crypto_cipher_xform *cipher_xform = NULL; + size_t i, j, x; + + cipher_xform = &xform->cipher; + + /* set cipher direction */ + if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) + sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT; + else + sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT; + + /* set cipher key */ + sess->cipher.key_length = cipher_xform->key.length; + rte_memcpy(sess->cipher.key, cipher_xform->key.data, + cipher_xform->key.length); + + /* set iv parameters */ + sess->iv.offset = cipher_xform->iv.offset; + sess->iv.length = cipher_xform->iv.length; + + switch (cipher_xform->algo) { + case RTE_CRYPTO_CIPHER_AES_CTR: + sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR; + sess->cipher.um.aes_mode = CCP_AES_MODE_CTR; + sess->cipher.engine = CCP_ENGINE_AES; + break; + case RTE_CRYPTO_CIPHER_AES_ECB: + sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC; + sess->cipher.um.aes_mode = CCP_AES_MODE_ECB; + sess->cipher.engine = CCP_ENGINE_AES; + break; + case RTE_CRYPTO_CIPHER_AES_CBC: + sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC; + sess->cipher.um.aes_mode = CCP_AES_MODE_CBC; + sess->cipher.engine = CCP_ENGINE_AES; + break; + case RTE_CRYPTO_CIPHER_3DES_CBC: + sess->cipher.algo = CCP_CIPHER_ALGO_3DES_CBC; + sess->cipher.um.des_mode = CCP_DES_MODE_CBC; + sess->cipher.engine = CCP_ENGINE_3DES; + break; + default: + CCP_LOG_ERR("Unsupported cipher algo"); + return -1; + } + + + switch (sess->cipher.engine) { + case CCP_ENGINE_AES: + if (sess->cipher.key_length == 16) + sess->cipher.ut.aes_type = CCP_AES_TYPE_128; + else if (sess->cipher.key_length == 24) + sess->cipher.ut.aes_type = CCP_AES_TYPE_192; + else if 
(sess->cipher.key_length == 32) + sess->cipher.ut.aes_type = CCP_AES_TYPE_256; + else { + CCP_LOG_ERR("Invalid cipher key length"); + return -1; + } + for (i = 0; i < sess->cipher.key_length ; i++) + sess->cipher.key_ccp[sess->cipher.key_length - i - 1] = + sess->cipher.key[i]; + break; + case CCP_ENGINE_3DES: + if (sess->cipher.key_length == 16) + sess->cipher.ut.des_type = CCP_DES_TYPE_128; + else if (sess->cipher.key_length == 24) + sess->cipher.ut.des_type = CCP_DES_TYPE_192; + else { + CCP_LOG_ERR("Invalid cipher key length"); + return -1; + } + for (j = 0, x = 0; j < sess->cipher.key_length/8; j++, x += 8) + for (i = 0; i < 8; i++) + sess->cipher.key_ccp[(8 + x) - i - 1] = + sess->cipher.key[i + x]; + break; + default: + CCP_LOG_ERR("Invalid CCP Engine"); + return -ENOTSUP; + } + sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce); + sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp); + return 0; +} + +static int +ccp_configure_session_auth(struct ccp_session *sess, + const struct rte_crypto_sym_xform *xform) +{ + const struct rte_crypto_auth_xform *auth_xform = NULL; + size_t i; + + auth_xform = &xform->auth; + + sess->auth.digest_length = auth_xform->digest_length; + if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) + sess->auth.op = CCP_AUTH_OP_GENERATE; + else + sess->auth.op = CCP_AUTH_OP_VERIFY; + switch (auth_xform->algo) { + case RTE_CRYPTO_AUTH_MD5_HMAC: + if (sess->auth_opt) { + sess->auth.algo = CCP_AUTH_ALGO_MD5_HMAC; + sess->auth.offset = ((CCP_SB_BYTES << 1) - + MD5_DIGEST_SIZE); + sess->auth.key_length = auth_xform->key.length; + sess->auth.block_size = MD5_BLOCK_SIZE; + memset(sess->auth.key, 0, sess->auth.block_size); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + } else + return -1; /* HMAC MD5 not supported on CCP */ + break; + case RTE_CRYPTO_AUTH_SHA1: + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.algo = CCP_AUTH_ALGO_SHA1; + sess->auth.ut.sha_type = CCP_SHA_TYPE_1; + sess->auth.ctx = (void *)ccp_sha1_init; + sess->auth.ctx_len = CCP_SB_BYTES; + sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE; + break; + case RTE_CRYPTO_AUTH_SHA1_HMAC: + if (sess->auth_opt) { + if (auth_xform->key.length > SHA1_BLOCK_SIZE) + return -1; + sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC; + sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE; + sess->auth.block_size = SHA1_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + } else { + if (auth_xform->key.length > SHA1_BLOCK_SIZE) + return -1; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC; + sess->auth.ut.sha_type = CCP_SHA_TYPE_1; + sess->auth.ctx_len = CCP_SB_BYTES; + sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE; + sess->auth.block_size = SHA1_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + memset(sess->auth.pre_compute, 0, + sess->auth.ctx_len << 1); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + if (generate_partial_hash(sess)) + return -1; + } + break; + case RTE_CRYPTO_AUTH_SHA224: + sess->auth.algo = CCP_AUTH_ALGO_SHA224; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA_TYPE_224; + sess->auth.ctx = (void *)ccp_sha224_init; + sess->auth.ctx_len = CCP_SB_BYTES; + sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE; + break; + case RTE_CRYPTO_AUTH_SHA224_HMAC: + if 
(sess->auth_opt) { + if (auth_xform->key.length > SHA224_BLOCK_SIZE) + return -1; + sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC; + sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE; + sess->auth.block_size = SHA224_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + } else { + if (auth_xform->key.length > SHA224_BLOCK_SIZE) + return -1; + sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA_TYPE_224; + sess->auth.ctx_len = CCP_SB_BYTES; + sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE; + sess->auth.block_size = SHA224_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + memset(sess->auth.pre_compute, 0, + sess->auth.ctx_len << 1); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + if (generate_partial_hash(sess)) + return -1; + } + break; + case RTE_CRYPTO_AUTH_SHA3_224: + sess->auth.algo = CCP_AUTH_ALGO_SHA3_224; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA3_TYPE_224; + sess->auth.ctx_len = CCP_SHA3_CTX_SIZE; + sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE; + break; + case RTE_CRYPTO_AUTH_SHA3_224_HMAC: + if (auth_xform->key.length > SHA3_224_BLOCK_SIZE) + return -1; + sess->auth.algo = CCP_AUTH_ALGO_SHA3_224_HMAC; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA3_TYPE_224; + sess->auth.ctx_len = CCP_SHA3_CTX_SIZE; + sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE; + sess->auth.block_size = SHA3_224_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + if (generate_partial_hash(sess)) + return -1; + break; + case RTE_CRYPTO_AUTH_SHA256: + sess->auth.algo = CCP_AUTH_ALGO_SHA256; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA_TYPE_256; + sess->auth.ctx = (void *)ccp_sha256_init; + sess->auth.ctx_len = CCP_SB_BYTES; + sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE; + break; + case RTE_CRYPTO_AUTH_SHA256_HMAC: + if (sess->auth_opt) { + if (auth_xform->key.length > SHA256_BLOCK_SIZE) + return -1; + sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC; + sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE; + sess->auth.block_size = SHA256_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + } else { + if (auth_xform->key.length > SHA256_BLOCK_SIZE) + return -1; + sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA_TYPE_256; + sess->auth.ctx_len = CCP_SB_BYTES; + sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE; + sess->auth.block_size = SHA256_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + memset(sess->auth.pre_compute, 0, + sess->auth.ctx_len << 1); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + if (generate_partial_hash(sess)) + return -1; + } + break; + case RTE_CRYPTO_AUTH_SHA3_256: + sess->auth.algo = CCP_AUTH_ALGO_SHA3_256; + sess->auth.engine = CCP_ENGINE_SHA; + 
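/*
 * [Editor's note - illustrative, not part of the upstream patch.]
 * Across all of these cases, ctx_len is the LSB footprint of the hash
 * state and offset is where the digest begins inside it, so the digest
 * occupies bytes (offset .. ctx_len). Worked arithmetic, assuming
 * CCP_SB_BYTES = 32 (one CCP storage block) and CCP_SHA3_CTX_SIZE =
 * 200 (full Keccak state), as defined in the driver headers not shown
 * in this hunk:
 *
 *	SHA1:     ctx_len = 32,  offset = 32 - 20  = 12
 *	SHA256:   ctx_len = 32,  offset = 32 - 32  = 0
 *	SHA512:   ctx_len = 64,  offset = 64 - 64  = 0
 *	SHA3-256: ctx_len = 200, offset = 200 - 32 = 168
 */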
sess->auth.ut.sha_type = CCP_SHA3_TYPE_256; + sess->auth.ctx_len = CCP_SHA3_CTX_SIZE; + sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE; + break; + case RTE_CRYPTO_AUTH_SHA3_256_HMAC: + if (auth_xform->key.length > SHA3_256_BLOCK_SIZE) + return -1; + sess->auth.algo = CCP_AUTH_ALGO_SHA3_256_HMAC; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA3_TYPE_256; + sess->auth.ctx_len = CCP_SHA3_CTX_SIZE; + sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE; + sess->auth.block_size = SHA3_256_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + if (generate_partial_hash(sess)) + return -1; + break; + case RTE_CRYPTO_AUTH_SHA384: + sess->auth.algo = CCP_AUTH_ALGO_SHA384; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA_TYPE_384; + sess->auth.ctx = (void *)ccp_sha384_init; + sess->auth.ctx_len = CCP_SB_BYTES << 1; + sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE; + break; + case RTE_CRYPTO_AUTH_SHA384_HMAC: + if (sess->auth_opt) { + if (auth_xform->key.length > SHA384_BLOCK_SIZE) + return -1; + sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC; + sess->auth.offset = ((CCP_SB_BYTES << 1) - + SHA384_DIGEST_SIZE); + sess->auth.block_size = SHA384_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + } else { + if (auth_xform->key.length > SHA384_BLOCK_SIZE) + return -1; + sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA_TYPE_384; + sess->auth.ctx_len = CCP_SB_BYTES << 1; + sess->auth.offset = ((CCP_SB_BYTES << 1) - + SHA384_DIGEST_SIZE); + sess->auth.block_size = SHA384_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + memset(sess->auth.pre_compute, 0, + sess->auth.ctx_len << 1); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + if (generate_partial_hash(sess)) + return -1; + } + break; + case RTE_CRYPTO_AUTH_SHA3_384: + sess->auth.algo = CCP_AUTH_ALGO_SHA3_384; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA3_TYPE_384; + sess->auth.ctx_len = CCP_SHA3_CTX_SIZE; + sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE; + break; + case RTE_CRYPTO_AUTH_SHA3_384_HMAC: + if (auth_xform->key.length > SHA3_384_BLOCK_SIZE) + return -1; + sess->auth.algo = CCP_AUTH_ALGO_SHA3_384_HMAC; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA3_TYPE_384; + sess->auth.ctx_len = CCP_SHA3_CTX_SIZE; + sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE; + sess->auth.block_size = SHA3_384_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + if (generate_partial_hash(sess)) + return -1; + break; + case RTE_CRYPTO_AUTH_SHA512: + sess->auth.algo = CCP_AUTH_ALGO_SHA512; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA_TYPE_512; + sess->auth.ctx = (void *)ccp_sha512_init; + sess->auth.ctx_len = CCP_SB_BYTES << 1; + sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE; + 
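/*
 * [Editor's note.] The SHA-384/512 state is eight 64-bit words, hence
 * ctx_len = CCP_SB_BYTES << 1: the context spans two storage blocks.
 * This is why the retrieval paths in ccp_perform_hmac() and
 * ccp_perform_sha() later in this file special-case CCP_SHA_TYPE_384
 * and CCP_SHA_TYPE_512 and copy the LSB out in two CCP_SB_BYTES-sized
 * passthrough operations instead of one. Assuming CCP_SB_BYTES = 32,
 * the offset above is (32 << 1) - 64 = 0 for SHA-512, and
 * 64 - 48 = 16 for SHA-384.
 */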
break; + case RTE_CRYPTO_AUTH_SHA512_HMAC: + if (sess->auth_opt) { + if (auth_xform->key.length > SHA512_BLOCK_SIZE) + return -1; + sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC; + sess->auth.offset = ((CCP_SB_BYTES << 1) - + SHA512_DIGEST_SIZE); + sess->auth.block_size = SHA512_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + } else { + if (auth_xform->key.length > SHA512_BLOCK_SIZE) + return -1; + sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA_TYPE_512; + sess->auth.ctx_len = CCP_SB_BYTES << 1; + sess->auth.offset = ((CCP_SB_BYTES << 1) - + SHA512_DIGEST_SIZE); + sess->auth.block_size = SHA512_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + memset(sess->auth.pre_compute, 0, + sess->auth.ctx_len << 1); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + if (generate_partial_hash(sess)) + return -1; + } + break; + case RTE_CRYPTO_AUTH_SHA3_512: + sess->auth.algo = CCP_AUTH_ALGO_SHA3_512; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA3_TYPE_512; + sess->auth.ctx_len = CCP_SHA3_CTX_SIZE; + sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE; + break; + case RTE_CRYPTO_AUTH_SHA3_512_HMAC: + if (auth_xform->key.length > SHA3_512_BLOCK_SIZE) + return -1; + sess->auth.algo = CCP_AUTH_ALGO_SHA3_512_HMAC; + sess->auth.engine = CCP_ENGINE_SHA; + sess->auth.ut.sha_type = CCP_SHA3_TYPE_512; + sess->auth.ctx_len = CCP_SHA3_CTX_SIZE; + sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE; + sess->auth.block_size = SHA3_512_BLOCK_SIZE; + sess->auth.key_length = auth_xform->key.length; + memset(sess->auth.key, 0, sess->auth.block_size); + memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len); + rte_memcpy(sess->auth.key, auth_xform->key.data, + auth_xform->key.length); + if (generate_partial_hash(sess)) + return -1; + break; + case RTE_CRYPTO_AUTH_AES_CMAC: + sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC; + sess->auth.engine = CCP_ENGINE_AES; + sess->auth.um.aes_mode = CCP_AES_MODE_CMAC; + sess->auth.key_length = auth_xform->key.length; + /* padding and hash result */ + sess->auth.ctx_len = CCP_SB_BYTES << 1; + sess->auth.offset = AES_BLOCK_SIZE; + sess->auth.block_size = AES_BLOCK_SIZE; + if (sess->auth.key_length == 16) + sess->auth.ut.aes_type = CCP_AES_TYPE_128; + else if (sess->auth.key_length == 24) + sess->auth.ut.aes_type = CCP_AES_TYPE_192; + else if (sess->auth.key_length == 32) + sess->auth.ut.aes_type = CCP_AES_TYPE_256; + else { + CCP_LOG_ERR("Invalid CMAC key length"); + return -1; + } + rte_memcpy(sess->auth.key, auth_xform->key.data, + sess->auth.key_length); + for (i = 0; i < sess->auth.key_length; i++) + sess->auth.key_ccp[sess->auth.key_length - i - 1] = + sess->auth.key[i]; + if (generate_cmac_subkeys(sess)) + return -1; + break; + default: + CCP_LOG_ERR("Unsupported hash algo"); + return -ENOTSUP; + } + return 0; +} + +static int +ccp_configure_session_aead(struct ccp_session *sess, + const struct rte_crypto_sym_xform *xform) +{ + const struct rte_crypto_aead_xform *aead_xform = NULL; + size_t i; + + aead_xform = &xform->aead; + + sess->cipher.key_length = aead_xform->key.length; + rte_memcpy(sess->cipher.key, aead_xform->key.data, + aead_xform->key.length); + + if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) { + sess->cipher.dir = 
CCP_CIPHER_DIR_ENCRYPT; + sess->auth.op = CCP_AUTH_OP_GENERATE; + } else { + sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT; + sess->auth.op = CCP_AUTH_OP_VERIFY; + } + sess->aead_algo = aead_xform->algo; + sess->auth.aad_length = aead_xform->aad_length; + sess->auth.digest_length = aead_xform->digest_length; + + /* set iv parameters */ + sess->iv.offset = aead_xform->iv.offset; + sess->iv.length = aead_xform->iv.length; + + switch (aead_xform->algo) { + case RTE_CRYPTO_AEAD_AES_GCM: + sess->cipher.algo = CCP_CIPHER_ALGO_AES_GCM; + sess->cipher.um.aes_mode = CCP_AES_MODE_GCTR; + sess->cipher.engine = CCP_ENGINE_AES; + if (sess->cipher.key_length == 16) + sess->cipher.ut.aes_type = CCP_AES_TYPE_128; + else if (sess->cipher.key_length == 24) + sess->cipher.ut.aes_type = CCP_AES_TYPE_192; + else if (sess->cipher.key_length == 32) + sess->cipher.ut.aes_type = CCP_AES_TYPE_256; + else { + CCP_LOG_ERR("Invalid aead key length"); + return -1; + } + for (i = 0; i < sess->cipher.key_length; i++) + sess->cipher.key_ccp[sess->cipher.key_length - i - 1] = + sess->cipher.key[i]; + sess->auth.algo = CCP_AUTH_ALGO_AES_GCM; + sess->auth.engine = CCP_ENGINE_AES; + sess->auth.um.aes_mode = CCP_AES_MODE_GHASH; + sess->auth.ctx_len = CCP_SB_BYTES; + sess->auth.offset = 0; + sess->auth.block_size = AES_BLOCK_SIZE; + sess->cmd_id = CCP_CMD_COMBINED; + break; + default: + CCP_LOG_ERR("Unsupported aead algo"); + return -ENOTSUP; + } + sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce); + sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp); + return 0; +} + +int +ccp_set_session_parameters(struct ccp_session *sess, + const struct rte_crypto_sym_xform *xform, + struct ccp_private *internals) +{ + const struct rte_crypto_sym_xform *cipher_xform = NULL; + const struct rte_crypto_sym_xform *auth_xform = NULL; + const struct rte_crypto_sym_xform *aead_xform = NULL; + int ret = 0; + + sess->auth_opt = internals->auth_opt; + sess->cmd_id = ccp_get_cmd_id(xform); + + switch (sess->cmd_id) { + case CCP_CMD_CIPHER: + cipher_xform = xform; + break; + case CCP_CMD_AUTH: + auth_xform = xform; + break; + case CCP_CMD_CIPHER_HASH: + cipher_xform = xform; + auth_xform = xform->next; + break; + case CCP_CMD_HASH_CIPHER: + auth_xform = xform; + cipher_xform = xform->next; + break; + case CCP_CMD_COMBINED: + aead_xform = xform; + break; + default: + CCP_LOG_ERR("Unsupported cmd_id"); + return -1; + } + + /* Default IV length = 0 */ + sess->iv.length = 0; + if (cipher_xform) { + ret = ccp_configure_session_cipher(sess, cipher_xform); + if (ret != 0) { + CCP_LOG_ERR("Invalid/unsupported cipher parameters"); + return ret; + } + } + if (auth_xform) { + ret = ccp_configure_session_auth(sess, auth_xform); + if (ret != 0) { + CCP_LOG_ERR("Invalid/unsupported auth parameters"); + return ret; + } + } + if (aead_xform) { + ret = ccp_configure_session_aead(sess, aead_xform); + if (ret != 0) { + CCP_LOG_ERR("Invalid/unsupported aead parameters"); + return ret; + } + } + return ret; +} + +/* calculate CCP descriptors requirement */ +static inline int +ccp_cipher_slot(struct ccp_session *session) +{ + int count = 0; + + switch (session->cipher.algo) { + case CCP_CIPHER_ALGO_AES_CBC: + count = 2; + /**< op + passthrough for iv */ + break; + case CCP_CIPHER_ALGO_AES_ECB: + count = 1; + /**< only op*/ + break; + case CCP_CIPHER_ALGO_AES_CTR: + count = 2; + /**< op + passthrough for iv */ + break; + case CCP_CIPHER_ALGO_3DES_CBC: + count = 2; + /**< op + passthrough for iv */ + break; + default: + CCP_LOG_ERR("Unsupported cipher algo %d", + session->cipher.algo); + } + return count; +} + +static inline int +ccp_auth_slot(struct ccp_session *session) +{ + int count = 0; + + switch (session->auth.algo) { + case CCP_AUTH_ALGO_SHA1: + case CCP_AUTH_ALGO_SHA224: + case CCP_AUTH_ALGO_SHA256: +
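/*
 * [Editor's note - illustrative, not part of the upstream patch.]
 * ccp_compute_slot_count() below sums these per-xform budgets so the
 * enqueue path can reserve descriptor-ring space before anything is
 * built. Worked example from the counts in this file, for a
 * CCP_CMD_CIPHER_HASH session doing AES-CBC + SHA1-HMAC with auth_opt
 * disabled:
 *
 *	ccp_cipher_slot() = 2	// op + passthrough for iv
 *	ccp_auth_slot()   = 6	// HMAC double-hash pipeline
 *	total             = 8 descriptors per crypto op
 */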
case CCP_AUTH_ALGO_SHA384: + case CCP_AUTH_ALGO_SHA512: + count = 3; + /**< op + lsb passthrough cpy to/from*/ + break; + case CCP_AUTH_ALGO_MD5_HMAC: + break; + case CCP_AUTH_ALGO_SHA1_HMAC: + case CCP_AUTH_ALGO_SHA224_HMAC: + case CCP_AUTH_ALGO_SHA256_HMAC: + if (session->auth_opt == 0) + count = 6; + break; + case CCP_AUTH_ALGO_SHA384_HMAC: + case CCP_AUTH_ALGO_SHA512_HMAC: + /** + * 1. Load PHash1 = H(k ^ ipad); to LSB + * 2. generate IHash = H(hash on message with PHash1 + * as init values); + * 3. Retrieve IHash 2 slots for 384/512 + * 4. Load PHash2 = H(k ^ opad); to LSB + * 5. generate FHash = H(hash on IHash with PHash2 + * as init value); + * 6. Retrieve HMAC output from LSB to host memory + */ + if (session->auth_opt == 0) + count = 7; + break; + case CCP_AUTH_ALGO_SHA3_224: + case CCP_AUTH_ALGO_SHA3_256: + case CCP_AUTH_ALGO_SHA3_384: + case CCP_AUTH_ALGO_SHA3_512: + count = 1; + /**< only op ctx and dst in host memory*/ + break; + case CCP_AUTH_ALGO_SHA3_224_HMAC: + case CCP_AUTH_ALGO_SHA3_256_HMAC: + count = 3; + break; + case CCP_AUTH_ALGO_SHA3_384_HMAC: + case CCP_AUTH_ALGO_SHA3_512_HMAC: + count = 4; + /** + * 1. Op to Perform IHash + * 2. Retrieve result from LSB to host memory + * 3. Perform final hash + */ + break; + case CCP_AUTH_ALGO_AES_CMAC: + count = 4; + /** + * op + * extra descriptor in padding case + * (k1/k2(255:128) with iv(127:0)) + * Retrieve result + */ + break; + default: + CCP_LOG_ERR("Unsupported auth algo %d", + session->auth.algo); + } + + return count; +} + +static int +ccp_aead_slot(struct ccp_session *session) +{ + int count = 0; + + switch (session->aead_algo) { + case RTE_CRYPTO_AEAD_AES_GCM: + break; + default: + CCP_LOG_ERR("Unsupported aead algo %d", + session->aead_algo); + } + switch (session->auth.algo) { + case CCP_AUTH_ALGO_AES_GCM: + count = 5; + /** + * 1. Passthru iv + * 2. Hash AAD + * 3. GCTR + * 4. Reload passthru + * 5.
Hash Final tag + */ + break; + default: + CCP_LOG_ERR("Unsupported combined auth ALGO %d", + session->auth.algo); + } + return count; +} + +int +ccp_compute_slot_count(struct ccp_session *session) +{ + int count = 0; + + switch (session->cmd_id) { + case CCP_CMD_CIPHER: + count = ccp_cipher_slot(session); + break; + case CCP_CMD_AUTH: + count = ccp_auth_slot(session); + break; + case CCP_CMD_CIPHER_HASH: + case CCP_CMD_HASH_CIPHER: + count = ccp_cipher_slot(session); + count += ccp_auth_slot(session); + break; + case CCP_CMD_COMBINED: + count = ccp_aead_slot(session); + break; + default: + CCP_LOG_ERR("Unsupported cmd_id"); + + } + + return count; +} + +static uint8_t +algo_select(int sessalgo, + const EVP_MD **algo) +{ + int res = 0; + + switch (sessalgo) { + case CCP_AUTH_ALGO_MD5_HMAC: + *algo = EVP_md5(); + break; + case CCP_AUTH_ALGO_SHA1_HMAC: + *algo = EVP_sha1(); + break; + case CCP_AUTH_ALGO_SHA224_HMAC: + *algo = EVP_sha224(); + break; + case CCP_AUTH_ALGO_SHA256_HMAC: + *algo = EVP_sha256(); + break; + case CCP_AUTH_ALGO_SHA384_HMAC: + *algo = EVP_sha384(); + break; + case CCP_AUTH_ALGO_SHA512_HMAC: + *algo = EVP_sha512(); + break; + default: + res = -EINVAL; + break; + } + return res; +} + +static int +process_cpu_auth_hmac(uint8_t *src, uint8_t *dst, + __rte_unused uint8_t *iv, + EVP_PKEY *pkey, + int srclen, + EVP_MD_CTX *ctx, + const EVP_MD *algo, + uint16_t d_len) +{ + size_t dstlen; + unsigned char temp_dst[64]; + + if (EVP_DigestSignInit(ctx, NULL, algo, NULL, pkey) <= 0) + goto process_auth_err; + + if (EVP_DigestSignUpdate(ctx, (char *)src, srclen) <= 0) + goto process_auth_err; + + if (EVP_DigestSignFinal(ctx, temp_dst, &dstlen) <= 0) + goto process_auth_err; + + memcpy(dst, temp_dst, d_len); + return 0; +process_auth_err: + CCP_LOG_ERR("Process cpu auth failed"); + return -EINVAL; +} + +static int cpu_crypto_auth(struct ccp_qp *qp, + struct rte_crypto_op *op, + struct ccp_session *sess, + EVP_MD_CTX *ctx) +{ + uint8_t *src, *dst; + int srclen, status; + struct rte_mbuf *mbuf_src, *mbuf_dst; + const EVP_MD *algo = NULL; + EVP_PKEY *pkey; + + algo_select(sess->auth.algo, &algo); + pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL, sess->auth.key, + sess->auth.key_length); + mbuf_src = op->sym->m_src; + mbuf_dst = op->sym->m_dst ? 
op->sym->m_dst : op->sym->m_src; + srclen = op->sym->auth.data.length; + src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *, + op->sym->auth.data.offset); + + if (sess->auth.op == CCP_AUTH_OP_VERIFY) { + dst = qp->temp_digest; + } else { + dst = op->sym->auth.digest.data; + if (dst == NULL) { + dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *, + op->sym->auth.data.offset + + sess->auth.digest_length); + } + } + status = process_cpu_auth_hmac(src, dst, NULL, + pkey, srclen, + ctx, + algo, + sess->auth.digest_length); + if (status) { + op->status = RTE_CRYPTO_OP_STATUS_ERROR; + return status; + } + + if (sess->auth.op == CCP_AUTH_OP_VERIFY) { + if (memcmp(dst, op->sym->auth.digest.data, + sess->auth.digest_length) != 0) { + op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; + } else { + op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + } + } else { + op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + } + EVP_PKEY_free(pkey); + return 0; +} + +static void +ccp_perform_passthru(struct ccp_passthru *pst, + struct ccp_queue *cmd_q) +{ + struct ccp_desc *desc; + union ccp_function function; + + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + + CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU; + + CCP_CMD_SOC(desc) = 0; + CCP_CMD_IOC(desc) = 0; + CCP_CMD_INIT(desc) = 0; + CCP_CMD_EOM(desc) = 0; + CCP_CMD_PROT(desc) = 0; + + function.raw = 0; + CCP_PT_BYTESWAP(&function) = pst->byte_swap; + CCP_PT_BITWISE(&function) = pst->bit_mod; + CCP_CMD_FUNCTION(desc) = function.raw; + + CCP_CMD_LEN(desc) = pst->len; + + if (pst->dir) { + CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr); + CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr); + CCP_CMD_DST_HI(desc) = 0; + CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB; + + if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) + CCP_CMD_LSB_ID(desc) = cmd_q->sb_key; + } else { + + CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr); + CCP_CMD_SRC_HI(desc) = 0; + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB; + + CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr); + CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr); + CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM; + } + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; +} + +static int +ccp_perform_hmac(struct rte_crypto_op *op, + struct ccp_queue *cmd_q) +{ + + struct ccp_session *session; + union ccp_function function; + struct ccp_desc *desc; + uint32_t tail; + phys_addr_t src_addr, dest_addr, dest_addr_t; + struct ccp_passthru pst; + uint64_t auth_msg_bits; + void *append_ptr; + uint8_t *addr; + + session = (struct ccp_session *)get_sym_session_private_data( + op->sym->session, + ccp_cryptodev_driver_id); + addr = session->auth.pre_compute; + + src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src, + op->sym->auth.data.offset); + append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src, + session->auth.ctx_len); + dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr); + dest_addr_t = dest_addr; + + /** Load PHash1 to LSB*/ + pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr); + pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES); + pst.len = session->auth.ctx_len; + pst.dir = 1; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP; + ccp_perform_passthru(&pst, cmd_q); + + /**sha engine command descriptor for IntermediateHash*/ + + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + memset(desc, 0, Q_DESC_SIZE); + + CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA; + + CCP_CMD_SOC(desc) = 0; + CCP_CMD_IOC(desc) = 0; + 
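/*
 * [Editor's note - illustrative, not part of the upstream patch.]
 * ccp_perform_passthru() above is the staging primitive every hash
 * path here uses: dir = 1 copies from system memory into a local
 * storage block (LSB) slot, dir = 0 copies back out, optionally
 * byte-swapped. Typical invocation, mirroring the PHash1 load just
 * above (state_buf stands in for any host-side state buffer):
 *
 *	struct ccp_passthru pst;
 *
 *	pst.src_addr  = rte_mem_virt2phy(state_buf);
 *	pst.dest_addr = cmd_q->sb_sha * CCP_SB_BYTES; // LSB offset
 *	pst.len       = CCP_SB_BYTES;
 *	pst.dir       = 1;                            // host -> LSB
 *	pst.bit_mod   = CCP_PASSTHRU_BITWISE_NOOP;
 *	pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
 *	ccp_perform_passthru(&pst, cmd_q);
 *
 * The HMAC sequence being assembled in ccp_perform_hmac() is then:
 * load H(k ^ ipad), SHA over the message, copy the intermediate hash
 * out, load H(k ^ opad), SHA over the intermediate hash, copy the
 * final MAC out - matching the numbered comment in ccp_auth_slot().
 */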
CCP_CMD_INIT(desc) = 1; + CCP_CMD_EOM(desc) = 1; + CCP_CMD_PROT(desc) = 0; + + function.raw = 0; + CCP_SHA_TYPE(&function) = session->auth.ut.sha_type; + CCP_CMD_FUNCTION(desc) = function.raw; + + CCP_CMD_LEN(desc) = op->sym->auth.data.length; + auth_msg_bits = (op->sym->auth.data.length + + session->auth.block_size) * 8; + + CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr); + CCP_CMD_SRC_HI(desc) = high32_value(src_addr); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha; + CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits); + CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits); + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + + rte_wmb(); + + tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol | CMD_Q_RUN); + + /* Intermediate Hash value retrieve */ + if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) || + (session->auth.ut.sha_type == CCP_SHA_TYPE_512)) { + + pst.src_addr = + (phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES); + pst.dest_addr = dest_addr_t; + pst.len = CCP_SB_BYTES; + pst.dir = 0; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT; + ccp_perform_passthru(&pst, cmd_q); + + pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES); + pst.dest_addr = dest_addr_t + CCP_SB_BYTES; + pst.len = CCP_SB_BYTES; + pst.dir = 0; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT; + ccp_perform_passthru(&pst, cmd_q); + + } else { + pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES); + pst.dest_addr = dest_addr_t; + pst.len = session->auth.ctx_len; + pst.dir = 0; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT; + ccp_perform_passthru(&pst, cmd_q); + + } + + /** Load PHash2 to LSB*/ + addr += session->auth.ctx_len; + pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr); + pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES); + pst.len = session->auth.ctx_len; + pst.dir = 1; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP; + ccp_perform_passthru(&pst, cmd_q); + + /**sha engine command descriptor for FinalHash*/ + dest_addr_t += session->auth.offset; + + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + memset(desc, 0, Q_DESC_SIZE); + + CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA; + + CCP_CMD_SOC(desc) = 0; + CCP_CMD_IOC(desc) = 0; + CCP_CMD_INIT(desc) = 1; + CCP_CMD_EOM(desc) = 1; + CCP_CMD_PROT(desc) = 0; + + function.raw = 0; + CCP_SHA_TYPE(&function) = session->auth.ut.sha_type; + CCP_CMD_FUNCTION(desc) = function.raw; + + CCP_CMD_LEN(desc) = (session->auth.ctx_len - + session->auth.offset); + auth_msg_bits = (session->auth.block_size + + session->auth.ctx_len - + session->auth.offset) * 8; + + CCP_CMD_SRC_LO(desc) = (uint32_t)(dest_addr_t); + CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha; + CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits); + CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits); + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + + rte_wmb(); + + tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol | CMD_Q_RUN); + + /* Retrieve hmac output */ + pst.src_addr = 
(phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES); + pst.dest_addr = dest_addr; + pst.len = session->auth.ctx_len; + pst.dir = 0; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) || + (session->auth.ut.sha_type == CCP_SHA_TYPE_512)) + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP; + else + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT; + ccp_perform_passthru(&pst, cmd_q); + + op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + return 0; + +} + +static int +ccp_perform_sha(struct rte_crypto_op *op, + struct ccp_queue *cmd_q) +{ + struct ccp_session *session; + union ccp_function function; + struct ccp_desc *desc; + uint32_t tail; + phys_addr_t src_addr, dest_addr; + struct ccp_passthru pst; + void *append_ptr; + uint64_t auth_msg_bits; + + session = (struct ccp_session *)get_sym_session_private_data( + op->sym->session, + ccp_cryptodev_driver_id); + + src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src, + op->sym->auth.data.offset); + + append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src, + session->auth.ctx_len); + dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr); + + /** Passthru sha context*/ + + pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) + session->auth.ctx); + pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES); + pst.len = session->auth.ctx_len; + pst.dir = 1; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP; + ccp_perform_passthru(&pst, cmd_q); + + /**prepare sha command descriptor*/ + + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + memset(desc, 0, Q_DESC_SIZE); + + CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA; + + CCP_CMD_SOC(desc) = 0; + CCP_CMD_IOC(desc) = 0; + CCP_CMD_INIT(desc) = 1; + CCP_CMD_EOM(desc) = 1; + CCP_CMD_PROT(desc) = 0; + + function.raw = 0; + CCP_SHA_TYPE(&function) = session->auth.ut.sha_type; + CCP_CMD_FUNCTION(desc) = function.raw; + + CCP_CMD_LEN(desc) = op->sym->auth.data.length; + auth_msg_bits = op->sym->auth.data.length * 8; + + CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr); + CCP_CMD_SRC_HI(desc) = high32_value(src_addr); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha; + CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits); + CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits); + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + + rte_wmb(); + + tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol | CMD_Q_RUN); + + /* Hash value retrieve */ + pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES); + pst.dest_addr = dest_addr; + pst.len = session->auth.ctx_len; + pst.dir = 0; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) || + (session->auth.ut.sha_type == CCP_SHA_TYPE_512)) + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP; + else + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT; + ccp_perform_passthru(&pst, cmd_q); + + op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + return 0; + +} + +static int +ccp_perform_sha3_hmac(struct rte_crypto_op *op, + struct ccp_queue *cmd_q) +{ + struct ccp_session *session; + struct ccp_passthru pst; + union ccp_function function; + struct ccp_desc *desc; + uint8_t *append_ptr; + uint32_t tail; + phys_addr_t src_addr, dest_addr, ctx_paddr, dest_addr_t; + + session = (struct ccp_session *)get_sym_session_private_data( + op->sym->session, + ccp_cryptodev_driver_id); + + src_addr = 
rte_pktmbuf_mtophys_offset(op->sym->m_src, + op->sym->auth.data.offset); + append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src, + session->auth.ctx_len); + if (!append_ptr) { + CCP_LOG_ERR("CCP MBUF append failed\n"); + return -1; + } + dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr); + dest_addr_t = dest_addr + (session->auth.ctx_len / 2); + ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void + *)session->auth.pre_compute); + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + memset(desc, 0, Q_DESC_SIZE); + + /*desc1 for SHA3-Ihash operation */ + CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA; + CCP_CMD_INIT(desc) = 1; + CCP_CMD_EOM(desc) = 1; + + function.raw = 0; + CCP_SHA_TYPE(&function) = session->auth.ut.sha_type; + CCP_CMD_FUNCTION(desc) = function.raw; + CCP_CMD_LEN(desc) = op->sym->auth.data.length; + + CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr); + CCP_CMD_SRC_HI(desc) = high32_value(src_addr); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_DST_LO(desc) = (cmd_q->sb_sha * CCP_SB_BYTES); + CCP_CMD_DST_HI(desc) = 0; + CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB; + + CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr); + CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr); + CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + + rte_wmb(); + tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol | CMD_Q_RUN); + + /* Intermediate Hash value retrieve */ + if ((session->auth.ut.sha_type == CCP_SHA3_TYPE_384) || + (session->auth.ut.sha_type == CCP_SHA3_TYPE_512)) { + + pst.src_addr = + (phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES); + pst.dest_addr = dest_addr_t; + pst.len = CCP_SB_BYTES; + pst.dir = 0; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT; + ccp_perform_passthru(&pst, cmd_q); + + pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES); + pst.dest_addr = dest_addr_t + CCP_SB_BYTES; + pst.len = CCP_SB_BYTES; + pst.dir = 0; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT; + ccp_perform_passthru(&pst, cmd_q); + + } else { + pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES); + pst.dest_addr = dest_addr_t; + pst.len = CCP_SB_BYTES; + pst.dir = 0; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT; + ccp_perform_passthru(&pst, cmd_q); + } + + /**sha engine command descriptor for FinalHash*/ + ctx_paddr += CCP_SHA3_CTX_SIZE; + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + memset(desc, 0, Q_DESC_SIZE); + + CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA; + CCP_CMD_INIT(desc) = 1; + CCP_CMD_EOM(desc) = 1; + + function.raw = 0; + CCP_SHA_TYPE(&function) = session->auth.ut.sha_type; + CCP_CMD_FUNCTION(desc) = function.raw; + + if (session->auth.ut.sha_type == CCP_SHA3_TYPE_224) { + dest_addr_t += (CCP_SB_BYTES - SHA224_DIGEST_SIZE); + CCP_CMD_LEN(desc) = SHA224_DIGEST_SIZE; + } else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_256) { + CCP_CMD_LEN(desc) = SHA256_DIGEST_SIZE; + } else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_384) { + dest_addr_t += (2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE); + CCP_CMD_LEN(desc) = SHA384_DIGEST_SIZE; + } else { + CCP_CMD_LEN(desc) = SHA512_DIGEST_SIZE; + } + + CCP_CMD_SRC_LO(desc) = ((uint32_t)dest_addr_t); + CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_DST_LO(desc) = 
(uint32_t)dest_addr; + CCP_CMD_DST_HI(desc) = high32_value(dest_addr); + CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr); + CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr); + CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + + rte_wmb(); + tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol | CMD_Q_RUN); + + op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + return 0; +} + +static int +ccp_perform_sha3(struct rte_crypto_op *op, + struct ccp_queue *cmd_q) +{ + struct ccp_session *session; + union ccp_function function; + struct ccp_desc *desc; + uint8_t *ctx_addr, *append_ptr; + uint32_t tail; + phys_addr_t src_addr, dest_addr, ctx_paddr; + + session = (struct ccp_session *)get_sym_session_private_data( + op->sym->session, + ccp_cryptodev_driver_id); + + src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src, + op->sym->auth.data.offset); + append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src, + session->auth.ctx_len); + if (!append_ptr) { + CCP_LOG_ERR("CCP MBUF append failed\n"); + return -1; + } + dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr); + ctx_addr = session->auth.sha3_ctx; + ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr); + + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + memset(desc, 0, Q_DESC_SIZE); + + /* prepare desc for SHA3 operation */ + CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA; + CCP_CMD_INIT(desc) = 1; + CCP_CMD_EOM(desc) = 1; + + function.raw = 0; + CCP_SHA_TYPE(&function) = session->auth.ut.sha_type; + CCP_CMD_FUNCTION(desc) = function.raw; + + CCP_CMD_LEN(desc) = op->sym->auth.data.length; + + CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr); + CCP_CMD_SRC_HI(desc) = high32_value(src_addr); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr); + CCP_CMD_DST_HI(desc) = high32_value(dest_addr); + CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr); + CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr); + CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + + rte_wmb(); + + tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol | CMD_Q_RUN); + + op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + return 0; +} + +static int +ccp_perform_aes_cmac(struct rte_crypto_op *op, + struct ccp_queue *cmd_q) +{ + struct ccp_session *session; + union ccp_function function; + struct ccp_passthru pst; + struct ccp_desc *desc; + uint32_t tail; + uint8_t *src_tb, *append_ptr, *ctx_addr; + phys_addr_t src_addr, dest_addr, key_addr; + int length, non_align_len; + + session = (struct ccp_session *)get_sym_session_private_data( + op->sym->session, + ccp_cryptodev_driver_id); + key_addr = rte_mem_virt2phy(session->auth.key_ccp); + + src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src, + op->sym->auth.data.offset); + append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src, + session->auth.ctx_len); + dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr); + + function.raw = 0; + CCP_AES_ENCRYPT(&function) = CCP_CIPHER_DIR_ENCRYPT; + CCP_AES_MODE(&function) = session->auth.um.aes_mode; + CCP_AES_TYPE(&function) = session->auth.ut.aes_type; + + if 
(op->sym->auth.data.length % session->auth.block_size == 0) { + + ctx_addr = session->auth.pre_compute; + memset(ctx_addr, 0, AES_BLOCK_SIZE); + pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr); + pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES); + pst.len = CCP_SB_BYTES; + pst.dir = 1; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP; + ccp_perform_passthru(&pst, cmd_q); + + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + memset(desc, 0, Q_DESC_SIZE); + + /* prepare desc for aes-cmac command */ + CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES; + CCP_CMD_EOM(desc) = 1; + CCP_CMD_FUNCTION(desc) = function.raw; + + CCP_CMD_LEN(desc) = op->sym->auth.data.length; + CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr); + CCP_CMD_SRC_HI(desc) = high32_value(src_addr); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr); + CCP_CMD_KEY_HI(desc) = high32_value(key_addr); + CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM; + CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv; + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + + rte_wmb(); + + tail = + (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol | CMD_Q_RUN); + } else { + ctx_addr = session->auth.pre_compute + CCP_SB_BYTES; + memset(ctx_addr, 0, AES_BLOCK_SIZE); + pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr); + pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES); + pst.len = CCP_SB_BYTES; + pst.dir = 1; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP; + ccp_perform_passthru(&pst, cmd_q); + + length = (op->sym->auth.data.length / AES_BLOCK_SIZE); + length *= AES_BLOCK_SIZE; + non_align_len = op->sym->auth.data.length - length; + /* prepare desc for aes-cmac command */ + /*Command 1*/ + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + memset(desc, 0, Q_DESC_SIZE); + + CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES; + CCP_CMD_INIT(desc) = 1; + CCP_CMD_FUNCTION(desc) = function.raw; + + CCP_CMD_LEN(desc) = length; + CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr); + CCP_CMD_SRC_HI(desc) = high32_value(src_addr); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr); + CCP_CMD_KEY_HI(desc) = high32_value(key_addr); + CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM; + CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv; + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + + /*Command 2*/ + append_ptr = append_ptr + CCP_SB_BYTES; + memset(append_ptr, 0, AES_BLOCK_SIZE); + src_tb = rte_pktmbuf_mtod_offset(op->sym->m_src, + uint8_t *, + op->sym->auth.data.offset + + length); + rte_memcpy(append_ptr, src_tb, non_align_len); + append_ptr[non_align_len] = CMAC_PAD_VALUE; + + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + memset(desc, 0, Q_DESC_SIZE); + + CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES; + CCP_CMD_EOM(desc) = 1; + CCP_CMD_FUNCTION(desc) = function.raw; + CCP_CMD_LEN(desc) = AES_BLOCK_SIZE; + + CCP_CMD_SRC_LO(desc) = ((uint32_t)(dest_addr + CCP_SB_BYTES)); + CCP_CMD_SRC_HI(desc) = high32_value(dest_addr + CCP_SB_BYTES); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr); + CCP_CMD_KEY_HI(desc) = high32_value(key_addr); + CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM; + CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv; + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + + rte_wmb(); + tail = + (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * 
Q_DESC_SIZE); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol | CMD_Q_RUN); + } + /* Retrieve result */ + pst.dest_addr = dest_addr; + pst.src_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES); + pst.len = CCP_SB_BYTES; + pst.dir = 0; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT; + ccp_perform_passthru(&pst, cmd_q); + + op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + return 0; +} + +static int +ccp_perform_aes(struct rte_crypto_op *op, + struct ccp_queue *cmd_q, + struct ccp_batch_info *b_info) +{ + struct ccp_session *session; + union ccp_function function; + uint8_t *lsb_buf; + struct ccp_passthru pst = {0}; + struct ccp_desc *desc; + phys_addr_t src_addr, dest_addr, key_addr; + uint8_t *iv; + + session = (struct ccp_session *)get_sym_session_private_data( + op->sym->session, + ccp_cryptodev_driver_id); + function.raw = 0; + + iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset); + if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) { + if (session->cipher.um.aes_mode == CCP_AES_MODE_CTR) { + rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE, + iv, session->iv.length); + pst.src_addr = (phys_addr_t)session->cipher.nonce_phys; + CCP_AES_SIZE(&function) = 0x1F; + } else { + lsb_buf = + &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]); + rte_memcpy(lsb_buf + + (CCP_SB_BYTES - session->iv.length), + iv, session->iv.length); + pst.src_addr = b_info->lsb_buf_phys + + (b_info->lsb_buf_idx * CCP_SB_BYTES); + b_info->lsb_buf_idx++; + } + + pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES); + pst.len = CCP_SB_BYTES; + pst.dir = 1; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT; + ccp_perform_passthru(&pst, cmd_q); + } + + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + + src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src, + op->sym->cipher.data.offset); + if (likely(op->sym->m_dst != NULL)) + dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst, + op->sym->cipher.data.offset); + else + dest_addr = src_addr; + key_addr = session->cipher.key_phys; + + /* prepare desc for aes command */ + CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES; + CCP_CMD_INIT(desc) = 1; + CCP_CMD_EOM(desc) = 1; + + CCP_AES_ENCRYPT(&function) = session->cipher.dir; + CCP_AES_MODE(&function) = session->cipher.um.aes_mode; + CCP_AES_TYPE(&function) = session->cipher.ut.aes_type; + CCP_CMD_FUNCTION(desc) = function.raw; + + CCP_CMD_LEN(desc) = op->sym->cipher.data.length; + + CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr); + CCP_CMD_SRC_HI(desc) = high32_value(src_addr); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr); + CCP_CMD_DST_HI(desc) = high32_value(dest_addr); + CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr); + CCP_CMD_KEY_HI(desc) = high32_value(key_addr); + CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) + CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv; + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + return 0; +} + +static int +ccp_perform_3des(struct rte_crypto_op *op, + struct ccp_queue *cmd_q, + struct ccp_batch_info *b_info) +{ + struct ccp_session *session; + union ccp_function function; + unsigned char *lsb_buf; + struct ccp_passthru pst; + struct ccp_desc *desc; + uint32_t tail; + uint8_t *iv; + phys_addr_t src_addr, dest_addr, key_addr; 
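Both ccp_perform_aes() above and ccp_perform_3des(), whose locals were just declared, stage the per-op IV the same way before the cipher descriptor references it through CCP_CMD_LSB_ID: the IV is copied, right-aligned, into a 32-byte storage-block image, which a passthrough command then loads into the queue's reserved LSB slot. A minimal sketch of just that staging step, with hypothetical demo_* names standing in for the driver's buffers:

#include <stdint.h>
#include <string.h>

#define DEMO_SB_BYTES 32	/* mirrors CCP_SB_BYTES */

/* Right-align an IV inside a zeroed 32-byte storage-block image, the
 * layout the CBC paths build in lsb_buf before the passthrough command
 * pushes it to hardware. Assumes iv_len <= DEMO_SB_BYTES.
 */
static void
demo_stage_iv(uint8_t sb_image[DEMO_SB_BYTES], const uint8_t *iv,
	      size_t iv_len)
{
	memset(sb_image, 0, DEMO_SB_BYTES);
	memcpy(sb_image + (DEMO_SB_BYTES - iv_len), iv, iv_len);
}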
+ + session = (struct ccp_session *)get_sym_session_private_data( + op->sym->session, + ccp_cryptodev_driver_id); + + iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset); + switch (session->cipher.um.des_mode) { + case CCP_DES_MODE_CBC: + lsb_buf = &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]); + b_info->lsb_buf_idx++; + + rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length), + iv, session->iv.length); + + pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf); + pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES); + pst.len = CCP_SB_BYTES; + pst.dir = 1; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT; + ccp_perform_passthru(&pst, cmd_q); + break; + case CCP_DES_MODE_CFB: + case CCP_DES_MODE_ECB: + CCP_LOG_ERR("Unsupported DES cipher mode"); + return -ENOTSUP; + } + + src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src, + op->sym->cipher.data.offset); + if (unlikely(op->sym->m_dst != NULL)) + dest_addr = + rte_pktmbuf_mtophys_offset(op->sym->m_dst, + op->sym->cipher.data.offset); + else + dest_addr = src_addr; + + key_addr = rte_mem_virt2phy(session->cipher.key_ccp); + + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + + memset(desc, 0, Q_DESC_SIZE); + + /* prepare desc for des command */ + CCP_CMD_ENGINE(desc) = CCP_ENGINE_3DES; + + CCP_CMD_SOC(desc) = 0; + CCP_CMD_IOC(desc) = 0; + CCP_CMD_INIT(desc) = 1; + CCP_CMD_EOM(desc) = 1; + CCP_CMD_PROT(desc) = 0; + + function.raw = 0; + CCP_DES_ENCRYPT(&function) = session->cipher.dir; + CCP_DES_MODE(&function) = session->cipher.um.des_mode; + CCP_DES_TYPE(&function) = session->cipher.ut.des_type; + CCP_CMD_FUNCTION(desc) = function.raw; + + CCP_CMD_LEN(desc) = op->sym->cipher.data.length; + + CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr); + CCP_CMD_SRC_HI(desc) = high32_value(src_addr); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr); + CCP_CMD_DST_HI(desc) = high32_value(dest_addr); + CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr); + CCP_CMD_KEY_HI(desc) = high32_value(key_addr); + CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + if (session->cipher.um.des_mode) + CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv; + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + + rte_wmb(); + + /* Write the new tail address back to the queue register */ + tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail); + /* Turn the queue back on using our cached control register */ + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol | CMD_Q_RUN); + + op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + return 0; +} + +static int +ccp_perform_aes_gcm(struct rte_crypto_op *op, struct ccp_queue *cmd_q) +{ + struct ccp_session *session; + union ccp_function function; + uint8_t *iv; + struct ccp_passthru pst; + struct ccp_desc *desc; + uint32_t tail; + uint64_t *temp; + phys_addr_t src_addr, dest_addr, key_addr, aad_addr; + phys_addr_t digest_dest_addr; + int length, non_align_len; + + session = (struct ccp_session *)get_sym_session_private_data( + op->sym->session, + ccp_cryptodev_driver_id); + iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset); + key_addr = session->cipher.key_phys; + + src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src, + op->sym->aead.data.offset); + if (unlikely(op->sym->m_dst != NULL)) + dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst, + op->sym->aead.data.offset); + else + 
dest_addr = src_addr; + rte_pktmbuf_append(op->sym->m_src, session->auth.ctx_len); + digest_dest_addr = op->sym->aead.digest.phys_addr; + temp = (uint64_t *)(op->sym->aead.digest.data + AES_BLOCK_SIZE); + *temp++ = rte_bswap64(session->auth.aad_length << 3); + *temp = rte_bswap64(op->sym->aead.data.length << 3); + + non_align_len = op->sym->aead.data.length % AES_BLOCK_SIZE; + length = CCP_ALIGN(op->sym->aead.data.length, AES_BLOCK_SIZE); + + aad_addr = op->sym->aead.aad.phys_addr; + + /* CMD1 IV Passthru */ + rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE, iv, + session->iv.length); + pst.src_addr = session->cipher.nonce_phys; + pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES); + pst.len = CCP_SB_BYTES; + pst.dir = 1; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP; + ccp_perform_passthru(&pst, cmd_q); + + /* CMD2 GHASH-AAD */ + function.raw = 0; + CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_AAD; + CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH; + CCP_AES_TYPE(&function) = session->cipher.ut.aes_type; + + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + memset(desc, 0, Q_DESC_SIZE); + + CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES; + CCP_CMD_INIT(desc) = 1; + CCP_CMD_FUNCTION(desc) = function.raw; + + CCP_CMD_LEN(desc) = session->auth.aad_length; + + CCP_CMD_SRC_LO(desc) = ((uint32_t)aad_addr); + CCP_CMD_SRC_HI(desc) = high32_value(aad_addr); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr); + CCP_CMD_KEY_HI(desc) = high32_value(key_addr); + CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv; + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + rte_wmb(); + + tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol | CMD_Q_RUN); + + /* CMD3 : GCTR Plain text */ + function.raw = 0; + CCP_AES_ENCRYPT(&function) = session->cipher.dir; + CCP_AES_MODE(&function) = CCP_AES_MODE_GCTR; + CCP_AES_TYPE(&function) = session->cipher.ut.aes_type; + if (non_align_len == 0) + CCP_AES_SIZE(&function) = (AES_BLOCK_SIZE << 3) - 1; + else + CCP_AES_SIZE(&function) = (non_align_len << 3) - 1; + + + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + memset(desc, 0, Q_DESC_SIZE); + + CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES; + CCP_CMD_EOM(desc) = 1; + CCP_CMD_FUNCTION(desc) = function.raw; + + CCP_CMD_LEN(desc) = length; + + CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr); + CCP_CMD_SRC_HI(desc) = high32_value(src_addr); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr); + CCP_CMD_DST_HI(desc) = high32_value(dest_addr); + CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr); + CCP_CMD_KEY_HI(desc) = high32_value(key_addr); + CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv; + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + rte_wmb(); + + tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol | CMD_Q_RUN); + + /* CMD4 : PT to copy IV */ + pst.src_addr = session->cipher.nonce_phys; + pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES); + pst.len = AES_BLOCK_SIZE; + pst.dir = 1; + pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP; + pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP; +
ccp_perform_passthru(&pst, cmd_q); + + /* CMD5 : GHASH-Final */ + function.raw = 0; + CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_FINAL; + CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH; + CCP_AES_TYPE(&function) = session->cipher.ut.aes_type; + + desc = &cmd_q->qbase_desc[cmd_q->qidx]; + memset(desc, 0, Q_DESC_SIZE); + + CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES; + CCP_CMD_FUNCTION(desc) = function.raw; + /* Last block (AAD_len || PT_len) */ + CCP_CMD_LEN(desc) = AES_BLOCK_SIZE; + + CCP_CMD_SRC_LO(desc) = ((uint32_t)digest_dest_addr + AES_BLOCK_SIZE); + CCP_CMD_SRC_HI(desc) = high32_value(digest_dest_addr + AES_BLOCK_SIZE); + CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_DST_LO(desc) = ((uint32_t)digest_dest_addr); + CCP_CMD_DST_HI(desc) = high32_value(digest_dest_addr); + CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr); + CCP_CMD_KEY_HI(desc) = high32_value(key_addr); + CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM; + + CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv; + + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + rte_wmb(); + + tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol | CMD_Q_RUN); + + op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + return 0; +} + +static inline int +ccp_crypto_cipher(struct rte_crypto_op *op, + struct ccp_queue *cmd_q, + struct ccp_batch_info *b_info) +{ + int result = 0; + struct ccp_session *session; + + session = (struct ccp_session *)get_sym_session_private_data( + op->sym->session, + ccp_cryptodev_driver_id); + + switch (session->cipher.algo) { + case CCP_CIPHER_ALGO_AES_CBC: + result = ccp_perform_aes(op, cmd_q, b_info); + b_info->desccnt += 2; + break; + case CCP_CIPHER_ALGO_AES_CTR: + result = ccp_perform_aes(op, cmd_q, b_info); + b_info->desccnt += 2; + break; + case CCP_CIPHER_ALGO_AES_ECB: + result = ccp_perform_aes(op, cmd_q, b_info); + b_info->desccnt += 1; + break; + case CCP_CIPHER_ALGO_3DES_CBC: + result = ccp_perform_3des(op, cmd_q, b_info); + b_info->desccnt += 2; + break; + default: + CCP_LOG_ERR("Unsupported cipher algo %d", + session->cipher.algo); + return -ENOTSUP; + } + return result; +} + +static inline int +ccp_crypto_auth(struct rte_crypto_op *op, + struct ccp_queue *cmd_q, + struct ccp_batch_info *b_info) +{ + + int result = 0; + struct ccp_session *session; + + session = (struct ccp_session *)get_sym_session_private_data( + op->sym->session, + ccp_cryptodev_driver_id); + + switch (session->auth.algo) { + case CCP_AUTH_ALGO_SHA1: + case CCP_AUTH_ALGO_SHA224: + case CCP_AUTH_ALGO_SHA256: + case CCP_AUTH_ALGO_SHA384: + case CCP_AUTH_ALGO_SHA512: + result = ccp_perform_sha(op, cmd_q); + b_info->desccnt += 3; + break; + case CCP_AUTH_ALGO_MD5_HMAC: + if (session->auth_opt == 0) + result = -1; + break; + case CCP_AUTH_ALGO_SHA1_HMAC: + case CCP_AUTH_ALGO_SHA224_HMAC: + case CCP_AUTH_ALGO_SHA256_HMAC: + if (session->auth_opt == 0) { + result = ccp_perform_hmac(op, cmd_q); + b_info->desccnt += 6; + } + break; + case CCP_AUTH_ALGO_SHA384_HMAC: + case CCP_AUTH_ALGO_SHA512_HMAC: + if (session->auth_opt == 0) { + result = ccp_perform_hmac(op, cmd_q); + b_info->desccnt += 7; + } + break; + case CCP_AUTH_ALGO_SHA3_224: + case CCP_AUTH_ALGO_SHA3_256: + case CCP_AUTH_ALGO_SHA3_384: + case CCP_AUTH_ALGO_SHA3_512: + result = ccp_perform_sha3(op, cmd_q); + b_info->desccnt += 1; + break; + case CCP_AUTH_ALGO_SHA3_224_HMAC: + case
CCP_AUTH_ALGO_SHA3_256_HMAC: + result = ccp_perform_sha3_hmac(op, cmd_q); + b_info->desccnt += 3; + break; + case CCP_AUTH_ALGO_SHA3_384_HMAC: + case CCP_AUTH_ALGO_SHA3_512_HMAC: + result = ccp_perform_sha3_hmac(op, cmd_q); + b_info->desccnt += 4; + break; + case CCP_AUTH_ALGO_AES_CMAC: + result = ccp_perform_aes_cmac(op, cmd_q); + b_info->desccnt += 4; + break; + default: + CCP_LOG_ERR("Unsupported auth algo %d", + session->auth.algo); + return -ENOTSUP; + } + + return result; +} + +static inline int +ccp_crypto_aead(struct rte_crypto_op *op, + struct ccp_queue *cmd_q, + struct ccp_batch_info *b_info) +{ + int result = 0; + struct ccp_session *session; + + session = (struct ccp_session *)get_sym_session_private_data( + op->sym->session, + ccp_cryptodev_driver_id); + + switch (session->auth.algo) { + case CCP_AUTH_ALGO_AES_GCM: + if (session->cipher.algo != CCP_CIPHER_ALGO_AES_GCM) { + CCP_LOG_ERR("Incorrect chain order"); + return -1; + } + result = ccp_perform_aes_gcm(op, cmd_q); + b_info->desccnt += 5; + break; + default: + CCP_LOG_ERR("Unsupported aead algo %d", + session->aead_algo); + return -ENOTSUP; + } + return result; +} + +int +process_ops_to_enqueue(struct ccp_qp *qp, + struct rte_crypto_op **op, + struct ccp_queue *cmd_q, + uint16_t nb_ops, + int slots_req) +{ + int i, result = 0; + struct ccp_batch_info *b_info; + struct ccp_session *session; + EVP_MD_CTX *auth_ctx = NULL; + + if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) { + CCP_LOG_ERR("batch info allocation failed"); + return 0; + } + + auth_ctx = EVP_MD_CTX_create(); + if (unlikely(!auth_ctx)) { + CCP_LOG_ERR("Unable to create auth ctx"); + return 0; + } + b_info->auth_ctr = 0; + + /* populate batch info necessary for dequeue */ + b_info->op_idx = 0; + b_info->lsb_buf_idx = 0; + b_info->desccnt = 0; + b_info->cmd_q = cmd_q; + b_info->lsb_buf_phys = + (phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf); + rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req); + + b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * + Q_DESC_SIZE); + for (i = 0; i < nb_ops; i++) { + session = (struct ccp_session *)get_sym_session_private_data( + op[i]->sym->session, + ccp_cryptodev_driver_id); + switch (session->cmd_id) { + case CCP_CMD_CIPHER: + result = ccp_crypto_cipher(op[i], cmd_q, b_info); + break; + case CCP_CMD_AUTH: + if (session->auth_opt) { + b_info->auth_ctr++; + result = cpu_crypto_auth(qp, op[i], + session, auth_ctx); + } else + result = ccp_crypto_auth(op[i], cmd_q, b_info); + break; + case CCP_CMD_CIPHER_HASH: + result = ccp_crypto_cipher(op[i], cmd_q, b_info); + if (result) + break; + result = ccp_crypto_auth(op[i], cmd_q, b_info); + break; + case CCP_CMD_HASH_CIPHER: + if (session->auth_opt) { + result = cpu_crypto_auth(qp, op[i], + session, auth_ctx); + if (op[i]->status != + RTE_CRYPTO_OP_STATUS_SUCCESS) + continue; + } else + result = ccp_crypto_auth(op[i], cmd_q, b_info); + + if (result) + break; + result = ccp_crypto_cipher(op[i], cmd_q, b_info); + break; + case CCP_CMD_COMBINED: + result = ccp_crypto_aead(op[i], cmd_q, b_info); + break; + default: + CCP_LOG_ERR("Unsupported cmd_id"); + result = -1; + } + if (unlikely(result < 0)) { + rte_atomic64_add(&b_info->cmd_q->free_slots, + (slots_req - b_info->desccnt)); + break; + } + b_info->op[i] = op[i]; + } + + b_info->opcnt = i; + b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * + Q_DESC_SIZE); + + rte_wmb(); + /* Write the new tail address back to the queue register */ + CCP_WRITE_REG(cmd_q->reg_base, 
CMD_Q_TAIL_LO_BASE, + b_info->tail_offset); + /* Turn the queue back on using our cached control register */ + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol | CMD_Q_RUN); + + rte_ring_enqueue(qp->processed_pkts, (void *)b_info); + + EVP_MD_CTX_destroy(auth_ctx); + return i; +} + +static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op) +{ + struct ccp_session *session; + uint8_t *digest_data, *addr; + struct rte_mbuf *m_last; + int offset, digest_offset; + uint8_t digest_le[64]; + + session = (struct ccp_session *)get_sym_session_private_data( + op->sym->session, + ccp_cryptodev_driver_id); + + if (session->cmd_id == CCP_CMD_COMBINED) { + digest_data = op->sym->aead.digest.data; + digest_offset = op->sym->aead.data.offset + + op->sym->aead.data.length; + } else { + digest_data = op->sym->auth.digest.data; + digest_offset = op->sym->auth.data.offset + + op->sym->auth.data.length; + } + m_last = rte_pktmbuf_lastseg(op->sym->m_src); + addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off + + m_last->data_len - session->auth.ctx_len); + + rte_mb(); + offset = session->auth.offset; + + if (session->auth.engine == CCP_ENGINE_SHA) + if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) && + (session->auth.ut.sha_type != CCP_SHA_TYPE_224) && + (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) { + /* All other algorithms require byte + * swap done by host + */ + unsigned int i; + + offset = session->auth.ctx_len - + session->auth.offset - 1; + for (i = 0; i < session->auth.digest_length; i++) + digest_le[i] = addr[offset - i]; + offset = 0; + addr = digest_le; + } + + op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + if (session->auth.op == CCP_AUTH_OP_VERIFY) { + if (memcmp(addr + offset, digest_data, + session->auth.digest_length) != 0) + op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; + + } else { + if (unlikely(digest_data == 0)) + digest_data = rte_pktmbuf_mtod_offset( + op->sym->m_dst, uint8_t *, + digest_offset); + rte_memcpy(digest_data, addr + offset, + session->auth.digest_length); + } + /* Trim area used for digest from mbuf. 
*/ + rte_pktmbuf_trim(op->sym->m_src, + session->auth.ctx_len); +} + +static int +ccp_prepare_ops(struct ccp_qp *qp, + struct rte_crypto_op **op_d, + struct ccp_batch_info *b_info, + uint16_t nb_ops) +{ + int i, min_ops; + struct ccp_session *session; + + EVP_MD_CTX *auth_ctx = NULL; + + auth_ctx = EVP_MD_CTX_create(); + if (unlikely(!auth_ctx)) { + CCP_LOG_ERR("Unable to create auth ctx"); + return 0; + } + min_ops = RTE_MIN(nb_ops, b_info->opcnt); + + for (i = 0; i < min_ops; i++) { + op_d[i] = b_info->op[b_info->op_idx++]; + session = (struct ccp_session *)get_sym_session_private_data( + op_d[i]->sym->session, + ccp_cryptodev_driver_id); + switch (session->cmd_id) { + case CCP_CMD_CIPHER: + op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + break; + case CCP_CMD_AUTH: + if (session->auth_opt == 0) + ccp_auth_dq_prepare(op_d[i]); + break; + case CCP_CMD_CIPHER_HASH: + if (session->auth_opt) + cpu_crypto_auth(qp, op_d[i], + session, auth_ctx); + else + ccp_auth_dq_prepare(op_d[i]); + break; + case CCP_CMD_HASH_CIPHER: + if (session->auth_opt) + op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + else + ccp_auth_dq_prepare(op_d[i]); + break; + case CCP_CMD_COMBINED: + ccp_auth_dq_prepare(op_d[i]); + break; + default: + CCP_LOG_ERR("Unsupported cmd_id"); + } + } + + EVP_MD_CTX_destroy(auth_ctx); + b_info->opcnt -= min_ops; + return min_ops; +} + +int +process_ops_to_dequeue(struct ccp_qp *qp, + struct rte_crypto_op **op, + uint16_t nb_ops) +{ + struct ccp_batch_info *b_info; + uint32_t cur_head_offset; + + if (qp->b_info != NULL) { + b_info = qp->b_info; + if (unlikely(b_info->op_idx > 0)) + goto success; + } else if (rte_ring_dequeue(qp->processed_pkts, + (void **)&b_info)) + return 0; + + if (b_info->auth_ctr == b_info->opcnt) + goto success; + cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base, + CMD_Q_HEAD_LO_BASE); + + if (b_info->head_offset < b_info->tail_offset) { + if ((cur_head_offset >= b_info->head_offset) && + (cur_head_offset < b_info->tail_offset)) { + qp->b_info = b_info; + return 0; + } + } else { + if ((cur_head_offset >= b_info->head_offset) || + (cur_head_offset < b_info->tail_offset)) { + qp->b_info = b_info; + return 0; + } + } + + +success: + nb_ops = ccp_prepare_ops(qp, op, b_info, nb_ops); + rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt); + b_info->desccnt = 0; + if (b_info->opcnt > 0) { + qp->b_info = b_info; + } else { + rte_mempool_put(qp->batch_mp, (void *)b_info); + qp->b_info = NULL; + } + + return nb_ops; +} diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h new file mode 100644 index 00000000..882b398a --- /dev/null +++ b/drivers/crypto/ccp/ccp_crypto.h @@ -0,0 +1,388 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. 
+ */ + +#ifndef _CCP_CRYPTO_H_ +#define _CCP_CRYPTO_H_ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "ccp_dev.h" + +#define AES_BLOCK_SIZE 16 +#define CMAC_PAD_VALUE 0x80 +#define CTR_NONCE_SIZE 4 +#define CTR_IV_SIZE 8 +#define CCP_SHA3_CTX_SIZE 200 + +/** Macro helpers for CCP command creation */ +#define CCP_AES_SIZE(p) ((p)->aes.size) +#define CCP_AES_ENCRYPT(p) ((p)->aes.encrypt) +#define CCP_AES_MODE(p) ((p)->aes.mode) +#define CCP_AES_TYPE(p) ((p)->aes.type) +#define CCP_DES_ENCRYPT(p) ((p)->des.encrypt) +#define CCP_DES_MODE(p) ((p)->des.mode) +#define CCP_DES_TYPE(p) ((p)->des.type) +#define CCP_SHA_TYPE(p) ((p)->sha.type) +#define CCP_PT_BYTESWAP(p) ((p)->pt.byteswap) +#define CCP_PT_BITWISE(p) ((p)->pt.bitwise) + +/* HMAC */ +#define HMAC_IPAD_VALUE 0x36 +#define HMAC_OPAD_VALUE 0x5c + +/* MD5 */ +#define MD5_DIGEST_SIZE 16 +#define MD5_BLOCK_SIZE 64 + +/* SHA */ +#define SHA_COMMON_DIGEST_SIZE 32 +#define SHA1_DIGEST_SIZE 20 +#define SHA1_BLOCK_SIZE 64 + +#define SHA224_DIGEST_SIZE 28 +#define SHA224_BLOCK_SIZE 64 +#define SHA3_224_BLOCK_SIZE 144 + +#define SHA256_DIGEST_SIZE 32 +#define SHA256_BLOCK_SIZE 64 +#define SHA3_256_BLOCK_SIZE 136 + +#define SHA384_DIGEST_SIZE 48 +#define SHA384_BLOCK_SIZE 128 +#define SHA3_384_BLOCK_SIZE 104 + +#define SHA512_DIGEST_SIZE 64 +#define SHA512_BLOCK_SIZE 128 +#define SHA3_512_BLOCK_SIZE 72 + +/* Maximum length for digest */ +#define DIGEST_LENGTH_MAX 64 + +/* SHA LSB initialization values */ + +#define SHA1_H0 0x67452301UL +#define SHA1_H1 0xefcdab89UL +#define SHA1_H2 0x98badcfeUL +#define SHA1_H3 0x10325476UL +#define SHA1_H4 0xc3d2e1f0UL + +#define SHA224_H0 0xc1059ed8UL +#define SHA224_H1 0x367cd507UL +#define SHA224_H2 0x3070dd17UL +#define SHA224_H3 0xf70e5939UL +#define SHA224_H4 0xffc00b31UL +#define SHA224_H5 0x68581511UL +#define SHA224_H6 0x64f98fa7UL +#define SHA224_H7 0xbefa4fa4UL + +#define SHA256_H0 0x6a09e667UL +#define SHA256_H1 0xbb67ae85UL +#define SHA256_H2 0x3c6ef372UL +#define SHA256_H3 0xa54ff53aUL +#define SHA256_H4 0x510e527fUL +#define SHA256_H5 0x9b05688cUL +#define SHA256_H6 0x1f83d9abUL +#define SHA256_H7 0x5be0cd19UL + +#define SHA384_H0 0xcbbb9d5dc1059ed8ULL +#define SHA384_H1 0x629a292a367cd507ULL +#define SHA384_H2 0x9159015a3070dd17ULL +#define SHA384_H3 0x152fecd8f70e5939ULL +#define SHA384_H4 0x67332667ffc00b31ULL +#define SHA384_H5 0x8eb44a8768581511ULL +#define SHA384_H6 0xdb0c2e0d64f98fa7ULL +#define SHA384_H7 0x47b5481dbefa4fa4ULL + +#define SHA512_H0 0x6a09e667f3bcc908ULL +#define SHA512_H1 0xbb67ae8584caa73bULL +#define SHA512_H2 0x3c6ef372fe94f82bULL +#define SHA512_H3 0xa54ff53a5f1d36f1ULL +#define SHA512_H4 0x510e527fade682d1ULL +#define SHA512_H5 0x9b05688c2b3e6c1fULL +#define SHA512_H6 0x1f83d9abfb41bd6bULL +#define SHA512_H7 0x5be0cd19137e2179ULL + +/** + * CCP supported AES modes + */ +enum ccp_aes_mode { + CCP_AES_MODE_ECB = 0, + CCP_AES_MODE_CBC, + CCP_AES_MODE_OFB, + CCP_AES_MODE_CFB, + CCP_AES_MODE_CTR, + CCP_AES_MODE_CMAC, + CCP_AES_MODE_GHASH, + CCP_AES_MODE_GCTR, + CCP_AES_MODE__LAST, +}; + +/** + * CCP AES GHASH mode + */ +enum ccp_aes_ghash_mode { + CCP_AES_MODE_GHASH_AAD = 0, + CCP_AES_MODE_GHASH_FINAL +}; + +/** + * CCP supported AES types + */ +enum ccp_aes_type { + CCP_AES_TYPE_128 = 0, + CCP_AES_TYPE_192, + CCP_AES_TYPE_256, + CCP_AES_TYPE__LAST, +}; + +/***** 3DES engine *****/ + +/** + * CCP supported DES/3DES modes + */ +enum ccp_des_mode { + CCP_DES_MODE_ECB = 0, /* Not supported */ +
CCP_DES_MODE_CBC, + CCP_DES_MODE_CFB, +}; + +/** + * CCP supported DES types + */ +enum ccp_des_type { + CCP_DES_TYPE_128 = 0, /* 112 + 16 parity */ + CCP_DES_TYPE_192, /* 168 + 24 parity */ + CCP_DES_TYPE__LAST, +}; + +/***** SHA engine *****/ + +/** + * ccp_sha_type - type of SHA operation + * + * @CCP_SHA_TYPE_1: SHA-1 operation + * @CCP_SHA_TYPE_224: SHA-224 operation + * @CCP_SHA_TYPE_256: SHA-256 operation + * @CCP_SHA_TYPE_384: SHA-384 operation + * @CCP_SHA_TYPE_512: SHA-512 operation + */ +enum ccp_sha_type { + CCP_SHA_TYPE_1 = 1, + CCP_SHA_TYPE_224, + CCP_SHA_TYPE_256, + CCP_SHA_TYPE_384, + CCP_SHA_TYPE_512, + CCP_SHA_TYPE_RSVD1, + CCP_SHA_TYPE_RSVD2, + CCP_SHA3_TYPE_224, + CCP_SHA3_TYPE_256, + CCP_SHA3_TYPE_384, + CCP_SHA3_TYPE_512, + CCP_SHA_TYPE__LAST, +}; + +/** + * CCP supported cipher algorithms + */ +enum ccp_cipher_algo { + CCP_CIPHER_ALGO_AES_CBC = 0, + CCP_CIPHER_ALGO_AES_ECB, + CCP_CIPHER_ALGO_AES_CTR, + CCP_CIPHER_ALGO_AES_GCM, + CCP_CIPHER_ALGO_3DES_CBC, +}; + +/** + * CCP cipher operation type + */ +enum ccp_cipher_dir { + CCP_CIPHER_DIR_DECRYPT = 0, + CCP_CIPHER_DIR_ENCRYPT = 1, +}; + +/** + * CCP supported hash algorithms + */ +enum ccp_hash_algo { + CCP_AUTH_ALGO_SHA1 = 0, + CCP_AUTH_ALGO_SHA1_HMAC, + CCP_AUTH_ALGO_SHA224, + CCP_AUTH_ALGO_SHA224_HMAC, + CCP_AUTH_ALGO_SHA3_224, + CCP_AUTH_ALGO_SHA3_224_HMAC, + CCP_AUTH_ALGO_SHA256, + CCP_AUTH_ALGO_SHA256_HMAC, + CCP_AUTH_ALGO_SHA3_256, + CCP_AUTH_ALGO_SHA3_256_HMAC, + CCP_AUTH_ALGO_SHA384, + CCP_AUTH_ALGO_SHA384_HMAC, + CCP_AUTH_ALGO_SHA3_384, + CCP_AUTH_ALGO_SHA3_384_HMAC, + CCP_AUTH_ALGO_SHA512, + CCP_AUTH_ALGO_SHA512_HMAC, + CCP_AUTH_ALGO_SHA3_512, + CCP_AUTH_ALGO_SHA3_512_HMAC, + CCP_AUTH_ALGO_AES_CMAC, + CCP_AUTH_ALGO_AES_GCM, + CCP_AUTH_ALGO_MD5_HMAC, +}; + +/** + * CCP hash operation type + */ +enum ccp_hash_op { + CCP_AUTH_OP_GENERATE = 0, + CCP_AUTH_OP_VERIFY = 1, +}; + +/* CCP crypto private session structure */ +struct ccp_session { + bool auth_opt; + enum ccp_cmd_order cmd_id; + /**< chain order mode */ + struct { + uint16_t length; + uint16_t offset; + } iv; + /**< IV parameters */ + struct { + enum ccp_cipher_algo algo; + enum ccp_engine engine; + union { + enum ccp_aes_mode aes_mode; + enum ccp_des_mode des_mode; + } um; + union { + enum ccp_aes_type aes_type; + enum ccp_des_type des_type; + } ut; + enum ccp_cipher_dir dir; + uint64_t key_length; + /**< max cipher key size 256 bits */ + uint8_t key[32]; + /**< key in CCP format */ + uint8_t key_ccp[32]; + phys_addr_t key_phys; + /**< AES-CTR nonce(4) iv(8) ctr */ + uint8_t nonce[32]; + phys_addr_t nonce_phys; + } cipher; + /**< Cipher Parameters */ + + struct { + enum ccp_hash_algo algo; + enum ccp_engine engine; + union { + enum ccp_aes_mode aes_mode; + } um; + union { + enum ccp_sha_type sha_type; + enum ccp_aes_type aes_type; + } ut; + enum ccp_hash_op op; + uint64_t key_length; + /**< max hash key size 144 bytes (struct capabilities) */ + uint8_t key[144]; + /**< max BE key size of AES is 32 */ + uint8_t key_ccp[32]; + phys_addr_t key_phys; + uint64_t digest_length; + void *ctx; + int ctx_len; + int offset; + int block_size; + /**< Buffer to store software-generated precompute values */ + /**< For HMAC, H(ipad ^ key) and H(opad ^ key) */ + /**< For CMAC, K1 IV and K2 IV */ + uint8_t pre_compute[2 * CCP_SHA3_CTX_SIZE]; + /**< SHA3 initial ctx, all zeros */ + uint8_t sha3_ctx[CCP_SHA3_CTX_SIZE]; + int aad_length; + } auth; + /**< Authentication Parameters */ + enum rte_crypto_aead_algorithm aead_algo; + /**< AEAD Algorithm */ + + uint32_t reserved; +} __rte_cache_aligned; + +extern uint8_t ccp_cryptodev_driver_id; + +struct ccp_qp; +struct ccp_private; + +/**
+ * Set and validate CCP crypto session parameters + * + * @param sess ccp private session + * @param xform crypto xform for this session + * @param internals ccp device private data + * @return 0 on success otherwise -1 + */ +int ccp_set_session_parameters(struct ccp_session *sess, + const struct rte_crypto_sym_xform *xform, + struct ccp_private *internals); + +/** + * Compute the number of hardware queue slots required per op + * + * @param session CCP private session + * @return number of queue slots an op of this session will consume + */ +int ccp_compute_slot_count(struct ccp_session *session); + +/** + * Process crypto ops for enqueue on the hardware queue + * + * @param qp CCP crypto queue-pair + * @param op crypto ops table + * @param cmd_q CCP cmd queue + * @param nb_ops No. of ops to be submitted + * @param slots_req No. of hardware queue slots required by these ops + * @return number of ops successfully enqueued + */ +int process_ops_to_enqueue(struct ccp_qp *qp, + struct rte_crypto_op **op, + struct ccp_queue *cmd_q, + uint16_t nb_ops, + int slots_req); + +/** + * Process completed crypto ops for dequeue + * + * @param qp CCP crypto queue-pair + * @param op crypto ops table + * @param nb_ops requested no. of ops + * @return number of ops dequeued + */ +int process_ops_to_dequeue(struct ccp_qp *qp, + struct rte_crypto_op **op, + uint16_t nb_ops); + + +/** + * APIs for SHA3 partial hash generation + * @param data_in buffer holding the data to be partially hashed + * @param data_out buffer where the partial-hash result is written, in CCP big-endian format + */ +int partial_hash_sha3_224(uint8_t *data_in, + uint8_t *data_out); + +int partial_hash_sha3_256(uint8_t *data_in, + uint8_t *data_out); + +int partial_hash_sha3_384(uint8_t *data_in, + uint8_t *data_out); + +int partial_hash_sha3_512(uint8_t *data_in, + uint8_t *data_out); + +#endif /* _CCP_CRYPTO_H_ */ diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c new file mode 100644 index 00000000..80fe6a45 --- /dev/null +++ b/drivers/crypto/ccp/ccp_dev.c @@ -0,0 +1,810 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "ccp_dev.h" +#include "ccp_pci.h" +#include "ccp_pmd_private.h" + +struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list); +static int ccp_dev_id; + +int +ccp_dev_start(struct rte_cryptodev *dev) +{ + struct ccp_private *priv = dev->data->dev_private; + + priv->last_dev = TAILQ_FIRST(&ccp_list); + return 0; +} + +struct ccp_queue * +ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req) +{ + int i, ret = 0; + struct ccp_device *dev; + struct ccp_private *priv = cdev->data->dev_private; + + dev = TAILQ_NEXT(priv->last_dev, next); + if (unlikely(dev == NULL)) + dev = TAILQ_FIRST(&ccp_list); + priv->last_dev = dev; + if (dev->qidx >= dev->cmd_q_count) + dev->qidx = 0; + ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots); + if (ret >= slot_req) + return &dev->cmd_q[dev->qidx]; + for (i = 0; i < dev->cmd_q_count; i++) { + dev->qidx++; + if (dev->qidx >= dev->cmd_q_count) + dev->qidx = 0; + ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots); + if (ret >= slot_req) + return &dev->cmd_q[dev->qidx]; + } + return NULL; +} + +int +ccp_read_hwrng(uint32_t *value) +{ + struct ccp_device *dev; + + TAILQ_FOREACH(dev, &ccp_list, next) { + void *vaddr = (void *)(dev->pci.mem_resource[2].addr); + + while (dev->hwrng_retries++ < CCP_MAX_TRNG_RETRIES) { + *value = CCP_READ_REG(vaddr, TRNG_OUT_REG); + if (*value) { + dev->hwrng_retries = 0; + return 0; + } + } + dev->hwrng_retries = 0; + } + return -1; +} + +static const struct rte_memzone * +ccp_queue_dma_zone_reserve(const char *queue_name, + uint32_t queue_size, + int socket_id) +{ + const struct rte_memzone *mz; + + mz = rte_memzone_lookup(queue_name); + if (mz != 0) { + if (((size_t)queue_size <= mz->len) && + ((socket_id == SOCKET_ID_ANY) || + (socket_id == mz->socket_id))) { + CCP_LOG_INFO("re-use memzone already " + "allocated for %s", queue_name); + return mz; + } + CCP_LOG_ERR("Incompatible memzone already " + "allocated %s, size %u, socket %d. " + "Requested size %u, socket %u", + queue_name, (uint32_t)mz->len, + mz->socket_id, queue_size, socket_id); + return NULL; + } + + CCP_LOG_INFO("Allocate memzone for %s, size %u on socket %u", + queue_name, queue_size, socket_id); + + return rte_memzone_reserve_aligned(queue_name, queue_size, + socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size); +} + +/* bitmap support apis */ +static inline void +ccp_set_bit(unsigned long *bitmap, int n) +{ + __sync_fetch_and_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n))); +} + +static inline void +ccp_clear_bit(unsigned long *bitmap, int n) +{ + __sync_fetch_and_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n))); +} + +static inline uint32_t +ccp_get_bit(unsigned long *bitmap, int n) +{ + return ((bitmap[WORD_OFFSET(n)] & (1 << BIT_OFFSET(n))) != 0); +} + + +static inline uint32_t +ccp_ffz(unsigned long word) +{ + unsigned long first_zero; + + first_zero = __builtin_ffsl(~word); + return first_zero ? (first_zero - 1) : + BITS_PER_WORD; +} + +static inline uint32_t +ccp_find_first_zero_bit(unsigned long *addr, uint32_t limit) +{ + uint32_t i; + uint32_t nwords = 0; + + nwords = (limit - 1) / BITS_PER_WORD + 1; + for (i = 0; i < nwords; i++) { + if (addr[i] == 0UL) + return i * BITS_PER_WORD; + if (addr[i] < ~(0UL)) + break; + } + return (i == nwords) ? 
limit : i * BITS_PER_WORD + ccp_ffz(addr[i]); +} + +static void +ccp_bitmap_set(unsigned long *map, unsigned int start, int len) +{ + unsigned long *p = map + WORD_OFFSET(start); + const unsigned int size = start + len; + int bits_to_set = BITS_PER_WORD - (start % BITS_PER_WORD); + unsigned long mask_to_set = CCP_BITMAP_FIRST_WORD_MASK(start); + + while (len - bits_to_set >= 0) { + *p |= mask_to_set; + len -= bits_to_set; + bits_to_set = BITS_PER_WORD; + mask_to_set = ~0UL; + p++; + } + if (len) { + mask_to_set &= CCP_BITMAP_LAST_WORD_MASK(size); + *p |= mask_to_set; + } +} + +static void +ccp_bitmap_clear(unsigned long *map, unsigned int start, int len) +{ + unsigned long *p = map + WORD_OFFSET(start); + const unsigned int size = start + len; + int bits_to_clear = BITS_PER_WORD - (start % BITS_PER_WORD); + unsigned long mask_to_clear = CCP_BITMAP_FIRST_WORD_MASK(start); + + while (len - bits_to_clear >= 0) { + *p &= ~mask_to_clear; + len -= bits_to_clear; + bits_to_clear = BITS_PER_WORD; + mask_to_clear = ~0UL; + p++; + } + if (len) { + mask_to_clear &= CCP_BITMAP_LAST_WORD_MASK(size); + *p &= ~mask_to_clear; + } +} + + +static unsigned long +_ccp_find_next_bit(const unsigned long *addr, + unsigned long nbits, + unsigned long start, + unsigned long invert) +{ + unsigned long tmp; + + if (!nbits || start >= nbits) + return nbits; + + tmp = addr[start / BITS_PER_WORD] ^ invert; + + /* Handle 1st word. */ + tmp &= CCP_BITMAP_FIRST_WORD_MASK(start); + start = ccp_round_down(start, BITS_PER_WORD); + + while (!tmp) { + start += BITS_PER_WORD; + if (start >= nbits) + return nbits; + + tmp = addr[start / BITS_PER_WORD] ^ invert; + } + + return RTE_MIN(start + (ffs(tmp) - 1), nbits); +} + +static unsigned long +ccp_find_next_bit(const unsigned long *addr, + unsigned long size, + unsigned long offset) +{ + return _ccp_find_next_bit(addr, size, offset, 0UL); +} + +static unsigned long +ccp_find_next_zero_bit(const unsigned long *addr, + unsigned long size, + unsigned long offset) +{ + return _ccp_find_next_bit(addr, size, offset, ~0UL); +} + +/** + * bitmap_find_next_zero_area - find a contiguous aligned zero area + * @map: The address to base the search on + * @size: The bitmap size in bits + * @start: The bitnumber to start searching at + * @nr: The number of zeroed bits we're looking for + */ +static unsigned long +ccp_bitmap_find_next_zero_area(unsigned long *map, + unsigned long size, + unsigned long start, + unsigned int nr) +{ + unsigned long index, end, i; + +again: + index = ccp_find_next_zero_bit(map, size, start); + + end = index + nr; + if (end > size) + return end; + i = ccp_find_next_bit(map, end, index); + if (i < end) { + start = i + 1; + goto again; + } + return index; +} + +static uint32_t +ccp_lsb_alloc(struct ccp_queue *cmd_q, unsigned int count) +{ + struct ccp_device *ccp; + int start; + + /* First look at the map for the queue */ + if (cmd_q->lsb >= 0) { + start = (uint32_t)ccp_bitmap_find_next_zero_area(cmd_q->lsbmap, + LSB_SIZE, 0, + count); + if (start < LSB_SIZE) { + ccp_bitmap_set(cmd_q->lsbmap, start, count); + return start + cmd_q->lsb * LSB_SIZE; + } + } + + /* try to get an entry from the shared blocks */ + ccp = cmd_q->dev; + + rte_spinlock_lock(&ccp->lsb_lock); + + start = (uint32_t)ccp_bitmap_find_next_zero_area(ccp->lsbmap, + MAX_LSB_CNT * LSB_SIZE, + 0, count); + if (start <= MAX_LSB_CNT * LSB_SIZE) { + ccp_bitmap_set(ccp->lsbmap, start, count); + rte_spinlock_unlock(&ccp->lsb_lock); + return start * LSB_ITEM_SIZE; + } + CCP_LOG_ERR("NO LSBs available"); + + 
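ccp_lsb_alloc() above searches two bitmaps in order: the queue's private LSB map first, then the device-wide shared map under lsb_lock. The core primitive in both cases is a find-a-run-of-clear-bits search. A single-word model of that search, assuming a word-sized map for brevity (the driver's version walks an array of words):

#include <limits.h>

#define DEMO_BITS (sizeof(unsigned long) * CHAR_BIT)

/* Return the index of the first run of `nr` clear bits in `map`, or
 * DEMO_BITS if no such run exists. On a hit the caller would set those
 * bits, exactly as ccp_bitmap_set() does after a successful search.
 */
static unsigned int
demo_find_zero_area(unsigned long map, unsigned int nr)
{
	unsigned int start = 0;

	while (start + nr <= DEMO_BITS) {
		unsigned int i;

		for (i = 0; i < nr; i++)
			if (map & (1UL << (start + i)))
				break;
		if (i == nr)
			return start;	/* found a clear run at start */
		start += i + 1;		/* resume just past the set bit */
	}
	return DEMO_BITS;
}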
rte_spinlock_unlock(&ccp->lsb_lock); + + return 0; +} + +static void __rte_unused +ccp_lsb_free(struct ccp_queue *cmd_q, + unsigned int start, + unsigned int count) +{ + int lsbno = start / LSB_SIZE; + + if (!start) + return; + + if (cmd_q->lsb == lsbno) { + /* An entry from the private LSB */ + ccp_bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count); + } else { + /* From the shared LSBs */ + struct ccp_device *ccp = cmd_q->dev; + + rte_spinlock_lock(&ccp->lsb_lock); + ccp_bitmap_clear(ccp->lsbmap, start, count); + rte_spinlock_unlock(&ccp->lsb_lock); + } +} + +static int +ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status) +{ + int q_mask = 1 << cmd_q->id; + int weight = 0; + int j; + + /* Build a bit mask to know which LSBs + * this queue has access to. + * Don't bother with segment 0 + * as it has special + * privileges. + */ + cmd_q->lsbmask = 0; + status >>= LSB_REGION_WIDTH; + for (j = 1; j < MAX_LSB_CNT; j++) { + if (status & q_mask) + ccp_set_bit(&cmd_q->lsbmask, j); + + status >>= LSB_REGION_WIDTH; + } + + for (j = 0; j < MAX_LSB_CNT; j++) + if (ccp_get_bit(&cmd_q->lsbmask, j)) + weight++; + + printf("Queue %d can access %d LSB regions of mask %lu\n", + (int)cmd_q->id, weight, cmd_q->lsbmask); + + return weight ? 0 : -EINVAL; +} + +static int +ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp, + int lsb_cnt, int n_lsbs, + unsigned long *lsb_pub) +{ + unsigned long qlsb = 0; + int bitno = 0; + int qlsb_wgt = 0; + int i, j; + + /* For each queue: + * If the count of potential LSBs available to a queue matches the + * ordinal given to us in lsb_cnt: + * Copy the mask of possible LSBs for this queue into "qlsb"; + * For each bit in qlsb, see if the corresponding bit in the + * aggregation mask is set; if so, we have a match. + * If we have a match, clear the bit in the aggregation to + * mark it as no longer available. + * If there is no match, clear the bit in qlsb and keep looking. + */ + for (i = 0; i < ccp->cmd_q_count; i++) { + struct ccp_queue *cmd_q = &ccp->cmd_q[i]; + + qlsb_wgt = 0; + for (j = 0; j < MAX_LSB_CNT; j++) + if (ccp_get_bit(&cmd_q->lsbmask, j)) + qlsb_wgt++; + + if (qlsb_wgt == lsb_cnt) { + qlsb = cmd_q->lsbmask; + + bitno = ffs(qlsb) - 1; + while (bitno < MAX_LSB_CNT) { + if (ccp_get_bit(lsb_pub, bitno)) { + /* We found an available LSB + * that this queue can access + */ + cmd_q->lsb = bitno; + ccp_clear_bit(lsb_pub, bitno); + break; + } + ccp_clear_bit(&qlsb, bitno); + bitno = ffs(qlsb) - 1; + } + if (bitno >= MAX_LSB_CNT) + return -EINVAL; + n_lsbs--; + } + } + return n_lsbs; +} + +/* For each queue, from the most- to least-constrained: + * find an LSB that can be assigned to the queue. If there are N queues that + * can only use M LSBs, where N > M, fail; otherwise, every queue will get a + * dedicated LSB. Remaining LSB regions become a shared resource. + * If we have fewer LSBs than queues, all LSB regions become shared + * resources. + */ +static int +ccp_assign_lsbs(struct ccp_device *ccp) +{ + unsigned long lsb_pub = 0, qlsb = 0; + int n_lsbs = 0; + int bitno; + int i, lsb_cnt; + int rc = 0; + + rte_spinlock_init(&ccp->lsb_lock); + + /* Create an aggregate bitmap to get a total count of available LSBs */ + for (i = 0; i < ccp->cmd_q_count; i++) + lsb_pub |= ccp->cmd_q[i].lsbmask; + + for (i = 0; i < MAX_LSB_CNT; i++) + if (ccp_get_bit(&lsb_pub, i)) + n_lsbs++; + + if (n_lsbs >= ccp->cmd_q_count) { + /* We have enough LSBS to give every queue a private LSB. 
+ * Brute force search to start with the queues that are more + * constrained in LSB choice. When an LSB is privately + * assigned, it is removed from the public mask. + * This is an ugly N squared algorithm with some optimization. + */ + for (lsb_cnt = 1; n_lsbs && (lsb_cnt <= MAX_LSB_CNT); + lsb_cnt++) { + rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs, + &lsb_pub); + if (rc < 0) + return -EINVAL; + n_lsbs = rc; + } + } + + rc = 0; + /* What's left of the LSBs, according to the public mask, now become + * shared. Any zero bits in the lsb_pub mask represent an LSB region + * that can't be used as a shared resource, so mark the LSB slots for + * them as "in use". + */ + qlsb = lsb_pub; + bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT); + while (bitno < MAX_LSB_CNT) { + ccp_bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE); + ccp_set_bit(&qlsb, bitno); + bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT); + } + + return rc; +} + +static int +ccp_add_device(struct ccp_device *dev, int type) +{ + int i; + uint32_t qmr, status_lo, status_hi, dma_addr_lo, dma_addr_hi; + uint64_t status; + struct ccp_queue *cmd_q; + const struct rte_memzone *q_mz; + void *vaddr; + + if (dev == NULL) + return -1; + + dev->id = ccp_dev_id++; + dev->qidx = 0; + vaddr = (void *)(dev->pci.mem_resource[2].addr); + + if (type == CCP_VERSION_5B) { + CCP_WRITE_REG(vaddr, CMD_TRNG_CTL_OFFSET, 0x00012D57); + CCP_WRITE_REG(vaddr, CMD_CONFIG_0_OFFSET, 0x00000003); + for (i = 0; i < 12; i++) { + CCP_WRITE_REG(vaddr, CMD_AES_MASK_OFFSET, + CCP_READ_REG(vaddr, TRNG_OUT_REG)); + } + CCP_WRITE_REG(vaddr, CMD_QUEUE_MASK_OFFSET, 0x0000001F); + CCP_WRITE_REG(vaddr, CMD_QUEUE_PRIO_OFFSET, 0x00005B6D); + CCP_WRITE_REG(vaddr, CMD_CMD_TIMEOUT_OFFSET, 0x00000000); + + CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET, 0x3FFFFFFF); + CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET, 0x000003FF); + + CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823); + } + CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x00001249); + + /* Copy the private LSB mask to the public registers */ + status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET); + status_hi = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET); + CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_LO_OFFSET, status_lo); + CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_HI_OFFSET, status_hi); + status = ((uint64_t)status_hi<<30) | ((uint64_t)status_lo); + + dev->cmd_q_count = 0; + /* Find available queues */ + qmr = CCP_READ_REG(vaddr, Q_MASK_REG); + for (i = 0; i < MAX_HW_QUEUES; i++) { + if (!(qmr & (1 << i))) + continue; + cmd_q = &dev->cmd_q[dev->cmd_q_count++]; + cmd_q->dev = dev; + cmd_q->id = i; + cmd_q->qidx = 0; + cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); + + cmd_q->reg_base = (uint8_t *)vaddr + + CMD_Q_STATUS_INCR * (i + 1); + + /* CCP queue memory */ + snprintf(cmd_q->memz_name, sizeof(cmd_q->memz_name), + "%s_%d_%s_%d_%s", + "ccp_dev", + (int)dev->id, "queue", + (int)cmd_q->id, "mem"); + q_mz = ccp_queue_dma_zone_reserve(cmd_q->memz_name, + cmd_q->qsize, SOCKET_ID_ANY); + cmd_q->qbase_addr = (void *)q_mz->addr; + cmd_q->qbase_desc = (void *)q_mz->addr; + cmd_q->qbase_phys_addr = q_mz->phys_addr; + + cmd_q->qcontrol = 0; + /* init control reg to zero */ + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol); + + /* Disable the interrupts */ + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INT_ENABLE_BASE, 0x00); + CCP_READ_REG(cmd_q->reg_base, CMD_Q_INT_STATUS_BASE); + CCP_READ_REG(cmd_q->reg_base, CMD_Q_STATUS_BASE); + + /* Clear the interrupts */ + CCP_WRITE_REG(cmd_q->reg_base, 
CMD_Q_INTERRUPT_STATUS_BASE, + ALL_INTERRUPTS); + + /* Configure size of each virtual queue accessible to host */ + cmd_q->qcontrol &= ~(CMD_Q_SIZE << CMD_Q_SHIFT); + cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD_Q_SHIFT; + + dma_addr_lo = low32_value(cmd_q->qbase_phys_addr); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, + (uint32_t)dma_addr_lo); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_HEAD_LO_BASE, + (uint32_t)dma_addr_lo); + + dma_addr_hi = high32_value(cmd_q->qbase_phys_addr); + cmd_q->qcontrol |= (dma_addr_hi << 16); + CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE, + cmd_q->qcontrol); + + /* create LSB Mask map */ + if (ccp_find_lsb_regions(cmd_q, status)) + CCP_LOG_ERR("queue doesn't have lsb regions"); + cmd_q->lsb = -1; + + rte_atomic64_init(&cmd_q->free_slots); + rte_atomic64_set(&cmd_q->free_slots, (COMMANDS_PER_QUEUE - 1)); + /* unused slot barrier b/w H&T */ + } + + if (ccp_assign_lsbs(dev)) + CCP_LOG_ERR("Unable to assign lsb region"); + + /* pre-allocate LSB slots */ + for (i = 0; i < dev->cmd_q_count; i++) { + dev->cmd_q[i].sb_key = + ccp_lsb_alloc(&dev->cmd_q[i], 1); + dev->cmd_q[i].sb_iv = + ccp_lsb_alloc(&dev->cmd_q[i], 1); + dev->cmd_q[i].sb_sha = + ccp_lsb_alloc(&dev->cmd_q[i], 2); + dev->cmd_q[i].sb_hmac = + ccp_lsb_alloc(&dev->cmd_q[i], 2); + } + + TAILQ_INSERT_TAIL(&ccp_list, dev, next); + return 0; +} + +static void +ccp_remove_device(struct ccp_device *dev) +{ + if (dev == NULL) + return; + + TAILQ_REMOVE(&ccp_list, dev, next); +} + +static int +is_ccp_device(const char *dirname, + const struct rte_pci_id *ccp_id, + int *type) +{ + char filename[PATH_MAX]; + const struct rte_pci_id *id; + uint16_t vendor, device_id; + int i; + unsigned long tmp; + + /* get vendor id */ + snprintf(filename, sizeof(filename), "%s/vendor", dirname); + if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0) + return 0; + vendor = (uint16_t)tmp; + + /* get device id */ + snprintf(filename, sizeof(filename), "%s/device", dirname); + if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0) + return 0; + device_id = (uint16_t)tmp; + + for (id = ccp_id, i = 0; id->vendor_id != 0; id++, i++) { + if (vendor == id->vendor_id && + device_id == id->device_id) { + *type = i; + return 1; /* Matched device */ + } + } + return 0; +} + +static int +ccp_probe_device(const char *dirname, uint16_t domain, + uint8_t bus, uint8_t devid, + uint8_t function, int ccp_type) +{ + struct ccp_device *ccp_dev = NULL; + struct rte_pci_device *pci; + char filename[PATH_MAX]; + unsigned long tmp; + int uio_fd = -1, i, uio_num; + char uio_devname[PATH_MAX]; + void *map_addr; + + ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev), + RTE_CACHE_LINE_SIZE); + if (ccp_dev == NULL) + goto fail; + pci = &(ccp_dev->pci); + + pci->addr.domain = domain; + pci->addr.bus = bus; + pci->addr.devid = devid; + pci->addr.function = function; + + /* get vendor id */ + snprintf(filename, sizeof(filename), "%s/vendor", dirname); + if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0) + goto fail; + pci->id.vendor_id = (uint16_t)tmp; + + /* get device id */ + snprintf(filename, sizeof(filename), "%s/device", dirname); + if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0) + goto fail; + pci->id.device_id = (uint16_t)tmp; + + /* get subsystem_vendor id */ + snprintf(filename, sizeof(filename), "%s/subsystem_vendor", + dirname); + if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0) + goto fail; + pci->id.subsystem_vendor_id = (uint16_t)tmp; + + /* get subsystem_device id */ + snprintf(filename, sizeof(filename), "%s/subsystem_device", + dirname); + 
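Every attribute read in ccp_probe_device() follows the same three steps: build the sysfs path, parse the file as an unsigned long, then narrow the value to the destination field width. A self-contained sketch of that idiom; demo_read_sysfs_u16() is a hypothetical stand-in, not the driver's ccp_pci_parse_sysfs_value():

#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Read one numeric sysfs attribute (e.g. "<dir>/vendor", typically a
 * string such as "0x1022\n") and narrow it to 16 bits, mirroring how
 * the probe code fills the rte_pci_id fields. Returns 0 on success.
 */
static int
demo_read_sysfs_u16(const char *dir, const char *attr, uint16_t *out)
{
	char path[PATH_MAX];
	char buf[32];
	unsigned long v;
	char *end;
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", dir, attr);
	f = fopen(path, "r");
	if (f == NULL)
		return -1;
	if (fgets(buf, sizeof(buf), f) == NULL) {
		fclose(f);
		return -1;
	}
	fclose(f);
	v = strtoul(buf, &end, 0);	/* base 0 accepts the 0x prefix */
	if (end == buf || v > UINT16_MAX)
		return -1;
	*out = (uint16_t)v;
	return 0;
}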
if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0) + goto fail; + pci->id.subsystem_device_id = (uint16_t)tmp; + + /* get class_id */ + snprintf(filename, sizeof(filename), "%s/class", + dirname); + if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0) + goto fail; + /* the least 24 bits are valid: class, subclass, program interface */ + pci->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID; + + /* parse resources */ + snprintf(filename, sizeof(filename), "%s/resource", dirname); + if (ccp_pci_parse_sysfs_resource(filename, pci) < 0) + goto fail; + + uio_num = ccp_find_uio_devname(dirname); + if (uio_num < 0) { + /* + * It may take time for uio device to appear, + * wait here and try again + */ + usleep(100000); + uio_num = ccp_find_uio_devname(dirname); + if (uio_num < 0) + goto fail; + } + snprintf(uio_devname, sizeof(uio_devname), "/dev/uio%u", uio_num); + + uio_fd = open(uio_devname, O_RDWR | O_NONBLOCK); + if (uio_fd < 0) + goto fail; + if (flock(uio_fd, LOCK_EX | LOCK_NB)) + goto fail; + + /* Map the PCI memory resource of device */ + for (i = 0; i < PCI_MAX_RESOURCE; i++) { + + char devname[PATH_MAX]; + int res_fd; + + if (pci->mem_resource[i].phys_addr == 0) + continue; + snprintf(devname, sizeof(devname), "%s/resource%d", dirname, i); + res_fd = open(devname, O_RDWR); + if (res_fd < 0) + goto fail; + map_addr = mmap(NULL, pci->mem_resource[i].len, + PROT_READ | PROT_WRITE, + MAP_SHARED, res_fd, 0); + if (map_addr == MAP_FAILED) + goto fail; + + pci->mem_resource[i].addr = map_addr; + } + + /* device is valid, add in list */ + if (ccp_add_device(ccp_dev, ccp_type)) { + ccp_remove_device(ccp_dev); + goto fail; + } + + return 0; +fail: + CCP_LOG_ERR("CCP Device probe failed"); + if (uio_fd > 0) + close(uio_fd); + if (ccp_dev) + rte_free(ccp_dev); + return -1; +} + +int +ccp_probe_devices(const struct rte_pci_id *ccp_id) +{ + int dev_cnt = 0; + int ccp_type = 0; + struct dirent *d; + DIR *dir; + int ret = 0; + int module_idx = 0; + uint16_t domain; + uint8_t bus, devid, function; + char dirname[PATH_MAX]; + + module_idx = ccp_check_pci_uio_module(); + if (module_idx < 0) + return -1; + + TAILQ_INIT(&ccp_list); + dir = opendir(SYSFS_PCI_DEVICES); + if (dir == NULL) + return -1; + while ((d = readdir(dir)) != NULL) { + if (d->d_name[0] == '.') + continue; + if (ccp_parse_pci_addr_format(d->d_name, sizeof(d->d_name), + &domain, &bus, &devid, &function) != 0) + continue; + snprintf(dirname, sizeof(dirname), "%s/%s", + SYSFS_PCI_DEVICES, d->d_name); + if (is_ccp_device(dirname, ccp_id, &ccp_type)) { + printf("CCP : Detected CCP device with ID = 0x%x\n", + ccp_id[ccp_type].device_id); + ret = ccp_probe_device(dirname, domain, bus, devid, + function, ccp_type); + if (ret == 0) + dev_cnt++; + } + } + closedir(dir); + return dev_cnt; +} diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h new file mode 100644 index 00000000..de3e4bcc --- /dev/null +++ b/drivers/crypto/ccp/ccp_dev.h @@ -0,0 +1,495 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. 
+ */ + +#ifndef _CCP_DEV_H_ +#define _CCP_DEV_H_ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +/**< CCP specific */ +#define MAX_HW_QUEUES 5 +#define CCP_MAX_TRNG_RETRIES 10 +#define CCP_ALIGN(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) + +/**< CCP Register Mappings */ +#define Q_MASK_REG 0x000 +#define TRNG_OUT_REG 0x00c + +/* CCP Version 5 Specifics */ +#define CMD_QUEUE_MASK_OFFSET 0x00 +#define CMD_QUEUE_PRIO_OFFSET 0x04 +#define CMD_REQID_CONFIG_OFFSET 0x08 +#define CMD_CMD_TIMEOUT_OFFSET 0x10 +#define LSB_PUBLIC_MASK_LO_OFFSET 0x18 +#define LSB_PUBLIC_MASK_HI_OFFSET 0x1C +#define LSB_PRIVATE_MASK_LO_OFFSET 0x20 +#define LSB_PRIVATE_MASK_HI_OFFSET 0x24 + +#define CMD_Q_CONTROL_BASE 0x0000 +#define CMD_Q_TAIL_LO_BASE 0x0004 +#define CMD_Q_HEAD_LO_BASE 0x0008 +#define CMD_Q_INT_ENABLE_BASE 0x000C +#define CMD_Q_INTERRUPT_STATUS_BASE 0x0010 + +#define CMD_Q_STATUS_BASE 0x0100 +#define CMD_Q_INT_STATUS_BASE 0x0104 + +#define CMD_CONFIG_0_OFFSET 0x6000 +#define CMD_TRNG_CTL_OFFSET 0x6008 +#define CMD_AES_MASK_OFFSET 0x6010 +#define CMD_CLK_GATE_CTL_OFFSET 0x603C + +/* Address offset between two virtual queue registers */ +#define CMD_Q_STATUS_INCR 0x1000 + +/* Bit masks */ +#define CMD_Q_RUN 0x1 +#define CMD_Q_SIZE 0x1F +#define CMD_Q_SHIFT 3 +#define COMMANDS_PER_QUEUE 2048 + +#define QUEUE_SIZE_VAL ((ffs(COMMANDS_PER_QUEUE) - 2) & \ + CMD_Q_SIZE) +#define Q_DESC_SIZE sizeof(struct ccp_desc) +#define Q_SIZE(n) (COMMANDS_PER_QUEUE*(n)) + +#define INT_COMPLETION 0x1 +#define INT_ERROR 0x2 +#define INT_QUEUE_STOPPED 0x4 +#define ALL_INTERRUPTS (INT_COMPLETION| \ + INT_ERROR| \ + INT_QUEUE_STOPPED) + +#define LSB_REGION_WIDTH 5 +#define MAX_LSB_CNT 8 + +#define LSB_SIZE 16 +#define LSB_ITEM_SIZE 32 +#define SLSB_MAP_SIZE (MAX_LSB_CNT * LSB_SIZE) +#define LSB_ENTRY_NUMBER(LSB_ADDR) (LSB_ADDR / LSB_ITEM_SIZE) + +/* General CCP Defines */ + +#define CCP_SB_BYTES 32 +/* Word 0 */ +#define CCP_CMD_DW0(p) ((p)->dw0) +#define CCP_CMD_SOC(p) (CCP_CMD_DW0(p).soc) +#define CCP_CMD_IOC(p) (CCP_CMD_DW0(p).ioc) +#define CCP_CMD_INIT(p) (CCP_CMD_DW0(p).init) +#define CCP_CMD_EOM(p) (CCP_CMD_DW0(p).eom) +#define CCP_CMD_FUNCTION(p) (CCP_CMD_DW0(p).function) +#define CCP_CMD_ENGINE(p) (CCP_CMD_DW0(p).engine) +#define CCP_CMD_PROT(p) (CCP_CMD_DW0(p).prot) + +/* Word 1 */ +#define CCP_CMD_DW1(p) ((p)->length) +#define CCP_CMD_LEN(p) (CCP_CMD_DW1(p)) + +/* Word 2 */ +#define CCP_CMD_DW2(p) ((p)->src_lo) +#define CCP_CMD_SRC_LO(p) (CCP_CMD_DW2(p)) + +/* Word 3 */ +#define CCP_CMD_DW3(p) ((p)->dw3) +#define CCP_CMD_SRC_MEM(p) ((p)->dw3.src_mem) +#define CCP_CMD_SRC_HI(p) ((p)->dw3.src_hi) +#define CCP_CMD_LSB_ID(p) ((p)->dw3.lsb_cxt_id) +#define CCP_CMD_FIX_SRC(p) ((p)->dw3.fixed) + +/* Words 4/5 */ +#define CCP_CMD_DW4(p) ((p)->dw4) +#define CCP_CMD_DST_LO(p) (CCP_CMD_DW4(p).dst_lo) +#define CCP_CMD_DW5(p) ((p)->dw5.fields.dst_hi) +#define CCP_CMD_DST_HI(p) (CCP_CMD_DW5(p)) +#define CCP_CMD_DST_MEM(p) ((p)->dw5.fields.dst_mem) +#define CCP_CMD_FIX_DST(p) ((p)->dw5.fields.fixed) +#define CCP_CMD_SHA_LO(p) ((p)->dw4.sha_len_lo) +#define CCP_CMD_SHA_HI(p) ((p)->dw5.sha_len_hi) + +/* Word 6/7 */ +#define CCP_CMD_DW6(p) ((p)->key_lo) +#define CCP_CMD_KEY_LO(p) (CCP_CMD_DW6(p)) +#define CCP_CMD_DW7(p) ((p)->dw7) +#define CCP_CMD_KEY_HI(p) ((p)->dw7.key_hi) +#define CCP_CMD_KEY_MEM(p) ((p)->dw7.key_mem) + +/* bitmap */ +enum { + BITS_PER_WORD = sizeof(unsigned long) * CHAR_BIT +}; + +#define WORD_OFFSET(b) ((b) / BITS_PER_WORD) +#define
BIT_OFFSET(b) ((b) % BITS_PER_WORD) + +#define CCP_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) +#define CCP_BITMAP_SIZE(nr) \ + CCP_DIV_ROUND_UP(nr, CHAR_BIT * sizeof(unsigned long)) + +#define CCP_BITMAP_FIRST_WORD_MASK(start) \ + (~0UL << ((start) & (BITS_PER_WORD - 1))) +#define CCP_BITMAP_LAST_WORD_MASK(nbits) \ + (~0UL >> (-(nbits) & (BITS_PER_WORD - 1))) + +#define __ccp_round_mask(x, y) ((typeof(x))((y)-1)) +#define ccp_round_down(x, y) ((x) & ~__ccp_round_mask(x, y)) + +/** CCP registers Write/Read */ + +static inline void ccp_pci_reg_write(void *base, int offset, + uint32_t value) +{ + volatile void *reg_addr = ((uint8_t *)base + offset); + + rte_write32((rte_cpu_to_le_32(value)), reg_addr); +} + +static inline uint32_t ccp_pci_reg_read(void *base, int offset) +{ + volatile void *reg_addr = ((uint8_t *)base + offset); + + return rte_le_to_cpu_32(rte_read32(reg_addr)); +} + +#define CCP_READ_REG(hw_addr, reg_offset) \ + ccp_pci_reg_read(hw_addr, reg_offset) + +#define CCP_WRITE_REG(hw_addr, reg_offset, value) \ + ccp_pci_reg_write(hw_addr, reg_offset, value) + +TAILQ_HEAD(ccp_list, ccp_device); + +extern struct ccp_list ccp_list; + +/** + * CCP device version + */ +enum ccp_device_version { + CCP_VERSION_5A = 0, + CCP_VERSION_5B, +}; + +/** + * A structure describing a CCP command queue. + */ +struct ccp_queue { + struct ccp_device *dev; + char memz_name[RTE_MEMZONE_NAMESIZE]; + + rte_atomic64_t free_slots; + /**< available free slots updated from enq/deq calls */ + + /* Queue identifier */ + uint64_t id; /**< queue id */ + uint64_t qidx; /**< queue index */ + uint64_t qsize; /**< queue size */ + + /* Queue address */ + struct ccp_desc *qbase_desc; + void *qbase_addr; + phys_addr_t qbase_phys_addr; + /**< queue-page registers addr */ + void *reg_base; + + uint32_t qcontrol; + /**< queue ctrl reg */ + + int lsb; + /**< lsb region assigned to queue */ + unsigned long lsbmask; + /**< lsb regions queue can access */ + unsigned long lsbmap[CCP_BITMAP_SIZE(LSB_SIZE)]; + /**< all lsb resources which queue is using */ + uint32_t sb_key; + /**< lsb assigned for queue */ + uint32_t sb_iv; + /**< lsb assigned for iv */ + uint32_t sb_sha; + /**< lsb assigned for sha ctx */ + uint32_t sb_hmac; + /**< lsb assigned for hmac ctx */ +} ____cacheline_aligned; + +/** + * A structure describing a CCP device. + */ +struct ccp_device { + TAILQ_ENTRY(ccp_device) next; + int id; + /**< ccp dev id on platform */ + struct ccp_queue cmd_q[MAX_HW_QUEUES]; + /**< ccp queue */ + int cmd_q_count; + /**< no. 
of ccp Queues */ + struct rte_pci_device pci; + /**< ccp pci identifier */ + unsigned long lsbmap[CCP_BITMAP_SIZE(SLSB_MAP_SIZE)]; + /**< shared lsb mask of ccp */ + rte_spinlock_t lsb_lock; + /**< protection for shared lsb region allocation */ + int qidx; + /**< current queue index */ + int hwrng_retries; + /**< retry counter for CCP TRNG */ +} __rte_cache_aligned; + +/**< CCP H/W engine related */ +/** + * ccp_engine - CCP operation identifiers + * + * @CCP_ENGINE_AES: AES operation + * @CCP_ENGINE_XTS_AES: 128-bit XTS AES operation + * @CCP_ENGINE_3DES: DES/3DES operation + * @CCP_ENGINE_SHA: SHA operation + * @CCP_ENGINE_RSA: RSA operation + * @CCP_ENGINE_PASSTHRU: pass-through operation + * @CCP_ENGINE_ZLIB_DECOMPRESS: unused + * @CCP_ENGINE_ECC: ECC operation + */ +enum ccp_engine { + CCP_ENGINE_AES = 0, + CCP_ENGINE_XTS_AES_128, + CCP_ENGINE_3DES, + CCP_ENGINE_SHA, + CCP_ENGINE_RSA, + CCP_ENGINE_PASSTHRU, + CCP_ENGINE_ZLIB_DECOMPRESS, + CCP_ENGINE_ECC, + CCP_ENGINE__LAST, +}; + +/* Passthru engine */ +/** + * ccp_passthru_bitwise - type of bitwise passthru operation + * + * @CCP_PASSTHRU_BITWISE_NOOP: no bitwise operation performed + * @CCP_PASSTHRU_BITWISE_AND: perform bitwise AND of src with mask + * @CCP_PASSTHRU_BITWISE_OR: perform bitwise OR of src with mask + * @CCP_PASSTHRU_BITWISE_XOR: perform bitwise XOR of src with mask + * @CCP_PASSTHRU_BITWISE_MASK: overwrite with mask + */ +enum ccp_passthru_bitwise { + CCP_PASSTHRU_BITWISE_NOOP = 0, + CCP_PASSTHRU_BITWISE_AND, + CCP_PASSTHRU_BITWISE_OR, + CCP_PASSTHRU_BITWISE_XOR, + CCP_PASSTHRU_BITWISE_MASK, + CCP_PASSTHRU_BITWISE__LAST, +}; + +/** + * ccp_passthru_byteswap - type of byteswap passthru operation + * + * @CCP_PASSTHRU_BYTESWAP_NOOP: no byte swapping performed + * @CCP_PASSTHRU_BYTESWAP_32BIT: swap bytes within 32-bit words + * @CCP_PASSTHRU_BYTESWAP_256BIT: swap bytes within 256-bit words + */ +enum ccp_passthru_byteswap { + CCP_PASSTHRU_BYTESWAP_NOOP = 0, + CCP_PASSTHRU_BYTESWAP_32BIT, + CCP_PASSTHRU_BYTESWAP_256BIT, + CCP_PASSTHRU_BYTESWAP__LAST, +}; + +/** + * CCP passthru + */ +struct ccp_passthru { + phys_addr_t src_addr; + phys_addr_t dest_addr; + enum ccp_passthru_bitwise bit_mod; + enum ccp_passthru_byteswap byte_swap; + int len; + int dir; +}; + +/* CCP version 5: Union to define the function field (cmd_reg1/dword0) */ +union ccp_function { + struct { + uint16_t size:7; + uint16_t encrypt:1; + uint16_t mode:5; + uint16_t type:2; + } aes; + struct { + uint16_t size:7; + uint16_t encrypt:1; + uint16_t mode:5; + uint16_t type:2; + } des; + struct { + uint16_t size:7; + uint16_t encrypt:1; + uint16_t rsvd:5; + uint16_t type:2; + } aes_xts; + struct { + uint16_t rsvd1:10; + uint16_t type:4; + uint16_t rsvd2:1; + } sha; + struct { + uint16_t mode:3; + uint16_t size:12; + } rsa; + struct { + uint16_t byteswap:2; + uint16_t bitwise:3; + uint16_t reflect:2; + uint16_t rsvd:8; + } pt; + struct { + uint16_t rsvd:13; + } zlib; + struct { + uint16_t size:10; + uint16_t type:2; + uint16_t mode:3; + } ecc; + uint16_t raw; +}; + + +/** + * descriptor for version 5 CPP commands + * 8 32-bit words: + * word 0: function; engine; control bits + * word 1: length of source data + * word 2: low 32 bits of source pointer + * word 3: upper 16 bits of source pointer; source memory type + * word 4: low 32 bits of destination pointer + * word 5: upper 16 bits of destination pointer; destination memory + * type + * word 6: low 32 bits of key pointer + * word 7: upper 16 bits of key pointer; key memory type + */ +struct dword0 { + uint32_t 
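/* soc: stop on completion, ioc: interrupt on completion */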
soc:1;
+	uint32_t ioc:1;
+	uint32_t rsvd1:1;
+	uint32_t init:1;
+	uint32_t eom:1;
+	uint32_t function:15;
+	uint32_t engine:4;
+	uint32_t prot:1;
+	uint32_t rsvd2:7;
+};
+
+struct dword3 {
+	uint32_t src_hi:16;
+	uint32_t src_mem:2;
+	uint32_t lsb_cxt_id:8;
+	uint32_t rsvd1:5;
+	uint32_t fixed:1;
+};
+
+union dword4 {
+	uint32_t dst_lo;	/* NON-SHA */
+	uint32_t sha_len_lo;	/* SHA */
+};
+
+union dword5 {
+	struct {
+		uint32_t dst_hi:16;
+		uint32_t dst_mem:2;
+		uint32_t rsvd1:13;
+		uint32_t fixed:1;
+	} fields;
+	uint32_t sha_len_hi;
+};
+
+struct dword7 {
+	uint32_t key_hi:16;
+	uint32_t key_mem:2;
+	uint32_t rsvd1:14;
+};
+
+struct ccp_desc {
+	struct dword0 dw0;
+	uint32_t length;
+	uint32_t src_lo;
+	struct dword3 dw3;
+	union dword4 dw4;
+	union dword5 dw5;
+	uint32_t key_lo;
+	struct dword7 dw7;
+};
+
+/**
+ * ccp memory type
+ */
+enum ccp_memtype {
+	CCP_MEMTYPE_SYSTEM = 0,
+	CCP_MEMTYPE_SB,
+	CCP_MEMTYPE_LOCAL,
+	CCP_MEMTYPE_LAST,
+};
+
+/**
+ * cmd id to follow order
+ */
+enum ccp_cmd_order {
+	CCP_CMD_CIPHER = 0,
+	CCP_CMD_AUTH,
+	CCP_CMD_CIPHER_HASH,
+	CCP_CMD_HASH_CIPHER,
+	CCP_CMD_COMBINED,
+	CCP_CMD_NOT_SUPPORTED,
+};
+
+static inline uint32_t
+low32_value(unsigned long addr)
+{
+	return ((uint64_t)addr) & 0x0ffffffff;
+}
+
+static inline uint32_t
+high32_value(unsigned long addr)
+{
+	return ((uint64_t)addr >> 32) & 0x00000ffff;
+}
+
+/*
+ * Start CCP device
+ */
+int ccp_dev_start(struct rte_cryptodev *dev);
+
+/**
+ * Detect ccp platform and initialize all ccp devices
+ *
+ * @param ccp_id rte_pci_id list for supported CCP devices
+ * @return no. of successfully initialized CCP devices
+ */
+int ccp_probe_devices(const struct rte_pci_id *ccp_id);
+
+/**
+ * allocate a ccp command queue
+ *
+ * @param dev rte crypto device
+ * @param slot_req number of required slots
+ * @return allotted CCP queue on success, NULL otherwise
+ */
+struct ccp_queue *ccp_allot_queue(struct rte_cryptodev *dev, int slot_req);
+
+/**
+ * read hwrng value
+ *
+ * @param trng_value data pointer to write RNG value
+ * @return 0 on success, -1 otherwise
+ */
+int ccp_read_hwrng(uint32_t *trng_value);
+
+#endif /* _CCP_DEV_H_ */
diff --git a/drivers/crypto/ccp/ccp_pci.c b/drivers/crypto/ccp/ccp_pci.c
new file mode 100644
index 00000000..59152ca5
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pci.c
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include "ccp_pci.h"
+
+static const char * const uio_module_names[] = {
+	"igb_uio",
+	"uio_pci_generic",
+};
+
+int
+ccp_check_pci_uio_module(void)
+{
+	FILE *fp;
+	int i;
+	char buf[BUFSIZ];
+
+	fp = fopen(PROC_MODULES, "r");
+	if (fp == NULL)
+		return -1;
+	i = 0;
+	while (i < (int)(sizeof(uio_module_names) /
+			 sizeof(uio_module_names[0]))) {
+		while (fgets(buf, sizeof(buf), fp) != NULL) {
+			if (!strncmp(buf, uio_module_names[i],
+					strlen(uio_module_names[i]))) {
+				fclose(fp);
+				return i;
+			}
+		}
+		i++;
+		rewind(fp);
+	}
+	fclose(fp);
+	printf("Insert igb_uio or uio_pci_generic kernel module(s)\n");
+	return -1; /* uio not inserted */
+}
+
+/*
+ * split up a pci address into its constituent parts.
+ */
+int
+ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+			  uint8_t *bus, uint8_t *devid, uint8_t *function)
+{
+	/* first split on ':' */
+	union splitaddr {
+		struct {
+			char *domain;
+			char *bus;
+			char *devid;
+			char *function;
+		};
+		char *str[PCI_FMT_NVAL];
+		/* last element-separator is "." not ":" */
+	} splitaddr;
+
+	char *buf_copy = strndup(buf, bufsize);
+
+	if (buf_copy == NULL)
+		return -1;
+
+	if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
+			!= PCI_FMT_NVAL - 1)
+		goto error;
+	/* final split is on '.' between devid and function */
+	splitaddr.function = strchr(splitaddr.devid, '.');
+	if (splitaddr.function == NULL)
+		goto error;
+	*splitaddr.function++ = '\0';
+
+	/* now convert to int values */
+	errno = 0;
+	*domain = (uint16_t)strtoul(splitaddr.domain, NULL, 16);
+	*bus = (uint8_t)strtoul(splitaddr.bus, NULL, 16);
+	*devid = (uint8_t)strtoul(splitaddr.devid, NULL, 16);
+	*function = (uint8_t)strtoul(splitaddr.function, NULL, 10);
+	if (errno != 0)
+		goto error;
+
+	free(buf_copy); /* free the copy made with strndup */
+	return 0;
+error:
+	free(buf_copy);
+	return -1;
+}
+
+int
+ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val)
+{
+	FILE *f;
+	char buf[BUFSIZ];
+	char *end = NULL;
+
+	f = fopen(filename, "r");
+	if (f == NULL)
+		return -1;
+	if (fgets(buf, sizeof(buf), f) == NULL) {
+		fclose(f);
+		return -1;
+	}
+	*val = strtoul(buf, &end, 0);
+	if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
+		fclose(f);
+		return -1;
+	}
+	fclose(f);
+	return 0;
+}
+
+/** IO resource type: */
+#define IORESOURCE_IO 0x00000100
+#define IORESOURCE_MEM 0x00000200
+
+/* parse one line of the "resource" sysfs file (note that the 'line'
+ * string is modified)
+ */
+static int
+ccp_pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
+				 uint64_t *end_addr, uint64_t *flags)
+{
+	union pci_resource_info {
+		struct {
+			char *phys_addr;
+			char *end_addr;
+			char *flags;
+		};
+		char *ptrs[PCI_RESOURCE_FMT_NVAL];
+	} res_info;
+
+	if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3)
+		return -1;
+	errno = 0;
+	*phys_addr = strtoull(res_info.phys_addr, NULL, 16);
+	*end_addr = strtoull(res_info.end_addr, NULL, 16);
+	*flags = strtoull(res_info.flags, NULL, 16);
+	if (errno != 0)
+		return -1;
+
+	return 0;
+}
+
+/* parse the "resource" sysfs file */
+int
+ccp_pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
+{
+	FILE *fp;
+	char buf[BUFSIZ];
+	int i;
+	uint64_t phys_addr, end_addr, flags;
+
+	fp = fopen(filename, "r");
+	if (fp == NULL)
+		return -1;
+
+	for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+		if (fgets(buf, sizeof(buf), fp) == NULL)
+			goto error;
+		if (ccp_pci_parse_one_sysfs_resource(buf, sizeof(buf),
+				&phys_addr, &end_addr, &flags) < 0)
+			goto error;
+
+		if (flags & IORESOURCE_MEM) {
+			dev->mem_resource[i].phys_addr = phys_addr;
+			dev->mem_resource[i].len = end_addr - phys_addr + 1;
+			/* not mapped for now */
+			dev->mem_resource[i].addr = NULL;
+		}
+	}
+	fclose(fp);
+	return 0;
+
+error:
+	fclose(fp);
+	return -1;
+}
+
+int
+ccp_find_uio_devname(const char *dirname)
+{
+	DIR *dir;
+	struct dirent *e;
+	char dirname_uio[PATH_MAX];
+	unsigned int uio_num;
+	int ret = -1;
+
+	/* depending on kernel version, uio can be located in uio/uioX
+	 * or uio:uioX
+	 */
+	snprintf(dirname_uio, sizeof(dirname_uio), "%s/uio", dirname);
+	dir = opendir(dirname_uio);
+	if (dir == NULL) {
+		/* retry with the parent directory; the layout differs
+		 * between kernel versions
+		 */
+		dir = opendir(dirname);
+		if (dir == NULL)
+			return -1;
+	}
+
+	/* take the first file starting with "uio" */
+	while ((e = readdir(dir)) != NULL) {
+		/* format could be uio%d ...*/
+		int shortprefix_len = sizeof("uio") - 1;
+		/* ... 
or uio:uio%d */ + int longprefix_len = sizeof("uio:uio") - 1; + char *endptr; + + if (strncmp(e->d_name, "uio", 3) != 0) + continue; + + /* first try uio%d */ + errno = 0; + uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10); + if (errno == 0 && endptr != (e->d_name + shortprefix_len)) { + ret = uio_num; + break; + } + + /* then try uio:uio%d */ + errno = 0; + uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10); + if (errno == 0 && endptr != (e->d_name + longprefix_len)) { + ret = uio_num; + break; + } + } + closedir(dir); + return ret; + + +} diff --git a/drivers/crypto/ccp/ccp_pci.h b/drivers/crypto/ccp/ccp_pci.h new file mode 100644 index 00000000..7ed3bac4 --- /dev/null +++ b/drivers/crypto/ccp/ccp_pci.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + */ + +#ifndef _CCP_PCI_H_ +#define _CCP_PCI_H_ + +#include + +#include + +#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices" +#define PROC_MODULES "/proc/modules" + +int ccp_check_pci_uio_module(void); + +int ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain, + uint8_t *bus, uint8_t *devid, uint8_t *function); + +int ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val); + +int ccp_pci_parse_sysfs_resource(const char *filename, + struct rte_pci_device *dev); + +int ccp_find_uio_devname(const char *dirname); + +#endif /* _CCP_PCI_H_ */ diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c new file mode 100644 index 00000000..6984913f --- /dev/null +++ b/drivers/crypto/ccp/ccp_pmd_ops.c @@ -0,0 +1,833 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + */ + +#include + +#include +#include +#include + +#include "ccp_pmd_private.h" +#include "ccp_dev.h" +#include "ccp_crypto.h" + +#define CCP_BASE_SYM_CRYPTO_CAPABILITIES \ + { /* SHA1 */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA1, \ + .block_size = 64, \ + .key_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 20, \ + .max = 20, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA1 HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \ + .block_size = 64, \ + .key_size = { \ + .min = 1, \ + .max = 64, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 20, \ + .max = 20, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA224 */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA224, \ + .block_size = 64, \ + .key_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 28, \ + .max = 28, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA224 HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA224_HMAC, \ + .block_size = 64, \ + .key_size = { \ + .min = 1, \ + .max = 64, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 28, \ + .max = 28, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA3-224 */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ 
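+	/* For the SHA3 entries, block_size is the Keccak rate		\
+	 * (200 - 2 * digest_size): 144/136/104/72 bytes for		\
+	 * SHA3-224/256/384/512; each SHA3 HMAC entry caps key_size	\
+	 * at that same rate.						\
+	 */								\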
+ {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA3_224, \ + .block_size = 144, \ + .key_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 28, \ + .max = 28, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA3-224 HMAC*/ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA3_224_HMAC, \ + .block_size = 144, \ + .key_size = { \ + .min = 1, \ + .max = 144, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 28, \ + .max = 28, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA256 */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA256, \ + .block_size = 64, \ + .key_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 32, \ + .max = 32, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA256 HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, \ + .block_size = 64, \ + .key_size = { \ + .min = 1, \ + .max = 64, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 32, \ + .max = 32, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA3-256 */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA3_256, \ + .block_size = 136, \ + .key_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 32, \ + .max = 32, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA3-256-HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA3_256_HMAC, \ + .block_size = 136, \ + .key_size = { \ + .min = 1, \ + .max = 136, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 32, \ + .max = 32, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA384 */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA384, \ + .block_size = 128, \ + .key_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 48, \ + .max = 48, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA384 HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, \ + .block_size = 128, \ + .key_size = { \ + .min = 1, \ + .max = 128, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 48, \ + .max = 48, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA3-384 */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA3_384, \ + .block_size = 104, \ + .key_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 48, \ + .max = 48, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA3-384-HMAC */ \ + .op = 
RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA3_384_HMAC, \ + .block_size = 104, \ + .key_size = { \ + .min = 1, \ + .max = 104, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 48, \ + .max = 48, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA512 */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA512, \ + .block_size = 128, \ + .key_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 64, \ + .max = 64, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA512 HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, \ + .block_size = 128, \ + .key_size = { \ + .min = 1, \ + .max = 128, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 64, \ + .max = 64, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA3-512 */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA3_512, \ + .block_size = 72, \ + .key_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 64, \ + .max = 64, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA3-512-HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA3_512_HMAC, \ + .block_size = 72, \ + .key_size = { \ + .min = 1, \ + .max = 72, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 64, \ + .max = 64, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /*AES-CMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_AES_CMAC, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 32, \ + .increment = 8 \ + }, \ + .digest_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + }, } \ + }, } \ + }, \ + { /* AES ECB */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_AES_ECB, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 32, \ + .increment = 8 \ + }, \ + .iv_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* AES CBC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_AES_CBC, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 32, \ + .increment = 8 \ + }, \ + .iv_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* AES CTR */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_AES_CTR, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 32, \ + .increment = 8 \ + }, \ + .iv_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* 3DES CBC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + 
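+	/* 16 or 24 byte keys select two-key or three-key 3DES;	\
+	 * the 8-byte IV matches the DES block size.			\
+	 */								\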
{.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_3DES_CBC, \ + .block_size = 8, \ + .key_size = { \ + .min = 16, \ + .max = 24, \ + .increment = 8 \ + }, \ + .iv_size = { \ + .min = 8, \ + .max = 8, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* AES GCM */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \ + {.aead = { \ + .algo = RTE_CRYPTO_AEAD_AES_GCM, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 32, \ + .increment = 8 \ + }, \ + .digest_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .aad_size = { \ + .min = 0, \ + .max = 65535, \ + .increment = 1 \ + }, \ + .iv_size = { \ + .min = 12, \ + .max = 16, \ + .increment = 4 \ + }, \ + }, } \ + }, } \ + } + +#define CCP_EXTRA_SYM_CRYPTO_CAPABILITIES \ + { /* MD5 HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_MD5_HMAC, \ + .block_size = 64, \ + .key_size = { \ + .min = 1, \ + .max = 64, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .aad_size = { 0 } \ + }, } \ + }, } \ + } + +static const struct rte_cryptodev_capabilities ccp_crypto_cap[] = { + CCP_BASE_SYM_CRYPTO_CAPABILITIES, + RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() +}; + +static const struct rte_cryptodev_capabilities ccp_crypto_cap_complete[] = { + CCP_EXTRA_SYM_CRYPTO_CAPABILITIES, + CCP_BASE_SYM_CRYPTO_CAPABILITIES, + RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() +}; + +static int +ccp_pmd_config(struct rte_cryptodev *dev __rte_unused, + struct rte_cryptodev_config *config __rte_unused) +{ + return 0; +} + +static int +ccp_pmd_start(struct rte_cryptodev *dev) +{ + return ccp_dev_start(dev); +} + +static void +ccp_pmd_stop(struct rte_cryptodev *dev __rte_unused) +{ + +} + +static int +ccp_pmd_close(struct rte_cryptodev *dev __rte_unused) +{ + return 0; +} + +static void +ccp_pmd_stats_get(struct rte_cryptodev *dev, + struct rte_cryptodev_stats *stats) +{ + int qp_id; + + for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { + struct ccp_qp *qp = dev->data->queue_pairs[qp_id]; + + stats->enqueued_count += qp->qp_stats.enqueued_count; + stats->dequeued_count += qp->qp_stats.dequeued_count; + + stats->enqueue_err_count += qp->qp_stats.enqueue_err_count; + stats->dequeue_err_count += qp->qp_stats.dequeue_err_count; + } + +} + +static void +ccp_pmd_stats_reset(struct rte_cryptodev *dev) +{ + int qp_id; + + for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { + struct ccp_qp *qp = dev->data->queue_pairs[qp_id]; + + memset(&qp->qp_stats, 0, sizeof(qp->qp_stats)); + } +} + +static void +ccp_pmd_info_get(struct rte_cryptodev *dev, + struct rte_cryptodev_info *dev_info) +{ + struct ccp_private *internals = dev->data->dev_private; + + if (dev_info != NULL) { + dev_info->driver_id = dev->driver_id; + dev_info->feature_flags = dev->feature_flags; + dev_info->capabilities = ccp_crypto_cap; + if (internals->auth_opt == 1) + dev_info->capabilities = ccp_crypto_cap_complete; + dev_info->max_nb_queue_pairs = internals->max_nb_qpairs; + /* No limit of number of sessions */ + dev_info->sym.max_nb_sessions = 0; + } +} + +static int +ccp_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id) +{ + struct ccp_qp *qp; + + if (dev->data->queue_pairs[qp_id] != NULL) { + qp = (struct ccp_qp *)dev->data->queue_pairs[qp_id]; + rte_ring_free(qp->processed_pkts); + rte_mempool_free(qp->batch_mp); + rte_free(qp); + dev->data->queue_pairs[qp_id] = 
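/* clear the slot so a later queue_pair_setup can recreate it */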
NULL; + } + return 0; +} + +static int +ccp_pmd_qp_set_unique_name(struct rte_cryptodev *dev, + struct ccp_qp *qp) +{ + unsigned int n = snprintf(qp->name, sizeof(qp->name), + "ccp_pmd_%u_qp_%u", + dev->data->dev_id, qp->id); + + if (n > sizeof(qp->name)) + return -1; + + return 0; +} + +static struct rte_ring * +ccp_pmd_qp_create_batch_info_ring(struct ccp_qp *qp, + unsigned int ring_size, int socket_id) +{ + struct rte_ring *r; + + r = rte_ring_lookup(qp->name); + if (r) { + if (r->size >= ring_size) { + CCP_LOG_INFO( + "Reusing ring %s for processed packets", + qp->name); + return r; + } + CCP_LOG_INFO( + "Unable to reuse ring %s for processed packets", + qp->name); + return NULL; + } + + return rte_ring_create(qp->name, ring_size, socket_id, + RING_F_SP_ENQ | RING_F_SC_DEQ); +} + +static int +ccp_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, + const struct rte_cryptodev_qp_conf *qp_conf, + int socket_id, struct rte_mempool *session_pool) +{ + struct ccp_private *internals = dev->data->dev_private; + struct ccp_qp *qp; + int retval = 0; + + if (qp_id >= internals->max_nb_qpairs) { + CCP_LOG_ERR("Invalid qp_id %u, should be less than %u", + qp_id, internals->max_nb_qpairs); + return (-EINVAL); + } + + /* Free memory prior to re-allocation if needed. */ + if (dev->data->queue_pairs[qp_id] != NULL) + ccp_pmd_qp_release(dev, qp_id); + + /* Allocate the queue pair data structure. */ + qp = rte_zmalloc_socket("CCP Crypto PMD Queue Pair", sizeof(*qp), + RTE_CACHE_LINE_SIZE, socket_id); + if (qp == NULL) { + CCP_LOG_ERR("Failed to allocate queue pair memory"); + return (-ENOMEM); + } + + qp->dev = dev; + qp->id = qp_id; + dev->data->queue_pairs[qp_id] = qp; + + retval = ccp_pmd_qp_set_unique_name(dev, qp); + if (retval) { + CCP_LOG_ERR("Failed to create unique name for ccp qp"); + goto qp_setup_cleanup; + } + + qp->processed_pkts = ccp_pmd_qp_create_batch_info_ring(qp, + qp_conf->nb_descriptors, socket_id); + if (qp->processed_pkts == NULL) { + CCP_LOG_ERR("Failed to create batch info ring"); + goto qp_setup_cleanup; + } + + qp->sess_mp = session_pool; + + /* mempool for batch info */ + qp->batch_mp = rte_mempool_create( + qp->name, + qp_conf->nb_descriptors, + sizeof(struct ccp_batch_info), + RTE_CACHE_LINE_SIZE, + 0, NULL, NULL, NULL, NULL, + SOCKET_ID_ANY, 0); + if (qp->batch_mp == NULL) + goto qp_setup_cleanup; + memset(&qp->qp_stats, 0, sizeof(qp->qp_stats)); + return 0; + +qp_setup_cleanup: + dev->data->queue_pairs[qp_id] = NULL; + if (qp) + rte_free(qp); + return -1; +} + +static uint32_t +ccp_pmd_qp_count(struct rte_cryptodev *dev) +{ + return dev->data->nb_queue_pairs; +} + +static unsigned +ccp_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) +{ + return sizeof(struct ccp_session); +} + +static int +ccp_pmd_sym_session_configure(struct rte_cryptodev *dev, + struct rte_crypto_sym_xform *xform, + struct rte_cryptodev_sym_session *sess, + struct rte_mempool *mempool) +{ + int ret; + void *sess_private_data; + struct ccp_private *internals; + + if (unlikely(sess == NULL || xform == NULL)) { + CCP_LOG_ERR("Invalid session struct or xform"); + return -ENOMEM; + } + + if (rte_mempool_get(mempool, &sess_private_data)) { + CCP_LOG_ERR("Couldn't get object from session mempool"); + return -ENOMEM; + } + internals = (struct ccp_private *)dev->data->dev_private; + ret = ccp_set_session_parameters(sess_private_data, xform, internals); + if (ret != 0) { + CCP_LOG_ERR("failed configure session parameters"); + + /* Return session to mempool */ + rte_mempool_put(mempool, 
sess_private_data); + return ret; + } + set_sym_session_private_data(sess, dev->driver_id, + sess_private_data); + + return 0; +} + +static void +ccp_pmd_sym_session_clear(struct rte_cryptodev *dev, + struct rte_cryptodev_sym_session *sess) +{ + uint8_t index = dev->driver_id; + void *sess_priv = get_sym_session_private_data(sess, index); + + if (sess_priv) { + struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); + + rte_mempool_put(sess_mp, sess_priv); + memset(sess_priv, 0, sizeof(struct ccp_session)); + set_sym_session_private_data(sess, index, NULL); + } +} + +struct rte_cryptodev_ops ccp_ops = { + .dev_configure = ccp_pmd_config, + .dev_start = ccp_pmd_start, + .dev_stop = ccp_pmd_stop, + .dev_close = ccp_pmd_close, + + .stats_get = ccp_pmd_stats_get, + .stats_reset = ccp_pmd_stats_reset, + + .dev_infos_get = ccp_pmd_info_get, + + .queue_pair_setup = ccp_pmd_qp_setup, + .queue_pair_release = ccp_pmd_qp_release, + .queue_pair_count = ccp_pmd_qp_count, + + .sym_session_get_size = ccp_pmd_sym_session_get_size, + .sym_session_configure = ccp_pmd_sym_session_configure, + .sym_session_clear = ccp_pmd_sym_session_clear, +}; + +struct rte_cryptodev_ops *ccp_pmd_ops = &ccp_ops; diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h new file mode 100644 index 00000000..79752f68 --- /dev/null +++ b/drivers/crypto/ccp/ccp_pmd_private.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + */ + +#ifndef _CCP_PMD_PRIVATE_H_ +#define _CCP_PMD_PRIVATE_H_ + +#include +#include "ccp_crypto.h" + +#define CRYPTODEV_NAME_CCP_PMD crypto_ccp + +#define CCP_LOG_ERR(fmt, args...) \ + RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ + RTE_STR(CRYPTODEV_NAME_CCP_PMD), \ + __func__, __LINE__, ## args) + +#ifdef RTE_LIBRTE_CCP_DEBUG +#define CCP_LOG_INFO(fmt, args...) \ + RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ + RTE_STR(CRYPTODEV_NAME_CCP_PMD), \ + __func__, __LINE__, ## args) + +#define CCP_LOG_DBG(fmt, args...) \ + RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ + RTE_STR(CRYPTODEV_NAME_CCP_PMD), \ + __func__, __LINE__, ## args) +#else +#define CCP_LOG_INFO(fmt, args...) +#define CCP_LOG_DBG(fmt, args...) +#endif + +/**< Maximum queue pairs supported by CCP PMD */ +#define CCP_PMD_MAX_QUEUE_PAIRS 1 +#define CCP_NB_MAX_DESCRIPTORS 1024 +#define CCP_MAX_BURST 64 + +#include "ccp_dev.h" + +/* private data structure for each CCP crypto device */ +struct ccp_private { + unsigned int max_nb_qpairs; /**< Max number of queue pairs */ + uint8_t crypto_num_dev; /**< Number of working crypto devices */ + bool auth_opt; /**< Authentication offload option */ + struct ccp_device *last_dev; /**< Last working crypto device */ +}; + +/* CCP batch info */ +struct ccp_batch_info { + struct rte_crypto_op *op[CCP_MAX_BURST]; + /**< optable populated at enque time from app*/ + int op_idx; + struct ccp_queue *cmd_q; + uint16_t opcnt; + /**< no. of crypto ops in batch*/ + int desccnt; + /**< no. 
of ccp queue descriptors*/ + uint32_t head_offset; + /**< ccp queue head tail offsets time of enqueue*/ + uint32_t tail_offset; + uint8_t lsb_buf[CCP_SB_BYTES * CCP_MAX_BURST]; + phys_addr_t lsb_buf_phys; + /**< LSB intermediate buf for passthru */ + int lsb_buf_idx; + uint16_t auth_ctr; + /**< auth only ops batch for CPU based auth */ +} __rte_cache_aligned; + +/**< CCP crypto queue pair */ +struct ccp_qp { + uint16_t id; + /**< Queue Pair Identifier */ + char name[RTE_CRYPTODEV_NAME_MAX_LEN]; + /**< Unique Queue Pair Name */ + struct rte_ring *processed_pkts; + /**< Ring for placing process packets */ + struct rte_mempool *sess_mp; + /**< Session Mempool */ + struct rte_mempool *batch_mp; + /**< Session Mempool for batch info */ + struct rte_cryptodev_stats qp_stats; + /**< Queue pair statistics */ + struct ccp_batch_info *b_info; + /**< Store ops pulled out of queue */ + struct rte_cryptodev *dev; + /**< rte crypto device to which this qp belongs */ + uint8_t temp_digest[DIGEST_LENGTH_MAX]; + /**< Buffer used to store the digest generated + * by the driver when verifying a digest provided + * by the user (using authentication verify operation) + */ +} __rte_cache_aligned; + + +/**< device specific operations function pointer structure */ +extern struct rte_cryptodev_ops *ccp_pmd_ops; + +uint16_t +ccp_cpu_pmd_enqueue_burst(void *queue_pair, + struct rte_crypto_op **ops, + uint16_t nb_ops); +uint16_t +ccp_cpu_pmd_dequeue_burst(void *queue_pair, + struct rte_crypto_op **ops, + uint16_t nb_ops); + +#endif /* _CCP_PMD_PRIVATE_H_ */ diff --git a/drivers/crypto/ccp/meson.build b/drivers/crypto/ccp/meson.build new file mode 100644 index 00000000..e43b0059 --- /dev/null +++ b/drivers/crypto/ccp/meson.build @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + +if host_machine.system() != 'linux' + build = false +endif +dep = dependency('libcrypto', required: false) +if not dep.found() + build = false +endif +deps += 'bus_vdev' +deps += 'bus_pci' + +sources = files('rte_ccp_pmd.c', + 'ccp_crypto.c', + 'ccp_dev.c', + 'ccp_pci.c', + 'ccp_pmd_ops.c') + +ext_deps += dep +pkgconfig_extra_libs += '-lcrypto' diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c new file mode 100644 index 00000000..92d8a955 --- /dev/null +++ b/drivers/crypto/ccp/rte_ccp_pmd.c @@ -0,0 +1,397 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ccp_crypto.h" +#include "ccp_dev.h" +#include "ccp_pmd_private.h" + +/** + * Global static parameter used to find if CCP device is already initialized. 
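+ * Only a single crypto_ccp vdev instance is supported per process.  A
+ * typical instantiation (illustrative EAL argument; the queue pair
+ * count is capped at CCP_PMD_MAX_QUEUE_PAIRS) is:
+ *
+ *	--vdev "crypto_ccp,socket_id=0,ccp_auth_opt=1"
+ *
+ * where ccp_auth_opt=1 runs the authentication half of chained
+ * operations on the CPU instead of on the CCP engine.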
+ */ +static unsigned int ccp_pmd_init_done; +uint8_t ccp_cryptodev_driver_id; + +struct ccp_pmd_init_params { + struct rte_cryptodev_pmd_init_params def_p; + bool auth_opt; +}; + +#define CCP_CRYPTODEV_PARAM_NAME ("name") +#define CCP_CRYPTODEV_PARAM_SOCKET_ID ("socket_id") +#define CCP_CRYPTODEV_PARAM_MAX_NB_QP ("max_nb_queue_pairs") +#define CCP_CRYPTODEV_PARAM_AUTH_OPT ("ccp_auth_opt") + +const char *ccp_pmd_valid_params[] = { + CCP_CRYPTODEV_PARAM_NAME, + CCP_CRYPTODEV_PARAM_SOCKET_ID, + CCP_CRYPTODEV_PARAM_MAX_NB_QP, + CCP_CRYPTODEV_PARAM_AUTH_OPT, +}; + +/** ccp pmd auth option */ +enum ccp_pmd_auth_opt { + CCP_PMD_AUTH_OPT_CCP = 0, + CCP_PMD_AUTH_OPT_CPU, +}; + +/** parse integer from integer argument */ +static int +parse_integer_arg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + int *i = (int *) extra_args; + + *i = atoi(value); + if (*i < 0) { + CCP_LOG_ERR("Argument has to be positive.\n"); + return -EINVAL; + } + + return 0; +} + +/** parse name argument */ +static int +parse_name_arg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + struct rte_cryptodev_pmd_init_params *params = extra_args; + + if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) { + CCP_LOG_ERR("Invalid name %s, should be less than " + "%u bytes.\n", value, + RTE_CRYPTODEV_NAME_MAX_LEN - 1); + return -EINVAL; + } + + strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN); + + return 0; +} + +/** parse authentication operation option */ +static int +parse_auth_opt_arg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + struct ccp_pmd_init_params *params = extra_args; + int i; + + i = atoi(value); + if (i < CCP_PMD_AUTH_OPT_CCP || i > CCP_PMD_AUTH_OPT_CPU) { + CCP_LOG_ERR("Invalid ccp pmd auth option. 
" + "0->auth on CCP(default), " + "1->auth on CPU\n"); + return -EINVAL; + } + params->auth_opt = i; + return 0; +} + +static int +ccp_pmd_parse_input_args(struct ccp_pmd_init_params *params, + const char *input_args) +{ + struct rte_kvargs *kvlist = NULL; + int ret = 0; + + if (params == NULL) + return -EINVAL; + + if (input_args) { + kvlist = rte_kvargs_parse(input_args, + ccp_pmd_valid_params); + if (kvlist == NULL) + return -1; + + ret = rte_kvargs_process(kvlist, + CCP_CRYPTODEV_PARAM_MAX_NB_QP, + &parse_integer_arg, + ¶ms->def_p.max_nb_queue_pairs); + if (ret < 0) + goto free_kvlist; + + ret = rte_kvargs_process(kvlist, + CCP_CRYPTODEV_PARAM_SOCKET_ID, + &parse_integer_arg, + ¶ms->def_p.socket_id); + if (ret < 0) + goto free_kvlist; + + ret = rte_kvargs_process(kvlist, + CCP_CRYPTODEV_PARAM_NAME, + &parse_name_arg, + ¶ms->def_p); + if (ret < 0) + goto free_kvlist; + + ret = rte_kvargs_process(kvlist, + CCP_CRYPTODEV_PARAM_AUTH_OPT, + &parse_auth_opt_arg, + params); + if (ret < 0) + goto free_kvlist; + + } + +free_kvlist: + rte_kvargs_free(kvlist); + return ret; +} + +static struct ccp_session * +get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op) +{ + struct ccp_session *sess = NULL; + + if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { + if (unlikely(op->sym->session == NULL)) + return NULL; + + sess = (struct ccp_session *) + get_sym_session_private_data( + op->sym->session, + ccp_cryptodev_driver_id); + } else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { + void *_sess; + void *_sess_private_data = NULL; + struct ccp_private *internals; + + if (rte_mempool_get(qp->sess_mp, &_sess)) + return NULL; + if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data)) + return NULL; + + sess = (struct ccp_session *)_sess_private_data; + + internals = (struct ccp_private *)qp->dev->data->dev_private; + if (unlikely(ccp_set_session_parameters(sess, op->sym->xform, + internals) != 0)) { + rte_mempool_put(qp->sess_mp, _sess); + rte_mempool_put(qp->sess_mp, _sess_private_data); + sess = NULL; + } + op->sym->session = (struct rte_cryptodev_sym_session *)_sess; + set_sym_session_private_data(op->sym->session, + ccp_cryptodev_driver_id, + _sess_private_data); + } + + return sess; +} + +static uint16_t +ccp_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops, + uint16_t nb_ops) +{ + struct ccp_session *sess = NULL; + struct ccp_qp *qp = queue_pair; + struct ccp_queue *cmd_q; + struct rte_cryptodev *dev = qp->dev; + uint16_t i, enq_cnt = 0, slots_req = 0; + + if (nb_ops == 0) + return 0; + + if (unlikely(rte_ring_full(qp->processed_pkts) != 0)) + return 0; + + for (i = 0; i < nb_ops; i++) { + sess = get_ccp_session(qp, ops[i]); + if (unlikely(sess == NULL) && (i == 0)) { + qp->qp_stats.enqueue_err_count++; + return 0; + } else if (sess == NULL) { + nb_ops = i; + break; + } + slots_req += ccp_compute_slot_count(sess); + } + + cmd_q = ccp_allot_queue(dev, slots_req); + if (unlikely(cmd_q == NULL)) + return 0; + + enq_cnt = process_ops_to_enqueue(qp, ops, cmd_q, nb_ops, slots_req); + qp->qp_stats.enqueued_count += enq_cnt; + return enq_cnt; +} + +static uint16_t +ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops, + uint16_t nb_ops) +{ + struct ccp_qp *qp = queue_pair; + uint16_t nb_dequeued = 0, i; + + nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops); + + /* Free session if a session-less crypto op */ + for (i = 0; i < nb_dequeued; i++) + if (unlikely(ops[i]->sess_type == + RTE_CRYPTO_OP_SESSIONLESS)) { + rte_mempool_put(qp->sess_mp, + 
ops[i]->sym->session); + ops[i]->sym->session = NULL; + } + qp->qp_stats.dequeued_count += nb_dequeued; + + return nb_dequeued; +} + +/* + * The set of PCI devices this driver supports + */ +static struct rte_pci_id ccp_pci_id[] = { + { + RTE_PCI_DEVICE(0x1022, 0x1456), /* AMD CCP-5a */ + }, + { + RTE_PCI_DEVICE(0x1022, 0x1468), /* AMD CCP-5b */ + }, + {.device_id = 0}, +}; + +/** Remove ccp pmd */ +static int +cryptodev_ccp_remove(struct rte_vdev_device *dev) +{ + const char *name; + + ccp_pmd_init_done = 0; + name = rte_vdev_device_name(dev); + if (name == NULL) + return -EINVAL; + + RTE_LOG(INFO, PMD, "Closing ccp device %s on numa socket %u\n", + name, rte_socket_id()); + + return 0; +} + +/** Create crypto device */ +static int +cryptodev_ccp_create(const char *name, + struct rte_vdev_device *vdev, + struct ccp_pmd_init_params *init_params) +{ + struct rte_cryptodev *dev; + struct ccp_private *internals; + uint8_t cryptodev_cnt = 0; + + if (init_params->def_p.name[0] == '\0') + snprintf(init_params->def_p.name, + sizeof(init_params->def_p.name), + "%s", name); + + dev = rte_cryptodev_pmd_create(init_params->def_p.name, + &vdev->device, + &init_params->def_p); + if (dev == NULL) { + CCP_LOG_ERR("failed to create cryptodev vdev"); + goto init_error; + } + + cryptodev_cnt = ccp_probe_devices(ccp_pci_id); + + if (cryptodev_cnt == 0) { + CCP_LOG_ERR("failed to detect CCP crypto device"); + goto init_error; + } + + printf("CCP : Crypto device count = %d\n", cryptodev_cnt); + dev->driver_id = ccp_cryptodev_driver_id; + + /* register rx/tx burst functions for data path */ + dev->dev_ops = ccp_pmd_ops; + dev->enqueue_burst = ccp_pmd_enqueue_burst; + dev->dequeue_burst = ccp_pmd_dequeue_burst; + + dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | + RTE_CRYPTODEV_FF_HW_ACCELERATED | + RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING; + + internals = dev->data->dev_private; + + internals->max_nb_qpairs = init_params->def_p.max_nb_queue_pairs; + internals->auth_opt = init_params->auth_opt; + internals->crypto_num_dev = cryptodev_cnt; + + return 0; + +init_error: + CCP_LOG_ERR("driver %s: %s() failed", + init_params->def_p.name, __func__); + cryptodev_ccp_remove(vdev); + + return -EFAULT; +} + +/** Probe ccp pmd */ +static int +cryptodev_ccp_probe(struct rte_vdev_device *vdev) +{ + int rc = 0; + const char *name; + struct ccp_pmd_init_params init_params = { + .def_p = { + "", + sizeof(struct ccp_private), + rte_socket_id(), + CCP_PMD_MAX_QUEUE_PAIRS + }, + .auth_opt = CCP_PMD_AUTH_OPT_CCP, + }; + const char *input_args; + + if (ccp_pmd_init_done) { + RTE_LOG(INFO, PMD, "CCP PMD already initialized\n"); + return -EFAULT; + } + name = rte_vdev_device_name(vdev); + if (name == NULL) + return -EINVAL; + + input_args = rte_vdev_device_args(vdev); + ccp_pmd_parse_input_args(&init_params, input_args); + init_params.def_p.max_nb_queue_pairs = CCP_PMD_MAX_QUEUE_PAIRS; + + RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name, + init_params.def_p.socket_id); + RTE_LOG(INFO, PMD, "Max number of queue pairs = %d\n", + init_params.def_p.max_nb_queue_pairs); + RTE_LOG(INFO, PMD, "Authentication offload to %s\n", + ((init_params.auth_opt == 0) ? 
"CCP" : "CPU")); + + rc = cryptodev_ccp_create(name, vdev, &init_params); + if (rc) + return rc; + ccp_pmd_init_done = 1; + return 0; +} + +static struct rte_vdev_driver cryptodev_ccp_pmd_drv = { + .probe = cryptodev_ccp_probe, + .remove = cryptodev_ccp_remove +}; + +static struct cryptodev_driver ccp_crypto_drv; + +RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CCP_PMD, cryptodev_ccp_pmd_drv); +RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CCP_PMD, + "max_nb_queue_pairs= " + "socket_id= " + "ccp_auth_opt="); +RTE_PMD_REGISTER_CRYPTO_DRIVER(ccp_crypto_drv, cryptodev_ccp_pmd_drv.driver, + ccp_cryptodev_driver_id); diff --git a/drivers/crypto/ccp/rte_pmd_ccp_version.map b/drivers/crypto/ccp/rte_pmd_ccp_version.map new file mode 100644 index 00000000..9b9ab1a4 --- /dev/null +++ b/drivers/crypto/ccp/rte_pmd_ccp_version.map @@ -0,0 +1,4 @@ +DPDK_18.05 { + + local: *; +}; diff --git a/drivers/crypto/dpaa2_sec/Makefile b/drivers/crypto/dpaa2_sec/Makefile index cb6c63e6..da3d8f84 100644 --- a/drivers/crypto/dpaa2_sec/Makefile +++ b/drivers/crypto/dpaa2_sec/Makefile @@ -18,13 +18,8 @@ LIB = librte_pmd_dpaa2_sec.a # build flags CFLAGS += -DALLOW_EXPERIMENTAL_API -ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT),y) -CFLAGS += -O0 -g -CFLAGS += "-Wno-error" -else CFLAGS += -O3 CFLAGS += $(WERROR_FLAGS) -endif CFLAGS += -D _GNU_SOURCE ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y) diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c index 9a790ddd..2a3c61c6 100644 --- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c +++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c @@ -30,6 +30,9 @@ #include "dpaa2_sec_priv.h" #include "dpaa2_sec_logs.h" +/* Required types */ +typedef uint64_t dma_addr_t; + /* RTA header files */ #include #include @@ -56,6 +59,8 @@ enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8; static uint8_t cryptodev_driver_id; +int dpaa2_logtype_sec; + static inline int build_proto_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, @@ -77,11 +82,11 @@ build_proto_fd(dpaa2_sec_session *sess, DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src)); DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off); DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len); - DPAA2_SET_FD_FLC(fd, ((uint64_t)flc)); + DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc); /* save physical address of mbuf */ op->sym->aead.digest.phys_addr = mbuf->buf_iova; - mbuf->buf_iova = (uint64_t)op; + mbuf->buf_iova = (size_t)op; return 0; } @@ -113,12 +118,12 @@ build_authenc_gcm_sg_fd(dpaa2_sec_session *sess, fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE, RTE_CACHE_LINE_SIZE); if (unlikely(!fle)) { - RTE_LOG(ERR, PMD, "GCM SG: Memory alloc failed for SGE\n"); + DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE"); return -1; } memset(fle, 0, FLE_SG_MEM_SIZE); - DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op)); - DPAA2_FLE_SAVE_CTXT(fle, priv); + DPAA2_SET_FLE_ADDR(fle, (size_t)op); + DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv); op_fle = fle + 1; ip_fle = fle + 2; @@ -132,11 +137,11 @@ build_authenc_gcm_sg_fd(dpaa2_sec_session *sess, DPAA2_SET_FD_COMPOUND_FMT(fd); DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); - PMD_TX_LOG(DEBUG, "GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n" + DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n" "iv-len=%d data_off: 0x%x\n", sym_op->aead.data.offset, sym_op->aead.data.length, - sym_op->aead.digest.length, + sess->digest_length, sess->iv.length, sym_op->m_src->data_off); @@ -264,12 +269,12 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess, */ retval = 
rte_mempool_get(priv->fle_pool, (void **)(&fle)); if (retval) { - RTE_LOG(ERR, PMD, "GCM: Memory alloc failed for SGE\n"); + DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE"); return -1; } memset(fle, 0, FLE_POOL_BUF_SIZE); - DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op)); - DPAA2_FLE_SAVE_CTXT(fle, priv); + DPAA2_SET_FLE_ADDR(fle, (size_t)op); + DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); fle = fle + 1; sge = fle + 2; if (likely(bpid < MAX_BPID)) { @@ -297,11 +302,11 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess, DPAA2_SET_FD_COMPOUND_FMT(fd); DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); - PMD_TX_LOG(DEBUG, "GCM: auth_off: 0x%x/length %d, digest-len=%d\n" + DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n" "iv-len=%d data_off: 0x%x\n", sym_op->aead.data.offset, sym_op->aead.data.length, - sym_op->aead.digest.length, + sess->digest_length, sess->iv.length, sym_op->m_src->data_off); @@ -409,12 +414,12 @@ build_authenc_sg_fd(dpaa2_sec_session *sess, fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE, RTE_CACHE_LINE_SIZE); if (unlikely(!fle)) { - RTE_LOG(ERR, PMD, "AUTHENC SG: Memory alloc failed for SGE\n"); + DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE"); return -1; } memset(fle, 0, FLE_SG_MEM_SIZE); - DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op)); - DPAA2_FLE_SAVE_CTXT(fle, priv); + DPAA2_SET_FLE_ADDR(fle, (size_t)op); + DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); op_fle = fle + 1; ip_fle = fle + 2; @@ -428,16 +433,16 @@ build_authenc_sg_fd(dpaa2_sec_session *sess, DPAA2_SET_FD_COMPOUND_FMT(fd); DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); - PMD_TX_LOG(DEBUG, - "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n" - "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n", - sym_op->auth.data.offset, - sym_op->auth.data.length, - sym_op->auth.digest.length, - sym_op->cipher.data.offset, - sym_op->cipher.data.length, - sym_op->cipher.iv.length, - sym_op->m_src->data_off); + DPAA2_SEC_DP_DEBUG( + "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n" + "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n", + sym_op->auth.data.offset, + sym_op->auth.data.length, + sess->digest_length, + sym_op->cipher.data.offset, + sym_op->cipher.data.length, + sess->iv.length, + sym_op->m_src->data_off); /* Configure Output FLE with Scatter/Gather Entry */ DPAA2_SET_FLE_SG_EXT(op_fle); @@ -558,12 +563,12 @@ build_authenc_fd(dpaa2_sec_session *sess, */ retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); if (retval) { - RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n"); + DPAA2_SEC_ERR("Memory alloc failed for SGE"); return -1; } memset(fle, 0, FLE_POOL_BUF_SIZE); - DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op)); - DPAA2_FLE_SAVE_CTXT(fle, priv); + DPAA2_SET_FLE_ADDR(fle, (size_t)op); + DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); fle = fle + 1; sge = fle + 2; if (likely(bpid < MAX_BPID)) { @@ -591,15 +596,16 @@ build_authenc_fd(dpaa2_sec_session *sess, DPAA2_SET_FD_COMPOUND_FMT(fd); DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); - PMD_TX_LOG(DEBUG, "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n" - "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n", - sym_op->auth.data.offset, - sym_op->auth.data.length, - sess->digest_length, - sym_op->cipher.data.offset, - sym_op->cipher.data.length, - sess->iv.length, - sym_op->m_src->data_off); + DPAA2_SEC_DP_DEBUG( + "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n" + "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n", + sym_op->auth.data.offset, + sym_op->auth.data.length, + 
sess->digest_length, + sym_op->cipher.data.offset, + sym_op->cipher.data.length, + sess->iv.length, + sym_op->m_src->data_off); /* Configure Output FLE with Scatter/Gather Entry */ DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge)); @@ -686,13 +692,13 @@ static inline int build_auth_sg_fd( fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE, RTE_CACHE_LINE_SIZE); if (unlikely(!fle)) { - RTE_LOG(ERR, PMD, "AUTH SG: Memory alloc failed for SGE\n"); + DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE"); return -1; } memset(fle, 0, FLE_SG_MEM_SIZE); /* first FLE entry used to store mbuf and session ctxt */ - DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op)); - DPAA2_FLE_SAVE_CTXT(fle, priv); + DPAA2_SET_FLE_ADDR(fle, (size_t)op); + DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); op_fle = fle + 1; ip_fle = fle + 2; sge = fle + 3; @@ -762,7 +768,7 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); if (retval) { - RTE_LOG(ERR, PMD, "AUTH Memory alloc failed for SGE\n"); + DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE"); return -1; } memset(fle, 0, FLE_POOL_BUF_SIZE); @@ -772,8 +778,8 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, * to get the MBUF Addr from the previous FLE. * We can have a better approach to use the inline Mbuf */ - DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op)); - DPAA2_FLE_SAVE_CTXT(fle, priv); + DPAA2_SET_FLE_ADDR(fle, (size_t)op); + DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); fle = fle + 1; if (likely(bpid < MAX_BPID)) { @@ -859,13 +865,13 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE, RTE_CACHE_LINE_SIZE); if (!fle) { - RTE_LOG(ERR, PMD, "CIPHER SG: Memory alloc failed for SGE\n"); + DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE"); return -1; } memset(fle, 0, FLE_SG_MEM_SIZE); /* first FLE entry used to store mbuf and session ctxt */ - DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op)); - DPAA2_FLE_SAVE_CTXT(fle, priv); + DPAA2_SET_FLE_ADDR(fle, (size_t)op); + DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); op_fle = fle + 1; ip_fle = fle + 2; @@ -873,12 +879,13 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, flc = &priv->flc_desc[0].flc; - PMD_TX_LOG(DEBUG, - "CIPHER SG: cipher_off: 0x%x/length %d,ivlen=%d data_off: 0x%x", - sym_op->cipher.data.offset, - sym_op->cipher.data.length, - sym_op->cipher.iv.length, - sym_op->m_src->data_off); + DPAA2_SEC_DP_DEBUG( + "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d" + " data_off: 0x%x\n", + sym_op->cipher.data.offset, + sym_op->cipher.data.length, + sess->iv.length, + sym_op->m_src->data_off); /* o/p fle */ DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge)); @@ -901,10 +908,10 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, } DPAA2_SET_FLE_FIN(sge); - PMD_TX_LOG(DEBUG, - "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d", - flc, fle, fle->addr_hi, fle->addr_lo, - fle->length); + DPAA2_SEC_DP_DEBUG( + "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n", + flc, fle, fle->addr_hi, fle->addr_lo, + fle->length); /* i/p fle */ mbuf = sym_op->m_src; @@ -944,13 +951,14 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, DPAA2_SET_FD_COMPOUND_FMT(fd); DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); - PMD_TX_LOG(DEBUG, - "CIPHER SG: fdaddr =%p bpid =%d meta =%d off =%d, len =%d", - (void *)DPAA2_GET_FD_ADDR(fd), - DPAA2_GET_FD_BPID(fd), - 
rte_dpaa2_bpid_info[bpid].meta_data_size, - DPAA2_GET_FD_OFFSET(fd), - DPAA2_GET_FD_LEN(fd)); + DPAA2_SEC_DP_DEBUG( + "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d" + " off =%d, len =%d\n", + DPAA2_GET_FD_ADDR(fd), + DPAA2_GET_FD_BPID(fd), + rte_dpaa2_bpid_info[bpid].meta_data_size, + DPAA2_GET_FD_OFFSET(fd), + DPAA2_GET_FD_LEN(fd)); return 0; } @@ -976,7 +984,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, retval = rte_mempool_get(priv->fle_pool, (void **)(&fle)); if (retval) { - RTE_LOG(ERR, PMD, "CIPHER: Memory alloc failed for SGE\n"); + DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE"); return -1; } memset(fle, 0, FLE_POOL_BUF_SIZE); @@ -986,8 +994,8 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, * to get the MBUF Addr from the previous FLE. * We can have a better approach to use the inline Mbuf */ - DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op)); - DPAA2_FLE_SAVE_CTXT(fle, priv); + DPAA2_SET_FLE_ADDR(fle, (size_t)op); + DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv); fle = fle + 1; sge = fle + 2; @@ -1012,12 +1020,13 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, DPAA2_SET_FD_COMPOUND_FMT(fd); DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc)); - PMD_TX_LOG(DEBUG, - "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d, data_off: 0x%x", - sym_op->cipher.data.offset, - sym_op->cipher.data.length, - sess->iv.length, - sym_op->m_src->data_off); + DPAA2_SEC_DP_DEBUG( + "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d," + " data_off: 0x%x\n", + sym_op->cipher.data.offset, + sym_op->cipher.data.length, + sess->iv.length, + sym_op->m_src->data_off); DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst)); DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset + @@ -1025,10 +1034,10 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, fle->length = sym_op->cipher.data.length + sess->iv.length; - PMD_TX_LOG(DEBUG, - "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d", - flc, fle, fle->addr_hi, fle->addr_lo, - fle->length); + DPAA2_SEC_DP_DEBUG( + "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n", + flc, fle, fle->addr_hi, fle->addr_lo, + fle->length); fle++; @@ -1049,13 +1058,14 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op, DPAA2_SET_FLE_FIN(sge); DPAA2_SET_FLE_FIN(fle); - PMD_TX_LOG(DEBUG, - "CIPHER: fdaddr =%p bpid =%d meta =%d off =%d, len =%d", - (void *)DPAA2_GET_FD_ADDR(fd), - DPAA2_GET_FD_BPID(fd), - rte_dpaa2_bpid_info[bpid].meta_data_size, - DPAA2_GET_FD_OFFSET(fd), - DPAA2_GET_FD_LEN(fd)); + DPAA2_SEC_DP_DEBUG( + "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d" + " off =%d, len =%d\n", + DPAA2_GET_FD_ADDR(fd), + DPAA2_GET_FD_BPID(fd), + rte_dpaa2_bpid_info[bpid].meta_data_size, + DPAA2_GET_FD_OFFSET(fd), + DPAA2_GET_FD_LEN(fd)); return 0; } @@ -1070,7 +1080,7 @@ build_sec_fd(struct rte_crypto_op *op, PMD_INIT_FUNC_TRACE(); if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) - sess = (dpaa2_sec_session *)get_session_private_data( + sess = (dpaa2_sec_session *)get_sym_session_private_data( op->sym->session, cryptodev_driver_id); else if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) sess = (dpaa2_sec_session *)get_sec_session_private_data( @@ -1095,7 +1105,7 @@ build_sec_fd(struct rte_crypto_op *op, break; case DPAA2_SEC_HASH_CIPHER: default: - RTE_LOG(ERR, PMD, "error: Unsupported session\n"); + DPAA2_SEC_ERR("error: Unsupported session"); } } else { switch (sess->ctxt_type) { @@ -1116,7 +1126,7 @@ build_sec_fd(struct rte_crypto_op *op, break; case 
DPAA2_SEC_HASH_CIPHER: default: - RTE_LOG(ERR, PMD, "error: Unsupported session\n"); + DPAA2_SEC_ERR("error: Unsupported session"); } } return ret; @@ -1143,7 +1153,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, return 0; if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { - RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n"); + DPAA2_SEC_ERR("sessionless crypto op not supported"); return 0; } /*Prepare enqueue descriptor*/ @@ -1152,14 +1162,14 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, qbman_eq_desc_set_response(&eqdesc, 0, 0); qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid); - if (!DPAA2_PER_LCORE_SEC_DPIO) { - ret = dpaa2_affine_qbman_swp_sec(); + if (!DPAA2_PER_LCORE_DPIO) { + ret = dpaa2_affine_qbman_swp(); if (ret) { - RTE_LOG(ERR, PMD, "Failure in affining portal\n"); + DPAA2_SEC_ERR("Failure in affining portal"); return 0; } } - swp = DPAA2_PER_LCORE_SEC_PORTAL; + swp = DPAA2_PER_LCORE_PORTAL; while (nb_ops) { frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops; @@ -1171,8 +1181,8 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, bpid = mempool_to_bpid(mb_pool); ret = build_sec_fd(*ops, &fd_arr[loop], bpid); if (ret) { - PMD_DRV_LOG(ERR, "error: Improper packet" - " contents for crypto operation\n"); + DPAA2_SEC_ERR("error: Improper packet contents" + " for crypto operation"); goto skip_tx; } ops++; @@ -1206,7 +1216,7 @@ sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id) DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)), rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size); - op = (struct rte_crypto_op *)mbuf->buf_iova; + op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova; mbuf->buf_iova = op->sym->aead.digest.phys_addr; op->sym->aead.digest.phys_addr = 0L; @@ -1236,8 +1246,8 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id) fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)); - PMD_RX_LOG(DEBUG, "FLE addr = %x - %x, offset = %x", - fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset); + DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n", + fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset); /* we are using the first FLE entry to store Mbuf. * Currently we donot know which FLE has the mbuf stored. @@ -1248,11 +1258,10 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id) if (unlikely(DPAA2_GET_FD_IVP(fd))) { /* TODO complete it. 
*/ - RTE_LOG(ERR, PMD, "error: Non inline buffer - WHAT to DO?\n"); + DPAA2_SEC_ERR("error: non inline buffer"); return NULL; } - op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR( - DPAA2_GET_FLE_ADDR((fle - 1))); + op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1)); /* Prefeth op */ src = op->sym->m_src; @@ -1264,19 +1273,19 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id) } else dst = src; - PMD_RX_LOG(DEBUG, "mbuf %p BMAN buf addr %p", - (void *)dst, dst->buf_addr); - - PMD_RX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d", - (void *)DPAA2_GET_FD_ADDR(fd), - DPAA2_GET_FD_BPID(fd), - rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, - DPAA2_GET_FD_OFFSET(fd), - DPAA2_GET_FD_LEN(fd)); + DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p," + " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n", + (void *)dst, + dst->buf_addr, + DPAA2_GET_FD_ADDR(fd), + DPAA2_GET_FD_BPID(fd), + rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size, + DPAA2_GET_FD_OFFSET(fd), + DPAA2_GET_FD_LEN(fd)); /* free the fle memory */ if (likely(rte_pktmbuf_is_contiguous(src))) { - priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1); + priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1); rte_mempool_put(priv->fle_pool, (void *)(fle-1)); } else rte_free((void *)(fle-1)); @@ -1300,14 +1309,14 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, const struct qbman_fd *fd; struct qbman_pull_desc pulldesc; - if (!DPAA2_PER_LCORE_SEC_DPIO) { - ret = dpaa2_affine_qbman_swp_sec(); + if (!DPAA2_PER_LCORE_DPIO) { + ret = dpaa2_affine_qbman_swp(); if (ret) { - RTE_LOG(ERR, PMD, "Failure in affining portal\n"); + DPAA2_SEC_ERR("Failure in affining portal"); return 0; } } - swp = DPAA2_PER_LCORE_SEC_PORTAL; + swp = DPAA2_PER_LCORE_PORTAL; dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0]; qbman_pull_desc_clear(&pulldesc); @@ -1322,8 +1331,8 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, /*Issue a volatile dequeue command. */ while (1) { if (qbman_swp_pull(swp, &pulldesc)) { - RTE_LOG(WARNING, PMD, - "SEC VDQ command is not issued : QBMAN busy\n"); + DPAA2_SEC_WARN( + "SEC VDQ command is not issued : QBMAN busy"); /* Portal was busy, try again */ continue; } @@ -1355,7 +1364,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, status = (uint8_t)qbman_result_DQ_flags(dq_storage); if (unlikely( (status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) { - PMD_RX_LOG(DEBUG, "No frame is delivered"); + DPAA2_SEC_DP_DEBUG("No frame is delivered\n"); continue; } } @@ -1365,8 +1374,8 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, if (unlikely(fd->simple.frc)) { /* TODO Parse SEC errors */ - RTE_LOG(ERR, PMD, "SEC returned Error - %x\n", - fd->simple.frc); + DPAA2_SEC_ERR("SEC returned Error - %x", + fd->simple.frc); ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR; } else { ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS; @@ -1378,7 +1387,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, dpaa2_qp->rx_vq.rx_pkts += num_rx; - PMD_RX_LOG(DEBUG, "SEC Received %d Packets", num_rx); + DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx); /*Return the total number of packets received to DPAA2 app*/ return num_rx; } @@ -1420,11 +1429,11 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, /* If qp is already in use free ring memory and qp metadata. 
*/ if (dev->data->queue_pairs[qp_id] != NULL) { - PMD_DRV_LOG(INFO, "QP already setup"); + DPAA2_SEC_INFO("QP already setup"); return 0; } - PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, conf =%p", + DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf); memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg)); @@ -1432,7 +1441,7 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp), RTE_CACHE_LINE_SIZE); if (!qp) { - RTE_LOG(ERR, PMD, "malloc failed for rx/tx queues\n"); + DPAA2_SEC_ERR("malloc failed for rx/tx queues"); return -1; } @@ -1442,45 +1451,25 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, sizeof(struct queue_storage_info_t), RTE_CACHE_LINE_SIZE); if (!qp->rx_vq.q_storage) { - RTE_LOG(ERR, PMD, "malloc failed for q_storage\n"); + DPAA2_SEC_ERR("malloc failed for q_storage"); return -1; } memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t)); if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) { - RTE_LOG(ERR, PMD, "dpaa2_alloc_dq_storage failed\n"); + DPAA2_SEC_ERR("Unable to allocate dequeue storage"); return -1; } dev->data->queue_pairs[qp_id] = qp; cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX; - cfg.user_ctx = (uint64_t)(&qp->rx_vq); + cfg.user_ctx = (size_t)(&qp->rx_vq); retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token, qp_id, &cfg); return retcode; } -/** Start queue pair */ -static int -dpaa2_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - PMD_INIT_FUNC_TRACE(); - - return 0; -} - -/** Stop queue pair */ -static int -dpaa2_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - PMD_INIT_FUNC_TRACE(); - - return 0; -} - /** Return the number of allocated queue pairs */ static uint32_t dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev) @@ -1492,7 +1481,7 @@ dpaa2_sec_queue_pair_count(struct rte_cryptodev *dev) /** Returns the size of the aesni gcm session structure */ static unsigned int -dpaa2_sec_session_get_size(struct rte_cryptodev *dev __rte_unused) +dpaa2_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) { PMD_INIT_FUNC_TRACE(); @@ -1517,7 +1506,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev, sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), RTE_CACHE_LINE_SIZE); if (priv == NULL) { - RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n"); + DPAA2_SEC_ERR("No Memory for priv CTXT"); return -1; } @@ -1528,7 +1517,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev, session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length, RTE_CACHE_LINE_SIZE); if (session->cipher_key.data == NULL) { - RTE_LOG(ERR, PMD, "No Memory for cipher key\n"); + DPAA2_SEC_ERR("No Memory for cipher key"); rte_free(priv); return -1; } @@ -1536,7 +1525,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev, memcpy(session->cipher_key.data, xform->cipher.key.data, xform->cipher.key.length); - cipherdata.key = (uint64_t)session->cipher_key.data; + cipherdata.key = (size_t)session->cipher_key.data; cipherdata.keylen = session->cipher_key.length; cipherdata.key_enc_flags = 0; cipherdata.key_type = RTA_DATA_IMM; @@ -1571,11 +1560,11 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev, case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: case RTE_CRYPTO_CIPHER_ZUC_EEA3: case RTE_CRYPTO_CIPHER_NULL: - RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n", + DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", xform->cipher.algo); goto error_out; default: - 
RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n", + DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", xform->cipher.algo); goto error_out; } @@ -1586,7 +1575,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev, &cipherdata, NULL, session->iv.length, session->dir); if (bufsize < 0) { - RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n"); + DPAA2_SEC_ERR("Crypto: Descriptor build failed"); goto error_out; } flc->dhr = 0; @@ -1595,16 +1584,15 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev, flc->word1_sdl = (uint8_t)bufsize; flc->word2_rflc_31_0 = lower_32_bits( - (uint64_t)&(((struct dpaa2_sec_qp *) + (size_t)&(((struct dpaa2_sec_qp *) dev->data->queue_pairs[0])->rx_vq)); flc->word3_rflc_63_32 = upper_32_bits( - (uint64_t)&(((struct dpaa2_sec_qp *) + (size_t)&(((struct dpaa2_sec_qp *) dev->data->queue_pairs[0])->rx_vq)); session->ctxt = priv; for (i = 0; i < bufsize; i++) - PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n", - i, priv->flc_desc[0].desc[i]); + DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]); return 0; @@ -1621,7 +1609,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev, { struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; struct alginfo authdata; - unsigned int bufsize, i; + int bufsize, i; struct ctxt_priv *priv; struct sec_flow_context *flc; @@ -1633,7 +1621,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev, sizeof(struct sec_flc_desc), RTE_CACHE_LINE_SIZE); if (priv == NULL) { - RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n"); + DPAA2_SEC_ERR("No Memory for priv CTXT"); return -1; } @@ -1643,7 +1631,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev, session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length, RTE_CACHE_LINE_SIZE); if (session->auth_key.data == NULL) { - RTE_LOG(ERR, PMD, "No Memory for auth key\n"); + DPAA2_SEC_ERR("Unable to allocate memory for auth key"); rte_free(priv); return -1; } @@ -1651,7 +1639,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev, memcpy(session->auth_key.data, xform->auth.key.data, xform->auth.key.length); - authdata.key = (uint64_t)session->auth_key.data; + authdata.key = (size_t)session->auth_key.data; authdata.keylen = session->auth_key.length; authdata.key_enc_flags = 0; authdata.key_type = RTA_DATA_IMM; @@ -1703,12 +1691,12 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev, case RTE_CRYPTO_AUTH_AES_CMAC: case RTE_CRYPTO_AUTH_AES_CBC_MAC: case RTE_CRYPTO_AUTH_ZUC_EIA3: - RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n", - xform->auth.algo); + DPAA2_SEC_ERR("Crypto: Unsupported auth alg %un", + xform->auth.algo); goto error_out; default: - RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n", - xform->auth.algo); + DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", + xform->auth.algo); goto error_out; } session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ? 
@@ -1717,18 +1705,22 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev, bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc, 1, 0, &authdata, !session->dir, session->digest_length); + if (bufsize < 0) { + DPAA2_SEC_ERR("Crypto: Invalid buffer length"); + goto error_out; + } flc->word1_sdl = (uint8_t)bufsize; flc->word2_rflc_31_0 = lower_32_bits( - (uint64_t)&(((struct dpaa2_sec_qp *) + (size_t)&(((struct dpaa2_sec_qp *) dev->data->queue_pairs[0])->rx_vq)); flc->word3_rflc_63_32 = upper_32_bits( - (uint64_t)&(((struct dpaa2_sec_qp *) + (size_t)&(((struct dpaa2_sec_qp *) dev->data->queue_pairs[0])->rx_vq)); session->ctxt = priv; for (i = 0; i < bufsize; i++) - PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n", - i, priv->flc_desc[DESC_INITFINAL].desc[i]); + DPAA2_SEC_DEBUG("DESC[%d]:0x%x", + i, priv->flc_desc[DESC_INITFINAL].desc[i]); return 0; @@ -1747,7 +1739,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev, struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt; struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; struct alginfo aeaddata; - unsigned int bufsize, i; + int bufsize, i; struct ctxt_priv *priv; struct sec_flow_context *flc; struct rte_crypto_aead_xform *aead_xform = &xform->aead; @@ -1765,7 +1757,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev, sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), RTE_CACHE_LINE_SIZE); if (priv == NULL) { - RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n"); + DPAA2_SEC_ERR("No Memory for priv CTXT"); return -1; } @@ -1775,7 +1767,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev, session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length, RTE_CACHE_LINE_SIZE); if (session->aead_key.data == NULL && aead_xform->key.length > 0) { - RTE_LOG(ERR, PMD, "No Memory for aead key\n"); + DPAA2_SEC_ERR("No Memory for aead key"); rte_free(priv); return -1; } @@ -1786,7 +1778,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev, session->aead_key.length = aead_xform->key.length; ctxt->auth_only_len = aead_xform->aad_length; - aeaddata.key = (uint64_t)session->aead_key.data; + aeaddata.key = (size_t)session->aead_key.data; aeaddata.keylen = session->aead_key.length; aeaddata.key_enc_flags = 0; aeaddata.key_type = RTA_DATA_IMM; @@ -1798,12 +1790,12 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev, session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM; break; case RTE_CRYPTO_AEAD_AES_CCM: - RTE_LOG(ERR, PMD, "Crypto: Unsupported AEAD alg %u\n", - aead_xform->algo); + DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u", + aead_xform->algo); goto error_out; default: - RTE_LOG(ERR, PMD, "Crypto: Undefined AEAD specified %u\n", - aead_xform->algo); + DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u", + aead_xform->algo); goto error_out; } session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? 
@@ -1816,7 +1808,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev, &priv->flc_desc[0].desc[1], 1); if (err < 0) { - PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n"); + DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); goto error_out; } if (priv->flc_desc[0].desc[1] & 1) { @@ -1838,16 +1830,21 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev, priv->flc_desc[0].desc, 1, 0, &aeaddata, session->iv.length, session->digest_length); + if (bufsize < 0) { + DPAA2_SEC_ERR("Crypto: Invalid buffer length"); + goto error_out; + } + flc->word1_sdl = (uint8_t)bufsize; flc->word2_rflc_31_0 = lower_32_bits( - (uint64_t)&(((struct dpaa2_sec_qp *) + (size_t)&(((struct dpaa2_sec_qp *) dev->data->queue_pairs[0])->rx_vq)); flc->word3_rflc_63_32 = upper_32_bits( - (uint64_t)&(((struct dpaa2_sec_qp *) + (size_t)&(((struct dpaa2_sec_qp *) dev->data->queue_pairs[0])->rx_vq)); session->ctxt = priv; for (i = 0; i < bufsize; i++) - PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n", + DPAA2_SEC_DEBUG("DESC[%d]:0x%x\n", i, priv->flc_desc[0].desc[i]); return 0; @@ -1867,7 +1864,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt; struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private; struct alginfo authdata, cipherdata; - unsigned int bufsize, i; + int bufsize, i; struct ctxt_priv *priv; struct sec_flow_context *flc; struct rte_crypto_cipher_xform *cipher_xform; @@ -1899,7 +1896,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc), RTE_CACHE_LINE_SIZE); if (priv == NULL) { - RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n"); + DPAA2_SEC_ERR("No Memory for priv CTXT"); return -1; } @@ -1909,7 +1906,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length, RTE_CACHE_LINE_SIZE); if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { - RTE_LOG(ERR, PMD, "No Memory for cipher key\n"); + DPAA2_SEC_ERR("No Memory for cipher key"); rte_free(priv); return -1; } @@ -1917,7 +1914,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length, RTE_CACHE_LINE_SIZE); if (session->auth_key.data == NULL && auth_xform->key.length > 0) { - RTE_LOG(ERR, PMD, "No Memory for auth key\n"); + DPAA2_SEC_ERR("No Memory for auth key"); rte_free(session->cipher_key.data); rte_free(priv); return -1; @@ -1928,7 +1925,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, memcpy(session->auth_key.data, auth_xform->key.data, auth_xform->key.length); - authdata.key = (uint64_t)session->auth_key.data; + authdata.key = (size_t)session->auth_key.data; authdata.keylen = session->auth_key.length; authdata.key_enc_flags = 0; authdata.key_type = RTA_DATA_IMM; @@ -1980,15 +1977,15 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, case RTE_CRYPTO_AUTH_AES_CMAC: case RTE_CRYPTO_AUTH_AES_CBC_MAC: case RTE_CRYPTO_AUTH_ZUC_EIA3: - RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n", - auth_xform->algo); + DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", + auth_xform->algo); goto error_out; default: - RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n", - auth_xform->algo); + DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", + auth_xform->algo); goto error_out; } - cipherdata.key = (uint64_t)session->cipher_key.data; + cipherdata.key = (size_t)session->cipher_key.data; cipherdata.keylen = session->cipher_key.length; cipherdata.key_enc_flags = 0; cipherdata.key_type = 
RTA_DATA_IMM; @@ -2014,12 +2011,12 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, case RTE_CRYPTO_CIPHER_3DES_ECB: case RTE_CRYPTO_CIPHER_AES_ECB: case RTE_CRYPTO_CIPHER_KASUMI_F8: - RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n", - cipher_xform->algo); + DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", + cipher_xform->algo); goto error_out; default: - RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n", - cipher_xform->algo); + DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", + cipher_xform->algo); goto error_out; } session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? @@ -2033,7 +2030,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, &priv->flc_desc[0].desc[2], 2); if (err < 0) { - PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n"); + DPAA2_SEC_ERR("Crypto: Incorrect key lengths"); goto error_out; } if (priv->flc_desc[0].desc[2] & 1) { @@ -2059,21 +2056,25 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev, ctxt->auth_only_len, session->digest_length, session->dir); + if (bufsize < 0) { + DPAA2_SEC_ERR("Crypto: Invalid buffer length"); + goto error_out; + } } else { - RTE_LOG(ERR, PMD, "Hash before cipher not supported\n"); + DPAA2_SEC_ERR("Hash before cipher not supported"); goto error_out; } flc->word1_sdl = (uint8_t)bufsize; flc->word2_rflc_31_0 = lower_32_bits( - (uint64_t)&(((struct dpaa2_sec_qp *) + (size_t)&(((struct dpaa2_sec_qp *) dev->data->queue_pairs[0])->rx_vq)); flc->word3_rflc_63_32 = upper_32_bits( - (uint64_t)&(((struct dpaa2_sec_qp *) + (size_t)&(((struct dpaa2_sec_qp *) dev->data->queue_pairs[0])->rx_vq)); session->ctxt = priv; for (i = 0; i < bufsize; i++) - PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n", + DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]); return 0; @@ -2094,7 +2095,7 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev, PMD_INIT_FUNC_TRACE(); if (unlikely(sess == NULL)) { - RTE_LOG(ERR, PMD, "invalid session struct\n"); + DPAA2_SEC_ERR("Invalid session struct"); return -1; } @@ -2130,7 +2131,7 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev, dpaa2_sec_aead_init(dev, xform, session); } else { - RTE_LOG(ERR, PMD, "Invalid crypto type\n"); + DPAA2_SEC_ERR("Invalid crypto type"); return -EINVAL; } @@ -2150,7 +2151,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, struct ipsec_encap_pdb encap_pdb; struct ipsec_decap_pdb decap_pdb; struct alginfo authdata, cipherdata; - unsigned int bufsize; + int bufsize; struct sec_flow_context *flc; PMD_INIT_FUNC_TRACE(); @@ -2168,7 +2169,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, RTE_CACHE_LINE_SIZE); if (priv == NULL) { - RTE_LOG(ERR, PMD, "\nNo memory for priv CTXT"); + DPAA2_SEC_ERR("No memory for priv CTXT"); return -ENOMEM; } @@ -2180,7 +2181,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, RTE_CACHE_LINE_SIZE); if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { - RTE_LOG(ERR, PMD, "No Memory for cipher key\n"); + DPAA2_SEC_ERR("No Memory for cipher key"); rte_free(priv); return -ENOMEM; } @@ -2191,7 +2192,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, RTE_CACHE_LINE_SIZE); if (session->auth_key.data == NULL && auth_xform->key.length > 0) { - RTE_LOG(ERR, PMD, "No Memory for auth key\n"); + DPAA2_SEC_ERR("No Memory for auth key"); rte_free(session->cipher_key.data); rte_free(priv); return -ENOMEM; @@ -2202,7 +2203,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, memcpy(session->auth_key.data, auth_xform->key.data, auth_xform->key.length); - 
authdata.key = (uint64_t)session->auth_key.data; + authdata.key = (size_t)session->auth_key.data; authdata.keylen = session->auth_key.length; authdata.key_enc_flags = 0; authdata.key_type = RTA_DATA_IMM; @@ -2253,15 +2254,15 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, case RTE_CRYPTO_AUTH_KASUMI_F9: case RTE_CRYPTO_AUTH_AES_CBC_MAC: case RTE_CRYPTO_AUTH_ZUC_EIA3: - RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n", - auth_xform->algo); + DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u", + auth_xform->algo); goto out; default: - RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n", - auth_xform->algo); + DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u", + auth_xform->algo); goto out; } - cipherdata.key = (uint64_t)session->cipher_key.data; + cipherdata.key = (size_t)session->cipher_key.data; cipherdata.keylen = session->cipher_key.length; cipherdata.key_enc_flags = 0; cipherdata.key_type = RTA_DATA_IMM; @@ -2289,12 +2290,12 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, case RTE_CRYPTO_CIPHER_3DES_ECB: case RTE_CRYPTO_CIPHER_AES_ECB: case RTE_CRYPTO_CIPHER_KASUMI_F8: - RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n", - cipher_xform->algo); + DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u", + cipher_xform->algo); goto out; default: - RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n", - cipher_xform->algo); + DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u", + cipher_xform->algo); goto out; } @@ -2340,15 +2341,21 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev, 1, 0, &decap_pdb, &cipherdata, &authdata); } else goto out; + + if (bufsize < 0) { + DPAA2_SEC_ERR("Crypto: Invalid buffer length"); + goto out; + } + flc->word1_sdl = (uint8_t)bufsize; /* Enable the stashing control bit */ DPAA2_SET_FLC_RSC(flc); flc->word2_rflc_31_0 = lower_32_bits( - (uint64_t)&(((struct dpaa2_sec_qp *) + (size_t)&(((struct dpaa2_sec_qp *) dev->data->queue_pairs[0])->rx_vq) | 0x14); flc->word3_rflc_63_32 = upper_32_bits( - (uint64_t)&(((struct dpaa2_sec_qp *) + (size_t)&(((struct dpaa2_sec_qp *) dev->data->queue_pairs[0])->rx_vq)); /* Set EWS bit i.e. 
enable write-safe */ @@ -2379,8 +2386,7 @@ dpaa2_sec_security_session_create(void *dev, int ret; if (rte_mempool_get(mempool, &sess_private_data)) { - CDEV_LOG_ERR( - "Couldn't get object from session mempool"); + DPAA2_SEC_ERR("Couldn't get object from session mempool"); return -ENOMEM; } @@ -2395,9 +2401,7 @@ dpaa2_sec_security_session_create(void *dev, return -EINVAL; } if (ret != 0) { - PMD_DRV_LOG(ERR, - "DPAA2 PMD: failed to configure session parameters"); - + DPAA2_SEC_ERR("Failed to configure session parameters"); /* Return session to mempool */ rte_mempool_put(mempool, sess_private_data); return ret; @@ -2432,7 +2436,7 @@ dpaa2_sec_security_session_destroy(void *dev __rte_unused, } static int -dpaa2_sec_session_configure(struct rte_cryptodev *dev, +dpaa2_sec_sym_session_configure(struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform, struct rte_cryptodev_sym_session *sess, struct rte_mempool *mempool) @@ -2441,22 +2445,19 @@ dpaa2_sec_session_configure(struct rte_cryptodev *dev, int ret; if (rte_mempool_get(mempool, &sess_private_data)) { - CDEV_LOG_ERR( - "Couldn't get object from session mempool"); + DPAA2_SEC_ERR("Couldn't get object from session mempool"); return -ENOMEM; } ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data); if (ret != 0) { - PMD_DRV_LOG(ERR, "DPAA2 PMD: failed to configure " - "session parameters"); - + DPAA2_SEC_ERR("Failed to configure session parameters"); /* Return session to mempool */ rte_mempool_put(mempool, sess_private_data); return ret; } - set_session_private_data(sess, dev->driver_id, + set_sym_session_private_data(sess, dev->driver_id, sess_private_data); return 0; @@ -2464,12 +2465,12 @@ dpaa2_sec_session_configure(struct rte_cryptodev *dev, /** Clear the memory of session so it doesn't leave key material behind */ static void -dpaa2_sec_session_clear(struct rte_cryptodev *dev, +dpaa2_sec_sym_session_clear(struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess) { PMD_INIT_FUNC_TRACE(); uint8_t index = dev->driver_id; - void *sess_priv = get_session_private_data(sess, index); + void *sess_priv = get_sym_session_private_data(sess, index); dpaa2_sec_session *s = (dpaa2_sec_session *)sess_priv; if (sess_priv) { @@ -2478,7 +2479,7 @@ dpaa2_sec_session_clear(struct rte_cryptodev *dev, rte_free(s->auth_key.data); memset(sess, 0, sizeof(dpaa2_sec_session)); struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); - set_session_private_data(sess, index, NULL); + set_sym_session_private_data(sess, index, NULL); rte_mempool_put(sess_mp, sess_priv); } } @@ -2511,14 +2512,13 @@ dpaa2_sec_dev_start(struct rte_cryptodev *dev) ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token); if (ret) { - PMD_INIT_LOG(ERR, "DPSECI with HW_ID = %d ENABLE FAILED\n", - priv->hw_id); + DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED", + priv->hw_id); goto get_attr_failure; } ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr); if (ret) { - PMD_INIT_LOG(ERR, - "DPSEC ATTRIBUTE READ FAILED, disabling DPSEC\n"); + DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC"); goto get_attr_failure; } for (i = 0; i < attr.num_rx_queues && qp[i]; i++) { @@ -2526,14 +2526,14 @@ dpaa2_sec_dev_start(struct rte_cryptodev *dev) dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i, &rx_attr); dpaa2_q->fqid = rx_attr.fqid; - PMD_INIT_LOG(DEBUG, "rx_fqid: %d", dpaa2_q->fqid); + DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid); } for (i = 0; i < attr.num_tx_queues && qp[i]; i++) { dpaa2_q = &qp[i]->tx_vq; 
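For reference, the recurring (uint64_t) to (size_t) change in the rflc words above is a 32-bit portability fix: the rx_vq address is first narrowed through size_t and then split into two 32-bit flow-context words. A minimal standalone sketch of that split, where lo32() and hi32() are hypothetical stand-ins for DPDK's lower_32_bits()/upper_32_bits() helpers:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* lo32()/hi32() are illustrative stand-ins for DPDK's
 * lower_32_bits()/upper_32_bits(). */
static inline uint32_t lo32(uint64_t v) { return (uint32_t)v; }
static inline uint32_t hi32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
        int rx_vq;                     /* stands in for the queue-pair rx_vq */
        /* Cast through size_t first: on a 32-bit build a direct (uint64_t)
         * cast from a pointer raises a different-size warning. */
        uint64_t ctx = (size_t)&rx_vq;

        printf("word2_rflc_31_0=%" PRIx32 " word3_rflc_63_32=%" PRIx32 "\n",
               lo32(ctx), hi32(ctx));
        return 0;
}

On a 32-bit target the pointer zero-extends, so hi32() is simply 0, which is what the upper flow-context word expects.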
dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i, &tx_attr); dpaa2_q->fqid = tx_attr.fqid; - PMD_INIT_LOG(DEBUG, "tx_fqid: %d", dpaa2_q->fqid); + DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid); } return 0; @@ -2553,15 +2553,14 @@ dpaa2_sec_dev_stop(struct rte_cryptodev *dev) ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token); if (ret) { - PMD_INIT_LOG(ERR, "Failure in disabling dpseci %d device", + DPAA2_SEC_ERR("Failure in disabling dpseci %d device", priv->hw_id); return; } ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token); if (ret < 0) { - PMD_INIT_LOG(ERR, "SEC Device cannot be reset:Error = %0x\n", - ret); + DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret); return; } } @@ -2585,8 +2584,7 @@ dpaa2_sec_dev_close(struct rte_cryptodev *dev) /*Close the device at underlying layer*/ ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token); if (ret) { - PMD_INIT_LOG(ERR, "Failure closing dpseci device with" - " error code %d\n", ret); + DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret); return -1; } @@ -2608,7 +2606,8 @@ dpaa2_sec_dev_infos_get(struct rte_cryptodev *dev, info->max_nb_queue_pairs = internals->max_nb_queue_pairs; info->feature_flags = dev->feature_flags; info->capabilities = dpaa2_sec_capabilities; - info->sym.max_nb_sessions = internals->max_nb_sessions; + /* No limit of number of sessions */ + info->sym.max_nb_sessions = 0; info->driver_id = cryptodev_driver_id; } } @@ -2626,12 +2625,12 @@ void dpaa2_sec_stats_get(struct rte_cryptodev *dev, PMD_INIT_FUNC_TRACE(); if (stats == NULL) { - PMD_DRV_LOG(ERR, "invalid stats ptr NULL"); + DPAA2_SEC_ERR("Invalid stats ptr NULL"); return; } for (i = 0; i < dev->data->nb_queue_pairs; i++) { if (qp[i] == NULL) { - PMD_DRV_LOG(DEBUG, "Uninitialised queue pair"); + DPAA2_SEC_DEBUG("Uninitialised queue pair"); continue; } @@ -2644,16 +2643,16 @@ void dpaa2_sec_stats_get(struct rte_cryptodev *dev, ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token, &counters); if (ret) { - PMD_DRV_LOG(ERR, "dpseci_get_sec_counters failed\n"); + DPAA2_SEC_ERR("SEC counters failed"); } else { - PMD_DRV_LOG(INFO, "dpseci hw stats:" - "\n\tNumber of Requests Dequeued = %lu" - "\n\tNumber of Outbound Encrypt Requests = %lu" - "\n\tNumber of Inbound Decrypt Requests = %lu" - "\n\tNumber of Outbound Bytes Encrypted = %lu" - "\n\tNumber of Outbound Bytes Protected = %lu" - "\n\tNumber of Inbound Bytes Decrypted = %lu" - "\n\tNumber of Inbound Bytes Validated = %lu", + DPAA2_SEC_INFO("dpseci hardware stats:" + "\n\tNum of Requests Dequeued = %" PRIu64 + "\n\tNum of Outbound Encrypt Requests = %" PRIu64 + "\n\tNum of Inbound Decrypt Requests = %" PRIu64 + "\n\tNum of Outbound Bytes Encrypted = %" PRIu64 + "\n\tNum of Outbound Bytes Protected = %" PRIu64 + "\n\tNum of Inbound Bytes Decrypted = %" PRIu64 + "\n\tNum of Inbound Bytes Validated = %" PRIu64, counters.dequeued_requests, counters.ob_enc_requests, counters.ib_dec_requests, @@ -2675,7 +2674,7 @@ void dpaa2_sec_stats_reset(struct rte_cryptodev *dev) for (i = 0; i < dev->data->nb_queue_pairs; i++) { if (qp[i] == NULL) { - PMD_DRV_LOG(DEBUG, "Uninitialised queue pair"); + DPAA2_SEC_DEBUG("Uninitialised queue pair"); continue; } qp[i]->tx_vq.rx_pkts = 0; @@ -2697,12 +2696,10 @@ static struct rte_cryptodev_ops crypto_ops = { .stats_reset = dpaa2_sec_stats_reset, .queue_pair_setup = dpaa2_sec_queue_pair_setup, .queue_pair_release = dpaa2_sec_queue_pair_release, - .queue_pair_start = dpaa2_sec_queue_pair_start, - .queue_pair_stop = dpaa2_sec_queue_pair_stop, 
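The %lu to PRIu64 conversions in the dpseci counter logs above are more than style: uint64_t is unsigned long long, not unsigned long, on 32-bit targets, so "%lu" is undefined behaviour there. A small self-contained illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t dequeued_requests = 123456789012345ULL;

        /* "%lu" matches a 32-bit unsigned long on ILP32 targets and is
         * undefined for a uint64_t argument there; PRIu64 always expands
         * to the correct conversion specifier for the platform. */
        printf("Num of Requests Dequeued = %" PRIu64 "\n", dequeued_requests);
        return 0;
}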
.queue_pair_count = dpaa2_sec_queue_pair_count, - .session_get_size = dpaa2_sec_session_get_size, - .session_configure = dpaa2_sec_session_configure, - .session_clear = dpaa2_sec_session_clear, + .sym_session_get_size = dpaa2_sec_sym_session_get_size, + .sym_session_configure = dpaa2_sec_sym_session_configure, + .sym_session_clear = dpaa2_sec_sym_session_clear, }; static const struct rte_security_capability * @@ -2729,8 +2726,8 @@ dpaa2_sec_uninit(const struct rte_cryptodev *dev) rte_mempool_free(internals->fle_pool); - PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n", - dev->data->name, rte_socket_id()); + DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u", + dev->data->name, rte_socket_id()); return 0; } @@ -2751,7 +2748,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev) PMD_INIT_FUNC_TRACE(); dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); if (dpaa2_dev == NULL) { - PMD_INIT_LOG(ERR, "dpaa2_device not found\n"); + DPAA2_SEC_ERR("DPAA2 SEC device not found"); return -1; } hw_id = dpaa2_dev->object_id; @@ -2765,10 +2762,13 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev) RTE_CRYPTODEV_FF_HW_ACCELERATED | RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | RTE_CRYPTODEV_FF_SECURITY | - RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER; + RTE_CRYPTODEV_FF_IN_PLACE_SGL | + RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | + RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | + RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | + RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT; internals = cryptodev->data->dev_private; - internals->max_nb_sessions = RTE_DPAA2_SEC_PMD_MAX_NB_SESSIONS; /* * For secondary processes, we don't initialise any further as primary @@ -2776,7 +2776,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev) * RX function */ if (rte_eal_process_type() != RTE_PROC_PRIMARY) { - PMD_INIT_LOG(DEBUG, "Device already init by primary process"); + DPAA2_SEC_DEBUG("Device already init by primary process"); return 0; } @@ -2794,21 +2794,21 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev) dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1, sizeof(struct fsl_mc_io), 0); if (!dpseci) { - PMD_INIT_LOG(ERR, - "Error in allocating the memory for dpsec object"); + DPAA2_SEC_ERR( + "Error in allocating the memory for dpsec object"); return -1; } dpseci->regs = rte_mcp_ptr_list[0]; retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token); if (retcode != 0) { - PMD_INIT_LOG(ERR, "Cannot open the dpsec device: Error = %x", - retcode); + DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x", + retcode); goto init_error; } retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr); if (retcode != 0) { - PMD_INIT_LOG(ERR, + DPAA2_SEC_ERR( "Cannot get dpsec device attributed: Error = %x", retcode); goto init_error; @@ -2828,15 +2828,15 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev) NULL, NULL, NULL, NULL, SOCKET_ID_ANY, 0); if (!internals->fle_pool) { - RTE_LOG(ERR, PMD, "%s create failed\n", str); + DPAA2_SEC_ERR("Mempool (%s) creation failed", str); goto init_error; } - PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name); + DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name); return 0; init_error: - PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name); + DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name); /* dpaa2_sec_uninit(crypto_dev_name); */ return -EFAULT; @@ -2866,7 +2866,7 @@ cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv, if (cryptodev->data->dev_private == NULL) rte_panic("Cannot allocate memzone for private " - 
"device data"); + "device data"); } dpaa2_dev->cryptodev = cryptodev; @@ -2919,5 +2919,13 @@ static struct rte_dpaa2_driver rte_dpaa2_sec_driver = { static struct cryptodev_driver dpaa2_sec_crypto_drv; RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver); -RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, rte_dpaa2_sec_driver, - cryptodev_driver_id); +RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, + rte_dpaa2_sec_driver.driver, cryptodev_driver_id); + +RTE_INIT(dpaa2_sec_init_log) +{ + /* Bus level logs */ + dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2"); + if (dpaa2_logtype_sec >= 0) + rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE); +} diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h index 23251141..8a990442 100644 --- a/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h +++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h @@ -8,37 +8,35 @@ #ifndef _DPAA2_SEC_LOGS_H_ #define _DPAA2_SEC_LOGS_H_ -#define PMD_INIT_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args) - -#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT -#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") -#else -#define PMD_INIT_FUNC_TRACE() do { } while (0) -#endif - -#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_RX -#define PMD_RX_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) -#else -#define PMD_RX_LOG(level, fmt, args...) do { } while (0) -#endif - -#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_TX -#define PMD_TX_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) -#else -#define PMD_TX_LOG(level, fmt, args...) do { } while (0) -#endif - -#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_DRIVER -#define PMD_DRV_LOG_RAW(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args) -#else -#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0) -#endif - -#define PMD_DRV_LOG(level, fmt, args...) \ - PMD_DRV_LOG_RAW(level, fmt "\n", ## args) +extern int dpaa2_logtype_sec; + +#define DPAA2_SEC_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, dpaa2_logtype_sec, "dpaa2_sec: " \ + fmt "\n", ##args) + +#define DPAA2_SEC_DEBUG(fmt, args...) \ + rte_log(RTE_LOG_DEBUG, dpaa2_logtype_sec, "dpaa2_sec: %s(): " \ + fmt "\n", __func__, ##args) + +#define PMD_INIT_FUNC_TRACE() DPAA2_SEC_DEBUG(">>") + +#define DPAA2_SEC_INFO(fmt, args...) \ + DPAA2_SEC_LOG(INFO, fmt, ## args) +#define DPAA2_SEC_ERR(fmt, args...) \ + DPAA2_SEC_LOG(ERR, fmt, ## args) +#define DPAA2_SEC_WARN(fmt, args...) \ + DPAA2_SEC_LOG(WARNING, fmt, ## args) + +/* DP Logs, toggled out at compile time if level lower than current level */ +#define DPAA2_SEC_DP_LOG(level, fmt, args...) \ + RTE_LOG_DP(level, PMD, fmt, ## args) + +#define DPAA2_SEC_DP_DEBUG(fmt, args...) \ + DPAA2_SEC_DP_LOG(DEBUG, fmt, ## args) +#define DPAA2_SEC_DP_INFO(fmt, args...) \ + DPAA2_SEC_DP_LOG(INFO, fmt, ## args) +#define DPAA2_SEC_DP_WARN(fmt, args...) 
\ + DPAA2_SEC_DP_LOG(WARNING, fmt, ## args) + #endif /* _DPAA2_SEC_LOGS_H_ */ diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h index e8ac95ba..d015be1e 100644 --- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h +++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h @@ -23,8 +23,6 @@ struct dpaa2_sec_dev_private { uint16_t token; /**< Token required by DPxxx objects */ unsigned int max_nb_queue_pairs; /**< Max number of queue pairs supported by device */ - unsigned int max_nb_sessions; - /**< Max number of sessions supported by device */ }; struct dpaa2_sec_qp { @@ -185,9 +183,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = { .increment = 1 }, .digest_size = { - .min = 16, + .min = 1, .max = 16, - .increment = 0 + .increment = 1 }, .iv_size = { 0 } }, } @@ -206,9 +204,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = { .increment = 1 }, .digest_size = { - .min = 20, + .min = 1, .max = 20, - .increment = 0 + .increment = 1 }, .iv_size = { 0 } }, } @@ -227,9 +225,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = { .increment = 1 }, .digest_size = { - .min = 28, + .min = 1, .max = 28, - .increment = 0 + .increment = 1 }, .iv_size = { 0 } }, } @@ -248,9 +246,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = { .increment = 1 }, .digest_size = { - .min = 32, - .max = 32, - .increment = 0 + .min = 1, + .max = 32, + .increment = 1 }, .iv_size = { 0 } }, } @@ -269,9 +267,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = { .increment = 1 }, .digest_size = { - .min = 48, + .min = 1, .max = 48, - .increment = 0 + .increment = 1 }, .iv_size = { 0 } }, } @@ -290,9 +288,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = { .increment = 1 }, .digest_size = { - .min = 64, + .min = 1, .max = 64, - .increment = 0 + .increment = 1 }, .iv_size = { 0 } }, } diff --git a/drivers/crypto/dpaa2_sec/meson.build b/drivers/crypto/dpaa2_sec/meson.build new file mode 100644 index 00000000..01afc587 --- /dev/null +++ b/drivers/crypto/dpaa2_sec/meson.build @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright 2018 NXP + +if host_machine.system() != 'linux' + build = false +endif + +deps += ['security', 'mempool_dpaa2'] +sources = files('dpaa2_sec_dpseci.c', + 'mc/dpseci.c') + +allow_experimental_apis = true + +includes += include_directories('mc', 'hw') diff --git a/drivers/crypto/dpaa_sec/Makefile b/drivers/crypto/dpaa_sec/Makefile index fe2c5932..9be44704 100644 --- a/drivers/crypto/dpaa_sec/Makefile +++ b/drivers/crypto/dpaa_sec/Makefile @@ -12,13 +12,8 @@ LIB = librte_pmd_dpaa_sec.a # build flags CFLAGS += -DALLOW_EXPERIMENTAL_API CFLAGS += -D _GNU_SOURCE -ifeq ($(CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_INIT),y) -CFLAGS += -O0 -g -CFLAGS += "-Wno-error" -else CFLAGS += -O3 CFLAGS += $(WERROR_FLAGS) -endif CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c index 18681cf3..f571050b 100644 --- a/drivers/crypto/dpaa_sec/dpaa_sec.c +++ b/drivers/crypto/dpaa_sec/dpaa_sec.c @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. 
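The DPAA2_SEC_* macros above replace the compile-time PMD_*_LOG family with a dynamic log type that can be tuned at run time. A minimal sketch of the same register-then-set-level pattern from an application, assuming a DPDK 18.08 build environment; "app.example" is an illustrative name, not one the patch defines:

#include <rte_eal.h>
#include <rte_log.h>

int main(int argc, char **argv)
{
        int lt;

        if (rte_eal_init(argc, argv) < 0)
                return -1;

        /* Same register-then-set-level pattern as dpaa2_sec_init_log. */
        lt = rte_log_register("app.example");
        if (lt < 0)
                return -1;
        rte_log_set_level(lt, RTE_LOG_DEBUG);

        rte_log(RTE_LOG_DEBUG, lt, "app.example: dynamic log type active\n");
        return 0;
}

The same effect is normally available from the EAL command line via the --log-level option matched against the registered name (for example pmd.crypto.dpaa2); the exact matching syntax varies between DPDK releases.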
- * Copyright 2017 NXP + * Copyright 2017-2018 NXP * */ @@ -39,6 +39,8 @@ enum rta_sec_era rta_sec_era; +int dpaa_logtype_sec; + static uint8_t cryptodev_driver_id; static __thread struct rte_crypto_op **dpaa_sec_ops; @@ -53,7 +55,7 @@ dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx) if (!ctx->fd_status) { ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; } else { - PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status); + DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status); ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR; } @@ -69,7 +71,7 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses) retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx)); if (!ctx || retval) { - PMD_TX_LOG(ERR, "Alloc sec descriptor failed!"); + DPAA_SEC_DP_WARN("Alloc sec descriptor failed!"); return NULL; } /* @@ -84,7 +86,7 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses) dcbz_64(&ctx->job.sg[SG_CACHELINE_3]); ctx->ctx_pool = ses->ctx_pool; - ctx->vtop_offset = (uint64_t) ctx + ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx); return ctx; @@ -93,43 +95,18 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses) static inline rte_iova_t dpaa_mem_vtop(void *vaddr) { - const struct rte_memseg *memseg = rte_eal_get_physmem_layout(); - uint64_t vaddr_64, paddr; - int i; - - vaddr_64 = (uint64_t)vaddr; - for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) { - if (vaddr_64 >= memseg[i].addr_64 && - vaddr_64 < memseg[i].addr_64 + memseg[i].len) { - paddr = memseg[i].iova + - (vaddr_64 - memseg[i].addr_64); - - return (rte_iova_t)paddr; - } - } - return (rte_iova_t)(NULL); -} + const struct rte_memseg *ms; -/* virtual address conversin when mempool support is available for ctx */ -static inline phys_addr_t -dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr) -{ - return (uint64_t)vaddr - ctx->vtop_offset; + ms = rte_mem_virt2memseg(vaddr, NULL); + if (ms) + return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr); + return (size_t)NULL; } static inline void * dpaa_mem_ptov(rte_iova_t paddr) { - const struct rte_memseg *memseg = rte_eal_get_physmem_layout(); - int i; - - for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) { - if (paddr >= memseg[i].iova && - (char *)paddr < (char *)memseg[i].iova + memseg[i].len) - return (void *)(memseg[i].addr_64 + - (paddr - memseg[i].iova)); - } - return NULL; + return rte_mem_iova2virt(paddr); } static void @@ -137,8 +114,8 @@ ern_sec_fq_handler(struct qman_portal *qm __rte_unused, struct qman_fq *fq, const struct qm_mr_entry *msg) { - RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n", - fq->fqid, msg->ern.rc, msg->ern.seqnum); + DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n", + fq->fqid, msg->ern.rc, msg->ern.seqnum); } /* initialize the queue with dest chan as caam chan so that @@ -166,11 +143,11 @@ dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc, fq_in->cb.ern = ern_sec_fq_handler; - PMD_INIT_LOG(DEBUG, "in-%x out-%x", fq_in->fqid, fqid_out); + DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out); ret = qman_init_fq(fq_in, flags, &fq_opts); if (unlikely(ret != 0)) - PMD_INIT_LOG(ERR, "qman_init_fq failed %d", ret); + DPAA_SEC_ERR("qman_init_fq failed %d", ret); return ret; } @@ -229,7 +206,7 @@ dpaa_sec_init_tx(struct qman_fq *fq) ret = qman_create_fq(0, flags, fq); if (unlikely(ret)) { - PMD_INIT_LOG(ERR, "qman_create_fq failed"); + DPAA_SEC_ERR("qman_create_fq failed"); return ret; } @@ -244,7 +221,7 @@ dpaa_sec_init_tx(struct qman_fq *fq) ret = qman_init_fq(fq, 0, &opts); if (unlikely(ret)) { - PMD_INIT_LOG(ERR, "unable to init caam 
source fq!"); + DPAA_SEC_ERR("unable to init caam source fq!"); return ret; } @@ -336,7 +313,7 @@ caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a) alginfo_a->algmode = OP_ALG_AAI_HMAC; break; default: - PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg); + DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg); } } @@ -365,7 +342,7 @@ caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c) alginfo_c->algmode = OP_ALG_AAI_CTR; break; default: - PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg); + DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg); } } @@ -378,7 +355,7 @@ caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo) alginfo->algmode = OP_ALG_AAI_GCM; break; default: - PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg); + DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg); } } @@ -388,7 +365,7 @@ static int dpaa_sec_prep_cdb(dpaa_sec_session *ses) { struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0}; - uint32_t shared_desc_len = 0; + int32_t shared_desc_len = 0; struct sec_cdb *cdb = &ses->cdb; int err; #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN @@ -402,11 +379,11 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses) if (is_cipher_only(ses)) { caam_cipher_alg(ses, &alginfo_c); if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) { - PMD_TX_LOG(ERR, "not supported cipher alg\n"); + DPAA_SEC_ERR("not supported cipher alg"); return -ENOTSUP; } - alginfo_c.key = (uint64_t)ses->cipher_key.data; + alginfo_c.key = (size_t)ses->cipher_key.data; alginfo_c.keylen = ses->cipher_key.length; alginfo_c.key_enc_flags = 0; alginfo_c.key_type = RTA_DATA_IMM; @@ -420,11 +397,11 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses) } else if (is_auth_only(ses)) { caam_auth_alg(ses, &alginfo_a); if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) { - PMD_TX_LOG(ERR, "not supported auth alg\n"); + DPAA_SEC_ERR("not supported auth alg"); return -ENOTSUP; } - alginfo_a.key = (uint64_t)ses->auth_key.data; + alginfo_a.key = (size_t)ses->auth_key.data; alginfo_a.keylen = ses->auth_key.length; alginfo_a.key_enc_flags = 0; alginfo_a.key_type = RTA_DATA_IMM; @@ -436,10 +413,10 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses) } else if (is_aead(ses)) { caam_aead_alg(ses, &alginfo); if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) { - PMD_TX_LOG(ERR, "not supported aead alg\n"); + DPAA_SEC_ERR("not supported aead alg"); return -ENOTSUP; } - alginfo.key = (uint64_t)ses->aead_key.data; + alginfo.key = (size_t)ses->aead_key.data; alginfo.keylen = ses->aead_key.length; alginfo.key_enc_flags = 0; alginfo.key_type = RTA_DATA_IMM; @@ -459,22 +436,22 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses) } else { caam_cipher_alg(ses, &alginfo_c); if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) { - PMD_TX_LOG(ERR, "not supported cipher alg\n"); + DPAA_SEC_ERR("not supported cipher alg"); return -ENOTSUP; } - alginfo_c.key = (uint64_t)ses->cipher_key.data; + alginfo_c.key = (size_t)ses->cipher_key.data; alginfo_c.keylen = ses->cipher_key.length; alginfo_c.key_enc_flags = 0; alginfo_c.key_type = RTA_DATA_IMM; caam_auth_alg(ses, &alginfo_a); if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) { - PMD_TX_LOG(ERR, "not supported auth alg\n"); + DPAA_SEC_ERR("not supported auth alg"); return -ENOTSUP; } - alginfo_a.key = (uint64_t)ses->auth_key.data; + alginfo_a.key = (size_t)ses->auth_key.data; alginfo_a.keylen = ses->auth_key.length; alginfo_a.key_enc_flags = 0; alginfo_a.key_type = RTA_DATA_IMM; @@ -487,21 +464,21 @@ 
dpaa_sec_prep_cdb(dpaa_sec_session *ses) &cdb->sh_desc[2], 2); if (err < 0) { - PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths"); + DPAA_SEC_ERR("Crypto: Incorrect key lengths"); return err; } if (cdb->sh_desc[2] & 1) alginfo_c.key_type = RTA_DATA_IMM; else { - alginfo_c.key = (uint64_t)dpaa_mem_vtop( - (void *)alginfo_c.key); + alginfo_c.key = (size_t)dpaa_mem_vtop( + (void *)(size_t)alginfo_c.key); alginfo_c.key_type = RTA_DATA_PTR; } if (cdb->sh_desc[2] & (1<<1)) alginfo_a.key_type = RTA_DATA_IMM; else { - alginfo_a.key = (uint64_t)dpaa_mem_vtop( - (void *)alginfo_a.key); + alginfo_a.key = (size_t)dpaa_mem_vtop( + (void *)(size_t)alginfo_a.key); alginfo_a.key_type = RTA_DATA_PTR; } cdb->sh_desc[0] = 0; @@ -530,6 +507,12 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses) ses->digest_length, ses->dir); } } + + if (shared_desc_len < 0) { + DPAA_SEC_ERR("error in preparing command block"); + return shared_desc_len; + } + cdb->sh_hdr.hi.field.idlen = shared_desc_len; cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word); cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word); @@ -543,12 +526,25 @@ dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops) { struct qman_fq *fq; unsigned int pkts = 0; - int ret; + int num_rx_bufs, ret; struct qm_dqrr_entry *dq; + uint32_t vdqcr_flags = 0; fq = &qp->outq; - ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ? - DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops); + /* + * For requests of fewer than four frames we set QM_VDQCR_EXACT, so + * QBMAN returns exactly the number asked for. Above that the flag is + * left clear and QBMAN may deliver up to two more frames than + * requested, so we ask for two fewer to stay within nb_ops. + */ + if (nb_ops < 4) { + vdqcr_flags = QM_VDQCR_EXACT; + num_rx_bufs = nb_ops; + } else { + num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
+ (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2); + } + ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags); if (ret) return 0; @@ -585,7 +581,7 @@ dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops) if (!ctx->fd_status) { op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; } else { - printf("\nSEC return err: 0x%x", ctx->fd_status); + DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status); op->status = RTE_CRYPTO_OP_STATUS_ERROR; } ops[pkts++] = op; @@ -616,8 +612,8 @@ build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) extra_segs = 2; if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) { - PMD_TX_LOG(ERR, "Auth: Max sec segs supported is %d\n", - MAX_SG_ENTRIES); + DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d", + MAX_SG_ENTRIES); return NULL; } ctx = dpaa_sec_alloc_ctx(ses); @@ -640,7 +636,7 @@ build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) in_sg->extension = 1; in_sg->final = 1; in_sg->length = sym->auth.data.length; - qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2])); + qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2])); /* 1st seg */ sg = in_sg + 1; @@ -664,7 +660,7 @@ build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) sg++; rte_memcpy(old_digest, sym->auth.digest.data, ses->digest_length); - start_addr = dpaa_mem_vtop_ctx(ctx, old_digest); + start_addr = dpaa_mem_vtop(old_digest); qm_sg_entry_set64(sg, start_addr); sg->length = ses->digest_length; in_sg->length += ses->digest_length; @@ -718,7 +714,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses) if (is_decode(ses)) { /* need to extend the input to a compound frame */ sg->extension = 1; - qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2])); + qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2])); sg->length = sym->auth.data.length + ses->digest_length; sg->final = 1; cpu_to_hw_sg(sg); @@ -732,7 +728,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses) cpu_to_hw_sg(sg); /* let's check digest by hw */ - start_addr = dpaa_mem_vtop_ctx(ctx, old_digest); + start_addr = dpaa_mem_vtop(old_digest); sg++; qm_sg_entry_set64(sg, start_addr); sg->length = ses->digest_length; @@ -769,8 +765,8 @@ build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) } if (req_segs > MAX_SG_ENTRIES) { - PMD_TX_LOG(ERR, "Cipher: Max sec segs supported is %d\n", - MAX_SG_ENTRIES); + DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d", + MAX_SG_ENTRIES); return NULL; } @@ -785,7 +781,7 @@ build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) out_sg = &cf->sg[0]; out_sg->extension = 1; out_sg->length = sym->cipher.data.length; - qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2])); + qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2])); cpu_to_hw_sg(out_sg); /* 1st seg */ @@ -814,7 +810,7 @@ build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) in_sg->length = sym->cipher.data.length + ses->iv.length; sg++; - qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg)); + qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg)); cpu_to_hw_sg(in_sg); /* IV */ @@ -881,7 +877,7 @@ build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses) sg->extension = 1; sg->final = 1; sg->length = sym->cipher.data.length + ses->iv.length; - qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2])); + qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2])); cpu_to_hw_sg(sg); sg = &cf->sg[2]; @@ -922,7 +918,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) req_segs++; if (req_segs > 
MAX_SG_ENTRIES) { - PMD_TX_LOG(ERR, "AEAD: Max sec segs supported is %d\n", + DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d", MAX_SG_ENTRIES); return NULL; } @@ -947,7 +943,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) /* output sg entries */ sg = &cf->sg[2]; - qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg)); + qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg)); cpu_to_hw_sg(out_sg); /* 1st seg */ @@ -991,7 +987,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) /* input sg entries */ sg++; - qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg)); + qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg)); cpu_to_hw_sg(in_sg); /* 1st seg IV */ @@ -1028,7 +1024,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) sg++; memcpy(ctx->digest, sym->aead.digest.data, ses->digest_length); - qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest)); + qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest)); sg->length = ses->digest_length; } sg->final = 1; @@ -1066,7 +1062,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses) /* input */ rte_prefetch0(cf->sg); sg = &cf->sg[2]; - qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg)); + qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg)); if (is_encode(ses)) { qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr)); sg->length = ses->iv.length; @@ -1111,7 +1107,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses) ses->digest_length); sg++; - qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest)); + qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest)); sg->length = ses->digest_length; length += sg->length; sg->final = 1; @@ -1125,7 +1121,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses) /* output */ sg++; - qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg)); + qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg)); qm_sg_entry_set64(sg, dst_start_addr + sym->aead.data.offset - ses->auth_only_len); sg->length = sym->aead.data.length + ses->auth_only_len; @@ -1170,7 +1166,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) } if (req_segs > MAX_SG_ENTRIES) { - PMD_TX_LOG(ERR, "Cipher-Auth: Max sec segs supported is %d\n", + DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d", MAX_SG_ENTRIES); return NULL; } @@ -1194,7 +1190,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) /* output sg entries */ sg = &cf->sg[2]; - qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg)); + qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg)); cpu_to_hw_sg(out_sg); /* 1st seg */ @@ -1236,7 +1232,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) /* input sg entries */ sg++; - qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg)); + qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg)); cpu_to_hw_sg(in_sg); /* 1st seg IV */ @@ -1266,7 +1262,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses) sg++; memcpy(ctx->digest, sym->auth.digest.data, ses->digest_length); - qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest)); + qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest)); sg->length = ses->digest_length; } sg->final = 1; @@ -1303,7 +1299,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses) /* input */ rte_prefetch0(cf->sg); sg = &cf->sg[2]; - qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg)); + qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg)); if (is_encode(ses)) { qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr)); sg->length = 
ses->iv.length; @@ -1333,7 +1329,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses) ses->digest_length); sg++; - qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest)); + qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest)); sg->length = ses->digest_length; length += sg->length; sg->final = 1; @@ -1347,7 +1343,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses) /* output */ sg++; - qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg)); + qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg)); qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset); sg->length = sym->cipher.data.length; length = sg->length; @@ -1422,7 +1418,6 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, struct rte_crypto_op *op; struct dpaa_sec_job *cf; dpaa_sec_session *ses; - struct dpaa_sec_op_ctx *ctx; uint32_t auth_only_len; struct qman_fq *inq[DPAA_SEC_BURST]; @@ -1434,7 +1429,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, switch (op->sess_type) { case RTE_CRYPTO_OP_WITH_SESSION: ses = (dpaa_sec_session *) - get_session_private_data( + get_sym_session_private_data( op->sym->session, cryptodev_driver_id); break; @@ -1444,15 +1439,15 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, op->sym->sec_session); break; default: - PMD_TX_LOG(ERR, + DPAA_SEC_DP_ERR( "sessionless crypto op not supported"); frames_to_send = loop; nb_ops = loop; goto send_pkts; } if (unlikely(!ses->qp || ses->qp != qp)) { - PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p", - ses->qp, qp); + DPAA_SEC_DP_ERR("sess->qp - %p qp %p", + ses->qp, qp); if (dpaa_sec_attach_sess_q(qp, ses)) { frames_to_send = loop; nb_ops = loop; @@ -1475,7 +1470,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, } else if (is_proto_ipsec(ses)) { cf = build_proto(op, ses); } else { - PMD_TX_LOG(ERR, "not supported sec op"); + DPAA_SEC_DP_ERR("not supported ops"); frames_to_send = loop; nb_ops = loop; goto send_pkts; @@ -1491,7 +1486,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, } else if (is_auth_cipher(ses)) { cf = build_cipher_auth_sg(op, ses); } else { - PMD_TX_LOG(ERR, "not supported sec op"); + DPAA_SEC_DP_ERR("not supported ops"); frames_to_send = loop; nb_ops = loop; goto send_pkts; @@ -1507,8 +1502,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops, inq[loop] = ses->inq; fd->opaque_addr = 0; fd->cmd = 0; - ctx = container_of(cf, struct dpaa_sec_op_ctx, job); - qm_fd_addr_set64(fd, dpaa_mem_vtop_ctx(ctx, cf->sg)); + qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg)); fd->_format1 = qm_fd_compound; fd->length29 = 2 * sizeof(struct qm_sg_entry); /* Auth_only_len is set as 0 in descriptor and it is @@ -1547,7 +1541,7 @@ dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops, dpaa_qp->rx_pkts += num_rx; dpaa_qp->rx_errs += nb_ops - num_rx; - PMD_RX_LOG(DEBUG, "SEC Received %d Packets\n", num_rx); + DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx); return num_rx; } @@ -1562,11 +1556,11 @@ dpaa_sec_queue_pair_release(struct rte_cryptodev *dev, PMD_INIT_FUNC_TRACE(); - PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d", dev, qp_id); + DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id); internals = dev->data->dev_private; if (qp_id >= internals->max_nb_queue_pairs) { - PMD_INIT_LOG(ERR, "Max supported qpid %d", + DPAA_SEC_ERR("Max supported qpid %d", internals->max_nb_queue_pairs); return -EINVAL; } @@ -1588,12 +1582,11 @@ dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, struct dpaa_sec_dev_private *internals; struct 
dpaa_sec_qp *qp = NULL; - PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, conf =%p", - dev, qp_id, qp_conf); + DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf); internals = dev->data->dev_private; if (qp_id >= internals->max_nb_queue_pairs) { - PMD_INIT_LOG(ERR, "Max supported qpid %d", + DPAA_SEC_ERR("Max supported qpid %d", internals->max_nb_queue_pairs); return -EINVAL; } @@ -1605,26 +1598,6 @@ dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, return 0; } -/** Start queue pair */ -static int -dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - PMD_INIT_FUNC_TRACE(); - - return 0; -} - -/** Stop queue pair */ -static int -dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - PMD_INIT_FUNC_TRACE(); - - return 0; -} - /** Return the number of allocated queue pairs */ static uint32_t dpaa_sec_queue_pair_count(struct rte_cryptodev *dev) @@ -1636,7 +1609,7 @@ dpaa_sec_queue_pair_count(struct rte_cryptodev *dev) /** Returns the size of session structure */ static unsigned int -dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused) +dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) { PMD_INIT_FUNC_TRACE(); @@ -1654,7 +1627,7 @@ dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused, session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length, RTE_CACHE_LINE_SIZE); if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) { - PMD_INIT_LOG(ERR, "No Memory for cipher key\n"); + DPAA_SEC_ERR("No Memory for cipher key"); return -ENOMEM; } session->cipher_key.length = xform->cipher.key.length; @@ -1676,7 +1649,7 @@ dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused, session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length, RTE_CACHE_LINE_SIZE); if (session->auth_key.data == NULL && xform->auth.key.length > 0) { - PMD_INIT_LOG(ERR, "No Memory for auth key\n"); + DPAA_SEC_ERR("No Memory for auth key"); return -ENOMEM; } session->auth_key.length = xform->auth.key.length; @@ -1702,7 +1675,7 @@ dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused, session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length, RTE_CACHE_LINE_SIZE); if (session->aead_key.data == NULL && xform->aead.key.length > 0) { - PMD_INIT_LOG(ERR, "No Memory for aead key\n"); + DPAA_SEC_ERR("No Memory for aead key\n"); return -ENOMEM; } session->aead_key.length = xform->aead.key.length; @@ -1727,7 +1700,7 @@ dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi) return &qi->inq[i]; } } - PMD_DRV_LOG(ERR, "All ses session in use %x", qi->max_nb_sessions); + DPAA_SEC_WARN("All ses session in use %x", qi->max_nb_sessions); return NULL; } @@ -1756,46 +1729,24 @@ dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess) sess->qp = qp; ret = dpaa_sec_prep_cdb(sess); if (ret) { - PMD_DRV_LOG(ERR, "Unable to prepare sec cdb"); + DPAA_SEC_ERR("Unable to prepare sec cdb"); return -1; } - + if (unlikely(!RTE_PER_LCORE(dpaa_io))) { + ret = rte_dpaa_portal_init((void *)0); + if (ret) { + DPAA_SEC_ERR("Failure in affining portal"); + return ret; + } + } ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb), qman_fq_fqid(&qp->outq)); if (ret) - PMD_DRV_LOG(ERR, "Unable to init sec queue"); + DPAA_SEC_ERR("Unable to init sec queue"); return ret; } -static int -dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused, - uint16_t qp_id __rte_unused, - void *ses __rte_unused) -{ - PMD_INIT_FUNC_TRACE(); - 
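/*
 * Illustrative sketch (editor's addition, outside the patch): the
 * dpaa_sec_attach_sess_q() hunk above now affines a QMan portal to the
 * calling lcore lazily, instead of assuming rte_dpaa_portal_init() has
 * already run. The pattern it adds, with a hypothetical helper name:
 */
static inline int
dpaa_sec_ensure_portal(void)
{
	/* dpaa_io is per-lcore state, so the init runs once per worker. */
	if (unlikely(!RTE_PER_LCORE(dpaa_io)))
		return rte_dpaa_portal_init((void *)0);

	return 0; /* portal already affine to this lcore */
}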
return 0; -} - -static int -dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev, - uint16_t qp_id __rte_unused, - void *ses) -{ - dpaa_sec_session *sess = ses; - struct dpaa_sec_dev_private *qi = dev->data->dev_private; - - PMD_INIT_FUNC_TRACE(); - - if (sess->inq) - dpaa_sec_detach_rxq(qi, sess->inq); - sess->inq = NULL; - - sess->qp = NULL; - - return 0; -} - static int dpaa_sec_set_session_parameters(struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform, void *sess) @@ -1806,7 +1757,7 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev, PMD_INIT_FUNC_TRACE(); if (unlikely(sess == NULL)) { - RTE_LOG(ERR, PMD, "invalid session struct\n"); + DPAA_SEC_ERR("invalid session struct"); return -EINVAL; } @@ -1831,7 +1782,7 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev, dpaa_sec_cipher_init(dev, xform, session); dpaa_sec_auth_init(dev, xform->next, session); } else { - PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher"); + DPAA_SEC_ERR("Not supported: Auth then Cipher"); return -EINVAL; } @@ -1842,7 +1793,7 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev, dpaa_sec_auth_init(dev, xform, session); dpaa_sec_cipher_init(dev, xform->next, session); } else { - PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher"); + DPAA_SEC_ERR("Not supported: Auth then Cipher"); return -EINVAL; } @@ -1852,13 +1803,13 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev, dpaa_sec_aead_init(dev, xform, session); } else { - PMD_DRV_LOG(ERR, "Invalid crypto type"); + DPAA_SEC_ERR("Invalid crypto type"); return -EINVAL; } session->ctx_pool = internals->ctx_pool; session->inq = dpaa_sec_attach_rxq(internals); if (session->inq == NULL) { - PMD_DRV_LOG(ERR, "unable to attach sec queue"); + DPAA_SEC_ERR("unable to attach sec queue"); goto err1; } @@ -1873,7 +1824,7 @@ err1: } static int -dpaa_sec_session_configure(struct rte_cryptodev *dev, +dpaa_sec_sym_session_configure(struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform, struct rte_cryptodev_sym_session *sess, struct rte_mempool *mempool) @@ -1884,22 +1835,20 @@ dpaa_sec_session_configure(struct rte_cryptodev *dev, PMD_INIT_FUNC_TRACE(); if (rte_mempool_get(mempool, &sess_private_data)) { - CDEV_LOG_ERR( - "Couldn't get object from session mempool"); + DPAA_SEC_ERR("Couldn't get object from session mempool"); return -ENOMEM; } ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data); if (ret != 0) { - PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure " - "session parameters"); + DPAA_SEC_ERR("failed to configure session parameters"); /* Return session to mempool */ rte_mempool_put(mempool, sess_private_data); return ret; } - set_session_private_data(sess, dev->driver_id, + set_sym_session_private_data(sess, dev->driver_id, sess_private_data); @@ -1908,12 +1857,12 @@ dpaa_sec_session_configure(struct rte_cryptodev *dev, /** Clear the memory of session so it doesn't leave key material behind */ static void -dpaa_sec_session_clear(struct rte_cryptodev *dev, +dpaa_sec_sym_session_clear(struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess) { struct dpaa_sec_dev_private *qi = dev->data->dev_private; uint8_t index = dev->driver_id; - void *sess_priv = get_session_private_data(sess, index); + void *sess_priv = get_sym_session_private_data(sess, index); PMD_INIT_FUNC_TRACE(); @@ -1927,7 +1876,7 @@ dpaa_sec_session_clear(struct rte_cryptodev *dev, rte_free(s->cipher_key.data); rte_free(s->auth_key.data); memset(s, 0, sizeof(dpaa_sec_session)); - set_session_private_data(sess, index, NULL); + 
set_sym_session_private_data(sess, index, NULL); rte_mempool_put(sess_mp, sess_priv); } } @@ -1958,7 +1907,7 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev, RTE_CACHE_LINE_SIZE); if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) { - RTE_LOG(ERR, PMD, "No Memory for cipher key\n"); + DPAA_SEC_ERR("No Memory for cipher key"); return -ENOMEM; } @@ -1968,7 +1917,7 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev, RTE_CACHE_LINE_SIZE); if (session->auth_key.data == NULL && auth_xform->key.length > 0) { - RTE_LOG(ERR, PMD, "No Memory for auth key\n"); + DPAA_SEC_ERR("No Memory for auth key"); rte_free(session->cipher_key.data); return -ENOMEM; } @@ -2013,11 +1962,11 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev, case RTE_CRYPTO_AUTH_KASUMI_F9: case RTE_CRYPTO_AUTH_AES_CBC_MAC: case RTE_CRYPTO_AUTH_ZUC_EIA3: - RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n", + DPAA_SEC_ERR("Crypto: Unsupported auth alg %u", auth_xform->algo); goto out; default: - RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n", + DPAA_SEC_ERR("Crypto: Undefined Auth specified %u", auth_xform->algo); goto out; } @@ -2037,11 +1986,11 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev, case RTE_CRYPTO_CIPHER_3DES_ECB: case RTE_CRYPTO_CIPHER_AES_ECB: case RTE_CRYPTO_CIPHER_KASUMI_F8: - RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n", + DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u", cipher_xform->algo); goto out; default: - RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n", + DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u", cipher_xform->algo); goto out; } @@ -2086,7 +2035,7 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev, session->ctx_pool = internals->ctx_pool; session->inq = dpaa_sec_attach_rxq(internals); if (session->inq == NULL) { - PMD_DRV_LOG(ERR, "unable to attach sec queue"); + DPAA_SEC_ERR("unable to attach sec queue"); goto out; } @@ -2110,8 +2059,7 @@ dpaa_sec_security_session_create(void *dev, int ret; if (rte_mempool_get(mempool, &sess_private_data)) { - CDEV_LOG_ERR( - "Couldn't get object from session mempool"); + DPAA_SEC_ERR("Couldn't get object from session mempool"); return -ENOMEM; } @@ -2126,9 +2074,7 @@ dpaa_sec_security_session_create(void *dev, return -EINVAL; } if (ret != 0) { - PMD_DRV_LOG(ERR, - "DPAA2 PMD: failed to configure session parameters"); - + DPAA_SEC_ERR("failed to configure session parameters"); /* Return session to mempool */ rte_mempool_put(mempool, sess_private_data); return ret; @@ -2163,11 +2109,32 @@ dpaa_sec_security_session_destroy(void *dev __rte_unused, static int -dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused, +dpaa_sec_dev_configure(struct rte_cryptodev *dev, struct rte_cryptodev_config *config __rte_unused) { + + char str[20]; + struct dpaa_sec_dev_private *internals; + PMD_INIT_FUNC_TRACE(); + internals = dev->data->dev_private; + sprintf(str, "ctx_pool_%d", dev->data->dev_id); + if (!internals->ctx_pool) { + internals->ctx_pool = rte_mempool_create((const char *)str, + CTX_POOL_NUM_BUFS, + CTX_POOL_BUF_SIZE, + CTX_POOL_CACHE_SIZE, 0, + NULL, NULL, NULL, NULL, + SOCKET_ID_ANY, 0); + if (!internals->ctx_pool) { + DPAA_SEC_ERR("%s create failed\n", str); + return -ENOMEM; + } + } else + DPAA_SEC_INFO("mempool already created for dev_id : %d", + dev->data->dev_id); + return 0; } @@ -2185,9 +2152,19 @@ dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused) } static int -dpaa_sec_dev_close(struct 
rte_cryptodev *dev __rte_unused) +dpaa_sec_dev_close(struct rte_cryptodev *dev) { + struct dpaa_sec_dev_private *internals; + PMD_INIT_FUNC_TRACE(); + + if (dev == NULL) + return -ENOMEM; + + internals = dev->data->dev_private; + rte_mempool_free(internals->ctx_pool); + internals->ctx_pool = NULL; + return 0; } @@ -2203,9 +2180,6 @@ dpaa_sec_dev_infos_get(struct rte_cryptodev *dev, info->feature_flags = dev->feature_flags; info->capabilities = dpaa_sec_capabilities; info->sym.max_nb_sessions = internals->max_nb_sessions; - info->sym.max_nb_sessions_per_qp = - RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS / - RTE_DPAA_MAX_NB_SEC_QPS; info->driver_id = cryptodev_driver_id; } } @@ -2218,14 +2192,10 @@ static struct rte_cryptodev_ops crypto_ops = { .dev_infos_get = dpaa_sec_dev_infos_get, .queue_pair_setup = dpaa_sec_queue_pair_setup, .queue_pair_release = dpaa_sec_queue_pair_release, - .queue_pair_start = dpaa_sec_queue_pair_start, - .queue_pair_stop = dpaa_sec_queue_pair_stop, .queue_pair_count = dpaa_sec_queue_pair_count, - .session_get_size = dpaa_sec_session_get_size, - .session_configure = dpaa_sec_session_configure, - .session_clear = dpaa_sec_session_clear, - .qp_attach_session = dpaa_sec_qp_attach_sess, - .qp_detach_session = dpaa_sec_qp_detach_sess, + .sym_session_get_size = dpaa_sec_sym_session_get_size, + .sym_session_configure = dpaa_sec_sym_session_configure, + .sym_session_clear = dpaa_sec_sym_session_clear }; static const struct rte_security_capability * @@ -2246,18 +2216,20 @@ struct rte_security_ops dpaa_sec_security_ops = { static int dpaa_sec_uninit(struct rte_cryptodev *dev) { - struct dpaa_sec_dev_private *internals = dev->data->dev_private; + struct dpaa_sec_dev_private *internals; if (dev == NULL) return -ENODEV; + internals = dev->data->dev_private; rte_free(dev->security_ctx); + /* In case close has been called, internals->ctx_pool would be NULL */ rte_mempool_free(internals->ctx_pool); rte_free(internals); - PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u\n", - dev->data->name, rte_socket_id()); + DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u", + dev->data->name, rte_socket_id()); return 0; } @@ -2270,7 +2242,6 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev) struct dpaa_sec_qp *qp; uint32_t i, flags; int ret; - char str[20]; PMD_INIT_FUNC_TRACE(); @@ -2283,7 +2254,11 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev) RTE_CRYPTODEV_FF_HW_ACCELERATED | RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | RTE_CRYPTODEV_FF_SECURITY | - RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER; + RTE_CRYPTODEV_FF_IN_PLACE_SGL | + RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | + RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | + RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | + RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT; internals = cryptodev->data->dev_private; internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS; @@ -2295,7 +2270,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev) * RX function */ if (rte_eal_process_type() != RTE_PROC_PRIMARY) { - PMD_INIT_LOG(DEBUG, "Device already init by primary process"); + DPAA_SEC_WARN("Device already init by primary process"); return 0; } @@ -2314,7 +2289,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev) qp = &internals->qps[i]; ret = dpaa_sec_init_tx(&qp->outq); if (ret) { - PMD_INIT_LOG(ERR, "config tx of queue pair %d", i); + DPAA_SEC_ERR("config tx of queue pair %d", i); goto init_error; } } @@ -2325,28 +2300,16 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev) /* create rx qman fq for sessions*/ ret = qman_create_fq(0, flags, &internals->inq[i]); if 
(unlikely(ret != 0)) { - PMD_INIT_LOG(ERR, "sec qman_create_fq failed"); + DPAA_SEC_ERR("sec qman_create_fq failed"); goto init_error; } } - sprintf(str, "ctx_pool_%d", cryptodev->data->dev_id); - internals->ctx_pool = rte_mempool_create((const char *)str, - CTX_POOL_NUM_BUFS, - CTX_POOL_BUF_SIZE, - CTX_POOL_CACHE_SIZE, 0, - NULL, NULL, NULL, NULL, - SOCKET_ID_ANY, 0); - if (!internals->ctx_pool) { - RTE_LOG(ERR, PMD, "%s create failed\n", str); - goto init_error; - } - - PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name); + RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name); return 0; init_error: - PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name); + DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name); dpaa_sec_uninit(cryptodev); return -EFAULT; @@ -2445,5 +2408,12 @@ static struct rte_dpaa_driver rte_dpaa_sec_driver = { static struct cryptodev_driver dpaa_sec_crypto_drv; RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver); -RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver, +RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver, cryptodev_driver_id); + +RTE_INIT(dpaa_sec_init_log) +{ + dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa"); + if (dpaa_logtype_sec >= 0) + rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE); +} diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h index f45b36cb..ac6c00a6 100644 --- a/drivers/crypto/dpaa_sec/dpaa_sec.h +++ b/drivers/crypto/dpaa_sec/dpaa_sec.h @@ -7,6 +7,9 @@ #ifndef _DPAA_SEC_H_ #define _DPAA_SEC_H_ +#define CRYPTODEV_NAME_DPAA_SEC_PMD crypto_dpaa_sec +/**< NXP DPAA - SEC PMD device name */ + #define NUM_POOL_CHANNELS 4 #define DPAA_SEC_BURST 7 #define DPAA_SEC_ALG_UNSUPPORT (-1) @@ -23,6 +26,7 @@ #define CTX_POOL_NUM_BUFS 32000 #define CTX_POOL_BUF_SIZE sizeof(struct dpaa_sec_op_ctx) #define CTX_POOL_CACHE_SIZE 512 +#define RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS 2048 #define DIR_ENC 1 #define DIR_DEC 0 @@ -133,7 +137,7 @@ struct dpaa_sec_qp { int tx_errs; }; -#define RTE_DPAA_MAX_NB_SEC_QPS 1 +#define RTE_DPAA_MAX_NB_SEC_QPS 8 #define RTE_DPAA_MAX_RX_QUEUE RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS #define DPAA_MAX_DEQUEUE_NUM_FRAMES 63 @@ -182,10 +186,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = { .increment = 1 }, .digest_size = { - .min = 16, + .min = 1, .max = 16, - .increment = 0 + .increment = 1 }, + .iv_size = { 0 } }, } }, } }, @@ -202,10 +207,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = { .increment = 1 }, .digest_size = { - .min = 20, + .min = 1, .max = 20, - .increment = 0 + .increment = 1 }, + .iv_size = { 0 } }, } }, } }, @@ -222,10 +228,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = { .increment = 1 }, .digest_size = { - .min = 28, + .min = 1, .max = 28, - .increment = 0 + .increment = 1 }, + .iv_size = { 0 } }, } }, } }, @@ -242,10 +249,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = { .increment = 1 }, .digest_size = { - .min = 32, + .min = 1, .max = 32, - .increment = 0 + .increment = 1 }, + .iv_size = { 0 } }, } }, } }, @@ -262,10 +270,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = { .increment = 1 }, .digest_size = { - .min = 48, + .min = 1, .max = 48, - .increment = 0 + .increment = 1 }, + .iv_size = { 0 } }, } }, } }, @@ -282,10 +291,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = { 
.increment = 1 }, .digest_size = { - .min = 64, + .min = 1, .max = 64, - .increment = 0 + .increment = 1 }, + .iv_size = { 0 } }, } }, } }, diff --git a/drivers/crypto/dpaa_sec/dpaa_sec_log.h b/drivers/crypto/dpaa_sec/dpaa_sec_log.h index 992a79f5..fb895a8b 100644 --- a/drivers/crypto/dpaa_sec/dpaa_sec_log.h +++ b/drivers/crypto/dpaa_sec/dpaa_sec_log.h @@ -1,44 +1,43 @@ /* SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. - * Copyright NXP 2017. + * Copyright 2017-2018 NXP * */ #ifndef _DPAA_SEC_LOG_H_ #define _DPAA_SEC_LOG_H_ -#define PMD_INIT_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args) - -#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_INIT -#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") -#else -#define PMD_INIT_FUNC_TRACE() do { } while (0) -#endif - -#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_RX -#define PMD_RX_LOG(level, fmt, args...) \ - RTE_LOG_DP(level, PMD, "%s(): " fmt "\n", __func__, ## args) -#else -#define PMD_RX_LOG(level, fmt, args...) do { } while (0) -#endif - -#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_TX -#define PMD_TX_LOG(level, fmt, args...) \ - RTE_LOG_DP(level, PMD, "%s(): " fmt "\n", __func__, ## args) -#else -#define PMD_TX_LOG(level, fmt, args...) do { } while (0) -#endif - -#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_DRIVER -#define PMD_DRV_LOG_RAW(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args) -#else -#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0) -#endif - -#define PMD_DRV_LOG(level, fmt, args...) \ - PMD_DRV_LOG_RAW(level, fmt "\n", ## args) +extern int dpaa_logtype_sec; + +#define DPAA_SEC_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, dpaa_logtype_sec, "dpaa_sec: " \ + fmt "\n", ##args) + +#define DPAA_SEC_DEBUG(fmt, args...) \ + rte_log(RTE_LOG_DEBUG, dpaa_logtype_sec, "dpaa_sec: %s(): " \ + fmt "\n", __func__, ##args) + +#define PMD_INIT_FUNC_TRACE() DPAA_SEC_DEBUG(" >>") + +#define DPAA_SEC_INFO(fmt, args...) \ + DPAA_SEC_LOG(INFO, fmt, ## args) +#define DPAA_SEC_ERR(fmt, args...) \ + DPAA_SEC_LOG(ERR, fmt, ## args) +#define DPAA_SEC_WARN(fmt, args...) \ + DPAA_SEC_LOG(WARNING, fmt, ## args) + +/* DP Logs, toggled out at compile time if level lower than current level */ +#define DPAA_SEC_DP_LOG(level, fmt, args...) \ + RTE_LOG_DP(level, PMD, fmt, ## args) + +#define DPAA_SEC_DP_DEBUG(fmt, args...) \ + DPAA_SEC_DP_LOG(DEBUG, fmt, ## args) +#define DPAA_SEC_DP_INFO(fmt, args...) \ + DPAA_SEC_DP_LOG(INFO, fmt, ## args) +#define DPAA_SEC_DP_WARN(fmt, args...) \ + DPAA_SEC_DP_LOG(WARNING, fmt, ## args) +#define DPAA_SEC_DP_ERR(fmt, args...) 
\
+	DPAA_SEC_DP_LOG(ERR, fmt, ## args)
 
 #endif /* _DPAA_SEC_LOG_H_ */
diff --git a/drivers/crypto/dpaa_sec/meson.build b/drivers/crypto/dpaa_sec/meson.build
new file mode 100644
index 00000000..8a570984
--- /dev/null
+++ b/drivers/crypto/dpaa_sec/meson.build
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+	build = false
+endif
+
+deps += ['bus_dpaa', 'security']
+sources = files('dpaa_sec.c')
+
+allow_experimental_apis = true
+
+includes += include_directories('../dpaa2_sec/')
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd.c b/drivers/crypto/kasumi/rte_kasumi_pmd.c
index 356621d4..239a1cf4 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd.c
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2016-2017 Intel Corporation
+ * Copyright(c) 2016-2018 Intel Corporation
  */
 
 #include
@@ -79,18 +79,20 @@ kasumi_set_session_parameters(struct kasumi_session *sess,
 		break;
 	case KASUMI_OP_NOT_SUPPORTED:
 	default:
-		KASUMI_LOG_ERR("Unsupported operation chain order parameter");
+		KASUMI_LOG(ERR, "Unsupported operation chain order parameter");
 		return -ENOTSUP;
 	}
 
 	if (cipher_xform) {
 		/* Only KASUMI F8 supported */
-		if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8)
+		if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8) {
+			KASUMI_LOG(ERR, "Unsupported cipher algorithm ");
 			return -ENOTSUP;
+		}
 
 		sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
 		if (cipher_xform->cipher.iv.length != KASUMI_IV_LENGTH) {
-			KASUMI_LOG_ERR("Wrong IV length");
+			KASUMI_LOG(ERR, "Wrong IV length");
 			return -EINVAL;
 		}
@@ -101,11 +103,13 @@ kasumi_set_session_parameters(struct kasumi_session *sess,
 
 	if (auth_xform) {
 		/* Only KASUMI F9 supported */
-		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9)
+		if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9) {
+			KASUMI_LOG(ERR, "Unsupported authentication");
 			return -ENOTSUP;
+		}
 
 		if (auth_xform->auth.digest_length != KASUMI_DIGEST_LENGTH) {
-			KASUMI_LOG_ERR("Wrong digest length");
+			KASUMI_LOG(ERR, "Wrong digest length");
 			return -EINVAL;
 		}
@@ -131,7 +135,7 @@ kasumi_get_session(struct kasumi_qp *qp, struct rte_crypto_op *op)
 	if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
 		if (likely(op->sym->session != NULL))
 			sess = (struct kasumi_session *)
-					get_session_private_data(
+					get_sym_session_private_data(
 					op->sym->session,
 					cryptodev_driver_id);
 	} else {
@@ -153,8 +157,8 @@
 			sess = NULL;
 		}
 		op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
-		set_session_private_data(op->sym->session, cryptodev_driver_id,
-				_sess_private_data);
+		set_sym_session_private_data(op->sym->session,
+				cryptodev_driver_id, _sess_private_data);
 	}
 
 	if (unlikely(sess == NULL))
@@ -213,7 +217,7 @@ process_kasumi_cipher_op_bit(struct rte_crypto_op *op,
 	src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *);
 	if (op->sym->m_dst == NULL) {
 		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-		KASUMI_LOG_ERR("bit-level in-place not supported\n");
+		KASUMI_LOG(ERR, "bit-level in-place not supported");
 		return 0;
 	}
 	dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *);
@@ -244,7 +248,7 @@ process_kasumi_hash_op(struct kasumi_qp *qp, struct rte_crypto_op **ops,
 		/* Data must be byte aligned */
 		if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) {
 			ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
-			KASUMI_LOG_ERR("offset");
+			KASUMI_LOG(ERR, "Invalid Offset");
 			break;
 		}
@@ -320,7 +324,7 @@
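/*
 * Illustrative sketch (editor's addition, outside the patch): both the
 * dpaa_sec_log.h rewrite above and the KASUMI_LOG() conversions in this
 * file follow the same dynamic-logging recipe: register a named log type
 * in a constructor, then filter it at runtime, e.g. with
 * --log-level=pmd.crypto.dpaa:debug. With a hypothetical
 * "pmd.crypto.example" type the recipe is:
 */
#include <rte_log.h>

static int example_logtype;

RTE_INIT(example_init_log)
{
	example_logtype = rte_log_register("pmd.crypto.example");
	if (example_logtype >= 0)
		rte_log_set_level(example_logtype, RTE_LOG_NOTICE);
}

#define EXAMPLE_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, example_logtype, \
		"example: " fmt "\n", ##args)

/* Usage: EXAMPLE_LOG(ERR, "queue %d failed", qid); */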
process_ops(struct rte_crypto_op **ops, struct kasumi_session *session, if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { memset(session, 0, sizeof(struct kasumi_session)); memset(ops[i]->sym->session, 0, - rte_cryptodev_get_header_session_size()); + rte_cryptodev_sym_get_header_session_size()); rte_mempool_put(qp->sess_mp, session); rte_mempool_put(qp->sess_mp, ops[i]->sym->session); ops[i]->sym->session = NULL; @@ -409,9 +413,9 @@ kasumi_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops, (curr_c_op->sym->m_dst != NULL && !rte_pktmbuf_is_contiguous( curr_c_op->sym->m_dst))) { - KASUMI_LOG_ERR("PMD supports only contiguous mbufs, " + KASUMI_LOG(ERR, "PMD supports only contiguous mbufs, " "op (%p) provides noncontiguous mbuf as " - "source/destination buffer.\n", curr_c_op); + "source/destination buffer.", curr_c_op); curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; break; } @@ -531,7 +535,7 @@ cryptodev_kasumi_create(const char *name, dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params); if (dev == NULL) { - KASUMI_LOG_ERR("failed to create cryptodev vdev"); + KASUMI_LOG(ERR, "failed to create cryptodev vdev"); goto init_error; } @@ -555,11 +559,10 @@ cryptodev_kasumi_create(const char *name, internals = dev->data->dev_private; internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs; - internals->max_nb_sessions = init_params->max_nb_sessions; return 0; init_error: - KASUMI_LOG_ERR("driver %s: cryptodev_kasumi_create failed", + KASUMI_LOG(ERR, "driver %s: failed", init_params->name); cryptodev_kasumi_remove(vdev); @@ -573,8 +576,7 @@ cryptodev_kasumi_probe(struct rte_vdev_device *vdev) "", sizeof(struct kasumi_private), rte_socket_id(), - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS, - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS + RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS }; const char *name; const char *input_args; @@ -617,7 +619,11 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd_drv); RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_KASUMI_PMD, cryptodev_kasumi_pmd); RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_KASUMI_PMD, "max_nb_queue_pairs= " - "max_nb_sessions= " "socket_id="); -RTE_PMD_REGISTER_CRYPTO_DRIVER(kasumi_crypto_drv, cryptodev_kasumi_pmd_drv, - cryptodev_driver_id); +RTE_PMD_REGISTER_CRYPTO_DRIVER(kasumi_crypto_drv, + cryptodev_kasumi_pmd_drv.driver, cryptodev_driver_id); + +RTE_INIT(kasumi_init_log) +{ + kasumi_logtype_driver = rte_log_register("pmd.crypto.kasumi"); +} diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c index a388dbb6..9e4bf1b5 100644 --- a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c +++ b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2016-2017 Intel Corporation + * Copyright(c) 2016-2018 Intel Corporation */ #include @@ -126,7 +126,8 @@ kasumi_pmd_info_get(struct rte_cryptodev *dev, if (dev_info != NULL) { dev_info->driver_id = dev->driver_id; dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs; - dev_info->sym.max_nb_sessions = internals->max_nb_sessions; + /* No limit of number of sessions */ + dev_info->sym.max_nb_sessions = 0; dev_info->feature_flags = dev->feature_flags; dev_info->capabilities = kasumi_pmd_capabilities; } @@ -171,13 +172,13 @@ kasumi_pmd_qp_create_processed_ops_ring(struct kasumi_qp *qp, r = rte_ring_lookup(qp->name); if (r) { if (rte_ring_get_size(r) == ring_size) { - KASUMI_LOG_INFO("Reusing existing ring %s" + KASUMI_LOG(INFO, "Reusing existing 
ring %s" " for processed packets", qp->name); return r; } - KASUMI_LOG_ERR("Unable to reuse existing ring %s" + KASUMI_LOG(ERR, "Unable to reuse existing ring %s" " for processed packets", qp->name); return NULL; @@ -228,22 +229,6 @@ qp_setup_cleanup: return -1; } -/** Start queue pair */ -static int -kasumi_pmd_qp_start(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - -/** Stop queue pair */ -static int -kasumi_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - /** Return the number of allocated queue pairs */ static uint32_t kasumi_pmd_qp_count(struct rte_cryptodev *dev) @@ -253,14 +238,14 @@ kasumi_pmd_qp_count(struct rte_cryptodev *dev) /** Returns the size of the KASUMI session structure */ static unsigned -kasumi_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused) +kasumi_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) { return sizeof(struct kasumi_session); } /** Configure a KASUMI session from a crypto xform chain */ static int -kasumi_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, +kasumi_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused, struct rte_crypto_sym_xform *xform, struct rte_cryptodev_sym_session *sess, struct rte_mempool *mempool) @@ -269,26 +254,26 @@ kasumi_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, int ret; if (unlikely(sess == NULL)) { - KASUMI_LOG_ERR("invalid session struct"); + KASUMI_LOG(ERR, "invalid session struct"); return -EINVAL; } if (rte_mempool_get(mempool, &sess_private_data)) { - CDEV_LOG_ERR( - "Couldn't get object from session mempool"); + KASUMI_LOG(ERR, + "Couldn't get object from session mempool"); return -ENOMEM; } ret = kasumi_set_session_parameters(sess_private_data, xform); if (ret != 0) { - KASUMI_LOG_ERR("failed configure session parameters"); + KASUMI_LOG(ERR, "failed configure session parameters"); /* Return session to mempool */ rte_mempool_put(mempool, sess_private_data); return ret; } - set_session_private_data(sess, dev->driver_id, + set_sym_session_private_data(sess, dev->driver_id, sess_private_data); return 0; @@ -296,17 +281,17 @@ kasumi_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, /** Clear the memory of session so it doesn't leave key material behind */ static void -kasumi_pmd_session_clear(struct rte_cryptodev *dev, +kasumi_pmd_sym_session_clear(struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess) { uint8_t index = dev->driver_id; - void *sess_priv = get_session_private_data(sess, index); + void *sess_priv = get_sym_session_private_data(sess, index); /* Zero out the whole structure */ if (sess_priv) { memset(sess_priv, 0, sizeof(struct kasumi_session)); struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); - set_session_private_data(sess, index, NULL); + set_sym_session_private_data(sess, index, NULL); rte_mempool_put(sess_mp, sess_priv); } } @@ -324,13 +309,11 @@ struct rte_cryptodev_ops kasumi_pmd_ops = { .queue_pair_setup = kasumi_pmd_qp_setup, .queue_pair_release = kasumi_pmd_qp_release, - .queue_pair_start = kasumi_pmd_qp_start, - .queue_pair_stop = kasumi_pmd_qp_stop, .queue_pair_count = kasumi_pmd_qp_count, - .session_get_size = kasumi_pmd_session_get_size, - .session_configure = kasumi_pmd_session_configure, - .session_clear = kasumi_pmd_session_clear + .sym_session_get_size = kasumi_pmd_sym_session_get_size, + .sym_session_configure = kasumi_pmd_sym_session_configure, + 
.sym_session_clear = kasumi_pmd_sym_session_clear }; struct rte_cryptodev_ops *rte_kasumi_pmd_ops = &kasumi_pmd_ops; diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_private.h b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h index a397bee6..488777ca 100644 --- a/drivers/crypto/kasumi/rte_kasumi_pmd_private.h +++ b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2016-2017 Intel Corporation + * Copyright(c) 2016-2018 Intel Corporation */ #ifndef _RTE_KASUMI_PMD_PRIVATE_H_ @@ -10,25 +10,13 @@ #define CRYPTODEV_NAME_KASUMI_PMD crypto_kasumi /**< KASUMI PMD device name */ -#define KASUMI_LOG_ERR(fmt, args...) \ - RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_KASUMI_PMD), \ - __func__, __LINE__, ## args) +/** KASUMI PMD LOGTYPE DRIVER */ +int kasumi_logtype_driver; -#ifdef RTE_LIBRTE_KASUMI_DEBUG -#define KASUMI_LOG_INFO(fmt, args...) \ - RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_KASUMI_PMD), \ - __func__, __LINE__, ## args) - -#define KASUMI_LOG_DBG(fmt, args...) \ - RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_KASUMI_PMD), \ - __func__, __LINE__, ## args) -#else -#define KASUMI_LOG_INFO(fmt, args...) -#define KASUMI_LOG_DBG(fmt, args...) -#endif +#define KASUMI_LOG(level, fmt, ...) \ + rte_log(RTE_LOG_ ## level, kasumi_logtype_driver, \ + "%s() line %u: " fmt "\n", __func__, __LINE__, \ + ## __VA_ARGS__) #define KASUMI_DIGEST_LENGTH 4 @@ -36,8 +24,6 @@ struct kasumi_private { unsigned max_nb_queue_pairs; /**< Max number of queue pairs supported by device */ - unsigned max_nb_sessions; - /**< Max number of sessions supported by device */ }; /** KASUMI buffer queue pair */ diff --git a/drivers/crypto/meson.build b/drivers/crypto/meson.build index 17041ad8..d64ca418 100644 --- a/drivers/crypto/meson.build +++ b/drivers/crypto/meson.build @@ -1,7 +1,9 @@ # SPDX-License-Identifier: BSD-3-Clause # Copyright(c) 2017 Intel Corporation -drivers = ['qat', 'null', 'openssl'] +drivers = ['ccp', 'dpaa_sec', 'dpaa2_sec', 'mvsam', + 'null', 'openssl', 'qat', 'virtio'] + std_deps = ['cryptodev'] # cryptodev pulls in all other needed deps config_flag_fmt = 'RTE_LIBRTE_@0@_PMD' driver_name_fmt = 'rte_pmd_@0@' diff --git a/drivers/crypto/mrvl/Makefile b/drivers/crypto/mrvl/Makefile deleted file mode 100644 index bc5c2270..00000000 --- a/drivers/crypto/mrvl/Makefile +++ /dev/null @@ -1,67 +0,0 @@ -# BSD LICENSE -# -# Copyright(c) 2017 Marvell International Ltd. -# Copyright(c) 2017 Semihalf. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# * Neither the name of the copyright holder nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -include $(RTE_SDK)/mk/rte.vars.mk - -ifneq ($(MAKECMDGOALS),clean) -ifneq ($(MAKECMDGOALS),config) -ifeq ($(LIBMUSDK_PATH),) -$(error "Please define LIBMUSDK_PATH environment variable") -endif -endif -endif - -# library name -LIB = librte_pmd_mrvl_crypto.a - -# build flags -CFLAGS += -O3 -CFLAGS += $(WERROR_FLAGS) -CFLAGS += -I$(LIBMUSDK_PATH)/include -CFLAGS += -DMVCONF_TYPES_PUBLIC -CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC - -# library version -LIBABIVER := 1 - -# versioning export map -EXPORT_MAP := rte_pmd_mrvl_version.map - -# external library dependencies -LDLIBS += -L$(LIBMUSDK_PATH)/lib -lmusdk -LDLIBS += -lrte_bus_vdev - -# library source files -SRCS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += rte_mrvl_pmd.c -SRCS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += rte_mrvl_pmd_ops.c - -include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/crypto/mrvl/rte_mrvl_compat.h b/drivers/crypto/mrvl/rte_mrvl_compat.h deleted file mode 100644 index 22cd1840..00000000 --- a/drivers/crypto/mrvl/rte_mrvl_compat.h +++ /dev/null @@ -1,51 +0,0 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Marvell International Ltd. - * Copyright(c) 2017 Semihalf. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef _RTE_MRVL_COMPAT_H_ -#define _RTE_MRVL_COMPAT_H_ - -/* Unluckily, container_of is defined by both DPDK and MUSDK, - * we'll declare only one version. - * - * Note that it is not used in this PMD anyway. - */ -#ifdef container_of -#undef container_of -#endif -#include "env/mv_autogen_comp_flags.h" -#include "drivers/mv_sam.h" -#include "drivers/mv_sam_cio.h" -#include "drivers/mv_sam_session.h" - -#endif /* _RTE_MRVL_COMPAT_H_ */ diff --git a/drivers/crypto/mrvl/rte_mrvl_pmd.c b/drivers/crypto/mrvl/rte_mrvl_pmd.c deleted file mode 100644 index 31f3fe58..00000000 --- a/drivers/crypto/mrvl/rte_mrvl_pmd.c +++ /dev/null @@ -1,857 +0,0 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Marvell International Ltd. - * Copyright(c) 2017 Semihalf. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include -#include -#include -#include -#include -#include -#include - -#include "rte_mrvl_pmd_private.h" - -#define MRVL_MUSDK_DMA_MEMSIZE 41943040 - -static uint8_t cryptodev_driver_id; - -/** - * Flag if particular crypto algorithm is supported by PMD/MUSDK. - * - * The idea is to have Not Supported value as default (0). - * This way we need only to define proper map sizes, - * non-initialized entries will be by default not supported. - */ -enum algo_supported { - ALGO_NOT_SUPPORTED = 0, - ALGO_SUPPORTED = 1, -}; - -/** Map elements for cipher mapping.*/ -struct cipher_params_mapping { - enum algo_supported supported; /**< On/Off switch */ - enum sam_cipher_alg cipher_alg; /**< Cipher algorithm */ - enum sam_cipher_mode cipher_mode; /**< Cipher mode */ - unsigned int max_key_len; /**< Maximum key length (in bytes)*/ -} -/* We want to squeeze in multiple maps into the cache line. */ -__rte_aligned(32); - -/** Map elements for auth mapping.*/ -struct auth_params_mapping { - enum algo_supported supported; /**< On/off switch */ - enum sam_auth_alg auth_alg; /**< Auth algorithm */ -} -/* We want to squeeze in multiple maps into the cache line. */ -__rte_aligned(32); - -/** - * Map of supported cipher algorithms. 
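/*
 * Illustrative sketch (editor's addition, outside the patch): the removed
 * cipher_map/auth_map/aead_map tables below index capabilities directly by
 * the rte_crypto_* enum value, relying on designated initializers so that
 * unlisted algorithms default to "not supported". The lookup side of that
 * pattern, with illustrative names, is:
 */
struct algo_caps {
	int supported;			/* zero-initialized: not supported */
	unsigned int max_key_len;	/* bytes */
};

static const struct algo_caps caps_map[RTE_CRYPTO_CIPHER_LIST_END] = {
	[RTE_CRYPTO_CIPHER_AES_CBC] = { .supported = 1, .max_key_len = 32 },
};

static inline const struct algo_caps *
caps_lookup(enum rte_crypto_cipher_algorithm algo)
{
	/* Bounds-check with ">=" so the table size itself is rejected too. */
	if ((unsigned int)algo >= RTE_DIM(caps_map) ||
			!caps_map[algo].supported)
		return NULL;

	return &caps_map[algo];
}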
- */ -static const -struct cipher_params_mapping cipher_map[RTE_CRYPTO_CIPHER_LIST_END] = { - [RTE_CRYPTO_CIPHER_3DES_CBC] = { - .supported = ALGO_SUPPORTED, - .cipher_alg = SAM_CIPHER_3DES, - .cipher_mode = SAM_CIPHER_CBC, - .max_key_len = BITS2BYTES(192) }, - [RTE_CRYPTO_CIPHER_3DES_CTR] = { - .supported = ALGO_SUPPORTED, - .cipher_alg = SAM_CIPHER_3DES, - .cipher_mode = SAM_CIPHER_CTR, - .max_key_len = BITS2BYTES(192) }, - [RTE_CRYPTO_CIPHER_3DES_ECB] = { - .supported = ALGO_SUPPORTED, - .cipher_alg = SAM_CIPHER_3DES, - .cipher_mode = SAM_CIPHER_ECB, - .max_key_len = BITS2BYTES(192) }, - [RTE_CRYPTO_CIPHER_AES_CBC] = { - .supported = ALGO_SUPPORTED, - .cipher_alg = SAM_CIPHER_AES, - .cipher_mode = SAM_CIPHER_CBC, - .max_key_len = BITS2BYTES(256) }, - [RTE_CRYPTO_CIPHER_AES_CTR] = { - .supported = ALGO_SUPPORTED, - .cipher_alg = SAM_CIPHER_AES, - .cipher_mode = SAM_CIPHER_CTR, - .max_key_len = BITS2BYTES(256) }, -}; - -/** - * Map of supported auth algorithms. - */ -static const -struct auth_params_mapping auth_map[RTE_CRYPTO_AUTH_LIST_END] = { - [RTE_CRYPTO_AUTH_MD5_HMAC] = { - .supported = ALGO_SUPPORTED, - .auth_alg = SAM_AUTH_HMAC_MD5 }, - [RTE_CRYPTO_AUTH_MD5] = { - .supported = ALGO_SUPPORTED, - .auth_alg = SAM_AUTH_HASH_MD5 }, - [RTE_CRYPTO_AUTH_SHA1_HMAC] = { - .supported = ALGO_SUPPORTED, - .auth_alg = SAM_AUTH_HMAC_SHA1 }, - [RTE_CRYPTO_AUTH_SHA1] = { - .supported = ALGO_SUPPORTED, - .auth_alg = SAM_AUTH_HASH_SHA1 }, - [RTE_CRYPTO_AUTH_SHA224] = { - .supported = ALGO_SUPPORTED, - .auth_alg = SAM_AUTH_HASH_SHA2_224 }, - [RTE_CRYPTO_AUTH_SHA256_HMAC] = { - .supported = ALGO_SUPPORTED, - .auth_alg = SAM_AUTH_HMAC_SHA2_256 }, - [RTE_CRYPTO_AUTH_SHA256] = { - .supported = ALGO_SUPPORTED, - .auth_alg = SAM_AUTH_HASH_SHA2_256 }, - [RTE_CRYPTO_AUTH_SHA384_HMAC] = { - .supported = ALGO_SUPPORTED, - .auth_alg = SAM_AUTH_HMAC_SHA2_384 }, - [RTE_CRYPTO_AUTH_SHA384] = { - .supported = ALGO_SUPPORTED, - .auth_alg = SAM_AUTH_HASH_SHA2_384 }, - [RTE_CRYPTO_AUTH_SHA512_HMAC] = { - .supported = ALGO_SUPPORTED, - .auth_alg = SAM_AUTH_HMAC_SHA2_512 }, - [RTE_CRYPTO_AUTH_SHA512] = { - .supported = ALGO_SUPPORTED, - .auth_alg = SAM_AUTH_HASH_SHA2_512 }, - [RTE_CRYPTO_AUTH_AES_GMAC] = { - .supported = ALGO_SUPPORTED, - .auth_alg = SAM_AUTH_AES_GMAC }, -}; - -/** - * Map of supported aead algorithms. - */ -static const -struct cipher_params_mapping aead_map[RTE_CRYPTO_AEAD_LIST_END] = { - [RTE_CRYPTO_AEAD_AES_GCM] = { - .supported = ALGO_SUPPORTED, - .cipher_alg = SAM_CIPHER_AES, - .cipher_mode = SAM_CIPHER_GCM, - .max_key_len = BITS2BYTES(256) }, -}; - -/* - *----------------------------------------------------------------------------- - * Forward declarations. - *----------------------------------------------------------------------------- - */ -static int cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev); - -/* - *----------------------------------------------------------------------------- - * Session Preparation. - *----------------------------------------------------------------------------- - */ - -/** - * Get xform chain order. - * - * @param xform Pointer to configuration structure chain for crypto operations. - * @returns Order of crypto operations. 
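/*
 * Illustrative sketch (editor's addition, outside the patch):
 * mrvl_crypto_get_chain_order(), deleted just below, only has to recognize
 * chains of at most two xforms linked through ->next. One branch of that
 * classification, written out standalone:
 */
static inline int
is_cipher_then_auth(const struct rte_crypto_sym_xform *xform)
{
	/* Exactly two xforms: CIPHER first, AUTH second, then end of chain. */
	return xform != NULL && xform->next != NULL &&
	       xform->next->next == NULL &&
	       xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	       xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH;
}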
- */ -static enum mrvl_crypto_chain_order -mrvl_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform) -{ - /* Currently, Marvell supports max 2 operations in chain */ - if (xform->next != NULL && xform->next->next != NULL) - return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED; - - if (xform->next != NULL) { - if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) && - (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)) - return MRVL_CRYPTO_CHAIN_AUTH_CIPHER; - - if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) && - (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)) - return MRVL_CRYPTO_CHAIN_CIPHER_AUTH; - } else { - if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) - return MRVL_CRYPTO_CHAIN_AUTH_ONLY; - - if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) - return MRVL_CRYPTO_CHAIN_CIPHER_ONLY; - - if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) - return MRVL_CRYPTO_CHAIN_COMBINED; - } - return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED; -} - -/** - * Set session parameters for cipher part. - * - * @param sess Crypto session pointer. - * @param cipher_xform Pointer to configuration structure for cipher operations. - * @returns 0 in case of success, negative value otherwise. - */ -static int -mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess, - const struct rte_crypto_sym_xform *cipher_xform) -{ - /* Make sure we've got proper struct */ - if (cipher_xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) { - MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!"); - return -EINVAL; - } - - /* See if map data is present and valid */ - if ((cipher_xform->cipher.algo > RTE_DIM(cipher_map)) || - (cipher_map[cipher_xform->cipher.algo].supported - != ALGO_SUPPORTED)) { - MRVL_CRYPTO_LOG_ERR("Cipher algorithm not supported!"); - return -EINVAL; - } - - sess->cipher_iv_offset = cipher_xform->cipher.iv.offset; - - sess->sam_sess_params.dir = - (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ? - SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT; - sess->sam_sess_params.cipher_alg = - cipher_map[cipher_xform->cipher.algo].cipher_alg; - sess->sam_sess_params.cipher_mode = - cipher_map[cipher_xform->cipher.algo].cipher_mode; - - /* Assume IV will be passed together with data. */ - sess->sam_sess_params.cipher_iv = NULL; - - /* Get max key length. */ - if (cipher_xform->cipher.key.length > - cipher_map[cipher_xform->cipher.algo].max_key_len) { - MRVL_CRYPTO_LOG_ERR("Wrong key length!"); - return -EINVAL; - } - - sess->sam_sess_params.cipher_key_len = cipher_xform->cipher.key.length; - sess->sam_sess_params.cipher_key = cipher_xform->cipher.key.data; - - return 0; -} - -/** - * Set session parameters for authentication part. - * - * @param sess Crypto session pointer. - * @param auth_xform Pointer to configuration structure for auth operations. - * @returns 0 in case of success, negative value otherwise. - */ -static int -mrvl_crypto_set_auth_session_parameters(struct mrvl_crypto_session *sess, - const struct rte_crypto_sym_xform *auth_xform) -{ - /* Make sure we've got proper struct */ - if (auth_xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) { - MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!"); - return -EINVAL; - } - - /* See if map data is present and valid */ - if ((auth_xform->auth.algo > RTE_DIM(auth_map)) || - (auth_map[auth_xform->auth.algo].supported != ALGO_SUPPORTED)) { - MRVL_CRYPTO_LOG_ERR("Auth algorithm not supported!"); - return -EINVAL; - } - - sess->sam_sess_params.dir = - (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ? 
- SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT; - sess->sam_sess_params.auth_alg = - auth_map[auth_xform->auth.algo].auth_alg; - sess->sam_sess_params.u.basic.auth_icv_len = - auth_xform->auth.digest_length; - /* auth_key must be NULL if auth algorithm does not use HMAC */ - sess->sam_sess_params.auth_key = auth_xform->auth.key.length ? - auth_xform->auth.key.data : NULL; - sess->sam_sess_params.auth_key_len = auth_xform->auth.key.length; - - return 0; -} - -/** - * Set session parameters for aead part. - * - * @param sess Crypto session pointer. - * @param aead_xform Pointer to configuration structure for aead operations. - * @returns 0 in case of success, negative value otherwise. - */ -static int -mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess, - const struct rte_crypto_sym_xform *aead_xform) -{ - /* Make sure we've got proper struct */ - if (aead_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) { - MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!"); - return -EINVAL; - } - - /* See if map data is present and valid */ - if ((aead_xform->aead.algo > RTE_DIM(aead_map)) || - (aead_map[aead_xform->aead.algo].supported - != ALGO_SUPPORTED)) { - MRVL_CRYPTO_LOG_ERR("AEAD algorithm not supported!"); - return -EINVAL; - } - - sess->sam_sess_params.dir = - (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ? - SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT; - sess->sam_sess_params.cipher_alg = - aead_map[aead_xform->aead.algo].cipher_alg; - sess->sam_sess_params.cipher_mode = - aead_map[aead_xform->aead.algo].cipher_mode; - - /* Assume IV will be passed together with data. */ - sess->sam_sess_params.cipher_iv = NULL; - - /* Get max key length. */ - if (aead_xform->aead.key.length > - aead_map[aead_xform->aead.algo].max_key_len) { - MRVL_CRYPTO_LOG_ERR("Wrong key length!"); - return -EINVAL; - } - - sess->sam_sess_params.cipher_key = aead_xform->aead.key.data; - sess->sam_sess_params.cipher_key_len = aead_xform->aead.key.length; - - if (sess->sam_sess_params.cipher_mode == SAM_CIPHER_GCM) - sess->sam_sess_params.auth_alg = SAM_AUTH_AES_GCM; - - sess->sam_sess_params.u.basic.auth_icv_len = - aead_xform->aead.digest_length; - - sess->sam_sess_params.u.basic.auth_aad_len = - aead_xform->aead.aad_length; - - return 0; -} - -/** - * Parse crypto transform chain and setup session parameters. - * - * @param dev Pointer to crypto device - * @param sess Poiner to crypto session - * @param xform Pointer to configuration structure chain for crypto operations. - * @returns 0 in case of success, negative value otherwise. 
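/*
 * Illustrative sketch (editor's addition, outside the patch):
 * mrvl_crypto_set_session_parameters(), below, maps
 * MRVL_CRYPTO_CHAIN_COMBINED to a single AEAD xform. For reference, this is
 * roughly how an application fills such an xform; the key pointer, sizes
 * and AAD length are placeholders (assumes <rte_cryptodev.h>, <string.h>):
 */
static void
build_aes_gcm_xform(struct rte_crypto_sym_xform *xform, const uint8_t *key)
{
	memset(xform, 0, sizeof(*xform));
	xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
	xform->next = NULL;		/* AEAD never chains */
	xform->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	xform->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	xform->aead.key.data = key;
	xform->aead.key.length = 16;	/* AES-128 */
	/* IV lives in the op's private area, right after the sym op. */
	xform->aead.iv.offset = sizeof(struct rte_crypto_op) +
				sizeof(struct rte_crypto_sym_op);
	xform->aead.iv.length = 12;	/* 96-bit GCM nonce */
	xform->aead.digest_length = 16;
	xform->aead.aad_length = 8;
}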
- */ -int -mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess, - const struct rte_crypto_sym_xform *xform) -{ - const struct rte_crypto_sym_xform *cipher_xform = NULL; - const struct rte_crypto_sym_xform *auth_xform = NULL; - const struct rte_crypto_sym_xform *aead_xform = NULL; - - /* Filter out spurious/broken requests */ - if (xform == NULL) - return -EINVAL; - - sess->chain_order = mrvl_crypto_get_chain_order(xform); - switch (sess->chain_order) { - case MRVL_CRYPTO_CHAIN_CIPHER_AUTH: - cipher_xform = xform; - auth_xform = xform->next; - break; - case MRVL_CRYPTO_CHAIN_AUTH_CIPHER: - auth_xform = xform; - cipher_xform = xform->next; - break; - case MRVL_CRYPTO_CHAIN_CIPHER_ONLY: - cipher_xform = xform; - break; - case MRVL_CRYPTO_CHAIN_AUTH_ONLY: - auth_xform = xform; - break; - case MRVL_CRYPTO_CHAIN_COMBINED: - aead_xform = xform; - break; - default: - return -EINVAL; - } - - if ((cipher_xform != NULL) && - (mrvl_crypto_set_cipher_session_parameters( - sess, cipher_xform) < 0)) { - MRVL_CRYPTO_LOG_ERR("Invalid/unsupported cipher parameters"); - return -EINVAL; - } - - if ((auth_xform != NULL) && - (mrvl_crypto_set_auth_session_parameters( - sess, auth_xform) < 0)) { - MRVL_CRYPTO_LOG_ERR("Invalid/unsupported auth parameters"); - return -EINVAL; - } - - if ((aead_xform != NULL) && - (mrvl_crypto_set_aead_session_parameters( - sess, aead_xform) < 0)) { - MRVL_CRYPTO_LOG_ERR("Invalid/unsupported aead parameters"); - return -EINVAL; - } - - return 0; -} - -/* - *----------------------------------------------------------------------------- - * Process Operations - *----------------------------------------------------------------------------- - */ - -/** - * Prepare a single request. - * - * This function basically translates DPDK crypto request into one - * understandable by MUDSK's SAM. If this is a first request in a session, - * it starts the session. - * - * @param request Pointer to pre-allocated && reset request buffer [Out]. - * @param src_bd Pointer to pre-allocated source descriptor [Out]. - * @param dst_bd Pointer to pre-allocated destination descriptor [Out]. - * @param op Pointer to DPDK crypto operation struct [In]. - */ -static inline int -mrvl_request_prepare(struct sam_cio_op_params *request, - struct sam_buf_info *src_bd, - struct sam_buf_info *dst_bd, - struct rte_crypto_op *op) -{ - struct mrvl_crypto_session *sess; - struct rte_mbuf *dst_mbuf; - uint8_t *digest; - - if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) { - MRVL_CRYPTO_LOG_ERR("MRVL CRYPTO PMD only supports session " - "oriented requests, op (%p) is sessionless.", - op); - return -EINVAL; - } - - sess = (struct mrvl_crypto_session *)get_session_private_data( - op->sym->session, cryptodev_driver_id); - if (unlikely(sess == NULL)) { - MRVL_CRYPTO_LOG_ERR("Session was not created for this device"); - return -EINVAL; - } - - /* - * If application delivered us null dst buffer, it means it expects - * us to deliver the result in src buffer. - */ - dst_mbuf = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src; - - request->sa = sess->sam_sess; - request->cookie = op; - - /* Single buffers only, sorry. */ - request->num_bufs = 1; - request->src = src_bd; - src_bd->vaddr = rte_pktmbuf_mtod(op->sym->m_src, void *); - src_bd->paddr = rte_pktmbuf_iova(op->sym->m_src); - src_bd->len = rte_pktmbuf_data_len(op->sym->m_src); - - /* Empty source. */ - if (rte_pktmbuf_data_len(op->sym->m_src) == 0) { - /* EIP does not support 0 length buffers. 
*/ - MRVL_CRYPTO_LOG_ERR("Buffer length == 0 not supported!"); - return -1; - } - - /* Empty destination. */ - if (rte_pktmbuf_data_len(dst_mbuf) == 0) { - /* Make dst buffer fit at least source data. */ - if (rte_pktmbuf_append(dst_mbuf, - rte_pktmbuf_data_len(op->sym->m_src)) == NULL) { - MRVL_CRYPTO_LOG_ERR("Unable to set big enough dst buffer!"); - return -1; - } - } - - request->dst = dst_bd; - dst_bd->vaddr = rte_pktmbuf_mtod(dst_mbuf, void *); - dst_bd->paddr = rte_pktmbuf_iova(dst_mbuf); - - /* - * We can use all available space in dst_mbuf, - * not only what's used currently. - */ - dst_bd->len = dst_mbuf->buf_len - rte_pktmbuf_headroom(dst_mbuf); - - if (sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED) { - request->cipher_len = op->sym->aead.data.length; - request->cipher_offset = op->sym->aead.data.offset; - request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *, - sess->cipher_iv_offset); - - request->auth_aad = op->sym->aead.aad.data; - request->auth_offset = request->cipher_offset; - request->auth_len = request->cipher_len; - } else { - request->cipher_len = op->sym->cipher.data.length; - request->cipher_offset = op->sym->cipher.data.offset; - request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *, - sess->cipher_iv_offset); - - request->auth_offset = op->sym->auth.data.offset; - request->auth_len = op->sym->auth.data.length; - } - - digest = sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED ? - op->sym->aead.digest.data : op->sym->auth.digest.data; - if (digest == NULL) { - /* No auth - no worry. */ - return 0; - } - - request->auth_icv_offset = request->auth_offset + request->auth_len; - - /* - * EIP supports only scenarios where ICV(digest buffer) is placed at - * auth_icv_offset. Any other placement means risking errors. - */ - if (sess->sam_sess_params.dir == SAM_DIR_ENCRYPT) { - /* - * This should be the most common case anyway, - * EIP will overwrite DST buffer at auth_icv_offset. - */ - if (rte_pktmbuf_mtod_offset( - dst_mbuf, uint8_t *, - request->auth_icv_offset) == digest) { - return 0; - } - } else {/* sess->sam_sess_params.dir == SAM_DIR_DECRYPT */ - /* - * EIP will look for digest at auth_icv_offset - * offset in SRC buffer. - */ - if (rte_pktmbuf_mtod_offset( - op->sym->m_src, uint8_t *, - request->auth_icv_offset) == digest) { - return 0; - } - } - - /* - * If we landed here it means that digest pointer is - * at different than expected place. - */ - return -1; -} - -/* - *----------------------------------------------------------------------------- - * PMD Framework handlers - *----------------------------------------------------------------------------- - */ - -/** - * Enqueue burst. - * - * @param queue_pair Pointer to queue pair. - * @param ops Pointer to ops requests array. - * @param nb_ops Number of elements in ops requests array. - * @returns Number of elements consumed from ops. - */ -static uint16_t -mrvl_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops, - uint16_t nb_ops) -{ - uint16_t iter_ops = 0; - uint16_t to_enq = 0; - uint16_t consumed = 0; - int ret; - struct sam_cio_op_params requests[nb_ops]; - /* - * DPDK uses single fragment buffers, so we can KISS descriptors. - * SAM does not store bd pointers, so on-stack scope will be enough. - */ - struct sam_buf_info src_bd[nb_ops]; - struct sam_buf_info dst_bd[nb_ops]; - struct mrvl_crypto_qp *qp = (struct mrvl_crypto_qp *)queue_pair; - - if (nb_ops == 0) - return 0; - - /* Prepare the burst. 
*/ - memset(&requests, 0, sizeof(requests)); - - /* Iterate through */ - for (; iter_ops < nb_ops; ++iter_ops) { - if (mrvl_request_prepare(&requests[iter_ops], - &src_bd[iter_ops], - &dst_bd[iter_ops], - ops[iter_ops]) < 0) { - MRVL_CRYPTO_LOG_ERR( - "Error while parameters preparation!"); - qp->stats.enqueue_err_count++; - ops[iter_ops]->status = RTE_CRYPTO_OP_STATUS_ERROR; - - /* - * Number of handled ops is increased - * (even if the result of handling is error). - */ - ++consumed; - break; - } - - ops[iter_ops]->status = - RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; - - /* Increase the number of ops to enqueue. */ - ++to_enq; - } /* for (; iter_ops < nb_ops;... */ - - if (to_enq > 0) { - /* Send the burst */ - ret = sam_cio_enq(qp->cio, requests, &to_enq); - consumed += to_enq; - if (ret < 0) { - /* - * Trust SAM that in this case returned value will be at - * some point correct (now it is returned unmodified). - */ - qp->stats.enqueue_err_count += to_enq; - for (iter_ops = 0; iter_ops < to_enq; ++iter_ops) - ops[iter_ops]->status = - RTE_CRYPTO_OP_STATUS_ERROR; - } - } - - qp->stats.enqueued_count += to_enq; - return consumed; -} - -/** - * Dequeue burst. - * - * @param queue_pair Pointer to queue pair. - * @param ops Pointer to ops requests array. - * @param nb_ops Number of elements in ops requests array. - * @returns Number of elements dequeued. - */ -static uint16_t -mrvl_crypto_pmd_dequeue_burst(void *queue_pair, - struct rte_crypto_op **ops, - uint16_t nb_ops) -{ - int ret; - struct mrvl_crypto_qp *qp = queue_pair; - struct sam_cio *cio = qp->cio; - struct sam_cio_op_result results[nb_ops]; - uint16_t i; - - ret = sam_cio_deq(cio, results, &nb_ops); - if (ret < 0) { - /* Count all dequeued as error. */ - qp->stats.dequeue_err_count += nb_ops; - - /* But act as they were dequeued anyway*/ - qp->stats.dequeued_count += nb_ops; - - return 0; - } - - /* Unpack and check results. */ - for (i = 0; i < nb_ops; ++i) { - ops[i] = results[i].cookie; - - switch (results[i].status) { - case SAM_CIO_OK: - ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS; - break; - case SAM_CIO_ERR_ICV: - MRVL_CRYPTO_LOG_DBG("CIO returned SAM_CIO_ERR_ICV."); - ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; - break; - default: - MRVL_CRYPTO_LOG_DBG( - "CIO returned Error: %d", results[i].status); - ops[i]->status = RTE_CRYPTO_OP_STATUS_ERROR; - break; - } - } - - qp->stats.dequeued_count += nb_ops; - return nb_ops; -} - -/** - * Create a new crypto device. - * - * @param name Driver name. - * @param vdev Pointer to device structure. - * @param init_params Pointer to initialization parameters. - * @returns 0 in case of success, negative value otherwise. - */ -static int -cryptodev_mrvl_crypto_create(const char *name, - struct rte_vdev_device *vdev, - struct rte_cryptodev_pmd_init_params *init_params) -{ - struct rte_cryptodev *dev; - struct mrvl_crypto_private *internals; - struct sam_init_params sam_params; - int ret; - - dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params); - if (dev == NULL) { - MRVL_CRYPTO_LOG_ERR("failed to create cryptodev vdev"); - goto init_error; - } - - dev->driver_id = cryptodev_driver_id; - dev->dev_ops = rte_mrvl_crypto_pmd_ops; - - /* Register rx/tx burst functions for data path. 
*/ - dev->enqueue_burst = mrvl_crypto_pmd_enqueue_burst; - dev->dequeue_burst = mrvl_crypto_pmd_dequeue_burst; - - dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | - RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | - RTE_CRYPTODEV_FF_HW_ACCELERATED; - - /* Set vector instructions mode supported */ - internals = dev->data->dev_private; - - internals->max_nb_qpairs = init_params->max_nb_queue_pairs; - internals->max_nb_sessions = init_params->max_nb_sessions; - - /* - * ret == -EEXIST is correct, it means DMA - * has been already initialized. - */ - ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE); - if (ret < 0) { - if (ret != -EEXIST) - return ret; - - MRVL_CRYPTO_LOG_INFO( - "DMA memory has been already initialized by a different driver."); - } - - sam_params.max_num_sessions = internals->max_nb_sessions; - - return sam_init(&sam_params); - -init_error: - MRVL_CRYPTO_LOG_ERR( - "driver %s: %s failed", init_params->name, __func__); - - cryptodev_mrvl_crypto_uninit(vdev); - return -EFAULT; -} - -/** - * Initialize the crypto device. - * - * @param vdev Pointer to device structure. - * @returns 0 in case of success, negative value otherwise. - */ -static int -cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev) -{ - struct rte_cryptodev_pmd_init_params init_params = { }; - const char *name, *args; - int ret; - - name = rte_vdev_device_name(vdev); - if (name == NULL) - return -EINVAL; - args = rte_vdev_device_args(vdev); - - init_params.private_data_size = sizeof(struct mrvl_crypto_private); - init_params.max_nb_queue_pairs = sam_get_num_inst() * SAM_HW_RING_NUM; - init_params.max_nb_sessions = - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS; - init_params.socket_id = rte_socket_id(); - - ret = rte_cryptodev_pmd_parse_input_args(&init_params, args); - if (ret) { - RTE_LOG(ERR, PMD, - "Failed to parse initialisation arguments[%s]\n", - args); - return -EINVAL; - } - - return cryptodev_mrvl_crypto_create(name, vdev, &init_params); -} - -/** - * Uninitialize the crypto device - * - * @param vdev Pointer to device structure. - * @returns 0 in case of success, negative value otherwise. - */ -static int -cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev) -{ - struct rte_cryptodev *cryptodev; - const char *name = rte_vdev_device_name(vdev); - - if (name == NULL) - return -EINVAL; - - RTE_LOG(INFO, PMD, - "Closing Marvell crypto device %s on numa socket %u\n", - name, rte_socket_id()); - - sam_deinit(); - - cryptodev = rte_cryptodev_pmd_get_named_dev(name); - if (cryptodev == NULL) - return -ENODEV; - - return rte_cryptodev_pmd_destroy(cryptodev); -} - -/** - * Basic driver handlers for use in the constructor. - */ -static struct rte_vdev_driver cryptodev_mrvl_pmd_drv = { - .probe = cryptodev_mrvl_crypto_init, - .remove = cryptodev_mrvl_crypto_uninit -}; - -static struct cryptodev_driver mrvl_crypto_drv; - -/* Register the driver in constructor. */ -RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_MRVL_PMD, cryptodev_mrvl_pmd_drv); -RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MRVL_PMD, - "max_nb_queue_pairs= " - "max_nb_sessions= " - "socket_id="); -RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv, - cryptodev_driver_id); diff --git a/drivers/crypto/mrvl/rte_mrvl_pmd_ops.c b/drivers/crypto/mrvl/rte_mrvl_pmd_ops.c deleted file mode 100644 index a1de33ae..00000000 --- a/drivers/crypto/mrvl/rte_mrvl_pmd_ops.c +++ /dev/null @@ -1,778 +0,0 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Marvell International Ltd. - * Copyright(c) 2017 Semihalf. - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include - -#include -#include -#include - -#include "rte_mrvl_pmd_private.h" - -/** - * Capabilities list to be used in reporting to DPDK. - */ -static const struct rte_cryptodev_capabilities - mrvl_crypto_pmd_capabilities[] = { - { /* MD5 HMAC */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_MD5_HMAC, - .block_size = 64, - .key_size = { - .min = 1, - .max = 64, - .increment = 1 - }, - .digest_size = { - .min = 16, - .max = 16, - .increment = 0 - }, - }, } - }, } - }, - { /* MD5 */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_MD5, - .block_size = 64, - .key_size = { - .min = 0, - .max = 0, - .increment = 0 - }, - .digest_size = { - .min = 16, - .max = 16, - .increment = 0 - }, - }, } - }, } - }, - { /* SHA1 HMAC */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, - .block_size = 64, - .key_size = { - .min = 1, - .max = 64, - .increment = 1 - }, - .digest_size = { - .min = 20, - .max = 20, - .increment = 0 - }, - }, } - }, } - }, - { /* SHA1 */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_SHA1, - .block_size = 64, - .key_size = { - .min = 0, - .max = 0, - .increment = 0 - }, - .digest_size = { - .min = 20, - .max = 20, - .increment = 0 - }, - }, } - }, } - }, - { /* SHA224 */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_SHA224, - .block_size = 64, - .key_size = { - .min = 0, - .max = 0, - .increment = 0 - }, - .digest_size = { - .min = 28, - .max = 28, - .increment = 0 - }, - }, } - }, } - }, - { /* SHA256 HMAC */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = 
RTE_CRYPTO_AUTH_SHA256_HMAC, - .block_size = 64, - .key_size = { - .min = 1, - .max = 64, - .increment = 1 - }, - .digest_size = { - .min = 32, - .max = 32, - .increment = 0 - }, - }, } - }, } - }, - { /* SHA256 */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_SHA256, - .block_size = 64, - .key_size = { - .min = 0, - .max = 0, - .increment = 0 - }, - .digest_size = { - .min = 32, - .max = 32, - .increment = 0 - }, - }, } - }, } - }, - { /* SHA384 HMAC */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, - .block_size = 128, - .key_size = { - .min = 1, - .max = 128, - .increment = 1 - }, - .digest_size = { - .min = 48, - .max = 48, - .increment = 0 - }, - }, } - }, } - }, - { /* SHA384 */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_SHA384, - .block_size = 128, - .key_size = { - .min = 0, - .max = 0, - .increment = 0 - }, - .digest_size = { - .min = 48, - .max = 48, - .increment = 0 - }, - }, } - }, } - }, - { /* SHA512 HMAC */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, - .block_size = 128, - .key_size = { - .min = 1, - .max = 128, - .increment = 1 - }, - .digest_size = { - .min = 64, - .max = 64, - .increment = 0 - }, - }, } - }, } - }, - { /* SHA512 */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_SHA512, - .block_size = 128, - .key_size = { - .min = 0, - .max = 0, - .increment = 0 - }, - .digest_size = { - .min = 64, - .max = 64, - .increment = 0 - }, - }, } - }, } - }, - { /* AES CBC */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, - {.cipher = { - .algo = RTE_CRYPTO_CIPHER_AES_CBC, - .block_size = 16, - .key_size = { - .min = 16, - .max = 32, - .increment = 8 - }, - .iv_size = { - .min = 16, - .max = 16, - .increment = 0 - } - }, } - }, } - }, - { /* AES CTR */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, - {.cipher = { - .algo = RTE_CRYPTO_CIPHER_AES_CTR, - .block_size = 16, - .key_size = { - .min = 16, - .max = 32, - .increment = 8 - }, - .iv_size = { - .min = 16, - .max = 16, - .increment = 0 - } - }, } - }, } - }, - { /* AES GCM */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, - {.aead = { - .algo = RTE_CRYPTO_AEAD_AES_GCM, - .block_size = 16, - .key_size = { - .min = 16, - .max = 32, - .increment = 8 - }, - .digest_size = { - .min = 16, - .max = 16, - .increment = 0 - }, - .aad_size = { - .min = 8, - .max = 12, - .increment = 4 - }, - .iv_size = { - .min = 12, - .max = 16, - .increment = 4 - } - }, } - }, } - }, - { /* AES GMAC (AUTH) */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, - {.auth = { - .algo = RTE_CRYPTO_AUTH_AES_GMAC, - .block_size = 16, - .key_size = { - .min = 16, - .max = 32, - .increment = 8 - }, - .digest_size = { - .min = 16, - .max = 16, - .increment = 0 - }, - .iv_size = { - .min = 8, - .max = 65532, - .increment = 4 - } - }, } - }, } - }, - { /* 3DES CBC */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, - {.cipher = { - .algo = RTE_CRYPTO_CIPHER_3DES_CBC, - .block_size = 8, - 
.key_size = { - .min = 24, - .max = 24, - .increment = 0 - }, - .iv_size = { - .min = 8, - .max = 8, - .increment = 0 - } - }, } - }, } - }, - { /* 3DES CTR */ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, - {.sym = { - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, - {.cipher = { - .algo = RTE_CRYPTO_CIPHER_3DES_CTR, - .block_size = 8, - .key_size = { - .min = 24, - .max = 24, - .increment = 0 - }, - .iv_size = { - .min = 8, - .max = 8, - .increment = 0 - } - }, } - }, } - }, - - RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() -}; - - -/** - * Configure device (PMD ops callback). - * - * @param dev Pointer to the device structure. - * @param config Pointer to configuration structure. - * @returns 0. Always. - */ -static int -mrvl_crypto_pmd_config(__rte_unused struct rte_cryptodev *dev, - __rte_unused struct rte_cryptodev_config *config) -{ - return 0; -} - -/** - * Start device (PMD ops callback). - * - * @param dev Pointer to the device structure. - * @returns 0. Always. - */ -static int -mrvl_crypto_pmd_start(__rte_unused struct rte_cryptodev *dev) -{ - return 0; -} - -/** - * Stop device (PMD ops callback). - * - * @param dev Pointer to the device structure. - * @returns 0. Always. - */ -static void -mrvl_crypto_pmd_stop(__rte_unused struct rte_cryptodev *dev) -{ -} - -/** - * Get device statistics (PMD ops callback). - * - * @param dev Pointer to the device structure. - * @param stats Pointer to statistics structure [out]. - */ -static void -mrvl_crypto_pmd_stats_get(struct rte_cryptodev *dev, - struct rte_cryptodev_stats *stats) -{ - int qp_id; - - for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { - struct mrvl_crypto_qp *qp = dev->data->queue_pairs[qp_id]; - - stats->enqueued_count += qp->stats.enqueued_count; - stats->dequeued_count += qp->stats.dequeued_count; - - stats->enqueue_err_count += qp->stats.enqueue_err_count; - stats->dequeue_err_count += qp->stats.dequeue_err_count; - } -} - -/** - * Reset device statistics (PMD ops callback). - * - * @param dev Pointer to the device structure. - */ -static void -mrvl_crypto_pmd_stats_reset(struct rte_cryptodev *dev) -{ - int qp_id; - - for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { - struct mrvl_crypto_qp *qp = dev->data->queue_pairs[qp_id]; - - memset(&qp->stats, 0, sizeof(qp->stats)); - } -} - -/** - * Get device info (PMD ops callback). - * - * @param dev Pointer to the device structure. - * @param dev_info Pointer to the device info structure [out]. - */ -static void -mrvl_crypto_pmd_info_get(struct rte_cryptodev *dev, - struct rte_cryptodev_info *dev_info) -{ - struct mrvl_crypto_private *internals = dev->data->dev_private; - - if (dev_info != NULL) { - dev_info->driver_id = dev->driver_id; - dev_info->feature_flags = dev->feature_flags; - dev_info->capabilities = mrvl_crypto_pmd_capabilities; - dev_info->max_nb_queue_pairs = internals->max_nb_qpairs; - dev_info->sym.max_nb_sessions = internals->max_nb_sessions; - } -} - -/** - * Release queue pair (PMD ops callback). - * - * @param dev Pointer to the device structure. - * @param qp_id ID of Queue Pair to release. - * @returns 0. Always. - */ -static int -mrvl_crypto_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id) -{ - struct mrvl_crypto_qp *qp = - (struct mrvl_crypto_qp *)dev->data->queue_pairs[qp_id]; - - if (dev->data->queue_pairs[qp_id] != NULL) { - sam_cio_flush(qp->cio); - sam_cio_deinit(qp->cio); - rte_free(dev->data->queue_pairs[qp_id]); - dev->data->queue_pairs[qp_id] = NULL; - } - - return 0; -} - -/** - * Close device (PMD ops callback). 
- * - * @param dev Pointer to the device structure. - * @returns 0. Always. - */ -static int -mrvl_crypto_pmd_close(struct rte_cryptodev *dev) -{ - int qp_id; - - for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) - mrvl_crypto_pmd_qp_release(dev, qp_id); - - return 0; -} - -/** - * Setup a queue pair (PMD ops callback). - * - * @param dev Pointer to the device structure. - * @param qp_id ID of the Queue Pair. - * @param qp_conf Queue pair configuration (nb of descriptors). - * @param socket_id NUMA socket to allocate memory on. - * @returns 0 upon success, negative value otherwise. - */ -static int -mrvl_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, - const struct rte_cryptodev_qp_conf *qp_conf, - int socket_id, struct rte_mempool *session_pool) -{ - struct mrvl_crypto_qp *qp = NULL; - char match[RTE_CRYPTODEV_NAME_MAX_LEN]; - unsigned int n; - - /* Allocate the queue pair data structure. */ - qp = rte_zmalloc_socket("MRVL Crypto PMD Queue Pair", sizeof(*qp), - RTE_CACHE_LINE_SIZE, socket_id); - if (qp == NULL) - return -ENOMEM; - - /* Free old qp prior setup if needed. */ - if (dev->data->queue_pairs[qp_id] != NULL) - mrvl_crypto_pmd_qp_release(dev, qp_id); - - do { /* Error handling block */ - - /* - * This extra check is necessary due to a bug in - * crypto library. - */ - int num = sam_get_num_inst(); - if (num == 0) { - MRVL_CRYPTO_LOG_ERR("No crypto engines detected.\n"); - return -1; - } - - /* - * In case two crypto engines are enabled qps will - * be evenly spread among them. Even and odd qps will - * be handled by cio-0 and cio-1 respectively. qp-cio mapping - * will look as follows: - * - * qp: 0 1 2 3 - * cio-x:y: cio-0:0, cio-1:0, cio-0:1, cio-1:1 - * - * qp: 4 5 6 7 - * cio-x:y: cio-0:2, cio-1:2, cio-0:3, cio-1:3 - * - * In case just one engine is enabled mapping will look as - * follows: - * qp: 0 1 2 3 - * cio-x:y: cio-0:0, cio-0:1, cio-0:2, cio-0:3 - */ - n = snprintf(match, sizeof(match), "cio-%u:%u", - qp_id % num, qp_id / num); - - if (n >= sizeof(match)) - break; - - qp->cio_params.match = match; - qp->cio_params.size = qp_conf->nb_descriptors; - - if (sam_cio_init(&qp->cio_params, &qp->cio) < 0) - break; - - qp->sess_mp = session_pool; - - memset(&qp->stats, 0, sizeof(qp->stats)); - dev->data->queue_pairs[qp_id] = qp; - return 0; - } while (0); - - rte_free(qp); - return -1; -} - -/** Start queue pair (PMD ops callback) - not supported. - * - * @param dev Pointer to the device structure. - * @param qp_id ID of the Queue Pair. - * @returns -ENOTSUP. Always. - */ -static int -mrvl_crypto_pmd_qp_start(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - -/** Stop queue pair (PMD ops callback) - not supported. - * - * @param dev Pointer to the device structure. - * @param qp_id ID of the Queue Pair. - * @returns -ENOTSUP. Always. - */ -static int -mrvl_crypto_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - -/** Return the number of allocated queue pairs (PMD ops callback). - * - * @param dev Pointer to the device structure. - * @returns Number of allocated queue pairs. - */ -static uint32_t -mrvl_crypto_pmd_qp_count(struct rte_cryptodev *dev) -{ - return dev->data->nb_queue_pairs; -} - -/** Returns the size of the session structure (PMD ops callback). - * - * @param dev Pointer to the device structure [Unused]. - * @returns Size of Marvell crypto session. 
- */ -static unsigned -mrvl_crypto_pmd_session_get_size(__rte_unused struct rte_cryptodev *dev) -{ - return sizeof(struct mrvl_crypto_session); -} - -/** Configure the session from a crypto xform chain (PMD ops callback). - * - * @param dev Pointer to the device structure. - * @param xform Pointer to the crytpo configuration structure. - * @param sess Pointer to the empty session structure. - * @returns 0 upon success, negative value otherwise. - */ -static int -mrvl_crypto_pmd_session_configure(__rte_unused struct rte_cryptodev *dev, - struct rte_crypto_sym_xform *xform, - struct rte_cryptodev_sym_session *sess, - struct rte_mempool *mp) -{ - struct mrvl_crypto_session *mrvl_sess; - void *sess_private_data; - int ret; - - if (sess == NULL) { - MRVL_CRYPTO_LOG_ERR("Invalid session struct."); - return -EINVAL; - } - - if (rte_mempool_get(mp, &sess_private_data)) { - CDEV_LOG_ERR("Couldn't get object from session mempool."); - return -ENOMEM; - } - - ret = mrvl_crypto_set_session_parameters(sess_private_data, xform); - if (ret != 0) { - MRVL_CRYPTO_LOG_ERR("Failed to configure session parameters."); - - /* Return session to mempool */ - rte_mempool_put(mp, sess_private_data); - return ret; - } - - set_session_private_data(sess, dev->driver_id, sess_private_data); - - mrvl_sess = (struct mrvl_crypto_session *)sess_private_data; - if (sam_session_create(&mrvl_sess->sam_sess_params, - &mrvl_sess->sam_sess) < 0) { - MRVL_CRYPTO_LOG_DBG("Failed to create session!"); - return -EIO; - } - - return 0; -} - -/** - * Clear the memory of session so it doesn't leave key material behind. - * - * @param dev Pointer to the device structure. - * @returns 0. Always. - */ -static void -mrvl_crypto_pmd_session_clear(struct rte_cryptodev *dev, - struct rte_cryptodev_sym_session *sess) -{ - - uint8_t index = dev->driver_id; - void *sess_priv = get_session_private_data(sess, index); - - /* Zero out the whole structure */ - if (sess_priv) { - struct mrvl_crypto_session *mrvl_sess = - (struct mrvl_crypto_session *)sess_priv; - - if (mrvl_sess->sam_sess && - sam_session_destroy(mrvl_sess->sam_sess) < 0) { - MRVL_CRYPTO_LOG_INFO("Error while destroying session!"); - } - - memset(sess, 0, sizeof(struct mrvl_crypto_session)); - struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); - set_session_private_data(sess, index, NULL); - rte_mempool_put(sess_mp, sess_priv); - } -} - -/** - * PMD handlers for crypto ops. 
- */ -static struct rte_cryptodev_ops mrvl_crypto_pmd_ops = { - .dev_configure = mrvl_crypto_pmd_config, - .dev_start = mrvl_crypto_pmd_start, - .dev_stop = mrvl_crypto_pmd_stop, - .dev_close = mrvl_crypto_pmd_close, - - .dev_infos_get = mrvl_crypto_pmd_info_get, - - .stats_get = mrvl_crypto_pmd_stats_get, - .stats_reset = mrvl_crypto_pmd_stats_reset, - - .queue_pair_setup = mrvl_crypto_pmd_qp_setup, - .queue_pair_release = mrvl_crypto_pmd_qp_release, - .queue_pair_start = mrvl_crypto_pmd_qp_start, - .queue_pair_stop = mrvl_crypto_pmd_qp_stop, - .queue_pair_count = mrvl_crypto_pmd_qp_count, - - .session_get_size = mrvl_crypto_pmd_session_get_size, - .session_configure = mrvl_crypto_pmd_session_configure, - .session_clear = mrvl_crypto_pmd_session_clear -}; - -struct rte_cryptodev_ops *rte_mrvl_crypto_pmd_ops = &mrvl_crypto_pmd_ops; diff --git a/drivers/crypto/mrvl/rte_mrvl_pmd_private.h b/drivers/crypto/mrvl/rte_mrvl_pmd_private.h deleted file mode 100644 index 923faaf9..00000000 --- a/drivers/crypto/mrvl/rte_mrvl_pmd_private.h +++ /dev/null @@ -1,123 +0,0 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2017 Marvell International Ltd. - * Copyright(c) 2017 Semihalf. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of the copyright holder nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef _RTE_MRVL_PMD_PRIVATE_H_ -#define _RTE_MRVL_PMD_PRIVATE_H_ - -#include "rte_mrvl_compat.h" - -#define CRYPTODEV_NAME_MRVL_PMD crypto_mrvl -/**< Marvell PMD device name */ - -#define MRVL_CRYPTO_LOG_ERR(fmt, args...) \ - RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \ - __func__, __LINE__, ## args) - -#ifdef RTE_LIBRTE_PMD_MRVL_CRYPTO_DEBUG -#define MRVL_CRYPTO_LOG_INFO(fmt, args...) \ - RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \ - __func__, __LINE__, ## args) - -#define MRVL_CRYPTO_LOG_DBG(fmt, args...) \ - RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \ - __func__, __LINE__, ## args) - -#else -#define MRVL_CRYPTO_LOG_INFO(fmt, args...) 
-#define MRVL_CRYPTO_LOG_DBG(fmt, args...) -#endif - -/** - * Handy bits->bytes conversion macro. - */ -#define BITS2BYTES(x) ((x) >> 3) - -/** The operation order mode enumerator. */ -enum mrvl_crypto_chain_order { - MRVL_CRYPTO_CHAIN_CIPHER_ONLY, - MRVL_CRYPTO_CHAIN_AUTH_ONLY, - MRVL_CRYPTO_CHAIN_CIPHER_AUTH, - MRVL_CRYPTO_CHAIN_AUTH_CIPHER, - MRVL_CRYPTO_CHAIN_COMBINED, - MRVL_CRYPTO_CHAIN_NOT_SUPPORTED, -}; - -/** Private data structure for each crypto device. */ -struct mrvl_crypto_private { - unsigned int max_nb_qpairs; /**< Max number of queue pairs */ - unsigned int max_nb_sessions; /**< Max number of sessions */ -}; - -/** MRVL crypto queue pair structure. */ -struct mrvl_crypto_qp { - /** SAM CIO (MUSDK Queue Pair equivalent).*/ - struct sam_cio *cio; - - /** Session Mempool. */ - struct rte_mempool *sess_mp; - - /** Queue pair statistics. */ - struct rte_cryptodev_stats stats; - - /** CIO initialization parameters.*/ - struct sam_cio_params cio_params; -} __rte_cache_aligned; - -/** MRVL crypto private session structure. */ -struct mrvl_crypto_session { - /** Crypto operations chain order. */ - enum mrvl_crypto_chain_order chain_order; - - /** Session initialization parameters. */ - struct sam_session_params sam_sess_params; - - /** SAM session pointer. */ - struct sam_sa *sam_sess; - - /** Cipher IV offset. */ - uint16_t cipher_iv_offset; -} __rte_cache_aligned; - -/** Set and validate MRVL crypto session parameters */ -extern int -mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess, - const struct rte_crypto_sym_xform *xform); - -/** device specific operations function pointer structure */ -extern struct rte_cryptodev_ops *rte_mrvl_crypto_pmd_ops; - -#endif /* _RTE_MRVL_PMD_PRIVATE_H_ */ diff --git a/drivers/crypto/mrvl/rte_pmd_mrvl_version.map b/drivers/crypto/mrvl/rte_pmd_mrvl_version.map deleted file mode 100644 index a7530317..00000000 --- a/drivers/crypto/mrvl/rte_pmd_mrvl_version.map +++ /dev/null @@ -1,3 +0,0 @@ -DPDK_17.11 { - local: *; -}; diff --git a/drivers/crypto/mvsam/Makefile b/drivers/crypto/mvsam/Makefile new file mode 100644 index 00000000..c3dc72c1 --- /dev/null +++ b/drivers/crypto/mvsam/Makefile @@ -0,0 +1,42 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2017 Marvell International Ltd. +# Copyright(c) 2017 Semihalf. +# All rights reserved. + +include $(RTE_SDK)/mk/rte.vars.mk + +ifneq ($(MAKECMDGOALS),clean) +ifneq ($(MAKECMDGOALS),config) +ifeq ($(LIBMUSDK_PATH),) +$(error "Please define LIBMUSDK_PATH environment variable") +endif +endif +endif + +# library name +LIB = librte_pmd_mvsam_crypto.a + +# build flags +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) +CFLAGS += -I$(LIBMUSDK_PATH)/include +CFLAGS += -DMVCONF_TYPES_PUBLIC +CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC + +# library version +LIBABIVER := 1 + +# versioning export map +EXPORT_MAP := rte_pmd_mvsam_version.map + +# external library dependencies +LDLIBS += -L$(LIBMUSDK_PATH)/lib -lmusdk +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool +LDLIBS += -lrte_cryptodev +LDLIBS += -lrte_bus_vdev + +# library source files +SRCS-$(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) += rte_mrvl_pmd.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) += rte_mrvl_pmd_ops.c + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/crypto/mvsam/meson.build b/drivers/crypto/mvsam/meson.build new file mode 100644 index 00000000..3c8ea3cf --- /dev/null +++ b/drivers/crypto/mvsam/meson.build @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 Marvell International Ltd. 
+# Copyright(c) 2018 Semihalf. +# All rights reserved. + +path = get_option('lib_musdk_dir') +lib_dir = path + '/lib' +inc_dir = path + '/include' + +lib = cc.find_library('libmusdk', dirs: [lib_dir], required: false) +if not lib.found() + build = false +else + ext_deps += lib + includes += include_directories(inc_dir) + cflags += ['-DMVCONF_TYPES_PUBLIC', '-DMVCONF_DMA_PHYS_ADDR_T_PUBLIC'] +endif + +sources = files('rte_mrvl_pmd.c', 'rte_mrvl_pmd_ops.c') + +deps += ['bus_vdev'] diff --git a/drivers/crypto/mvsam/rte_mrvl_compat.h b/drivers/crypto/mvsam/rte_mrvl_compat.h new file mode 100644 index 00000000..4ab28d39 --- /dev/null +++ b/drivers/crypto/mvsam/rte_mrvl_compat.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Marvell International Ltd. + * Copyright(c) 2017 Semihalf. + * All rights reserved. + */ + +#ifndef _RTE_MRVL_COMPAT_H_ +#define _RTE_MRVL_COMPAT_H_ + +/* Unluckily, container_of is defined by both DPDK and MUSDK, + * we'll declare only one version. + * + * Note that it is not used in this PMD anyway. + */ +#ifdef container_of +#undef container_of +#endif +#include "env/mv_autogen_comp_flags.h" +#include "drivers/mv_sam.h" +#include "drivers/mv_sam_cio.h" +#include "drivers/mv_sam_session.h" + +#endif /* _RTE_MRVL_COMPAT_H_ */ diff --git a/drivers/crypto/mvsam/rte_mrvl_pmd.c b/drivers/crypto/mvsam/rte_mrvl_pmd.c new file mode 100644 index 00000000..73eff757 --- /dev/null +++ b/drivers/crypto/mvsam/rte_mrvl_pmd.c @@ -0,0 +1,937 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017 Marvell International Ltd. + * Copyright(c) 2017 Semihalf. + * All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "rte_mrvl_pmd_private.h" + +#define MRVL_MUSDK_DMA_MEMSIZE 41943040 + +#define MRVL_PMD_MAX_NB_SESS_ARG ("max_nb_sessions") +#define MRVL_PMD_DEFAULT_MAX_NB_SESSIONS 2048 + +static uint8_t cryptodev_driver_id; + +struct mrvl_pmd_init_params { + struct rte_cryptodev_pmd_init_params common; + uint32_t max_nb_sessions; +}; + +const char *mrvl_pmd_valid_params[] = { + RTE_CRYPTODEV_PMD_NAME_ARG, + RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG, + RTE_CRYPTODEV_PMD_SOCKET_ID_ARG, + MRVL_PMD_MAX_NB_SESS_ARG +}; + +/** + * Flag if particular crypto algorithm is supported by PMD/MUSDK. + * + * The idea is to have Not Supported value as default (0). + * This way we need only to define proper map sizes, + * non-initialized entries will be by default not supported. + */ +enum algo_supported { + ALGO_NOT_SUPPORTED = 0, + ALGO_SUPPORTED = 1, +}; + +/** Map elements for cipher mapping.*/ +struct cipher_params_mapping { + enum algo_supported supported; /**< On/Off switch */ + enum sam_cipher_alg cipher_alg; /**< Cipher algorithm */ + enum sam_cipher_mode cipher_mode; /**< Cipher mode */ + unsigned int max_key_len; /**< Maximum key length (in bytes)*/ +} +/* We want to squeeze in multiple maps into the cache line. */ +__rte_aligned(32); + +/** Map elements for auth mapping.*/ +struct auth_params_mapping { + enum algo_supported supported; /**< On/off switch */ + enum sam_auth_alg auth_alg; /**< Auth algorithm */ +} +/* We want to squeeze in multiple maps into the cache line. */ +__rte_aligned(32); + +/** + * Map of supported cipher algorithms. 
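+ *
+ * The table is indexed directly by enum rte_crypto_cipher_algorithm, so a
+ * lookup is a single array access. For example (values taken from the
+ * entries below): cipher_map[RTE_CRYPTO_CIPHER_AES_CBC] resolves to
+ * SAM_CIPHER_AES in SAM_CIPHER_CBC mode with a 32-byte maximum key.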
+ */ +static const +struct cipher_params_mapping cipher_map[RTE_CRYPTO_CIPHER_LIST_END] = { + [RTE_CRYPTO_CIPHER_3DES_CBC] = { + .supported = ALGO_SUPPORTED, + .cipher_alg = SAM_CIPHER_3DES, + .cipher_mode = SAM_CIPHER_CBC, + .max_key_len = BITS2BYTES(192) }, + [RTE_CRYPTO_CIPHER_3DES_CTR] = { + .supported = ALGO_SUPPORTED, + .cipher_alg = SAM_CIPHER_3DES, + .cipher_mode = SAM_CIPHER_CTR, + .max_key_len = BITS2BYTES(192) }, + [RTE_CRYPTO_CIPHER_3DES_ECB] = { + .supported = ALGO_SUPPORTED, + .cipher_alg = SAM_CIPHER_3DES, + .cipher_mode = SAM_CIPHER_ECB, + .max_key_len = BITS2BYTES(192) }, + [RTE_CRYPTO_CIPHER_AES_CBC] = { + .supported = ALGO_SUPPORTED, + .cipher_alg = SAM_CIPHER_AES, + .cipher_mode = SAM_CIPHER_CBC, + .max_key_len = BITS2BYTES(256) }, + [RTE_CRYPTO_CIPHER_AES_CTR] = { + .supported = ALGO_SUPPORTED, + .cipher_alg = SAM_CIPHER_AES, + .cipher_mode = SAM_CIPHER_CTR, + .max_key_len = BITS2BYTES(256) }, +}; + +/** + * Map of supported auth algorithms. + */ +static const +struct auth_params_mapping auth_map[RTE_CRYPTO_AUTH_LIST_END] = { + [RTE_CRYPTO_AUTH_MD5_HMAC] = { + .supported = ALGO_SUPPORTED, + .auth_alg = SAM_AUTH_HMAC_MD5 }, + [RTE_CRYPTO_AUTH_MD5] = { + .supported = ALGO_SUPPORTED, + .auth_alg = SAM_AUTH_HASH_MD5 }, + [RTE_CRYPTO_AUTH_SHA1_HMAC] = { + .supported = ALGO_SUPPORTED, + .auth_alg = SAM_AUTH_HMAC_SHA1 }, + [RTE_CRYPTO_AUTH_SHA1] = { + .supported = ALGO_SUPPORTED, + .auth_alg = SAM_AUTH_HASH_SHA1 }, + [RTE_CRYPTO_AUTH_SHA224] = { + .supported = ALGO_SUPPORTED, + .auth_alg = SAM_AUTH_HASH_SHA2_224 }, + [RTE_CRYPTO_AUTH_SHA256_HMAC] = { + .supported = ALGO_SUPPORTED, + .auth_alg = SAM_AUTH_HMAC_SHA2_256 }, + [RTE_CRYPTO_AUTH_SHA256] = { + .supported = ALGO_SUPPORTED, + .auth_alg = SAM_AUTH_HASH_SHA2_256 }, + [RTE_CRYPTO_AUTH_SHA384_HMAC] = { + .supported = ALGO_SUPPORTED, + .auth_alg = SAM_AUTH_HMAC_SHA2_384 }, + [RTE_CRYPTO_AUTH_SHA384] = { + .supported = ALGO_SUPPORTED, + .auth_alg = SAM_AUTH_HASH_SHA2_384 }, + [RTE_CRYPTO_AUTH_SHA512_HMAC] = { + .supported = ALGO_SUPPORTED, + .auth_alg = SAM_AUTH_HMAC_SHA2_512 }, + [RTE_CRYPTO_AUTH_SHA512] = { + .supported = ALGO_SUPPORTED, + .auth_alg = SAM_AUTH_HASH_SHA2_512 }, + [RTE_CRYPTO_AUTH_AES_GMAC] = { + .supported = ALGO_SUPPORTED, + .auth_alg = SAM_AUTH_AES_GMAC }, +}; + +/** + * Map of supported aead algorithms. + */ +static const +struct cipher_params_mapping aead_map[RTE_CRYPTO_AEAD_LIST_END] = { + [RTE_CRYPTO_AEAD_AES_GCM] = { + .supported = ALGO_SUPPORTED, + .cipher_alg = SAM_CIPHER_AES, + .cipher_mode = SAM_CIPHER_GCM, + .max_key_len = BITS2BYTES(256) }, +}; + +/* + *----------------------------------------------------------------------------- + * Forward declarations. + *----------------------------------------------------------------------------- + */ +static int cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev); + +/* + *----------------------------------------------------------------------------- + * Session Preparation. + *----------------------------------------------------------------------------- + */ + +/** + * Get xform chain order. + * + * @param xform Pointer to configuration structure chain for crypto operations. + * @returns Order of crypto operations. 
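+ *
+ * For example: a CIPHER xform whose ->next is an AUTH xform yields
+ * MRVL_CRYPTO_CHAIN_CIPHER_AUTH, a single AEAD xform yields
+ * MRVL_CRYPTO_CHAIN_COMBINED, and any chain longer than two xforms
+ * yields MRVL_CRYPTO_CHAIN_NOT_SUPPORTED.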
+ */
+static enum mrvl_crypto_chain_order
+mrvl_crypto_get_chain_order(const struct rte_crypto_sym_xform *xform)
+{
+ /* Currently, Marvell supports at most 2 operations in a chain */
+ if (xform->next != NULL && xform->next->next != NULL)
+ return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
+
+ if (xform->next != NULL) {
+ if ((xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) &&
+ (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER))
+ return MRVL_CRYPTO_CHAIN_AUTH_CIPHER;
+
+ if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
+ (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH))
+ return MRVL_CRYPTO_CHAIN_CIPHER_AUTH;
+ } else {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return MRVL_CRYPTO_CHAIN_AUTH_ONLY;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return MRVL_CRYPTO_CHAIN_CIPHER_ONLY;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
+ return MRVL_CRYPTO_CHAIN_COMBINED;
+ }
+ return MRVL_CRYPTO_CHAIN_NOT_SUPPORTED;
+}
+
+/**
+ * Set session parameters for cipher part.
+ *
+ * @param sess Crypto session pointer.
+ * @param cipher_xform Pointer to configuration structure for cipher operations.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_crypto_set_cipher_session_parameters(struct mrvl_crypto_session *sess,
+ const struct rte_crypto_sym_xform *cipher_xform)
+{
+ /* Make sure we've got proper struct */
+ if (cipher_xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
+ return -EINVAL;
+ }
+
+ /* See if map data is present and valid (>= guards the LIST_END bound) */
+ if ((cipher_xform->cipher.algo >= RTE_DIM(cipher_map)) ||
+ (cipher_map[cipher_xform->cipher.algo].supported
+ != ALGO_SUPPORTED)) {
+ MRVL_CRYPTO_LOG_ERR("Cipher algorithm not supported!");
+ return -EINVAL;
+ }
+
+ sess->cipher_iv_offset = cipher_xform->cipher.iv.offset;
+
+ sess->sam_sess_params.dir =
+ (cipher_xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
+ SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
+ sess->sam_sess_params.cipher_alg =
+ cipher_map[cipher_xform->cipher.algo].cipher_alg;
+ sess->sam_sess_params.cipher_mode =
+ cipher_map[cipher_xform->cipher.algo].cipher_mode;
+
+ /* Assume IV will be passed together with data. */
+ sess->sam_sess_params.cipher_iv = NULL;
+
+ /* Check the key length against the map's maximum. */
+ if (cipher_xform->cipher.key.length >
+ cipher_map[cipher_xform->cipher.algo].max_key_len) {
+ MRVL_CRYPTO_LOG_ERR("Wrong key length!");
+ return -EINVAL;
+ }
+
+ sess->sam_sess_params.cipher_key_len = cipher_xform->cipher.key.length;
+ sess->sam_sess_params.cipher_key = cipher_xform->cipher.key.data;
+
+ return 0;
+}
+
+/**
+ * Set session parameters for authentication part.
+ *
+ * @param sess Crypto session pointer.
+ * @param auth_xform Pointer to configuration structure for auth operations.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_crypto_set_auth_session_parameters(struct mrvl_crypto_session *sess,
+ const struct rte_crypto_sym_xform *auth_xform)
+{
+ /* Make sure we've got proper struct */
+ if (auth_xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
+ MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
+ return -EINVAL;
+ }
+
+ /* See if map data is present and valid (>= guards the LIST_END bound) */
+ if ((auth_xform->auth.algo >= RTE_DIM(auth_map)) ||
+ (auth_map[auth_xform->auth.algo].supported != ALGO_SUPPORTED)) {
+ MRVL_CRYPTO_LOG_ERR("Auth algorithm not supported!");
+ return -EINVAL;
+ }
+
+ sess->sam_sess_params.dir =
+ (auth_xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
+ SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
+ sess->sam_sess_params.auth_alg =
+ auth_map[auth_xform->auth.algo].auth_alg;
+ sess->sam_sess_params.u.basic.auth_icv_len =
+ auth_xform->auth.digest_length;
+ /* auth_key must be NULL if auth algorithm does not use HMAC */
+ sess->sam_sess_params.auth_key = auth_xform->auth.key.length ?
+ auth_xform->auth.key.data : NULL;
+ sess->sam_sess_params.auth_key_len = auth_xform->auth.key.length;
+
+ return 0;
+}
+
+/**
+ * Set session parameters for aead part.
+ *
+ * @param sess Crypto session pointer.
+ * @param aead_xform Pointer to configuration structure for aead operations.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+mrvl_crypto_set_aead_session_parameters(struct mrvl_crypto_session *sess,
+ const struct rte_crypto_sym_xform *aead_xform)
+{
+ /* Make sure we've got proper struct */
+ if (aead_xform->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
+ MRVL_CRYPTO_LOG_ERR("Wrong xform struct provided!");
+ return -EINVAL;
+ }
+
+ /* See if map data is present and valid (>= guards the LIST_END bound) */
+ if ((aead_xform->aead.algo >= RTE_DIM(aead_map)) ||
+ (aead_map[aead_xform->aead.algo].supported
+ != ALGO_SUPPORTED)) {
+ MRVL_CRYPTO_LOG_ERR("AEAD algorithm not supported!");
+ return -EINVAL;
+ }
+
+ sess->sam_sess_params.dir =
+ (aead_xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+ SAM_DIR_ENCRYPT : SAM_DIR_DECRYPT;
+ sess->sam_sess_params.cipher_alg =
+ aead_map[aead_xform->aead.algo].cipher_alg;
+ sess->sam_sess_params.cipher_mode =
+ aead_map[aead_xform->aead.algo].cipher_mode;
+
+ /* Assume IV will be passed together with data. */
+ sess->sam_sess_params.cipher_iv = NULL;
+
+ /* Check the key length against the map's maximum. */
+ if (aead_xform->aead.key.length >
+ aead_map[aead_xform->aead.algo].max_key_len) {
+ MRVL_CRYPTO_LOG_ERR("Wrong key length!");
+ return -EINVAL;
+ }
+
+ sess->sam_sess_params.cipher_key = aead_xform->aead.key.data;
+ sess->sam_sess_params.cipher_key_len = aead_xform->aead.key.length;
+
+ if (sess->sam_sess_params.cipher_mode == SAM_CIPHER_GCM)
+ sess->sam_sess_params.auth_alg = SAM_AUTH_AES_GCM;
+
+ sess->sam_sess_params.u.basic.auth_icv_len =
+ aead_xform->aead.digest_length;
+
+ sess->sam_sess_params.u.basic.auth_aad_len =
+ aead_xform->aead.aad_length;
+
+ return 0;
+}
+
+/**
+ * Parse crypto transform chain and set up session parameters.
+ *
+ * @param sess Pointer to crypto session
+ * @param xform Pointer to configuration structure chain for crypto operations.
+ * @returns 0 in case of success, negative value otherwise.
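+ *
+ * Minimal usage sketch (illustrative values only; key, IV and length
+ * fields are omitted here, and error handling is elided):
+ *
+ *   struct rte_crypto_sym_xform auth = {
+ *           .type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ *           .next = NULL,
+ *           .auth = { .op = RTE_CRYPTO_AUTH_OP_GENERATE,
+ *                     .algo = RTE_CRYPTO_AUTH_SHA1_HMAC },
+ *   };
+ *   struct rte_crypto_sym_xform cipher = {
+ *           .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ *           .next = &auth,
+ *           .cipher = { .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
+ *                       .algo = RTE_CRYPTO_CIPHER_AES_CBC },
+ *   };
+ *   int rc = mrvl_crypto_set_session_parameters(sess, &cipher);
+ *
+ * This chain resolves to MRVL_CRYPTO_CHAIN_CIPHER_AUTH.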
+ */
+int
+mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *aead_xform = NULL;
+
+ /* Filter out spurious/broken requests */
+ if (xform == NULL)
+ return -EINVAL;
+
+ sess->chain_order = mrvl_crypto_get_chain_order(xform);
+ switch (sess->chain_order) {
+ case MRVL_CRYPTO_CHAIN_CIPHER_AUTH:
+ cipher_xform = xform;
+ auth_xform = xform->next;
+ break;
+ case MRVL_CRYPTO_CHAIN_AUTH_CIPHER:
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ break;
+ case MRVL_CRYPTO_CHAIN_CIPHER_ONLY:
+ cipher_xform = xform;
+ break;
+ case MRVL_CRYPTO_CHAIN_AUTH_ONLY:
+ auth_xform = xform;
+ break;
+ case MRVL_CRYPTO_CHAIN_COMBINED:
+ aead_xform = xform;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((cipher_xform != NULL) &&
+ (mrvl_crypto_set_cipher_session_parameters(
+ sess, cipher_xform) < 0)) {
+ MRVL_CRYPTO_LOG_ERR("Invalid/unsupported cipher parameters");
+ return -EINVAL;
+ }
+
+ if ((auth_xform != NULL) &&
+ (mrvl_crypto_set_auth_session_parameters(
+ sess, auth_xform) < 0)) {
+ MRVL_CRYPTO_LOG_ERR("Invalid/unsupported auth parameters");
+ return -EINVAL;
+ }
+
+ if ((aead_xform != NULL) &&
+ (mrvl_crypto_set_aead_session_parameters(
+ sess, aead_xform) < 0)) {
+ MRVL_CRYPTO_LOG_ERR("Invalid/unsupported aead parameters");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ *-----------------------------------------------------------------------------
+ * Process Operations
+ *-----------------------------------------------------------------------------
+ */
+
+/**
+ * Prepare a single request.
+ *
+ * This function translates a DPDK crypto request into one understandable
+ * by MUSDK's SAM. If this is the first request in a session,
+ * it starts the session.
+ *
+ * @param request Pointer to pre-allocated and zeroed request buffer [Out].
+ * @param src_bd Pointer to pre-allocated source descriptor [Out].
+ * @param dst_bd Pointer to pre-allocated destination descriptor [Out].
+ * @param op Pointer to DPDK crypto operation struct [In].
+ */
+static inline int
+mrvl_request_prepare(struct sam_cio_op_params *request,
+ struct sam_buf_info *src_bd,
+ struct sam_buf_info *dst_bd,
+ struct rte_crypto_op *op)
+{
+ struct mrvl_crypto_session *sess;
+ struct rte_mbuf *dst_mbuf;
+ uint8_t *digest;
+
+ if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
+ MRVL_CRYPTO_LOG_ERR("MRVL CRYPTO PMD only supports session-"
+ "oriented requests, op (%p) is sessionless.",
+ op);
+ return -EINVAL;
+ }
+
+ sess = (struct mrvl_crypto_session *)get_sym_session_private_data(
+ op->sym->session, cryptodev_driver_id);
+ if (unlikely(sess == NULL)) {
+ MRVL_CRYPTO_LOG_ERR("Session was not created for this device");
+ return -EINVAL;
+ }
+
+ /*
+ * If the application delivered a null dst buffer, it expects
+ * the result to be delivered in the src buffer.
+ */
+ dst_mbuf = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+
+ request->sa = sess->sam_sess;
+ request->cookie = op;
+
+ /* Single buffers only, sorry. */
+ request->num_bufs = 1;
+ request->src = src_bd;
+ src_bd->vaddr = rte_pktmbuf_mtod(op->sym->m_src, void *);
+ src_bd->paddr = rte_pktmbuf_iova(op->sym->m_src);
+ src_bd->len = rte_pktmbuf_data_len(op->sym->m_src);
+
+ /* Empty source. */
+ if (rte_pktmbuf_data_len(op->sym->m_src) == 0) {
+ /* EIP does not support 0 length buffers. */
+ MRVL_CRYPTO_LOG_ERR("Buffer length == 0 not supported!");
+ return -1;
+ }
+
+ /* Empty destination. */
+ if (rte_pktmbuf_data_len(dst_mbuf) == 0) {
+ /* Grow the dst buffer to hold at least the source data. */
+ if (rte_pktmbuf_append(dst_mbuf,
+ rte_pktmbuf_data_len(op->sym->m_src)) == NULL) {
+ MRVL_CRYPTO_LOG_ERR("Unable to set big enough dst buffer!");
+ return -1;
+ }
+ }
+
+ request->dst = dst_bd;
+ dst_bd->vaddr = rte_pktmbuf_mtod(dst_mbuf, void *);
+ dst_bd->paddr = rte_pktmbuf_iova(dst_mbuf);
+
+ /*
+ * We can use all available space in dst_mbuf,
+ * not only what's used currently.
+ */
+ dst_bd->len = dst_mbuf->buf_len - rte_pktmbuf_headroom(dst_mbuf);
+
+ if (sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED) {
+ request->cipher_len = op->sym->aead.data.length;
+ request->cipher_offset = op->sym->aead.data.offset;
+ request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->cipher_iv_offset);
+
+ request->auth_aad = op->sym->aead.aad.data;
+ request->auth_offset = request->cipher_offset;
+ request->auth_len = request->cipher_len;
+ } else {
+ request->cipher_len = op->sym->cipher.data.length;
+ request->cipher_offset = op->sym->cipher.data.offset;
+ request->cipher_iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+ sess->cipher_iv_offset);
+
+ request->auth_offset = op->sym->auth.data.offset;
+ request->auth_len = op->sym->auth.data.length;
+ }
+
+ digest = sess->chain_order == MRVL_CRYPTO_CHAIN_COMBINED ?
+ op->sym->aead.digest.data : op->sym->auth.digest.data;
+ if (digest == NULL) {
+ /* No auth - no worry. */
+ return 0;
+ }
+
+ request->auth_icv_offset = request->auth_offset + request->auth_len;
+
+ /*
+ * EIP supports only scenarios where the ICV (digest buffer) is placed
+ * at auth_icv_offset. Any other placement means risking errors.
+ */
+ if (sess->sam_sess_params.dir == SAM_DIR_ENCRYPT) {
+ /*
+ * This should be the most common case anyway,
+ * EIP will overwrite DST buffer at auth_icv_offset.
+ */
+ if (rte_pktmbuf_mtod_offset(
+ dst_mbuf, uint8_t *,
+ request->auth_icv_offset) == digest) {
+ return 0;
+ }
+ } else { /* sess->sam_sess_params.dir == SAM_DIR_DECRYPT */
+ /*
+ * EIP will look for the digest at auth_icv_offset
+ * in the SRC buffer.
+ */
+ if (rte_pktmbuf_mtod_offset(
+ op->sym->m_src, uint8_t *,
+ request->auth_icv_offset) == digest) {
+ return 0;
+ }
+ }
+
+ /*
+ * If we landed here it means that the digest pointer is
+ * somewhere other than the expected place.
+ */
+ return -1;
+}
+
+/*
+ *-----------------------------------------------------------------------------
+ * PMD Framework handlers
+ *-----------------------------------------------------------------------------
+ */
+
+/**
+ * Enqueue burst.
+ *
+ * @param queue_pair Pointer to queue pair.
+ * @param ops Pointer to ops requests array.
+ * @param nb_ops Number of elements in ops requests array.
+ * @returns Number of elements consumed from ops.
+ */
+static uint16_t
+mrvl_crypto_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ uint16_t iter_ops = 0;
+ uint16_t to_enq = 0;
+ uint16_t consumed = 0;
+ int ret;
+ struct sam_cio_op_params requests[nb_ops];
+ /*
+ * DPDK uses single fragment buffers, so we can KISS descriptors.
+ * SAM does not store bd pointers, so on-stack scope will be enough.
+ */
+ struct sam_buf_info src_bd[nb_ops];
+ struct sam_buf_info dst_bd[nb_ops];
+ struct mrvl_crypto_qp *qp = (struct mrvl_crypto_qp *)queue_pair;
+
+ if (nb_ops == 0)
+ return 0;
+
+ /* Prepare the burst. */
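+ /*
+ * Note: a single preparation failure aborts the loop below; the
+ * failing op is marked RTE_CRYPTO_OP_STATUS_ERROR and counted as
+ * consumed, while the ops already prepared are still enqueued.
+ */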
+ memset(&requests, 0, sizeof(requests));
+
+ /* Iterate over the ops burst. */
+ for (; iter_ops < nb_ops; ++iter_ops) {
+ if (mrvl_request_prepare(&requests[iter_ops],
+ &src_bd[iter_ops],
+ &dst_bd[iter_ops],
+ ops[iter_ops]) < 0) {
+ MRVL_CRYPTO_LOG_ERR(
+ "Error while preparing request parameters!");
+ qp->stats.enqueue_err_count++;
+ ops[iter_ops]->status = RTE_CRYPTO_OP_STATUS_ERROR;
+
+ /*
+ * Number of handled ops is increased
+ * (even if handling resulted in an error).
+ */
+ ++consumed;
+ break;
+ }
+
+ ops[iter_ops]->status =
+ RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ /* Increase the number of ops to enqueue. */
+ ++to_enq;
+ } /* for (; iter_ops < nb_ops;... */
+
+ if (to_enq > 0) {
+ /* Send the burst */
+ ret = sam_cio_enq(qp->cio, requests, &to_enq);
+ consumed += to_enq;
+ if (ret < 0) {
+ /*
+ * Trust SAM to eventually return a meaningful value
+ * here (for now it is passed back unmodified).
+ */
+ qp->stats.enqueue_err_count += to_enq;
+ for (iter_ops = 0; iter_ops < to_enq; ++iter_ops)
+ ops[iter_ops]->status =
+ RTE_CRYPTO_OP_STATUS_ERROR;
+ }
+ }
+
+ qp->stats.enqueued_count += to_enq;
+ return consumed;
+}
+
+/**
+ * Dequeue burst.
+ *
+ * @param queue_pair Pointer to queue pair.
+ * @param ops Pointer to ops requests array.
+ * @param nb_ops Number of elements in ops requests array.
+ * @returns Number of elements dequeued.
+ */
+static uint16_t
+mrvl_crypto_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ int ret;
+ struct mrvl_crypto_qp *qp = queue_pair;
+ struct sam_cio *cio = qp->cio;
+ struct sam_cio_op_result results[nb_ops];
+ uint16_t i;
+
+ ret = sam_cio_deq(cio, results, &nb_ops);
+ if (ret < 0) {
+ /* Count all dequeued as error. */
+ qp->stats.dequeue_err_count += nb_ops;
+
+ /* But account them as dequeued anyway. */
+ qp->stats.dequeued_count += nb_ops;
+
+ return 0;
+ }
+
+ /* Unpack and check results. */
+ for (i = 0; i < nb_ops; ++i) {
+ ops[i] = results[i].cookie;
+
+ switch (results[i].status) {
+ case SAM_CIO_OK:
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ break;
+ case SAM_CIO_ERR_ICV:
+ MRVL_CRYPTO_LOG_DBG("CIO returned SAM_CIO_ERR_ICV.");
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ break;
+ default:
+ MRVL_CRYPTO_LOG_DBG(
+ "CIO returned Error: %d", results[i].status);
+ ops[i]->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ break;
+ }
+ }
+
+ qp->stats.dequeued_count += nb_ops;
+ return nb_ops;
+}
+
+/**
+ * Create a new crypto device.
+ *
+ * @param name Driver name.
+ * @param vdev Pointer to device structure.
+ * @param init_params Pointer to initialization parameters.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+cryptodev_mrvl_crypto_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct mrvl_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct mrvl_crypto_private *internals;
+ struct sam_init_params sam_params;
+ int ret;
+
+ dev = rte_cryptodev_pmd_create(name, &vdev->device,
+ &init_params->common);
+ if (dev == NULL) {
+ MRVL_CRYPTO_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
+ dev->driver_id = cryptodev_driver_id;
+ dev->dev_ops = rte_mrvl_crypto_pmd_ops;
+
+ /* Register rx/tx burst functions for data path. */
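+ /*
+ * These entry points are invoked directly by
+ * rte_cryptodev_enqueue_burst()/rte_cryptodev_dequeue_burst(),
+ * without going through the dev_ops table.
+ */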
+ dev->enqueue_burst = mrvl_crypto_pmd_enqueue_burst;
+ dev->dequeue_burst = mrvl_crypto_pmd_dequeue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED;
+
+ /* Set up the private device data. */
+ internals = dev->data->dev_private;
+
+ internals->max_nb_qpairs = init_params->common.max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->max_nb_sessions;
+
+ /*
+ * ret == -EEXIST is correct, it means DMA
+ * has already been initialized.
+ */
+ ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
+ if (ret < 0) {
+ if (ret != -EEXIST)
+ return ret;
+
+ MRVL_CRYPTO_LOG_INFO(
+ "DMA memory has already been initialized by a different driver.");
+ }
+
+ sam_params.max_num_sessions = internals->max_nb_sessions;
+
+ return sam_init(&sam_params);
+
+init_error:
+ MRVL_CRYPTO_LOG_ERR(
+ "driver %s: %s failed", init_params->common.name, __func__);
+
+ cryptodev_mrvl_crypto_uninit(vdev);
+ return -EFAULT;
+}
+
+/** Parse an integer argument. */
+static int
+parse_integer_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int *i = (int *) extra_args;
+
+ *i = atoi(value);
+ if (*i < 0) {
+ MRVL_CRYPTO_LOG_ERR("Argument has to be non-negative.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/** Parse a name argument. */
+static int
+parse_name_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct rte_cryptodev_pmd_init_params *params = extra_args;
+
+ if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
+ MRVL_CRYPTO_LOG_ERR("Invalid name %s, should be less than "
+ "%u bytes.\n", value,
+ RTE_CRYPTODEV_NAME_MAX_LEN - 1);
+ return -EINVAL;
+ }
+
+ strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
+
+ return 0;
+}
+
+static int
+mrvl_pmd_parse_input_args(struct mrvl_pmd_init_params *params,
+ const char *input_args)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+
+ if (params == NULL)
+ return -EINVAL;
+
+ if (input_args) {
+ kvlist = rte_kvargs_parse(input_args,
+ mrvl_pmd_valid_params);
+ if (kvlist == NULL)
+ return -1;
+
+ /* Common VDEV parameters */
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_PMD_MAX_NB_QP_ARG,
+ &parse_integer_arg,
+ &params->common.max_nb_queue_pairs);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_PMD_SOCKET_ID_ARG,
+ &parse_integer_arg,
+ &params->common.socket_id);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ RTE_CRYPTODEV_PMD_NAME_ARG,
+ &parse_name_arg,
+ &params->common);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ MRVL_PMD_MAX_NB_SESS_ARG,
+ &parse_integer_arg,
+ &params->max_nb_sessions);
+ if (ret < 0)
+ goto free_kvlist;
+
+ }
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+/**
+ * Initialize the crypto device.
+ *
+ * @param vdev Pointer to device structure.
+ * @returns 0 in case of success, negative value otherwise.
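+ *
+ * Example invocation (illustrative only; the vdev name is whatever
+ * CRYPTODEV_NAME_MRVL_PMD expands to, and the numeric values are
+ * hypothetical, matching the parameter string registered at the bottom
+ * of this file):
+ *
+ *   --vdev '<CRYPTODEV_NAME_MRVL_PMD>,max_nb_queue_pairs=2,max_nb_sessions=1024,socket_id=0'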
+
+/**
+ * Initialize the crypto device.
+ *
+ * @param vdev Pointer to device structure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+cryptodev_mrvl_crypto_init(struct rte_vdev_device *vdev)
+{
+	struct mrvl_pmd_init_params init_params = {
+		.common = {
+			.name = "",
+			.private_data_size =
+				sizeof(struct mrvl_crypto_private),
+			.max_nb_queue_pairs =
+				sam_get_num_inst() * SAM_HW_RING_NUM,
+			.socket_id = rte_socket_id()
+		},
+		.max_nb_sessions = MRVL_PMD_DEFAULT_MAX_NB_SESSIONS
+	};
+
+	const char *name, *args;
+	int ret;
+
+	name = rte_vdev_device_name(vdev);
+	if (name == NULL)
+		return -EINVAL;
+	args = rte_vdev_device_args(vdev);
+
+	ret = mrvl_pmd_parse_input_args(&init_params, args);
+	if (ret) {
+		RTE_LOG(ERR, PMD,
+			"Failed to parse initialisation arguments[%s]\n",
+			args);
+		return -EINVAL;
+	}
+
+	return cryptodev_mrvl_crypto_create(name, vdev, &init_params);
+}
+
+/**
+ * Uninitialize the crypto device.
+ *
+ * @param vdev Pointer to device structure.
+ * @returns 0 in case of success, negative value otherwise.
+ */
+static int
+cryptodev_mrvl_crypto_uninit(struct rte_vdev_device *vdev)
+{
+	struct rte_cryptodev *cryptodev;
+	const char *name = rte_vdev_device_name(vdev);
+
+	if (name == NULL)
+		return -EINVAL;
+
+	RTE_LOG(INFO, PMD,
+		"Closing Marvell crypto device %s on numa socket %u\n",
+		name, rte_socket_id());
+
+	sam_deinit();
+
+	cryptodev = rte_cryptodev_pmd_get_named_dev(name);
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	return rte_cryptodev_pmd_destroy(cryptodev);
+}
+
+/**
+ * Basic driver handlers for use in the constructor.
+ */
+static struct rte_vdev_driver cryptodev_mrvl_pmd_drv = {
+	.probe = cryptodev_mrvl_crypto_init,
+	.remove = cryptodev_mrvl_crypto_uninit
+};
+
+static struct cryptodev_driver mrvl_crypto_drv;
+
+/* Register the driver in constructor. */
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_MRVL_PMD, cryptodev_mrvl_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MRVL_PMD,
+	"max_nb_queue_pairs=<int> "
+	"max_nb_sessions=<int> "
+	"socket_id=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv.driver,
+		cryptodev_driver_id);
diff --git a/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c b/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c
new file mode 100644
index 00000000..c045562c
--- /dev/null
+++ b/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c
@@ -0,0 +1,722 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "rte_mrvl_pmd_private.h"
+
+/**
+ * Capabilities list to be used in reporting to DPDK.
+ */ +static const struct rte_cryptodev_capabilities + mrvl_crypto_pmd_capabilities[] = { + { /* MD5 HMAC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_MD5_HMAC, + .block_size = 64, + .key_size = { + .min = 1, + .max = 64, + .increment = 1 + }, + .digest_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + }, } + }, } + }, + { /* MD5 */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_MD5, + .block_size = 64, + .key_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + .digest_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + }, } + }, } + }, + { /* SHA1 HMAC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, + .block_size = 64, + .key_size = { + .min = 1, + .max = 64, + .increment = 1 + }, + .digest_size = { + .min = 20, + .max = 20, + .increment = 0 + }, + }, } + }, } + }, + { /* SHA1 */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA1, + .block_size = 64, + .key_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + .digest_size = { + .min = 20, + .max = 20, + .increment = 0 + }, + }, } + }, } + }, + { /* SHA224 */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA224, + .block_size = 64, + .key_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + .digest_size = { + .min = 28, + .max = 28, + .increment = 0 + }, + }, } + }, } + }, + { /* SHA256 HMAC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, + .block_size = 64, + .key_size = { + .min = 1, + .max = 64, + .increment = 1 + }, + .digest_size = { + .min = 32, + .max = 32, + .increment = 0 + }, + }, } + }, } + }, + { /* SHA256 */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA256, + .block_size = 64, + .key_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + .digest_size = { + .min = 32, + .max = 32, + .increment = 0 + }, + }, } + }, } + }, + { /* SHA384 HMAC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, + .block_size = 128, + .key_size = { + .min = 1, + .max = 128, + .increment = 1 + }, + .digest_size = { + .min = 48, + .max = 48, + .increment = 0 + }, + }, } + }, } + }, + { /* SHA384 */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA384, + .block_size = 128, + .key_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + .digest_size = { + .min = 48, + .max = 48, + .increment = 0 + }, + }, } + }, } + }, + { /* SHA512 HMAC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, + .block_size = 128, + .key_size = { + .min = 1, + .max = 128, + .increment = 1 + }, + .digest_size = { + .min = 64, + .max = 64, + .increment = 0 + }, + }, } + }, } + }, + { /* SHA512 */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_SHA512, + .block_size = 
128, + .key_size = { + .min = 0, + .max = 0, + .increment = 0 + }, + .digest_size = { + .min = 64, + .max = 64, + .increment = 0 + }, + }, } + }, } + }, + { /* AES CBC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_AES_CBC, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .iv_size = { + .min = 16, + .max = 16, + .increment = 0 + } + }, } + }, } + }, + { /* AES CTR */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_AES_CTR, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .iv_size = { + .min = 16, + .max = 16, + .increment = 0 + } + }, } + }, } + }, + { /* AES GCM */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, + {.aead = { + .algo = RTE_CRYPTO_AEAD_AES_GCM, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .digest_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .aad_size = { + .min = 8, + .max = 12, + .increment = 4 + }, + .iv_size = { + .min = 12, + .max = 16, + .increment = 4 + } + }, } + }, } + }, + { /* AES GMAC (AUTH) */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_AES_GMAC, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .digest_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .iv_size = { + .min = 8, + .max = 65532, + .increment = 4 + } + }, } + }, } + }, + { /* 3DES CBC */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_3DES_CBC, + .block_size = 8, + .key_size = { + .min = 24, + .max = 24, + .increment = 0 + }, + .iv_size = { + .min = 8, + .max = 8, + .increment = 0 + } + }, } + }, } + }, + { /* 3DES CTR */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_3DES_CTR, + .block_size = 8, + .key_size = { + .min = 24, + .max = 24, + .increment = 0 + }, + .iv_size = { + .min = 8, + .max = 8, + .increment = 0 + } + }, } + }, } + }, + + RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() +}; + + +/** + * Configure device (PMD ops callback). + * + * @param dev Pointer to the device structure. + * @param config Pointer to configuration structure. + * @returns 0. Always. + */ +static int +mrvl_crypto_pmd_config(__rte_unused struct rte_cryptodev *dev, + __rte_unused struct rte_cryptodev_config *config) +{ + return 0; +} + +/** + * Start device (PMD ops callback). + * + * @param dev Pointer to the device structure. + * @returns 0. Always. + */ +static int +mrvl_crypto_pmd_start(__rte_unused struct rte_cryptodev *dev) +{ + return 0; +} + +/** + * Stop device (PMD ops callback). + * + * @param dev Pointer to the device structure. + * @returns 0. Always. + */ +static void +mrvl_crypto_pmd_stop(__rte_unused struct rte_cryptodev *dev) +{ +} + +/** + * Get device statistics (PMD ops callback). + * + * @param dev Pointer to the device structure. + * @param stats Pointer to statistics structure [out]. 
+ */
+static void
+mrvl_crypto_pmd_stats_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_stats *stats)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct mrvl_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+		stats->enqueued_count += qp->stats.enqueued_count;
+		stats->dequeued_count += qp->stats.dequeued_count;
+
+		stats->enqueue_err_count += qp->stats.enqueue_err_count;
+		stats->dequeue_err_count += qp->stats.dequeue_err_count;
+	}
+}
+
+/**
+ * Reset device statistics (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ */
+static void
+mrvl_crypto_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+		struct mrvl_crypto_qp *qp = dev->data->queue_pairs[qp_id];
+
+		memset(&qp->stats, 0, sizeof(qp->stats));
+	}
+}
+
+/**
+ * Get device info (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param dev_info Pointer to the device info structure [out].
+ */
+static void
+mrvl_crypto_pmd_info_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_info *dev_info)
+{
+	struct mrvl_crypto_private *internals = dev->data->dev_private;
+
+	if (dev_info != NULL) {
+		dev_info->driver_id = dev->driver_id;
+		dev_info->feature_flags = dev->feature_flags;
+		dev_info->capabilities = mrvl_crypto_pmd_capabilities;
+		dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+		dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+	}
+}
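A short sketch of how an application reaches the two callbacks above through the public API; dev_id is assumed to be a valid crypto device id:

#include <stdio.h>
#include <rte_cryptodev.h>

static void
print_mvsam_limits(uint8_t dev_id)
{
	struct rte_cryptodev_info info;

	rte_cryptodev_info_get(dev_id, &info);	/* ends up in info_get above */
	printf("max queue pairs: %u, max sessions: %u\n",
			info.max_nb_queue_pairs, info.sym.max_nb_sessions);
}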
+
+/**
+ * Release queue pair (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param qp_id ID of Queue Pair to release.
+ * @returns 0. Always.
+ */
+static int
+mrvl_crypto_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+	struct mrvl_crypto_qp *qp =
+			(struct mrvl_crypto_qp *)dev->data->queue_pairs[qp_id];
+
+	if (dev->data->queue_pairs[qp_id] != NULL) {
+		sam_cio_flush(qp->cio);
+		sam_cio_deinit(qp->cio);
+		rte_free(dev->data->queue_pairs[qp_id]);
+		dev->data->queue_pairs[qp_id] = NULL;
+	}
+
+	return 0;
+}
+
+/**
+ * Close device (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @returns 0. Always.
+ */
+static int
+mrvl_crypto_pmd_close(struct rte_cryptodev *dev)
+{
+	int qp_id;
+
+	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++)
+		mrvl_crypto_pmd_qp_release(dev, qp_id);
+
+	return 0;
+}
+
+/**
+ * Setup a queue pair (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param qp_id ID of the Queue Pair.
+ * @param qp_conf Queue pair configuration (nb of descriptors).
+ * @param socket_id NUMA socket to allocate memory on.
+ * @returns 0 upon success, negative value otherwise.
+ */
+static int
+mrvl_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+		const struct rte_cryptodev_qp_conf *qp_conf,
+		int socket_id, struct rte_mempool *session_pool)
+{
+	struct mrvl_crypto_qp *qp = NULL;
+	char match[RTE_CRYPTODEV_NAME_MAX_LEN];
+	unsigned int n;
+
+	/* Allocate the queue pair data structure. */
+	qp = rte_zmalloc_socket("MRVL Crypto PMD Queue Pair", sizeof(*qp),
+			RTE_CACHE_LINE_SIZE, socket_id);
+	if (qp == NULL)
+		return -ENOMEM;
+
+	/* Free the old qp prior to setup, if needed. */
+	if (dev->data->queue_pairs[qp_id] != NULL)
+		mrvl_crypto_pmd_qp_release(dev, qp_id);
+
+	do { /* Error handling block */
+
+		/*
+		 * This extra check is necessary due to a bug in
+		 * the crypto library.
+		 */
+		int num = sam_get_num_inst();
+		if (num == 0) {
+			MRVL_CRYPTO_LOG_ERR("No crypto engines detected.\n");
+			return -1;
+		}
+
+		/*
+		 * In case two crypto engines are enabled, qps will
+		 * be evenly spread among them. Even and odd qps will
+		 * be handled by cio-0 and cio-1 respectively. The qp-cio
+		 * mapping will look as follows:
+		 *
+		 * qp:      0        1        2        3
+		 * cio-x:y: cio-0:0, cio-1:0, cio-0:1, cio-1:1
+		 *
+		 * qp:      4        5        6        7
+		 * cio-x:y: cio-0:2, cio-1:2, cio-0:3, cio-1:3
+		 *
+		 * In case just one engine is enabled, the mapping will
+		 * look as follows:
+		 * qp:      0        1        2        3
+		 * cio-x:y: cio-0:0, cio-0:1, cio-0:2, cio-0:3
+		 */
+		n = snprintf(match, sizeof(match), "cio-%u:%u",
+				qp_id % num, qp_id / num);
+
+		if (n >= sizeof(match))
+			break;
+
+		qp->cio_params.match = match;
+		qp->cio_params.size = qp_conf->nb_descriptors;
+
+		if (sam_cio_init(&qp->cio_params, &qp->cio) < 0)
+			break;
+
+		qp->sess_mp = session_pool;
+
+		memset(&qp->stats, 0, sizeof(qp->stats));
+		dev->data->queue_pairs[qp_id] = qp;
+		return 0;
+	} while (0);
+
+	rte_free(qp);
+	return -1;
+}
+
+/** Return the number of allocated queue pairs (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @returns Number of allocated queue pairs.
+ */
+static uint32_t
+mrvl_crypto_pmd_qp_count(struct rte_cryptodev *dev)
+{
+	return dev->data->nb_queue_pairs;
+}
+
+/** Returns the size of the session structure (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure [Unused].
+ * @returns Size of Marvell crypto session.
+ */
+static unsigned
+mrvl_crypto_pmd_sym_session_get_size(__rte_unused struct rte_cryptodev *dev)
+{
+	return sizeof(struct mrvl_crypto_session);
+}
+
+/** Configure the session from a crypto xform chain (PMD ops callback).
+ *
+ * @param dev Pointer to the device structure.
+ * @param xform Pointer to the crypto configuration structure.
+ * @param sess Pointer to the empty session structure.
+ * @returns 0 upon success, negative value otherwise.
+ */
+static int
+mrvl_crypto_pmd_sym_session_configure(__rte_unused struct rte_cryptodev *dev,
+		struct rte_crypto_sym_xform *xform,
+		struct rte_cryptodev_sym_session *sess,
+		struct rte_mempool *mp)
+{
+	struct mrvl_crypto_session *mrvl_sess;
+	void *sess_private_data;
+	int ret;
+
+	if (sess == NULL) {
+		MRVL_CRYPTO_LOG_ERR("Invalid session struct.");
+		return -EINVAL;
+	}
+
+	if (rte_mempool_get(mp, &sess_private_data)) {
+		CDEV_LOG_ERR("Couldn't get object from session mempool.");
+		return -ENOMEM;
+	}
+
+	ret = mrvl_crypto_set_session_parameters(sess_private_data, xform);
+	if (ret != 0) {
+		MRVL_CRYPTO_LOG_ERR("Failed to configure session parameters.");
+
+		/* Return session to mempool */
+		rte_mempool_put(mp, sess_private_data);
+		return ret;
+	}
+
+	set_sym_session_private_data(sess, dev->driver_id, sess_private_data);
+
+	mrvl_sess = (struct mrvl_crypto_session *)sess_private_data;
+	if (sam_session_create(&mrvl_sess->sam_sess_params,
+				&mrvl_sess->sam_sess) < 0) {
+		MRVL_CRYPTO_LOG_DBG("Failed to create session!");
+		return -EIO;
+	}
+
+	return 0;
+}
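From the application side, the configure callback above runs underneath rte_cryptodev_sym_session_init(). A minimal sketch of the session lifecycle, assuming sess_mp is a mempool sized via sym_session_get_size() and key points at 16 bytes of AES key material:

#include <rte_cryptodev.h>

static struct rte_cryptodev_sym_session *
make_aes_cbc_session(uint8_t dev_id, struct rte_mempool *sess_mp,
		uint8_t *key)
{
	struct rte_crypto_sym_xform xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
			.key = { .data = key, .length = 16 },
			/* IV placed right after the op + sym op structs */
			.iv = { .offset = sizeof(struct rte_crypto_op) +
					sizeof(struct rte_crypto_sym_op),
				.length = 16 },
		},
	};
	struct rte_cryptodev_sym_session *sess =
			rte_cryptodev_sym_session_create(sess_mp);

	if (sess == NULL)
		return NULL;
	/* Calls the PMD's sym_session_configure callback internally. */
	if (rte_cryptodev_sym_session_init(dev_id, sess, &xform,
			sess_mp) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}
	return sess;
}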
+
+/**
+ * Clear the memory of session so it doesn't leave key material behind.
+ *
+ * @param dev Pointer to the device structure.
+ * @param sess Pointer to the session to clear.
+ */
+static void
+mrvl_crypto_pmd_sym_session_clear(struct rte_cryptodev *dev,
+		struct rte_cryptodev_sym_session *sess)
+{
+
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_sym_session_private_data(sess, index);
+
+	/* Zero out the whole structure */
+	if (sess_priv) {
+		struct mrvl_crypto_session *mrvl_sess =
+			(struct mrvl_crypto_session *)sess_priv;
+
+		if (mrvl_sess->sam_sess &&
+		    sam_session_destroy(mrvl_sess->sam_sess) < 0) {
+			MRVL_CRYPTO_LOG_INFO("Error while destroying session!");
+		}
+
+		memset(sess, 0, sizeof(struct mrvl_crypto_session));
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+		set_sym_session_private_data(sess, index, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
+	}
+}
+
+/**
+ * PMD handlers for crypto ops.
+ */
+static struct rte_cryptodev_ops mrvl_crypto_pmd_ops = {
+	.dev_configure = mrvl_crypto_pmd_config,
+	.dev_start = mrvl_crypto_pmd_start,
+	.dev_stop = mrvl_crypto_pmd_stop,
+	.dev_close = mrvl_crypto_pmd_close,
+
+	.dev_infos_get = mrvl_crypto_pmd_info_get,
+
+	.stats_get = mrvl_crypto_pmd_stats_get,
+	.stats_reset = mrvl_crypto_pmd_stats_reset,
+
+	.queue_pair_setup = mrvl_crypto_pmd_qp_setup,
+	.queue_pair_release = mrvl_crypto_pmd_qp_release,
+	.queue_pair_count = mrvl_crypto_pmd_qp_count,
+
+	.sym_session_get_size = mrvl_crypto_pmd_sym_session_get_size,
+	.sym_session_configure = mrvl_crypto_pmd_sym_session_configure,
+	.sym_session_clear = mrvl_crypto_pmd_sym_session_clear
+};
+
+struct rte_cryptodev_ops *rte_mrvl_crypto_pmd_ops = &mrvl_crypto_pmd_ops;
diff --git a/drivers/crypto/mvsam/rte_mrvl_pmd_private.h b/drivers/crypto/mvsam/rte_mrvl_pmd_private.h
new file mode 100644
index 00000000..c16d95b4
--- /dev/null
+++ b/drivers/crypto/mvsam/rte_mrvl_pmd_private.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ */
+
+#ifndef _RTE_MRVL_PMD_PRIVATE_H_
+#define _RTE_MRVL_PMD_PRIVATE_H_
+
+#include "rte_mrvl_compat.h"
+
+#define CRYPTODEV_NAME_MRVL_PMD crypto_mvsam
+/**< Marvell PMD device name */
+
+#define MRVL_CRYPTO_LOG_ERR(fmt, args...) \
+	RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \
+			__func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_PMD_MRVL_CRYPTO_DEBUG
+#define MRVL_CRYPTO_LOG_INFO(fmt, args...) \
+	RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \
+			__func__, __LINE__, ## args)
+
+#define MRVL_CRYPTO_LOG_DBG(fmt, args...) \
+	RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+			RTE_STR(CRYPTODEV_NAME_MRVL_PMD), \
+			__func__, __LINE__, ## args)
+
+#else
+#define MRVL_CRYPTO_LOG_INFO(fmt, args...)
+#define MRVL_CRYPTO_LOG_DBG(fmt, args...)
+#endif
+
+/**
+ * Handy bits->bytes conversion macro.
+ */
+#define BITS2BYTES(x) ((x) >> 3)
+
+/** The operation order mode enumerator. */
+enum mrvl_crypto_chain_order {
+	MRVL_CRYPTO_CHAIN_CIPHER_ONLY,
+	MRVL_CRYPTO_CHAIN_AUTH_ONLY,
+	MRVL_CRYPTO_CHAIN_CIPHER_AUTH,
+	MRVL_CRYPTO_CHAIN_AUTH_CIPHER,
+	MRVL_CRYPTO_CHAIN_COMBINED,
+	MRVL_CRYPTO_CHAIN_NOT_SUPPORTED,
+};
+
+/** Private data structure for each crypto device. */
+struct mrvl_crypto_private {
+	unsigned int max_nb_qpairs; /**< Max number of queue pairs */
+	unsigned int max_nb_sessions; /**< Max number of sessions */
+};
+
+/** MRVL crypto queue pair structure. */
+struct mrvl_crypto_qp {
+	/** SAM CIO (MUSDK Queue Pair equivalent). */
+	struct sam_cio *cio;
+
+	/** Session Mempool.
*/ + struct rte_mempool *sess_mp; + + /** Queue pair statistics. */ + struct rte_cryptodev_stats stats; + + /** CIO initialization parameters.*/ + struct sam_cio_params cio_params; +} __rte_cache_aligned; + +/** MRVL crypto private session structure. */ +struct mrvl_crypto_session { + /** Crypto operations chain order. */ + enum mrvl_crypto_chain_order chain_order; + + /** Session initialization parameters. */ + struct sam_session_params sam_sess_params; + + /** SAM session pointer. */ + struct sam_sa *sam_sess; + + /** Cipher IV offset. */ + uint16_t cipher_iv_offset; +} __rte_cache_aligned; + +/** Set and validate MRVL crypto session parameters */ +extern int +mrvl_crypto_set_session_parameters(struct mrvl_crypto_session *sess, + const struct rte_crypto_sym_xform *xform); + +/** device specific operations function pointer structure */ +extern struct rte_cryptodev_ops *rte_mrvl_crypto_pmd_ops; + +#endif /* _RTE_MRVL_PMD_PRIVATE_H_ */ diff --git a/drivers/crypto/mvsam/rte_pmd_mvsam_version.map b/drivers/crypto/mvsam/rte_pmd_mvsam_version.map new file mode 100644 index 00000000..a7530317 --- /dev/null +++ b/drivers/crypto/mvsam/rte_pmd_mvsam_version.map @@ -0,0 +1,3 @@ +DPDK_17.11 { + local: *; +}; diff --git a/drivers/crypto/null/null_crypto_pmd.c b/drivers/crypto/null/null_crypto_pmd.c index dc8e5776..6e29a21a 100644 --- a/drivers/crypto/null/null_crypto_pmd.c +++ b/drivers/crypto/null/null_crypto_pmd.c @@ -78,7 +78,7 @@ get_session(struct null_crypto_qp *qp, struct rte_crypto_op *op) if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { if (likely(sym_op->session != NULL)) sess = (struct null_crypto_session *) - get_session_private_data( + get_sym_session_private_data( sym_op->session, cryptodev_driver_id); } else { void *_sess = NULL; @@ -99,8 +99,8 @@ get_session(struct null_crypto_qp *qp, struct rte_crypto_op *op) sess = NULL; } sym_op->session = (struct rte_cryptodev_sym_session *)_sess; - set_session_private_data(sym_op->session, cryptodev_driver_id, - _sess_private_data); + set_sym_session_private_data(op->sym->session, + cryptodev_driver_id, _sess_private_data); } return sess; @@ -161,10 +161,9 @@ cryptodev_null_create(const char *name, { struct rte_cryptodev *dev; struct null_crypto_private *internals; - dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params); if (dev == NULL) { - NULL_CRYPTO_LOG_ERR("failed to create cryptodev vdev"); + NULL_LOG(ERR, "failed to create cryptodev vdev"); return -EFAULT; } @@ -177,12 +176,11 @@ cryptodev_null_create(const char *name, dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | - RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER; + RTE_CRYPTODEV_FF_IN_PLACE_SGL; internals = dev->data->dev_private; internals->max_nb_qpairs = init_params->max_nb_queue_pairs; - internals->max_nb_sessions = init_params->max_nb_sessions; return 0; } @@ -195,8 +193,7 @@ cryptodev_null_probe(struct rte_vdev_device *dev) "", sizeof(struct null_crypto_private), rte_socket_id(), - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS, - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS + RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS }; const char *name, *args; int retval; @@ -209,8 +206,9 @@ cryptodev_null_probe(struct rte_vdev_device *dev) retval = rte_cryptodev_pmd_parse_input_args(&init_params, args); if (retval) { - RTE_LOG(ERR, PMD, - "Failed to parse initialisation arguments[%s]\n", args); + NULL_LOG(ERR, + "Failed to parse initialisation arguments[%s]", + args); return -EINVAL; } @@ -245,7 +243,11 @@ 
RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_NULL_PMD, cryptodev_null_pmd_drv); RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_NULL_PMD, cryptodev_null_pmd); RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_NULL_PMD, "max_nb_queue_pairs= " - "max_nb_sessions= " "socket_id="); -RTE_PMD_REGISTER_CRYPTO_DRIVER(null_crypto_drv, cryptodev_null_pmd_drv, +RTE_PMD_REGISTER_CRYPTO_DRIVER(null_crypto_drv, cryptodev_null_pmd_drv.driver, cryptodev_driver_id); + +RTE_INIT(null_init_log) +{ + null_logtype_driver = rte_log_register("pmd.crypto.null"); +} diff --git a/drivers/crypto/null/null_crypto_pmd_ops.c b/drivers/crypto/null/null_crypto_pmd_ops.c index f8e5f61f..bb2b6e14 100644 --- a/drivers/crypto/null/null_crypto_pmd_ops.c +++ b/drivers/crypto/null/null_crypto_pmd_ops.c @@ -121,7 +121,8 @@ null_crypto_pmd_info_get(struct rte_cryptodev *dev, if (dev_info != NULL) { dev_info->driver_id = dev->driver_id; dev_info->max_nb_queue_pairs = internals->max_nb_qpairs; - dev_info->sym.max_nb_sessions = internals->max_nb_sessions; + /* No limit of number of sessions */ + dev_info->sym.max_nb_sessions = 0; dev_info->feature_flags = dev->feature_flags; dev_info->capabilities = null_crypto_pmd_capabilities; } @@ -163,15 +164,15 @@ null_crypto_pmd_qp_create_processed_pkts_ring(struct null_crypto_qp *qp, r = rte_ring_lookup(qp->name); if (r) { if (rte_ring_get_size(r) >= ring_size) { - NULL_CRYPTO_LOG_INFO( - "Reusing existing ring %s for processed packets", - qp->name); + NULL_LOG(INFO, + "Reusing existing ring %s for " + " processed packets", qp->name); return r; } - NULL_CRYPTO_LOG_INFO( - "Unable to reuse existing ring %s for processed packets", - qp->name); + NULL_LOG(INFO, + "Unable to reuse existing ring %s for " + " processed packets", qp->name); return NULL; } @@ -190,7 +191,7 @@ null_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, int retval; if (qp_id >= internals->max_nb_qpairs) { - NULL_CRYPTO_LOG_ERR("Invalid qp_id %u, greater than maximum " + NULL_LOG(ERR, "Invalid qp_id %u, greater than maximum " "number of queue pairs supported (%u).", qp_id, internals->max_nb_qpairs); return (-EINVAL); @@ -204,7 +205,7 @@ null_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, qp = rte_zmalloc_socket("Null Crypto PMD Queue Pair", sizeof(*qp), RTE_CACHE_LINE_SIZE, socket_id); if (qp == NULL) { - NULL_CRYPTO_LOG_ERR("Failed to allocate queue pair memory"); + NULL_LOG(ERR, "Failed to allocate queue pair memory"); return (-ENOMEM); } @@ -213,15 +214,16 @@ null_crypto_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, retval = null_crypto_pmd_qp_set_unique_name(dev, qp); if (retval) { - NULL_CRYPTO_LOG_ERR("Failed to create unique name for null " + NULL_LOG(ERR, "Failed to create unique name for null " "crypto device"); + goto qp_setup_cleanup; } qp->processed_pkts = null_crypto_pmd_qp_create_processed_pkts_ring(qp, qp_conf->nb_descriptors, socket_id); if (qp->processed_pkts == NULL) { - NULL_CRYPTO_LOG_ERR("Failed to create unique name for null " + NULL_LOG(ERR, "Failed to create unique name for null " "crypto device"); goto qp_setup_cleanup; } @@ -239,22 +241,6 @@ qp_setup_cleanup: return -1; } -/** Start queue pair */ -static int -null_crypto_pmd_qp_start(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - -/** Stop queue pair */ -static int -null_crypto_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - /** Return the number of allocated queue pairs */ static uint32_t 
null_crypto_pmd_qp_count(struct rte_cryptodev *dev) @@ -264,14 +250,14 @@ null_crypto_pmd_qp_count(struct rte_cryptodev *dev) /** Returns the size of the NULL crypto session structure */ static unsigned -null_crypto_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused) +null_crypto_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) { return sizeof(struct null_crypto_session); } /** Configure a null crypto session from a crypto xform chain */ static int -null_crypto_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, +null_crypto_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused, struct rte_crypto_sym_xform *xform, struct rte_cryptodev_sym_session *sess, struct rte_mempool *mp) @@ -280,26 +266,26 @@ null_crypto_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, int ret; if (unlikely(sess == NULL)) { - NULL_CRYPTO_LOG_ERR("invalid session struct"); + NULL_LOG(ERR, "invalid session struct"); return -EINVAL; } if (rte_mempool_get(mp, &sess_private_data)) { - CDEV_LOG_ERR( - "Couldn't get object from session mempool"); + NULL_LOG(ERR, + "Couldn't get object from session mempool"); return -ENOMEM; } ret = null_crypto_set_session_parameters(sess_private_data, xform); if (ret != 0) { - NULL_CRYPTO_LOG_ERR("failed configure session parameters"); + NULL_LOG(ERR, "failed configure session parameters"); /* Return session to mempool */ rte_mempool_put(mp, sess_private_data); return ret; } - set_session_private_data(sess, dev->driver_id, + set_sym_session_private_data(sess, dev->driver_id, sess_private_data); return 0; @@ -307,17 +293,17 @@ null_crypto_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, /** Clear the memory of session so it doesn't leave key material behind */ static void -null_crypto_pmd_session_clear(struct rte_cryptodev *dev, +null_crypto_pmd_sym_session_clear(struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess) { uint8_t index = dev->driver_id; - void *sess_priv = get_session_private_data(sess, index); + void *sess_priv = get_sym_session_private_data(sess, index); /* Zero out the whole structure */ if (sess_priv) { memset(sess_priv, 0, sizeof(struct null_crypto_session)); struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); - set_session_private_data(sess, index, NULL); + set_sym_session_private_data(sess, index, NULL); rte_mempool_put(sess_mp, sess_priv); } } @@ -335,13 +321,11 @@ struct rte_cryptodev_ops pmd_ops = { .queue_pair_setup = null_crypto_pmd_qp_setup, .queue_pair_release = null_crypto_pmd_qp_release, - .queue_pair_start = null_crypto_pmd_qp_start, - .queue_pair_stop = null_crypto_pmd_qp_stop, .queue_pair_count = null_crypto_pmd_qp_count, - .session_get_size = null_crypto_pmd_session_get_size, - .session_configure = null_crypto_pmd_session_configure, - .session_clear = null_crypto_pmd_session_clear + .sym_session_get_size = null_crypto_pmd_sym_session_get_size, + .sym_session_configure = null_crypto_pmd_sym_session_configure, + .sym_session_clear = null_crypto_pmd_sym_session_clear }; struct rte_cryptodev_ops *null_crypto_pmd_ops = &pmd_ops; diff --git a/drivers/crypto/null/null_crypto_pmd_private.h b/drivers/crypto/null/null_crypto_pmd_private.h index 0fd13362..d5905afd 100644 --- a/drivers/crypto/null/null_crypto_pmd_private.h +++ b/drivers/crypto/null/null_crypto_pmd_private.h @@ -8,31 +8,17 @@ #define CRYPTODEV_NAME_NULL_PMD crypto_null /**< Null crypto PMD device name */ -#define NULL_CRYPTO_LOG_ERR(fmt, args...) 
\ - RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_NULL_PMD), \ - __func__, __LINE__, ## args) +int null_logtype_driver; -#ifdef RTE_LIBRTE_NULL_CRYPTO_DEBUG -#define NULL_CRYPTO_LOG_INFO(fmt, args...) \ - RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_NULL_PMD), \ - __func__, __LINE__, ## args) - -#define NULL_CRYPTO_LOG_DBG(fmt, args...) \ - RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_NULL_PMD), \ - __func__, __LINE__, ## args) -#else -#define NULL_CRYPTO_LOG_INFO(fmt, args...) -#define NULL_CRYPTO_LOG_DBG(fmt, args...) -#endif +#define NULL_LOG(level, fmt, ...) \ + rte_log(RTE_LOG_ ## level, null_logtype_driver, \ + "%s() line %u: "fmt "\n", __func__, __LINE__, \ + ## __VA_ARGS__) /** private data structure for each NULL crypto device */ struct null_crypto_private { unsigned max_nb_qpairs; /**< Max number of queue pairs */ - unsigned max_nb_sessions; /**< Max number of sessions */ }; /** NULL crypto queue pair */ diff --git a/drivers/crypto/openssl/compat.h b/drivers/crypto/openssl/compat.h new file mode 100644 index 00000000..45f9a33d --- /dev/null +++ b/drivers/crypto/openssl/compat.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 Cavium Networks + */ + +#ifndef __RTA_COMPAT_H__ +#define __RTA_COMPAT_H__ + +#if (OPENSSL_VERSION_NUMBER < 0x10100000L) + +#define set_rsa_params(rsa, p, q, ret) \ + do {rsa->p = p; rsa->q = q; ret = 0; } while (0) + +#define set_rsa_crt_params(rsa, dmp1, dmq1, iqmp, ret) \ + do { \ + rsa->dmp1 = dmp1; \ + rsa->dmq1 = dmq1; \ + rsa->iqmp = iqmp; \ + ret = 0; \ + } while (0) + +#define set_rsa_keys(rsa, n, e, d, ret) \ + do { \ + rsa->n = n; rsa->e = e; rsa->d = d; ret = 0; \ + } while (0) + +#define set_dh_params(dh, p, g, ret) \ + do { \ + dh->p = p; \ + dh->q = NULL; \ + dh->g = g; \ + ret = 0; \ + } while (0) + +#define set_dh_priv_key(dh, priv_key, ret) \ + do { dh->priv_key = priv_key; ret = 0; } while (0) + +#define set_dsa_params(dsa, p, q, g, ret) \ + do { dsa->p = p; dsa->q = q; dsa->g = g; ret = 0; } while (0) + +#define get_dh_pub_key(dh, pub_key) \ + (pub_key = dh->pub_key) + +#define get_dh_priv_key(dh, priv_key) \ + (priv_key = dh->priv_key) + +#define set_dsa_sign(sign, r, s) \ + do { sign->r = r; sign->s = s; } while (0) + +#define get_dsa_sign(sign, r, s) \ + do { r = sign->r; s = sign->s; } while (0) + +#define set_dsa_keys(dsa, pub, priv, ret) \ + do { dsa->pub_key = pub; dsa->priv_key = priv; ret = 0; } while (0) + +#define set_dsa_pub_key(dsa, pub_key) \ + (dsa->pub_key = pub_key) + +#define get_dsa_priv_key(dsa, priv_key) \ + (priv_key = dsa->priv_key) + +#else + +#define set_rsa_params(rsa, p, q, ret) \ + (ret = !RSA_set0_factors(rsa, p, q)) + +#define set_rsa_crt_params(rsa, dmp1, dmq1, iqmp, ret) \ + (ret = !RSA_set0_crt_params(rsa, dmp1, dmq1, iqmp)) + +/* n, e must be non-null, d can be NULL */ +#define set_rsa_keys(rsa, n, e, d, ret) \ + (ret = !RSA_set0_key(rsa, n, e, d)) + +#define set_dh_params(dh, p, g, ret) \ + (ret = !DH_set0_pqg(dh, p, NULL, g)) + +#define set_dh_priv_key(dh, priv_key, ret) \ + (ret = !DH_set0_key(dh, NULL, priv_key)) + +#define get_dh_pub_key(dh, pub_key) \ + (DH_get0_key(dh_key, &pub_key, NULL)) + +#define get_dh_priv_key(dh, priv_key) \ + (DH_get0_key(dh_key, NULL, &priv_key)) + +#define set_dsa_params(dsa, p, q, g, ret) \ + (ret = !DSA_set0_pqg(dsa, p, q, g)) + +#define set_dsa_priv_key(dsa, priv_key) \ + (DSA_set0_key(dsa, NULL, priv_key)) + +#define 
set_dsa_sign(sign, r, s) \ + (DSA_SIG_set0(sign, r, s)) + +#define get_dsa_sign(sign, r, s) \ + (DSA_SIG_get0(sign, &r, &s)) + +#define set_dsa_keys(dsa, pub, priv, ret) \ + (ret = !DSA_set0_key(dsa, pub, priv)) + +#define set_dsa_pub_key(dsa, pub_key) \ + (DSA_set0_key(dsa, pub_key, NULL)) + +#define get_dsa_priv_key(dsa, priv_key) \ + (DSA_get0_key(dsa, NULL, &priv_key)) + +#endif /* version < 10100000 */ + +#endif /* __RTA_COMPAT_H__ */ diff --git a/drivers/crypto/openssl/rte_openssl_pmd.c b/drivers/crypto/openssl/rte_openssl_pmd.c index 0d9bedc0..7d263aba 100644 --- a/drivers/crypto/openssl/rte_openssl_pmd.c +++ b/drivers/crypto/openssl/rte_openssl_pmd.c @@ -14,6 +14,7 @@ #include #include "rte_openssl_pmd_private.h" +#include "compat.h" #define DES_BLOCK_SIZE 8 @@ -119,7 +120,7 @@ get_cipher_key_ede(uint8_t *key, int keylen, uint8_t *key_ede) memcpy(key_ede + 16, key, 8); break; default: - OPENSSL_LOG_ERR("Unsupported key size"); + OPENSSL_LOG(ERR, "Unsupported key size"); res = -EINVAL; } @@ -137,6 +138,9 @@ get_cipher_algo(enum rte_crypto_cipher_algorithm sess_algo, size_t keylen, switch (sess_algo) { case RTE_CRYPTO_CIPHER_3DES_CBC: switch (keylen) { + case 8: + *algo = EVP_des_cbc(); + break; case 16: *algo = EVP_des_ede_cbc(); break; @@ -677,7 +681,7 @@ openssl_set_session_parameters(struct openssl_session *sess, ret = openssl_set_session_cipher_parameters( sess, cipher_xform); if (ret != 0) { - OPENSSL_LOG_ERR( + OPENSSL_LOG(ERR, "Invalid/unsupported cipher parameters"); return ret; } @@ -686,7 +690,7 @@ openssl_set_session_parameters(struct openssl_session *sess, if (auth_xform) { ret = openssl_set_session_auth_parameters(sess, auth_xform); if (ret != 0) { - OPENSSL_LOG_ERR( + OPENSSL_LOG(ERR, "Invalid/unsupported auth parameters"); return ret; } @@ -695,7 +699,7 @@ openssl_set_session_parameters(struct openssl_session *sess, if (aead_xform) { ret = openssl_set_session_aead_parameters(sess, aead_xform); if (ret != 0) { - OPENSSL_LOG_ERR( + OPENSSL_LOG(ERR, "Invalid/unsupported AEAD parameters"); return ret; } @@ -727,19 +731,36 @@ openssl_reset_session(struct openssl_session *sess) } /** Provide session for operation */ -static struct openssl_session * +static void * get_session(struct openssl_qp *qp, struct rte_crypto_op *op) { struct openssl_session *sess = NULL; + struct openssl_asym_session *asym_sess = NULL; if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { - /* get existing session */ - if (likely(op->sym->session != NULL)) - sess = (struct openssl_session *) - get_session_private_data( - op->sym->session, - cryptodev_driver_id); + if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) { + /* get existing session */ + if (likely(op->sym->session != NULL)) + sess = (struct openssl_session *) + get_sym_session_private_data( + op->sym->session, + cryptodev_driver_id); + } else { + if (likely(op->asym->session != NULL)) + asym_sess = (struct openssl_asym_session *) + get_asym_session_private_data( + op->asym->session, + cryptodev_driver_id); + if (asym_sess == NULL) + op->status = + RTE_CRYPTO_OP_STATUS_INVALID_SESSION; + return asym_sess; + } } else { + /* sessionless asymmetric not supported */ + if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) + return NULL; + /* provide internal session */ void *_sess = NULL; void *_sess_private_data = NULL; @@ -759,8 +780,8 @@ get_session(struct openssl_qp *qp, struct rte_crypto_op *op) sess = NULL; } op->sym->session = (struct rte_cryptodev_sym_session *)_sess; - set_session_private_data(op->sym->session, cryptodev_driver_id, - 
_sess_private_data); + set_sym_session_private_data(op->sym->session, + cryptodev_driver_id, _sess_private_data); } if (sess == NULL) @@ -884,7 +905,7 @@ process_openssl_cipher_encrypt(struct rte_mbuf *mbuf_src, uint8_t *dst, return 0; process_cipher_encrypt_err: - OPENSSL_LOG_ERR("Process openssl cipher encrypt failed"); + OPENSSL_LOG(ERR, "Process openssl cipher encrypt failed"); return -EINVAL; } @@ -908,7 +929,7 @@ process_openssl_cipher_bpi_encrypt(uint8_t *src, uint8_t *dst, return 0; process_cipher_encrypt_err: - OPENSSL_LOG_ERR("Process openssl cipher bpi encrypt failed"); + OPENSSL_LOG(ERR, "Process openssl cipher bpi encrypt failed"); return -EINVAL; } /** Process standard openssl cipher decryption */ @@ -932,7 +953,7 @@ process_openssl_cipher_decrypt(struct rte_mbuf *mbuf_src, uint8_t *dst, return 0; process_cipher_decrypt_err: - OPENSSL_LOG_ERR("Process openssl cipher decrypt failed"); + OPENSSL_LOG(ERR, "Process openssl cipher decrypt failed"); return -EINVAL; } @@ -989,7 +1010,7 @@ process_openssl_cipher_des3ctr(struct rte_mbuf *mbuf_src, uint8_t *dst, return 0; process_cipher_des3ctr_err: - OPENSSL_LOG_ERR("Process openssl cipher des 3 ede ctr failed"); + OPENSSL_LOG(ERR, "Process openssl cipher des 3 ede ctr failed"); return -EINVAL; } @@ -1027,7 +1048,7 @@ process_openssl_auth_encryption_gcm(struct rte_mbuf *mbuf_src, int offset, return 0; process_auth_encryption_gcm_err: - OPENSSL_LOG_ERR("Process openssl auth encryption gcm failed"); + OPENSSL_LOG(ERR, "Process openssl auth encryption gcm failed"); return -EINVAL; } @@ -1068,7 +1089,7 @@ process_openssl_auth_encryption_ccm(struct rte_mbuf *mbuf_src, int offset, return 0; process_auth_encryption_ccm_err: - OPENSSL_LOG_ERR("Process openssl auth encryption ccm failed"); + OPENSSL_LOG(ERR, "Process openssl auth encryption ccm failed"); return -EINVAL; } @@ -1106,7 +1127,7 @@ process_openssl_auth_decryption_gcm(struct rte_mbuf *mbuf_src, int offset, return 0; process_auth_decryption_gcm_err: - OPENSSL_LOG_ERR("Process openssl auth decryption gcm failed"); + OPENSSL_LOG(ERR, "Process openssl auth decryption gcm failed"); return -EINVAL; } @@ -1145,7 +1166,7 @@ process_openssl_auth_decryption_ccm(struct rte_mbuf *mbuf_src, int offset, return 0; process_auth_decryption_ccm_err: - OPENSSL_LOG_ERR("Process openssl auth decryption ccm failed"); + OPENSSL_LOG(ERR, "Process openssl auth decryption ccm failed"); return -EINVAL; } @@ -1198,7 +1219,7 @@ process_auth_final: return 0; process_auth_err: - OPENSSL_LOG_ERR("Process openssl auth failed"); + OPENSSL_LOG(ERR, "Process openssl auth failed"); return -EINVAL; } @@ -1251,7 +1272,7 @@ process_auth_final: return 0; process_auth_err: - OPENSSL_LOG_ERR("Process openssl auth failed"); + OPENSSL_LOG(ERR, "Process openssl auth failed"); return -EINVAL; } @@ -1525,6 +1546,433 @@ process_openssl_auth_op(struct openssl_qp *qp, struct rte_crypto_op *op, op->status = RTE_CRYPTO_OP_STATUS_ERROR; } +/* process dsa sign operation */ +static int +process_openssl_dsa_sign_op(struct rte_crypto_op *cop, + struct openssl_asym_session *sess) +{ + struct rte_crypto_dsa_op_param *op = &cop->asym->dsa; + DSA *dsa = sess->u.s.dsa; + DSA_SIG *sign = NULL; + + sign = DSA_do_sign(op->message.data, + op->message.length, + dsa); + + if (sign == NULL) { + OPENSSL_LOG(ERR, "%s:%d\n", __func__, __LINE__); + cop->status = RTE_CRYPTO_OP_STATUS_ERROR; + } else { + const BIGNUM *r = NULL, *s = NULL; + get_dsa_sign(sign, r, s); + + op->r.length = BN_bn2bin(r, op->r.data); + op->s.length = BN_bn2bin(s, op->s.data); + 
cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	}
+
+	DSA_SIG_free(sign);
+
+	return 0;
+}
+
+/* process dsa verify operation */
+static int
+process_openssl_dsa_verify_op(struct rte_crypto_op *cop,
+		struct openssl_asym_session *sess)
+{
+	struct rte_crypto_dsa_op_param *op = &cop->asym->dsa;
+	DSA *dsa = sess->u.s.dsa;
+	int ret;
+	DSA_SIG *sign = DSA_SIG_new();
+	BIGNUM *r = NULL, *s = NULL;
+	BIGNUM *pub_key = NULL;
+
+	if (sign == NULL) {
+		OPENSSL_LOG(ERR, "%s:%d\n", __func__, __LINE__);
+		cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+		return -1;
+	}
+
+	r = BN_bin2bn(op->r.data,
+			op->r.length,
+			r);
+	s = BN_bin2bn(op->s.data,
+			op->s.length,
+			s);
+	pub_key = BN_bin2bn(op->y.data,
+			op->y.length,
+			pub_key);
+	if (!r || !s || !pub_key) {
+		if (r)
+			BN_free(r);
+		if (s)
+			BN_free(s);
+		if (pub_key)
+			BN_free(pub_key);
+
+		cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+		return -1;
+	}
+	set_dsa_sign(sign, r, s);
+	set_dsa_pub_key(dsa, pub_key);
+
+	ret = DSA_do_verify(op->message.data,
+			op->message.length,
+			sign,
+			dsa);
+
+	if (ret != 1)
+		cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+	else
+		cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+	DSA_SIG_free(sign);
+
+	return 0;
+}
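For orientation, a hedged sketch of how a caller would fill the DSA sign request consumed by the handler above; op and session allocation, and the r/s output buffers, are assumed to be done elsewhere, and the helper name is illustrative:

#include <rte_crypto_asym.h>
#include <rte_cryptodev.h>

static void
fill_dsa_sign_op(struct rte_crypto_op *op,
		struct rte_cryptodev_asym_session *sess,
		uint8_t *msg, size_t msg_len,
		uint8_t *r_buf, uint8_t *s_buf)
{
	struct rte_crypto_dsa_op_param *dsa = &op->asym->dsa;

	rte_crypto_op_attach_asym_session(op, sess);
	dsa->op_type = RTE_CRYPTO_ASYM_OP_SIGN;
	dsa->message.data = msg;
	dsa->message.length = msg_len;
	/* The BN_bn2bin() calls in the sign path write r and s here. */
	dsa->r.data = r_buf;
	dsa->s.data = s_buf;
}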
+
+/* process dh operation */
+static int
+process_openssl_dh_op(struct rte_crypto_op *cop,
+		struct openssl_asym_session *sess)
+{
+	struct rte_crypto_dh_op_param *op = &cop->asym->dh;
+	DH *dh_key = sess->u.dh.dh_key;
+	BIGNUM *priv_key = NULL;
+	int ret = 0;
+
+	if (sess->u.dh.key_op &
+			(1 << RTE_CRYPTO_ASYM_OP_SHARED_SECRET_COMPUTE)) {
+		/* compute shared secret using peer public key
+		 * and current private key
+		 * shared secret = peer_key ^ priv_key mod p
+		 */
+		BIGNUM *peer_key = NULL;
+
+		/* copy private key and peer key and compute shared secret */
+		peer_key = BN_bin2bn(op->pub_key.data,
+				op->pub_key.length,
+				peer_key);
+		if (peer_key == NULL) {
+			cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+			return -1;
+		}
+		priv_key = BN_bin2bn(op->priv_key.data,
+				op->priv_key.length,
+				priv_key);
+		if (priv_key == NULL) {
+			BN_free(peer_key);
+			cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+			return -1;
+		}
+		set_dh_priv_key(dh_key, priv_key, ret);
+		if (ret) {
+			OPENSSL_LOG(ERR, "Failed to set private key\n");
+			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			BN_free(peer_key);
+			BN_free(priv_key);
+			return 0;
+		}
+
+		ret = DH_compute_key(
+				op->shared_secret.data,
+				peer_key, dh_key);
+		if (ret < 0) {
+			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			BN_free(peer_key);
+			/* the private key is already loaded into dh,
+			 * so do not free it directly here;
+			 * DH_free() will free it later.
+			 */
+			return 0;
+		}
+		cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+		op->shared_secret.length = ret;
+		BN_free(peer_key);
+		return 0;
+	}
+
+	/*
+	 * The remaining options are public and private key generation.
+	 *
+	 * If the user provides a private key,
+	 * then first set DH with the user-provided private key.
+	 */
+	if ((sess->u.dh.key_op &
+			(1 << RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE)) &&
+			!(sess->u.dh.key_op &
+			(1 << RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE))) {
+		/* generate public key using user-provided private key
+		 * pub_key = g ^ priv_key mod p
+		 */
+
+		/* load private key into DH */
+		priv_key = BN_bin2bn(op->priv_key.data,
+				op->priv_key.length,
+				priv_key);
+		if (priv_key == NULL) {
+			cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+			return -1;
+		}
+		set_dh_priv_key(dh_key, priv_key, ret);
+		if (ret) {
+			OPENSSL_LOG(ERR, "Failed to set private key\n");
+			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			BN_free(priv_key);
+			return 0;
+		}
+	}
+
+	/* generate public and private key pair.
+	 *
+	 * if the private key is already set, generate only the public key.
+	 *
+	 * if the private key is not already set, then set it to a random
+	 * value and update the internal private key.
+	 */
+	if (!DH_generate_key(dh_key)) {
+		cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+		return 0;
+	}
+
+	if (sess->u.dh.key_op & (1 << RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE)) {
+		const BIGNUM *pub_key = NULL;
+
+		OPENSSL_LOG(DEBUG, "%s:%d update public key\n",
+				__func__, __LINE__);
+
+		/* get the generated keys */
+		get_dh_pub_key(dh_key, pub_key);
+
+		/* output public key */
+		op->pub_key.length = BN_bn2bin(pub_key,
+				op->pub_key.data);
+	}
+
+	if (sess->u.dh.key_op &
+			(1 << RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE)) {
+		const BIGNUM *priv_key = NULL;
+
+		OPENSSL_LOG(DEBUG, "%s:%d update private key\n",
+				__func__, __LINE__);
+
+		/* get the generated keys */
+		get_dh_priv_key(dh_key, priv_key);
+
+		/* provide generated private key back to user */
+		op->priv_key.length = BN_bn2bin(priv_key,
+				op->priv_key.data);
+	}
+
+	cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+
+	return 0;
+}
+
+/* process modinv operation */
+static int
+process_openssl_modinv_op(struct rte_crypto_op *cop,
+		struct openssl_asym_session *sess)
+{
+	struct rte_crypto_asym_op *op = cop->asym;
+	BIGNUM *base = BN_CTX_get(sess->u.m.ctx);
+	BIGNUM *res = BN_CTX_get(sess->u.m.ctx);
+
+	if (unlikely(base == NULL || res == NULL)) {
+		if (base)
+			BN_free(base);
+		if (res)
+			BN_free(res);
+		cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+		return -1;
+	}
+
+	base = BN_bin2bn((const unsigned char *)op->modinv.base.data,
+			op->modinv.base.length, base);
+
+	if (BN_mod_inverse(res, base, sess->u.m.modulus, sess->u.m.ctx)) {
+		cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+		op->modinv.base.length = BN_bn2bin(res, op->modinv.base.data);
+	} else {
+		cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+	}
+
+	return 0;
+}
+
+/* process modexp operation */
+static int
+process_openssl_modexp_op(struct rte_crypto_op *cop,
+		struct openssl_asym_session *sess)
+{
+	struct rte_crypto_asym_op *op = cop->asym;
+	BIGNUM *base = BN_CTX_get(sess->u.e.ctx);
+	BIGNUM *res = BN_CTX_get(sess->u.e.ctx);
+
+	if (unlikely(base == NULL || res == NULL)) {
+		if (base)
+			BN_free(base);
+		if (res)
+			BN_free(res);
+		cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+		return -1;
+	}
+
+	base = BN_bin2bn((const unsigned char *)op->modinv.base.data,
+			op->modinv.base.length, base);
+
+	if (BN_mod_exp(res, base, sess->u.e.exp,
+			sess->u.e.mod, sess->u.e.ctx)) {
+		op->modinv.base.length = BN_bn2bin(res, op->modinv.base.data);
+		cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	} else {
+		cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+	}
+
+	return 0;
+}
+
+/* process rsa operations */
+static int
+process_openssl_rsa_op(struct
rte_crypto_op *cop, + struct openssl_asym_session *sess) +{ + int ret = 0; + struct rte_crypto_asym_op *op = cop->asym; + RSA *rsa = sess->u.r.rsa; + uint32_t pad = (op->rsa.pad); + + switch (pad) { + case RTE_CRYPTO_RSA_PKCS1_V1_5_BT0: + case RTE_CRYPTO_RSA_PKCS1_V1_5_BT1: + case RTE_CRYPTO_RSA_PKCS1_V1_5_BT2: + pad = RSA_PKCS1_PADDING; + break; + case RTE_CRYPTO_RSA_PADDING_NONE: + pad = RSA_NO_PADDING; + break; + default: + cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + OPENSSL_LOG(ERR, + "rsa pad type not supported %d\n", pad); + return 0; + } + + switch (op->rsa.op_type) { + case RTE_CRYPTO_ASYM_OP_ENCRYPT: + ret = RSA_public_encrypt(op->rsa.message.length, + op->rsa.message.data, + op->rsa.message.data, + rsa, + pad); + + if (ret > 0) + op->rsa.message.length = ret; + OPENSSL_LOG(DEBUG, + "length of encrypted text %d\n", ret); + break; + + case RTE_CRYPTO_ASYM_OP_DECRYPT: + ret = RSA_private_decrypt(op->rsa.message.length, + op->rsa.message.data, + op->rsa.message.data, + rsa, + pad); + if (ret > 0) + op->rsa.message.length = ret; + break; + + case RTE_CRYPTO_ASYM_OP_SIGN: + ret = RSA_private_encrypt(op->rsa.message.length, + op->rsa.message.data, + op->rsa.sign.data, + rsa, + pad); + if (ret > 0) + op->rsa.sign.length = ret; + break; + + case RTE_CRYPTO_ASYM_OP_VERIFY: + ret = RSA_public_decrypt(op->rsa.sign.length, + op->rsa.sign.data, + op->rsa.sign.data, + rsa, + pad); + + OPENSSL_LOG(DEBUG, + "Length of public_decrypt %d " + "length of message %zd\n", + ret, op->rsa.message.length); + + if (memcmp(op->rsa.sign.data, op->rsa.message.data, + op->rsa.message.length)) { + OPENSSL_LOG(ERR, + "RSA sign Verification failed"); + return -1; + } + break; + + default: + /* allow ops with invalid args to be pushed to + * completion queue + */ + cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + break; + } + + if (ret < 0) + cop->status = RTE_CRYPTO_OP_STATUS_ERROR; + + return 0; +} + +static int +process_asym_op(struct openssl_qp *qp, struct rte_crypto_op *op, + struct openssl_asym_session *sess) +{ + int retval = 0; + + op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + + switch (sess->xfrm_type) { + case RTE_CRYPTO_ASYM_XFORM_RSA: + retval = process_openssl_rsa_op(op, sess); + break; + case RTE_CRYPTO_ASYM_XFORM_MODEX: + retval = process_openssl_modexp_op(op, sess); + break; + case RTE_CRYPTO_ASYM_XFORM_MODINV: + retval = process_openssl_modinv_op(op, sess); + break; + case RTE_CRYPTO_ASYM_XFORM_DH: + retval = process_openssl_dh_op(op, sess); + break; + case RTE_CRYPTO_ASYM_XFORM_DSA: + if (op->asym->dsa.op_type == RTE_CRYPTO_ASYM_OP_SIGN) + retval = process_openssl_dsa_sign_op(op, sess); + else if (op->asym->dsa.op_type == + RTE_CRYPTO_ASYM_OP_VERIFY) + retval = + process_openssl_dsa_verify_op(op, sess); + else + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + break; + default: + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + break; + } + if (!retval) { + /* op processed so push to completion queue as processed */ + retval = rte_ring_enqueue(qp->processed_ops, (void *)op); + if (retval) + /* return error if failed to put in completion queue */ + retval = -1; + } + + return retval; +} + /** Process crypto operation for mbuf */ static int process_op(struct openssl_qp *qp, struct rte_crypto_op *op, @@ -1569,7 +2017,7 @@ process_op(struct openssl_qp *qp, struct rte_crypto_op *op, openssl_reset_session(sess); memset(sess, 0, sizeof(struct openssl_session)); memset(op->sym->session, 0, - rte_cryptodev_get_header_session_size()); + rte_cryptodev_sym_get_header_session_size()); 
rte_mempool_put(qp->sess_mp, sess); rte_mempool_put(qp->sess_mp, op->sym->session); op->sym->session = NULL; @@ -1597,7 +2045,7 @@ static uint16_t openssl_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops, uint16_t nb_ops) { - struct openssl_session *sess; + void *sess; struct openssl_qp *qp = queue_pair; int i, retval; @@ -1606,7 +2054,12 @@ openssl_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops, if (unlikely(sess == NULL)) goto enqueue_err; - retval = process_op(qp, ops[i], sess); + if (ops[i]->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) + retval = process_op(qp, ops[i], + (struct openssl_session *) sess); + else + retval = process_asym_op(qp, ops[i], + (struct openssl_asym_session *) sess); if (unlikely(retval < 0)) goto enqueue_err; } @@ -1646,7 +2099,7 @@ cryptodev_openssl_create(const char *name, dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params); if (dev == NULL) { - OPENSSL_LOG_ERR("failed to create cryptodev vdev"); + OPENSSL_LOG(ERR, "failed to create cryptodev vdev"); goto init_error; } @@ -1660,18 +2113,19 @@ cryptodev_openssl_create(const char *name, dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | RTE_CRYPTODEV_FF_CPU_AESNI | - RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER; + RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | + RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT | + RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO; /* Set vector instructions mode supported */ internals = dev->data->dev_private; internals->max_nb_qpairs = init_params->max_nb_queue_pairs; - internals->max_nb_sessions = init_params->max_nb_sessions; return 0; init_error: - OPENSSL_LOG_ERR("driver %s: cryptodev_openssl_create failed", + OPENSSL_LOG(ERR, "driver %s: create failed", init_params->name); cryptodev_openssl_remove(vdev); @@ -1686,8 +2140,7 @@ cryptodev_openssl_probe(struct rte_vdev_device *vdev) "", sizeof(struct openssl_private), rte_socket_id(), - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS, - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS + RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS }; const char *name; const char *input_args; @@ -1731,7 +2184,11 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_OPENSSL_PMD, cryptodev_openssl_pmd_drv); RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_OPENSSL_PMD, "max_nb_queue_pairs= " - "max_nb_sessions= " "socket_id="); -RTE_PMD_REGISTER_CRYPTO_DRIVER(openssl_crypto_drv, cryptodev_openssl_pmd_drv, - cryptodev_driver_id); +RTE_PMD_REGISTER_CRYPTO_DRIVER(openssl_crypto_drv, + cryptodev_openssl_pmd_drv.driver, cryptodev_driver_id); + +RTE_INIT(openssl_init_log) +{ + openssl_logtype_driver = rte_log_register("pmd.crypto.openssl"); +} diff --git a/drivers/crypto/openssl/rte_openssl_pmd_ops.c b/drivers/crypto/openssl/rte_openssl_pmd_ops.c index 1cb87d59..de228439 100644 --- a/drivers/crypto/openssl/rte_openssl_pmd_ops.c +++ b/drivers/crypto/openssl/rte_openssl_pmd_ops.c @@ -9,6 +9,7 @@ #include #include "rte_openssl_pmd_private.h" +#include "compat.h" static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = { @@ -397,7 +398,7 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = { .algo = RTE_CRYPTO_CIPHER_3DES_CBC, .block_size = 8, .key_size = { - .min = 16, + .min = 8, .max = 24, .increment = 8 }, @@ -469,6 +470,105 @@ static const struct rte_cryptodev_capabilities openssl_pmd_capabilities[] = { }, } }, } }, + { /* RSA */ + .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC, + {.asym = { + .xform_capa = { + .xform_type = RTE_CRYPTO_ASYM_XFORM_RSA, + .op_types = ((1 << RTE_CRYPTO_ASYM_OP_SIGN) | + (1 << 
RTE_CRYPTO_ASYM_OP_VERIFY) | + (1 << RTE_CRYPTO_ASYM_OP_ENCRYPT) | + (1 << RTE_CRYPTO_ASYM_OP_DECRYPT)), + { + .modlen = { + /* min length is based on openssl rsa keygen */ + .min = 30, + /* value 0 symbolizes no limit on max length */ + .max = 0, + .increment = 1 + }, } + } + }, + } + }, + { /* modexp */ + .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC, + {.asym = { + .xform_capa = { + .xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX, + .op_types = 0, + { + .modlen = { + /* value 0 symbolizes no limit on min length */ + .min = 0, + /* value 0 symbolizes no limit on max length */ + .max = 0, + .increment = 1 + }, } + } + }, + } + }, + { /* modinv */ + .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC, + {.asym = { + .xform_capa = { + .xform_type = RTE_CRYPTO_ASYM_XFORM_MODINV, + .op_types = 0, + { + .modlen = { + /* value 0 symbolizes no limit on min length */ + .min = 0, + /* value 0 symbolizes no limit on max length */ + .max = 0, + .increment = 1 + }, } + } + }, + } + }, + { /* dh */ + .op = RTE_CRYPTO_OP_TYPE_ASYMMETRIC, + {.asym = { + .xform_capa = { + .xform_type = RTE_CRYPTO_ASYM_XFORM_DH, + .op_types = + ((1<feature_flags = dev->feature_flags; dev_info->capabilities = openssl_pmd_capabilities; dev_info->max_nb_queue_pairs = internals->max_nb_qpairs; - dev_info->sym.max_nb_sessions = internals->max_nb_sessions; + /* No limit of number of sessions */ + dev_info->sym.max_nb_sessions = 0; } } @@ -588,14 +689,14 @@ openssl_pmd_qp_create_processed_ops_ring(struct openssl_qp *qp, r = rte_ring_lookup(qp->name); if (r) { if (rte_ring_get_size(r) >= ring_size) { - OPENSSL_LOG_INFO( - "Reusing existing ring %s for processed ops", + OPENSSL_LOG(INFO, + "Reusing existing ring %s for processed ops", qp->name); return r; } - OPENSSL_LOG_ERR( - "Unable to reuse existing ring %s for processed ops", + OPENSSL_LOG(ERR, + "Unable to reuse existing ring %s for processed ops", qp->name); return NULL; } @@ -647,22 +748,6 @@ qp_setup_cleanup: return -1; } -/** Start queue pair */ -static int -openssl_pmd_qp_start(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - -/** Stop queue pair */ -static int -openssl_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - /** Return the number of allocated queue pairs */ static uint32_t openssl_pmd_qp_count(struct rte_cryptodev *dev) @@ -670,16 +755,23 @@ openssl_pmd_qp_count(struct rte_cryptodev *dev) return dev->data->nb_queue_pairs; } -/** Returns the size of the session structure */ +/** Returns the size of the symmetric session structure */ static unsigned -openssl_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused) +openssl_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) { return sizeof(struct openssl_session); } +/** Returns the size of the asymmetric session structure */ +static unsigned +openssl_pmd_asym_session_get_size(struct rte_cryptodev *dev __rte_unused) +{ + return sizeof(struct openssl_asym_session); +} + /** Configure the session from a crypto xform chain */ static int -openssl_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, +openssl_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused, struct rte_crypto_sym_xform *xform, struct rte_cryptodev_sym_session *sess, struct rte_mempool *mempool) @@ -688,46 +780,460 @@ openssl_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, int ret; if (unlikely(sess == NULL)) { - OPENSSL_LOG_ERR("invalid session struct"); + OPENSSL_LOG(ERR, "invalid session struct"); 
return -EINVAL; } if (rte_mempool_get(mempool, &sess_private_data)) { - CDEV_LOG_ERR( + OPENSSL_LOG(ERR, "Couldn't get object from session mempool"); return -ENOMEM; } ret = openssl_set_session_parameters(sess_private_data, xform); if (ret != 0) { - OPENSSL_LOG_ERR("failed configure session parameters"); + OPENSSL_LOG(ERR, "failed to configure session parameters"); /* Return session to mempool */ rte_mempool_put(mempool, sess_private_data); return ret; } - set_session_private_data(sess, dev->driver_id, + set_sym_session_private_data(sess, dev->driver_id, sess_private_data); return 0; } +static int openssl_set_asym_session_parameters( + struct openssl_asym_session *asym_session, + struct rte_crypto_asym_xform *xform) +{ + int ret = 0; + + if ((xform->xform_type != RTE_CRYPTO_ASYM_XFORM_DH) && + (xform->next != NULL)) { + OPENSSL_LOG(ERR, "chained xfrms are not supported on %s", + rte_crypto_asym_xform_strings[xform->xform_type]); + return -1; + } + + switch (xform->xform_type) { + case RTE_CRYPTO_ASYM_XFORM_RSA: + { + BIGNUM *n = NULL; + BIGNUM *e = NULL; + BIGNUM *d = NULL; + BIGNUM *p = NULL, *q = NULL, *dmp1 = NULL; + BIGNUM *iqmp = NULL, *dmq1 = NULL; + + /* copy xfrm data into rsa struct */ + n = BN_bin2bn((const unsigned char *)xform->rsa.n.data, + xform->rsa.n.length, n); + e = BN_bin2bn((const unsigned char *)xform->rsa.e.data, + xform->rsa.e.length, e); + + if (!n || !e) + goto err_rsa; + + RSA *rsa = RSA_new(); + if (rsa == NULL) + goto err_rsa; + + if (xform->rsa.key_type == RTE_RSA_KEY_TYPE_EXP) { + d = BN_bin2bn( + (const unsigned char *)xform->rsa.d.data, + xform->rsa.d.length, + d); + if (!d) { + RSA_free(rsa); + goto err_rsa; + } + } else { + p = BN_bin2bn((const unsigned char *) + xform->rsa.qt.p.data, + xform->rsa.qt.p.length, + p); + q = BN_bin2bn((const unsigned char *) + xform->rsa.qt.q.data, + xform->rsa.qt.q.length, + q); + dmp1 = BN_bin2bn((const unsigned char *) + xform->rsa.qt.dP.data, + xform->rsa.qt.dP.length, + dmp1); + dmq1 = BN_bin2bn((const unsigned char *) + xform->rsa.qt.dQ.data, + xform->rsa.qt.dQ.length, + dmq1); + iqmp = BN_bin2bn((const unsigned char *) + xform->rsa.qt.qInv.data, + xform->rsa.qt.qInv.length, + iqmp); + + if (!p || !q || !dmp1 || !dmq1 || !iqmp) { + RSA_free(rsa); + goto err_rsa; + } + set_rsa_params(rsa, p, q, ret); + if (ret) { + OPENSSL_LOG(ERR, + "failed to set rsa params\n"); + RSA_free(rsa); + goto err_rsa; + } + set_rsa_crt_params(rsa, dmp1, dmq1, iqmp, ret); + if (ret) { + OPENSSL_LOG(ERR, + "failed to set crt params\n"); + RSA_free(rsa); + /* + * set already populated params to NULL + * as it's freed by call to RSA_free + */ + p = q = NULL; + goto err_rsa; + } + } + + set_rsa_keys(rsa, n, e, d, ret); + if (ret) { + OPENSSL_LOG(ERR, "Failed to load rsa keys\n"); + RSA_free(rsa); + return -1; + } + asym_session->u.r.rsa = rsa; + asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_RSA; + break; +err_rsa: + if (n) + BN_free(n); + if (e) + BN_free(e); + if (d) + BN_free(d); + if (p) + BN_free(p); + if (q) + BN_free(q); + if (dmp1) + BN_free(dmp1); + if (dmq1) + BN_free(dmq1); + if (iqmp) + BN_free(iqmp); + + return -1; + } + case RTE_CRYPTO_ASYM_XFORM_MODEX: + { + struct rte_crypto_modex_xform *xfrm = &(xform->modex); + + BN_CTX *ctx = BN_CTX_new(); + if (ctx == NULL) { + OPENSSL_LOG(ERR, + " failed to allocate resources\n"); + return -1; + } + BN_CTX_start(ctx); + BIGNUM *mod = BN_CTX_get(ctx); + BIGNUM *exp = BN_CTX_get(ctx); + if (mod == NULL || exp == NULL) { + BN_CTX_end(ctx); + BN_CTX_free(ctx); + return -1; + } + + mod = 
BN_bin2bn((const unsigned char *) + xfrm->modulus.data, + xfrm->modulus.length, mod); + exp = BN_bin2bn((const unsigned char *) + xfrm->exponent.data, + xfrm->exponent.length, exp); + asym_session->u.e.ctx = ctx; + asym_session->u.e.mod = mod; + asym_session->u.e.exp = exp; + asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_MODEX; + break; + } + case RTE_CRYPTO_ASYM_XFORM_MODINV: + { + struct rte_crypto_modinv_xform *xfrm = &(xform->modinv); + + BN_CTX *ctx = BN_CTX_new(); + if (ctx == NULL) { + OPENSSL_LOG(ERR, + " failed to allocate resources\n"); + return -1; + } + BN_CTX_start(ctx); + BIGNUM *mod = BN_CTX_get(ctx); + if (mod == NULL) { + BN_CTX_end(ctx); + BN_CTX_free(ctx); + return -1; + } + + mod = BN_bin2bn((const unsigned char *) + xfrm->modulus.data, + xfrm->modulus.length, + mod); + asym_session->u.m.ctx = ctx; + asym_session->u.m.modulus = mod; + asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_MODINV; + break; + } + case RTE_CRYPTO_ASYM_XFORM_DH: + { + BIGNUM *p = NULL; + BIGNUM *g = NULL; + + p = BN_bin2bn((const unsigned char *) + xform->dh.p.data, + xform->dh.p.length, + p); + g = BN_bin2bn((const unsigned char *) + xform->dh.g.data, + xform->dh.g.length, + g); + if (!p || !g) + goto err_dh; + + DH *dh = DH_new(); + if (dh == NULL) { + OPENSSL_LOG(ERR, + "failed to allocate resources\n"); + goto err_dh; + } + set_dh_params(dh, p, g, ret); + if (ret) { + DH_free(dh); + goto err_dh; + } + + /* + * setup xform for + * public key generate, or + * DH Priv key generate, or both + * public and private key generate + */ + asym_session->u.dh.key_op = (1 << xform->dh.type); + + if (xform->dh.type == + RTE_CRYPTO_ASYM_OP_PRIVATE_KEY_GENERATE) { + /* check if next is pubkey */ + if ((xform->next != NULL) && + (xform->next->xform_type == + RTE_CRYPTO_ASYM_XFORM_DH) && + (xform->next->dh.type == + RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE) + ) { + /* + * setup op as pub/priv key + * pair generation + */ + asym_session->u.dh.key_op |= + (1 << + RTE_CRYPTO_ASYM_OP_PUBLIC_KEY_GENERATE); + } + } + asym_session->u.dh.dh_key = dh; + asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_DH; + break; + +err_dh: + OPENSSL_LOG(ERR, " failed to set dh params\n"); + if (p) + BN_free(p); + if (g) + BN_free(g); + return -1; + } + case RTE_CRYPTO_ASYM_XFORM_DSA: + { + BIGNUM *p = NULL, *g = NULL; + BIGNUM *q = NULL, *priv_key = NULL; + BIGNUM *pub_key = BN_new(); + BN_zero(pub_key); + + p = BN_bin2bn((const unsigned char *) + xform->dsa.p.data, + xform->dsa.p.length, + p); + + g = BN_bin2bn((const unsigned char *) + xform->dsa.g.data, + xform->dsa.g.length, + g); + + q = BN_bin2bn((const unsigned char *) + xform->dsa.q.data, + xform->dsa.q.length, + q); + if (!p || !q || !g) + goto err_dsa; + + priv_key = BN_bin2bn((const unsigned char *) + xform->dsa.x.data, + xform->dsa.x.length, + priv_key); + if (priv_key == NULL) + goto err_dsa; + + DSA *dsa = DSA_new(); + if (dsa == NULL) { + OPENSSL_LOG(ERR, + " failed to allocate resources\n"); + goto err_dsa; + } + + set_dsa_params(dsa, p, q, g, ret); + if (ret) { + DSA_free(dsa); + OPENSSL_LOG(ERR, "Failed to set dsa params\n"); + goto err_dsa; + } + + /* + * openssl 1.1.0 mandates that the public key can't be + * NULL in the very first call, so set a dummy pub key. + * to keep consistency, let's follow the same approach for + * both versions + */ + /* just set a dummy public key for the very first call */ + set_dsa_keys(dsa, pub_key, priv_key, ret); + if (ret) { + DSA_free(dsa); + OPENSSL_LOG(ERR, "Failed to set keys\n"); + return -1; + } + asym_session->u.s.dsa = dsa; + asym_session->xfrm_type = RTE_CRYPTO_ASYM_XFORM_DSA; + break; + +err_dsa: + if (p) + BN_free(p); + if (q) + BN_free(q); + if (g) + BN_free(g); + if (priv_key) + BN_free(priv_key); + if (pub_key) + BN_free(pub_key); + return -1; + } + default: + return -1; + } + + return 0; +} + +/** Configure the session from a crypto xform chain */ +static int +openssl_pmd_asym_session_configure(struct rte_cryptodev *dev __rte_unused, + struct rte_crypto_asym_xform *xform, + struct rte_cryptodev_asym_session *sess, + struct rte_mempool *mempool) +{ + void *asym_sess_private_data; + int ret; + + if (unlikely(sess == NULL)) { + OPENSSL_LOG(ERR, "invalid asymmetric session struct"); + return -EINVAL; + } + + if (rte_mempool_get(mempool, &asym_sess_private_data)) { + CDEV_LOG_ERR( + "Couldn't get object from session mempool"); + return -ENOMEM; + } + + ret = openssl_set_asym_session_parameters(asym_sess_private_data, + xform); + if (ret != 0) { + OPENSSL_LOG(ERR, "failed to configure session parameters"); + + /* Return session to mempool */ + rte_mempool_put(mempool, asym_sess_private_data); + return ret; + } + + set_asym_session_private_data(sess, dev->driver_id, + asym_sess_private_data); + + return 0; +} /** Clear the memory of session so it doesn't leave key material behind */ static void -openssl_pmd_session_clear(struct rte_cryptodev *dev, +openssl_pmd_sym_session_clear(struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess) { uint8_t index = dev->driver_id; - void *sess_priv = get_session_private_data(sess, index); + void *sess_priv = get_sym_session_private_data(sess, index); /* Zero out the whole structure */ if (sess_priv) { openssl_reset_session(sess_priv); memset(sess_priv, 0, sizeof(struct openssl_session)); struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); - set_session_private_data(sess, index, NULL); + set_sym_session_private_data(sess, index, NULL); + rte_mempool_put(sess_mp, sess_priv); + } +} + +static void openssl_reset_asym_session(struct openssl_asym_session *sess) +{ + switch (sess->xfrm_type) { + case RTE_CRYPTO_ASYM_XFORM_RSA: + if (sess->u.r.rsa) + RSA_free(sess->u.r.rsa); + break; + case RTE_CRYPTO_ASYM_XFORM_MODEX: + if (sess->u.e.ctx) { + BN_CTX_end(sess->u.e.ctx); + BN_CTX_free(sess->u.e.ctx); + } + break; + case RTE_CRYPTO_ASYM_XFORM_MODINV: + if (sess->u.m.ctx) { + BN_CTX_end(sess->u.m.ctx); + BN_CTX_free(sess->u.m.ctx); + } + break; + case RTE_CRYPTO_ASYM_XFORM_DH: + if (sess->u.dh.dh_key) + DH_free(sess->u.dh.dh_key); + break; + case RTE_CRYPTO_ASYM_XFORM_DSA: + if (sess->u.s.dsa) + DSA_free(sess->u.s.dsa); + break; + default: + break; + } +} + +/** Clear the memory of asymmetric session + * so it doesn't leave key material behind + */ +static void +openssl_pmd_asym_session_clear(struct rte_cryptodev *dev, + struct rte_cryptodev_asym_session *sess) +{ + uint8_t index = dev->driver_id; + void *sess_priv = get_asym_session_private_data(sess, index); + + /* Zero out the whole structure */ + if (sess_priv) { + openssl_reset_asym_session(sess_priv); + memset(sess_priv, 0, sizeof(struct openssl_asym_session)); + struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); + set_asym_session_private_data(sess, index, NULL); rte_mempool_put(sess_mp, sess_priv); } } @@ 
-745,13 +1251,14 @@ struct rte_cryptodev_ops openssl_pmd_ops = { .queue_pair_setup = openssl_pmd_qp_setup, .queue_pair_release = openssl_pmd_qp_release, - .queue_pair_start = openssl_pmd_qp_start, - .queue_pair_stop = openssl_pmd_qp_stop, .queue_pair_count = openssl_pmd_qp_count, - .session_get_size = openssl_pmd_session_get_size, - .session_configure = openssl_pmd_session_configure, - .session_clear = openssl_pmd_session_clear + .sym_session_get_size = openssl_pmd_sym_session_get_size, + .asym_session_get_size = openssl_pmd_asym_session_get_size, + .sym_session_configure = openssl_pmd_sym_session_configure, + .asym_session_configure = openssl_pmd_asym_session_configure, + .sym_session_clear = openssl_pmd_sym_session_clear, + .asym_session_clear = openssl_pmd_asym_session_clear }; struct rte_cryptodev_ops *rte_openssl_pmd_ops = &openssl_pmd_ops; diff --git a/drivers/crypto/openssl/rte_openssl_pmd_private.h b/drivers/crypto/openssl/rte_openssl_pmd_private.h index bc8dc7cd..a8f2c848 100644 --- a/drivers/crypto/openssl/rte_openssl_pmd_private.h +++ b/drivers/crypto/openssl/rte_openssl_pmd_private.h @@ -8,29 +8,19 @@ #include <openssl/evp.h> #include <openssl/hmac.h> #include <openssl/des.h> +#include <openssl/rsa.h> +#include <openssl/dh.h> +#include <openssl/dsa.h> #define CRYPTODEV_NAME_OPENSSL_PMD crypto_openssl /**< Open SSL Crypto PMD device name */ -#define OPENSSL_LOG_ERR(fmt, args...) \ - RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD), \ - __func__, __LINE__, ## args) - -#ifdef RTE_LIBRTE_OPENSSL_DEBUG -#define OPENSSL_LOG_INFO(fmt, args...) \ - RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD), \ - __func__, __LINE__, ## args) - -#define OPENSSL_LOG_DBG(fmt, args...) \ - RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD), \ - __func__, __LINE__, ## args) -#else -#define OPENSSL_LOG_INFO(fmt, args...) -#define OPENSSL_LOG_DBG(fmt, args...) -#endif +/** OPENSSL PMD LOGTYPE DRIVER */ +int openssl_logtype_driver; +#define OPENSSL_LOG(level, fmt, ...)
\ + rte_log(RTE_LOG_ ## level, openssl_logtype_driver, \ + "%s() line %u: " fmt "\n", __func__, __LINE__, \ + ## __VA_ARGS__) /* Maximum length for digest (SHA-512 needs 64 bytes) */ #define DIGEST_LENGTH_MAX 64 @@ -62,8 +52,6 @@ enum openssl_auth_mode { struct openssl_private { unsigned int max_nb_qpairs; /**< Max number of queue pairs */ - unsigned int max_nb_sessions; - /**< Max number of sessions */ }; /** OPENSSL crypto queue pair */ @@ -157,6 +145,31 @@ struct openssl_session { } __rte_cache_aligned; +/** OPENSSL crypto private asymmetric session structure */ +struct openssl_asym_session { + enum rte_crypto_asym_xform_type xfrm_type; + union { + struct rsa { + RSA *rsa; + } r; + struct exp { + BIGNUM *exp; + BIGNUM *mod; + BN_CTX *ctx; + } e; + struct mod { + BIGNUM *modulus; + BN_CTX *ctx; + } m; + struct dh { + DH *dh_key; + uint32_t key_op; + } dh; + struct { + DSA *dsa; + } s; + } u; +} __rte_cache_aligned; /** Set and validate OPENSSL crypto session parameters */ extern int openssl_set_session_parameters(struct openssl_session *sess, diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile deleted file mode 100644 index 260912dc..00000000 --- a/drivers/crypto/qat/Makefile +++ /dev/null @@ -1,35 +0,0 @@ -# SPDX-License-Identifier: BSD-3-Clause -# Copyright(c) 2015 Intel Corporation - -include $(RTE_SDK)/mk/rte.vars.mk - -# library name -LIB = librte_pmd_qat.a - -# library version -LIBABIVER := 1 - -# build flags -CFLAGS += $(WERROR_FLAGS) -CFLAGS += -O3 - -# external library include paths -CFLAGS += -I$(SRCDIR)/qat_adf -LDLIBS += -lcrypto -LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring -LDLIBS += -lrte_cryptodev -LDLIBS += -lrte_pci -lrte_bus_pci - -# library source files -SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_crypto.c -SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_qp.c -SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat_adf/qat_algs_build_desc.c -SRCS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += rte_qat_cryptodev.c - -# export include files -SYMLINK-y-include += - -# versioning export map -EXPORT_MAP := rte_pmd_qat_version.map - -include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/crypto/qat/README b/drivers/crypto/qat/README new file mode 100644 index 00000000..444ae605 --- /dev/null +++ b/drivers/crypto/qat/README @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2015-2018 Intel Corporation + +The Makefile for the crypto QAT PMD is in the common/qat directory. +The build for the QAT driver is done from there, as only one library is built for the +whole QAT PCI device and that library includes all the services (crypto, compression) +which are enabled on the device. diff --git a/drivers/crypto/qat/meson.build b/drivers/crypto/qat/meson.build index 7b904630..9cc98d2c 100644 --- a/drivers/crypto/qat/meson.build +++ b/drivers/crypto/qat/meson.build @@ -1,14 +1,18 @@ # SPDX-License-Identifier: BSD-3-Clause -# Copyright(c) 2017 Intel Corporation +# Copyright(c) 2017-2018 Intel Corporation +# This does not build the QAT driver; instead that is done in the compression +# driver, which comes later.
Here we just add our source files to the list +build = false dep = dependency('libcrypto', required: false) -if not dep.found() - build = false +qat_includes += include_directories('.') +qat_deps += 'cryptodev' +if dep.found() + # Add our source files to the list + qat_sources += files('qat_sym_pmd.c', + 'qat_sym.c', + 'qat_sym_session.c') + qat_ext_deps += dep + pkgconfig_extra_libs += '-lcrypto' + qat_cflags += '-DBUILD_QAT_SYM' endif -sources = files('qat_crypto.c', 'qat_qp.c', - 'qat_adf/qat_algs_build_desc.c', - 'rte_qat_cryptodev.c') -includes += include_directories('qat_adf') -deps += ['bus_pci'] -ext_deps += dep -pkgconfig_extra_libs += '-lcrypto' diff --git a/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h b/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h deleted file mode 100644 index 4f8f3d13..00000000 --- a/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h +++ /dev/null @@ -1,176 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * Copyright(c) 2015 Intel Corporation. - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * qat-linux@intel.com - * - * BSD LICENSE - * Copyright(c) 2015 Intel Corporation. - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */ -#ifndef ADF_TRANSPORT_ACCESS_MACROS_H -#define ADF_TRANSPORT_ACCESS_MACROS_H - -#include - -/* CSR write macro */ -#define ADF_CSR_WR(csrAddr, csrOffset, val) \ - rte_write32(val, (((uint8_t *)csrAddr) + csrOffset)) - -/* CSR read macro */ -#define ADF_CSR_RD(csrAddr, csrOffset) \ - rte_read32((((uint8_t *)csrAddr) + csrOffset)) - -#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL -#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL -#define ADF_RING_CSR_RING_CONFIG 0x000 -#define ADF_RING_CSR_RING_LBASE 0x040 -#define ADF_RING_CSR_RING_UBASE 0x080 -#define ADF_RING_CSR_RING_HEAD 0x0C0 -#define ADF_RING_CSR_RING_TAIL 0x100 -#define ADF_RING_CSR_E_STAT 0x14C -#define ADF_RING_CSR_INT_SRCSEL 0x174 -#define ADF_RING_CSR_INT_SRCSEL_2 0x178 -#define ADF_RING_CSR_INT_COL_EN 0x17C -#define ADF_RING_CSR_INT_COL_CTL 0x180 -#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184 -#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000 -#define ADF_RING_BUNDLE_SIZE 0x1000 -#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A -#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05 -#define ADF_COALESCING_MIN_TIME 0x1FF -#define ADF_COALESCING_MAX_TIME 0xFFFFF -#define ADF_COALESCING_DEF_TIME 0x27FF -#define ADF_RING_NEAR_WATERMARK_512 0x08 -#define ADF_RING_NEAR_WATERMARK_0 0x00 -#define ADF_RING_EMPTY_SIG 0x7F7F7F7F -#define ADF_RING_EMPTY_SIG_BYTE 0x7F - -/* Valid internal ring size values */ -#define ADF_RING_SIZE_128 0x01 -#define ADF_RING_SIZE_256 0x02 -#define ADF_RING_SIZE_512 0x03 -#define ADF_RING_SIZE_4K 0x06 -#define ADF_RING_SIZE_16K 0x08 -#define ADF_RING_SIZE_4M 0x10 -#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128 -#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M -#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K - -#define ADF_NUM_BUNDLES_PER_DEV 1 -#define ADF_NUM_SYM_QPS_PER_BUNDLE 2 - -/* Valid internal msg size values */ -#define ADF_MSG_SIZE_32 0x01 -#define ADF_MSG_SIZE_64 0x02 -#define ADF_MSG_SIZE_128 0x04 -#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32 -#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128 - -/* Size to bytes conversion macros for ring and msg size values */ -#define ADF_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5) -#define ADF_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5) -#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7) -#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7) - -/* Minimum ring bufer size for memory allocation */ -#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? 
\ - ADF_RING_SIZE_4K : SIZE) -#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6) -#define ADF_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \ - SIZE) & ~0x4) -/* Max outstanding requests */ -#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \ - ((((1 << (RING_SIZE - 1)) << 3) >> ADF_SIZE_TO_POW(MSG_SIZE)) - 1) -#define BUILD_RING_CONFIG(size) \ - ((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \ - | (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \ - | size) -#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \ - ((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM) \ - | (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \ - | size) -#define BUILD_RING_BASE_ADDR(addr, size) \ - ((addr >> 6) & (0xFFFFFFFFFFFFFFFFULL << size)) -#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \ - ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ - ADF_RING_CSR_RING_HEAD + (ring << 2)) -#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \ - ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ - ADF_RING_CSR_RING_TAIL + (ring << 2)) -#define READ_CSR_E_STAT(csr_base_addr, bank) \ - ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ - ADF_RING_CSR_E_STAT) -#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ - ADF_RING_CSR_RING_CONFIG + (ring << 2), value) -#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \ -do { \ - uint32_t l_base = 0, u_base = 0; \ - l_base = (uint32_t)(value & 0xFFFFFFFF); \ - u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ - ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ - ADF_RING_CSR_RING_UBASE + (ring << 2), u_base); \ -} while (0) -#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ - ADF_RING_CSR_RING_HEAD + (ring << 2), value) -#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ - ADF_RING_CSR_RING_TAIL + (ring << 2), value) -#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \ -do { \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ - ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0); \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ - ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \ -} while (0) -#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ - ADF_RING_CSR_INT_COL_EN, value) -#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ - ADF_RING_CSR_INT_COL_CTL, \ - ADF_RING_CSR_INT_COL_CTL_ENABLE | value) -#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \ - ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \ - ADF_RING_CSR_INT_FLAG_AND_COL, value) -#endif diff --git a/drivers/crypto/qat/qat_adf/icp_qat_fw.h b/drivers/crypto/qat/qat_adf/icp_qat_fw.h deleted file mode 100644 index 5de34d55..00000000 --- a/drivers/crypto/qat/qat_adf/icp_qat_fw.h +++ /dev/null @@ -1,316 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * Copyright(c) 2015 Intel Corporation. 
- * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * qat-linux@intel.com - * - * BSD LICENSE - * Copyright(c) 2015 Intel Corporation. - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ -#ifndef _ICP_QAT_FW_H_ -#define _ICP_QAT_FW_H_ -#include -#include "icp_qat_hw.h" - -#define QAT_FIELD_SET(flags, val, bitpos, mask) \ -{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \ - (((val) & (mask)) << (bitpos))) ; } - -#define QAT_FIELD_GET(flags, bitpos, mask) \ - (((flags) >> (bitpos)) & (mask)) - -#define ICP_QAT_FW_REQ_DEFAULT_SZ 128 -#define ICP_QAT_FW_RESP_DEFAULT_SZ 32 -#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8 -#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF -#define ICP_QAT_FW_NUM_LONGWORDS_1 1 -#define ICP_QAT_FW_NUM_LONGWORDS_2 2 -#define ICP_QAT_FW_NUM_LONGWORDS_3 3 -#define ICP_QAT_FW_NUM_LONGWORDS_4 4 -#define ICP_QAT_FW_NUM_LONGWORDS_5 5 -#define ICP_QAT_FW_NUM_LONGWORDS_6 6 -#define ICP_QAT_FW_NUM_LONGWORDS_7 7 -#define ICP_QAT_FW_NUM_LONGWORDS_10 10 -#define ICP_QAT_FW_NUM_LONGWORDS_13 13 -#define ICP_QAT_FW_NULL_REQ_SERV_ID 1 - -enum icp_qat_fw_comn_resp_serv_id { - ICP_QAT_FW_COMN_RESP_SERV_NULL, - ICP_QAT_FW_COMN_RESP_SERV_CPM_FW, - ICP_QAT_FW_COMN_RESP_SERV_DELIMITER -}; - -enum icp_qat_fw_comn_request_id { - ICP_QAT_FW_COMN_REQ_NULL = 0, - ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3, - ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4, - ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7, - ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9, - ICP_QAT_FW_COMN_REQ_DELIMITER -}; - -struct icp_qat_fw_comn_req_hdr_cd_pars { - union { - struct { - uint64_t content_desc_addr; - uint16_t content_desc_resrvd1; - uint8_t content_desc_params_sz; - uint8_t content_desc_hdr_resrvd2; - uint32_t content_desc_resrvd3; - } s; - struct { - uint32_t serv_specif_fields[4]; - } s1; - } u; -}; - -struct icp_qat_fw_comn_req_mid { - uint64_t opaque_data; - uint64_t src_data_addr; - uint64_t dest_data_addr; - uint32_t src_length; - uint32_t dst_length; -}; - -struct icp_qat_fw_comn_req_cd_ctrl { - uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5]; -}; - -struct icp_qat_fw_comn_req_hdr { - uint8_t resrvd1; - uint8_t service_cmd_id; - uint8_t service_type; - uint8_t hdr_flags; - uint16_t serv_specif_flags; - uint16_t comn_req_flags; -}; - -struct icp_qat_fw_comn_req_rqpars { - uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13]; -}; - -struct icp_qat_fw_comn_req { - struct icp_qat_fw_comn_req_hdr comn_hdr; - struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars; - struct icp_qat_fw_comn_req_mid comn_mid; - struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars; - struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl; -}; - -struct icp_qat_fw_comn_error { - uint8_t xlat_err_code; - uint8_t cmp_err_code; -}; - -struct icp_qat_fw_comn_resp_hdr { - uint8_t resrvd1; - uint8_t service_id; - uint8_t response_type; - uint8_t hdr_flags; - struct icp_qat_fw_comn_error comn_error; - uint8_t comn_status; - uint8_t cmd_id; -}; - -struct icp_qat_fw_comn_resp { - struct icp_qat_fw_comn_resp_hdr comn_hdr; - uint64_t opaque_data; - uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; -}; - -#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1 -#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0 -#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7 -#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1 -#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F - -#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \ - icp_qat_fw_comn_req_hdr_t.service_type - -#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \ - icp_qat_fw_comn_req_hdr_t.service_type = val - -#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \ - icp_qat_fw_comn_req_hdr_t.service_cmd_id - -#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \ - 
icp_qat_fw_comn_req_hdr_t.service_cmd_id = val - -#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \ - ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags) - -#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \ - ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) - -#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \ - QAT_FIELD_GET(hdr_flags, \ - ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \ - ICP_QAT_FW_COMN_VALID_FLAG_MASK) - -#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \ - (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK) - -#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \ - QAT_FIELD_SET((hdr_t.hdr_flags), (val), \ - ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \ - ICP_QAT_FW_COMN_VALID_FLAG_MASK) - -#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \ - (((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \ - ICP_QAT_FW_COMN_VALID_FLAG_BITPOS) - -#define QAT_COMN_PTR_TYPE_BITPOS 0 -#define QAT_COMN_PTR_TYPE_MASK 0x1 -#define QAT_COMN_CD_FLD_TYPE_BITPOS 1 -#define QAT_COMN_CD_FLD_TYPE_MASK 0x1 -#define QAT_COMN_PTR_TYPE_FLAT 0x0 -#define QAT_COMN_PTR_TYPE_SGL 0x1 -#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0 -#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1 - -#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \ - ((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \ - | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS)) - -#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \ - QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK) - -#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \ - QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \ - QAT_COMN_CD_FLD_TYPE_MASK) - -#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \ - QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \ - QAT_COMN_PTR_TYPE_MASK) - -#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \ - QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \ - QAT_COMN_CD_FLD_TYPE_MASK) - -#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4 -#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0 -#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0 -#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F - -#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \ - ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \ - >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) - -#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ - { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \ - & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ - ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ - & ICP_QAT_FW_COMN_NEXT_ID_MASK)); } - -#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \ - (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK) - -#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \ - { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \ - & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ - ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); } - -#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7 -#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1 -#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5 -#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1 -#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4 -#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1 -#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3 -#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1 - -#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \ - ((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \ - QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \ - (((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \ - QAT_COMN_RESP_CMP_STATUS_BITPOS) | \ - (((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \ - QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \ - (((eolb) & 
QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \ - QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS)) - -#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \ - QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \ - QAT_COMN_RESP_CRYPTO_STATUS_MASK) - -#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \ - QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \ - QAT_COMN_RESP_CMP_STATUS_MASK) - -#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \ - QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \ - QAT_COMN_RESP_XLAT_STATUS_MASK) - -#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \ - QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \ - QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) - -#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0 -#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1 -#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0 -#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1 -#define ERR_CODE_NO_ERROR 0 -#define ERR_CODE_INVALID_BLOCK_TYPE -1 -#define ERR_CODE_NO_MATCH_ONES_COMP -2 -#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3 -#define ERR_CODE_INCOMPLETE_LEN -4 -#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5 -#define ERR_CODE_RPT_GT_SPEC_LEN -6 -#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7 -#define ERR_CODE_INV_DIS_CODE_LEN -8 -#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9 -#define ERR_CODE_DIS_TOO_FAR_BACK -10 -#define ERR_CODE_OVERFLOW_ERROR -11 -#define ERR_CODE_SOFT_ERROR -12 -#define ERR_CODE_FATAL_ERROR -13 -#define ERR_CODE_SSM_ERROR -14 -#define ERR_CODE_ENDPOINT_ERROR -15 - -enum icp_qat_fw_slice { - ICP_QAT_FW_SLICE_NULL = 0, - ICP_QAT_FW_SLICE_CIPHER = 1, - ICP_QAT_FW_SLICE_AUTH = 2, - ICP_QAT_FW_SLICE_DRAM_RD = 3, - ICP_QAT_FW_SLICE_DRAM_WR = 4, - ICP_QAT_FW_SLICE_COMP = 5, - ICP_QAT_FW_SLICE_XLAT = 6, - ICP_QAT_FW_SLICE_DELIMITER -}; -#endif diff --git a/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h b/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h deleted file mode 100644 index fbf2b839..00000000 --- a/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h +++ /dev/null @@ -1,404 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * Copyright(c) 2015 Intel Corporation. - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * qat-linux@intel.com - * - * BSD LICENSE - * Copyright(c) 2015 Intel Corporation. - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef _ICP_QAT_FW_LA_H_ -#define _ICP_QAT_FW_LA_H_ -#include "icp_qat_fw.h" - -enum icp_qat_fw_la_cmd_id { - ICP_QAT_FW_LA_CMD_CIPHER = 0, - ICP_QAT_FW_LA_CMD_AUTH = 1, - ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2, - ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3, - ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4, - ICP_QAT_FW_LA_CMD_TRNG_TEST = 5, - ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6, - ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7, - ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8, - ICP_QAT_FW_LA_CMD_MGF1 = 9, - ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10, - ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11, - ICP_QAT_FW_LA_CMD_DELIMITER = 12 -}; - -#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK -#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR -#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK -#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR - -struct icp_qat_fw_la_bulk_req { - struct icp_qat_fw_comn_req_hdr comn_hdr; - struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars; - struct icp_qat_fw_comn_req_mid comn_mid; - struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars; - struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl; -}; - -#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1 -#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0 -#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12 -#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1 -#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1 -#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11 -#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1 -#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1 -#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0 -#define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10 -#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1 -#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4 -#define ICP_QAT_FW_LA_GCM_PROTO 2 -#define ICP_QAT_FW_LA_CCM_PROTO 1 -#define ICP_QAT_FW_LA_NO_PROTO 0 -#define QAT_LA_PROTO_BITPOS 7 -#define QAT_LA_PROTO_MASK 0x7 -#define ICP_QAT_FW_LA_CMP_AUTH_RES 1 -#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0 -#define QAT_LA_CMP_AUTH_RES_BITPOS 6 -#define QAT_LA_CMP_AUTH_RES_MASK 0x1 -#define ICP_QAT_FW_LA_RET_AUTH_RES 1 -#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0 -#define QAT_LA_RET_AUTH_RES_BITPOS 5 -#define QAT_LA_RET_AUTH_RES_MASK 0x1 -#define ICP_QAT_FW_LA_UPDATE_STATE 1 -#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0 -#define QAT_LA_UPDATE_STATE_BITPOS 4 -#define QAT_LA_UPDATE_STATE_MASK 0x1 -#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0 -#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1 -#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3 -#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1 -#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0 -#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1 -#define QAT_LA_CIPH_IV_FLD_BITPOS 2 -#define QAT_LA_CIPH_IV_FLD_MASK 0x1 -#define ICP_QAT_FW_LA_PARTIAL_NONE 0 -#define 
ICP_QAT_FW_LA_PARTIAL_START 1 -#define ICP_QAT_FW_LA_PARTIAL_MID 3 -#define ICP_QAT_FW_LA_PARTIAL_END 2 -#define QAT_LA_PARTIAL_BITPOS 0 -#define QAT_LA_PARTIAL_MASK 0x3 -#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \ - cmp_auth, ret_auth, update_state, \ - ciph_iv, ciphcfg, partial) \ - (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \ - QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \ - ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \ - QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \ - ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \ - QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \ - ((proto & QAT_LA_PROTO_MASK) << \ - QAT_LA_PROTO_BITPOS) | \ - ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \ - QAT_LA_CMP_AUTH_RES_BITPOS) | \ - ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \ - QAT_LA_RET_AUTH_RES_BITPOS) | \ - ((update_state & QAT_LA_UPDATE_STATE_MASK) << \ - QAT_LA_UPDATE_STATE_BITPOS) | \ - ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \ - QAT_LA_CIPH_IV_FLD_BITPOS) | \ - ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \ - QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \ - ((partial & QAT_LA_PARTIAL_MASK) << \ - QAT_LA_PARTIAL_BITPOS)) - -#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \ - QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \ - QAT_LA_CIPH_IV_FLD_MASK) - -#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \ - QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \ - QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) - -#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \ - QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \ - QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) - -#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \ - QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \ - QAT_LA_GCM_IV_LEN_FLAG_MASK) - -#define ICP_QAT_FW_LA_PROTO_GET(flags) \ - QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK) - -#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \ - QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \ - QAT_LA_CMP_AUTH_RES_MASK) - -#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \ - QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \ - QAT_LA_RET_AUTH_RES_MASK) - -#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \ - QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \ - QAT_LA_DIGEST_IN_BUFFER_MASK) - -#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \ - QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \ - QAT_LA_UPDATE_STATE_MASK) - -#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \ - QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \ - QAT_LA_PARTIAL_MASK) - -#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \ - QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \ - QAT_LA_CIPH_IV_FLD_MASK) - -#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \ - QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \ - QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) - -#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \ - QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \ - QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) - -#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \ - QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \ - QAT_LA_GCM_IV_LEN_FLAG_MASK) - -#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \ - QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \ - QAT_LA_PROTO_MASK) - -#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \ - QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \ - QAT_LA_CMP_AUTH_RES_MASK) - -#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \ - QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \ - QAT_LA_RET_AUTH_RES_MASK) - -#define 
ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \ - QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \ - QAT_LA_DIGEST_IN_BUFFER_MASK) - -#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \ - QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \ - QAT_LA_UPDATE_STATE_MASK) - -#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \ - QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \ - QAT_LA_PARTIAL_MASK) - -struct icp_qat_fw_cipher_req_hdr_cd_pars { - union { - struct { - uint64_t content_desc_addr; - uint16_t content_desc_resrvd1; - uint8_t content_desc_params_sz; - uint8_t content_desc_hdr_resrvd2; - uint32_t content_desc_resrvd3; - } s; - struct { - uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; - } s1; - } u; -}; - -struct icp_qat_fw_cipher_auth_req_hdr_cd_pars { - union { - struct { - uint64_t content_desc_addr; - uint16_t content_desc_resrvd1; - uint8_t content_desc_params_sz; - uint8_t content_desc_hdr_resrvd2; - uint32_t content_desc_resrvd3; - } s; - struct { - uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4]; - } sl; - } u; -}; - -struct icp_qat_fw_cipher_cd_ctrl_hdr { - uint8_t cipher_state_sz; - uint8_t cipher_key_sz; - uint8_t cipher_cfg_offset; - uint8_t next_curr_id; - uint8_t cipher_padding_sz; - uint8_t resrvd1; - uint16_t resrvd2; - uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3]; -}; - -struct icp_qat_fw_auth_cd_ctrl_hdr { - uint32_t resrvd1; - uint8_t resrvd2; - uint8_t hash_flags; - uint8_t hash_cfg_offset; - uint8_t next_curr_id; - uint8_t resrvd3; - uint8_t outer_prefix_sz; - uint8_t final_sz; - uint8_t inner_res_sz; - uint8_t resrvd4; - uint8_t inner_state1_sz; - uint8_t inner_state2_offset; - uint8_t inner_state2_sz; - uint8_t outer_config_offset; - uint8_t outer_state1_sz; - uint8_t outer_res_sz; - uint8_t outer_prefix_offset; -}; - -struct icp_qat_fw_cipher_auth_cd_ctrl_hdr { - uint8_t cipher_state_sz; - uint8_t cipher_key_sz; - uint8_t cipher_cfg_offset; - uint8_t next_curr_id_cipher; - uint8_t cipher_padding_sz; - uint8_t hash_flags; - uint8_t hash_cfg_offset; - uint8_t next_curr_id_auth; - uint8_t resrvd1; - uint8_t outer_prefix_sz; - uint8_t final_sz; - uint8_t inner_res_sz; - uint8_t resrvd2; - uint8_t inner_state1_sz; - uint8_t inner_state2_offset; - uint8_t inner_state2_sz; - uint8_t outer_config_offset; - uint8_t outer_state1_sz; - uint8_t outer_res_sz; - uint8_t outer_prefix_offset; -}; - -#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1 -#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0 -#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX 240 -#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \ - (sizeof(struct icp_qat_fw_la_cipher_req_params_t)) -#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0) - -struct icp_qat_fw_la_cipher_req_params { - uint32_t cipher_offset; - uint32_t cipher_length; - union { - uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4]; - struct { - uint64_t cipher_IV_ptr; - uint64_t resrvd1; - } s; - } u; -}; - -struct icp_qat_fw_la_auth_req_params { - uint32_t auth_off; - uint32_t auth_len; - union { - uint64_t auth_partial_st_prefix; - uint64_t aad_adr; - } u1; - uint64_t auth_res_addr; - union { - uint8_t inner_prefix_sz; - uint8_t aad_sz; - } u2; - uint8_t resrvd1; - uint8_t hash_state_sz; - uint8_t auth_res_sz; -} __rte_packed; - -struct icp_qat_fw_la_auth_req_params_resrvd_flds { - uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6]; - union { - uint8_t inner_prefix_sz; - uint8_t aad_sz; - } u2; - uint8_t resrvd1; - uint16_t resrvd2; -}; - -struct icp_qat_fw_la_resp { - struct icp_qat_fw_comn_resp_hdr comn_resp; - uint64_t opaque_data; - 
uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4]; -}; - -#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \ - ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \ - ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) - -#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ -{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \ - ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \ - & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ - ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ - & ICP_QAT_FW_COMN_NEXT_ID_MASK)) } - -#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \ - (((cd_ctrl_hdr_t)->next_curr_id_cipher) \ - & ICP_QAT_FW_COMN_CURR_ID_MASK) - -#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \ -{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \ - ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \ - & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ - ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) } - -#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \ - ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \ - >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS)) - -#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \ -{ (cd_ctrl_hdr_t)->next_curr_id_auth = \ - ((((cd_ctrl_hdr_t)->next_curr_id_auth) \ - & ICP_QAT_FW_COMN_CURR_ID_MASK) | \ - ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \ - & ICP_QAT_FW_COMN_NEXT_ID_MASK)) } - -#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \ - (((cd_ctrl_hdr_t)->next_curr_id_auth) \ - & ICP_QAT_FW_COMN_CURR_ID_MASK) - -#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \ -{ (cd_ctrl_hdr_t)->next_curr_id_auth = \ - ((((cd_ctrl_hdr_t)->next_curr_id_auth) \ - & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \ - ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) } - -#endif diff --git a/drivers/crypto/qat/qat_adf/icp_qat_hw.h b/drivers/crypto/qat/qat_adf/icp_qat_hw.h deleted file mode 100644 index d03688c7..00000000 --- a/drivers/crypto/qat/qat_adf/icp_qat_hw.h +++ /dev/null @@ -1,329 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * Copyright(c) 2015 Intel Corporation. - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * qat-linux@intel.com - * - * BSD LICENSE - * Copyright(c) 2015 Intel Corporation. - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ -#ifndef _ICP_QAT_HW_H_ -#define _ICP_QAT_HW_H_ - -enum icp_qat_hw_ae_id { - ICP_QAT_HW_AE_0 = 0, - ICP_QAT_HW_AE_1 = 1, - ICP_QAT_HW_AE_2 = 2, - ICP_QAT_HW_AE_3 = 3, - ICP_QAT_HW_AE_4 = 4, - ICP_QAT_HW_AE_5 = 5, - ICP_QAT_HW_AE_6 = 6, - ICP_QAT_HW_AE_7 = 7, - ICP_QAT_HW_AE_8 = 8, - ICP_QAT_HW_AE_9 = 9, - ICP_QAT_HW_AE_10 = 10, - ICP_QAT_HW_AE_11 = 11, - ICP_QAT_HW_AE_DELIMITER = 12 -}; - -enum icp_qat_hw_qat_id { - ICP_QAT_HW_QAT_0 = 0, - ICP_QAT_HW_QAT_1 = 1, - ICP_QAT_HW_QAT_2 = 2, - ICP_QAT_HW_QAT_3 = 3, - ICP_QAT_HW_QAT_4 = 4, - ICP_QAT_HW_QAT_5 = 5, - ICP_QAT_HW_QAT_DELIMITER = 6 -}; - -enum icp_qat_hw_auth_algo { - ICP_QAT_HW_AUTH_ALGO_NULL = 0, - ICP_QAT_HW_AUTH_ALGO_SHA1 = 1, - ICP_QAT_HW_AUTH_ALGO_MD5 = 2, - ICP_QAT_HW_AUTH_ALGO_SHA224 = 3, - ICP_QAT_HW_AUTH_ALGO_SHA256 = 4, - ICP_QAT_HW_AUTH_ALGO_SHA384 = 5, - ICP_QAT_HW_AUTH_ALGO_SHA512 = 6, - ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7, - ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8, - ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9, - ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10, - ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11, - ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12, - ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13, - ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14, - ICP_QAT_HW_AUTH_RESERVED_1 = 15, - ICP_QAT_HW_AUTH_RESERVED_2 = 16, - ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17, - ICP_QAT_HW_AUTH_RESERVED_3 = 18, - ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19, - ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20 -}; - -enum icp_qat_hw_auth_mode { - ICP_QAT_HW_AUTH_MODE0 = 0, - ICP_QAT_HW_AUTH_MODE1 = 1, - ICP_QAT_HW_AUTH_MODE2 = 2, - ICP_QAT_HW_AUTH_MODE_DELIMITER = 3 -}; - -struct icp_qat_hw_auth_config { - uint32_t config; - uint32_t reserved; -}; - -#define QAT_AUTH_MODE_BITPOS 4 -#define QAT_AUTH_MODE_MASK 0xF -#define QAT_AUTH_ALGO_BITPOS 0 -#define QAT_AUTH_ALGO_MASK 0xF -#define QAT_AUTH_CMP_BITPOS 8 -#define QAT_AUTH_CMP_MASK 0x7F -#define QAT_AUTH_SHA3_PADDING_BITPOS 16 -#define QAT_AUTH_SHA3_PADDING_MASK 0x1 -#define QAT_AUTH_ALGO_SHA3_BITPOS 22 -#define QAT_AUTH_ALGO_SHA3_MASK 0x3 -#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \ - (((mode & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \ - ((algo & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \ - (((algo >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \ - QAT_AUTH_ALGO_SHA3_BITPOS) | \ - (((((algo == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \ - (algo == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 
1 : 0) \ - & QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \ - ((cmp_len & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS)) - -struct icp_qat_hw_auth_counter { - uint32_t counter; - uint32_t reserved; -}; - -#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF -#define QAT_AUTH_COUNT_BITPOS 0 -#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \ - (((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS) - -struct icp_qat_hw_auth_setup { - struct icp_qat_hw_auth_config auth_config; - struct icp_qat_hw_auth_counter auth_counter; -}; - -#define QAT_HW_DEFAULT_ALIGNMENT 8 -#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & (~(n - 1))) -#define ICP_QAT_HW_NULL_STATE1_SZ 32 -#define ICP_QAT_HW_MD5_STATE1_SZ 16 -#define ICP_QAT_HW_SHA1_STATE1_SZ 20 -#define ICP_QAT_HW_SHA224_STATE1_SZ 32 -#define ICP_QAT_HW_SHA256_STATE1_SZ 32 -#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32 -#define ICP_QAT_HW_SHA384_STATE1_SZ 64 -#define ICP_QAT_HW_SHA512_STATE1_SZ 64 -#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64 -#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28 -#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48 -#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16 -#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16 -#define ICP_QAT_HW_AES_F9_STATE1_SZ 32 -#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16 -#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16 -#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8 -#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8 -#define ICP_QAT_HW_NULL_STATE2_SZ 32 -#define ICP_QAT_HW_MD5_STATE2_SZ 16 -#define ICP_QAT_HW_SHA1_STATE2_SZ 20 -#define ICP_QAT_HW_SHA224_STATE2_SZ 32 -#define ICP_QAT_HW_SHA256_STATE2_SZ 32 -#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0 -#define ICP_QAT_HW_SHA384_STATE2_SZ 64 -#define ICP_QAT_HW_SHA512_STATE2_SZ 64 -#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0 -#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0 -#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0 -#define ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ 48 -#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16 -#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16 -#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16 -#define ICP_QAT_HW_F9_IK_SZ 16 -#define ICP_QAT_HW_F9_FK_SZ 16 -#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \ - ICP_QAT_HW_F9_FK_SZ) -#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ -#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24 -#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32 -#define ICP_QAT_HW_GALOIS_H_SZ 16 -#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8 -#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16 - -struct icp_qat_hw_auth_sha512 { - struct icp_qat_hw_auth_setup inner_setup; - uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ]; - struct icp_qat_hw_auth_setup outer_setup; - uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ]; -}; - -struct icp_qat_hw_auth_algo_blk { - struct icp_qat_hw_auth_sha512 sha; -}; - -#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0 -#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF - -enum icp_qat_hw_cipher_algo { - ICP_QAT_HW_CIPHER_ALGO_NULL = 0, - ICP_QAT_HW_CIPHER_ALGO_DES = 1, - ICP_QAT_HW_CIPHER_ALGO_3DES = 2, - ICP_QAT_HW_CIPHER_ALGO_AES128 = 3, - ICP_QAT_HW_CIPHER_ALGO_AES192 = 4, - ICP_QAT_HW_CIPHER_ALGO_AES256 = 5, - ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6, - ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7, - ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8, - ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9, - ICP_QAT_HW_CIPHER_DELIMITER = 10 -}; - -enum icp_qat_hw_cipher_mode { - ICP_QAT_HW_CIPHER_ECB_MODE = 0, - ICP_QAT_HW_CIPHER_CBC_MODE = 1, - ICP_QAT_HW_CIPHER_CTR_MODE = 2, - ICP_QAT_HW_CIPHER_F8_MODE = 3, - ICP_QAT_HW_CIPHER_XTS_MODE = 6, - ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7 -}; - -struct icp_qat_hw_cipher_config { - uint32_t val; 
- uint32_t reserved; -}; - -enum icp_qat_hw_cipher_dir { - ICP_QAT_HW_CIPHER_ENCRYPT = 0, - ICP_QAT_HW_CIPHER_DECRYPT = 1, -}; - -enum icp_qat_hw_auth_op { - ICP_QAT_HW_AUTH_VERIFY = 0, - ICP_QAT_HW_AUTH_GENERATE = 1, -}; - -enum icp_qat_hw_cipher_convert { - ICP_QAT_HW_CIPHER_NO_CONVERT = 0, - ICP_QAT_HW_CIPHER_KEY_CONVERT = 1, -}; - -#define QAT_CIPHER_MODE_BITPOS 4 -#define QAT_CIPHER_MODE_MASK 0xF -#define QAT_CIPHER_ALGO_BITPOS 0 -#define QAT_CIPHER_ALGO_MASK 0xF -#define QAT_CIPHER_CONVERT_BITPOS 9 -#define QAT_CIPHER_CONVERT_MASK 0x1 -#define QAT_CIPHER_DIR_BITPOS 8 -#define QAT_CIPHER_DIR_MASK 0x1 -#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2 -#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2 -#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \ - (((mode & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \ - ((algo & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \ - ((convert & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \ - ((dir & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS)) -#define ICP_QAT_HW_DES_BLK_SZ 8 -#define ICP_QAT_HW_3DES_BLK_SZ 8 -#define ICP_QAT_HW_NULL_BLK_SZ 8 -#define ICP_QAT_HW_AES_BLK_SZ 16 -#define ICP_QAT_HW_KASUMI_BLK_SZ 8 -#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8 -#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8 -#define ICP_QAT_HW_NULL_KEY_SZ 256 -#define ICP_QAT_HW_DES_KEY_SZ 8 -#define ICP_QAT_HW_3DES_KEY_SZ 24 -#define ICP_QAT_HW_AES_128_KEY_SZ 16 -#define ICP_QAT_HW_AES_192_KEY_SZ 24 -#define ICP_QAT_HW_AES_256_KEY_SZ 32 -#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \ - QAT_CIPHER_MODE_F8_KEY_SZ_MULT) -#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \ - QAT_CIPHER_MODE_F8_KEY_SZ_MULT) -#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \ - QAT_CIPHER_MODE_F8_KEY_SZ_MULT) -#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \ - QAT_CIPHER_MODE_XTS_KEY_SZ_MULT) -#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \ - QAT_CIPHER_MODE_XTS_KEY_SZ_MULT) -#define ICP_QAT_HW_KASUMI_KEY_SZ 16 -#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \ - QAT_CIPHER_MODE_F8_KEY_SZ_MULT) -#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \ - QAT_CIPHER_MODE_XTS_KEY_SZ_MULT) -#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \ - QAT_CIPHER_MODE_XTS_KEY_SZ_MULT) -#define ICP_QAT_HW_ARC4_KEY_SZ 256 -#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16 -#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16 -#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16 -#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16 -#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2 - -#define ICP_QAT_HW_CIPHER_MAX_KEY_SZ ICP_QAT_HW_AES_256_F8_KEY_SZ - -/* These defines describe position of the bit-fields - * in the flags byte in B0 - */ -#define ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT 6 -#define ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT 3 - -#define ICP_QAT_HW_CCM_BUILD_B0_FLAGS(Adata, t, q) \ - ((((Adata) > 0 ? 
1 : 0) << ICP_QAT_HW_CCM_B0_FLAGS_ADATA_SHIFT) \ - | ((((t) - 2) >> 1) << ICP_QAT_HW_CCM_B0_FLAGS_T_SHIFT) \ - | ((q) - 1)) - -#define ICP_QAT_HW_CCM_NQ_CONST 15 -#define ICP_QAT_HW_CCM_AAD_B0_LEN 16 -#define ICP_QAT_HW_CCM_AAD_LEN_INFO 2 -#define ICP_QAT_HW_CCM_AAD_DATA_OFFSET (ICP_QAT_HW_CCM_AAD_B0_LEN + \ - ICP_QAT_HW_CCM_AAD_LEN_INFO) -#define ICP_QAT_HW_CCM_AAD_ALIGNMENT 16 -#define ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE 4 -#define ICP_QAT_HW_CCM_NONCE_OFFSET 1 - -struct icp_qat_hw_cipher_algo_blk { - struct icp_qat_hw_cipher_config cipher_config; - uint8_t key[ICP_QAT_HW_CIPHER_MAX_KEY_SZ]; -} __rte_cache_aligned; - -#endif diff --git a/drivers/crypto/qat/qat_adf/qat_algs.h b/drivers/crypto/qat/qat_adf/qat_algs.h deleted file mode 100644 index 802ba95d..00000000 --- a/drivers/crypto/qat/qat_adf/qat_algs.h +++ /dev/null @@ -1,169 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * Copyright(c) 2015-2016 Intel Corporation. - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * qat-linux@intel.com - * - * BSD LICENSE - * Copyright(c) 2015-2017 Intel Corporation. - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
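
For reference: the ICP_QAT_HW_CIPHER_CONFIG_BUILD macro removed above packs cipher mode, algorithm, key-conversion and direction into the 32-bit cipher_config.val word, using the QAT_CIPHER_*_BITPOS/_MASK pairs defined alongside it. A minimal standalone sketch (hypothetical helper and values, not part of this patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Same bit layout as the QAT_CIPHER_*_BITPOS/_MASK defines above. */
    static uint32_t cipher_config_build(uint32_t mode, uint32_t algo,
                                        uint32_t convert, uint32_t dir)
    {
        return ((mode & 0xF) << 4) |      /* QAT_CIPHER_MODE_*    */
               ((algo & 0xF) << 0) |      /* QAT_CIPHER_ALGO_*    */
               ((convert & 0x1) << 9) |   /* QAT_CIPHER_CONVERT_* */
               ((dir & 0x1) << 8);        /* QAT_CIPHER_DIR_*     */
    }

    int main(void)
    {
        /* AES128 (algo 3) in CBC mode (1), no key convert, encrypt (0):
         * (1 << 4) | 3 == 0x13
         */
        printf("0x%02x\n", (unsigned int)cipher_config_build(1, 3, 0, 0));
        return 0;
    }
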
- */ -#ifndef _ICP_QAT_ALGS_H_ -#define _ICP_QAT_ALGS_H_ -#include -#include -#include "icp_qat_hw.h" -#include "icp_qat_fw.h" -#include "icp_qat_fw_la.h" -#include "../qat_crypto.h" - -/* - * Key Modifier (KM) value used in KASUMI algorithm in F9 mode to XOR - * Integrity Key (IK) - */ -#define KASUMI_F9_KEY_MODIFIER_4_BYTES 0xAAAAAAAA - -#define KASUMI_F8_KEY_MODIFIER_4_BYTES 0x55555555 - -/* 3DES key sizes */ -#define QAT_3DES_KEY_SZ_OPT1 24 /* Keys are independent */ -#define QAT_3DES_KEY_SZ_OPT2 16 /* K3=K1 */ - -#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \ - ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \ - ICP_QAT_HW_CIPHER_NO_CONVERT, \ - ICP_QAT_HW_CIPHER_ENCRYPT) - -#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \ - ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \ - ICP_QAT_HW_CIPHER_KEY_CONVERT, \ - ICP_QAT_HW_CIPHER_DECRYPT) - -struct qat_alg_buf { - uint32_t len; - uint32_t resrvd; - uint64_t addr; -} __rte_packed; - -enum qat_crypto_proto_flag { - QAT_CRYPTO_PROTO_FLAG_NONE = 0, - QAT_CRYPTO_PROTO_FLAG_CCM = 1, - QAT_CRYPTO_PROTO_FLAG_GCM = 2, - QAT_CRYPTO_PROTO_FLAG_SNOW3G = 3, - QAT_CRYPTO_PROTO_FLAG_ZUC = 4 -}; - -/* - * Maximum number of SGL entries - */ -#define QAT_SGL_MAX_NUMBER 16 - -struct qat_alg_buf_list { - uint64_t resrvd; - uint32_t num_bufs; - uint32_t num_mapped_bufs; - struct qat_alg_buf bufers[QAT_SGL_MAX_NUMBER]; -} __rte_packed __rte_cache_aligned; - -struct qat_crypto_op_cookie { - struct qat_alg_buf_list qat_sgl_list_src; - struct qat_alg_buf_list qat_sgl_list_dst; - rte_iova_t qat_sgl_src_phys_addr; - rte_iova_t qat_sgl_dst_phys_addr; -}; - -/* Common content descriptor */ -struct qat_alg_cd { - struct icp_qat_hw_cipher_algo_blk cipher; - struct icp_qat_hw_auth_algo_blk hash; -} __rte_packed __rte_cache_aligned; - -struct qat_session { - enum icp_qat_fw_la_cmd_id qat_cmd; - enum icp_qat_hw_cipher_algo qat_cipher_alg; - enum icp_qat_hw_cipher_dir qat_dir; - enum icp_qat_hw_cipher_mode qat_mode; - enum icp_qat_hw_auth_algo qat_hash_alg; - enum icp_qat_hw_auth_op auth_op; - void *bpi_ctx; - struct qat_alg_cd cd; - uint8_t *cd_cur_ptr; - rte_iova_t cd_paddr; - struct icp_qat_fw_la_bulk_req fw_req; - uint8_t aad_len; - struct qat_crypto_instance *inst; - struct { - uint16_t offset; - uint16_t length; - } cipher_iv; - struct { - uint16_t offset; - uint16_t length; - } auth_iv; - uint16_t digest_length; - rte_spinlock_t lock; /* protects this struct */ - enum qat_device_gen min_qat_dev_gen; -}; - -int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg); - -int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cd, - uint8_t *enckey, - uint32_t enckeylen); - -int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc, - uint8_t *authkey, - uint32_t authkeylen, - uint32_t aad_length, - uint32_t digestsize, - unsigned int operation); - -void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header, - enum qat_crypto_proto_flag proto_flags); - -int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg); -int qat_alg_validate_aes_docsisbpi_key(int key_len, - enum icp_qat_hw_cipher_algo *alg); -int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg); -int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg); -int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg); -int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg); -int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo 
qat_cipher_alg); -int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg); -#endif diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c deleted file mode 100644 index 26f854c2..00000000 --- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c +++ /dev/null @@ -1,1059 +0,0 @@ -/* - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * Copyright(c) 2015-2016 Intel Corporation. - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * Contact Information: - * qat-linux@intel.com - * - * BSD LICENSE - * Copyright(c) 2015-2017 Intel Corporation. - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
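
For reference: the KASUMI key-modifier constants removed above (0x55555555 for F8, 0xAAAAAAAA for F9) are XORed into the key one 32-bit word at a time when the content descriptor is built, as the loops in qat_algs_build_desc.c below do. A minimal sketch of the F8 case (hypothetical function name, not part of this patch):

    #include <stdint.h>
    #include <string.h>

    #define KASUMI_F8_KEY_MODIFIER_4_BYTES 0x55555555

    /* Derive the modified key copy that sits after the real key in the
     * content descriptor: each 32-bit word XORed with the F8 modifier.
     */
    static void kasumi_f8_modified_key(const uint8_t key[16], uint8_t out[16])
    {
        uint32_t w[4];    /* 16-byte KASUMI key == four 32-bit words */
        int i;

        memcpy(w, key, sizeof(w));
        for (i = 0; i < 4; i++)
            w[i] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES;
        memcpy(out, w, sizeof(w));
    }
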
- */ - -#include -#include -#include -#include -#include -#include -#include - -#include "../qat_logs.h" - -#include /* Needed to calculate pre-compute values */ -#include /* Needed to calculate pre-compute values */ -#include /* Needed to calculate pre-compute values */ - -#include "qat_algs.h" - -/* returns block size in bytes per cipher algo */ -int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg) -{ - switch (qat_cipher_alg) { - case ICP_QAT_HW_CIPHER_ALGO_DES: - return ICP_QAT_HW_DES_BLK_SZ; - case ICP_QAT_HW_CIPHER_ALGO_3DES: - return ICP_QAT_HW_3DES_BLK_SZ; - case ICP_QAT_HW_CIPHER_ALGO_AES128: - case ICP_QAT_HW_CIPHER_ALGO_AES192: - case ICP_QAT_HW_CIPHER_ALGO_AES256: - return ICP_QAT_HW_AES_BLK_SZ; - default: - PMD_DRV_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg); - return -EFAULT; - }; - return -EFAULT; -} - -/* - * Returns size in bytes per hash algo for state1 size field in cd_ctrl - * This is digest size rounded up to nearest quadword - */ -static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg) -{ - switch (qat_hash_alg) { - case ICP_QAT_HW_AUTH_ALGO_SHA1: - return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - case ICP_QAT_HW_AUTH_ALGO_SHA224: - return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - case ICP_QAT_HW_AUTH_ALGO_SHA256: - return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - case ICP_QAT_HW_AUTH_ALGO_SHA384: - return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - case ICP_QAT_HW_AUTH_ALGO_SHA512: - return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC: - return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - case ICP_QAT_HW_AUTH_ALGO_GALOIS_128: - case ICP_QAT_HW_AUTH_ALGO_GALOIS_64: - return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3: - return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2: - return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - case ICP_QAT_HW_AUTH_ALGO_MD5: - return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9: - return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - case ICP_QAT_HW_AUTH_ALGO_NULL: - return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC: - return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - case ICP_QAT_HW_AUTH_ALGO_DELIMITER: - /* return maximum state1 size in this case */ - return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ, - QAT_HW_DEFAULT_ALIGNMENT); - default: - PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg); - return -EFAULT; - }; - return -EFAULT; -} - -/* returns digest size in bytes per hash algo */ -static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg) -{ - switch (qat_hash_alg) { - case ICP_QAT_HW_AUTH_ALGO_SHA1: - return ICP_QAT_HW_SHA1_STATE1_SZ; - case ICP_QAT_HW_AUTH_ALGO_SHA224: - return ICP_QAT_HW_SHA224_STATE1_SZ; - case ICP_QAT_HW_AUTH_ALGO_SHA256: - return ICP_QAT_HW_SHA256_STATE1_SZ; - case ICP_QAT_HW_AUTH_ALGO_SHA384: - return ICP_QAT_HW_SHA384_STATE1_SZ; - case ICP_QAT_HW_AUTH_ALGO_SHA512: - return ICP_QAT_HW_SHA512_STATE1_SZ; - case 
ICP_QAT_HW_AUTH_ALGO_MD5: - return ICP_QAT_HW_MD5_STATE1_SZ; - case ICP_QAT_HW_AUTH_ALGO_DELIMITER: - /* return maximum digest size in this case */ - return ICP_QAT_HW_SHA512_STATE1_SZ; - default: - PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg); - return -EFAULT; - }; - return -EFAULT; -} - -/* returns block size in byes per hash algo */ -static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg) -{ - switch (qat_hash_alg) { - case ICP_QAT_HW_AUTH_ALGO_SHA1: - return SHA_CBLOCK; - case ICP_QAT_HW_AUTH_ALGO_SHA224: - return SHA256_CBLOCK; - case ICP_QAT_HW_AUTH_ALGO_SHA256: - return SHA256_CBLOCK; - case ICP_QAT_HW_AUTH_ALGO_SHA384: - return SHA512_CBLOCK; - case ICP_QAT_HW_AUTH_ALGO_SHA512: - return SHA512_CBLOCK; - case ICP_QAT_HW_AUTH_ALGO_GALOIS_128: - return 16; - case ICP_QAT_HW_AUTH_ALGO_MD5: - return MD5_CBLOCK; - case ICP_QAT_HW_AUTH_ALGO_DELIMITER: - /* return maximum block size in this case */ - return SHA512_CBLOCK; - default: - PMD_DRV_LOG(ERR, "invalid hash alg %u", qat_hash_alg); - return -EFAULT; - }; - return -EFAULT; -} - -static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out) -{ - SHA_CTX ctx; - - if (!SHA1_Init(&ctx)) - return -EFAULT; - SHA1_Transform(&ctx, data_in); - rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH); - return 0; -} - -static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out) -{ - SHA256_CTX ctx; - - if (!SHA224_Init(&ctx)) - return -EFAULT; - SHA256_Transform(&ctx, data_in); - rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH); - return 0; -} - -static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out) -{ - SHA256_CTX ctx; - - if (!SHA256_Init(&ctx)) - return -EFAULT; - SHA256_Transform(&ctx, data_in); - rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH); - return 0; -} - -static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out) -{ - SHA512_CTX ctx; - - if (!SHA384_Init(&ctx)) - return -EFAULT; - SHA512_Transform(&ctx, data_in); - rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH); - return 0; -} - -static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out) -{ - SHA512_CTX ctx; - - if (!SHA512_Init(&ctx)) - return -EFAULT; - SHA512_Transform(&ctx, data_in); - rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH); - return 0; -} - -static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out) -{ - MD5_CTX ctx; - - if (!MD5_Init(&ctx)) - return -EFAULT; - MD5_Transform(&ctx, data_in); - rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH); - - return 0; -} - -static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg, - uint8_t *data_in, - uint8_t *data_out) -{ - int digest_size; - uint8_t digest[qat_hash_get_digest_size( - ICP_QAT_HW_AUTH_ALGO_DELIMITER)]; - uint32_t *hash_state_out_be32; - uint64_t *hash_state_out_be64; - int i; - - PMD_INIT_FUNC_TRACE(); - digest_size = qat_hash_get_digest_size(hash_alg); - if (digest_size <= 0) - return -EFAULT; - - hash_state_out_be32 = (uint32_t *)data_out; - hash_state_out_be64 = (uint64_t *)data_out; - - switch (hash_alg) { - case ICP_QAT_HW_AUTH_ALGO_SHA1: - if (partial_hash_sha1(data_in, digest)) - return -EFAULT; - for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++) - *hash_state_out_be32 = - rte_bswap32(*(((uint32_t *)digest)+i)); - break; - case ICP_QAT_HW_AUTH_ALGO_SHA224: - if (partial_hash_sha224(data_in, digest)) - return -EFAULT; - for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++) - *hash_state_out_be32 = - rte_bswap32(*(((uint32_t *)digest)+i)); - break; - case ICP_QAT_HW_AUTH_ALGO_SHA256: - if 
(partial_hash_sha256(data_in, digest)) - return -EFAULT; - for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++) - *hash_state_out_be32 = - rte_bswap32(*(((uint32_t *)digest)+i)); - break; - case ICP_QAT_HW_AUTH_ALGO_SHA384: - if (partial_hash_sha384(data_in, digest)) - return -EFAULT; - for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++) - *hash_state_out_be64 = - rte_bswap64(*(((uint64_t *)digest)+i)); - break; - case ICP_QAT_HW_AUTH_ALGO_SHA512: - if (partial_hash_sha512(data_in, digest)) - return -EFAULT; - for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++) - *hash_state_out_be64 = - rte_bswap64(*(((uint64_t *)digest)+i)); - break; - case ICP_QAT_HW_AUTH_ALGO_MD5: - if (partial_hash_md5(data_in, data_out)) - return -EFAULT; - break; - default: - PMD_DRV_LOG(ERR, "invalid hash alg %u", hash_alg); - return -EFAULT; - } - - return 0; -} -#define HMAC_IPAD_VALUE 0x36 -#define HMAC_OPAD_VALUE 0x5c -#define HASH_XCBC_PRECOMP_KEY_NUM 3 - -static int qat_alg_do_precomputes(enum icp_qat_hw_auth_algo hash_alg, - const uint8_t *auth_key, - uint16_t auth_keylen, - uint8_t *p_state_buf, - uint16_t *p_state_len) -{ - int block_size; - uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)]; - uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)]; - int i; - - PMD_INIT_FUNC_TRACE(); - if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) { - static uint8_t qat_aes_xcbc_key_seed[ - ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = { - 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, - 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, - 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, - }; - - uint8_t *in = NULL; - uint8_t *out = p_state_buf; - int x; - AES_KEY enc_key; - - in = rte_zmalloc("working mem for key", - ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16); - if (in == NULL) { - PMD_DRV_LOG(ERR, "Failed to alloc memory"); - return -ENOMEM; - } - - rte_memcpy(in, qat_aes_xcbc_key_seed, - ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ); - for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) { - if (AES_set_encrypt_key(auth_key, auth_keylen << 3, - &enc_key) != 0) { - rte_free(in - - (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ)); - memset(out - - (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ), - 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ); - return -EFAULT; - } - AES_encrypt(in, out, &enc_key); - in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ; - out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ; - } - *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ; - rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ); - return 0; - } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) || - (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) { - uint8_t *in = NULL; - uint8_t *out = p_state_buf; - AES_KEY enc_key; - - memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ + - ICP_QAT_HW_GALOIS_LEN_A_SZ + - ICP_QAT_HW_GALOIS_E_CTR0_SZ); - in = rte_zmalloc("working mem for key", - ICP_QAT_HW_GALOIS_H_SZ, 16); - if (in == NULL) { - PMD_DRV_LOG(ERR, "Failed to alloc memory"); - return -ENOMEM; - } - - memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ); - if (AES_set_encrypt_key(auth_key, auth_keylen << 3, - &enc_key) != 0) { - return -EFAULT; - } - AES_encrypt(in, out, &enc_key); - *p_state_len = ICP_QAT_HW_GALOIS_H_SZ + - ICP_QAT_HW_GALOIS_LEN_A_SZ + - ICP_QAT_HW_GALOIS_E_CTR0_SZ; - rte_free(in); - return 0; - } - - block_size = qat_hash_get_block_size(hash_alg); - if (block_size <= 0) - return -EFAULT; - /* init ipad and opad from 
key and xor with fixed values */ - memset(ipad, 0, block_size); - memset(opad, 0, block_size); - - if (auth_keylen > (unsigned int)block_size) { - PMD_DRV_LOG(ERR, "invalid keylen %u", auth_keylen); - return -EFAULT; - } - rte_memcpy(ipad, auth_key, auth_keylen); - rte_memcpy(opad, auth_key, auth_keylen); - - for (i = 0; i < block_size; i++) { - uint8_t *ipad_ptr = ipad + i; - uint8_t *opad_ptr = opad + i; - *ipad_ptr ^= HMAC_IPAD_VALUE; - *opad_ptr ^= HMAC_OPAD_VALUE; - } - - /* do partial hash of ipad and copy to state1 */ - if (partial_hash_compute(hash_alg, ipad, p_state_buf)) { - memset(ipad, 0, block_size); - memset(opad, 0, block_size); - PMD_DRV_LOG(ERR, "ipad precompute failed"); - return -EFAULT; - } - - /* - * State len is a multiple of 8, so may be larger than the digest. - * Put the partial hash of opad state_len bytes after state1 - */ - *p_state_len = qat_hash_get_state1_size(hash_alg); - if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) { - memset(ipad, 0, block_size); - memset(opad, 0, block_size); - PMD_DRV_LOG(ERR, "opad precompute failed"); - return -EFAULT; - } - - /* don't leave data lying around */ - memset(ipad, 0, block_size); - memset(opad, 0, block_size); - return 0; -} - -void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header, - enum qat_crypto_proto_flag proto_flags) -{ - PMD_INIT_FUNC_TRACE(); - header->hdr_flags = - ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET); - header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA; - header->comn_req_flags = - ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR, - QAT_COMN_PTR_TYPE_FLAT); - ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_PARTIAL_NONE); - ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags, - ICP_QAT_FW_CIPH_IV_16BYTE_DATA); - - switch (proto_flags) { - case QAT_CRYPTO_PROTO_FLAG_NONE: - ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_NO_PROTO); - break; - case QAT_CRYPTO_PROTO_FLAG_CCM: - ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_CCM_PROTO); - break; - case QAT_CRYPTO_PROTO_FLAG_GCM: - ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_GCM_PROTO); - break; - case QAT_CRYPTO_PROTO_FLAG_SNOW3G: - ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_SNOW_3G_PROTO); - break; - case QAT_CRYPTO_PROTO_FLAG_ZUC: - ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_ZUC_3G_PROTO); - break; - } - - ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_NO_UPDATE_STATE); - ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER); -} - -/* - * Snow3G and ZUC should never use this function - * and set its protocol flag in both cipher and auth part of content - * descriptor building function - */ -static enum qat_crypto_proto_flag -qat_get_crypto_proto_flag(uint16_t flags) -{ - int proto = ICP_QAT_FW_LA_PROTO_GET(flags); - enum qat_crypto_proto_flag qat_proto_flag = - QAT_CRYPTO_PROTO_FLAG_NONE; - - switch (proto) { - case ICP_QAT_FW_LA_GCM_PROTO: - qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM; - break; - case ICP_QAT_FW_LA_CCM_PROTO: - qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM; - break; - } - - return qat_proto_flag; -} - -int qat_alg_aead_session_create_content_desc_cipher(struct qat_session *cdesc, - uint8_t *cipherkey, - uint32_t cipherkeylen) -{ - struct icp_qat_hw_cipher_algo_blk *cipher; - struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req; - struct 
icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; - struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; - void *ptr = &req_tmpl->cd_ctrl; - struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr; - struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr; - enum icp_qat_hw_cipher_convert key_convert; - enum qat_crypto_proto_flag qat_proto_flag = - QAT_CRYPTO_PROTO_FLAG_NONE; - uint32_t total_key_size; - uint16_t cipher_offset, cd_size; - uint32_t wordIndex = 0; - uint32_t *temp_key = NULL; - PMD_INIT_FUNC_TRACE(); - - if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) { - cd_pars->u.s.content_desc_addr = cdesc->cd_paddr; - ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, - ICP_QAT_FW_SLICE_CIPHER); - ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, - ICP_QAT_FW_SLICE_DRAM_WR); - ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_NO_RET_AUTH_RES); - ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_NO_CMP_AUTH_RES); - cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd; - } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) { - cd_pars->u.s.content_desc_addr = cdesc->cd_paddr; - ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, - ICP_QAT_FW_SLICE_CIPHER); - ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, - ICP_QAT_FW_SLICE_AUTH); - ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, - ICP_QAT_FW_SLICE_AUTH); - ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, - ICP_QAT_FW_SLICE_DRAM_WR); - cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd; - } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) { - PMD_DRV_LOG(ERR, "Invalid param, must be a cipher command."); - return -EFAULT; - } - - if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) { - /* - * CTR Streaming ciphers are a special case. Decrypt = encrypt - * Overriding default values previously set - */ - cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT; - key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT; - } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 - || cdesc->qat_cipher_alg == - ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) - key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT; - else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) - key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT; - else - key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT; - - if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) { - total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ + - ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ; - cipher_cd_ctrl->cipher_state_sz = - ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3; - qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G; - - } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) { - total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ; - cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3; - cipher_cd_ctrl->cipher_padding_sz = - (2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3; - } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) { - total_key_size = ICP_QAT_HW_3DES_KEY_SZ; - cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3; - qat_proto_flag = - qat_get_crypto_proto_flag(header->serv_specif_flags); - } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) { - total_key_size = ICP_QAT_HW_DES_KEY_SZ; - cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3; - qat_proto_flag = - qat_get_crypto_proto_flag(header->serv_specif_flags); - } else if (cdesc->qat_cipher_alg == - ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) { - total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ + - ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ; - cipher_cd_ctrl->cipher_state_sz = - ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3; - qat_proto_flag = 
QAT_CRYPTO_PROTO_FLAG_ZUC; - cdesc->min_qat_dev_gen = QAT_GEN2; - } else { - total_key_size = cipherkeylen; - cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3; - qat_proto_flag = - qat_get_crypto_proto_flag(header->serv_specif_flags); - } - cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3; - cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd); - cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3; - - header->service_cmd_id = cdesc->qat_cmd; - qat_alg_init_common_hdr(header, qat_proto_flag); - - cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr; - cipher->cipher_config.val = - ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode, - cdesc->qat_cipher_alg, key_convert, - cdesc->qat_dir); - - if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) { - temp_key = (uint32_t *)(cdesc->cd_cur_ptr + - sizeof(struct icp_qat_hw_cipher_config) - + cipherkeylen); - memcpy(cipher->key, cipherkey, cipherkeylen); - memcpy(temp_key, cipherkey, cipherkeylen); - - /* XOR Key with KASUMI F8 key modifier at 4 bytes level */ - for (wordIndex = 0; wordIndex < (cipherkeylen >> 2); - wordIndex++) - temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES; - - cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) + - cipherkeylen + cipherkeylen; - } else { - memcpy(cipher->key, cipherkey, cipherkeylen); - cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) + - cipherkeylen; - } - - if (total_key_size > cipherkeylen) { - uint32_t padding_size = total_key_size-cipherkeylen; - if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) - && (cipherkeylen == QAT_3DES_KEY_SZ_OPT2)) - /* K3 not provided so use K1 = K3*/ - memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size); - else - memset(cdesc->cd_cur_ptr, 0, padding_size); - cdesc->cd_cur_ptr += padding_size; - } - cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd; - cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3; - - return 0; -} - -int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc, - uint8_t *authkey, - uint32_t authkeylen, - uint32_t aad_length, - uint32_t digestsize, - unsigned int operation) -{ - struct icp_qat_hw_auth_setup *hash; - struct icp_qat_hw_cipher_algo_blk *cipherconfig; - struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req; - struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; - struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; - void *ptr = &req_tmpl->cd_ctrl; - struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr; - struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr; - struct icp_qat_fw_la_auth_req_params *auth_param = - (struct icp_qat_fw_la_auth_req_params *) - ((char *)&req_tmpl->serv_specif_rqpars + - sizeof(struct icp_qat_fw_la_cipher_req_params)); - uint16_t state1_size = 0, state2_size = 0; - uint16_t hash_offset, cd_size; - uint32_t *aad_len = NULL; - uint32_t wordIndex = 0; - uint32_t *pTempKey; - enum qat_crypto_proto_flag qat_proto_flag = - QAT_CRYPTO_PROTO_FLAG_NONE; - - PMD_INIT_FUNC_TRACE(); - - if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) { - ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, - ICP_QAT_FW_SLICE_AUTH); - ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, - ICP_QAT_FW_SLICE_DRAM_WR); - cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd; - } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) { - ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, - ICP_QAT_FW_SLICE_AUTH); - ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, - ICP_QAT_FW_SLICE_CIPHER); - ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, - 
ICP_QAT_FW_SLICE_CIPHER); - ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, - ICP_QAT_FW_SLICE_DRAM_WR); - cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd; - } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) { - PMD_DRV_LOG(ERR, "Invalid param, must be a hash command."); - return -EFAULT; - } - - if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) { - ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_NO_RET_AUTH_RES); - ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_CMP_AUTH_RES); - cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY; - } else { - ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_RET_AUTH_RES); - ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags, - ICP_QAT_FW_LA_NO_CMP_AUTH_RES); - cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE; - } - - /* - * Setup the inner hash config - */ - hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd); - hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr; - hash->auth_config.reserved = 0; - hash->auth_config.config = - ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1, - cdesc->qat_hash_alg, digestsize); - - if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 - || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 - || cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) - hash->auth_counter.counter = 0; - else - hash->auth_counter.counter = rte_bswap32( - qat_hash_get_block_size(cdesc->qat_hash_alg)); - - cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_auth_setup); - - /* - * cd_cur_ptr now points at the state1 information. - */ - switch (cdesc->qat_hash_alg) { - case ICP_QAT_HW_AUTH_ALGO_SHA1: - if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, - authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) { - PMD_DRV_LOG(ERR, "(SHA)precompute failed"); - return -EFAULT; - } - state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8); - break; - case ICP_QAT_HW_AUTH_ALGO_SHA224: - if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, - authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) { - PMD_DRV_LOG(ERR, "(SHA)precompute failed"); - return -EFAULT; - } - state2_size = ICP_QAT_HW_SHA224_STATE2_SZ; - break; - case ICP_QAT_HW_AUTH_ALGO_SHA256: - if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, - authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) { - PMD_DRV_LOG(ERR, "(SHA)precompute failed"); - return -EFAULT; - } - state2_size = ICP_QAT_HW_SHA256_STATE2_SZ; - break; - case ICP_QAT_HW_AUTH_ALGO_SHA384: - if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, - authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) { - PMD_DRV_LOG(ERR, "(SHA)precompute failed"); - return -EFAULT; - } - state2_size = ICP_QAT_HW_SHA384_STATE2_SZ; - break; - case ICP_QAT_HW_AUTH_ALGO_SHA512: - if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, - authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) { - PMD_DRV_LOG(ERR, "(SHA)precompute failed"); - return -EFAULT; - } - state2_size = ICP_QAT_HW_SHA512_STATE2_SZ; - break; - case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC: - state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ; - if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC, - authkey, authkeylen, cdesc->cd_cur_ptr + state1_size, - &state2_size)) { - PMD_DRV_LOG(ERR, "(XCBC)precompute failed"); - return -EFAULT; - } - break; - case ICP_QAT_HW_AUTH_ALGO_GALOIS_128: - case ICP_QAT_HW_AUTH_ALGO_GALOIS_64: - qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM; - state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ; - if (qat_alg_do_precomputes(cdesc->qat_hash_alg, - authkey, authkeylen, 
cdesc->cd_cur_ptr + state1_size, - &state2_size)) { - PMD_DRV_LOG(ERR, "(GCM)precompute failed"); - return -EFAULT; - } - /* - * Write (the length of AAD) into bytes 16-19 of state2 - * in big-endian format. This field is 8 bytes - */ - auth_param->u2.aad_sz = - RTE_ALIGN_CEIL(aad_length, 16); - auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3; - - aad_len = (uint32_t *)(cdesc->cd_cur_ptr + - ICP_QAT_HW_GALOIS_128_STATE1_SZ + - ICP_QAT_HW_GALOIS_H_SZ); - *aad_len = rte_bswap32(aad_length); - cdesc->aad_len = aad_length; - break; - case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2: - qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G; - state1_size = qat_hash_get_state1_size( - ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2); - state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ; - memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size); - - cipherconfig = (struct icp_qat_hw_cipher_algo_blk *) - (cdesc->cd_cur_ptr + state1_size + state2_size); - cipherconfig->cipher_config.val = - ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE, - ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2, - ICP_QAT_HW_CIPHER_KEY_CONVERT, - ICP_QAT_HW_CIPHER_ENCRYPT); - memcpy(cipherconfig->key, authkey, authkeylen); - memset(cipherconfig->key + authkeylen, - 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ); - cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) + - authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ; - auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3; - break; - case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3: - hash->auth_config.config = - ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0, - cdesc->qat_hash_alg, digestsize); - qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC; - state1_size = qat_hash_get_state1_size( - ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3); - state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ; - memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size - + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ); - - memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen); - cdesc->cd_cur_ptr += state1_size + state2_size - + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ; - auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3; - cdesc->min_qat_dev_gen = QAT_GEN2; - - break; - case ICP_QAT_HW_AUTH_ALGO_MD5: - if (qat_alg_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, - authkey, authkeylen, cdesc->cd_cur_ptr, - &state1_size)) { - PMD_DRV_LOG(ERR, "(MD5)precompute failed"); - return -EFAULT; - } - state2_size = ICP_QAT_HW_MD5_STATE2_SZ; - break; - case ICP_QAT_HW_AUTH_ALGO_NULL: - state1_size = qat_hash_get_state1_size( - ICP_QAT_HW_AUTH_ALGO_NULL); - state2_size = ICP_QAT_HW_NULL_STATE2_SZ; - break; - case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC: - qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM; - state1_size = qat_hash_get_state1_size( - ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC); - state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ + - ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ; - - if (aad_length > 0) { - aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN + - ICP_QAT_HW_CCM_AAD_LEN_INFO; - auth_param->u2.aad_sz = - RTE_ALIGN_CEIL(aad_length, - ICP_QAT_HW_CCM_AAD_ALIGNMENT); - } else { - auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN; - } - - cdesc->aad_len = aad_length; - hash->auth_counter.counter = 0; - - hash_cd_ctrl->outer_prefix_sz = digestsize; - auth_param->hash_state_sz = digestsize; - - memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen); - break; - case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9: - state1_size = qat_hash_get_state1_size( - ICP_QAT_HW_AUTH_ALGO_KASUMI_F9); - state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ; - memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size); - pTempKey = (uint32_t 
*)(cdesc->cd_cur_ptr + state1_size - + authkeylen); - /* - * The Inner Hash Initial State2 block must contain IK - * (Initialisation Key), followed by IK XOR-ed with KM - * (Key Modifier): IK||(IK^KM). - */ - /* write the auth key */ - memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen); - /* initialise temp key with auth key */ - memcpy(pTempKey, authkey, authkeylen); - /* XOR Key with KASUMI F9 key modifier at 4 bytes level */ - for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++) - pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES; - break; - default: - PMD_DRV_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg); - return -EFAULT; - } - - /* Request template setup */ - qat_alg_init_common_hdr(header, qat_proto_flag); - header->service_cmd_id = cdesc->qat_cmd; - - /* Auth CD config setup */ - hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3; - hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED; - hash_cd_ctrl->inner_res_sz = digestsize; - hash_cd_ctrl->final_sz = digestsize; - hash_cd_ctrl->inner_state1_sz = state1_size; - auth_param->auth_res_sz = digestsize; - - hash_cd_ctrl->inner_state2_sz = state2_size; - hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset + - ((sizeof(struct icp_qat_hw_auth_setup) + - RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8)) - >> 3); - - cdesc->cd_cur_ptr += state1_size + state2_size; - cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd; - - cd_pars->u.s.content_desc_addr = cdesc->cd_paddr; - cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3; - - return 0; -} - -int qat_alg_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg) -{ - switch (key_len) { - case ICP_QAT_HW_AES_128_KEY_SZ: - *alg = ICP_QAT_HW_CIPHER_ALGO_AES128; - break; - case ICP_QAT_HW_AES_192_KEY_SZ: - *alg = ICP_QAT_HW_CIPHER_ALGO_AES192; - break; - case ICP_QAT_HW_AES_256_KEY_SZ: - *alg = ICP_QAT_HW_CIPHER_ALGO_AES256; - break; - default: - return -EINVAL; - } - return 0; -} - -int qat_alg_validate_aes_docsisbpi_key(int key_len, - enum icp_qat_hw_cipher_algo *alg) -{ - switch (key_len) { - case ICP_QAT_HW_AES_128_KEY_SZ: - *alg = ICP_QAT_HW_CIPHER_ALGO_AES128; - break; - default: - return -EINVAL; - } - return 0; -} - -int qat_alg_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg) -{ - switch (key_len) { - case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ: - *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2; - break; - default: - return -EINVAL; - } - return 0; -} - -int qat_alg_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg) -{ - switch (key_len) { - case ICP_QAT_HW_KASUMI_KEY_SZ: - *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI; - break; - default: - return -EINVAL; - } - return 0; -} - -int qat_alg_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg) -{ - switch (key_len) { - case ICP_QAT_HW_DES_KEY_SZ: - *alg = ICP_QAT_HW_CIPHER_ALGO_DES; - break; - default: - return -EINVAL; - } - return 0; -} - -int qat_alg_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg) -{ - switch (key_len) { - case QAT_3DES_KEY_SZ_OPT1: - case QAT_3DES_KEY_SZ_OPT2: - *alg = ICP_QAT_HW_CIPHER_ALGO_3DES; - break; - default: - return -EINVAL; - } - return 0; -} - -int qat_alg_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg) -{ - switch (key_len) { - case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ: - *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3; - break; - default: - return -EINVAL; - } - return 0; -} diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c deleted file mode 100644 
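
For reference: the HMAC precompute just removed (qat_alg_do_precomputes) pads the key to the hash block size, XORs it with the 0x36/0x5c ipad/opad bytes, and runs a single raw block transform of each pad; the two intermediate states become state1/state2 in the content descriptor. A SHA-1 sketch under the same assumptions the deleted code makes (deprecated low-level OpenSSL API, digest words at the start of SHA_CTX; the driver additionally byte-swaps each word to big-endian before storing it):

    #include <string.h>
    #include <openssl/sha.h>

    #define HMAC_IPAD_VALUE 0x36
    #define HMAC_OPAD_VALUE 0x5c

    static int sha1_hmac_precompute(const uint8_t *key, size_t keylen,
                                    uint8_t inner[SHA_DIGEST_LENGTH],
                                    uint8_t outer[SHA_DIGEST_LENGTH])
    {
        uint8_t ipad[SHA_CBLOCK] = {0}, opad[SHA_CBLOCK] = {0};
        SHA_CTX ctx;
        size_t i;

        if (keylen > SHA_CBLOCK)
            return -1;                 /* the deleted code rejects this too */
        memcpy(ipad, key, keylen);
        memcpy(opad, key, keylen);
        for (i = 0; i < SHA_CBLOCK; i++) {
            ipad[i] ^= HMAC_IPAD_VALUE;
            opad[i] ^= HMAC_OPAD_VALUE;
        }
        /* One raw block transform, no length padding: this yields a
         * partial state, not a finished digest. */
        if (!SHA1_Init(&ctx))
            return -1;
        SHA1_Transform(&ctx, ipad);
        memcpy(inner, &ctx, SHA_DIGEST_LENGTH);  /* h0..h4 lead SHA_CTX */
        if (!SHA1_Init(&ctx))
            return -1;
        SHA1_Transform(&ctx, opad);
        memcpy(outer, &ctx, SHA_DIGEST_LENGTH);
        memset(ipad, 0, sizeof(ipad));   /* don't leave key material around */
        memset(opad, 0, sizeof(opad));
        return 0;
    }
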
index 4afe159d..00000000 --- a/drivers/crypto/qat/qat_crypto.c +++ /dev/null @@ -1,1696 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2015-2017 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "qat_logs.h" -#include "qat_algs.h" -#include "qat_crypto.h" -#include "adf_transport_access_macros.h" - -#define BYTE_LENGTH 8 -/* bpi is only used for partial blocks of DES and AES - * so AES block len can be assumed as max len for iv, src and dst - */ -#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ - -static int -qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo, - struct qat_pmd_private *internals) { - int i = 0; - const struct rte_cryptodev_capabilities *capability; - - while ((capability = &(internals->qat_dev_capabilities[i++]))->op != - RTE_CRYPTO_OP_TYPE_UNDEFINED) { - if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) - continue; - - if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER) - continue; - - if (capability->sym.cipher.algo == algo) - return 1; - } - return 0; -} - -static int -qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo, - struct qat_pmd_private *internals) { - int i = 0; - const struct rte_cryptodev_capabilities *capability; - - while ((capability = &(internals->qat_dev_capabilities[i++]))->op != - RTE_CRYPTO_OP_TYPE_UNDEFINED) { - if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) - continue; - - if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH) - continue; - - if (capability->sym.auth.algo == algo) - return 1; - } - return 0; -} - -/** Encrypt a single partial block - * Depends on openssl libcrypto - * Uses ECB+XOR to do CFB encryption, same result, more performant - */ -static inline int -bpi_cipher_encrypt(uint8_t *src, uint8_t *dst, - uint8_t *iv, int ivlen, int srclen, - void *bpi_ctx) -{ - EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx; - int encrypted_ivlen; - uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN]; - uint8_t *encr = encrypted_iv; - - /* ECB method: encrypt the IV, then XOR this with plaintext */ - if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen) - <= 0) - goto cipher_encrypt_err; - - for (; srclen != 0; --srclen, ++dst, ++src, ++encr) - *dst = *src ^ *encr; - - return 0; - -cipher_encrypt_err: - PMD_DRV_LOG(ERR, "libcrypto ECB cipher encrypt failed"); - return -EINVAL; -} - -/** Decrypt a single partial block - * Depends on openssl libcrypto - * Uses ECB+XOR to do CFB encryption, same result, more performant - */ -static inline int -bpi_cipher_decrypt(uint8_t *src, uint8_t *dst, - uint8_t *iv, int ivlen, int srclen, - void *bpi_ctx) -{ - EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx; - int encrypted_ivlen; - uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN]; - uint8_t *encr = encrypted_iv; - - /* ECB method: encrypt (not decrypt!) 
the IV, then XOR with plaintext */ - if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen) - <= 0) - goto cipher_decrypt_err; - - for (; srclen != 0; --srclen, ++dst, ++src, ++encr) - *dst = *src ^ *encr; - - return 0; - -cipher_decrypt_err: - PMD_DRV_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed"); - return -EINVAL; -} - -/** Creates a context in either AES or DES in ECB mode - * Depends on openssl libcrypto - */ -static int -bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo, - enum rte_crypto_cipher_operation direction __rte_unused, - uint8_t *key, void **ctx) -{ - const EVP_CIPHER *algo = NULL; - int ret; - *ctx = EVP_CIPHER_CTX_new(); - - if (*ctx == NULL) { - ret = -ENOMEM; - goto ctx_init_err; - } - - if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI) - algo = EVP_des_ecb(); - else - algo = EVP_aes_128_ecb(); - - /* IV will be ECB encrypted whether direction is encrypt or decrypt */ - if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) { - ret = -EINVAL; - goto ctx_init_err; - } - - return 0; - -ctx_init_err: - if (*ctx != NULL) - EVP_CIPHER_CTX_free(*ctx); - return ret; -} - -/** Frees a context previously created - * Depends on openssl libcrypto - */ -static void -bpi_cipher_ctx_free(void *bpi_ctx) -{ - if (bpi_ctx != NULL) - EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx); -} - -static inline uint32_t -adf_modulo(uint32_t data, uint32_t shift); - -static inline int -qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg, - struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp); - -void -qat_crypto_sym_clear_session(struct rte_cryptodev *dev, - struct rte_cryptodev_sym_session *sess) -{ - PMD_INIT_FUNC_TRACE(); - uint8_t index = dev->driver_id; - void *sess_priv = get_session_private_data(sess, index); - struct qat_session *s = (struct qat_session *)sess_priv; - - if (sess_priv) { - if (s->bpi_ctx) - bpi_cipher_ctx_free(s->bpi_ctx); - memset(s, 0, qat_crypto_sym_get_session_private_size(dev)); - struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); - set_session_private_data(sess, index, NULL); - rte_mempool_put(sess_mp, sess_priv); - } -} - -static int -qat_get_cmd_id(const struct rte_crypto_sym_xform *xform) -{ - /* Cipher Only */ - if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) - return ICP_QAT_FW_LA_CMD_CIPHER; - - /* Authentication Only */ - if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL) - return ICP_QAT_FW_LA_CMD_AUTH; - - /* AEAD */ - if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { - /* AES-GCM and AES-CCM work with different orderings: - * GCM first encrypts and then generates the hash, whereas AES-CCM - * first generates the hash and then encrypts. A similar relation - * applies to decryption.
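
For reference: the bpi_cipher_encrypt/decrypt helpers above implement one CFB step for a trailing partial block by ECB-encrypting the IV (in both directions) and XORing the result with the data. A minimal sketch of that step (hypothetical function name, not part of this patch; ctx must already be initialised for ECB encryption as in bpi_cipher_ctx_init above):

    #include <errno.h>
    #include <stdint.h>
    #include <openssl/evp.h>

    static int bpi_partial_block(EVP_CIPHER_CTX *ctx,
                                 const uint8_t *iv, int ivlen,
                                 const uint8_t *src, uint8_t *dst,
                                 int srclen)
    {
        uint8_t keystream[16];   /* AES block size bounds the IV length */
        int outl, i;

        if (ivlen > (int)sizeof(keystream) || srclen > ivlen)
            return -EINVAL;
        /* ECB-encrypt the IV; the same call serves decryption, since
         * CFB decryption also *encrypts* the feedback value. */
        if (EVP_EncryptUpdate(ctx, keystream, &outl, iv, ivlen) <= 0)
            return -EINVAL;
        for (i = 0; i < srclen; i++)
            dst[i] = src[i] ^ keystream[i];
        return 0;
    }
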
- */ - if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) - if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) - return ICP_QAT_FW_LA_CMD_CIPHER_HASH; - else - return ICP_QAT_FW_LA_CMD_HASH_CIPHER; - else - if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM) - return ICP_QAT_FW_LA_CMD_HASH_CIPHER; - else - return ICP_QAT_FW_LA_CMD_CIPHER_HASH; - } - - if (xform->next == NULL) - return -1; - - /* Cipher then Authenticate */ - if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && - xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) - return ICP_QAT_FW_LA_CMD_CIPHER_HASH; - - /* Authenticate then Cipher */ - if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && - xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) - return ICP_QAT_FW_LA_CMD_HASH_CIPHER; - - return -1; -} - -static struct rte_crypto_auth_xform * -qat_get_auth_xform(struct rte_crypto_sym_xform *xform) -{ - do { - if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) - return &xform->auth; - - xform = xform->next; - } while (xform); - - return NULL; -} - -static struct rte_crypto_cipher_xform * -qat_get_cipher_xform(struct rte_crypto_sym_xform *xform) -{ - do { - if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) - return &xform->cipher; - - xform = xform->next; - } while (xform); - - return NULL; -} - -int -qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev, - struct rte_crypto_sym_xform *xform, - struct qat_session *session) -{ - struct qat_pmd_private *internals = dev->data->dev_private; - struct rte_crypto_cipher_xform *cipher_xform = NULL; - int ret; - - /* Get cipher xform from crypto xform chain */ - cipher_xform = qat_get_cipher_xform(xform); - - session->cipher_iv.offset = cipher_xform->iv.offset; - session->cipher_iv.length = cipher_xform->iv.length; - - switch (cipher_xform->algo) { - case RTE_CRYPTO_CIPHER_AES_CBC: - if (qat_alg_validate_aes_key(cipher_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid AES cipher key size"); - ret = -EINVAL; - goto error_out; - } - session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; - break; - case RTE_CRYPTO_CIPHER_AES_CTR: - if (qat_alg_validate_aes_key(cipher_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid AES cipher key size"); - ret = -EINVAL; - goto error_out; - } - session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE; - break; - case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: - if (qat_alg_validate_snow3g_key(cipher_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid SNOW 3G cipher key size"); - ret = -EINVAL; - goto error_out; - } - session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE; - break; - case RTE_CRYPTO_CIPHER_NULL: - session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE; - break; - case RTE_CRYPTO_CIPHER_KASUMI_F8: - if (qat_alg_validate_kasumi_key(cipher_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid KASUMI cipher key size"); - ret = -EINVAL; - goto error_out; - } - session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE; - break; - case RTE_CRYPTO_CIPHER_3DES_CBC: - if (qat_alg_validate_3des_key(cipher_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size"); - ret = -EINVAL; - goto error_out; - } - session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; - break; - case RTE_CRYPTO_CIPHER_DES_CBC: - if (qat_alg_validate_des_key(cipher_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid DES cipher key size"); - ret = -EINVAL; - goto error_out; - } - session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; - break; - case 
RTE_CRYPTO_CIPHER_3DES_CTR: - if (qat_alg_validate_3des_key(cipher_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid 3DES cipher key size"); - ret = -EINVAL; - goto error_out; - } - session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE; - break; - case RTE_CRYPTO_CIPHER_DES_DOCSISBPI: - ret = bpi_cipher_ctx_init( - cipher_xform->algo, - cipher_xform->op, - cipher_xform->key.data, - &session->bpi_ctx); - if (ret != 0) { - PMD_DRV_LOG(ERR, "failed to create DES BPI ctx"); - goto error_out; - } - if (qat_alg_validate_des_key(cipher_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid DES cipher key size"); - ret = -EINVAL; - goto error_out; - } - session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; - break; - case RTE_CRYPTO_CIPHER_AES_DOCSISBPI: - ret = bpi_cipher_ctx_init( - cipher_xform->algo, - cipher_xform->op, - cipher_xform->key.data, - &session->bpi_ctx); - if (ret != 0) { - PMD_DRV_LOG(ERR, "failed to create AES BPI ctx"); - goto error_out; - } - if (qat_alg_validate_aes_docsisbpi_key(cipher_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid AES DOCSISBPI key size"); - ret = -EINVAL; - goto error_out; - } - session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; - break; - case RTE_CRYPTO_CIPHER_ZUC_EEA3: - if (!qat_is_cipher_alg_supported( - cipher_xform->algo, internals)) { - PMD_DRV_LOG(ERR, "%s not supported on this device", - rte_crypto_cipher_algorithm_strings - [cipher_xform->algo]); - ret = -ENOTSUP; - goto error_out; - } - if (qat_alg_validate_zuc_key(cipher_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid ZUC cipher key size"); - ret = -EINVAL; - goto error_out; - } - session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE; - break; - case RTE_CRYPTO_CIPHER_3DES_ECB: - case RTE_CRYPTO_CIPHER_AES_ECB: - case RTE_CRYPTO_CIPHER_AES_F8: - case RTE_CRYPTO_CIPHER_AES_XTS: - case RTE_CRYPTO_CIPHER_ARC4: - PMD_DRV_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u", - cipher_xform->algo); - ret = -ENOTSUP; - goto error_out; - default: - PMD_DRV_LOG(ERR, "Crypto: Undefined Cipher specified %u\n", - cipher_xform->algo); - ret = -EINVAL; - goto error_out; - } - - if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) - session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT; - else - session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT; - - if (qat_alg_aead_session_create_content_desc_cipher(session, - cipher_xform->key.data, - cipher_xform->key.length)) { - ret = -EINVAL; - goto error_out; - } - - return 0; - -error_out: - if (session->bpi_ctx) { - bpi_cipher_ctx_free(session->bpi_ctx); - session->bpi_ctx = NULL; - } - return ret; -} - -int -qat_crypto_sym_configure_session(struct rte_cryptodev *dev, - struct rte_crypto_sym_xform *xform, - struct rte_cryptodev_sym_session *sess, - struct rte_mempool *mempool) -{ - void *sess_private_data; - int ret; - - if (rte_mempool_get(mempool, &sess_private_data)) { - CDEV_LOG_ERR( - "Couldn't get object from session mempool"); - return -ENOMEM; - } - - ret = qat_crypto_set_session_parameters(dev, xform, sess_private_data); - if (ret != 0) { - PMD_DRV_LOG(ERR, "Crypto QAT PMD: failed to configure " - "session parameters"); - - /* Return session to mempool */ - rte_mempool_put(mempool, sess_private_data); - return ret; - } - - set_session_private_data(sess, dev->driver_id, - sess_private_data); - - return 0; -} - -int -qat_crypto_set_session_parameters(struct rte_cryptodev *dev, - struct rte_crypto_sym_xform *xform, void *session_private) -{ - struct qat_session 
*session = session_private; - int ret; - - int qat_cmd_id; - PMD_INIT_FUNC_TRACE(); - - /* Set context descriptor physical address */ - session->cd_paddr = rte_mempool_virt2iova(session) + - offsetof(struct qat_session, cd); - - session->min_qat_dev_gen = QAT_GEN1; - - /* Get requested QAT command id */ - qat_cmd_id = qat_get_cmd_id(xform); - if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) { - PMD_DRV_LOG(ERR, "Unsupported xform chain requested"); - return -ENOTSUP; - } - session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id; - switch (session->qat_cmd) { - case ICP_QAT_FW_LA_CMD_CIPHER: - ret = qat_crypto_sym_configure_session_cipher(dev, xform, session); - if (ret < 0) - return ret; - break; - case ICP_QAT_FW_LA_CMD_AUTH: - ret = qat_crypto_sym_configure_session_auth(dev, xform, session); - if (ret < 0) - return ret; - break; - case ICP_QAT_FW_LA_CMD_CIPHER_HASH: - if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { - ret = qat_crypto_sym_configure_session_aead(xform, - session); - if (ret < 0) - return ret; - } else { - ret = qat_crypto_sym_configure_session_cipher(dev, - xform, session); - if (ret < 0) - return ret; - ret = qat_crypto_sym_configure_session_auth(dev, - xform, session); - if (ret < 0) - return ret; - } - break; - case ICP_QAT_FW_LA_CMD_HASH_CIPHER: - if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) { - ret = qat_crypto_sym_configure_session_aead(xform, - session); - if (ret < 0) - return ret; - } else { - ret = qat_crypto_sym_configure_session_auth(dev, - xform, session); - if (ret < 0) - return ret; - ret = qat_crypto_sym_configure_session_cipher(dev, - xform, session); - if (ret < 0) - return ret; - } - break; - case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM: - case ICP_QAT_FW_LA_CMD_TRNG_TEST: - case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE: - case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE: - case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE: - case ICP_QAT_FW_LA_CMD_MGF1: - case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP: - case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP: - case ICP_QAT_FW_LA_CMD_DELIMITER: - PMD_DRV_LOG(ERR, "Unsupported Service %u", - session->qat_cmd); - return -ENOTSUP; - default: - PMD_DRV_LOG(ERR, "Unsupported Service %u", - session->qat_cmd); - return -ENOTSUP; - } - - return 0; -} - -int -qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev, - struct rte_crypto_sym_xform *xform, - struct qat_session *session) -{ - struct rte_crypto_auth_xform *auth_xform = NULL; - struct qat_pmd_private *internals = dev->data->dev_private; - auth_xform = qat_get_auth_xform(xform); - uint8_t *key_data = auth_xform->key.data; - uint8_t key_length = auth_xform->key.length; - - switch (auth_xform->algo) { - case RTE_CRYPTO_AUTH_SHA1_HMAC: - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1; - break; - case RTE_CRYPTO_AUTH_SHA224_HMAC: - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224; - break; - case RTE_CRYPTO_AUTH_SHA256_HMAC: - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256; - break; - case RTE_CRYPTO_AUTH_SHA384_HMAC: - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384; - break; - case RTE_CRYPTO_AUTH_SHA512_HMAC: - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512; - break; - case RTE_CRYPTO_AUTH_AES_XCBC_MAC: - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC; - break; - case RTE_CRYPTO_AUTH_AES_GMAC: - if (qat_alg_validate_aes_key(auth_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid AES key size"); - return -EINVAL; - } - session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE; - session->qat_hash_alg = 
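/*
 * qat_crypto_set_session_parameters() above dispatches on the firmware
 * command id that qat_get_cmd_id() derived from the xform chain:
 *
 *	cipher only       -> ICP_QAT_FW_LA_CMD_CIPHER
 *	auth only         -> ICP_QAT_FW_LA_CMD_AUTH
 *	cipher, then auth -> ICP_QAT_FW_LA_CMD_CIPHER_HASH
 *	auth, then cipher -> ICP_QAT_FW_LA_CMD_HASH_CIPHER
 *	AEAD              -> order picked from algorithm and direction
 *
 * For the two chained commands both sub-sessions are then configured, in
 * the order named by the command.
 */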
ICP_QAT_HW_AUTH_ALGO_GALOIS_128; - - break; - case RTE_CRYPTO_AUTH_SNOW3G_UIA2: - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2; - break; - case RTE_CRYPTO_AUTH_MD5_HMAC: - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5; - break; - case RTE_CRYPTO_AUTH_NULL: - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL; - break; - case RTE_CRYPTO_AUTH_KASUMI_F9: - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9; - break; - case RTE_CRYPTO_AUTH_ZUC_EIA3: - if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) { - PMD_DRV_LOG(ERR, "%s not supported on this device", - rte_crypto_auth_algorithm_strings - [auth_xform->algo]); - return -ENOTSUP; - } - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3; - break; - case RTE_CRYPTO_AUTH_SHA1: - case RTE_CRYPTO_AUTH_SHA256: - case RTE_CRYPTO_AUTH_SHA512: - case RTE_CRYPTO_AUTH_SHA224: - case RTE_CRYPTO_AUTH_SHA384: - case RTE_CRYPTO_AUTH_MD5: - case RTE_CRYPTO_AUTH_AES_CMAC: - case RTE_CRYPTO_AUTH_AES_CBC_MAC: - PMD_DRV_LOG(ERR, "Crypto: Unsupported hash alg %u", - auth_xform->algo); - return -ENOTSUP; - default: - PMD_DRV_LOG(ERR, "Crypto: Undefined Hash algo %u specified", - auth_xform->algo); - return -EINVAL; - } - - session->auth_iv.offset = auth_xform->iv.offset; - session->auth_iv.length = auth_xform->iv.length; - - if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) { - if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) { - session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH; - session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT; - /* - * It needs to create cipher desc content first, - * then authentication - */ - if (qat_alg_aead_session_create_content_desc_cipher(session, - auth_xform->key.data, - auth_xform->key.length)) - return -EINVAL; - - if (qat_alg_aead_session_create_content_desc_auth(session, - key_data, - key_length, - 0, - auth_xform->digest_length, - auth_xform->op)) - return -EINVAL; - } else { - session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER; - session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT; - /* - * It needs to create authentication desc content first, - * then cipher - */ - if (qat_alg_aead_session_create_content_desc_auth(session, - key_data, - key_length, - 0, - auth_xform->digest_length, - auth_xform->op)) - return -EINVAL; - - if (qat_alg_aead_session_create_content_desc_cipher(session, - auth_xform->key.data, - auth_xform->key.length)) - return -EINVAL; - } - /* Restore to authentication only only */ - session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH; - } else { - if (qat_alg_aead_session_create_content_desc_auth(session, - key_data, - key_length, - 0, - auth_xform->digest_length, - auth_xform->op)) - return -EINVAL; - } - - session->digest_length = auth_xform->digest_length; - return 0; -} - -int -qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform, - struct qat_session *session) -{ - struct rte_crypto_aead_xform *aead_xform = &xform->aead; - enum rte_crypto_auth_operation crypto_operation; - - /* - * Store AEAD IV parameters as cipher IV, - * to avoid unnecessary memory usage - */ - session->cipher_iv.offset = xform->aead.iv.offset; - session->cipher_iv.length = xform->aead.iv.length; - - switch (aead_xform->algo) { - case RTE_CRYPTO_AEAD_AES_GCM: - if (qat_alg_validate_aes_key(aead_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid AES key size"); - return -EINVAL; - } - session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE; - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128; - break; - case RTE_CRYPTO_AEAD_AES_CCM: - if 
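/*
 * In the AEAD setup below, the content-descriptor build order follows
 * from the algorithm/direction pair: GCM-encrypt and CCM-decrypt program
 * the engine as cipher-then-hash, GCM-decrypt and CCM-encrypt as
 * hash-then-cipher. The same decision as a stand-alone truth table
 * (sketch, assuming only GCM and CCM are in play, as below):
 */
#include <stdbool.h>

static bool
cipher_desc_first(bool is_gcm, bool is_encrypt)
{
	/* GCM+encrypt or CCM+decrypt => build the cipher descriptor first */
	return is_gcm == is_encrypt;
}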
(qat_alg_validate_aes_key(aead_xform->key.length, - &session->qat_cipher_alg) != 0) { - PMD_DRV_LOG(ERR, "Invalid AES key size"); - return -EINVAL; - } - session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE; - session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC; - break; - default: - PMD_DRV_LOG(ERR, "Crypto: Undefined AEAD specified %u\n", - aead_xform->algo); - return -EINVAL; - } - - if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT && - aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) || - (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT && - aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) { - session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT; - /* - * It needs to create cipher desc content first, - * then authentication - */ - - crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ? - RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY; - - if (qat_alg_aead_session_create_content_desc_cipher(session, - aead_xform->key.data, - aead_xform->key.length)) - return -EINVAL; - - if (qat_alg_aead_session_create_content_desc_auth(session, - aead_xform->key.data, - aead_xform->key.length, - aead_xform->aad_length, - aead_xform->digest_length, - crypto_operation)) - return -EINVAL; - } else { - session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT; - /* - * It needs to create authentication desc content first, - * then cipher - */ - - crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ? - RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE; - - if (qat_alg_aead_session_create_content_desc_auth(session, - aead_xform->key.data, - aead_xform->key.length, - aead_xform->aad_length, - aead_xform->digest_length, - crypto_operation)) - return -EINVAL; - - if (qat_alg_aead_session_create_content_desc_cipher(session, - aead_xform->key.data, - aead_xform->key.length)) - return -EINVAL; - } - - session->digest_length = aead_xform->digest_length; - return 0; -} - -unsigned qat_crypto_sym_get_session_private_size( - struct rte_cryptodev *dev __rte_unused) -{ - return RTE_ALIGN_CEIL(sizeof(struct qat_session), 8); -} - -static inline uint32_t -qat_bpicipher_preprocess(struct qat_session *ctx, - struct rte_crypto_op *op) -{ - int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg); - struct rte_crypto_sym_op *sym_op = op->sym; - uint8_t last_block_len = block_len > 0 ? - sym_op->cipher.data.length % block_len : 0; - - if (last_block_len && - ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) { - - /* Decrypt last block */ - uint8_t *last_block, *dst, *iv; - uint32_t last_block_offset = sym_op->cipher.data.offset + - sym_op->cipher.data.length - last_block_len; - last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src, - uint8_t *, last_block_offset); - - if (unlikely(sym_op->m_dst != NULL)) - /* out-of-place operation (OOP) */ - dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst, - uint8_t *, last_block_offset); - else - dst = last_block; - - if (last_block_len < sym_op->cipher.data.length) - /* use previous block ciphertext as IV */ - iv = last_block - block_len; - else - /* runt block, i.e. 
less than one full block */ - iv = rte_crypto_op_ctod_offset(op, uint8_t *, - ctx->cipher_iv.offset); - -#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX - rte_hexdump(stdout, "BPI: src before pre-process:", last_block, - last_block_len); - if (sym_op->m_dst != NULL) - rte_hexdump(stdout, "BPI: dst before pre-process:", dst, - last_block_len); -#endif - bpi_cipher_decrypt(last_block, dst, iv, block_len, - last_block_len, ctx->bpi_ctx); -#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX - rte_hexdump(stdout, "BPI: src after pre-process:", last_block, - last_block_len); - if (sym_op->m_dst != NULL) - rte_hexdump(stdout, "BPI: dst after pre-process:", dst, - last_block_len); -#endif - } - - return sym_op->cipher.data.length - last_block_len; -} - -static inline uint32_t -qat_bpicipher_postprocess(struct qat_session *ctx, - struct rte_crypto_op *op) -{ - int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg); - struct rte_crypto_sym_op *sym_op = op->sym; - uint8_t last_block_len = block_len > 0 ? - sym_op->cipher.data.length % block_len : 0; - - if (last_block_len > 0 && - ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) { - - /* Encrypt last block */ - uint8_t *last_block, *dst, *iv; - uint32_t last_block_offset; - - last_block_offset = sym_op->cipher.data.offset + - sym_op->cipher.data.length - last_block_len; - last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src, - uint8_t *, last_block_offset); - - if (unlikely(sym_op->m_dst != NULL)) - /* out-of-place operation (OOP) */ - dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst, - uint8_t *, last_block_offset); - else - dst = last_block; - - if (last_block_len < sym_op->cipher.data.length) - /* use previous block ciphertext as IV */ - iv = dst - block_len; - else - /* runt block, i.e. less than one full block */ - iv = rte_crypto_op_ctod_offset(op, uint8_t *, - ctx->cipher_iv.offset); - -#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX - rte_hexdump(stdout, "BPI: src before post-process:", last_block, - last_block_len); - if (sym_op->m_dst != NULL) - rte_hexdump(stdout, "BPI: dst before post-process:", - dst, last_block_len); -#endif - bpi_cipher_encrypt(last_block, dst, iv, block_len, - last_block_len, ctx->bpi_ctx); -#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX - rte_hexdump(stdout, "BPI: src after post-process:", last_block, - last_block_len); - if (sym_op->m_dst != NULL) - rte_hexdump(stdout, "BPI: dst after post-process:", dst, - last_block_len); -#endif - } - return sym_op->cipher.data.length - last_block_len; -} - -static inline void -txq_write_tail(struct qat_qp *qp, struct qat_queue *q) { - WRITE_CSR_RING_TAIL(qp->mmap_bar_addr, q->hw_bundle_number, - q->hw_queue_number, q->tail); - q->nb_pending_requests = 0; - q->csr_tail = q->tail; -} - -uint16_t -qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops, - uint16_t nb_ops) -{ - register struct qat_queue *queue; - struct qat_qp *tmp_qp = (struct qat_qp *)qp; - register uint32_t nb_ops_sent = 0; - register struct rte_crypto_op **cur_op = ops; - register int ret; - uint16_t nb_ops_possible = nb_ops; - register uint8_t *base_addr; - register uint32_t tail; - int overflow; - - if (unlikely(nb_ops == 0)) - return 0; - - /* read params used a lot in main loop into registers */ - queue = &(tmp_qp->tx_q); - base_addr = (uint8_t *)queue->base_addr; - tail = queue->tail; - - /* Find how many can actually fit on the ring */ - tmp_qp->inflights16 += nb_ops; - overflow = tmp_qp->inflights16 - queue->max_inflights; - if (overflow > 0) { - tmp_qp->inflights16 -= overflow; - nb_ops_possible = nb_ops - overflow; - if 
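/*
 * The enqueue path above reserves ring space optimistically: the whole
 * burst is added to inflights16 up front and any overflow past
 * max_inflights is immediately given back. The same arithmetic as a
 * stand-alone helper (sketch):
 */
#include <stdint.h>

static uint16_t
reserve_slots(uint16_t *inflights, uint16_t max, uint16_t want)
{
	int overflow = (int)(*inflights + want) - (int)max;

	*inflights += want;
	if (overflow > 0) {
		*inflights -= overflow;	/* give back what does not fit */
		want -= (uint16_t)overflow;
	}
	return want;	/* descriptors actually reserved */
}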
(nb_ops_possible == 0) - return 0; - } - - while (nb_ops_sent != nb_ops_possible) { - ret = qat_write_hw_desc_entry(*cur_op, base_addr + tail, - tmp_qp->op_cookies[tail / queue->msg_size], tmp_qp); - if (ret != 0) { - tmp_qp->stats.enqueue_err_count++; - /* - * This message cannot be enqueued, - * decrease number of ops that wasn't sent - */ - tmp_qp->inflights16 -= nb_ops_possible - nb_ops_sent; - if (nb_ops_sent == 0) - return 0; - goto kick_tail; - } - - tail = adf_modulo(tail + queue->msg_size, queue->modulo); - nb_ops_sent++; - cur_op++; - } -kick_tail: - queue->tail = tail; - tmp_qp->stats.enqueued_count += nb_ops_sent; - queue->nb_pending_requests += nb_ops_sent; - if (tmp_qp->inflights16 < QAT_CSR_TAIL_FORCE_WRITE_THRESH || - queue->nb_pending_requests > QAT_CSR_TAIL_WRITE_THRESH) { - txq_write_tail(tmp_qp, queue); - } - return nb_ops_sent; -} - -static inline -void rxq_free_desc(struct qat_qp *qp, struct qat_queue *q) -{ - uint32_t old_head, new_head; - uint32_t max_head; - - old_head = q->csr_head; - new_head = q->head; - max_head = qp->nb_descriptors * q->msg_size; - - /* write out free descriptors */ - void *cur_desc = (uint8_t *)q->base_addr + old_head; - - if (new_head < old_head) { - memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, max_head - old_head); - memset(q->base_addr, ADF_RING_EMPTY_SIG_BYTE, new_head); - } else { - memset(cur_desc, ADF_RING_EMPTY_SIG_BYTE, new_head - old_head); - } - q->nb_processed_responses = 0; - q->csr_head = new_head; - - /* write current head to CSR */ - WRITE_CSR_RING_HEAD(qp->mmap_bar_addr, q->hw_bundle_number, - q->hw_queue_number, new_head); -} - -uint16_t -qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops, - uint16_t nb_ops) -{ - struct qat_queue *rx_queue, *tx_queue; - struct qat_qp *tmp_qp = (struct qat_qp *)qp; - uint32_t msg_counter = 0; - struct rte_crypto_op *rx_op; - struct icp_qat_fw_comn_resp *resp_msg; - uint32_t head; - - rx_queue = &(tmp_qp->rx_q); - tx_queue = &(tmp_qp->tx_q); - head = rx_queue->head; - resp_msg = (struct icp_qat_fw_comn_resp *) - ((uint8_t *)rx_queue->base_addr + head); - - while (*(uint32_t *)resp_msg != ADF_RING_EMPTY_SIG && - msg_counter != nb_ops) { - rx_op = (struct rte_crypto_op *)(uintptr_t) - (resp_msg->opaque_data); - -#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX - rte_hexdump(stdout, "qat_response:", (uint8_t *)resp_msg, - sizeof(struct icp_qat_fw_comn_resp)); -#endif - if (ICP_QAT_FW_COMN_STATUS_FLAG_OK != - ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET( - resp_msg->comn_hdr.comn_status)) { - rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; - } else { - struct qat_session *sess = (struct qat_session *) - get_session_private_data( - rx_op->sym->session, - cryptodev_qat_driver_id); - - if (sess->bpi_ctx) - qat_bpicipher_postprocess(sess, rx_op); - rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; - } - - head = adf_modulo(head + rx_queue->msg_size, rx_queue->modulo); - resp_msg = (struct icp_qat_fw_comn_resp *) - ((uint8_t *)rx_queue->base_addr + head); - *ops = rx_op; - ops++; - msg_counter++; - } - if (msg_counter > 0) { - rx_queue->head = head; - tmp_qp->stats.dequeued_count += msg_counter; - rx_queue->nb_processed_responses += msg_counter; - tmp_qp->inflights16 -= msg_counter; - - if (rx_queue->nb_processed_responses > QAT_CSR_HEAD_WRITE_THRESH) - rxq_free_desc(tmp_qp, rx_queue); - } - /* also check if tail needs to be advanced */ - if (tmp_qp->inflights16 <= QAT_CSR_TAIL_FORCE_WRITE_THRESH && - tx_queue->tail != tx_queue->csr_tail) { - txq_write_tail(tmp_qp, tx_queue); - } - return msg_counter; -} - -static 
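/*
 * Both rings above coalesce their doorbell writes: the shadow head/tail
 * advances on every message, but the CSR is only touched once enough work
 * has accumulated (QAT_CSR_TAIL_WRITE_THRESH / QAT_CSR_HEAD_WRITE_THRESH,
 * both 32) or when the ring is nearly idle (inflights16 below
 * QAT_CSR_TAIL_FORCE_WRITE_THRESH, 256), trading a little latency for far
 * fewer MMIO writes. The tail-side decision in isolation (sketch):
 */
#include <stdbool.h>
#include <stdint.h>

static bool
should_kick_tail(uint16_t inflights, uint16_t pending)
{
	return inflights < 256	/* force: keep an almost-idle ring moving */
	    || pending > 32;	/* batch: enough requests accumulated */
}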
inline int -qat_sgl_fill_array(struct rte_mbuf *buf, uint64_t buff_start, - struct qat_alg_buf_list *list, uint32_t data_len) -{ - int nr = 1; - - uint32_t buf_len = rte_pktmbuf_iova(buf) - - buff_start + rte_pktmbuf_data_len(buf); - - list->bufers[0].addr = buff_start; - list->bufers[0].resrvd = 0; - list->bufers[0].len = buf_len; - - if (data_len <= buf_len) { - list->num_bufs = nr; - list->bufers[0].len = data_len; - return 0; - } - - buf = buf->next; - while (buf) { - if (unlikely(nr == QAT_SGL_MAX_NUMBER)) { - PMD_DRV_LOG(ERR, "QAT PMD exceeded size of QAT SGL" - " entry(%u)", - QAT_SGL_MAX_NUMBER); - return -EINVAL; - } - - list->bufers[nr].len = rte_pktmbuf_data_len(buf); - list->bufers[nr].resrvd = 0; - list->bufers[nr].addr = rte_pktmbuf_iova(buf); - - buf_len += list->bufers[nr].len; - buf = buf->next; - - if (buf_len > data_len) { - list->bufers[nr].len -= - buf_len - data_len; - buf = NULL; - } - ++nr; - } - list->num_bufs = nr; - - return 0; -} - -static inline void -set_cipher_iv(uint16_t iv_length, uint16_t iv_offset, - struct icp_qat_fw_la_cipher_req_params *cipher_param, - struct rte_crypto_op *op, - struct icp_qat_fw_la_bulk_req *qat_req) -{ - /* copy IV into request if it fits */ - if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) { - rte_memcpy(cipher_param->u.cipher_IV_array, - rte_crypto_op_ctod_offset(op, uint8_t *, - iv_offset), - iv_length); - } else { - ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET( - qat_req->comn_hdr.serv_specif_flags, - ICP_QAT_FW_CIPH_IV_64BIT_PTR); - cipher_param->u.s.cipher_IV_ptr = - rte_crypto_op_ctophys_offset(op, - iv_offset); - } -} - -/** Set IV for CCM is special case, 0th byte is set to q-1 - * where q is padding of nonce in 16 byte block - */ -static inline void -set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset, - struct icp_qat_fw_la_cipher_req_params *cipher_param, - struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz) -{ - rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) + - ICP_QAT_HW_CCM_NONCE_OFFSET, - rte_crypto_op_ctod_offset(op, uint8_t *, - iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET, - iv_length); - *(uint8_t *)&cipher_param->u.cipher_IV_array[0] = - q - ICP_QAT_HW_CCM_NONCE_OFFSET; - - if (aad_len_field_sz) - rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET], - rte_crypto_op_ctod_offset(op, uint8_t *, - iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET, - iv_length); -} - -static inline int -qat_write_hw_desc_entry(struct rte_crypto_op *op, uint8_t *out_msg, - struct qat_crypto_op_cookie *qat_op_cookie, struct qat_qp *qp) -{ - int ret = 0; - struct qat_session *ctx; - struct icp_qat_fw_la_cipher_req_params *cipher_param; - struct icp_qat_fw_la_auth_req_params *auth_param; - register struct icp_qat_fw_la_bulk_req *qat_req; - uint8_t do_auth = 0, do_cipher = 0, do_aead = 0; - uint32_t cipher_len = 0, cipher_ofs = 0; - uint32_t auth_len = 0, auth_ofs = 0; - uint32_t min_ofs = 0; - uint64_t src_buf_start = 0, dst_buf_start = 0; - uint8_t do_sgl = 0; - -#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX - if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) { - PMD_DRV_LOG(ERR, "QAT PMD only supports symmetric crypto " - "operation requests, op (%p) is not a " - "symmetric operation.", op); - return -EINVAL; - } -#endif - if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) { - PMD_DRV_LOG(ERR, "QAT PMD only supports session oriented" - " requests, op (%p) is sessionless.", op); - return -EINVAL; - } - - ctx = (struct qat_session *)get_session_private_data( - op->sym->session, cryptodev_qat_driver_id); - - if 
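/*
 * qat_sgl_fill_array() above flattens an mbuf chain into QAT's flat
 * scatter-gather list: entry 0 may start mid-segment (at buff_start),
 * each further entry covers one mbuf, and the final entry is clipped so
 * that the list describes exactly data_len bytes. The clipping rule in
 * isolation (sketch, hypothetical helper):
 */
#include <stdint.h>

static uint32_t
clip_last_sgl_entry(uint32_t covered, uint32_t seg_len, uint32_t data_len)
{
	/* covered = bytes described so far, including this segment */
	if (covered > data_len)
		seg_len -= covered - data_len;	/* trim the overshoot */
	return seg_len;
}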
(unlikely(ctx == NULL)) { - PMD_DRV_LOG(ERR, "Session was not created for this device"); - return -EINVAL; - } - - if (unlikely(ctx->min_qat_dev_gen > qp->qat_dev_gen)) { - PMD_DRV_LOG(ERR, "Session alg not supported on this device gen"); - op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; - return -EINVAL; - } - - - - qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg; - rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req)); - qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op; - cipher_param = (void *)&qat_req->serv_specif_rqpars; - auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); - - if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER || - ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) { - /* AES-GCM or AES-CCM */ - if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || - ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 || - (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128 - && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE - && ctx->qat_hash_alg == - ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) { - do_aead = 1; - } else { - do_auth = 1; - do_cipher = 1; - } - } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) { - do_auth = 1; - do_cipher = 0; - } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) { - do_auth = 0; - do_cipher = 1; - } - - if (do_cipher) { - - if (ctx->qat_cipher_alg == - ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 || - ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI || - ctx->qat_cipher_alg == - ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) { - - if (unlikely( - (cipher_param->cipher_length % BYTE_LENGTH != 0) - || (cipher_param->cipher_offset - % BYTE_LENGTH != 0))) { - PMD_DRV_LOG(ERR, - "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values"); - op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; - return -EINVAL; - } - cipher_len = op->sym->cipher.data.length >> 3; - cipher_ofs = op->sym->cipher.data.offset >> 3; - - } else if (ctx->bpi_ctx) { - /* DOCSIS - only send complete blocks to device - * Process any partial block using CFB mode. 
- * Even if 0 complete blocks, still send this to device - * to get into rx queue for post-process and dequeuing - */ - cipher_len = qat_bpicipher_preprocess(ctx, op); - cipher_ofs = op->sym->cipher.data.offset; - } else { - cipher_len = op->sym->cipher.data.length; - cipher_ofs = op->sym->cipher.data.offset; - } - - set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset, - cipher_param, op, qat_req); - min_ofs = cipher_ofs; - } - - if (do_auth) { - - if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 || - ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 || - ctx->qat_hash_alg == - ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) { - if (unlikely((auth_param->auth_off % BYTE_LENGTH != 0) - || (auth_param->auth_len % BYTE_LENGTH != 0))) { - PMD_DRV_LOG(ERR, - "For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values"); - op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; - return -EINVAL; - } - auth_ofs = op->sym->auth.data.offset >> 3; - auth_len = op->sym->auth.data.length >> 3; - - auth_param->u1.aad_adr = - rte_crypto_op_ctophys_offset(op, - ctx->auth_iv.offset); - - } else if (ctx->qat_hash_alg == - ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || - ctx->qat_hash_alg == - ICP_QAT_HW_AUTH_ALGO_GALOIS_64) { - /* AES-GMAC */ - set_cipher_iv(ctx->auth_iv.length, - ctx->auth_iv.offset, - cipher_param, op, qat_req); - auth_ofs = op->sym->auth.data.offset; - auth_len = op->sym->auth.data.length; - - auth_param->u1.aad_adr = 0; - auth_param->u2.aad_sz = 0; - - /* - * If len(iv)==12B fw computes J0 - */ - if (ctx->auth_iv.length == 12) { - ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET( - qat_req->comn_hdr.serv_specif_flags, - ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS); - - } - } else { - auth_ofs = op->sym->auth.data.offset; - auth_len = op->sym->auth.data.length; - - } - min_ofs = auth_ofs; - - if (likely(ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL)) - auth_param->auth_res_addr = - op->sym->auth.digest.phys_addr; - - } - - if (do_aead) { - /* - * This address may used for setting AAD physical pointer - * into IV offset from op - */ - rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr; - if (ctx->qat_hash_alg == - ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || - ctx->qat_hash_alg == - ICP_QAT_HW_AUTH_ALGO_GALOIS_64) { - /* - * If len(iv)==12B fw computes J0 - */ - if (ctx->cipher_iv.length == 12) { - ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET( - qat_req->comn_hdr.serv_specif_flags, - ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS); - } - - set_cipher_iv(ctx->cipher_iv.length, - ctx->cipher_iv.offset, - cipher_param, op, qat_req); - - } else if (ctx->qat_hash_alg == - ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) { - - /* In case of AES-CCM this may point to user selected memory - * or iv offset in cypto_op - */ - uint8_t *aad_data = op->sym->aead.aad.data; - /* This is true AAD length, it not includes 18 bytes of - * preceding data - */ - uint8_t aad_ccm_real_len = 0; - - uint8_t aad_len_field_sz = 0; - uint32_t msg_len_be = - rte_bswap32(op->sym->aead.data.length); - - if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) { - aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO; - aad_ccm_real_len = ctx->aad_len - - ICP_QAT_HW_CCM_AAD_B0_LEN - - ICP_QAT_HW_CCM_AAD_LEN_INFO; - } else { - /* - * aad_len not greater than 18, so no actual aad data, - * then use IV after op for B0 block - */ - aad_data = rte_crypto_op_ctod_offset(op, uint8_t *, - ctx->cipher_iv.offset); - aad_phys_addr_aead = - rte_crypto_op_ctophys_offset(op, - ctx->cipher_iv.offset); - } - - uint8_t q = ICP_QAT_HW_CCM_NQ_CONST - ctx->cipher_iv.length; - - aad_data[0] = 
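/*
 * The assignment completed below writes the CCM B0 "flags" octet as
 * specified by RFC 3610: bit 6 is set when AAD is present, bits 5..3
 * carry (digest_len - 2) / 2, and bits 2..0 carry q - 1, where q is the
 * byte width of the message-length field (15 - q being the nonce length).
 * Assuming the macro follows the RFC, it is equivalent to
 *
 *	flags = (aad_present << 6)
 *	      | (((digest_len - 2) / 2) << 3)
 *	      | (q - 1);
 */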
ICP_QAT_HW_CCM_BUILD_B0_FLAGS(aad_len_field_sz, - ctx->digest_length, q); - - if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) { - memcpy(aad_data + ctx->cipher_iv.length + - ICP_QAT_HW_CCM_NONCE_OFFSET - + (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE), - (uint8_t *)&msg_len_be, - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE); - } else { - memcpy(aad_data + ctx->cipher_iv.length + - ICP_QAT_HW_CCM_NONCE_OFFSET, - (uint8_t *)&msg_len_be - + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE - - q), q); - } - - if (aad_len_field_sz > 0) { - *(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN] - = rte_bswap16(aad_ccm_real_len); - - if ((aad_ccm_real_len + aad_len_field_sz) - % ICP_QAT_HW_CCM_AAD_B0_LEN) { - uint8_t pad_len = 0; - uint8_t pad_idx = 0; - - pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN - - ((aad_ccm_real_len + aad_len_field_sz) % - ICP_QAT_HW_CCM_AAD_B0_LEN); - pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN + - aad_ccm_real_len + aad_len_field_sz; - memset(&aad_data[pad_idx], - 0, pad_len); - } - - } - - set_cipher_iv_ccm(ctx->cipher_iv.length, - ctx->cipher_iv.offset, - cipher_param, op, q, - aad_len_field_sz); - - } - - cipher_len = op->sym->aead.data.length; - cipher_ofs = op->sym->aead.data.offset; - auth_len = op->sym->aead.data.length; - auth_ofs = op->sym->aead.data.offset; - - auth_param->u1.aad_adr = aad_phys_addr_aead; - auth_param->auth_res_addr = op->sym->aead.digest.phys_addr; - min_ofs = op->sym->aead.data.offset; - } - - if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next)) - do_sgl = 1; - - /* adjust for chain case */ - if (do_cipher && do_auth) - min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs; - - if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl)) - min_ofs = 0; - - if (unlikely(op->sym->m_dst != NULL)) { - /* Out-of-place operation (OOP) - * Don't align DMA start. DMA the minimum data-set - * so as not to overwrite data in dest buffer - */ - src_buf_start = - rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs); - dst_buf_start = - rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs); - - } else { - /* In-place operation - * Start DMA at nearest aligned address below min_ofs - */ - src_buf_start = - rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs) - & QAT_64_BTYE_ALIGN_MASK; - - if (unlikely((rte_pktmbuf_iova(op->sym->m_src) - - rte_pktmbuf_headroom(op->sym->m_src)) - > src_buf_start)) { - /* alignment has pushed addr ahead of start of mbuf - * so revert and take the performance hit - */ - src_buf_start = - rte_pktmbuf_iova_offset(op->sym->m_src, - min_ofs); - } - dst_buf_start = src_buf_start; - } - - if (do_cipher || do_aead) { - cipher_param->cipher_offset = - (uint32_t)rte_pktmbuf_iova_offset( - op->sym->m_src, cipher_ofs) - src_buf_start; - cipher_param->cipher_length = cipher_len; - } else { - cipher_param->cipher_offset = 0; - cipher_param->cipher_length = 0; - } - - if (do_auth || do_aead) { - auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset( - op->sym->m_src, auth_ofs) - src_buf_start; - auth_param->auth_len = auth_len; - } else { - auth_param->auth_off = 0; - auth_param->auth_len = 0; - } - - qat_req->comn_mid.dst_length = - qat_req->comn_mid.src_length = - (cipher_param->cipher_offset + cipher_param->cipher_length) - > (auth_param->auth_off + auth_param->auth_len) ? 
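/*
 * The conditional expression completed below sets both DMA lengths to
 * max(cipher_offset + cipher_length, auth_off + auth_len): the request
 * must span whichever of the two regions ends last, i.e. the same value
 * as RTE_MAX(cipher_end, auth_end), with all offsets already relative to
 * src_buf_start.
 */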
- (cipher_param->cipher_offset + cipher_param->cipher_length) - : (auth_param->auth_off + auth_param->auth_len); - - if (do_sgl) { - - ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags, - QAT_COMN_PTR_TYPE_SGL); - ret = qat_sgl_fill_array(op->sym->m_src, src_buf_start, - &qat_op_cookie->qat_sgl_list_src, - qat_req->comn_mid.src_length); - if (ret) { - PMD_DRV_LOG(ERR, "QAT PMD Cannot fill sgl array"); - return ret; - } - - if (likely(op->sym->m_dst == NULL)) - qat_req->comn_mid.dest_data_addr = - qat_req->comn_mid.src_data_addr = - qat_op_cookie->qat_sgl_src_phys_addr; - else { - ret = qat_sgl_fill_array(op->sym->m_dst, - dst_buf_start, - &qat_op_cookie->qat_sgl_list_dst, - qat_req->comn_mid.dst_length); - - if (ret) { - PMD_DRV_LOG(ERR, "QAT PMD Cannot " - "fill sgl array"); - return ret; - } - - qat_req->comn_mid.src_data_addr = - qat_op_cookie->qat_sgl_src_phys_addr; - qat_req->comn_mid.dest_data_addr = - qat_op_cookie->qat_sgl_dst_phys_addr; - } - } else { - qat_req->comn_mid.src_data_addr = src_buf_start; - qat_req->comn_mid.dest_data_addr = dst_buf_start; - } - -#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX - rte_hexdump(stdout, "qat_req:", qat_req, - sizeof(struct icp_qat_fw_la_bulk_req)); - rte_hexdump(stdout, "src_data:", - rte_pktmbuf_mtod(op->sym->m_src, uint8_t*), - rte_pktmbuf_data_len(op->sym->m_src)); - if (do_cipher) { - uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op, - uint8_t *, - ctx->cipher_iv.offset); - rte_hexdump(stdout, "cipher iv:", cipher_iv_ptr, - ctx->cipher_iv.length); - } - - if (do_auth) { - if (ctx->auth_iv.length) { - uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op, - uint8_t *, - ctx->auth_iv.offset); - rte_hexdump(stdout, "auth iv:", auth_iv_ptr, - ctx->auth_iv.length); - } - rte_hexdump(stdout, "digest:", op->sym->auth.digest.data, - ctx->digest_length); - } - - if (do_aead) { - rte_hexdump(stdout, "digest:", op->sym->aead.digest.data, - ctx->digest_length); - rte_hexdump(stdout, "aad:", op->sym->aead.aad.data, - ctx->aad_len); - } -#endif - return 0; -} - -static inline uint32_t adf_modulo(uint32_t data, uint32_t shift) -{ - uint32_t div = data >> shift; - uint32_t mult = div << shift; - - return data - mult; -} - -int qat_dev_config(__rte_unused struct rte_cryptodev *dev, - __rte_unused struct rte_cryptodev_config *config) -{ - PMD_INIT_FUNC_TRACE(); - return 0; -} - -int qat_dev_start(__rte_unused struct rte_cryptodev *dev) -{ - PMD_INIT_FUNC_TRACE(); - return 0; -} - -void qat_dev_stop(__rte_unused struct rte_cryptodev *dev) -{ - PMD_INIT_FUNC_TRACE(); -} - -int qat_dev_close(struct rte_cryptodev *dev) -{ - int i, ret; - - PMD_INIT_FUNC_TRACE(); - - for (i = 0; i < dev->data->nb_queue_pairs; i++) { - ret = qat_crypto_sym_qp_release(dev, i); - if (ret < 0) - return ret; - } - - return 0; -} - -void qat_dev_info_get(struct rte_cryptodev *dev, - struct rte_cryptodev_info *info) -{ - struct qat_pmd_private *internals = dev->data->dev_private; - - PMD_INIT_FUNC_TRACE(); - if (info != NULL) { - info->max_nb_queue_pairs = - ADF_NUM_SYM_QPS_PER_BUNDLE * - ADF_NUM_BUNDLES_PER_DEV; - info->feature_flags = dev->feature_flags; - info->capabilities = internals->qat_dev_capabilities; - info->sym.max_nb_sessions = internals->max_nb_sessions; - info->driver_id = cryptodev_qat_driver_id; - info->pci_dev = RTE_DEV_TO_PCI(dev->device); - } -} - -void qat_crypto_sym_stats_get(struct rte_cryptodev *dev, - struct rte_cryptodev_stats *stats) -{ - int i; - struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs); - - PMD_INIT_FUNC_TRACE(); - if (stats == 
NULL) { - PMD_DRV_LOG(ERR, "invalid stats ptr NULL"); - return; - } - for (i = 0; i < dev->data->nb_queue_pairs; i++) { - if (qp[i] == NULL) { - PMD_DRV_LOG(DEBUG, "Uninitialised queue pair"); - continue; - } - - stats->enqueued_count += qp[i]->stats.enqueued_count; - stats->dequeued_count += qp[i]->stats.dequeued_count; - stats->enqueue_err_count += qp[i]->stats.enqueue_err_count; - stats->dequeue_err_count += qp[i]->stats.dequeue_err_count; - } -} - -void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev) -{ - int i; - struct qat_qp **qp = (struct qat_qp **)(dev->data->queue_pairs); - - PMD_INIT_FUNC_TRACE(); - for (i = 0; i < dev->data->nb_queue_pairs; i++) - memset(&(qp[i]->stats), 0, sizeof(qp[i]->stats)); - PMD_DRV_LOG(DEBUG, "QAT crypto: stats cleared"); -} diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h deleted file mode 100644 index c182af2e..00000000 --- a/drivers/crypto/qat/qat_crypto.h +++ /dev/null @@ -1,150 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2015-2016 Intel Corporation - */ - -#ifndef _QAT_CRYPTO_H_ -#define _QAT_CRYPTO_H_ - -#include -#include - -#include "qat_crypto_capabilities.h" - -#define CRYPTODEV_NAME_QAT_SYM_PMD crypto_qat -/**< Intel QAT Symmetric Crypto PMD device name */ - -/* - * This macro rounds up a number to a be a multiple of - * the alignment when the alignment is a power of 2 - */ -#define ALIGN_POW2_ROUNDUP(num, align) \ - (((num) + (align) - 1) & ~((align) - 1)) -#define QAT_64_BTYE_ALIGN_MASK (~0x3f) - -#define QAT_CSR_HEAD_WRITE_THRESH 32U -/* number of requests to accumulate before writing head CSR */ -#define QAT_CSR_TAIL_WRITE_THRESH 32U -/* number of requests to accumulate before writing tail CSR */ -#define QAT_CSR_TAIL_FORCE_WRITE_THRESH 256U -/* number of inflights below which no tail write coalescing should occur */ - -struct qat_session; - -enum qat_device_gen { - QAT_GEN1 = 1, - QAT_GEN2, -}; - -/** - * Structure associated with each queue. 
- */ -struct qat_queue { - char memz_name[RTE_MEMZONE_NAMESIZE]; - void *base_addr; /* Base address */ - rte_iova_t base_phys_addr; /* Queue physical address */ - uint32_t head; /* Shadow copy of the head */ - uint32_t tail; /* Shadow copy of the tail */ - uint32_t modulo; - uint32_t msg_size; - uint16_t max_inflights; - uint32_t queue_size; - uint8_t hw_bundle_number; - uint8_t hw_queue_number; - /* HW queue aka ring offset on bundle */ - uint32_t csr_head; /* last written head value */ - uint32_t csr_tail; /* last written tail value */ - uint16_t nb_processed_responses; - /* number of responses processed since last CSR head write */ - uint16_t nb_pending_requests; - /* number of requests pending since last CSR tail write */ -}; - -struct qat_qp { - void *mmap_bar_addr; - uint16_t inflights16; - struct qat_queue tx_q; - struct qat_queue rx_q; - struct rte_cryptodev_stats stats; - struct rte_mempool *op_cookie_pool; - void **op_cookies; - uint32_t nb_descriptors; - enum qat_device_gen qat_dev_gen; -} __rte_cache_aligned; - -/** private data structure for each QAT device */ -struct qat_pmd_private { - unsigned max_nb_queue_pairs; - /**< Max number of queue pairs supported by device */ - unsigned max_nb_sessions; - /**< Max number of sessions supported by device */ - enum qat_device_gen qat_dev_gen; - /**< QAT device generation */ - const struct rte_cryptodev_capabilities *qat_dev_capabilities; -}; - -extern uint8_t cryptodev_qat_driver_id; - -int qat_dev_config(struct rte_cryptodev *dev, - struct rte_cryptodev_config *config); -int qat_dev_start(struct rte_cryptodev *dev); -void qat_dev_stop(struct rte_cryptodev *dev); -int qat_dev_close(struct rte_cryptodev *dev); -void qat_dev_info_get(struct rte_cryptodev *dev, - struct rte_cryptodev_info *info); - -void qat_crypto_sym_stats_get(struct rte_cryptodev *dev, - struct rte_cryptodev_stats *stats); -void qat_crypto_sym_stats_reset(struct rte_cryptodev *dev); - -int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id, - const struct rte_cryptodev_qp_conf *rx_conf, int socket_id, - struct rte_mempool *session_pool); -int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, - uint16_t queue_pair_id); - -int -qat_pmd_session_mempool_create(struct rte_cryptodev *dev, - unsigned nb_objs, unsigned obj_cache_size, int socket_id); - -extern unsigned -qat_crypto_sym_get_session_private_size(struct rte_cryptodev *dev); - -extern int -qat_crypto_sym_configure_session(struct rte_cryptodev *dev, - struct rte_crypto_sym_xform *xform, - struct rte_cryptodev_sym_session *sess, - struct rte_mempool *mempool); - - -int -qat_crypto_set_session_parameters(struct rte_cryptodev *dev, - struct rte_crypto_sym_xform *xform, void *session_private); - -int -qat_crypto_sym_configure_session_aead(struct rte_crypto_sym_xform *xform, - struct qat_session *session); - -int -qat_crypto_sym_configure_session_auth(struct rte_cryptodev *dev, - struct rte_crypto_sym_xform *xform, - struct qat_session *session); - -int -qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev, - struct rte_crypto_sym_xform *xform, - struct qat_session *session); - - -extern void -qat_crypto_sym_clear_session(struct rte_cryptodev *dev, - struct rte_cryptodev_sym_session *session); - -extern uint16_t -qat_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops, - uint16_t nb_ops); - -extern uint16_t -qat_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops, - uint16_t nb_ops); - -#endif /* _QAT_CRYPTO_H_ */ diff --git 
a/drivers/crypto/qat/qat_crypto_capabilities.h b/drivers/crypto/qat/qat_crypto_capabilities.h deleted file mode 100644 index 37a6b7cb..00000000 --- a/drivers/crypto/qat/qat_crypto_capabilities.h +++ /dev/null @@ -1,557 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2017 Intel Corporation - */ - -#ifndef _QAT_CRYPTO_CAPABILITIES_H_ -#define _QAT_CRYPTO_CAPABILITIES_H_ - -#define QAT_BASE_GEN1_SYM_CAPABILITIES \ - { /* SHA1 HMAC */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \ - .block_size = 64, \ - .key_size = { \ - .min = 1, \ - .max = 64, \ - .increment = 1 \ - }, \ - .digest_size = { \ - .min = 1, \ - .max = 20, \ - .increment = 1 \ - }, \ - .iv_size = { 0 } \ - }, } \ - }, } \ - }, \ - { /* SHA224 HMAC */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_SHA224_HMAC, \ - .block_size = 64, \ - .key_size = { \ - .min = 1, \ - .max = 64, \ - .increment = 1 \ - }, \ - .digest_size = { \ - .min = 1, \ - .max = 28, \ - .increment = 1 \ - }, \ - .iv_size = { 0 } \ - }, } \ - }, } \ - }, \ - { /* SHA256 HMAC */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, \ - .block_size = 64, \ - .key_size = { \ - .min = 1, \ - .max = 64, \ - .increment = 1 \ - }, \ - .digest_size = { \ - .min = 1, \ - .max = 32, \ - .increment = 1 \ - }, \ - .iv_size = { 0 } \ - }, } \ - }, } \ - }, \ - { /* SHA384 HMAC */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, \ - .block_size = 128, \ - .key_size = { \ - .min = 1, \ - .max = 128, \ - .increment = 1 \ - }, \ - .digest_size = { \ - .min = 1, \ - .max = 48, \ - .increment = 1 \ - }, \ - .iv_size = { 0 } \ - }, } \ - }, } \ - }, \ - { /* SHA512 HMAC */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, \ - .block_size = 128, \ - .key_size = { \ - .min = 1, \ - .max = 128, \ - .increment = 1 \ - }, \ - .digest_size = { \ - .min = 1, \ - .max = 64, \ - .increment = 1 \ - }, \ - .iv_size = { 0 } \ - }, } \ - }, } \ - }, \ - { /* MD5 HMAC */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_MD5_HMAC, \ - .block_size = 64, \ - .key_size = { \ - .min = 1, \ - .max = 64, \ - .increment = 1 \ - }, \ - .digest_size = { \ - .min = 1, \ - .max = 16, \ - .increment = 1 \ - }, \ - .iv_size = { 0 } \ - }, } \ - }, } \ - }, \ - { /* AES XCBC MAC */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC, \ - .block_size = 16, \ - .key_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - }, \ - .digest_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - }, \ - .aad_size = { 0 }, \ - .iv_size = { 0 } \ - }, } \ - }, } \ - }, \ - { /* AES CCM */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \ - {.aead = { \ - .algo = RTE_CRYPTO_AEAD_AES_CCM, \ - .block_size = 16, \ - .key_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - }, \ - .digest_size = { \ - .min = 4, \ - .max = 16, \ - 
.increment = 2 \ - }, \ - .aad_size = { \ - .min = 0, \ - .max = 224, \ - .increment = 1 \ - }, \ - .iv_size = { \ - .min = 7, \ - .max = 13, \ - .increment = 1 \ - }, \ - }, } \ - }, } \ - }, \ - { /* AES GCM */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \ - {.aead = { \ - .algo = RTE_CRYPTO_AEAD_AES_GCM, \ - .block_size = 16, \ - .key_size = { \ - .min = 16, \ - .max = 32, \ - .increment = 8 \ - }, \ - .digest_size = { \ - .min = 8, \ - .max = 16, \ - .increment = 4 \ - }, \ - .aad_size = { \ - .min = 0, \ - .max = 240, \ - .increment = 1 \ - }, \ - .iv_size = { \ - .min = 12, \ - .max = 12, \ - .increment = 0 \ - }, \ - }, } \ - }, } \ - }, \ - { /* AES GMAC (AUTH) */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_AES_GMAC, \ - .block_size = 16, \ - .key_size = { \ - .min = 16, \ - .max = 32, \ - .increment = 8 \ - }, \ - .digest_size = { \ - .min = 8, \ - .max = 16, \ - .increment = 4 \ - }, \ - .iv_size = { \ - .min = 12, \ - .max = 12, \ - .increment = 0 \ - } \ - }, } \ - }, } \ - }, \ - { /* SNOW 3G (UIA2) */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2, \ - .block_size = 16, \ - .key_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - }, \ - .digest_size = { \ - .min = 4, \ - .max = 4, \ - .increment = 0 \ - }, \ - .iv_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - } \ - }, } \ - }, } \ - }, \ - { /* AES CBC */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ - {.cipher = { \ - .algo = RTE_CRYPTO_CIPHER_AES_CBC, \ - .block_size = 16, \ - .key_size = { \ - .min = 16, \ - .max = 32, \ - .increment = 8 \ - }, \ - .iv_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - } \ - }, } \ - }, } \ - }, \ - { /* AES DOCSIS BPI */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ - {.cipher = { \ - .algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,\ - .block_size = 16, \ - .key_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - }, \ - .iv_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - } \ - }, } \ - }, } \ - }, \ - { /* SNOW 3G (UEA2) */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ - {.cipher = { \ - .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2, \ - .block_size = 16, \ - .key_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - }, \ - .iv_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - } \ - }, } \ - }, } \ - }, \ - { /* AES CTR */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ - {.cipher = { \ - .algo = RTE_CRYPTO_CIPHER_AES_CTR, \ - .block_size = 16, \ - .key_size = { \ - .min = 16, \ - .max = 32, \ - .increment = 8 \ - }, \ - .iv_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - } \ - }, } \ - }, } \ - }, \ - { /* NULL (AUTH) */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_NULL, \ - .block_size = 1, \ - .key_size = { \ - .min = 0, \ - .max = 0, \ - .increment = 0 \ - }, \ - .digest_size = { \ - .min = 0, \ - .max = 0, \ - .increment = 0 \ - }, \ - .iv_size = { 0 } \ - }, }, \ - }, }, \ - }, \ - { /* NULL (CIPHER) */ \ - .op = 
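/*
 * Every capability entry in this table publishes its sizes as
 * {min, max, increment} ranges: increment 0 means a single legal value
 * (min == max), otherwise min, min+inc, ..., max are all legal -- e.g.
 * the AES key_size {16, 32, 8} above admits 16-, 24- and 32-byte keys.
 * A hypothetical checker an application could run against such a range
 * (sketch):
 */
#include <stdbool.h>
#include <stdint.h>

static bool
size_in_range(uint16_t val, uint16_t min, uint16_t max, uint16_t inc)
{
	if (val < min || val > max)
		return false;
	if (inc == 0)
		return val == min;	/* single fixed size */
	return (uint16_t)(val - min) % inc == 0;
}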
RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ - {.cipher = { \ - .algo = RTE_CRYPTO_CIPHER_NULL, \ - .block_size = 1, \ - .key_size = { \ - .min = 0, \ - .max = 0, \ - .increment = 0 \ - }, \ - .iv_size = { \ - .min = 0, \ - .max = 0, \ - .increment = 0 \ - } \ - }, }, \ - }, } \ - }, \ - { /* KASUMI (F8) */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ - {.cipher = { \ - .algo = RTE_CRYPTO_CIPHER_KASUMI_F8, \ - .block_size = 8, \ - .key_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - }, \ - .iv_size = { \ - .min = 8, \ - .max = 8, \ - .increment = 0 \ - } \ - }, } \ - }, } \ - }, \ - { /* KASUMI (F9) */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_KASUMI_F9, \ - .block_size = 8, \ - .key_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - }, \ - .digest_size = { \ - .min = 4, \ - .max = 4, \ - .increment = 0 \ - }, \ - .iv_size = { 0 } \ - }, } \ - }, } \ - }, \ - { /* 3DES CBC */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ - {.cipher = { \ - .algo = RTE_CRYPTO_CIPHER_3DES_CBC, \ - .block_size = 8, \ - .key_size = { \ - .min = 16, \ - .max = 24, \ - .increment = 8 \ - }, \ - .iv_size = { \ - .min = 8, \ - .max = 8, \ - .increment = 0 \ - } \ - }, } \ - }, } \ - }, \ - { /* 3DES CTR */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ - {.cipher = { \ - .algo = RTE_CRYPTO_CIPHER_3DES_CTR, \ - .block_size = 8, \ - .key_size = { \ - .min = 16, \ - .max = 24, \ - .increment = 8 \ - }, \ - .iv_size = { \ - .min = 8, \ - .max = 8, \ - .increment = 0 \ - } \ - }, } \ - }, } \ - }, \ - { /* DES CBC */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ - {.cipher = { \ - .algo = RTE_CRYPTO_CIPHER_DES_CBC, \ - .block_size = 8, \ - .key_size = { \ - .min = 8, \ - .max = 8, \ - .increment = 0 \ - }, \ - .iv_size = { \ - .min = 8, \ - .max = 8, \ - .increment = 0 \ - } \ - }, } \ - }, } \ - }, \ - { /* DES DOCSISBPI */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ - {.cipher = { \ - .algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,\ - .block_size = 8, \ - .key_size = { \ - .min = 8, \ - .max = 8, \ - .increment = 0 \ - }, \ - .iv_size = { \ - .min = 8, \ - .max = 8, \ - .increment = 0 \ - } \ - }, } \ - }, } \ - } - -#define QAT_EXTRA_GEN2_SYM_CAPABILITIES \ - { /* ZUC (EEA3) */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ - {.cipher = { \ - .algo = RTE_CRYPTO_CIPHER_ZUC_EEA3, \ - .block_size = 16, \ - .key_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - }, \ - .iv_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - } \ - }, } \ - }, } \ - }, \ - { /* ZUC (EIA3) */ \ - .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ - {.sym = { \ - .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ - {.auth = { \ - .algo = RTE_CRYPTO_AUTH_ZUC_EIA3, \ - .block_size = 16, \ - .key_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - }, \ - .digest_size = { \ - .min = 4, \ - .max = 4, \ - .increment = 0 \ - }, \ - .iv_size = { \ - .min = 16, \ - .max = 16, \ - .increment = 0 \ - } \ - }, } \ - }, } \ - } - -#endif /* _QAT_CRYPTO_CAPABILITIES_H_ */ diff --git a/drivers/crypto/qat/qat_logs.h b/drivers/crypto/qat/qat_logs.h 
deleted file mode 100644 index 565089a4..00000000 --- a/drivers/crypto/qat/qat_logs.h +++ /dev/null @@ -1,49 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2015 Intel Corporation - */ - -#ifndef _QAT_LOGS_H_ -#define _QAT_LOGS_H_ - -#define PMD_INIT_LOG(level, fmt, args...) \ - rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \ - "PMD: %s(): " fmt "\n", __func__, ##args) - -#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_INIT -#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") -#else -#define PMD_INIT_FUNC_TRACE() do { } while (0) -#endif - -#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_RX -#define PMD_RX_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) -#else -#define PMD_RX_LOG(level, fmt, args...) do { } while (0) -#endif - -#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX -#define PMD_TX_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) -#else -#define PMD_TX_LOG(level, fmt, args...) do { } while (0) -#endif - -#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_TX_FREE -#define PMD_TX_FREE_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) -#else -#define PMD_TX_FREE_LOG(level, fmt, args...) do { } while (0) -#endif - -#ifdef RTE_LIBRTE_PMD_QAT_DEBUG_DRIVER -#define PMD_DRV_LOG_RAW(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args) -#else -#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0) -#endif - -#define PMD_DRV_LOG(level, fmt, args...) \ - PMD_DRV_LOG_RAW(level, fmt "\n", ## args) - -#endif /* _QAT_LOGS_H_ */ diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c deleted file mode 100644 index 87b9ce0b..00000000 --- a/drivers/crypto/qat/qat_qp.c +++ /dev/null @@ -1,470 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2010-2015 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "qat_logs.h" -#include "qat_crypto.h" -#include "qat_algs.h" -#include "adf_transport_access_macros.h" - -#define ADF_MAX_SYM_DESC 4096 -#define ADF_MIN_SYM_DESC 128 -#define ADF_SYM_TX_RING_DESC_SIZE 128 -#define ADF_SYM_RX_RING_DESC_SIZE 32 -#define ADF_SYM_TX_QUEUE_STARTOFF 2 -/* Offset from bundle start to 1st Sym Tx queue */ -#define ADF_SYM_RX_QUEUE_STARTOFF 10 -#define ADF_ARB_REG_SLOT 0x1000 -#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C - -#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \ - ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \ - (ADF_ARB_REG_SLOT * index), value) - -static int qat_qp_check_queue_alignment(uint64_t phys_addr, - uint32_t queue_size_bytes); -static int qat_tx_queue_create(struct rte_cryptodev *dev, - struct qat_queue *queue, uint8_t id, uint32_t nb_desc, - int socket_id); -static int qat_rx_queue_create(struct rte_cryptodev *dev, - struct qat_queue *queue, uint8_t id, uint32_t nb_desc, - int socket_id); -static void qat_queue_delete(struct qat_queue *queue); -static int qat_queue_create(struct rte_cryptodev *dev, - struct qat_queue *queue, uint32_t nb_desc, uint8_t desc_size, - int socket_id); -static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num, - uint32_t *queue_size_for_csr); -static void adf_configure_queues(struct qat_qp *queue); -static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr); -static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr); - -static const struct rte_memzone * -queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size, - int socket_id) -{ - const struct 
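/*
 * qat_logs.h above uses the standard DPDK debug-gating pattern: each log
 * class expands to a real logging call only when its
 * RTE_LIBRTE_PMD_QAT_DEBUG_* build flag is defined, and to an empty
 * do { } while (0) statement otherwise, so hot-path logging costs nothing
 * in production builds. The shape of the pattern, with a hypothetical
 * gate flag (sketch):
 */
#include <rte_log.h>

#ifdef MY_PMD_DEBUG
#define MY_LOG(fmt, args...) \
	RTE_LOG(DEBUG, PMD, "%s(): " fmt "\n", __func__, ## args)
#else
#define MY_LOG(fmt, args...) do { } while (0)
#endif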
rte_memzone *mz; - unsigned memzone_flags = 0; - const struct rte_memseg *ms; - - PMD_INIT_FUNC_TRACE(); - mz = rte_memzone_lookup(queue_name); - if (mz != 0) { - if (((size_t)queue_size <= mz->len) && - ((socket_id == SOCKET_ID_ANY) || - (socket_id == mz->socket_id))) { - PMD_DRV_LOG(DEBUG, "re-use memzone already " - "allocated for %s", queue_name); - return mz; - } - - PMD_DRV_LOG(ERR, "Incompatible memzone already " - "allocated %s, size %u, socket %d. " - "Requested size %u, socket %u", - queue_name, (uint32_t)mz->len, - mz->socket_id, queue_size, socket_id); - return NULL; - } - - PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u", - queue_name, queue_size, socket_id); - ms = rte_eal_get_physmem_layout(); - switch (ms[0].hugepage_sz) { - case(RTE_PGSIZE_2M): - memzone_flags = RTE_MEMZONE_2MB; - break; - case(RTE_PGSIZE_1G): - memzone_flags = RTE_MEMZONE_1GB; - break; - case(RTE_PGSIZE_16M): - memzone_flags = RTE_MEMZONE_16MB; - break; - case(RTE_PGSIZE_16G): - memzone_flags = RTE_MEMZONE_16GB; - break; - default: - memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY; - } - return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id, - memzone_flags, queue_size); -} - -int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id, - const struct rte_cryptodev_qp_conf *qp_conf, - int socket_id, struct rte_mempool *session_pool __rte_unused) -{ - struct qat_qp *qp; - struct rte_pci_device *pci_dev; - int ret; - char op_cookie_pool_name[RTE_RING_NAMESIZE]; - uint32_t i; - - PMD_INIT_FUNC_TRACE(); - - /* If qp is already in use free ring memory and qp metadata. */ - if (dev->data->queue_pairs[queue_pair_id] != NULL) { - ret = qat_crypto_sym_qp_release(dev, queue_pair_id); - if (ret < 0) - return ret; - } - - if ((qp_conf->nb_descriptors > ADF_MAX_SYM_DESC) || - (qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) { - PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors", - qp_conf->nb_descriptors); - return -EINVAL; - } - - pci_dev = RTE_DEV_TO_PCI(dev->device); - - if (pci_dev->mem_resource[0].addr == NULL) { - PMD_DRV_LOG(ERR, "Could not find VF config space " - "(UIO driver attached?)."); - return -EINVAL; - } - - if (queue_pair_id >= - (ADF_NUM_SYM_QPS_PER_BUNDLE * - ADF_NUM_BUNDLES_PER_DEV)) { - PMD_DRV_LOG(ERR, "qp_id %u invalid for this device", - queue_pair_id); - return -EINVAL; - } - /* Allocate the queue pair data structure. 
*/ - qp = rte_zmalloc("qat PMD qp metadata", - sizeof(*qp), RTE_CACHE_LINE_SIZE); - if (qp == NULL) { - PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct"); - return -ENOMEM; - } - qp->nb_descriptors = qp_conf->nb_descriptors; - qp->op_cookies = rte_zmalloc("qat PMD op cookie pointer", - qp_conf->nb_descriptors * sizeof(*qp->op_cookies), - RTE_CACHE_LINE_SIZE); - if (qp->op_cookies == NULL) { - PMD_DRV_LOG(ERR, "Failed to alloc mem for cookie"); - rte_free(qp); - return -ENOMEM; - } - - qp->mmap_bar_addr = pci_dev->mem_resource[0].addr; - qp->inflights16 = 0; - - if (qat_tx_queue_create(dev, &(qp->tx_q), - queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) { - PMD_INIT_LOG(ERR, "Tx queue create failed " - "queue_pair_id=%u", queue_pair_id); - goto create_err; - } - - if (qat_rx_queue_create(dev, &(qp->rx_q), - queue_pair_id, qp_conf->nb_descriptors, socket_id) != 0) { - PMD_DRV_LOG(ERR, "Rx queue create failed " - "queue_pair_id=%hu", queue_pair_id); - qat_queue_delete(&(qp->tx_q)); - goto create_err; - } - - adf_configure_queues(qp); - adf_queue_arb_enable(&qp->tx_q, qp->mmap_bar_addr); - snprintf(op_cookie_pool_name, RTE_RING_NAMESIZE, "%s_qp_op_%d_%hu", - pci_dev->driver->driver.name, dev->data->dev_id, - queue_pair_id); - - qp->op_cookie_pool = rte_mempool_lookup(op_cookie_pool_name); - if (qp->op_cookie_pool == NULL) - qp->op_cookie_pool = rte_mempool_create(op_cookie_pool_name, - qp->nb_descriptors, - sizeof(struct qat_crypto_op_cookie), 64, 0, - NULL, NULL, NULL, NULL, socket_id, - 0); - if (!qp->op_cookie_pool) { - PMD_DRV_LOG(ERR, "QAT PMD Cannot create" - " op mempool"); - goto create_err; - } - - for (i = 0; i < qp->nb_descriptors; i++) { - if (rte_mempool_get(qp->op_cookie_pool, &qp->op_cookies[i])) { - PMD_DRV_LOG(ERR, "QAT PMD Cannot get op_cookie"); - goto create_err; - } - - struct qat_crypto_op_cookie *sql_cookie = - qp->op_cookies[i]; - - sql_cookie->qat_sgl_src_phys_addr = - rte_mempool_virt2iova(sql_cookie) + - offsetof(struct qat_crypto_op_cookie, - qat_sgl_list_src); - - sql_cookie->qat_sgl_dst_phys_addr = - rte_mempool_virt2iova(sql_cookie) + - offsetof(struct qat_crypto_op_cookie, - qat_sgl_list_dst); - } - - struct qat_pmd_private *internals - = dev->data->dev_private; - qp->qat_dev_gen = internals->qat_dev_gen; - - dev->data->queue_pairs[queue_pair_id] = qp; - return 0; - -create_err: - if (qp->op_cookie_pool) - rte_mempool_free(qp->op_cookie_pool); - rte_free(qp->op_cookies); - rte_free(qp); - return -EFAULT; -} - -int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id) -{ - struct qat_qp *qp = - (struct qat_qp *)dev->data->queue_pairs[queue_pair_id]; - uint32_t i; - - PMD_INIT_FUNC_TRACE(); - if (qp == NULL) { - PMD_DRV_LOG(DEBUG, "qp already freed"); - return 0; - } - - /* Don't free memory if there are still responses to be processed */ - if (qp->inflights16 == 0) { - qat_queue_delete(&(qp->tx_q)); - qat_queue_delete(&(qp->rx_q)); - } else { - return -EAGAIN; - } - - adf_queue_arb_disable(&(qp->tx_q), qp->mmap_bar_addr); - - for (i = 0; i < qp->nb_descriptors; i++) - rte_mempool_put(qp->op_cookie_pool, qp->op_cookies[i]); - - if (qp->op_cookie_pool) - rte_mempool_free(qp->op_cookie_pool); - - rte_free(qp->op_cookies); - rte_free(qp); - dev->data->queue_pairs[queue_pair_id] = NULL; - return 0; -} - -static int qat_tx_queue_create(struct rte_cryptodev *dev, - struct qat_queue *queue, uint8_t qp_id, - uint32_t nb_desc, int socket_id) -{ - PMD_INIT_FUNC_TRACE(); - queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE; - 
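/*
 * Queue-pair to hardware mapping: each bundle exposes
 * ADF_NUM_SYM_QPS_PER_BUNDLE symmetric queue pairs, so
 *
 *	bundle = qp_id / ADF_NUM_SYM_QPS_PER_BUNDLE;
 *	ring   = qp_id % ADF_NUM_SYM_QPS_PER_BUNDLE + STARTOFF;
 *
 * where STARTOFF is 2 (ADF_SYM_TX_QUEUE_STARTOFF) for request rings and
 * 10 (ADF_SYM_RX_QUEUE_STARTOFF) for response rings, per the defines at
 * the top of this file. E.g. if ADF_NUM_SYM_QPS_PER_BUNDLE were 2,
 * qp_id 3 would map to bundle 1, tx ring 3 and rx ring 11.
 */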
queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
-			ADF_SYM_TX_QUEUE_STARTOFF;
-	PMD_DRV_LOG(DEBUG, "TX ring for %u msgs: qp_id %d, bundle %u, ring %u",
-			nb_desc, qp_id, queue->hw_bundle_number,
-			queue->hw_queue_number);
-
-	return qat_queue_create(dev, queue, nb_desc,
-			ADF_SYM_TX_RING_DESC_SIZE, socket_id);
-}
-
-static int qat_rx_queue_create(struct rte_cryptodev *dev,
-		struct qat_queue *queue, uint8_t qp_id, uint32_t nb_desc,
-		int socket_id)
-{
-	PMD_INIT_FUNC_TRACE();
-	queue->hw_bundle_number = qp_id/ADF_NUM_SYM_QPS_PER_BUNDLE;
-	queue->hw_queue_number = (qp_id%ADF_NUM_SYM_QPS_PER_BUNDLE) +
-			ADF_SYM_RX_QUEUE_STARTOFF;
-
-	PMD_DRV_LOG(DEBUG, "RX ring for %u msgs: qp id %d, bundle %u, ring %u",
-			nb_desc, qp_id, queue->hw_bundle_number,
-			queue->hw_queue_number);
-	return qat_queue_create(dev, queue, nb_desc,
-			ADF_SYM_RX_RING_DESC_SIZE, socket_id);
-}
-
-static void qat_queue_delete(struct qat_queue *queue)
-{
-	const struct rte_memzone *mz;
-	int status = 0;
-
-	if (queue == NULL) {
-		PMD_DRV_LOG(DEBUG, "Invalid queue");
-		return;
-	}
-	mz = rte_memzone_lookup(queue->memz_name);
-	if (mz != NULL) {
-		/* Write an unused pattern to the queue memory. */
-		memset(queue->base_addr, 0x7F, queue->queue_size);
-		status = rte_memzone_free(mz);
-		if (status != 0)
-			PMD_DRV_LOG(ERR, "Error %d on freeing queue %s",
-					status, queue->memz_name);
-	} else {
-		PMD_DRV_LOG(DEBUG, "queue %s doesn't exist",
-				queue->memz_name);
-	}
-}
-
-static int
-qat_queue_create(struct rte_cryptodev *dev, struct qat_queue *queue,
-		uint32_t nb_desc, uint8_t desc_size, int socket_id)
-{
-	uint64_t queue_base;
-	void *io_addr;
-	const struct rte_memzone *qp_mz;
-	uint32_t queue_size_bytes = nb_desc*desc_size;
-	struct rte_pci_device *pci_dev;
-
-	PMD_INIT_FUNC_TRACE();
-	if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
-		PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
-		return -EINVAL;
-	}
-
-	pci_dev = RTE_DEV_TO_PCI(dev->device);
-
-	/*
-	 * Allocate a memzone for the queue - create a unique name.
-	 */
-	snprintf(queue->memz_name, sizeof(queue->memz_name), "%s_%s_%d_%d_%d",
-		pci_dev->driver->driver.name, "qp_mem", dev->data->dev_id,
-		queue->hw_bundle_number, queue->hw_queue_number);
-	qp_mz = queue_dma_zone_reserve(queue->memz_name, queue_size_bytes,
-			socket_id);
-	if (qp_mz == NULL) {
-		PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
-		return -ENOMEM;
-	}
-
-	queue->base_addr = (char *)qp_mz->addr;
-	queue->base_phys_addr = qp_mz->iova;
-	if (qat_qp_check_queue_alignment(queue->base_phys_addr,
-			queue_size_bytes)) {
-		PMD_DRV_LOG(ERR, "Invalid alignment on queue create "
-				" 0x%"PRIx64"\n",
-				queue->base_phys_addr);
-		return -EFAULT;
-	}
-
-	if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
-			!= 0) {
-		PMD_DRV_LOG(ERR, "Invalid num inflights");
-		return -EINVAL;
-	}
-
-	queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
-					ADF_BYTES_TO_MSG_SIZE(desc_size));
-	queue->modulo = ADF_RING_SIZE_MODULO(queue->queue_size);
-	PMD_DRV_LOG(DEBUG, "RING size in CSR: %u, in bytes %u, nb msgs %u,"
-				" msg_size %u, max_inflights %u modulo %u",
-				queue->queue_size, queue_size_bytes,
-				nb_desc, desc_size, queue->max_inflights,
-				queue->modulo);
-
-	if (queue->max_inflights < 2) {
-		PMD_DRV_LOG(ERR, "Invalid num inflights");
-		return -EINVAL;
-	}
-	queue->head = 0;
-	queue->tail = 0;
-	queue->msg_size = desc_size;
-
-	/*
-	 * Write an unused pattern to the queue memory.
-	 */
-	memset(queue->base_addr, 0x7F, queue_size_bytes);
-
-	queue_base = BUILD_RING_BASE_ADDR(queue->base_phys_addr,
-					queue->queue_size);
-
-	io_addr = pci_dev->mem_resource[0].addr;
-
-	WRITE_CSR_RING_BASE(io_addr, queue->hw_bundle_number,
-			queue->hw_queue_number, queue_base);
-	return 0;
-}
-
-static int qat_qp_check_queue_alignment(uint64_t phys_addr,
-					uint32_t queue_size_bytes)
-{
-	PMD_INIT_FUNC_TRACE();
-	if (((queue_size_bytes - 1) & phys_addr) != 0)
-		return -EINVAL;
-	return 0;
-}
-
-static int adf_verify_queue_size(uint32_t msg_size, uint32_t msg_num,
-					uint32_t *p_queue_size_for_csr)
-{
-	uint8_t i = ADF_MIN_RING_SIZE;
-
-	PMD_INIT_FUNC_TRACE();
-	for (; i <= ADF_MAX_RING_SIZE; i++)
-		if ((msg_size * msg_num) ==
-				(uint32_t)ADF_SIZE_TO_RING_SIZE_IN_BYTES(i)) {
-			*p_queue_size_for_csr = i;
-			return 0;
-		}
-	PMD_DRV_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
-	return -EINVAL;
-}
-
-static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
-{
-	uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
-				(ADF_ARB_REG_SLOT *
-				txq->hw_bundle_number);
-	uint32_t value;
-
-	PMD_INIT_FUNC_TRACE();
-	value = ADF_CSR_RD(base_addr, arb_csr_offset);
-	value |= (0x01 << txq->hw_queue_number);
-	ADF_CSR_WR(base_addr, arb_csr_offset, value);
-}
-
-static void adf_queue_arb_disable(struct qat_queue *txq, void *base_addr)
-{
-	uint32_t arb_csr_offset = ADF_ARB_RINGSRVARBEN_OFFSET +
-				(ADF_ARB_REG_SLOT *
-				txq->hw_bundle_number);
-	uint32_t value;
-
-	PMD_INIT_FUNC_TRACE();
-	value = ADF_CSR_RD(base_addr, arb_csr_offset);
-	value ^= (0x01 << txq->hw_queue_number);
-	ADF_CSR_WR(base_addr, arb_csr_offset, value);
-}
-
-static void adf_configure_queues(struct qat_qp *qp)
-{
-	uint32_t queue_config;
-	struct qat_queue *queue = &qp->tx_q;
-
-	PMD_INIT_FUNC_TRACE();
-	queue_config = BUILD_RING_CONFIG(queue->queue_size);
-
-	WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
-			queue->hw_queue_number, queue_config);
-
-	queue = &qp->rx_q;
-	queue_config =
-			BUILD_RESP_RING_CONFIG(queue->queue_size,
-					ADF_RING_NEAR_WATERMARK_512,
-					ADF_RING_NEAR_WATERMARK_0);
-
-	WRITE_CSR_RING_CONFIG(qp->mmap_bar_addr, queue->hw_bundle_number,
-			queue->hw_queue_number, queue_config);
-}
diff --git a/drivers/crypto/qat/qat_sym.c b/drivers/crypto/qat/qat_sym.c
new file mode 100644
index 00000000..10cdf2e1
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym.c
@@ -0,0 +1,569 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#include <openssl/evp.h>
+
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_crypto_sym.h>
+#include <rte_bus_pci.h>
+#include <rte_byteorder.h>
+
+#include "qat_sym.h"
+
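[Editorial sketch, not part of the patch.] The BPI helpers below (and bpi_cipher_encrypt in qat_sym.h) lean on a single-block identity: in CFB mode the block cipher is only ever used in the encrypt direction, so C = E(K, IV) XOR P and P = E(K, IV) XOR C. The stand-alone program below checks that identity against OpenSSL's own CFB implementation; it uses only documented libcrypto calls (EVP_aes_128_cfb128, EVP_aes_128_ecb) and is illustrative only:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <openssl/evp.h>

int main(void)
{
	uint8_t key[16] = {0}, iv[16] = {1}, pt[16] = "fifteen chars.."; /* 15 chars + NUL */
	uint8_t cfb[16], ecb[16], xored[16];
	int len, i;

	/* Reference: one block of AES-128-CFB128 */
	EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();
	EVP_EncryptInit_ex(c, EVP_aes_128_cfb128(), NULL, key, iv);
	EVP_EncryptUpdate(c, cfb, &len, pt, sizeof(pt));
	EVP_CIPHER_CTX_free(c);

	/* Same result: ECB-encrypt the IV, then XOR with the data */
	c = EVP_CIPHER_CTX_new();
	EVP_EncryptInit_ex(c, EVP_aes_128_ecb(), NULL, key, NULL);
	EVP_EncryptUpdate(c, ecb, &len, iv, sizeof(iv));
	EVP_CIPHER_CTX_free(c);
	for (i = 0; i < 16; i++)
		xored[i] = pt[i] ^ ecb[i];

	printf("CFB == ECB(IV) ^ P: %s\n",
		memcmp(cfb, xored, sizeof(cfb)) == 0 ? "yes" : "no");
	return 0;
}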
+/** Decrypt a single partial block.
+ * Depends on openssl libcrypto.
+ * Uses ECB+XOR to do CFB decryption: same result, better performance.
+ */
+static inline int
+bpi_cipher_decrypt(uint8_t *src, uint8_t *dst,
+		uint8_t *iv, int ivlen, int srclen,
+		void *bpi_ctx)
+{
+	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+	int encrypted_ivlen;
+	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+	uint8_t *encr = encrypted_iv;
+
+	/* ECB method: encrypt (not decrypt!) the IV, then XOR with plaintext */
+	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+			<= 0)
+		goto cipher_decrypt_err;
+
+	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+		*dst = *src ^ *encr;
+
+	return 0;
+
+cipher_decrypt_err:
+	QAT_DP_LOG(ERR, "libcrypto ECB cipher decrypt for BPI IV failed");
+	return -EINVAL;
+}
+
+static inline uint32_t
+qat_bpicipher_preprocess(struct qat_sym_session *ctx,
+				struct rte_crypto_op *op)
+{
+	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint8_t last_block_len = block_len > 0 ?
+			sym_op->cipher.data.length % block_len : 0;
+
+	if (last_block_len &&
+			ctx->qat_dir == ICP_QAT_HW_CIPHER_DECRYPT) {
+
+		/* Decrypt last block */
+		uint8_t *last_block, *dst, *iv;
+		uint32_t last_block_offset = sym_op->cipher.data.offset +
+				sym_op->cipher.data.length - last_block_len;
+		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+				uint8_t *, last_block_offset);
+
+		if (unlikely(sym_op->m_dst != NULL))
+			/* out-of-place operation (OOP) */
+			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+					uint8_t *, last_block_offset);
+		else
+			dst = last_block;
+
+		if (last_block_len < sym_op->cipher.data.length)
+			/* use previous block ciphertext as IV */
+			iv = last_block - block_len;
+		else
+			/* runt block, i.e. less than one full block */
+			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+					ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before pre-process:",
+				last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst before pre-process:",
+					dst, last_block_len);
+#endif
+		bpi_cipher_decrypt(last_block, dst, iv, block_len,
+				last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after pre-process:",
+				last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: dst after pre-process:",
+					dst, last_block_len);
+#endif
+	}
+
+	return sym_op->cipher.data.length - last_block_len;
+}
+
+static inline void
+set_cipher_iv(uint16_t iv_length, uint16_t iv_offset,
+		struct icp_qat_fw_la_cipher_req_params *cipher_param,
+		struct rte_crypto_op *op,
+		struct icp_qat_fw_la_bulk_req *qat_req)
+{
+	/* copy IV into request if it fits */
+	if (iv_length <= sizeof(cipher_param->u.cipher_IV_array)) {
+		rte_memcpy(cipher_param->u.cipher_IV_array,
+				rte_crypto_op_ctod_offset(op, uint8_t *,
+					iv_offset),
+				iv_length);
+	} else {
+		ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(
+				qat_req->comn_hdr.serv_specif_flags,
+				ICP_QAT_FW_CIPH_IV_64BIT_PTR);
+		cipher_param->u.s.cipher_IV_ptr =
+				rte_crypto_op_ctophys_offset(op,
+					iv_offset);
+	}
+}
+
+/** Setting the IV for CCM is a special case: the 0th byte is set to q-1,
+ * where q is the number of bytes used to pad the nonce out to a
+ * 16-byte block.
+ */
+static inline void
+set_cipher_iv_ccm(uint16_t iv_length, uint16_t iv_offset,
+		struct icp_qat_fw_la_cipher_req_params *cipher_param,
+		struct rte_crypto_op *op, uint8_t q, uint8_t aad_len_field_sz)
+{
+	rte_memcpy(((uint8_t *)cipher_param->u.cipher_IV_array) +
+			ICP_QAT_HW_CCM_NONCE_OFFSET,
+			rte_crypto_op_ctod_offset(op, uint8_t *,
+				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
+			iv_length);
+	*(uint8_t *)&cipher_param->u.cipher_IV_array[0] =
+			q - ICP_QAT_HW_CCM_NONCE_OFFSET;
+
+	if (aad_len_field_sz)
+		rte_memcpy(&op->sym->aead.aad.data[ICP_QAT_HW_CCM_NONCE_OFFSET],
+			rte_crypto_op_ctod_offset(op, uint8_t *,
+				iv_offset) + ICP_QAT_HW_CCM_NONCE_OFFSET,
+			iv_length);
+}
+
+int
+qat_sym_build_request(void *in_op, uint8_t *out_msg, + void *op_cookie, enum qat_device_gen qat_dev_gen) +{ + int ret = 0; + struct qat_sym_session *ctx; + struct icp_qat_fw_la_cipher_req_params *cipher_param; + struct icp_qat_fw_la_auth_req_params *auth_param; + register struct icp_qat_fw_la_bulk_req *qat_req; + uint8_t do_auth = 0, do_cipher = 0, do_aead = 0; + uint32_t cipher_len = 0, cipher_ofs = 0; + uint32_t auth_len = 0, auth_ofs = 0; + uint32_t min_ofs = 0; + uint64_t src_buf_start = 0, dst_buf_start = 0; + uint8_t do_sgl = 0; + struct rte_crypto_op *op = (struct rte_crypto_op *)in_op; + struct qat_sym_op_cookie *cookie = + (struct qat_sym_op_cookie *)op_cookie; + + if (unlikely(op->type != RTE_CRYPTO_OP_TYPE_SYMMETRIC)) { + QAT_DP_LOG(ERR, "QAT PMD only supports symmetric crypto " + "operation requests, op (%p) is not a " + "symmetric operation.", op); + return -EINVAL; + } + + if (unlikely(op->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) { + QAT_DP_LOG(ERR, "QAT PMD only supports session oriented" + " requests, op (%p) is sessionless.", op); + return -EINVAL; + } + + ctx = (struct qat_sym_session *)get_sym_session_private_data( + op->sym->session, cryptodev_qat_driver_id); + + if (unlikely(ctx == NULL)) { + QAT_DP_LOG(ERR, "Session was not created for this device"); + return -EINVAL; + } + + if (unlikely(ctx->min_qat_dev_gen > qat_dev_gen)) { + QAT_DP_LOG(ERR, "Session alg not supported on this device gen"); + op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; + return -EINVAL; + } + + qat_req = (struct icp_qat_fw_la_bulk_req *)out_msg; + rte_mov128((uint8_t *)qat_req, (const uint8_t *)&(ctx->fw_req)); + qat_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)op; + cipher_param = (void *)&qat_req->serv_specif_rqpars; + auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param)); + + if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER || + ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) { + /* AES-GCM or AES-CCM */ + if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128 || + ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64 || + (ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_AES128 + && ctx->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE + && ctx->qat_hash_alg == + ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC)) { + do_aead = 1; + } else { + do_auth = 1; + do_cipher = 1; + } + } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) { + do_auth = 1; + do_cipher = 0; + } else if (ctx->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) { + do_auth = 0; + do_cipher = 1; + } + + if (do_cipher) { + + if (ctx->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 || + ctx->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI || + ctx->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) { + + if (unlikely( + (op->sym->cipher.data.length % BYTE_LENGTH != 0) || + (op->sym->cipher.data.offset % BYTE_LENGTH != 0))) { + QAT_DP_LOG(ERR, + "SNOW3G/KASUMI/ZUC in QAT PMD only supports byte aligned values"); + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + return -EINVAL; + } + cipher_len = op->sym->cipher.data.length >> 3; + cipher_ofs = op->sym->cipher.data.offset >> 3; + + } else if (ctx->bpi_ctx) { + /* DOCSIS - only send complete blocks to device + * Process any partial block using CFB mode. 
+			 * Even if 0 complete blocks, still send this to device
+			 * to get into rx queue for post-process and dequeuing
+			 */
+			cipher_len = qat_bpicipher_preprocess(ctx, op);
+			cipher_ofs = op->sym->cipher.data.offset;
+		} else {
+			cipher_len = op->sym->cipher.data.length;
+			cipher_ofs = op->sym->cipher.data.offset;
+		}
+
+		set_cipher_iv(ctx->cipher_iv.length, ctx->cipher_iv.offset,
+				cipher_param, op, qat_req);
+		min_ofs = cipher_ofs;
+	}
+
+	if (do_auth) {
+
+		if (ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 ||
+			ctx->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 ||
+			ctx->qat_hash_alg ==
+				ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3) {
+			if (unlikely(
+			    (op->sym->auth.data.offset % BYTE_LENGTH != 0) ||
+			    (op->sym->auth.data.length % BYTE_LENGTH != 0))) {
+				QAT_DP_LOG(ERR,
+		"For SNOW3G/KASUMI/ZUC, QAT PMD only supports byte aligned values");
+				op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+				return -EINVAL;
+			}
+			auth_ofs = op->sym->auth.data.offset >> 3;
+			auth_len = op->sym->auth.data.length >> 3;
+
+			auth_param->u1.aad_adr =
+					rte_crypto_op_ctophys_offset(op,
+							ctx->auth_iv.offset);
+
+		} else if (ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+				ctx->qat_hash_alg ==
+					ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+			/* AES-GMAC */
+			set_cipher_iv(ctx->auth_iv.length,
+				ctx->auth_iv.offset,
+				cipher_param, op, qat_req);
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+
+			auth_param->u1.aad_adr = 0;
+			auth_param->u2.aad_sz = 0;
+
+			/*
+			 * If len(iv)==12B fw computes J0
+			 */
+			if (ctx->auth_iv.length == 12) {
+				ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+					qat_req->comn_hdr.serv_specif_flags,
+					ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+
+			}
+		} else {
+			auth_ofs = op->sym->auth.data.offset;
+			auth_len = op->sym->auth.data.length;
+
+		}
+		min_ofs = auth_ofs;
+
+		if (likely(ctx->qat_hash_alg != ICP_QAT_HW_AUTH_ALGO_NULL))
+			auth_param->auth_res_addr =
+					op->sym->auth.digest.phys_addr;
+
+	}
+
+	if (do_aead) {
+		/*
+		 * This address may be used for setting the AAD physical
+		 * pointer into the IV offset from the op
+		 */
+		rte_iova_t aad_phys_addr_aead = op->sym->aead.aad.phys_addr;
+		if (ctx->qat_hash_alg ==
+				ICP_QAT_HW_AUTH_ALGO_GALOIS_128 ||
+			ctx->qat_hash_alg ==
+				ICP_QAT_HW_AUTH_ALGO_GALOIS_64) {
+			/*
+			 * If len(iv)==12B fw computes J0
+			 */
+			if (ctx->cipher_iv.length == 12) {
+				ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(
+					qat_req->comn_hdr.serv_specif_flags,
+					ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS);
+			}
+			set_cipher_iv(ctx->cipher_iv.length,
+					ctx->cipher_iv.offset,
+					cipher_param, op, qat_req);
+
+		} else if (ctx->qat_hash_alg ==
+				ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
+
+			/* In case of AES-CCM this may point to user-selected
+			 * memory or the iv offset in crypto_op
+			 */
+			uint8_t *aad_data = op->sym->aead.aad.data;
+			/* This is the true AAD length; it does not include the
+			 * 18 bytes of preceding data
+			 */
+			uint8_t aad_ccm_real_len = 0;
+			uint8_t aad_len_field_sz = 0;
+			uint32_t msg_len_be =
+					rte_bswap32(op->sym->aead.data.length);
+
+			if (ctx->aad_len > ICP_QAT_HW_CCM_AAD_DATA_OFFSET) {
+				aad_len_field_sz = ICP_QAT_HW_CCM_AAD_LEN_INFO;
+				aad_ccm_real_len = ctx->aad_len -
+					ICP_QAT_HW_CCM_AAD_B0_LEN -
+					ICP_QAT_HW_CCM_AAD_LEN_INFO;
+			} else {
+				/*
+				 * aad_len is not greater than 18, so there is
+				 * no actual AAD data; use the IV after the op
+				 * for the B0 block
+				 */
+				aad_data = rte_crypto_op_ctod_offset(op,
+						uint8_t *,
+						ctx->cipher_iv.offset);
+				aad_phys_addr_aead =
+						rte_crypto_op_ctophys_offset(op,
+							ctx->cipher_iv.offset);
+			}
+
+			uint8_t q = ICP_QAT_HW_CCM_NQ_CONST -
+					ctx->cipher_iv.length;
+
+			aad_data[0] = ICP_QAT_HW_CCM_BUILD_B0_FLAGS(
+					aad_len_field_sz,
+					ctx->digest_length, q);
+
+			if (q > ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE) {
+				memcpy(aad_data	+ ctx->cipher_iv.length +
+				    ICP_QAT_HW_CCM_NONCE_OFFSET +
+				    (q - ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE),
+				    (uint8_t *)&msg_len_be,
+				    ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE);
+			} else {
+				memcpy(aad_data	+ ctx->cipher_iv.length +
+				    ICP_QAT_HW_CCM_NONCE_OFFSET,
+				    (uint8_t *)&msg_len_be
+				    + (ICP_QAT_HW_CCM_MSG_LEN_MAX_FIELD_SIZE
+				    - q), q);
+			}
+
+			if (aad_len_field_sz > 0) {
+				*(uint16_t *)&aad_data[ICP_QAT_HW_CCM_AAD_B0_LEN]
+						= rte_bswap16(aad_ccm_real_len);
+
+				if ((aad_ccm_real_len + aad_len_field_sz)
+						% ICP_QAT_HW_CCM_AAD_B0_LEN) {
+					uint8_t pad_len = 0;
+					uint8_t pad_idx = 0;
+
+					pad_len = ICP_QAT_HW_CCM_AAD_B0_LEN -
+					((aad_ccm_real_len + aad_len_field_sz) %
+						ICP_QAT_HW_CCM_AAD_B0_LEN);
+					pad_idx = ICP_QAT_HW_CCM_AAD_B0_LEN +
+					    aad_ccm_real_len + aad_len_field_sz;
+					memset(&aad_data[pad_idx],
+							0, pad_len);
+				}
+
+			}
+
+			set_cipher_iv_ccm(ctx->cipher_iv.length,
+					ctx->cipher_iv.offset,
+					cipher_param, op, q,
+					aad_len_field_sz);
+
+		}
+
+		cipher_len = op->sym->aead.data.length;
+		cipher_ofs = op->sym->aead.data.offset;
+		auth_len = op->sym->aead.data.length;
+		auth_ofs = op->sym->aead.data.offset;
+
+		auth_param->u1.aad_adr = aad_phys_addr_aead;
+		auth_param->auth_res_addr = op->sym->aead.digest.phys_addr;
+		min_ofs = op->sym->aead.data.offset;
+	}
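[Editorial sketch, not part of the patch.] For reference, the flags byte written into aad_data[0] above follows the CCM B0 layout of NIST SP 800-38C (Appendix A.2.1): bit 6 signals the presence of AAD, bits 5..3 carry (t-2)/2 for a t-byte tag, and bits 2..0 carry q-1. The ICP_QAT_HW_CCM_BUILD_B0_FLAGS() macro is expected to encode exactly this; the arithmetic can be checked stand-alone (ccm_b0_flags is a hypothetical helper written for illustration, not a driver function):

#include <stdint.h>
#include <stdio.h>

/* CCM B0 flags per NIST SP 800-38C, Appendix A.2.1 */
static uint8_t ccm_b0_flags(int aad_present, unsigned int t, unsigned int q)
{
	return (uint8_t)(((aad_present ? 1 : 0) << 6) |
			(((t - 2) / 2) << 3) |
			(q - 1));
}

int main(void)
{
	/* 16-byte tag, 11-byte nonce: q = 15 - 11 = 4, so flags = 0x7b */
	printf("flags = 0x%02x\n", ccm_b0_flags(1, 16, 4));
	return 0;
}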
+
+	if (op->sym->m_src->next || (op->sym->m_dst && op->sym->m_dst->next))
+		do_sgl = 1;
+
+	/* adjust for chain case */
+	if (do_cipher && do_auth)
+		min_ofs = cipher_ofs < auth_ofs ? cipher_ofs : auth_ofs;
+
+	if (unlikely(min_ofs >= rte_pktmbuf_data_len(op->sym->m_src) && do_sgl))
+		min_ofs = 0;
+
+	if (unlikely(op->sym->m_dst != NULL)) {
+		/* Out-of-place operation (OOP)
+		 * Don't align DMA start. DMA the minimum data-set
+		 * so as not to overwrite data in dest buffer
+		 */
+		src_buf_start =
+			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs);
+		dst_buf_start =
+			rte_pktmbuf_iova_offset(op->sym->m_dst, min_ofs);
+
+	} else {
+		/* In-place operation
+		 * Start DMA at nearest aligned address below min_ofs
+		 */
+		src_buf_start =
+			rte_pktmbuf_iova_offset(op->sym->m_src, min_ofs)
+						& QAT_64_BTYE_ALIGN_MASK;
+
+		if (unlikely((rte_pktmbuf_iova(op->sym->m_src) -
+					rte_pktmbuf_headroom(op->sym->m_src))
+							> src_buf_start)) {
+			/* alignment has pushed addr ahead of start of mbuf
+			 * so revert and take the performance hit
+			 */
+			src_buf_start =
+				rte_pktmbuf_iova_offset(op->sym->m_src,
+								min_ofs);
+		}
+		dst_buf_start = src_buf_start;
+	}
+
+	if (do_cipher || do_aead) {
+		cipher_param->cipher_offset =
+				(uint32_t)rte_pktmbuf_iova_offset(
+				op->sym->m_src, cipher_ofs) - src_buf_start;
+		cipher_param->cipher_length = cipher_len;
+	} else {
+		cipher_param->cipher_offset = 0;
+		cipher_param->cipher_length = 0;
+	}
+
+	if (do_auth || do_aead) {
+		auth_param->auth_off = (uint32_t)rte_pktmbuf_iova_offset(
+				op->sym->m_src, auth_ofs) - src_buf_start;
+		auth_param->auth_len = auth_len;
+	} else {
+		auth_param->auth_off = 0;
+		auth_param->auth_len = 0;
+	}
+
+	qat_req->comn_mid.dst_length =
+		qat_req->comn_mid.src_length =
+		(cipher_param->cipher_offset + cipher_param->cipher_length)
+		> (auth_param->auth_off + auth_param->auth_len) ?
+		(cipher_param->cipher_offset + cipher_param->cipher_length)
+		: (auth_param->auth_off + auth_param->auth_len);
+
+	if (do_sgl) {
+
+		ICP_QAT_FW_COMN_PTR_TYPE_SET(qat_req->comn_hdr.comn_req_flags,
+				QAT_COMN_PTR_TYPE_SGL);
+		ret = qat_sgl_fill_array(op->sym->m_src,
+		   (int64_t)(src_buf_start - rte_pktmbuf_iova(op->sym->m_src)),
+		   &cookie->qat_sgl_src,
+		   qat_req->comn_mid.src_length,
+		   QAT_SYM_SGL_MAX_NUMBER);
+
+		if (unlikely(ret)) {
+			QAT_DP_LOG(ERR, "QAT PMD Cannot fill sgl array");
+			return ret;
+		}
+
+		if (likely(op->sym->m_dst == NULL))
+			qat_req->comn_mid.dest_data_addr =
+				qat_req->comn_mid.src_data_addr =
+				cookie->qat_sgl_src_phys_addr;
+		else {
+			ret = qat_sgl_fill_array(op->sym->m_dst,
+					(int64_t)(dst_buf_start -
+					  rte_pktmbuf_iova(op->sym->m_dst)),
+					 &cookie->qat_sgl_dst,
+					 qat_req->comn_mid.dst_length,
+					 QAT_SYM_SGL_MAX_NUMBER);
+
+			if (unlikely(ret)) {
+				QAT_DP_LOG(ERR, "QAT PMD can't fill sgl array");
+				return ret;
+			}
+
+			qat_req->comn_mid.src_data_addr =
+				cookie->qat_sgl_src_phys_addr;
+			qat_req->comn_mid.dest_data_addr =
+					cookie->qat_sgl_dst_phys_addr;
+		}
+	} else {
+		qat_req->comn_mid.src_data_addr = src_buf_start;
+		qat_req->comn_mid.dest_data_addr = dst_buf_start;
+	}
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_req:", qat_req,
+			sizeof(struct icp_qat_fw_la_bulk_req));
+	QAT_DP_HEXDUMP_LOG(DEBUG, "src_data:",
+			rte_pktmbuf_mtod(op->sym->m_src, uint8_t*),
+			rte_pktmbuf_data_len(op->sym->m_src));
+	if (do_cipher) {
+		uint8_t *cipher_iv_ptr = rte_crypto_op_ctod_offset(op,
+						uint8_t *,
+						ctx->cipher_iv.offset);
+		QAT_DP_HEXDUMP_LOG(DEBUG, "cipher iv:", cipher_iv_ptr,
+				ctx->cipher_iv.length);
+	}
+
+	if (do_auth) {
+		if (ctx->auth_iv.length) {
+			uint8_t *auth_iv_ptr = rte_crypto_op_ctod_offset(op,
+							uint8_t *,
+							ctx->auth_iv.offset);
+			QAT_DP_HEXDUMP_LOG(DEBUG, "auth iv:", auth_iv_ptr,
+						ctx->auth_iv.length);
+		}
+		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->auth.digest.data,
+				ctx->digest_length);
+	}
+
+	if (do_aead) {
+		QAT_DP_HEXDUMP_LOG(DEBUG, "digest:", op->sym->aead.digest.data,
+				ctx->digest_length);
+		QAT_DP_HEXDUMP_LOG(DEBUG, "aad:", op->sym->aead.aad.data,
+				ctx->aad_len);
+	}
+#endif
+	return 0;
+}
diff --git a/drivers/crypto/qat/qat_sym.h b/drivers/crypto/qat/qat_sym.h
new file mode 100644
index 00000000..bc6426c3
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_H_
+#define _QAT_SYM_H_
+
+#include <rte_cryptodev_pmd.h>
+
+#ifdef BUILD_QAT_SYM
+#include <openssl/evp.h>
+
+#include "qat_common.h"
+#include "qat_sym_session.h"
+#include "qat_sym_pmd.h"
+#include "qat_logs.h"
+
+#define BYTE_LENGTH	8
+/* bpi is only used for partial blocks of DES and AES,
+ * so the AES block len can be assumed as the max len for iv, src and dst
+ */
+#define BPI_MAX_ENCR_IV_LEN ICP_QAT_HW_AES_BLK_SZ
+
+/*
+ * Maximum number of SGL entries
+ */
+#define QAT_SYM_SGL_MAX_NUMBER	16
+
+struct qat_sym_session;
+
+struct qat_sym_sgl {
+	qat_sgl_hdr;
+	struct qat_flat_buf buffers[QAT_SYM_SGL_MAX_NUMBER];
+} __rte_packed __rte_cache_aligned;
+
+struct qat_sym_op_cookie {
+	struct qat_sym_sgl qat_sgl_src;
+	struct qat_sym_sgl qat_sgl_dst;
+	phys_addr_t qat_sgl_src_phys_addr;
+	phys_addr_t qat_sgl_dst_phys_addr;
+};
+
+int
+qat_sym_build_request(void *in_op, uint8_t *out_msg,
+		void *op_cookie, enum qat_device_gen qat_dev_gen);
+
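[Editorial sketch, not part of the patch.] qat_sym_build_request() above switches the descriptor to SGL addressing whenever a source or destination mbuf is chained, and qat_sgl_fill_array() fails the request when a chain has more segments than the flat-buffer array declared in qat_sym_op_cookie. An application that builds long chains can screen for this before enqueueing; qat_sgl_fits() below is a hypothetical caller-side helper, sketched for illustration only:

#include <stdbool.h>
#include <rte_mbuf.h>

#define MAX_QAT_SGL_SEGS 16 /* mirrors QAT_SYM_SGL_MAX_NUMBER above */

/* Return true if the mbuf chain fits the driver's flat-buffer SGL array. */
static inline bool
qat_sgl_fits(const struct rte_mbuf *m)
{
	return m == NULL || m->nb_segs <= MAX_QAT_SGL_SEGS;
}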
+
+/** Encrypt a single partial block.
+ * Depends on openssl libcrypto.
+ * Uses ECB+XOR to do CFB encryption: same result, better performance.
+ */
+static inline int
+bpi_cipher_encrypt(uint8_t *src, uint8_t *dst,
+		uint8_t *iv, int ivlen, int srclen,
+		void *bpi_ctx)
+{
+	EVP_CIPHER_CTX *ctx = (EVP_CIPHER_CTX *)bpi_ctx;
+	int encrypted_ivlen;
+	uint8_t encrypted_iv[BPI_MAX_ENCR_IV_LEN];
+	uint8_t *encr = encrypted_iv;
+
+	/* ECB method: encrypt the IV, then XOR this with plaintext */
+	if (EVP_EncryptUpdate(ctx, encrypted_iv, &encrypted_ivlen, iv, ivlen)
+			<= 0)
+		goto cipher_encrypt_err;
+
+	for (; srclen != 0; --srclen, ++dst, ++src, ++encr)
+		*dst = *src ^ *encr;
+
+	return 0;
+
+cipher_encrypt_err:
+	QAT_DP_LOG(ERR, "libcrypto ECB cipher encrypt failed");
+	return -EINVAL;
+}
+
+static inline uint32_t
+qat_bpicipher_postprocess(struct qat_sym_session *ctx,
+				struct rte_crypto_op *op)
+{
+	int block_len = qat_cipher_get_block_size(ctx->qat_cipher_alg);
+	struct rte_crypto_sym_op *sym_op = op->sym;
+	uint8_t last_block_len = block_len > 0 ?
+			sym_op->cipher.data.length % block_len : 0;
+
+	if (last_block_len > 0 &&
+			ctx->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) {
+
+		/* Encrypt last block */
+		uint8_t *last_block, *dst, *iv;
+		uint32_t last_block_offset;
+
+		last_block_offset = sym_op->cipher.data.offset +
+				sym_op->cipher.data.length - last_block_len;
+		last_block = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_src,
+				uint8_t *, last_block_offset);
+
+		if (unlikely(sym_op->m_dst != NULL))
+			/* out-of-place operation (OOP) */
+			dst = (uint8_t *) rte_pktmbuf_mtod_offset(sym_op->m_dst,
+					uint8_t *, last_block_offset);
+		else
+			dst = last_block;
+
+		if (last_block_len < sym_op->cipher.data.length)
+			/* use previous block ciphertext as IV */
+			iv = dst - block_len;
+		else
+			/* runt block, i.e. less than one full block */
+			iv = rte_crypto_op_ctod_offset(op, uint8_t *,
+					ctx->cipher_iv.offset);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src before post-process:",
+				last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG,
+					"BPI: dst before post-process:",
+					dst, last_block_len);
+#endif
+		bpi_cipher_encrypt(last_block, dst, iv, block_len,
+				last_block_len, ctx->bpi_ctx);
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+		QAT_DP_HEXDUMP_LOG(DEBUG, "BPI: src after post-process:",
+				last_block, last_block_len);
+		if (sym_op->m_dst != NULL)
+			QAT_DP_HEXDUMP_LOG(DEBUG,
+					"BPI: dst after post-process:",
+					dst, last_block_len);
+#endif
+	}
+	return sym_op->cipher.data.length - last_block_len;
+}
+
+static inline void
+qat_sym_process_response(void **op, uint8_t *resp)
+{
+	struct icp_qat_fw_comn_resp *resp_msg =
+			(struct icp_qat_fw_comn_resp *)resp;
+	struct rte_crypto_op *rx_op = (struct rte_crypto_op *)(uintptr_t)
+			(resp_msg->opaque_data);
+
+#if RTE_LOG_DP_LEVEL >= RTE_LOG_DEBUG
+	QAT_DP_HEXDUMP_LOG(DEBUG, "qat_response:", (uint8_t *)resp_msg,
+			sizeof(struct icp_qat_fw_comn_resp));
+#endif
+
+	if (ICP_QAT_FW_COMN_STATUS_FLAG_OK !=
+			ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(
+					resp_msg->comn_hdr.comn_status)) {
+
+		rx_op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+	} else {
+		struct qat_sym_session *sess = (struct qat_sym_session *)
+				get_sym_session_private_data(
+						rx_op->sym->session,
+						cryptodev_qat_driver_id);
+
+		if (sess->bpi_ctx)
+			qat_bpicipher_postprocess(sess, rx_op);
+		rx_op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+	}
+	*op = (void *)rx_op;
+}
+#else
+
+static inline void
+qat_sym_process_response(void **op __rte_unused, uint8_t *resp __rte_unused)
+{
+}
+#endif
+#endif /* _QAT_SYM_H_ */
diff --git a/drivers/crypto/qat/qat_sym_capabilities.h
b/drivers/crypto/qat/qat_sym_capabilities.h new file mode 100644 index 00000000..eea08bc7 --- /dev/null +++ b/drivers/crypto/qat/qat_sym_capabilities.h @@ -0,0 +1,557 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2017-2018 Intel Corporation + */ + +#ifndef _QAT_SYM_CAPABILITIES_H_ +#define _QAT_SYM_CAPABILITIES_H_ + +#define QAT_BASE_GEN1_SYM_CAPABILITIES \ + { /* SHA1 HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \ + .block_size = 64, \ + .key_size = { \ + .min = 1, \ + .max = 64, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 1, \ + .max = 20, \ + .increment = 1 \ + }, \ + .iv_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA224 HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA224_HMAC, \ + .block_size = 64, \ + .key_size = { \ + .min = 1, \ + .max = 64, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 1, \ + .max = 28, \ + .increment = 1 \ + }, \ + .iv_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA256 HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, \ + .block_size = 64, \ + .key_size = { \ + .min = 1, \ + .max = 64, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 1, \ + .max = 32, \ + .increment = 1 \ + }, \ + .iv_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA384 HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, \ + .block_size = 128, \ + .key_size = { \ + .min = 1, \ + .max = 128, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 1, \ + .max = 48, \ + .increment = 1 \ + }, \ + .iv_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* SHA512 HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, \ + .block_size = 128, \ + .key_size = { \ + .min = 1, \ + .max = 128, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 1, \ + .max = 64, \ + .increment = 1 \ + }, \ + .iv_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* MD5 HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_MD5_HMAC, \ + .block_size = 64, \ + .key_size = { \ + .min = 1, \ + .max = 64, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 1, \ + .max = 16, \ + .increment = 1 \ + }, \ + .iv_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* AES XCBC MAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .aad_size = { 0 }, \ + .iv_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* AES CCM */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \ + {.aead = { \ + .algo = RTE_CRYPTO_AEAD_AES_CCM, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 4, \ + .max = 16, \ + .increment = 2 \ + }, \ + .aad_size = { \ + .min = 0, \ + .max = 
224, \ + .increment = 1 \ + }, \ + .iv_size = { \ + .min = 7, \ + .max = 13, \ + .increment = 1 \ + }, \ + }, } \ + }, } \ + }, \ + { /* AES GCM */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \ + {.aead = { \ + .algo = RTE_CRYPTO_AEAD_AES_GCM, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 32, \ + .increment = 8 \ + }, \ + .digest_size = { \ + .min = 8, \ + .max = 16, \ + .increment = 4 \ + }, \ + .aad_size = { \ + .min = 0, \ + .max = 240, \ + .increment = 1 \ + }, \ + .iv_size = { \ + .min = 12, \ + .max = 12, \ + .increment = 0 \ + }, \ + }, } \ + }, } \ + }, \ + { /* AES GMAC (AUTH) */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_AES_GMAC, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 32, \ + .increment = 8 \ + }, \ + .digest_size = { \ + .min = 8, \ + .max = 16, \ + .increment = 4 \ + }, \ + .iv_size = { \ + .min = 12, \ + .max = 12, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* SNOW 3G (UIA2) */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SNOW3G_UIA2, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 4, \ + .max = 4, \ + .increment = 0 \ + }, \ + .iv_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* AES CBC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_AES_CBC, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 32, \ + .increment = 8 \ + }, \ + .iv_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* AES DOCSIS BPI */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_AES_DOCSISBPI,\ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .iv_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* SNOW 3G (UEA2) */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_SNOW3G_UEA2, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .iv_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* AES CTR */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_AES_CTR, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 32, \ + .increment = 8 \ + }, \ + .iv_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* NULL (AUTH) */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_NULL, \ + .block_size = 1, \ + .key_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + }, \ + .iv_size = { 0 } \ + }, }, \ + }, }, \ + }, \ + { /* NULL (CIPHER) */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = 
RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_NULL, \ + .block_size = 1, \ + .key_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + }, \ + .iv_size = { \ + .min = 0, \ + .max = 0, \ + .increment = 0 \ + } \ + }, }, \ + }, } \ + }, \ + { /* KASUMI (F8) */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_KASUMI_F8, \ + .block_size = 8, \ + .key_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .iv_size = { \ + .min = 8, \ + .max = 8, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* KASUMI (F9) */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_KASUMI_F9, \ + .block_size = 8, \ + .key_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 4, \ + .max = 4, \ + .increment = 0 \ + }, \ + .iv_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* 3DES CBC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_3DES_CBC, \ + .block_size = 8, \ + .key_size = { \ + .min = 8, \ + .max = 24, \ + .increment = 8 \ + }, \ + .iv_size = { \ + .min = 8, \ + .max = 8, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* 3DES CTR */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_3DES_CTR, \ + .block_size = 8, \ + .key_size = { \ + .min = 16, \ + .max = 24, \ + .increment = 8 \ + }, \ + .iv_size = { \ + .min = 8, \ + .max = 8, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* DES CBC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_DES_CBC, \ + .block_size = 8, \ + .key_size = { \ + .min = 8, \ + .max = 8, \ + .increment = 0 \ + }, \ + .iv_size = { \ + .min = 8, \ + .max = 8, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* DES DOCSISBPI */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_DES_DOCSISBPI,\ + .block_size = 8, \ + .key_size = { \ + .min = 8, \ + .max = 8, \ + .increment = 0 \ + }, \ + .iv_size = { \ + .min = 8, \ + .max = 8, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + } + +#define QAT_EXTRA_GEN2_SYM_CAPABILITIES \ + { /* ZUC (EEA3) */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_ZUC_EEA3, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .iv_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + }, \ + { /* ZUC (EIA3) */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_ZUC_EIA3, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + }, \ + .digest_size = { \ + .min = 4, \ + .max = 4, \ + .increment = 0 \ + }, \ + .iv_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + } + +#endif /* _QAT_SYM_CAPABILITIES_H_ */ diff --git a/drivers/crypto/qat/qat_sym_pmd.c b/drivers/crypto/qat/qat_sym_pmd.c new file mode 100644 index 00000000..96f442e8 --- /dev/null 
+++ b/drivers/crypto/qat/qat_sym_pmd.c
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#include <rte_bus_pci.h>
+#include <rte_common.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+#include <rte_pci.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "qat_logs.h"
+#include "qat_sym.h"
+#include "qat_sym_session.h"
+#include "qat_sym_pmd.h"
+
+uint8_t cryptodev_qat_driver_id;
+
+static const struct rte_cryptodev_capabilities qat_gen1_sym_capabilities[] = {
+	QAT_BASE_GEN1_SYM_CAPABILITIES,
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_cryptodev_capabilities qat_gen2_sym_capabilities[] = {
+	QAT_BASE_GEN1_SYM_CAPABILITIES,
+	QAT_EXTRA_GEN2_SYM_CAPABILITIES,
+	RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static int qat_sym_qp_release(struct rte_cryptodev *dev,
+	uint16_t queue_pair_id);
+
+static int qat_sym_dev_config(__rte_unused struct rte_cryptodev *dev,
+		__rte_unused struct rte_cryptodev_config *config)
+{
+	return 0;
+}
+
+static int qat_sym_dev_start(__rte_unused struct rte_cryptodev *dev)
+{
+	return 0;
+}
+
+static void qat_sym_dev_stop(__rte_unused struct rte_cryptodev *dev)
+{
+	return;
+}
+
+static int qat_sym_dev_close(struct rte_cryptodev *dev)
+{
+	int i, ret;
+
+	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
+		ret = qat_sym_qp_release(dev, i);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void qat_sym_dev_info_get(struct rte_cryptodev *dev,
+			struct rte_cryptodev_info *info)
+{
+	struct qat_sym_dev_private *internals = dev->data->dev_private;
+	const struct qat_qp_hw_data *sym_hw_qps =
+		qat_gen_config[internals->qat_dev->qat_dev_gen]
+			      .qp_hw_data[QAT_SERVICE_SYMMETRIC];
+
+	if (info != NULL) {
+		info->max_nb_queue_pairs =
+			qat_qps_per_service(sym_hw_qps, QAT_SERVICE_SYMMETRIC);
+		info->feature_flags = dev->feature_flags;
+		info->capabilities = internals->qat_dev_capabilities;
+		info->driver_id = cryptodev_qat_driver_id;
+		/* No limit on the number of sessions */
+		info->sym.max_nb_sessions = 0;
+	}
+}
+
+static void qat_sym_stats_get(struct rte_cryptodev *dev,
+		struct rte_cryptodev_stats *stats)
+{
+	struct qat_common_stats qat_stats = {0};
+	struct qat_sym_dev_private *qat_priv;
+
+	if (stats == NULL || dev == NULL) {
+		QAT_LOG(ERR, "invalid ptr: stats %p, dev %p", stats, dev);
+		return;
+	}
+	qat_priv = dev->data->dev_private;
+
+	qat_stats_get(qat_priv->qat_dev, &qat_stats, QAT_SERVICE_SYMMETRIC);
+	stats->enqueued_count = qat_stats.enqueued_count;
+	stats->dequeued_count = qat_stats.dequeued_count;
+	stats->enqueue_err_count = qat_stats.enqueue_err_count;
+	stats->dequeue_err_count = qat_stats.dequeue_err_count;
+}
+
+static void qat_sym_stats_reset(struct rte_cryptodev *dev)
+{
+	struct qat_sym_dev_private *qat_priv;
+
+	if (dev == NULL) {
+		QAT_LOG(ERR, "invalid cryptodev ptr %p", dev);
+		return;
+	}
+	qat_priv = dev->data->dev_private;
+
+	qat_stats_reset(qat_priv->qat_dev, QAT_SERVICE_SYMMETRIC);
+
+}
+
+static int qat_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+{
+	struct qat_sym_dev_private *qat_private = dev->data->dev_private;
+
+	QAT_LOG(DEBUG, "Release sym qp %u on device %d",
+				queue_pair_id, dev->data->dev_id);
+
+	qat_private->qat_dev->qps_in_use[QAT_SERVICE_SYMMETRIC][queue_pair_id]
+						= NULL;
+
+	return qat_qp_release((struct qat_qp **)
+			&(dev->data->queue_pairs[queue_pair_id]));
+}
+
+static int qat_sym_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+	const struct rte_cryptodev_qp_conf *qp_conf,
+	int socket_id, struct rte_mempool *session_pool __rte_unused)
+{
+	struct qat_qp *qp;
+	int ret = 0;
+	uint32_t
i; + struct qat_qp_config qat_qp_conf; + + struct qat_qp **qp_addr = + (struct qat_qp **)&(dev->data->queue_pairs[qp_id]); + struct qat_sym_dev_private *qat_private = dev->data->dev_private; + const struct qat_qp_hw_data *sym_hw_qps = + qat_gen_config[qat_private->qat_dev->qat_dev_gen] + .qp_hw_data[QAT_SERVICE_SYMMETRIC]; + const struct qat_qp_hw_data *qp_hw_data = sym_hw_qps + qp_id; + + /* If qp is already in use free ring memory and qp metadata. */ + if (*qp_addr != NULL) { + ret = qat_sym_qp_release(dev, qp_id); + if (ret < 0) + return ret; + } + if (qp_id >= qat_qps_per_service(sym_hw_qps, QAT_SERVICE_SYMMETRIC)) { + QAT_LOG(ERR, "qp_id %u invalid for this device", qp_id); + return -EINVAL; + } + + qat_qp_conf.hw = qp_hw_data; + qat_qp_conf.build_request = qat_sym_build_request; + qat_qp_conf.cookie_size = sizeof(struct qat_sym_op_cookie); + qat_qp_conf.nb_descriptors = qp_conf->nb_descriptors; + qat_qp_conf.socket_id = socket_id; + qat_qp_conf.service_str = "sym"; + + ret = qat_qp_setup(qat_private->qat_dev, qp_addr, qp_id, &qat_qp_conf); + if (ret != 0) + return ret; + + /* store a link to the qp in the qat_pci_device */ + qat_private->qat_dev->qps_in_use[QAT_SERVICE_SYMMETRIC][qp_id] + = *qp_addr; + + qp = (struct qat_qp *)*qp_addr; + + for (i = 0; i < qp->nb_descriptors; i++) { + + struct qat_sym_op_cookie *cookie = + qp->op_cookies[i]; + + cookie->qat_sgl_src_phys_addr = + rte_mempool_virt2iova(cookie) + + offsetof(struct qat_sym_op_cookie, + qat_sgl_src); + + cookie->qat_sgl_dst_phys_addr = + rte_mempool_virt2iova(cookie) + + offsetof(struct qat_sym_op_cookie, + qat_sgl_dst); + } + + return ret; +} + +static struct rte_cryptodev_ops crypto_qat_ops = { + + /* Device related operations */ + .dev_configure = qat_sym_dev_config, + .dev_start = qat_sym_dev_start, + .dev_stop = qat_sym_dev_stop, + .dev_close = qat_sym_dev_close, + .dev_infos_get = qat_sym_dev_info_get, + + .stats_get = qat_sym_stats_get, + .stats_reset = qat_sym_stats_reset, + .queue_pair_setup = qat_sym_qp_setup, + .queue_pair_release = qat_sym_qp_release, + .queue_pair_count = NULL, + + /* Crypto related operations */ + .sym_session_get_size = qat_sym_session_get_private_size, + .sym_session_configure = qat_sym_session_configure, + .sym_session_clear = qat_sym_session_clear +}; + +static uint16_t +qat_sym_pmd_enqueue_op_burst(void *qp, struct rte_crypto_op **ops, + uint16_t nb_ops) +{ + return qat_enqueue_op_burst(qp, (void **)ops, nb_ops); +} + +static uint16_t +qat_sym_pmd_dequeue_op_burst(void *qp, struct rte_crypto_op **ops, + uint16_t nb_ops) +{ + return qat_dequeue_op_burst(qp, (void **)ops, nb_ops); +} + +/* An rte_driver is needed in the registration of both the device and the driver + * with cryptodev. + * The actual qat pci's rte_driver can't be used as its name represents + * the whole pci device with all services. Think of this as a holder for a name + * for the crypto part of the pci device. 
+ */
+static const char qat_sym_drv_name[] = RTE_STR(CRYPTODEV_NAME_QAT_SYM_PMD);
+static const struct rte_driver cryptodev_qat_sym_driver = {
+	.name = qat_sym_drv_name,
+	.alias = qat_sym_drv_name
+};
+
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev_pmd_init_params init_params = {
+			.name = "",
+			.socket_id = qat_pci_dev->pci_dev->device.numa_node,
+			.private_data_size = sizeof(struct qat_sym_dev_private)
+	};
+	char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+	struct rte_cryptodev *cryptodev;
+	struct qat_sym_dev_private *internals;
+
+	snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_%s",
+			qat_pci_dev->name, "sym");
+	QAT_LOG(DEBUG, "Creating QAT SYM device %s", name);
+
+	/* Populate subset device to use in cryptodev device creation */
+	qat_pci_dev->sym_rte_dev.driver = &cryptodev_qat_sym_driver;
+	qat_pci_dev->sym_rte_dev.numa_node =
+				qat_pci_dev->pci_dev->device.numa_node;
+	qat_pci_dev->sym_rte_dev.devargs = NULL;
+
+	cryptodev = rte_cryptodev_pmd_create(name,
+			&(qat_pci_dev->sym_rte_dev), &init_params);
+
+	if (cryptodev == NULL)
+		return -ENODEV;
+
+	qat_pci_dev->sym_rte_dev.name = cryptodev->data->name;
+	cryptodev->driver_id = cryptodev_qat_driver_id;
+	cryptodev->dev_ops = &crypto_qat_ops;
+
+	cryptodev->enqueue_burst = qat_sym_pmd_enqueue_op_burst;
+	cryptodev->dequeue_burst = qat_sym_pmd_dequeue_op_burst;
+
+	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+			RTE_CRYPTODEV_FF_HW_ACCELERATED |
+			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
+			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
+			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
+			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
+			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
+			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
+
+	internals = cryptodev->data->dev_private;
+	internals->qat_dev = qat_pci_dev;
+	qat_pci_dev->sym_dev = internals;
+
+	internals->sym_dev_id = cryptodev->data->dev_id;
+	switch (qat_pci_dev->qat_dev_gen) {
+	case QAT_GEN1:
+		internals->qat_dev_capabilities = qat_gen1_sym_capabilities;
+		break;
+	case QAT_GEN2:
+		internals->qat_dev_capabilities = qat_gen2_sym_capabilities;
+		break;
+	default:
+		internals->qat_dev_capabilities = qat_gen2_sym_capabilities;
+		QAT_LOG(DEBUG,
+			"QAT gen %d capabilities unknown, default to GEN2",
+					qat_pci_dev->qat_dev_gen);
+		break;
+	}
+
+	QAT_LOG(DEBUG, "Created QAT SYM device %s as cryptodev instance %d",
+			cryptodev->data->name, internals->sym_dev_id);
+	return 0;
+}
+
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev)
+{
+	struct rte_cryptodev *cryptodev;
+
+	if (qat_pci_dev == NULL)
+		return -ENODEV;
+	if (qat_pci_dev->sym_dev == NULL)
+		return 0;
+
+	/* free crypto device */
+	cryptodev = rte_cryptodev_pmd_get_dev(qat_pci_dev->sym_dev->sym_dev_id);
+	rte_cryptodev_pmd_destroy(cryptodev);
+	qat_pci_dev->sym_rte_dev.name = NULL;
+	qat_pci_dev->sym_dev = NULL;
+
+	return 0;
+}
+
+static struct cryptodev_driver qat_crypto_drv;
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv,
+		cryptodev_qat_sym_driver,
+		cryptodev_qat_driver_id);
diff --git a/drivers/crypto/qat/qat_sym_pmd.h b/drivers/crypto/qat/qat_sym_pmd.h
new file mode 100644
index 00000000..d3432854
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_pmd.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#ifndef _QAT_SYM_PMD_H_
+#define _QAT_SYM_PMD_H_
+
+#ifdef BUILD_QAT_SYM
+
+#include <rte_cryptodev.h>
+
+#include "qat_sym_capabilities.h"
+#include "qat_device.h"
+
+/**< Intel(R) QAT Symmetric Crypto PMD device name */
+#define CRYPTODEV_NAME_QAT_SYM_PMD	crypto_qat
+
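[Editorial sketch, not part of the patch.] Because every QAT PCI device registers its symmetric-crypto half under the driver name crypto_qat, an application can enumerate the resulting cryptodevs and query their advertised capabilities through the regular cryptodev API. A minimal sketch, assuming EAL has been initialized with a QAT VF bound to a userspace driver:

#include <stdio.h>
#include <rte_eal.h>
#include <rte_cryptodev.h>

int main(int argc, char **argv)
{
	uint8_t devs[8], n, i;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* All cryptodevs created by the "crypto_qat" driver */
	n = rte_cryptodev_devices_get("crypto_qat", devs, 8);
	for (i = 0; i < n; i++) {
		struct rte_cryptodev_sym_capability_idx idx = {
			.type = RTE_CRYPTO_SYM_XFORM_AEAD,
			.algo = { .aead = RTE_CRYPTO_AEAD_AES_GCM },
		};
		const struct rte_cryptodev_symmetric_capability *cap =
			rte_cryptodev_sym_capability_get(devs[i], &idx);
		printf("cryptodev %u: AES-GCM %ssupported\n",
			devs[i], cap != NULL ? "" : "not ");
	}
	return 0;
}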
+extern uint8_t cryptodev_qat_driver_id;
+
+/** Private data structure for a QAT device.
+ * This QAT device offers only the symmetric crypto service;
+ * there can be one of these on each qat_pci_device (VF).
+ * In future there may also be private data structures for other services.
+ */
+struct qat_sym_dev_private {
+	struct qat_pci_device *qat_dev;
+	/**< The qat pci device hosting the service */
+	uint8_t sym_dev_id;
+	/**< Device instance for this rte_cryptodev */
+	const struct rte_cryptodev_capabilities *qat_dev_capabilities;
+	/* QAT device symmetric crypto capabilities */
+};
+
+int
+qat_sym_dev_create(struct qat_pci_device *qat_pci_dev);
+
+int
+qat_sym_dev_destroy(struct qat_pci_device *qat_pci_dev);
+
+#endif
+#endif /* _QAT_SYM_PMD_H_ */
diff --git a/drivers/crypto/qat/qat_sym_session.c b/drivers/crypto/qat/qat_sym_session.c
new file mode 100644
index 00000000..1d58220a
--- /dev/null
+++ b/drivers/crypto/qat/qat_sym_session.c
@@ -0,0 +1,1725 @@
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
+ */
+
+#include <openssl/sha.h>	/* Needed to calculate pre-compute values */
+#include <openssl/aes.h>	/* Needed to calculate pre-compute values */
+#include <openssl/md5.h>	/* Needed to calculate pre-compute values */
+#include <openssl/evp.h>	/* Needed for bpi runt block processing */
+
+#include <rte_memcpy.h>
+#include <rte_common.h>
+#include <rte_spinlock.h>
+#include <rte_byteorder.h>
+#include <rte_log.h>
+#include <rte_malloc.h>
+#include <rte_crypto_sym.h>
+
+#include "qat_logs.h"
+#include "qat_sym_session.h"
+#include "qat_sym_pmd.h"
+
+/** Frees a context previously created.
+ * Depends on openssl libcrypto.
+ */
+static void
+bpi_cipher_ctx_free(void *bpi_ctx)
+{
+	if (bpi_ctx != NULL)
+		EVP_CIPHER_CTX_free((EVP_CIPHER_CTX *)bpi_ctx);
+}
+
+/** Creates a context in either AES or DES in ECB mode.
+ * Depends on openssl libcrypto.
+ */
+static int
+bpi_cipher_ctx_init(enum rte_crypto_cipher_algorithm cryptodev_algo,
+		enum rte_crypto_cipher_operation direction __rte_unused,
+		uint8_t *key, void **ctx)
+{
+	const EVP_CIPHER *algo = NULL;
+	int ret;
+	*ctx = EVP_CIPHER_CTX_new();
+
+	if (*ctx == NULL) {
+		ret = -ENOMEM;
+		goto ctx_init_err;
+	}
+
+	if (cryptodev_algo == RTE_CRYPTO_CIPHER_DES_DOCSISBPI)
+		algo = EVP_des_ecb();
+	else
+		algo = EVP_aes_128_ecb();
+
+	/* IV will be ECB encrypted whether direction is encrypt or decrypt */
+	if (EVP_EncryptInit_ex(*ctx, algo, NULL, key, 0) != 1) {
+		ret = -EINVAL;
+		goto ctx_init_err;
+	}
+
+	return 0;
+
+ctx_init_err:
+	if (*ctx != NULL)
+		EVP_CIPHER_CTX_free(*ctx);
+	return ret;
+}
+
+static int
+qat_is_cipher_alg_supported(enum rte_crypto_cipher_algorithm algo,
+		struct qat_sym_dev_private *internals)
+{
+	int i = 0;
+	const struct rte_cryptodev_capabilities *capability;
+
+	while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
+			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+			continue;
+
+		if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
+			continue;
+
+		if (capability->sym.cipher.algo == algo)
+			return 1;
+	}
+	return 0;
+}
+
+static int
+qat_is_auth_alg_supported(enum rte_crypto_auth_algorithm algo,
+		struct qat_sym_dev_private *internals)
+{
+	int i = 0;
+	const struct rte_cryptodev_capabilities *capability;
+
+	while ((capability = &(internals->qat_dev_capabilities[i++]))->op !=
+			RTE_CRYPTO_OP_TYPE_UNDEFINED) {
+		if (capability->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+			continue;
+
+		if (capability->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)
+			continue;
+
+		if (capability->sym.auth.algo == algo)
+			return 1;
+	}
+	return 0;
+}
+
+void
+qat_sym_session_clear(struct rte_cryptodev *dev,
+		struct rte_cryptodev_sym_session *sess)
+{
+	uint8_t index = dev->driver_id;
+	void *sess_priv = get_sym_session_private_data(sess, index);
+	struct qat_sym_session *s = (struct qat_sym_session *)sess_priv;
+
+	if (sess_priv) {
+		if (s->bpi_ctx)
+			bpi_cipher_ctx_free(s->bpi_ctx);
+		memset(s, 0, qat_sym_session_get_private_size(dev));
+		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+		set_sym_session_private_data(sess, index, NULL);
+		rte_mempool_put(sess_mp, sess_priv);
+	}
+}
+
+static int
+qat_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+{
+	/* Cipher Only */
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
+		return ICP_QAT_FW_LA_CMD_CIPHER;
+
+	/* Authentication Only */
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
+		return ICP_QAT_FW_LA_CMD_AUTH;
+
+	/* AEAD */
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+		/* AES-GCM and AES-CCM work in different directions:
+		 * GCM first encrypts and then generates the hash, whereas
+		 * AES-CCM first generates the hash and then encrypts.
+		 * A similar relation applies to decryption.
+		 */
+		if (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT)
+			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+				return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+			else
+				return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+		else
+			if (xform->aead.algo == RTE_CRYPTO_AEAD_AES_GCM)
+				return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+			else
+				return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+	}
+
+	if (xform->next == NULL)
+		return -1;
+
+	/* Cipher then Authenticate */
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+		return ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+
+	/* Authenticate then Cipher */
+	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+			xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+		return ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+
+	return -1;
+}
+
+static struct rte_crypto_auth_xform *
+qat_get_auth_xform(struct rte_crypto_sym_xform *xform)
+{
+	do {
+		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+			return &xform->auth;
+
+		xform = xform->next;
+	} while (xform);
+
+	return NULL;
+}
+
+static struct rte_crypto_cipher_xform *
+qat_get_cipher_xform(struct rte_crypto_sym_xform *xform)
+{
+	do {
+		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+			return &xform->cipher;
+
+		xform = xform->next;
+	} while (xform);
+
+	return NULL;
+}
+
+int
+qat_sym_session_configure_cipher(struct rte_cryptodev *dev,
+		struct rte_crypto_sym_xform *xform,
+		struct qat_sym_session *session)
+{
+	struct qat_sym_dev_private *internals = dev->data->dev_private;
+	struct rte_crypto_cipher_xform *cipher_xform = NULL;
+	int ret;
+
+	/* Get cipher xform from crypto xform chain */
+	cipher_xform = qat_get_cipher_xform(xform);
+
+	session->cipher_iv.offset = cipher_xform->iv.offset;
+	session->cipher_iv.length = cipher_xform->iv.length;
+
+	switch (cipher_xform->algo) {
+	case RTE_CRYPTO_CIPHER_AES_CBC:
+		if (qat_sym_validate_aes_key(cipher_xform->key.length,
+				&session->qat_cipher_alg) != 0) {
+			QAT_LOG(ERR, "Invalid AES cipher key size");
+			ret = -EINVAL;
+			goto error_out;
+		}
+		session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE;
+		break;
+	case RTE_CRYPTO_CIPHER_AES_CTR:
+		if (qat_sym_validate_aes_key(cipher_xform->key.length,
+				&session->qat_cipher_alg) != 0) {
+			QAT_LOG(ERR, "Invalid AES cipher key size");
+			ret = -EINVAL;
+			goto error_out;
+		}
+		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+		break;
+	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
+		if (qat_sym_validate_snow3g_key(cipher_xform->key.length,
&session->qat_cipher_alg) != 0) { + QAT_LOG(ERR, "Invalid SNOW 3G cipher key size"); + ret = -EINVAL; + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE; + break; + case RTE_CRYPTO_CIPHER_NULL: + session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE; + break; + case RTE_CRYPTO_CIPHER_KASUMI_F8: + if (qat_sym_validate_kasumi_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + QAT_LOG(ERR, "Invalid KASUMI cipher key size"); + ret = -EINVAL; + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_F8_MODE; + break; + case RTE_CRYPTO_CIPHER_3DES_CBC: + if (qat_sym_validate_3des_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + QAT_LOG(ERR, "Invalid 3DES cipher key size"); + ret = -EINVAL; + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; + break; + case RTE_CRYPTO_CIPHER_DES_CBC: + if (qat_sym_validate_des_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + QAT_LOG(ERR, "Invalid DES cipher key size"); + ret = -EINVAL; + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; + break; + case RTE_CRYPTO_CIPHER_3DES_CTR: + if (qat_sym_validate_3des_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + QAT_LOG(ERR, "Invalid 3DES cipher key size"); + ret = -EINVAL; + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE; + break; + case RTE_CRYPTO_CIPHER_DES_DOCSISBPI: + ret = bpi_cipher_ctx_init( + cipher_xform->algo, + cipher_xform->op, + cipher_xform->key.data, + &session->bpi_ctx); + if (ret != 0) { + QAT_LOG(ERR, "failed to create DES BPI ctx"); + goto error_out; + } + if (qat_sym_validate_des_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + QAT_LOG(ERR, "Invalid DES cipher key size"); + ret = -EINVAL; + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; + break; + case RTE_CRYPTO_CIPHER_AES_DOCSISBPI: + ret = bpi_cipher_ctx_init( + cipher_xform->algo, + cipher_xform->op, + cipher_xform->key.data, + &session->bpi_ctx); + if (ret != 0) { + QAT_LOG(ERR, "failed to create AES BPI ctx"); + goto error_out; + } + if (qat_sym_validate_aes_docsisbpi_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + QAT_LOG(ERR, "Invalid AES DOCSISBPI key size"); + ret = -EINVAL; + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CBC_MODE; + break; + case RTE_CRYPTO_CIPHER_ZUC_EEA3: + if (!qat_is_cipher_alg_supported( + cipher_xform->algo, internals)) { + QAT_LOG(ERR, "%s not supported on this device", + rte_crypto_cipher_algorithm_strings + [cipher_xform->algo]); + ret = -ENOTSUP; + goto error_out; + } + if (qat_sym_validate_zuc_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + QAT_LOG(ERR, "Invalid ZUC cipher key size"); + ret = -EINVAL; + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_ECB_MODE; + break; + case RTE_CRYPTO_CIPHER_3DES_ECB: + case RTE_CRYPTO_CIPHER_AES_ECB: + case RTE_CRYPTO_CIPHER_AES_F8: + case RTE_CRYPTO_CIPHER_AES_XTS: + case RTE_CRYPTO_CIPHER_ARC4: + QAT_LOG(ERR, "Crypto QAT PMD: Unsupported Cipher alg %u", + cipher_xform->algo); + ret = -ENOTSUP; + goto error_out; + default: + QAT_LOG(ERR, "Crypto: Undefined Cipher specified %u\n", + cipher_xform->algo); + ret = -EINVAL; + goto error_out; + } + + if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) + session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT; + else + session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT; + + if (qat_sym_session_aead_create_cd_cipher(session, + cipher_xform->key.data, + cipher_xform->key.length)) { + ret = 
-EINVAL;
+		goto error_out;
+	}
+
+	return 0;
+
+error_out:
+	if (session->bpi_ctx) {
+		bpi_cipher_ctx_free(session->bpi_ctx);
+		session->bpi_ctx = NULL;
+	}
+	return ret;
+}
+
+int
+qat_sym_session_configure(struct rte_cryptodev *dev,
+		struct rte_crypto_sym_xform *xform,
+		struct rte_cryptodev_sym_session *sess,
+		struct rte_mempool *mempool)
+{
+	void *sess_private_data;
+	int ret;
+
+	if (rte_mempool_get(mempool, &sess_private_data)) {
+		CDEV_LOG_ERR(
+			"Couldn't get object from session mempool");
+		return -ENOMEM;
+	}
+
+	ret = qat_sym_session_set_parameters(dev, xform, sess_private_data);
+	if (ret != 0) {
+		QAT_LOG(ERR,
+		    "Crypto QAT PMD: failed to configure session parameters");
+
+		/* Return session to mempool */
+		rte_mempool_put(mempool, sess_private_data);
+		return ret;
+	}
+
+	set_sym_session_private_data(sess, dev->driver_id,
+		sess_private_data);
+
+	return 0;
+}
+
+int
+qat_sym_session_set_parameters(struct rte_cryptodev *dev,
+		struct rte_crypto_sym_xform *xform, void *session_private)
+{
+	struct qat_sym_session *session = session_private;
+	int ret;
+	int qat_cmd_id;
+
+	/* Set context descriptor physical address */
+	session->cd_paddr = rte_mempool_virt2iova(session) +
+			offsetof(struct qat_sym_session, cd);
+
+	session->min_qat_dev_gen = QAT_GEN1;
+
+	/* Get requested QAT command id */
+	qat_cmd_id = qat_get_cmd_id(xform);
+	if (qat_cmd_id < 0 || qat_cmd_id >= ICP_QAT_FW_LA_CMD_DELIMITER) {
+		QAT_LOG(ERR, "Unsupported xform chain requested");
+		return -ENOTSUP;
+	}
+	session->qat_cmd = (enum icp_qat_fw_la_cmd_id)qat_cmd_id;
+	switch (session->qat_cmd) {
+	case ICP_QAT_FW_LA_CMD_CIPHER:
+		ret = qat_sym_session_configure_cipher(dev, xform, session);
+		if (ret < 0)
+			return ret;
+		break;
+	case ICP_QAT_FW_LA_CMD_AUTH:
+		ret = qat_sym_session_configure_auth(dev, xform, session);
+		if (ret < 0)
+			return ret;
+		break;
+	case ICP_QAT_FW_LA_CMD_CIPHER_HASH:
+		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+			ret = qat_sym_session_configure_aead(xform,
+					session);
+			if (ret < 0)
+				return ret;
+		} else {
+			ret = qat_sym_session_configure_cipher(dev,
+					xform, session);
+			if (ret < 0)
+				return ret;
+			ret = qat_sym_session_configure_auth(dev,
+					xform, session);
+			if (ret < 0)
+				return ret;
+		}
+		break;
+	case ICP_QAT_FW_LA_CMD_HASH_CIPHER:
+		if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+			ret = qat_sym_session_configure_aead(xform,
+					session);
+			if (ret < 0)
+				return ret;
+		} else {
+			ret = qat_sym_session_configure_auth(dev,
+					xform, session);
+			if (ret < 0)
+				return ret;
+			ret = qat_sym_session_configure_cipher(dev,
+					xform, session);
+			if (ret < 0)
+				return ret;
+		}
+		break;
+	case ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM:
+	case ICP_QAT_FW_LA_CMD_TRNG_TEST:
+	case ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE:
+	case ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE:
+	case ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE:
+	case ICP_QAT_FW_LA_CMD_MGF1:
+	case ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP:
+	case ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP:
+	case ICP_QAT_FW_LA_CMD_DELIMITER:
+		QAT_LOG(ERR, "Unsupported Service %u",
+				session->qat_cmd);
+		return -ENOTSUP;
+	default:
+		QAT_LOG(ERR, "Unsupported Service %u",
+				session->qat_cmd);
+		return -ENOTSUP;
+	}
+
+	return 0;
+}
+
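[Editorial note: the two entry points above are normally reached through the public cryptodev session API rather than called directly. A minimal sketch from the application's point of view (dev_id, session_mp, session_priv_mp and cipher_xform are assumptions set up by the caller; this is not part of the patch):]

	struct rte_cryptodev_sym_session *sess =
		rte_cryptodev_sym_session_create(session_mp);

	if (sess == NULL)
		return -ENOMEM;
	/* Reaches qat_sym_session_configure() through dev->dev_ops */
	if (rte_cryptodev_sym_session_init(dev_id, sess, &cipher_xform,
			session_priv_mp) != 0) {
		rte_cryptodev_sym_session_free(sess);
		return -EINVAL;
	}

+int
+qat_sym_session_configure_auth(struct rte_cryptodev *dev,
+				struct rte_crypto_sym_xform *xform,
+				struct qat_sym_session *session)
+{
+	struct rte_crypto_auth_xform *auth_xform = qat_get_auth_xform(xform);
+	struct qat_sym_dev_private *internals = dev->data->dev_private;
+	uint8_t *key_data = auth_xform->key.data;
+	uint8_t key_length =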
auth_xform->key.length;
+
+	switch (auth_xform->algo) {
+	case RTE_CRYPTO_AUTH_SHA1_HMAC:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA1;
+		break;
+	case RTE_CRYPTO_AUTH_SHA224_HMAC:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA224;
+		break;
+	case RTE_CRYPTO_AUTH_SHA256_HMAC:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA256;
+		break;
+	case RTE_CRYPTO_AUTH_SHA384_HMAC:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA384;
+		break;
+	case RTE_CRYPTO_AUTH_SHA512_HMAC:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SHA512;
+		break;
+	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC;
+		break;
+	case RTE_CRYPTO_AUTH_AES_GMAC:
+		if (qat_sym_validate_aes_key(auth_xform->key.length,
+				&session->qat_cipher_alg) != 0) {
+			QAT_LOG(ERR, "Invalid AES key size");
+			return -EINVAL;
+		}
+		session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE;
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128;
+
+		break;
+	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2;
+		break;
+	case RTE_CRYPTO_AUTH_MD5_HMAC:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_MD5;
+		break;
+	case RTE_CRYPTO_AUTH_NULL:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_NULL;
+		break;
+	case RTE_CRYPTO_AUTH_KASUMI_F9:
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_KASUMI_F9;
+		break;
+	case RTE_CRYPTO_AUTH_ZUC_EIA3:
+		if (!qat_is_auth_alg_supported(auth_xform->algo, internals)) {
+			QAT_LOG(ERR, "%s not supported on this device",
+				rte_crypto_auth_algorithm_strings
+				[auth_xform->algo]);
+			return -ENOTSUP;
+		}
+		session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3;
+		break;
+	case RTE_CRYPTO_AUTH_SHA1:
+	case RTE_CRYPTO_AUTH_SHA256:
+	case RTE_CRYPTO_AUTH_SHA512:
+	case RTE_CRYPTO_AUTH_SHA224:
+	case RTE_CRYPTO_AUTH_SHA384:
+	case RTE_CRYPTO_AUTH_MD5:
+	case RTE_CRYPTO_AUTH_AES_CMAC:
+	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
+		QAT_LOG(ERR, "Crypto: Unsupported hash alg %u",
+				auth_xform->algo);
+		return -ENOTSUP;
+	default:
+		QAT_LOG(ERR, "Crypto: Undefined Hash algo %u specified",
+				auth_xform->algo);
+		return -EINVAL;
+	}
+
+	session->auth_iv.offset = auth_xform->iv.offset;
+	session->auth_iv.length = auth_xform->iv.length;
+
+	if (auth_xform->algo == RTE_CRYPTO_AUTH_AES_GMAC) {
+		if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE) {
+			session->qat_cmd = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+			session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT;
+			/*
+			 * It needs to create cipher desc content first,
+			 * then authentication
+			 */
+
+			if (qat_sym_session_aead_create_cd_cipher(session,
+						auth_xform->key.data,
+						auth_xform->key.length))
+				return -EINVAL;
+
+			if (qat_sym_session_aead_create_cd_auth(session,
+						key_data,
+						key_length,
+						0,
+						auth_xform->digest_length,
+						auth_xform->op))
+				return -EINVAL;
+		} else {
+			session->qat_cmd = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+			session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT;
+			/*
+			 * It needs to create authentication desc content first,
+			 * then cipher
+			 */
+
+			if (qat_sym_session_aead_create_cd_auth(session,
+						key_data,
+						key_length,
+						0,
+						auth_xform->digest_length,
+						auth_xform->op))
+				return -EINVAL;
+
+			if (qat_sym_session_aead_create_cd_cipher(session,
+						auth_xform->key.data,
+						auth_xform->key.length))
+				return -EINVAL;
+		}
+		/* Restore to authentication only */
+		session->qat_cmd = ICP_QAT_FW_LA_CMD_AUTH;
+	} else {
+		if (qat_sym_session_aead_create_cd_auth(session,
+				key_data,
+				key_length,
+				0,
+				auth_xform->digest_length,
+				auth_xform->op))
+			return -EINVAL;
+	}
+
+	session->digest_length =
auth_xform->digest_length; + return 0; +} + +int +qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform, + struct qat_sym_session *session) +{ + struct rte_crypto_aead_xform *aead_xform = &xform->aead; + enum rte_crypto_auth_operation crypto_operation; + + /* + * Store AEAD IV parameters as cipher IV, + * to avoid unnecessary memory usage + */ + session->cipher_iv.offset = xform->aead.iv.offset; + session->cipher_iv.length = xform->aead.iv.length; + + switch (aead_xform->algo) { + case RTE_CRYPTO_AEAD_AES_GCM: + if (qat_sym_validate_aes_key(aead_xform->key.length, + &session->qat_cipher_alg) != 0) { + QAT_LOG(ERR, "Invalid AES key size"); + return -EINVAL; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE; + session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_GALOIS_128; + break; + case RTE_CRYPTO_AEAD_AES_CCM: + if (qat_sym_validate_aes_key(aead_xform->key.length, + &session->qat_cipher_alg) != 0) { + QAT_LOG(ERR, "Invalid AES key size"); + return -EINVAL; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE; + session->qat_hash_alg = ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC; + break; + default: + QAT_LOG(ERR, "Crypto: Undefined AEAD specified %u\n", + aead_xform->algo); + return -EINVAL; + } + + if ((aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT && + aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM) || + (aead_xform->op == RTE_CRYPTO_AEAD_OP_DECRYPT && + aead_xform->algo == RTE_CRYPTO_AEAD_AES_CCM)) { + session->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT; + /* + * It needs to create cipher desc content first, + * then authentication + */ + crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ? + RTE_CRYPTO_AUTH_OP_GENERATE : RTE_CRYPTO_AUTH_OP_VERIFY; + + if (qat_sym_session_aead_create_cd_cipher(session, + aead_xform->key.data, + aead_xform->key.length)) + return -EINVAL; + + if (qat_sym_session_aead_create_cd_auth(session, + aead_xform->key.data, + aead_xform->key.length, + aead_xform->aad_length, + aead_xform->digest_length, + crypto_operation)) + return -EINVAL; + } else { + session->qat_dir = ICP_QAT_HW_CIPHER_DECRYPT; + /* + * It needs to create authentication desc content first, + * then cipher + */ + + crypto_operation = aead_xform->algo == RTE_CRYPTO_AEAD_AES_GCM ? 
+ RTE_CRYPTO_AUTH_OP_VERIFY : RTE_CRYPTO_AUTH_OP_GENERATE; + + if (qat_sym_session_aead_create_cd_auth(session, + aead_xform->key.data, + aead_xform->key.length, + aead_xform->aad_length, + aead_xform->digest_length, + crypto_operation)) + return -EINVAL; + + if (qat_sym_session_aead_create_cd_cipher(session, + aead_xform->key.data, + aead_xform->key.length)) + return -EINVAL; + } + + session->digest_length = aead_xform->digest_length; + return 0; +} + +unsigned int qat_sym_session_get_private_size( + struct rte_cryptodev *dev __rte_unused) +{ + return RTE_ALIGN_CEIL(sizeof(struct qat_sym_session), 8); +} + +/* returns block size in bytes per cipher algo */ +int qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg) +{ + switch (qat_cipher_alg) { + case ICP_QAT_HW_CIPHER_ALGO_DES: + return ICP_QAT_HW_DES_BLK_SZ; + case ICP_QAT_HW_CIPHER_ALGO_3DES: + return ICP_QAT_HW_3DES_BLK_SZ; + case ICP_QAT_HW_CIPHER_ALGO_AES128: + case ICP_QAT_HW_CIPHER_ALGO_AES192: + case ICP_QAT_HW_CIPHER_ALGO_AES256: + return ICP_QAT_HW_AES_BLK_SZ; + default: + QAT_LOG(ERR, "invalid block cipher alg %u", qat_cipher_alg); + return -EFAULT; + }; + return -EFAULT; +} + +/* + * Returns size in bytes per hash algo for state1 size field in cd_ctrl + * This is digest size rounded up to nearest quadword + */ +static int qat_hash_get_state1_size(enum icp_qat_hw_auth_algo qat_hash_alg) +{ + switch (qat_hash_alg) { + case ICP_QAT_HW_AUTH_ALGO_SHA1: + return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA1_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + case ICP_QAT_HW_AUTH_ALGO_SHA224: + return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA224_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + case ICP_QAT_HW_AUTH_ALGO_SHA256: + return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA256_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + case ICP_QAT_HW_AUTH_ALGO_SHA384: + return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA384_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + case ICP_QAT_HW_AUTH_ALGO_SHA512: + return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC: + return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + case ICP_QAT_HW_AUTH_ALGO_GALOIS_128: + case ICP_QAT_HW_AUTH_ALGO_GALOIS_64: + return QAT_HW_ROUND_UP(ICP_QAT_HW_GALOIS_128_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3: + return QAT_HW_ROUND_UP(ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2: + return QAT_HW_ROUND_UP(ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + case ICP_QAT_HW_AUTH_ALGO_MD5: + return QAT_HW_ROUND_UP(ICP_QAT_HW_MD5_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9: + return QAT_HW_ROUND_UP(ICP_QAT_HW_KASUMI_F9_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC: + return QAT_HW_ROUND_UP(ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + case ICP_QAT_HW_AUTH_ALGO_NULL: + return QAT_HW_ROUND_UP(ICP_QAT_HW_NULL_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + case ICP_QAT_HW_AUTH_ALGO_DELIMITER: + /* return maximum state1 size in this case */ + return QAT_HW_ROUND_UP(ICP_QAT_HW_SHA512_STATE1_SZ, + QAT_HW_DEFAULT_ALIGNMENT); + default: + QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg); + return -EFAULT; + }; + return -EFAULT; +} + +/* returns digest size in bytes per hash algo */ +static int qat_hash_get_digest_size(enum icp_qat_hw_auth_algo qat_hash_alg) +{ + switch (qat_hash_alg) { + case ICP_QAT_HW_AUTH_ALGO_SHA1: + return 
ICP_QAT_HW_SHA1_STATE1_SZ;
+	case ICP_QAT_HW_AUTH_ALGO_SHA224:
+		return ICP_QAT_HW_SHA224_STATE1_SZ;
+	case ICP_QAT_HW_AUTH_ALGO_SHA256:
+		return ICP_QAT_HW_SHA256_STATE1_SZ;
+	case ICP_QAT_HW_AUTH_ALGO_SHA384:
+		return ICP_QAT_HW_SHA384_STATE1_SZ;
+	case ICP_QAT_HW_AUTH_ALGO_SHA512:
+		return ICP_QAT_HW_SHA512_STATE1_SZ;
+	case ICP_QAT_HW_AUTH_ALGO_MD5:
+		return ICP_QAT_HW_MD5_STATE1_SZ;
+	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
+		/* return maximum digest size in this case */
+		return ICP_QAT_HW_SHA512_STATE1_SZ;
+	default:
+		QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
+		return -EFAULT;
+	};
+	return -EFAULT;
+}
+
+/* returns block size in bytes per hash algo */
+static int qat_hash_get_block_size(enum icp_qat_hw_auth_algo qat_hash_alg)
+{
+	switch (qat_hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SHA1:
+		return SHA_CBLOCK;
+	case ICP_QAT_HW_AUTH_ALGO_SHA224:
+		return SHA256_CBLOCK;
+	case ICP_QAT_HW_AUTH_ALGO_SHA256:
+		return SHA256_CBLOCK;
+	case ICP_QAT_HW_AUTH_ALGO_SHA384:
+		return SHA512_CBLOCK;
+	case ICP_QAT_HW_AUTH_ALGO_SHA512:
+		return SHA512_CBLOCK;
+	case ICP_QAT_HW_AUTH_ALGO_GALOIS_128:
+		return 16;
+	case ICP_QAT_HW_AUTH_ALGO_MD5:
+		return MD5_CBLOCK;
+	case ICP_QAT_HW_AUTH_ALGO_DELIMITER:
+		/* return maximum block size in this case */
+		return SHA512_CBLOCK;
+	default:
+		QAT_LOG(ERR, "invalid hash alg %u", qat_hash_alg);
+		return -EFAULT;
+	};
+	return -EFAULT;
+}
+
+static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA_CTX ctx;
+
+	if (!SHA1_Init(&ctx))
+		return -EFAULT;
+	SHA1_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA256_CTX ctx;
+
+	if (!SHA224_Init(&ctx))
+		return -EFAULT;
+	SHA256_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA256_CTX ctx;
+
+	if (!SHA256_Init(&ctx))
+		return -EFAULT;
+	SHA256_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx, SHA256_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA512_CTX ctx;
+
+	if (!SHA384_Init(&ctx))
+		return -EFAULT;
+	SHA512_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
+{
+	SHA512_CTX ctx;
+
+	if (!SHA512_Init(&ctx))
+		return -EFAULT;
+	SHA512_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx, SHA512_DIGEST_LENGTH);
+	return 0;
+}
+
+static int partial_hash_md5(uint8_t *data_in, uint8_t *data_out)
+{
+	MD5_CTX ctx;
+
+	if (!MD5_Init(&ctx))
+		return -EFAULT;
+	MD5_Transform(&ctx, data_in);
+	rte_memcpy(data_out, &ctx, MD5_DIGEST_LENGTH);
+
+	return 0;
+}
+
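[Editorial note: the single-block helpers above exist to serve the HMAC precompute further down. Recall the HMAC construction:

	HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m))

where K' is the key zero-padded to the block size of H, ipad is the block-sized repetition of 0x36 and opad of 0x5c. Because (K' ^ ipad) and (K' ^ opad) each occupy exactly one block, their compression-function output is a fixed chaining value per key; the driver stores those two values in the content descriptor so the hardware never has to reprocess the key blocks on every request. Each helper therefore runs exactly one *_Transform round, deliberately without length book-keeping, and copies the raw chaining variables out of the front of the context struct.]

+static int partial_hash_compute(enum icp_qat_hw_auth_algo hash_alg,
+			uint8_t *data_in,
+			uint8_t *data_out)
+{
+	int digest_size;
+	uint8_t digest[qat_hash_get_digest_size(
+			ICP_QAT_HW_AUTH_ALGO_DELIMITER)];
+	uint32_t *hash_state_out_be32;
+	uint64_t *hash_state_out_be64;
+	int i;
+
+	digest_size = qat_hash_get_digest_size(hash_alg);
+	if (digest_size <= 0)
+		return -EFAULT;
+
+	hash_state_out_be32 = (uint32_t *)data_out;
+	hash_state_out_be64 = (uint64_t *)data_out;
+
+	switch (hash_alg) {
+	case ICP_QAT_HW_AUTH_ALGO_SHA1:
+		if (partial_hash_sha1(data_in, digest))
+			return -EFAULT;
+		for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++)
+			*hash_state_out_be32 =
+				rte_bswap32(*(((uint32_t *)digest)+i));
+		break; +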
case ICP_QAT_HW_AUTH_ALGO_SHA224: + if (partial_hash_sha224(data_in, digest)) + return -EFAULT; + for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++) + *hash_state_out_be32 = + rte_bswap32(*(((uint32_t *)digest)+i)); + break; + case ICP_QAT_HW_AUTH_ALGO_SHA256: + if (partial_hash_sha256(data_in, digest)) + return -EFAULT; + for (i = 0; i < digest_size >> 2; i++, hash_state_out_be32++) + *hash_state_out_be32 = + rte_bswap32(*(((uint32_t *)digest)+i)); + break; + case ICP_QAT_HW_AUTH_ALGO_SHA384: + if (partial_hash_sha384(data_in, digest)) + return -EFAULT; + for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++) + *hash_state_out_be64 = + rte_bswap64(*(((uint64_t *)digest)+i)); + break; + case ICP_QAT_HW_AUTH_ALGO_SHA512: + if (partial_hash_sha512(data_in, digest)) + return -EFAULT; + for (i = 0; i < digest_size >> 3; i++, hash_state_out_be64++) + *hash_state_out_be64 = + rte_bswap64(*(((uint64_t *)digest)+i)); + break; + case ICP_QAT_HW_AUTH_ALGO_MD5: + if (partial_hash_md5(data_in, data_out)) + return -EFAULT; + break; + default: + QAT_LOG(ERR, "invalid hash alg %u", hash_alg); + return -EFAULT; + } + + return 0; +} +#define HMAC_IPAD_VALUE 0x36 +#define HMAC_OPAD_VALUE 0x5c +#define HASH_XCBC_PRECOMP_KEY_NUM 3 + +static int qat_sym_do_precomputes(enum icp_qat_hw_auth_algo hash_alg, + const uint8_t *auth_key, + uint16_t auth_keylen, + uint8_t *p_state_buf, + uint16_t *p_state_len) +{ + int block_size; + uint8_t ipad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)]; + uint8_t opad[qat_hash_get_block_size(ICP_QAT_HW_AUTH_ALGO_DELIMITER)]; + int i; + + if (hash_alg == ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC) { + static uint8_t qat_aes_xcbc_key_seed[ + ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ] = { + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + }; + + uint8_t *in = NULL; + uint8_t *out = p_state_buf; + int x; + AES_KEY enc_key; + + in = rte_zmalloc("working mem for key", + ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ, 16); + if (in == NULL) { + QAT_LOG(ERR, "Failed to alloc memory"); + return -ENOMEM; + } + + rte_memcpy(in, qat_aes_xcbc_key_seed, + ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ); + for (x = 0; x < HASH_XCBC_PRECOMP_KEY_NUM; x++) { + if (AES_set_encrypt_key(auth_key, auth_keylen << 3, + &enc_key) != 0) { + rte_free(in - + (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ)); + memset(out - + (x * ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ), + 0, ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ); + return -EFAULT; + } + AES_encrypt(in, out, &enc_key); + in += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ; + out += ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ; + } + *p_state_len = ICP_QAT_HW_AES_XCBC_MAC_STATE2_SZ; + rte_free(in - x*ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ); + return 0; + } else if ((hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_128) || + (hash_alg == ICP_QAT_HW_AUTH_ALGO_GALOIS_64)) { + uint8_t *in = NULL; + uint8_t *out = p_state_buf; + AES_KEY enc_key; + + memset(p_state_buf, 0, ICP_QAT_HW_GALOIS_H_SZ + + ICP_QAT_HW_GALOIS_LEN_A_SZ + + ICP_QAT_HW_GALOIS_E_CTR0_SZ); + in = rte_zmalloc("working mem for key", + ICP_QAT_HW_GALOIS_H_SZ, 16); + if (in == NULL) { + QAT_LOG(ERR, "Failed to alloc memory"); + return -ENOMEM; + } + + memset(in, 0, ICP_QAT_HW_GALOIS_H_SZ); + if (AES_set_encrypt_key(auth_key, auth_keylen << 3, + &enc_key) != 0) { + return -EFAULT; + } + AES_encrypt(in, out, &enc_key); + *p_state_len = 
ICP_QAT_HW_GALOIS_H_SZ +
+				ICP_QAT_HW_GALOIS_LEN_A_SZ +
+				ICP_QAT_HW_GALOIS_E_CTR0_SZ;
+		rte_free(in);
+		return 0;
+	}
+
+	block_size = qat_hash_get_block_size(hash_alg);
+	if (block_size <= 0)
+		return -EFAULT;
+	/* init ipad and opad from key and xor with fixed values */
+	memset(ipad, 0, block_size);
+	memset(opad, 0, block_size);
+
+	if (auth_keylen > (unsigned int)block_size) {
+		QAT_LOG(ERR, "invalid keylen %u", auth_keylen);
+		return -EFAULT;
+	}
+	rte_memcpy(ipad, auth_key, auth_keylen);
+	rte_memcpy(opad, auth_key, auth_keylen);
+
+	for (i = 0; i < block_size; i++) {
+		uint8_t *ipad_ptr = ipad + i;
+		uint8_t *opad_ptr = opad + i;
+		*ipad_ptr ^= HMAC_IPAD_VALUE;
+		*opad_ptr ^= HMAC_OPAD_VALUE;
+	}
+
+	/* do partial hash of ipad and copy to state1 */
+	if (partial_hash_compute(hash_alg, ipad, p_state_buf)) {
+		memset(ipad, 0, block_size);
+		memset(opad, 0, block_size);
+		QAT_LOG(ERR, "ipad precompute failed");
+		return -EFAULT;
+	}
+
+	/*
+	 * State len is a multiple of 8, so may be larger than the digest.
+	 * Put the partial hash of opad state_len bytes after state1
+	 */
+	*p_state_len = qat_hash_get_state1_size(hash_alg);
+	if (partial_hash_compute(hash_alg, opad, p_state_buf + *p_state_len)) {
+		memset(ipad, 0, block_size);
+		memset(opad, 0, block_size);
+		QAT_LOG(ERR, "opad precompute failed");
+		return -EFAULT;
+	}
+
+	/* don't leave data lying around */
+	memset(ipad, 0, block_size);
+	memset(opad, 0, block_size);
+	return 0;
+}
+
+static void
+qat_sym_session_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
+					enum qat_sym_proto_flag proto_flags)
+{
+	header->hdr_flags =
+		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+	header->comn_req_flags =
+		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
+					QAT_COMN_PTR_TYPE_FLAT);
+	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
+				  ICP_QAT_FW_LA_PARTIAL_NONE);
+	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
+					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
+
+	switch (proto_flags) {
+	case QAT_CRYPTO_PROTO_FLAG_NONE:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_NO_PROTO);
+		break;
+	case QAT_CRYPTO_PROTO_FLAG_CCM:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_CCM_PROTO);
+		break;
+	case QAT_CRYPTO_PROTO_FLAG_GCM:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_GCM_PROTO);
+		break;
+	case QAT_CRYPTO_PROTO_FLAG_SNOW3G:
+		ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_SNOW_3G_PROTO);
+		break;
+	case QAT_CRYPTO_PROTO_FLAG_ZUC:
+		ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_ZUC_3G_PROTO);
+		break;
+	}
+
+	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
+					   ICP_QAT_FW_LA_NO_UPDATE_STATE);
+	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+					ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER);
+}
+
+/*
+ * SNOW 3G and ZUC must never use this function; they set their
+ * protocol flags in the cipher and auth parts of the content
+ * descriptor building functions instead.
+ */
+static enum qat_sym_proto_flag
+qat_get_crypto_proto_flag(uint16_t flags)
+{
+	int proto = ICP_QAT_FW_LA_PROTO_GET(flags);
+	enum qat_sym_proto_flag qat_proto_flag =
+			QAT_CRYPTO_PROTO_FLAG_NONE;
+
+	switch (proto) {
+	case ICP_QAT_FW_LA_GCM_PROTO:
+		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM;
+		break;
+	case ICP_QAT_FW_LA_CCM_PROTO:
+		qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM;
+		break;
+	}
+
+	return qat_proto_flag;
+}
+
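[Editorial note: the ipad half of the HMAC precompute above can be reproduced standalone. A minimal sketch, assuming OpenSSL 1.x and C99; hmac_sha256_ipad_state() is an invented name, and unlike the driver the result is left in native byte order (partial_hash_compute() above additionally byte-swaps each word for the hardware):]

	#include <stdint.h>
	#include <string.h>
	#include <openssl/sha.h>

	static int
	hmac_sha256_ipad_state(const uint8_t *key, size_t klen,
			uint8_t state[SHA256_DIGEST_LENGTH])
	{
		uint8_t ipad[SHA256_CBLOCK] = { 0 };
		SHA256_CTX ctx;
		size_t i;

		if (klen > SHA256_CBLOCK)
			return -1;	/* full HMAC would pre-hash long keys */

		memcpy(ipad, key, klen);
		for (i = 0; i < SHA256_CBLOCK; i++)
			ipad[i] ^= 0x36;	/* HMAC_IPAD_VALUE */

		if (!SHA256_Init(&ctx))
			return -1;
		/* One compression round, deliberately without length
		 * book-keeping, exactly like partial_hash_sha256(). */
		SHA256_Transform(&ctx, ipad);
		/* SHA256_CTX begins with the eight chaining words, so the
		 * state can be copied from the front of the struct. */
		memcpy(state, &ctx, SHA256_DIGEST_LENGTH);
		return 0;
	}

+int qat_sym_session_aead_create_cd_cipher(struct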
qat_sym_session *cdesc, + uint8_t *cipherkey, + uint32_t cipherkeylen) +{ + struct icp_qat_hw_cipher_algo_blk *cipher; + struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req; + struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars; + struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr; + void *ptr = &req_tmpl->cd_ctrl; + struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr; + struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr; + enum icp_qat_hw_cipher_convert key_convert; + enum qat_sym_proto_flag qat_proto_flag = + QAT_CRYPTO_PROTO_FLAG_NONE; + uint32_t total_key_size; + uint16_t cipher_offset, cd_size; + uint32_t wordIndex = 0; + uint32_t *temp_key = NULL; + + if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER) { + cd_pars->u.s.content_desc_addr = cdesc->cd_paddr; + ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, + ICP_QAT_FW_SLICE_CIPHER); + ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, + ICP_QAT_FW_SLICE_DRAM_WR); + ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_NO_RET_AUTH_RES); + ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags, + ICP_QAT_FW_LA_NO_CMP_AUTH_RES); + cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd; + } else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_CIPHER_HASH) { + cd_pars->u.s.content_desc_addr = cdesc->cd_paddr; + ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, + ICP_QAT_FW_SLICE_CIPHER); + ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, + ICP_QAT_FW_SLICE_AUTH); + ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, + ICP_QAT_FW_SLICE_AUTH); + ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, + ICP_QAT_FW_SLICE_DRAM_WR); + cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd; + } else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_HASH_CIPHER) { + QAT_LOG(ERR, "Invalid param, must be a cipher command."); + return -EFAULT; + } + + if (cdesc->qat_mode == ICP_QAT_HW_CIPHER_CTR_MODE) { + /* + * CTR Streaming ciphers are a special case. 
Decrypt = encrypt + * Overriding default values previously set + */ + cdesc->qat_dir = ICP_QAT_HW_CIPHER_ENCRYPT; + key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT; + } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 + || cdesc->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) + key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT; + else if (cdesc->qat_dir == ICP_QAT_HW_CIPHER_ENCRYPT) + key_convert = ICP_QAT_HW_CIPHER_NO_CONVERT; + else + key_convert = ICP_QAT_HW_CIPHER_KEY_CONVERT; + + if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2) { + total_key_size = ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ + + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ; + cipher_cd_ctrl->cipher_state_sz = + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3; + qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G; + + } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) { + total_key_size = ICP_QAT_HW_KASUMI_F8_KEY_SZ; + cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_KASUMI_BLK_SZ >> 3; + cipher_cd_ctrl->cipher_padding_sz = + (2 * ICP_QAT_HW_KASUMI_BLK_SZ) >> 3; + } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) { + total_key_size = ICP_QAT_HW_3DES_KEY_SZ; + cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_3DES_BLK_SZ >> 3; + qat_proto_flag = + qat_get_crypto_proto_flag(header->serv_specif_flags); + } else if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_DES) { + total_key_size = ICP_QAT_HW_DES_KEY_SZ; + cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_DES_BLK_SZ >> 3; + qat_proto_flag = + qat_get_crypto_proto_flag(header->serv_specif_flags); + } else if (cdesc->qat_cipher_alg == + ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3) { + total_key_size = ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ + + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ; + cipher_cd_ctrl->cipher_state_sz = + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3; + qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC; + cdesc->min_qat_dev_gen = QAT_GEN2; + } else { + total_key_size = cipherkeylen; + cipher_cd_ctrl->cipher_state_sz = ICP_QAT_HW_AES_BLK_SZ >> 3; + qat_proto_flag = + qat_get_crypto_proto_flag(header->serv_specif_flags); + } + cipher_cd_ctrl->cipher_key_sz = total_key_size >> 3; + cipher_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd); + cipher_cd_ctrl->cipher_cfg_offset = cipher_offset >> 3; + + header->service_cmd_id = cdesc->qat_cmd; + qat_sym_session_init_common_hdr(header, qat_proto_flag); + + cipher = (struct icp_qat_hw_cipher_algo_blk *)cdesc->cd_cur_ptr; + cipher->cipher_config.val = + ICP_QAT_HW_CIPHER_CONFIG_BUILD(cdesc->qat_mode, + cdesc->qat_cipher_alg, key_convert, + cdesc->qat_dir); + + if (cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_KASUMI) { + temp_key = (uint32_t *)(cdesc->cd_cur_ptr + + sizeof(struct icp_qat_hw_cipher_config) + + cipherkeylen); + memcpy(cipher->key, cipherkey, cipherkeylen); + memcpy(temp_key, cipherkey, cipherkeylen); + + /* XOR Key with KASUMI F8 key modifier at 4 bytes level */ + for (wordIndex = 0; wordIndex < (cipherkeylen >> 2); + wordIndex++) + temp_key[wordIndex] ^= KASUMI_F8_KEY_MODIFIER_4_BYTES; + + cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) + + cipherkeylen + cipherkeylen; + } else { + memcpy(cipher->key, cipherkey, cipherkeylen); + cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) + + cipherkeylen; + } + + if (total_key_size > cipherkeylen) { + uint32_t padding_size = total_key_size-cipherkeylen; + if ((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES) + && (cipherkeylen == QAT_3DES_KEY_SZ_OPT2)) { + /* K3 not provided so use K1 = K3*/ + memcpy(cdesc->cd_cur_ptr, cipherkey, padding_size); + } else if 
((cdesc->qat_cipher_alg == ICP_QAT_HW_CIPHER_ALGO_3DES)
+				&& (cipherkeylen == QAT_3DES_KEY_SZ_OPT3)) {
+			/* K2 and K3 not provided so use K1 = K2 = K3 */
+			memcpy(cdesc->cd_cur_ptr, cipherkey,
+					cipherkeylen);
+			memcpy(cdesc->cd_cur_ptr+cipherkeylen,
+					cipherkey, cipherkeylen);
+		} else
+			memset(cdesc->cd_cur_ptr, 0, padding_size);
+
+		cdesc->cd_cur_ptr += padding_size;
+	}
+	cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd;
+	cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3;
+
+	return 0;
+}
+
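[Editorial note: a worked illustration of the 3DES key-padding rules above (key16 and key8 are placeholder buffers; not part of the patch). A 16-byte key (QAT_3DES_KEY_SZ_OPT2) is completed by reusing K1 as K3, and an 8-byte key (QAT_3DES_KEY_SZ_OPT3) by tripling K1 into the 24-byte hardware key:]

	uint8_t cd_key[QAT_3DES_KEY_SZ_OPT1];	/* 24-byte HW key */

	/* OPT2: caller supplied K1||K2 (16 bytes) -> K3 = K1 */
	memcpy(cd_key, key16, 16);
	memcpy(cd_key + 16, key16, 8);

	/* OPT3: caller supplied K1 (8 bytes) -> K1 = K2 = K3 */
	memcpy(cd_key, key8, 8);
	memcpy(cd_key + 8, key8, 8);
	memcpy(cd_key + 16, key8, 8);

+int qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc,
+			uint8_t *authkey,
+			uint32_t authkeylen,
+			uint32_t aad_length,
+			uint32_t digestsize,
+			unsigned int operation)
+{
+	struct icp_qat_hw_auth_setup *hash;
+	struct icp_qat_hw_cipher_algo_blk *cipherconfig;
+	struct icp_qat_fw_la_bulk_req *req_tmpl = &cdesc->fw_req;
+	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+	void *ptr = &req_tmpl->cd_ctrl;
+	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+	struct icp_qat_fw_la_auth_req_params *auth_param =
+		(struct icp_qat_fw_la_auth_req_params *)
+		((char *)&req_tmpl->serv_specif_rqpars +
+		sizeof(struct icp_qat_fw_la_cipher_req_params));
+	uint16_t state1_size = 0, state2_size = 0;
+	uint16_t hash_offset, cd_size;
+	uint32_t *aad_len = NULL;
+	uint32_t wordIndex = 0;
+	uint32_t *pTempKey;
+	enum qat_sym_proto_flag qat_proto_flag =
+		QAT_CRYPTO_PROTO_FLAG_NONE;
+
+	if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_AUTH) {
+		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+					ICP_QAT_FW_SLICE_AUTH);
+		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+					ICP_QAT_FW_SLICE_DRAM_WR);
+		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
+	} else if (cdesc->qat_cmd == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
+		ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl,
+					ICP_QAT_FW_SLICE_AUTH);
+		ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl,
+					ICP_QAT_FW_SLICE_CIPHER);
+		ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl,
+					ICP_QAT_FW_SLICE_CIPHER);
+		ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl,
+					ICP_QAT_FW_SLICE_DRAM_WR);
+		cdesc->cd_cur_ptr = (uint8_t *)&cdesc->cd;
+	} else if (cdesc->qat_cmd != ICP_QAT_FW_LA_CMD_CIPHER_HASH) {
+		QAT_LOG(ERR, "Invalid param, must be a hash command.");
+		return -EFAULT;
+	}
+
+	if (operation == RTE_CRYPTO_AUTH_OP_VERIFY) {
+		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_CMP_AUTH_RES);
+		cdesc->auth_op = ICP_QAT_HW_AUTH_VERIFY;
+	} else {
+		ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+					   ICP_QAT_FW_LA_RET_AUTH_RES);
+		ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+				ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+		cdesc->auth_op = ICP_QAT_HW_AUTH_GENERATE;
+	}
+
+	/*
+	 * Setup the inner hash config
+	 */
+	hash_offset = cdesc->cd_cur_ptr-((uint8_t *)&cdesc->cd);
+	hash = (struct icp_qat_hw_auth_setup *)cdesc->cd_cur_ptr;
+	hash->auth_config.reserved = 0;
+	hash->auth_config.config =
+			ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+				cdesc->qat_hash_alg, digestsize);
+
+	if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2
+		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_KASUMI_F9
+		|| cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3)
+		hash->auth_counter.counter = 0;
+	else
+		hash->auth_counter.counter = rte_bswap32(
+				qat_hash_get_block_size(cdesc->qat_hash_alg));
+
+	cdesc->cd_cur_ptr += sizeof(struct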
icp_qat_hw_auth_setup); + + /* + * cd_cur_ptr now points at the state1 information. + */ + switch (cdesc->qat_hash_alg) { + case ICP_QAT_HW_AUTH_ALGO_SHA1: + if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA1, + authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) { + QAT_LOG(ERR, "(SHA)precompute failed"); + return -EFAULT; + } + state2_size = RTE_ALIGN_CEIL(ICP_QAT_HW_SHA1_STATE2_SZ, 8); + break; + case ICP_QAT_HW_AUTH_ALGO_SHA224: + if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA224, + authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) { + QAT_LOG(ERR, "(SHA)precompute failed"); + return -EFAULT; + } + state2_size = ICP_QAT_HW_SHA224_STATE2_SZ; + break; + case ICP_QAT_HW_AUTH_ALGO_SHA256: + if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA256, + authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) { + QAT_LOG(ERR, "(SHA)precompute failed"); + return -EFAULT; + } + state2_size = ICP_QAT_HW_SHA256_STATE2_SZ; + break; + case ICP_QAT_HW_AUTH_ALGO_SHA384: + if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA384, + authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) { + QAT_LOG(ERR, "(SHA)precompute failed"); + return -EFAULT; + } + state2_size = ICP_QAT_HW_SHA384_STATE2_SZ; + break; + case ICP_QAT_HW_AUTH_ALGO_SHA512: + if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_SHA512, + authkey, authkeylen, cdesc->cd_cur_ptr, &state1_size)) { + QAT_LOG(ERR, "(SHA)precompute failed"); + return -EFAULT; + } + state2_size = ICP_QAT_HW_SHA512_STATE2_SZ; + break; + case ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC: + state1_size = ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ; + if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC, + authkey, authkeylen, cdesc->cd_cur_ptr + state1_size, + &state2_size)) { + QAT_LOG(ERR, "(XCBC)precompute failed"); + return -EFAULT; + } + break; + case ICP_QAT_HW_AUTH_ALGO_GALOIS_128: + case ICP_QAT_HW_AUTH_ALGO_GALOIS_64: + qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_GCM; + state1_size = ICP_QAT_HW_GALOIS_128_STATE1_SZ; + if (qat_sym_do_precomputes(cdesc->qat_hash_alg, + authkey, authkeylen, cdesc->cd_cur_ptr + state1_size, + &state2_size)) { + QAT_LOG(ERR, "(GCM)precompute failed"); + return -EFAULT; + } + /* + * Write (the length of AAD) into bytes 16-19 of state2 + * in big-endian format. 
This field is 8 bytes + */ + auth_param->u2.aad_sz = + RTE_ALIGN_CEIL(aad_length, 16); + auth_param->hash_state_sz = (auth_param->u2.aad_sz) >> 3; + + aad_len = (uint32_t *)(cdesc->cd_cur_ptr + + ICP_QAT_HW_GALOIS_128_STATE1_SZ + + ICP_QAT_HW_GALOIS_H_SZ); + *aad_len = rte_bswap32(aad_length); + cdesc->aad_len = aad_length; + break; + case ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2: + qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_SNOW3G; + state1_size = qat_hash_get_state1_size( + ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2); + state2_size = ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ; + memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size); + + cipherconfig = (struct icp_qat_hw_cipher_algo_blk *) + (cdesc->cd_cur_ptr + state1_size + state2_size); + cipherconfig->cipher_config.val = + ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_ECB_MODE, + ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2, + ICP_QAT_HW_CIPHER_KEY_CONVERT, + ICP_QAT_HW_CIPHER_ENCRYPT); + memcpy(cipherconfig->key, authkey, authkeylen); + memset(cipherconfig->key + authkeylen, + 0, ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ); + cdesc->cd_cur_ptr += sizeof(struct icp_qat_hw_cipher_config) + + authkeylen + ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ; + auth_param->hash_state_sz = ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ >> 3; + break; + case ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3: + hash->auth_config.config = + ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE0, + cdesc->qat_hash_alg, digestsize); + qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_ZUC; + state1_size = qat_hash_get_state1_size( + ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3); + state2_size = ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ; + memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size + + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ); + + memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen); + cdesc->cd_cur_ptr += state1_size + state2_size + + ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ; + auth_param->hash_state_sz = ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ >> 3; + cdesc->min_qat_dev_gen = QAT_GEN2; + + break; + case ICP_QAT_HW_AUTH_ALGO_MD5: + if (qat_sym_do_precomputes(ICP_QAT_HW_AUTH_ALGO_MD5, + authkey, authkeylen, cdesc->cd_cur_ptr, + &state1_size)) { + QAT_LOG(ERR, "(MD5)precompute failed"); + return -EFAULT; + } + state2_size = ICP_QAT_HW_MD5_STATE2_SZ; + break; + case ICP_QAT_HW_AUTH_ALGO_NULL: + state1_size = qat_hash_get_state1_size( + ICP_QAT_HW_AUTH_ALGO_NULL); + state2_size = ICP_QAT_HW_NULL_STATE2_SZ; + break; + case ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC: + qat_proto_flag = QAT_CRYPTO_PROTO_FLAG_CCM; + state1_size = qat_hash_get_state1_size( + ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC); + state2_size = ICP_QAT_HW_AES_CBC_MAC_KEY_SZ + + ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ; + + if (aad_length > 0) { + aad_length += ICP_QAT_HW_CCM_AAD_B0_LEN + + ICP_QAT_HW_CCM_AAD_LEN_INFO; + auth_param->u2.aad_sz = + RTE_ALIGN_CEIL(aad_length, + ICP_QAT_HW_CCM_AAD_ALIGNMENT); + } else { + auth_param->u2.aad_sz = ICP_QAT_HW_CCM_AAD_B0_LEN; + } + cdesc->aad_len = aad_length; + hash->auth_counter.counter = 0; + + hash_cd_ctrl->outer_prefix_sz = digestsize; + auth_param->hash_state_sz = digestsize; + + memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen); + break; + case ICP_QAT_HW_AUTH_ALGO_KASUMI_F9: + state1_size = qat_hash_get_state1_size( + ICP_QAT_HW_AUTH_ALGO_KASUMI_F9); + state2_size = ICP_QAT_HW_KASUMI_F9_STATE2_SZ; + memset(cdesc->cd_cur_ptr, 0, state1_size + state2_size); + pTempKey = (uint32_t *)(cdesc->cd_cur_ptr + state1_size + + authkeylen); + /* + * The Inner Hash Initial State2 block must contain IK + * (Initialisation Key), followed by IK XOR-ed with KM + * (Key Modifier): IK||(IK^KM). 
+ */ + /* write the auth key */ + memcpy(cdesc->cd_cur_ptr + state1_size, authkey, authkeylen); + /* initialise temp key with auth key */ + memcpy(pTempKey, authkey, authkeylen); + /* XOR Key with KASUMI F9 key modifier at 4 bytes level */ + for (wordIndex = 0; wordIndex < (authkeylen >> 2); wordIndex++) + pTempKey[wordIndex] ^= KASUMI_F9_KEY_MODIFIER_4_BYTES; + break; + default: + QAT_LOG(ERR, "Invalid HASH alg %u", cdesc->qat_hash_alg); + return -EFAULT; + } + + /* Request template setup */ + qat_sym_session_init_common_hdr(header, qat_proto_flag); + header->service_cmd_id = cdesc->qat_cmd; + + /* Auth CD config setup */ + hash_cd_ctrl->hash_cfg_offset = hash_offset >> 3; + hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED; + hash_cd_ctrl->inner_res_sz = digestsize; + hash_cd_ctrl->final_sz = digestsize; + hash_cd_ctrl->inner_state1_sz = state1_size; + auth_param->auth_res_sz = digestsize; + + hash_cd_ctrl->inner_state2_sz = state2_size; + hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset + + ((sizeof(struct icp_qat_hw_auth_setup) + + RTE_ALIGN_CEIL(hash_cd_ctrl->inner_state1_sz, 8)) + >> 3); + + cdesc->cd_cur_ptr += state1_size + state2_size; + cd_size = cdesc->cd_cur_ptr-(uint8_t *)&cdesc->cd; + + cd_pars->u.s.content_desc_addr = cdesc->cd_paddr; + cd_pars->u.s.content_desc_params_sz = RTE_ALIGN_CEIL(cd_size, 8) >> 3; + + return 0; +} + +int qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg) +{ + switch (key_len) { + case ICP_QAT_HW_AES_128_KEY_SZ: + *alg = ICP_QAT_HW_CIPHER_ALGO_AES128; + break; + case ICP_QAT_HW_AES_192_KEY_SZ: + *alg = ICP_QAT_HW_CIPHER_ALGO_AES192; + break; + case ICP_QAT_HW_AES_256_KEY_SZ: + *alg = ICP_QAT_HW_CIPHER_ALGO_AES256; + break; + default: + return -EINVAL; + } + return 0; +} + +int qat_sym_validate_aes_docsisbpi_key(int key_len, + enum icp_qat_hw_cipher_algo *alg) +{ + switch (key_len) { + case ICP_QAT_HW_AES_128_KEY_SZ: + *alg = ICP_QAT_HW_CIPHER_ALGO_AES128; + break; + default: + return -EINVAL; + } + return 0; +} + +int qat_sym_validate_snow3g_key(int key_len, enum icp_qat_hw_cipher_algo *alg) +{ + switch (key_len) { + case ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ: + *alg = ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2; + break; + default: + return -EINVAL; + } + return 0; +} + +int qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg) +{ + switch (key_len) { + case ICP_QAT_HW_KASUMI_KEY_SZ: + *alg = ICP_QAT_HW_CIPHER_ALGO_KASUMI; + break; + default: + return -EINVAL; + } + return 0; +} + +int qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg) +{ + switch (key_len) { + case ICP_QAT_HW_DES_KEY_SZ: + *alg = ICP_QAT_HW_CIPHER_ALGO_DES; + break; + default: + return -EINVAL; + } + return 0; +} + +int qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg) +{ + switch (key_len) { + case QAT_3DES_KEY_SZ_OPT1: + case QAT_3DES_KEY_SZ_OPT2: + case QAT_3DES_KEY_SZ_OPT3: + *alg = ICP_QAT_HW_CIPHER_ALGO_3DES; + break; + default: + return -EINVAL; + } + return 0; +} + +int qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg) +{ + switch (key_len) { + case ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ: + *alg = ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3; + break; + default: + return -EINVAL; + } + return 0; +} diff --git a/drivers/crypto/qat/qat_sym_session.h b/drivers/crypto/qat/qat_sym_session.h new file mode 100644 index 00000000..e8f51e5b --- /dev/null +++ b/drivers/crypto/qat/qat_sym_session.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 
2015-2018 Intel Corporation + */ +#ifndef _QAT_SYM_SESSION_H_ +#define _QAT_SYM_SESSION_H_ + +#include +#include + +#include "qat_common.h" +#include "icp_qat_hw.h" +#include "icp_qat_fw.h" +#include "icp_qat_fw_la.h" + +/* + * Key Modifier (KM) value used in KASUMI algorithm in F9 mode to XOR + * Integrity Key (IK) + */ +#define KASUMI_F9_KEY_MODIFIER_4_BYTES 0xAAAAAAAA + +#define KASUMI_F8_KEY_MODIFIER_4_BYTES 0x55555555 + +/* 3DES key sizes */ +#define QAT_3DES_KEY_SZ_OPT1 24 /* Keys are independent */ +#define QAT_3DES_KEY_SZ_OPT2 16 /* K3=K1 */ +#define QAT_3DES_KEY_SZ_OPT3 8 /* K1=K2=K3 */ + + +#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \ + ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \ + ICP_QAT_HW_CIPHER_NO_CONVERT, \ + ICP_QAT_HW_CIPHER_ENCRYPT) + +#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \ + ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \ + ICP_QAT_HW_CIPHER_KEY_CONVERT, \ + ICP_QAT_HW_CIPHER_DECRYPT) + +enum qat_sym_proto_flag { + QAT_CRYPTO_PROTO_FLAG_NONE = 0, + QAT_CRYPTO_PROTO_FLAG_CCM = 1, + QAT_CRYPTO_PROTO_FLAG_GCM = 2, + QAT_CRYPTO_PROTO_FLAG_SNOW3G = 3, + QAT_CRYPTO_PROTO_FLAG_ZUC = 4 +}; + +/* Common content descriptor */ +struct qat_sym_cd { + struct icp_qat_hw_cipher_algo_blk cipher; + struct icp_qat_hw_auth_algo_blk hash; +} __rte_packed __rte_cache_aligned; + +struct qat_sym_session { + enum icp_qat_fw_la_cmd_id qat_cmd; + enum icp_qat_hw_cipher_algo qat_cipher_alg; + enum icp_qat_hw_cipher_dir qat_dir; + enum icp_qat_hw_cipher_mode qat_mode; + enum icp_qat_hw_auth_algo qat_hash_alg; + enum icp_qat_hw_auth_op auth_op; + void *bpi_ctx; + struct qat_sym_cd cd; + uint8_t *cd_cur_ptr; + phys_addr_t cd_paddr; + struct icp_qat_fw_la_bulk_req fw_req; + uint8_t aad_len; + struct qat_crypto_instance *inst; + struct { + uint16_t offset; + uint16_t length; + } cipher_iv; + struct { + uint16_t offset; + uint16_t length; + } auth_iv; + uint16_t digest_length; + rte_spinlock_t lock; /* protects this struct */ + enum qat_device_gen min_qat_dev_gen; +}; + +int +qat_sym_session_configure(struct rte_cryptodev *dev, + struct rte_crypto_sym_xform *xform, + struct rte_cryptodev_sym_session *sess, + struct rte_mempool *mempool); + +int +qat_sym_session_set_parameters(struct rte_cryptodev *dev, + struct rte_crypto_sym_xform *xform, void *session_private); + +int +qat_sym_session_configure_aead(struct rte_crypto_sym_xform *xform, + struct qat_sym_session *session); + +int +qat_sym_session_configure_cipher(struct rte_cryptodev *dev, + struct rte_crypto_sym_xform *xform, + struct qat_sym_session *session); + +int +qat_sym_session_configure_auth(struct rte_cryptodev *dev, + struct rte_crypto_sym_xform *xform, + struct qat_sym_session *session); + +int +qat_sym_session_aead_create_cd_cipher(struct qat_sym_session *cd, + uint8_t *enckey, + uint32_t enckeylen); + +int +qat_sym_session_aead_create_cd_auth(struct qat_sym_session *cdesc, + uint8_t *authkey, + uint32_t authkeylen, + uint32_t aad_length, + uint32_t digestsize, + unsigned int operation); + +void +qat_sym_session_clear(struct rte_cryptodev *dev, + struct rte_cryptodev_sym_session *session); + +unsigned int +qat_sym_session_get_private_size(struct rte_cryptodev *dev); + +void +qat_sym_sesssion_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header, + enum qat_sym_proto_flag proto_flags); +int +qat_sym_validate_aes_key(int key_len, enum icp_qat_hw_cipher_algo *alg); +int +qat_sym_validate_aes_docsisbpi_key(int key_len, + enum icp_qat_hw_cipher_algo *alg); +int +qat_sym_validate_snow3g_key(int key_len, enum 
icp_qat_hw_cipher_algo *alg); +int +qat_sym_validate_kasumi_key(int key_len, enum icp_qat_hw_cipher_algo *alg); +int +qat_sym_validate_3des_key(int key_len, enum icp_qat_hw_cipher_algo *alg); +int +qat_sym_validate_des_key(int key_len, enum icp_qat_hw_cipher_algo *alg); +int +qat_cipher_get_block_size(enum icp_qat_hw_cipher_algo qat_cipher_alg); +int +qat_sym_validate_zuc_key(int key_len, enum icp_qat_hw_cipher_algo *alg); + +#endif /* _QAT_SYM_SESSION_H_ */ diff --git a/drivers/crypto/qat/rte_pmd_qat_version.map b/drivers/crypto/qat/rte_pmd_qat_version.map deleted file mode 100644 index bbaf1c85..00000000 --- a/drivers/crypto/qat/rte_pmd_qat_version.map +++ /dev/null @@ -1,3 +0,0 @@ -DPDK_2.2 { - local: *; -}; \ No newline at end of file diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c deleted file mode 100644 index bf837401..00000000 --- a/drivers/crypto/qat/rte_qat_cryptodev.c +++ /dev/null @@ -1,180 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2015-2017 Intel Corporation - */ - -#include -#include -#include -#include -#include -#include - -#include "qat_crypto.h" -#include "qat_logs.h" - -uint8_t cryptodev_qat_driver_id; - -static const struct rte_cryptodev_capabilities qat_gen1_capabilities[] = { - QAT_BASE_GEN1_SYM_CAPABILITIES, - RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() -}; - -static const struct rte_cryptodev_capabilities qat_gen2_capabilities[] = { - QAT_BASE_GEN1_SYM_CAPABILITIES, - QAT_EXTRA_GEN2_SYM_CAPABILITIES, - RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() -}; - -static struct rte_cryptodev_ops crypto_qat_ops = { - - /* Device related operations */ - .dev_configure = qat_dev_config, - .dev_start = qat_dev_start, - .dev_stop = qat_dev_stop, - .dev_close = qat_dev_close, - .dev_infos_get = qat_dev_info_get, - - .stats_get = qat_crypto_sym_stats_get, - .stats_reset = qat_crypto_sym_stats_reset, - .queue_pair_setup = qat_crypto_sym_qp_setup, - .queue_pair_release = qat_crypto_sym_qp_release, - .queue_pair_start = NULL, - .queue_pair_stop = NULL, - .queue_pair_count = NULL, - - /* Crypto related operations */ - .session_get_size = qat_crypto_sym_get_session_private_size, - .session_configure = qat_crypto_sym_configure_session, - .session_clear = qat_crypto_sym_clear_session -}; - -/* - * The set of PCI devices this driver supports - */ - -static const struct rte_pci_id pci_id_qat_map[] = { - { - RTE_PCI_DEVICE(0x8086, 0x0443), - }, - { - RTE_PCI_DEVICE(0x8086, 0x37c9), - }, - { - RTE_PCI_DEVICE(0x8086, 0x19e3), - }, - { - RTE_PCI_DEVICE(0x8086, 0x6f55), - }, - {.device_id = 0}, -}; - -static int -crypto_qat_create(const char *name, struct rte_pci_device *pci_dev, - struct rte_cryptodev_pmd_init_params *init_params) -{ - struct rte_cryptodev *cryptodev; - struct qat_pmd_private *internals; - - PMD_INIT_FUNC_TRACE(); - - cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device, - init_params); - if (cryptodev == NULL) - return -ENODEV; - - cryptodev->driver_id = cryptodev_qat_driver_id; - cryptodev->dev_ops = &crypto_qat_ops; - - cryptodev->enqueue_burst = qat_pmd_enqueue_op_burst; - cryptodev->dequeue_burst = qat_pmd_dequeue_op_burst; - - cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | - RTE_CRYPTODEV_FF_HW_ACCELERATED | - RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | - RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER; - - internals = cryptodev->data->dev_private; - internals->max_nb_sessions = init_params->max_nb_sessions; - switch (pci_dev->id.device_id) { - case 0x0443: - internals->qat_dev_gen = QAT_GEN1; - 
internals->qat_dev_capabilities = qat_gen1_capabilities; - break; - case 0x37c9: - case 0x19e3: - case 0x6f55: - internals->qat_dev_gen = QAT_GEN2; - internals->qat_dev_capabilities = qat_gen2_capabilities; - break; - default: - PMD_DRV_LOG(ERR, - "Invalid dev_id, can't determine capabilities"); - break; - } - - /* - * For secondary processes, we don't initialise any further as primary - * has already done this work. Only check we don't need a different - * RX function - */ - if (rte_eal_process_type() != RTE_PROC_PRIMARY) { - PMD_DRV_LOG(DEBUG, "Device already initialised by primary process"); - return 0; - } - - return 0; -} - -static int crypto_qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, - struct rte_pci_device *pci_dev) -{ - struct rte_cryptodev_pmd_init_params init_params = { - .name = "", - .socket_id = rte_socket_id(), - .private_data_size = sizeof(struct qat_pmd_private), - .max_nb_sessions = RTE_QAT_PMD_MAX_NB_SESSIONS - }; - char name[RTE_CRYPTODEV_NAME_MAX_LEN]; - - PMD_DRV_LOG(DEBUG, "Found QAT device at %02x:%02x.%x", - pci_dev->addr.bus, - pci_dev->addr.devid, - pci_dev->addr.function); - - rte_pci_device_name(&pci_dev->addr, name, sizeof(name)); - - return crypto_qat_create(name, pci_dev, &init_params); -} - -static int crypto_qat_pci_remove(struct rte_pci_device *pci_dev) -{ - struct rte_cryptodev *cryptodev; - char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN]; - - if (pci_dev == NULL) - return -EINVAL; - - rte_pci_device_name(&pci_dev->addr, cryptodev_name, - sizeof(cryptodev_name)); - - cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name); - if (cryptodev == NULL) - return -ENODEV; - - /* free crypto device */ - return rte_cryptodev_pmd_destroy(cryptodev); -} - -static struct rte_pci_driver rte_qat_pmd = { - .id_table = pci_id_qat_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING, - .probe = crypto_qat_pci_probe, - .remove = crypto_qat_pci_remove -}; - -static struct cryptodev_driver qat_crypto_drv; - -RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_QAT_SYM_PMD, rte_qat_pmd); -RTE_PMD_REGISTER_PCI_TABLE(CRYPTODEV_NAME_QAT_SYM_PMD, pci_id_qat_map); -RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv, rte_qat_pmd, - cryptodev_qat_driver_id); diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c index 140c8b41..6e4919c4 100644 --- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c +++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c @@ -9,6 +9,8 @@ #include "rte_cryptodev_scheduler.h" #include "scheduler_pmd_private.h" +int scheduler_logtype_driver; + /** update the scheduler pmd's capability with attaching device's * capability. 
* For each device to be attached, the scheduler's capability should be @@ -91,8 +93,10 @@ update_scheduler_capability(struct scheduler_ctx *sched_ctx) struct rte_cryptodev_capabilities tmp_caps[256] = { {0} }; uint32_t nb_caps = 0, i; - if (sched_ctx->capabilities) + if (sched_ctx->capabilities) { rte_free(sched_ctx->capabilities); + sched_ctx->capabilities = NULL; + } for (i = 0; i < sched_ctx->nb_slaves; i++) { struct rte_cryptodev_info dev_info; @@ -166,30 +170,30 @@ rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id) uint32_t i; if (!dev) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (dev->driver_id != cryptodev_driver_id) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (dev->data->dev_started) { - CS_LOG_ERR("Illegal operation"); + CR_SCHED_LOG(ERR, "Illegal operation"); return -EBUSY; } sched_ctx = dev->data->dev_private; if (sched_ctx->nb_slaves >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) { - CS_LOG_ERR("Too many slaves attached"); + CR_SCHED_LOG(ERR, "Too many slaves attached"); return -ENOMEM; } for (i = 0; i < sched_ctx->nb_slaves; i++) if (sched_ctx->slaves[i].dev_id == slave_id) { - CS_LOG_ERR("Slave already added"); + CR_SCHED_LOG(ERR, "Slave already added"); return -ENOTSUP; } @@ -206,7 +210,7 @@ rte_cryptodev_scheduler_slave_attach(uint8_t scheduler_id, uint8_t slave_id) slave->driver_id = 0; sched_ctx->nb_slaves--; - CS_LOG_ERR("capabilities update failed"); + CR_SCHED_LOG(ERR, "capabilities update failed"); return -ENOTSUP; } @@ -225,17 +229,17 @@ rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id) uint32_t i, slave_pos; if (!dev) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (dev->driver_id != cryptodev_driver_id) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (dev->data->dev_started) { - CS_LOG_ERR("Illegal operation"); + CR_SCHED_LOG(ERR, "Illegal operation"); return -EBUSY; } @@ -245,12 +249,12 @@ rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id) if (sched_ctx->slaves[slave_pos].dev_id == slave_id) break; if (slave_pos == sched_ctx->nb_slaves) { - CS_LOG_ERR("Cannot find slave"); + CR_SCHED_LOG(ERR, "Cannot find slave"); return -ENOTSUP; } if (sched_ctx->ops.slave_detach(dev, slave_id) < 0) { - CS_LOG_ERR("Failed to detach slave"); + CR_SCHED_LOG(ERR, "Failed to detach slave"); return -ENOTSUP; } @@ -263,7 +267,7 @@ rte_cryptodev_scheduler_slave_detach(uint8_t scheduler_id, uint8_t slave_id) sched_ctx->nb_slaves--; if (update_scheduler_capability(sched_ctx) < 0) { - CS_LOG_ERR("capabilities update failed"); + CR_SCHED_LOG(ERR, "capabilities update failed"); return -ENOTSUP; } @@ -282,17 +286,17 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id, struct scheduler_ctx *sched_ctx; if (!dev) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (dev->driver_id != cryptodev_driver_id) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (dev->data->dev_started) { - CS_LOG_ERR("Illegal operation"); + CR_SCHED_LOG(ERR, "Illegal operation"); return -EBUSY; } @@ -305,33 +309,33 @@ rte_cryptodev_scheduler_mode_set(uint8_t scheduler_id, case CDEV_SCHED_MODE_ROUNDROBIN: if 
(rte_cryptodev_scheduler_load_user_scheduler(scheduler_id, roundrobin_scheduler) < 0) { - CS_LOG_ERR("Failed to load scheduler"); + CR_SCHED_LOG(ERR, "Failed to load scheduler"); return -1; } break; case CDEV_SCHED_MODE_PKT_SIZE_DISTR: if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id, pkt_size_based_distr_scheduler) < 0) { - CS_LOG_ERR("Failed to load scheduler"); + CR_SCHED_LOG(ERR, "Failed to load scheduler"); return -1; } break; case CDEV_SCHED_MODE_FAILOVER: if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id, failover_scheduler) < 0) { - CS_LOG_ERR("Failed to load scheduler"); + CR_SCHED_LOG(ERR, "Failed to load scheduler"); return -1; } break; case CDEV_SCHED_MODE_MULTICORE: if (rte_cryptodev_scheduler_load_user_scheduler(scheduler_id, multicore_scheduler) < 0) { - CS_LOG_ERR("Failed to load scheduler"); + CR_SCHED_LOG(ERR, "Failed to load scheduler"); return -1; } break; default: - CS_LOG_ERR("Not yet supported"); + CR_SCHED_LOG(ERR, "Not yet supported"); return -ENOTSUP; } @@ -345,12 +349,12 @@ rte_cryptodev_scheduler_mode_get(uint8_t scheduler_id) struct scheduler_ctx *sched_ctx; if (!dev) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (dev->driver_id != cryptodev_driver_id) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } @@ -367,17 +371,17 @@ rte_cryptodev_scheduler_ordering_set(uint8_t scheduler_id, struct scheduler_ctx *sched_ctx; if (!dev) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (dev->driver_id != cryptodev_driver_id) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (dev->data->dev_started) { - CS_LOG_ERR("Illegal operation"); + CR_SCHED_LOG(ERR, "Illegal operation"); return -EBUSY; } @@ -395,12 +399,12 @@ rte_cryptodev_scheduler_ordering_get(uint8_t scheduler_id) struct scheduler_ctx *sched_ctx; if (!dev) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (dev->driver_id != cryptodev_driver_id) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } @@ -417,25 +421,25 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id, struct scheduler_ctx *sched_ctx; if (!dev) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (dev->driver_id != cryptodev_driver_id) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (dev->data->dev_started) { - CS_LOG_ERR("Illegal operation"); + CR_SCHED_LOG(ERR, "Illegal operation"); return -EBUSY; } sched_ctx = dev->data->dev_private; if (strlen(scheduler->name) > RTE_CRYPTODEV_NAME_MAX_LEN - 1) { - CS_LOG_ERR("Invalid name %s, should be less than " - "%u bytes.\n", scheduler->name, + CR_SCHED_LOG(ERR, "Invalid name %s, should be less than " + "%u bytes.", scheduler->name, RTE_CRYPTODEV_NAME_MAX_LEN); return -EINVAL; } @@ -444,8 +448,8 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id, if (strlen(scheduler->description) > RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 1) { - CS_LOG_ERR("Invalid description %s, should be less than " - "%u bytes.\n", scheduler->description, + CR_SCHED_LOG(ERR, "Invalid description %s, should be less than " + "%u bytes.", scheduler->description, RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN - 
1); return -EINVAL; } @@ -462,14 +466,16 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id, sched_ctx->ops.option_set = scheduler->ops->option_set; sched_ctx->ops.option_get = scheduler->ops->option_get; - if (sched_ctx->private_ctx) + if (sched_ctx->private_ctx) { rte_free(sched_ctx->private_ctx); + sched_ctx->private_ctx = NULL; + } if (sched_ctx->ops.create_private_ctx) { int ret = (*sched_ctx->ops.create_private_ctx)(dev); if (ret < 0) { - CS_LOG_ERR("Unable to create scheduler private " + CR_SCHED_LOG(ERR, "Unable to create scheduler private " "context"); return ret; } @@ -488,12 +494,12 @@ rte_cryptodev_scheduler_slaves_get(uint8_t scheduler_id, uint8_t *slaves) uint32_t nb_slaves = 0; if (!dev) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (dev->driver_id != cryptodev_driver_id) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } @@ -521,17 +527,17 @@ rte_cryptodev_scheduler_option_set(uint8_t scheduler_id, if (option_type == CDEV_SCHED_OPTION_NOT_SET || option_type >= CDEV_SCHED_OPTION_COUNT) { - CS_LOG_ERR("Invalid option parameter"); + CR_SCHED_LOG(ERR, "Invalid option parameter"); return -EINVAL; } if (!option) { - CS_LOG_ERR("Invalid option parameter"); + CR_SCHED_LOG(ERR, "Invalid option parameter"); return -EINVAL; } if (dev->data->dev_started) { - CS_LOG_ERR("Illegal operation"); + CR_SCHED_LOG(ERR, "Illegal operation"); return -EBUSY; } @@ -551,17 +557,17 @@ rte_cryptodev_scheduler_option_get(uint8_t scheduler_id, struct scheduler_ctx *sched_ctx; if (!dev) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } if (!option) { - CS_LOG_ERR("Invalid option parameter"); + CR_SCHED_LOG(ERR, "Invalid option parameter"); return -EINVAL; } if (dev->driver_id != cryptodev_driver_id) { - CS_LOG_ERR("Operation not supported"); + CR_SCHED_LOG(ERR, "Operation not supported"); return -ENOTSUP; } @@ -571,3 +577,8 @@ rte_cryptodev_scheduler_option_get(uint8_t scheduler_id, return (*sched_ctx->ops.option_get)(dev, option_type, option); } + +RTE_INIT(scheduler_init_log) +{ + scheduler_logtype_driver = rte_log_register("pmd.crypto.scheduler"); +} diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h index 01e7646c..3faea409 100644 --- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h +++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h @@ -30,7 +30,7 @@ extern "C" { #endif /** Maximum number of multi-core worker cores */ -#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES (64) +#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES (RTE_MAX_LCORE - 1) /** Round-robin scheduling mode string */ #define SCHEDULER_MODE_NAME_ROUND_ROBIN round-robin @@ -76,6 +76,7 @@ enum rte_cryptodev_schedule_option_type { /** * Threshold option structure */ +#define RTE_CRYPTODEV_SCHEDULER_PARAM_THRES "threshold" struct rte_cryptodev_scheduler_threshold_option { uint32_t threshold; /**< Threshold for packet-size mode */ }; diff --git a/drivers/crypto/scheduler/scheduler_failover.c b/drivers/crypto/scheduler/scheduler_failover.c index 005b1638..ddfb5b81 100644 --- a/drivers/crypto/scheduler/scheduler_failover.c +++ b/drivers/crypto/scheduler/scheduler_failover.c @@ -139,7 +139,7 @@ scheduler_start(struct rte_cryptodev *dev) uint16_t i; if (sched_ctx->nb_slaves < 2) { - CS_LOG_ERR("Number of slaves shall no less than 2"); + CR_SCHED_LOG(ERR, "Number of 
slaves shall no less than 2"); return -ENOMEM; } @@ -182,7 +182,7 @@ scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id) fo_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*fo_qp_ctx), 0, rte_socket_id()); if (!fo_qp_ctx) { - CS_LOG_ERR("failed allocate memory for private queue pair"); + CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair"); return -ENOMEM; } diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c index b2ce44ce..d410e69d 100644 --- a/drivers/crypto/scheduler/scheduler_multicore.c +++ b/drivers/crypto/scheduler/scheduler_multicore.c @@ -21,8 +21,8 @@ struct mc_scheduler_ctx { uint32_t num_workers; /**< Number of workers polling */ uint32_t stop_signal; - struct rte_ring *sched_enq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES]; - struct rte_ring *sched_deq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES]; + struct rte_ring *sched_enq_ring[RTE_MAX_LCORE]; + struct rte_ring *sched_deq_ring[RTE_MAX_LCORE]; }; struct mc_scheduler_qp_ctx { @@ -178,7 +178,8 @@ mc_scheduler_worker(struct rte_cryptodev *dev) } } if (worker_idx == -1) { - CS_LOG_ERR("worker on core %u:cannot find worker index!\n", core_id); + CR_SCHED_LOG(ERR, "worker on core %u:cannot find worker index!", + core_id); return -1; } @@ -313,7 +314,7 @@ scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id) mc_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*mc_qp_ctx), 0, rte_socket_id()); if (!mc_qp_ctx) { - CS_LOG_ERR("failed allocate memory for private queue pair"); + CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair"); return -ENOMEM; } @@ -328,16 +329,18 @@ static int scheduler_create_private_ctx(struct rte_cryptodev *dev) { struct scheduler_ctx *sched_ctx = dev->data->dev_private; - struct mc_scheduler_ctx *mc_ctx; + struct mc_scheduler_ctx *mc_ctx = NULL; uint16_t i; - if (sched_ctx->private_ctx) + if (sched_ctx->private_ctx) { rte_free(sched_ctx->private_ctx); + sched_ctx->private_ctx = NULL; + } mc_ctx = rte_zmalloc_socket(NULL, sizeof(struct mc_scheduler_ctx), 0, rte_socket_id()); if (!mc_ctx) { - CS_LOG_ERR("failed allocate memory"); + CR_SCHED_LOG(ERR, "failed allocate memory"); return -ENOMEM; } @@ -345,25 +348,48 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev) for (i = 0; i < sched_ctx->nb_wc; i++) { char r_name[16]; - snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX "%u", i); - mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE, - rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ); + snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX + "%u_%u", dev->data->dev_id, i); + mc_ctx->sched_enq_ring[i] = rte_ring_lookup(r_name); if (!mc_ctx->sched_enq_ring[i]) { - CS_LOG_ERR("Cannot create ring for worker %u", i); - return -1; + mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name, + PER_SLAVE_BUFF_SIZE, + rte_socket_id(), + RING_F_SC_DEQ | RING_F_SP_ENQ); + if (!mc_ctx->sched_enq_ring[i]) { + CR_SCHED_LOG(ERR, "Cannot create ring for worker %u", + i); + goto exit; + } } - snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX "%u", i); - mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE, - rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ); + snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX + "%u_%u", dev->data->dev_id, i); + mc_ctx->sched_deq_ring[i] = rte_ring_lookup(r_name); if (!mc_ctx->sched_deq_ring[i]) { - CS_LOG_ERR("Cannot create ring for worker %u", i); - return -1; + mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name, 
+ PER_SLAVE_BUFF_SIZE, + rte_socket_id(), + RING_F_SC_DEQ | RING_F_SP_ENQ); + if (!mc_ctx->sched_deq_ring[i]) { + CR_SCHED_LOG(ERR, "Cannot create ring for worker %u", + i); + goto exit; + } } } sched_ctx->private_ctx = (void *)mc_ctx; return 0; + +exit: + for (i = 0; i < sched_ctx->nb_wc; i++) { + rte_ring_free(mc_ctx->sched_enq_ring[i]); + rte_ring_free(mc_ctx->sched_deq_ring[i]); + } + rte_free(mc_ctx); + + return -1; } struct rte_cryptodev_scheduler_ops scheduler_mc_ops = { diff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c index 96bf0161..74129b66 100644 --- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c +++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c @@ -258,7 +258,7 @@ scheduler_start(struct rte_cryptodev *dev) /* for packet size based scheduler, nb_slaves have to >= 2 */ if (sched_ctx->nb_slaves < NB_PKT_SIZE_SLAVES) { - CS_LOG_ERR("not enough slaves to start"); + CR_SCHED_LOG(ERR, "not enough slaves to start"); return -1; } @@ -302,7 +302,7 @@ scheduler_stop(struct rte_cryptodev *dev) if (ps_qp_ctx->primary_slave.nb_inflight_cops + ps_qp_ctx->secondary_slave.nb_inflight_cops) { - CS_LOG_ERR("Some crypto ops left in slave queue"); + CR_SCHED_LOG(ERR, "Some crypto ops left in slave queue"); return -1; } } @@ -319,7 +319,7 @@ scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id) ps_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*ps_qp_ctx), 0, rte_socket_id()); if (!ps_qp_ctx) { - CS_LOG_ERR("failed allocate memory for private queue pair"); + CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair"); return -ENOMEM; } @@ -334,13 +334,15 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev) struct scheduler_ctx *sched_ctx = dev->data->dev_private; struct psd_scheduler_ctx *psd_ctx; - if (sched_ctx->private_ctx) + if (sched_ctx->private_ctx) { rte_free(sched_ctx->private_ctx); + sched_ctx->private_ctx = NULL; + } psd_ctx = rte_zmalloc_socket(NULL, sizeof(struct psd_scheduler_ctx), 0, rte_socket_id()); if (!psd_ctx) { - CS_LOG_ERR("failed allocate memory"); + CR_SCHED_LOG(ERR, "failed allocate memory"); return -ENOMEM; } @@ -360,14 +362,14 @@ scheduler_option_set(struct rte_cryptodev *dev, uint32_t option_type, if ((enum rte_cryptodev_schedule_option_type)option_type != CDEV_SCHED_OPTION_THRESHOLD) { - CS_LOG_ERR("Option not supported"); + CR_SCHED_LOG(ERR, "Option not supported"); return -EINVAL; } threshold = ((struct rte_cryptodev_scheduler_threshold_option *) option)->threshold; if (!rte_is_power_of_2(threshold)) { - CS_LOG_ERR("Threshold is not power of 2"); + CR_SCHED_LOG(ERR, "Threshold is not power of 2"); return -EINVAL; } @@ -386,7 +388,7 @@ scheduler_option_get(struct rte_cryptodev *dev, uint32_t option_type, if ((enum rte_cryptodev_schedule_option_type)option_type != CDEV_SCHED_OPTION_THRESHOLD) { - CS_LOG_ERR("Option not supported"); + CR_SCHED_LOG(ERR, "Option not supported"); return -EINVAL; } diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c index 51a85fa6..a9221a94 100644 --- a/drivers/crypto/scheduler/scheduler_pmd.c +++ b/drivers/crypto/scheduler/scheduler_pmd.c @@ -9,6 +9,7 @@ #include #include #include +#include #include "rte_cryptodev_scheduler.h" #include "scheduler_pmd_private.h" @@ -19,8 +20,10 @@ struct scheduler_init_params { struct rte_cryptodev_pmd_init_params def_p; uint32_t nb_slaves; enum rte_cryptodev_scheduler_mode mode; + char mode_param_str[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN]; uint32_t enable_ordering; 
- uint64_t wcmask; + uint16_t wc_pool[RTE_MAX_LCORE]; + uint16_t nb_wc; char slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES] [RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN]; }; @@ -28,9 +31,9 @@ struct scheduler_init_params { #define RTE_CRYPTODEV_VDEV_NAME ("name") #define RTE_CRYPTODEV_VDEV_SLAVE ("slave") #define RTE_CRYPTODEV_VDEV_MODE ("mode") +#define RTE_CRYPTODEV_VDEV_MODE_PARAM ("mode_param") #define RTE_CRYPTODEV_VDEV_ORDERING ("ordering") #define RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG ("max_nb_queue_pairs") -#define RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG ("max_nb_sessions") #define RTE_CRYPTODEV_VDEV_SOCKET_ID ("socket_id") #define RTE_CRYPTODEV_VDEV_COREMASK ("coremask") #define RTE_CRYPTODEV_VDEV_CORELIST ("corelist") @@ -39,9 +42,9 @@ const char *scheduler_valid_params[] = { RTE_CRYPTODEV_VDEV_NAME, RTE_CRYPTODEV_VDEV_SLAVE, RTE_CRYPTODEV_VDEV_MODE, + RTE_CRYPTODEV_VDEV_MODE_PARAM, RTE_CRYPTODEV_VDEV_ORDERING, RTE_CRYPTODEV_VDEV_MAX_NB_QP_ARG, - RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG, RTE_CRYPTODEV_VDEV_SOCKET_ID, RTE_CRYPTODEV_VDEV_COREMASK, RTE_CRYPTODEV_VDEV_CORELIST @@ -68,6 +71,8 @@ const struct scheduler_parse_map scheduler_ordering_map[] = { {"disable", 0} }; +#define CDEV_SCHED_MODE_PARAM_SEP_CHAR ':' + static int cryptodev_scheduler_create(const char *name, struct rte_vdev_device *vdev, @@ -81,15 +86,11 @@ cryptodev_scheduler_create(const char *name, dev = rte_cryptodev_pmd_create(name, &vdev->device, &init_params->def_p); if (dev == NULL) { - CS_LOG_ERR("driver %s: failed to create cryptodev vdev", + CR_SCHED_LOG(ERR, "driver %s: failed to create cryptodev vdev", name); return -EFAULT; } - if (init_params->wcmask != 0) - RTE_LOG(INFO, PMD, " workers core mask = %"PRIx64"\n", - init_params->wcmask); - dev->driver_id = cryptodev_driver_id; dev->dev_ops = rte_crypto_scheduler_pmd_ops; @@ -100,20 +101,26 @@ cryptodev_scheduler_create(const char *name, if (init_params->mode == CDEV_SCHED_MODE_MULTICORE) { uint16_t i; - sched_ctx->nb_wc = 0; + sched_ctx->nb_wc = init_params->nb_wc; - for (i = 0; i < RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES; i++) { - if (init_params->wcmask & (1ULL << i)) { - sched_ctx->wc_pool[sched_ctx->nb_wc++] = i; - RTE_LOG(INFO, PMD, - " Worker core[%u]=%u added\n", - sched_ctx->nb_wc-1, i); - } + for (i = 0; i < sched_ctx->nb_wc; i++) { + sched_ctx->wc_pool[i] = init_params->wc_pool[i]; + CR_SCHED_LOG(INFO, " Worker core[%u]=%u added", + i, sched_ctx->wc_pool[i]); } } if (init_params->mode > CDEV_SCHED_MODE_USERDEFINED && init_params->mode < CDEV_SCHED_MODE_COUNT) { + union { + struct rte_cryptodev_scheduler_threshold_option + threshold_option; + } option; + enum rte_cryptodev_schedule_option_type option_type; + char param_name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN] = {0}; + char param_val[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN] = {0}; + char *s, *end; + ret = rte_cryptodev_scheduler_mode_set(dev->data->dev_id, init_params->mode); if (ret < 0) { @@ -125,10 +132,52 @@ cryptodev_scheduler_create(const char *name, if (scheduler_mode_map[i].val != sched_ctx->mode) continue; - RTE_LOG(INFO, PMD, " Scheduling mode = %s\n", + CR_SCHED_LOG(INFO, " Scheduling mode = %s", scheduler_mode_map[i].name); break; } + + if (strlen(init_params->mode_param_str) > 0) { + s = strchr(init_params->mode_param_str, + CDEV_SCHED_MODE_PARAM_SEP_CHAR); + if (s == NULL) { + CR_SCHED_LOG(ERR, "Invalid mode param"); + return -EINVAL; + } + + strlcpy(param_name, init_params->mode_param_str, + s - init_params->mode_param_str + 1); + s++; + strlcpy(param_val, s, + RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN); 
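/* At this point param_name holds the text before the ':' separator and
 * param_val the text after it -- e.g. mode_param="threshold:1024" yields
 * param_name = "threshold", param_val = "1024". The switch below checks
 * the name against the selected scheduling mode and converts the value. */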
+ + switch (init_params->mode) { + case CDEV_SCHED_MODE_PKT_SIZE_DISTR: + if (strcmp(param_name, + RTE_CRYPTODEV_SCHEDULER_PARAM_THRES) + != 0) { + CR_SCHED_LOG(ERR, "Invalid mode param"); + return -EINVAL; + } + option_type = CDEV_SCHED_OPTION_THRESHOLD; + + option.threshold_option.threshold = + strtoul(param_val, &end, 0); + break; + default: + CR_SCHED_LOG(ERR, "Invalid mode param"); + return -EINVAL; + } + + if (sched_ctx->ops.option_set(dev, option_type, + (void *)&option) < 0) { + CR_SCHED_LOG(ERR, "Invalid mode param"); + return -EINVAL; + } + + RTE_LOG(INFO, PMD, " Sched mode param (%s = %s)\n", + param_name, param_val); + } } sched_ctx->reordering_enabled = init_params->enable_ordering; @@ -138,7 +187,7 @@ cryptodev_scheduler_create(const char *name, sched_ctx->reordering_enabled) continue; - RTE_LOG(INFO, PMD, " Packet ordering = %s\n", + CR_SCHED_LOG(INFO, " Packet ordering = %s", scheduler_ordering_map[i].name); break; @@ -153,7 +202,7 @@ cryptodev_scheduler_create(const char *name, if (!sched_ctx->init_slave_names[ sched_ctx->nb_init_slaves]) { - CS_LOG_ERR("driver %s: Insufficient memory", + CR_SCHED_LOG(ERR, "driver %s: Insufficient memory", name); return -ENOMEM; } @@ -175,8 +224,8 @@ cryptodev_scheduler_create(const char *name, 0, SOCKET_ID_ANY); if (!sched_ctx->capabilities) { - RTE_LOG(ERR, PMD, "Not enough memory for capability " - "information\n"); + CR_SCHED_LOG(ERR, "Not enough memory for capability " + "information"); return -ENOMEM; } @@ -220,7 +269,7 @@ parse_integer_arg(const char *key __rte_unused, *i = atoi(value); if (*i < 0) { - CS_LOG_ERR("Argument has to be positive.\n"); + CR_SCHED_LOG(ERR, "Argument has to be positive."); return -EINVAL; } @@ -232,9 +281,47 @@ static int parse_coremask_arg(const char *key __rte_unused, const char *value, void *extra_args) { + int i, j, val; + uint16_t idx = 0; + char c; struct scheduler_init_params *params = extra_args; - params->wcmask = strtoull(value, NULL, 16); + params->nb_wc = 0; + + if (value == NULL) + return -1; + /* Remove all blank characters ahead and after . + * Remove 0x/0X if exists. 
+ */ + while (isblank(*value)) + value++; + if (value[0] == '0' && ((value[1] == 'x') || (value[1] == 'X'))) + value += 2; + i = strlen(value); + while ((i > 0) && isblank(value[i - 1])) + i--; + + if (i == 0) + return -1; + + for (i = i - 1; i >= 0 && idx < RTE_MAX_LCORE; i--) { + c = value[i]; + if (isxdigit(c) == 0) { + /* invalid characters */ + return -1; + } + if (isdigit(c)) + val = c - '0'; + else if (isupper(c)) + val = c - 'A' + 10; + else + val = c - 'a' + 10; + + for (j = 0; j < 4 && idx < RTE_MAX_LCORE; j++, idx++) { + if ((1 << j) & val) + params->wc_pool[params->nb_wc++] = idx; + } + } return 0; } @@ -246,7 +333,7 @@ parse_corelist_arg(const char *key __rte_unused, { struct scheduler_init_params *params = extra_args; - params->wcmask = 0ULL; + params->nb_wc = 0; const char *token = value; @@ -254,7 +341,11 @@ parse_corelist_arg(const char *key __rte_unused, char *rval; unsigned int core = strtoul(token, &rval, 10); - params->wcmask |= 1ULL << core; + if (core >= RTE_MAX_LCORE) { + CR_SCHED_LOG(ERR, "Invalid worker core %u, should be smaller " + "than %u.", core, RTE_MAX_LCORE); + } + params->wc_pool[params->nb_wc++] = (uint16_t)core; token = (const char *)rval; if (token[0] == '\0') break; @@ -272,8 +363,8 @@ parse_name_arg(const char *key __rte_unused, struct rte_cryptodev_pmd_init_params *params = extra_args; if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) { - CS_LOG_ERR("Invalid name %s, should be less than " - "%u bytes.\n", value, + CR_SCHED_LOG(ERR, "Invalid name %s, should be less than " + "%u bytes.", value, RTE_CRYPTODEV_NAME_MAX_LEN - 1); return -EINVAL; } @@ -291,7 +382,7 @@ parse_slave_arg(const char *key __rte_unused, struct scheduler_init_params *param = extra_args; if (param->nb_slaves >= RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES) { - CS_LOG_ERR("Too many slaves.\n"); + CR_SCHED_LOG(ERR, "Too many slaves."); return -ENOMEM; } @@ -312,18 +403,31 @@ parse_mode_arg(const char *key __rte_unused, if (strcmp(value, scheduler_mode_map[i].name) == 0) { param->mode = (enum rte_cryptodev_scheduler_mode) scheduler_mode_map[i].val; + break; } } if (i == RTE_DIM(scheduler_mode_map)) { - CS_LOG_ERR("Unrecognized input.\n"); + CR_SCHED_LOG(ERR, "Unrecognized input."); return -EINVAL; } return 0; } +static int +parse_mode_param_arg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + struct scheduler_init_params *param = extra_args; + + strlcpy(param->mode_param_str, value, + RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN); + + return 0; +} + static int parse_ordering_arg(const char *key __rte_unused, const char *value, void *extra_args) @@ -340,7 +444,7 @@ parse_ordering_arg(const char *key __rte_unused, } if (i == RTE_DIM(scheduler_ordering_map)) { - CS_LOG_ERR("Unrecognized input.\n"); + CR_SCHED_LOG(ERR, "Unrecognized input."); return -EINVAL; } @@ -370,13 +474,6 @@ scheduler_parse_init_params(struct scheduler_init_params *params, if (ret < 0) goto free_kvlist; - ret = rte_kvargs_process(kvlist, - RTE_CRYPTODEV_VDEV_MAX_NB_SESS_ARG, - &parse_integer_arg, - ¶ms->def_p.max_nb_sessions); - if (ret < 0) - goto free_kvlist; - ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_SOCKET_ID, &parse_integer_arg, ¶ms->def_p.socket_id); @@ -411,6 +508,11 @@ scheduler_parse_init_params(struct scheduler_init_params *params, if (ret < 0) goto free_kvlist; + ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_MODE_PARAM, + &parse_mode_param_arg, params); + if (ret < 0) + goto free_kvlist; + ret = rte_kvargs_process(kvlist, RTE_CRYPTODEV_VDEV_ORDERING, &parse_ordering_arg, 
params); if (ret < 0) @@ -430,8 +532,7 @@ cryptodev_scheduler_probe(struct rte_vdev_device *vdev) "", sizeof(struct scheduler_ctx), rte_socket_id(), - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS, - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS + RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS }, .nb_slaves = 0, .mode = CDEV_SCHED_MODE_NOT_SET, @@ -464,9 +565,8 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SCHEDULER_PMD, cryptodev_scheduler_pmd_drv); RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SCHEDULER_PMD, "max_nb_queue_pairs= " - "max_nb_sessions= " "socket_id= " "slave="); RTE_PMD_REGISTER_CRYPTO_DRIVER(scheduler_crypto_drv, - cryptodev_scheduler_pmd_drv, + cryptodev_scheduler_pmd_drv.driver, cryptodev_driver_id); diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c b/drivers/crypto/scheduler/scheduler_pmd_ops.c index 680c2afb..778071ca 100644 --- a/drivers/crypto/scheduler/scheduler_pmd_ops.c +++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c @@ -27,7 +27,7 @@ scheduler_attach_init_slave(struct rte_cryptodev *dev) int status; if (!slave_dev) { - CS_LOG_ERR("Failed to locate slave dev %s", + CR_SCHED_LOG(ERR, "Failed to locate slave dev %s", dev_name); return -EINVAL; } @@ -36,16 +36,17 @@ scheduler_attach_init_slave(struct rte_cryptodev *dev) scheduler_id, slave_dev->data->dev_id); if (status < 0) { - CS_LOG_ERR("Failed to attach slave cryptodev %u", + CR_SCHED_LOG(ERR, "Failed to attach slave cryptodev %u", slave_dev->data->dev_id); return status; } - CS_LOG_INFO("Scheduler %s attached slave %s\n", + CR_SCHED_LOG(INFO, "Scheduler %s attached slave %s", dev->data->name, sched_ctx->init_slave_names[i]); rte_free(sched_ctx->init_slave_names[i]); + sched_ctx->init_slave_names[i] = NULL; sched_ctx->nb_init_slaves -= 1; } @@ -101,7 +102,7 @@ update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id) if (snprintf(order_ring_name, RTE_CRYPTODEV_NAME_MAX_LEN, "%s_rb_%u_%u", RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), dev->data->dev_id, qp_id) < 0) { - CS_LOG_ERR("failed to create unique reorder buffer " + CR_SCHED_LOG(ERR, "failed to create unique reorder buffer" "name"); return -ENOMEM; } @@ -110,7 +111,7 @@ update_order_ring(struct rte_cryptodev *dev, uint16_t qp_id) buff_size, rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ); if (!qp_ctx->order_ring) { - CS_LOG_ERR("failed to create order ring"); + CR_SCHED_LOG(ERR, "failed to create order ring"); return -ENOMEM; } } else { @@ -144,18 +145,18 @@ scheduler_pmd_start(struct rte_cryptodev *dev) for (i = 0; i < dev->data->nb_queue_pairs; i++) { ret = update_order_ring(dev, i); if (ret < 0) { - CS_LOG_ERR("Failed to update reorder buffer"); + CR_SCHED_LOG(ERR, "Failed to update reorder buffer"); return ret; } } if (sched_ctx->mode == CDEV_SCHED_MODE_NOT_SET) { - CS_LOG_ERR("Scheduler mode is not set"); + CR_SCHED_LOG(ERR, "Scheduler mode is not set"); return -1; } if (!sched_ctx->nb_slaves) { - CS_LOG_ERR("No slave in the scheduler"); + CR_SCHED_LOG(ERR, "No slave in the scheduler"); return -1; } @@ -165,7 +166,7 @@ scheduler_pmd_start(struct rte_cryptodev *dev) uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id; if ((*sched_ctx->ops.slave_attach)(dev, slave_dev_id) < 0) { - CS_LOG_ERR("Failed to attach slave"); + CR_SCHED_LOG(ERR, "Failed to attach slave"); return -ENOTSUP; } } @@ -173,7 +174,7 @@ scheduler_pmd_start(struct rte_cryptodev *dev) RTE_FUNC_PTR_OR_ERR_RET(*sched_ctx->ops.scheduler_start, -ENOTSUP); if ((*sched_ctx->ops.scheduler_start)(dev) < 0) { - CS_LOG_ERR("Scheduler start failed"); + CR_SCHED_LOG(ERR, "Scheduler start failed"); return 
-1; } @@ -185,7 +186,7 @@ scheduler_pmd_start(struct rte_cryptodev *dev) ret = (*slave_dev->dev_ops->dev_start)(slave_dev); if (ret < 0) { - CS_LOG_ERR("Failed to start slave dev %u", + CR_SCHED_LOG(ERR, "Failed to start slave dev %u", slave_dev_id); return ret; } @@ -261,11 +262,15 @@ scheduler_pmd_close(struct rte_cryptodev *dev) } } - if (sched_ctx->private_ctx) + if (sched_ctx->private_ctx) { rte_free(sched_ctx->private_ctx); + sched_ctx->private_ctx = NULL; + } - if (sched_ctx->capabilities) + if (sched_ctx->capabilities) { rte_free(sched_ctx->capabilities); + sched_ctx->capabilities = NULL; + } return 0; } @@ -316,8 +321,9 @@ scheduler_pmd_info_get(struct rte_cryptodev *dev, struct rte_cryptodev_info *dev_info) { struct scheduler_ctx *sched_ctx = dev->data->dev_private; - uint32_t max_nb_sessions = sched_ctx->nb_slaves ? - UINT32_MAX : RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS; + uint32_t max_nb_sess = 0; + uint16_t headroom_sz = 0; + uint16_t tailroom_sz = 0; uint32_t i; if (!dev_info) @@ -333,17 +339,32 @@ scheduler_pmd_info_get(struct rte_cryptodev *dev, struct rte_cryptodev_info slave_info; rte_cryptodev_info_get(slave_dev_id, &slave_info); - max_nb_sessions = slave_info.sym.max_nb_sessions < - max_nb_sessions ? - slave_info.sym.max_nb_sessions : - max_nb_sessions; + uint32_t dev_max_sess = slave_info.sym.max_nb_sessions; + if (dev_max_sess != 0) { + if (max_nb_sess == 0 || dev_max_sess < max_nb_sess) + max_nb_sess = slave_info.sym.max_nb_sessions; + } + + /* Get the max headroom requirement among slave PMDs */ + headroom_sz = slave_info.min_mbuf_headroom_req > + headroom_sz ? + slave_info.min_mbuf_headroom_req : + headroom_sz; + + /* Get the max tailroom requirement among slave PMDs */ + tailroom_sz = slave_info.min_mbuf_tailroom_req > + tailroom_sz ? 
+ slave_info.min_mbuf_tailroom_req : + tailroom_sz; } dev_info->driver_id = dev->driver_id; dev_info->feature_flags = dev->feature_flags; dev_info->capabilities = sched_ctx->capabilities; dev_info->max_nb_queue_pairs = sched_ctx->max_nb_queue_pairs; - dev_info->sym.max_nb_sessions = max_nb_sessions; + dev_info->min_mbuf_headroom_req = headroom_sz; + dev_info->min_mbuf_tailroom_req = tailroom_sz; + dev_info->sym.max_nb_sessions = max_nb_sess; } /** Release queue pair */ @@ -381,7 +402,7 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, if (snprintf(name, RTE_CRYPTODEV_NAME_MAX_LEN, "CRYTO_SCHE PMD %u QP %u", dev->data->dev_id, qp_id) < 0) { - CS_LOG_ERR("Failed to create unique queue pair name"); + CR_SCHED_LOG(ERR, "Failed to create unique queue pair name"); return -EFAULT; } @@ -419,14 +440,14 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, */ ret = scheduler_attach_init_slave(dev); if (ret < 0) { - CS_LOG_ERR("Failed to attach slave"); + CR_SCHED_LOG(ERR, "Failed to attach slave"); scheduler_pmd_qp_release(dev, qp_id); return ret; } if (*sched_ctx->ops.config_queue_pair) { if ((*sched_ctx->ops.config_queue_pair)(dev, qp_id) < 0) { - CS_LOG_ERR("Unable to configure queue pair"); + CR_SCHED_LOG(ERR, "Unable to configure queue pair"); return -1; } } @@ -434,22 +455,6 @@ scheduler_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, return 0; } -/** Start queue pair */ -static int -scheduler_pmd_qp_start(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - -/** Stop queue pair */ -static int -scheduler_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - /** Return the number of allocated queue pairs */ static uint32_t scheduler_pmd_qp_count(struct rte_cryptodev *dev) @@ -458,7 +463,7 @@ scheduler_pmd_qp_count(struct rte_cryptodev *dev) } static uint32_t -scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused) +scheduler_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) { struct scheduler_ctx *sched_ctx = dev->data->dev_private; uint8_t i = 0; @@ -468,7 +473,7 @@ scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused) for (i = 0; i < sched_ctx->nb_slaves; i++) { uint8_t slave_dev_id = sched_ctx->slaves[i].dev_id; struct rte_cryptodev *dev = &rte_cryptodevs[slave_dev_id]; - uint32_t priv_sess_size = (*dev->dev_ops->session_get_size)(dev); + uint32_t priv_sess_size = (*dev->dev_ops->sym_session_get_size)(dev); if (max_priv_sess_size < priv_sess_size) max_priv_sess_size = priv_sess_size; @@ -478,7 +483,7 @@ scheduler_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused) } static int -scheduler_pmd_session_configure(struct rte_cryptodev *dev, +scheduler_pmd_sym_session_configure(struct rte_cryptodev *dev, struct rte_crypto_sym_xform *xform, struct rte_cryptodev_sym_session *sess, struct rte_mempool *mempool) @@ -493,7 +498,7 @@ scheduler_pmd_session_configure(struct rte_cryptodev *dev, ret = rte_cryptodev_sym_session_init(slave->dev_id, sess, xform, mempool); if (ret < 0) { - CS_LOG_ERR("unabled to config sym session"); + CR_SCHED_LOG(ERR, "unable to config sym session"); return ret; } } @@ -503,7 +508,7 @@ scheduler_pmd_session_configure(struct rte_cryptodev *dev, /** Clear the memory of session so it doesn't leave key material behind */ static void -scheduler_pmd_session_clear(struct rte_cryptodev *dev, +scheduler_pmd_sym_session_clear(struct rte_cryptodev *dev, struct 
rte_cryptodev_sym_session *sess) { struct scheduler_ctx *sched_ctx = dev->data->dev_private; @@ -530,13 +535,11 @@ struct rte_cryptodev_ops scheduler_pmd_ops = { .queue_pair_setup = scheduler_pmd_qp_setup, .queue_pair_release = scheduler_pmd_qp_release, - .queue_pair_start = scheduler_pmd_qp_start, - .queue_pair_stop = scheduler_pmd_qp_stop, .queue_pair_count = scheduler_pmd_qp_count, - .session_get_size = scheduler_pmd_session_get_size, - .session_configure = scheduler_pmd_session_configure, - .session_clear = scheduler_pmd_session_clear, + .sym_session_get_size = scheduler_pmd_sym_session_get_size, + .sym_session_configure = scheduler_pmd_sym_session_configure, + .sym_session_clear = scheduler_pmd_sym_session_clear, }; struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops = &scheduler_pmd_ops; diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h index dd7ca5a4..d5e602a2 100644 --- a/drivers/crypto/scheduler/scheduler_pmd_private.h +++ b/drivers/crypto/scheduler/scheduler_pmd_private.h @@ -12,25 +12,11 @@ #define PER_SLAVE_BUFF_SIZE (256) -#define CS_LOG_ERR(fmt, args...) \ - RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), \ - __func__, __LINE__, ## args) - -#ifdef RTE_LIBRTE_CRYPTO_SCHEDULER_DEBUG -#define CS_LOG_INFO(fmt, args...) \ - RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), \ - __func__, __LINE__, ## args) - -#define CS_LOG_DBG(fmt, args...) \ - RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD), \ - __func__, __LINE__, ## args) -#else -#define CS_LOG_INFO(fmt, args...) -#define CS_LOG_DBG(fmt, args...) -#endif +extern int scheduler_logtype_driver; + +#define CR_SCHED_LOG(level, fmt, args...) 
\ + rte_log(RTE_LOG_ ## level, scheduler_logtype_driver, \ + "%s() line %u: "fmt "\n", __func__, __LINE__, ##args) struct scheduler_slave { uint8_t dev_id; @@ -60,7 +46,7 @@ struct scheduler_ctx { char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN]; char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN]; - uint16_t wc_pool[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES]; + uint16_t wc_pool[RTE_MAX_LCORE]; uint16_t nb_wc; char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES]; diff --git a/drivers/crypto/scheduler/scheduler_roundrobin.c b/drivers/crypto/scheduler/scheduler_roundrobin.c index c6e03e21..c7082a64 100644 --- a/drivers/crypto/scheduler/scheduler_roundrobin.c +++ b/drivers/crypto/scheduler/scheduler_roundrobin.c @@ -175,7 +175,7 @@ scheduler_config_qp(struct rte_cryptodev *dev, uint16_t qp_id) rr_qp_ctx = rte_zmalloc_socket(NULL, sizeof(*rr_qp_ctx), 0, rte_socket_id()); if (!rr_qp_ctx) { - CS_LOG_ERR("failed allocate memory for private queue pair"); + CR_SCHED_LOG(ERR, "failed allocate memory for private queue pair"); return -ENOMEM; } diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c index 27af69f2..a17536b7 100644 --- a/drivers/crypto/snow3g/rte_snow3g_pmd.c +++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2016-2017 Intel Corporation + * Copyright(c) 2016-2018 Intel Corporation */ #include @@ -79,7 +79,7 @@ snow3g_set_session_parameters(struct snow3g_session *sess, break; case SNOW3G_OP_NOT_SUPPORTED: default: - SNOW3G_LOG_ERR("Unsupported operation chain order parameter"); + SNOW3G_LOG(ERR, "Unsupported operation chain order parameter"); return -ENOTSUP; } @@ -89,7 +89,7 @@ snow3g_set_session_parameters(struct snow3g_session *sess, return -ENOTSUP; if (cipher_xform->cipher.iv.length != SNOW3G_IV_LENGTH) { - SNOW3G_LOG_ERR("Wrong IV length"); + SNOW3G_LOG(ERR, "Wrong IV length"); return -EINVAL; } sess->cipher_iv_offset = cipher_xform->cipher.iv.offset; @@ -105,14 +105,14 @@ snow3g_set_session_parameters(struct snow3g_session *sess, return -ENOTSUP; if (auth_xform->auth.digest_length != SNOW3G_DIGEST_LENGTH) { - SNOW3G_LOG_ERR("Wrong digest length"); + SNOW3G_LOG(ERR, "Wrong digest length"); return -EINVAL; } sess->auth_op = auth_xform->auth.op; if (auth_xform->auth.iv.length != SNOW3G_IV_LENGTH) { - SNOW3G_LOG_ERR("Wrong IV length"); + SNOW3G_LOG(ERR, "Wrong IV length"); return -EINVAL; } sess->auth_iv_offset = auth_xform->auth.iv.offset; @@ -137,7 +137,7 @@ snow3g_get_session(struct snow3g_qp *qp, struct rte_crypto_op *op) if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { if (likely(op->sym->session != NULL)) sess = (struct snow3g_session *) - get_session_private_data( + get_sym_session_private_data( op->sym->session, cryptodev_driver_id); } else { @@ -159,8 +159,8 @@ snow3g_get_session(struct snow3g_qp *qp, struct rte_crypto_op *op) sess = NULL; } op->sym->session = (struct rte_cryptodev_sym_session *)_sess; - set_session_private_data(op->sym->session, cryptodev_driver_id, - _sess_private_data); + set_sym_session_private_data(op->sym->session, + cryptodev_driver_id, _sess_private_data); } if (unlikely(sess == NULL)) @@ -216,7 +216,7 @@ process_snow3g_cipher_op_bit(struct rte_crypto_op *op, src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *); if (op->sym->m_dst == NULL) { op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; - SNOW3G_LOG_ERR("bit-level in-place not supported\n"); + SNOW3G_LOG(ERR, "bit-level in-place not supported\n"); return 0; } dst = 
rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *); @@ -246,7 +246,7 @@ process_snow3g_hash_op(struct snow3g_qp *qp, struct rte_crypto_op **ops, /* Data must be byte aligned */ if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) { ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; - SNOW3G_LOG_ERR("Offset"); + SNOW3G_LOG(ERR, "Offset"); break; } @@ -295,7 +295,7 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session, (ops[i]->sym->m_dst != NULL && !rte_pktmbuf_is_contiguous( ops[i]->sym->m_dst))) { - SNOW3G_LOG_ERR("PMD supports only contiguous mbufs, " + SNOW3G_LOG(ERR, "PMD supports only contiguous mbufs, " "op (%p) provides noncontiguous mbuf as " "source/destination buffer.\n", ops[i]); ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; @@ -339,7 +339,7 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session, if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { memset(session, 0, sizeof(struct snow3g_session)); memset(ops[i]->sym->session, 0, - rte_cryptodev_get_header_session_size()); + rte_cryptodev_sym_get_header_session_size()); rte_mempool_put(qp->sess_mp, session); rte_mempool_put(qp->sess_mp, ops[i]->sym->session); ops[i]->sym->session = NULL; @@ -537,7 +537,7 @@ cryptodev_snow3g_create(const char *name, dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params); if (dev == NULL) { - SNOW3G_LOG_ERR("failed to create cryptodev vdev"); + SNOW3G_LOG(ERR, "failed to create cryptodev vdev"); goto init_error; } @@ -555,11 +555,10 @@ cryptodev_snow3g_create(const char *name, internals = dev->data->dev_private; internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs; - internals->max_nb_sessions = init_params->max_nb_sessions; return 0; init_error: - SNOW3G_LOG_ERR("driver %s: cryptodev_snow3g_create failed", + SNOW3G_LOG(ERR, "driver %s: cryptodev_snow3g_create failed", init_params->name); cryptodev_snow3g_remove(vdev); @@ -573,8 +572,7 @@ cryptodev_snow3g_probe(struct rte_vdev_device *vdev) "", sizeof(struct snow3g_private), rte_socket_id(), - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS, - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS + RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS }; const char *name; const char *input_args; @@ -617,7 +615,11 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd_drv); RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_SNOW3G_PMD, cryptodev_snow3g_pmd); RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD, "max_nb_queue_pairs= " - "max_nb_sessions= " "socket_id="); -RTE_PMD_REGISTER_CRYPTO_DRIVER(snow3g_crypto_drv, cryptodev_snow3g_pmd_drv, - cryptodev_driver_id); +RTE_PMD_REGISTER_CRYPTO_DRIVER(snow3g_crypto_drv, + cryptodev_snow3g_pmd_drv.driver, cryptodev_driver_id); + +RTE_INIT(snow3g_init_log) +{ + snow3g_logtype_driver = rte_log_register("pmd.crypto.snow3g"); +} diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c index f60b4759..cfbc9522 100644 --- a/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c +++ b/drivers/crypto/snow3g/rte_snow3g_pmd_ops.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2016-2017 Intel Corporation + * Copyright(c) 2016-2018 Intel Corporation */ #include @@ -130,7 +130,8 @@ snow3g_pmd_info_get(struct rte_cryptodev *dev, if (dev_info != NULL) { dev_info->driver_id = dev->driver_id; dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs; - dev_info->sym.max_nb_sessions = internals->max_nb_sessions; + /* No limit of number of sessions */ + dev_info->sym.max_nb_sessions = 0; dev_info->feature_flags = 
dev->feature_flags; dev_info->capabilities = snow3g_pmd_capabilities; } @@ -172,13 +173,13 @@ snow3g_pmd_qp_create_processed_ops_ring(struct snow3g_qp *qp, r = rte_ring_lookup(qp->name); if (r) { if (rte_ring_get_size(r) >= ring_size) { - SNOW3G_LOG_INFO("Reusing existing ring %s" + SNOW3G_LOG(INFO, "Reusing existing ring %s" " for processed packets", qp->name); return r; } - SNOW3G_LOG_ERR("Unable to reuse existing ring %s" + SNOW3G_LOG(ERR, "Unable to reuse existing ring %s" " for processed packets", qp->name); return NULL; @@ -230,22 +231,6 @@ qp_setup_cleanup: return -1; } -/** Start queue pair */ -static int -snow3g_pmd_qp_start(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - -/** Stop queue pair */ -static int -snow3g_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - /** Return the number of allocated queue pairs */ static uint32_t snow3g_pmd_qp_count(struct rte_cryptodev *dev) @@ -255,14 +240,14 @@ snow3g_pmd_qp_count(struct rte_cryptodev *dev) /** Returns the size of the SNOW 3G session structure */ static unsigned -snow3g_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused) +snow3g_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) { return sizeof(struct snow3g_session); } /** Configure a SNOW 3G session from a crypto xform chain */ static int -snow3g_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, +snow3g_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused, struct rte_crypto_sym_xform *xform, struct rte_cryptodev_sym_session *sess, struct rte_mempool *mempool) @@ -271,26 +256,26 @@ snow3g_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, int ret; if (unlikely(sess == NULL)) { - SNOW3G_LOG_ERR("invalid session struct"); + SNOW3G_LOG(ERR, "invalid session struct"); return -EINVAL; } if (rte_mempool_get(mempool, &sess_private_data)) { - CDEV_LOG_ERR( + SNOW3G_LOG(ERR, "Couldn't get object from session mempool"); return -ENOMEM; } ret = snow3g_set_session_parameters(sess_private_data, xform); if (ret != 0) { - SNOW3G_LOG_ERR("failed configure session parameters"); + SNOW3G_LOG(ERR, "failed configure session parameters"); /* Return session to mempool */ rte_mempool_put(mempool, sess_private_data); return ret; } - set_session_private_data(sess, dev->driver_id, + set_sym_session_private_data(sess, dev->driver_id, sess_private_data); return 0; @@ -298,17 +283,17 @@ snow3g_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, /** Clear the memory of session so it doesn't leave key material behind */ static void -snow3g_pmd_session_clear(struct rte_cryptodev *dev, +snow3g_pmd_sym_session_clear(struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess) { uint8_t index = dev->driver_id; - void *sess_priv = get_session_private_data(sess, index); + void *sess_priv = get_sym_session_private_data(sess, index); /* Zero out the whole structure */ if (sess_priv) { memset(sess_priv, 0, sizeof(struct snow3g_session)); struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); - set_session_private_data(sess, index, NULL); + set_sym_session_private_data(sess, index, NULL); rte_mempool_put(sess_mp, sess_priv); } } @@ -326,13 +311,11 @@ struct rte_cryptodev_ops snow3g_pmd_ops = { .queue_pair_setup = snow3g_pmd_qp_setup, .queue_pair_release = snow3g_pmd_qp_release, - .queue_pair_start = snow3g_pmd_qp_start, - .queue_pair_stop = snow3g_pmd_qp_stop, .queue_pair_count = snow3g_pmd_qp_count, - 
.session_get_size = snow3g_pmd_session_get_size, - .session_configure = snow3g_pmd_session_configure, - .session_clear = snow3g_pmd_session_clear + .sym_session_get_size = snow3g_pmd_sym_session_get_size, + .sym_session_configure = snow3g_pmd_sym_session_configure, + .sym_session_clear = snow3g_pmd_sym_session_clear }; struct rte_cryptodev_ops *rte_snow3g_pmd_ops = &snow3g_pmd_ops; diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd_private.h b/drivers/crypto/snow3g/rte_snow3g_pmd_private.h index eea900e0..b7807b62 100644 --- a/drivers/crypto/snow3g/rte_snow3g_pmd_private.h +++ b/drivers/crypto/snow3g/rte_snow3g_pmd_private.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2016-2017 Intel Corporation + * Copyright(c) 2016-2018 Intel Corporation */ #ifndef _RTE_SNOW3G_PMD_PRIVATE_H_ @@ -10,25 +10,13 @@ #define CRYPTODEV_NAME_SNOW3G_PMD crypto_snow3g /**< SNOW 3G PMD device name */ -#define SNOW3G_LOG_ERR(fmt, args...) \ - RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), \ - __func__, __LINE__, ## args) - -#ifdef RTE_LIBRTE_SNOW3G_DEBUG -#define SNOW3G_LOG_INFO(fmt, args...) \ - RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), \ - __func__, __LINE__, ## args) - -#define SNOW3G_LOG_DBG(fmt, args...) \ - RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_SNOW3G_PMD), \ - __func__, __LINE__, ## args) -#else -#define SNOW3G_LOG_INFO(fmt, args...) -#define SNOW3G_LOG_DBG(fmt, args...) -#endif +/** SNOW 3G PMD LOGTYPE DRIVER */ +int snow3g_logtype_driver; + +#define SNOW3G_LOG(level, fmt, ...) \ + rte_log(RTE_LOG_ ## level, snow3g_logtype_driver, \ + "%s() line %u: " fmt "\n", __func__, __LINE__, \ + ## __VA_ARGS__) #define SNOW3G_DIGEST_LENGTH 4 @@ -36,8 +24,6 @@ struct snow3g_private { unsigned max_nb_queue_pairs; /**< Max number of queue pairs supported by device */ - unsigned max_nb_sessions; - /**< Max number of sessions supported by device */ }; /** SNOW 3G buffer queue pair */ diff --git a/drivers/crypto/virtio/Makefile b/drivers/crypto/virtio/Makefile new file mode 100644 index 00000000..be7b828f --- /dev/null +++ b/drivers/crypto/virtio/Makefile @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD. + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_virtio_crypto.a + +# +# include virtio_crypto.h +# +CFLAGS += -I$(RTE_SDK)/lib/librte_vhost +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +EXPORT_MAP := rte_pmd_virtio_crypto_version.map + +LIBABIVER := 1 + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtqueue.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio_pci.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio_cryptodev.c + +# this lib depends upon: +LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool +LDLIBS += -lrte_cryptodev +LDLIBS += -lrte_pci -lrte_bus_pci + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/crypto/virtio/meson.build b/drivers/crypto/virtio/meson.build new file mode 100644 index 00000000..b15b3f9f --- /dev/null +++ b/drivers/crypto/virtio/meson.build @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD. 
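# Note: virtio_crypto.h is provided by lib/librte_vhost, hence the extra
# include path below (the Makefile above adds the same directory via CFLAGS).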
+ +includes += include_directories('../../../lib/librte_vhost') +deps += 'bus_pci' +name = 'virtio_crypto' +sources = files('virtio_cryptodev.c', 'virtio_pci.c', + 'virtio_rxtx.c', 'virtqueue.c') diff --git a/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map b/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map new file mode 100644 index 00000000..de8e412f --- /dev/null +++ b/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map @@ -0,0 +1,3 @@ +DPDK_18.05 { + local: *; +}; diff --git a/drivers/crypto/virtio/virtio_crypto_algs.h b/drivers/crypto/virtio/virtio_crypto_algs.h new file mode 100644 index 00000000..4c44af37 --- /dev/null +++ b/drivers/crypto/virtio/virtio_crypto_algs.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD. + */ + +#ifndef _VIRTIO_CRYPTO_ALGS_H_ +#define _VIRTIO_CRYPTO_ALGS_H_ + +#include + +#include "virtio_crypto.h" + +struct virtio_crypto_session { + uint64_t session_id; + + struct { + uint16_t offset; + uint16_t length; + } iv; + + struct { + uint32_t length; + phys_addr_t phys_addr; + } aad; + + struct virtio_crypto_op_ctrl_req ctrl; +}; + +#endif /* _VIRTIO_CRYPTO_ALGS_H_ */ diff --git a/drivers/crypto/virtio/virtio_crypto_capabilities.h b/drivers/crypto/virtio/virtio_crypto_capabilities.h new file mode 100644 index 00000000..03c30dee --- /dev/null +++ b/drivers/crypto/virtio/virtio_crypto_capabilities.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD. + */ + +#ifndef _VIRTIO_CRYPTO_CAPABILITIES_H_ +#define _VIRTIO_CRYPTO_CAPABILITIES_H_ + +#define VIRTIO_SYM_CAPABILITIES \ + { /* SHA1 HMAC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \ + {.auth = { \ + .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \ + .block_size = 64, \ + .key_size = { \ + .min = 1, \ + .max = 64, \ + .increment = 1 \ + }, \ + .digest_size = { \ + .min = 1, \ + .max = 20, \ + .increment = 1 \ + }, \ + .iv_size = { 0 } \ + }, } \ + }, } \ + }, \ + { /* AES CBC */ \ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \ + {.sym = { \ + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \ + {.cipher = { \ + .algo = RTE_CRYPTO_CIPHER_AES_CBC, \ + .block_size = 16, \ + .key_size = { \ + .min = 16, \ + .max = 32, \ + .increment = 8 \ + }, \ + .iv_size = { \ + .min = 16, \ + .max = 16, \ + .increment = 0 \ + } \ + }, } \ + }, } \ + } + +#endif /* _VIRTIO_CRYPTO_CAPABILITIES_H_ */ diff --git a/drivers/crypto/virtio/virtio_cryptodev.c b/drivers/crypto/virtio/virtio_cryptodev.c new file mode 100644 index 00000000..568b5a40 --- /dev/null +++ b/drivers/crypto/virtio/virtio_cryptodev.c @@ -0,0 +1,1505 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD. 
+ */ +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "virtio_cryptodev.h" +#include "virtqueue.h" +#include "virtio_crypto_algs.h" +#include "virtio_crypto_capabilities.h" + +int virtio_crypto_logtype_init; +int virtio_crypto_logtype_session; +int virtio_crypto_logtype_rx; +int virtio_crypto_logtype_tx; +int virtio_crypto_logtype_driver; + +static int virtio_crypto_dev_configure(struct rte_cryptodev *dev, + struct rte_cryptodev_config *config); +static int virtio_crypto_dev_start(struct rte_cryptodev *dev); +static void virtio_crypto_dev_stop(struct rte_cryptodev *dev); +static int virtio_crypto_dev_close(struct rte_cryptodev *dev); +static void virtio_crypto_dev_info_get(struct rte_cryptodev *dev, + struct rte_cryptodev_info *dev_info); +static void virtio_crypto_dev_stats_get(struct rte_cryptodev *dev, + struct rte_cryptodev_stats *stats); +static void virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev); +static int virtio_crypto_qp_setup(struct rte_cryptodev *dev, + uint16_t queue_pair_id, + const struct rte_cryptodev_qp_conf *qp_conf, + int socket_id, + struct rte_mempool *session_pool); +static int virtio_crypto_qp_release(struct rte_cryptodev *dev, + uint16_t queue_pair_id); +static void virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev); +static unsigned int virtio_crypto_sym_get_session_private_size( + struct rte_cryptodev *dev); +static void virtio_crypto_sym_clear_session(struct rte_cryptodev *dev, + struct rte_cryptodev_sym_session *sess); +static int virtio_crypto_sym_configure_session(struct rte_cryptodev *dev, + struct rte_crypto_sym_xform *xform, + struct rte_cryptodev_sym_session *session, + struct rte_mempool *mp); + +/* + * The set of PCI devices this driver supports + */ +static const struct rte_pci_id pci_id_virtio_crypto_map[] = { + { RTE_PCI_DEVICE(VIRTIO_CRYPTO_PCI_VENDORID, + VIRTIO_CRYPTO_PCI_DEVICEID) }, + { .vendor_id = 0, /* sentinel */ }, +}; + +static const struct rte_cryptodev_capabilities virtio_capabilities[] = { + VIRTIO_SYM_CAPABILITIES, + RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() +}; + +uint8_t cryptodev_virtio_driver_id; + +#define NUM_ENTRY_SYM_CREATE_SESSION 4 + +static int +virtio_crypto_send_command(struct virtqueue *vq, + struct virtio_crypto_op_ctrl_req *ctrl, uint8_t *cipher_key, + uint8_t *auth_key, struct virtio_crypto_session *session) +{ + uint8_t idx = 0; + uint8_t needed = 1; + uint32_t head = 0; + uint32_t len_cipher_key = 0; + uint32_t len_auth_key = 0; + uint32_t len_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req); + uint32_t len_session_input = sizeof(struct virtio_crypto_session_input); + uint32_t len_total = 0; + uint32_t input_offset = 0; + void *virt_addr_started = NULL; + phys_addr_t phys_addr_started; + struct vring_desc *desc; + uint32_t desc_offset; + struct virtio_crypto_session_input *input; + int ret; + + PMD_INIT_FUNC_TRACE(); + + if (session == NULL) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("session is NULL."); + return -EINVAL; + } + /* cipher only is supported, it is available if auth_key is NULL */ + if (!cipher_key) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("cipher key is NULL."); + return -EINVAL; + } + + head = vq->vq_desc_head_idx; + VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_desc_head_idx = %d, vq = %p", + head, vq); + + if (vq->vq_free_cnt < needed) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("Not enough entry"); + return -ENOSPC; + } + + /* calculate the length of cipher key */ + if (cipher_key) { + switch (ctrl->u.sym_create_session.op_type) { + case 
VIRTIO_CRYPTO_SYM_OP_CIPHER: + len_cipher_key + = ctrl->u.sym_create_session.u.cipher + .para.keylen; + break; + case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING: + len_cipher_key + = ctrl->u.sym_create_session.u.chain + .para.cipher_param.keylen; + break; + default: + VIRTIO_CRYPTO_SESSION_LOG_ERR("invalid op type"); + return -EINVAL; + } + } + + /* calculate the length of auth key */ + if (auth_key) { + len_auth_key = + ctrl->u.sym_create_session.u.chain.para.u.mac_param + .auth_key_len; + } + + /* + * malloc memory to store indirect vring_desc entries, including + * ctrl request, cipher key, auth key, session input and desc vring + */ + desc_offset = len_ctrl_req + len_cipher_key + len_auth_key + + len_session_input; + virt_addr_started = rte_malloc(NULL, + desc_offset + NUM_ENTRY_SYM_CREATE_SESSION + * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE); + if (virt_addr_started == NULL) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap memory"); + return -ENOSPC; + } + phys_addr_started = rte_malloc_virt2iova(virt_addr_started); + + /* address to store indirect vring desc entries */ + desc = (struct vring_desc *) + ((uint8_t *)virt_addr_started + desc_offset); + + /* ctrl req part */ + memcpy(virt_addr_started, ctrl, len_ctrl_req); + desc[idx].addr = phys_addr_started; + desc[idx].len = len_ctrl_req; + desc[idx].flags = VRING_DESC_F_NEXT; + desc[idx].next = idx + 1; + idx++; + len_total += len_ctrl_req; + input_offset += len_ctrl_req; + + /* cipher key part */ + if (len_cipher_key > 0) { + memcpy((uint8_t *)virt_addr_started + len_total, + cipher_key, len_cipher_key); + + desc[idx].addr = phys_addr_started + len_total; + desc[idx].len = len_cipher_key; + desc[idx].flags = VRING_DESC_F_NEXT; + desc[idx].next = idx + 1; + idx++; + len_total += len_cipher_key; + input_offset += len_cipher_key; + } + + /* auth key part */ + if (len_auth_key > 0) { + memcpy((uint8_t *)virt_addr_started + len_total, + auth_key, len_auth_key); + + desc[idx].addr = phys_addr_started + len_total; + desc[idx].len = len_auth_key; + desc[idx].flags = VRING_DESC_F_NEXT; + desc[idx].next = idx + 1; + idx++; + len_total += len_auth_key; + input_offset += len_auth_key; + } + + /* input part */ + input = (struct virtio_crypto_session_input *) + ((uint8_t *)virt_addr_started + input_offset); + input->status = VIRTIO_CRYPTO_ERR; + input->session_id = ~0ULL; + desc[idx].addr = phys_addr_started + len_total; + desc[idx].len = len_session_input; + desc[idx].flags = VRING_DESC_F_WRITE; + idx++; + + /* use a single desc entry */ + vq->vq_ring.desc[head].addr = phys_addr_started + desc_offset; + vq->vq_ring.desc[head].len = idx * sizeof(struct vring_desc); + vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT; + vq->vq_free_cnt--; + + vq->vq_desc_head_idx = vq->vq_ring.desc[head].next; + + vq_update_avail_ring(vq, head); + vq_update_avail_idx(vq); + + VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d", + vq->vq_queue_index); + + virtqueue_notify(vq); + + rte_rmb(); + while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) { + rte_rmb(); + usleep(100); + } + + while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) { + uint32_t idx, desc_idx, used_idx; + struct vring_used_elem *uep; + + used_idx = (uint32_t)(vq->vq_used_cons_idx + & (vq->vq_nentries - 1)); + uep = &vq->vq_ring.used->ring[used_idx]; + idx = (uint32_t) uep->id; + desc_idx = idx; + + while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) { + desc_idx = vq->vq_ring.desc[desc_idx].next; + vq->vq_free_cnt++; + } + + vq->vq_ring.desc[desc_idx].next = 
vq->vq_desc_head_idx; + vq->vq_desc_head_idx = idx; + + vq->vq_used_cons_idx++; + vq->vq_free_cnt++; + } + + VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n" + "vq->vq_desc_head_idx=%d", + vq->vq_free_cnt, vq->vq_desc_head_idx); + + /* get the result */ + if (input->status != VIRTIO_CRYPTO_OK) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("Something wrong on backend! " + "status=%u, session_id=%" PRIu64 "", + input->status, input->session_id); + rte_free(virt_addr_started); + ret = -1; + } else { + session->session_id = input->session_id; + + VIRTIO_CRYPTO_SESSION_LOG_INFO("Create session successfully, " + "session_id=%" PRIu64 "", input->session_id); + rte_free(virt_addr_started); + ret = 0; + } + + return ret; +} + +void +virtio_crypto_queue_release(struct virtqueue *vq) +{ + struct virtio_crypto_hw *hw; + + PMD_INIT_FUNC_TRACE(); + + if (vq) { + hw = vq->hw; + /* Select and deactivate the queue */ + VTPCI_OPS(hw)->del_queue(hw, vq); + + rte_memzone_free(vq->mz); + rte_mempool_free(vq->mpool); + rte_free(vq); + } +} + +#define MPOOL_MAX_NAME_SZ 32 + +int +virtio_crypto_queue_setup(struct rte_cryptodev *dev, + int queue_type, + uint16_t vtpci_queue_idx, + uint16_t nb_desc, + int socket_id, + struct virtqueue **pvq) +{ + char vq_name[VIRTQUEUE_MAX_NAME_SZ]; + char mpool_name[MPOOL_MAX_NAME_SZ]; + const struct rte_memzone *mz; + unsigned int vq_size, size; + struct virtio_crypto_hw *hw = dev->data->dev_private; + struct virtqueue *vq = NULL; + uint32_t i = 0; + uint32_t j; + + PMD_INIT_FUNC_TRACE(); + + VIRTIO_CRYPTO_INIT_LOG_DBG("setting up queue: %u", vtpci_queue_idx); + + /* + * Read the virtqueue size from the Queue Size field + * Always power of 2 and if 0 virtqueue does not exist + */ + vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx); + if (vq_size == 0) { + VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue does not exist"); + return -EINVAL; + } + VIRTIO_CRYPTO_INIT_LOG_DBG("vq_size: %u", vq_size); + + if (!rte_is_power_of_2(vq_size)) { + VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue size is not powerof 2"); + return -EINVAL; + } + + if (queue_type == VTCRYPTO_DATAQ) { + snprintf(vq_name, sizeof(vq_name), "dev%d_dataqueue%d", + dev->data->dev_id, vtpci_queue_idx); + snprintf(mpool_name, sizeof(mpool_name), + "dev%d_dataqueue%d_mpool", + dev->data->dev_id, vtpci_queue_idx); + } else if (queue_type == VTCRYPTO_CTRLQ) { + snprintf(vq_name, sizeof(vq_name), "dev%d_controlqueue", + dev->data->dev_id); + snprintf(mpool_name, sizeof(mpool_name), + "dev%d_controlqueue_mpool", + dev->data->dev_id); + } + size = RTE_ALIGN_CEIL(sizeof(*vq) + + vq_size * sizeof(struct vq_desc_extra), + RTE_CACHE_LINE_SIZE); + vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE, + socket_id); + if (vq == NULL) { + VIRTIO_CRYPTO_INIT_LOG_ERR("Can not allocate virtqueue"); + return -ENOMEM; + } + + if (queue_type == VTCRYPTO_DATAQ) { + /* pre-allocate a mempool and use it in the data plane to + * improve performance + */ + vq->mpool = rte_mempool_lookup(mpool_name); + if (vq->mpool == NULL) + vq->mpool = rte_mempool_create(mpool_name, + vq_size, + sizeof(struct virtio_crypto_op_cookie), + RTE_CACHE_LINE_SIZE, 0, + NULL, NULL, NULL, NULL, socket_id, + 0); + if (!vq->mpool) { + VIRTIO_CRYPTO_DRV_LOG_ERR("Virtio Crypto PMD " + "Cannot create mempool"); + goto mpool_create_err; + } + for (i = 0; i < vq_size; i++) { + vq->vq_descx[i].cookie = + rte_zmalloc("crypto PMD op cookie pointer", + sizeof(struct virtio_crypto_op_cookie), + RTE_CACHE_LINE_SIZE); + if (vq->vq_descx[i].cookie == NULL) { + VIRTIO_CRYPTO_DRV_LOG_ERR("Failed to " 
+ "alloc mem for cookie"); + goto cookie_alloc_err; + } + } + } + + vq->hw = hw; + vq->dev_id = dev->data->dev_id; + vq->vq_queue_index = vtpci_queue_idx; + vq->vq_nentries = vq_size; + + /* + * Using part of the vring entries is permitted, but the maximum + * is vq_size + */ + if (nb_desc == 0 || nb_desc > vq_size) + nb_desc = vq_size; + vq->vq_free_cnt = nb_desc; + + /* + * Reserve a memzone for vring elements + */ + size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN); + vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN); + VIRTIO_CRYPTO_INIT_LOG_DBG("%s vring_size: %d, rounded_vring_size: %d", + (queue_type == VTCRYPTO_DATAQ) ? "dataq" : "ctrlq", + size, vq->vq_ring_size); + + mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size, + socket_id, 0, VIRTIO_PCI_VRING_ALIGN); + if (mz == NULL) { + if (rte_errno == EEXIST) + mz = rte_memzone_lookup(vq_name); + if (mz == NULL) { + VIRTIO_CRYPTO_INIT_LOG_ERR("not enough memory"); + goto mz_reserve_err; + } + } + + /* + * Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit, + * and only accepts 32 bit page frame number. + * Check if the allocated physical memory exceeds 16TB. + */ + if ((mz->phys_addr + vq->vq_ring_size - 1) + >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) { + VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be " + "above 16TB!"); + goto vring_addr_err; + } + + memset(mz->addr, 0, sizeof(mz->len)); + vq->mz = mz; + vq->vq_ring_mem = mz->phys_addr; + vq->vq_ring_virt_mem = mz->addr; + VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_mem(physical): 0x%"PRIx64, + (uint64_t)mz->phys_addr); + VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_virt_mem: 0x%"PRIx64, + (uint64_t)(uintptr_t)mz->addr); + + *pvq = vq; + + return 0; + +vring_addr_err: + rte_memzone_free(mz); +mz_reserve_err: +cookie_alloc_err: + rte_mempool_free(vq->mpool); + if (i != 0) { + for (j = 0; j < i; j++) + rte_free(vq->vq_descx[j].cookie); + } +mpool_create_err: + rte_free(vq); + return -ENOMEM; +} + +static int +virtio_crypto_ctrlq_setup(struct rte_cryptodev *dev, uint16_t queue_idx) +{ + int ret; + struct virtqueue *vq; + struct virtio_crypto_hw *hw = dev->data->dev_private; + + /* if virtio device has started, do not touch the virtqueues */ + if (dev->data->dev_started) + return 0; + + PMD_INIT_FUNC_TRACE(); + + ret = virtio_crypto_queue_setup(dev, VTCRYPTO_CTRLQ, queue_idx, + 0, SOCKET_ID_ANY, &vq); + if (ret < 0) { + VIRTIO_CRYPTO_INIT_LOG_ERR("control vq initialization failed"); + return ret; + } + + hw->cvq = vq; + + return 0; +} + +static void +virtio_crypto_free_queues(struct rte_cryptodev *dev) +{ + unsigned int i; + struct virtio_crypto_hw *hw = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + /* control queue release */ + virtio_crypto_queue_release(hw->cvq); + + /* data queue release */ + for (i = 0; i < hw->max_dataqueues; i++) + virtio_crypto_queue_release(dev->data->queue_pairs[i]); +} + +static int +virtio_crypto_dev_close(struct rte_cryptodev *dev __rte_unused) +{ + return 0; +} + +/* + * dev_ops for virtio, bare necessities for basic operation + */ +static struct rte_cryptodev_ops virtio_crypto_dev_ops = { + /* Device related operations */ + .dev_configure = virtio_crypto_dev_configure, + .dev_start = virtio_crypto_dev_start, + .dev_stop = virtio_crypto_dev_stop, + .dev_close = virtio_crypto_dev_close, + .dev_infos_get = virtio_crypto_dev_info_get, + + .stats_get = virtio_crypto_dev_stats_get, + .stats_reset = virtio_crypto_dev_stats_reset, + + .queue_pair_setup = virtio_crypto_qp_setup, + .queue_pair_release = virtio_crypto_qp_release, + 
.queue_pair_count = NULL, + + /* Crypto related operations */ + .sym_session_get_size = virtio_crypto_sym_get_session_private_size, + .sym_session_configure = virtio_crypto_sym_configure_session, + .sym_session_clear = virtio_crypto_sym_clear_session +}; + +static void +virtio_crypto_update_stats(struct rte_cryptodev *dev, + struct rte_cryptodev_stats *stats) +{ + unsigned int i; + struct virtio_crypto_hw *hw = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + if (stats == NULL) { + VIRTIO_CRYPTO_DRV_LOG_ERR("invalid pointer"); + return; + } + + for (i = 0; i < hw->max_dataqueues; i++) { + const struct virtqueue *data_queue + = dev->data->queue_pairs[i]; + if (data_queue == NULL) + continue; + + stats->enqueued_count += data_queue->packets_sent_total; + stats->enqueue_err_count += data_queue->packets_sent_failed; + + stats->dequeued_count += data_queue->packets_received_total; + stats->dequeue_err_count + += data_queue->packets_received_failed; + } +} + +static void +virtio_crypto_dev_stats_get(struct rte_cryptodev *dev, + struct rte_cryptodev_stats *stats) +{ + PMD_INIT_FUNC_TRACE(); + + virtio_crypto_update_stats(dev, stats); +} + +static void +virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev) +{ + unsigned int i; + struct virtio_crypto_hw *hw = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + for (i = 0; i < hw->max_dataqueues; i++) { + struct virtqueue *data_queue = dev->data->queue_pairs[i]; + if (data_queue == NULL) + continue; + + data_queue->packets_sent_total = 0; + data_queue->packets_sent_failed = 0; + + data_queue->packets_received_total = 0; + data_queue->packets_received_failed = 0; + } +} + +static int +virtio_crypto_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id, + const struct rte_cryptodev_qp_conf *qp_conf, + int socket_id, + struct rte_mempool *session_pool __rte_unused) +{ + int ret; + struct virtqueue *vq; + + PMD_INIT_FUNC_TRACE(); + + /* if virtio dev is started, do not touch the virtqueues */ + if (dev->data->dev_started) + return 0; + + ret = virtio_crypto_queue_setup(dev, VTCRYPTO_DATAQ, queue_pair_id, + qp_conf->nb_descriptors, socket_id, &vq); + if (ret < 0) { + VIRTIO_CRYPTO_INIT_LOG_ERR( + "virtio crypto data queue initialization failed\n"); + return ret; + } + + dev->data->queue_pairs[queue_pair_id] = vq; + + return 0; +} + +static int +virtio_crypto_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id) +{ + struct virtqueue *vq + = (struct virtqueue *)dev->data->queue_pairs[queue_pair_id]; + + PMD_INIT_FUNC_TRACE(); + + if (vq == NULL) { + VIRTIO_CRYPTO_DRV_LOG_DBG("vq already freed"); + return 0; + } + + virtio_crypto_queue_release(vq); + return 0; +} + +static int +virtio_negotiate_features(struct virtio_crypto_hw *hw, uint64_t req_features) +{ + uint64_t host_features; + + PMD_INIT_FUNC_TRACE(); + + /* Prepare guest_features: feature that driver wants to support */ + VIRTIO_CRYPTO_INIT_LOG_DBG("guest_features before negotiate = %" PRIx64, + req_features); + + /* Read device(host) feature bits */ + host_features = VTPCI_OPS(hw)->get_features(hw); + VIRTIO_CRYPTO_INIT_LOG_DBG("host_features before negotiate = %" PRIx64, + host_features); + + /* + * Negotiate features: Subset of device feature bits are written back + * guest feature bits. 
+ */ + hw->guest_features = req_features; + hw->guest_features = vtpci_cryptodev_negotiate_features(hw, + host_features); + VIRTIO_CRYPTO_INIT_LOG_DBG("features after negotiate = %" PRIx64, + hw->guest_features); + + if (hw->modern) { + if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) { + VIRTIO_CRYPTO_INIT_LOG_ERR( + "VIRTIO_F_VERSION_1 features is not enabled."); + return -1; + } + vtpci_cryptodev_set_status(hw, + VIRTIO_CONFIG_STATUS_FEATURES_OK); + if (!(vtpci_cryptodev_get_status(hw) & + VIRTIO_CONFIG_STATUS_FEATURES_OK)) { + VIRTIO_CRYPTO_INIT_LOG_ERR("failed to set FEATURES_OK " + "status!"); + return -1; + } + } + + hw->req_guest_features = req_features; + + return 0; +} + +/* reset device and renegotiate features if needed */ +static int +virtio_crypto_init_device(struct rte_cryptodev *cryptodev, + uint64_t req_features) +{ + struct virtio_crypto_hw *hw = cryptodev->data->dev_private; + struct virtio_crypto_config local_config; + struct virtio_crypto_config *config = &local_config; + + PMD_INIT_FUNC_TRACE(); + + /* Reset the device although not necessary at startup */ + vtpci_cryptodev_reset(hw); + + /* Tell the host we've noticed this device. */ + vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_ACK); + + /* Tell the host we've known how to drive the device. */ + vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER); + if (virtio_negotiate_features(hw, req_features) < 0) + return -1; + + /* Get status of the device */ + vtpci_read_cryptodev_config(hw, + offsetof(struct virtio_crypto_config, status), + &config->status, sizeof(config->status)); + if (config->status != VIRTIO_CRYPTO_S_HW_READY) { + VIRTIO_CRYPTO_DRV_LOG_ERR("accelerator hardware is " + "not ready"); + return -1; + } + + /* Get number of data queues */ + vtpci_read_cryptodev_config(hw, + offsetof(struct virtio_crypto_config, max_dataqueues), + &config->max_dataqueues, + sizeof(config->max_dataqueues)); + hw->max_dataqueues = config->max_dataqueues; + + VIRTIO_CRYPTO_INIT_LOG_DBG("hw->max_dataqueues=%d", + hw->max_dataqueues); + + return 0; +} + +/* + * This function is based on probe() function + * It returns 0 on success. 
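As a quick illustration of the negotiation above, the sketch below (standalone, not part of the patch) reduces it to the essential bit arithmetic: the driver's requested feature set is intersected with what the device offers, and a modern device must come out of that intersection with VIRTIO_F_VERSION_1 still set, mirroring the check in virtio_negotiate_features(). The host feature mask is invented for the example.

#include <stdint.h>
#include <stdio.h>

#define VIRTIO_F_VERSION_1 32

/* the negotiated set is simply the intersection of what the device
 * offers and what the driver requested */
static uint64_t negotiate(uint64_t host, uint64_t guest)
{
	return host & guest;
}

int main(void)
{
	uint64_t host = (1ULL << VIRTIO_F_VERSION_1) | (1ULL << 28);
	uint64_t req = 1ULL << VIRTIO_F_VERSION_1; /* the PMD requests only VERSION_1 */
	uint64_t feat = negotiate(host, req);

	/* a modern device must end up with VERSION_1 negotiated */
	printf("VERSION_1 negotiated: %d\n",
	       (feat & (1ULL << VIRTIO_F_VERSION_1)) != 0);
	return 0;
}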
+ */ +static int +crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev, + struct rte_cryptodev_pmd_init_params *init_params) +{ + struct rte_cryptodev *cryptodev; + struct virtio_crypto_hw *hw; + + PMD_INIT_FUNC_TRACE(); + + cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device, + init_params); + if (cryptodev == NULL) + return -ENODEV; + + cryptodev->driver_id = cryptodev_virtio_driver_id; + cryptodev->dev_ops = &virtio_crypto_dev_ops; + + cryptodev->enqueue_burst = virtio_crypto_pkt_tx_burst; + cryptodev->dequeue_burst = virtio_crypto_pkt_rx_burst; + + cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | + RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING; + + hw = cryptodev->data->dev_private; + hw->dev_id = cryptodev->data->dev_id; + hw->virtio_dev_capabilities = virtio_capabilities; + + VIRTIO_CRYPTO_INIT_LOG_DBG("dev %d vendorID=0x%x deviceID=0x%x", + cryptodev->data->dev_id, pci_dev->id.vendor_id, + pci_dev->id.device_id); + + /* pci device init */ + if (vtpci_cryptodev_init(pci_dev, hw)) + return -1; + + if (virtio_crypto_init_device(cryptodev, + VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0) + return -1; + + return 0; +} + +static int +virtio_crypto_dev_uninit(struct rte_cryptodev *cryptodev) +{ + struct virtio_crypto_hw *hw = cryptodev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + if (rte_eal_process_type() == RTE_PROC_SECONDARY) + return -EPERM; + + if (cryptodev->data->dev_started) { + virtio_crypto_dev_stop(cryptodev); + virtio_crypto_dev_close(cryptodev); + } + + cryptodev->dev_ops = NULL; + cryptodev->enqueue_burst = NULL; + cryptodev->dequeue_burst = NULL; + + /* release control queue */ + virtio_crypto_queue_release(hw->cvq); + + rte_free(cryptodev->data); + cryptodev->data = NULL; + + VIRTIO_CRYPTO_DRV_LOG_INFO("dev_uninit completed"); + + return 0; +} + +static int +virtio_crypto_dev_configure(struct rte_cryptodev *cryptodev, + struct rte_cryptodev_config *config __rte_unused) +{ + struct virtio_crypto_hw *hw = cryptodev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + if (virtio_crypto_init_device(cryptodev, + VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0) + return -1; + + /* setup control queue + * [0, 1, ... 
,(config->max_dataqueues - 1)] are data queues + * config->max_dataqueues is the control queue + */ + if (virtio_crypto_ctrlq_setup(cryptodev, hw->max_dataqueues) < 0) { + VIRTIO_CRYPTO_INIT_LOG_ERR("control queue setup error"); + return -1; + } + virtio_crypto_ctrlq_start(cryptodev); + + return 0; +} + +static void +virtio_crypto_dev_stop(struct rte_cryptodev *dev) +{ + struct virtio_crypto_hw *hw = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + VIRTIO_CRYPTO_DRV_LOG_DBG("virtio_dev_stop"); + + vtpci_cryptodev_reset(hw); + + virtio_crypto_dev_free_mbufs(dev); + virtio_crypto_free_queues(dev); + + dev->data->dev_started = 0; +} + +static int +virtio_crypto_dev_start(struct rte_cryptodev *dev) +{ + struct virtio_crypto_hw *hw = dev->data->dev_private; + + if (dev->data->dev_started) + return 0; + + /* Do final configuration before queue engine starts */ + virtio_crypto_dataq_start(dev); + vtpci_cryptodev_reinit_complete(hw); + + dev->data->dev_started = 1; + + return 0; +} + +static void +virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev) +{ + uint32_t i; + struct virtio_crypto_hw *hw = dev->data->dev_private; + + for (i = 0; i < hw->max_dataqueues; i++) { + VIRTIO_CRYPTO_INIT_LOG_DBG("Before freeing dataq[%d] used " + "and unused buf", i); + VIRTQUEUE_DUMP((struct virtqueue *) + dev->data->queue_pairs[i]); + + VIRTIO_CRYPTO_INIT_LOG_DBG("queue_pairs[%d]=%p", + i, dev->data->queue_pairs[i]); + + virtqueue_detatch_unused(dev->data->queue_pairs[i]); + + VIRTIO_CRYPTO_INIT_LOG_DBG("After freeing dataq[%d] used and " + "unused buf", i); + VIRTQUEUE_DUMP( + (struct virtqueue *)dev->data->queue_pairs[i]); + } +} + +static unsigned int +virtio_crypto_sym_get_session_private_size( + struct rte_cryptodev *dev __rte_unused) +{ + PMD_INIT_FUNC_TRACE(); + + return RTE_ALIGN_CEIL(sizeof(struct virtio_crypto_session), 16); +} + +static int +virtio_crypto_check_sym_session_paras( + struct rte_cryptodev *dev) +{ + struct virtio_crypto_hw *hw; + + PMD_INIT_FUNC_TRACE(); + + if (unlikely(dev == NULL)) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("dev is NULL"); + return -1; + } + if (unlikely(dev->data == NULL)) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("dev->data is NULL"); + return -1; + } + hw = dev->data->dev_private; + if (unlikely(hw == NULL)) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("hw is NULL"); + return -1; + } + if (unlikely(hw->cvq == NULL)) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("vq is NULL"); + return -1; + } + + return 0; +} + +static int +virtio_crypto_check_sym_clear_session_paras( + struct rte_cryptodev *dev, + struct rte_cryptodev_sym_session *sess) +{ + PMD_INIT_FUNC_TRACE(); + + if (sess == NULL) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("sym_session is NULL"); + return -1; + } + + return virtio_crypto_check_sym_session_paras(dev); +} + +#define NUM_ENTRY_SYM_CLEAR_SESSION 2 + +static void +virtio_crypto_sym_clear_session( + struct rte_cryptodev *dev, + struct rte_cryptodev_sym_session *sess) +{ + struct virtio_crypto_hw *hw; + struct virtqueue *vq; + struct virtio_crypto_session *session; + struct virtio_crypto_op_ctrl_req *ctrl; + struct vring_desc *desc; + uint8_t *status; + uint8_t needed = 1; + uint32_t head; + uint8_t *malloc_virt_addr; + uint64_t malloc_phys_addr; + uint8_t len_inhdr = sizeof(struct virtio_crypto_inhdr); + uint32_t len_op_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req); + uint32_t desc_offset = len_op_ctrl_req + len_inhdr; + + PMD_INIT_FUNC_TRACE(); + + if (virtio_crypto_check_sym_clear_session_paras(dev, sess) < 0) + return; + + hw = dev->data->dev_private; + vq = hw->cvq; + session = 
(struct virtio_crypto_session *)get_sym_session_private_data( + sess, cryptodev_virtio_driver_id); + if (session == NULL) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid session parameter"); + return; + } + + VIRTIO_CRYPTO_SESSION_LOG_INFO("vq->vq_desc_head_idx = %d, " + "vq = %p", vq->vq_desc_head_idx, vq); + + if (vq->vq_free_cnt < needed) { + VIRTIO_CRYPTO_SESSION_LOG_ERR( + "vq->vq_free_cnt = %d is less than %d, " + "not enough", vq->vq_free_cnt, needed); + return; + } + + /* + * malloc memory to store information of ctrl request op, + * returned status and desc vring + */ + malloc_virt_addr = rte_malloc(NULL, len_op_ctrl_req + len_inhdr + + NUM_ENTRY_SYM_CLEAR_SESSION + * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE); + if (malloc_virt_addr == NULL) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap room"); + return; + } + malloc_phys_addr = rte_malloc_virt2iova(malloc_virt_addr); + + /* assign ctrl request op part */ + ctrl = (struct virtio_crypto_op_ctrl_req *)malloc_virt_addr; + ctrl->header.opcode = VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION; + /* default data virtqueue is 0 */ + ctrl->header.queue_id = 0; + ctrl->u.destroy_session.session_id = session->session_id; + + /* status part */ + status = &(((struct virtio_crypto_inhdr *) + ((uint8_t *)malloc_virt_addr + len_op_ctrl_req))->status); + *status = VIRTIO_CRYPTO_ERR; + + /* indirect desc vring part */ + desc = (struct vring_desc *)((uint8_t *)malloc_virt_addr + + desc_offset); + + /* ctrl request part */ + desc[0].addr = malloc_phys_addr; + desc[0].len = len_op_ctrl_req; + desc[0].flags = VRING_DESC_F_NEXT; + desc[0].next = 1; + + /* status part */ + desc[1].addr = malloc_phys_addr + len_op_ctrl_req; + desc[1].len = len_inhdr; + desc[1].flags = VRING_DESC_F_WRITE; + + /* use only a single desc entry */ + head = vq->vq_desc_head_idx; + vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT; + vq->vq_ring.desc[head].addr = malloc_phys_addr + desc_offset; + vq->vq_ring.desc[head].len + = NUM_ENTRY_SYM_CLEAR_SESSION + * sizeof(struct vring_desc); + vq->vq_free_cnt -= needed; + + vq->vq_desc_head_idx = vq->vq_ring.desc[head].next; + + vq_update_avail_ring(vq, head); + vq_update_avail_idx(vq); + + VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d", + vq->vq_queue_index); + + virtqueue_notify(vq); + + rte_rmb(); + while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) { + rte_rmb(); + usleep(100); + } + + while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) { + uint32_t idx, desc_idx, used_idx; + struct vring_used_elem *uep; + + used_idx = (uint32_t)(vq->vq_used_cons_idx + & (vq->vq_nentries - 1)); + uep = &vq->vq_ring.used->ring[used_idx]; + idx = (uint32_t) uep->id; + desc_idx = idx; + while (vq->vq_ring.desc[desc_idx].flags + & VRING_DESC_F_NEXT) { + desc_idx = vq->vq_ring.desc[desc_idx].next; + vq->vq_free_cnt++; + } + + vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx; + vq->vq_desc_head_idx = idx; + vq->vq_used_cons_idx++; + vq->vq_free_cnt++; + } + + if (*status != VIRTIO_CRYPTO_OK) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("Close session failed " + "status=%"PRIu32", session_id=%"PRIu64"", + *status, session->session_id); + rte_free(malloc_virt_addr); + return; + } + + VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n" + "vq->vq_desc_head_idx=%d", + vq->vq_free_cnt, vq->vq_desc_head_idx); + + VIRTIO_CRYPTO_SESSION_LOG_INFO("Close session %"PRIu64" successfully ", + session->session_id); + + memset(session, 0, sizeof(struct virtio_crypto_session)); + struct rte_mempool *sess_mp = rte_mempool_from_obj(session); + 
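The destroy-session path above packs three things into one allocation: the control request the device reads, a one-byte status the device writes back, and a two-entry indirect descriptor table pointing at the first two; the driver then kicks the control queue and polls used->idx until the device consumes the chain. The standalone sketch below lays out the same buffer to show the offsets. The request size here is a hypothetical stand-in, since the real sizeof(struct virtio_crypto_op_ctrl_req) comes from the spec headers, and the IOVA is faked with a virtual address where the driver uses rte_malloc_virt2iova().

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define LEN_OP_CTRL_REQ 72   /* hypothetical sizeof(virtio_crypto_op_ctrl_req) */
#define LEN_INHDR 1          /* inhdr: one status byte */
#define NUM_ENTRY 2          /* NUM_ENTRY_SYM_CLEAR_SESSION */

struct vring_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t flags;
	uint16_t next;
};
#define VRING_DESC_F_NEXT 1
#define VRING_DESC_F_WRITE 2

int main(void)
{
	/* one allocation holds: [ctrl request][status byte][indirect table] */
	size_t total = LEN_OP_CTRL_REQ + LEN_INHDR +
		       NUM_ENTRY * sizeof(struct vring_desc);
	uint8_t *buf = calloc(1, total);
	uint64_t iova = (uintptr_t)buf; /* stand-in for rte_malloc_virt2iova() */
	struct vring_desc *desc = (struct vring_desc *)
		(buf + LEN_OP_CTRL_REQ + LEN_INHDR);

	/* entry 0: device reads the request, chained to entry 1 */
	desc[0] = (struct vring_desc){ iova, LEN_OP_CTRL_REQ, VRING_DESC_F_NEXT, 1 };
	/* entry 1: device writes the status byte */
	desc[1] = (struct vring_desc){ iova + LEN_OP_CTRL_REQ, LEN_INHDR,
				       VRING_DESC_F_WRITE, 0 };

	printf("indirect table at offset %d, %zu bytes\n",
	       LEN_OP_CTRL_REQ + LEN_INHDR,
	       NUM_ENTRY * sizeof(struct vring_desc));
	free(buf);
	return 0;
}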
set_sym_session_private_data(sess, cryptodev_virtio_driver_id, NULL); + rte_mempool_put(sess_mp, session); + rte_free(malloc_virt_addr); +} + +static struct rte_crypto_cipher_xform * +virtio_crypto_get_cipher_xform(struct rte_crypto_sym_xform *xform) +{ + do { + if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) + return &xform->cipher; + + xform = xform->next; + } while (xform); + + return NULL; +} + +static struct rte_crypto_auth_xform * +virtio_crypto_get_auth_xform(struct rte_crypto_sym_xform *xform) +{ + do { + if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) + return &xform->auth; + + xform = xform->next; + } while (xform); + + return NULL; +} + +/** Get xform chain order */ +static int +virtio_crypto_get_chain_order(struct rte_crypto_sym_xform *xform) +{ + if (xform == NULL) + return -1; + + /* Cipher Only */ + if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && + xform->next == NULL) + return VIRTIO_CRYPTO_CMD_CIPHER; + + /* Authentication Only */ + if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && + xform->next == NULL) + return VIRTIO_CRYPTO_CMD_AUTH; + + /* Authenticate then Cipher */ + if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && + xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) + return VIRTIO_CRYPTO_CMD_HASH_CIPHER; + + /* Cipher then Authenticate */ + if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && + xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) + return VIRTIO_CRYPTO_CMD_CIPHER_HASH; + + return -1; +} + +static int +virtio_crypto_sym_pad_cipher_param( + struct virtio_crypto_cipher_session_para *para, + struct rte_crypto_cipher_xform *cipher_xform) +{ + switch (cipher_xform->algo) { + case RTE_CRYPTO_CIPHER_AES_CBC: + para->algo = VIRTIO_CRYPTO_CIPHER_AES_CBC; + break; + default: + VIRTIO_CRYPTO_SESSION_LOG_ERR("Crypto: Unsupported " + "Cipher alg %u", cipher_xform->algo); + return -1; + } + + para->keylen = cipher_xform->key.length; + switch (cipher_xform->op) { + case RTE_CRYPTO_CIPHER_OP_ENCRYPT: + para->op = VIRTIO_CRYPTO_OP_ENCRYPT; + break; + case RTE_CRYPTO_CIPHER_OP_DECRYPT: + para->op = VIRTIO_CRYPTO_OP_DECRYPT; + break; + default: + VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported cipher operation " + "parameter"); + return -1; + } + + return 0; +} + +static int +virtio_crypto_sym_pad_auth_param( + struct virtio_crypto_op_ctrl_req *ctrl, + struct rte_crypto_auth_xform *auth_xform) +{ + uint32_t *algo; + struct virtio_crypto_alg_chain_session_para *para = + &(ctrl->u.sym_create_session.u.chain.para); + + switch (ctrl->u.sym_create_session.u.chain.para.hash_mode) { + case VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN: + algo = &(para->u.hash_param.algo); + break; + case VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH: + algo = &(para->u.mac_param.algo); + break; + default: + VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported hash mode %u " + "specified", + ctrl->u.sym_create_session.u.chain.para.hash_mode); + return -1; + } + + switch (auth_xform->algo) { + case RTE_CRYPTO_AUTH_SHA1_HMAC: + *algo = VIRTIO_CRYPTO_MAC_HMAC_SHA1; + break; + default: + VIRTIO_CRYPTO_SESSION_LOG_ERR( + "Crypto: Undefined Hash algo %u specified", + auth_xform->algo); + return -1; + } + + return 0; +} + +static int +virtio_crypto_sym_pad_op_ctrl_req( + struct virtio_crypto_op_ctrl_req *ctrl, + struct rte_crypto_sym_xform *xform, bool is_chainned, + uint8_t **cipher_key_data, uint8_t **auth_key_data, + struct virtio_crypto_session *session) +{ + int ret; + struct rte_crypto_auth_xform *auth_xform = NULL; + struct rte_crypto_cipher_xform *cipher_xform = NULL; + + /* Get cipher xform from crypto xform chain */ + cipher_xform = 
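virtio_crypto_get_chain_order() above maps the (at most two-element) xform chain onto four command ids. The standalone sketch below models the same decision with a reduced xform type and invented names, adding explicit NULL guards on the second element.

#include <stdio.h>
#include <stddef.h>

/* reduced model of rte_crypto_sym_xform chaining, just to illustrate
 * the four outcomes of virtio_crypto_get_chain_order() */
enum xf { XF_CIPHER, XF_AUTH };
struct xform { enum xf type; struct xform *next; };

enum cmd { CMD_CIPHER, CMD_AUTH, CMD_CIPHER_HASH, CMD_HASH_CIPHER,
	   CMD_ERR = -1 };

static enum cmd chain_order(const struct xform *x)
{
	if (x == NULL)
		return CMD_ERR;
	if (x->type == XF_CIPHER && x->next == NULL)
		return CMD_CIPHER;                /* cipher only */
	if (x->type == XF_AUTH && x->next == NULL)
		return CMD_AUTH;                  /* auth only */
	if (x->type == XF_AUTH && x->next->type == XF_CIPHER)
		return CMD_HASH_CIPHER;           /* authenticate then cipher */
	if (x->type == XF_CIPHER && x->next->type == XF_AUTH)
		return CMD_CIPHER_HASH;           /* cipher then authenticate */
	return CMD_ERR;
}

int main(void)
{
	struct xform auth = { XF_AUTH, NULL };
	struct xform cipher = { XF_CIPHER, &auth };

	printf("cipher->auth maps to %d (CIPHER_HASH)\n", chain_order(&cipher));
	return 0;
}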
virtio_crypto_get_cipher_xform(xform); + if (cipher_xform) { + if (cipher_xform->iv.length > VIRTIO_CRYPTO_MAX_IV_SIZE) { + VIRTIO_CRYPTO_SESSION_LOG_ERR( + "cipher IV size cannot be longer than %u", + VIRTIO_CRYPTO_MAX_IV_SIZE); + return -1; + } + if (is_chainned) + ret = virtio_crypto_sym_pad_cipher_param( + &ctrl->u.sym_create_session.u.chain.para + .cipher_param, cipher_xform); + else + ret = virtio_crypto_sym_pad_cipher_param( + &ctrl->u.sym_create_session.u.cipher.para, + cipher_xform); + + if (ret < 0) { + VIRTIO_CRYPTO_SESSION_LOG_ERR( + "pad cipher parameter failed"); + return -1; + } + + *cipher_key_data = cipher_xform->key.data; + + session->iv.offset = cipher_xform->iv.offset; + session->iv.length = cipher_xform->iv.length; + } + + /* Get auth xform from crypto xform chain */ + auth_xform = virtio_crypto_get_auth_xform(xform); + if (auth_xform) { + /* FIXME: support VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */ + struct virtio_crypto_alg_chain_session_para *para = + &(ctrl->u.sym_create_session.u.chain.para); + if (auth_xform->key.length) { + para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH; + para->u.mac_param.auth_key_len = + (uint32_t)auth_xform->key.length; + para->u.mac_param.hash_result_len = + auth_xform->digest_length; + + *auth_key_data = auth_xform->key.data; + } else { + para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN; + para->u.hash_param.hash_result_len = + auth_xform->digest_length; + } + + ret = virtio_crypto_sym_pad_auth_param(ctrl, auth_xform); + if (ret < 0) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("pad auth parameter " + "failed"); + return -1; + } + } + + return 0; +} + +static int +virtio_crypto_check_sym_configure_session_paras( + struct rte_cryptodev *dev, + struct rte_crypto_sym_xform *xform, + struct rte_cryptodev_sym_session *sym_sess, + struct rte_mempool *mempool) +{ + if (unlikely(xform == NULL) || unlikely(sym_sess == NULL) || + unlikely(mempool == NULL)) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("NULL pointer"); + return -1; + } + + if (virtio_crypto_check_sym_session_paras(dev) < 0) + return -1; + + return 0; +} + +static int +virtio_crypto_sym_configure_session( + struct rte_cryptodev *dev, + struct rte_crypto_sym_xform *xform, + struct rte_cryptodev_sym_session *sess, + struct rte_mempool *mempool) +{ + int ret; + struct virtio_crypto_session crypto_sess; + void *session_private = &crypto_sess; + struct virtio_crypto_session *session; + struct virtio_crypto_op_ctrl_req *ctrl_req; + enum virtio_crypto_cmd_id cmd_id; + uint8_t *cipher_key_data = NULL; + uint8_t *auth_key_data = NULL; + struct virtio_crypto_hw *hw; + struct virtqueue *control_vq; + + PMD_INIT_FUNC_TRACE(); + + ret = virtio_crypto_check_sym_configure_session_paras(dev, xform, + sess, mempool); + if (ret < 0) { + VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid parameters"); + return ret; + } + + if (rte_mempool_get(mempool, &session_private)) { + VIRTIO_CRYPTO_SESSION_LOG_ERR( + "Couldn't get object from session mempool"); + return -ENOMEM; + } + + session = (struct virtio_crypto_session *)session_private; + memset(session, 0, sizeof(struct virtio_crypto_session)); + ctrl_req = &session->ctrl; + ctrl_req->header.opcode = VIRTIO_CRYPTO_CIPHER_CREATE_SESSION; + /* FIXME: support multiqueue */ + ctrl_req->header.queue_id = 0; + + hw = dev->data->dev_private; + control_vq = hw->cvq; + + cmd_id = virtio_crypto_get_chain_order(xform); + if (cmd_id == VIRTIO_CRYPTO_CMD_CIPHER_HASH) + ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order + = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH; + if (cmd_id == 
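The auth branch above keys its behaviour purely on whether the xform carries a key: a non-empty key selects the HMAC path (hash mode AUTH, filling mac_param), an empty one the plain-hash path (hash mode PLAIN, filling hash_param). A standalone sketch of that branch, assuming the virtio-crypto spec values 1 and 2 for the two hash modes:

#include <stdint.h>
#include <stdio.h>

#define HASH_MODE_PLAIN 1   /* assumed VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN */
#define HASH_MODE_AUTH 2    /* assumed VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH */

/* mirrors virtio_crypto_sym_pad_op_ctrl_req(): keyed auth -> HMAC,
 * keyless auth -> plain hash */
static uint32_t pick_hash_mode(uint32_t auth_key_len)
{
	return auth_key_len ? HASH_MODE_AUTH : HASH_MODE_PLAIN;
}

int main(void)
{
	printf("key_len=20 -> mode %u (HMAC / mac_param path)\n",
	       pick_hash_mode(20));
	printf("key_len=0  -> mode %u (plain hash / hash_param path)\n",
	       pick_hash_mode(0));
	return 0;
}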
VIRTIO_CRYPTO_CMD_HASH_CIPHER) + ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order + = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER; + + switch (cmd_id) { + case VIRTIO_CRYPTO_CMD_CIPHER_HASH: + case VIRTIO_CRYPTO_CMD_HASH_CIPHER: + ctrl_req->u.sym_create_session.op_type + = VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING; + + ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req, + xform, true, &cipher_key_data, &auth_key_data, session); + if (ret < 0) { + VIRTIO_CRYPTO_SESSION_LOG_ERR( + "padding sym op ctrl req failed"); + goto error_out; + } + ret = virtio_crypto_send_command(control_vq, ctrl_req, + cipher_key_data, auth_key_data, session); + if (ret < 0) { + VIRTIO_CRYPTO_SESSION_LOG_ERR( + "create session failed: %d", ret); + goto error_out; + } + break; + case VIRTIO_CRYPTO_CMD_CIPHER: + ctrl_req->u.sym_create_session.op_type + = VIRTIO_CRYPTO_SYM_OP_CIPHER; + ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req, xform, + false, &cipher_key_data, &auth_key_data, session); + if (ret < 0) { + VIRTIO_CRYPTO_SESSION_LOG_ERR( + "padding sym op ctrl req failed"); + goto error_out; + } + ret = virtio_crypto_send_command(control_vq, ctrl_req, + cipher_key_data, NULL, session); + if (ret < 0) { + VIRTIO_CRYPTO_SESSION_LOG_ERR( + "create session failed: %d", ret); + goto error_out; + } + break; + default: + VIRTIO_CRYPTO_SESSION_LOG_ERR( + "Unsupported operation chain order parameter"); + goto error_out; + } + + set_sym_session_private_data(sess, dev->driver_id, + session_private); + + return 0; + +error_out: + return -1; +} + +static void +virtio_crypto_dev_info_get(struct rte_cryptodev *dev, + struct rte_cryptodev_info *info) +{ + struct virtio_crypto_hw *hw = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + if (info != NULL) { + info->driver_id = cryptodev_virtio_driver_id; + info->feature_flags = dev->feature_flags; + info->max_nb_queue_pairs = hw->max_dataqueues; + /* No limit of number of sessions */ + info->sym.max_nb_sessions = 0; + info->capabilities = hw->virtio_dev_capabilities; + } +} + +static int +crypto_virtio_pci_probe( + struct rte_pci_driver *pci_drv __rte_unused, + struct rte_pci_device *pci_dev) +{ + struct rte_cryptodev_pmd_init_params init_params = { + .name = "", + .socket_id = rte_socket_id(), + .private_data_size = sizeof(struct virtio_crypto_hw) + }; + char name[RTE_CRYPTODEV_NAME_MAX_LEN]; + + VIRTIO_CRYPTO_DRV_LOG_DBG("Found Crypto device at %02x:%02x.%x", + pci_dev->addr.bus, + pci_dev->addr.devid, + pci_dev->addr.function); + + rte_pci_device_name(&pci_dev->addr, name, sizeof(name)); + + return crypto_virtio_create(name, pci_dev, &init_params); +} + +static int +crypto_virtio_pci_remove( + struct rte_pci_device *pci_dev __rte_unused) +{ + struct rte_cryptodev *cryptodev; + char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN]; + + if (pci_dev == NULL) + return -EINVAL; + + rte_pci_device_name(&pci_dev->addr, cryptodev_name, + sizeof(cryptodev_name)); + + cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name); + if (cryptodev == NULL) + return -ENODEV; + + return virtio_crypto_dev_uninit(cryptodev); +} + +static struct rte_pci_driver rte_virtio_crypto_driver = { + .id_table = pci_id_virtio_crypto_map, + .drv_flags = 0, + .probe = crypto_virtio_pci_probe, + .remove = crypto_virtio_pci_remove +}; + +static struct cryptodev_driver virtio_crypto_drv; + +RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_VIRTIO_PMD, rte_virtio_crypto_driver); +RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv, + rte_virtio_crypto_driver.driver, + cryptodev_virtio_driver_id); + 
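For context, this is roughly what an application-side caller of the session path above looks like: an AES-CBC xform chained into an HMAC-SHA1 xform (the CIPHER_HASH order), handed to the generic cryptodev session API, which in turn reaches virtio_crypto_sym_configure_session(). A sketch against the 18.08 API, assuming EAL is initialized elsewhere, device id 0 is this PMD, and a suitably sized session mempool already exists; the key lengths and IV offset are example choices, not requirements of the API.

#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>

/* Build an AES-CBC + HMAC-SHA1 chain and create a session on dev 0.
 * Error handling trimmed to the essentials. */
static struct rte_cryptodev_sym_session *
create_chain_session(struct rte_mempool *sess_mp,
		     uint8_t *cipher_key, uint8_t *auth_key)
{
	struct rte_crypto_sym_xform auth = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.auth = {
			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
			.key = { .data = auth_key, .length = 20 },
			.digest_length = 20,
		},
	};
	struct rte_crypto_sym_xform cipher = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.next = &auth,            /* CIPHER then HASH */
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
			.key = { .data = cipher_key, .length = 16 },
			.iv = { .offset = 16, .length = 16 },
		},
	};
	struct rte_cryptodev_sym_session *s =
		rte_cryptodev_sym_session_create(sess_mp);

	if (s != NULL &&
	    rte_cryptodev_sym_session_init(0, s, &cipher, sess_mp) < 0) {
		rte_cryptodev_sym_session_free(s);
		return NULL;
	}
	return s;
}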
+RTE_INIT(virtio_crypto_init_log) +{ + virtio_crypto_logtype_init = rte_log_register("pmd.crypto.virtio.init"); + if (virtio_crypto_logtype_init >= 0) + rte_log_set_level(virtio_crypto_logtype_init, RTE_LOG_NOTICE); + + virtio_crypto_logtype_session = + rte_log_register("pmd.crypto.virtio.session"); + if (virtio_crypto_logtype_session >= 0) + rte_log_set_level(virtio_crypto_logtype_session, + RTE_LOG_NOTICE); + + virtio_crypto_logtype_rx = rte_log_register("pmd.crypto.virtio.rx"); + if (virtio_crypto_logtype_rx >= 0) + rte_log_set_level(virtio_crypto_logtype_rx, RTE_LOG_NOTICE); + + virtio_crypto_logtype_tx = rte_log_register("pmd.crypto.virtio.tx"); + if (virtio_crypto_logtype_tx >= 0) + rte_log_set_level(virtio_crypto_logtype_tx, RTE_LOG_NOTICE); + + virtio_crypto_logtype_driver = + rte_log_register("pmd.crypto.virtio.driver"); + if (virtio_crypto_logtype_driver >= 0) + rte_log_set_level(virtio_crypto_logtype_driver, RTE_LOG_NOTICE); +} diff --git a/drivers/crypto/virtio/virtio_cryptodev.h b/drivers/crypto/virtio/virtio_cryptodev.h new file mode 100644 index 00000000..0fd7b722 --- /dev/null +++ b/drivers/crypto/virtio/virtio_cryptodev.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD. + */ + +#ifndef _VIRTIO_CRYPTODEV_H_ +#define _VIRTIO_CRYPTODEV_H_ + +#include "virtio_crypto.h" +#include "virtio_pci.h" +#include "virtio_ring.h" + +/* Features desired/implemented by this driver. */ +#define VIRTIO_CRYPTO_PMD_GUEST_FEATURES (1ULL << VIRTIO_F_VERSION_1) + +#define CRYPTODEV_NAME_VIRTIO_PMD crypto_virtio + +#define NUM_ENTRY_VIRTIO_CRYPTO_OP 7 + +#define VIRTIO_CRYPTO_MAX_IV_SIZE 16 + +extern uint8_t cryptodev_virtio_driver_id; + +enum virtio_crypto_cmd_id { + VIRTIO_CRYPTO_CMD_CIPHER = 0, + VIRTIO_CRYPTO_CMD_AUTH = 1, + VIRTIO_CRYPTO_CMD_CIPHER_HASH = 2, + VIRTIO_CRYPTO_CMD_HASH_CIPHER = 3 +}; + +struct virtio_crypto_op_cookie { + struct virtio_crypto_op_data_req data_req; + struct virtio_crypto_inhdr inhdr; + struct vring_desc desc[NUM_ENTRY_VIRTIO_CRYPTO_OP]; + uint8_t iv[VIRTIO_CRYPTO_MAX_IV_SIZE]; +}; + +/* + * Control queue function prototype + */ +void virtio_crypto_ctrlq_start(struct rte_cryptodev *dev); + +/* + * Data queue function prototype + */ +void virtio_crypto_dataq_start(struct rte_cryptodev *dev); + +int virtio_crypto_queue_setup(struct rte_cryptodev *dev, + int queue_type, + uint16_t vtpci_queue_idx, + uint16_t nb_desc, + int socket_id, + struct virtqueue **pvq); + +void virtio_crypto_queue_release(struct virtqueue *vq); + +uint16_t virtio_crypto_pkt_tx_burst(void *tx_queue, + struct rte_crypto_op **tx_pkts, + uint16_t nb_pkts); + +uint16_t virtio_crypto_pkt_rx_burst(void *tx_queue, + struct rte_crypto_op **tx_pkts, + uint16_t nb_pkts); + +#endif /* _VIRTIO_CRYPTODEV_H_ */ diff --git a/drivers/crypto/virtio/virtio_logs.h b/drivers/crypto/virtio/virtio_logs.h new file mode 100644 index 00000000..26a286cf --- /dev/null +++ b/drivers/crypto/virtio/virtio_logs.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD. + */ + +#ifndef _VIRTIO_LOGS_H_ +#define _VIRTIO_LOGS_H_ + +#include + +#define PMD_INIT_LOG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \ + "PMD: %s(): " fmt "\n", __func__, ##args) + +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>") + +extern int virtio_crypto_logtype_init; + +#define VIRTIO_CRYPTO_INIT_LOG_IMPL(level, fmt, args...) 
\ + rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_init, \ + "INIT: %s(): " fmt "\n", __func__, ##args) + +#define VIRTIO_CRYPTO_INIT_LOG_INFO(fmt, args...) \ + VIRTIO_CRYPTO_INIT_LOG_IMPL(INFO, fmt, ## args) + +#define VIRTIO_CRYPTO_INIT_LOG_DBG(fmt, args...) \ + VIRTIO_CRYPTO_INIT_LOG_IMPL(DEBUG, fmt, ## args) + +#define VIRTIO_CRYPTO_INIT_LOG_ERR(fmt, args...) \ + VIRTIO_CRYPTO_INIT_LOG_IMPL(ERR, fmt, ## args) + +extern int virtio_crypto_logtype_session; + +#define VIRTIO_CRYPTO_SESSION_LOG_IMPL(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_session, \ + "SESSION: %s(): " fmt "\n", __func__, ##args) + +#define VIRTIO_CRYPTO_SESSION_LOG_INFO(fmt, args...) \ + VIRTIO_CRYPTO_SESSION_LOG_IMPL(INFO, fmt, ## args) + +#define VIRTIO_CRYPTO_SESSION_LOG_DBG(fmt, args...) \ + VIRTIO_CRYPTO_SESSION_LOG_IMPL(DEBUG, fmt, ## args) + +#define VIRTIO_CRYPTO_SESSION_LOG_ERR(fmt, args...) \ + VIRTIO_CRYPTO_SESSION_LOG_IMPL(ERR, fmt, ## args) + +extern int virtio_crypto_logtype_rx; + +#define VIRTIO_CRYPTO_RX_LOG_IMPL(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_rx, \ + "RX: %s(): " fmt "\n", __func__, ##args) + +#define VIRTIO_CRYPTO_RX_LOG_INFO(fmt, args...) \ + VIRTIO_CRYPTO_RX_LOG_IMPL(INFO, fmt, ## args) + +#define VIRTIO_CRYPTO_RX_LOG_DBG(fmt, args...) \ + VIRTIO_CRYPTO_RX_LOG_IMPL(DEBUG, fmt, ## args) + +#define VIRTIO_CRYPTO_RX_LOG_ERR(fmt, args...) \ + VIRTIO_CRYPTO_RX_LOG_IMPL(ERR, fmt, ## args) + +extern int virtio_crypto_logtype_tx; + +#define VIRTIO_CRYPTO_TX_LOG_IMPL(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_tx, \ + "TX: %s(): " fmt "\n", __func__, ##args) + +#define VIRTIO_CRYPTO_TX_LOG_INFO(fmt, args...) \ + VIRTIO_CRYPTO_TX_LOG_IMPL(INFO, fmt, ## args) + +#define VIRTIO_CRYPTO_TX_LOG_DBG(fmt, args...) \ + VIRTIO_CRYPTO_TX_LOG_IMPL(DEBUG, fmt, ## args) + +#define VIRTIO_CRYPTO_TX_LOG_ERR(fmt, args...) \ + VIRTIO_CRYPTO_TX_LOG_IMPL(ERR, fmt, ## args) + +extern int virtio_crypto_logtype_driver; + +#define VIRTIO_CRYPTO_DRV_LOG_IMPL(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_driver, \ + "DRIVER: %s(): " fmt "\n", __func__, ##args) + +#define VIRTIO_CRYPTO_DRV_LOG_INFO(fmt, args...) \ + VIRTIO_CRYPTO_DRV_LOG_IMPL(INFO, fmt, ## args) + +#define VIRTIO_CRYPTO_DRV_LOG_DBG(fmt, args...) \ + VIRTIO_CRYPTO_DRV_LOG_IMPL(DEBUG, fmt, ## args) + +#define VIRTIO_CRYPTO_DRV_LOG_ERR(fmt, args...) \ + VIRTIO_CRYPTO_DRV_LOG_IMPL(ERR, fmt, ## args) + +#endif /* _VIRTIO_LOGS_H_ */ diff --git a/drivers/crypto/virtio/virtio_pci.c b/drivers/crypto/virtio/virtio_pci.c new file mode 100644 index 00000000..832c465b --- /dev/null +++ b/drivers/crypto/virtio/virtio_pci.c @@ -0,0 +1,462 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD. + */ + +#include + +#ifdef RTE_EXEC_ENV_LINUXAPP + #include + #include +#endif + +#include +#include + +#include "virtio_pci.h" +#include "virtqueue.h" + +/* + * Following macros are derived from linux/pci_regs.h, however, + * we can't simply include that header here, as there is no such + * file for non-Linux platform. + */ +#define PCI_CAPABILITY_LIST 0x34 +#define PCI_CAP_ID_VNDR 0x09 +#define PCI_CAP_ID_MSIX 0x11 + +/* + * The remaining space is defined by each driver as the per-driver + * configuration space. + */ +#define VIRTIO_PCI_CONFIG(hw) \ + (((hw)->use_msix == VIRTIO_MSIX_ENABLED) ? 
24 : 20) + +struct virtio_hw_internal virtio_hw_internal[RTE_MAX_VIRTIO_CRYPTO]; + +static inline int +check_vq_phys_addr_ok(struct virtqueue *vq) +{ + /* Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit, + * and only accepts 32 bit page frame number. + * Check if the allocated physical memory exceeds 16TB. + */ + if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >> + (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) { + VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be above 16TB!"); + return 0; + } + + return 1; +} + +static inline void +io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi) +{ + rte_write32(val & ((1ULL << 32) - 1), lo); + rte_write32(val >> 32, hi); +} + +static void +modern_read_dev_config(struct virtio_crypto_hw *hw, size_t offset, + void *dst, int length) +{ + int i; + uint8_t *p; + uint8_t old_gen, new_gen; + + do { + old_gen = rte_read8(&hw->common_cfg->config_generation); + + p = dst; + for (i = 0; i < length; i++) + *p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i); + + new_gen = rte_read8(&hw->common_cfg->config_generation); + } while (old_gen != new_gen); +} + +static void +modern_write_dev_config(struct virtio_crypto_hw *hw, size_t offset, + const void *src, int length) +{ + int i; + const uint8_t *p = src; + + for (i = 0; i < length; i++) + rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i)); +} + +static uint64_t +modern_get_features(struct virtio_crypto_hw *hw) +{ + uint32_t features_lo, features_hi; + + rte_write32(0, &hw->common_cfg->device_feature_select); + features_lo = rte_read32(&hw->common_cfg->device_feature); + + rte_write32(1, &hw->common_cfg->device_feature_select); + features_hi = rte_read32(&hw->common_cfg->device_feature); + + return ((uint64_t)features_hi << 32) | features_lo; +} + +static void +modern_set_features(struct virtio_crypto_hw *hw, uint64_t features) +{ + rte_write32(0, &hw->common_cfg->guest_feature_select); + rte_write32(features & ((1ULL << 32) - 1), + &hw->common_cfg->guest_feature); + + rte_write32(1, &hw->common_cfg->guest_feature_select); + rte_write32(features >> 32, + &hw->common_cfg->guest_feature); +} + +static uint8_t +modern_get_status(struct virtio_crypto_hw *hw) +{ + return rte_read8(&hw->common_cfg->device_status); +} + +static void +modern_set_status(struct virtio_crypto_hw *hw, uint8_t status) +{ + rte_write8(status, &hw->common_cfg->device_status); +} + +static void +modern_reset(struct virtio_crypto_hw *hw) +{ + modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET); + modern_get_status(hw); +} + +static uint8_t +modern_get_isr(struct virtio_crypto_hw *hw) +{ + return rte_read8(hw->isr); +} + +static uint16_t +modern_set_config_irq(struct virtio_crypto_hw *hw, uint16_t vec) +{ + rte_write16(vec, &hw->common_cfg->msix_config); + return rte_read16(&hw->common_cfg->msix_config); +} + +static uint16_t +modern_set_queue_irq(struct virtio_crypto_hw *hw, struct virtqueue *vq, + uint16_t vec) +{ + rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select); + rte_write16(vec, &hw->common_cfg->queue_msix_vector); + return rte_read16(&hw->common_cfg->queue_msix_vector); +} + +static uint16_t +modern_get_queue_num(struct virtio_crypto_hw *hw, uint16_t queue_id) +{ + rte_write16(queue_id, &hw->common_cfg->queue_select); + return rte_read16(&hw->common_cfg->queue_size); +} + +static int +modern_setup_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq) +{ + uint64_t desc_addr, avail_addr, used_addr; + uint16_t notify_off; + + if (!check_vq_phys_addr_ok(vq)) + return -1; + + desc_addr = vq->vq_ring_mem; + 
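modern_read_dev_config() above guards multi-byte config reads with the device's config_generation counter: if the device changed the config while the bytes were being copied, the generation read after differs from the one read before and the copy is retried. A standalone toy model of that loop (single-threaded here, so the retry never fires, and assuming a little-endian host for the printed value):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static volatile uint8_t cfg_gen;   /* bumped by the device in reality */
static uint8_t dev_cfg[8];         /* device-specific config window */

/* toy model of modern_read_dev_config(): re-read while the generation
 * changes under us, so multi-byte fields stay internally consistent */
static void read_config(void *dst, size_t off, size_t len)
{
	uint8_t old_gen, new_gen;

	do {
		old_gen = cfg_gen;
		memcpy(dst, dev_cfg + off, len);
		new_gen = cfg_gen;
	} while (old_gen != new_gen); /* generation moved mid-read: retry */
}

int main(void)
{
	uint32_t max_dataqueues;

	memcpy(dev_cfg, "\x02\x00\x00\x00", 4); /* pretend the device wrote 2 */
	read_config(&max_dataqueues, 0, sizeof(max_dataqueues));
	printf("max_dataqueues = %u\n", max_dataqueues);
	return 0;
}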
avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc); + used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail, + ring[vq->vq_nentries]), + VIRTIO_PCI_VRING_ALIGN); + + rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select); + + io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo, + &hw->common_cfg->queue_desc_hi); + io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo, + &hw->common_cfg->queue_avail_hi); + io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo, + &hw->common_cfg->queue_used_hi); + + notify_off = rte_read16(&hw->common_cfg->queue_notify_off); + vq->notify_addr = (void *)((uint8_t *)hw->notify_base + + notify_off * hw->notify_off_multiplier); + + rte_write16(1, &hw->common_cfg->queue_enable); + + VIRTIO_CRYPTO_INIT_LOG_DBG("queue %u addresses:", vq->vq_queue_index); + VIRTIO_CRYPTO_INIT_LOG_DBG("\t desc_addr: %" PRIx64, desc_addr); + VIRTIO_CRYPTO_INIT_LOG_DBG("\t aval_addr: %" PRIx64, avail_addr); + VIRTIO_CRYPTO_INIT_LOG_DBG("\t used_addr: %" PRIx64, used_addr); + VIRTIO_CRYPTO_INIT_LOG_DBG("\t notify addr: %p (notify offset: %u)", + vq->notify_addr, notify_off); + + return 0; +} + +static void +modern_del_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq) +{ + rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select); + + io_write64_twopart(0, &hw->common_cfg->queue_desc_lo, + &hw->common_cfg->queue_desc_hi); + io_write64_twopart(0, &hw->common_cfg->queue_avail_lo, + &hw->common_cfg->queue_avail_hi); + io_write64_twopart(0, &hw->common_cfg->queue_used_lo, + &hw->common_cfg->queue_used_hi); + + rte_write16(0, &hw->common_cfg->queue_enable); +} + +static void +modern_notify_queue(struct virtio_crypto_hw *hw __rte_unused, + struct virtqueue *vq) +{ + rte_write16(vq->vq_queue_index, vq->notify_addr); +} + +const struct virtio_pci_ops virtio_crypto_modern_ops = { + .read_dev_cfg = modern_read_dev_config, + .write_dev_cfg = modern_write_dev_config, + .reset = modern_reset, + .get_status = modern_get_status, + .set_status = modern_set_status, + .get_features = modern_get_features, + .set_features = modern_set_features, + .get_isr = modern_get_isr, + .set_config_irq = modern_set_config_irq, + .set_queue_irq = modern_set_queue_irq, + .get_queue_num = modern_get_queue_num, + .setup_queue = modern_setup_queue, + .del_queue = modern_del_queue, + .notify_queue = modern_notify_queue, +}; + +void +vtpci_read_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset, + void *dst, int length) +{ + VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length); +} + +void +vtpci_write_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset, + const void *src, int length) +{ + VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length); +} + +uint64_t +vtpci_cryptodev_negotiate_features(struct virtio_crypto_hw *hw, + uint64_t host_features) +{ + uint64_t features; + + /* + * Limit negotiated features to what the driver, virtqueue, and + * host all support. 
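modern_setup_queue() above derives the avail and used ring addresses from the descriptor base using the standard split-ring layout, and computes the doorbell as notify_base + queue_notify_off * notify_off_multiplier. The standalone sketch below reproduces that arithmetic with invented example values:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_CEIL(v, a) (((v) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	unsigned int num = 256;            /* queue size */
	uint64_t desc = 0x100000;          /* hypothetical vq_ring_mem */
	/* avail follows the 16-byte descriptors; used is aligned up from
	 * the end of avail (4-byte header + one u16 per entry) */
	uint64_t avail = desc + num * 16;
	uint64_t used = ALIGN_CEIL(avail + 4 + num * 2, 4096);
	/* doorbell = notify_base + queue_notify_off * multiplier */
	uint64_t notify_base = 0x3000, mult = 4, notify_off = 1;

	printf("avail=0x%llx used=0x%llx notify=0x%llx\n",
	       (unsigned long long)avail, (unsigned long long)used,
	       (unsigned long long)(notify_base + notify_off * mult));
	return 0;
}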
+ */ + features = host_features & hw->guest_features; + VTPCI_OPS(hw)->set_features(hw, features); + + return features; +} + +void +vtpci_cryptodev_reset(struct virtio_crypto_hw *hw) +{ + VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET); + /* flush status write */ + VTPCI_OPS(hw)->get_status(hw); +} + +void +vtpci_cryptodev_reinit_complete(struct virtio_crypto_hw *hw) +{ + vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK); +} + +void +vtpci_cryptodev_set_status(struct virtio_crypto_hw *hw, uint8_t status) +{ + if (status != VIRTIO_CONFIG_STATUS_RESET) + status |= VTPCI_OPS(hw)->get_status(hw); + + VTPCI_OPS(hw)->set_status(hw, status); +} + +uint8_t +vtpci_cryptodev_get_status(struct virtio_crypto_hw *hw) +{ + return VTPCI_OPS(hw)->get_status(hw); +} + +uint8_t +vtpci_cryptodev_isr(struct virtio_crypto_hw *hw) +{ + return VTPCI_OPS(hw)->get_isr(hw); +} + +static void * +get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap) +{ + uint8_t bar = cap->bar; + uint32_t length = cap->length; + uint32_t offset = cap->offset; + uint8_t *base; + + if (bar >= PCI_MAX_RESOURCE) { + VIRTIO_CRYPTO_INIT_LOG_ERR("invalid bar: %u", bar); + return NULL; + } + + if (offset + length < offset) { + VIRTIO_CRYPTO_INIT_LOG_ERR("offset(%u) + length(%u) overflows", + offset, length); + return NULL; + } + + if (offset + length > dev->mem_resource[bar].len) { + VIRTIO_CRYPTO_INIT_LOG_ERR( + "invalid cap: overflows bar space: %u > %" PRIu64, + offset + length, dev->mem_resource[bar].len); + return NULL; + } + + base = dev->mem_resource[bar].addr; + if (base == NULL) { + VIRTIO_CRYPTO_INIT_LOG_ERR("bar %u base addr is NULL", bar); + return NULL; + } + + return base + offset; +} + +#define PCI_MSIX_ENABLE 0x8000 + +static int +virtio_read_caps(struct rte_pci_device *dev, struct virtio_crypto_hw *hw) +{ + uint8_t pos; + struct virtio_pci_cap cap; + int ret; + + if (rte_pci_map_device(dev)) { + VIRTIO_CRYPTO_INIT_LOG_DBG("failed to map pci device!"); + return -1; + } + + ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST); + if (ret < 0) { + VIRTIO_CRYPTO_INIT_LOG_DBG("failed to read pci capability list"); + return -1; + } + + while (pos) { + ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos); + if (ret < 0) { + VIRTIO_CRYPTO_INIT_LOG_ERR( + "failed to read pci cap at pos: %x", pos); + break; + } + + if (cap.cap_vndr == PCI_CAP_ID_MSIX) { + /* Transitional devices would also have this capability, + * that's why we also check if msix is enabled. + * 1st byte is cap ID; 2nd byte is the position of next + * cap; next two bytes are the flags. 
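get_cfg_addr() above rejects a capability window on three conditions before translating it to a mapped address: the BAR index must be valid, offset + length must not wrap around 32 bits, and the window must fit inside the BAR. Pulled out as a pure predicate in a standalone sketch (the bar count and BAR length are example inputs standing in for PCI_MAX_RESOURCE and mem_resource[bar].len):

#include <stdint.h>
#include <stdio.h>

static int cap_window_ok(uint8_t bar, uint32_t off, uint32_t len,
			 uint8_t nbars, uint64_t bar_len)
{
	if (bar >= nbars)
		return 0;             /* invalid bar */
	if (off + len < off)
		return 0;             /* 32-bit overflow */
	if ((uint64_t)off + len > bar_len)
		return 0;             /* window overflows bar space */
	return 1;
}

int main(void)
{
	printf("%d\n", cap_window_ok(4, 0x1000, 0x100, 6, 0x4000)); /* 1: fits */
	printf("%d\n", cap_window_ok(4, 0xffffffff, 2, 6, 0x4000)); /* 0: wraps */
	return 0;
}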
+ */ + uint16_t flags = ((uint16_t *)&cap)[1]; + + if (flags & PCI_MSIX_ENABLE) + hw->use_msix = VIRTIO_MSIX_ENABLED; + else + hw->use_msix = VIRTIO_MSIX_DISABLED; + } + + if (cap.cap_vndr != PCI_CAP_ID_VNDR) { + VIRTIO_CRYPTO_INIT_LOG_DBG( + "[%2x] skipping non VNDR cap id: %02x", + pos, cap.cap_vndr); + goto next; + } + + VIRTIO_CRYPTO_INIT_LOG_DBG( + "[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u", + pos, cap.cfg_type, cap.bar, cap.offset, cap.length); + + switch (cap.cfg_type) { + case VIRTIO_PCI_CAP_COMMON_CFG: + hw->common_cfg = get_cfg_addr(dev, &cap); + break; + case VIRTIO_PCI_CAP_NOTIFY_CFG: + rte_pci_read_config(dev, &hw->notify_off_multiplier, + 4, pos + sizeof(cap)); + hw->notify_base = get_cfg_addr(dev, &cap); + break; + case VIRTIO_PCI_CAP_DEVICE_CFG: + hw->dev_cfg = get_cfg_addr(dev, &cap); + break; + case VIRTIO_PCI_CAP_ISR_CFG: + hw->isr = get_cfg_addr(dev, &cap); + break; + } + +next: + pos = cap.cap_next; + } + + if (hw->common_cfg == NULL || hw->notify_base == NULL || + hw->dev_cfg == NULL || hw->isr == NULL) { + VIRTIO_CRYPTO_INIT_LOG_INFO("no modern virtio pci device found."); + return -1; + } + + VIRTIO_CRYPTO_INIT_LOG_INFO("found modern virtio pci device."); + + VIRTIO_CRYPTO_INIT_LOG_DBG("common cfg mapped at: %p", hw->common_cfg); + VIRTIO_CRYPTO_INIT_LOG_DBG("device cfg mapped at: %p", hw->dev_cfg); + VIRTIO_CRYPTO_INIT_LOG_DBG("isr cfg mapped at: %p", hw->isr); + VIRTIO_CRYPTO_INIT_LOG_DBG("notify base: %p, notify off multiplier: %u", + hw->notify_base, hw->notify_off_multiplier); + + return 0; +} + +/* + * Return -1: + * if there is error mapping with VFIO/UIO. + * if port map error when driver type is KDRV_NONE. + * if whitelisted but driver type is KDRV_UNKNOWN. + * Return 1 if kernel driver is managing the device. + * Return 0 on success. + */ +int +vtpci_cryptodev_init(struct rte_pci_device *dev, struct virtio_crypto_hw *hw) +{ + /* + * Try if we can succeed reading virtio pci caps, which exists + * only on modern pci device. If failed, we fallback to legacy + * virtio handling. + */ + if (virtio_read_caps(dev, hw) == 0) { + VIRTIO_CRYPTO_INIT_LOG_INFO("modern virtio pci detected."); + virtio_hw_internal[hw->dev_id].vtpci_ops = + &virtio_crypto_modern_ops; + hw->modern = 1; + return 0; + } + + /* + * virtio crypto conforms to virtio 1.0 and doesn't support + * legacy mode + */ + return -1; +} diff --git a/drivers/crypto/virtio/virtio_pci.h b/drivers/crypto/virtio/virtio_pci.h new file mode 100644 index 00000000..604ec366 --- /dev/null +++ b/drivers/crypto/virtio/virtio_pci.h @@ -0,0 +1,253 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD. + */ + +#ifndef _VIRTIO_PCI_H_ +#define _VIRTIO_PCI_H_ + +#include + +#include +#include +#include + +#include "virtio_crypto.h" + +struct virtqueue; + +/* VirtIO PCI vendor/device ID. */ +#define VIRTIO_CRYPTO_PCI_VENDORID 0x1AF4 +#define VIRTIO_CRYPTO_PCI_DEVICEID 0x1054 + +/* VirtIO ABI version, this must match exactly. */ +#define VIRTIO_PCI_ABI_VERSION 0 + +/* + * VirtIO Header, located in BAR 0. 
+ */ +#define VIRTIO_PCI_HOST_FEATURES 0 /* host's supported features (32bit, RO)*/ +#define VIRTIO_PCI_GUEST_FEATURES 4 /* guest's supported features (32, RW) */ +#define VIRTIO_PCI_QUEUE_PFN 8 /* physical address of VQ (32, RW) */ +#define VIRTIO_PCI_QUEUE_NUM 12 /* number of ring entries (16, RO) */ +#define VIRTIO_PCI_QUEUE_SEL 14 /* current VQ selection (16, RW) */ +#define VIRTIO_PCI_QUEUE_NOTIFY 16 /* notify host regarding VQ (16, RW) */ +#define VIRTIO_PCI_STATUS 18 /* device status register (8, RW) */ +#define VIRTIO_PCI_ISR 19 /* interrupt status register, reading + * also clears the register (8, RO) + */ +/* Only if MSIX is enabled: */ + +/* configuration change vector (16, RW) */ +#define VIRTIO_MSI_CONFIG_VECTOR 20 +/* vector for selected VQ notifications */ +#define VIRTIO_MSI_QUEUE_VECTOR 22 + +/* The bit of the ISR which indicates a device has an interrupt. */ +#define VIRTIO_PCI_ISR_INTR 0x1 +/* The bit of the ISR which indicates a device configuration change. */ +#define VIRTIO_PCI_ISR_CONFIG 0x2 +/* Vector value used to disable MSI for queue. */ +#define VIRTIO_MSI_NO_VECTOR 0xFFFF + +/* Status byte for guest to report progress. */ +#define VIRTIO_CONFIG_STATUS_RESET 0x00 +#define VIRTIO_CONFIG_STATUS_ACK 0x01 +#define VIRTIO_CONFIG_STATUS_DRIVER 0x02 +#define VIRTIO_CONFIG_STATUS_DRIVER_OK 0x04 +#define VIRTIO_CONFIG_STATUS_FEATURES_OK 0x08 +#define VIRTIO_CONFIG_STATUS_FAILED 0x80 + +/* + * Each virtqueue indirect descriptor list must be physically contiguous. + * To allow us to malloc(9) each list individually, limit the number + * supported to what will fit in one page. With 4KB pages, this is a limit + * of 256 descriptors. If there is ever a need for more, we can switch to + * contigmalloc(9) for the larger allocations, similar to what + * bus_dmamem_alloc(9) does. + * + * Note the sizeof(struct vring_desc) is 16 bytes. + */ +#define VIRTIO_MAX_INDIRECT ((int) (PAGE_SIZE / 16)) + +/* Do we get callbacks when the ring is completely used, even if we've + * suppressed them? + */ +#define VIRTIO_F_NOTIFY_ON_EMPTY 24 + +/* Can the device handle any descriptor layout? */ +#define VIRTIO_F_ANY_LAYOUT 27 + +/* We support indirect buffer descriptors */ +#define VIRTIO_RING_F_INDIRECT_DESC 28 + +#define VIRTIO_F_VERSION_1 32 +#define VIRTIO_F_IOMMU_PLATFORM 33 + +/* The Guest publishes the used index for which it expects an interrupt + * at the end of the avail ring. Host should ignore the avail->flags field. + */ +/* The Host publishes the avail index for which it expects a kick + * at the end of the used ring. Guest should ignore the used->flags field. + */ +#define VIRTIO_RING_F_EVENT_IDX 29 + +/* Common configuration */ +#define VIRTIO_PCI_CAP_COMMON_CFG 1 +/* Notifications */ +#define VIRTIO_PCI_CAP_NOTIFY_CFG 2 +/* ISR Status */ +#define VIRTIO_PCI_CAP_ISR_CFG 3 +/* Device specific configuration */ +#define VIRTIO_PCI_CAP_DEVICE_CFG 4 +/* PCI configuration access */ +#define VIRTIO_PCI_CAP_PCI_CFG 5 + +/* This is the PCI capability header: */ +struct virtio_pci_cap { + uint8_t cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */ + uint8_t cap_next; /* Generic PCI field: next ptr. */ + uint8_t cap_len; /* Generic PCI field: capability length */ + uint8_t cfg_type; /* Identifies the structure. */ + uint8_t bar; /* Where to find it. */ + uint8_t padding[3]; /* Pad to full dword. */ + uint32_t offset; /* Offset within bar. */ + uint32_t length; /* Length of the structure, in bytes. 
*/ +}; + +struct virtio_pci_notify_cap { + struct virtio_pci_cap cap; + uint32_t notify_off_multiplier; /* Multiplier for queue_notify_off. */ +}; + +/* Fields in VIRTIO_PCI_CAP_COMMON_CFG: */ +struct virtio_pci_common_cfg { + /* About the whole device. */ + uint32_t device_feature_select; /* read-write */ + uint32_t device_feature; /* read-only */ + uint32_t guest_feature_select; /* read-write */ + uint32_t guest_feature; /* read-write */ + uint16_t msix_config; /* read-write */ + uint16_t num_queues; /* read-only */ + uint8_t device_status; /* read-write */ + uint8_t config_generation; /* read-only */ + + /* About a specific virtqueue. */ + uint16_t queue_select; /* read-write */ + uint16_t queue_size; /* read-write, power of 2. */ + uint16_t queue_msix_vector; /* read-write */ + uint16_t queue_enable; /* read-write */ + uint16_t queue_notify_off; /* read-only */ + uint32_t queue_desc_lo; /* read-write */ + uint32_t queue_desc_hi; /* read-write */ + uint32_t queue_avail_lo; /* read-write */ + uint32_t queue_avail_hi; /* read-write */ + uint32_t queue_used_lo; /* read-write */ + uint32_t queue_used_hi; /* read-write */ +}; + +struct virtio_crypto_hw; + +struct virtio_pci_ops { + void (*read_dev_cfg)(struct virtio_crypto_hw *hw, size_t offset, + void *dst, int len); + void (*write_dev_cfg)(struct virtio_crypto_hw *hw, size_t offset, + const void *src, int len); + void (*reset)(struct virtio_crypto_hw *hw); + + uint8_t (*get_status)(struct virtio_crypto_hw *hw); + void (*set_status)(struct virtio_crypto_hw *hw, uint8_t status); + + uint64_t (*get_features)(struct virtio_crypto_hw *hw); + void (*set_features)(struct virtio_crypto_hw *hw, uint64_t features); + + uint8_t (*get_isr)(struct virtio_crypto_hw *hw); + + uint16_t (*set_config_irq)(struct virtio_crypto_hw *hw, uint16_t vec); + + uint16_t (*set_queue_irq)(struct virtio_crypto_hw *hw, + struct virtqueue *vq, uint16_t vec); + + uint16_t (*get_queue_num)(struct virtio_crypto_hw *hw, + uint16_t queue_id); + int (*setup_queue)(struct virtio_crypto_hw *hw, struct virtqueue *vq); + void (*del_queue)(struct virtio_crypto_hw *hw, struct virtqueue *vq); + void (*notify_queue)(struct virtio_crypto_hw *hw, struct virtqueue *vq); +}; + +struct virtio_crypto_hw { + /* control queue */ + struct virtqueue *cvq; + uint16_t dev_id; + uint16_t max_dataqueues; + uint64_t req_guest_features; + uint64_t guest_features; + uint8_t use_msix; + uint8_t modern; + uint32_t notify_off_multiplier; + uint8_t *isr; + uint16_t *notify_base; + struct virtio_pci_common_cfg *common_cfg; + struct virtio_crypto_config *dev_cfg; + const struct rte_cryptodev_capabilities *virtio_dev_capabilities; +}; + +/* + * While virtio_crypto_hw is stored in shared memory, this structure stores + * some infos that may vary in the multiple process model locally. + * For example, the vtpci_ops pointer. + */ +struct virtio_hw_internal { + const struct virtio_pci_ops *vtpci_ops; + struct rte_pci_ioport io; +}; + +#define VTPCI_OPS(hw) (virtio_hw_internal[(hw)->dev_id].vtpci_ops) +#define VTPCI_IO(hw) (&virtio_hw_internal[(hw)->dev_id].io) + +extern struct virtio_hw_internal virtio_hw_internal[RTE_MAX_VIRTIO_CRYPTO]; + +/* + * How many bits to shift physical queue address written to QUEUE_PFN. + * 12 is historical, and due to x86 page size. + */ +#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12 + +/* The alignment to use between consumer and producer parts of vring. 
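The virtio_hw_internal split above exists because virtio_crypto_hw lives in shared memory, where a function pointer installed by one process is meaningless in another; only the small dev_id is shared, and each process resolves it to its own locally installed ops table through VTPCI_OPS(). A standalone toy model of that indirection, with invented names:

#include <stdio.h>

struct ops { void (*notify)(int q); };
static void my_notify(int q) { printf("kick queue %d\n", q); }

#define MAX_DEV 8
static const struct ops *local_ops[MAX_DEV]; /* per-process, not shared */

struct shared_hw { int dev_id; };            /* lives in shared memory */
#define OPS(hw) (local_ops[(hw)->dev_id])    /* cf. VTPCI_OPS() */

int main(void)
{
	static const struct ops modern = { my_notify };
	struct shared_hw hw = { .dev_id = 3 };

	local_ops[hw.dev_id] = &modern;  /* done at init in each process */
	OPS(&hw)->notify(0);
	return 0;
}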
*/ +#define VIRTIO_PCI_VRING_ALIGN 4096 + +enum virtio_msix_status { + VIRTIO_MSIX_NONE = 0, + VIRTIO_MSIX_DISABLED = 1, + VIRTIO_MSIX_ENABLED = 2 +}; + +static inline int +vtpci_with_feature(struct virtio_crypto_hw *hw, uint64_t bit) +{ + return (hw->guest_features & (1ULL << bit)) != 0; +} + +/* + * Function declaration from virtio_pci.c + */ +int vtpci_cryptodev_init(struct rte_pci_device *dev, + struct virtio_crypto_hw *hw); +void vtpci_cryptodev_reset(struct virtio_crypto_hw *hw); + +void vtpci_cryptodev_reinit_complete(struct virtio_crypto_hw *hw); + +uint8_t vtpci_cryptodev_get_status(struct virtio_crypto_hw *hw); +void vtpci_cryptodev_set_status(struct virtio_crypto_hw *hw, uint8_t status); + +uint64_t vtpci_cryptodev_negotiate_features(struct virtio_crypto_hw *hw, + uint64_t host_features); + +void vtpci_write_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset, + const void *src, int length); + +void vtpci_read_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset, + void *dst, int length); + +uint8_t vtpci_cryptodev_isr(struct virtio_crypto_hw *hw); + +#endif /* _VIRTIO_PCI_H_ */ diff --git a/drivers/crypto/virtio/virtio_ring.h b/drivers/crypto/virtio/virtio_ring.h new file mode 100644 index 00000000..ee306745 --- /dev/null +++ b/drivers/crypto/virtio/virtio_ring.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD. + */ + +#ifndef _VIRTIO_RING_H_ +#define _VIRTIO_RING_H_ + +#include + +#include + +/* This marks a buffer as continuing via the next field. */ +#define VRING_DESC_F_NEXT 1 +/* This marks a buffer as write-only (otherwise read-only). */ +#define VRING_DESC_F_WRITE 2 +/* This means the buffer contains a list of buffer descriptors. */ +#define VRING_DESC_F_INDIRECT 4 + +/* The Host uses this in used->flags to advise the Guest: don't kick me + * when you add a buffer. It's unreliable, so it's simply an + * optimization. Guest will still kick if it's out of buffers. + */ +#define VRING_USED_F_NO_NOTIFY 1 +/* The Guest uses this in avail->flags to advise the Host: don't + * interrupt me when you consume a buffer. It's unreliable, so it's + * simply an optimization. + */ +#define VRING_AVAIL_F_NO_INTERRUPT 1 + +/* VirtIO ring descriptors: 16 bytes. + * These can chain together via "next". + */ +struct vring_desc { + uint64_t addr; /* Address (guest-physical). */ + uint32_t len; /* Length. */ + uint16_t flags; /* The flags as indicated above. */ + uint16_t next; /* We chain unused descriptors via this. */ +}; + +struct vring_avail { + uint16_t flags; + uint16_t idx; + uint16_t ring[0]; +}; + +/* id is a 16bit index. uint32_t is used here for ids for padding reasons. */ +struct vring_used_elem { + /* Index of start of used descriptor chain. */ + uint32_t id; + /* Total length of the descriptor chain which was written to. */ + uint32_t len; +}; + +struct vring_used { + uint16_t flags; + volatile uint16_t idx; + struct vring_used_elem ring[0]; +}; + +struct vring { + unsigned int num; + struct vring_desc *desc; + struct vring_avail *avail; + struct vring_used *used; +}; + +/* The standard layout for the ring is a continuous chunk of memory which + * looks like this. We assume num is a power of 2. + * + * struct vring { + * // The actual descriptors (16 bytes each) + * struct vring_desc desc[num]; + * + * // A ring of available descriptor heads with free-running index. 
+ * __u16 avail_flags; + * __u16 avail_idx; + * __u16 available[num]; + * __u16 used_event_idx; + * + * // Padding to the next align boundary. + * char pad[]; + * + * // A ring of used descriptor heads with free-running index. + * __u16 used_flags; + * __u16 used_idx; + * struct vring_used_elem used[num]; + * __u16 avail_event_idx; + * }; + * + * NOTE: for VirtIO PCI, align is 4096. + */ + +/* + * We publish the used event index at the end of the available ring, and vice + * versa. They are at the end for backwards compatibility. + */ +#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num]) +#define vring_avail_event(vr) (*(uint16_t *)&(vr)->used->ring[(vr)->num]) + +static inline size_t +vring_size(unsigned int num, unsigned long align) +{ + size_t size; + + size = num * sizeof(struct vring_desc); + size += sizeof(struct vring_avail) + (num * sizeof(uint16_t)); + size = RTE_ALIGN_CEIL(size, align); + size += sizeof(struct vring_used) + + (num * sizeof(struct vring_used_elem)); + return size; +} + +static inline void +vring_init(struct vring *vr, unsigned int num, uint8_t *p, + unsigned long align) +{ + vr->num = num; + vr->desc = (struct vring_desc *) p; + vr->avail = (struct vring_avail *) (p + + num * sizeof(struct vring_desc)); + vr->used = (void *) + RTE_ALIGN_CEIL((uintptr_t)(&vr->avail->ring[num]), align); +} + +/* + * The following is used with VIRTIO_RING_F_EVENT_IDX. + * Assuming a given event_idx value from the other side, if we have + * just incremented index from old to new_idx, should we trigger an + * event? + */ +static inline int +vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old) +{ + return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old); +} + +#endif /* _VIRTIO_RING_H_ */ diff --git a/drivers/crypto/virtio/virtio_rxtx.c b/drivers/crypto/virtio/virtio_rxtx.c new file mode 100644 index 00000000..e32a1ecd --- /dev/null +++ b/drivers/crypto/virtio/virtio_rxtx.c @@ -0,0 +1,527 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD. + */ +#include + +#include "virtqueue.h" +#include "virtio_cryptodev.h" +#include "virtio_crypto_algs.h" + +static void +vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx) +{ + struct vring_desc *dp, *dp_tail; + struct vq_desc_extra *dxp; + uint16_t desc_idx_last = desc_idx; + + dp = &vq->vq_ring.desc[desc_idx]; + dxp = &vq->vq_descx[desc_idx]; + vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs); + if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) { + while (dp->flags & VRING_DESC_F_NEXT) { + desc_idx_last = dp->next; + dp = &vq->vq_ring.desc[dp->next]; + } + } + dxp->ndescs = 0; + + /* + * We must append the existing free chain, if any, to the end of the + * newly freed chain. If the virtqueue was completely used, then + * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
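+ * (Illustrative example added in editing: starting from an empty free + * list, freeing the chain 3 -> 7 -> 9 sets head = 3 and tail = 9; a + * later free of descriptor 5 is appended via desc[9].next = 5 and the + * tail becomes 5.)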
+ */ + if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) { + vq->vq_desc_head_idx = desc_idx; + } else { + dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx]; + dp_tail->next = desc_idx; + } + + vq->vq_desc_tail_idx = desc_idx_last; + dp->next = VQ_RING_DESC_CHAIN_END; +} + +static uint16_t +virtqueue_dequeue_burst_rx(struct virtqueue *vq, + struct rte_crypto_op **rx_pkts, uint16_t num) +{ + struct vring_used_elem *uep; + struct rte_crypto_op *cop; + uint16_t used_idx, desc_idx; + uint16_t i; + struct virtio_crypto_inhdr *inhdr; + struct virtio_crypto_op_cookie *op_cookie; + + /* Caller does the check */ + for (i = 0; i < num ; i++) { + used_idx = (uint16_t)(vq->vq_used_cons_idx + & (vq->vq_nentries - 1)); + uep = &vq->vq_ring.used->ring[used_idx]; + desc_idx = (uint16_t)uep->id; + cop = (struct rte_crypto_op *) + vq->vq_descx[desc_idx].crypto_op; + if (unlikely(cop == NULL)) { + VIRTIO_CRYPTO_RX_LOG_DBG("vring descriptor with no " + "mbuf cookie at %u", + vq->vq_used_cons_idx); + break; + } + + op_cookie = (struct virtio_crypto_op_cookie *) + vq->vq_descx[desc_idx].cookie; + inhdr = &(op_cookie->inhdr); + switch (inhdr->status) { + case VIRTIO_CRYPTO_OK: + cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + break; + case VIRTIO_CRYPTO_ERR: + cop->status = RTE_CRYPTO_OP_STATUS_ERROR; + vq->packets_received_failed++; + break; + case VIRTIO_CRYPTO_BADMSG: + cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + vq->packets_received_failed++; + break; + case VIRTIO_CRYPTO_NOTSUPP: + cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + vq->packets_received_failed++; + break; + case VIRTIO_CRYPTO_INVSESS: + cop->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; + vq->packets_received_failed++; + break; + default: + break; + } + + vq->packets_received_total++; + + rx_pkts[i] = cop; + rte_mempool_put(vq->mpool, op_cookie); + + vq->vq_used_cons_idx++; + vq_ring_free_chain(vq, desc_idx); + vq->vq_descx[desc_idx].crypto_op = NULL; + } + + return i; +} + +static int +virtqueue_crypto_sym_pkt_header_arrange( + struct rte_crypto_op *cop, + struct virtio_crypto_op_data_req *data, + struct virtio_crypto_session *session) +{ + struct rte_crypto_sym_op *sym_op = cop->sym; + struct virtio_crypto_op_data_req *req_data = data; + struct virtio_crypto_op_ctrl_req *ctrl = &session->ctrl; + struct virtio_crypto_sym_create_session_req *sym_sess_req = + &ctrl->u.sym_create_session; + struct virtio_crypto_alg_chain_session_para *chain_para = + &sym_sess_req->u.chain.para; + struct virtio_crypto_cipher_session_para *cipher_para; + + req_data->header.session_id = session->session_id; + + switch (sym_sess_req->op_type) { + case VIRTIO_CRYPTO_SYM_OP_CIPHER: + req_data->u.sym_req.op_type = VIRTIO_CRYPTO_SYM_OP_CIPHER; + + cipher_para = &sym_sess_req->u.cipher.para; + if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT) + req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT; + else + req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT; + + req_data->u.sym_req.u.cipher.para.iv_len + = session->iv.length; + + req_data->u.sym_req.u.cipher.para.src_data_len = + (sym_op->cipher.data.length + + sym_op->cipher.data.offset); + req_data->u.sym_req.u.cipher.para.dst_data_len = + req_data->u.sym_req.u.cipher.para.src_data_len; + break; + case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING: + req_data->u.sym_req.op_type = + VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING; + + cipher_para = &chain_para->cipher_param; + if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT) + req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT; + else + req_data->header.opcode = 
VIRTIO_CRYPTO_CIPHER_DECRYPT; + + req_data->u.sym_req.u.chain.para.iv_len = session->iv.length; + req_data->u.sym_req.u.chain.para.aad_len = session->aad.length; + + req_data->u.sym_req.u.chain.para.src_data_len = + (sym_op->cipher.data.length + + sym_op->cipher.data.offset); + req_data->u.sym_req.u.chain.para.dst_data_len = + req_data->u.sym_req.u.chain.para.src_data_len; + req_data->u.sym_req.u.chain.para.cipher_start_src_offset = + sym_op->cipher.data.offset; + req_data->u.sym_req.u.chain.para.len_to_cipher = + sym_op->cipher.data.length; + req_data->u.sym_req.u.chain.para.hash_start_src_offset = + sym_op->auth.data.offset; + req_data->u.sym_req.u.chain.para.len_to_hash = + sym_op->auth.data.length; + req_data->u.sym_req.u.chain.para.aad_len = + chain_para->aad_len; + + if (chain_para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN) + req_data->u.sym_req.u.chain.para.hash_result_len = + chain_para->u.hash_param.hash_result_len; + if (chain_para->hash_mode == + VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH) + req_data->u.sym_req.u.chain.para.hash_result_len = + chain_para->u.mac_param.hash_result_len; + break; + default: + return -1; + } + + return 0; +} + +static int +virtqueue_crypto_sym_enqueue_xmit( + struct virtqueue *txvq, + struct rte_crypto_op *cop) +{ + uint16_t idx = 0; + uint16_t num_entry; + uint16_t needed = 1; + uint16_t head_idx; + struct vq_desc_extra *dxp; + struct vring_desc *start_dp; + struct vring_desc *desc; + uint64_t indirect_op_data_req_phys_addr; + uint16_t req_data_len = sizeof(struct virtio_crypto_op_data_req); + uint32_t indirect_vring_addr_offset = req_data_len + + sizeof(struct virtio_crypto_inhdr); + uint32_t indirect_iv_addr_offset = indirect_vring_addr_offset + + sizeof(struct vring_desc) * NUM_ENTRY_VIRTIO_CRYPTO_OP; + struct rte_crypto_sym_op *sym_op = cop->sym; + struct virtio_crypto_session *session = + (struct virtio_crypto_session *)get_sym_session_private_data( + cop->sym->session, cryptodev_virtio_driver_id); + struct virtio_crypto_op_data_req *op_data_req; + uint32_t hash_result_len = 0; + struct virtio_crypto_op_cookie *crypto_op_cookie; + struct virtio_crypto_alg_chain_session_para *para; + + if (unlikely(sym_op->m_src->nb_segs != 1)) + return -EMSGSIZE; + if (unlikely(txvq->vq_free_cnt == 0)) + return -ENOSPC; + if (unlikely(txvq->vq_free_cnt < needed)) + return -EMSGSIZE; + head_idx = txvq->vq_desc_head_idx; + if (unlikely(head_idx >= txvq->vq_nentries)) + return -EFAULT; + if (unlikely(session == NULL)) + return -EFAULT; + + dxp = &txvq->vq_descx[head_idx]; + + if (rte_mempool_get(txvq->mpool, &dxp->cookie)) { + VIRTIO_CRYPTO_TX_LOG_ERR("can not get cookie"); + return -EFAULT; + } + crypto_op_cookie = dxp->cookie; + indirect_op_data_req_phys_addr = + rte_mempool_virt2iova(crypto_op_cookie); + op_data_req = (struct virtio_crypto_op_data_req *)crypto_op_cookie; + + if (virtqueue_crypto_sym_pkt_header_arrange(cop, op_data_req, session)) + return -EFAULT; + + /* status is initialized to VIRTIO_CRYPTO_ERR */ + ((struct virtio_crypto_inhdr *) + ((uint8_t *)op_data_req + req_data_len))->status = + VIRTIO_CRYPTO_ERR; + + /* point to indirect vring entry */ + desc = (struct vring_desc *) + ((uint8_t *)op_data_req + indirect_vring_addr_offset); + for (idx = 0; idx < (NUM_ENTRY_VIRTIO_CRYPTO_OP - 1); idx++) + desc[idx].next = idx + 1; + desc[NUM_ENTRY_VIRTIO_CRYPTO_OP - 1].next = VQ_RING_DESC_CHAIN_END; + + idx = 0; + + /* indirect vring: first part, virtio_crypto_op_data_req */ + desc[idx].addr = indirect_op_data_req_phys_addr; + desc[idx].len = req_data_len; + 
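/* + * (Editorial sketch, not part of the original patch: the cookie taken + * from txvq->mpool is one flat allocation laid out as + * + *	offset 0                      struct virtio_crypto_op_data_req + *	req_data_len                  struct virtio_crypto_inhdr (status) + *	indirect_vring_addr_offset    struct vring_desc[NUM_ENTRY_VIRTIO_CRYPTO_OP] + *	indirect_iv_addr_offset       IV bytes, when they must be copied + * + * The request, status and copied-IV entries built below all point into + * this allocation at indirect_op_data_req_phys_addr plus one of these + * offsets; mbuf data, AAD and digests keep their own physical addresses.) + */ +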
desc[idx++].flags = VRING_DESC_F_NEXT; + + /* indirect vring: iv of cipher */ + if (session->iv.length) { + if (cop->phys_addr) + desc[idx].addr = cop->phys_addr + session->iv.offset; + else { + rte_memcpy(crypto_op_cookie->iv, + rte_crypto_op_ctod_offset(cop, + uint8_t *, session->iv.offset), + session->iv.length); + desc[idx].addr = indirect_op_data_req_phys_addr + + indirect_iv_addr_offset; + } + + desc[idx].len = session->iv.length; + desc[idx++].flags = VRING_DESC_F_NEXT; + } + + /* indirect vring: additional auth data */ + if (session->aad.length) { + desc[idx].addr = session->aad.phys_addr; + desc[idx].len = session->aad.length; + desc[idx++].flags = VRING_DESC_F_NEXT; + } + + /* indirect vring: src data */ + desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_src, 0); + desc[idx].len = (sym_op->cipher.data.offset + + sym_op->cipher.data.length); + desc[idx++].flags = VRING_DESC_F_NEXT; + + /* indirect vring: dst data */ + if (sym_op->m_dst) { + desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_dst, 0); + desc[idx].len = (sym_op->cipher.data.offset + + sym_op->cipher.data.length); + } else { + desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_src, 0); + desc[idx].len = (sym_op->cipher.data.offset + + sym_op->cipher.data.length); + } + desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT; + + /* indirect vring: digest result */ + para = &(session->ctrl.u.sym_create_session.u.chain.para); + if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN) + hash_result_len = para->u.hash_param.hash_result_len; + if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH) + hash_result_len = para->u.mac_param.hash_result_len; + if (hash_result_len > 0) { + desc[idx].addr = sym_op->auth.digest.phys_addr; + desc[idx].len = hash_result_len; + desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT; + } + + /* indirect vring: last part, status returned */ + desc[idx].addr = indirect_op_data_req_phys_addr + req_data_len; + desc[idx].len = sizeof(struct virtio_crypto_inhdr); + desc[idx++].flags = VRING_DESC_F_WRITE; + + num_entry = idx; + + /* save the infos to use when receiving packets */ + dxp->crypto_op = (void *)cop; + dxp->ndescs = needed; + + /* use a single buffer */ + start_dp = txvq->vq_ring.desc; + start_dp[head_idx].addr = indirect_op_data_req_phys_addr + + indirect_vring_addr_offset; + start_dp[head_idx].len = num_entry * sizeof(struct vring_desc); + start_dp[head_idx].flags = VRING_DESC_F_INDIRECT; + + idx = start_dp[head_idx].next; + txvq->vq_desc_head_idx = idx; + if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) + txvq->vq_desc_tail_idx = idx; + txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed); + vq_update_avail_ring(txvq, head_idx); + + return 0; +} + +static int +virtqueue_crypto_enqueue_xmit(struct virtqueue *txvq, + struct rte_crypto_op *cop) +{ + int ret; + + switch (cop->type) { + case RTE_CRYPTO_OP_TYPE_SYMMETRIC: + ret = virtqueue_crypto_sym_enqueue_xmit(txvq, cop); + break; + default: + VIRTIO_CRYPTO_TX_LOG_ERR("invalid crypto op type %u", + cop->type); + ret = -EFAULT; + break; + } + + return ret; +} + +static int +virtio_crypto_vring_start(struct virtqueue *vq) +{ + struct virtio_crypto_hw *hw = vq->hw; + int i, size = vq->vq_nentries; + struct vring *vr = &vq->vq_ring; + uint8_t *ring_mem = vq->vq_ring_virt_mem; + + PMD_INIT_FUNC_TRACE(); + + vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN); + vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1); + vq->vq_free_cnt = vq->vq_nentries; + + /* Chain all the descriptors in the ring with 
an END */ + for (i = 0; i < size - 1; i++) + vr->desc[i].next = (uint16_t)(i + 1); + vr->desc[i].next = VQ_RING_DESC_CHAIN_END; + + /* + * Disable the device (host) interrupting the guest. + */ + virtqueue_disable_intr(vq); + + /* + * Set guest physical address of the virtqueue + * in VIRTIO_PCI_QUEUE_PFN config register of device + * to share with the backend + */ + if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) { + VIRTIO_CRYPTO_INIT_LOG_ERR("setup_queue failed"); + return -EINVAL; + } + + return 0; +} + +void +virtio_crypto_ctrlq_start(struct rte_cryptodev *dev) +{ + struct virtio_crypto_hw *hw = dev->data->dev_private; + + if (hw->cvq) { + virtio_crypto_vring_start(hw->cvq); + VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq); + } +} + +void +virtio_crypto_dataq_start(struct rte_cryptodev *dev) +{ + /* + * Start data vrings + * - Setup vring structure for data queues + */ + uint16_t i; + struct virtio_crypto_hw *hw = dev->data->dev_private; + + PMD_INIT_FUNC_TRACE(); + + /* Start data vring. */ + for (i = 0; i < hw->max_dataqueues; i++) { + virtio_crypto_vring_start(dev->data->queue_pairs[i]); + VIRTQUEUE_DUMP((struct virtqueue *)dev->data->queue_pairs[i]); + } +} + +/* vring size of data queue is 1024 */ +#define VIRTIO_MBUF_BURST_SZ 1024 + +uint16_t +virtio_crypto_pkt_rx_burst(void *tx_queue, struct rte_crypto_op **rx_pkts, + uint16_t nb_pkts) +{ + struct virtqueue *txvq = tx_queue; + uint16_t nb_used, num, nb_rx; + + nb_used = VIRTQUEUE_NUSED(txvq); + + virtio_rmb(); + + num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts); + num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) + ? num : VIRTIO_MBUF_BURST_SZ); + + if (num == 0) + return 0; + + nb_rx = virtqueue_dequeue_burst_rx(txvq, rx_pkts, num); + VIRTIO_CRYPTO_RX_LOG_DBG("used:%d dequeue:%d", nb_used, num); + + return nb_rx; +} + +uint16_t +virtio_crypto_pkt_tx_burst(void *tx_queue, struct rte_crypto_op **tx_pkts, + uint16_t nb_pkts) +{ + struct virtqueue *txvq; + uint16_t nb_tx; + int error; + + if (unlikely(nb_pkts < 1)) + return nb_pkts; + if (unlikely(tx_queue == NULL)) { + VIRTIO_CRYPTO_TX_LOG_ERR("tx_queue is NULL"); + return 0; + } + txvq = tx_queue; + + VIRTIO_CRYPTO_TX_LOG_DBG("%d packets to xmit", nb_pkts); + + for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { + struct rte_mbuf *txm = tx_pkts[nb_tx]->sym->m_src; + /* nb_segs is always 1 in the virtio crypto case */ + int need = txm->nb_segs - txvq->vq_free_cnt; + + /* + * A positive value indicates there is not enough space + * in the vring descriptors. + */ + if (unlikely(need > 0)) { + /* + * Try again, because the receive path may have + * freed some space. + */ + need = txm->nb_segs - txvq->vq_free_cnt; + if (unlikely(need > 0)) { + VIRTIO_CRYPTO_TX_LOG_DBG("No free tx " + "descriptors to transmit"); + break; + } + } + + txvq->packets_sent_total++; + + /* Enqueue Packet buffers */ + error = virtqueue_crypto_enqueue_xmit(txvq, tx_pkts[nb_tx]); + if (unlikely(error)) { + if (error == ENOSPC) + VIRTIO_CRYPTO_TX_LOG_ERR( + "virtqueue_enqueue Free count = 0"); + else if (error == EMSGSIZE) + VIRTIO_CRYPTO_TX_LOG_ERR( + "virtqueue_enqueue Free count < 1"); + else + VIRTIO_CRYPTO_TX_LOG_ERR( + "virtqueue_enqueue error: %d", error); + txvq->packets_sent_failed++; + break; + } + } + + if (likely(nb_tx)) { + vq_update_avail_idx(txvq); + + if (unlikely(virtqueue_kick_prepare(txvq))) { + virtqueue_notify(txvq); + VIRTIO_CRYPTO_TX_LOG_DBG("Notified backend after xmit"); + } + } + + return nb_tx; +} diff --git a/drivers/crypto/virtio/virtqueue.c b/drivers/crypto/virtio/virtqueue.c new file mode
100644 index 00000000..fd8be581 --- /dev/null +++ b/drivers/crypto/virtio/virtqueue.c @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD. + */ + +#include + +#include +#include +#include + +#include "virtqueue.h" + +void +virtqueue_disable_intr(struct virtqueue *vq) +{ + /* + * Set VRING_AVAIL_F_NO_INTERRUPT to hint host + * not to interrupt when it consumes packets + * Note: this is only considered a hint to the host + */ + vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; +} + +void +virtqueue_detatch_unused(struct virtqueue *vq) +{ + struct rte_crypto_op *cop = NULL; + + int idx; + + if (vq != NULL) + for (idx = 0; idx < vq->vq_nentries; idx++) { + cop = vq->vq_descx[idx].crypto_op; + if (cop) { + if (cop->sym->m_src) + rte_pktmbuf_free(cop->sym->m_src); + if (cop->sym->m_dst) + rte_pktmbuf_free(cop->sym->m_dst); + rte_crypto_op_free(cop); + vq->vq_descx[idx].crypto_op = NULL; + } + } +} diff --git a/drivers/crypto/virtio/virtqueue.h b/drivers/crypto/virtio/virtqueue.h new file mode 100644 index 00000000..bf10c657 --- /dev/null +++ b/drivers/crypto/virtio/virtqueue.h @@ -0,0 +1,171 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD. + */ + +#ifndef _VIRTQUEUE_H_ +#define _VIRTQUEUE_H_ + +#include + +#include +#include +#include +#include + +#include "virtio_pci.h" +#include "virtio_ring.h" +#include "virtio_logs.h" +#include "virtio_crypto.h" + +struct rte_mbuf; + +/* + * Per virtio_config.h in Linux. + * For virtio_pci on SMP, we don't need to order with respect to MMIO + * accesses through relaxed memory I/O windows, so smp_mb() et al are + * sufficient. + * + */ +#define virtio_mb() rte_smp_mb() +#define virtio_rmb() rte_smp_rmb() +#define virtio_wmb() rte_smp_wmb() + +#define VIRTQUEUE_MAX_NAME_SZ 32 + +enum { VTCRYPTO_DATAQ = 0, VTCRYPTO_CTRLQ = 1 }; + +/** + * The maximum virtqueue size is 2^15. Use that value as the end of + * descriptor chain terminator since it will never be a valid index + * in the descriptor table. This is used to verify we are correctly + * handling vq_free_cnt. + */ +#define VQ_RING_DESC_CHAIN_END 32768 + +struct vq_desc_extra { + void *crypto_op; + void *cookie; + uint16_t ndescs; +}; + +struct virtqueue { + /**< virtio_crypto_hw structure pointer. */ + struct virtio_crypto_hw *hw; + /**< mem zone to populate RX ring. */ + const struct rte_memzone *mz; + /**< memzone to populate hdr and request. */ + struct rte_mempool *mpool; + uint8_t dev_id; /**< Device identifier. */ + uint16_t vq_queue_index; /**< PCI queue index */ + + void *vq_ring_virt_mem; /**< linear address of vring*/ + unsigned int vq_ring_size; + phys_addr_t vq_ring_mem; /**< physical address of vring */ + + struct vring vq_ring; /**< vring keeping desc, used and avail */ + uint16_t vq_free_cnt; /**< num of desc available */ + uint16_t vq_nentries; /**< vring desc numbers */ + + /** + * Head of the free chain in the descriptor table. If + * there are no free descriptors, this will be set to + * VQ_RING_DESC_CHAIN_END. + */ + uint16_t vq_desc_head_idx; + uint16_t vq_desc_tail_idx; + /** + * Last consumed descriptor in the used table, + * trails vq_ring.used->idx. 
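+ * (Editorial example: if vq_ring.used->idx is 10 and vq_used_cons_idx + * is 7, three completed requests are pending; the VIRTQUEUE_NUSED() + * macro below computes exactly this difference.)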
+ */ + uint16_t vq_used_cons_idx; + uint16_t vq_avail_idx; + + /* Statistics */ + uint64_t packets_sent_total; + uint64_t packets_sent_failed; + uint64_t packets_received_total; + uint64_t packets_received_failed; + + uint16_t *notify_addr; + + struct vq_desc_extra vq_descx[0]; +}; + +/** + * Tell the backend not to interrupt us. + */ +void virtqueue_disable_intr(struct virtqueue *vq); + +/** + * Get all mbufs to be freed. + */ +void virtqueue_detatch_unused(struct virtqueue *vq); + +static inline int +virtqueue_full(const struct virtqueue *vq) +{ + return vq->vq_free_cnt == 0; +} + +#define VIRTQUEUE_NUSED(vq) \ + ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx)) + +static inline void +vq_update_avail_idx(struct virtqueue *vq) +{ + virtio_wmb(); + vq->vq_ring.avail->idx = vq->vq_avail_idx; +} + +static inline void +vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx) +{ + uint16_t avail_idx; + /* + * Place the head of the descriptor chain into the next slot and make + * it usable to the host. The chain is made available now rather than + * deferring to virtqueue_notify() in the hopes that if the host is + * currently running on another CPU, we can keep it processing the new + * descriptor. + */ + avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1)); + if (unlikely(vq->vq_ring.avail->ring[avail_idx] != desc_idx)) + vq->vq_ring.avail->ring[avail_idx] = desc_idx; + vq->vq_avail_idx++; +} + +static inline int +virtqueue_kick_prepare(struct virtqueue *vq) +{ + return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY); +} + +static inline void +virtqueue_notify(struct virtqueue *vq) +{ + /* + * Ensure updated avail->idx is visible to the host. + * For virtio on IA, the notification is through an io port operation, + * which is itself a serializing instruction. + */ + VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq); +} + +/** + * Dump virtqueue internal structures, for debug purpose only.
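+ * (Editorial usage note: typically invoked right after a queue is + * started, e.g. VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq) in + * virtio_crypto_ctrlq_start() in virtio_rxtx.c.)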
+ */ +#define VIRTQUEUE_DUMP(vq) do { \ + uint16_t used_idx, nused; \ + used_idx = (vq)->vq_ring.used->idx; \ + nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \ + VIRTIO_CRYPTO_INIT_LOG_DBG(\ + "VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \ + " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \ + " avail.flags=0x%x; used.flags=0x%x", \ + (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \ + (vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \ + (vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \ + (vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \ +} while (0) + +#endif /* _VIRTQUEUE_H_ */ diff --git a/drivers/crypto/zuc/rte_zuc_pmd.c b/drivers/crypto/zuc/rte_zuc_pmd.c index ad65b80c..313f4590 100644 --- a/drivers/crypto/zuc/rte_zuc_pmd.c +++ b/drivers/crypto/zuc/rte_zuc_pmd.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2016-2017 Intel Corporation + * Copyright(c) 2016-2018 Intel Corporation */ #include @@ -11,8 +11,7 @@ #include #include "rte_zuc_pmd_private.h" - -#define ZUC_MAX_BURST 8 +#define ZUC_MAX_BURST 4 #define BYTE_LEN 8 static uint8_t cryptodev_driver_id; @@ -78,7 +77,7 @@ zuc_set_session_parameters(struct zuc_session *sess, break; case ZUC_OP_NOT_SUPPORTED: default: - ZUC_LOG_ERR("Unsupported operation chain order parameter"); + ZUC_LOG(ERR, "Unsupported operation chain order parameter"); return -ENOTSUP; } @@ -88,7 +87,7 @@ zuc_set_session_parameters(struct zuc_session *sess, return -ENOTSUP; if (cipher_xform->cipher.iv.length != ZUC_IV_KEY_LENGTH) { - ZUC_LOG_ERR("Wrong IV length"); + ZUC_LOG(ERR, "Wrong IV length"); return -EINVAL; } sess->cipher_iv_offset = cipher_xform->cipher.iv.offset; @@ -104,14 +103,14 @@ zuc_set_session_parameters(struct zuc_session *sess, return -ENOTSUP; if (auth_xform->auth.digest_length != ZUC_DIGEST_LENGTH) { - ZUC_LOG_ERR("Wrong digest length"); + ZUC_LOG(ERR, "Wrong digest length"); return -EINVAL; } sess->auth_op = auth_xform->auth.op; if (auth_xform->auth.iv.length != ZUC_IV_KEY_LENGTH) { - ZUC_LOG_ERR("Wrong IV length"); + ZUC_LOG(ERR, "Wrong IV length"); return -EINVAL; } sess->auth_iv_offset = auth_xform->auth.iv.offset; @@ -135,7 +134,7 @@ zuc_get_session(struct zuc_qp *qp, struct rte_crypto_op *op) if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) { if (likely(op->sym->session != NULL)) - sess = (struct zuc_session *)get_session_private_data( + sess = (struct zuc_session *)get_sym_session_private_data( op->sym->session, cryptodev_driver_id); } else { @@ -157,8 +156,8 @@ zuc_get_session(struct zuc_qp *qp, struct rte_crypto_op *op) sess = NULL; } op->sym->session = (struct rte_cryptodev_sym_session *)_sess; - set_session_private_data(op->sym->session, cryptodev_driver_id, - _sess_private_data); + set_sym_session_private_data(op->sym->session, + cryptodev_driver_id, _sess_private_data); } if (unlikely(sess == NULL)) @@ -168,10 +167,10 @@ zuc_get_session(struct zuc_qp *qp, struct rte_crypto_op *op) return sess; } -/** Encrypt/decrypt mbufs with same cipher key. */ +/** Encrypt/decrypt mbufs. 
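+ * (Note added in editing: each op in a burst may now use a different + * session; see the sessions[] parameter introduced below.)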
*/ static uint8_t process_zuc_cipher_op(struct rte_crypto_op **ops, - struct zuc_session *session, + struct zuc_session **sessions, uint8_t num_ops) { unsigned i; @@ -180,22 +179,25 @@ process_zuc_cipher_op(struct rte_crypto_op **ops, uint8_t *iv[ZUC_MAX_BURST]; uint32_t num_bytes[ZUC_MAX_BURST]; uint8_t *cipher_keys[ZUC_MAX_BURST]; + struct zuc_session *sess; for (i = 0; i < num_ops; i++) { if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0) || ((ops[i]->sym->cipher.data.offset % BYTE_LEN) != 0)) { ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; - ZUC_LOG_ERR("Data Length or offset"); + ZUC_LOG(ERR, "Data Length or offset"); break; } + sess = sessions[i]; + #ifdef RTE_LIBRTE_PMD_ZUC_DEBUG if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) || (ops[i]->sym->m_dst != NULL && !rte_pktmbuf_is_contiguous( ops[i]->sym->m_dst))) { - ZUC_LOG_ERR("PMD supports only contiguous mbufs, " + ZUC_LOG(ERR, "PMD supports only contiguous mbufs, " "op (%p) provides noncontiguous mbuf as " "source/destination buffer.\n", ops[i]); ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; @@ -211,10 +213,10 @@ process_zuc_cipher_op(struct rte_crypto_op **ops, rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) + (ops[i]->sym->cipher.data.offset >> 3); iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *, - session->cipher_iv_offset); + sess->cipher_iv_offset); num_bytes[i] = ops[i]->sym->cipher.data.length >> 3; - cipher_keys[i] = session->pKey_cipher; + cipher_keys[i] = sess->pKey_cipher; processed_ops++; } @@ -225,10 +227,10 @@ process_zuc_cipher_op(struct rte_crypto_op **ops, return processed_ops; } -/** Generate/verify hash from mbufs with same hash key. */ +/** Generate/verify hash from mbufs. */ static int process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops, - struct zuc_session *session, + struct zuc_session **sessions, uint8_t num_ops) { unsigned i; @@ -237,26 +239,29 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops, uint32_t *dst; uint32_t length_in_bits; uint8_t *iv; + struct zuc_session *sess; for (i = 0; i < num_ops; i++) { /* Data must be byte aligned */ if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) { ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; - ZUC_LOG_ERR("Offset"); + ZUC_LOG(ERR, "Offset"); break; } + sess = sessions[i]; + length_in_bits = ops[i]->sym->auth.data.length; src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) + (ops[i]->sym->auth.data.offset >> 3); iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *, - session->auth_iv_offset); + sess->auth_iv_offset); - if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) { + if (sess->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) { dst = (uint32_t *)qp->temp_digest; - sso_zuc_eia3_1_buffer(session->pKey_hash, + sso_zuc_eia3_1_buffer(sess->pKey_hash, iv, src, length_in_bits, dst); /* Verify digest. */ @@ -266,7 +271,7 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops, } else { dst = (uint32_t *)ops[i]->sym->auth.digest.data; - sso_zuc_eia3_1_buffer(session->pKey_hash, + sso_zuc_eia3_1_buffer(sess->pKey_hash, iv, src, length_in_bits, dst); } @@ -276,33 +281,34 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops, return processed_ops; } -/** Process a batch of crypto ops which shares the same session. */ +/** Process a batch of crypto ops which shares the same operation type. 
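+ * (Note added in editing: the operation type is now passed in explicitly + * as op_type, so the dispatch below no longer reads it from a shared + * session.)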
*/ static int -process_ops(struct rte_crypto_op **ops, struct zuc_session *session, +process_ops(struct rte_crypto_op **ops, enum zuc_operation op_type, + struct zuc_session **sessions, struct zuc_qp *qp, uint8_t num_ops, uint16_t *accumulated_enqueued_ops) { unsigned i; unsigned enqueued_ops, processed_ops; - switch (session->op) { + switch (op_type) { case ZUC_OP_ONLY_CIPHER: processed_ops = process_zuc_cipher_op(ops, - session, num_ops); + sessions, num_ops); break; case ZUC_OP_ONLY_AUTH: - processed_ops = process_zuc_hash_op(qp, ops, session, + processed_ops = process_zuc_hash_op(qp, ops, sessions, num_ops); break; case ZUC_OP_CIPHER_AUTH: - processed_ops = process_zuc_cipher_op(ops, session, + processed_ops = process_zuc_cipher_op(ops, sessions, num_ops); - process_zuc_hash_op(qp, ops, session, processed_ops); + process_zuc_hash_op(qp, ops, sessions, processed_ops); break; case ZUC_OP_AUTH_CIPHER: - processed_ops = process_zuc_hash_op(qp, ops, session, + processed_ops = process_zuc_hash_op(qp, ops, sessions, num_ops); - process_zuc_cipher_op(ops, session, processed_ops); + process_zuc_cipher_op(ops, sessions, processed_ops); break; default: /* Operation not supported. */ @@ -318,10 +324,10 @@ process_ops(struct rte_crypto_op **ops, struct zuc_session *session, ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS; /* Free session if a session-less crypto op. */ if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) { - memset(session, 0, sizeof(struct zuc_session)); + memset(sessions[i], 0, sizeof(struct zuc_session)); memset(ops[i]->sym->session, 0, - rte_cryptodev_get_header_session_size()); - rte_mempool_put(qp->sess_mp, session); + rte_cryptodev_sym_get_header_session_size()); + rte_mempool_put(qp->sess_mp, sessions[i]); rte_mempool_put(qp->sess_mp, ops[i]->sym->session); ops[i]->sym->session = NULL; } @@ -342,7 +348,10 @@ zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops, struct rte_crypto_op *c_ops[ZUC_MAX_BURST]; struct rte_crypto_op *curr_c_op; - struct zuc_session *prev_sess = NULL, *curr_sess = NULL; + struct zuc_session *curr_sess; + struct zuc_session *sessions[ZUC_MAX_BURST]; + enum zuc_operation prev_zuc_op = ZUC_OP_NOT_SUPPORTED; + enum zuc_operation curr_zuc_op; struct zuc_qp *qp = queue_pair; unsigned i; uint8_t burst_size = 0; @@ -352,61 +361,70 @@ zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops, for (i = 0; i < nb_ops; i++) { curr_c_op = ops[i]; - /* Set status as enqueued (not processed yet) by default. */ - curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; - curr_sess = zuc_get_session(qp, curr_c_op); - if (unlikely(curr_sess == NULL || - curr_sess->op == ZUC_OP_NOT_SUPPORTED)) { + if (unlikely(curr_sess == NULL)) { curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; break; } - /* Batch ops that share the same session. */ - if (prev_sess == NULL) { - prev_sess = curr_sess; - c_ops[burst_size++] = curr_c_op; - } else if (curr_sess == prev_sess) { - c_ops[burst_size++] = curr_c_op; + curr_zuc_op = curr_sess->op; + + /* + * Batch ops that share the same operation type + * (cipher only, auth only...). + */ + if (burst_size == 0) { + prev_zuc_op = curr_zuc_op; + c_ops[0] = curr_c_op; + sessions[0] = curr_sess; + burst_size++; + } else if (curr_zuc_op == prev_zuc_op) { + c_ops[burst_size] = curr_c_op; + sessions[burst_size] = curr_sess; + burst_size++; /* * When there are enough ops to process in a batch, * process them, and start a new batch. 
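+		 * (Editorial example: for an enqueue of ops with types + * C,C,A,C (cipher, cipher, auth, cipher) the batches + * processed are {C,C} when the type changes, then {A}, + * then the trailing {C} after the loop.)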
*/ if (burst_size == ZUC_MAX_BURST) { - processed_ops = process_ops(c_ops, prev_sess, - qp, burst_size, &enqueued_ops); + processed_ops = process_ops(c_ops, curr_zuc_op, + sessions, qp, burst_size, + &enqueued_ops); if (processed_ops < burst_size) { burst_size = 0; break; } burst_size = 0; - prev_sess = NULL; } } else { /* - * Different session, process the ops - * of the previous session. + * Different operation type, process the ops + * of the previous type. */ - processed_ops = process_ops(c_ops, prev_sess, - qp, burst_size, &enqueued_ops); + processed_ops = process_ops(c_ops, prev_zuc_op, + sessions, qp, burst_size, + &enqueued_ops); if (processed_ops < burst_size) { burst_size = 0; break; } burst_size = 0; - prev_sess = curr_sess; + prev_zuc_op = curr_zuc_op; - c_ops[burst_size++] = curr_c_op; + c_ops[0] = curr_c_op; + sessions[0] = curr_sess; + burst_size++; } } if (burst_size != 0) { - /* Process the crypto ops of the last session. */ - processed_ops = process_ops(c_ops, prev_sess, - qp, burst_size, &enqueued_ops); + /* Process the crypto ops of the last operation type. */ + processed_ops = process_ops(c_ops, prev_zuc_op, + sessions, qp, burst_size, + &enqueued_ops); } qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops; @@ -442,7 +460,7 @@ cryptodev_zuc_create(const char *name, dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params); if (dev == NULL) { - ZUC_LOG_ERR("failed to create cryptodev vdev"); + ZUC_LOG(ERR, "failed to create cryptodev vdev"); goto init_error; } @@ -460,11 +478,10 @@ cryptodev_zuc_create(const char *name, internals = dev->data->dev_private; internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs; - internals->max_nb_sessions = init_params->max_nb_sessions; return 0; init_error: - ZUC_LOG_ERR("driver %s: cryptodev_zuc_create failed", + ZUC_LOG(ERR, "driver %s: failed", init_params->name); cryptodev_zuc_remove(vdev); @@ -478,8 +495,7 @@ cryptodev_zuc_probe(struct rte_vdev_device *vdev) "", sizeof(struct zuc_private), rte_socket_id(), - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS, - RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS + RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS }; const char *name; const char *input_args; @@ -522,7 +538,11 @@ static struct cryptodev_driver zuc_crypto_drv; RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_ZUC_PMD, cryptodev_zuc_pmd_drv); RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ZUC_PMD, "max_nb_queue_pairs= " - "max_nb_sessions= " "socket_id="); -RTE_PMD_REGISTER_CRYPTO_DRIVER(zuc_crypto_drv, cryptodev_zuc_pmd_drv, +RTE_PMD_REGISTER_CRYPTO_DRIVER(zuc_crypto_drv, cryptodev_zuc_pmd_drv.driver, cryptodev_driver_id); + +RTE_INIT(zuc_init_log) +{ + zuc_logtype_driver = rte_log_register("pmd.crypto.zuc"); +} diff --git a/drivers/crypto/zuc/rte_zuc_pmd_ops.c b/drivers/crypto/zuc/rte_zuc_pmd_ops.c index 8abac898..6da39654 100644 --- a/drivers/crypto/zuc/rte_zuc_pmd_ops.c +++ b/drivers/crypto/zuc/rte_zuc_pmd_ops.c @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2016-2017 Intel Corporation + * Copyright(c) 2016-2018 Intel Corporation */ #include @@ -130,7 +130,8 @@ zuc_pmd_info_get(struct rte_cryptodev *dev, if (dev_info != NULL) { dev_info->driver_id = dev->driver_id; dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs; - dev_info->sym.max_nb_sessions = internals->max_nb_sessions; + /* No limit of number of sessions */ + dev_info->sym.max_nb_sessions = 0; dev_info->feature_flags = dev->feature_flags; dev_info->capabilities = zuc_pmd_capabilities; } @@ -172,13 +173,13 @@ 
zuc_pmd_qp_create_processed_ops_ring(struct zuc_qp *qp, r = rte_ring_lookup(qp->name); if (r) { if (rte_ring_get_size(r) >= ring_size) { - ZUC_LOG_INFO("Reusing existing ring %s" + ZUC_LOG(INFO, "Reusing existing ring %s" " for processed packets", qp->name); return r; } - ZUC_LOG_ERR("Unable to reuse existing ring %s" + ZUC_LOG(ERR, "Unable to reuse existing ring %s" " for processed packets", qp->name); return NULL; @@ -230,22 +231,6 @@ qp_setup_cleanup: return -1; } -/** Start queue pair */ -static int -zuc_pmd_qp_start(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - -/** Stop queue pair */ -static int -zuc_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev, - __rte_unused uint16_t queue_pair_id) -{ - return -ENOTSUP; -} - /** Return the number of allocated queue pairs */ static uint32_t zuc_pmd_qp_count(struct rte_cryptodev *dev) @@ -255,14 +240,14 @@ zuc_pmd_qp_count(struct rte_cryptodev *dev) /** Returns the size of the ZUC session structure */ static unsigned -zuc_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused) +zuc_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused) { return sizeof(struct zuc_session); } /** Configure a ZUC session from a crypto xform chain */ static int -zuc_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, +zuc_pmd_sym_session_configure(struct rte_cryptodev *dev __rte_unused, struct rte_crypto_sym_xform *xform, struct rte_cryptodev_sym_session *sess, struct rte_mempool *mempool) @@ -271,26 +256,27 @@ zuc_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, int ret; if (unlikely(sess == NULL)) { - ZUC_LOG_ERR("invalid session struct"); + ZUC_LOG(ERR, "invalid session struct"); return -EINVAL; } if (rte_mempool_get(mempool, &sess_private_data)) { - CDEV_LOG_ERR( + ZUC_LOG(ERR, "Couldn't get object from session mempool"); + return -ENOMEM; } ret = zuc_set_session_parameters(sess_private_data, xform); if (ret != 0) { - ZUC_LOG_ERR("failed configure session parameters"); + ZUC_LOG(ERR, "failed configure session parameters"); /* Return session to mempool */ rte_mempool_put(mempool, sess_private_data); return ret; } - set_session_private_data(sess, dev->driver_id, + set_sym_session_private_data(sess, dev->driver_id, sess_private_data); return 0; @@ -298,17 +284,17 @@ zuc_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, /** Clear the memory of session so it doesn't leave key material behind */ static void -zuc_pmd_session_clear(struct rte_cryptodev *dev, +zuc_pmd_sym_session_clear(struct rte_cryptodev *dev, struct rte_cryptodev_sym_session *sess) { uint8_t index = dev->driver_id; - void *sess_priv = get_session_private_data(sess, index); + void *sess_priv = get_sym_session_private_data(sess, index); /* Zero out the whole structure */ if (sess_priv) { memset(sess_priv, 0, sizeof(struct zuc_session)); struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv); - set_session_private_data(sess, index, NULL); + set_sym_session_private_data(sess, index, NULL); rte_mempool_put(sess_mp, sess_priv); } } @@ -326,13 +312,11 @@ struct rte_cryptodev_ops zuc_pmd_ops = { .queue_pair_setup = zuc_pmd_qp_setup, .queue_pair_release = zuc_pmd_qp_release, - .queue_pair_start = zuc_pmd_qp_start, - .queue_pair_stop = zuc_pmd_qp_stop, .queue_pair_count = zuc_pmd_qp_count, - .session_get_size = zuc_pmd_session_get_size, - .session_configure = zuc_pmd_session_configure, - .session_clear = zuc_pmd_session_clear + .sym_session_get_size = zuc_pmd_sym_session_get_size, + 
.sym_session_configure = zuc_pmd_sym_session_configure, + .sym_session_clear = zuc_pmd_sym_session_clear }; struct rte_cryptodev_ops *rte_zuc_pmd_ops = &zuc_pmd_ops; diff --git a/drivers/crypto/zuc/rte_zuc_pmd_private.h b/drivers/crypto/zuc/rte_zuc_pmd_private.h index b83c4a04..5e5906dd 100644 --- a/drivers/crypto/zuc/rte_zuc_pmd_private.h +++ b/drivers/crypto/zuc/rte_zuc_pmd_private.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: BSD-3-Clause - * Copyright(c) 2016-2017 Intel Corporation + * Copyright(c) 2016-2018 Intel Corporation */ #ifndef _RTE_ZUC_PMD_PRIVATE_H_ @@ -10,25 +10,12 @@ #define CRYPTODEV_NAME_ZUC_PMD crypto_zuc /**< ZUC PMD device name */ -#define ZUC_LOG_ERR(fmt, args...) \ - RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_ZUC_PMD), \ - __func__, __LINE__, ## args) - -#ifdef RTE_LIBRTE_ZUC_DEBUG -#define ZUC_LOG_INFO(fmt, args...) \ - RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_ZUC_PMD), \ - __func__, __LINE__, ## args) - -#define ZUC_LOG_DBG(fmt, args...) \ - RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ - RTE_STR(CRYPTODEV_NAME_ZUC_PMD), \ - __func__, __LINE__, ## args) -#else -#define ZUC_LOG_INFO(fmt, args...) -#define ZUC_LOG_DBG(fmt, args...) -#endif +/** ZUC PMD LOGTYPE DRIVER */ +int zuc_logtype_driver; +#define ZUC_LOG(level, fmt, ...) \ + rte_log(RTE_LOG_ ## level, zuc_logtype_driver, \ + "%s()... line %u: " fmt "\n", __func__, __LINE__, \ + ## __VA_ARGS__) #define ZUC_IV_KEY_LENGTH 16 #define ZUC_DIGEST_LENGTH 4 @@ -37,8 +24,6 @@ struct zuc_private { unsigned max_nb_queue_pairs; /**< Max number of queue pairs supported by device */ - unsigned max_nb_sessions; - /**< Max number of sessions supported by device */ }; /** ZUC buffer queue pair */ -- cgit 1.2.3-korg