Diffstat (limited to 'drivers/crypto')
-rw-r--r-- drivers/crypto/Makefile | 8
-rw-r--r-- drivers/crypto/aesni_gcm/Makefile | 10
-rw-r--r-- drivers/crypto/aesni_gcm/aesni_gcm_ops.h | 3
-rw-r--r-- drivers/crypto/aesni_gcm/aesni_gcm_pmd.c | 6
-rw-r--r-- drivers/crypto/aesni_mb/Makefile | 10
-rw-r--r-- drivers/crypto/aesni_mb/aesni_mb_ops.h | 31
-rw-r--r-- drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c | 43
-rw-r--r-- drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c | 23
-rw-r--r-- drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h | 20
-rw-r--r-- drivers/crypto/armv8/rte_armv8_pmd.c | 4
-rw-r--r-- drivers/crypto/armv8/rte_armv8_pmd_ops.c | 8
-rw-r--r-- drivers/crypto/ccp/Makefile | 35
-rw-r--r-- drivers/crypto/ccp/ccp_crypto.c | 2951
-rw-r--r-- drivers/crypto/ccp/ccp_crypto.h | 388
-rw-r--r-- drivers/crypto/ccp/ccp_dev.c | 810
-rw-r--r-- drivers/crypto/ccp/ccp_dev.h | 495
-rw-r--r-- drivers/crypto/ccp/ccp_pci.c | 236
-rw-r--r-- drivers/crypto/ccp/ccp_pci.h | 27
-rw-r--r-- drivers/crypto/ccp/ccp_pmd_ops.c | 848
-rw-r--r-- drivers/crypto/ccp/ccp_pmd_private.h | 108
-rw-r--r-- drivers/crypto/ccp/meson.build | 21
-rw-r--r-- drivers/crypto/ccp/rte_ccp_pmd.c | 411
-rw-r--r-- drivers/crypto/ccp/rte_pmd_ccp_version.map | 4
-rw-r--r-- drivers/crypto/dpaa2_sec/Makefile | 5
-rw-r--r-- drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c | 514
-rw-r--r-- drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h | 62
-rw-r--r-- drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h | 26
-rw-r--r-- drivers/crypto/dpaa2_sec/meson.build | 14
-rw-r--r-- drivers/crypto/dpaa_sec/Makefile | 5
-rw-r--r-- drivers/crypto/dpaa_sec/dpaa_sec.c | 314
-rw-r--r-- drivers/crypto/dpaa_sec/dpaa_sec.h | 35
-rw-r--r-- drivers/crypto/dpaa_sec/dpaa_sec_log.h | 65
-rw-r--r-- drivers/crypto/dpaa_sec/meson.build | 13
-rw-r--r-- drivers/crypto/kasumi/rte_kasumi_pmd.c | 6
-rw-r--r-- drivers/crypto/meson.build | 4
-rw-r--r-- drivers/crypto/mrvl/Makefile | 67
-rw-r--r-- drivers/crypto/mrvl/rte_mrvl_compat.h | 51
-rw-r--r-- drivers/crypto/mvsam/Makefile | 42
-rw-r--r-- drivers/crypto/mvsam/meson.build | 21
-rw-r--r-- drivers/crypto/mvsam/rte_mrvl_compat.h | 23
-rw-r--r-- drivers/crypto/mvsam/rte_mrvl_pmd.c (renamed from drivers/crypto/mrvl/rte_mrvl_pmd.c) | 38
-rw-r--r-- drivers/crypto/mvsam/rte_mrvl_pmd_ops.c (renamed from drivers/crypto/mrvl/rte_mrvl_pmd_ops.c) | 36
-rw-r--r-- drivers/crypto/mvsam/rte_mrvl_pmd_private.h (renamed from drivers/crypto/mrvl/rte_mrvl_pmd_private.h) | 38
-rw-r--r-- drivers/crypto/mvsam/rte_pmd_mvsam_version.map (renamed from drivers/crypto/mrvl/rte_pmd_mrvl_version.map) | 0
-rw-r--r-- drivers/crypto/null/null_crypto_pmd.c | 2
-rw-r--r-- drivers/crypto/openssl/rte_openssl_pmd.c | 6
-rw-r--r-- drivers/crypto/qat/Makefile | 2
-rw-r--r-- drivers/crypto/qat/meson.build | 2
-rw-r--r-- drivers/crypto/qat/qat_adf/adf_transport_access_macros.h | 47
-rw-r--r-- drivers/crypto/qat/qat_adf/icp_qat_fw.h | 47
-rw-r--r-- drivers/crypto/qat/qat_adf/icp_qat_fw_la.h | 47
-rw-r--r-- drivers/crypto/qat/qat_adf/icp_qat_hw.h | 47
-rw-r--r-- drivers/crypto/qat/qat_adf/qat_algs.h | 47
-rw-r--r-- drivers/crypto/qat/qat_adf/qat_algs_build_desc.c | 48
-rw-r--r-- drivers/crypto/qat/qat_crypto.c | 2
-rw-r--r-- drivers/crypto/qat/qat_crypto.h | 2
-rw-r--r-- drivers/crypto/qat/qat_crypto_capabilities.h | 2
-rw-r--r-- drivers/crypto/qat/qat_logs.h | 2
-rw-r--r-- drivers/crypto/qat/qat_qp.c | 25
-rw-r--r-- drivers/crypto/qat/rte_qat_cryptodev.c | 6
-rw-r--r-- drivers/crypto/scheduler/rte_cryptodev_scheduler.c | 8
-rw-r--r-- drivers/crypto/scheduler/rte_cryptodev_scheduler.h | 2
-rw-r--r-- drivers/crypto/scheduler/scheduler_multicore.c | 53
-rw-r--r-- drivers/crypto/scheduler/scheduler_pkt_size_distr.c | 4
-rw-r--r-- drivers/crypto/scheduler/scheduler_pmd.c | 70
-rw-r--r-- drivers/crypto/scheduler/scheduler_pmd_ops.c | 9
-rw-r--r-- drivers/crypto/scheduler/scheduler_pmd_private.h | 2
-rw-r--r-- drivers/crypto/snow3g/rte_snow3g_pmd.c | 6
-rw-r--r-- drivers/crypto/virtio/Makefile | 35
-rw-r--r-- drivers/crypto/virtio/meson.build | 8
-rw-r--r-- drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map | 3
-rw-r--r-- drivers/crypto/virtio/virtio_crypto_algs.h | 28
-rw-r--r-- drivers/crypto/virtio/virtio_crypto_capabilities.h | 51
-rw-r--r-- drivers/crypto/virtio/virtio_cryptodev.c | 1504
-rw-r--r-- drivers/crypto/virtio/virtio_cryptodev.h | 61
-rw-r--r-- drivers/crypto/virtio/virtio_logs.h | 91
-rw-r--r-- drivers/crypto/virtio/virtio_pci.c | 462
-rw-r--r-- drivers/crypto/virtio/virtio_pci.h | 253
-rw-r--r-- drivers/crypto/virtio/virtio_ring.h | 137
-rw-r--r-- drivers/crypto/virtio/virtio_rxtx.c | 515
-rw-r--r-- drivers/crypto/virtio/virtqueue.c | 43
-rw-r--r-- drivers/crypto/virtio/virtqueue.h | 171
83 files changed, 10676 insertions(+), 1168 deletions(-)
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 628bd142..1d0c88ef 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -6,15 +6,21 @@ include $(RTE_SDK)/mk/rte.vars.mk
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb
DIRS-$(CONFIG_RTE_LIBRTE_PMD_ARMV8_CRYPTO) += armv8
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp
DIRS-$(CONFIG_RTE_LIBRTE_PMD_OPENSSL) += openssl
DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat
DIRS-$(CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER) += scheduler
DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g
DIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += kasumi
DIRS-$(CONFIG_RTE_LIBRTE_PMD_ZUC) += zuc
-DIRS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += mrvl
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) += mvsam
DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
+ifeq ($(CONFIG_RTE_EAL_VFIO)$(CONFIG_RTE_LIBRTE_FSLMC_BUS),yy)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec
+endif
+ifeq ($(CONFIG_RTE_LIBRTE_DPAA_BUS),y)
DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += dpaa_sec
+endif
+DIRS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio
include $(RTE_SDK)/mk/rte.subdir.mk
diff --git a/drivers/crypto/aesni_gcm/Makefile b/drivers/crypto/aesni_gcm/Makefile
index d06c5444..0a5c1a87 100644
--- a/drivers/crypto/aesni_gcm/Makefile
+++ b/drivers/crypto/aesni_gcm/Makefile
@@ -3,12 +3,6 @@
include $(RTE_SDK)/mk/rte.vars.mk
-ifneq ($(MAKECMDGOALS),clean)
-ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),)
-$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable")
-endif
-endif
-
# library name
LIB = librte_pmd_aesni_gcm.a
@@ -23,9 +17,7 @@ LIBABIVER := 1
EXPORT_MAP := rte_pmd_aesni_gcm_version.map
# external library dependencies
-CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
-CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
-LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+LDLIBS += -lIPSec_MB
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_cryptodev
LDLIBS += -lrte_bus_vdev
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
index 59e504ee..45061669 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_ops.h
@@ -9,8 +9,7 @@
#define LINUX
#endif
-#include <gcm_defines.h>
-#include <aux_funcs.h>
+#include <intel-ipsec-mb.h>
/** Supported vector modes */
enum aesni_gcm_vector_mode {
diff --git a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
index 83e54480..80360dd9 100644
--- a/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
+++ b/drivers/crypto/aesni_gcm/aesni_gcm_pmd.c
@@ -352,7 +352,7 @@ post_process_gcm_crypto_op(struct aesni_gcm_qp *qp,
session->op == AESNI_GMAC_OP_VERIFY) {
uint8_t *digest;
- uint8_t *tag = (uint8_t *)&qp->temp_digest;
+ uint8_t *tag = qp->temp_digest;
if (session->op == AESNI_GMAC_OP_VERIFY)
digest = op->sym->auth.digest.data;
@@ -392,7 +392,7 @@ handle_completed_gcm_crypto_op(struct aesni_gcm_qp *qp,
if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
memset(sess, 0, sizeof(struct aesni_gcm_session));
memset(op->sym->session, 0,
- rte_cryptodev_get_header_session_size());
+ rte_cryptodev_sym_get_header_session_size());
rte_mempool_put(qp->sess_mp, sess);
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
@@ -570,5 +570,5 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_GCM_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv,
+RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv, aesni_gcm_pmd_drv.driver,
cryptodev_driver_id);
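Two mechanical API updates in this hunk recur across the PMDs below: rte_cryptodev_get_header_session_size() becomes rte_cryptodev_sym_get_header_session_size(), and RTE_PMD_REGISTER_CRYPTO_DRIVER() now takes the rte_driver embedded in the vdev driver rather than the vdev driver itself. A minimal sketch of the new registration shape (illustrative; the probe/remove callback names are assumed):

	static struct rte_vdev_driver aesni_gcm_pmd_drv = {
		.probe = aesni_gcm_probe,	/* assumed callback names */
		.remove = aesni_gcm_remove,
	};
	static struct cryptodev_driver aesni_gcm_crypto_drv;
	static uint8_t cryptodev_driver_id;

	RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_gcm_crypto_drv,
			aesni_gcm_pmd_drv.driver, cryptodev_driver_id);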
diff --git a/drivers/crypto/aesni_mb/Makefile b/drivers/crypto/aesni_mb/Makefile
index d9f8fb98..806a95eb 100644
--- a/drivers/crypto/aesni_mb/Makefile
+++ b/drivers/crypto/aesni_mb/Makefile
@@ -3,12 +3,6 @@
include $(RTE_SDK)/mk/rte.vars.mk
-ifneq ($(MAKECMDGOALS),clean)
-ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),)
-$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable")
-endif
-endif
-
# library name
LIB = librte_pmd_aesni_mb.a
@@ -23,9 +17,7 @@ LIBABIVER := 1
EXPORT_MAP := rte_pmd_aesni_mb_version.map
# external library dependencies
-CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
-CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
-LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+LDLIBS += -lIPSec_MB
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_cryptodev
LDLIBS += -lrte_bus_vdev
diff --git a/drivers/crypto/aesni_mb/aesni_mb_ops.h b/drivers/crypto/aesni_mb/aesni_mb_ops.h
index 4d596e85..5a1cba6c 100644
--- a/drivers/crypto/aesni_mb/aesni_mb_ops.h
+++ b/drivers/crypto/aesni_mb/aesni_mb_ops.h
@@ -9,8 +9,7 @@
#define LINUX
#endif
-#include <mb_mgr.h>
-#include <aux_funcs.h>
+#include <intel-ipsec-mb.h>
enum aesni_mb_vector_mode {
RTE_AESNI_MB_NOT_SUPPORTED = 0,
@@ -34,9 +33,12 @@ typedef void (*aes_keyexp_192_t)
(const void *key, void *enc_exp_keys, void *dec_exp_keys);
typedef void (*aes_keyexp_256_t)
(const void *key, void *enc_exp_keys, void *dec_exp_keys);
-
typedef void (*aes_xcbc_expand_key_t)
(const void *key, void *exp_k1, void *k2, void *k3);
+typedef void (*aes_cmac_sub_key_gen_t)
+ (const void *exp_key, void *k2, void *k3);
+typedef void (*aes_cmac_keyexp_t)
+ (const void *key, void *keyexp);
/** Multi-buffer library function pointer table */
struct aesni_mb_op_fns {
@@ -78,9 +80,12 @@ struct aesni_mb_op_fns {
/**< AES192 key expansions */
aes_keyexp_256_t aes256;
/**< AES256 key expansions */
-
aes_xcbc_expand_key_t aes_xcbc;
- /**< AES XCBC key expansions */
+ /**< AES XCBC key expansions */
+ aes_cmac_sub_key_gen_t aes_cmac_subkey;
+ /**< AES CMAC subkey expansions */
+ aes_cmac_keyexp_t aes_cmac_expkey;
+ /**< AES CMAC key expansions */
} keyexp;
/**< Key expansion functions */
} aux;
@@ -123,7 +128,9 @@ static const struct aesni_mb_op_fns job_ops[] = {
aes_keyexp_128_sse,
aes_keyexp_192_sse,
aes_keyexp_256_sse,
- aes_xcbc_expand_key_sse
+ aes_xcbc_expand_key_sse,
+ aes_cmac_subkey_gen_sse,
+ aes_keyexp_128_enc_sse
}
}
},
@@ -148,7 +155,9 @@ static const struct aesni_mb_op_fns job_ops[] = {
aes_keyexp_128_avx,
aes_keyexp_192_avx,
aes_keyexp_256_avx,
- aes_xcbc_expand_key_avx
+ aes_xcbc_expand_key_avx,
+ aes_cmac_subkey_gen_avx,
+ aes_keyexp_128_enc_avx
}
}
},
@@ -173,7 +182,9 @@ static const struct aesni_mb_op_fns job_ops[] = {
aes_keyexp_128_avx2,
aes_keyexp_192_avx2,
aes_keyexp_256_avx2,
- aes_xcbc_expand_key_avx2
+ aes_xcbc_expand_key_avx2,
+ aes_cmac_subkey_gen_avx2,
+ aes_keyexp_128_enc_avx2
}
}
},
@@ -198,7 +209,9 @@ static const struct aesni_mb_op_fns job_ops[] = {
aes_keyexp_128_avx512,
aes_keyexp_192_avx512,
aes_keyexp_256_avx512,
- aes_xcbc_expand_key_avx512
+ aes_xcbc_expand_key_avx512,
+ aes_cmac_subkey_gen_avx512,
+ aes_keyexp_128_enc_avx512
}
}
}
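The new aes_cmac_subkey/aes_cmac_expkey entries give each vector-mode row its CMAC key-derivation pair; the session-setup hunk in the next file dispatches through them. A minimal sketch of that call path (assumes a 16-byte key buffer named key):

	const struct aesni_mb_op_fns *fns = &job_ops[RTE_AESNI_MB_AVX2];
	uint32_t expkey[60] __rte_aligned(16);	/* matches the session layout */
	uint32_t k1[4], k2[4];			/* CMAC subkeys K1/K2 */

	(*fns->aux.keyexp.aes_cmac_expkey)(key, expkey);
	(*fns->aux.keyexp.aes_cmac_subkey)(expkey, k1, k2);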
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 636c6c37..bb35c66a 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -2,7 +2,7 @@
* Copyright(c) 2015-2017 Intel Corporation
*/
-#include <des.h>
+#include <intel-ipsec-mb.h>
#include <rte_common.h>
#include <rte_hexdump.h>
@@ -124,6 +124,17 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
return 0;
}
+ if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
+ sess->auth.algo = AES_CMAC;
+ (*mb_ops->aux.keyexp.aes_cmac_expkey)(xform->auth.key.data,
+ sess->auth.cmac.expkey);
+
+ (*mb_ops->aux.keyexp.aes_cmac_subkey)(sess->auth.cmac.expkey,
+ sess->auth.cmac.skey1, sess->auth.cmac.skey2);
+ return 0;
+ }
+
switch (xform->auth.algo) {
case RTE_CRYPTO_AUTH_MD5_HMAC:
sess->auth.algo = MD5;
@@ -338,16 +349,19 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
sess->chain_order = HASH_CIPHER;
auth_xform = xform;
cipher_xform = xform->next;
+ sess->auth.digest_len = xform->auth.digest_length;
break;
case AESNI_MB_OP_CIPHER_HASH:
sess->chain_order = CIPHER_HASH;
auth_xform = xform->next;
cipher_xform = xform;
+ sess->auth.digest_len = xform->auth.digest_length;
break;
case AESNI_MB_OP_HASH_ONLY:
sess->chain_order = HASH_CIPHER;
auth_xform = xform;
cipher_xform = NULL;
+ sess->auth.digest_len = xform->auth.digest_length;
break;
case AESNI_MB_OP_CIPHER_ONLY:
/*
@@ -366,13 +380,13 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
case AESNI_MB_OP_AEAD_CIPHER_HASH:
sess->chain_order = CIPHER_HASH;
sess->aead.aad_len = xform->aead.aad_length;
- sess->aead.digest_len = xform->aead.digest_length;
+ sess->auth.digest_len = xform->aead.digest_length;
aead_xform = xform;
break;
case AESNI_MB_OP_AEAD_HASH_CIPHER:
sess->chain_order = HASH_CIPHER;
sess->aead.aad_len = xform->aead.aad_length;
- sess->aead.digest_len = xform->aead.digest_length;
+ sess->auth.digest_len = xform->aead.digest_length;
aead_xform = xform;
break;
case AESNI_MB_OP_NOT_SUPPORTED:
@@ -517,15 +531,20 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
/* Set authentication parameters */
job->hash_alg = session->auth.algo;
if (job->hash_alg == AES_XCBC) {
- job->_k1_expanded = session->auth.xcbc.k1_expanded;
- job->_k2 = session->auth.xcbc.k2;
- job->_k3 = session->auth.xcbc.k3;
+ job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
+ job->u.XCBC._k2 = session->auth.xcbc.k2;
+ job->u.XCBC._k3 = session->auth.xcbc.k3;
} else if (job->hash_alg == AES_CCM) {
job->u.CCM.aad = op->sym->aead.aad.data + 18;
job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
+ } else if (job->hash_alg == AES_CMAC) {
+ job->u.CMAC._key_expanded = session->auth.cmac.expkey;
+ job->u.CMAC._skey1 = session->auth.cmac.skey1;
+ job->u.CMAC._skey2 = session->auth.cmac.skey2;
+
} else {
- job->hashed_auth_key_xor_ipad = session->auth.pads.inner;
- job->hashed_auth_key_xor_opad = session->auth.pads.outer;
+ job->u.HMAC._hashed_auth_key_xor_ipad = session->auth.pads.inner;
+ job->u.HMAC._hashed_auth_key_xor_opad = session->auth.pads.outer;
}
/* Mutable crypto operation parameters */
@@ -568,11 +587,11 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
* Multi-buffer library currently only supports returning a truncated
* digest length as specified in the relevant IPsec RFCs
*/
- if (job->hash_alg != AES_CCM)
+ if (job->hash_alg != AES_CCM && job->hash_alg != AES_CMAC)
job->auth_tag_output_len_in_bytes =
get_truncated_digest_byte_length(job->hash_alg);
else
- job->auth_tag_output_len_in_bytes = session->aead.digest_len;
+ job->auth_tag_output_len_in_bytes = session->auth.digest_len;
/* Set IV parameters */
@@ -663,7 +682,7 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
memset(sess, 0, sizeof(struct aesni_mb_session));
memset(op->sym->session, 0,
- rte_cryptodev_get_header_session_size());
+ rte_cryptodev_sym_get_header_session_size());
rte_mempool_put(qp->sess_mp, sess);
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
@@ -931,5 +950,5 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
"max_nb_sessions=<int> "
"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_mb_crypto_drv,
- cryptodev_aesni_mb_pmd_drv,
+ cryptodev_aesni_mb_pmd_drv.driver,
cryptodev_driver_id);
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index 9d685a09..01530523 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -289,8 +289,27 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
}, }
}, }
},
-
-
+ { /* AES CMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_CMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 12,
+ .max = 16,
+ .increment = 4
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
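The digest range advertised above (12 to 16 bytes in steps of 4) is what applications see through the capability API; a hedged example of validating AES-CMAC parameters before session creation (standard cryptodev calls; dev_id is assumed):

	struct rte_cryptodev_sym_capability_idx idx = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.algo.auth = RTE_CRYPTO_AUTH_AES_CMAC,
	};
	const struct rte_cryptodev_symmetric_capability *cap;

	cap = rte_cryptodev_sym_capability_get(dev_id, &idx);
	if (cap == NULL)
		return -ENOTSUP;	/* AES-CMAC not offered */
	/* key = 16, digest = 12, iv = 0: accepted by the table above */
	if (rte_cryptodev_sym_capability_check_auth(cap, 16, 12, 0) < 0)
		return -ENOTSUP;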
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index 948e091c..a33b2f69 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -66,8 +66,9 @@ static const unsigned auth_truncated_digest_byte_lengths[] = {
[SHA_384] = 24,
[SHA_512] = 32,
[AES_XCBC] = 12,
+ [AES_CMAC] = 16,
[AES_CCM] = 8,
- [NULL_HASH] = 0
+ [NULL_HASH] = 0
};
/**
@@ -91,7 +92,8 @@ static const unsigned auth_digest_byte_lengths[] = {
[SHA_384] = 48,
[SHA_512] = 64,
[AES_XCBC] = 16,
- [NULL_HASH] = 0
+ [AES_CMAC] = 16,
+ [NULL_HASH] = 0
};
/**
@@ -211,14 +213,24 @@ struct aesni_mb_session {
uint8_t k3[16] __rte_aligned(16);
/**< k3. */
} xcbc;
+
+ struct {
+ uint32_t expkey[60] __rte_aligned(16);
+ /**< expanded AES key. */
+ uint32_t skey1[4] __rte_aligned(16);
+ /**< subkey K1. */
+ uint32_t skey2[4] __rte_aligned(16);
+ /**< subkey K2. */
+ } cmac;
/**< Expanded XCBC/CMAC authentication keys */
};
+ /** digest size */
+ uint16_t digest_len;
+
} auth;
struct {
/** AAD data length */
uint16_t aad_len;
- /** digest size */
- uint16_t digest_len;
} aead;
} __rte_cache_aligned;
diff --git a/drivers/crypto/armv8/rte_armv8_pmd.c b/drivers/crypto/armv8/rte_armv8_pmd.c
index 59fffcf1..fbb08f72 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd.c
+++ b/drivers/crypto/armv8/rte_armv8_pmd.c
@@ -654,7 +654,7 @@ process_op(struct armv8_crypto_qp *qp, struct rte_crypto_op *op,
if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
memset(sess, 0, sizeof(struct armv8_crypto_session));
memset(op->sym->session, 0,
- rte_cryptodev_get_header_session_size());
+ rte_cryptodev_sym_get_header_session_size());
rte_mempool_put(qp->sess_mp, sess);
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
@@ -850,5 +850,5 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ARMV8_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(armv8_crypto_drv, armv8_crypto_pmd_drv,
+RTE_PMD_REGISTER_CRYPTO_DRIVER(armv8_crypto_drv, armv8_crypto_pmd_drv.driver,
cryptodev_driver_id);
diff --git a/drivers/crypto/armv8/rte_armv8_pmd_ops.c b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
index 3817ad7b..c64aef09 100644
--- a/drivers/crypto/armv8/rte_armv8_pmd_ops.c
+++ b/drivers/crypto/armv8/rte_armv8_pmd_ops.c
@@ -27,9 +27,9 @@ static const struct rte_cryptodev_capabilities
.increment = 1
},
.digest_size = {
- .min = 20,
+ .min = 1,
.max = 20,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -48,9 +48,9 @@ static const struct rte_cryptodev_capabilities
.increment = 1
},
.digest_size = {
- .min = 32,
+ .min = 1,
.max = 32,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
new file mode 100644
index 00000000..f51d170f
--- /dev/null
+++ b/drivers/crypto/ccp/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# library name
+LIB = librte_pmd_ccp.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += -I$(SRCDIR)
+CFLAGS += $(WERROR_FLAGS)
+
+# library version
+LIBABIVER := 1
+
+# external library include paths
+LDLIBS += -lcrypto
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_pci -lrte_bus_pci
+LDLIBS += -lrte_bus_vdev
+LDLIBS += -lrte_kvargs
+
+# versioning export map
+EXPORT_MAP := rte_pmd_ccp_version.map
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_crypto.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
new file mode 100644
index 00000000..3ce0f39f
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_crypto.c
@@ -0,0 +1,2951 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <openssl/sha.h>
+#include <openssl/cmac.h> /*sub key apis*/
+#include <openssl/evp.h> /*sub key apis*/
+
+#include <rte_hexdump.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+#include <rte_cryptodev_pmd.h>
+
+#include "ccp_dev.h"
+#include "ccp_crypto.h"
+#include "ccp_pci.h"
+#include "ccp_pmd_private.h"
+
+#include <openssl/conf.h>
+#include <openssl/err.h>
+#include <openssl/hmac.h>
+
+/* SHA initial context values */
+static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
+ SHA1_H4, SHA1_H3,
+ SHA1_H2, SHA1_H1,
+ SHA1_H0, 0x0U,
+ 0x0U, 0x0U,
+};
+
+uint32_t ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+ SHA224_H7, SHA224_H6,
+ SHA224_H5, SHA224_H4,
+ SHA224_H3, SHA224_H2,
+ SHA224_H1, SHA224_H0,
+};
+
+uint32_t ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
+ SHA256_H7, SHA256_H6,
+ SHA256_H5, SHA256_H4,
+ SHA256_H3, SHA256_H2,
+ SHA256_H1, SHA256_H0,
+};
+
+uint64_t ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+ SHA384_H7, SHA384_H6,
+ SHA384_H5, SHA384_H4,
+ SHA384_H3, SHA384_H2,
+ SHA384_H1, SHA384_H0,
+};
+
+uint64_t ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+ SHA512_H7, SHA512_H6,
+ SHA512_H5, SHA512_H4,
+ SHA512_H3, SHA512_H2,
+ SHA512_H1, SHA512_H0,
+};
+
+#if defined(_MSC_VER)
+#define SHA3_CONST(x) x
+#else
+#define SHA3_CONST(x) x##L
+#endif
+
+/** 'Words' here refers to uint64_t */
+#define SHA3_KECCAK_SPONGE_WORDS \
+ (((1600) / 8) / sizeof(uint64_t))
+typedef struct sha3_context_ {
+ uint64_t saved;
+ /**
+ * The portion of the input message that we
+ * didn't consume yet
+ */
+ union {
+ uint64_t s[SHA3_KECCAK_SPONGE_WORDS];
+ /* Keccak's state */
+ uint8_t sb[SHA3_KECCAK_SPONGE_WORDS * 8];
+ /**< total 200-byte ctx size */
+ };
+ unsigned int byteIndex;
+ /**
+ * 0..7--the next byte after the set one
+ * (starts from 0; 0--none are buffered)
+ */
+ unsigned int wordIndex;
+ /**
+ * 0..24--the next word to integrate input
+ * (starts from 0)
+ */
+ unsigned int capacityWords;
+ /**
+ * the double size of the hash output in
+ * words (e.g. 16 for Keccak 512)
+ */
+} sha3_context;
+
+#ifndef SHA3_ROTL64
+#define SHA3_ROTL64(x, y) \
+ (((x) << (y)) | ((x) >> ((sizeof(uint64_t)*8) - (y))))
+#endif
+
+static const uint64_t keccakf_rndc[24] = {
+ SHA3_CONST(0x0000000000000001UL), SHA3_CONST(0x0000000000008082UL),
+ SHA3_CONST(0x800000000000808aUL), SHA3_CONST(0x8000000080008000UL),
+ SHA3_CONST(0x000000000000808bUL), SHA3_CONST(0x0000000080000001UL),
+ SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008009UL),
+ SHA3_CONST(0x000000000000008aUL), SHA3_CONST(0x0000000000000088UL),
+ SHA3_CONST(0x0000000080008009UL), SHA3_CONST(0x000000008000000aUL),
+ SHA3_CONST(0x000000008000808bUL), SHA3_CONST(0x800000000000008bUL),
+ SHA3_CONST(0x8000000000008089UL), SHA3_CONST(0x8000000000008003UL),
+ SHA3_CONST(0x8000000000008002UL), SHA3_CONST(0x8000000000000080UL),
+ SHA3_CONST(0x000000000000800aUL), SHA3_CONST(0x800000008000000aUL),
+ SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008080UL),
+ SHA3_CONST(0x0000000080000001UL), SHA3_CONST(0x8000000080008008UL)
+};
+
+static const unsigned int keccakf_rotc[24] = {
+ 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, 27, 41, 56, 8, 25, 43, 62,
+ 18, 39, 61, 20, 44
+};
+
+static const unsigned int keccakf_piln[24] = {
+ 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, 15, 23, 19, 13, 12, 2, 20,
+ 14, 22, 9, 6, 1
+};
+
+static enum ccp_cmd_order
+ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+{
+ enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;
+
+ if (xform == NULL)
+ return res;
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ if (xform->next == NULL)
+ return CCP_CMD_AUTH;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return CCP_CMD_HASH_CIPHER;
+ }
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ if (xform->next == NULL)
+ return CCP_CMD_CIPHER;
+ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return CCP_CMD_CIPHER_HASH;
+ }
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
+ return CCP_CMD_COMBINED;
+ return res;
+}
+
+/* partial hash using openssl */
+static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA_CTX ctx;
+
+ if (!SHA1_Init(&ctx))
+ return -EFAULT;
+ SHA1_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA256_CTX ctx;
+
+ if (!SHA224_Init(&ctx))
+ return -EFAULT;
+ SHA256_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx,
+ SHA256_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA256_CTX ctx;
+
+ if (!SHA256_Init(&ctx))
+ return -EFAULT;
+ SHA256_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx,
+ SHA256_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA512_CTX ctx;
+
+ if (!SHA384_Init(&ctx))
+ return -EFAULT;
+ SHA512_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx,
+ SHA512_DIGEST_LENGTH);
+ return 0;
+}
+
+static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
+{
+ SHA512_CTX ctx;
+
+ if (!SHA512_Init(&ctx))
+ return -EFAULT;
+ SHA512_Transform(&ctx, data_in);
+ rte_memcpy(data_out, &ctx,
+ SHA512_DIGEST_LENGTH);
+ return 0;
+}
+
+static void
+keccakf(uint64_t s[25])
+{
+ int i, j, round;
+ uint64_t t, bc[5];
+#define KECCAK_ROUNDS 24
+
+ for (round = 0; round < KECCAK_ROUNDS; round++) {
+
+ /* Theta */
+ for (i = 0; i < 5; i++)
+ bc[i] = s[i] ^ s[i + 5] ^ s[i + 10] ^ s[i + 15] ^
+ s[i + 20];
+
+ for (i = 0; i < 5; i++) {
+ t = bc[(i + 4) % 5] ^ SHA3_ROTL64(bc[(i + 1) % 5], 1);
+ for (j = 0; j < 25; j += 5)
+ s[j + i] ^= t;
+ }
+
+ /* Rho Pi */
+ t = s[1];
+ for (i = 0; i < 24; i++) {
+ j = keccakf_piln[i];
+ bc[0] = s[j];
+ s[j] = SHA3_ROTL64(t, keccakf_rotc[i]);
+ t = bc[0];
+ }
+
+ /* Chi */
+ for (j = 0; j < 25; j += 5) {
+ for (i = 0; i < 5; i++)
+ bc[i] = s[j + i];
+ for (i = 0; i < 5; i++)
+ s[j + i] ^= (~bc[(i + 1) % 5]) &
+ bc[(i + 2) % 5];
+ }
+
+ /* Iota */
+ s[0] ^= keccakf_rndc[round];
+ }
+}
+
+static void
+sha3_Init224(void *priv)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->capacityWords = 2 * 224 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init256(void *priv)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->capacityWords = 2 * 256 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init384(void *priv)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->capacityWords = 2 * 384 / (8 * sizeof(uint64_t));
+}
+
+static void
+sha3_Init512(void *priv)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+
+ memset(ctx, 0, sizeof(*ctx));
+ ctx->capacityWords = 2 * 512 / (8 * sizeof(uint64_t));
+}
+
+
+/* This is simply the 'update' with the padding block.
+ * The padding block is 0x01 || 0x00* || 0x80. First 0x01 and last 0x80
+ * bytes are always present, but they can be the same byte.
+ */
+static void
+sha3_Update(void *priv, void const *bufIn, size_t len)
+{
+ sha3_context *ctx = (sha3_context *) priv;
+ unsigned int old_tail = (8 - ctx->byteIndex) & 7;
+ size_t words;
+ unsigned int tail;
+ size_t i;
+ const uint8_t *buf = bufIn;
+
+ if (len < old_tail) {
+ while (len--)
+ ctx->saved |= (uint64_t) (*(buf++)) <<
+ ((ctx->byteIndex++) * 8);
+ return;
+ }
+
+ if (old_tail) {
+ len -= old_tail;
+ while (old_tail--)
+ ctx->saved |= (uint64_t) (*(buf++)) <<
+ ((ctx->byteIndex++) * 8);
+
+ ctx->s[ctx->wordIndex] ^= ctx->saved;
+ ctx->byteIndex = 0;
+ ctx->saved = 0;
+ if (++ctx->wordIndex ==
+ (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
+ keccakf(ctx->s);
+ ctx->wordIndex = 0;
+ }
+ }
+
+ words = len / sizeof(uint64_t);
+ tail = len - words * sizeof(uint64_t);
+
+ for (i = 0; i < words; i++, buf += sizeof(uint64_t)) {
+ const uint64_t t = (uint64_t) (buf[0]) |
+ ((uint64_t) (buf[1]) << 8 * 1) |
+ ((uint64_t) (buf[2]) << 8 * 2) |
+ ((uint64_t) (buf[3]) << 8 * 3) |
+ ((uint64_t) (buf[4]) << 8 * 4) |
+ ((uint64_t) (buf[5]) << 8 * 5) |
+ ((uint64_t) (buf[6]) << 8 * 6) |
+ ((uint64_t) (buf[7]) << 8 * 7);
+ ctx->s[ctx->wordIndex] ^= t;
+ if (++ctx->wordIndex ==
+ (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
+ keccakf(ctx->s);
+ ctx->wordIndex = 0;
+ }
+ }
+
+ while (tail--)
+ ctx->saved |= (uint64_t) (*(buf++)) << ((ctx->byteIndex++) * 8);
+}
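For orientation, the absorb loop above runs keccakf() every (25 - capacityWords) words; that rate matches the SHA3_*_BLOCK_SIZE constants used by the partial-hash helpers below. A quick sanity check of the arithmetic (C11 static_assert; a sketch, not part of the patch):

	#include <assert.h>

	/* Keccak state: 1600 bits = 25 64-bit lanes; capacity = 2 x digest */
	static_assert(SHA3_KECCAK_SPONGE_WORDS == 25, "25 lanes");
	/* SHA3-256: 25 - (2*256/64) = 17 words -> 136-byte rate/block */
	static_assert((SHA3_KECCAK_SPONGE_WORDS - 2 * 256 / 64) * 8 == 136,
		      "SHA3-256 rate");
	/* SHA3-512: 25 - (2*512/64) = 9 words -> 72-byte rate/block */
	static_assert((SHA3_KECCAK_SPONGE_WORDS - 2 * 512 / 64) * 8 == 72,
		      "SHA3-512 rate");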
+
+int partial_hash_sha3_224(uint8_t *data_in, uint8_t *data_out)
+{
+ sha3_context *ctx;
+ int i;
+
+ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+ if (!ctx) {
+ CCP_LOG_ERR("sha3-ctx creation failed");
+ return -ENOMEM;
+ }
+ sha3_Init224(ctx);
+ sha3_Update(ctx, data_in, SHA3_224_BLOCK_SIZE);
+ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+ rte_free(ctx);
+
+ return 0;
+}
+
+int partial_hash_sha3_256(uint8_t *data_in, uint8_t *data_out)
+{
+ sha3_context *ctx;
+ int i;
+
+ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+ if (!ctx) {
+ CCP_LOG_ERR("sha3-ctx creation failed");
+ return -ENOMEM;
+ }
+ sha3_Init256(ctx);
+ sha3_Update(ctx, data_in, SHA3_256_BLOCK_SIZE);
+ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+ rte_free(ctx);
+
+ return 0;
+}
+
+int partial_hash_sha3_384(uint8_t *data_in, uint8_t *data_out)
+{
+ sha3_context *ctx;
+ int i;
+
+ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+ if (!ctx) {
+ CCP_LOG_ERR("sha3-ctx creation failed");
+ return -ENOMEM;
+ }
+ sha3_Init384(ctx);
+ sha3_Update(ctx, data_in, SHA3_384_BLOCK_SIZE);
+ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+ rte_free(ctx);
+
+ return 0;
+}
+
+int partial_hash_sha3_512(uint8_t *data_in, uint8_t *data_out)
+{
+ sha3_context *ctx;
+ int i;
+
+ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
+ if (!ctx) {
+ CCP_LOG_ERR("sha3-ctx creation failed");
+ return -ENOMEM;
+ }
+ sha3_Init512(ctx);
+ sha3_Update(ctx, data_in, SHA3_512_BLOCK_SIZE);
+ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
+ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
+ rte_free(ctx);
+
+ return 0;
+}
+
+static int generate_partial_hash(struct ccp_session *sess)
+{
+
+ uint8_t ipad[sess->auth.block_size];
+ uint8_t opad[sess->auth.block_size];
+ uint8_t *ipad_t, *opad_t;
+ uint32_t *hash_value_be32, hash_temp32[8];
+ uint64_t *hash_value_be64, hash_temp64[8];
+ int i, count;
+ uint8_t *hash_value_sha3;
+
+ opad_t = ipad_t = (uint8_t *)sess->auth.key;
+
+ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute);
+ hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute);
+
+ /* considering key size is always equal to block size of algorithm */
+ for (i = 0; i < sess->auth.block_size; i++) {
+ ipad[i] = (ipad_t[i] ^ HMAC_IPAD_VALUE);
+ opad[i] = (opad_t[i] ^ HMAC_OPAD_VALUE);
+ }
+
+ switch (sess->auth.algo) {
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ count = SHA1_DIGEST_SIZE >> 2;
+
+ if (partial_hash_sha1(ipad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+
+ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha1(opad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ count = SHA256_DIGEST_SIZE >> 2;
+
+ if (partial_hash_sha224(ipad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+
+ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha224(opad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA3_224_HMAC:
+ hash_value_sha3 = sess->auth.pre_compute;
+ if (partial_hash_sha3_224(ipad, hash_value_sha3))
+ return -1;
+
+ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha3_224(opad, hash_value_sha3))
+ return -1;
+ return 0;
+ case CCP_AUTH_ALGO_SHA256_HMAC:
+ count = SHA256_DIGEST_SIZE >> 2;
+
+ if (partial_hash_sha256(ipad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+
+ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha256(opad, (uint8_t *)hash_temp32))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA3_256_HMAC:
+ hash_value_sha3 = sess->auth.pre_compute;
+ if (partial_hash_sha3_256(ipad, hash_value_sha3))
+ return -1;
+
+ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha3_256(opad, hash_value_sha3))
+ return -1;
+ return 0;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ count = SHA512_DIGEST_SIZE >> 3;
+
+ if (partial_hash_sha384(ipad, (uint8_t *)hash_temp64))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be64++)
+ *hash_value_be64 = hash_temp64[count - 1 - i];
+
+ hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha384(opad, (uint8_t *)hash_temp64))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be64++)
+ *hash_value_be64 = hash_temp64[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA3_384_HMAC:
+ hash_value_sha3 = sess->auth.pre_compute;
+ if (partial_hash_sha3_384(ipad, hash_value_sha3))
+ return -1;
+
+ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha3_384(opad, hash_value_sha3))
+ return -1;
+ return 0;
+ case CCP_AUTH_ALGO_SHA512_HMAC:
+ count = SHA512_DIGEST_SIZE >> 3;
+
+ if (partial_hash_sha512(ipad, (uint8_t *)hash_temp64))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be64++)
+ *hash_value_be64 = hash_temp64[count - 1 - i];
+
+ hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha512(opad, (uint8_t *)hash_temp64))
+ return -1;
+ for (i = 0; i < count; i++, hash_value_be64++)
+ *hash_value_be64 = hash_temp64[count - 1 - i];
+ return 0;
+ case CCP_AUTH_ALGO_SHA3_512_HMAC:
+ hash_value_sha3 = sess->auth.pre_compute;
+ if (partial_hash_sha3_512(ipad, hash_value_sha3))
+ return -1;
+
+ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
+ + sess->auth.ctx_len);
+ if (partial_hash_sha3_512(opad, hash_value_sha3))
+ return -1;
+ return 0;
+ default:
+ CCP_LOG_ERR("Invalid auth algo");
+ return -1;
+ }
+}
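What the precompute caches is the standard HMAC decomposition, stated once here for reference (not part of the patch):

	/* HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)).
	 * K ^ ipad and K ^ opad are each exactly one block long, so a
	 * single SHA*_Transform() call yields the compression-function
	 * mid-state after that block; both mid-states are stored
	 * byte-reversed in pre_compute, and the CCP resumes the inner
	 * and outer hashes from them. */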
+
+/* prepare temporary keys K1 and K2 */
+static void prepare_key(unsigned char *k, unsigned char *l, int bl)
+{
+ int i;
+ /* Shift block to left, including carry */
+ for (i = 0; i < bl; i++) {
+ k[i] = l[i] << 1;
+ if (i < bl - 1 && l[i + 1] & 0x80)
+ k[i] |= 1;
+ }
+ /* If MSB set fixup with R */
+ if (l[0] & 0x80)
+ k[bl - 1] ^= bl == 16 ? 0x87 : 0x1b;
+}
+
+/* subkeys K1 and K2 generation for CMAC */
+static int
+generate_cmac_subkeys(struct ccp_session *sess)
+{
+ const EVP_CIPHER *algo;
+ EVP_CIPHER_CTX *ctx;
+ unsigned char *ccp_ctx;
+ size_t i;
+ int dstlen, totlen;
+ unsigned char zero_iv[AES_BLOCK_SIZE] = {0};
+ unsigned char dst[2 * AES_BLOCK_SIZE] = {0};
+ unsigned char k1[AES_BLOCK_SIZE] = {0};
+ unsigned char k2[AES_BLOCK_SIZE] = {0};
+
+ if (sess->auth.ut.aes_type == CCP_AES_TYPE_128)
+ algo = EVP_aes_128_cbc();
+ else if (sess->auth.ut.aes_type == CCP_AES_TYPE_192)
+ algo = EVP_aes_192_cbc();
+ else if (sess->auth.ut.aes_type == CCP_AES_TYPE_256)
+ algo = EVP_aes_256_cbc();
+ else {
+ CCP_LOG_ERR("Invalid CMAC type length");
+ return -1;
+ }
+
+ ctx = EVP_CIPHER_CTX_new();
+ if (!ctx) {
+ CCP_LOG_ERR("ctx creation failed");
+ return -1;
+ }
+ if (EVP_EncryptInit(ctx, algo, (unsigned char *)sess->auth.key,
+ (unsigned char *)zero_iv) <= 0)
+ goto key_generate_err;
+ if (EVP_CIPHER_CTX_set_padding(ctx, 0) <= 0)
+ goto key_generate_err;
+ if (EVP_EncryptUpdate(ctx, dst, &dstlen, zero_iv,
+ AES_BLOCK_SIZE) <= 0)
+ goto key_generate_err;
+ if (EVP_EncryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0)
+ goto key_generate_err;
+
+ memset(sess->auth.pre_compute, 0, CCP_SB_BYTES * 2);
+
+ ccp_ctx = (unsigned char *)(sess->auth.pre_compute + CCP_SB_BYTES - 1);
+ prepare_key(k1, dst, AES_BLOCK_SIZE);
+ for (i = 0; i < AES_BLOCK_SIZE; i++, ccp_ctx--)
+ *ccp_ctx = k1[i];
+
+ ccp_ctx = (unsigned char *)(sess->auth.pre_compute +
+ (2 * CCP_SB_BYTES) - 1);
+ prepare_key(k2, k1, AES_BLOCK_SIZE);
+ for (i = 0; i < AES_BLOCK_SIZE; i++, ccp_ctx--)
+ *ccp_ctx = k2[i];
+
+ EVP_CIPHER_CTX_free(ctx);
+
+ return 0;
+
+key_generate_err:
+ CCP_LOG_ERR("CMAC Init failed");
+ EVP_CIPHER_CTX_free(ctx);
+ return -1;
+}
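prepare_key() implements the GF(2^128) doubling from NIST SP 800-38B. A quick self-check against the RFC 4493 test vectors (vector values from the RFC, not from this patch; assumes prepare_key() above is in scope):

	static void
	test_cmac_subkeys(void)
	{
		/* L = AES-128(K, 0^128) for K = 2b7e1516...09cf4f3c */
		unsigned char l[16] = {
			0x7d, 0xf7, 0x6b, 0x0c, 0x1a, 0xb8, 0x99, 0xb3,
			0x3e, 0x42, 0xf0, 0x47, 0xb9, 0x1b, 0x54, 0x6f
		};
		unsigned char k1[16], k2[16];

		prepare_key(k1, l, 16);		/* MSB(L) = 0: plain left shift */
		prepare_key(k2, k1, 16);	/* MSB(K1) = 1: shift, then ^= 0x87 */
		/* expect K1 = fbeed618 35713366 7c85e08f 7236a8de
		 *        K2 = f7ddac30 6ae266cc f90bc11e e46d513b */
	}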
+
+/* configure session */
+static int
+ccp_configure_session_cipher(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ size_t i, j, x;
+
+ cipher_xform = &xform->cipher;
+
+ /* set cipher direction */
+ if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
+ else
+ sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
+
+ /* set cipher key */
+ sess->cipher.key_length = cipher_xform->key.length;
+ rte_memcpy(sess->cipher.key, cipher_xform->key.data,
+ cipher_xform->key.length);
+
+ /* set iv parameters */
+ sess->iv.offset = cipher_xform->iv.offset;
+ sess->iv.length = cipher_xform->iv.length;
+
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR;
+ sess->cipher.um.aes_mode = CCP_AES_MODE_CTR;
+ sess->cipher.engine = CCP_ENGINE_AES;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_ECB:
+ sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
+ sess->cipher.um.aes_mode = CCP_AES_MODE_ECB;
+ sess->cipher.engine = CCP_ENGINE_AES;
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
+ sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
+ sess->cipher.engine = CCP_ENGINE_AES;
+ break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ sess->cipher.algo = CCP_CIPHER_ALGO_3DES_CBC;
+ sess->cipher.um.des_mode = CCP_DES_MODE_CBC;
+ sess->cipher.engine = CCP_ENGINE_3DES;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cipher algo");
+ return -1;
+ }
+
+
+ switch (sess->cipher.engine) {
+ case CCP_ENGINE_AES:
+ if (sess->cipher.key_length == 16)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+ else if (sess->cipher.key_length == 24)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+ else if (sess->cipher.key_length == 32)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+ else {
+ CCP_LOG_ERR("Invalid cipher key length");
+ return -1;
+ }
+ for (i = 0; i < sess->cipher.key_length ; i++)
+ sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
+ sess->cipher.key[i];
+ break;
+ case CCP_ENGINE_3DES:
+ if (sess->cipher.key_length == 16)
+ sess->cipher.ut.des_type = CCP_DES_TYPE_128;
+ else if (sess->cipher.key_length == 24)
+ sess->cipher.ut.des_type = CCP_DES_TYPE_192;
+ else {
+ CCP_LOG_ERR("Invalid cipher key length");
+ return -1;
+ }
+ for (j = 0, x = 0; j < sess->cipher.key_length/8; j++, x += 8)
+ for (i = 0; i < 8; i++)
+ sess->cipher.key_ccp[(8 + x) - i - 1] =
+ sess->cipher.key[i + x];
+ break;
+ default:
+ CCP_LOG_ERR("Invalid CCP Engine");
+ return -ENOTSUP;
+ }
+ sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+ sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+ return 0;
+}
+
+static int
+ccp_configure_session_auth(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_auth_xform *auth_xform = NULL;
+ size_t i;
+
+ auth_xform = &xform->auth;
+
+ sess->auth.digest_length = auth_xform->digest_length;
+ if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE)
+ sess->auth.op = CCP_AUTH_OP_GENERATE;
+ else
+ sess->auth.op = CCP_AUTH_OP_VERIFY;
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_MD5_HMAC:
+ if (sess->auth_opt) {
+ sess->auth.algo = CCP_AUTH_ALGO_MD5_HMAC;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ MD5_DIGEST_SIZE);
+ sess->auth.key_length = auth_xform->key.length;
+ sess->auth.block_size = MD5_BLOCK_SIZE;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else
+ return -1; /* HMAC MD5 not supported on CCP */
+ break;
+ case RTE_CRYPTO_AUTH_SHA1:
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA1;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+ sess->auth.ctx = (void *)ccp_sha1_init;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+ sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+ sess->auth.block_size = SHA1_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+ return -1;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+ sess->auth.block_size = SHA1_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA224:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA224;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
+ sess->auth.ctx = (void *)ccp_sha224_init;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+ sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+ sess->auth.block_size = SHA224_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+ sess->auth.block_size = SHA224_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_224:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_224;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_224_HMAC:
+ if (auth_xform->key.length > SHA3_224_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_224_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
+ sess->auth.block_size = SHA3_224_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA256;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
+ sess->auth.ctx = (void *)ccp_sha256_init;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+ sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+ sess->auth.block_size = SHA256_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+ sess->auth.block_size = SHA256_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_256:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_256;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_256_HMAC:
+ if (auth_xform->key.length > SHA3_256_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_256_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
+ sess->auth.block_size = SHA3_256_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA384;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
+ sess->auth.ctx = (void *)ccp_sha384_init;
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ SHA384_DIGEST_SIZE);
+ sess->auth.block_size = SHA384_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ SHA384_DIGEST_SIZE);
+ sess->auth.block_size = SHA384_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_384:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_384;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_384_HMAC:
+ if (auth_xform->key.length > SHA3_384_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_384_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
+ sess->auth.block_size = SHA3_384_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA512;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
+ sess->auth.ctx = (void *)ccp_sha512_init;
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
+ if (sess->auth_opt) {
+ if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ SHA512_DIGEST_SIZE);
+ sess->auth.block_size = SHA512_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ } else {
+ if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = ((CCP_SB_BYTES << 1) -
+ SHA512_DIGEST_SIZE);
+ sess->auth.block_size = SHA512_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0,
+ sess->auth.ctx_len << 1);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ }
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_512:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_512;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_512_HMAC:
+ if (auth_xform->key.length > SHA3_512_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_512_HMAC;
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
+ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
+ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
+ sess->auth.block_size = SHA3_512_BLOCK_SIZE;
+ sess->auth.key_length = auth_xform->key.length;
+ memset(sess->auth.key, 0, sess->auth.block_size);
+ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
+ sess->auth.engine = CCP_ENGINE_AES;
+ sess->auth.um.aes_mode = CCP_AES_MODE_CMAC;
+ sess->auth.key_length = auth_xform->key.length;
+ /* padding and hash result */
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = AES_BLOCK_SIZE;
+ sess->auth.block_size = AES_BLOCK_SIZE;
+ if (sess->auth.key_length == 16)
+ sess->auth.ut.aes_type = CCP_AES_TYPE_128;
+ else if (sess->auth.key_length == 24)
+ sess->auth.ut.aes_type = CCP_AES_TYPE_192;
+ else if (sess->auth.key_length == 32)
+ sess->auth.ut.aes_type = CCP_AES_TYPE_256;
+ else {
+ CCP_LOG_ERR("Invalid CMAC key length");
+ return -1;
+ }
+ rte_memcpy(sess->auth.key, auth_xform->key.data,
+ sess->auth.key_length);
+ for (i = 0; i < sess->auth.key_length; i++)
+ sess->auth.key_ccp[sess->auth.key_length - i - 1] =
+ sess->auth.key[i];
+ if (generate_cmac_subkeys(sess))
+ return -1;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported hash algo");
+ return -ENOTSUP;
+ }
+ return 0;
+}
+
+static int
+ccp_configure_session_aead(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_crypto_aead_xform *aead_xform = NULL;
+ size_t i;
+
+ aead_xform = &xform->aead;
+
+ sess->cipher.key_length = aead_xform->key.length;
+ rte_memcpy(sess->cipher.key, aead_xform->key.data,
+ aead_xform->key.length);
+
+ if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
+ sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
+ sess->auth.op = CCP_AUTH_OP_GENERATE;
+ } else {
+ sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
+ sess->auth.op = CCP_AUTH_OP_VERIFY;
+ }
+ sess->aead_algo = aead_xform->algo;
+ sess->auth.aad_length = aead_xform->aad_length;
+ sess->auth.digest_length = aead_xform->digest_length;
+
+ /* set iv parameters */
+ sess->iv.offset = aead_xform->iv.offset;
+ sess->iv.length = aead_xform->iv.length;
+
+ switch (aead_xform->algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ sess->cipher.algo = CCP_CIPHER_ALGO_AES_GCM;
+ sess->cipher.um.aes_mode = CCP_AES_MODE_GCTR;
+ sess->cipher.engine = CCP_ENGINE_AES;
+ if (sess->cipher.key_length == 16)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
+ else if (sess->cipher.key_length == 24)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
+ else if (sess->cipher.key_length == 32)
+ sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
+ else {
+ CCP_LOG_ERR("Invalid aead key length");
+ return -1;
+ }
+ for (i = 0; i < sess->cipher.key_length; i++)
+ sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
+ sess->cipher.key[i];
+ sess->auth.algo = CCP_AUTH_ALGO_AES_GCM;
+ sess->auth.engine = CCP_ENGINE_AES;
+ sess->auth.um.aes_mode = CCP_AES_MODE_GHASH;
+ sess->auth.ctx_len = CCP_SB_BYTES;
+ sess->auth.offset = 0;
+ sess->auth.block_size = AES_BLOCK_SIZE;
+ sess->cmd_id = CCP_CMD_COMBINED;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported aead algo");
+ return -ENOTSUP;
+ }
+ sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
+ sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+ return 0;
+}
+
+int
+ccp_set_session_parameters(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform,
+ struct ccp_private *internals)
+{
+ const struct rte_crypto_sym_xform *cipher_xform = NULL;
+ const struct rte_crypto_sym_xform *auth_xform = NULL;
+ const struct rte_crypto_sym_xform *aead_xform = NULL;
+ int ret = 0;
+
+ sess->auth_opt = internals->auth_opt;
+ sess->cmd_id = ccp_get_cmd_id(xform);
+
+ switch (sess->cmd_id) {
+ case CCP_CMD_CIPHER:
+ cipher_xform = xform;
+ break;
+ case CCP_CMD_AUTH:
+ auth_xform = xform;
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ cipher_xform = xform;
+ auth_xform = xform->next;
+ break;
+ case CCP_CMD_HASH_CIPHER:
+ auth_xform = xform;
+ cipher_xform = xform->next;
+ break;
+ case CCP_CMD_COMBINED:
+ aead_xform = xform;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+ return -1;
+ }
+
+ /* Default IV length = 0 */
+ sess->iv.length = 0;
+ if (cipher_xform) {
+ ret = ccp_configure_session_cipher(sess, cipher_xform);
+ if (ret != 0) {
+ CCP_LOG_ERR("Invalid/unsupported cipher parameters");
+ return ret;
+ }
+ }
+ if (auth_xform) {
+ ret = ccp_configure_session_auth(sess, auth_xform);
+ if (ret != 0) {
+ CCP_LOG_ERR("Invalid/unsupported auth parameters");
+ return ret;
+ }
+ }
+ if (aead_xform) {
+ ret = ccp_configure_session_aead(sess, aead_xform);
+ if (ret != 0) {
+ CCP_LOG_ERR("Invalid/unsupported aead parameters");
+ return ret;
+ }
+ }
+ return ret;
+}
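For reference, a chain that ccp_get_cmd_id() classifies as CCP_CMD_CIPHER_HASH is built on the application side as below (field values illustrative; key and IV_OFFSET are hypothetical):

	struct rte_crypto_sym_xform auth_xf = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.next = NULL,
		.auth = {
			.op = RTE_CRYPTO_AUTH_OP_GENERATE,
			.algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
			.key = { .data = key, .length = 20 },
			.digest_length = 20,
		},
	};
	struct rte_crypto_sym_xform cipher_xf = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.next = &auth_xf,	/* cipher first, then auth */
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
			.key = { .data = key, .length = 16 },
			.iv = { .offset = IV_OFFSET, .length = 16 },
		},
	};
	/* ccp_get_cmd_id(&cipher_xf) == CCP_CMD_CIPHER_HASH */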
+
+/* calculate CCP descriptors requirement */
+static inline int
+ccp_cipher_slot(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->cipher.algo) {
+ case CCP_CIPHER_ALGO_AES_CBC:
+ count = 2;
+ /**< op + passthrough for iv */
+ break;
+ case CCP_CIPHER_ALGO_AES_ECB:
+ count = 1;
+ /**< only op */
+ break;
+ case CCP_CIPHER_ALGO_AES_CTR:
+ count = 2;
+ /**< op + passthrough for iv */
+ break;
+ case CCP_CIPHER_ALGO_3DES_CBC:
+ count = 2;
+ /**< op + passthrough for iv */
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cipher algo %d",
+ session->cipher.algo);
+ }
+ return count;
+}
+
+static inline int
+ccp_auth_slot(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->auth.algo) {
+ case CCP_AUTH_ALGO_SHA1:
+ case CCP_AUTH_ALGO_SHA224:
+ case CCP_AUTH_ALGO_SHA256:
+ case CCP_AUTH_ALGO_SHA384:
+ case CCP_AUTH_ALGO_SHA512:
+ count = 3;
+ /**< op + LSB passthrough copy to/from */
+ break;
+ case CCP_AUTH_ALGO_MD5_HMAC:
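+ /*
+ * MD5-HMAC is handled only on the CPU (auth_opt) path;
+ * the CCP SHA engine has no MD5 support, so no
+ * descriptors are consumed here.
+ */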
+ break;
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ case CCP_AUTH_ALGO_SHA256_HMAC:
+ if (session->auth_opt == 0)
+ count = 6;
+ break;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ case CCP_AUTH_ALGO_SHA512_HMAC:
+ /**
+ * 1. Load PHash1 = H(k ^ ipad) to LSB
+ * 2. Generate IHash = H(message with PHash1
+ *    as init values)
+ * 3. Retrieve IHash: 2 slots for 384/512
+ * 4. Load PHash2 = H(k ^ opad) to LSB
+ * 5. Generate FHash = H(IHash with PHash2
+ *    as init values)
+ * 6. Retrieve HMAC output from LSB to host memory
+ */
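+ /* i.e. HMAC(k, m) = H((k ^ opad) || H((k ^ ipad) || m)) */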
+ if (session->auth_opt == 0)
+ count = 7;
+ break;
+ case CCP_AUTH_ALGO_SHA3_224:
+ case CCP_AUTH_ALGO_SHA3_256:
+ case CCP_AUTH_ALGO_SHA3_384:
+ case CCP_AUTH_ALGO_SHA3_512:
+ count = 1;
+ /**< only op ctx and dst in host memory*/
+ break;
+ case CCP_AUTH_ALGO_SHA3_224_HMAC:
+ case CCP_AUTH_ALGO_SHA3_256_HMAC:
+ count = 3;
+ break;
+ case CCP_AUTH_ALGO_SHA3_384_HMAC:
+ case CCP_AUTH_ALGO_SHA3_512_HMAC:
+ count = 4;
+ /**
+ * 1. Op to perform IHash
+ * 2-3. Retrieve IHash from LSB to host memory
+ *    (2 slots for 384/512)
+ * 4. Op to perform final hash
+ */
+ break;
+ case CCP_AUTH_ALGO_AES_CMAC:
+ count = 4;
+ /**
+ * 1. Passthru to load IV (127:0) with K1/K2 (255:128)
+ * 2. CMAC op on the block-aligned data
+ * 3. Extra op on the padded final block when data is
+ *    not block-aligned
+ * 4. Retrieve result
+ */
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported auth algo %d",
+ session->auth.algo);
+ }
+
+ return count;
+}
+
+static int
+ccp_aead_slot(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->aead_algo) {
+ case RTE_CRYPTO_AEAD_AES_GCM:
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported aead algo %d",
+ session->aead_algo);
+ }
+ switch (session->auth.algo) {
+ case CCP_AUTH_ALGO_AES_GCM:
+ count = 5;
+ /**
+ * 1. Passthru iv
+ * 2. Hash AAD
+ * 3. GCTR
+ * 4. Reload passthru
+ * 5. Hash Final tag
+ */
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported combined auth ALGO %d",
+ session->auth.algo);
+ }
+ return count;
+}
+
+int
+ccp_compute_slot_count(struct ccp_session *session)
+{
+ int count = 0;
+
+ switch (session->cmd_id) {
+ case CCP_CMD_CIPHER:
+ count = ccp_cipher_slot(session);
+ break;
+ case CCP_CMD_AUTH:
+ count = ccp_auth_slot(session);
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ case CCP_CMD_HASH_CIPHER:
+ count = ccp_cipher_slot(session);
+ count += ccp_auth_slot(session);
+ break;
+ case CCP_CMD_COMBINED:
+ count = ccp_aead_slot(session);
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+
+ }
+
+ return count;
+}
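+
+/*
+ * Worked example (illustrative): an AES-CBC + SHA1-HMAC chained session
+ * (CCP_CMD_CIPHER_HASH, hardware auth path with auth_opt disabled) needs
+ * 2 cipher descriptors plus 6 auth descriptors, i.e. 8 slots per op.
+ */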
+
+static int
+algo_select(int sessalgo,
+ const EVP_MD **algo)
+{
+ int res = 0;
+
+ switch (sessalgo) {
+ case CCP_AUTH_ALGO_MD5_HMAC:
+ *algo = EVP_md5();
+ break;
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ *algo = EVP_sha1();
+ break;
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ *algo = EVP_sha224();
+ break;
+ case CCP_AUTH_ALGO_SHA256_HMAC:
+ *algo = EVP_sha256();
+ break;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ *algo = EVP_sha384();
+ break;
+ case CCP_AUTH_ALGO_SHA512_HMAC:
+ *algo = EVP_sha512();
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
+ return res;
+}
+
+static int
+process_cpu_auth_hmac(uint8_t *src, uint8_t *dst,
+ __rte_unused uint8_t *iv,
+ EVP_PKEY *pkey,
+ int srclen,
+ EVP_MD_CTX *ctx,
+ const EVP_MD *algo,
+ uint16_t d_len)
+{
+ size_t dstlen;
+ unsigned char temp_dst[64];
+
+ if (EVP_DigestSignInit(ctx, NULL, algo, NULL, pkey) <= 0)
+ goto process_auth_err;
+
+ if (EVP_DigestSignUpdate(ctx, (char *)src, srclen) <= 0)
+ goto process_auth_err;
+
+ if (EVP_DigestSignFinal(ctx, temp_dst, &dstlen) <= 0)
+ goto process_auth_err;
+
+ memcpy(dst, temp_dst, d_len);
+ return 0;
+process_auth_err:
+ CCP_LOG_ERR("Process cpu auth failed");
+ return -EINVAL;
+}
+
+static int
+cpu_crypto_auth(struct ccp_qp *qp,
+ struct rte_crypto_op *op,
+ struct ccp_session *sess,
+ EVP_MD_CTX *ctx)
+{
+ uint8_t *src, *dst;
+ int srclen, status;
+ struct rte_mbuf *mbuf_src, *mbuf_dst;
+ const EVP_MD *algo = NULL;
+ EVP_PKEY *pkey;
+
+ if (algo_select(sess->auth.algo, &algo) != 0) {
+ CCP_LOG_ERR("Invalid auth algo");
+ return -EINVAL;
+ }
+ pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL, sess->auth.key,
+ sess->auth.key_length);
+ if (unlikely(pkey == NULL)) {
+ CCP_LOG_ERR("EVP_PKEY_new_mac_key failed");
+ return -EINVAL;
+ }
+ mbuf_src = op->sym->m_src;
+ mbuf_dst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
+ srclen = op->sym->auth.data.length;
+ src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
+ op->sym->auth.data.offset);
+
+ if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
+ dst = qp->temp_digest;
+ } else {
+ dst = op->sym->auth.digest.data;
+ if (dst == NULL) {
+ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
+ op->sym->auth.data.offset +
+ sess->auth.digest_length);
+ }
+ }
+ status = process_cpu_auth_hmac(src, dst, NULL,
+ pkey, srclen,
+ ctx,
+ algo,
+ sess->auth.digest_length);
+ if (status) {
+ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ EVP_PKEY_free(pkey);
+ return status;
+ }
+
+ if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
+ if (memcmp(dst, op->sym->auth.digest.data,
+ sess->auth.digest_length) != 0) {
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+ } else {
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+ } else {
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ }
+ EVP_PKEY_free(pkey);
+ return 0;
+}
+
+static void
+ccp_perform_passthru(struct ccp_passthru *pst,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_desc *desc;
+ union ccp_function function;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 0;
+ CCP_CMD_EOM(desc) = 0;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_PT_BYTESWAP(&function) = pst->byte_swap;
+ CCP_PT_BITWISE(&function) = pst->bit_mod;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = pst->len;
+
+ if (pst->dir) {
+ CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
+ CCP_CMD_DST_HI(desc) = 0;
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
+
+ if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_key;
+ } else {
+
+ CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
+ CCP_CMD_SRC_HI(desc) = 0;
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB;
+
+ CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+ }
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+}
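+
+/*
+ * A minimal usage sketch (illustrative; iv_phys is a hypothetical
+ * physical address): pst.dir == 1 copies from system memory into the
+ * local storage block (LSB), pst.dir == 0 copies from the LSB back to
+ * system memory.
+ *
+ *	struct ccp_passthru pst = {
+ *		.src_addr = iv_phys,
+ *		.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES),
+ *		.len = CCP_SB_BYTES,
+ *		.dir = 1,
+ *		.bit_mod = CCP_PASSTHRU_BITWISE_NOOP,
+ *		.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP,
+ *	};
+ *	ccp_perform_passthru(&pst, cmd_q);
+ */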
+
+static int
+ccp_perform_hmac(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ phys_addr_t src_addr, dest_addr, dest_addr_t;
+ struct ccp_passthru pst;
+ uint64_t auth_msg_bits;
+ void *append_ptr;
+ uint8_t *addr;
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ addr = session->auth.pre_compute;
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ if (unlikely(append_ptr == NULL)) {
+ CCP_LOG_ERR("CCP MBUF append failed");
+ return -1;
+ }
+ dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+ dest_addr_t = dest_addr;
+
+ /* Load PHash1 to LSB */
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.len = session->auth.ctx_len;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ /* SHA engine command descriptor for intermediate hash */
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
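+ /*
+ * The SHA message-bit count includes one extra block_size
+ * for the ipad block already absorbed into PHash1.
+ */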
+ auth_msg_bits = (op->sym->auth.data.length +
+ session->auth.block_size) * 8;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+ CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+ CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* Intermediate Hash value retrieve */
+ if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+ (session->auth.ut.sha_type == CCP_SHA_TYPE_512)) {
+
+ pst.src_addr =
+ (phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ } else {
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t;
+ pst.len = session->auth.ctx_len;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ }
+
+ /* Load PHash2 to LSB */
+ addr += session->auth.ctx_len;
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.len = session->auth.ctx_len;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ /* SHA engine command descriptor for final hash */
+ dest_addr_t += session->auth.offset;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = (session->auth.ctx_len -
+ session->auth.offset);
+ auth_msg_bits = (session->auth.block_size +
+ session->auth.ctx_len -
+ session->auth.offset) * 8;
+
+ CCP_CMD_SRC_LO(desc) = (uint32_t)(dest_addr_t);
+ CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+ CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+ CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* Retrieve hmac output */
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr;
+ pst.len = session->auth.ctx_len;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+ (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ else
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_sha(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ phys_addr_t src_addr, dest_addr;
+ struct ccp_passthru pst;
+ void *append_ptr;
+ uint64_t auth_msg_bits;
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+
+ append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ if (unlikely(append_ptr == NULL)) {
+ CCP_LOG_ERR("CCP MBUF append failed");
+ return -1;
+ }
+ dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
+
+ /* Passthru SHA context */
+
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
+ session->auth.ctx);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.len = session->auth.ctx_len;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ /* Prepare SHA command descriptor */
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+ auth_msg_bits = op->sym->auth.data.length * 8;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
+ CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
+ CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* Hash value retrieve */
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr;
+ pst.len = session->auth.ctx_len;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
+ (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ else
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_sha3_hmac(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ struct ccp_passthru pst;
+ union ccp_function function;
+ struct ccp_desc *desc;
+ uint8_t *append_ptr;
+ uint32_t tail;
+ phys_addr_t src_addr, dest_addr, ctx_paddr, dest_addr_t;
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ if (!append_ptr) {
+ CCP_LOG_ERR("CCP MBUF append failed\n");
+ return -1;
+ }
+ dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+ dest_addr_t = dest_addr + (session->auth.ctx_len / 2);
+ ctx_paddr = (phys_addr_t)rte_mem_virt2phy(
+ (void *)session->auth.pre_compute);
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ /* desc1 for SHA3 IHash operation */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = (cmd_q->sb_sha * CCP_SB_BYTES);
+ CCP_CMD_DST_HI(desc) = 0;
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+ CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* Intermediate Hash value retrieve */
+ if ((session->auth.ut.sha_type == CCP_SHA3_TYPE_384) ||
+ (session->auth.ut.sha_type == CCP_SHA3_TYPE_512)) {
+
+ pst.src_addr =
+ (phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ } else {
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
+ pst.dest_addr = dest_addr_t;
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+ }
+
+ /* SHA engine command descriptor for final hash */
+ ctx_paddr += CCP_SHA3_CTX_SIZE;
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ if (session->auth.ut.sha_type == CCP_SHA3_TYPE_224) {
+ dest_addr_t += (CCP_SB_BYTES - SHA224_DIGEST_SIZE);
+ CCP_CMD_LEN(desc) = SHA224_DIGEST_SIZE;
+ } else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_256) {
+ CCP_CMD_LEN(desc) = SHA256_DIGEST_SIZE;
+ } else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_384) {
+ dest_addr_t += (2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE);
+ CCP_CMD_LEN(desc) = SHA384_DIGEST_SIZE;
+ } else {
+ CCP_CMD_LEN(desc) = SHA512_DIGEST_SIZE;
+ }
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)dest_addr_t);
+ CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = (uint32_t)dest_addr;
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+ CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_sha3(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ struct ccp_desc *desc;
+ uint8_t *ctx_addr, *append_ptr;
+ uint32_t tail;
+ phys_addr_t src_addr, dest_addr, ctx_paddr;
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ if (!append_ptr) {
+ CCP_LOG_ERR("CCP MBUF append failed\n");
+ return -1;
+ }
+ dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+ ctx_addr = session->auth.sha3_ctx;
+ ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ /* prepare desc for SHA3 operation */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+
+ function.raw = 0;
+ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
+ CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_aes_cmac(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ struct ccp_passthru pst;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ uint8_t *src_tb, *append_ptr, *ctx_addr;
+ phys_addr_t src_addr, dest_addr, key_addr;
+ int length, non_align_len;
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ key_addr = rte_mem_virt2phy(session->auth.key_ccp);
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->auth.data.offset);
+ append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
+ session->auth.ctx_len);
+ if (unlikely(append_ptr == NULL)) {
+ CCP_LOG_ERR("CCP MBUF append failed");
+ return -1;
+ }
+ dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
+
+ function.raw = 0;
+ CCP_AES_ENCRYPT(&function) = CCP_CIPHER_DIR_ENCRYPT;
+ CCP_AES_MODE(&function) = session->auth.um.aes_mode;
+ CCP_AES_TYPE(&function) = session->auth.ut.aes_type;
+
+ if (op->sym->auth.data.length % session->auth.block_size == 0) {
+
+ ctx_addr = session->auth.pre_compute;
+ memset(ctx_addr, 0, AES_BLOCK_SIZE);
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ /* prepare desc for aes-cmac command */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ tail =
+ (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+ } else {
+ ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
+ memset(ctx_addr, 0, AES_BLOCK_SIZE);
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ length = (op->sym->auth.data.length / AES_BLOCK_SIZE);
+ length *= AES_BLOCK_SIZE;
+ non_align_len = op->sym->auth.data.length - length;
+ /* prepare desc for aes-cmac command */
+ /* Command 1 */
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = length;
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ /* Command 2 */
+ append_ptr = append_ptr + CCP_SB_BYTES;
+ memset(append_ptr, 0, AES_BLOCK_SIZE);
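+ /*
+ * Copy the trailing partial block and pad it as per
+ * NIST SP 800-38B: a 0x80 byte (CMAC_PAD_VALUE) after the
+ * residual data; the rest stays zero from the memset above.
+ */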
+ src_tb = rte_pktmbuf_mtod_offset(op->sym->m_src,
+ uint8_t *,
+ op->sym->auth.data.offset +
+ length);
+ rte_memcpy(append_ptr, src_tb, non_align_len);
+ append_ptr[non_align_len] = CMAC_PAD_VALUE;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+ CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)(dest_addr + CCP_SB_BYTES));
+ CCP_CMD_SRC_HI(desc) = high32_value(dest_addr + CCP_SB_BYTES);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+ tail =
+ (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+ }
+ /* Retrieve result */
+ pst.dest_addr = dest_addr;
+ pst.src_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 0;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_aes(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ uint8_t *lsb_buf;
+ struct ccp_passthru pst = {0};
+ struct ccp_desc *desc;
+ phys_addr_t src_addr, dest_addr, key_addr;
+ uint8_t *iv;
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ function.raw = 0;
+
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+ if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) {
+ if (session->cipher.um.aes_mode == CCP_AES_MODE_CTR) {
+ rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE,
+ iv, session->iv.length);
+ pst.src_addr = (phys_addr_t)session->cipher.nonce_phys;
+ CCP_AES_SIZE(&function) = 0x1F;
+ } else {
+ lsb_buf =
+ &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
+ rte_memcpy(lsb_buf +
+ (CCP_SB_BYTES - session->iv.length),
+ iv, session->iv.length);
+ pst.src_addr = b_info->lsb_buf_phys +
+ (b_info->lsb_buf_idx * CCP_SB_BYTES);
+ b_info->lsb_buf_idx++;
+ }
+
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+ }
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->cipher.data.offset);
+ if (likely(op->sym->m_dst != NULL))
+ dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ op->sym->cipher.data.offset);
+ else
+ dest_addr = src_addr;
+ key_addr = session->cipher.key_phys;
+
+ /* prepare desc for aes command */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+
+ CCP_AES_ENCRYPT(&function) = session->cipher.dir;
+ CCP_AES_MODE(&function) = session->cipher.um.aes_mode;
+ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB)
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_3des(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ unsigned char *lsb_buf;
+ struct ccp_passthru pst;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ uint8_t *iv;
+ phys_addr_t src_addr, dest_addr, key_addr;
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+ switch (session->cipher.um.des_mode) {
+ case CCP_DES_MODE_CBC:
+ lsb_buf = &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
+ b_info->lsb_buf_idx++;
+
+ rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
+ iv, session->iv.length);
+
+ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ccp_perform_passthru(&pst, cmd_q);
+ break;
+ case CCP_DES_MODE_CFB:
+ case CCP_DES_MODE_ECB:
+ CCP_LOG_ERR("Unsupported DES cipher mode");
+ return -ENOTSUP;
+ }
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->cipher.data.offset);
+ if (unlikely(op->sym->m_dst != NULL))
+ dest_addr =
+ rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ op->sym->cipher.data.offset);
+ else
+ dest_addr = src_addr;
+
+ key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+
+ memset(desc, 0, Q_DESC_SIZE);
+
+ /* prepare desc for des command */
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_3DES;
+
+ CCP_CMD_SOC(desc) = 0;
+ CCP_CMD_IOC(desc) = 0;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_PROT(desc) = 0;
+
+ function.raw = 0;
+ CCP_DES_ENCRYPT(&function) = session->cipher.dir;
+ CCP_DES_MODE(&function) = session->cipher.um.des_mode;
+ CCP_DES_TYPE(&function) = session->cipher.ut.des_type;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
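+ /* Every supported DES mode other than ECB consumes the IV slot */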
+ if (session->cipher.um.des_mode)
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+
+ rte_wmb();
+
+ /* Write the new tail address back to the queue register */
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ /* Turn the queue back on using our cached control register */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static int
+ccp_perform_aes_gcm(struct rte_crypto_op *op, struct ccp_queue *cmd_q)
+{
+ struct ccp_session *session;
+ union ccp_function function;
+ uint8_t *iv;
+ struct ccp_passthru pst;
+ struct ccp_desc *desc;
+ uint32_t tail;
+ uint64_t *temp;
+ phys_addr_t src_addr, dest_addr, key_addr, aad_addr;
+ phys_addr_t digest_dest_addr;
+ int length, non_align_len;
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
+ key_addr = session->cipher.key_phys;
+
+ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
+ op->sym->aead.data.offset);
+ if (unlikely(op->sym->m_dst != NULL))
+ dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
+ op->sym->aead.data.offset);
+ else
+ dest_addr = src_addr;
+ rte_pktmbuf_append(op->sym->m_src, session->auth.ctx_len);
+ digest_dest_addr = op->sym->aead.digest.phys_addr;
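+ /*
+ * Build the trailing GHASH block: the big-endian bit lengths
+ * of the AAD and of the payload, (len(AAD) || len(C)), placed
+ * just after the digest area.
+ */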
+ temp = (uint64_t *)(op->sym->aead.digest.data + AES_BLOCK_SIZE);
+ *temp++ = rte_bswap64(session->auth.aad_length << 3);
+ *temp = rte_bswap64(op->sym->aead.data.length << 3);
+
+ non_align_len = op->sym->aead.data.length % AES_BLOCK_SIZE;
+ length = CCP_ALIGN(op->sym->aead.data.length, AES_BLOCK_SIZE);
+
+ aad_addr = op->sym->aead.aad.phys_addr;
+
+ /* CMD1 IV Passthru */
+ rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE, iv,
+ session->iv.length);
+ pst.src_addr = session->cipher.nonce_phys;
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = CCP_SB_BYTES;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ /* CMD2 GHASH-AAD */
+ function.raw = 0;
+ CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_AAD;
+ CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
+ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_INIT(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = session->auth.aad_length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)aad_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(aad_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* CMD3 : GCTR Plain text */
+ function.raw = 0;
+ CCP_AES_ENCRYPT(&function) = session->cipher.dir;
+ CCP_AES_MODE(&function) = CCP_AES_MODE_GCTR;
+ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+ if (non_align_len == 0)
+ CCP_AES_SIZE(&function) = (AES_BLOCK_SIZE << 3) - 1;
+ else
+ CCP_AES_SIZE(&function) = (non_align_len << 3) - 1;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_EOM(desc) = 1;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+
+ CCP_CMD_LEN(desc) = length;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
+ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ /* CMD4 : PT to copy IV */
+ pst.src_addr = session->cipher.nonce_phys;
+ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
+ pst.len = AES_BLOCK_SIZE;
+ pst.dir = 1;
+ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
+ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
+ ccp_perform_passthru(&pst, cmd_q);
+
+ /* CMD5 : GHASH-Final */
+ function.raw = 0;
+ CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_FINAL;
+ CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
+ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
+
+ desc = &cmd_q->qbase_desc[cmd_q->qidx];
+ memset(desc, 0, Q_DESC_SIZE);
+
+ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
+ CCP_CMD_FUNCTION(desc) = function.raw;
+ /* Last block (AAD_len || PT_len) */
+ CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
+
+ CCP_CMD_SRC_LO(desc) = ((uint32_t)digest_dest_addr + AES_BLOCK_SIZE);
+ CCP_CMD_SRC_HI(desc) = high32_value(digest_dest_addr + AES_BLOCK_SIZE);
+ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_DST_LO(desc) = ((uint32_t)digest_dest_addr);
+ CCP_CMD_DST_HI(desc) = high32_value(digest_dest_addr);
+ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
+ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
+ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
+
+ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
+
+ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
+ rte_wmb();
+
+ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ return 0;
+}
+
+static inline int
+ccp_crypto_cipher(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+ int result = 0;
+ struct ccp_session *session;
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ switch (session->cipher.algo) {
+ case CCP_CIPHER_ALGO_AES_CBC:
+ result = ccp_perform_aes(op, cmd_q, b_info);
+ b_info->desccnt += 2;
+ break;
+ case CCP_CIPHER_ALGO_AES_CTR:
+ result = ccp_perform_aes(op, cmd_q, b_info);
+ b_info->desccnt += 2;
+ break;
+ case CCP_CIPHER_ALGO_AES_ECB:
+ result = ccp_perform_aes(op, cmd_q, b_info);
+ b_info->desccnt += 1;
+ break;
+ case CCP_CIPHER_ALGO_3DES_CBC:
+ result = ccp_perform_3des(op, cmd_q, b_info);
+ b_info->desccnt += 2;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cipher algo %d",
+ session->cipher.algo);
+ return -ENOTSUP;
+ }
+ return result;
+}
+
+static inline int
+ccp_crypto_auth(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+ int result = 0;
+ struct ccp_session *session;
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ switch (session->auth.algo) {
+ case CCP_AUTH_ALGO_SHA1:
+ case CCP_AUTH_ALGO_SHA224:
+ case CCP_AUTH_ALGO_SHA256:
+ case CCP_AUTH_ALGO_SHA384:
+ case CCP_AUTH_ALGO_SHA512:
+ result = ccp_perform_sha(op, cmd_q);
+ b_info->desccnt += 3;
+ break;
+ case CCP_AUTH_ALGO_MD5_HMAC:
+ if (session->auth_opt == 0)
+ result = -1;
+ break;
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ case CCP_AUTH_ALGO_SHA256_HMAC:
+ if (session->auth_opt == 0) {
+ result = ccp_perform_hmac(op, cmd_q);
+ b_info->desccnt += 6;
+ }
+ break;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ case CCP_AUTH_ALGO_SHA512_HMAC:
+ if (session->auth_opt == 0) {
+ result = ccp_perform_hmac(op, cmd_q);
+ b_info->desccnt += 7;
+ }
+ break;
+ case CCP_AUTH_ALGO_SHA3_224:
+ case CCP_AUTH_ALGO_SHA3_256:
+ case CCP_AUTH_ALGO_SHA3_384:
+ case CCP_AUTH_ALGO_SHA3_512:
+ result = ccp_perform_sha3(op, cmd_q);
+ b_info->desccnt += 1;
+ break;
+ case CCP_AUTH_ALGO_SHA3_224_HMAC:
+ case CCP_AUTH_ALGO_SHA3_256_HMAC:
+ result = ccp_perform_sha3_hmac(op, cmd_q);
+ b_info->desccnt += 3;
+ break;
+ case CCP_AUTH_ALGO_SHA3_384_HMAC:
+ case CCP_AUTH_ALGO_SHA3_512_HMAC:
+ result = ccp_perform_sha3_hmac(op, cmd_q);
+ b_info->desccnt += 4;
+ break;
+ case CCP_AUTH_ALGO_AES_CMAC:
+ result = ccp_perform_aes_cmac(op, cmd_q);
+ b_info->desccnt += 4;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported auth algo %d",
+ session->auth.algo);
+ return -ENOTSUP;
+ }
+
+ return result;
+}
+
+static inline int
+ccp_crypto_aead(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+{
+ int result = 0;
+ struct ccp_session *session;
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ switch (session->auth.algo) {
+ case CCP_AUTH_ALGO_AES_GCM:
+ if (session->cipher.algo != CCP_CIPHER_ALGO_AES_GCM) {
+ CCP_LOG_ERR("Incorrect chain order");
+ return -1;
+ }
+ result = ccp_perform_aes_gcm(op, cmd_q);
+ b_info->desccnt += 5;
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported aead algo %d",
+ session->aead_algo);
+ return -ENOTSUP;
+ }
+ return result;
+}
+
+int
+process_ops_to_enqueue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ struct ccp_queue *cmd_q,
+ uint16_t nb_ops,
+ int slots_req)
+{
+ int i, result = 0;
+ struct ccp_batch_info *b_info;
+ struct ccp_session *session;
+ EVP_MD_CTX *auth_ctx = NULL;
+
+ if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
+ CCP_LOG_ERR("batch info allocation failed");
+ return 0;
+ }
+
+ auth_ctx = EVP_MD_CTX_create();
+ if (unlikely(!auth_ctx)) {
+ CCP_LOG_ERR("Unable to create auth ctx");
+ rte_mempool_put(qp->batch_mp, (void *)b_info);
+ return 0;
+ }
+ b_info->auth_ctr = 0;
+
+ /* populate batch info necessary for dequeue */
+ b_info->op_idx = 0;
+ b_info->lsb_buf_idx = 0;
+ b_info->desccnt = 0;
+ b_info->cmd_q = cmd_q;
+ b_info->lsb_buf_phys =
+ (phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
+ rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
+
+ b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
+ Q_DESC_SIZE);
+ for (i = 0; i < nb_ops; i++) {
+ session = (struct ccp_session *)get_session_private_data(
+ op[i]->sym->session,
+ ccp_cryptodev_driver_id);
+ switch (session->cmd_id) {
+ case CCP_CMD_CIPHER:
+ result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_AUTH:
+ if (session->auth_opt) {
+ b_info->auth_ctr++;
+ result = cpu_crypto_auth(qp, op[i],
+ session, auth_ctx);
+ } else
+ result = ccp_crypto_auth(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+ if (result)
+ break;
+ result = ccp_crypto_auth(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_HASH_CIPHER:
+ if (session->auth_opt) {
+ result = cpu_crypto_auth(qp, op[i],
+ session, auth_ctx);
+ if (op[i]->status !=
+ RTE_CRYPTO_OP_STATUS_SUCCESS)
+ continue;
+ } else
+ result = ccp_crypto_auth(op[i], cmd_q, b_info);
+
+ if (result)
+ break;
+ result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+ break;
+ case CCP_CMD_COMBINED:
+ result = ccp_crypto_aead(op[i], cmd_q, b_info);
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+ result = -1;
+ }
+ if (unlikely(result < 0)) {
+ rte_atomic64_add(&b_info->cmd_q->free_slots,
+ (slots_req - b_info->desccnt));
+ break;
+ }
+ b_info->op[i] = op[i];
+ }
+
+ b_info->opcnt = i;
+ b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
+ Q_DESC_SIZE);
+
+ rte_wmb();
+ /* Write the new tail address back to the queue register */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
+ b_info->tail_offset);
+ /* Turn the queue back on using our cached control register */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol | CMD_Q_RUN);
+
+ rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
+
+ EVP_MD_CTX_destroy(auth_ctx);
+ return i;
+}
+
+static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
+{
+ struct ccp_session *session;
+ uint8_t *digest_data, *addr;
+ struct rte_mbuf *m_last;
+ int offset, digest_offset;
+ uint8_t digest_le[64];
+
+ session = (struct ccp_session *)get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+ if (session->cmd_id == CCP_CMD_COMBINED) {
+ digest_data = op->sym->aead.digest.data;
+ digest_offset = op->sym->aead.data.offset +
+ op->sym->aead.data.length;
+ } else {
+ digest_data = op->sym->auth.digest.data;
+ digest_offset = op->sym->auth.data.offset +
+ op->sym->auth.data.length;
+ }
+ m_last = rte_pktmbuf_lastseg(op->sym->m_src);
+ addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
+ m_last->data_len - session->auth.ctx_len);
+
+ rte_mb();
+ offset = session->auth.offset;
+
+ if (session->auth.engine == CCP_ENGINE_SHA)
+ if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
+ (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
+ (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
+ /* All other algorithms require byte
+ * swap done by host
+ */
+ unsigned int i;
+
+ offset = session->auth.ctx_len -
+ session->auth.offset - 1;
+ for (i = 0; i < session->auth.digest_length; i++)
+ digest_le[i] = addr[offset - i];
+ offset = 0;
+ addr = digest_le;
+ }
+
+ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ if (session->auth.op == CCP_AUTH_OP_VERIFY) {
+ if (memcmp(addr + offset, digest_data,
+ session->auth.digest_length) != 0)
+ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
+
+ } else {
+ if (unlikely(digest_data == NULL))
+ digest_data = rte_pktmbuf_mtod_offset(
+ op->sym->m_dst, uint8_t *,
+ digest_offset);
+ rte_memcpy(digest_data, addr + offset,
+ session->auth.digest_length);
+ }
+ /* Trim area used for digest from mbuf. */
+ rte_pktmbuf_trim(op->sym->m_src,
+ session->auth.ctx_len);
+}
+
+static int
+ccp_prepare_ops(struct ccp_qp *qp,
+ struct rte_crypto_op **op_d,
+ struct ccp_batch_info *b_info,
+ uint16_t nb_ops)
+{
+ int i, min_ops;
+ struct ccp_session *session;
+
+ EVP_MD_CTX *auth_ctx = NULL;
+
+ auth_ctx = EVP_MD_CTX_create();
+ if (unlikely(!auth_ctx)) {
+ CCP_LOG_ERR("Unable to create auth ctx");
+ return 0;
+ }
+ min_ops = RTE_MIN(nb_ops, b_info->opcnt);
+
+ for (i = 0; i < min_ops; i++) {
+ op_d[i] = b_info->op[b_info->op_idx++];
+ session = (struct ccp_session *)get_session_private_data(
+ op_d[i]->sym->session,
+ ccp_cryptodev_driver_id);
+ switch (session->cmd_id) {
+ case CCP_CMD_CIPHER:
+ op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ break;
+ case CCP_CMD_AUTH:
+ if (session->auth_opt == 0)
+ ccp_auth_dq_prepare(op_d[i]);
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ if (session->auth_opt)
+ cpu_crypto_auth(qp, op_d[i],
+ session, auth_ctx);
+ else
+ ccp_auth_dq_prepare(op_d[i]);
+ break;
+ case CCP_CMD_HASH_CIPHER:
+ if (session->auth_opt)
+ op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ else
+ ccp_auth_dq_prepare(op_d[i]);
+ break;
+ case CCP_CMD_COMBINED:
+ ccp_auth_dq_prepare(op_d[i]);
+ break;
+ default:
+ CCP_LOG_ERR("Unsupported cmd_id");
+ }
+ }
+
+ EVP_MD_CTX_destroy(auth_ctx);
+ b_info->opcnt -= min_ops;
+ return min_ops;
+}
+
+int
+process_ops_to_dequeue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ uint16_t nb_ops)
+{
+ struct ccp_batch_info *b_info;
+ uint32_t cur_head_offset;
+
+ if (qp->b_info != NULL) {
+ b_info = qp->b_info;
+ if (unlikely(b_info->op_idx > 0))
+ goto success;
+ } else if (rte_ring_dequeue(qp->processed_pkts,
+ (void **)&b_info))
+ return 0;
+
+ if (b_info->auth_ctr == b_info->opcnt)
+ goto success;
+ cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
+ CMD_Q_HEAD_LO_BASE);
+
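+	/*
+	 * The descriptor ring may wrap, so the batch is still in
+	 * flight whenever the hardware head pointer lies inside
+	 * [head_offset, tail_offset) in ring order.
+	 */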
+ if (b_info->head_offset < b_info->tail_offset) {
+ if ((cur_head_offset >= b_info->head_offset) &&
+ (cur_head_offset < b_info->tail_offset)) {
+ qp->b_info = b_info;
+ return 0;
+ }
+ } else {
+ if ((cur_head_offset >= b_info->head_offset) ||
+ (cur_head_offset < b_info->tail_offset)) {
+ qp->b_info = b_info;
+ return 0;
+ }
+ }
+
+success:
+ nb_ops = ccp_prepare_ops(qp, op, b_info, nb_ops);
+ rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
+ b_info->desccnt = 0;
+ if (b_info->opcnt > 0) {
+ qp->b_info = b_info;
+ } else {
+ rte_mempool_put(qp->batch_mp, (void *)b_info);
+ qp->b_info = NULL;
+ }
+
+ return nb_ops;
+}
diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
new file mode 100644
index 00000000..882b398a
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_crypto.h
@@ -0,0 +1,388 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_CRYPTO_H_
+#define _CCP_CRYPTO_H_
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_spinlock.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+
+#include "ccp_dev.h"
+
+#define AES_BLOCK_SIZE 16
+#define CMAC_PAD_VALUE 0x80
+#define CTR_NONCE_SIZE 4
+#define CTR_IV_SIZE 8
+#define CCP_SHA3_CTX_SIZE 200
+
+/** Macro helpers for CCP command creation */
+#define CCP_AES_SIZE(p) ((p)->aes.size)
+#define CCP_AES_ENCRYPT(p) ((p)->aes.encrypt)
+#define CCP_AES_MODE(p) ((p)->aes.mode)
+#define CCP_AES_TYPE(p) ((p)->aes.type)
+#define CCP_DES_ENCRYPT(p) ((p)->des.encrypt)
+#define CCP_DES_MODE(p) ((p)->des.mode)
+#define CCP_DES_TYPE(p) ((p)->des.type)
+#define CCP_SHA_TYPE(p) ((p)->sha.type)
+#define CCP_PT_BYTESWAP(p) ((p)->pt.byteswap)
+#define CCP_PT_BITWISE(p) ((p)->pt.bitwise)
+
+/* HMAC */
+#define HMAC_IPAD_VALUE 0x36
+#define HMAC_OPAD_VALUE 0x5c
+
+/* MD5 */
+#define MD5_DIGEST_SIZE 16
+#define MD5_BLOCK_SIZE 64
+
+/* SHA */
+#define SHA_COMMON_DIGEST_SIZE 32
+#define SHA1_DIGEST_SIZE 20
+#define SHA1_BLOCK_SIZE 64
+
+#define SHA224_DIGEST_SIZE 28
+#define SHA224_BLOCK_SIZE 64
+#define SHA3_224_BLOCK_SIZE 144
+
+#define SHA256_DIGEST_SIZE 32
+#define SHA256_BLOCK_SIZE 64
+#define SHA3_256_BLOCK_SIZE 136
+
+#define SHA384_DIGEST_SIZE 48
+#define SHA384_BLOCK_SIZE 128
+#define SHA3_384_BLOCK_SIZE 104
+
+#define SHA512_DIGEST_SIZE 64
+#define SHA512_BLOCK_SIZE 128
+#define SHA3_512_BLOCK_SIZE 72
+
+/* Maximum length for digest */
+#define DIGEST_LENGTH_MAX 64
+
+/* SHA LSB initialization values */
+
+#define SHA1_H0 0x67452301UL
+#define SHA1_H1 0xefcdab89UL
+#define SHA1_H2 0x98badcfeUL
+#define SHA1_H3 0x10325476UL
+#define SHA1_H4 0xc3d2e1f0UL
+
+#define SHA224_H0 0xc1059ed8UL
+#define SHA224_H1 0x367cd507UL
+#define SHA224_H2 0x3070dd17UL
+#define SHA224_H3 0xf70e5939UL
+#define SHA224_H4 0xffc00b31UL
+#define SHA224_H5 0x68581511UL
+#define SHA224_H6 0x64f98fa7UL
+#define SHA224_H7 0xbefa4fa4UL
+
+#define SHA256_H0 0x6a09e667UL
+#define SHA256_H1 0xbb67ae85UL
+#define SHA256_H2 0x3c6ef372UL
+#define SHA256_H3 0xa54ff53aUL
+#define SHA256_H4 0x510e527fUL
+#define SHA256_H5 0x9b05688cUL
+#define SHA256_H6 0x1f83d9abUL
+#define SHA256_H7 0x5be0cd19UL
+
+#define SHA384_H0 0xcbbb9d5dc1059ed8ULL
+#define SHA384_H1 0x629a292a367cd507ULL
+#define SHA384_H2 0x9159015a3070dd17ULL
+#define SHA384_H3 0x152fecd8f70e5939ULL
+#define SHA384_H4 0x67332667ffc00b31ULL
+#define SHA384_H5 0x8eb44a8768581511ULL
+#define SHA384_H6 0xdb0c2e0d64f98fa7ULL
+#define SHA384_H7 0x47b5481dbefa4fa4ULL
+
+#define SHA512_H0 0x6a09e667f3bcc908ULL
+#define SHA512_H1 0xbb67ae8584caa73bULL
+#define SHA512_H2 0x3c6ef372fe94f82bULL
+#define SHA512_H3 0xa54ff53a5f1d36f1ULL
+#define SHA512_H4 0x510e527fade682d1ULL
+#define SHA512_H5 0x9b05688c2b3e6c1fULL
+#define SHA512_H6 0x1f83d9abfb41bd6bULL
+#define SHA512_H7 0x5be0cd19137e2179ULL
+
+/**
+ * CCP supported AES modes
+ */
+enum ccp_aes_mode {
+ CCP_AES_MODE_ECB = 0,
+ CCP_AES_MODE_CBC,
+ CCP_AES_MODE_OFB,
+ CCP_AES_MODE_CFB,
+ CCP_AES_MODE_CTR,
+ CCP_AES_MODE_CMAC,
+ CCP_AES_MODE_GHASH,
+ CCP_AES_MODE_GCTR,
+ CCP_AES_MODE__LAST,
+};
+
+/**
+ * CCP AES GHASH mode
+ */
+enum ccp_aes_ghash_mode {
+ CCP_AES_MODE_GHASH_AAD = 0,
+ CCP_AES_MODE_GHASH_FINAL
+};
+
+/**
+ * CCP supported AES types
+ */
+enum ccp_aes_type {
+ CCP_AES_TYPE_128 = 0,
+ CCP_AES_TYPE_192,
+ CCP_AES_TYPE_256,
+ CCP_AES_TYPE__LAST,
+};
+
+/***** 3DES engine *****/
+
+/**
+ * CCP supported DES/3DES modes
+ */
+enum ccp_des_mode {
+ CCP_DES_MODE_ECB = 0, /* Not supported */
+ CCP_DES_MODE_CBC,
+ CCP_DES_MODE_CFB,
+};
+
+/**
+ * CCP supported DES types
+ */
+enum ccp_des_type {
+ CCP_DES_TYPE_128 = 0, /* 112 + 16 parity */
+ CCP_DES_TYPE_192, /* 168 + 24 parity */
+ CCP_DES_TYPE__LAST,
+};
+
+/***** SHA engine *****/
+
+/**
+ * ccp_sha_type - type of SHA operation
+ *
+ * @CCP_SHA_TYPE_1: SHA-1 operation
+ * @CCP_SHA_TYPE_224: SHA-224 operation
+ * @CCP_SHA_TYPE_256: SHA-256 operation
+ */
+enum ccp_sha_type {
+ CCP_SHA_TYPE_1 = 1,
+ CCP_SHA_TYPE_224,
+ CCP_SHA_TYPE_256,
+ CCP_SHA_TYPE_384,
+ CCP_SHA_TYPE_512,
+ CCP_SHA_TYPE_RSVD1,
+ CCP_SHA_TYPE_RSVD2,
+ CCP_SHA3_TYPE_224,
+ CCP_SHA3_TYPE_256,
+ CCP_SHA3_TYPE_384,
+ CCP_SHA3_TYPE_512,
+ CCP_SHA_TYPE__LAST,
+};
+
+/**
+ * CCP supported cipher algorithms
+ */
+enum ccp_cipher_algo {
+ CCP_CIPHER_ALGO_AES_CBC = 0,
+ CCP_CIPHER_ALGO_AES_ECB,
+ CCP_CIPHER_ALGO_AES_CTR,
+ CCP_CIPHER_ALGO_AES_GCM,
+ CCP_CIPHER_ALGO_3DES_CBC,
+};
+
+/**
+ * CCP cipher operation type
+ */
+enum ccp_cipher_dir {
+ CCP_CIPHER_DIR_DECRYPT = 0,
+ CCP_CIPHER_DIR_ENCRYPT = 1,
+};
+
+/**
+ * CCP supported hash algorithms
+ */
+enum ccp_hash_algo {
+ CCP_AUTH_ALGO_SHA1 = 0,
+ CCP_AUTH_ALGO_SHA1_HMAC,
+ CCP_AUTH_ALGO_SHA224,
+ CCP_AUTH_ALGO_SHA224_HMAC,
+ CCP_AUTH_ALGO_SHA3_224,
+ CCP_AUTH_ALGO_SHA3_224_HMAC,
+ CCP_AUTH_ALGO_SHA256,
+ CCP_AUTH_ALGO_SHA256_HMAC,
+ CCP_AUTH_ALGO_SHA3_256,
+ CCP_AUTH_ALGO_SHA3_256_HMAC,
+ CCP_AUTH_ALGO_SHA384,
+ CCP_AUTH_ALGO_SHA384_HMAC,
+ CCP_AUTH_ALGO_SHA3_384,
+ CCP_AUTH_ALGO_SHA3_384_HMAC,
+ CCP_AUTH_ALGO_SHA512,
+ CCP_AUTH_ALGO_SHA512_HMAC,
+ CCP_AUTH_ALGO_SHA3_512,
+ CCP_AUTH_ALGO_SHA3_512_HMAC,
+ CCP_AUTH_ALGO_AES_CMAC,
+ CCP_AUTH_ALGO_AES_GCM,
+ CCP_AUTH_ALGO_MD5_HMAC,
+};
+
+/**
+ * CCP hash operation type
+ */
+enum ccp_hash_op {
+ CCP_AUTH_OP_GENERATE = 0,
+ CCP_AUTH_OP_VERIFY = 1,
+};
+
+/* CCP crypto private session structure */
+struct ccp_session {
+ bool auth_opt;
+ enum ccp_cmd_order cmd_id;
+ /**< chain order mode */
+ struct {
+ uint16_t length;
+ uint16_t offset;
+ } iv;
+ /**< IV parameters */
+ struct {
+ enum ccp_cipher_algo algo;
+ enum ccp_engine engine;
+ union {
+ enum ccp_aes_mode aes_mode;
+ enum ccp_des_mode des_mode;
+ } um;
+ union {
+ enum ccp_aes_type aes_type;
+ enum ccp_des_type des_type;
+ } ut;
+ enum ccp_cipher_dir dir;
+ uint64_t key_length;
+ /**< max cipher key size 256 bits */
+ uint8_t key[32];
+ /**< key in CCP (big-endian) format */
+ uint8_t key_ccp[32];
+ phys_addr_t key_phys;
+ /**< AES-CTR: nonce(4) || iv(8) || counter */
+ uint8_t nonce[32];
+ phys_addr_t nonce_phys;
+ } cipher;
+ /**< Cipher Parameters */
+
+ struct {
+ enum ccp_hash_algo algo;
+ enum ccp_engine engine;
+ union {
+ enum ccp_aes_mode aes_mode;
+ } um;
+ union {
+ enum ccp_sha_type sha_type;
+ enum ccp_aes_type aes_type;
+ } ut;
+ enum ccp_hash_op op;
+ uint64_t key_length;
+ /**< max hash key size 144 bytes (struct capabilities) */
+ uint8_t key[144];
+ /**< max big-endian key size of AES is 32 */
+ uint8_t key_ccp[32];
+ phys_addr_t key_phys;
+ uint64_t digest_length;
+ void *ctx;
+ int ctx_len;
+ int offset;
+ int block_size;
+ /**< Buffer to store software-generated precompute values */
+ /**< For HMAC: H(ipad ^ key) and H(opad ^ key) */
+ /**< For CMAC: K1 IV and K2 IV */
+ uint8_t pre_compute[2 * CCP_SHA3_CTX_SIZE];
+ /**< SHA3 initial ctx, all zeros */
+ uint8_t sha3_ctx[CCP_SHA3_CTX_SIZE];
+ int aad_length;
+ } auth;
+ /**< Authentication Parameters */
+ enum rte_crypto_aead_algorithm aead_algo;
+ /**< AEAD Algorithm */
+
+ uint32_t reserved;
+} __rte_cache_aligned;
+
+extern uint8_t ccp_cryptodev_driver_id;
+
+struct ccp_qp;
+struct ccp_private;
+
+/**
+ * Set and validate CCP crypto session parameters
+ *
+ * @param sess ccp private session
+ * @param xform crypto xform for this session
+ * @param internals ccp device private data
+ * @return 0 on success, negative value on failure
+ */
+int ccp_set_session_parameters(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform,
+ struct ccp_private *internals);
+
+/**
+ * Find the number of CCP descriptor slots required by a session
+ *
+ * @param session CCP private session
+ * @return number of descriptor slots needed per op
+ */
+int ccp_compute_slot_count(struct ccp_session *session);
+
+/**
+ * Process crypto ops to be enqueued
+ *
+ * @param qp CCP crypto queue-pair
+ * @param op crypto ops table
+ * @param cmd_q CCP cmd queue
+ * @param nb_ops number of ops to be submitted
+ * @param slots_req number of CCP descriptor slots required
+ * @return number of ops enqueued
+ */
+int process_ops_to_enqueue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ struct ccp_queue *cmd_q,
+ uint16_t nb_ops,
+ int slots_req);
+
+/**
+ * process crypto ops to be dequeued
+ *
+ * @param qp CCP crypto queue-pair
+ * @param op crypto ops table
+ * @param nb_ops requested no. of ops
+ * @return no. of ops dequeued
+ */
+int process_ops_to_dequeue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ uint16_t nb_ops);
+
+
+/**
+ * APIs for SHA3 partial hash generation
+ *
+ * @param data_in buffer on which the partial hash is computed
+ * @param data_out buffer to which the partial hash result is written,
+ * in CCP big-endian format
+ */
+int partial_hash_sha3_224(uint8_t *data_in,
+ uint8_t *data_out);
+
+int partial_hash_sha3_256(uint8_t *data_in,
+ uint8_t *data_out);
+
+int partial_hash_sha3_384(uint8_t *data_in,
+ uint8_t *data_out);
+
+int partial_hash_sha3_512(uint8_t *data_in,
+ uint8_t *data_out);
+
+#endif /* _CCP_CRYPTO_H_ */
diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
new file mode 100644
index 00000000..80fe6a45
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_dev.c
@@ -0,0 +1,810 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/queue.h>
+#include <sys/types.h>
+#include <sys/file.h>
+#include <unistd.h>
+
+#include <rte_hexdump.h>
+#include <rte_memzone.h>
+#include <rte_malloc.h>
+#include <rte_memory.h>
+#include <rte_spinlock.h>
+#include <rte_string_fns.h>
+
+#include "ccp_dev.h"
+#include "ccp_pci.h"
+#include "ccp_pmd_private.h"
+
+struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
+static int ccp_dev_id;
+
+int
+ccp_dev_start(struct rte_cryptodev *dev)
+{
+ struct ccp_private *priv = dev->data->dev_private;
+
+ priv->last_dev = TAILQ_FIRST(&ccp_list);
+ return 0;
+}
+
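+/* Allot a command queue in round-robin fashion: resume at the device
+ * after the one used last, then scan that device's queues for one with
+ * at least slot_req free slots.
+ */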
+struct ccp_queue *
+ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req)
+{
+ int i, ret = 0;
+ struct ccp_device *dev;
+ struct ccp_private *priv = cdev->data->dev_private;
+
+ dev = TAILQ_NEXT(priv->last_dev, next);
+ if (unlikely(dev == NULL))
+ dev = TAILQ_FIRST(&ccp_list);
+ priv->last_dev = dev;
+ if (dev->qidx >= dev->cmd_q_count)
+ dev->qidx = 0;
+ ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
+ if (ret >= slot_req)
+ return &dev->cmd_q[dev->qidx];
+ for (i = 0; i < dev->cmd_q_count; i++) {
+ dev->qidx++;
+ if (dev->qidx >= dev->cmd_q_count)
+ dev->qidx = 0;
+ ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
+ if (ret >= slot_req)
+ return &dev->cmd_q[dev->qidx];
+ }
+ return NULL;
+}
+
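+/* Read one 32-bit word from a device TRNG, retrying up to
+ * CCP_MAX_TRNG_RETRIES times per device; the TRNG output register
+ * reads as zero while no entropy is available.
+ */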
+int
+ccp_read_hwrng(uint32_t *value)
+{
+ struct ccp_device *dev;
+
+ TAILQ_FOREACH(dev, &ccp_list, next) {
+ void *vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+ while (dev->hwrng_retries++ < CCP_MAX_TRNG_RETRIES) {
+ *value = CCP_READ_REG(vaddr, TRNG_OUT_REG);
+ if (*value) {
+ dev->hwrng_retries = 0;
+ return 0;
+ }
+ }
+ dev->hwrng_retries = 0;
+ }
+ return -1;
+}
+
+static const struct rte_memzone *
+ccp_queue_dma_zone_reserve(const char *queue_name,
+ uint32_t queue_size,
+ int socket_id)
+{
+ const struct rte_memzone *mz;
+
+ mz = rte_memzone_lookup(queue_name);
+	if (mz != NULL) {
+ if (((size_t)queue_size <= mz->len) &&
+ ((socket_id == SOCKET_ID_ANY) ||
+ (socket_id == mz->socket_id))) {
+ CCP_LOG_INFO("re-use memzone already "
+ "allocated for %s", queue_name);
+ return mz;
+ }
+ CCP_LOG_ERR("Incompatible memzone already "
+ "allocated %s, size %u, socket %d. "
+ "Requested size %u, socket %u",
+ queue_name, (uint32_t)mz->len,
+ mz->socket_id, queue_size, socket_id);
+ return NULL;
+ }
+
+ CCP_LOG_INFO("Allocate memzone for %s, size %u on socket %u",
+ queue_name, queue_size, socket_id);
+
+ return rte_memzone_reserve_aligned(queue_name, queue_size,
+ socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
+}
+
+/* bitmap support apis */
+static inline void
+ccp_set_bit(unsigned long *bitmap, int n)
+{
+ __sync_fetch_and_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n)));
+}
+
+static inline void
+ccp_clear_bit(unsigned long *bitmap, int n)
+{
+ __sync_fetch_and_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n)));
+}
+
+static inline uint32_t
+ccp_get_bit(unsigned long *bitmap, int n)
+{
+	return ((bitmap[WORD_OFFSET(n)] & (1UL << BIT_OFFSET(n))) != 0);
+}
+
+
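+/* Find the index of the first zero bit in a word, e.g.
+ * ccp_ffz(0x7) == 3; returns BITS_PER_WORD for an all-ones word.
+ */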
+static inline uint32_t
+ccp_ffz(unsigned long word)
+{
+ unsigned long first_zero;
+
+ first_zero = __builtin_ffsl(~word);
+ return first_zero ? (first_zero - 1) :
+ BITS_PER_WORD;
+}
+
+static inline uint32_t
+ccp_find_first_zero_bit(unsigned long *addr, uint32_t limit)
+{
+ uint32_t i;
+ uint32_t nwords = 0;
+
+ nwords = (limit - 1) / BITS_PER_WORD + 1;
+ for (i = 0; i < nwords; i++) {
+ if (addr[i] == 0UL)
+ return i * BITS_PER_WORD;
+ if (addr[i] < ~(0UL))
+ break;
+ }
+ return (i == nwords) ? limit : i * BITS_PER_WORD + ccp_ffz(addr[i]);
+}
+
+static void
+ccp_bitmap_set(unsigned long *map, unsigned int start, int len)
+{
+ unsigned long *p = map + WORD_OFFSET(start);
+ const unsigned int size = start + len;
+ int bits_to_set = BITS_PER_WORD - (start % BITS_PER_WORD);
+ unsigned long mask_to_set = CCP_BITMAP_FIRST_WORD_MASK(start);
+
+ while (len - bits_to_set >= 0) {
+ *p |= mask_to_set;
+ len -= bits_to_set;
+ bits_to_set = BITS_PER_WORD;
+ mask_to_set = ~0UL;
+ p++;
+ }
+ if (len) {
+ mask_to_set &= CCP_BITMAP_LAST_WORD_MASK(size);
+ *p |= mask_to_set;
+ }
+}
+
+static void
+ccp_bitmap_clear(unsigned long *map, unsigned int start, int len)
+{
+ unsigned long *p = map + WORD_OFFSET(start);
+ const unsigned int size = start + len;
+ int bits_to_clear = BITS_PER_WORD - (start % BITS_PER_WORD);
+ unsigned long mask_to_clear = CCP_BITMAP_FIRST_WORD_MASK(start);
+
+ while (len - bits_to_clear >= 0) {
+ *p &= ~mask_to_clear;
+ len -= bits_to_clear;
+ bits_to_clear = BITS_PER_WORD;
+ mask_to_clear = ~0UL;
+ p++;
+ }
+ if (len) {
+ mask_to_clear &= CCP_BITMAP_LAST_WORD_MASK(size);
+ *p &= ~mask_to_clear;
+ }
+}
+
+
+static unsigned long
+_ccp_find_next_bit(const unsigned long *addr,
+ unsigned long nbits,
+ unsigned long start,
+ unsigned long invert)
+{
+ unsigned long tmp;
+
+ if (!nbits || start >= nbits)
+ return nbits;
+
+ tmp = addr[start / BITS_PER_WORD] ^ invert;
+
+ /* Handle 1st word. */
+ tmp &= CCP_BITMAP_FIRST_WORD_MASK(start);
+ start = ccp_round_down(start, BITS_PER_WORD);
+
+ while (!tmp) {
+ start += BITS_PER_WORD;
+ if (start >= nbits)
+ return nbits;
+
+ tmp = addr[start / BITS_PER_WORD] ^ invert;
+ }
+
+	/* ffsl, not ffs: tmp is an unsigned long and may only have bits
+	 * set above bit 31
+	 */
+	return RTE_MIN(start + (__builtin_ffsl(tmp) - 1), nbits);
+}
+
+static unsigned long
+ccp_find_next_bit(const unsigned long *addr,
+ unsigned long size,
+ unsigned long offset)
+{
+ return _ccp_find_next_bit(addr, size, offset, 0UL);
+}
+
+static unsigned long
+ccp_find_next_zero_bit(const unsigned long *addr,
+ unsigned long size,
+ unsigned long offset)
+{
+ return _ccp_find_next_bit(addr, size, offset, ~0UL);
+}
+
+/**
+ * bitmap_find_next_zero_area - find a contiguous aligned zero area
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ */
+static unsigned long
+ccp_bitmap_find_next_zero_area(unsigned long *map,
+ unsigned long size,
+ unsigned long start,
+ unsigned int nr)
+{
+ unsigned long index, end, i;
+
+again:
+ index = ccp_find_next_zero_bit(map, size, start);
+
+ end = index + nr;
+ if (end > size)
+ return end;
+ i = ccp_find_next_bit(map, end, index);
+ if (i < end) {
+ start = i + 1;
+ goto again;
+ }
+ return index;
+}
+
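+/* Allocate 'count' contiguous LSB entries, first from the queue's
+ * private LSB region and then, under lsb_lock, from the regions shared
+ * across the device; returns 0 when nothing is available.
+ */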
+static uint32_t
+ccp_lsb_alloc(struct ccp_queue *cmd_q, unsigned int count)
+{
+ struct ccp_device *ccp;
+ int start;
+
+ /* First look at the map for the queue */
+ if (cmd_q->lsb >= 0) {
+ start = (uint32_t)ccp_bitmap_find_next_zero_area(cmd_q->lsbmap,
+ LSB_SIZE, 0,
+ count);
+ if (start < LSB_SIZE) {
+ ccp_bitmap_set(cmd_q->lsbmap, start, count);
+ return start + cmd_q->lsb * LSB_SIZE;
+ }
+ }
+
+ /* try to get an entry from the shared blocks */
+ ccp = cmd_q->dev;
+
+ rte_spinlock_lock(&ccp->lsb_lock);
+
+ start = (uint32_t)ccp_bitmap_find_next_zero_area(ccp->lsbmap,
+ MAX_LSB_CNT * LSB_SIZE,
+ 0, count);
+ if (start <= MAX_LSB_CNT * LSB_SIZE) {
+ ccp_bitmap_set(ccp->lsbmap, start, count);
+ rte_spinlock_unlock(&ccp->lsb_lock);
+ return start * LSB_ITEM_SIZE;
+ }
+ CCP_LOG_ERR("NO LSBs available");
+
+ rte_spinlock_unlock(&ccp->lsb_lock);
+
+ return 0;
+}
+
+static void __rte_unused
+ccp_lsb_free(struct ccp_queue *cmd_q,
+ unsigned int start,
+ unsigned int count)
+{
+ int lsbno = start / LSB_SIZE;
+
+ if (!start)
+ return;
+
+ if (cmd_q->lsb == lsbno) {
+ /* An entry from the private LSB */
+ ccp_bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
+ } else {
+ /* From the shared LSBs */
+ struct ccp_device *ccp = cmd_q->dev;
+
+ rte_spinlock_lock(&ccp->lsb_lock);
+ ccp_bitmap_clear(ccp->lsbmap, start, count);
+ rte_spinlock_unlock(&ccp->lsb_lock);
+ }
+}
+
+static int
+ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status)
+{
+ int q_mask = 1 << cmd_q->id;
+ int weight = 0;
+ int j;
+
+	/* Build a bit mask to know which LSBs this queue has access to.
+	 * Don't bother with segment 0 as it has special privileges.
+	 */
+ cmd_q->lsbmask = 0;
+ status >>= LSB_REGION_WIDTH;
+ for (j = 1; j < MAX_LSB_CNT; j++) {
+ if (status & q_mask)
+ ccp_set_bit(&cmd_q->lsbmask, j);
+
+ status >>= LSB_REGION_WIDTH;
+ }
+
+ for (j = 0; j < MAX_LSB_CNT; j++)
+ if (ccp_get_bit(&cmd_q->lsbmask, j))
+ weight++;
+
+ printf("Queue %d can access %d LSB regions of mask %lu\n",
+ (int)cmd_q->id, weight, cmd_q->lsbmask);
+
+ return weight ? 0 : -EINVAL;
+}
+
+static int
+ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
+ int lsb_cnt, int n_lsbs,
+ unsigned long *lsb_pub)
+{
+ unsigned long qlsb = 0;
+ int bitno = 0;
+ int qlsb_wgt = 0;
+ int i, j;
+
+ /* For each queue:
+ * If the count of potential LSBs available to a queue matches the
+ * ordinal given to us in lsb_cnt:
+ * Copy the mask of possible LSBs for this queue into "qlsb";
+ * For each bit in qlsb, see if the corresponding bit in the
+ * aggregation mask is set; if so, we have a match.
+ * If we have a match, clear the bit in the aggregation to
+ * mark it as no longer available.
+ * If there is no match, clear the bit in qlsb and keep looking.
+ */
+ for (i = 0; i < ccp->cmd_q_count; i++) {
+ struct ccp_queue *cmd_q = &ccp->cmd_q[i];
+
+ qlsb_wgt = 0;
+ for (j = 0; j < MAX_LSB_CNT; j++)
+ if (ccp_get_bit(&cmd_q->lsbmask, j))
+ qlsb_wgt++;
+
+ if (qlsb_wgt == lsb_cnt) {
+ qlsb = cmd_q->lsbmask;
+
+ bitno = ffs(qlsb) - 1;
+ while (bitno < MAX_LSB_CNT) {
+ if (ccp_get_bit(lsb_pub, bitno)) {
+ /* We found an available LSB
+ * that this queue can access
+ */
+ cmd_q->lsb = bitno;
+ ccp_clear_bit(lsb_pub, bitno);
+ break;
+ }
+ ccp_clear_bit(&qlsb, bitno);
+ bitno = ffs(qlsb) - 1;
+ }
+ if (bitno >= MAX_LSB_CNT)
+ return -EINVAL;
+ n_lsbs--;
+ }
+ }
+ return n_lsbs;
+}
+
+/* For each queue, from the most- to least-constrained:
+ * find an LSB that can be assigned to the queue. If there are N queues that
+ * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
+ * dedicated LSB. Remaining LSB regions become a shared resource.
+ * If we have fewer LSBs than queues, all LSB regions become shared
+ * resources.
+ */
+static int
+ccp_assign_lsbs(struct ccp_device *ccp)
+{
+ unsigned long lsb_pub = 0, qlsb = 0;
+ int n_lsbs = 0;
+ int bitno;
+ int i, lsb_cnt;
+ int rc = 0;
+
+ rte_spinlock_init(&ccp->lsb_lock);
+
+ /* Create an aggregate bitmap to get a total count of available LSBs */
+ for (i = 0; i < ccp->cmd_q_count; i++)
+ lsb_pub |= ccp->cmd_q[i].lsbmask;
+
+ for (i = 0; i < MAX_LSB_CNT; i++)
+ if (ccp_get_bit(&lsb_pub, i))
+ n_lsbs++;
+
+ if (n_lsbs >= ccp->cmd_q_count) {
+ /* We have enough LSBS to give every queue a private LSB.
+ * Brute force search to start with the queues that are more
+ * constrained in LSB choice. When an LSB is privately
+ * assigned, it is removed from the public mask.
+ * This is an ugly N squared algorithm with some optimization.
+ */
+ for (lsb_cnt = 1; n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
+ lsb_cnt++) {
+ rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
+ &lsb_pub);
+ if (rc < 0)
+ return -EINVAL;
+ n_lsbs = rc;
+ }
+ }
+
+ rc = 0;
+ /* What's left of the LSBs, according to the public mask, now become
+ * shared. Any zero bits in the lsb_pub mask represent an LSB region
+ * that can't be used as a shared resource, so mark the LSB slots for
+ * them as "in use".
+ */
+ qlsb = lsb_pub;
+ bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
+ while (bitno < MAX_LSB_CNT) {
+ ccp_bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
+ ccp_set_bit(&qlsb, bitno);
+ bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
+ }
+
+ return rc;
+}
+
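+/* One-time device init: program the global configuration registers
+ * (version 5B parts need extra TRNG and queue setup), publish the
+ * private LSB mask, bring up each command queue advertised in
+ * Q_MASK_REG, assign LSB regions and pre-allocate the per-queue LSB
+ * slots used for keys, IVs and hash contexts.
+ */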
+static int
+ccp_add_device(struct ccp_device *dev, int type)
+{
+ int i;
+ uint32_t qmr, status_lo, status_hi, dma_addr_lo, dma_addr_hi;
+ uint64_t status;
+ struct ccp_queue *cmd_q;
+ const struct rte_memzone *q_mz;
+ void *vaddr;
+
+ if (dev == NULL)
+ return -1;
+
+ dev->id = ccp_dev_id++;
+ dev->qidx = 0;
+ vaddr = (void *)(dev->pci.mem_resource[2].addr);
+
+ if (type == CCP_VERSION_5B) {
+ CCP_WRITE_REG(vaddr, CMD_TRNG_CTL_OFFSET, 0x00012D57);
+ CCP_WRITE_REG(vaddr, CMD_CONFIG_0_OFFSET, 0x00000003);
+ for (i = 0; i < 12; i++) {
+ CCP_WRITE_REG(vaddr, CMD_AES_MASK_OFFSET,
+ CCP_READ_REG(vaddr, TRNG_OUT_REG));
+ }
+ CCP_WRITE_REG(vaddr, CMD_QUEUE_MASK_OFFSET, 0x0000001F);
+ CCP_WRITE_REG(vaddr, CMD_QUEUE_PRIO_OFFSET, 0x00005B6D);
+ CCP_WRITE_REG(vaddr, CMD_CMD_TIMEOUT_OFFSET, 0x00000000);
+
+ CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET, 0x3FFFFFFF);
+ CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET, 0x000003FF);
+
+ CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823);
+ }
+ CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x00001249);
+
+ /* Copy the private LSB mask to the public registers */
+ status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET);
+ status_hi = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET);
+ CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_LO_OFFSET, status_lo);
+ CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_HI_OFFSET, status_hi);
+	status = ((uint64_t)status_hi << 30) | ((uint64_t)status_lo);
+
+ dev->cmd_q_count = 0;
+ /* Find available queues */
+ qmr = CCP_READ_REG(vaddr, Q_MASK_REG);
+ for (i = 0; i < MAX_HW_QUEUES; i++) {
+ if (!(qmr & (1 << i)))
+ continue;
+ cmd_q = &dev->cmd_q[dev->cmd_q_count++];
+ cmd_q->dev = dev;
+ cmd_q->id = i;
+ cmd_q->qidx = 0;
+ cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
+
+ cmd_q->reg_base = (uint8_t *)vaddr +
+ CMD_Q_STATUS_INCR * (i + 1);
+
+ /* CCP queue memory */
+ snprintf(cmd_q->memz_name, sizeof(cmd_q->memz_name),
+ "%s_%d_%s_%d_%s",
+ "ccp_dev",
+ (int)dev->id, "queue",
+ (int)cmd_q->id, "mem");
+		q_mz = ccp_queue_dma_zone_reserve(cmd_q->memz_name,
+				cmd_q->qsize, SOCKET_ID_ANY);
+		if (q_mz == NULL) {
+			CCP_LOG_ERR("memzone reserve failed for %s",
+				    cmd_q->memz_name);
+			return -1;
+		}
+		cmd_q->qbase_addr = (void *)q_mz->addr;
+ cmd_q->qbase_desc = (void *)q_mz->addr;
+ cmd_q->qbase_phys_addr = q_mz->phys_addr;
+
+ cmd_q->qcontrol = 0;
+ /* init control reg to zero */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol);
+
+ /* Disable the interrupts */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INT_ENABLE_BASE, 0x00);
+ CCP_READ_REG(cmd_q->reg_base, CMD_Q_INT_STATUS_BASE);
+ CCP_READ_REG(cmd_q->reg_base, CMD_Q_STATUS_BASE);
+
+ /* Clear the interrupts */
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INTERRUPT_STATUS_BASE,
+ ALL_INTERRUPTS);
+
+ /* Configure size of each virtual queue accessible to host */
+ cmd_q->qcontrol &= ~(CMD_Q_SIZE << CMD_Q_SHIFT);
+ cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD_Q_SHIFT;
+
+ dma_addr_lo = low32_value(cmd_q->qbase_phys_addr);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
+ (uint32_t)dma_addr_lo);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_HEAD_LO_BASE,
+ (uint32_t)dma_addr_lo);
+
+ dma_addr_hi = high32_value(cmd_q->qbase_phys_addr);
+ cmd_q->qcontrol |= (dma_addr_hi << 16);
+ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
+ cmd_q->qcontrol);
+
+ /* create LSB Mask map */
+ if (ccp_find_lsb_regions(cmd_q, status))
+ CCP_LOG_ERR("queue doesn't have lsb regions");
+ cmd_q->lsb = -1;
+
+ rte_atomic64_init(&cmd_q->free_slots);
+ rte_atomic64_set(&cmd_q->free_slots, (COMMANDS_PER_QUEUE - 1));
+		/* one slot left unused as a barrier between head and tail */
+ }
+
+ if (ccp_assign_lsbs(dev))
+ CCP_LOG_ERR("Unable to assign lsb region");
+
+ /* pre-allocate LSB slots */
+ for (i = 0; i < dev->cmd_q_count; i++) {
+ dev->cmd_q[i].sb_key =
+ ccp_lsb_alloc(&dev->cmd_q[i], 1);
+ dev->cmd_q[i].sb_iv =
+ ccp_lsb_alloc(&dev->cmd_q[i], 1);
+ dev->cmd_q[i].sb_sha =
+ ccp_lsb_alloc(&dev->cmd_q[i], 2);
+ dev->cmd_q[i].sb_hmac =
+ ccp_lsb_alloc(&dev->cmd_q[i], 2);
+ }
+
+ TAILQ_INSERT_TAIL(&ccp_list, dev, next);
+ return 0;
+}
+
+static void
+ccp_remove_device(struct ccp_device *dev)
+{
+ if (dev == NULL)
+ return;
+
+ TAILQ_REMOVE(&ccp_list, dev, next);
+}
+
+static int
+is_ccp_device(const char *dirname,
+ const struct rte_pci_id *ccp_id,
+ int *type)
+{
+ char filename[PATH_MAX];
+ const struct rte_pci_id *id;
+ uint16_t vendor, device_id;
+ int i;
+ unsigned long tmp;
+
+ /* get vendor id */
+ snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ return 0;
+ vendor = (uint16_t)tmp;
+
+ /* get device id */
+ snprintf(filename, sizeof(filename), "%s/device", dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ return 0;
+ device_id = (uint16_t)tmp;
+
+ for (id = ccp_id, i = 0; id->vendor_id != 0; id++, i++) {
+ if (vendor == id->vendor_id &&
+ device_id == id->device_id) {
+ *type = i;
+ return 1; /* Matched device */
+ }
+ }
+ return 0;
+}
+
+static int
+ccp_probe_device(const char *dirname, uint16_t domain,
+ uint8_t bus, uint8_t devid,
+ uint8_t function, int ccp_type)
+{
+ struct ccp_device *ccp_dev = NULL;
+ struct rte_pci_device *pci;
+ char filename[PATH_MAX];
+ unsigned long tmp;
+ int uio_fd = -1, i, uio_num;
+ char uio_devname[PATH_MAX];
+ void *map_addr;
+
+ ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
+ RTE_CACHE_LINE_SIZE);
+ if (ccp_dev == NULL)
+ goto fail;
+ pci = &(ccp_dev->pci);
+
+ pci->addr.domain = domain;
+ pci->addr.bus = bus;
+ pci->addr.devid = devid;
+ pci->addr.function = function;
+
+ /* get vendor id */
+ snprintf(filename, sizeof(filename), "%s/vendor", dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+ pci->id.vendor_id = (uint16_t)tmp;
+
+ /* get device id */
+ snprintf(filename, sizeof(filename), "%s/device", dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+ pci->id.device_id = (uint16_t)tmp;
+
+ /* get subsystem_vendor id */
+ snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
+ dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+ pci->id.subsystem_vendor_id = (uint16_t)tmp;
+
+ /* get subsystem_device id */
+ snprintf(filename, sizeof(filename), "%s/subsystem_device",
+ dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+ pci->id.subsystem_device_id = (uint16_t)tmp;
+
+ /* get class_id */
+ snprintf(filename, sizeof(filename), "%s/class",
+ dirname);
+ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
+ goto fail;
+	/* only the least significant 24 bits are valid:
+	 * class, subclass, program interface
+	 */
+ pci->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;
+
+ /* parse resources */
+ snprintf(filename, sizeof(filename), "%s/resource", dirname);
+ if (ccp_pci_parse_sysfs_resource(filename, pci) < 0)
+ goto fail;
+
+ uio_num = ccp_find_uio_devname(dirname);
+ if (uio_num < 0) {
+ /*
+ * It may take time for uio device to appear,
+ * wait here and try again
+ */
+ usleep(100000);
+ uio_num = ccp_find_uio_devname(dirname);
+ if (uio_num < 0)
+ goto fail;
+ }
+ snprintf(uio_devname, sizeof(uio_devname), "/dev/uio%u", uio_num);
+
+ uio_fd = open(uio_devname, O_RDWR | O_NONBLOCK);
+ if (uio_fd < 0)
+ goto fail;
+ if (flock(uio_fd, LOCK_EX | LOCK_NB))
+ goto fail;
+
+ /* Map the PCI memory resource of device */
+ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+
+ char devname[PATH_MAX];
+ int res_fd;
+
+ if (pci->mem_resource[i].phys_addr == 0)
+ continue;
+ snprintf(devname, sizeof(devname), "%s/resource%d", dirname, i);
+ res_fd = open(devname, O_RDWR);
+ if (res_fd < 0)
+ goto fail;
+		map_addr = mmap(NULL, pci->mem_resource[i].len,
+				PROT_READ | PROT_WRITE,
+				MAP_SHARED, res_fd, 0);
+		if (map_addr == MAP_FAILED) {
+			close(res_fd);
+			goto fail;
+		}
+		/* the mapping stays valid after the fd is closed */
+		close(res_fd);
+
+		pci->mem_resource[i].addr = map_addr;
+ }
+
+ /* device is valid, add in list */
+ if (ccp_add_device(ccp_dev, ccp_type)) {
+ ccp_remove_device(ccp_dev);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ CCP_LOG_ERR("CCP Device probe failed");
+ if (uio_fd > 0)
+	if (uio_fd >= 0)
+ if (ccp_dev)
+ rte_free(ccp_dev);
+ return -1;
+}
+
+int
+ccp_probe_devices(const struct rte_pci_id *ccp_id)
+{
+ int dev_cnt = 0;
+ int ccp_type = 0;
+ struct dirent *d;
+ DIR *dir;
+ int ret = 0;
+ int module_idx = 0;
+ uint16_t domain;
+ uint8_t bus, devid, function;
+ char dirname[PATH_MAX];
+
+ module_idx = ccp_check_pci_uio_module();
+ if (module_idx < 0)
+ return -1;
+
+ TAILQ_INIT(&ccp_list);
+ dir = opendir(SYSFS_PCI_DEVICES);
+ if (dir == NULL)
+ return -1;
+ while ((d = readdir(dir)) != NULL) {
+ if (d->d_name[0] == '.')
+ continue;
+ if (ccp_parse_pci_addr_format(d->d_name, sizeof(d->d_name),
+ &domain, &bus, &devid, &function) != 0)
+ continue;
+ snprintf(dirname, sizeof(dirname), "%s/%s",
+ SYSFS_PCI_DEVICES, d->d_name);
+ if (is_ccp_device(dirname, ccp_id, &ccp_type)) {
+ printf("CCP : Detected CCP device with ID = 0x%x\n",
+ ccp_id[ccp_type].device_id);
+ ret = ccp_probe_device(dirname, domain, bus, devid,
+ function, ccp_type);
+ if (ret == 0)
+ dev_cnt++;
+ }
+ }
+ closedir(dir);
+ return dev_cnt;
+}
diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
new file mode 100644
index 00000000..de3e4bcc
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_dev.h
@@ -0,0 +1,495 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_DEV_H_
+#define _CCP_DEV_H_
+
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+
+#include <rte_bus_pci.h>
+#include <rte_atomic.h>
+#include <rte_byteorder.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_spinlock.h>
+#include <rte_crypto_sym.h>
+#include <rte_cryptodev.h>
+
+/**< CCP specific */
+#define MAX_HW_QUEUES 5
+#define CCP_MAX_TRNG_RETRIES 10
+#define CCP_ALIGN(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
+
+/**< CCP Register Mappings */
+#define Q_MASK_REG 0x000
+#define TRNG_OUT_REG 0x00c
+
+/* CCP Version 5 Specifics */
+#define CMD_QUEUE_MASK_OFFSET 0x00
+#define CMD_QUEUE_PRIO_OFFSET 0x04
+#define CMD_REQID_CONFIG_OFFSET 0x08
+#define CMD_CMD_TIMEOUT_OFFSET 0x10
+#define LSB_PUBLIC_MASK_LO_OFFSET 0x18
+#define LSB_PUBLIC_MASK_HI_OFFSET 0x1C
+#define LSB_PRIVATE_MASK_LO_OFFSET 0x20
+#define LSB_PRIVATE_MASK_HI_OFFSET 0x24
+
+#define CMD_Q_CONTROL_BASE 0x0000
+#define CMD_Q_TAIL_LO_BASE 0x0004
+#define CMD_Q_HEAD_LO_BASE 0x0008
+#define CMD_Q_INT_ENABLE_BASE 0x000C
+#define CMD_Q_INTERRUPT_STATUS_BASE 0x0010
+
+#define CMD_Q_STATUS_BASE 0x0100
+#define CMD_Q_INT_STATUS_BASE 0x0104
+
+#define CMD_CONFIG_0_OFFSET 0x6000
+#define CMD_TRNG_CTL_OFFSET 0x6008
+#define CMD_AES_MASK_OFFSET 0x6010
+#define CMD_CLK_GATE_CTL_OFFSET 0x603C
+
+/* Address offset between two virtual queue registers */
+#define CMD_Q_STATUS_INCR 0x1000
+
+/* Bit masks */
+#define CMD_Q_RUN 0x1
+#define CMD_Q_SIZE 0x1F
+#define CMD_Q_SHIFT 3
+#define COMMANDS_PER_QUEUE 2048
+
+#define QUEUE_SIZE_VAL ((ffs(COMMANDS_PER_QUEUE) - 2) & \
+ CMD_Q_SIZE)
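+/* e.g. with COMMANDS_PER_QUEUE = 2048, ffs(2048) - 2 = 10, i.e. the
+ * hardware queue size field encodes log2(entries) - 1
+ */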
+#define Q_DESC_SIZE sizeof(struct ccp_desc)
+#define Q_SIZE(n) (COMMANDS_PER_QUEUE*(n))
+
+#define INT_COMPLETION 0x1
+#define INT_ERROR 0x2
+#define INT_QUEUE_STOPPED 0x4
+#define ALL_INTERRUPTS (INT_COMPLETION| \
+ INT_ERROR| \
+ INT_QUEUE_STOPPED)
+
+#define LSB_REGION_WIDTH 5
+#define MAX_LSB_CNT 8
+
+#define LSB_SIZE 16
+#define LSB_ITEM_SIZE 32
+#define SLSB_MAP_SIZE (MAX_LSB_CNT * LSB_SIZE)
+#define LSB_ENTRY_NUMBER(LSB_ADDR) (LSB_ADDR / LSB_ITEM_SIZE)
+
+/* General CCP Defines */
+
+#define CCP_SB_BYTES 32
+/* Word 0 */
+#define CCP_CMD_DW0(p) ((p)->dw0)
+#define CCP_CMD_SOC(p) (CCP_CMD_DW0(p).soc)
+#define CCP_CMD_IOC(p) (CCP_CMD_DW0(p).ioc)
+#define CCP_CMD_INIT(p) (CCP_CMD_DW0(p).init)
+#define CCP_CMD_EOM(p) (CCP_CMD_DW0(p).eom)
+#define CCP_CMD_FUNCTION(p) (CCP_CMD_DW0(p).function)
+#define CCP_CMD_ENGINE(p) (CCP_CMD_DW0(p).engine)
+#define CCP_CMD_PROT(p) (CCP_CMD_DW0(p).prot)
+
+/* Word 1 */
+#define CCP_CMD_DW1(p) ((p)->length)
+#define CCP_CMD_LEN(p) (CCP_CMD_DW1(p))
+
+/* Word 2 */
+#define CCP_CMD_DW2(p) ((p)->src_lo)
+#define CCP_CMD_SRC_LO(p) (CCP_CMD_DW2(p))
+
+/* Word 3 */
+#define CCP_CMD_DW3(p) ((p)->dw3)
+#define CCP_CMD_SRC_MEM(p) ((p)->dw3.src_mem)
+#define CCP_CMD_SRC_HI(p) ((p)->dw3.src_hi)
+#define CCP_CMD_LSB_ID(p) ((p)->dw3.lsb_cxt_id)
+#define CCP_CMD_FIX_SRC(p) ((p)->dw3.fixed)
+
+/* Words 4/5 */
+#define CCP_CMD_DW4(p) ((p)->dw4)
+#define CCP_CMD_DST_LO(p) (CCP_CMD_DW4(p).dst_lo)
+#define CCP_CMD_DW5(p) ((p)->dw5.fields.dst_hi)
+#define CCP_CMD_DST_HI(p) (CCP_CMD_DW5(p))
+#define CCP_CMD_DST_MEM(p) ((p)->dw5.fields.dst_mem)
+#define CCP_CMD_FIX_DST(p) ((p)->dw5.fields.fixed)
+#define CCP_CMD_SHA_LO(p) ((p)->dw4.sha_len_lo)
+#define CCP_CMD_SHA_HI(p) ((p)->dw5.sha_len_hi)
+
+/* Word 6/7 */
+#define CCP_CMD_DW6(p) ((p)->key_lo)
+#define CCP_CMD_KEY_LO(p) (CCP_CMD_DW6(p))
+#define CCP_CMD_DW7(p) ((p)->dw7)
+#define CCP_CMD_KEY_HI(p) ((p)->dw7.key_hi)
+#define CCP_CMD_KEY_MEM(p) ((p)->dw7.key_mem)
+
+/* bitmap */
+enum {
+ BITS_PER_WORD = sizeof(unsigned long) * CHAR_BIT
+};
+
+#define WORD_OFFSET(b) ((b) / BITS_PER_WORD)
+#define BIT_OFFSET(b) ((b) % BITS_PER_WORD)
+
+#define CCP_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#define CCP_BITMAP_SIZE(nr) \
+ CCP_DIV_ROUND_UP(nr, CHAR_BIT * sizeof(unsigned long))
+
+#define CCP_BITMAP_FIRST_WORD_MASK(start) \
+ (~0UL << ((start) & (BITS_PER_WORD - 1)))
+#define CCP_BITMAP_LAST_WORD_MASK(nbits) \
+ (~0UL >> (-(nbits) & (BITS_PER_WORD - 1)))
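+/* e.g. with 64-bit words, CCP_BITMAP_FIRST_WORD_MASK(3) keeps bits
+ * 3..63 and CCP_BITMAP_LAST_WORD_MASK(3) keeps bits 0..2
+ */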
+
+#define __ccp_round_mask(x, y) ((typeof(x))((y)-1))
+#define ccp_round_down(x, y) ((x) & ~__ccp_round_mask(x, y))
+
+/** CCP registers Write/Read */
+
+static inline void ccp_pci_reg_write(void *base, int offset,
+ uint32_t value)
+{
+ volatile void *reg_addr = ((uint8_t *)base + offset);
+
+ rte_write32((rte_cpu_to_le_32(value)), reg_addr);
+}
+
+static inline uint32_t ccp_pci_reg_read(void *base, int offset)
+{
+ volatile void *reg_addr = ((uint8_t *)base + offset);
+
+ return rte_le_to_cpu_32(rte_read32(reg_addr));
+}
+
+#define CCP_READ_REG(hw_addr, reg_offset) \
+ ccp_pci_reg_read(hw_addr, reg_offset)
+
+#define CCP_WRITE_REG(hw_addr, reg_offset, value) \
+ ccp_pci_reg_write(hw_addr, reg_offset, value)
+
+TAILQ_HEAD(ccp_list, ccp_device);
+
+extern struct ccp_list ccp_list;
+
+/**
+ * CCP device version
+ */
+enum ccp_device_version {
+ CCP_VERSION_5A = 0,
+ CCP_VERSION_5B,
+};
+
+/**
+ * A structure describing a CCP command queue.
+ */
+struct ccp_queue {
+ struct ccp_device *dev;
+ char memz_name[RTE_MEMZONE_NAMESIZE];
+
+ rte_atomic64_t free_slots;
+ /**< available free slots updated from enq/deq calls */
+
+ /* Queue identifier */
+ uint64_t id; /**< queue id */
+ uint64_t qidx; /**< queue index */
+ uint64_t qsize; /**< queue size */
+
+ /* Queue address */
+ struct ccp_desc *qbase_desc;
+ void *qbase_addr;
+ phys_addr_t qbase_phys_addr;
+ /**< queue-page registers addr */
+ void *reg_base;
+
+ uint32_t qcontrol;
+ /**< queue ctrl reg */
+
+ int lsb;
+ /**< lsb region assigned to queue */
+ unsigned long lsbmask;
+ /**< lsb regions queue can access */
+ unsigned long lsbmap[CCP_BITMAP_SIZE(LSB_SIZE)];
+ /**< all lsb resources which queue is using */
+ uint32_t sb_key;
+ /**< lsb assigned for queue */
+ uint32_t sb_iv;
+ /**< lsb assigned for iv */
+ uint32_t sb_sha;
+ /**< lsb assigned for sha ctx */
+ uint32_t sb_hmac;
+ /**< lsb assigned for hmac ctx */
+} __rte_cache_aligned;
+
+/**
+ * A structure describing a CCP device.
+ */
+struct ccp_device {
+ TAILQ_ENTRY(ccp_device) next;
+ int id;
+ /**< ccp dev id on platform */
+ struct ccp_queue cmd_q[MAX_HW_QUEUES];
+ /**< ccp queue */
+ int cmd_q_count;
+ /**< no. of ccp Queues */
+ struct rte_pci_device pci;
+ /**< ccp pci identifier */
+ unsigned long lsbmap[CCP_BITMAP_SIZE(SLSB_MAP_SIZE)];
+ /**< shared lsb mask of ccp */
+ rte_spinlock_t lsb_lock;
+ /**< protection for shared lsb region allocation */
+ int qidx;
+ /**< current queue index */
+ int hwrng_retries;
+ /**< retry counter for CCP TRNG */
+} __rte_cache_aligned;
+
+/**< CCP H/W engine related */
+/**
+ * ccp_engine - CCP operation identifiers
+ *
+ * @CCP_ENGINE_AES: AES operation
+ * @CCP_ENGINE_XTS_AES_128: 128-bit XTS AES operation
+ * @CCP_ENGINE_3DES: DES/3DES operation
+ * @CCP_ENGINE_SHA: SHA operation
+ * @CCP_ENGINE_RSA: RSA operation
+ * @CCP_ENGINE_PASSTHRU: pass-through operation
+ * @CCP_ENGINE_ZLIB_DECOMPRESS: unused
+ * @CCP_ENGINE_ECC: ECC operation
+ */
+enum ccp_engine {
+ CCP_ENGINE_AES = 0,
+ CCP_ENGINE_XTS_AES_128,
+ CCP_ENGINE_3DES,
+ CCP_ENGINE_SHA,
+ CCP_ENGINE_RSA,
+ CCP_ENGINE_PASSTHRU,
+ CCP_ENGINE_ZLIB_DECOMPRESS,
+ CCP_ENGINE_ECC,
+ CCP_ENGINE__LAST,
+};
+
+/* Passthru engine */
+/**
+ * ccp_passthru_bitwise - type of bitwise passthru operation
+ *
+ * @CCP_PASSTHRU_BITWISE_NOOP: no bitwise operation performed
+ * @CCP_PASSTHRU_BITWISE_AND: perform bitwise AND of src with mask
+ * @CCP_PASSTHRU_BITWISE_OR: perform bitwise OR of src with mask
+ * @CCP_PASSTHRU_BITWISE_XOR: perform bitwise XOR of src with mask
+ * @CCP_PASSTHRU_BITWISE_MASK: overwrite with mask
+ */
+enum ccp_passthru_bitwise {
+ CCP_PASSTHRU_BITWISE_NOOP = 0,
+ CCP_PASSTHRU_BITWISE_AND,
+ CCP_PASSTHRU_BITWISE_OR,
+ CCP_PASSTHRU_BITWISE_XOR,
+ CCP_PASSTHRU_BITWISE_MASK,
+ CCP_PASSTHRU_BITWISE__LAST,
+};
+
+/**
+ * ccp_passthru_byteswap - type of byteswap passthru operation
+ *
+ * @CCP_PASSTHRU_BYTESWAP_NOOP: no byte swapping performed
+ * @CCP_PASSTHRU_BYTESWAP_32BIT: swap bytes within 32-bit words
+ * @CCP_PASSTHRU_BYTESWAP_256BIT: swap bytes within 256-bit words
+ */
+enum ccp_passthru_byteswap {
+ CCP_PASSTHRU_BYTESWAP_NOOP = 0,
+ CCP_PASSTHRU_BYTESWAP_32BIT,
+ CCP_PASSTHRU_BYTESWAP_256BIT,
+ CCP_PASSTHRU_BYTESWAP__LAST,
+};
+
+/**
+ * CCP passthru
+ */
+struct ccp_passthru {
+ phys_addr_t src_addr;
+ phys_addr_t dest_addr;
+ enum ccp_passthru_bitwise bit_mod;
+ enum ccp_passthru_byteswap byte_swap;
+ int len;
+ int dir;
+};
+
+/* CCP version 5: Union to define the function field (cmd_reg1/dword0) */
+union ccp_function {
+ struct {
+ uint16_t size:7;
+ uint16_t encrypt:1;
+ uint16_t mode:5;
+ uint16_t type:2;
+ } aes;
+ struct {
+ uint16_t size:7;
+ uint16_t encrypt:1;
+ uint16_t mode:5;
+ uint16_t type:2;
+ } des;
+ struct {
+ uint16_t size:7;
+ uint16_t encrypt:1;
+ uint16_t rsvd:5;
+ uint16_t type:2;
+ } aes_xts;
+ struct {
+ uint16_t rsvd1:10;
+ uint16_t type:4;
+ uint16_t rsvd2:1;
+ } sha;
+ struct {
+ uint16_t mode:3;
+ uint16_t size:12;
+ } rsa;
+ struct {
+ uint16_t byteswap:2;
+ uint16_t bitwise:3;
+ uint16_t reflect:2;
+ uint16_t rsvd:8;
+ } pt;
+ struct {
+ uint16_t rsvd:13;
+ } zlib;
+ struct {
+ uint16_t size:10;
+ uint16_t type:2;
+ uint16_t mode:3;
+ } ecc;
+ uint16_t raw;
+};
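+/* A command fills in one of the engine-specific views above; the raw
+ * value then feeds the 15-bit function field of dword0 (see
+ * CCP_CMD_FUNCTION() above).
+ */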
+
+
+/**
+ * descriptor for version 5 CCP commands
+ * 8 32-bit words:
+ * word 0: function; engine; control bits
+ * word 1: length of source data
+ * word 2: low 32 bits of source pointer
+ * word 3: upper 16 bits of source pointer; source memory type
+ * word 4: low 32 bits of destination pointer
+ * word 5: upper 16 bits of destination pointer; destination memory
+ * type
+ * word 6: low 32 bits of key pointer
+ * word 7: upper 16 bits of key pointer; key memory type
+ */
+struct dword0 {
+ uint32_t soc:1;
+ uint32_t ioc:1;
+ uint32_t rsvd1:1;
+ uint32_t init:1;
+ uint32_t eom:1;
+ uint32_t function:15;
+ uint32_t engine:4;
+ uint32_t prot:1;
+ uint32_t rsvd2:7;
+};
+
+struct dword3 {
+ uint32_t src_hi:16;
+ uint32_t src_mem:2;
+ uint32_t lsb_cxt_id:8;
+ uint32_t rsvd1:5;
+ uint32_t fixed:1;
+};
+
+union dword4 {
+ uint32_t dst_lo; /* NON-SHA */
+ uint32_t sha_len_lo; /* SHA */
+};
+
+union dword5 {
+ struct {
+ uint32_t dst_hi:16;
+ uint32_t dst_mem:2;
+ uint32_t rsvd1:13;
+ uint32_t fixed:1;
+	} fields;
+ uint32_t sha_len_hi;
+};
+
+struct dword7 {
+ uint32_t key_hi:16;
+ uint32_t key_mem:2;
+ uint32_t rsvd1:14;
+};
+
+struct ccp_desc {
+ struct dword0 dw0;
+ uint32_t length;
+ uint32_t src_lo;
+ struct dword3 dw3;
+ union dword4 dw4;
+ union dword5 dw5;
+ uint32_t key_lo;
+ struct dword7 dw7;
+};
+
+/**
+ * ccp memory type
+ */
+enum ccp_memtype {
+ CCP_MEMTYPE_SYSTEM = 0,
+ CCP_MEMTYPE_SB,
+ CCP_MEMTYPE_LOCAL,
+ CCP_MEMTYPE_LAST,
+};
+
+/**
+ * cmd id to follow order
+ */
+enum ccp_cmd_order {
+ CCP_CMD_CIPHER = 0,
+ CCP_CMD_AUTH,
+ CCP_CMD_CIPHER_HASH,
+ CCP_CMD_HASH_CIPHER,
+ CCP_CMD_COMBINED,
+ CCP_CMD_NOT_SUPPORTED,
+};
+
+static inline uint32_t
+low32_value(unsigned long addr)
+{
+	return ((uint64_t)addr) & 0xffffffff;
+}
+
+static inline uint32_t
+high32_value(unsigned long addr)
+{
+	return ((uint64_t)addr >> 32) & 0xffff;
+}
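+/* The high mask is only 16 bits wide because a descriptor carries just
+ * the upper 16 bits of an address (see src_hi/dst_hi/key_hi above).
+ */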
+
+/*
+ * Start CCP device
+ */
+int ccp_dev_start(struct rte_cryptodev *dev);
+
+/**
+ * Detect ccp platform and initialize all ccp devices
+ *
+ * @param ccp_id rte_pci_id list for supported CCP devices
+ * @return no. of successfully initialized CCP devices
+ */
+int ccp_probe_devices(const struct rte_pci_id *ccp_id);
+
+/**
+ * Allocate a CCP command queue
+ *
+ * @param dev rte crypto device
+ * @param slot_req number of slots required
+ * @return allotted CCP queue on success otherwise NULL
+ */
+struct ccp_queue *ccp_allot_queue(struct rte_cryptodev *dev, int slot_req);
+
+/**
+ * read hwrng value
+ *
+ * @param trng_value data pointer to write RNG value
+ * @return 0 on success otherwise -1
+ */
+int ccp_read_hwrng(uint32_t *trng_value);
+
+#endif /* _CCP_DEV_H_ */
diff --git a/drivers/crypto/ccp/ccp_pci.c b/drivers/crypto/ccp/ccp_pci.c
new file mode 100644
index 00000000..59152ca5
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pci.c
@@ -0,0 +1,236 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <rte_string_fns.h>
+
+#include "ccp_pci.h"
+
+static const char * const uio_module_names[] = {
+ "igb_uio",
+ "uio_pci_generic",
+};
+
+int
+ccp_check_pci_uio_module(void)
+{
+ FILE *fp;
+ int i;
+ char buf[BUFSIZ];
+
+ fp = fopen(PROC_MODULES, "r");
+ if (fp == NULL)
+ return -1;
+ i = 0;
+	while (uio_module_names[i] != NULL) {
+		while (fgets(buf, sizeof(buf), fp) != NULL) {
+			if (!strncmp(buf, uio_module_names[i],
+				     strlen(uio_module_names[i]))) {
+				fclose(fp);
+				return i;
+			}
+		}
+		i++;
+		rewind(fp);
+	}
+	fclose(fp);
+	printf("Insert igb_uio or uio_pci_generic kernel module(s)\n");
+	return -1; /* uio not inserted */
+}
+
+/*
+ * split up a pci address into its constituent parts.
+ */
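+/* e.g. "0000:03:00.2" yields domain 0x0000, bus 0x03, devid 0x00,
+ * function 2
+ */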
+int
+ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+ uint8_t *bus, uint8_t *devid, uint8_t *function)
+{
+ /* first split on ':' */
+ union splitaddr {
+ struct {
+ char *domain;
+ char *bus;
+ char *devid;
+ char *function;
+ };
+ char *str[PCI_FMT_NVAL];
+ /* last element-separator is "." not ":" */
+ } splitaddr;
+
+ char *buf_copy = strndup(buf, bufsize);
+
+ if (buf_copy == NULL)
+ return -1;
+
+ if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
+ != PCI_FMT_NVAL - 1)
+ goto error;
+ /* final split is on '.' between devid and function */
+ splitaddr.function = strchr(splitaddr.devid, '.');
+ if (splitaddr.function == NULL)
+ goto error;
+ *splitaddr.function++ = '\0';
+
+ /* now convert to int values */
+ errno = 0;
+	*domain = (uint16_t)strtoul(splitaddr.domain, NULL, 16);
+ *bus = (uint8_t)strtoul(splitaddr.bus, NULL, 16);
+ *devid = (uint8_t)strtoul(splitaddr.devid, NULL, 16);
+ *function = (uint8_t)strtoul(splitaddr.function, NULL, 10);
+ if (errno != 0)
+ goto error;
+
+	free(buf_copy); /* free the copy made with strndup */
+ return 0;
+error:
+ free(buf_copy);
+ return -1;
+}
+
+int
+ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val)
+{
+ FILE *f;
+ char buf[BUFSIZ];
+ char *end = NULL;
+
+ f = fopen(filename, "r");
+ if (f == NULL)
+ return -1;
+ if (fgets(buf, sizeof(buf), f) == NULL) {
+ fclose(f);
+ return -1;
+ }
+ *val = strtoul(buf, &end, 0);
+ if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
+ fclose(f);
+ return -1;
+ }
+ fclose(f);
+ return 0;
+}
+
+/** IO resource type: */
+#define IORESOURCE_IO 0x00000100
+#define IORESOURCE_MEM 0x00000200
+
+/* parse one line of the "resource" sysfs file (note that the 'line'
+ * string is modified)
+ */
+static int
+ccp_pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
+ uint64_t *end_addr, uint64_t *flags)
+{
+ union pci_resource_info {
+ struct {
+ char *phys_addr;
+ char *end_addr;
+ char *flags;
+ };
+ char *ptrs[PCI_RESOURCE_FMT_NVAL];
+ } res_info;
+
+ if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3)
+ return -1;
+ errno = 0;
+ *phys_addr = strtoull(res_info.phys_addr, NULL, 16);
+ *end_addr = strtoull(res_info.end_addr, NULL, 16);
+ *flags = strtoull(res_info.flags, NULL, 16);
+ if (errno != 0)
+ return -1;
+
+ return 0;
+}
+
+/* parse the "resource" sysfs file */
+int
+ccp_pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
+{
+ FILE *fp;
+ char buf[BUFSIZ];
+ int i;
+ uint64_t phys_addr, end_addr, flags;
+
+ fp = fopen(filename, "r");
+ if (fp == NULL)
+ return -1;
+
+ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
+ if (fgets(buf, sizeof(buf), fp) == NULL)
+ goto error;
+ if (ccp_pci_parse_one_sysfs_resource(buf, sizeof(buf),
+ &phys_addr, &end_addr, &flags) < 0)
+ goto error;
+
+ if (flags & IORESOURCE_MEM) {
+ dev->mem_resource[i].phys_addr = phys_addr;
+ dev->mem_resource[i].len = end_addr - phys_addr + 1;
+ /* not mapped for now */
+ dev->mem_resource[i].addr = NULL;
+ }
+ }
+ fclose(fp);
+ return 0;
+
+error:
+ fclose(fp);
+ return -1;
+}
+
+int
+ccp_find_uio_devname(const char *dirname)
+{
+ DIR *dir;
+ struct dirent *e;
+ char dirname_uio[PATH_MAX];
+ unsigned int uio_num;
+ int ret = -1;
+
+ /* depending on kernel version, uio can be located in uio/uioX
+ * or uio:uioX
+ */
+ snprintf(dirname_uio, sizeof(dirname_uio), "%s/uio", dirname);
+ dir = opendir(dirname_uio);
+ if (dir == NULL) {
+		/* retry with the parent directory; the layout differs
+		 * between kernel versions
+		 */
+ dir = opendir(dirname);
+ if (dir == NULL)
+ return -1;
+ }
+
+ /* take the first file starting with "uio" */
+ while ((e = readdir(dir)) != NULL) {
+ /* format could be uio%d ...*/
+ int shortprefix_len = sizeof("uio") - 1;
+ /* ... or uio:uio%d */
+ int longprefix_len = sizeof("uio:uio") - 1;
+ char *endptr;
+
+ if (strncmp(e->d_name, "uio", 3) != 0)
+ continue;
+
+ /* first try uio%d */
+ errno = 0;
+ uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10);
+ if (errno == 0 && endptr != (e->d_name + shortprefix_len)) {
+ ret = uio_num;
+ break;
+ }
+
+ /* then try uio:uio%d */
+ errno = 0;
+ uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10);
+ if (errno == 0 && endptr != (e->d_name + longprefix_len)) {
+ ret = uio_num;
+ break;
+ }
+ }
+ closedir(dir);
+ return ret;
+}
diff --git a/drivers/crypto/ccp/ccp_pci.h b/drivers/crypto/ccp/ccp_pci.h
new file mode 100644
index 00000000..7ed3bac4
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pci.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_PCI_H_
+#define _CCP_PCI_H_
+
+#include <stdint.h>
+
+#include <rte_bus_pci.h>
+
+#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
+#define PROC_MODULES "/proc/modules"
+
+int ccp_check_pci_uio_module(void);
+
+int ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
+ uint8_t *bus, uint8_t *devid, uint8_t *function);
+
+int ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val);
+
+int ccp_pci_parse_sysfs_resource(const char *filename,
+ struct rte_pci_device *dev);
+
+int ccp_find_uio_devname(const char *dirname);
+
+#endif /* _CCP_PCI_H_ */
diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
new file mode 100644
index 00000000..80b75ccb
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pmd_ops.c
@@ -0,0 +1,848 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_malloc.h>
+
+#include "ccp_pmd_private.h"
+#include "ccp_dev.h"
+#include "ccp_crypto.h"
+
+#define CCP_BASE_SYM_CRYPTO_CAPABILITIES \
+ { /* SHA1 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA1, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 20, \
+ .max = 20, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA1 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 20, \
+ .max = 20, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA224 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA224, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA224 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-224 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_224, \
+ .block_size = 144, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+	{ /* SHA3-224 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_224_HMAC, \
+ .block_size = 144, \
+ .key_size = { \
+ .min = 1, \
+ .max = 144, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 28, \
+ .max = 28, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA256 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA256, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA256 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-256 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_256, \
+ .block_size = 136, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-256-HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_256_HMAC, \
+ .block_size = 136, \
+ .key_size = { \
+ .min = 1, \
+ .max = 136, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 32, \
+ .max = 32, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA384 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA384, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA384 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 1, \
+ .max = 128, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-384 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_384, \
+ .block_size = 104, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-384-HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_384_HMAC, \
+ .block_size = 104, \
+ .key_size = { \
+ .min = 1, \
+ .max = 104, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 48, \
+ .max = 48, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA512 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA512, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA512 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC, \
+ .block_size = 128, \
+ .key_size = { \
+ .min = 1, \
+ .max = 128, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-512 */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_512, \
+ .block_size = 72, \
+ .key_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* SHA3-512-HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA3_512_HMAC, \
+ .block_size = 72, \
+ .key_size = { \
+ .min = 1, \
+ .max = 72, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 64, \
+ .max = 64, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+	{ /* AES-CMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_AES_CMAC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .digest_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ }, } \
+ }, } \
+ }, \
+ { /* AES ECB */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_ECB, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 0, \
+ .max = 0, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CTR */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CTR, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* 3DES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC, \
+ .block_size = 8, \
+ .key_size = { \
+ .min = 16, \
+ .max = 24, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 8, \
+ .max = 8, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES GCM */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD, \
+ {.aead = { \
+ .algo = RTE_CRYPTO_AEAD_AES_GCM, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .digest_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .aad_size = { \
+ .min = 0, \
+ .max = 65535, \
+ .increment = 1 \
+ }, \
+ .iv_size = { \
+ .min = 12, \
+ .max = 16, \
+ .increment = 4 \
+ }, \
+ }, } \
+ }, } \
+ }
+
+#define CCP_EXTRA_SYM_CRYPTO_CAPABILITIES \
+ { /* MD5 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_MD5_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ }, \
+ .aad_size = { 0 } \
+ }, } \
+ }, } \
+ }
+
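+/* MD5-HMAC is not run on the CCP engine; it is advertised only when
+ * the PMD's CPU-based authentication option (auth_opt) is enabled,
+ * via ccp_crypto_cap_complete below.
+ */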
+static const struct rte_cryptodev_capabilities ccp_crypto_cap[] = {
+ CCP_BASE_SYM_CRYPTO_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static const struct rte_cryptodev_capabilities ccp_crypto_cap_complete[] = {
+ CCP_EXTRA_SYM_CRYPTO_CAPABILITIES,
+ CCP_BASE_SYM_CRYPTO_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+static int
+ccp_pmd_config(struct rte_cryptodev *dev __rte_unused,
+ struct rte_cryptodev_config *config __rte_unused)
+{
+ return 0;
+}
+
+static int
+ccp_pmd_start(struct rte_cryptodev *dev)
+{
+ return ccp_dev_start(dev);
+}
+
+static void
+ccp_pmd_stop(struct rte_cryptodev *dev __rte_unused)
+{
+
+}
+
+static int
+ccp_pmd_close(struct rte_cryptodev *dev __rte_unused)
+{
+ return 0;
+}
+
+static void
+ccp_pmd_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
+
+ stats->enqueued_count += qp->qp_stats.enqueued_count;
+ stats->dequeued_count += qp->qp_stats.dequeued_count;
+
+ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
+ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
+ }
+}
+
+static void
+ccp_pmd_stats_reset(struct rte_cryptodev *dev)
+{
+ int qp_id;
+
+ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
+ struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
+
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ }
+}
+
+static void
+ccp_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+{
+ struct ccp_private *internals = dev->data->dev_private;
+
+ if (dev_info != NULL) {
+ dev_info->driver_id = dev->driver_id;
+ dev_info->feature_flags = dev->feature_flags;
+ dev_info->capabilities = ccp_crypto_cap;
+ if (internals->auth_opt == 1)
+ dev_info->capabilities = ccp_crypto_cap_complete;
+ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
+ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+ }
+}
+
+static int
+ccp_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
+{
+ struct ccp_qp *qp;
+
+ if (dev->data->queue_pairs[qp_id] != NULL) {
+ qp = (struct ccp_qp *)dev->data->queue_pairs[qp_id];
+ rte_ring_free(qp->processed_pkts);
+ rte_mempool_free(qp->batch_mp);
+ rte_free(qp);
+ dev->data->queue_pairs[qp_id] = NULL;
+ }
+ return 0;
+}
+
+static int
+ccp_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
+ struct ccp_qp *qp)
+{
+ unsigned int n = snprintf(qp->name, sizeof(qp->name),
+ "ccp_pmd_%u_qp_%u",
+ dev->data->dev_id, qp->id);
+
+	if (n >= sizeof(qp->name))
+ return -1;
+
+ return 0;
+}
+
+static struct rte_ring *
+ccp_pmd_qp_create_batch_info_ring(struct ccp_qp *qp,
+ unsigned int ring_size, int socket_id)
+{
+ struct rte_ring *r;
+
+ r = rte_ring_lookup(qp->name);
+ if (r) {
+ if (r->size >= ring_size) {
+ CCP_LOG_INFO(
+ "Reusing ring %s for processed packets",
+ qp->name);
+ return r;
+ }
+ CCP_LOG_INFO(
+ "Unable to reuse ring %s for processed packets",
+ qp->name);
+ return NULL;
+ }
+
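+	/* single-producer/single-consumer: a cryptodev queue pair is
+	 * only ever accessed by one thread at a time
+	 */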
+ return rte_ring_create(qp->name, ring_size, socket_id,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+}
+
+static int
+ccp_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id, struct rte_mempool *session_pool)
+{
+ struct ccp_private *internals = dev->data->dev_private;
+ struct ccp_qp *qp;
+ int retval = 0;
+
+ if (qp_id >= internals->max_nb_qpairs) {
+ CCP_LOG_ERR("Invalid qp_id %u, should be less than %u",
+ qp_id, internals->max_nb_qpairs);
+ return (-EINVAL);
+ }
+
+ /* Free memory prior to re-allocation if needed. */
+ if (dev->data->queue_pairs[qp_id] != NULL)
+ ccp_pmd_qp_release(dev, qp_id);
+
+ /* Allocate the queue pair data structure. */
+ qp = rte_zmalloc_socket("CCP Crypto PMD Queue Pair", sizeof(*qp),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (qp == NULL) {
+ CCP_LOG_ERR("Failed to allocate queue pair memory");
+ return (-ENOMEM);
+ }
+
+ qp->dev = dev;
+ qp->id = qp_id;
+ dev->data->queue_pairs[qp_id] = qp;
+
+ retval = ccp_pmd_qp_set_unique_name(dev, qp);
+ if (retval) {
+ CCP_LOG_ERR("Failed to create unique name for ccp qp");
+ goto qp_setup_cleanup;
+ }
+
+ qp->processed_pkts = ccp_pmd_qp_create_batch_info_ring(qp,
+ qp_conf->nb_descriptors, socket_id);
+ if (qp->processed_pkts == NULL) {
+ CCP_LOG_ERR("Failed to create batch info ring");
+ goto qp_setup_cleanup;
+ }
+
+ qp->sess_mp = session_pool;
+
+ /* mempool for batch info */
+ qp->batch_mp = rte_mempool_create(
+ qp->name,
+ qp_conf->nb_descriptors,
+ sizeof(struct ccp_batch_info),
+ RTE_CACHE_LINE_SIZE,
+ 0, NULL, NULL, NULL, NULL,
+ SOCKET_ID_ANY, 0);
+ if (qp->batch_mp == NULL)
+ goto qp_setup_cleanup;
+ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
+ return 0;
+
+qp_setup_cleanup:
+ dev->data->queue_pairs[qp_id] = NULL;
+ if (qp)
+ rte_free(qp);
+ return -1;
+}
+
+static int
+ccp_pmd_qp_start(struct rte_cryptodev *dev __rte_unused,
+ uint16_t queue_pair_id __rte_unused)
+{
+ return -ENOTSUP;
+}
+
+static int
+ccp_pmd_qp_stop(struct rte_cryptodev *dev __rte_unused,
+ uint16_t queue_pair_id __rte_unused)
+{
+ return -ENOTSUP;
+}
+
+static uint32_t
+ccp_pmd_qp_count(struct rte_cryptodev *dev)
+{
+ return dev->data->nb_queue_pairs;
+}
+
+static unsigned
+ccp_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+{
+ return sizeof(struct ccp_session);
+}
+
+static int
+ccp_pmd_session_configure(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ int ret;
+ void *sess_private_data;
+ struct ccp_private *internals;
+
+ if (unlikely(sess == NULL || xform == NULL)) {
+ CCP_LOG_ERR("Invalid session struct or xform");
+ return -ENOMEM;
+ }
+
+ if (rte_mempool_get(mempool, &sess_private_data)) {
+ CCP_LOG_ERR("Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+ internals = (struct ccp_private *)dev->data->dev_private;
+ ret = ccp_set_session_parameters(sess_private_data, xform, internals);
+ if (ret != 0) {
+ CCP_LOG_ERR("failed configure session parameters");
+
+ /* Return session to mempool */
+ rte_mempool_put(mempool, sess_private_data);
+ return ret;
+ }
+ set_session_private_data(sess, dev->driver_id,
+ sess_private_data);
+
+ return 0;
+}
+
+static void
+ccp_pmd_session_clear(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ uint8_t index = dev->driver_id;
+ void *sess_priv = get_session_private_data(sess, index);
+
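+	/* rte_mempool_from_obj() recovers the mempool that owns the
+	 * private data, so it can be zeroed and returned.
+	 */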
+ if (sess_priv) {
+ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
+
+ rte_mempool_put(sess_mp, sess_priv);
+ memset(sess_priv, 0, sizeof(struct ccp_session));
+ set_session_private_data(sess, index, NULL);
+ }
+}
+
+struct rte_cryptodev_ops ccp_ops = {
+ .dev_configure = ccp_pmd_config,
+ .dev_start = ccp_pmd_start,
+ .dev_stop = ccp_pmd_stop,
+ .dev_close = ccp_pmd_close,
+
+ .stats_get = ccp_pmd_stats_get,
+ .stats_reset = ccp_pmd_stats_reset,
+
+ .dev_infos_get = ccp_pmd_info_get,
+
+ .queue_pair_setup = ccp_pmd_qp_setup,
+ .queue_pair_release = ccp_pmd_qp_release,
+ .queue_pair_start = ccp_pmd_qp_start,
+ .queue_pair_stop = ccp_pmd_qp_stop,
+ .queue_pair_count = ccp_pmd_qp_count,
+
+ .session_get_size = ccp_pmd_session_get_size,
+ .session_configure = ccp_pmd_session_configure,
+ .session_clear = ccp_pmd_session_clear,
+};
+
+struct rte_cryptodev_ops *ccp_pmd_ops = &ccp_ops;
diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
new file mode 100644
index 00000000..f4498048
--- /dev/null
+++ b/drivers/crypto/ccp/ccp_pmd_private.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _CCP_PMD_PRIVATE_H_
+#define _CCP_PMD_PRIVATE_H_
+
+#include <rte_cryptodev.h>
+#include "ccp_crypto.h"
+
+#define CRYPTODEV_NAME_CCP_PMD crypto_ccp
+
+#define CCP_LOG_ERR(fmt, args...) \
+ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+ __func__, __LINE__, ## args)
+
+#ifdef RTE_LIBRTE_CCP_DEBUG
+#define CCP_LOG_INFO(fmt, args...) \
+ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+ __func__, __LINE__, ## args)
+
+#define CCP_LOG_DBG(fmt, args...) \
+ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
+ RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
+ __func__, __LINE__, ## args)
+#else
+#define CCP_LOG_INFO(fmt, args...)
+#define CCP_LOG_DBG(fmt, args...)
+#endif
+
+/** Maximum number of queue pairs supported by the CCP PMD */
+#define CCP_PMD_MAX_QUEUE_PAIRS 1
+#define CCP_NB_MAX_DESCRIPTORS 1024
+#define CCP_MAX_BURST 64
+
+#include "ccp_dev.h"
+
+/* private data structure for each CCP crypto device */
+struct ccp_private {
+ unsigned int max_nb_qpairs; /**< Max number of queue pairs */
+ unsigned int max_nb_sessions; /**< Max number of sessions */
+ uint8_t crypto_num_dev; /**< Number of working crypto devices */
+ bool auth_opt; /**< Authentication offload option */
+ struct ccp_device *last_dev; /**< Last working crypto device */
+};
+
+/* CCP batch info */
+struct ccp_batch_info {
+ struct rte_crypto_op *op[CCP_MAX_BURST];
+	/**< op table populated at enqueue time by the application */
+ int op_idx;
+ struct ccp_queue *cmd_q;
+	uint16_t opcnt;
+	/**< number of crypto ops in the batch */
+	int desccnt;
+	/**< number of CCP queue descriptors consumed */
+	uint32_t head_offset;
+	/**< CCP queue head offset at time of enqueue */
+	uint32_t tail_offset;
+	/**< CCP queue tail offset at time of enqueue */
+ uint8_t lsb_buf[CCP_SB_BYTES * CCP_MAX_BURST];
+ phys_addr_t lsb_buf_phys;
+ /**< LSB intermediate buf for passthru */
+ int lsb_buf_idx;
+ uint16_t auth_ctr;
+	/**< number of auth-only ops in the batch (CPU-based auth) */
+} __rte_cache_aligned;
+
+/** CCP crypto queue pair */
+struct ccp_qp {
+ uint16_t id;
+ /**< Queue Pair Identifier */
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+ /**< Unique Queue Pair Name */
+ struct rte_ring *processed_pkts;
+	/**< Ring for placing processed packets */
+ struct rte_mempool *sess_mp;
+ /**< Session Mempool */
+ struct rte_mempool *batch_mp;
+	/**< Mempool for batch info structures */
+ struct rte_cryptodev_stats qp_stats;
+ /**< Queue pair statistics */
+ struct ccp_batch_info *b_info;
+	/**< Batch info for ops pulled out of the queue */
+ struct rte_cryptodev *dev;
+ /**< rte crypto device to which this qp belongs */
+ uint8_t temp_digest[DIGEST_LENGTH_MAX];
+ /**< Buffer used to store the digest generated
+ * by the driver when verifying a digest provided
+ * by the user (using authentication verify operation)
+ */
+} __rte_cache_aligned;
+
+
+/** Device-specific operations function pointer structure */
+extern struct rte_cryptodev_ops *ccp_pmd_ops;
+
+uint16_t
+ccp_cpu_pmd_enqueue_burst(void *queue_pair,
+ struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+uint16_t
+ccp_cpu_pmd_dequeue_burst(void *queue_pair,
+ struct rte_crypto_op **ops,
+ uint16_t nb_ops);
+
+#endif /* _CCP_PMD_PRIVATE_H_ */
diff --git a/drivers/crypto/ccp/meson.build b/drivers/crypto/ccp/meson.build
new file mode 100644
index 00000000..e43b0059
--- /dev/null
+++ b/drivers/crypto/ccp/meson.build
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+
+if host_machine.system() != 'linux'
+ build = false
+endif
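+# the CCP PMD cannot be built without OpenSSL's libcrypto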
+dep = dependency('libcrypto', required: false)
+if not dep.found()
+ build = false
+endif
+deps += 'bus_vdev'
+deps += 'bus_pci'
+
+sources = files('rte_ccp_pmd.c',
+ 'ccp_crypto.c',
+ 'ccp_dev.c',
+ 'ccp_pci.c',
+ 'ccp_pmd_ops.c')
+
+ext_deps += dep
+pkgconfig_extra_libs += '-lcrypto'
diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
new file mode 100644
index 00000000..2061f465
--- /dev/null
+++ b/drivers/crypto/ccp/rte_ccp_pmd.c
@@ -0,0 +1,411 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#include <rte_bus_pci.h>
+#include <rte_bus_vdev.h>
+#include <rte_common.h>
+#include <rte_config.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_pci.h>
+#include <rte_dev.h>
+#include <rte_malloc.h>
+
+#include "ccp_crypto.h"
+#include "ccp_dev.h"
+#include "ccp_pmd_private.h"
+
+/**
+ * Global flag indicating whether the CCP PMD has already been initialized.
+ */
+static unsigned int ccp_pmd_init_done;
+uint8_t ccp_cryptodev_driver_id;
+
+struct ccp_pmd_init_params {
+ struct rte_cryptodev_pmd_init_params def_p;
+	bool auth_opt; /**< perform authentication on CPU when set */
+};
+
+#define CCP_CRYPTODEV_PARAM_NAME ("name")
+#define CCP_CRYPTODEV_PARAM_SOCKET_ID ("socket_id")
+#define CCP_CRYPTODEV_PARAM_MAX_NB_QP ("max_nb_queue_pairs")
+#define CCP_CRYPTODEV_PARAM_MAX_NB_SESS ("max_nb_sessions")
+#define CCP_CRYPTODEV_PARAM_AUTH_OPT ("ccp_auth_opt")
+
+const char *ccp_pmd_valid_params[] = {
+ CCP_CRYPTODEV_PARAM_NAME,
+ CCP_CRYPTODEV_PARAM_SOCKET_ID,
+ CCP_CRYPTODEV_PARAM_MAX_NB_QP,
+ CCP_CRYPTODEV_PARAM_MAX_NB_SESS,
+ CCP_CRYPTODEV_PARAM_AUTH_OPT,
+};
+
+/** ccp pmd auth option */
+enum ccp_pmd_auth_opt {
+ CCP_PMD_AUTH_OPT_CCP = 0,
+ CCP_PMD_AUTH_OPT_CPU,
+};
+
+/** parse integer from integer argument */
+static int
+parse_integer_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ int *i = (int *) extra_args;
+
+ *i = atoi(value);
+ if (*i < 0) {
+		CCP_LOG_ERR("Argument has to be non-negative.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/** parse name argument */
+static int
+parse_name_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct rte_cryptodev_pmd_init_params *params = extra_args;
+
+ if (strlen(value) >= RTE_CRYPTODEV_NAME_MAX_LEN - 1) {
+		CCP_LOG_ERR("Invalid name %s, should be less than "
+				"%u bytes.", value,
+				RTE_CRYPTODEV_NAME_MAX_LEN - 1);
+ return -EINVAL;
+ }
+
+ strncpy(params->name, value, RTE_CRYPTODEV_NAME_MAX_LEN);
+
+ return 0;
+}
+
+/** parse authentication operation option */
+static int
+parse_auth_opt_arg(const char *key __rte_unused,
+ const char *value, void *extra_args)
+{
+ struct ccp_pmd_init_params *params = extra_args;
+ int i;
+
+ i = atoi(value);
+ if (i < CCP_PMD_AUTH_OPT_CCP || i > CCP_PMD_AUTH_OPT_CPU) {
+		CCP_LOG_ERR("Invalid ccp pmd auth option. "
+			    "0->auth on CCP(default), "
+			    "1->auth on CPU");
+ return -EINVAL;
+ }
+ params->auth_opt = i;
+ return 0;
+}
+
+static int
+ccp_pmd_parse_input_args(struct ccp_pmd_init_params *params,
+ const char *input_args)
+{
+ struct rte_kvargs *kvlist = NULL;
+ int ret = 0;
+
+ if (params == NULL)
+ return -EINVAL;
+
+ if (input_args) {
+ kvlist = rte_kvargs_parse(input_args,
+ ccp_pmd_valid_params);
+ if (kvlist == NULL)
+ return -1;
+
+ ret = rte_kvargs_process(kvlist,
+ CCP_CRYPTODEV_PARAM_MAX_NB_QP,
+ &parse_integer_arg,
+ &params->def_p.max_nb_queue_pairs);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ CCP_CRYPTODEV_PARAM_MAX_NB_SESS,
+ &parse_integer_arg,
+ &params->def_p.max_nb_sessions);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ CCP_CRYPTODEV_PARAM_SOCKET_ID,
+ &parse_integer_arg,
+ &params->def_p.socket_id);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ CCP_CRYPTODEV_PARAM_NAME,
+ &parse_name_arg,
+ &params->def_p);
+ if (ret < 0)
+ goto free_kvlist;
+
+ ret = rte_kvargs_process(kvlist,
+ CCP_CRYPTODEV_PARAM_AUTH_OPT,
+ &parse_auth_opt_arg,
+ params);
+ if (ret < 0)
+ goto free_kvlist;
+
+ }
+
+free_kvlist:
+ rte_kvargs_free(kvlist);
+ return ret;
+}
+
+static struct ccp_session *
+get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
+{
+ struct ccp_session *sess = NULL;
+
+ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
+ if (unlikely(op->sym->session == NULL))
+ return NULL;
+
+ sess = (struct ccp_session *)
+ get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
+ } else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
+ void *_sess;
+ void *_sess_private_data = NULL;
+ struct ccp_private *internals;
+
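+		/* A sessionless op draws two objects from the session
+		 * mempool: the generic session header and the CCP
+		 * private session data.
+		 */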
+ if (rte_mempool_get(qp->sess_mp, &_sess))
+ return NULL;
+ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
+ return NULL;
+
+ sess = (struct ccp_session *)_sess_private_data;
+
+ internals = (struct ccp_private *)qp->dev->data->dev_private;
+ if (unlikely(ccp_set_session_parameters(sess, op->sym->xform,
+ internals) != 0)) {
+ rte_mempool_put(qp->sess_mp, _sess);
+ rte_mempool_put(qp->sess_mp, _sess_private_data);
+ sess = NULL;
+ }
+ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
+ set_session_private_data(op->sym->session,
+ ccp_cryptodev_driver_id,
+ _sess_private_data);
+ }
+
+ return sess;
+}
+
+static uint16_t
+ccp_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct ccp_session *sess = NULL;
+ struct ccp_qp *qp = queue_pair;
+ struct ccp_queue *cmd_q;
+ struct rte_cryptodev *dev = qp->dev;
+ uint16_t i, enq_cnt = 0, slots_req = 0;
+
+ if (nb_ops == 0)
+ return 0;
+
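+	/* If the completion ring is full no new batch can be tracked,
+	 * so back-pressure the caller instead of enqueueing.
+	 */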
+ if (unlikely(rte_ring_full(qp->processed_pkts) != 0))
+ return 0;
+
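+	/* First pass over the burst: resolve each op's session and
+	 * pre-compute the total number of CCP descriptor slots needed.
+	 */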
+ for (i = 0; i < nb_ops; i++) {
+ sess = get_ccp_session(qp, ops[i]);
+ if (unlikely(sess == NULL) && (i == 0)) {
+ qp->qp_stats.enqueue_err_count++;
+ return 0;
+ } else if (sess == NULL) {
+ nb_ops = i;
+ break;
+ }
+ slots_req += ccp_compute_slot_count(sess);
+ }
+
+ cmd_q = ccp_allot_queue(dev, slots_req);
+ if (unlikely(cmd_q == NULL))
+ return 0;
+
+ enq_cnt = process_ops_to_enqueue(qp, ops, cmd_q, nb_ops, slots_req);
+ qp->qp_stats.enqueued_count += enq_cnt;
+ return enq_cnt;
+}
+
+static uint16_t
+ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+{
+ struct ccp_qp *qp = queue_pair;
+ uint16_t nb_dequeued = 0, i;
+
+ nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops);
+
+ /* Free session if a session-less crypto op */
+ for (i = 0; i < nb_dequeued; i++)
+ if (unlikely(ops[i]->sess_type ==
+ RTE_CRYPTO_OP_SESSIONLESS)) {
+ rte_mempool_put(qp->sess_mp,
+ ops[i]->sym->session);
+ ops[i]->sym->session = NULL;
+ }
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
+}
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static struct rte_pci_id ccp_pci_id[] = {
+ {
+ RTE_PCI_DEVICE(0x1022, 0x1456), /* AMD CCP-5a */
+ },
+ {
+ RTE_PCI_DEVICE(0x1022, 0x1468), /* AMD CCP-5b */
+ },
+ {.device_id = 0},
+};
+
+/** Remove ccp pmd */
+static int
+cryptodev_ccp_remove(struct rte_vdev_device *dev)
+{
+ const char *name;
+
+ ccp_pmd_init_done = 0;
+ name = rte_vdev_device_name(dev);
+ if (name == NULL)
+ return -EINVAL;
+
+ RTE_LOG(INFO, PMD, "Closing ccp device %s on numa socket %u\n",
+ name, rte_socket_id());
+
+ return 0;
+}
+
+/** Create crypto device */
+static int
+cryptodev_ccp_create(const char *name,
+ struct rte_vdev_device *vdev,
+ struct ccp_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *dev;
+ struct ccp_private *internals;
+ uint8_t cryptodev_cnt = 0;
+
+ if (init_params->def_p.name[0] == '\0')
+ snprintf(init_params->def_p.name,
+ sizeof(init_params->def_p.name),
+ "%s", name);
+
+ dev = rte_cryptodev_pmd_create(init_params->def_p.name,
+ &vdev->device,
+ &init_params->def_p);
+ if (dev == NULL) {
+ CCP_LOG_ERR("failed to create cryptodev vdev");
+ goto init_error;
+ }
+
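+	/* The vdev front-end itself scans the PCI bus for supported
+	 * CCP engines; cryptodev_cnt is the number found.
+	 */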
+ cryptodev_cnt = ccp_probe_devices(ccp_pci_id);
+
+ if (cryptodev_cnt == 0) {
+ CCP_LOG_ERR("failed to detect CCP crypto device");
+ goto init_error;
+ }
+
+	RTE_LOG(INFO, PMD, "CCP: Crypto device count = %d\n", cryptodev_cnt);
+ dev->driver_id = ccp_cryptodev_driver_id;
+
+ /* register rx/tx burst functions for data path */
+ dev->dev_ops = ccp_pmd_ops;
+ dev->enqueue_burst = ccp_pmd_enqueue_burst;
+ dev->dequeue_burst = ccp_pmd_dequeue_burst;
+
+ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_HW_ACCELERATED |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+ internals = dev->data->dev_private;
+
+ internals->max_nb_qpairs = init_params->def_p.max_nb_queue_pairs;
+ internals->max_nb_sessions = init_params->def_p.max_nb_sessions;
+ internals->auth_opt = init_params->auth_opt;
+ internals->crypto_num_dev = cryptodev_cnt;
+
+ return 0;
+
+init_error:
+ CCP_LOG_ERR("driver %s: %s() failed",
+ init_params->def_p.name, __func__);
+ cryptodev_ccp_remove(vdev);
+
+ return -EFAULT;
+}
+
+/** Probe ccp pmd */
+static int
+cryptodev_ccp_probe(struct rte_vdev_device *vdev)
+{
+ int rc = 0;
+ const char *name;
+ struct ccp_pmd_init_params init_params = {
+ .def_p = {
+ "",
+ sizeof(struct ccp_private),
+ rte_socket_id(),
+ CCP_PMD_MAX_QUEUE_PAIRS,
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+ },
+ .auth_opt = CCP_PMD_AUTH_OPT_CCP,
+ };
+ const char *input_args;
+
+ if (ccp_pmd_init_done) {
+ RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
+ return -EFAULT;
+ }
+ name = rte_vdev_device_name(vdev);
+ if (name == NULL)
+ return -EINVAL;
+
+ input_args = rte_vdev_device_args(vdev);
+ ccp_pmd_parse_input_args(&init_params, input_args);
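+	/* Only one queue pair is currently supported, so any
+	 * user-supplied max_nb_queue_pairs value is overridden here.
+	 */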
+ init_params.def_p.max_nb_queue_pairs = CCP_PMD_MAX_QUEUE_PAIRS;
+
+ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
+ init_params.def_p.socket_id);
+ RTE_LOG(INFO, PMD, "Max number of queue pairs = %d\n",
+ init_params.def_p.max_nb_queue_pairs);
+ RTE_LOG(INFO, PMD, "Max number of sessions = %d\n",
+ init_params.def_p.max_nb_sessions);
+ RTE_LOG(INFO, PMD, "Authentication offload to %s\n",
+ ((init_params.auth_opt == 0) ? "CCP" : "CPU"));
+
+ rc = cryptodev_ccp_create(name, vdev, &init_params);
+ if (rc)
+ return rc;
+ ccp_pmd_init_done = 1;
+ return 0;
+}
+
+static struct rte_vdev_driver cryptodev_ccp_pmd_drv = {
+ .probe = cryptodev_ccp_probe,
+ .remove = cryptodev_ccp_remove
+};
+
+static struct cryptodev_driver ccp_crypto_drv;
+
+RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CCP_PMD, cryptodev_ccp_pmd_drv);
+RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CCP_PMD,
+ "max_nb_queue_pairs=<int> "
+ "max_nb_sessions=<int> "
+ "socket_id=<int> "
+ "ccp_auth_opt=<int>");
+RTE_PMD_REGISTER_CRYPTO_DRIVER(ccp_crypto_drv, cryptodev_ccp_pmd_drv.driver,
+ ccp_cryptodev_driver_id);
diff --git a/drivers/crypto/ccp/rte_pmd_ccp_version.map b/drivers/crypto/ccp/rte_pmd_ccp_version.map
new file mode 100644
index 00000000..9b9ab1a4
--- /dev/null
+++ b/drivers/crypto/ccp/rte_pmd_ccp_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+ local: *;
+};
diff --git a/drivers/crypto/dpaa2_sec/Makefile b/drivers/crypto/dpaa2_sec/Makefile
index cb6c63e6..da3d8f84 100644
--- a/drivers/crypto/dpaa2_sec/Makefile
+++ b/drivers/crypto/dpaa2_sec/Makefile
@@ -18,13 +18,8 @@ LIB = librte_pmd_dpaa2_sec.a
# build flags
CFLAGS += -DALLOW_EXPERIMENTAL_API
-ifeq ($(CONFIG_RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT),y)
-CFLAGS += -O0 -g
-CFLAGS += "-Wno-error"
-else
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-endif
CFLAGS += -D _GNU_SOURCE
ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
index 9a790ddd..56fa969d 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_dpseci.c
@@ -30,6 +30,9 @@
#include "dpaa2_sec_priv.h"
#include "dpaa2_sec_logs.h"
+/* Required types */
+typedef uint64_t dma_addr_t;
+
/* RTA header files */
#include <hw/desc/ipsec.h>
#include <hw/desc/algo.h>
@@ -56,6 +59,8 @@ enum rta_sec_era rta_sec_era = RTA_SEC_ERA_8;
static uint8_t cryptodev_driver_id;
+int dpaa2_logtype_sec;
+
static inline int
build_proto_fd(dpaa2_sec_session *sess,
struct rte_crypto_op *op,
@@ -77,11 +82,11 @@ build_proto_fd(dpaa2_sec_session *sess,
DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(sym_op->m_src));
DPAA2_SET_FD_OFFSET(fd, sym_op->m_src->data_off);
DPAA2_SET_FD_LEN(fd, sym_op->m_src->pkt_len);
- DPAA2_SET_FD_FLC(fd, ((uint64_t)flc));
+ DPAA2_SET_FD_FLC(fd, (ptrdiff_t)flc);
/* save physical address of mbuf */
op->sym->aead.digest.phys_addr = mbuf->buf_iova;
- mbuf->buf_iova = (uint64_t)op;
+ mbuf->buf_iova = (size_t)op;
return 0;
}
@@ -113,12 +118,12 @@ build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
RTE_CACHE_LINE_SIZE);
if (unlikely(!fle)) {
- RTE_LOG(ERR, PMD, "GCM SG: Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("GCM SG: Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_SG_MEM_SIZE);
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (size_t)priv);
op_fle = fle + 1;
ip_fle = fle + 2;
@@ -132,11 +137,11 @@ build_authenc_gcm_sg_fd(dpaa2_sec_session *sess,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- PMD_TX_LOG(DEBUG, "GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
+ DPAA2_SEC_DP_DEBUG("GCM SG: auth_off: 0x%x/length %d, digest-len=%d\n"
"iv-len=%d data_off: 0x%x\n",
sym_op->aead.data.offset,
sym_op->aead.data.length,
- sym_op->aead.digest.length,
+ sess->digest_length,
sess->iv.length,
sym_op->m_src->data_off);
@@ -264,12 +269,12 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
*/
retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
if (retval) {
- RTE_LOG(ERR, PMD, "GCM: Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("GCM: Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
fle = fle + 1;
sge = fle + 2;
if (likely(bpid < MAX_BPID)) {
@@ -297,11 +302,11 @@ build_authenc_gcm_fd(dpaa2_sec_session *sess,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- PMD_TX_LOG(DEBUG, "GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
+ DPAA2_SEC_DP_DEBUG("GCM: auth_off: 0x%x/length %d, digest-len=%d\n"
"iv-len=%d data_off: 0x%x\n",
sym_op->aead.data.offset,
sym_op->aead.data.length,
- sym_op->aead.digest.length,
+ sess->digest_length,
sess->iv.length,
sym_op->m_src->data_off);
@@ -409,12 +414,12 @@ build_authenc_sg_fd(dpaa2_sec_session *sess,
fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
RTE_CACHE_LINE_SIZE);
if (unlikely(!fle)) {
- RTE_LOG(ERR, PMD, "AUTHENC SG: Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("AUTHENC SG: Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_SG_MEM_SIZE);
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
op_fle = fle + 1;
ip_fle = fle + 2;
@@ -428,16 +433,16 @@ build_authenc_sg_fd(dpaa2_sec_session *sess,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- PMD_TX_LOG(DEBUG,
- "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
- "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
- sym_op->auth.data.offset,
- sym_op->auth.data.length,
- sym_op->auth.digest.length,
- sym_op->cipher.data.offset,
- sym_op->cipher.data.length,
- sym_op->cipher.iv.length,
- sym_op->m_src->data_off);
+ DPAA2_SEC_DP_DEBUG(
+ "AUTHENC SG: auth_off: 0x%x/length %d, digest-len=%d\n"
+ "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
+ sym_op->auth.data.offset,
+ sym_op->auth.data.length,
+ sess->digest_length,
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
/* Configure Output FLE with Scatter/Gather Entry */
DPAA2_SET_FLE_SG_EXT(op_fle);
@@ -558,12 +563,12 @@ build_authenc_fd(dpaa2_sec_session *sess,
*/
retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
if (retval) {
- RTE_LOG(ERR, PMD, "Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
fle = fle + 1;
sge = fle + 2;
if (likely(bpid < MAX_BPID)) {
@@ -591,15 +596,16 @@ build_authenc_fd(dpaa2_sec_session *sess,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- PMD_TX_LOG(DEBUG, "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
- "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
- sym_op->auth.data.offset,
- sym_op->auth.data.length,
- sess->digest_length,
- sym_op->cipher.data.offset,
- sym_op->cipher.data.length,
- sess->iv.length,
- sym_op->m_src->data_off);
+ DPAA2_SEC_DP_DEBUG(
+ "AUTHENC: auth_off: 0x%x/length %d, digest-len=%d\n"
+ "cipher_off: 0x%x/length %d, iv-len=%d data_off: 0x%x\n",
+ sym_op->auth.data.offset,
+ sym_op->auth.data.length,
+ sess->digest_length,
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
/* Configure Output FLE with Scatter/Gather Entry */
DPAA2_SET_FLE_ADDR(fle, DPAA2_VADDR_TO_IOVA(sge));
@@ -686,13 +692,13 @@ static inline int build_auth_sg_fd(
fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
RTE_CACHE_LINE_SIZE);
if (unlikely(!fle)) {
- RTE_LOG(ERR, PMD, "AUTH SG: Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("AUTH SG: Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_SG_MEM_SIZE);
/* first FLE entry used to store mbuf and session ctxt */
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
op_fle = fle + 1;
ip_fle = fle + 2;
sge = fle + 3;
@@ -762,7 +768,7 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
if (retval) {
- RTE_LOG(ERR, PMD, "AUTH Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("AUTH Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -772,8 +778,8 @@ build_auth_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
* to get the MBUF Addr from the previous FLE.
* We can have a better approach to use the inline Mbuf
*/
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
fle = fle + 1;
if (likely(bpid < MAX_BPID)) {
@@ -859,13 +865,13 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
fle = (struct qbman_fle *)rte_malloc(NULL, FLE_SG_MEM_SIZE,
RTE_CACHE_LINE_SIZE);
if (!fle) {
- RTE_LOG(ERR, PMD, "CIPHER SG: Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("CIPHER SG: Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_SG_MEM_SIZE);
/* first FLE entry used to store mbuf and session ctxt */
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
op_fle = fle + 1;
ip_fle = fle + 2;
@@ -873,12 +879,13 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
flc = &priv->flc_desc[0].flc;
- PMD_TX_LOG(DEBUG,
- "CIPHER SG: cipher_off: 0x%x/length %d,ivlen=%d data_off: 0x%x",
- sym_op->cipher.data.offset,
- sym_op->cipher.data.length,
- sym_op->cipher.iv.length,
- sym_op->m_src->data_off);
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER SG: cipher_off: 0x%x/length %d, ivlen=%d"
+ " data_off: 0x%x\n",
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
/* o/p fle */
DPAA2_SET_FLE_ADDR(op_fle, DPAA2_VADDR_TO_IOVA(sge));
@@ -901,10 +908,10 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
}
DPAA2_SET_FLE_FIN(sge);
- PMD_TX_LOG(DEBUG,
- "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
- flc, fle, fle->addr_hi, fle->addr_lo,
- fle->length);
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER SG: 1 - flc = %p, fle = %p FLEaddr = %x-%x, len %d\n",
+ flc, fle, fle->addr_hi, fle->addr_lo,
+ fle->length);
/* i/p fle */
mbuf = sym_op->m_src;
@@ -944,13 +951,14 @@ build_cipher_sg_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- PMD_TX_LOG(DEBUG,
- "CIPHER SG: fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
- (void *)DPAA2_GET_FD_ADDR(fd),
- DPAA2_GET_FD_BPID(fd),
- rte_dpaa2_bpid_info[bpid].meta_data_size,
- DPAA2_GET_FD_OFFSET(fd),
- DPAA2_GET_FD_LEN(fd));
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER SG: fdaddr =%" PRIx64 " bpid =%d meta =%d"
+ " off =%d, len =%d\n",
+ DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[bpid].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
return 0;
}
@@ -976,7 +984,7 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
retval = rte_mempool_get(priv->fle_pool, (void **)(&fle));
if (retval) {
- RTE_LOG(ERR, PMD, "CIPHER: Memory alloc failed for SGE\n");
+ DPAA2_SEC_ERR("CIPHER: Memory alloc failed for SGE");
return -1;
}
memset(fle, 0, FLE_POOL_BUF_SIZE);
@@ -986,8 +994,8 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
* to get the MBUF Addr from the previous FLE.
* We can have a better approach to use the inline Mbuf
*/
- DPAA2_SET_FLE_ADDR(fle, DPAA2_OP_VADDR_TO_IOVA(op));
- DPAA2_FLE_SAVE_CTXT(fle, priv);
+ DPAA2_SET_FLE_ADDR(fle, (size_t)op);
+ DPAA2_FLE_SAVE_CTXT(fle, (ptrdiff_t)priv);
fle = fle + 1;
sge = fle + 2;
@@ -1012,12 +1020,13 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
DPAA2_SET_FD_COMPOUND_FMT(fd);
DPAA2_SET_FD_FLC(fd, DPAA2_VADDR_TO_IOVA(flc));
- PMD_TX_LOG(DEBUG,
- "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d, data_off: 0x%x",
- sym_op->cipher.data.offset,
- sym_op->cipher.data.length,
- sess->iv.length,
- sym_op->m_src->data_off);
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER: cipher_off: 0x%x/length %d, ivlen=%d,"
+ " data_off: 0x%x\n",
+ sym_op->cipher.data.offset,
+ sym_op->cipher.data.length,
+ sess->iv.length,
+ sym_op->m_src->data_off);
DPAA2_SET_FLE_ADDR(fle, DPAA2_MBUF_VADDR_TO_IOVA(dst));
DPAA2_SET_FLE_OFFSET(fle, sym_op->cipher.data.offset +
@@ -1025,10 +1034,10 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
fle->length = sym_op->cipher.data.length + sess->iv.length;
- PMD_TX_LOG(DEBUG,
- "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d",
- flc, fle, fle->addr_hi, fle->addr_lo,
- fle->length);
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER: 1 - flc = %p, fle = %p FLEaddr = %x-%x, length %d\n",
+ flc, fle, fle->addr_hi, fle->addr_lo,
+ fle->length);
fle++;
@@ -1049,13 +1058,14 @@ build_cipher_fd(dpaa2_sec_session *sess, struct rte_crypto_op *op,
DPAA2_SET_FLE_FIN(sge);
DPAA2_SET_FLE_FIN(fle);
- PMD_TX_LOG(DEBUG,
- "CIPHER: fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
- (void *)DPAA2_GET_FD_ADDR(fd),
- DPAA2_GET_FD_BPID(fd),
- rte_dpaa2_bpid_info[bpid].meta_data_size,
- DPAA2_GET_FD_OFFSET(fd),
- DPAA2_GET_FD_LEN(fd));
+ DPAA2_SEC_DP_DEBUG(
+ "CIPHER: fdaddr =%" PRIx64 " bpid =%d meta =%d"
+ " off =%d, len =%d\n",
+ DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[bpid].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
return 0;
}
@@ -1095,7 +1105,7 @@ build_sec_fd(struct rte_crypto_op *op,
break;
case DPAA2_SEC_HASH_CIPHER:
default:
- RTE_LOG(ERR, PMD, "error: Unsupported session\n");
+ DPAA2_SEC_ERR("error: Unsupported session");
}
} else {
switch (sess->ctxt_type) {
@@ -1116,7 +1126,7 @@ build_sec_fd(struct rte_crypto_op *op,
break;
case DPAA2_SEC_HASH_CIPHER:
default:
- RTE_LOG(ERR, PMD, "error: Unsupported session\n");
+ DPAA2_SEC_ERR("error: Unsupported session");
}
}
return ret;
@@ -1143,7 +1153,7 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
return 0;
if (ops[0]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- RTE_LOG(ERR, PMD, "sessionless crypto op not supported\n");
+ DPAA2_SEC_ERR("sessionless crypto op not supported");
return 0;
}
/*Prepare enqueue descriptor*/
@@ -1152,14 +1162,14 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
qbman_eq_desc_set_response(&eqdesc, 0, 0);
qbman_eq_desc_set_fq(&eqdesc, dpaa2_qp->tx_vq.fqid);
- if (!DPAA2_PER_LCORE_SEC_DPIO) {
- ret = dpaa2_affine_qbman_swp_sec();
+ if (!DPAA2_PER_LCORE_DPIO) {
+ ret = dpaa2_affine_qbman_swp();
if (ret) {
- RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ DPAA2_SEC_ERR("Failure in affining portal");
return 0;
}
}
- swp = DPAA2_PER_LCORE_SEC_PORTAL;
+ swp = DPAA2_PER_LCORE_PORTAL;
while (nb_ops) {
frames_to_send = (nb_ops >> 3) ? MAX_TX_RING_SLOTS : nb_ops;
@@ -1171,8 +1181,8 @@ dpaa2_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
bpid = mempool_to_bpid(mb_pool);
ret = build_sec_fd(*ops, &fd_arr[loop], bpid);
if (ret) {
- PMD_DRV_LOG(ERR, "error: Improper packet"
- " contents for crypto operation\n");
+ DPAA2_SEC_ERR("error: Improper packet contents"
+ " for crypto operation");
goto skip_tx;
}
ops++;
@@ -1206,7 +1216,7 @@ sec_simple_fd_to_mbuf(const struct qbman_fd *fd, __rte_unused uint8_t id)
DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
- op = (struct rte_crypto_op *)mbuf->buf_iova;
+ op = (struct rte_crypto_op *)(size_t)mbuf->buf_iova;
mbuf->buf_iova = op->sym->aead.digest.phys_addr;
op->sym->aead.digest.phys_addr = 0L;
@@ -1236,8 +1246,8 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
fle = (struct qbman_fle *)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
- PMD_RX_LOG(DEBUG, "FLE addr = %x - %x, offset = %x",
- fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
+ DPAA2_SEC_DP_DEBUG("FLE addr = %x - %x, offset = %x\n",
+ fle->addr_hi, fle->addr_lo, fle->fin_bpid_offset);
/* we are using the first FLE entry to store Mbuf.
 * Currently we do not know which FLE has the mbuf stored.
@@ -1248,11 +1258,10 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
if (unlikely(DPAA2_GET_FD_IVP(fd))) {
/* TODO complete it. */
- RTE_LOG(ERR, PMD, "error: Non inline buffer - WHAT to DO?\n");
+ DPAA2_SEC_ERR("error: non inline buffer");
return NULL;
}
- op = (struct rte_crypto_op *)DPAA2_IOVA_TO_VADDR(
- DPAA2_GET_FLE_ADDR((fle - 1)));
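+	/* The enqueue path now stores the op's virtual address in the
+	 * FLE, so no IOVA-to-VA translation is needed here.
+	 */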
+ op = (struct rte_crypto_op *)DPAA2_GET_FLE_ADDR((fle - 1));
 /* Prefetch op */
src = op->sym->m_src;
@@ -1264,19 +1273,19 @@ sec_fd_to_mbuf(const struct qbman_fd *fd, uint8_t driver_id)
} else
dst = src;
- PMD_RX_LOG(DEBUG, "mbuf %p BMAN buf addr %p",
- (void *)dst, dst->buf_addr);
-
- PMD_RX_LOG(DEBUG, "fdaddr =%p bpid =%d meta =%d off =%d, len =%d",
- (void *)DPAA2_GET_FD_ADDR(fd),
- DPAA2_GET_FD_BPID(fd),
- rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
- DPAA2_GET_FD_OFFSET(fd),
- DPAA2_GET_FD_LEN(fd));
+ DPAA2_SEC_DP_DEBUG("mbuf %p BMAN buf addr %p,"
+ " fdaddr =%" PRIx64 " bpid =%d meta =%d off =%d, len =%d\n",
+ (void *)dst,
+ dst->buf_addr,
+ DPAA2_GET_FD_ADDR(fd),
+ DPAA2_GET_FD_BPID(fd),
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
+ DPAA2_GET_FD_OFFSET(fd),
+ DPAA2_GET_FD_LEN(fd));
/* free the fle memory */
if (likely(rte_pktmbuf_is_contiguous(src))) {
- priv = (struct ctxt_priv *)DPAA2_GET_FLE_CTXT(fle - 1);
+ priv = (struct ctxt_priv *)(size_t)DPAA2_GET_FLE_CTXT(fle - 1);
rte_mempool_put(priv->fle_pool, (void *)(fle-1));
} else
rte_free((void *)(fle-1));
@@ -1300,14 +1309,14 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
const struct qbman_fd *fd;
struct qbman_pull_desc pulldesc;
- if (!DPAA2_PER_LCORE_SEC_DPIO) {
- ret = dpaa2_affine_qbman_swp_sec();
+ if (!DPAA2_PER_LCORE_DPIO) {
+ ret = dpaa2_affine_qbman_swp();
if (ret) {
- RTE_LOG(ERR, PMD, "Failure in affining portal\n");
+ DPAA2_SEC_ERR("Failure in affining portal");
return 0;
}
}
- swp = DPAA2_PER_LCORE_SEC_PORTAL;
+ swp = DPAA2_PER_LCORE_PORTAL;
dq_storage = dpaa2_qp->rx_vq.q_storage->dq_storage[0];
qbman_pull_desc_clear(&pulldesc);
@@ -1322,8 +1331,8 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
/*Issue a volatile dequeue command. */
while (1) {
if (qbman_swp_pull(swp, &pulldesc)) {
- RTE_LOG(WARNING, PMD,
- "SEC VDQ command is not issued : QBMAN busy\n");
+ DPAA2_SEC_WARN(
+ "SEC VDQ command is not issued : QBMAN busy");
/* Portal was busy, try again */
continue;
}
@@ -1355,7 +1364,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
status = (uint8_t)qbman_result_DQ_flags(dq_storage);
if (unlikely(
(status & QBMAN_DQ_STAT_VALIDFRAME) == 0)) {
- PMD_RX_LOG(DEBUG, "No frame is delivered");
+ DPAA2_SEC_DP_DEBUG("No frame is delivered\n");
continue;
}
}
@@ -1365,8 +1374,8 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
if (unlikely(fd->simple.frc)) {
/* TODO Parse SEC errors */
- RTE_LOG(ERR, PMD, "SEC returned Error - %x\n",
- fd->simple.frc);
+ DPAA2_SEC_ERR("SEC returned Error - %x",
+ fd->simple.frc);
ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_ERROR;
} else {
ops[num_rx]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
@@ -1378,7 +1387,7 @@ dpaa2_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
dpaa2_qp->rx_vq.rx_pkts += num_rx;
- PMD_RX_LOG(DEBUG, "SEC Received %d Packets", num_rx);
+ DPAA2_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
/*Return the total number of packets received to DPAA2 app*/
return num_rx;
}
@@ -1420,11 +1429,11 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
/* If qp is already in use free ring memory and qp metadata. */
if (dev->data->queue_pairs[qp_id] != NULL) {
- PMD_DRV_LOG(INFO, "QP already setup");
+ DPAA2_SEC_INFO("QP already setup");
return 0;
}
- PMD_DRV_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
+ DPAA2_SEC_DEBUG("dev =%p, queue =%d, conf =%p",
dev, qp_id, qp_conf);
memset(&cfg, 0, sizeof(struct dpseci_rx_queue_cfg));
@@ -1432,7 +1441,7 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
qp = rte_malloc(NULL, sizeof(struct dpaa2_sec_qp),
RTE_CACHE_LINE_SIZE);
if (!qp) {
- RTE_LOG(ERR, PMD, "malloc failed for rx/tx queues\n");
+ DPAA2_SEC_ERR("malloc failed for rx/tx queues");
return -1;
}
@@ -1442,20 +1451,20 @@ dpaa2_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
sizeof(struct queue_storage_info_t),
RTE_CACHE_LINE_SIZE);
if (!qp->rx_vq.q_storage) {
- RTE_LOG(ERR, PMD, "malloc failed for q_storage\n");
+ DPAA2_SEC_ERR("malloc failed for q_storage");
return -1;
}
memset(qp->rx_vq.q_storage, 0, sizeof(struct queue_storage_info_t));
if (dpaa2_alloc_dq_storage(qp->rx_vq.q_storage)) {
- RTE_LOG(ERR, PMD, "dpaa2_alloc_dq_storage failed\n");
+ DPAA2_SEC_ERR("Unable to allocate dequeue storage");
return -1;
}
dev->data->queue_pairs[qp_id] = qp;
cfg.options = cfg.options | DPSECI_QUEUE_OPT_USER_CTX;
- cfg.user_ctx = (uint64_t)(&qp->rx_vq);
+ cfg.user_ctx = (size_t)(&qp->rx_vq);
retcode = dpseci_set_rx_queue(dpseci, CMD_PRI_LOW, priv->token,
qp_id, &cfg);
return retcode;
@@ -1517,7 +1526,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
+ DPAA2_SEC_ERR("No Memory for priv CTXT");
return -1;
}
@@ -1528,7 +1537,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
+ DPAA2_SEC_ERR("No Memory for cipher key");
rte_free(priv);
return -1;
}
@@ -1536,7 +1545,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
memcpy(session->cipher_key.data, xform->cipher.key.data,
xform->cipher.key.length);
- cipherdata.key = (uint64_t)session->cipher_key.data;
+ cipherdata.key = (size_t)session->cipher_key.data;
cipherdata.keylen = session->cipher_key.length;
cipherdata.key_enc_flags = 0;
cipherdata.key_type = RTA_DATA_IMM;
@@ -1571,11 +1580,11 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
case RTE_CRYPTO_CIPHER_ZUC_EEA3:
case RTE_CRYPTO_CIPHER_NULL:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
+ DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
xform->cipher.algo);
goto error_out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
+ DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
xform->cipher.algo);
goto error_out;
}
@@ -1586,7 +1595,7 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
&cipherdata, NULL, session->iv.length,
session->dir);
if (bufsize < 0) {
- RTE_LOG(ERR, PMD, "Crypto: Descriptor build failed\n");
+ DPAA2_SEC_ERR("Crypto: Descriptor build failed");
goto error_out;
}
flc->dhr = 0;
@@ -1595,16 +1604,15 @@ dpaa2_sec_cipher_init(struct rte_cryptodev *dev,
flc->word1_sdl = (uint8_t)bufsize;
flc->word2_rflc_31_0 = lower_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
flc->word3_rflc_63_32 = upper_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
session->ctxt = priv;
for (i = 0; i < bufsize; i++)
- PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
- i, priv->flc_desc[0].desc[i]);
+ DPAA2_SEC_DEBUG("DESC[%d]:0x%x", i, priv->flc_desc[0].desc[i]);
return 0;
@@ -1621,7 +1629,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
{
struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo authdata;
- unsigned int bufsize, i;
+ int bufsize, i;
struct ctxt_priv *priv;
struct sec_flow_context *flc;
@@ -1633,7 +1641,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
sizeof(struct sec_flc_desc),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
+ DPAA2_SEC_ERR("No Memory for priv CTXT");
return -1;
}
@@ -1643,7 +1651,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for auth key\n");
+ DPAA2_SEC_ERR("Unable to allocate memory for auth key");
rte_free(priv);
return -1;
}
@@ -1651,7 +1659,7 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
memcpy(session->auth_key.data, xform->auth.key.data,
xform->auth.key.length);
- authdata.key = (uint64_t)session->auth_key.data;
+ authdata.key = (size_t)session->auth_key.data;
authdata.keylen = session->auth_key.length;
authdata.key_enc_flags = 0;
authdata.key_type = RTA_DATA_IMM;
@@ -1703,12 +1711,12 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_AES_CMAC:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
- xform->auth.algo);
+		DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
+			      xform->auth.algo);
goto error_out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
- xform->auth.algo);
+ DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
+ xform->auth.algo);
goto error_out;
}
session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
@@ -1717,18 +1725,22 @@ dpaa2_sec_auth_init(struct rte_cryptodev *dev,
bufsize = cnstr_shdsc_hmac(priv->flc_desc[DESC_INITFINAL].desc,
1, 0, &authdata, !session->dir,
session->digest_length);
+ if (bufsize < 0) {
+ DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+ goto error_out;
+ }
flc->word1_sdl = (uint8_t)bufsize;
flc->word2_rflc_31_0 = lower_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
flc->word3_rflc_63_32 = upper_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
session->ctxt = priv;
for (i = 0; i < bufsize; i++)
- PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
- i, priv->flc_desc[DESC_INITFINAL].desc[i]);
+ DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
+ i, priv->flc_desc[DESC_INITFINAL].desc[i]);
return 0;
@@ -1747,7 +1759,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo aeaddata;
- unsigned int bufsize, i;
+ int bufsize, i;
struct ctxt_priv *priv;
struct sec_flow_context *flc;
struct rte_crypto_aead_xform *aead_xform = &xform->aead;
@@ -1765,7 +1777,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
+ DPAA2_SEC_ERR("No Memory for priv CTXT");
return -1;
}
@@ -1775,7 +1787,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
RTE_CACHE_LINE_SIZE);
if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for aead key\n");
+ DPAA2_SEC_ERR("No Memory for aead key");
rte_free(priv);
return -1;
}
@@ -1786,7 +1798,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
session->aead_key.length = aead_xform->key.length;
ctxt->auth_only_len = aead_xform->aad_length;
- aeaddata.key = (uint64_t)session->aead_key.data;
+ aeaddata.key = (size_t)session->aead_key.data;
aeaddata.keylen = session->aead_key.length;
aeaddata.key_enc_flags = 0;
aeaddata.key_type = RTA_DATA_IMM;
@@ -1798,12 +1810,12 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
break;
case RTE_CRYPTO_AEAD_AES_CCM:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported AEAD alg %u\n",
- aead_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Unsupported AEAD alg %u",
+ aead_xform->algo);
goto error_out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined AEAD specified %u\n",
- aead_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Undefined AEAD specified %u",
+ aead_xform->algo);
goto error_out;
}
session->dir = (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
@@ -1816,7 +1828,7 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
&priv->flc_desc[0].desc[1], 1);
if (err < 0) {
- PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
+ DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
goto error_out;
}
if (priv->flc_desc[0].desc[1] & 1) {
@@ -1838,16 +1850,21 @@ dpaa2_sec_aead_init(struct rte_cryptodev *dev,
priv->flc_desc[0].desc, 1, 0,
&aeaddata, session->iv.length,
session->digest_length);
+ if (bufsize < 0) {
+ DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+ goto error_out;
+ }
+
flc->word1_sdl = (uint8_t)bufsize;
flc->word2_rflc_31_0 = lower_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
flc->word3_rflc_63_32 = upper_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
session->ctxt = priv;
for (i = 0; i < bufsize; i++)
- PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+		DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
i, priv->flc_desc[0].desc[i]);
return 0;
@@ -1867,7 +1884,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
struct dpaa2_sec_aead_ctxt *ctxt = &session->ext_params.aead_ctxt;
struct dpaa2_sec_dev_private *dev_priv = dev->data->dev_private;
struct alginfo authdata, cipherdata;
- unsigned int bufsize, i;
+ int bufsize, i;
struct ctxt_priv *priv;
struct sec_flow_context *flc;
struct rte_crypto_cipher_xform *cipher_xform;
@@ -1899,7 +1916,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
sizeof(struct ctxt_priv) + sizeof(struct sec_flc_desc),
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "No Memory for priv CTXT\n");
+ DPAA2_SEC_ERR("No Memory for priv CTXT");
return -1;
}
@@ -1909,7 +1926,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
+ DPAA2_SEC_ERR("No Memory for cipher key");
rte_free(priv);
return -1;
}
@@ -1917,7 +1934,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for auth key\n");
+ DPAA2_SEC_ERR("No Memory for auth key");
rte_free(session->cipher_key.data);
rte_free(priv);
return -1;
@@ -1928,7 +1945,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
memcpy(session->auth_key.data, auth_xform->key.data,
auth_xform->key.length);
- authdata.key = (uint64_t)session->auth_key.data;
+ authdata.key = (size_t)session->auth_key.data;
authdata.keylen = session->auth_key.length;
authdata.key_enc_flags = 0;
authdata.key_type = RTA_DATA_IMM;
@@ -1980,15 +1997,15 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_AES_CMAC:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
- auth_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
+ auth_xform->algo);
goto error_out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
- auth_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
+ auth_xform->algo);
goto error_out;
}
- cipherdata.key = (uint64_t)session->cipher_key.data;
+ cipherdata.key = (size_t)session->cipher_key.data;
cipherdata.keylen = session->cipher_key.length;
cipherdata.key_enc_flags = 0;
cipherdata.key_type = RTA_DATA_IMM;
@@ -2014,12 +2031,12 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_ECB:
case RTE_CRYPTO_CIPHER_KASUMI_F8:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
- cipher_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
+ cipher_xform->algo);
goto error_out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
- cipher_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
+ cipher_xform->algo);
goto error_out;
}
session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
@@ -2033,7 +2050,7 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
&priv->flc_desc[0].desc[2], 2);
if (err < 0) {
- PMD_DRV_LOG(ERR, "Crypto: Incorrect key lengths\n");
+ DPAA2_SEC_ERR("Crypto: Incorrect key lengths");
goto error_out;
}
if (priv->flc_desc[0].desc[2] & 1) {
@@ -2059,21 +2076,25 @@ dpaa2_sec_aead_chain_init(struct rte_cryptodev *dev,
ctxt->auth_only_len,
session->digest_length,
session->dir);
+ if (bufsize < 0) {
+ DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+ goto error_out;
+ }
} else {
- RTE_LOG(ERR, PMD, "Hash before cipher not supported\n");
+ DPAA2_SEC_ERR("Hash before cipher not supported");
goto error_out;
}
flc->word1_sdl = (uint8_t)bufsize;
flc->word2_rflc_31_0 = lower_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
flc->word3_rflc_63_32 = upper_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
session->ctxt = priv;
for (i = 0; i < bufsize; i++)
- PMD_DRV_LOG(DEBUG, "DESC[%d]:0x%x\n",
+ DPAA2_SEC_DEBUG("DESC[%d]:0x%x",
i, priv->flc_desc[0].desc[i]);
return 0;
@@ -2094,7 +2115,7 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
PMD_INIT_FUNC_TRACE();
if (unlikely(sess == NULL)) {
- RTE_LOG(ERR, PMD, "invalid session struct\n");
+ DPAA2_SEC_ERR("Invalid session struct");
return -1;
}
@@ -2130,7 +2151,7 @@ dpaa2_sec_set_session_parameters(struct rte_cryptodev *dev,
dpaa2_sec_aead_init(dev, xform, session);
} else {
- RTE_LOG(ERR, PMD, "Invalid crypto type\n");
+ DPAA2_SEC_ERR("Invalid crypto type");
return -EINVAL;
}
@@ -2150,7 +2171,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
struct ipsec_encap_pdb encap_pdb;
struct ipsec_decap_pdb decap_pdb;
struct alginfo authdata, cipherdata;
- unsigned int bufsize;
+ int bufsize;
struct sec_flow_context *flc;
PMD_INIT_FUNC_TRACE();
@@ -2168,7 +2189,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
RTE_CACHE_LINE_SIZE);
if (priv == NULL) {
- RTE_LOG(ERR, PMD, "\nNo memory for priv CTXT");
+ DPAA2_SEC_ERR("No memory for priv CTXT");
return -ENOMEM;
}
@@ -2180,7 +2201,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL &&
cipher_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
+ DPAA2_SEC_ERR("No Memory for cipher key");
rte_free(priv);
return -ENOMEM;
}
@@ -2191,7 +2212,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL &&
auth_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for auth key\n");
+ DPAA2_SEC_ERR("No Memory for auth key");
rte_free(session->cipher_key.data);
rte_free(priv);
return -ENOMEM;
@@ -2202,7 +2223,7 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
memcpy(session->auth_key.data, auth_xform->key.data,
auth_xform->key.length);
- authdata.key = (uint64_t)session->auth_key.data;
+ authdata.key = (size_t)session->auth_key.data;
authdata.keylen = session->auth_key.length;
authdata.key_enc_flags = 0;
authdata.key_type = RTA_DATA_IMM;
@@ -2253,15 +2274,15 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_KASUMI_F9:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
- auth_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Unsupported auth alg %u",
+ auth_xform->algo);
goto out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
- auth_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Undefined Auth specified %u",
+ auth_xform->algo);
goto out;
}
- cipherdata.key = (uint64_t)session->cipher_key.data;
+ cipherdata.key = (size_t)session->cipher_key.data;
cipherdata.keylen = session->cipher_key.length;
cipherdata.key_enc_flags = 0;
cipherdata.key_type = RTA_DATA_IMM;
@@ -2289,12 +2310,12 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_ECB:
case RTE_CRYPTO_CIPHER_KASUMI_F8:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
- cipher_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Unsupported Cipher alg %u",
+ cipher_xform->algo);
goto out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
- cipher_xform->algo);
+ DPAA2_SEC_ERR("Crypto: Undefined Cipher specified %u",
+ cipher_xform->algo);
goto out;
}
@@ -2340,15 +2361,21 @@ dpaa2_sec_set_ipsec_session(struct rte_cryptodev *dev,
1, 0, &decap_pdb, &cipherdata, &authdata);
} else
goto out;
+
+ if (bufsize < 0) {
+ DPAA2_SEC_ERR("Crypto: Invalid buffer length");
+ goto out;
+ }
+
flc->word1_sdl = (uint8_t)bufsize;
/* Enable the stashing control bit */
DPAA2_SET_FLC_RSC(flc);
flc->word2_rflc_31_0 = lower_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq) | 0x14);
flc->word3_rflc_63_32 = upper_32_bits(
- (uint64_t)&(((struct dpaa2_sec_qp *)
+ (size_t)&(((struct dpaa2_sec_qp *)
dev->data->queue_pairs[0])->rx_vq));
/* Set EWS bit i.e. enable write-safe */
@@ -2379,8 +2406,7 @@ dpaa2_sec_security_session_create(void *dev,
int ret;
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
+ DPAA2_SEC_ERR("Couldn't get object from session mempool");
return -ENOMEM;
}
@@ -2395,9 +2421,7 @@ dpaa2_sec_security_session_create(void *dev,
return -EINVAL;
}
if (ret != 0) {
- PMD_DRV_LOG(ERR,
- "DPAA2 PMD: failed to configure session parameters");
-
+ DPAA2_SEC_ERR("Failed to configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
@@ -2441,16 +2465,13 @@ dpaa2_sec_session_configure(struct rte_cryptodev *dev,
int ret;
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
+ DPAA2_SEC_ERR("Couldn't get object from session mempool");
return -ENOMEM;
}
ret = dpaa2_sec_set_session_parameters(dev, xform, sess_private_data);
if (ret != 0) {
- PMD_DRV_LOG(ERR, "DPAA2 PMD: failed to configure "
- "session parameters");
-
+ DPAA2_SEC_ERR("Failed to configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
@@ -2511,14 +2532,13 @@ dpaa2_sec_dev_start(struct rte_cryptodev *dev)
ret = dpseci_enable(dpseci, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_INIT_LOG(ERR, "DPSECI with HW_ID = %d ENABLE FAILED\n",
- priv->hw_id);
+ DPAA2_SEC_ERR("DPSECI with HW_ID = %d ENABLE FAILED",
+ priv->hw_id);
goto get_attr_failure;
}
ret = dpseci_get_attributes(dpseci, CMD_PRI_LOW, priv->token, &attr);
if (ret) {
- PMD_INIT_LOG(ERR,
- "DPSEC ATTRIBUTE READ FAILED, disabling DPSEC\n");
+ DPAA2_SEC_ERR("DPSEC ATTRIBUTE READ FAILED, disabling DPSEC");
goto get_attr_failure;
}
for (i = 0; i < attr.num_rx_queues && qp[i]; i++) {
@@ -2526,14 +2546,14 @@ dpaa2_sec_dev_start(struct rte_cryptodev *dev)
dpseci_get_rx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
&rx_attr);
dpaa2_q->fqid = rx_attr.fqid;
- PMD_INIT_LOG(DEBUG, "rx_fqid: %d", dpaa2_q->fqid);
+ DPAA2_SEC_DEBUG("rx_fqid: %d", dpaa2_q->fqid);
}
for (i = 0; i < attr.num_tx_queues && qp[i]; i++) {
dpaa2_q = &qp[i]->tx_vq;
dpseci_get_tx_queue(dpseci, CMD_PRI_LOW, priv->token, i,
&tx_attr);
dpaa2_q->fqid = tx_attr.fqid;
- PMD_INIT_LOG(DEBUG, "tx_fqid: %d", dpaa2_q->fqid);
+ DPAA2_SEC_DEBUG("tx_fqid: %d", dpaa2_q->fqid);
}
return 0;
@@ -2553,15 +2573,14 @@ dpaa2_sec_dev_stop(struct rte_cryptodev *dev)
ret = dpseci_disable(dpseci, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_INIT_LOG(ERR, "Failure in disabling dpseci %d device",
+ DPAA2_SEC_ERR("Failure in disabling dpseci %d device",
priv->hw_id);
return;
}
ret = dpseci_reset(dpseci, CMD_PRI_LOW, priv->token);
if (ret < 0) {
- PMD_INIT_LOG(ERR, "SEC Device cannot be reset:Error = %0x\n",
- ret);
+ DPAA2_SEC_ERR("SEC Device cannot be reset:Error = %0x", ret);
return;
}
}
@@ -2585,8 +2604,7 @@ dpaa2_sec_dev_close(struct rte_cryptodev *dev)
/*Close the device at underlying layer*/
ret = dpseci_close(dpseci, CMD_PRI_LOW, priv->token);
if (ret) {
- PMD_INIT_LOG(ERR, "Failure closing dpseci device with"
- " error code %d\n", ret);
+ DPAA2_SEC_ERR("Failure closing dpseci device: err(%d)", ret);
return -1;
}
@@ -2626,12 +2644,12 @@ void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
PMD_INIT_FUNC_TRACE();
if (stats == NULL) {
- PMD_DRV_LOG(ERR, "invalid stats ptr NULL");
+ DPAA2_SEC_ERR("Invalid stats ptr NULL");
return;
}
for (i = 0; i < dev->data->nb_queue_pairs; i++) {
if (qp[i] == NULL) {
- PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
+ DPAA2_SEC_DEBUG("Uninitialised queue pair");
continue;
}
@@ -2644,16 +2662,16 @@ void dpaa2_sec_stats_get(struct rte_cryptodev *dev,
ret = dpseci_get_sec_counters(dpseci, CMD_PRI_LOW, priv->token,
&counters);
if (ret) {
- PMD_DRV_LOG(ERR, "dpseci_get_sec_counters failed\n");
+ DPAA2_SEC_ERR("SEC counters failed");
} else {
- PMD_DRV_LOG(INFO, "dpseci hw stats:"
- "\n\tNumber of Requests Dequeued = %lu"
- "\n\tNumber of Outbound Encrypt Requests = %lu"
- "\n\tNumber of Inbound Decrypt Requests = %lu"
- "\n\tNumber of Outbound Bytes Encrypted = %lu"
- "\n\tNumber of Outbound Bytes Protected = %lu"
- "\n\tNumber of Inbound Bytes Decrypted = %lu"
- "\n\tNumber of Inbound Bytes Validated = %lu",
+ DPAA2_SEC_INFO("dpseci hardware stats:"
+ "\n\tNum of Requests Dequeued = %" PRIu64
+ "\n\tNum of Outbound Encrypt Requests = %" PRIu64
+ "\n\tNum of Inbound Decrypt Requests = %" PRIu64
+ "\n\tNum of Outbound Bytes Encrypted = %" PRIu64
+ "\n\tNum of Outbound Bytes Protected = %" PRIu64
+ "\n\tNum of Inbound Bytes Decrypted = %" PRIu64
+ "\n\tNum of Inbound Bytes Validated = %" PRIu64,
counters.dequeued_requests,
counters.ob_enc_requests,
counters.ib_dec_requests,
@@ -2675,7 +2693,7 @@ void dpaa2_sec_stats_reset(struct rte_cryptodev *dev)
for (i = 0; i < dev->data->nb_queue_pairs; i++) {
if (qp[i] == NULL) {
- PMD_DRV_LOG(DEBUG, "Uninitialised queue pair");
+ DPAA2_SEC_DEBUG("Uninitialised queue pair");
continue;
}
qp[i]->tx_vq.rx_pkts = 0;
@@ -2729,8 +2747,8 @@ dpaa2_sec_uninit(const struct rte_cryptodev *dev)
rte_mempool_free(internals->fle_pool);
- PMD_INIT_LOG(INFO, "Closing DPAA2_SEC device %s on numa socket %u\n",
- dev->data->name, rte_socket_id());
+ DPAA2_SEC_INFO("Closing DPAA2_SEC device %s on numa socket %u",
+ dev->data->name, rte_socket_id());
return 0;
}
@@ -2751,7 +2769,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
PMD_INIT_FUNC_TRACE();
dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
if (dpaa2_dev == NULL) {
- PMD_INIT_LOG(ERR, "dpaa2_device not found\n");
+ DPAA2_SEC_ERR("DPAA2 SEC device not found");
return -1;
}
hw_id = dpaa2_dev->object_id;
@@ -2776,7 +2794,7 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
* RX function
*/
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- PMD_INIT_LOG(DEBUG, "Device already init by primary process");
+ DPAA2_SEC_DEBUG("Device already init by primary process");
return 0;
}
@@ -2794,21 +2812,21 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
dpseci = (struct fsl_mc_io *)rte_calloc(NULL, 1,
sizeof(struct fsl_mc_io), 0);
if (!dpseci) {
- PMD_INIT_LOG(ERR,
- "Error in allocating the memory for dpsec object");
+ DPAA2_SEC_ERR(
+ "Error in allocating the memory for dpsec object");
return -1;
}
dpseci->regs = rte_mcp_ptr_list[0];
retcode = dpseci_open(dpseci, CMD_PRI_LOW, hw_id, &token);
if (retcode != 0) {
- PMD_INIT_LOG(ERR, "Cannot open the dpsec device: Error = %x",
- retcode);
+ DPAA2_SEC_ERR("Cannot open the dpsec device: Error = %x",
+ retcode);
goto init_error;
}
retcode = dpseci_get_attributes(dpseci, CMD_PRI_LOW, token, &attr);
if (retcode != 0) {
- PMD_INIT_LOG(ERR,
+ DPAA2_SEC_ERR(
"Cannot get dpsec device attributed: Error = %x",
retcode);
goto init_error;
@@ -2828,15 +2846,15 @@ dpaa2_sec_dev_init(struct rte_cryptodev *cryptodev)
NULL, NULL, NULL, NULL,
SOCKET_ID_ANY, 0);
if (!internals->fle_pool) {
- RTE_LOG(ERR, PMD, "%s create failed\n", str);
+ DPAA2_SEC_ERR("Mempool (%s) creation failed", str);
goto init_error;
}
- PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
+ DPAA2_SEC_INFO("driver %s: created", cryptodev->data->name);
return 0;
init_error:
- PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);
+ DPAA2_SEC_ERR("driver %s: create failed", cryptodev->data->name);
/* dpaa2_sec_uninit(crypto_dev_name); */
return -EFAULT;
@@ -2866,7 +2884,7 @@ cryptodev_dpaa2_sec_probe(struct rte_dpaa2_driver *dpaa2_drv,
if (cryptodev->data->dev_private == NULL)
rte_panic("Cannot allocate memzone for private "
- "device data");
+ "device data");
}
dpaa2_dev->cryptodev = cryptodev;
@@ -2919,5 +2937,15 @@ static struct rte_dpaa2_driver rte_dpaa2_sec_driver = {
static struct cryptodev_driver dpaa2_sec_crypto_drv;
RTE_PMD_REGISTER_DPAA2(CRYPTODEV_NAME_DPAA2_SEC_PMD, rte_dpaa2_sec_driver);
-RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv, rte_dpaa2_sec_driver,
- cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa2_sec_crypto_drv,
+ rte_dpaa2_sec_driver.driver, cryptodev_driver_id);
+
+RTE_INIT(dpaa2_sec_init_log);
+static void
+dpaa2_sec_init_log(void)
+{
+ /* Crypto PMD level logs */
+ dpaa2_logtype_sec = rte_log_register("pmd.crypto.dpaa2");
+ if (dpaa2_logtype_sec >= 0)
+ rte_log_set_level(dpaa2_logtype_sec, RTE_LOG_NOTICE);
+}
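
The constructor above replaces the old compile-time debug switches with a dynamic log type. As a hedged sketch (standard rte_log API; the helper name is illustrative), an application can raise the level at runtime without rebuilding the driver:

    #include <rte_log.h>

    /* Sketch only: rte_log_register() returns the already-registered id
     * when the name exists, so this is safe after the constructor ran. */
    static void
    enable_dpaa2_sec_debug(void)
    {
            int lt = rte_log_register("pmd.crypto.dpaa2");

            if (lt >= 0)
                    rte_log_set_level(lt, RTE_LOG_DEBUG);
    }

The same effect is available from the EAL command line, e.g. --log-level=pmd.crypto.dpaa2,8.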
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
index 23251141..7c1f5e73 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_logs.h
@@ -8,37 +8,35 @@
#ifndef _DPAA2_SEC_LOGS_H_
#define _DPAA2_SEC_LOGS_H_
-#define PMD_INIT_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
-
-#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_INIT
-#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
-#else
-#define PMD_INIT_FUNC_TRACE() do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_RX
-#define PMD_RX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_TX
-#define PMD_TX_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_DPAA2_SEC_DEBUG_DRIVER
-#define PMD_DRV_LOG_RAW(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
-#else
-#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
-#endif
-
-#define PMD_DRV_LOG(level, fmt, args...) \
- PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+extern int dpaa2_logtype_sec;
+
+#define DPAA2_SEC_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa2_logtype_sec, "dpaa2_sec: " \
+ fmt "\n", ##args)
+
+#define DPAA2_SEC_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa2_logtype_sec, "dpaa2_sec: %s(): " \
+ fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() DPAA2_SEC_LOG(DEBUG, " >>")
+
+#define DPAA2_SEC_INFO(fmt, args...) \
+ DPAA2_SEC_LOG(INFO, fmt, ## args)
+#define DPAA2_SEC_ERR(fmt, args...) \
+ DPAA2_SEC_LOG(ERR, fmt, ## args)
+#define DPAA2_SEC_WARN(fmt, args...) \
+ DPAA2_SEC_LOG(WARNING, fmt, ## args)
+
+/* DP logs, compiled out at build time when below RTE_LOG_DP_LEVEL */
+#define DPAA2_SEC_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAA2_SEC_DP_DEBUG(fmt, args...) \
+ DPAA2_SEC_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA2_SEC_DP_INFO(fmt, args...) \
+ DPAA2_SEC_DP_LOG(INFO, fmt, ## args)
+#define DPAA2_SEC_DP_WARN(fmt, args...) \
+ DPAA2_SEC_DP_LOG(WARNING, fmt, ## args)
+
#endif /* _DPAA2_SEC_LOGS_H_ */
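
For reference, a hedged usage sketch of the macros above (call sites are illustrative, not taken from the driver): the control-path macros are gated at runtime by dpaa2_logtype_sec, while the DP variants wrap RTE_LOG_DP and disappear at build time when RTE_LOG_DP_LEVEL is below the requested level.

    static int
    example_setup(int ret)
    {
            PMD_INIT_FUNC_TRACE();          /* runtime-gated DEBUG */

            if (ret < 0) {
                    DPAA2_SEC_ERR("setup failed: %d", ret);
                    return ret;
            }
            /* DP macros do not append '\n' and compile out on release
             * builds, so they are safe on the datapath. */
            DPAA2_SEC_DP_DEBUG("fast path hit\n");
            return 0;
    }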
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index e8ac95ba..a9d83ebc 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -185,9 +185,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 16,
+ .min = 1,
.max = 16,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -206,9 +206,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 20,
+ .min = 1,
.max = 20,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -227,9 +227,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 28,
+ .min = 1,
.max = 28,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -248,9 +248,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 32,
- .max = 32,
- .increment = 0
+ .min = 1,
+ .max = 32,
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -269,9 +269,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 48,
+ .min = 1,
.max = 48,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
@@ -290,9 +290,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
.iv_size = { 0 }
}, }
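
The hunks above relax every HMAC digest_size from one fixed value to the full [1, max] range, so truncated digests can pass capability validation. A hedged sketch of a transform the old table rejected (hmac_key is assumed to be provided by the caller):

    struct rte_crypto_sym_xform auth_xform = {
            .type = RTE_CRYPTO_SYM_XFORM_AUTH,
            .next = NULL,
            .auth = {
                    .op = RTE_CRYPTO_AUTH_OP_GENERATE,
                    .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
                    .key = { .data = hmac_key, .length = 32 },
                    .digest_length = 12, /* previously forced to exactly 32 */
            },
    };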
diff --git a/drivers/crypto/dpaa2_sec/meson.build b/drivers/crypto/dpaa2_sec/meson.build
new file mode 100644
index 00000000..01afc587
--- /dev/null
+++ b/drivers/crypto/dpaa2_sec/meson.build
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['security', 'mempool_dpaa2']
+sources = files('dpaa2_sec_dpseci.c',
+ 'mc/dpseci.c')
+
+allow_experimental_apis = true
+
+includes += include_directories('mc', 'hw')
diff --git a/drivers/crypto/dpaa_sec/Makefile b/drivers/crypto/dpaa_sec/Makefile
index fe2c5932..9be44704 100644
--- a/drivers/crypto/dpaa_sec/Makefile
+++ b/drivers/crypto/dpaa_sec/Makefile
@@ -12,13 +12,8 @@ LIB = librte_pmd_dpaa_sec.a
# build flags
CFLAGS += -DALLOW_EXPERIMENTAL_API
CFLAGS += -D _GNU_SOURCE
-ifeq ($(CONFIG_RTE_LIBRTE_DPAA_SEC_DEBUG_INIT),y)
-CFLAGS += -O0 -g
-CFLAGS += "-Wno-error"
-else
CFLAGS += -O3
CFLAGS += $(WERROR_FLAGS)
-endif
CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa
CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.c b/drivers/crypto/dpaa_sec/dpaa_sec.c
index 18681cf3..06f7e437 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.c
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.c
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2017 NXP
+ * Copyright 2017-2018 NXP
*
*/
@@ -39,6 +39,8 @@
enum rta_sec_era rta_sec_era;
+int dpaa_logtype_sec;
+
static uint8_t cryptodev_driver_id;
static __thread struct rte_crypto_op **dpaa_sec_ops;
@@ -53,7 +55,7 @@ dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
if (!ctx->fd_status) {
ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
} else {
- PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
+ DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
}
@@ -69,7 +71,7 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
if (!ctx || retval) {
- PMD_TX_LOG(ERR, "Alloc sec descriptor failed!");
+ DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
return NULL;
}
/*
@@ -84,7 +86,7 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
ctx->ctx_pool = ses->ctx_pool;
- ctx->vtop_offset = (uint64_t) ctx
+ ctx->vtop_offset = (size_t) ctx
- rte_mempool_virt2iova(ctx);
return ctx;
@@ -93,43 +95,18 @@ dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
- const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
- uint64_t vaddr_64, paddr;
- int i;
-
- vaddr_64 = (uint64_t)vaddr;
- for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
- if (vaddr_64 >= memseg[i].addr_64 &&
- vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
- paddr = memseg[i].iova +
- (vaddr_64 - memseg[i].addr_64);
-
- return (rte_iova_t)paddr;
- }
- }
- return (rte_iova_t)(NULL);
-}
+ const struct rte_memseg *ms;
-/* virtual address conversin when mempool support is available for ctx */
-static inline phys_addr_t
-dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
-{
- return (uint64_t)vaddr - ctx->vtop_offset;
+ ms = rte_mem_virt2memseg(vaddr, NULL);
+ if (ms)
+ return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
+ return (rte_iova_t)0;
}
static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
- const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
- int i;
-
- for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
- if (paddr >= memseg[i].iova &&
- (char *)paddr < (char *)memseg[i].iova + memseg[i].len)
- return (void *)(memseg[i].addr_64 +
- (paddr - memseg[i].iova));
- }
- return NULL;
+ return rte_mem_iova2virt(paddr);
}
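
The two rewrites above drop the linear scan of the legacy memseg array (and the now-removed dpaa_mem_vtop_ctx() helper) in favour of the memseg-lookup API, which also copes with the non-contiguous memory layouts of this release. A minimal round-trip sketch, assuming memzone-backed memory:

    #include <rte_memzone.h>

    /* Sketch: VA -> IOVA via dpaa_mem_vtop(), back via dpaa_mem_ptov(). */
    static int
    vtop_ptov_roundtrip(void)
    {
            const struct rte_memzone *mz =
                    rte_memzone_reserve("sec_chk", 4096, SOCKET_ID_ANY, 0);

            if (mz == NULL)
                    return -1;
            return dpaa_mem_ptov(dpaa_mem_vtop(mz->addr)) == mz->addr ? 0 : -1;
    }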
static void
@@ -137,8 +114,8 @@ ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
struct qman_fq *fq,
const struct qm_mr_entry *msg)
{
- RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n",
- fq->fqid, msg->ern.rc, msg->ern.seqnum);
+ DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
+ fq->fqid, msg->ern.rc, msg->ern.seqnum);
}
/* initialize the queue with dest chan as caam chan so that
@@ -166,11 +143,11 @@ dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
fq_in->cb.ern = ern_sec_fq_handler;
- PMD_INIT_LOG(DEBUG, "in-%x out-%x", fq_in->fqid, fqid_out);
+ DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
ret = qman_init_fq(fq_in, flags, &fq_opts);
if (unlikely(ret != 0))
- PMD_INIT_LOG(ERR, "qman_init_fq failed %d", ret);
+ DPAA_SEC_ERR("qman_init_fq failed %d", ret);
return ret;
}
@@ -229,7 +206,7 @@ dpaa_sec_init_tx(struct qman_fq *fq)
ret = qman_create_fq(0, flags, fq);
if (unlikely(ret)) {
- PMD_INIT_LOG(ERR, "qman_create_fq failed");
+ DPAA_SEC_ERR("qman_create_fq failed");
return ret;
}
@@ -244,7 +221,7 @@ dpaa_sec_init_tx(struct qman_fq *fq)
ret = qman_init_fq(fq, 0, &opts);
if (unlikely(ret)) {
- PMD_INIT_LOG(ERR, "unable to init caam source fq!");
+ DPAA_SEC_ERR("unable to init caam source fq!");
return ret;
}
@@ -336,7 +313,7 @@ caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
alginfo_a->algmode = OP_ALG_AAI_HMAC;
break;
default:
- PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg);
+ DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
}
}
@@ -365,7 +342,7 @@ caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
alginfo_c->algmode = OP_ALG_AAI_CTR;
break;
default:
- PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg);
+ DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
}
}
@@ -378,7 +355,7 @@ caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
alginfo->algmode = OP_ALG_AAI_GCM;
break;
default:
- PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg);
+ DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
}
}
@@ -388,7 +365,7 @@ static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
- uint32_t shared_desc_len = 0;
+ int32_t shared_desc_len = 0;
struct sec_cdb *cdb = &ses->cdb;
int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
@@ -402,11 +379,11 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
if (is_cipher_only(ses)) {
caam_cipher_alg(ses, &alginfo_c);
if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
- PMD_TX_LOG(ERR, "not supported cipher alg\n");
+ DPAA_SEC_ERR("not supported cipher alg");
return -ENOTSUP;
}
- alginfo_c.key = (uint64_t)ses->cipher_key.data;
+ alginfo_c.key = (size_t)ses->cipher_key.data;
alginfo_c.keylen = ses->cipher_key.length;
alginfo_c.key_enc_flags = 0;
alginfo_c.key_type = RTA_DATA_IMM;
@@ -420,11 +397,11 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
} else if (is_auth_only(ses)) {
caam_auth_alg(ses, &alginfo_a);
if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
- PMD_TX_LOG(ERR, "not supported auth alg\n");
+ DPAA_SEC_ERR("not supported auth alg");
return -ENOTSUP;
}
- alginfo_a.key = (uint64_t)ses->auth_key.data;
+ alginfo_a.key = (size_t)ses->auth_key.data;
alginfo_a.keylen = ses->auth_key.length;
alginfo_a.key_enc_flags = 0;
alginfo_a.key_type = RTA_DATA_IMM;
@@ -436,10 +413,10 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
} else if (is_aead(ses)) {
caam_aead_alg(ses, &alginfo);
if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
- PMD_TX_LOG(ERR, "not supported aead alg\n");
+ DPAA_SEC_ERR("not supported aead alg");
return -ENOTSUP;
}
- alginfo.key = (uint64_t)ses->aead_key.data;
+ alginfo.key = (size_t)ses->aead_key.data;
alginfo.keylen = ses->aead_key.length;
alginfo.key_enc_flags = 0;
alginfo.key_type = RTA_DATA_IMM;
@@ -459,22 +436,22 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
} else {
caam_cipher_alg(ses, &alginfo_c);
if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
- PMD_TX_LOG(ERR, "not supported cipher alg\n");
+ DPAA_SEC_ERR("not supported cipher alg");
return -ENOTSUP;
}
- alginfo_c.key = (uint64_t)ses->cipher_key.data;
+ alginfo_c.key = (size_t)ses->cipher_key.data;
alginfo_c.keylen = ses->cipher_key.length;
alginfo_c.key_enc_flags = 0;
alginfo_c.key_type = RTA_DATA_IMM;
caam_auth_alg(ses, &alginfo_a);
if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
- PMD_TX_LOG(ERR, "not supported auth alg\n");
+ DPAA_SEC_ERR("not supported auth alg");
return -ENOTSUP;
}
- alginfo_a.key = (uint64_t)ses->auth_key.data;
+ alginfo_a.key = (size_t)ses->auth_key.data;
alginfo_a.keylen = ses->auth_key.length;
alginfo_a.key_enc_flags = 0;
alginfo_a.key_type = RTA_DATA_IMM;
@@ -487,21 +464,21 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
&cdb->sh_desc[2], 2);
if (err < 0) {
- PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths");
+ DPAA_SEC_ERR("Crypto: Incorrect key lengths");
return err;
}
if (cdb->sh_desc[2] & 1)
alginfo_c.key_type = RTA_DATA_IMM;
else {
- alginfo_c.key = (uint64_t)dpaa_mem_vtop(
- (void *)alginfo_c.key);
+ alginfo_c.key = (size_t)dpaa_mem_vtop(
+ (void *)(size_t)alginfo_c.key);
alginfo_c.key_type = RTA_DATA_PTR;
}
if (cdb->sh_desc[2] & (1<<1))
alginfo_a.key_type = RTA_DATA_IMM;
else {
- alginfo_a.key = (uint64_t)dpaa_mem_vtop(
- (void *)alginfo_a.key);
+ alginfo_a.key = (size_t)dpaa_mem_vtop(
+ (void *)(size_t)alginfo_a.key);
alginfo_a.key_type = RTA_DATA_PTR;
}
cdb->sh_desc[0] = 0;
@@ -530,6 +507,12 @@ dpaa_sec_prep_cdb(dpaa_sec_session *ses)
ses->digest_length, ses->dir);
}
}
+
+ if (shared_desc_len < 0) {
+ DPAA_SEC_ERR("error in preparing command block");
+ return shared_desc_len;
+ }
+
cdb->sh_hdr.hi.field.idlen = shared_desc_len;
cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
@@ -585,7 +568,7 @@ dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
if (!ctx->fd_status) {
op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
} else {
- printf("\nSEC return err: 0x%x", ctx->fd_status);
+ DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
op->status = RTE_CRYPTO_OP_STATUS_ERROR;
}
ops[pkts++] = op;
@@ -616,8 +599,8 @@ build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
extra_segs = 2;
if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
- PMD_TX_LOG(ERR, "Auth: Max sec segs supported is %d\n",
- MAX_SG_ENTRIES);
+ DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
+ MAX_SG_ENTRIES);
return NULL;
}
ctx = dpaa_sec_alloc_ctx(ses);
@@ -640,7 +623,7 @@ build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
in_sg->extension = 1;
in_sg->final = 1;
in_sg->length = sym->auth.data.length;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
/* 1st seg */
sg = in_sg + 1;
@@ -664,7 +647,7 @@ build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg++;
rte_memcpy(old_digest, sym->auth.digest.data,
ses->digest_length);
- start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
+ start_addr = dpaa_mem_vtop(old_digest);
qm_sg_entry_set64(sg, start_addr);
sg->length = ses->digest_length;
in_sg->length += ses->digest_length;
@@ -718,7 +701,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
if (is_decode(ses)) {
/* need to extend the input to a compound frame */
sg->extension = 1;
- qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
sg->length = sym->auth.data.length + ses->digest_length;
sg->final = 1;
cpu_to_hw_sg(sg);
@@ -732,7 +715,7 @@ build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
cpu_to_hw_sg(sg);
/* let's check digest by hw */
- start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
+ start_addr = dpaa_mem_vtop(old_digest);
sg++;
qm_sg_entry_set64(sg, start_addr);
sg->length = ses->digest_length;
@@ -769,8 +752,8 @@ build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
}
if (req_segs > MAX_SG_ENTRIES) {
- PMD_TX_LOG(ERR, "Cipher: Max sec segs supported is %d\n",
- MAX_SG_ENTRIES);
+ DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
+ MAX_SG_ENTRIES);
return NULL;
}
@@ -785,7 +768,7 @@ build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
out_sg = &cf->sg[0];
out_sg->extension = 1;
out_sg->length = sym->cipher.data.length;
- qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
+ qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
cpu_to_hw_sg(out_sg);
/* 1st seg */
@@ -814,7 +797,7 @@ build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
in_sg->length = sym->cipher.data.length + ses->iv.length;
sg++;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
cpu_to_hw_sg(in_sg);
/* IV */
@@ -881,7 +864,7 @@ build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg->extension = 1;
sg->final = 1;
sg->length = sym->cipher.data.length + ses->iv.length;
- qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
cpu_to_hw_sg(sg);
sg = &cf->sg[2];
@@ -922,7 +905,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
req_segs++;
if (req_segs > MAX_SG_ENTRIES) {
- PMD_TX_LOG(ERR, "AEAD: Max sec segs supported is %d\n",
+ DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
MAX_SG_ENTRIES);
return NULL;
}
@@ -947,7 +930,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* output sg entries */
sg = &cf->sg[2];
- qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
cpu_to_hw_sg(out_sg);
/* 1st seg */
@@ -991,7 +974,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* input sg entries */
sg++;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
cpu_to_hw_sg(in_sg);
/* 1st seg IV */
@@ -1028,7 +1011,7 @@ build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg++;
memcpy(ctx->digest, sym->aead.digest.data,
ses->digest_length);
- qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
}
sg->final = 1;
@@ -1066,7 +1049,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* input */
rte_prefetch0(cf->sg);
sg = &cf->sg[2];
- qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
if (is_encode(ses)) {
qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
@@ -1111,7 +1094,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
ses->digest_length);
sg++;
- qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
length += sg->length;
sg->final = 1;
@@ -1125,7 +1108,7 @@ build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* output */
sg++;
- qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
qm_sg_entry_set64(sg,
dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
sg->length = sym->aead.data.length + ses->auth_only_len;
@@ -1170,7 +1153,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
}
if (req_segs > MAX_SG_ENTRIES) {
- PMD_TX_LOG(ERR, "Cipher-Auth: Max sec segs supported is %d\n",
+ DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
MAX_SG_ENTRIES);
return NULL;
}
@@ -1194,7 +1177,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* output sg entries */
sg = &cf->sg[2];
- qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
cpu_to_hw_sg(out_sg);
/* 1st seg */
@@ -1236,7 +1219,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* input sg entries */
sg++;
- qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
cpu_to_hw_sg(in_sg);
/* 1st seg IV */
@@ -1266,7 +1249,7 @@ build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
sg++;
memcpy(ctx->digest, sym->auth.digest.data,
ses->digest_length);
- qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
}
sg->final = 1;
@@ -1303,7 +1286,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* input */
rte_prefetch0(cf->sg);
sg = &cf->sg[2];
- qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
if (is_encode(ses)) {
qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
sg->length = ses->iv.length;
@@ -1333,7 +1316,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
ses->digest_length);
sg++;
- qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
+ qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
sg->length = ses->digest_length;
length += sg->length;
sg->final = 1;
@@ -1347,7 +1330,7 @@ build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
/* output */
sg++;
- qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
+ qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
sg->length = sym->cipher.data.length;
length = sg->length;
@@ -1422,7 +1405,6 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
struct rte_crypto_op *op;
struct dpaa_sec_job *cf;
dpaa_sec_session *ses;
- struct dpaa_sec_op_ctx *ctx;
uint32_t auth_only_len;
struct qman_fq *inq[DPAA_SEC_BURST];
@@ -1444,15 +1426,15 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
op->sym->sec_session);
break;
default:
- PMD_TX_LOG(ERR,
+ DPAA_SEC_DP_ERR(
"sessionless crypto op not supported");
frames_to_send = loop;
nb_ops = loop;
goto send_pkts;
}
if (unlikely(!ses->qp || ses->qp != qp)) {
- PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p",
- ses->qp, qp);
+ DPAA_SEC_DP_ERR("sess->qp - %p qp %p",
+ ses->qp, qp);
if (dpaa_sec_attach_sess_q(qp, ses)) {
frames_to_send = loop;
nb_ops = loop;
@@ -1475,7 +1457,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
} else if (is_proto_ipsec(ses)) {
cf = build_proto(op, ses);
} else {
- PMD_TX_LOG(ERR, "not supported sec op");
+ DPAA_SEC_DP_ERR("not supported ops");
frames_to_send = loop;
nb_ops = loop;
goto send_pkts;
@@ -1491,7 +1473,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
} else if (is_auth_cipher(ses)) {
cf = build_cipher_auth_sg(op, ses);
} else {
- PMD_TX_LOG(ERR, "not supported sec op");
+ DPAA_SEC_DP_ERR("not supported ops");
frames_to_send = loop;
nb_ops = loop;
goto send_pkts;
@@ -1507,8 +1489,7 @@ dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
inq[loop] = ses->inq;
fd->opaque_addr = 0;
fd->cmd = 0;
- ctx = container_of(cf, struct dpaa_sec_op_ctx, job);
- qm_fd_addr_set64(fd, dpaa_mem_vtop_ctx(ctx, cf->sg));
+ qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
fd->_format1 = qm_fd_compound;
fd->length29 = 2 * sizeof(struct qm_sg_entry);
/* Auth_only_len is set as 0 in descriptor and it is
@@ -1547,7 +1528,7 @@ dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
dpaa_qp->rx_pkts += num_rx;
dpaa_qp->rx_errs += nb_ops - num_rx;
- PMD_RX_LOG(DEBUG, "SEC Received %d Packets\n", num_rx);
+ DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
return num_rx;
}
@@ -1562,11 +1543,11 @@ dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
PMD_INIT_FUNC_TRACE();
- PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d", dev, qp_id);
+ DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
internals = dev->data->dev_private;
if (qp_id >= internals->max_nb_queue_pairs) {
- PMD_INIT_LOG(ERR, "Max supported qpid %d",
+ DPAA_SEC_ERR("Max supported qpid %d",
internals->max_nb_queue_pairs);
return -EINVAL;
}
@@ -1588,12 +1569,11 @@ dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
struct dpaa_sec_dev_private *internals;
struct dpaa_sec_qp *qp = NULL;
- PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
- dev, qp_id, qp_conf);
+ DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
internals = dev->data->dev_private;
if (qp_id >= internals->max_nb_queue_pairs) {
- PMD_INIT_LOG(ERR, "Max supported qpid %d",
+ DPAA_SEC_ERR("Max supported qpid %d",
internals->max_nb_queue_pairs);
return -EINVAL;
}
@@ -1654,7 +1634,7 @@ dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
- PMD_INIT_LOG(ERR, "No Memory for cipher key\n");
+ DPAA_SEC_ERR("No Memory for cipher key");
return -ENOMEM;
}
session->cipher_key.length = xform->cipher.key.length;
@@ -1676,7 +1656,7 @@ dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
- PMD_INIT_LOG(ERR, "No Memory for auth key\n");
+ DPAA_SEC_ERR("No Memory for auth key");
return -ENOMEM;
}
session->auth_key.length = xform->auth.key.length;
@@ -1702,7 +1682,7 @@ dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
RTE_CACHE_LINE_SIZE);
if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
- PMD_INIT_LOG(ERR, "No Memory for aead key\n");
+ DPAA_SEC_ERR("No Memory for aead key\n");
return -ENOMEM;
}
session->aead_key.length = xform->aead.key.length;
@@ -1727,7 +1707,7 @@ dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
return &qi->inq[i];
}
}
- PMD_DRV_LOG(ERR, "All ses session in use %x", qi->max_nb_sessions);
+ DPAA_SEC_WARN("All ses session in use %x", qi->max_nb_sessions);
return NULL;
}
@@ -1756,14 +1736,20 @@ dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
sess->qp = qp;
ret = dpaa_sec_prep_cdb(sess);
if (ret) {
- PMD_DRV_LOG(ERR, "Unable to prepare sec cdb");
+ DPAA_SEC_ERR("Unable to prepare sec cdb");
return -1;
}
-
+ if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
+ ret = rte_dpaa_portal_init((void *)0);
+ if (ret) {
+ DPAA_SEC_ERR("Failure in affining portal");
+ return ret;
+ }
+ }
ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
qman_fq_fqid(&qp->outq));
if (ret)
- PMD_DRV_LOG(ERR, "Unable to init sec queue");
+ DPAA_SEC_ERR("Unable to init sec queue");
return ret;
}
@@ -1806,7 +1792,7 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
PMD_INIT_FUNC_TRACE();
if (unlikely(sess == NULL)) {
- RTE_LOG(ERR, PMD, "invalid session struct\n");
+ DPAA_SEC_ERR("invalid session struct");
return -EINVAL;
}
@@ -1831,7 +1817,7 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
dpaa_sec_cipher_init(dev, xform, session);
dpaa_sec_auth_init(dev, xform->next, session);
} else {
- PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
+ DPAA_SEC_ERR("Not supported: Auth then Cipher");
return -EINVAL;
}
@@ -1842,7 +1828,7 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
dpaa_sec_auth_init(dev, xform, session);
dpaa_sec_cipher_init(dev, xform->next, session);
} else {
- PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
+ DPAA_SEC_ERR("Not supported: Auth then Cipher");
return -EINVAL;
}
@@ -1852,13 +1838,13 @@ dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
dpaa_sec_aead_init(dev, xform, session);
} else {
- PMD_DRV_LOG(ERR, "Invalid crypto type");
+ DPAA_SEC_ERR("Invalid crypto type");
return -EINVAL;
}
session->ctx_pool = internals->ctx_pool;
session->inq = dpaa_sec_attach_rxq(internals);
if (session->inq == NULL) {
- PMD_DRV_LOG(ERR, "unable to attach sec queue");
+ DPAA_SEC_ERR("unable to attach sec queue");
goto err1;
}
@@ -1884,15 +1870,13 @@ dpaa_sec_session_configure(struct rte_cryptodev *dev,
PMD_INIT_FUNC_TRACE();
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
+ DPAA_SEC_ERR("Couldn't get object from session mempool");
return -ENOMEM;
}
ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
if (ret != 0) {
- PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure "
- "session parameters");
+ DPAA_SEC_ERR("failed to configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
@@ -1958,7 +1942,7 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
RTE_CACHE_LINE_SIZE);
if (session->cipher_key.data == NULL &&
cipher_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
+ DPAA_SEC_ERR("No Memory for cipher key");
return -ENOMEM;
}
@@ -1968,7 +1952,7 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
RTE_CACHE_LINE_SIZE);
if (session->auth_key.data == NULL &&
auth_xform->key.length > 0) {
- RTE_LOG(ERR, PMD, "No Memory for auth key\n");
+ DPAA_SEC_ERR("No Memory for auth key");
rte_free(session->cipher_key.data);
return -ENOMEM;
}
@@ -2013,11 +1997,11 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
case RTE_CRYPTO_AUTH_KASUMI_F9:
case RTE_CRYPTO_AUTH_AES_CBC_MAC:
case RTE_CRYPTO_AUTH_ZUC_EIA3:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
+ DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
auth_xform->algo);
goto out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
+ DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
auth_xform->algo);
goto out;
}
@@ -2037,11 +2021,11 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
case RTE_CRYPTO_CIPHER_3DES_ECB:
case RTE_CRYPTO_CIPHER_AES_ECB:
case RTE_CRYPTO_CIPHER_KASUMI_F8:
- RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
+ DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
cipher_xform->algo);
goto out;
default:
- RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
+ DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
cipher_xform->algo);
goto out;
}
@@ -2086,7 +2070,7 @@ dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
session->ctx_pool = internals->ctx_pool;
session->inq = dpaa_sec_attach_rxq(internals);
if (session->inq == NULL) {
- PMD_DRV_LOG(ERR, "unable to attach sec queue");
+ DPAA_SEC_ERR("unable to attach sec queue");
goto out;
}
@@ -2110,8 +2094,7 @@ dpaa_sec_security_session_create(void *dev,
int ret;
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
+ DPAA_SEC_ERR("Couldn't get object from session mempool");
return -ENOMEM;
}
@@ -2126,9 +2109,7 @@ dpaa_sec_security_session_create(void *dev,
return -EINVAL;
}
if (ret != 0) {
- PMD_DRV_LOG(ERR,
- "DPAA2 PMD: failed to configure session parameters");
-
+ DPAA_SEC_ERR("failed to configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
@@ -2163,11 +2144,32 @@ dpaa_sec_security_session_destroy(void *dev __rte_unused,
static int
-dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
+dpaa_sec_dev_configure(struct rte_cryptodev *dev,
struct rte_cryptodev_config *config __rte_unused)
{
+
+ char str[20];
+ struct dpaa_sec_dev_private *internals;
+
PMD_INIT_FUNC_TRACE();
+ internals = dev->data->dev_private;
+ sprintf(str, "ctx_pool_%d", dev->data->dev_id);
+ if (!internals->ctx_pool) {
+ internals->ctx_pool = rte_mempool_create((const char *)str,
+ CTX_POOL_NUM_BUFS,
+ CTX_POOL_BUF_SIZE,
+ CTX_POOL_CACHE_SIZE, 0,
+ NULL, NULL, NULL, NULL,
+ SOCKET_ID_ANY, 0);
+ if (!internals->ctx_pool) {
+ DPAA_SEC_ERR("%s create failed\n", str);
+ return -ENOMEM;
+ }
+ } else
+ DPAA_SEC_INFO("mempool already created for dev_id : %d",
+ dev->data->dev_id);
+
return 0;
}
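
With this hunk the per-device context pool is created lazily at configure time (and freed in dpaa_sec_dev_close() below) rather than at probe. A hedged sketch of the application-side sequence that triggers it, using the cryptodev API of this release (dev_id assumed valid):

    struct rte_cryptodev_config conf = {
            .socket_id = (int)rte_socket_id(),
            .nb_queue_pairs = 2,
    };

    int ret = rte_cryptodev_configure(dev_id, &conf); /* creates ctx_pool_<id> */
    if (ret == 0)
            ret = rte_cryptodev_start(dev_id);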
@@ -2185,9 +2187,19 @@ dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
}
static int
-dpaa_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
+dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
+ struct dpaa_sec_dev_private *internals;
+
PMD_INIT_FUNC_TRACE();
+
+ if (dev == NULL)
+ return -ENODEV;
+
+ internals = dev->data->dev_private;
+ rte_mempool_free(internals->ctx_pool);
+ internals->ctx_pool = NULL;
+
return 0;
}
@@ -2246,18 +2258,20 @@ struct rte_security_ops dpaa_sec_security_ops = {
static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
- struct dpaa_sec_dev_private *internals = dev->data->dev_private;
+ struct dpaa_sec_dev_private *internals;
if (dev == NULL)
return -ENODEV;
+ internals = dev->data->dev_private;
rte_free(dev->security_ctx);
+ /* In case close has been called, internals->ctx_pool would be NULL */
rte_mempool_free(internals->ctx_pool);
rte_free(internals);
- PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u\n",
- dev->data->name, rte_socket_id());
+ DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
+ dev->data->name, rte_socket_id());
return 0;
}
@@ -2270,7 +2284,6 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
struct dpaa_sec_qp *qp;
uint32_t i, flags;
int ret;
- char str[20];
PMD_INIT_FUNC_TRACE();
@@ -2295,7 +2308,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
* RX function
*/
if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
- PMD_INIT_LOG(DEBUG, "Device already init by primary process");
+ DPAA_SEC_WARN("Device already init by primary process");
return 0;
}
@@ -2314,7 +2327,7 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
qp = &internals->qps[i];
ret = dpaa_sec_init_tx(&qp->outq);
if (ret) {
- PMD_INIT_LOG(ERR, "config tx of queue pair %d", i);
+ DPAA_SEC_ERR("config tx of queue pair %d", i);
goto init_error;
}
}
@@ -2325,28 +2338,16 @@ dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
/* create rx qman fq for sessions*/
ret = qman_create_fq(0, flags, &internals->inq[i]);
if (unlikely(ret != 0)) {
- PMD_INIT_LOG(ERR, "sec qman_create_fq failed");
+ DPAA_SEC_ERR("sec qman_create_fq failed");
goto init_error;
}
}
- sprintf(str, "ctx_pool_%d", cryptodev->data->dev_id);
- internals->ctx_pool = rte_mempool_create((const char *)str,
- CTX_POOL_NUM_BUFS,
- CTX_POOL_BUF_SIZE,
- CTX_POOL_CACHE_SIZE, 0,
- NULL, NULL, NULL, NULL,
- SOCKET_ID_ANY, 0);
- if (!internals->ctx_pool) {
- RTE_LOG(ERR, PMD, "%s create failed\n", str);
- goto init_error;
- }
-
- PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
+ RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
return 0;
init_error:
- PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);
+ DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);
dpaa_sec_uninit(cryptodev);
return -EFAULT;
@@ -2445,5 +2446,14 @@ static struct rte_dpaa_driver rte_dpaa_sec_driver = {
static struct cryptodev_driver dpaa_sec_crypto_drv;
RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
-RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver,
+RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
cryptodev_driver_id);
+
+RTE_INIT(dpaa_sec_init_log);
+static void
+dpaa_sec_init_log(void)
+{
+ dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
+ if (dpaa_logtype_sec >= 0)
+ rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
+}
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec.h b/drivers/crypto/dpaa_sec/dpaa_sec.h
index f45b36cb..e15e373f 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec.h
@@ -7,6 +7,9 @@
#ifndef _DPAA_SEC_H_
#define _DPAA_SEC_H_
+#define CRYPTODEV_NAME_DPAA_SEC_PMD crypto_dpaa_sec
+/**< NXP DPAA - SEC PMD device name */
+
#define NUM_POOL_CHANNELS 4
#define DPAA_SEC_BURST 7
#define DPAA_SEC_ALG_UNSUPPORT (-1)
@@ -133,7 +136,7 @@ struct dpaa_sec_qp {
int tx_errs;
};
-#define RTE_DPAA_MAX_NB_SEC_QPS 1
+#define RTE_DPAA_MAX_NB_SEC_QPS 8
#define RTE_DPAA_MAX_RX_QUEUE RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS
#define DPAA_MAX_DEQUEUE_NUM_FRAMES 63
@@ -182,10 +185,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 16,
+ .min = 1,
.max = 16,
- .increment = 0
+ .increment = 1
},
+ .iv_size = { 0 }
}, }
}, }
},
@@ -202,10 +206,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 20,
+ .min = 1,
.max = 20,
- .increment = 0
+ .increment = 1
},
+ .iv_size = { 0 }
}, }
}, }
},
@@ -222,10 +227,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 28,
+ .min = 1,
.max = 28,
- .increment = 0
+ .increment = 1
},
+ .iv_size = { 0 }
}, }
}, }
},
@@ -242,10 +248,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 32,
+ .min = 1,
.max = 32,
- .increment = 0
+ .increment = 1
},
+ .iv_size = { 0 }
}, }
}, }
},
@@ -262,10 +269,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 48,
+ .min = 1,
.max = 48,
- .increment = 0
+ .increment = 1
},
+ .iv_size = { 0 }
}, }
}, }
},
@@ -282,10 +290,11 @@ static const struct rte_cryptodev_capabilities dpaa_sec_capabilities[] = {
.increment = 1
},
.digest_size = {
- .min = 64,
+ .min = 1,
.max = 64,
- .increment = 0
+ .increment = 1
},
+ .iv_size = { 0 }
}, }
}, }
},
diff --git a/drivers/crypto/dpaa_sec/dpaa_sec_log.h b/drivers/crypto/dpaa_sec/dpaa_sec_log.h
index 992a79f5..9784fcbf 100644
--- a/drivers/crypto/dpaa_sec/dpaa_sec_log.h
+++ b/drivers/crypto/dpaa_sec/dpaa_sec_log.h
@@ -1,44 +1,43 @@
/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright NXP 2017.
+ * Copyright 2017-2018 NXP
*
*/
#ifndef _DPAA_SEC_LOG_H_
#define _DPAA_SEC_LOG_H_
-#define PMD_INIT_LOG(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ##args)
-
-#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_INIT
-#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
-#else
-#define PMD_INIT_FUNC_TRACE() do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_RX
-#define PMD_RX_LOG(level, fmt, args...) \
- RTE_LOG_DP(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_RX_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_TX
-#define PMD_TX_LOG(level, fmt, args...) \
- RTE_LOG_DP(level, PMD, "%s(): " fmt "\n", __func__, ## args)
-#else
-#define PMD_TX_LOG(level, fmt, args...) do { } while (0)
-#endif
-
-#ifdef RTE_LIBRTE_DPAA_SEC_DEBUG_DRIVER
-#define PMD_DRV_LOG_RAW(level, fmt, args...) \
- RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args)
-#else
-#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0)
-#endif
-
-#define PMD_DRV_LOG(level, fmt, args...) \
- PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
+extern int dpaa_logtype_sec;
+
+#define DPAA_SEC_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, dpaa_logtype_sec, "dpaa_sec: " \
+ fmt "\n", ##args)
+
+#define DPAA_SEC_DEBUG(fmt, args...) \
+ rte_log(RTE_LOG_DEBUG, dpaa_logtype_sec, "dpaa_sec: %s(): " \
+ fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() DPAA_SEC_LOG(DEBUG, " >>")
+
+#define DPAA_SEC_INFO(fmt, args...) \
+ DPAA_SEC_LOG(INFO, fmt, ## args)
+#define DPAA_SEC_ERR(fmt, args...) \
+ DPAA_SEC_LOG(ERR, fmt, ## args)
+#define DPAA_SEC_WARN(fmt, args...) \
+ DPAA_SEC_LOG(WARNING, fmt, ## args)
+
+/* DP logs, compiled out at build time when below RTE_LOG_DP_LEVEL */
+#define DPAA_SEC_DP_LOG(level, fmt, args...) \
+ RTE_LOG_DP(level, PMD, fmt, ## args)
+
+#define DPAA_SEC_DP_DEBUG(fmt, args...) \
+ DPAA_SEC_DP_LOG(DEBUG, fmt, ## args)
+#define DPAA_SEC_DP_INFO(fmt, args...) \
+ DPAA_SEC_DP_LOG(INFO, fmt, ## args)
+#define DPAA_SEC_DP_WARN(fmt, args...) \
+ DPAA_SEC_DP_LOG(WARNING, fmt, ## args)
+#define DPAA_SEC_DP_ERR(fmt, args...) \
+ DPAA_SEC_DP_LOG(ERR, fmt, ## args)
#endif /* _DPAA_SEC_LOG_H_ */
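
The DPAA_SEC_DP_* family mirrors the dpaa2 header above: because it wraps RTE_LOG_DP, the calls vanish at build time when RTE_LOG_DP_LEVEL is below the requested level, which is why the hot enqueue/dequeue paths in dpaa_sec.c were switched to them. A short illustration (call sites are hypothetical):

    /* Datapath: compiled out on release builds; note no implicit '\n'. */
    DPAA_SEC_DP_DEBUG("dequeued %d frames\n", num_rx);

    /* Control path: always compiled in, runtime-gated by dpaa_logtype_sec. */
    DPAA_SEC_ERR("fq init failed: %d", ret);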
diff --git a/drivers/crypto/dpaa_sec/meson.build b/drivers/crypto/dpaa_sec/meson.build
new file mode 100644
index 00000000..8a570984
--- /dev/null
+++ b/drivers/crypto/dpaa_sec/meson.build
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright 2018 NXP
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+deps += ['bus_dpaa', 'security']
+sources = files('dpaa_sec.c')
+
+allow_experimental_apis = true
+
+includes += include_directories('../dpaa2_sec/')
diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd.c b/drivers/crypto/kasumi/rte_kasumi_pmd.c
index 356621d4..205dc1de 100644
--- a/drivers/crypto/kasumi/rte_kasumi_pmd.c
+++ b/drivers/crypto/kasumi/rte_kasumi_pmd.c
@@ -320,7 +320,7 @@ process_ops(struct rte_crypto_op **ops, struct kasumi_session *session,
if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
memset(session, 0, sizeof(struct kasumi_session));
memset(ops[i]->sym->session, 0,
- rte_cryptodev_get_header_session_size());
+ rte_cryptodev_sym_get_header_session_size());
rte_mempool_put(qp->sess_mp, session);
rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
ops[i]->sym->session = NULL;
@@ -619,5 +619,5 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_KASUMI_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(kasumi_crypto_drv, cryptodev_kasumi_pmd_drv,
- cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(kasumi_crypto_drv,
+ cryptodev_kasumi_pmd_drv.driver, cryptodev_driver_id);
diff --git a/drivers/crypto/meson.build b/drivers/crypto/meson.build
index 17041ad8..d64ca418 100644
--- a/drivers/crypto/meson.build
+++ b/drivers/crypto/meson.build
@@ -1,7 +1,9 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2017 Intel Corporation
-drivers = ['qat', 'null', 'openssl']
+drivers = ['ccp', 'dpaa_sec', 'dpaa2_sec', 'mvsam',
+ 'null', 'openssl', 'qat', 'virtio']
+
std_deps = ['cryptodev'] # cryptodev pulls in all other needed deps
config_flag_fmt = 'RTE_LIBRTE_@0@_PMD'
driver_name_fmt = 'rte_pmd_@0@'
diff --git a/drivers/crypto/mrvl/Makefile b/drivers/crypto/mrvl/Makefile
deleted file mode 100644
index bc5c2270..00000000
--- a/drivers/crypto/mrvl/Makefile
+++ /dev/null
@@ -1,67 +0,0 @@
-# BSD LICENSE
-#
-# Copyright(c) 2017 Marvell International Ltd.
-# Copyright(c) 2017 Semihalf.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in
-# the documentation and/or other materials provided with the
-# distribution.
-# * Neither the name of the copyright holder nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-include $(RTE_SDK)/mk/rte.vars.mk
-
-ifneq ($(MAKECMDGOALS),clean)
-ifneq ($(MAKECMDGOALS),config)
-ifeq ($(LIBMUSDK_PATH),)
-$(error "Please define LIBMUSDK_PATH environment variable")
-endif
-endif
-endif
-
-# library name
-LIB = librte_pmd_mrvl_crypto.a
-
-# build flags
-CFLAGS += -O3
-CFLAGS += $(WERROR_FLAGS)
-CFLAGS += -I$(LIBMUSDK_PATH)/include
-CFLAGS += -DMVCONF_TYPES_PUBLIC
-CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC
-
-# library version
-LIBABIVER := 1
-
-# versioning export map
-EXPORT_MAP := rte_pmd_mrvl_version.map
-
-# external library dependencies
-LDLIBS += -L$(LIBMUSDK_PATH)/lib -lmusdk
-LDLIBS += -lrte_bus_vdev
-
-# library source files
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += rte_mrvl_pmd.c
-SRCS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += rte_mrvl_pmd_ops.c
-
-include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/mrvl/rte_mrvl_compat.h b/drivers/crypto/mrvl/rte_mrvl_compat.h
deleted file mode 100644
index 22cd1840..00000000
--- a/drivers/crypto/mrvl/rte_mrvl_compat.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Marvell International Ltd.
- * Copyright(c) 2017 Semihalf.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RTE_MRVL_COMPAT_H_
-#define _RTE_MRVL_COMPAT_H_
-
-/* Unluckily, container_of is defined by both DPDK and MUSDK,
- * we'll declare only one version.
- *
- * Note that it is not used in this PMD anyway.
- */
-#ifdef container_of
-#undef container_of
-#endif
-#include "env/mv_autogen_comp_flags.h"
-#include "drivers/mv_sam.h"
-#include "drivers/mv_sam_cio.h"
-#include "drivers/mv_sam_session.h"
-
-#endif /* _RTE_MRVL_COMPAT_H_ */
diff --git a/drivers/crypto/mvsam/Makefile b/drivers/crypto/mvsam/Makefile
new file mode 100644
index 00000000..c3dc72c1
--- /dev/null
+++ b/drivers/crypto/mvsam/Makefile
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Marvell International Ltd.
+# Copyright(c) 2017 Semihalf.
+# All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(MAKECMDGOALS),config)
+ifeq ($(LIBMUSDK_PATH),)
+$(error "Please define LIBMUSDK_PATH environment variable")
+endif
+endif
+endif
+
+# library name
+LIB = librte_pmd_mvsam_crypto.a
+
+# build flags
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+CFLAGS += -I$(LIBMUSDK_PATH)/include
+CFLAGS += -DMVCONF_TYPES_PUBLIC
+CFLAGS += -DMVCONF_DMA_PHYS_ADDR_T_PUBLIC
+
+# library version
+LIBABIVER := 1
+
+# versioning export map
+EXPORT_MAP := rte_pmd_mvsam_version.map
+
+# external library dependencies
+LDLIBS += -L$(LIBMUSDK_PATH)/lib -lmusdk
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_bus_vdev
+
+# library source files
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) += rte_mrvl_pmd.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_MVSAM_CRYPTO) += rte_mrvl_pmd_ops.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/mvsam/meson.build b/drivers/crypto/mvsam/meson.build
new file mode 100644
index 00000000..3c8ea3cf
--- /dev/null
+++ b/drivers/crypto/mvsam/meson.build
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 Marvell International Ltd.
+# Copyright(c) 2018 Semihalf.
+# All rights reserved.
+
+path = get_option('lib_musdk_dir')
+lib_dir = path + '/lib'
+inc_dir = path + '/include'
+
+lib = cc.find_library('libmusdk', dirs: [lib_dir], required: false)
+if not lib.found()
+ build = false
+else
+ ext_deps += lib
+ includes += include_directories(inc_dir)
+ cflags += ['-DMVCONF_TYPES_PUBLIC', '-DMVCONF_DMA_PHYS_ADDR_T_PUBLIC']
+endif
+
+sources = files('rte_mrvl_pmd.c', 'rte_mrvl_pmd_ops.c')
+
+deps += ['bus_vdev']
diff --git a/drivers/crypto/mvsam/rte_mrvl_compat.h b/drivers/crypto/mvsam/rte_mrvl_compat.h
new file mode 100644
index 00000000..4ab28d39
--- /dev/null
+++ b/drivers/crypto/mvsam/rte_mrvl_compat.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
+ */
+
+#ifndef _RTE_MRVL_COMPAT_H_
+#define _RTE_MRVL_COMPAT_H_
+
+/* Unluckily, container_of is defined by both DPDK and MUSDK,
+ * we'll declare only one version.
+ *
+ * Note that it is not used in this PMD anyway.
+ */
+#ifdef container_of
+#undef container_of
+#endif
+#include "env/mv_autogen_comp_flags.h"
+#include "drivers/mv_sam.h"
+#include "drivers/mv_sam_cio.h"
+#include "drivers/mv_sam_session.h"
+
+#endif /* _RTE_MRVL_COMPAT_H_ */
diff --git a/drivers/crypto/mrvl/rte_mrvl_pmd.c b/drivers/crypto/mvsam/rte_mrvl_pmd.c
index 31f3fe58..1b6029a5 100644
--- a/drivers/crypto/mrvl/rte_mrvl_pmd.c
+++ b/drivers/crypto/mvsam/rte_mrvl_pmd.c
@@ -1,35 +1,7 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Marvell International Ltd.
- * Copyright(c) 2017 Semihalf.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
*/
#include <rte_common.h>
@@ -853,5 +825,5 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_MRVL_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv,
+RTE_PMD_REGISTER_CRYPTO_DRIVER(mrvl_crypto_drv, cryptodev_mrvl_pmd_drv.driver,
cryptodev_driver_id);
diff --git a/drivers/crypto/mrvl/rte_mrvl_pmd_ops.c b/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c
index a1de33ae..3f8de37b 100644
--- a/drivers/crypto/mrvl/rte_mrvl_pmd_ops.c
+++ b/drivers/crypto/mvsam/rte_mrvl_pmd_ops.c
@@ -1,35 +1,7 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Marvell International Ltd.
- * Copyright(c) 2017 Semihalf.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
*/
#include <string.h>
diff --git a/drivers/crypto/mrvl/rte_mrvl_pmd_private.h b/drivers/crypto/mvsam/rte_mrvl_pmd_private.h
index 923faaf9..c16d95b4 100644
--- a/drivers/crypto/mrvl/rte_mrvl_pmd_private.h
+++ b/drivers/crypto/mvsam/rte_mrvl_pmd_private.h
@@ -1,35 +1,7 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Marvell International Ltd.
- * Copyright(c) 2017 Semihalf.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of the copyright holder nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Marvell International Ltd.
+ * Copyright(c) 2017 Semihalf.
+ * All rights reserved.
*/
#ifndef _RTE_MRVL_PMD_PRIVATE_H_
@@ -37,7 +9,7 @@
#include "rte_mrvl_compat.h"
-#define CRYPTODEV_NAME_MRVL_PMD crypto_mrvl
+#define CRYPTODEV_NAME_MRVL_PMD crypto_mvsam
/**< Marvell PMD device name */
#define MRVL_CRYPTO_LOG_ERR(fmt, args...) \
diff --git a/drivers/crypto/mrvl/rte_pmd_mrvl_version.map b/drivers/crypto/mvsam/rte_pmd_mvsam_version.map
index a7530317..a7530317 100644
--- a/drivers/crypto/mrvl/rte_pmd_mrvl_version.map
+++ b/drivers/crypto/mvsam/rte_pmd_mvsam_version.map
diff --git a/drivers/crypto/null/null_crypto_pmd.c b/drivers/crypto/null/null_crypto_pmd.c
index dc8e5776..052b6546 100644
--- a/drivers/crypto/null/null_crypto_pmd.c
+++ b/drivers/crypto/null/null_crypto_pmd.c
@@ -247,5 +247,5 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_NULL_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(null_crypto_drv, cryptodev_null_pmd_drv,
+RTE_PMD_REGISTER_CRYPTO_DRIVER(null_crypto_drv, cryptodev_null_pmd_drv.driver,
cryptodev_driver_id);
diff --git a/drivers/crypto/openssl/rte_openssl_pmd.c b/drivers/crypto/openssl/rte_openssl_pmd.c
index 0d9bedc0..93c6d7e5 100644
--- a/drivers/crypto/openssl/rte_openssl_pmd.c
+++ b/drivers/crypto/openssl/rte_openssl_pmd.c
@@ -1569,7 +1569,7 @@ process_op(struct openssl_qp *qp, struct rte_crypto_op *op,
openssl_reset_session(sess);
memset(sess, 0, sizeof(struct openssl_session));
memset(op->sym->session, 0,
- rte_cryptodev_get_header_session_size());
+ rte_cryptodev_sym_get_header_session_size());
rte_mempool_put(qp->sess_mp, sess);
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
@@ -1733,5 +1733,5 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_OPENSSL_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(openssl_crypto_drv, cryptodev_openssl_pmd_drv,
- cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(openssl_crypto_drv,
+ cryptodev_openssl_pmd_drv.driver, cryptodev_driver_id);
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile
index 260912dc..07266a5e 100644
--- a/drivers/crypto/qat/Makefile
+++ b/drivers/crypto/qat/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2015 Intel Corporation
+# Copyright(c) 2015-2018 Intel Corporation
include $(RTE_SDK)/mk/rte.vars.mk
diff --git a/drivers/crypto/qat/meson.build b/drivers/crypto/qat/meson.build
index 7b904630..006cd655 100644
--- a/drivers/crypto/qat/meson.build
+++ b/drivers/crypto/qat/meson.build
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: BSD-3-Clause
-# Copyright(c) 2017 Intel Corporation
+# Copyright(c) 2017-2018 Intel Corporation
dep = dependency('libcrypto', required: false)
if not dep.found()
diff --git a/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h b/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
index 4f8f3d13..bfdbc979 100644
--- a/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
+++ b/drivers/crypto/qat/qat_adf/adf_transport_access_macros.h
@@ -1,48 +1,5 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
*/
#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
#define ADF_TRANSPORT_ACCESS_MACROS_H
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_fw.h b/drivers/crypto/qat/qat_adf/icp_qat_fw.h
index 5de34d55..ae39b7f1 100644
--- a/drivers/crypto/qat/qat_adf/icp_qat_fw.h
+++ b/drivers/crypto/qat/qat_adf/icp_qat_fw.h
@@ -1,48 +1,5 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
*/
#ifndef _ICP_QAT_FW_H_
#define _ICP_QAT_FW_H_
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h b/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h
index fbf2b839..c33bc3fe 100644
--- a/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h
+++ b/drivers/crypto/qat/qat_adf/icp_qat_fw_la.h
@@ -1,48 +1,5 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
*/
#ifndef _ICP_QAT_FW_LA_H_
#define _ICP_QAT_FW_LA_H_
diff --git a/drivers/crypto/qat/qat_adf/icp_qat_hw.h b/drivers/crypto/qat/qat_adf/icp_qat_hw.h
index d03688c7..56e3cf79 100644
--- a/drivers/crypto/qat/qat_adf/icp_qat_hw.h
+++ b/drivers/crypto/qat/qat_adf/icp_qat_hw.h
@@ -1,48 +1,5 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
*/
#ifndef _ICP_QAT_HW_H_
#define _ICP_QAT_HW_H_
diff --git a/drivers/crypto/qat/qat_adf/qat_algs.h b/drivers/crypto/qat/qat_adf/qat_algs.h
index 802ba95d..88bd5f00 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs.h
+++ b/drivers/crypto/qat/qat_adf/qat_algs.h
@@ -1,48 +1,5 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015-2016 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015-2017 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
*/
#ifndef _ICP_QAT_ALGS_H_
#define _ICP_QAT_ALGS_H_
diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
index 26f854c2..c87ed40f 100644
--- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
+++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c
@@ -1,50 +1,6 @@
-/*
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- * Copyright(c) 2015-2016 Intel Corporation.
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * qat-linux@intel.com
- *
- * BSD LICENSE
- * Copyright(c) 2015-2017 Intel Corporation.
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
+ * Copyright(c) 2015-2018 Intel Corporation
*/
-
#include <rte_memcpy.h>
#include <rte_common.h>
#include <rte_spinlock.h>
diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c
index 4afe159d..d9ce2a13 100644
--- a/drivers/crypto/qat/qat_crypto.c
+++ b/drivers/crypto/qat/qat_crypto.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2017 Intel Corporation
+ * Copyright(c) 2015-2018 Intel Corporation
*/
#include <stdio.h>
diff --git a/drivers/crypto/qat/qat_crypto.h b/drivers/crypto/qat/qat_crypto.h
index c182af2e..281a142b 100644
--- a/drivers/crypto/qat/qat_crypto.h
+++ b/drivers/crypto/qat/qat_crypto.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2016 Intel Corporation
+ * Copyright(c) 2015-2018 Intel Corporation
*/
#ifndef _QAT_CRYPTO_H_
diff --git a/drivers/crypto/qat/qat_crypto_capabilities.h b/drivers/crypto/qat/qat_crypto_capabilities.h
index 37a6b7cb..001c32c5 100644
--- a/drivers/crypto/qat/qat_crypto_capabilities.h
+++ b/drivers/crypto/qat/qat_crypto_capabilities.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2017 Intel Corporation
+ * Copyright(c) 2017-2018 Intel Corporation
*/
#ifndef _QAT_CRYPTO_CAPABILITIES_H_
diff --git a/drivers/crypto/qat/qat_logs.h b/drivers/crypto/qat/qat_logs.h
index 565089a4..c9144bf6 100644
--- a/drivers/crypto/qat/qat_logs.h
+++ b/drivers/crypto/qat/qat_logs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
+ * Copyright(c) 2015-2018 Intel Corporation
*/
#ifndef _QAT_LOGS_H_
diff --git a/drivers/crypto/qat/qat_qp.c b/drivers/crypto/qat/qat_qp.c
index 87b9ce0b..7fea10c7 100644
--- a/drivers/crypto/qat/qat_qp.c
+++ b/drivers/crypto/qat/qat_qp.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2015 Intel Corporation
+ * Copyright(c) 2015-2018 Intel Corporation
*/
#include <rte_common.h>
@@ -54,8 +54,6 @@ queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
int socket_id)
{
const struct rte_memzone *mz;
- unsigned memzone_flags = 0;
- const struct rte_memseg *ms;
PMD_INIT_FUNC_TRACE();
mz = rte_memzone_lookup(queue_name);
@@ -78,25 +76,8 @@ queue_dma_zone_reserve(const char *queue_name, uint32_t queue_size,
PMD_DRV_LOG(DEBUG, "Allocate memzone for %s, size %u on socket %u",
queue_name, queue_size, socket_id);
- ms = rte_eal_get_physmem_layout();
- switch (ms[0].hugepage_sz) {
- case(RTE_PGSIZE_2M):
- memzone_flags = RTE_MEMZONE_2MB;
- break;
- case(RTE_PGSIZE_1G):
- memzone_flags = RTE_MEMZONE_1GB;
- break;
- case(RTE_PGSIZE_16M):
- memzone_flags = RTE_MEMZONE_16MB;
- break;
- case(RTE_PGSIZE_16G):
- memzone_flags = RTE_MEMZONE_16GB;
- break;
- default:
- memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
- }
- return rte_memzone_reserve_aligned(queue_name, queue_size, socket_id,
- memzone_flags, queue_size);
+ return rte_memzone_reserve_aligned(queue_name, queue_size,
+ socket_id, RTE_MEMZONE_IOVA_CONTIG, queue_size);
}
int qat_crypto_sym_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
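
The deleted switch no longer has to guess memzone flags from the first memseg's hugepage size: RTE_MEMZONE_IOVA_CONTIG asks the allocator directly for IOVA-contiguous memory, which is what a DMA ring actually needs now that memzones are no longer physically contiguous by default. A minimal sketch of the pattern, with an illustrative zone name and size:

    /* sketch: reserve an IOVA-contiguous, size-aligned DMA ring */
    const struct rte_memzone *mz;

    mz = rte_memzone_reserve_aligned("example_qp_ring", ring_size,
            socket_id, RTE_MEMZONE_IOVA_CONTIG, ring_size);
    if (mz == NULL)
            return NULL; /* no contiguous IOVA range available */
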
diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c
index bf837401..c8da07af 100644
--- a/drivers/crypto/qat/rte_qat_cryptodev.c
+++ b/drivers/crypto/qat/rte_qat_cryptodev.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2015-2017 Intel Corporation
+ * Copyright(c) 2015-2018 Intel Corporation
*/
#include <rte_bus_pci.h>
@@ -130,7 +130,7 @@ static int crypto_qat_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
{
struct rte_cryptodev_pmd_init_params init_params = {
.name = "",
- .socket_id = rte_socket_id(),
+ .socket_id = pci_dev->device.numa_node,
.private_data_size = sizeof(struct qat_pmd_private),
.max_nb_sessions = RTE_QAT_PMD_MAX_NB_SESSIONS
};
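
Using the PCI device's own NUMA node instead of the probing thread's socket keeps the PMD private data local to the endpoint. A hedged sketch of the idea; the SOCKET_ID_ANY fallback is an assumption for illustration, not part of this patch:

    /* prefer the device's NUMA node; fall back if it is unknown */
    int socket_id = pci_dev->device.numa_node;

    if (socket_id == SOCKET_ID_ANY) /* assumed: node not reported */
            socket_id = rte_socket_id();
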
@@ -176,5 +176,5 @@ static struct cryptodev_driver qat_crypto_drv;
RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_QAT_SYM_PMD, rte_qat_pmd);
RTE_PMD_REGISTER_PCI_TABLE(CRYPTODEV_NAME_QAT_SYM_PMD, pci_id_qat_map);
-RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv, rte_qat_pmd,
+RTE_PMD_REGISTER_CRYPTO_DRIVER(qat_crypto_drv, rte_qat_pmd.driver,
cryptodev_qat_driver_id);
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
index 140c8b41..ed574cc1 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
@@ -91,8 +91,10 @@ update_scheduler_capability(struct scheduler_ctx *sched_ctx)
struct rte_cryptodev_capabilities tmp_caps[256] = { {0} };
uint32_t nb_caps = 0, i;
- if (sched_ctx->capabilities)
+ if (sched_ctx->capabilities) {
rte_free(sched_ctx->capabilities);
+ sched_ctx->capabilities = NULL;
+ }
for (i = 0; i < sched_ctx->nb_slaves; i++) {
struct rte_cryptodev_info dev_info;
@@ -462,8 +464,10 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
sched_ctx->ops.option_set = scheduler->ops->option_set;
sched_ctx->ops.option_get = scheduler->ops->option_get;
- if (sched_ctx->private_ctx)
+ if (sched_ctx->private_ctx) {
rte_free(sched_ctx->private_ctx);
+ sched_ctx->private_ctx = NULL;
+ }
if (sched_ctx->ops.create_private_ctx) {
int ret = (*sched_ctx->ops.create_private_ctx)(dev);
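
Clearing the pointer right after rte_free() is the idiom applied throughout this patch: it keeps a later close or reconfigure path from freeing the same allocation twice. A minimal sketch, with ctx->private_data as an illustrative field:

    if (ctx->private_data != NULL) {
            rte_free(ctx->private_data);
            ctx->private_data = NULL; /* guard against double-free */
    }
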
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
index 01e7646c..1c164da7 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
@@ -30,7 +30,7 @@ extern "C" {
#endif
/** Maximum number of multi-core worker cores */
-#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES (64)
+#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES (RTE_MAX_LCORE - 1)
/** Round-robin scheduling mode string */
#define SCHEDULER_MODE_NAME_ROUND_ROBIN round-robin
diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c
index b2ce44ce..91fb0668 100644
--- a/drivers/crypto/scheduler/scheduler_multicore.c
+++ b/drivers/crypto/scheduler/scheduler_multicore.c
@@ -21,8 +21,8 @@ struct mc_scheduler_ctx {
uint32_t num_workers; /**< Number of workers polling */
uint32_t stop_signal;
- struct rte_ring *sched_enq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
- struct rte_ring *sched_deq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+ struct rte_ring *sched_enq_ring[RTE_MAX_LCORE];
+ struct rte_ring *sched_deq_ring[RTE_MAX_LCORE];
};
struct mc_scheduler_qp_ctx {
@@ -328,11 +328,13 @@ static int
scheduler_create_private_ctx(struct rte_cryptodev *dev)
{
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
- struct mc_scheduler_ctx *mc_ctx;
+ struct mc_scheduler_ctx *mc_ctx = NULL;
uint16_t i;
- if (sched_ctx->private_ctx)
+ if (sched_ctx->private_ctx) {
rte_free(sched_ctx->private_ctx);
+ sched_ctx->private_ctx = NULL;
+ }
mc_ctx = rte_zmalloc_socket(NULL, sizeof(struct mc_scheduler_ctx), 0,
rte_socket_id());
@@ -345,25 +347,48 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)
for (i = 0; i < sched_ctx->nb_wc; i++) {
char r_name[16];
- snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX "%u", i);
- mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
- rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+ snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX
+ "%u_%u", dev->data->dev_id, i);
+ mc_ctx->sched_enq_ring[i] = rte_ring_lookup(r_name);
if (!mc_ctx->sched_enq_ring[i]) {
- CS_LOG_ERR("Cannot create ring for worker %u", i);
- return -1;
+ mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name,
+ PER_SLAVE_BUFF_SIZE,
+ rte_socket_id(),
+ RING_F_SC_DEQ | RING_F_SP_ENQ);
+ if (!mc_ctx->sched_enq_ring[i]) {
+ CS_LOG_ERR("Cannot create ring for worker %u",
+ i);
+ goto exit;
+ }
}
- snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX "%u", i);
- mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
- rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+ snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX
+ "%u_%u", dev->data->dev_id, i);
+ mc_ctx->sched_deq_ring[i] = rte_ring_lookup(r_name);
if (!mc_ctx->sched_deq_ring[i]) {
- CS_LOG_ERR("Cannot create ring for worker %u", i);
- return -1;
+ mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name,
+ PER_SLAVE_BUFF_SIZE,
+ rte_socket_id(),
+ RING_F_SC_DEQ | RING_F_SP_ENQ);
+ if (!mc_ctx->sched_deq_ring[i]) {
+ CS_LOG_ERR("Cannot create ring for worker %u",
+ i);
+ goto exit;
+ }
}
}
sched_ctx->private_ctx = (void *)mc_ctx;
return 0;
+
+exit:
+ for (i = 0; i < sched_ctx->nb_wc; i++) {
+ rte_ring_free(mc_ctx->sched_enq_ring[i]);
+ rte_ring_free(mc_ctx->sched_deq_ring[i]);
+ }
+ rte_free(mc_ctx);
+
+ return -1;
}
struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
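
The rings are now looked up before being created, and their names embed the device id: a scheduler that is torn down and re-created, or a second scheduler instance, reuses existing rings instead of failing because the name already exists. A minimal sketch of the lookup-then-create idiom with illustrative names:

    /* reuse a ring if a previous instance already created it */
    struct rte_ring *r = rte_ring_lookup("mcs_enq_0_0");

    if (r == NULL)
            r = rte_ring_create("mcs_enq_0_0", 1024, rte_socket_id(),
                    RING_F_SP_ENQ | RING_F_SC_DEQ);
    if (r == NULL)
            return -1; /* neither found nor created */
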
diff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
index 96bf0161..d09e849a 100644
--- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
+++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
@@ -334,8 +334,10 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)
struct scheduler_ctx *sched_ctx = dev->data->dev_private;
struct psd_scheduler_ctx *psd_ctx;
- if (sched_ctx->private_ctx)
+ if (sched_ctx->private_ctx) {
rte_free(sched_ctx->private_ctx);
+ sched_ctx->private_ctx = NULL;
+ }
psd_ctx = rte_zmalloc_socket(NULL, sizeof(struct psd_scheduler_ctx), 0,
rte_socket_id());
diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c
index 51a85fa6..25d6409f 100644
--- a/drivers/crypto/scheduler/scheduler_pmd.c
+++ b/drivers/crypto/scheduler/scheduler_pmd.c
@@ -20,7 +20,8 @@ struct scheduler_init_params {
uint32_t nb_slaves;
enum rte_cryptodev_scheduler_mode mode;
uint32_t enable_ordering;
- uint64_t wcmask;
+ uint16_t wc_pool[RTE_MAX_LCORE];
+ uint16_t nb_wc;
char slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES]
[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
};
@@ -86,10 +87,6 @@ cryptodev_scheduler_create(const char *name,
return -EFAULT;
}
- if (init_params->wcmask != 0)
- RTE_LOG(INFO, PMD, " workers core mask = %"PRIx64"\n",
- init_params->wcmask);
-
dev->driver_id = cryptodev_driver_id;
dev->dev_ops = rte_crypto_scheduler_pmd_ops;
@@ -100,15 +97,12 @@ cryptodev_scheduler_create(const char *name,
if (init_params->mode == CDEV_SCHED_MODE_MULTICORE) {
uint16_t i;
- sched_ctx->nb_wc = 0;
+ sched_ctx->nb_wc = init_params->nb_wc;
- for (i = 0; i < RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES; i++) {
- if (init_params->wcmask & (1ULL << i)) {
- sched_ctx->wc_pool[sched_ctx->nb_wc++] = i;
- RTE_LOG(INFO, PMD,
- " Worker core[%u]=%u added\n",
- sched_ctx->nb_wc-1, i);
- }
+ for (i = 0; i < sched_ctx->nb_wc; i++) {
+ sched_ctx->wc_pool[i] = init_params->wc_pool[i];
+ RTE_LOG(INFO, PMD, " Worker core[%u]=%u added\n",
+ i, sched_ctx->wc_pool[i]);
}
}
@@ -232,9 +226,47 @@ static int
parse_coremask_arg(const char *key __rte_unused,
const char *value, void *extra_args)
{
+ int i, j, val;
+ uint16_t idx = 0;
+ char c;
struct scheduler_init_params *params = extra_args;
- params->wcmask = strtoull(value, NULL, 16);
+ params->nb_wc = 0;
+
+ if (value == NULL)
+ return -1;
+	/* Strip leading and trailing blank characters,
+	 * then drop a leading 0x/0X prefix if present.
+	 */
+ while (isblank(*value))
+ value++;
+ if (value[0] == '0' && ((value[1] == 'x') || (value[1] == 'X')))
+ value += 2;
+ i = strlen(value);
+ while ((i > 0) && isblank(value[i - 1]))
+ i--;
+
+ if (i == 0)
+ return -1;
+
+ for (i = i - 1; i >= 0 && idx < RTE_MAX_LCORE; i--) {
+ c = value[i];
+ if (isxdigit(c) == 0) {
+ /* invalid characters */
+ return -1;
+ }
+ if (isdigit(c))
+ val = c - '0';
+ else if (isupper(c))
+ val = c - 'A' + 10;
+ else
+ val = c - 'a' + 10;
+
+ for (j = 0; j < 4 && idx < RTE_MAX_LCORE; j++, idx++) {
+ if ((1 << j) & val)
+ params->wc_pool[params->nb_wc++] = idx;
+ }
+ }
return 0;
}
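
The parser walks the hex string from its last digit to its first, so bit 0 of the final digit is worker core 0. A worked example with the illustrative input "0x1A" (binary 11010):

    digit 'A' = 1010b -> bits 1 and 3 -> cores 1 and 3
    digit '1' = 0001b -> bit 0 at index 4 -> core 4

    result: wc_pool = {1, 3, 4}, nb_wc = 3
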
@@ -246,7 +278,7 @@ parse_corelist_arg(const char *key __rte_unused,
{
struct scheduler_init_params *params = extra_args;
- params->wcmask = 0ULL;
+ params->nb_wc = 0;
const char *token = value;
@@ -254,7 +286,11 @@ parse_corelist_arg(const char *key __rte_unused,
char *rval;
unsigned int core = strtoul(token, &rval, 10);
- params->wcmask |= 1ULL << core;
+		if (core >= RTE_MAX_LCORE) {
+			CS_LOG_ERR("Invalid worker core %u, should be smaller "
+				"than %u.\n", core, RTE_MAX_LCORE);
+			return -1;
+		}
+ params->wc_pool[params->nb_wc++] = (uint16_t)core;
token = (const char *)rval;
if (token[0] == '\0')
break;
@@ -468,5 +504,5 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SCHEDULER_PMD,
"socket_id=<int> "
"slave=<name>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(scheduler_crypto_drv,
- cryptodev_scheduler_pmd_drv,
+ cryptodev_scheduler_pmd_drv.driver,
cryptodev_driver_id);
diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c b/drivers/crypto/scheduler/scheduler_pmd_ops.c
index 680c2afb..147dc51e 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_ops.c
+++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c
@@ -46,6 +46,7 @@ scheduler_attach_init_slave(struct rte_cryptodev *dev)
sched_ctx->init_slave_names[i]);
rte_free(sched_ctx->init_slave_names[i]);
+ sched_ctx->init_slave_names[i] = NULL;
sched_ctx->nb_init_slaves -= 1;
}
@@ -261,11 +262,15 @@ scheduler_pmd_close(struct rte_cryptodev *dev)
}
}
- if (sched_ctx->private_ctx)
+ if (sched_ctx->private_ctx) {
rte_free(sched_ctx->private_ctx);
+ sched_ctx->private_ctx = NULL;
+ }
- if (sched_ctx->capabilities)
+ if (sched_ctx->capabilities) {
rte_free(sched_ctx->capabilities);
+ sched_ctx->capabilities = NULL;
+ }
return 0;
}
diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
index dd7ca5a4..12410b48 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -60,7 +60,7 @@ struct scheduler_ctx {
char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
- uint16_t wc_pool[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+ uint16_t wc_pool[RTE_MAX_LCORE];
uint16_t nb_wc;
char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c
index 27af69f2..72751e35 100644
--- a/drivers/crypto/snow3g/rte_snow3g_pmd.c
+++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c
@@ -339,7 +339,7 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session,
if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
memset(session, 0, sizeof(struct snow3g_session));
memset(ops[i]->sym->session, 0,
- rte_cryptodev_get_header_session_size());
+ rte_cryptodev_sym_get_header_session_size());
rte_mempool_put(qp->sess_mp, session);
rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
ops[i]->sym->session = NULL;
@@ -619,5 +619,5 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_SNOW3G_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(snow3g_crypto_drv, cryptodev_snow3g_pmd_drv,
- cryptodev_driver_id);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(snow3g_crypto_drv,
+ cryptodev_snow3g_pmd_drv.driver, cryptodev_driver_id);
diff --git a/drivers/crypto/virtio/Makefile b/drivers/crypto/virtio/Makefile
new file mode 100644
index 00000000..be7b828f
--- /dev/null
+++ b/drivers/crypto/virtio/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_virtio_crypto.a
+
+#
+# include virtio_crypto.h
+#
+CFLAGS += -I$(RTE_SDK)/lib/librte_vhost
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_virtio_crypto_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtqueue.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio_pci.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio_rxtx.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_VIRTIO_CRYPTO) += virtio_cryptodev.c
+
+# this lib depends upon:
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+LDLIBS += -lrte_cryptodev
+LDLIBS += -lrte_pci -lrte_bus_pci
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/crypto/virtio/meson.build b/drivers/crypto/virtio/meson.build
new file mode 100644
index 00000000..b15b3f9f
--- /dev/null
+++ b/drivers/crypto/virtio/meson.build
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+
+includes += include_directories('../../../lib/librte_vhost')
+deps += 'bus_pci'
+name = 'virtio_crypto'
+sources = files('virtio_cryptodev.c', 'virtio_pci.c',
+ 'virtio_rxtx.c', 'virtqueue.c')
diff --git a/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map b/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map
new file mode 100644
index 00000000..de8e412f
--- /dev/null
+++ b/drivers/crypto/virtio/rte_pmd_virtio_crypto_version.map
@@ -0,0 +1,3 @@
+DPDK_18.05 {
+ local: *;
+};
diff --git a/drivers/crypto/virtio/virtio_crypto_algs.h b/drivers/crypto/virtio/virtio_crypto_algs.h
new file mode 100644
index 00000000..4c44af37
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_algs.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_CRYPTO_ALGS_H_
+#define _VIRTIO_CRYPTO_ALGS_H_
+
+#include <rte_memory.h>
+
+#include "virtio_crypto.h"
+
+struct virtio_crypto_session {
+ uint64_t session_id;
+
+ struct {
+ uint16_t offset;
+ uint16_t length;
+ } iv;
+
+ struct {
+ uint32_t length;
+ phys_addr_t phys_addr;
+ } aad;
+
+ struct virtio_crypto_op_ctrl_req ctrl;
+};
+
+#endif /* _VIRTIO_CRYPTO_ALGS_H_ */
diff --git a/drivers/crypto/virtio/virtio_crypto_capabilities.h b/drivers/crypto/virtio/virtio_crypto_capabilities.h
new file mode 100644
index 00000000..03c30dee
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_crypto_capabilities.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_CRYPTO_CAPABILITIES_H_
+#define _VIRTIO_CRYPTO_CAPABILITIES_H_
+
+#define VIRTIO_SYM_CAPABILITIES \
+ { /* SHA1 HMAC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, \
+ {.auth = { \
+ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC, \
+ .block_size = 64, \
+ .key_size = { \
+ .min = 1, \
+ .max = 64, \
+ .increment = 1 \
+ }, \
+ .digest_size = { \
+ .min = 1, \
+ .max = 20, \
+ .increment = 1 \
+ }, \
+ .iv_size = { 0 } \
+ }, } \
+ }, } \
+ }, \
+ { /* AES CBC */ \
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, \
+ {.sym = { \
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, \
+ {.cipher = { \
+ .algo = RTE_CRYPTO_CIPHER_AES_CBC, \
+ .block_size = 16, \
+ .key_size = { \
+ .min = 16, \
+ .max = 32, \
+ .increment = 8 \
+ }, \
+ .iv_size = { \
+ .min = 16, \
+ .max = 16, \
+ .increment = 0 \
+ } \
+ }, } \
+ }, } \
+ }
+
+#endif /* _VIRTIO_CRYPTO_CAPABILITIES_H_ */
diff --git a/drivers/crypto/virtio/virtio_cryptodev.c b/drivers/crypto/virtio/virtio_cryptodev.c
new file mode 100644
index 00000000..df88953f
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_cryptodev.c
@@ -0,0 +1,1504 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+#include <stdbool.h>
+#include <unistd.h>
+
+#include <rte_common.h>
+#include <rte_errno.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_eal.h>
+
+#include "virtio_cryptodev.h"
+#include "virtqueue.h"
+#include "virtio_crypto_algs.h"
+#include "virtio_crypto_capabilities.h"
+
+int virtio_crypto_logtype_init;
+int virtio_crypto_logtype_session;
+int virtio_crypto_logtype_rx;
+int virtio_crypto_logtype_tx;
+int virtio_crypto_logtype_driver;
+
+static int virtio_crypto_dev_configure(struct rte_cryptodev *dev,
+ struct rte_cryptodev_config *config);
+static int virtio_crypto_dev_start(struct rte_cryptodev *dev);
+static void virtio_crypto_dev_stop(struct rte_cryptodev *dev);
+static int virtio_crypto_dev_close(struct rte_cryptodev *dev);
+static void virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info);
+static void virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats);
+static void virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev);
+static int virtio_crypto_qp_setup(struct rte_cryptodev *dev,
+ uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id,
+ struct rte_mempool *session_pool);
+static int virtio_crypto_qp_release(struct rte_cryptodev *dev,
+ uint16_t queue_pair_id);
+static void virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev);
+static unsigned int virtio_crypto_sym_get_session_private_size(
+ struct rte_cryptodev *dev);
+static void virtio_crypto_sym_clear_session(struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess);
+static int virtio_crypto_sym_configure_session(struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *session,
+ struct rte_mempool *mp);
+
+/*
+ * The set of PCI devices this driver supports
+ */
+static const struct rte_pci_id pci_id_virtio_crypto_map[] = {
+ { RTE_PCI_DEVICE(VIRTIO_CRYPTO_PCI_VENDORID,
+ VIRTIO_CRYPTO_PCI_DEVICEID) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static const struct rte_cryptodev_capabilities virtio_capabilities[] = {
+ VIRTIO_SYM_CAPABILITIES,
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+};
+
+uint8_t cryptodev_virtio_driver_id;
+
+#define NUM_ENTRY_SYM_CREATE_SESSION 4
+
+static int
+virtio_crypto_send_command(struct virtqueue *vq,
+ struct virtio_crypto_op_ctrl_req *ctrl, uint8_t *cipher_key,
+ uint8_t *auth_key, struct virtio_crypto_session *session)
+{
+ uint8_t idx = 0;
+ uint8_t needed = 1;
+ uint32_t head = 0;
+ uint32_t len_cipher_key = 0;
+ uint32_t len_auth_key = 0;
+ uint32_t len_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
+ uint32_t len_session_input = sizeof(struct virtio_crypto_session_input);
+ uint32_t len_total = 0;
+ uint32_t input_offset = 0;
+ void *virt_addr_started = NULL;
+ phys_addr_t phys_addr_started;
+ struct vring_desc *desc;
+ uint32_t desc_offset;
+ struct virtio_crypto_session_input *input;
+ int ret;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (session == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("session is NULL.");
+ return -EINVAL;
+ }
+	/* cipher-only sessions are supported and indicated by a NULL
+	 * auth_key, but a cipher key is always required
+	 */
+ if (!cipher_key) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("cipher key is NULL.");
+ return -EINVAL;
+ }
+
+ head = vq->vq_desc_head_idx;
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_desc_head_idx = %d, vq = %p",
+ head, vq);
+
+ if (vq->vq_free_cnt < needed) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Not enough entry");
+ return -ENOSPC;
+ }
+
+ /* calculate the length of cipher key */
+ if (cipher_key) {
+ switch (ctrl->u.sym_create_session.op_type) {
+ case VIRTIO_CRYPTO_SYM_OP_CIPHER:
+ len_cipher_key
+ = ctrl->u.sym_create_session.u.cipher
+ .para.keylen;
+ break;
+ case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
+ len_cipher_key
+ = ctrl->u.sym_create_session.u.chain
+ .para.cipher_param.keylen;
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("invalid op type");
+ return -EINVAL;
+ }
+ }
+
+ /* calculate the length of auth key */
+ if (auth_key) {
+ len_auth_key =
+ ctrl->u.sym_create_session.u.chain.para.u.mac_param
+ .auth_key_len;
+ }
+
+	/*
+	 * Allocate one buffer to hold the ctrl request, cipher key,
+	 * auth key, session input and the indirect vring_desc table.
+	 */
+ desc_offset = len_ctrl_req + len_cipher_key + len_auth_key
+ + len_session_input;
+ virt_addr_started = rte_malloc(NULL,
+ desc_offset + NUM_ENTRY_SYM_CREATE_SESSION
+ * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
+ if (virt_addr_started == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap memory");
+ return -ENOSPC;
+ }
+ phys_addr_started = rte_malloc_virt2iova(virt_addr_started);
+
+ /* address to store indirect vring desc entries */
+ desc = (struct vring_desc *)
+ ((uint8_t *)virt_addr_started + desc_offset);
+
+ /* ctrl req part */
+ memcpy(virt_addr_started, ctrl, len_ctrl_req);
+ desc[idx].addr = phys_addr_started;
+ desc[idx].len = len_ctrl_req;
+ desc[idx].flags = VRING_DESC_F_NEXT;
+ desc[idx].next = idx + 1;
+ idx++;
+ len_total += len_ctrl_req;
+ input_offset += len_ctrl_req;
+
+ /* cipher key part */
+ if (len_cipher_key > 0) {
+ memcpy((uint8_t *)virt_addr_started + len_total,
+ cipher_key, len_cipher_key);
+
+ desc[idx].addr = phys_addr_started + len_total;
+ desc[idx].len = len_cipher_key;
+ desc[idx].flags = VRING_DESC_F_NEXT;
+ desc[idx].next = idx + 1;
+ idx++;
+ len_total += len_cipher_key;
+ input_offset += len_cipher_key;
+ }
+
+ /* auth key part */
+ if (len_auth_key > 0) {
+ memcpy((uint8_t *)virt_addr_started + len_total,
+ auth_key, len_auth_key);
+
+ desc[idx].addr = phys_addr_started + len_total;
+ desc[idx].len = len_auth_key;
+ desc[idx].flags = VRING_DESC_F_NEXT;
+ desc[idx].next = idx + 1;
+ idx++;
+ len_total += len_auth_key;
+ input_offset += len_auth_key;
+ }
+
+ /* input part */
+ input = (struct virtio_crypto_session_input *)
+ ((uint8_t *)virt_addr_started + input_offset);
+ input->status = VIRTIO_CRYPTO_ERR;
+ input->session_id = ~0ULL;
+ desc[idx].addr = phys_addr_started + len_total;
+ desc[idx].len = len_session_input;
+ desc[idx].flags = VRING_DESC_F_WRITE;
+ idx++;
+
+ /* use a single desc entry */
+ vq->vq_ring.desc[head].addr = phys_addr_started + desc_offset;
+ vq->vq_ring.desc[head].len = idx * sizeof(struct vring_desc);
+ vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
+ vq->vq_free_cnt--;
+
+ vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
+
+ vq_update_avail_ring(vq, head);
+ vq_update_avail_idx(vq);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
+ vq->vq_queue_index);
+
+ virtqueue_notify(vq);
+
+ rte_rmb();
+ while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
+ rte_rmb();
+ usleep(100);
+ }
+
+ while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
+ uint32_t idx, desc_idx, used_idx;
+ struct vring_used_elem *uep;
+
+ used_idx = (uint32_t)(vq->vq_used_cons_idx
+ & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+ idx = (uint32_t) uep->id;
+ desc_idx = idx;
+
+ while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
+ desc_idx = vq->vq_ring.desc[desc_idx].next;
+ vq->vq_free_cnt++;
+ }
+
+ vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
+ vq->vq_desc_head_idx = idx;
+
+ vq->vq_used_cons_idx++;
+ vq->vq_free_cnt++;
+ }
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
+ "vq->vq_desc_head_idx=%d",
+ vq->vq_free_cnt, vq->vq_desc_head_idx);
+
+ /* get the result */
+ if (input->status != VIRTIO_CRYPTO_OK) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Something wrong on backend! "
+ "status=%u, session_id=%" PRIu64 "",
+ input->status, input->session_id);
+ rte_free(virt_addr_started);
+ ret = -1;
+ } else {
+ session->session_id = input->session_id;
+
+ VIRTIO_CRYPTO_SESSION_LOG_INFO("Create session successfully, "
+ "session_id=%" PRIu64 "", input->session_id);
+ rte_free(virt_addr_started);
+ ret = 0;
+ }
+
+ return ret;
+}
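
The whole request is packed into one allocation whose tail doubles as an indirect descriptor table, so the control queue spends a single real descriptor (VRING_DESC_F_INDIRECT) per session-create. A sketch of the layout built above:

    /*
     *  +----------------------------+ <- virt/phys_addr_started
     *  | virtio_crypto_op_ctrl_req  | desc[0], device reads
     *  +----------------------------+
     *  | cipher key (if any)        | desc[1], device reads
     *  +----------------------------+
     *  | auth key (if any)          | desc[2], device reads
     *  +----------------------------+
     *  | virtio_crypto_session_     | last desc, VRING_DESC_F_WRITE:
     *  |   input (status, id)       | device writes status/session_id
     *  +----------------------------+
     *  | vring_desc[4] (indirect)   | pointed at by the one real
     *  +----------------------------+ descriptor in the control vq
     */
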
+
+void
+virtio_crypto_queue_release(struct virtqueue *vq)
+{
+ struct virtio_crypto_hw *hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (vq) {
+ hw = vq->hw;
+ /* Select and deactivate the queue */
+ VTPCI_OPS(hw)->del_queue(hw, vq);
+
+ rte_memzone_free(vq->mz);
+ rte_mempool_free(vq->mpool);
+ rte_free(vq);
+ }
+}
+
+#define MPOOL_MAX_NAME_SZ 32
+
+int
+virtio_crypto_queue_setup(struct rte_cryptodev *dev,
+ int queue_type,
+ uint16_t vtpci_queue_idx,
+ uint16_t nb_desc,
+ int socket_id,
+ struct virtqueue **pvq)
+{
+ char vq_name[VIRTQUEUE_MAX_NAME_SZ];
+ char mpool_name[MPOOL_MAX_NAME_SZ];
+ const struct rte_memzone *mz;
+ unsigned int vq_size, size;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+ struct virtqueue *vq = NULL;
+ uint32_t i = 0;
+ uint32_t j;
+
+ PMD_INIT_FUNC_TRACE();
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("setting up queue: %u", vtpci_queue_idx);
+
+ /*
+	 * Read the virtqueue size from the Queue Size field.
+	 * It is always a power of 2; a value of 0 means the
+	 * virtqueue does not exist.
+ */
+ vq_size = VTPCI_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
+ if (vq_size == 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue does not exist");
+ return -EINVAL;
+ }
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq_size: %u", vq_size);
+
+ if (!rte_is_power_of_2(vq_size)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("virtqueue size is not powerof 2");
+ return -EINVAL;
+ }
+
+ if (queue_type == VTCRYPTO_DATAQ) {
+ snprintf(vq_name, sizeof(vq_name), "dev%d_dataqueue%d",
+ dev->data->dev_id, vtpci_queue_idx);
+ snprintf(mpool_name, sizeof(mpool_name),
+ "dev%d_dataqueue%d_mpool",
+ dev->data->dev_id, vtpci_queue_idx);
+ } else if (queue_type == VTCRYPTO_CTRLQ) {
+ snprintf(vq_name, sizeof(vq_name), "dev%d_controlqueue",
+ dev->data->dev_id);
+ snprintf(mpool_name, sizeof(mpool_name),
+ "dev%d_controlqueue_mpool",
+ dev->data->dev_id);
+ }
+ size = RTE_ALIGN_CEIL(sizeof(*vq) +
+ vq_size * sizeof(struct vq_desc_extra),
+ RTE_CACHE_LINE_SIZE);
+ vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (vq == NULL) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("Can not allocate virtqueue");
+ return -ENOMEM;
+ }
+
+ if (queue_type == VTCRYPTO_DATAQ) {
+ /* pre-allocate a mempool and use it in the data plane to
+ * improve performance
+ */
+ vq->mpool = rte_mempool_lookup(mpool_name);
+ if (vq->mpool == NULL)
+ vq->mpool = rte_mempool_create(mpool_name,
+ vq_size,
+ sizeof(struct virtio_crypto_op_cookie),
+ RTE_CACHE_LINE_SIZE, 0,
+ NULL, NULL, NULL, NULL, socket_id,
+ 0);
+ if (!vq->mpool) {
+ VIRTIO_CRYPTO_DRV_LOG_ERR("Virtio Crypto PMD "
+ "Cannot create mempool");
+ goto mpool_create_err;
+ }
+ for (i = 0; i < vq_size; i++) {
+ vq->vq_descx[i].cookie =
+ rte_zmalloc("crypto PMD op cookie pointer",
+ sizeof(struct virtio_crypto_op_cookie),
+ RTE_CACHE_LINE_SIZE);
+ if (vq->vq_descx[i].cookie == NULL) {
+ VIRTIO_CRYPTO_DRV_LOG_ERR("Failed to "
+ "alloc mem for cookie");
+ goto cookie_alloc_err;
+ }
+ }
+ }
+
+ vq->hw = hw;
+ vq->dev_id = dev->data->dev_id;
+ vq->vq_queue_index = vtpci_queue_idx;
+ vq->vq_nentries = vq_size;
+
+ /*
+ * Using part of the vring entries is permitted, but the maximum
+ * is vq_size
+ */
+ if (nb_desc == 0 || nb_desc > vq_size)
+ nb_desc = vq_size;
+ vq->vq_free_cnt = nb_desc;
+
+ /*
+ * Reserve a memzone for vring elements
+ */
+ size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
+ vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("%s vring_size: %d, rounded_vring_size: %d",
+ (queue_type == VTCRYPTO_DATAQ) ? "dataq" : "ctrlq",
+ size, vq->vq_ring_size);
+
+ mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
+ socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
+ if (mz == NULL) {
+ if (rte_errno == EEXIST)
+ mz = rte_memzone_lookup(vq_name);
+ if (mz == NULL) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("not enough memory");
+ goto mz_reserve_err;
+ }
+ }
+
+ /*
+	 * The virtio PCI VIRTIO_PCI_QUEUE_PFN register is 32 bits wide
+	 * and holds a page frame number, so with 4 KiB pages the ring
+	 * must sit below 2^(32+12) bytes = 16 TiB. Check that the
+	 * allocated physical memory does not exceed this limit.
+ */
+ if ((mz->phys_addr + vq->vq_ring_size - 1)
+ >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be "
+ "above 16TB!");
+ goto vring_addr_err;
+ }
+
+ memset(mz->addr, 0, sizeof(mz->len));
+ vq->mz = mz;
+ vq->vq_ring_mem = mz->phys_addr;
+ vq->vq_ring_virt_mem = mz->addr;
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_mem(physical): 0x%"PRIx64,
+ (uint64_t)mz->phys_addr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_ring_virt_mem: 0x%"PRIx64,
+ (uint64_t)(uintptr_t)mz->addr);
+
+ *pvq = vq;
+
+ return 0;
+
+vring_addr_err:
+ rte_memzone_free(mz);
+mz_reserve_err:
+cookie_alloc_err:
+ rte_mempool_free(vq->mpool);
+ if (i != 0) {
+ for (j = 0; j < i; j++)
+ rte_free(vq->vq_descx[j].cookie);
+ }
+mpool_create_err:
+ rte_free(vq);
+ return -ENOMEM;
+}
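
The memzone size comes from the standard split-ring layout; as a rough sketch for N entries (event-suppression words omitted):

    /*
     * vring_size(N, align) ~= ALIGN(16*N          descriptor table
     *                             + 4 + 2*N,      avail ring
     *                               align)
     *                       + 4 + 8*N             used ring
     *
     * the code above then rounds this up to a multiple of
     * VIRTIO_PCI_VRING_ALIGN before reserving the memzone.
     */
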
+
+static int
+virtio_crypto_ctrlq_setup(struct rte_cryptodev *dev, uint16_t queue_idx)
+{
+ int ret;
+ struct virtqueue *vq;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ /* if virtio device has started, do not touch the virtqueues */
+ if (dev->data->dev_started)
+ return 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = virtio_crypto_queue_setup(dev, VTCRYPTO_CTRLQ, queue_idx,
+ 0, SOCKET_ID_ANY, &vq);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("control vq initialization failed");
+ return ret;
+ }
+
+ hw->cvq = vq;
+
+ return 0;
+}
+
+static void
+virtio_crypto_free_queues(struct rte_cryptodev *dev)
+{
+ unsigned int i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* control queue release */
+ virtio_crypto_queue_release(hw->cvq);
+
+ /* data queue release */
+ for (i = 0; i < hw->max_dataqueues; i++)
+ virtio_crypto_queue_release(dev->data->queue_pairs[i]);
+}
+
+static int
+virtio_crypto_dev_close(struct rte_cryptodev *dev __rte_unused)
+{
+ return 0;
+}
+
+/*
+ * dev_ops for virtio, bare necessities for basic operation
+ */
+static struct rte_cryptodev_ops virtio_crypto_dev_ops = {
+ /* Device related operations */
+ .dev_configure = virtio_crypto_dev_configure,
+ .dev_start = virtio_crypto_dev_start,
+ .dev_stop = virtio_crypto_dev_stop,
+ .dev_close = virtio_crypto_dev_close,
+ .dev_infos_get = virtio_crypto_dev_info_get,
+
+ .stats_get = virtio_crypto_dev_stats_get,
+ .stats_reset = virtio_crypto_dev_stats_reset,
+
+ .queue_pair_setup = virtio_crypto_qp_setup,
+ .queue_pair_release = virtio_crypto_qp_release,
+ .queue_pair_start = NULL,
+ .queue_pair_stop = NULL,
+ .queue_pair_count = NULL,
+
+ /* Crypto related operations */
+ .session_get_size = virtio_crypto_sym_get_session_private_size,
+ .session_configure = virtio_crypto_sym_configure_session,
+ .session_clear = virtio_crypto_sym_clear_session,
+ .qp_attach_session = NULL,
+ .qp_detach_session = NULL
+};
+
+static void
+virtio_crypto_update_stats(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ unsigned int i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (stats == NULL) {
+ VIRTIO_CRYPTO_DRV_LOG_ERR("invalid pointer");
+ return;
+ }
+
+ for (i = 0; i < hw->max_dataqueues; i++) {
+ const struct virtqueue *data_queue
+ = dev->data->queue_pairs[i];
+ if (data_queue == NULL)
+ continue;
+
+ stats->enqueued_count += data_queue->packets_sent_total;
+ stats->enqueue_err_count += data_queue->packets_sent_failed;
+
+ stats->dequeued_count += data_queue->packets_received_total;
+ stats->dequeue_err_count
+ += data_queue->packets_received_failed;
+ }
+}
+
+static void
+virtio_crypto_dev_stats_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_stats *stats)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ virtio_crypto_update_stats(dev, stats);
+}
+
+static void
+virtio_crypto_dev_stats_reset(struct rte_cryptodev *dev)
+{
+ unsigned int i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < hw->max_dataqueues; i++) {
+ struct virtqueue *data_queue = dev->data->queue_pairs[i];
+ if (data_queue == NULL)
+ continue;
+
+ data_queue->packets_sent_total = 0;
+ data_queue->packets_sent_failed = 0;
+
+ data_queue->packets_received_total = 0;
+ data_queue->packets_received_failed = 0;
+ }
+}
+
+static int
+virtio_crypto_qp_setup(struct rte_cryptodev *dev, uint16_t queue_pair_id,
+ const struct rte_cryptodev_qp_conf *qp_conf,
+ int socket_id,
+ struct rte_mempool *session_pool __rte_unused)
+{
+ int ret;
+ struct virtqueue *vq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* if virtio dev is started, do not touch the virtqueues */
+ if (dev->data->dev_started)
+ return 0;
+
+ ret = virtio_crypto_queue_setup(dev, VTCRYPTO_DATAQ, queue_pair_id,
+ qp_conf->nb_descriptors, socket_id, &vq);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR(
+ "virtio crypto data queue initialization failed\n");
+ return ret;
+ }
+
+ dev->data->queue_pairs[queue_pair_id] = vq;
+
+ return 0;
+}
+
+static int
+virtio_crypto_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
+{
+ struct virtqueue *vq
+ = (struct virtqueue *)dev->data->queue_pairs[queue_pair_id];
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (vq == NULL) {
+ VIRTIO_CRYPTO_DRV_LOG_DBG("vq already freed");
+ return 0;
+ }
+
+ virtio_crypto_queue_release(vq);
+ return 0;
+}
+
+static int
+virtio_negotiate_features(struct virtio_crypto_hw *hw, uint64_t req_features)
+{
+ uint64_t host_features;
+
+ PMD_INIT_FUNC_TRACE();
+
+	/* Prepare guest_features: the features the driver wants to support */
+ VIRTIO_CRYPTO_INIT_LOG_DBG("guest_features before negotiate = %" PRIx64,
+ req_features);
+
+ /* Read device(host) feature bits */
+ host_features = VTPCI_OPS(hw)->get_features(hw);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("host_features before negotiate = %" PRIx64,
+ host_features);
+
+ /*
+	 * Negotiate features: the subset of device feature bits that the
+	 * driver also supports is written back as the guest feature bits.
+ */
+ hw->guest_features = req_features;
+ hw->guest_features = vtpci_cryptodev_negotiate_features(hw,
+ host_features);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("features after negotiate = %" PRIx64,
+ hw->guest_features);
+
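+	/*
+	 * Virtio 1.0 handshake: write FEATURES_OK, then read the status
+	 * back; if the device cleared the bit, it rejected the negotiated
+	 * feature subset.
+	 */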
+ if (hw->modern) {
+ if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR(
+ "VIRTIO_F_VERSION_1 features is not enabled.");
+ return -1;
+ }
+ vtpci_cryptodev_set_status(hw,
+ VIRTIO_CONFIG_STATUS_FEATURES_OK);
+ if (!(vtpci_cryptodev_get_status(hw) &
+ VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("failed to set FEATURES_OK "
+ "status!");
+ return -1;
+ }
+ }
+
+ hw->req_guest_features = req_features;
+
+ return 0;
+}
+
+/* reset device and renegotiate features if needed */
+static int
+virtio_crypto_init_device(struct rte_cryptodev *cryptodev,
+ uint64_t req_features)
+{
+ struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
+ struct virtio_crypto_config local_config;
+ struct virtio_crypto_config *config = &local_config;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* Reset the device although not necessary at startup */
+ vtpci_cryptodev_reset(hw);
+
+ /* Tell the host we've noticed this device. */
+ vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
+
+	/* Tell the host we know how to drive the device. */
+ vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
+ if (virtio_negotiate_features(hw, req_features) < 0)
+ return -1;
+
+ /* Get status of the device */
+ vtpci_read_cryptodev_config(hw,
+ offsetof(struct virtio_crypto_config, status),
+ &config->status, sizeof(config->status));
+ if (config->status != VIRTIO_CRYPTO_S_HW_READY) {
+ VIRTIO_CRYPTO_DRV_LOG_ERR("accelerator hardware is "
+ "not ready");
+ return -1;
+ }
+
+ /* Get number of data queues */
+ vtpci_read_cryptodev_config(hw,
+ offsetof(struct virtio_crypto_config, max_dataqueues),
+ &config->max_dataqueues,
+ sizeof(config->max_dataqueues));
+ hw->max_dataqueues = config->max_dataqueues;
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("hw->max_dataqueues=%d",
+ hw->max_dataqueues);
+
+ return 0;
+}
+
+/*
+ * This function does the real work of probe() for a virtio crypto device.
+ * It returns 0 on success.
+ */
+static int
+crypto_virtio_create(const char *name, struct rte_pci_device *pci_dev,
+ struct rte_cryptodev_pmd_init_params *init_params)
+{
+ struct rte_cryptodev *cryptodev;
+ struct virtio_crypto_hw *hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ cryptodev = rte_cryptodev_pmd_create(name, &pci_dev->device,
+ init_params);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ cryptodev->driver_id = cryptodev_virtio_driver_id;
+ cryptodev->dev_ops = &virtio_crypto_dev_ops;
+
+ cryptodev->enqueue_burst = virtio_crypto_pkt_tx_burst;
+ cryptodev->dequeue_burst = virtio_crypto_pkt_rx_burst;
+
+ cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
+ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
+
+ hw = cryptodev->data->dev_private;
+ hw->dev_id = cryptodev->data->dev_id;
+ hw->virtio_dev_capabilities = virtio_capabilities;
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("dev %d vendorID=0x%x deviceID=0x%x",
+ cryptodev->data->dev_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+
+ /* pci device init */
+ if (vtpci_cryptodev_init(pci_dev, hw))
+ return -1;
+
+ if (virtio_crypto_init_device(cryptodev,
+ VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int
+virtio_crypto_dev_uninit(struct rte_cryptodev *cryptodev)
+{
+ struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+ return -EPERM;
+
+ if (cryptodev->data->dev_started) {
+ virtio_crypto_dev_stop(cryptodev);
+ virtio_crypto_dev_close(cryptodev);
+ }
+
+ cryptodev->dev_ops = NULL;
+ cryptodev->enqueue_burst = NULL;
+ cryptodev->dequeue_burst = NULL;
+
+ /* release control queue */
+ virtio_crypto_queue_release(hw->cvq);
+
+ rte_free(cryptodev->data);
+ cryptodev->data = NULL;
+
+ VIRTIO_CRYPTO_DRV_LOG_INFO("dev_uninit completed");
+
+ return 0;
+}
+
+static int
+virtio_crypto_dev_configure(struct rte_cryptodev *cryptodev,
+ struct rte_cryptodev_config *config __rte_unused)
+{
+ struct virtio_crypto_hw *hw = cryptodev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (virtio_crypto_init_device(cryptodev,
+ VIRTIO_CRYPTO_PMD_GUEST_FEATURES) < 0)
+ return -1;
+
+	/* Set up the control queue:
+	 * queues [0, 1, ..., (config->max_dataqueues - 1)] are data queues,
+	 * and queue index config->max_dataqueues is the control queue.
+ */
+ if (virtio_crypto_ctrlq_setup(cryptodev, hw->max_dataqueues) < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("control queue setup error");
+ return -1;
+ }
+ virtio_crypto_ctrlq_start(cryptodev);
+
+ return 0;
+}
+
+static void
+virtio_crypto_dev_stop(struct rte_cryptodev *dev)
+{
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+ VIRTIO_CRYPTO_DRV_LOG_DBG("virtio_dev_stop");
+
+ vtpci_cryptodev_reset(hw);
+
+ virtio_crypto_dev_free_mbufs(dev);
+ virtio_crypto_free_queues(dev);
+
+ dev->data->dev_started = 0;
+}
+
+static int
+virtio_crypto_dev_start(struct rte_cryptodev *dev)
+{
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ if (dev->data->dev_started)
+ return 0;
+
+ /* Do final configuration before queue engine starts */
+ virtio_crypto_dataq_start(dev);
+ vtpci_cryptodev_reinit_complete(hw);
+
+ dev->data->dev_started = 1;
+
+ return 0;
+}
+
+static void
+virtio_crypto_dev_free_mbufs(struct rte_cryptodev *dev)
+{
+ uint32_t i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ for (i = 0; i < hw->max_dataqueues; i++) {
+ VIRTIO_CRYPTO_INIT_LOG_DBG("Before freeing dataq[%d] used "
+ "and unused buf", i);
+ VIRTQUEUE_DUMP((struct virtqueue *)
+ dev->data->queue_pairs[i]);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("queue_pairs[%d]=%p",
+ i, dev->data->queue_pairs[i]);
+
+ virtqueue_detatch_unused(dev->data->queue_pairs[i]);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("After freeing dataq[%d] used and "
+ "unused buf", i);
+ VIRTQUEUE_DUMP(
+ (struct virtqueue *)dev->data->queue_pairs[i]);
+ }
+}
+
+static unsigned int
+virtio_crypto_sym_get_session_private_size(
+ struct rte_cryptodev *dev __rte_unused)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ return RTE_ALIGN_CEIL(sizeof(struct virtio_crypto_session), 16);
+}
+
+static int
+virtio_crypto_check_sym_session_paras(
+ struct rte_cryptodev *dev)
+{
+ struct virtio_crypto_hw *hw;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(dev == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("dev is NULL");
+ return -1;
+ }
+ if (unlikely(dev->data == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("dev->data is NULL");
+ return -1;
+ }
+ hw = dev->data->dev_private;
+ if (unlikely(hw == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("hw is NULL");
+ return -1;
+ }
+ if (unlikely(hw->cvq == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("vq is NULL");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+virtio_crypto_check_sym_clear_session_paras(
+ struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (sess == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("sym_session is NULL");
+ return -1;
+ }
+
+ return virtio_crypto_check_sym_session_paras(dev);
+}
+
+#define NUM_ENTRY_SYM_CLEAR_SESSION 2
+
+static void
+virtio_crypto_sym_clear_session(
+ struct rte_cryptodev *dev,
+ struct rte_cryptodev_sym_session *sess)
+{
+ struct virtio_crypto_hw *hw;
+ struct virtqueue *vq;
+ struct virtio_crypto_session *session;
+ struct virtio_crypto_op_ctrl_req *ctrl;
+ struct vring_desc *desc;
+ uint8_t *status;
+ uint8_t needed = 1;
+ uint32_t head;
+ uint8_t *malloc_virt_addr;
+ uint64_t malloc_phys_addr;
+ uint8_t len_inhdr = sizeof(struct virtio_crypto_inhdr);
+ uint32_t len_op_ctrl_req = sizeof(struct virtio_crypto_op_ctrl_req);
+ uint32_t desc_offset = len_op_ctrl_req + len_inhdr;
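+	/*
+	 * Layout of the buffer allocated below: the op_ctrl_req is at
+	 * offset 0, the status inhdr follows it, and a two-entry indirect
+	 * descriptor table starts at desc_offset.
+	 */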
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (virtio_crypto_check_sym_clear_session_paras(dev, sess) < 0)
+ return;
+
+ hw = dev->data->dev_private;
+ vq = hw->cvq;
+ session = (struct virtio_crypto_session *)get_session_private_data(
+ sess, cryptodev_virtio_driver_id);
+ if (session == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid session parameter");
+ return;
+ }
+
+ VIRTIO_CRYPTO_SESSION_LOG_INFO("vq->vq_desc_head_idx = %d, "
+ "vq = %p", vq->vq_desc_head_idx, vq);
+
+ if (vq->vq_free_cnt < needed) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "vq->vq_free_cnt = %d is less than %d, "
+ "not enough", vq->vq_free_cnt, needed);
+ return;
+ }
+
+ /*
+	 * Allocate one contiguous buffer to hold the ctrl request op, the
+	 * returned status, and the indirect desc vring.
+ */
+ malloc_virt_addr = rte_malloc(NULL, len_op_ctrl_req + len_inhdr
+ + NUM_ENTRY_SYM_CLEAR_SESSION
+ * sizeof(struct vring_desc), RTE_CACHE_LINE_SIZE);
+ if (malloc_virt_addr == NULL) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("not enough heap room");
+ return;
+ }
+ malloc_phys_addr = rte_malloc_virt2iova(malloc_virt_addr);
+
+ /* assign ctrl request op part */
+ ctrl = (struct virtio_crypto_op_ctrl_req *)malloc_virt_addr;
+ ctrl->header.opcode = VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION;
+ /* default data virtqueue is 0 */
+ ctrl->header.queue_id = 0;
+ ctrl->u.destroy_session.session_id = session->session_id;
+
+ /* status part */
+ status = &(((struct virtio_crypto_inhdr *)
+ ((uint8_t *)malloc_virt_addr + len_op_ctrl_req))->status);
+ *status = VIRTIO_CRYPTO_ERR;
+
+ /* indirect desc vring part */
+ desc = (struct vring_desc *)((uint8_t *)malloc_virt_addr
+ + desc_offset);
+
+ /* ctrl request part */
+ desc[0].addr = malloc_phys_addr;
+ desc[0].len = len_op_ctrl_req;
+ desc[0].flags = VRING_DESC_F_NEXT;
+ desc[0].next = 1;
+
+ /* status part */
+ desc[1].addr = malloc_phys_addr + len_op_ctrl_req;
+ desc[1].len = len_inhdr;
+ desc[1].flags = VRING_DESC_F_WRITE;
+
+	/* use only a single ring slot, which points at the indirect table */
+ head = vq->vq_desc_head_idx;
+ vq->vq_ring.desc[head].flags = VRING_DESC_F_INDIRECT;
+ vq->vq_ring.desc[head].addr = malloc_phys_addr + desc_offset;
+ vq->vq_ring.desc[head].len
+ = NUM_ENTRY_SYM_CLEAR_SESSION
+ * sizeof(struct vring_desc);
+ vq->vq_free_cnt -= needed;
+
+ vq->vq_desc_head_idx = vq->vq_ring.desc[head].next;
+
+ vq_update_avail_ring(vq, head);
+ vq_update_avail_idx(vq);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_queue_index = %d",
+ vq->vq_queue_index);
+
+ virtqueue_notify(vq);
+
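+	/*
+	 * The control path is synchronous: poll the used ring until the
+	 * device has consumed the request, issuing a read barrier before
+	 * each re-read of used->idx.
+	 */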
+ rte_rmb();
+ while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) {
+ rte_rmb();
+ usleep(100);
+ }
+
+ while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) {
+ uint32_t idx, desc_idx, used_idx;
+ struct vring_used_elem *uep;
+
+ used_idx = (uint32_t)(vq->vq_used_cons_idx
+ & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+ idx = (uint32_t) uep->id;
+ desc_idx = idx;
+ while (vq->vq_ring.desc[desc_idx].flags
+ & VRING_DESC_F_NEXT) {
+ desc_idx = vq->vq_ring.desc[desc_idx].next;
+ vq->vq_free_cnt++;
+ }
+
+ vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
+ vq->vq_desc_head_idx = idx;
+ vq->vq_used_cons_idx++;
+ vq->vq_free_cnt++;
+ }
+
+ if (*status != VIRTIO_CRYPTO_OK) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Close session failed "
+ "status=%"PRIu32", session_id=%"PRIu64"",
+ *status, session->session_id);
+ rte_free(malloc_virt_addr);
+ return;
+ }
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("vq->vq_free_cnt=%d\n"
+ "vq->vq_desc_head_idx=%d",
+ vq->vq_free_cnt, vq->vq_desc_head_idx);
+
+ VIRTIO_CRYPTO_SESSION_LOG_INFO("Close session %"PRIu64" successfully ",
+ session->session_id);
+
+ memset(sess, 0, sizeof(struct virtio_crypto_session));
+ rte_free(malloc_virt_addr);
+}
+
+static struct rte_crypto_cipher_xform *
+virtio_crypto_get_cipher_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return &xform->cipher;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+static struct rte_crypto_auth_xform *
+virtio_crypto_get_auth_xform(struct rte_crypto_sym_xform *xform)
+{
+ do {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return &xform->auth;
+
+ xform = xform->next;
+ } while (xform);
+
+ return NULL;
+}
+
+/** Get xform chain order */
+static int
+virtio_crypto_get_chain_order(struct rte_crypto_sym_xform *xform)
+{
+ if (xform == NULL)
+ return -1;
+
+ /* Cipher Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next == NULL)
+ return VIRTIO_CRYPTO_CMD_CIPHER;
+
+ /* Authentication Only */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next == NULL)
+ return VIRTIO_CRYPTO_CMD_AUTH;
+
+ /* Authenticate then Cipher */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
+ return VIRTIO_CRYPTO_CMD_HASH_CIPHER;
+
+ /* Cipher then Authenticate */
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
+ return VIRTIO_CRYPTO_CMD_CIPHER_HASH;
+
+ return -1;
+}
+
+static int
+virtio_crypto_sym_pad_cipher_param(
+ struct virtio_crypto_cipher_session_para *para,
+ struct rte_crypto_cipher_xform *cipher_xform)
+{
+ switch (cipher_xform->algo) {
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ para->algo = VIRTIO_CRYPTO_CIPHER_AES_CBC;
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Crypto: Unsupported "
+ "Cipher alg %u", cipher_xform->algo);
+ return -1;
+ }
+
+ para->keylen = cipher_xform->key.length;
+ switch (cipher_xform->op) {
+ case RTE_CRYPTO_CIPHER_OP_ENCRYPT:
+ para->op = VIRTIO_CRYPTO_OP_ENCRYPT;
+ break;
+ case RTE_CRYPTO_CIPHER_OP_DECRYPT:
+ para->op = VIRTIO_CRYPTO_OP_DECRYPT;
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported cipher operation "
+ "parameter");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+virtio_crypto_sym_pad_auth_param(
+ struct virtio_crypto_op_ctrl_req *ctrl,
+ struct rte_crypto_auth_xform *auth_xform)
+{
+ uint32_t *algo;
+ struct virtio_crypto_alg_chain_session_para *para =
+ &(ctrl->u.sym_create_session.u.chain.para);
+
+ switch (ctrl->u.sym_create_session.u.chain.para.hash_mode) {
+ case VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN:
+ algo = &(para->u.hash_param.algo);
+ break;
+ case VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH:
+ algo = &(para->u.mac_param.algo);
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Unsupported hash mode %u "
+ "specified",
+ ctrl->u.sym_create_session.u.chain.para.hash_mode);
+ return -1;
+ }
+
+ switch (auth_xform->algo) {
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
+ *algo = VIRTIO_CRYPTO_MAC_HMAC_SHA1;
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "Crypto: Undefined Hash algo %u specified",
+ auth_xform->algo);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+virtio_crypto_sym_pad_op_ctrl_req(
+ struct virtio_crypto_op_ctrl_req *ctrl,
+	struct rte_crypto_sym_xform *xform, bool is_chained,
+ uint8_t **cipher_key_data, uint8_t **auth_key_data,
+ struct virtio_crypto_session *session)
+{
+ int ret;
+ struct rte_crypto_auth_xform *auth_xform = NULL;
+ struct rte_crypto_cipher_xform *cipher_xform = NULL;
+
+ /* Get cipher xform from crypto xform chain */
+ cipher_xform = virtio_crypto_get_cipher_xform(xform);
+ if (cipher_xform) {
+		if (is_chained)
+ ret = virtio_crypto_sym_pad_cipher_param(
+ &ctrl->u.sym_create_session.u.chain.para
+ .cipher_param, cipher_xform);
+ else
+ ret = virtio_crypto_sym_pad_cipher_param(
+ &ctrl->u.sym_create_session.u.cipher.para,
+ cipher_xform);
+
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "pad cipher parameter failed");
+ return -1;
+ }
+
+ *cipher_key_data = cipher_xform->key.data;
+
+ session->iv.offset = cipher_xform->iv.offset;
+ session->iv.length = cipher_xform->iv.length;
+ }
+
+ /* Get auth xform from crypto xform chain */
+ auth_xform = virtio_crypto_get_auth_xform(xform);
+ if (auth_xform) {
+ /* FIXME: support VIRTIO_CRYPTO_SYM_HASH_MODE_NESTED */
+ struct virtio_crypto_alg_chain_session_para *para =
+ &(ctrl->u.sym_create_session.u.chain.para);
+ if (auth_xform->key.length) {
+ para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH;
+ para->u.mac_param.auth_key_len =
+ (uint32_t)auth_xform->key.length;
+ para->u.mac_param.hash_result_len =
+ auth_xform->digest_length;
+
+ *auth_key_data = auth_xform->key.data;
+ } else {
+ para->hash_mode = VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN;
+ para->u.hash_param.hash_result_len =
+ auth_xform->digest_length;
+ }
+
+ ret = virtio_crypto_sym_pad_auth_param(ctrl, auth_xform);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("pad auth parameter "
+ "failed");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int
+virtio_crypto_check_sym_configure_session_paras(
+ struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sym_sess,
+ struct rte_mempool *mempool)
+{
+ if (unlikely(xform == NULL) || unlikely(sym_sess == NULL) ||
+ unlikely(mempool == NULL)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("NULL pointer");
+ return -1;
+ }
+
+ if (virtio_crypto_check_sym_session_paras(dev) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int
+virtio_crypto_sym_configure_session(
+ struct rte_cryptodev *dev,
+ struct rte_crypto_sym_xform *xform,
+ struct rte_cryptodev_sym_session *sess,
+ struct rte_mempool *mempool)
+{
+ int ret;
+ struct virtio_crypto_session crypto_sess;
+ void *session_private = &crypto_sess;
+ struct virtio_crypto_session *session;
+ struct virtio_crypto_op_ctrl_req *ctrl_req;
+ enum virtio_crypto_cmd_id cmd_id;
+ uint8_t *cipher_key_data = NULL;
+ uint8_t *auth_key_data = NULL;
+ struct virtio_crypto_hw *hw;
+ struct virtqueue *control_vq;
+
+ PMD_INIT_FUNC_TRACE();
+
+ ret = virtio_crypto_check_sym_configure_session_paras(dev, xform,
+ sess, mempool);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR("Invalid parameters");
+ return ret;
+ }
+
+ if (rte_mempool_get(mempool, &session_private)) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "Couldn't get object from session mempool");
+ return -ENOMEM;
+ }
+
+ session = (struct virtio_crypto_session *)session_private;
+ memset(session, 0, sizeof(struct virtio_crypto_session));
+ ctrl_req = &session->ctrl;
+ ctrl_req->header.opcode = VIRTIO_CRYPTO_CIPHER_CREATE_SESSION;
+ /* FIXME: support multiqueue */
+ ctrl_req->header.queue_id = 0;
+
+ hw = dev->data->dev_private;
+ control_vq = hw->cvq;
+
+ cmd_id = virtio_crypto_get_chain_order(xform);
+ if (cmd_id == VIRTIO_CRYPTO_CMD_CIPHER_HASH)
+ ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
+ = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_CIPHER_THEN_HASH;
+ if (cmd_id == VIRTIO_CRYPTO_CMD_HASH_CIPHER)
+ ctrl_req->u.sym_create_session.u.chain.para.alg_chain_order
+ = VIRTIO_CRYPTO_SYM_ALG_CHAIN_ORDER_HASH_THEN_CIPHER;
+
+ switch (cmd_id) {
+ case VIRTIO_CRYPTO_CMD_CIPHER_HASH:
+ case VIRTIO_CRYPTO_CMD_HASH_CIPHER:
+ ctrl_req->u.sym_create_session.op_type
+ = VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;
+
+ ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req,
+ xform, true, &cipher_key_data, &auth_key_data, session);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "padding sym op ctrl req failed");
+ goto error_out;
+ }
+ ret = virtio_crypto_send_command(control_vq, ctrl_req,
+ cipher_key_data, auth_key_data, session);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "create session failed: %d", ret);
+ goto error_out;
+ }
+ break;
+ case VIRTIO_CRYPTO_CMD_CIPHER:
+ ctrl_req->u.sym_create_session.op_type
+ = VIRTIO_CRYPTO_SYM_OP_CIPHER;
+ ret = virtio_crypto_sym_pad_op_ctrl_req(ctrl_req, xform,
+ false, &cipher_key_data, &auth_key_data, session);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "padding sym op ctrl req failed");
+ goto error_out;
+ }
+ ret = virtio_crypto_send_command(control_vq, ctrl_req,
+ cipher_key_data, NULL, session);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "create session failed: %d", ret);
+ goto error_out;
+ }
+ break;
+ default:
+ VIRTIO_CRYPTO_SESSION_LOG_ERR(
+ "Unsupported operation chain order parameter");
+ goto error_out;
+ }
+
+ set_session_private_data(sess, dev->driver_id,
+ session_private);
+
+ return 0;
+
+error_out:
+	/* Return the object taken from the session mempool on failure. */
+	rte_mempool_put(mempool, session_private);
+	return -1;
+}
+
+static void
+virtio_crypto_dev_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *info)
+{
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (info != NULL) {
+ info->driver_id = cryptodev_virtio_driver_id;
+ info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+ info->feature_flags = dev->feature_flags;
+ info->max_nb_queue_pairs = hw->max_dataqueues;
+ info->sym.max_nb_sessions =
+ RTE_VIRTIO_CRYPTO_PMD_MAX_NB_SESSIONS;
+ info->capabilities = hw->virtio_dev_capabilities;
+ }
+}
+
+static int
+crypto_virtio_pci_probe(
+ struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ struct rte_cryptodev_pmd_init_params init_params = {
+ .name = "",
+ .socket_id = rte_socket_id(),
+ .private_data_size = sizeof(struct virtio_crypto_hw),
+ .max_nb_sessions = RTE_VIRTIO_CRYPTO_PMD_MAX_NB_SESSIONS
+ };
+ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ VIRTIO_CRYPTO_DRV_LOG_DBG("Found Crypto device at %02x:%02x.%x",
+ pci_dev->addr.bus,
+ pci_dev->addr.devid,
+ pci_dev->addr.function);
+
+ rte_pci_device_name(&pci_dev->addr, name, sizeof(name));
+
+ return crypto_virtio_create(name, pci_dev, &init_params);
+}
+
+static int
+crypto_virtio_pci_remove(
+		struct rte_pci_device *pci_dev)
+{
+ struct rte_cryptodev *cryptodev;
+ char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
+
+ if (pci_dev == NULL)
+ return -EINVAL;
+
+ rte_pci_device_name(&pci_dev->addr, cryptodev_name,
+ sizeof(cryptodev_name));
+
+ cryptodev = rte_cryptodev_pmd_get_named_dev(cryptodev_name);
+ if (cryptodev == NULL)
+ return -ENODEV;
+
+ return virtio_crypto_dev_uninit(cryptodev);
+}
+
+static struct rte_pci_driver rte_virtio_crypto_driver = {
+ .id_table = pci_id_virtio_crypto_map,
+ .drv_flags = 0,
+ .probe = crypto_virtio_pci_probe,
+ .remove = crypto_virtio_pci_remove
+};
+
+static struct cryptodev_driver virtio_crypto_drv;
+
+RTE_PMD_REGISTER_PCI(CRYPTODEV_NAME_VIRTIO_PMD, rte_virtio_crypto_driver);
+RTE_PMD_REGISTER_CRYPTO_DRIVER(virtio_crypto_drv,
+ rte_virtio_crypto_driver.driver,
+ cryptodev_virtio_driver_id);
+
+RTE_INIT(virtio_crypto_init_log);
+static void
+virtio_crypto_init_log(void)
+{
+ virtio_crypto_logtype_init = rte_log_register("pmd.crypto.virtio.init");
+ if (virtio_crypto_logtype_init >= 0)
+ rte_log_set_level(virtio_crypto_logtype_init, RTE_LOG_NOTICE);
+
+ virtio_crypto_logtype_session =
+ rte_log_register("pmd.crypto.virtio.session");
+ if (virtio_crypto_logtype_session >= 0)
+ rte_log_set_level(virtio_crypto_logtype_session,
+ RTE_LOG_NOTICE);
+
+ virtio_crypto_logtype_rx = rte_log_register("pmd.crypto.virtio.rx");
+ if (virtio_crypto_logtype_rx >= 0)
+ rte_log_set_level(virtio_crypto_logtype_rx, RTE_LOG_NOTICE);
+
+ virtio_crypto_logtype_tx = rte_log_register("pmd.crypto.virtio.tx");
+ if (virtio_crypto_logtype_tx >= 0)
+ rte_log_set_level(virtio_crypto_logtype_tx, RTE_LOG_NOTICE);
+
+ virtio_crypto_logtype_driver =
+ rte_log_register("pmd.crypto.virtio.driver");
+ if (virtio_crypto_logtype_driver >= 0)
+ rte_log_set_level(virtio_crypto_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/drivers/crypto/virtio/virtio_cryptodev.h b/drivers/crypto/virtio/virtio_cryptodev.h
new file mode 100644
index 00000000..e402c030
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_cryptodev.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_CRYPTODEV_H_
+#define _VIRTIO_CRYPTODEV_H_
+
+#include "virtio_crypto.h"
+#include "virtio_pci.h"
+#include "virtio_ring.h"
+
+/* Features desired/implemented by this driver. */
+#define VIRTIO_CRYPTO_PMD_GUEST_FEATURES (1ULL << VIRTIO_F_VERSION_1)
+
+#define CRYPTODEV_NAME_VIRTIO_PMD crypto_virtio
+
+#define NUM_ENTRY_VIRTIO_CRYPTO_OP 7
+
+extern uint8_t cryptodev_virtio_driver_id;
+
+enum virtio_crypto_cmd_id {
+ VIRTIO_CRYPTO_CMD_CIPHER = 0,
+ VIRTIO_CRYPTO_CMD_AUTH = 1,
+ VIRTIO_CRYPTO_CMD_CIPHER_HASH = 2,
+ VIRTIO_CRYPTO_CMD_HASH_CIPHER = 3
+};
+
+struct virtio_crypto_op_cookie {
+ struct virtio_crypto_op_data_req data_req;
+ struct virtio_crypto_inhdr inhdr;
+ struct vring_desc desc[NUM_ENTRY_VIRTIO_CRYPTO_OP];
+};
+
+/*
+ * Control queue function prototype
+ */
+void virtio_crypto_ctrlq_start(struct rte_cryptodev *dev);
+
+/*
+ * Data queue function prototype
+ */
+void virtio_crypto_dataq_start(struct rte_cryptodev *dev);
+
+int virtio_crypto_queue_setup(struct rte_cryptodev *dev,
+ int queue_type,
+ uint16_t vtpci_queue_idx,
+ uint16_t nb_desc,
+ int socket_id,
+ struct virtqueue **pvq);
+
+void virtio_crypto_queue_release(struct virtqueue *vq);
+
+uint16_t virtio_crypto_pkt_tx_burst(void *tx_queue,
+ struct rte_crypto_op **tx_pkts,
+ uint16_t nb_pkts);
+
+uint16_t virtio_crypto_pkt_rx_burst(void *rx_queue,
+		struct rte_crypto_op **rx_pkts,
+ uint16_t nb_pkts);
+
+#endif /* _VIRTIO_CRYPTODEV_H_ */
diff --git a/drivers/crypto/virtio/virtio_logs.h b/drivers/crypto/virtio/virtio_logs.h
new file mode 100644
index 00000000..26a286cf
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_logs.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_LOGS_H_
+#define _VIRTIO_LOGS_H_
+
+#include <rte_log.h>
+
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \
+ "PMD: %s(): " fmt "\n", __func__, ##args)
+
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+
+extern int virtio_crypto_logtype_init;
+
+#define VIRTIO_CRYPTO_INIT_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_init, \
+ "INIT: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_INIT_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_INIT_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_INIT_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_INIT_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_INIT_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_INIT_LOG_IMPL(ERR, fmt, ## args)
+
+extern int virtio_crypto_logtype_session;
+
+#define VIRTIO_CRYPTO_SESSION_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_session, \
+ "SESSION: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_SESSION_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_SESSION_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_SESSION_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_SESSION_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_SESSION_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_SESSION_LOG_IMPL(ERR, fmt, ## args)
+
+extern int virtio_crypto_logtype_rx;
+
+#define VIRTIO_CRYPTO_RX_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_rx, \
+ "RX: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_RX_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_RX_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_RX_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_RX_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_RX_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_RX_LOG_IMPL(ERR, fmt, ## args)
+
+extern int virtio_crypto_logtype_tx;
+
+#define VIRTIO_CRYPTO_TX_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_tx, \
+ "TX: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_TX_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_TX_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_TX_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_TX_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_TX_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_TX_LOG_IMPL(ERR, fmt, ## args)
+
+extern int virtio_crypto_logtype_driver;
+
+#define VIRTIO_CRYPTO_DRV_LOG_IMPL(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, virtio_crypto_logtype_driver, \
+ "DRIVER: %s(): " fmt "\n", __func__, ##args)
+
+#define VIRTIO_CRYPTO_DRV_LOG_INFO(fmt, args...) \
+ VIRTIO_CRYPTO_DRV_LOG_IMPL(INFO, fmt, ## args)
+
+#define VIRTIO_CRYPTO_DRV_LOG_DBG(fmt, args...) \
+ VIRTIO_CRYPTO_DRV_LOG_IMPL(DEBUG, fmt, ## args)
+
+#define VIRTIO_CRYPTO_DRV_LOG_ERR(fmt, args...) \
+ VIRTIO_CRYPTO_DRV_LOG_IMPL(ERR, fmt, ## args)
+
+#endif /* _VIRTIO_LOGS_H_ */
diff --git a/drivers/crypto/virtio/virtio_pci.c b/drivers/crypto/virtio/virtio_pci.c
new file mode 100644
index 00000000..832c465b
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_pci.c
@@ -0,0 +1,462 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#include <stdint.h>
+
+#ifdef RTE_EXEC_ENV_LINUXAPP
+ #include <dirent.h>
+ #include <fcntl.h>
+#endif
+
+#include <rte_io.h>
+#include <rte_bus.h>
+
+#include "virtio_pci.h"
+#include "virtqueue.h"
+
+/*
+ * Following macros are derived from linux/pci_regs.h, however,
+ * we can't simply include that header here, as there is no such
+ * file for non-Linux platform.
+ */
+#define PCI_CAPABILITY_LIST 0x34
+#define PCI_CAP_ID_VNDR 0x09
+#define PCI_CAP_ID_MSIX 0x11
+
+/*
+ * The remaining space is defined by each driver as the per-driver
+ * configuration space.
+ */
+#define VIRTIO_PCI_CONFIG(hw) \
+ (((hw)->use_msix == VIRTIO_MSIX_ENABLED) ? 24 : 20)
+
+struct virtio_hw_internal virtio_hw_internal[RTE_MAX_VIRTIO_CRYPTO];
+
+static inline int
+check_vq_phys_addr_ok(struct virtqueue *vq)
+{
+	/* The virtio PCI VIRTIO_PCI_QUEUE_PFN register is 32 bit,
+ * and only accepts 32 bit page frame number.
+ * Check if the allocated physical memory exceeds 16TB.
+ */
+ if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
+ (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("vring address shouldn't be above 16TB!");
+ return 0;
+ }
+
+ return 1;
+}
+
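+/*
+ * The modern common config exposes 64-bit queue addresses as pairs of
+ * 32-bit lo/hi registers, so a 64-bit value is written in two parts.
+ */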
+static inline void
+io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
+{
+ rte_write32(val & ((1ULL << 32) - 1), lo);
+ rte_write32(val >> 32, hi);
+}
+
+static void
+modern_read_dev_config(struct virtio_crypto_hw *hw, size_t offset,
+ void *dst, int length)
+{
+ int i;
+ uint8_t *p;
+ uint8_t old_gen, new_gen;
+
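+	/*
+	 * Re-read the whole config if config_generation changed in the
+	 * middle of the read, so that a torn snapshot is never returned
+	 * (virtio 1.0 device configuration access rule).
+	 */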
+ do {
+ old_gen = rte_read8(&hw->common_cfg->config_generation);
+
+ p = dst;
+ for (i = 0; i < length; i++)
+ *p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i);
+
+ new_gen = rte_read8(&hw->common_cfg->config_generation);
+ } while (old_gen != new_gen);
+}
+
+static void
+modern_write_dev_config(struct virtio_crypto_hw *hw, size_t offset,
+ const void *src, int length)
+{
+ int i;
+ const uint8_t *p = src;
+
+ for (i = 0; i < length; i++)
+ rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i));
+}
+
+static uint64_t
+modern_get_features(struct virtio_crypto_hw *hw)
+{
+ uint32_t features_lo, features_hi;
+
+ rte_write32(0, &hw->common_cfg->device_feature_select);
+ features_lo = rte_read32(&hw->common_cfg->device_feature);
+
+ rte_write32(1, &hw->common_cfg->device_feature_select);
+ features_hi = rte_read32(&hw->common_cfg->device_feature);
+
+ return ((uint64_t)features_hi << 32) | features_lo;
+}
+
+static void
+modern_set_features(struct virtio_crypto_hw *hw, uint64_t features)
+{
+ rte_write32(0, &hw->common_cfg->guest_feature_select);
+ rte_write32(features & ((1ULL << 32) - 1),
+ &hw->common_cfg->guest_feature);
+
+ rte_write32(1, &hw->common_cfg->guest_feature_select);
+ rte_write32(features >> 32,
+ &hw->common_cfg->guest_feature);
+}
+
+static uint8_t
+modern_get_status(struct virtio_crypto_hw *hw)
+{
+ return rte_read8(&hw->common_cfg->device_status);
+}
+
+static void
+modern_set_status(struct virtio_crypto_hw *hw, uint8_t status)
+{
+ rte_write8(status, &hw->common_cfg->device_status);
+}
+
+static void
+modern_reset(struct virtio_crypto_hw *hw)
+{
+ modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
+ modern_get_status(hw);
+}
+
+static uint8_t
+modern_get_isr(struct virtio_crypto_hw *hw)
+{
+ return rte_read8(hw->isr);
+}
+
+static uint16_t
+modern_set_config_irq(struct virtio_crypto_hw *hw, uint16_t vec)
+{
+ rte_write16(vec, &hw->common_cfg->msix_config);
+ return rte_read16(&hw->common_cfg->msix_config);
+}
+
+static uint16_t
+modern_set_queue_irq(struct virtio_crypto_hw *hw, struct virtqueue *vq,
+ uint16_t vec)
+{
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+ rte_write16(vec, &hw->common_cfg->queue_msix_vector);
+ return rte_read16(&hw->common_cfg->queue_msix_vector);
+}
+
+static uint16_t
+modern_get_queue_num(struct virtio_crypto_hw *hw, uint16_t queue_id)
+{
+ rte_write16(queue_id, &hw->common_cfg->queue_select);
+ return rte_read16(&hw->common_cfg->queue_size);
+}
+
+static int
+modern_setup_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
+{
+ uint64_t desc_addr, avail_addr, used_addr;
+ uint16_t notify_off;
+
+ if (!check_vq_phys_addr_ok(vq))
+ return -1;
+
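+	/*
+	 * Split-ring layout: the avail ring directly follows the descriptor
+	 * table, and the used ring starts at the next 4K-aligned boundary;
+	 * this must match vring_init() in virtio_ring.h.
+	 */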
+ desc_addr = vq->vq_ring_mem;
+ avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
+ used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
+ ring[vq->vq_nentries]),
+ VIRTIO_PCI_VRING_ALIGN);
+
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+
+ io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
+ &hw->common_cfg->queue_desc_hi);
+ io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
+ &hw->common_cfg->queue_avail_hi);
+ io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
+ &hw->common_cfg->queue_used_hi);
+
+ notify_off = rte_read16(&hw->common_cfg->queue_notify_off);
+ vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
+ notify_off * hw->notify_off_multiplier);
+
+ rte_write16(1, &hw->common_cfg->queue_enable);
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("queue %u addresses:", vq->vq_queue_index);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("\t desc_addr: %" PRIx64, desc_addr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("\t aval_addr: %" PRIx64, avail_addr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("\t used_addr: %" PRIx64, used_addr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("\t notify addr: %p (notify offset: %u)",
+ vq->notify_addr, notify_off);
+
+ return 0;
+}
+
+static void
+modern_del_queue(struct virtio_crypto_hw *hw, struct virtqueue *vq)
+{
+ rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
+
+ io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
+ &hw->common_cfg->queue_desc_hi);
+ io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
+ &hw->common_cfg->queue_avail_hi);
+ io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
+ &hw->common_cfg->queue_used_hi);
+
+ rte_write16(0, &hw->common_cfg->queue_enable);
+}
+
+static void
+modern_notify_queue(struct virtio_crypto_hw *hw __rte_unused,
+ struct virtqueue *vq)
+{
+ rte_write16(vq->vq_queue_index, vq->notify_addr);
+}
+
+const struct virtio_pci_ops virtio_crypto_modern_ops = {
+ .read_dev_cfg = modern_read_dev_config,
+ .write_dev_cfg = modern_write_dev_config,
+ .reset = modern_reset,
+ .get_status = modern_get_status,
+ .set_status = modern_set_status,
+ .get_features = modern_get_features,
+ .set_features = modern_set_features,
+ .get_isr = modern_get_isr,
+ .set_config_irq = modern_set_config_irq,
+ .set_queue_irq = modern_set_queue_irq,
+ .get_queue_num = modern_get_queue_num,
+ .setup_queue = modern_setup_queue,
+ .del_queue = modern_del_queue,
+ .notify_queue = modern_notify_queue,
+};
+
+void
+vtpci_read_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
+ void *dst, int length)
+{
+ VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
+}
+
+void
+vtpci_write_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
+ const void *src, int length)
+{
+ VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
+}
+
+uint64_t
+vtpci_cryptodev_negotiate_features(struct virtio_crypto_hw *hw,
+ uint64_t host_features)
+{
+ uint64_t features;
+
+ /*
+ * Limit negotiated features to what the driver, virtqueue, and
+ * host all support.
+ */
+ features = host_features & hw->guest_features;
+ VTPCI_OPS(hw)->set_features(hw, features);
+
+ return features;
+}
+
+void
+vtpci_cryptodev_reset(struct virtio_crypto_hw *hw)
+{
+ VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
+ /* flush status write */
+ VTPCI_OPS(hw)->get_status(hw);
+}
+
+void
+vtpci_cryptodev_reinit_complete(struct virtio_crypto_hw *hw)
+{
+ vtpci_cryptodev_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
+}
+
+void
+vtpci_cryptodev_set_status(struct virtio_crypto_hw *hw, uint8_t status)
+{
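+	/*
+	 * Status bits are cumulative: OR the new bit into the current
+	 * device status unless the device is being reset.
+	 */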
+ if (status != VIRTIO_CONFIG_STATUS_RESET)
+ status |= VTPCI_OPS(hw)->get_status(hw);
+
+ VTPCI_OPS(hw)->set_status(hw, status);
+}
+
+uint8_t
+vtpci_cryptodev_get_status(struct virtio_crypto_hw *hw)
+{
+ return VTPCI_OPS(hw)->get_status(hw);
+}
+
+uint8_t
+vtpci_cryptodev_isr(struct virtio_crypto_hw *hw)
+{
+ return VTPCI_OPS(hw)->get_isr(hw);
+}
+
+static void *
+get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
+{
+ uint8_t bar = cap->bar;
+ uint32_t length = cap->length;
+ uint32_t offset = cap->offset;
+ uint8_t *base;
+
+ if (bar >= PCI_MAX_RESOURCE) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("invalid bar: %u", bar);
+ return NULL;
+ }
+
+ if (offset + length < offset) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("offset(%u) + length(%u) overflows",
+ offset, length);
+ return NULL;
+ }
+
+ if (offset + length > dev->mem_resource[bar].len) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR(
+ "invalid cap: overflows bar space: %u > %" PRIu64,
+ offset + length, dev->mem_resource[bar].len);
+ return NULL;
+ }
+
+ base = dev->mem_resource[bar].addr;
+ if (base == NULL) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("bar %u base addr is NULL", bar);
+ return NULL;
+ }
+
+ return base + offset;
+}
+
+#define PCI_MSIX_ENABLE 0x8000
+
+static int
+virtio_read_caps(struct rte_pci_device *dev, struct virtio_crypto_hw *hw)
+{
+ uint8_t pos;
+ struct virtio_pci_cap cap;
+ int ret;
+
+ if (rte_pci_map_device(dev)) {
+ VIRTIO_CRYPTO_INIT_LOG_DBG("failed to map pci device!");
+ return -1;
+ }
+
+ ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_DBG("failed to read pci capability list");
+ return -1;
+ }
+
+ while (pos) {
+ ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);
+ if (ret < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR(
+ "failed to read pci cap at pos: %x", pos);
+ break;
+ }
+
+ if (cap.cap_vndr == PCI_CAP_ID_MSIX) {
+ /* Transitional devices would also have this capability,
+ * that's why we also check if msix is enabled.
+ * 1st byte is cap ID; 2nd byte is the position of next
+ * cap; next two bytes are the flags.
+ */
+ uint16_t flags = ((uint16_t *)&cap)[1];
+
+ if (flags & PCI_MSIX_ENABLE)
+ hw->use_msix = VIRTIO_MSIX_ENABLED;
+ else
+ hw->use_msix = VIRTIO_MSIX_DISABLED;
+ }
+
+ if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
+ VIRTIO_CRYPTO_INIT_LOG_DBG(
+ "[%2x] skipping non VNDR cap id: %02x",
+ pos, cap.cap_vndr);
+ goto next;
+ }
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG(
+ "[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
+ pos, cap.cfg_type, cap.bar, cap.offset, cap.length);
+
+ switch (cap.cfg_type) {
+ case VIRTIO_PCI_CAP_COMMON_CFG:
+ hw->common_cfg = get_cfg_addr(dev, &cap);
+ break;
+ case VIRTIO_PCI_CAP_NOTIFY_CFG:
+ rte_pci_read_config(dev, &hw->notify_off_multiplier,
+ 4, pos + sizeof(cap));
+ hw->notify_base = get_cfg_addr(dev, &cap);
+ break;
+ case VIRTIO_PCI_CAP_DEVICE_CFG:
+ hw->dev_cfg = get_cfg_addr(dev, &cap);
+ break;
+ case VIRTIO_PCI_CAP_ISR_CFG:
+ hw->isr = get_cfg_addr(dev, &cap);
+ break;
+ }
+
+next:
+ pos = cap.cap_next;
+ }
+
+ if (hw->common_cfg == NULL || hw->notify_base == NULL ||
+ hw->dev_cfg == NULL || hw->isr == NULL) {
+ VIRTIO_CRYPTO_INIT_LOG_INFO("no modern virtio pci device found.");
+ return -1;
+ }
+
+ VIRTIO_CRYPTO_INIT_LOG_INFO("found modern virtio pci device.");
+
+ VIRTIO_CRYPTO_INIT_LOG_DBG("common cfg mapped at: %p", hw->common_cfg);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("device cfg mapped at: %p", hw->dev_cfg);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("isr cfg mapped at: %p", hw->isr);
+ VIRTIO_CRYPTO_INIT_LOG_DBG("notify base: %p, notify off multiplier: %u",
+ hw->notify_base, hw->notify_off_multiplier);
+
+ return 0;
+}
+
+/*
+ * Return -1:
+ *   on an error mapping the device with VFIO/UIO,
+ *   on a port map error when the driver type is KDRV_NONE,
+ *   or if the device is whitelisted but the driver type is KDRV_UNKNOWN.
+ * Return 1 if a kernel driver is managing the device.
+ * Return 0 on success.
+ */
+int
+vtpci_cryptodev_init(struct rte_pci_device *dev, struct virtio_crypto_hw *hw)
+{
+ /*
+	 * Try to read the virtio PCI capabilities, which exist only on
+	 * modern PCI devices. If this fails, the device is a legacy one,
+	 * which this driver does not support.
+ */
+ if (virtio_read_caps(dev, hw) == 0) {
+ VIRTIO_CRYPTO_INIT_LOG_INFO("modern virtio pci detected.");
+ virtio_hw_internal[hw->dev_id].vtpci_ops =
+ &virtio_crypto_modern_ops;
+ hw->modern = 1;
+ return 0;
+ }
+
+ /*
+ * virtio crypto conforms to virtio 1.0 and doesn't support
+ * legacy mode
+ */
+ return -1;
+}
diff --git a/drivers/crypto/virtio/virtio_pci.h b/drivers/crypto/virtio/virtio_pci.h
new file mode 100644
index 00000000..604ec366
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_pci.h
@@ -0,0 +1,253 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_PCI_H_
+#define _VIRTIO_PCI_H_
+
+#include <stdint.h>
+
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+#include <rte_cryptodev.h>
+
+#include "virtio_crypto.h"
+
+struct virtqueue;
+
+/* VirtIO PCI vendor/device ID. */
+#define VIRTIO_CRYPTO_PCI_VENDORID 0x1AF4
+#define VIRTIO_CRYPTO_PCI_DEVICEID 0x1054
+
+/* VirtIO ABI version, this must match exactly. */
+#define VIRTIO_PCI_ABI_VERSION 0
+
+/*
+ * VirtIO Header, located in BAR 0.
+ */
+#define VIRTIO_PCI_HOST_FEATURES 0 /* host's supported features (32bit, RO)*/
+#define VIRTIO_PCI_GUEST_FEATURES 4 /* guest's supported features (32, RW) */
+#define VIRTIO_PCI_QUEUE_PFN 8 /* physical address of VQ (32, RW) */
+#define VIRTIO_PCI_QUEUE_NUM 12 /* number of ring entries (16, RO) */
+#define VIRTIO_PCI_QUEUE_SEL 14 /* current VQ selection (16, RW) */
+#define VIRTIO_PCI_QUEUE_NOTIFY 16 /* notify host regarding VQ (16, RW) */
+#define VIRTIO_PCI_STATUS 18 /* device status register (8, RW) */
+#define VIRTIO_PCI_ISR 19 /* interrupt status register, reading
+ * also clears the register (8, RO)
+ */
+/* Only if MSIX is enabled: */
+
+/* configuration change vector (16, RW) */
+#define VIRTIO_MSI_CONFIG_VECTOR 20
+/* vector for selected VQ notifications */
+#define VIRTIO_MSI_QUEUE_VECTOR 22
+
+/* The bit of the ISR which indicates a device has an interrupt. */
+#define VIRTIO_PCI_ISR_INTR 0x1
+/* The bit of the ISR which indicates a device configuration change. */
+#define VIRTIO_PCI_ISR_CONFIG 0x2
+/* Vector value used to disable MSI for queue. */
+#define VIRTIO_MSI_NO_VECTOR 0xFFFF
+
+/* Status byte for guest to report progress. */
+#define VIRTIO_CONFIG_STATUS_RESET 0x00
+#define VIRTIO_CONFIG_STATUS_ACK 0x01
+#define VIRTIO_CONFIG_STATUS_DRIVER 0x02
+#define VIRTIO_CONFIG_STATUS_DRIVER_OK 0x04
+#define VIRTIO_CONFIG_STATUS_FEATURES_OK 0x08
+#define VIRTIO_CONFIG_STATUS_FAILED 0x80
+
+/*
+ * Each virtqueue indirect descriptor list must be physically contiguous.
+ * To allow us to malloc(9) each list individually, limit the number
+ * supported to what will fit in one page. With 4KB pages, this is a limit
+ * of 256 descriptors. If there is ever a need for more, we can switch to
+ * contigmalloc(9) for the larger allocations, similar to what
+ * bus_dmamem_alloc(9) does.
+ *
+ * Note the sizeof(struct vring_desc) is 16 bytes.
+ */
+#define VIRTIO_MAX_INDIRECT ((int) (PAGE_SIZE / 16))
+
+/* Do we get callbacks when the ring is completely used, even if we've
+ * suppressed them?
+ */
+#define VIRTIO_F_NOTIFY_ON_EMPTY 24
+
+/* Can the device handle any descriptor layout? */
+#define VIRTIO_F_ANY_LAYOUT 27
+
+/* We support indirect buffer descriptors */
+#define VIRTIO_RING_F_INDIRECT_DESC 28
+
+#define VIRTIO_F_VERSION_1 32
+#define VIRTIO_F_IOMMU_PLATFORM 33
+
+/* The Guest publishes the used index for which it expects an interrupt
+ * at the end of the avail ring. Host should ignore the avail->flags field.
+ */
+/* The Host publishes the avail index for which it expects a kick
+ * at the end of the used ring. Guest should ignore the used->flags field.
+ */
+#define VIRTIO_RING_F_EVENT_IDX 29
+
+/* Common configuration */
+#define VIRTIO_PCI_CAP_COMMON_CFG 1
+/* Notifications */
+#define VIRTIO_PCI_CAP_NOTIFY_CFG 2
+/* ISR Status */
+#define VIRTIO_PCI_CAP_ISR_CFG 3
+/* Device specific configuration */
+#define VIRTIO_PCI_CAP_DEVICE_CFG 4
+/* PCI configuration access */
+#define VIRTIO_PCI_CAP_PCI_CFG 5
+
+/* This is the PCI capability header: */
+struct virtio_pci_cap {
+ uint8_t cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */
+ uint8_t cap_next; /* Generic PCI field: next ptr. */
+ uint8_t cap_len; /* Generic PCI field: capability length */
+ uint8_t cfg_type; /* Identifies the structure. */
+ uint8_t bar; /* Where to find it. */
+ uint8_t padding[3]; /* Pad to full dword. */
+ uint32_t offset; /* Offset within bar. */
+ uint32_t length; /* Length of the structure, in bytes. */
+};
+
+struct virtio_pci_notify_cap {
+ struct virtio_pci_cap cap;
+ uint32_t notify_off_multiplier; /* Multiplier for queue_notify_off. */
+};
+
+/* Fields in VIRTIO_PCI_CAP_COMMON_CFG: */
+struct virtio_pci_common_cfg {
+ /* About the whole device. */
+ uint32_t device_feature_select; /* read-write */
+ uint32_t device_feature; /* read-only */
+ uint32_t guest_feature_select; /* read-write */
+ uint32_t guest_feature; /* read-write */
+ uint16_t msix_config; /* read-write */
+ uint16_t num_queues; /* read-only */
+ uint8_t device_status; /* read-write */
+ uint8_t config_generation; /* read-only */
+
+ /* About a specific virtqueue. */
+ uint16_t queue_select; /* read-write */
+ uint16_t queue_size; /* read-write, power of 2. */
+ uint16_t queue_msix_vector; /* read-write */
+ uint16_t queue_enable; /* read-write */
+ uint16_t queue_notify_off; /* read-only */
+ uint32_t queue_desc_lo; /* read-write */
+ uint32_t queue_desc_hi; /* read-write */
+ uint32_t queue_avail_lo; /* read-write */
+ uint32_t queue_avail_hi; /* read-write */
+ uint32_t queue_used_lo; /* read-write */
+ uint32_t queue_used_hi; /* read-write */
+};
+
+struct virtio_crypto_hw;
+
+struct virtio_pci_ops {
+ void (*read_dev_cfg)(struct virtio_crypto_hw *hw, size_t offset,
+ void *dst, int len);
+ void (*write_dev_cfg)(struct virtio_crypto_hw *hw, size_t offset,
+ const void *src, int len);
+ void (*reset)(struct virtio_crypto_hw *hw);
+
+ uint8_t (*get_status)(struct virtio_crypto_hw *hw);
+ void (*set_status)(struct virtio_crypto_hw *hw, uint8_t status);
+
+ uint64_t (*get_features)(struct virtio_crypto_hw *hw);
+ void (*set_features)(struct virtio_crypto_hw *hw, uint64_t features);
+
+ uint8_t (*get_isr)(struct virtio_crypto_hw *hw);
+
+ uint16_t (*set_config_irq)(struct virtio_crypto_hw *hw, uint16_t vec);
+
+ uint16_t (*set_queue_irq)(struct virtio_crypto_hw *hw,
+ struct virtqueue *vq, uint16_t vec);
+
+ uint16_t (*get_queue_num)(struct virtio_crypto_hw *hw,
+ uint16_t queue_id);
+ int (*setup_queue)(struct virtio_crypto_hw *hw, struct virtqueue *vq);
+ void (*del_queue)(struct virtio_crypto_hw *hw, struct virtqueue *vq);
+ void (*notify_queue)(struct virtio_crypto_hw *hw, struct virtqueue *vq);
+};
+
+struct virtio_crypto_hw {
+ /* control queue */
+ struct virtqueue *cvq;
+ uint16_t dev_id;
+ uint16_t max_dataqueues;
+ uint64_t req_guest_features;
+ uint64_t guest_features;
+ uint8_t use_msix;
+ uint8_t modern;
+ uint32_t notify_off_multiplier;
+ uint8_t *isr;
+ uint16_t *notify_base;
+ struct virtio_pci_common_cfg *common_cfg;
+ struct virtio_crypto_config *dev_cfg;
+ const struct rte_cryptodev_capabilities *virtio_dev_capabilities;
+};
+
+/*
+ * While virtio_crypto_hw lives in shared memory, this structure holds
+ * information that may differ per process in the multi-process model.
+ * For example, the vtpci_ops pointer.
+ */
+struct virtio_hw_internal {
+ const struct virtio_pci_ops *vtpci_ops;
+ struct rte_pci_ioport io;
+};
+
+#define VTPCI_OPS(hw) (virtio_hw_internal[(hw)->dev_id].vtpci_ops)
+#define VTPCI_IO(hw) (&virtio_hw_internal[(hw)->dev_id].io)
+
+extern struct virtio_hw_internal virtio_hw_internal[RTE_MAX_VIRTIO_CRYPTO];
+
+/*
+ * How many bits to shift physical queue address written to QUEUE_PFN.
+ * 12 is historical, and due to x86 page size.
+ */
+#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12
+
+/* The alignment to use between consumer and producer parts of vring. */
+#define VIRTIO_PCI_VRING_ALIGN 4096
+
+enum virtio_msix_status {
+ VIRTIO_MSIX_NONE = 0,
+ VIRTIO_MSIX_DISABLED = 1,
+ VIRTIO_MSIX_ENABLED = 2
+};
+
+static inline int
+vtpci_with_feature(struct virtio_crypto_hw *hw, uint64_t bit)
+{
+ return (hw->guest_features & (1ULL << bit)) != 0;
+}
+
+/*
+ * Function declaration from virtio_pci.c
+ */
+int vtpci_cryptodev_init(struct rte_pci_device *dev,
+ struct virtio_crypto_hw *hw);
+void vtpci_cryptodev_reset(struct virtio_crypto_hw *hw);
+
+void vtpci_cryptodev_reinit_complete(struct virtio_crypto_hw *hw);
+
+uint8_t vtpci_cryptodev_get_status(struct virtio_crypto_hw *hw);
+void vtpci_cryptodev_set_status(struct virtio_crypto_hw *hw, uint8_t status);
+
+uint64_t vtpci_cryptodev_negotiate_features(struct virtio_crypto_hw *hw,
+ uint64_t host_features);
+
+void vtpci_write_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
+ const void *src, int length);
+
+void vtpci_read_cryptodev_config(struct virtio_crypto_hw *hw, size_t offset,
+ void *dst, int length);
+
+uint8_t vtpci_cryptodev_isr(struct virtio_crypto_hw *hw);
+
+#endif /* _VIRTIO_PCI_H_ */
diff --git a/drivers/crypto/virtio/virtio_ring.h b/drivers/crypto/virtio/virtio_ring.h
new file mode 100644
index 00000000..ee306745
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_ring.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTIO_RING_H_
+#define _VIRTIO_RING_H_
+
+#include <stdint.h>
+
+#include <rte_common.h>
+
+/* This marks a buffer as continuing via the next field. */
+#define VRING_DESC_F_NEXT 1
+/* This marks a buffer as write-only (otherwise read-only). */
+#define VRING_DESC_F_WRITE 2
+/* This means the buffer contains a list of buffer descriptors. */
+#define VRING_DESC_F_INDIRECT 4
+
+/* The Host uses this in used->flags to advise the Guest: don't kick me
+ * when you add a buffer. It's unreliable, so it's simply an
+ * optimization. Guest will still kick if it's out of buffers.
+ */
+#define VRING_USED_F_NO_NOTIFY 1
+/* The Guest uses this in avail->flags to advise the Host: don't
+ * interrupt me when you consume a buffer. It's unreliable, so it's
+ * simply an optimization.
+ */
+#define VRING_AVAIL_F_NO_INTERRUPT 1
+
+/* VirtIO ring descriptors: 16 bytes.
+ * These can chain together via "next".
+ */
+struct vring_desc {
+ uint64_t addr; /* Address (guest-physical). */
+ uint32_t len; /* Length. */
+ uint16_t flags; /* The flags as indicated above. */
+ uint16_t next; /* We chain unused descriptors via this. */
+};
+
+struct vring_avail {
+ uint16_t flags;
+ uint16_t idx;
+ uint16_t ring[0];
+};
+
+/* id is a 16bit index. uint32_t is used here for ids for padding reasons. */
+struct vring_used_elem {
+ /* Index of start of used descriptor chain. */
+ uint32_t id;
+ /* Total length of the descriptor chain which was written to. */
+ uint32_t len;
+};
+
+struct vring_used {
+ uint16_t flags;
+ volatile uint16_t idx;
+ struct vring_used_elem ring[0];
+};
+
+struct vring {
+ unsigned int num;
+ struct vring_desc *desc;
+ struct vring_avail *avail;
+ struct vring_used *used;
+};
+
+/* The standard layout for the ring is a continuous chunk of memory which
+ * looks like this. We assume num is a power of 2.
+ *
+ * struct vring {
+ * // The actual descriptors (16 bytes each)
+ * struct vring_desc desc[num];
+ *
+ * // A ring of available descriptor heads with free-running index.
+ * __u16 avail_flags;
+ * __u16 avail_idx;
+ * __u16 available[num];
+ * __u16 used_event_idx;
+ *
+ * // Padding to the next align boundary.
+ * char pad[];
+ *
+ * // A ring of used descriptor heads with free-running index.
+ * __u16 used_flags;
+ * __u16 used_idx;
+ * struct vring_used_elem used[num];
+ * __u16 avail_event_idx;
+ * };
+ *
+ * NOTE: for VirtIO PCI, align is 4096.
+ */
+
+/*
+ * We publish the used event index at the end of the available ring, and vice
+ * versa. They are at the end for backwards compatibility.
+ */
+#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
+#define vring_avail_event(vr) (*(uint16_t *)&(vr)->used->ring[(vr)->num])
+
+static inline size_t
+vring_size(unsigned int num, unsigned long align)
+{
+ size_t size;
+
+ size = num * sizeof(struct vring_desc);
+ size += sizeof(struct vring_avail) + (num * sizeof(uint16_t));
+ size = RTE_ALIGN_CEIL(size, align);
+ size += sizeof(struct vring_used) +
+ (num * sizeof(struct vring_used_elem));
+ return size;
+}
+
+static inline void
+vring_init(struct vring *vr, unsigned int num, uint8_t *p,
+ unsigned long align)
+{
+ vr->num = num;
+ vr->desc = (struct vring_desc *) p;
+ vr->avail = (struct vring_avail *) (p +
+ num * sizeof(struct vring_desc));
+ vr->used = (void *)
+ RTE_ALIGN_CEIL((uintptr_t)(&vr->avail->ring[num]), align);
+}
+
+/*
+ * The following is used with VIRTIO_RING_F_EVENT_IDX.
+ * Assuming a given event_idx value from the other side, if we have
+ * just incremented index from old to new_idx, should we trigger an
+ * event?
+ */
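+/*
+ * Example: with event_idx = 5, old = 4 and new_idx = 6,
+ * (6 - 5 - 1) = 0 < (6 - 4) = 2, so an event must be triggered.
+ * The uint16_t casts keep the comparison valid across index wraparound.
+ */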
+static inline int
+vring_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
+{
+ return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
+}
+
+#endif /* _VIRTIO_RING_H_ */
diff --git a/drivers/crypto/virtio/virtio_rxtx.c b/drivers/crypto/virtio/virtio_rxtx.c
new file mode 100644
index 00000000..45039284
--- /dev/null
+++ b/drivers/crypto/virtio/virtio_rxtx.c
@@ -0,0 +1,515 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+#include <rte_cryptodev_pmd.h>
+
+#include "virtqueue.h"
+#include "virtio_cryptodev.h"
+#include "virtio_crypto_algs.h"
+
+static void
+vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
+{
+ struct vring_desc *dp, *dp_tail;
+ struct vq_desc_extra *dxp;
+ uint16_t desc_idx_last = desc_idx;
+
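+	/*
+	 * Find the tail of the chain being freed. An INDIRECT descriptor
+	 * occupies a single slot in the main ring, so in that case there
+	 * is no chain to walk.
+	 */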
+ dp = &vq->vq_ring.desc[desc_idx];
+ dxp = &vq->vq_descx[desc_idx];
+ vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
+ if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
+ while (dp->flags & VRING_DESC_F_NEXT) {
+ desc_idx_last = dp->next;
+ dp = &vq->vq_ring.desc[dp->next];
+ }
+ }
+ dxp->ndescs = 0;
+
+ /*
+ * We must append the existing free chain, if any, to the end of
+	 * the newly freed chain. If the virtqueue was completely used, then
+	 * head would be VQ_RING_DESC_CHAIN_END.
+ */
+ if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
+ vq->vq_desc_head_idx = desc_idx;
+ } else {
+ dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
+ dp_tail->next = desc_idx;
+ }
+
+ vq->vq_desc_tail_idx = desc_idx_last;
+ dp->next = VQ_RING_DESC_CHAIN_END;
+}
+
+static uint16_t
+virtqueue_dequeue_burst_rx(struct virtqueue *vq,
+ struct rte_crypto_op **rx_pkts, uint16_t num)
+{
+ struct vring_used_elem *uep;
+ struct rte_crypto_op *cop;
+ uint16_t used_idx, desc_idx;
+ uint16_t i;
+ struct virtio_crypto_inhdr *inhdr;
+ struct virtio_crypto_op_cookie *op_cookie;
+
+	/* The caller ensures num does not exceed the number of used entries */
+ for (i = 0; i < num ; i++) {
+ used_idx = (uint16_t)(vq->vq_used_cons_idx
+ & (vq->vq_nentries - 1));
+ uep = &vq->vq_ring.used->ring[used_idx];
+ desc_idx = (uint16_t)uep->id;
+ cop = (struct rte_crypto_op *)
+ vq->vq_descx[desc_idx].crypto_op;
+ if (unlikely(cop == NULL)) {
+ VIRTIO_CRYPTO_RX_LOG_DBG("vring descriptor with no "
+ "mbuf cookie at %u",
+ vq->vq_used_cons_idx);
+ break;
+ }
+
+ op_cookie = (struct virtio_crypto_op_cookie *)
+ vq->vq_descx[desc_idx].cookie;
+ inhdr = &(op_cookie->inhdr);
+ switch (inhdr->status) {
+ case VIRTIO_CRYPTO_OK:
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ break;
+ case VIRTIO_CRYPTO_ERR:
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ vq->packets_received_failed++;
+ break;
+ case VIRTIO_CRYPTO_BADMSG:
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ vq->packets_received_failed++;
+ break;
+ case VIRTIO_CRYPTO_NOTSUPP:
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
+ vq->packets_received_failed++;
+ break;
+ case VIRTIO_CRYPTO_INVSESS:
+ cop->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
+ vq->packets_received_failed++;
+ break;
+ default:
+ break;
+ }
+
+ vq->packets_received_total++;
+
+ rx_pkts[i] = cop;
+ rte_mempool_put(vq->mpool, op_cookie);
+
+ vq->vq_used_cons_idx++;
+ vq_ring_free_chain(vq, desc_idx);
+ vq->vq_descx[desc_idx].crypto_op = NULL;
+ }
+
+ return i;
+}
+
+static int
+virtqueue_crypto_sym_pkt_header_arrange(
+ struct rte_crypto_op *cop,
+ struct virtio_crypto_op_data_req *data,
+ struct virtio_crypto_session *session)
+{
+ struct rte_crypto_sym_op *sym_op = cop->sym;
+ struct virtio_crypto_op_data_req *req_data = data;
+ struct virtio_crypto_op_ctrl_req *ctrl = &session->ctrl;
+ struct virtio_crypto_sym_create_session_req *sym_sess_req =
+ &ctrl->u.sym_create_session;
+ struct virtio_crypto_alg_chain_session_para *chain_para =
+ &sym_sess_req->u.chain.para;
+ struct virtio_crypto_cipher_session_para *cipher_para;
+
+ req_data->header.session_id = session->session_id;
+
+ switch (sym_sess_req->op_type) {
+ case VIRTIO_CRYPTO_SYM_OP_CIPHER:
+ req_data->u.sym_req.op_type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
+
+ cipher_para = &sym_sess_req->u.cipher.para;
+ if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)
+ req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
+ else
+ req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;
+
+ req_data->u.sym_req.u.cipher.para.iv_len
+ = session->iv.length;
+
+ req_data->u.sym_req.u.cipher.para.src_data_len =
+ (sym_op->cipher.data.length +
+ sym_op->cipher.data.offset);
+ req_data->u.sym_req.u.cipher.para.dst_data_len =
+ req_data->u.sym_req.u.cipher.para.src_data_len;
+ break;
+ case VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING:
+ req_data->u.sym_req.op_type =
+ VIRTIO_CRYPTO_SYM_OP_ALGORITHM_CHAINING;
+
+ cipher_para = &chain_para->cipher_param;
+ if (cipher_para->op == VIRTIO_CRYPTO_OP_ENCRYPT)
+ req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_ENCRYPT;
+ else
+ req_data->header.opcode = VIRTIO_CRYPTO_CIPHER_DECRYPT;
+
+ req_data->u.sym_req.u.chain.para.iv_len = session->iv.length;
+ req_data->u.sym_req.u.chain.para.aad_len = session->aad.length;
+
+ req_data->u.sym_req.u.chain.para.src_data_len =
+ (sym_op->cipher.data.length +
+ sym_op->cipher.data.offset);
+ req_data->u.sym_req.u.chain.para.dst_data_len =
+ req_data->u.sym_req.u.chain.para.src_data_len;
+ req_data->u.sym_req.u.chain.para.cipher_start_src_offset =
+ sym_op->cipher.data.offset;
+ req_data->u.sym_req.u.chain.para.len_to_cipher =
+ sym_op->cipher.data.length;
+ req_data->u.sym_req.u.chain.para.hash_start_src_offset =
+ sym_op->auth.data.offset;
+ req_data->u.sym_req.u.chain.para.len_to_hash =
+ sym_op->auth.data.length;
+ req_data->u.sym_req.u.chain.para.aad_len =
+ chain_para->aad_len;
+
+ if (chain_para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)
+ req_data->u.sym_req.u.chain.para.hash_result_len =
+ chain_para->u.hash_param.hash_result_len;
+ if (chain_para->hash_mode ==
+ VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
+ req_data->u.sym_req.u.chain.para.hash_result_len =
+ chain_para->u.mac_param.hash_result_len;
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
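
Note that src_data_len spans the whole buffer from offset 0 (offset + length), while the *_start_src_offset fields locate the cipher and hash regions inside that span. A standalone sketch with hypothetical numbers (none come from the driver): a 64-byte buffer that is fully hashed while only the last 48 bytes are ciphered:

/* Standalone sketch; all values are hypothetical. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t cipher_off = 16, cipher_len = 48; /* encrypt bytes 16..63 */
	uint32_t auth_off = 0, auth_len = 64;      /* hash bytes 0..63     */
	uint32_t src_data_len = cipher_off + cipher_len;

	printf("src_data_len=%u cipher_start=%u len_to_cipher=%u\n",
	       src_data_len, cipher_off, cipher_len);
	printf("hash_start=%u len_to_hash=%u\n", auth_off, auth_len);
	return 0;
}
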
+
+static int
+virtqueue_crypto_sym_enqueue_xmit(
+ struct virtqueue *txvq,
+ struct rte_crypto_op *cop)
+{
+ uint16_t idx = 0;
+ uint16_t num_entry;
+ uint16_t needed = 1;
+ uint16_t head_idx;
+ struct vq_desc_extra *dxp;
+ struct vring_desc *start_dp;
+ struct vring_desc *desc;
+ uint64_t indirect_op_data_req_phys_addr;
+ uint16_t req_data_len = sizeof(struct virtio_crypto_op_data_req);
+ uint32_t indirect_vring_addr_offset = req_data_len +
+ sizeof(struct virtio_crypto_inhdr);
+ struct rte_crypto_sym_op *sym_op = cop->sym;
+ struct virtio_crypto_session *session =
+ (struct virtio_crypto_session *)get_session_private_data(
+ cop->sym->session, cryptodev_virtio_driver_id);
+ struct virtio_crypto_op_data_req *op_data_req;
+ uint32_t hash_result_len = 0;
+ struct virtio_crypto_op_cookie *crypto_op_cookie;
+ struct virtio_crypto_alg_chain_session_para *para;
+
+ if (unlikely(sym_op->m_src->nb_segs != 1))
+ return -EMSGSIZE;
+ if (unlikely(txvq->vq_free_cnt == 0))
+ return -ENOSPC;
+ if (unlikely(txvq->vq_free_cnt < needed))
+ return -EMSGSIZE;
+ head_idx = txvq->vq_desc_head_idx;
+ if (unlikely(head_idx >= txvq->vq_nentries))
+ return -EFAULT;
+ if (unlikely(session == NULL))
+ return -EFAULT;
+
+ dxp = &txvq->vq_descx[head_idx];
+
+ if (rte_mempool_get(txvq->mpool, &dxp->cookie)) {
+ VIRTIO_CRYPTO_TX_LOG_ERR("cannot get cookie");
+ return -EFAULT;
+ }
+ crypto_op_cookie = dxp->cookie;
+ indirect_op_data_req_phys_addr =
+ rte_mempool_virt2iova(crypto_op_cookie);
+ op_data_req = (struct virtio_crypto_op_data_req *)crypto_op_cookie;
+
+ if (virtqueue_crypto_sym_pkt_header_arrange(cop, op_data_req, session)) {
+ /* return the cookie to the mempool so it is not leaked */
+ rte_mempool_put(txvq->mpool, dxp->cookie);
+ return -EFAULT;
+ }
+
+ /* status is initialized to VIRTIO_CRYPTO_ERR */
+ ((struct virtio_crypto_inhdr *)
+ ((uint8_t *)op_data_req + req_data_len))->status =
+ VIRTIO_CRYPTO_ERR;
+
+ /* point to indirect vring entry */
+ desc = (struct vring_desc *)
+ ((uint8_t *)op_data_req + indirect_vring_addr_offset);
+ for (idx = 0; idx < (NUM_ENTRY_VIRTIO_CRYPTO_OP - 1); idx++)
+ desc[idx].next = idx + 1;
+ desc[NUM_ENTRY_VIRTIO_CRYPTO_OP - 1].next = VQ_RING_DESC_CHAIN_END;
+
+ idx = 0;
+
+ /* indirect vring: first part, virtio_crypto_op_data_req */
+ desc[idx].addr = indirect_op_data_req_phys_addr;
+ desc[idx].len = req_data_len;
+ desc[idx++].flags = VRING_DESC_F_NEXT;
+
+ /* indirect vring: iv of cipher */
+ if (session->iv.length) {
+ desc[idx].addr = cop->phys_addr + session->iv.offset;
+ desc[idx].len = session->iv.length;
+ desc[idx++].flags = VRING_DESC_F_NEXT;
+ }
+
+ /* indirect vring: additional auth data */
+ if (session->aad.length) {
+ desc[idx].addr = session->aad.phys_addr;
+ desc[idx].len = session->aad.length;
+ desc[idx++].flags = VRING_DESC_F_NEXT;
+ }
+
+ /* indirect vring: src data */
+ desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_src, 0);
+ desc[idx].len = (sym_op->cipher.data.offset
+ + sym_op->cipher.data.length);
+ desc[idx++].flags = VRING_DESC_F_NEXT;
+
+ /* indirect vring: dst data */
+ if (sym_op->m_dst) {
+ desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_dst, 0);
+ desc[idx].len = (sym_op->cipher.data.offset
+ + sym_op->cipher.data.length);
+ } else {
+ desc[idx].addr = rte_pktmbuf_mtophys_offset(sym_op->m_src, 0);
+ desc[idx].len = (sym_op->cipher.data.offset
+ + sym_op->cipher.data.length);
+ }
+ desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;
+
+ /* indirect vring: digest result */
+ para = &(session->ctrl.u.sym_create_session.u.chain.para);
+ if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_PLAIN)
+ hash_result_len = para->u.hash_param.hash_result_len;
+ if (para->hash_mode == VIRTIO_CRYPTO_SYM_HASH_MODE_AUTH)
+ hash_result_len = para->u.mac_param.hash_result_len;
+ if (hash_result_len > 0) {
+ desc[idx].addr = sym_op->auth.digest.phys_addr;
+ desc[idx].len = hash_result_len;
+ desc[idx++].flags = VRING_DESC_F_WRITE | VRING_DESC_F_NEXT;
+ }
+
+ /* indirect vring: last part, status returned */
+ desc[idx].addr = indirect_op_data_req_phys_addr + req_data_len;
+ desc[idx].len = sizeof(struct virtio_crypto_inhdr);
+ desc[idx++].flags = VRING_DESC_F_WRITE;
+
+ num_entry = idx;
+
+ /* save the info needed when dequeuing the result */
+ dxp->crypto_op = (void *)cop;
+ dxp->ndescs = needed;
+
+ /* expose the whole chain through a single indirect descriptor */
+ start_dp = txvq->vq_ring.desc;
+ start_dp[head_idx].addr = indirect_op_data_req_phys_addr +
+ indirect_vring_addr_offset;
+ start_dp[head_idx].len = num_entry * sizeof(struct vring_desc);
+ start_dp[head_idx].flags = VRING_DESC_F_INDIRECT;
+
+ idx = start_dp[head_idx].next;
+ txvq->vq_desc_head_idx = idx;
+ if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
+ txvq->vq_desc_tail_idx = idx;
+ txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
+ vq_update_avail_ring(txvq, head_idx);
+
+ return 0;
+}
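
Everything handed to the device for one op lives in the single mempool object fetched above, in three consecutive regions: the request header, the in-header that the device writes the status into, and the indirect descriptor table. Since the chain is exposed through one VRING_DESC_F_INDIRECT head descriptor, needed stays 1. A standalone sketch of the offset math (the sizes are stand-ins for the driver's sizeof() values and NUM_ENTRY_VIRTIO_CRYPTO_OP):

/* Standalone sketch; REQ_LEN, INHDR_LEN and NUM_ENTRY are stand-ins. */
#include <stdio.h>
#include <stddef.h>

#define REQ_LEN   72 /* stand-in for sizeof(struct virtio_crypto_op_data_req) */
#define INHDR_LEN  1 /* stand-in for sizeof(struct virtio_crypto_inhdr)       */
#define DESC_LEN  16 /* vring_desc size per the virtio spec                   */
#define NUM_ENTRY  7 /* stand-in for NUM_ENTRY_VIRTIO_CRYPTO_OP               */

int main(void)
{
	size_t inhdr_off = REQ_LEN;                /* status written back here */
	size_t indirect_off = REQ_LEN + INHDR_LEN; /* indirect table follows   */

	printf("inhdr at +%zu, indirect table at +%zu (%d descs of %d bytes)\n",
	       inhdr_off, indirect_off, NUM_ENTRY, DESC_LEN);
	return 0;
}
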
+
+static int
+virtqueue_crypto_enqueue_xmit(struct virtqueue *txvq,
+ struct rte_crypto_op *cop)
+{
+ int ret;
+
+ switch (cop->type) {
+ case RTE_CRYPTO_OP_TYPE_SYMMETRIC:
+ ret = virtqueue_crypto_sym_enqueue_xmit(txvq, cop);
+ break;
+ default:
+ VIRTIO_CRYPTO_TX_LOG_ERR("invalid crypto op type %u",
+ cop->type);
+ ret = -EFAULT;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+virtio_crypto_vring_start(struct virtqueue *vq)
+{
+ struct virtio_crypto_hw *hw = vq->hw;
+ int i, size = vq->vq_nentries;
+ struct vring *vr = &vq->vq_ring;
+ uint8_t *ring_mem = vq->vq_ring_virt_mem;
+
+ PMD_INIT_FUNC_TRACE();
+
+ vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
+ vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
+ vq->vq_free_cnt = vq->vq_nentries;
+
+ /* Chain all the descriptors in the ring with an END */
+ for (i = 0; i < size - 1; i++)
+ vr->desc[i].next = (uint16_t)(i + 1);
+ vr->desc[i].next = VQ_RING_DESC_CHAIN_END;
+
+ /*
+ * Disable interrupts from the device (host) to the guest
+ */
+ virtqueue_disable_intr(vq);
+
+ /*
+ * Set guest physical address of the virtqueue
+ * in VIRTIO_PCI_QUEUE_PFN config register of device
+ * to share with the backend
+ */
+ if (VTPCI_OPS(hw)->setup_queue(hw, vq) < 0) {
+ VIRTIO_CRYPTO_INIT_LOG_ERR("setup_queue failed");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void
+virtio_crypto_ctrlq_start(struct rte_cryptodev *dev)
+{
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ if (hw->cvq) {
+ virtio_crypto_vring_start(hw->cvq);
+ VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq);
+ }
+}
+
+void
+virtio_crypto_dataq_start(struct rte_cryptodev *dev)
+{
+ /*
+ * Start the data vrings:
+ * set up the vring structure for every data queue
+ */
+ uint16_t i;
+ struct virtio_crypto_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_FUNC_TRACE();
+
+ for (i = 0; i < hw->max_dataqueues; i++) {
+ virtio_crypto_vring_start(dev->data->queue_pairs[i]);
+ VIRTQUEUE_DUMP((struct virtqueue *)dev->data->queue_pairs[i]);
+ }
+}
+
+/* The data-queue vring holds 1024 entries; cap a burst at that size */
+#define VIRTIO_MBUF_BURST_SZ 1024
+
+uint16_t
+virtio_crypto_pkt_rx_burst(void *tx_queue, struct rte_crypto_op **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtqueue *txvq = tx_queue;
+ uint16_t nb_used, num, nb_rx;
+
+ nb_used = VIRTQUEUE_NUSED(txvq);
+
+ virtio_rmb();
+
+ num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
+ num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ)
+ ? num : VIRTIO_MBUF_BURST_SZ);
+
+ if (num == 0)
+ return 0;
+
+ nb_rx = virtqueue_dequeue_burst_rx(txvq, rx_pkts, num);
+ VIRTIO_CRYPTO_RX_LOG_DBG("used:%d dequeue:%d", nb_used, num);
+
+ return nb_rx;
+}
+
+uint16_t
+virtio_crypto_pkt_tx_burst(void *tx_queue, struct rte_crypto_op **tx_pkts,
+ uint16_t nb_pkts)
+{
+ struct virtqueue *txvq;
+ uint16_t nb_tx;
+ int error;
+
+ if (unlikely(nb_pkts < 1))
+ return nb_pkts;
+ if (unlikely(tx_queue == NULL)) {
+ VIRTIO_CRYPTO_TX_LOG_ERR("tx_queue is NULL");
+ return 0;
+ }
+ txvq = tx_queue;
+
+ VIRTIO_CRYPTO_TX_LOG_DBG("%d packets to xmit", nb_pkts);
+
+ for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+ struct rte_mbuf *txm = tx_pkts[nb_tx]->sym->m_src;
+ /* nb_segs is always 1 for virtio crypto */
+ int need = txm->nb_segs - txvq->vq_free_cnt;
+
+ /*
+ * A positive value means there are not enough free vring
+ * descriptors
+ */
+ if (unlikely(need > 0)) {
+ /*
+ * Try again, as the dequeue path may have freed
+ * some descriptors in the meantime
+ */
+ need = txm->nb_segs - txvq->vq_free_cnt;
+ if (unlikely(need > 0)) {
+ VIRTIO_CRYPTO_TX_LOG_DBG("No free tx "
+ "descriptors to transmit");
+ break;
+ }
+ }
+
+ txvq->packets_sent_total++;
+
+ /* Enqueue Packet buffers */
+ error = virtqueue_crypto_enqueue_xmit(txvq, tx_pkts[nb_tx]);
+ if (unlikely(error)) {
+ if (error == -ENOSPC)
+ VIRTIO_CRYPTO_TX_LOG_ERR(
+ "virtqueue_enqueue Free count = 0");
+ else if (error == -EMSGSIZE)
+ VIRTIO_CRYPTO_TX_LOG_ERR(
+ "virtqueue_enqueue Free count < needed");
+ else
+ VIRTIO_CRYPTO_TX_LOG_ERR(
+ "virtqueue_enqueue error: %d", error);
+ txvq->packets_sent_failed++;
+ break;
+ }
+ }
+
+ if (likely(nb_tx)) {
+ vq_update_avail_idx(txvq);
+
+ if (unlikely(virtqueue_kick_prepare(txvq))) {
+ virtqueue_notify(txvq);
+ VIRTIO_CRYPTO_TX_LOG_DBG("Notified backend after xmit");
+ }
+ }
+
+ return nb_tx;
+}
diff --git a/drivers/crypto/virtio/virtqueue.c b/drivers/crypto/virtio/virtqueue.c
new file mode 100644
index 00000000..fd8be581
--- /dev/null
+++ b/drivers/crypto/virtio/virtqueue.c
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#include <stdint.h>
+
+#include <rte_mbuf.h>
+#include <rte_crypto.h>
+#include <rte_malloc.h>
+
+#include "virtqueue.h"
+
+void
+virtqueue_disable_intr(struct virtqueue *vq)
+{
+ /*
+ * Set VRING_AVAIL_F_NO_INTERRUPT to ask the host not to
+ * interrupt us when it consumes buffers; this is only a
+ * hint and the host may interrupt anyway
+ */
+ vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+}
+
+void
+virtqueue_detatch_unused(struct virtqueue *vq)
+{
+ struct rte_crypto_op *cop = NULL;
+ int idx;
+
+ if (vq == NULL)
+ return;
+
+ for (idx = 0; idx < vq->vq_nentries; idx++) {
+ cop = vq->vq_descx[idx].crypto_op;
+ if (cop) {
+ if (cop->sym->m_src)
+ rte_pktmbuf_free(cop->sym->m_src);
+ if (cop->sym->m_dst)
+ rte_pktmbuf_free(cop->sym->m_dst);
+ rte_crypto_op_free(cop);
+ vq->vq_descx[idx].crypto_op = NULL;
+ }
+ }
+}
diff --git a/drivers/crypto/virtio/virtqueue.h b/drivers/crypto/virtio/virtqueue.h
new file mode 100644
index 00000000..bf10c657
--- /dev/null
+++ b/drivers/crypto/virtio/virtqueue.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 HUAWEI TECHNOLOGIES CO., LTD.
+ */
+
+#ifndef _VIRTQUEUE_H_
+#define _VIRTQUEUE_H_
+
+#include <stdint.h>
+
+#include <rte_atomic.h>
+#include <rte_memory.h>
+#include <rte_memzone.h>
+#include <rte_mempool.h>
+
+#include "virtio_pci.h"
+#include "virtio_ring.h"
+#include "virtio_logs.h"
+#include "virtio_crypto.h"
+
+struct rte_mbuf;
+
+/*
+ * Per virtio_config.h in Linux.
+ * For virtio_pci on SMP, we don't need to order with respect to MMIO
+ * accesses through relaxed memory I/O windows, so smp_mb() et al are
+ * sufficient.
+ */
+#define virtio_mb() rte_smp_mb()
+#define virtio_rmb() rte_smp_rmb()
+#define virtio_wmb() rte_smp_wmb()
+
+#define VIRTQUEUE_MAX_NAME_SZ 32
+
+enum { VTCRYPTO_DATAQ = 0, VTCRYPTO_CTRLQ = 1 };
+
+/**
+ * The maximum virtqueue size is 2^15. Use that value as the end of
+ * descriptor chain terminator since it will never be a valid index
+ * in the descriptor table. This is used to verify we are correctly
+ * handling vq_free_cnt.
+ */
+#define VQ_RING_DESC_CHAIN_END 32768
+
+struct vq_desc_extra {
+ void *crypto_op;
+ void *cookie;
+ uint16_t ndescs;
+};
+
+struct virtqueue {
+ struct virtio_crypto_hw *hw; /**< virtio_crypto_hw structure pointer. */
+ const struct rte_memzone *mz; /**< memzone to populate the vring. */
+ struct rte_mempool *mpool; /**< mempool to populate hdr and request. */
+ uint8_t dev_id; /**< Device identifier. */
+ uint16_t vq_queue_index; /**< PCI queue index */
+
+ void *vq_ring_virt_mem; /**< linear address of vring */
+ unsigned int vq_ring_size;
+ phys_addr_t vq_ring_mem; /**< physical address of vring */
+
+ struct vring vq_ring; /**< vring keeping desc, used and avail */
+ uint16_t vq_free_cnt; /**< num of desc available */
+ uint16_t vq_nentries; /**< vring desc numbers */
+
+ /**
+ * Head of the free chain in the descriptor table. If
+ * there are no free descriptors, this will be set to
+ * VQ_RING_DESC_CHAIN_END.
+ */
+ uint16_t vq_desc_head_idx;
+ uint16_t vq_desc_tail_idx;
+ /**
+ * Last consumed descriptor in the used table,
+ * trails vq_ring.used->idx.
+ */
+ uint16_t vq_used_cons_idx;
+ uint16_t vq_avail_idx;
+
+ /* Statistics */
+ uint64_t packets_sent_total;
+ uint64_t packets_sent_failed;
+ uint64_t packets_received_total;
+ uint64_t packets_received_failed;
+
+ uint16_t *notify_addr;
+
+ struct vq_desc_extra vq_descx[0];
+};
+
+/**
+ * Tell the backend not to interrupt us.
+ */
+void virtqueue_disable_intr(struct virtqueue *vq);
+
+/**
+ * Free all crypto ops (and their mbufs) still attached
+ * to the virtqueue descriptors.
+ */
+void virtqueue_detatch_unused(struct virtqueue *vq);
+
+static inline int
+virtqueue_full(const struct virtqueue *vq)
+{
+ return vq->vq_free_cnt == 0;
+}
+
+#define VIRTQUEUE_NUSED(vq) \
+ ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
+
+static inline void
+vq_update_avail_idx(struct virtqueue *vq)
+{
+ virtio_wmb();
+ vq->vq_ring.avail->idx = vq->vq_avail_idx;
+}
+
+static inline void
+vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
+{
+ uint16_t avail_idx;
+ /*
+ * Place the head of the descriptor chain into the next slot and make
+ * it usable to the host. The chain is made available now rather than
+ * deferring to virtqueue_notify() in the hopes that if the host is
+ * currently running on another CPU, we can keep it processing the new
+ * descriptor.
+ */
+ avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
+ if (unlikely(vq->vq_ring.avail->ring[avail_idx] != desc_idx))
+ vq->vq_ring.avail->ring[avail_idx] = desc_idx;
+ vq->vq_avail_idx++;
+}
+
+static inline int
+virtqueue_kick_prepare(struct virtqueue *vq)
+{
+ return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
+}
+
+static inline void
+virtqueue_notify(struct virtqueue *vq)
+{
+ /*
+ * Ensure the updated avail->idx is visible to the host.
+ * For virtio on IA, the notification is through an I/O port
+ * operation, which is itself a serializing instruction.
+ */
+ VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
+}
+
+/**
+ * Dump virtqueue internal structures, for debug purpose only.
+ */
+#define VIRTQUEUE_DUMP(vq) do { \
+ uint16_t used_idx, nused; \
+ used_idx = (vq)->vq_ring.used->idx; \
+ nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
+ VIRTIO_CRYPTO_INIT_LOG_DBG(\
+ "VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
+ " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
+ " avail.flags=0x%x; used.flags=0x%x", \
+ (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
+ (vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \
+ (vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \
+ (vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \
+} while (0)
+
+#endif /* _VIRTQUEUE_H_ */
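
The avail update above is deliberately two-phase: vq_update_avail_ring() writes one slot per descriptor chain, and a single vq_update_avail_idx() per burst issues the write barrier and publishes the index, so the host never observes a slot before its chain is complete. A standalone toy (shadow and public indices only, no DPDK types) of that ordering:

/* Standalone toy of the two-phase avail publish; sizes illustrative. */
#include <stdio.h>
#include <stdint.h>

#define QSZ 8 /* a power of two, like vq_nentries */

static uint16_t avail_ring[QSZ];
static uint16_t avail_idx_shadow; /* plays vq->vq_avail_idx       */
static uint16_t avail_idx_public; /* plays vq->vq_ring.avail->idx */

static void update_avail_ring(uint16_t desc_idx)
{
	avail_ring[avail_idx_shadow & (QSZ - 1)] = desc_idx;
	avail_idx_shadow++;
}

static void update_avail_idx(void)
{
	/* the real code issues virtio_wmb() here */
	avail_idx_public = avail_idx_shadow;
}

int main(void)
{
	update_avail_ring(3); /* two chain heads queued in one burst */
	update_avail_ring(5);
	update_avail_idx();   /* single publish for the whole batch  */
	printf("public idx = %u (host may now consume 2 slots)\n",
	       avail_idx_public);
	return 0;
}
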
diff --git a/drivers/crypto/zuc/rte_zuc_pmd.c b/drivers/crypto/zuc/rte_zuc_pmd.c
index ad65b80c..a805b227 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd.c
+++ b/drivers/crypto/zuc/rte_zuc_pmd.c
@@ -12,7 +12,7 @@
#include "rte_zuc_pmd_private.h"
-#define ZUC_MAX_BURST 8
+#define ZUC_MAX_BURST 4
#define BYTE_LEN 8
static uint8_t cryptodev_driver_id;
@@ -168,10 +168,10 @@ zuc_get_session(struct zuc_qp *qp, struct rte_crypto_op *op)
return sess;
}
-/** Encrypt/decrypt mbufs with same cipher key. */
+/** Encrypt/decrypt mbufs. */
static uint8_t
process_zuc_cipher_op(struct rte_crypto_op **ops,
- struct zuc_session *session,
+ struct zuc_session **sessions,
uint8_t num_ops)
{
unsigned i;
@@ -180,6 +180,7 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
uint8_t *iv[ZUC_MAX_BURST];
uint32_t num_bytes[ZUC_MAX_BURST];
uint8_t *cipher_keys[ZUC_MAX_BURST];
+ struct zuc_session *sess;
for (i = 0; i < num_ops; i++) {
if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
@@ -190,6 +191,8 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
break;
}
+ sess = sessions[i];
+
#ifdef RTE_LIBRTE_PMD_ZUC_DEBUG
if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
(ops[i]->sym->m_dst != NULL &&
@@ -211,10 +214,10 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
(ops[i]->sym->cipher.data.offset >> 3);
iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- session->cipher_iv_offset);
+ sess->cipher_iv_offset);
num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
- cipher_keys[i] = session->pKey_cipher;
+ cipher_keys[i] = sess->pKey_cipher;
processed_ops++;
}
@@ -225,10 +228,10 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
return processed_ops;
}
-/** Generate/verify hash from mbufs with same hash key. */
+/** Generate/verify hash from mbufs. */
static int
process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
- struct zuc_session *session,
+ struct zuc_session **sessions,
uint8_t num_ops)
{
unsigned i;
@@ -237,6 +240,7 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
uint32_t *dst;
uint32_t length_in_bits;
uint8_t *iv;
+ struct zuc_session *sess;
for (i = 0; i < num_ops; i++) {
/* Data must be byte aligned */
@@ -246,17 +250,19 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
break;
}
+ sess = sessions[i];
+
length_in_bits = ops[i]->sym->auth.data.length;
src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
(ops[i]->sym->auth.data.offset >> 3);
iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
- session->auth_iv_offset);
+ sess->auth_iv_offset);
- if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+ if (sess->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
dst = (uint32_t *)qp->temp_digest;
- sso_zuc_eia3_1_buffer(session->pKey_hash,
+ sso_zuc_eia3_1_buffer(sess->pKey_hash,
iv, src,
length_in_bits, dst);
/* Verify digest. */
@@ -266,7 +272,7 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
} else {
dst = (uint32_t *)ops[i]->sym->auth.digest.data;
- sso_zuc_eia3_1_buffer(session->pKey_hash,
+ sso_zuc_eia3_1_buffer(sess->pKey_hash,
iv, src,
length_in_bits, dst);
}
@@ -276,33 +282,34 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
return processed_ops;
}
-/** Process a batch of crypto ops which shares the same session. */
+/** Process a batch of crypto ops which share the same operation type. */
static int
-process_ops(struct rte_crypto_op **ops, struct zuc_session *session,
+process_ops(struct rte_crypto_op **ops, enum zuc_operation op_type,
+ struct zuc_session **sessions,
struct zuc_qp *qp, uint8_t num_ops,
uint16_t *accumulated_enqueued_ops)
{
unsigned i;
unsigned enqueued_ops, processed_ops;
- switch (session->op) {
+ switch (op_type) {
case ZUC_OP_ONLY_CIPHER:
processed_ops = process_zuc_cipher_op(ops,
- session, num_ops);
+ sessions, num_ops);
break;
case ZUC_OP_ONLY_AUTH:
- processed_ops = process_zuc_hash_op(qp, ops, session,
+ processed_ops = process_zuc_hash_op(qp, ops, sessions,
num_ops);
break;
case ZUC_OP_CIPHER_AUTH:
- processed_ops = process_zuc_cipher_op(ops, session,
+ processed_ops = process_zuc_cipher_op(ops, sessions,
num_ops);
- process_zuc_hash_op(qp, ops, session, processed_ops);
+ process_zuc_hash_op(qp, ops, sessions, processed_ops);
break;
case ZUC_OP_AUTH_CIPHER:
- processed_ops = process_zuc_hash_op(qp, ops, session,
+ processed_ops = process_zuc_hash_op(qp, ops, sessions,
num_ops);
- process_zuc_cipher_op(ops, session, processed_ops);
+ process_zuc_cipher_op(ops, sessions, processed_ops);
break;
default:
/* Operation not supported. */
@@ -318,10 +325,10 @@ process_ops(struct rte_crypto_op **ops, struct zuc_session *session,
ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
/* Free session if a session-less crypto op. */
if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
- memset(session, 0, sizeof(struct zuc_session));
+ memset(sessions[i], 0, sizeof(struct zuc_session));
memset(ops[i]->sym->session, 0,
- rte_cryptodev_get_header_session_size());
- rte_mempool_put(qp->sess_mp, session);
+ rte_cryptodev_sym_get_header_session_size());
+ rte_mempool_put(qp->sess_mp, sessions[i]);
rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
ops[i]->sym->session = NULL;
}
@@ -342,7 +349,10 @@ zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
struct rte_crypto_op *c_ops[ZUC_MAX_BURST];
struct rte_crypto_op *curr_c_op;
- struct zuc_session *prev_sess = NULL, *curr_sess = NULL;
+ struct zuc_session *curr_sess;
+ struct zuc_session *sessions[ZUC_MAX_BURST];
+ enum zuc_operation prev_zuc_op = ZUC_OP_NOT_SUPPORTED;
+ enum zuc_operation curr_zuc_op;
struct zuc_qp *qp = queue_pair;
unsigned i;
uint8_t burst_size = 0;
@@ -352,61 +362,70 @@ zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
for (i = 0; i < nb_ops; i++) {
curr_c_op = ops[i];
- /* Set status as enqueued (not processed yet) by default. */
- curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-
curr_sess = zuc_get_session(qp, curr_c_op);
- if (unlikely(curr_sess == NULL ||
- curr_sess->op == ZUC_OP_NOT_SUPPORTED)) {
+ if (unlikely(curr_sess == NULL)) {
curr_c_op->status =
RTE_CRYPTO_OP_STATUS_INVALID_SESSION;
break;
}
- /* Batch ops that share the same session. */
- if (prev_sess == NULL) {
- prev_sess = curr_sess;
- c_ops[burst_size++] = curr_c_op;
- } else if (curr_sess == prev_sess) {
- c_ops[burst_size++] = curr_c_op;
+ curr_zuc_op = curr_sess->op;
+
+ /*
+ * Batch ops that share the same operation type
+ * (cipher only, auth only...).
+ */
+ if (burst_size == 0) {
+ prev_zuc_op = curr_zuc_op;
+ c_ops[0] = curr_c_op;
+ sessions[0] = curr_sess;
+ burst_size++;
+ } else if (curr_zuc_op == prev_zuc_op) {
+ c_ops[burst_size] = curr_c_op;
+ sessions[burst_size] = curr_sess;
+ burst_size++;
/*
* When there are enough ops to process in a batch,
* process them, and start a new batch.
*/
if (burst_size == ZUC_MAX_BURST) {
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size, &enqueued_ops);
+ processed_ops = process_ops(c_ops, curr_zuc_op,
+ sessions, qp, burst_size,
+ &enqueued_ops);
if (processed_ops < burst_size) {
burst_size = 0;
break;
}
burst_size = 0;
- prev_sess = NULL;
}
} else {
/*
- * Different session, process the ops
- * of the previous session.
+ * Different operation type, process the ops
+ * of the previous type.
*/
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size, &enqueued_ops);
+ processed_ops = process_ops(c_ops, prev_zuc_op,
+ sessions, qp, burst_size,
+ &enqueued_ops);
if (processed_ops < burst_size) {
burst_size = 0;
break;
}
burst_size = 0;
- prev_sess = curr_sess;
+ prev_zuc_op = curr_zuc_op;
- c_ops[burst_size++] = curr_c_op;
+ c_ops[0] = curr_c_op;
+ sessions[0] = curr_sess;
+ burst_size++;
}
}
if (burst_size != 0) {
- /* Process the crypto ops of the last session. */
- processed_ops = process_ops(c_ops, prev_sess,
- qp, burst_size, &enqueued_ops);
+ /* Process the crypto ops of the last operation type. */
+ processed_ops = process_ops(c_ops, prev_zuc_op,
+ sessions, qp, burst_size,
+ &enqueued_ops);
}
qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
@@ -524,5 +543,5 @@ RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_ZUC_PMD,
"max_nb_queue_pairs=<int> "
"max_nb_sessions=<int> "
"socket_id=<int>");
-RTE_PMD_REGISTER_CRYPTO_DRIVER(zuc_crypto_drv, cryptodev_zuc_pmd_drv,
+RTE_PMD_REGISTER_CRYPTO_DRIVER(zuc_crypto_drv, cryptodev_zuc_pmd_drv.driver,
cryptodev_driver_id);
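
This rework batches ops by operation type instead of by session, carrying a parallel sessions[] array so one batch can mix sessions freely. A standalone toy of the same batching pattern (hypothetical op types and a burst of 4, not the PMD's code):

/* Standalone toy of batching by operation type; values are made up. */
#include <stdio.h>

#define MAX_BURST 4

enum op_type { CIPHER, AUTH };

struct op { enum op_type type; int session_id; };

static void flush(struct op *batch[], int n, enum op_type t)
{
	(void)batch;
	printf("process %d op(s) of type %d\n", n, t);
}

int main(void)
{
	struct op ops[] = {
		{CIPHER, 1}, {CIPHER, 2}, {AUTH, 1}, {AUTH, 3}, {AUTH, 2},
	};
	struct op *batch[MAX_BURST];
	enum op_type prev = CIPHER;
	int i, n = 0;

	for (i = 0; i < (int)(sizeof(ops) / sizeof(ops[0])); i++) {
		if (n == 0 || ops[i].type == prev) {
			prev = ops[i].type;
			batch[n++] = &ops[i];  /* same type: extend batch */
			if (n == MAX_BURST) {
				flush(batch, n, prev); /* batch full */
				n = 0;
			}
		} else {
			flush(batch, n, prev); /* type changed: flush */
			n = 0;
			prev = ops[i].type;
			batch[n++] = &ops[i];
		}
	}
	if (n)
		flush(batch, n, prev); /* flush the tail batch */
	return 0;
}

As in the PMD, a full batch is flushed in place and the loop continues, so ordering within the burst is preserved while sessions vary per op.
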