Diffstat (limited to 'drivers/crypto/aesni_mb')
-rw-r--r--  drivers/crypto/aesni_mb/Makefile                      10
-rw-r--r--  drivers/crypto/aesni_mb/aesni_mb_ops.h                31
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c           178
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c       116
-rw-r--r--  drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h    78
5 files changed, 278 insertions(+), 135 deletions(-)
diff --git a/drivers/crypto/aesni_mb/Makefile b/drivers/crypto/aesni_mb/Makefile
index d9f8fb98..806a95eb 100644
--- a/drivers/crypto/aesni_mb/Makefile
+++ b/drivers/crypto/aesni_mb/Makefile
@@ -3,12 +3,6 @@
include $(RTE_SDK)/mk/rte.vars.mk
-ifneq ($(MAKECMDGOALS),clean)
-ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),)
-$(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable")
-endif
-endif
-
# library name
LIB = librte_pmd_aesni_mb.a
@@ -23,9 +17,7 @@ LIBABIVER := 1
EXPORT_MAP := rte_pmd_aesni_mb_version.map
# external library dependencies
-CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)
-CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include
-LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB
+LDLIBS += -lIPSec_MB
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_cryptodev
LDLIBS += -lrte_bus_vdev
diff --git a/drivers/crypto/aesni_mb/aesni_mb_ops.h b/drivers/crypto/aesni_mb/aesni_mb_ops.h
index 4d596e85..5a1cba6c 100644
--- a/drivers/crypto/aesni_mb/aesni_mb_ops.h
+++ b/drivers/crypto/aesni_mb/aesni_mb_ops.h
@@ -9,8 +9,7 @@
#define LINUX
#endif
-#include <mb_mgr.h>
-#include <aux_funcs.h>
+#include <intel-ipsec-mb.h>
enum aesni_mb_vector_mode {
RTE_AESNI_MB_NOT_SUPPORTED = 0,
@@ -34,9 +33,12 @@ typedef void (*aes_keyexp_192_t)
(const void *key, void *enc_exp_keys, void *dec_exp_keys);
typedef void (*aes_keyexp_256_t)
(const void *key, void *enc_exp_keys, void *dec_exp_keys);
-
typedef void (*aes_xcbc_expand_key_t)
(const void *key, void *exp_k1, void *k2, void *k3);
+typedef void (*aes_cmac_sub_key_gen_t)
+ (const void *exp_key, void *k2, void *k3);
+typedef void (*aes_cmac_keyexp_t)
+ (const void *key, void *keyexp);
/** Multi-buffer library function pointer table */
struct aesni_mb_op_fns {
@@ -78,9 +80,12 @@ struct aesni_mb_op_fns {
/**< AES192 key expansions */
aes_keyexp_256_t aes256;
/**< AES256 key expansions */
-
aes_xcbc_expand_key_t aes_xcbc;
- /**< AES XCBC key expansions */
+	/**< AES XCBC key expansions */
+ aes_cmac_sub_key_gen_t aes_cmac_subkey;
+ /**< AES CMAC subkey expansions */
+ aes_cmac_keyexp_t aes_cmac_expkey;
+ /**< AES CMAC key expansions */
} keyexp;
/**< Key expansion functions */
} aux;
@@ -123,7 +128,9 @@ static const struct aesni_mb_op_fns job_ops[] = {
aes_keyexp_128_sse,
aes_keyexp_192_sse,
aes_keyexp_256_sse,
- aes_xcbc_expand_key_sse
+ aes_xcbc_expand_key_sse,
+ aes_cmac_subkey_gen_sse,
+ aes_keyexp_128_enc_sse
}
}
},
@@ -148,7 +155,9 @@ static const struct aesni_mb_op_fns job_ops[] = {
aes_keyexp_128_avx,
aes_keyexp_192_avx,
aes_keyexp_256_avx,
- aes_xcbc_expand_key_avx
+ aes_xcbc_expand_key_avx,
+ aes_cmac_subkey_gen_avx,
+ aes_keyexp_128_enc_avx
}
}
},
@@ -173,7 +182,9 @@ static const struct aesni_mb_op_fns job_ops[] = {
aes_keyexp_128_avx2,
aes_keyexp_192_avx2,
aes_keyexp_256_avx2,
- aes_xcbc_expand_key_avx2
+ aes_xcbc_expand_key_avx2,
+ aes_cmac_subkey_gen_avx2,
+ aes_keyexp_128_enc_avx2
}
}
},
@@ -198,7 +209,9 @@ static const struct aesni_mb_op_fns job_ops[] = {
aes_keyexp_128_avx512,
aes_keyexp_192_avx512,
aes_keyexp_256_avx512,
- aes_xcbc_expand_key_avx512
+ aes_xcbc_expand_key_avx512,
+ aes_cmac_subkey_gen_avx512,
+ aes_keyexp_128_enc_avx512
}
}
}
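
For illustration only (not part of the patch): a minimal C sketch of how the two new CMAC key-expansion pointers are consumed at session-setup time, mirroring the hunk in rte_aesni_mb_pmd.c below. The function name example_cmac_keyexp and its parameters are placeholders.

#include "aesni_mb_ops.h"
#include "rte_aesni_mb_pmd_private.h"

/* Expand the AES-CMAC key and derive the two subkeys through the
 * vector-mode dispatch table, as done for RTE_CRYPTO_AUTH_AES_CMAC.
 */
static void
example_cmac_keyexp(const struct aesni_mb_op_fns *fns,
		struct aesni_mb_session *sess, const uint8_t *key)
{
	(*fns->aux.keyexp.aes_cmac_expkey)(key, sess->auth.cmac.expkey);
	(*fns->aux.keyexp.aes_cmac_subkey)(sess->auth.cmac.expkey,
			sess->auth.cmac.skey1, sess->auth.cmac.skey2);
}
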
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
index 636c6c37..93dc7a44 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c
@@ -2,7 +2,7 @@
* Copyright(c) 2015-2017 Intel Corporation
*/
-#include <des.h>
+#include <intel-ipsec-mb.h>
#include <rte_common.h>
#include <rte_hexdump.h>
@@ -108,7 +108,7 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
}
if (xform->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
- MB_LOG_ERR("Crypto xform struct not of type auth");
+ AESNI_MB_LOG(ERR, "Crypto xform struct not of type auth");
return -1;
}
@@ -124,6 +124,17 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
return 0;
}
+ if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_CMAC) {
+ sess->auth.algo = AES_CMAC;
+ (*mb_ops->aux.keyexp.aes_cmac_expkey)(xform->auth.key.data,
+ sess->auth.cmac.expkey);
+
+ (*mb_ops->aux.keyexp.aes_cmac_subkey)(sess->auth.cmac.expkey,
+ sess->auth.cmac.skey1, sess->auth.cmac.skey2);
+ return 0;
+ }
+
+
switch (xform->auth.algo) {
case RTE_CRYPTO_AUTH_MD5_HMAC:
sess->auth.algo = MD5;
@@ -150,7 +161,7 @@ aesni_mb_set_session_auth_parameters(const struct aesni_mb_op_fns *mb_ops,
hash_oneblock_fn = mb_ops->aux.one_block.sha512;
break;
default:
- MB_LOG_ERR("Unsupported authentication algorithm selection");
+ AESNI_MB_LOG(ERR, "Unsupported authentication algorithm selection");
return -ENOTSUP;
}
@@ -171,6 +182,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
const struct rte_crypto_sym_xform *xform)
{
uint8_t is_aes = 0;
+ uint8_t is_3DES = 0;
aes_keyexp_t aes_keyexp_fn;
if (xform == NULL) {
@@ -179,7 +191,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
}
if (xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
- MB_LOG_ERR("Crypto xform struct not of type cipher");
+ AESNI_MB_LOG(ERR, "Crypto xform struct not of type cipher");
return -EINVAL;
}
@@ -192,7 +204,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
sess->cipher.direction = DECRYPT;
break;
default:
- MB_LOG_ERR("Invalid cipher operation parameter");
+ AESNI_MB_LOG(ERR, "Invalid cipher operation parameter");
return -EINVAL;
}
@@ -216,8 +228,12 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
case RTE_CRYPTO_CIPHER_DES_DOCSISBPI:
sess->cipher.mode = DOCSIS_DES;
break;
+ case RTE_CRYPTO_CIPHER_3DES_CBC:
+ sess->cipher.mode = DES3;
+ is_3DES = 1;
+ break;
default:
- MB_LOG_ERR("Unsupported cipher mode parameter");
+ AESNI_MB_LOG(ERR, "Unsupported cipher mode parameter");
return -ENOTSUP;
}
@@ -241,7 +257,7 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
aes_keyexp_fn = mb_ops->aux.keyexp.aes256;
break;
default:
- MB_LOG_ERR("Invalid cipher key length");
+ AESNI_MB_LOG(ERR, "Invalid cipher key length");
return -EINVAL;
}
@@ -250,9 +266,52 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_op_fns *mb_ops,
sess->cipher.expanded_aes_keys.encode,
sess->cipher.expanded_aes_keys.decode);
+ } else if (is_3DES) {
+ uint64_t *keys[3] = {sess->cipher.exp_3des_keys.key[0],
+ sess->cipher.exp_3des_keys.key[1],
+ sess->cipher.exp_3des_keys.key[2]};
+
+ switch (xform->cipher.key.length) {
+ case 24:
+ des_key_schedule(keys[0], xform->cipher.key.data);
+ des_key_schedule(keys[1], xform->cipher.key.data+8);
+ des_key_schedule(keys[2], xform->cipher.key.data+16);
+
+ /* Initialize keys - 24 bytes: [K1-K2-K3] */
+ sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+ sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
+ sess->cipher.exp_3des_keys.ks_ptr[2] = keys[2];
+ break;
+ case 16:
+ des_key_schedule(keys[0], xform->cipher.key.data);
+ des_key_schedule(keys[1], xform->cipher.key.data+8);
+
+ /* Initialize keys - 16 bytes: [K1=K1,K2=K2,K3=K1] */
+ sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+ sess->cipher.exp_3des_keys.ks_ptr[1] = keys[1];
+ sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
+ break;
+ case 8:
+ des_key_schedule(keys[0], xform->cipher.key.data);
+
+ /* Initialize keys - 8 bytes: [K1 = K2 = K3] */
+ sess->cipher.exp_3des_keys.ks_ptr[0] = keys[0];
+ sess->cipher.exp_3des_keys.ks_ptr[1] = keys[0];
+ sess->cipher.exp_3des_keys.ks_ptr[2] = keys[0];
+ break;
+ default:
+ AESNI_MB_LOG(ERR, "Invalid cipher key length");
+ return -EINVAL;
+ }
+
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ sess->cipher.key_length_in_bytes = 24;
+#else
+ sess->cipher.key_length_in_bytes = 8;
+#endif
} else {
if (xform->cipher.key.length != 8) {
- MB_LOG_ERR("Invalid cipher key length");
+ AESNI_MB_LOG(ERR, "Invalid cipher key length");
return -EINVAL;
}
sess->cipher.key_length_in_bytes = 8;
@@ -283,7 +342,7 @@ aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops,
sess->auth.operation = RTE_CRYPTO_AUTH_OP_VERIFY;
break;
default:
- MB_LOG_ERR("Invalid aead operation parameter");
+ AESNI_MB_LOG(ERR, "Invalid aead operation parameter");
return -EINVAL;
}
@@ -293,7 +352,7 @@ aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops,
sess->auth.algo = AES_CCM;
break;
default:
- MB_LOG_ERR("Unsupported aead mode parameter");
+ AESNI_MB_LOG(ERR, "Unsupported aead mode parameter");
return -ENOTSUP;
}
@@ -309,7 +368,7 @@ aesni_mb_set_session_aead_parameters(const struct aesni_mb_op_fns *mb_ops,
aes_keyexp_fn = mb_ops->aux.keyexp.aes128;
break;
default:
- MB_LOG_ERR("Invalid cipher key length");
+ AESNI_MB_LOG(ERR, "Invalid cipher key length");
return -EINVAL;
}
@@ -338,16 +397,19 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
sess->chain_order = HASH_CIPHER;
auth_xform = xform;
cipher_xform = xform->next;
+ sess->auth.digest_len = xform->auth.digest_length;
break;
case AESNI_MB_OP_CIPHER_HASH:
sess->chain_order = CIPHER_HASH;
auth_xform = xform->next;
cipher_xform = xform;
+ sess->auth.digest_len = xform->auth.digest_length;
break;
case AESNI_MB_OP_HASH_ONLY:
sess->chain_order = HASH_CIPHER;
auth_xform = xform;
cipher_xform = NULL;
+ sess->auth.digest_len = xform->auth.digest_length;
break;
case AESNI_MB_OP_CIPHER_ONLY:
/*
@@ -366,18 +428,18 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
case AESNI_MB_OP_AEAD_CIPHER_HASH:
sess->chain_order = CIPHER_HASH;
sess->aead.aad_len = xform->aead.aad_length;
- sess->aead.digest_len = xform->aead.digest_length;
+ sess->auth.digest_len = xform->aead.digest_length;
aead_xform = xform;
break;
case AESNI_MB_OP_AEAD_HASH_CIPHER:
sess->chain_order = HASH_CIPHER;
sess->aead.aad_len = xform->aead.aad_length;
- sess->aead.digest_len = xform->aead.digest_length;
+ sess->auth.digest_len = xform->aead.digest_length;
aead_xform = xform;
break;
case AESNI_MB_OP_NOT_SUPPORTED:
default:
- MB_LOG_ERR("Unsupported operation chain order parameter");
+ AESNI_MB_LOG(ERR, "Unsupported operation chain order parameter");
return -ENOTSUP;
}
@@ -386,14 +448,14 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
ret = aesni_mb_set_session_auth_parameters(mb_ops, sess, auth_xform);
if (ret != 0) {
- MB_LOG_ERR("Invalid/unsupported authentication parameters");
+ AESNI_MB_LOG(ERR, "Invalid/unsupported authentication parameters");
return ret;
}
ret = aesni_mb_set_session_cipher_parameters(mb_ops, sess,
cipher_xform);
if (ret != 0) {
- MB_LOG_ERR("Invalid/unsupported cipher parameters");
+ AESNI_MB_LOG(ERR, "Invalid/unsupported cipher parameters");
return ret;
}
@@ -401,7 +463,7 @@ aesni_mb_set_session_parameters(const struct aesni_mb_op_fns *mb_ops,
ret = aesni_mb_set_session_aead_parameters(mb_ops, sess,
aead_xform);
if (ret != 0) {
- MB_LOG_ERR("Invalid/unsupported aead parameters");
+ AESNI_MB_LOG(ERR, "Invalid/unsupported aead parameters");
return ret;
}
}
@@ -444,7 +506,7 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
if (likely(op->sym->session != NULL))
sess = (struct aesni_mb_session *)
- get_session_private_data(
+ get_sym_session_private_data(
op->sym->session,
cryptodev_driver_id);
} else {
@@ -466,8 +528,8 @@ get_session(struct aesni_mb_qp *qp, struct rte_crypto_op *op)
sess = NULL;
}
op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
- set_session_private_data(op->sym->session, cryptodev_driver_id,
- _sess_private_data);
+ set_sym_session_private_data(op->sym->session,
+ cryptodev_driver_id, _sess_private_data);
}
if (unlikely(sess == NULL))
@@ -510,22 +572,39 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
job->cipher_mode = session->cipher.mode;
job->aes_key_len_in_bytes = session->cipher.key_length_in_bytes;
- job->aes_enc_key_expanded = session->cipher.expanded_aes_keys.encode;
- job->aes_dec_key_expanded = session->cipher.expanded_aes_keys.decode;
+
+ if (job->cipher_mode == DES3) {
+ job->aes_enc_key_expanded =
+ session->cipher.exp_3des_keys.ks_ptr;
+ job->aes_dec_key_expanded =
+ session->cipher.exp_3des_keys.ks_ptr;
+ } else {
+ job->aes_enc_key_expanded =
+ session->cipher.expanded_aes_keys.encode;
+ job->aes_dec_key_expanded =
+ session->cipher.expanded_aes_keys.decode;
+ }
+
+
/* Set authentication parameters */
job->hash_alg = session->auth.algo;
if (job->hash_alg == AES_XCBC) {
- job->_k1_expanded = session->auth.xcbc.k1_expanded;
- job->_k2 = session->auth.xcbc.k2;
- job->_k3 = session->auth.xcbc.k3;
+ job->u.XCBC._k1_expanded = session->auth.xcbc.k1_expanded;
+ job->u.XCBC._k2 = session->auth.xcbc.k2;
+ job->u.XCBC._k3 = session->auth.xcbc.k3;
} else if (job->hash_alg == AES_CCM) {
job->u.CCM.aad = op->sym->aead.aad.data + 18;
job->u.CCM.aad_len_in_bytes = session->aead.aad_len;
+ } else if (job->hash_alg == AES_CMAC) {
+ job->u.CMAC._key_expanded = session->auth.cmac.expkey;
+ job->u.CMAC._skey1 = session->auth.cmac.skey1;
+ job->u.CMAC._skey2 = session->auth.cmac.skey2;
+
} else {
- job->hashed_auth_key_xor_ipad = session->auth.pads.inner;
- job->hashed_auth_key_xor_opad = session->auth.pads.outer;
+ job->u.HMAC._hashed_auth_key_xor_ipad = session->auth.pads.inner;
+ job->u.HMAC._hashed_auth_key_xor_opad = session->auth.pads.outer;
}
/* Mutable crypto operation parameters */
@@ -536,7 +615,7 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
char *odata = rte_pktmbuf_append(m_dst,
rte_pktmbuf_data_len(op->sym->m_src));
if (odata == NULL) {
- MB_LOG_ERR("failed to allocate space in destination "
+ AESNI_MB_LOG(ERR, "failed to allocate space in destination "
"mbuf for source data");
op->status = RTE_CRYPTO_OP_STATUS_ERROR;
return -1;
@@ -568,11 +647,11 @@ set_mb_job_params(JOB_AES_HMAC *job, struct aesni_mb_qp *qp,
* Multi-buffer library current only support returning a truncated
* digest length as specified in the relevant IPsec RFCs
*/
- if (job->hash_alg != AES_CCM)
+ if (job->hash_alg != AES_CCM && job->hash_alg != AES_CMAC)
job->auth_tag_output_len_in_bytes =
get_truncated_digest_byte_length(job->hash_alg);
else
- job->auth_tag_output_len_in_bytes = session->aead.digest_len;
+ job->auth_tag_output_len_in_bytes = session->auth.digest_len;
/* Set IV parameters */
@@ -639,7 +718,7 @@ static inline struct rte_crypto_op *
post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
{
struct rte_crypto_op *op = (struct rte_crypto_op *)job->user_data;
- struct aesni_mb_session *sess = get_session_private_data(
+ struct aesni_mb_session *sess = get_sym_session_private_data(
op->sym->session,
cryptodev_driver_id);
@@ -663,7 +742,7 @@ post_process_mb_job(struct aesni_mb_qp *qp, JOB_AES_HMAC *job)
if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
memset(sess, 0, sizeof(struct aesni_mb_session));
memset(op->sym->session, 0,
- rte_cryptodev_get_header_session_size());
+ rte_cryptodev_sym_get_header_session_size());
rte_mempool_put(qp->sess_mp, sess);
rte_mempool_put(qp->sess_mp, op->sym->session);
op->sym->session = NULL;
@@ -702,7 +781,7 @@ handle_completed_jobs(struct aesni_mb_qp *qp, JOB_AES_HMAC *job,
if (processed_jobs == nb_ops)
break;
- job = (*qp->op_fns->job.get_completed_job)(&qp->mb_mgr);
+ job = (*qp->op_fns->job.get_completed_job)(qp->mb_mgr);
}
return processed_jobs;
@@ -715,7 +794,7 @@ flush_mb_mgr(struct aesni_mb_qp *qp, struct rte_crypto_op **ops,
int processed_ops = 0;
/* Flush the remaining jobs */
- JOB_AES_HMAC *job = (*qp->op_fns->job.flush_job)(&qp->mb_mgr);
+ JOB_AES_HMAC *job = (*qp->op_fns->job.flush_job)(qp->mb_mgr);
if (job)
processed_ops += handle_completed_jobs(qp, job,
@@ -760,14 +839,14 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
break;
/* Get next free mb job struct from mb manager */
- job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
+ job = (*qp->op_fns->job.get_next)(qp->mb_mgr);
if (unlikely(job == NULL)) {
/* if no free mb job structs we need to flush mb_mgr */
processed_jobs += flush_mb_mgr(qp,
&ops[processed_jobs],
(nb_ops - processed_jobs) - 1);
- job = (*qp->op_fns->job.get_next)(&qp->mb_mgr);
+ job = (*qp->op_fns->job.get_next)(qp->mb_mgr);
}
retval = set_mb_job_params(job, qp, op, &digest_idx);
@@ -777,7 +856,7 @@ aesni_mb_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
}
/* Submit job to multi-buffer for processing */
- job = (*qp->op_fns->job.submit)(&qp->mb_mgr);
+ job = (*qp->op_fns->job.submit)(qp->mb_mgr);
/*
* If submit returns a processed job then handle it,
@@ -813,13 +892,13 @@ cryptodev_aesni_mb_create(const char *name,
/* Check CPU for support for AES instruction set */
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AES)) {
- MB_LOG_ERR("AES instructions not supported by CPU");
+ AESNI_MB_LOG(ERR, "AES instructions not supported by CPU");
return -EFAULT;
}
dev = rte_cryptodev_pmd_create(name, &vdev->device, init_params);
if (dev == NULL) {
- MB_LOG_ERR("failed to create cryptodev vdev");
+ AESNI_MB_LOG(ERR, "failed to create cryptodev vdev");
return -ENODEV;
}
@@ -866,7 +945,13 @@ cryptodev_aesni_mb_create(const char *name,
internals->vector_mode = vector_mode;
internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs;
- internals->max_nb_sessions = init_params->max_nb_sessions;
+
+#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
+ AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: %s\n",
+ imb_get_version_str());
+#else
+ AESNI_MB_LOG(INFO, "IPSec Multi-buffer library version used: 0.49.0\n");
+#endif
return 0;
}
@@ -878,8 +963,7 @@ cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
"",
sizeof(struct aesni_mb_private),
rte_socket_id(),
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
- RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
+ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS
};
const char *name, *args;
int retval;
@@ -892,7 +976,7 @@ cryptodev_aesni_mb_probe(struct rte_vdev_device *vdev)
retval = rte_cryptodev_pmd_parse_input_args(&init_params, args);
if (retval) {
- MB_LOG_ERR("Failed to parse initialisation arguments[%s]\n",
+ AESNI_MB_LOG(ERR, "Failed to parse initialisation arguments[%s]",
args);
return -EINVAL;
}
@@ -928,8 +1012,12 @@ RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd_drv);
RTE_PMD_REGISTER_ALIAS(CRYPTODEV_NAME_AESNI_MB_PMD, cryptodev_aesni_mb_pmd);
RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_AESNI_MB_PMD,
"max_nb_queue_pairs=<int> "
- "max_nb_sessions=<int> "
"socket_id=<int>");
RTE_PMD_REGISTER_CRYPTO_DRIVER(aesni_mb_crypto_drv,
- cryptodev_aesni_mb_pmd_drv,
+ cryptodev_aesni_mb_pmd_drv.driver,
cryptodev_driver_id);
+
+RTE_INIT(aesni_mb_init_log)
+{
+ aesni_mb_logtype_driver = rte_log_register("pmd.crypto.aesni_mb");
+}
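
As a usage sketch (not part of the patch), this is roughly how an application could request the newly supported 3DES-CBC transform through the generic symmetric API; example_3des_key, the 16-byte key length and IV_OFFSET are illustrative values, chosen to exercise the K3 = K1 path added above.

#include <rte_crypto.h>
#include <rte_crypto_sym.h>

/* Illustrative 16-byte 3DES key: K1 || K2, with K3 = K1 per the PMD. */
static uint8_t example_3des_key[16];

/* IV placed immediately after the crypto op, a common DPDK convention. */
#define IV_OFFSET (sizeof(struct rte_crypto_op) + sizeof(struct rte_crypto_sym_op))

static const struct rte_crypto_sym_xform example_3des_xform = {
	.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
	.next = NULL,
	.cipher = {
		.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
		.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
		.key = { .data = example_3des_key, .length = 16 },
		.iv = { .offset = IV_OFFSET, .length = 8 },
	},
};
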
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
index 9d685a09..ab26e5ae 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c
@@ -239,6 +239,26 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
}, }
}, }
},
+ { /* 3DES CBC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
+ {.cipher = {
+ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
+ .block_size = 8,
+ .key_size = {
+ .min = 8,
+ .max = 24,
+ .increment = 8
+ },
+ .iv_size = {
+ .min = 8,
+ .max = 8,
+ .increment = 0
+ }
+ }, }
+ }, }
+ },
{ /* DES DOCSIS BPI */
.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
{.sym = {
@@ -289,8 +309,27 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = {
}, }
}, }
},
-
-
+ { /* AES CMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
+ {.auth = {
+ .algo = RTE_CRYPTO_AUTH_AES_CMAC,
+ .block_size = 16,
+ .key_size = {
+ .min = 16,
+ .max = 16,
+ .increment = 0
+ },
+ .digest_size = {
+ .min = 12,
+ .max = 16,
+ .increment = 4
+ },
+ .iv_size = { 0 }
+ }, }
+ }, }
+ },
RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
};
@@ -368,7 +407,8 @@ aesni_mb_pmd_info_get(struct rte_cryptodev *dev,
dev_info->feature_flags = dev->feature_flags;
dev_info->capabilities = aesni_mb_pmd_capabilities;
dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
- dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
+	/* No limit on the number of sessions */
+ dev_info->sym.max_nb_sessions = 0;
}
}
@@ -383,6 +423,8 @@ aesni_mb_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
r = rte_ring_lookup(qp->name);
if (r)
rte_ring_free(r);
+ if (qp->mb_mgr)
+ free_mb_mgr(qp->mb_mgr);
rte_free(qp);
dev->data->queue_pairs[qp_id] = NULL;
}
@@ -422,12 +464,12 @@ aesni_mb_pmd_qp_create_processed_ops_ring(struct aesni_mb_qp *qp,
r = rte_ring_lookup(ring_name);
if (r) {
if (rte_ring_get_size(r) >= ring_size) {
- MB_LOG_INFO("Reusing existing ring %s for processed ops",
+ AESNI_MB_LOG(INFO, "Reusing existing ring %s for processed ops",
ring_name);
return r;
}
- MB_LOG_ERR("Unable to reuse existing ring %s for processed ops",
+ AESNI_MB_LOG(ERR, "Unable to reuse existing ring %s for processed ops",
ring_name);
return NULL;
}
@@ -444,6 +486,7 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
{
struct aesni_mb_qp *qp = NULL;
struct aesni_mb_private *internals = dev->data->dev_private;
+ int ret = -1;
/* Free memory prior to re-allocation if needed. */
if (dev->data->queue_pairs[qp_id] != NULL)
@@ -462,12 +505,20 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
goto qp_setup_cleanup;
+ qp->mb_mgr = alloc_mb_mgr(0);
+ if (qp->mb_mgr == NULL) {
+ ret = -ENOMEM;
+ goto qp_setup_cleanup;
+ }
+
qp->op_fns = &job_ops[internals->vector_mode];
qp->ingress_queue = aesni_mb_pmd_qp_create_processed_ops_ring(qp,
"ingress", qp_conf->nb_descriptors, socket_id);
- if (qp->ingress_queue == NULL)
+ if (qp->ingress_queue == NULL) {
+ ret = -1;
goto qp_setup_cleanup;
+ }
qp->sess_mp = session_pool;
@@ -479,30 +530,17 @@ aesni_mb_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
"digest_mp_%u_%u", dev->data->dev_id, qp_id);
/* Initialise multi-buffer manager */
- (*qp->op_fns->job.init_mgr)(&qp->mb_mgr);
+ (*qp->op_fns->job.init_mgr)(qp->mb_mgr);
return 0;
qp_setup_cleanup:
- if (qp)
+ if (qp) {
+		if (qp->mb_mgr)
+ free_mb_mgr(qp->mb_mgr);
rte_free(qp);
+ }
- return -1;
-}
-
-/** Start queue pair */
-static int
-aesni_mb_pmd_qp_start(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
-}
-
-/** Stop queue pair */
-static int
-aesni_mb_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev,
- __rte_unused uint16_t queue_pair_id)
-{
- return -ENOTSUP;
+ return ret;
}
/** Return the number of allocated queue pairs */
@@ -514,14 +552,14 @@ aesni_mb_pmd_qp_count(struct rte_cryptodev *dev)
/** Returns the size of the aesni multi-buffer session structure */
static unsigned
-aesni_mb_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+aesni_mb_pmd_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
return sizeof(struct aesni_mb_session);
}
/** Configure a aesni multi-buffer session from a crypto xform chain */
static int
-aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
+aesni_mb_pmd_sym_session_configure(struct rte_cryptodev *dev,
struct rte_crypto_sym_xform *xform,
struct rte_cryptodev_sym_session *sess,
struct rte_mempool *mempool)
@@ -531,27 +569,27 @@ aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
int ret;
if (unlikely(sess == NULL)) {
- MB_LOG_ERR("invalid session struct");
+ AESNI_MB_LOG(ERR, "invalid session struct");
return -EINVAL;
}
if (rte_mempool_get(mempool, &sess_private_data)) {
- CDEV_LOG_ERR(
- "Couldn't get object from session mempool");
+ AESNI_MB_LOG(ERR,
+ "Couldn't get object from session mempool");
return -ENOMEM;
}
ret = aesni_mb_set_session_parameters(&job_ops[internals->vector_mode],
sess_private_data, xform);
if (ret != 0) {
- MB_LOG_ERR("failed configure session parameters");
+ AESNI_MB_LOG(ERR, "failed configure session parameters");
/* Return session to mempool */
rte_mempool_put(mempool, sess_private_data);
return ret;
}
- set_session_private_data(sess, dev->driver_id,
+ set_sym_session_private_data(sess, dev->driver_id,
sess_private_data);
return 0;
@@ -559,17 +597,17 @@ aesni_mb_pmd_session_configure(struct rte_cryptodev *dev,
/** Clear the memory of session so it doesn't leave key material behind */
static void
-aesni_mb_pmd_session_clear(struct rte_cryptodev *dev,
+aesni_mb_pmd_sym_session_clear(struct rte_cryptodev *dev,
struct rte_cryptodev_sym_session *sess)
{
uint8_t index = dev->driver_id;
- void *sess_priv = get_session_private_data(sess, index);
+ void *sess_priv = get_sym_session_private_data(sess, index);
/* Zero out the whole structure */
if (sess_priv) {
memset(sess_priv, 0, sizeof(struct aesni_mb_session));
struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
- set_session_private_data(sess, index, NULL);
+ set_sym_session_private_data(sess, index, NULL);
rte_mempool_put(sess_mp, sess_priv);
}
}
@@ -587,13 +625,11 @@ struct rte_cryptodev_ops aesni_mb_pmd_ops = {
.queue_pair_setup = aesni_mb_pmd_qp_setup,
.queue_pair_release = aesni_mb_pmd_qp_release,
- .queue_pair_start = aesni_mb_pmd_qp_start,
- .queue_pair_stop = aesni_mb_pmd_qp_stop,
.queue_pair_count = aesni_mb_pmd_qp_count,
- .session_get_size = aesni_mb_pmd_session_get_size,
- .session_configure = aesni_mb_pmd_session_configure,
- .session_clear = aesni_mb_pmd_session_clear
+ .sym_session_get_size = aesni_mb_pmd_sym_session_get_size,
+ .sym_session_configure = aesni_mb_pmd_sym_session_configure,
+ .sym_session_clear = aesni_mb_pmd_sym_session_clear
};
struct rte_cryptodev_ops *rte_aesni_mb_pmd_ops = &aesni_mb_pmd_ops;
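
A hedged sketch (not in the patch) of how an application might confirm the new AES-CMAC capability advertised by the table above; dev_id is assumed to identify an already-probed crypto_aesni_mb device, and example_check_cmac is an illustrative name.

#include <stdio.h>
#include <rte_cryptodev.h>

static void
example_check_cmac(uint8_t dev_id)
{
	struct rte_cryptodev_sym_capability_idx idx = {
		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
		.algo.auth = RTE_CRYPTO_AUTH_AES_CMAC,
	};
	const struct rte_cryptodev_symmetric_capability *cap =
		rte_cryptodev_sym_capability_get(dev_id, &idx);

	if (cap != NULL)
		/* Digest sizes 12..16 in steps of 4, per the entry above. */
		printf("AES-CMAC digest max: %u\n", cap->auth.digest_size.max);
}
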
diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
index 948e091c..70e9d18e 100644
--- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
+++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_private.h
@@ -7,28 +7,26 @@
#include "aesni_mb_ops.h"
+/*
+ * The IMB_VERSION_NUM macro was introduced in Multi-buffer version 0.50,
+ * so if it is not defined, the library version in use is 0.49.
+ */
+#if !defined(IMB_VERSION_NUM)
+#define IMB_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
+#define IMB_VERSION_NUM IMB_VERSION(0, 49, 0)
+#endif
+
#define CRYPTODEV_NAME_AESNI_MB_PMD crypto_aesni_mb
/**< AES-NI Multi buffer PMD device name */
-#define MB_LOG_ERR(fmt, args...) \
- RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- RTE_STR(CRYPTODEV_NAME_AESNI_MB_PMD), \
- __func__, __LINE__, ## args)
-
-#ifdef RTE_LIBRTE_AESNI_MB_DEBUG
-#define MB_LOG_INFO(fmt, args...) \
- RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- CRYPTODEV_NAME_AESNI_MB_PMD, \
- __func__, __LINE__, ## args)
-
-#define MB_LOG_DBG(fmt, args...) \
- RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
- CRYPTODEV_NAME_AESNI_MB_PMD, \
- __func__, __LINE__, ## args)
-#else
-#define MB_LOG_INFO(fmt, args...)
-#define MB_LOG_DBG(fmt, args...)
-#endif
+/** AESNI_MB PMD LOGTYPE DRIVER */
+int aesni_mb_logtype_driver;
+
+#define AESNI_MB_LOG(level, fmt, ...) \
+ rte_log(RTE_LOG_ ## level, aesni_mb_logtype_driver, \
+ "%s() line %u: " fmt "\n", __func__, __LINE__, \
+ ## __VA_ARGS__)
+
#define HMAC_IPAD_VALUE (0x36)
#define HMAC_OPAD_VALUE (0x5C)
@@ -66,8 +64,9 @@ static const unsigned auth_truncated_digest_byte_lengths[] = {
[SHA_384] = 24,
[SHA_512] = 32,
[AES_XCBC] = 12,
+ [AES_CMAC] = 16,
[AES_CCM] = 8,
- [NULL_HASH] = 0
+ [NULL_HASH] = 0
};
/**
@@ -91,7 +90,8 @@ static const unsigned auth_digest_byte_lengths[] = {
[SHA_384] = 48,
[SHA_512] = 64,
[AES_XCBC] = 16,
- [NULL_HASH] = 0
+ [AES_CMAC] = 16,
+ [NULL_HASH] = 0
};
/**
@@ -122,8 +122,6 @@ struct aesni_mb_private {
/**< CPU vector instruction set mode */
unsigned max_nb_queue_pairs;
/**< Max number of queue pairs supported by device */
- unsigned max_nb_sessions;
- /**< Max number of sessions supported by device */
};
/** AESNI Multi buffer queue pair */
@@ -134,7 +132,7 @@ struct aesni_mb_qp {
/**< Unique Queue Pair Name */
const struct aesni_mb_op_fns *op_fns;
/**< Vector mode dependent pointer table of the multi-buffer APIs */
- MB_MGR mb_mgr;
+ MB_MGR *mb_mgr;
/**< Multi-buffer instance */
struct rte_ring *ingress_queue;
/**< Ring for placing operations ready for processing */
@@ -171,12 +169,18 @@ struct aesni_mb_session {
uint64_t key_length_in_bytes;
- struct {
- uint32_t encode[60] __rte_aligned(16);
- /**< encode key */
- uint32_t decode[60] __rte_aligned(16);
- /**< decode key */
- } expanded_aes_keys;
+ union {
+ struct {
+ uint32_t encode[60] __rte_aligned(16);
+ /**< encode key */
+ uint32_t decode[60] __rte_aligned(16);
+ /**< decode key */
+ } expanded_aes_keys;
+ struct {
+ const void *ks_ptr[3];
+ uint64_t key[3][16];
+ } exp_3des_keys;
+ };
/**< Expanded AES keys - Allocating space to
* contain the maximum expanded key size which
* is 240 bytes for 256 bit AES, calculate by:
@@ -211,14 +215,24 @@ struct aesni_mb_session {
uint8_t k3[16] __rte_aligned(16);
/**< k3. */
} xcbc;
+
+ struct {
+ uint32_t expkey[60] __rte_aligned(16);
+			/**< Expanded CMAC key. */
+			uint32_t skey1[4] __rte_aligned(16);
+			/**< Subkey 1 (K1). */
+			uint32_t skey2[4] __rte_aligned(16);
+			/**< Subkey 2 (K2). */
+ } cmac;
/**< Expanded XCBC authentication keys */
};
+ /** digest size */
+ uint16_t digest_len;
+
} auth;
struct {
/** AAD data length */
uint16_t aad_len;
- /** digest size */
- uint16_t digest_len;
} aead;
} __rte_cache_aligned;
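
Finally, a minimal sketch (not part of the patch) of what the IMB_VERSION_NUM fallback above enables: compile-time selection between the 0.49 and 0.50+ conventions, mirroring the #if used in aesni_mb_set_session_cipher_parameters(). The function name is illustrative.

#include "rte_aesni_mb_pmd_private.h"

/* Report the per-job 3DES key length convention for this build. */
static inline unsigned int
example_3des_job_key_len(void)
{
#if IMB_VERSION_NUM >= IMB_VERSION(0, 50, 0)
	return 24;	/* 0.50+: full 24-byte 3DES key length per job */
#else
	return 8;	/* 0.49: length of a single DES key schedule */
#endif
}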