Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h           |  26
-rw-r--r--  drivers/crypto/scheduler/rte_cryptodev_scheduler.c  |   8
-rw-r--r--  drivers/crypto/scheduler/rte_cryptodev_scheduler.h  |   2
-rw-r--r--  drivers/crypto/scheduler/scheduler_multicore.c      |  53
-rw-r--r--  drivers/crypto/scheduler/scheduler_pkt_size_distr.c |   4
-rw-r--r--  drivers/crypto/scheduler/scheduler_pmd.c            |  68
-rw-r--r--  drivers/crypto/scheduler/scheduler_pmd_ops.c        |   9
-rw-r--r--  drivers/crypto/scheduler/scheduler_pmd_private.h    |   2
-rw-r--r--  drivers/crypto/zuc/rte_zuc_pmd.c                    | 108
9 files changed, 186 insertions, 94 deletions
diff --git a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
index 8e583803..ae8c0c30 100644
--- a/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
+++ b/drivers/crypto/dpaa2_sec/dpaa2_sec_priv.h
@@ -211,9 +211,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 					.increment = 1
 				},
 				.digest_size = {
-					.min = 16,
+					.min = 1,
 					.max = 16,
-					.increment = 0
+					.increment = 1
 				},
 				.iv_size = { 0 }
 			}, }
@@ -232,9 +232,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 					.increment = 1
 				},
 				.digest_size = {
-					.min = 20,
+					.min = 1,
 					.max = 20,
-					.increment = 0
+					.increment = 1
 				},
 				.iv_size = { 0 }
 			}, }
@@ -253,9 +253,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 					.increment = 1
 				},
 				.digest_size = {
-					.min = 28,
+					.min = 1,
 					.max = 28,
-					.increment = 0
+					.increment = 1
 				},
 				.iv_size = { 0 }
 			}, }
@@ -274,9 +274,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 					.increment = 1
 				},
 				.digest_size = {
-					.min = 32,
-					.max = 32,
-					.increment = 0
+					.min = 1,
+					.max = 32,
+					.increment = 1
 				},
 				.iv_size = { 0 }
 			}, }
@@ -295,9 +295,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 					.increment = 1
 				},
 				.digest_size = {
-					.min = 48,
+					.min = 1,
 					.max = 48,
-					.increment = 0
+					.increment = 1
 				},
 				.iv_size = { 0 }
 			}, }
@@ -316,9 +316,9 @@ static const struct rte_cryptodev_capabilities dpaa2_sec_capabilities[] = {
 					.increment = 1
 				},
 				.digest_size = {
-					.min = 64,
+					.min = 1,
 					.max = 64,
-					.increment = 0
+					.increment = 1
 				},
 				.iv_size = { 0 }
 			}, }
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
index 822ce27c..59ece957 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.c
@@ -119,8 +119,10 @@ update_scheduler_capability(struct scheduler_ctx *sched_ctx)
 	struct rte_cryptodev_capabilities tmp_caps[256] = { {0} };
 	uint32_t nb_caps = 0, i;
 
-	if (sched_ctx->capabilities)
+	if (sched_ctx->capabilities) {
 		rte_free(sched_ctx->capabilities);
+		sched_ctx->capabilities = NULL;
+	}
 
 	for (i = 0; i < sched_ctx->nb_slaves; i++) {
 		struct rte_cryptodev_info dev_info;
@@ -490,8 +492,10 @@ rte_cryptodev_scheduler_load_user_scheduler(uint8_t scheduler_id,
 	sched_ctx->ops.option_set = scheduler->ops->option_set;
 	sched_ctx->ops.option_get = scheduler->ops->option_get;
 
-	if (sched_ctx->private_ctx)
+	if (sched_ctx->private_ctx) {
 		rte_free(sched_ctx->private_ctx);
+		sched_ctx->private_ctx = NULL;
+	}
 
 	if (sched_ctx->ops.create_private_ctx) {
 		int ret = (*sched_ctx->ops.create_private_ctx)(dev);
diff --git a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
index df22f2a9..84917d1d 100644
--- a/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
+++ b/drivers/crypto/scheduler/rte_cryptodev_scheduler.h
@@ -59,7 +59,7 @@ extern "C" {
 #endif
 
 /** Maximum number of multi-core worker cores */
-#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES	(64)
+#define RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES	(RTE_MAX_LCORE - 1)
 
 /** Round-robin scheduling mode string */
 #define SCHEDULER_MODE_NAME_ROUND_ROBIN round-robin
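The dpaa2_sec hunks above relax each digest_size range from one fixed length (increment = 0) to any length from 1 byte up to the algorithm's full output, in 1-byte steps. A minimal sketch of how such a DPDK struct rte_crypto_param_range is interpreted when validating a requested digest length (the helper name is illustrative, not part of the patch):

#include <stdint.h>
#include <rte_cryptodev.h>

/* Illustrative helper: check a digest length against a capability
 * range such as { .min = 1, .max = 64, .increment = 1 }. */
static int
digest_size_supported(const struct rte_crypto_param_range *r, uint16_t len)
{
	if (len < r->min || len > r->max)
		return 0;
	/* increment == 0 means the single value 'min' is the only choice */
	if (r->increment == 0)
		return len == r->min;
	return ((len - r->min) % r->increment) == 0;
}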
diff --git a/drivers/crypto/scheduler/scheduler_multicore.c b/drivers/crypto/scheduler/scheduler_multicore.c
index 0cd5bce5..14c33b98 100644
--- a/drivers/crypto/scheduler/scheduler_multicore.c
+++ b/drivers/crypto/scheduler/scheduler_multicore.c
@@ -49,8 +49,8 @@ struct mc_scheduler_ctx {
 	uint32_t num_workers; /**< Number of workers polling */
 	uint32_t stop_signal;
 
-	struct rte_ring *sched_enq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
-	struct rte_ring *sched_deq_ring[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+	struct rte_ring *sched_enq_ring[RTE_MAX_LCORE];
+	struct rte_ring *sched_deq_ring[RTE_MAX_LCORE];
 };
 
 struct mc_scheduler_qp_ctx {
@@ -356,11 +356,13 @@ static int
 scheduler_create_private_ctx(struct rte_cryptodev *dev)
 {
 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
-	struct mc_scheduler_ctx *mc_ctx;
+	struct mc_scheduler_ctx *mc_ctx = NULL;
 	uint16_t i;
 
-	if (sched_ctx->private_ctx)
+	if (sched_ctx->private_ctx) {
 		rte_free(sched_ctx->private_ctx);
+		sched_ctx->private_ctx = NULL;
+	}
 
 	mc_ctx = rte_zmalloc_socket(NULL, sizeof(struct mc_scheduler_ctx), 0,
 			rte_socket_id());
@@ -373,25 +375,48 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)
 	for (i = 0; i < sched_ctx->nb_wc; i++) {
 		char r_name[16];
 
-		snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX "%u", i);
-		mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
-				rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+		snprintf(r_name, sizeof(r_name), MC_SCHED_ENQ_RING_NAME_PREFIX
+				"%u_%u", dev->data->dev_id, i);
+		mc_ctx->sched_enq_ring[i] = rte_ring_lookup(r_name);
 		if (!mc_ctx->sched_enq_ring[i]) {
-			CS_LOG_ERR("Cannot create ring for worker %u", i);
-			return -1;
+			mc_ctx->sched_enq_ring[i] = rte_ring_create(r_name,
+					PER_SLAVE_BUFF_SIZE,
+					rte_socket_id(),
+					RING_F_SC_DEQ | RING_F_SP_ENQ);
+			if (!mc_ctx->sched_enq_ring[i]) {
+				CS_LOG_ERR("Cannot create ring for worker %u",
+						i);
+				goto exit;
+			}
 		}
-		snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX "%u", i);
-		mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name, PER_SLAVE_BUFF_SIZE,
-				rte_socket_id(), RING_F_SC_DEQ | RING_F_SP_ENQ);
+		snprintf(r_name, sizeof(r_name), MC_SCHED_DEQ_RING_NAME_PREFIX
+				"%u_%u", dev->data->dev_id, i);
+		mc_ctx->sched_deq_ring[i] = rte_ring_lookup(r_name);
 		if (!mc_ctx->sched_deq_ring[i]) {
-			CS_LOG_ERR("Cannot create ring for worker %u", i);
-			return -1;
+			mc_ctx->sched_deq_ring[i] = rte_ring_create(r_name,
+					PER_SLAVE_BUFF_SIZE,
+					rte_socket_id(),
+					RING_F_SC_DEQ | RING_F_SP_ENQ);
+			if (!mc_ctx->sched_deq_ring[i]) {
+				CS_LOG_ERR("Cannot create ring for worker %u",
+						i);
+				goto exit;
+			}
 		}
 	}
 
 	sched_ctx->private_ctx = (void *)mc_ctx;
 
 	return 0;
+
+exit:
+	for (i = 0; i < sched_ctx->nb_wc; i++) {
+		rte_ring_free(mc_ctx->sched_enq_ring[i]);
+		rte_ring_free(mc_ctx->sched_deq_ring[i]);
+	}
+	rte_free(mc_ctx);
+
+	return -1;
 }
 
 struct rte_cryptodev_scheduler_ops scheduler_mc_ops = {
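The scheduler_multicore.c change does two things: ring names now include the device id ("%u_%u"), and creation first tries rte_ring_lookup() so that a re-initialized device reuses rings surviving from an earlier setup instead of failing in rte_ring_create(). The failure path also frees all rings via the new exit label rather than leaking them. A condensed sketch of the lookup-or-create pattern (helper name illustrative):

#include <rte_ring.h>
#include <rte_lcore.h>

static struct rte_ring *
get_or_create_ring(const char *name, unsigned int count)
{
	struct rte_ring *r = rte_ring_lookup(name);

	if (r != NULL)		/* reuse a ring from a previous init */
		return r;
	return rte_ring_create(name, count, rte_socket_id(),
			RING_F_SP_ENQ | RING_F_SC_DEQ);
}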
diff --git a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
index 1dd1bc32..4874191b 100644
--- a/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
+++ b/drivers/crypto/scheduler/scheduler_pkt_size_distr.c
@@ -362,8 +362,10 @@ scheduler_create_private_ctx(struct rte_cryptodev *dev)
 	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
 	struct psd_scheduler_ctx *psd_ctx;
 
-	if (sched_ctx->private_ctx)
+	if (sched_ctx->private_ctx) {
 		rte_free(sched_ctx->private_ctx);
+		sched_ctx->private_ctx = NULL;
+	}
 
 	psd_ctx = rte_zmalloc_socket(NULL, sizeof(struct psd_scheduler_ctx), 0,
 			rte_socket_id());
diff --git a/drivers/crypto/scheduler/scheduler_pmd.c b/drivers/crypto/scheduler/scheduler_pmd.c
index acdf6361..fcba1197 100644
--- a/drivers/crypto/scheduler/scheduler_pmd.c
+++ b/drivers/crypto/scheduler/scheduler_pmd.c
@@ -48,7 +48,8 @@ struct scheduler_init_params {
 	uint32_t nb_slaves;
 	enum rte_cryptodev_scheduler_mode mode;
 	uint32_t enable_ordering;
-	uint64_t wcmask;
+	uint16_t wc_pool[RTE_MAX_LCORE];
+	uint16_t nb_wc;
 	char slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES]
 			[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
 };
@@ -114,10 +115,6 @@ cryptodev_scheduler_create(const char *name,
 			return -EFAULT;
 	}
 
-	if (init_params->wcmask != 0)
-		RTE_LOG(INFO, PMD, "  workers core mask = %"PRIx64"\n",
-			init_params->wcmask);
-
 	dev->driver_id = cryptodev_driver_id;
 	dev->dev_ops = rte_crypto_scheduler_pmd_ops;
 
@@ -128,15 +125,12 @@ cryptodev_scheduler_create(const char *name,
 	if (init_params->mode == CDEV_SCHED_MODE_MULTICORE) {
 		uint16_t i;
 
-		sched_ctx->nb_wc = 0;
+		sched_ctx->nb_wc = init_params->nb_wc;
 
-		for (i = 0; i < RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES; i++) {
-			if (init_params->wcmask & (1ULL << i)) {
-				sched_ctx->wc_pool[sched_ctx->nb_wc++] = i;
-				RTE_LOG(INFO, PMD,
-					"  Worker core[%u]=%u added\n",
-					sched_ctx->nb_wc-1, i);
-			}
+		for (i = 0; i < sched_ctx->nb_wc; i++) {
+			sched_ctx->wc_pool[i] = init_params->wc_pool[i];
+			RTE_LOG(INFO, PMD, "  Worker core[%u]=%u added\n",
+				i, sched_ctx->wc_pool[i]);
 		}
 	}
 
@@ -260,9 +254,47 @@ static int
 parse_coremask_arg(const char *key __rte_unused,
 		const char *value, void *extra_args)
 {
+	int i, j, val;
+	uint16_t idx = 0;
+	char c;
 	struct scheduler_init_params *params = extra_args;
 
-	params->wcmask = strtoull(value, NULL, 16);
+	params->nb_wc = 0;
+
+	if (value == NULL)
+		return -1;
+	/* Remove all blank characters ahead and after .
+	 * Remove 0x/0X if exists.
+	 */
+	while (isblank(*value))
+		value++;
+	if (value[0] == '0' && ((value[1] == 'x') || (value[1] == 'X')))
+		value += 2;
+	i = strlen(value);
+	while ((i > 0) && isblank(value[i - 1]))
+		i--;
+
+	if (i == 0)
+		return -1;
+
+	for (i = i - 1; i >= 0 && idx < RTE_MAX_LCORE; i--) {
+		c = value[i];
+		if (isxdigit(c) == 0) {
+			/* invalid characters */
+			return -1;
+		}
+		if (isdigit(c))
+			val = c - '0';
+		else if (isupper(c))
+			val = c - 'A' + 10;
+		else
+			val = c - 'a' + 10;
+
+		for (j = 0; j < 4 && idx < RTE_MAX_LCORE; j++, idx++) {
+			if ((1 << j) & val)
+				params->wc_pool[params->nb_wc++] = idx;
+		}
+	}
 
 	return 0;
 }
@@ -274,7 +306,7 @@ parse_corelist_arg(const char *key __rte_unused,
 {
 	struct scheduler_init_params *params = extra_args;
 
-	params->wcmask = 0ULL;
+	params->nb_wc = 0;
 
 	const char *token = value;
 
@@ -282,7 +314,11 @@ parse_corelist_arg(const char *key __rte_unused,
 		char *rval;
 		unsigned int core = strtoul(token, &rval, 10);
 
-		params->wcmask |= 1ULL << core;
+		if (core >= RTE_MAX_LCORE) {
+			CS_LOG_ERR("Invalid worker core %u, should be smaller "
+				"than %u.\n", core, RTE_MAX_LCORE);
+		}
+		params->wc_pool[params->nb_wc++] = (uint16_t)core;
 		token = (const char *)rval;
 		if (token[0] == '\0')
 			break;
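A worked example of the rewritten coremask parser: the hex string is scanned from its last digit toward the first, and each set bit becomes one worker core index, so the pool comes out in ascending core order. Assuming the parse_coremask_arg() shown above:

	struct scheduler_init_params p = { 0 };

	/* "0x1b" = binary 0001 1011: digit 'b' (1011) yields cores 0, 1
	 * and 3; digit '1' (0001) yields core 4. */
	parse_coremask_arg(NULL, "0x1b", &p);
	/* Now p.nb_wc == 4 and p.wc_pool[] == { 0, 1, 3, 4 }. */

Note that parse_corelist_arg() logs an error for a core id >= RTE_MAX_LCORE but still appends it to wc_pool, so the message is a warning in effect.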
diff --git a/drivers/crypto/scheduler/scheduler_pmd_ops.c b/drivers/crypto/scheduler/scheduler_pmd_ops.c
index d9b52352..75433db2 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_ops.c
+++ b/drivers/crypto/scheduler/scheduler_pmd_ops.c
@@ -74,6 +74,7 @@ scheduler_attach_init_slave(struct rte_cryptodev *dev)
 				sched_ctx->init_slave_names[i]);
 
 		rte_free(sched_ctx->init_slave_names[i]);
+		sched_ctx->init_slave_names[i] = NULL;
 
 		sched_ctx->nb_init_slaves -= 1;
 	}
@@ -289,11 +290,15 @@ scheduler_pmd_close(struct rte_cryptodev *dev)
 		}
 	}
 
-	if (sched_ctx->private_ctx)
+	if (sched_ctx->private_ctx) {
 		rte_free(sched_ctx->private_ctx);
+		sched_ctx->private_ctx = NULL;
+	}
 
-	if (sched_ctx->capabilities)
+	if (sched_ctx->capabilities) {
 		rte_free(sched_ctx->capabilities);
+		sched_ctx->capabilities = NULL;
+	}
 
 	return 0;
 }
diff --git a/drivers/crypto/scheduler/scheduler_pmd_private.h b/drivers/crypto/scheduler/scheduler_pmd_private.h
index e606716a..bab4334d 100644
--- a/drivers/crypto/scheduler/scheduler_pmd_private.h
+++ b/drivers/crypto/scheduler/scheduler_pmd_private.h
@@ -89,7 +89,7 @@ struct scheduler_ctx {
 	char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
 	char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];
 
-	uint16_t wc_pool[RTE_CRYPTODEV_SCHEDULER_MAX_NB_WORKER_CORES];
+	uint16_t wc_pool[RTE_MAX_LCORE];
 	uint16_t nb_wc;
 
 	char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
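Clearing each pointer immediately after rte_free() — the pattern applied throughout this patch — makes teardown idempotent: a second scheduler_pmd_close() or a re-configure can neither double-free nor read a stale pointer, since rte_free(NULL) is a no-op and the NULL check now skips the whole block:

	if (sched_ctx->private_ctx) {
		rte_free(sched_ctx->private_ctx);
		sched_ctx->private_ctx = NULL;	/* guard against double free */
	}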
diff --git a/drivers/crypto/zuc/rte_zuc_pmd.c b/drivers/crypto/zuc/rte_zuc_pmd.c
index 590224bf..8b13be9e 100644
--- a/drivers/crypto/zuc/rte_zuc_pmd.c
+++ b/drivers/crypto/zuc/rte_zuc_pmd.c
@@ -40,7 +40,7 @@
 
 #include "rte_zuc_pmd_private.h"
 
-#define ZUC_MAX_BURST 8
+#define ZUC_MAX_BURST 4
 #define BYTE_LEN 8
 
 static uint8_t cryptodev_driver_id;
@@ -196,10 +196,10 @@ zuc_get_session(struct zuc_qp *qp, struct rte_crypto_op *op)
 	return sess;
 }
 
-/** Encrypt/decrypt mbufs with same cipher key. */
+/** Encrypt/decrypt mbufs. */
 static uint8_t
 process_zuc_cipher_op(struct rte_crypto_op **ops,
-		struct zuc_session *session,
+		struct zuc_session **sessions,
 		uint8_t num_ops)
 {
 	unsigned i;
@@ -208,6 +208,7 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
 	uint8_t *iv[ZUC_MAX_BURST];
 	uint32_t num_bytes[ZUC_MAX_BURST];
 	uint8_t *cipher_keys[ZUC_MAX_BURST];
+	struct zuc_session *sess;
 
 	for (i = 0; i < num_ops; i++) {
 		if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0)
@@ -218,6 +219,8 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
 			break;
 		}
 
+		sess = sessions[i];
+
 #ifdef RTE_LIBRTE_PMD_ZUC_DEBUG
 		if (!rte_pktmbuf_is_contiguous(ops[i]->sym->m_src) ||
 				(ops[i]->sym->m_dst != NULL &&
@@ -239,10 +242,10 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
 				rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
 				(ops[i]->sym->cipher.data.offset >> 3);
 		iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
-				session->cipher_iv_offset);
+				sess->cipher_iv_offset);
 		num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
 
-		cipher_keys[i] = session->pKey_cipher;
+		cipher_keys[i] = sess->pKey_cipher;
 
 		processed_ops++;
 	}
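Because batches are now keyed on operation type rather than on session, one batch may mix sessions (and thus keys and IV offsets). process_zuc_cipher_op() therefore gathers per-op parameters from sessions[i], as the hunk above shows, before handing the whole batch to the library's multi-buffer call. A condensed sketch of that gather step, reusing the arrays declared in the function (the multi-buffer call itself is elided, since its exact signature is not shown in this diff):

	for (i = 0; i < num_ops; i++) {
		src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
				(ops[i]->sym->cipher.data.offset >> 3);
		iv[i] = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
				sessions[i]->cipher_iv_offset);
		num_bytes[i] = ops[i]->sym->cipher.data.length >> 3;
		cipher_keys[i] = sessions[i]->pKey_cipher;	/* per-op key */
	}
	/* ... one multi-buffer ZUC call over all num_ops entries ... */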
@@ -253,10 +256,10 @@ process_zuc_cipher_op(struct rte_crypto_op **ops,
 	return processed_ops;
 }
 
-/** Generate/verify hash from mbufs with same hash key. */
+/** Generate/verify hash from mbufs. */
 static int
 process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
-		struct zuc_session *session,
+		struct zuc_session **sessions,
 		uint8_t num_ops)
 {
 	unsigned i;
@@ -265,6 +268,7 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
 	uint32_t *dst;
 	uint32_t length_in_bits;
 	uint8_t *iv;
+	struct zuc_session *sess;
 
 	for (i = 0; i < num_ops; i++) {
 		/* Data must be byte aligned */
@@ -274,17 +278,19 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
 			break;
 		}
 
+		sess = sessions[i];
+
 		length_in_bits = ops[i]->sym->auth.data.length;
 
 		src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) +
 				(ops[i]->sym->auth.data.offset >> 3);
 		iv = rte_crypto_op_ctod_offset(ops[i], uint8_t *,
-				session->auth_iv_offset);
+				sess->auth_iv_offset);
 
-		if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
+		if (sess->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) {
 			dst = (uint32_t *)qp->temp_digest;
 
-			sso_zuc_eia3_1_buffer(session->pKey_hash,
+			sso_zuc_eia3_1_buffer(sess->pKey_hash,
 					iv, src,
 					length_in_bits, dst);
 			/* Verify digest. */
@@ -294,7 +300,7 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
 		} else {
 			dst = (uint32_t *)ops[i]->sym->auth.digest.data;
 
-			sso_zuc_eia3_1_buffer(session->pKey_hash,
+			sso_zuc_eia3_1_buffer(sess->pKey_hash,
 					iv, src,
 					length_in_bits, dst);
 		}
@@ -304,33 +310,34 @@ process_zuc_hash_op(struct zuc_qp *qp, struct rte_crypto_op **ops,
 	return processed_ops;
 }
 
-/** Process a batch of crypto ops which shares the same session. */
+/** Process a batch of crypto ops which shares the same operation type. */
 static int
-process_ops(struct rte_crypto_op **ops, struct zuc_session *session,
+process_ops(struct rte_crypto_op **ops, enum zuc_operation op_type,
+		struct zuc_session **sessions,
 		struct zuc_qp *qp, uint8_t num_ops,
 		uint16_t *accumulated_enqueued_ops)
 {
 	unsigned i;
 	unsigned enqueued_ops, processed_ops;
 
-	switch (session->op) {
+	switch (op_type) {
 	case ZUC_OP_ONLY_CIPHER:
 		processed_ops = process_zuc_cipher_op(ops,
-				session, num_ops);
+				sessions, num_ops);
 		break;
 	case ZUC_OP_ONLY_AUTH:
-		processed_ops = process_zuc_hash_op(qp, ops, session,
+		processed_ops = process_zuc_hash_op(qp, ops, sessions,
 				num_ops);
 		break;
 	case ZUC_OP_CIPHER_AUTH:
-		processed_ops = process_zuc_cipher_op(ops, session,
+		processed_ops = process_zuc_cipher_op(ops, sessions,
 				num_ops);
-		process_zuc_hash_op(qp, ops, session, processed_ops);
+		process_zuc_hash_op(qp, ops, sessions, processed_ops);
 		break;
 	case ZUC_OP_AUTH_CIPHER:
-		processed_ops = process_zuc_hash_op(qp, ops, session,
+		processed_ops = process_zuc_hash_op(qp, ops, sessions,
 				num_ops);
-		process_zuc_cipher_op(ops, session, processed_ops);
+		process_zuc_cipher_op(ops, sessions, processed_ops);
 		break;
 	default:
 		/* Operation not supported. */
@@ -346,10 +353,10 @@ process_ops(struct rte_crypto_op **ops, struct zuc_session *session,
 		ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
 		/* Free session if a session-less crypto op. */
 		if (ops[i]->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
-			memset(session, 0, sizeof(struct zuc_session));
+			memset(sessions[i], 0, sizeof(struct zuc_session));
 			memset(ops[i]->sym->session, 0,
 					rte_cryptodev_get_header_session_size());
-			rte_mempool_put(qp->sess_mp, session);
+			rte_mempool_put(qp->sess_mp, sessions[i]);
 			rte_mempool_put(qp->sess_mp, ops[i]->sym->session);
 			ops[i]->sym->session = NULL;
 		}
@@ -370,7 +377,10 @@ zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
 	struct rte_crypto_op *c_ops[ZUC_MAX_BURST];
 	struct rte_crypto_op *curr_c_op;
 
-	struct zuc_session *prev_sess = NULL, *curr_sess = NULL;
+	struct zuc_session *curr_sess;
+	struct zuc_session *sessions[ZUC_MAX_BURST];
+	enum zuc_operation prev_zuc_op = ZUC_OP_NOT_SUPPORTED;
+	enum zuc_operation curr_zuc_op;
 	struct zuc_qp *qp = queue_pair;
 	unsigned i;
 	uint8_t burst_size = 0;
@@ -380,9 +390,6 @@ zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
 	for (i = 0; i < nb_ops; i++) {
 		curr_c_op = ops[i];
 
-		/* Set status as enqueued (not processed yet) by default. */
-		curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-
 		curr_sess = zuc_get_session(qp, curr_c_op);
 		if (unlikely(curr_sess == NULL ||
 				curr_sess->op == ZUC_OP_NOT_SUPPORTED)) {
@@ -391,50 +398,63 @@ zuc_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
 			break;
 		}
 
-		/* Batch ops that share the same session. */
-		if (prev_sess == NULL) {
-			prev_sess = curr_sess;
-			c_ops[burst_size++] = curr_c_op;
-		} else if (curr_sess == prev_sess) {
-			c_ops[burst_size++] = curr_c_op;
+		curr_zuc_op = curr_sess->op;
+
+		/*
+		 * Batch ops that share the same operation type
+		 * (cipher only, auth only...).
+		 */
+		if (burst_size == 0) {
+			prev_zuc_op = curr_zuc_op;
+			c_ops[0] = curr_c_op;
+			sessions[0] = curr_sess;
+			burst_size++;
+		} else if (curr_zuc_op == prev_zuc_op) {
+			c_ops[burst_size] = curr_c_op;
+			sessions[burst_size] = curr_sess;
+			burst_size++;
 			/*
 			 * When there are enough ops to process in a batch,
 			 * process them, and start a new batch.
 			 */
 			if (burst_size == ZUC_MAX_BURST) {
-				processed_ops = process_ops(c_ops, prev_sess,
-						qp, burst_size, &enqueued_ops);
+				processed_ops = process_ops(c_ops, curr_zuc_op,
+						sessions, qp, burst_size,
+						&enqueued_ops);
 				if (processed_ops < burst_size) {
 					burst_size = 0;
 					break;
 				}
 
 				burst_size = 0;
-				prev_sess = NULL;
 			}
 		} else {
 			/*
-			 * Different session, process the ops
-			 * of the previous session.
+			 * Different operation type, process the ops
+			 * of the previous type.
			 */
-			processed_ops = process_ops(c_ops, prev_sess,
-					qp, burst_size, &enqueued_ops);
+			processed_ops = process_ops(c_ops, prev_zuc_op,
+					sessions, qp, burst_size,
+					&enqueued_ops);
 			if (processed_ops < burst_size) {
 				burst_size = 0;
 				break;
 			}
 
 			burst_size = 0;
-			prev_sess = curr_sess;
+			prev_zuc_op = curr_zuc_op;
 
-			c_ops[burst_size++] = curr_c_op;
+			c_ops[0] = curr_c_op;
+			sessions[0] = curr_sess;
+			burst_size++;
 		}
 	}
 
 	if (burst_size != 0) {
-		/* Process the crypto ops of the last session. */
-		processed_ops = process_ops(c_ops, prev_sess,
-				qp, burst_size, &enqueued_ops);
+		/* Process the crypto ops of the last operation type. */
+		processed_ops = process_ops(c_ops, prev_zuc_op,
+				sessions, qp, burst_size,
+				&enqueued_ops);
 	}
 
 	qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops;
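With sessions no longer defining batch boundaries, zuc_pmd_enqueue_burst() flushes a batch on two conditions only: the operation type changes, or ZUC_MAX_BURST ops have accumulated. A self-contained toy model of that grouping rule (all names illustrative, not part of the PMD):

#include <stdio.h>

#define MAX_BURST 4	/* mirrors the new ZUC_MAX_BURST */

enum op_type { OP_CIPHER, OP_AUTH, OP_CIPHER_AUTH };

static void
flush(int n, enum op_type t)
{
	printf("process %d op(s) of type %d\n", n, t);
}

int
main(void)
{
	enum op_type ops[] = { OP_CIPHER, OP_CIPHER, OP_AUTH,
			OP_AUTH, OP_AUTH, OP_CIPHER_AUTH };
	enum op_type prev = ops[0];
	int i, n = 0;

	for (i = 0; i < 6; i++) {
		/* flush when the type changes or the burst is full */
		if (n != 0 && (ops[i] != prev || n == MAX_BURST)) {
			flush(n, prev);
			n = 0;
		}
		prev = ops[i];
		n++;
	}
	if (n != 0)
		flush(n, prev);	/* the last batch */
	return 0;
}

This prints "process 2 op(s) of type 0", "process 3 op(s) of type 1", then "process 1 op(s) of type 2".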