diff options
author | Piotr Bronowski <piotrx.bronowski@intel.com> | 2023-09-14 17:41:13 +0000 |
---|---|---|
committer | Andrew Yourtchenko <ayourtch@gmail.com> | 2023-10-25 17:18:40 +0000 |
commit | 74209bac286d10d39b9fa6f3e673ff89713e734f (patch) | |
tree | 2e342c5bb9cd28b2cc2426abb2663ce4cc503cd2 /src/plugins/dpdk/cryptodev/cryptodev_raw_data_path.c | |
parent | 7c4027fa5e42a8cc7176cd62ab7a0043fb1933ff (diff) |
dpdk-cryptodev: improve dequeue behavior, fix cache stats logging
This patch provides minor improvements to the logic governing dequeuing
from the ring. Previously whenever a frame was dequeued
we kept trying to dequeue another one from the ring until
inflight == 0. Now a threshold is set at 8 frames pending in the cache
to be consumed by the vnet. This threshold has been chosen based on
cache ring stats observation in the system under load.
Some unnecessary logic for setting deq_tail has been removed.
Also, logging has been corrected and the cache ring logic simplified.
Type: improvement
Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
Change-Id: I19f3daf5913006e9cb23e142a163f596e85f5bda
(cherry picked from commit 7cc17f6df9b3f4b45aaac16ba0aa098d6cd58794)
Diffstat (limited to 'src/plugins/dpdk/cryptodev/cryptodev_raw_data_path.c')
-rw-r--r-- | src/plugins/dpdk/cryptodev/cryptodev_raw_data_path.c | 49 |
1 files changed, 19 insertions, 30 deletions
diff --git a/src/plugins/dpdk/cryptodev/cryptodev_raw_data_path.c b/src/plugins/dpdk/cryptodev/cryptodev_raw_data_path.c index 19291eb3b59..9ac0f940948 100644 --- a/src/plugins/dpdk/cryptodev/cryptodev_raw_data_path.c +++ b/src/plugins/dpdk/cryptodev/cryptodev_raw_data_path.c @@ -118,6 +118,9 @@ cryptodev_frame_linked_algs_enqueue (vlib_main_t *vm, cryptodev_cache_ring_elt_t *ring_elt = cryptodev_cache_ring_push (ring, frame); + if (PREDICT_FALSE (ring_elt == NULL)) + return -1; + ring_elt->aad_len = 1; ring_elt->op_type = (u8) op_type; return 0; @@ -272,6 +275,9 @@ cryptodev_raw_aead_enqueue (vlib_main_t *vm, vnet_crypto_async_frame_t *frame, cryptodev_cache_ring_elt_t *ring_elt = cryptodev_cache_ring_push (ring, frame); + if (PREDICT_FALSE (ring_elt == NULL)) + return -1; + ring_elt->aad_len = aad_len; ring_elt->op_type = (u8) op_type; return 0; @@ -466,32 +472,17 @@ cryptodev_raw_dequeue_internal (vlib_main_t *vm, u32 *nb_elts_processed, cryptodev_cache_ring_t *ring = &cet->cache_ring; u16 *const deq = &ring->deq_tail; u32 n_success; - u16 n_deq, indice, i, left_to_deq; + u16 n_deq, i, left_to_deq; u16 max_to_deq = 0; u16 inflight = cet->inflight; u8 dequeue_more = 0; int dequeue_status; - indice = *deq; - - for (i = 0; i < VNET_CRYPTO_FRAME_POOL_SIZE; i++) - { - if (PREDICT_TRUE ( - CRYPTODEV_CACHE_RING_GET_FRAME_ELTS_INFLIGHT (ring, indice) > 0)) - break; - indice += 1; - indice &= CRYPTODEV_CACHE_QUEUE_MASK; - } - - ERROR_ASSERT (i != VNET_CRYPTO_FRAME_POOL_SIZE); - - *deq = indice; - left_to_deq = ring->frames[*deq].n_elts - ring->frames[*deq].deq_elts_tail; max_to_deq = clib_min (left_to_deq, CRYPTODE_DEQ_MAX); - /* you can use deq field to track frame that is currently dequeued */ - /* based on that you can specify the amount of elements to deq for the frame + /* deq field can be used to track frame that is currently dequeued */ + /* based on thatthe amount of elements to deq for the frame can be specified */ n_deq = rte_cryptodev_raw_dequeue_burst ( @@ 
-516,9 +507,13 @@ cryptodev_raw_dequeue_internal (vlib_main_t *vm, u32 *nb_elts_processed, if (cryptodev_cache_ring_update_deq_tail (ring, deq)) { + u32 fr_processed = + (CRYPTODEV_CACHE_QUEUE_SIZE - ring->tail + ring->deq_tail) & + CRYPTODEV_CACHE_QUEUE_MASK; + *nb_elts_processed = frame->n_elts; *enqueue_thread_idx = frame->enqueue_thread_index; - dequeue_more = max_to_deq < CRYPTODE_DEQ_MAX; + dequeue_more = (fr_processed < CRYPTODEV_MAX_PROCESED_IN_CACHE_QUEUE); } int res = @@ -555,24 +550,18 @@ cryptodev_raw_dequeue (vlib_main_t *vm, u32 *nb_elts_processed, u8 dequeue_more = 1; while (cet->inflight > 0 && dequeue_more) - { dequeue_more = cryptodev_raw_dequeue_internal (vm, nb_elts_processed, enqueue_thread_idx); - } if (PREDICT_TRUE (ring->frames[ring->enq_head].f != 0)) cryptodev_enqueue_frame_to_qat (vm, &ring->frames[ring->enq_head]); - if (PREDICT_TRUE (ring_elt->f != 0)) + if (PREDICT_TRUE (ring_elt->f != 0) && + (ring_elt->n_elts == ring_elt->deq_elts_tail)) { - if (ring_elt->enq_elts_head == ring_elt->deq_elts_tail) - { - vlib_node_set_interrupt_pending ( - vlib_get_main_by_index (vm->thread_index), cm->crypto_node_index); - ret_frame = cryptodev_cache_ring_pop (ring); - - return ret_frame; - } + vlib_node_set_interrupt_pending ( + vlib_get_main_by_index (vm->thread_index), cm->crypto_node_index); + ret_frame = cryptodev_cache_ring_pop (ring); } return ret_frame; |