author     Piotr Bronowski <piotrx.bronowski@intel.com>    2023-12-04 14:47:48 +0000
committer  Fan Zhang <fanzhang.oss@gmail.com>              2023-12-07 15:45:36 +0000
commit     864eaae5bb648ad18b320f9ae9043eed440767db (patch)
tree       72214edf249830c259920143172ec46bff418ee3
parent     caf12e6ab6d01854ee6399fec3dbb578eba1e596 (diff)
dpdk-cryptodev: fix crypto-dispatch node stats
This patch fixes the counter of processed vectors in the crypto-dispatch
node: the number of processed elements is now reported only when a fully
processed frame is popped from the cache ring, rather than on every
internal dequeue pass (a simplified sketch of the change follows the
trailers below).
Type: fix
Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
Change-Id: Icaeb925a352a9ac766652f43c4e752f6727cdeb9
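The patch removes the nb_elts_processed out-parameter from the per-pass dequeue helpers and instead sets it in the callers, only once a frame's deq_elts_tail has caught up with its n_elts and the completed frame is about to be popped from the cache ring. The standalone C sketch below illustrates that counting rule; frame_t and dequeue_pass are hypothetical simplified stand-ins, not the VPP or cryptodev APIs.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified stand-in for a cache-ring frame; the field
 * names mirror the diff (n_elts, deq_elts_tail) but nothing else from
 * vlib/cryptodev is used here. */
typedef struct
{
  uint32_t n_elts;	  /* total vectors enqueued in this frame */
  uint32_t deq_elts_tail; /* vectors completed by the device so far */
} frame_t;

/* One dequeue pass: pretend the device completed `done` more vectors.
 * After the fix, this helper no longer touches the stats counter. */
static int
dequeue_pass (frame_t *f, uint32_t done)
{
  f->deq_elts_tail += done;
  return f->deq_elts_tail < f->n_elts; /* dequeue_more */
}

int
main (void)
{
  frame_t f = { .n_elts = 64, .deq_elts_tail = 0 };
  uint32_t nb_elts_processed = 0;

  /* Drain the frame in partial bursts, as the dispatch node does. */
  while (dequeue_pass (&f, 16))
    ;

  /* Only when the whole frame is done is the counter written, once,
   * with the frame size - the behaviour the fix restores. */
  if (f.n_elts == f.deq_elts_tail)
    nb_elts_processed = f.n_elts;

  printf ("processed vectors reported: %u\n", nb_elts_processed); /* 64 */
  return 0;
}

In the patch itself the same rule is applied in both the op and raw data paths: *nb_elts_processed = ring_elt->n_elts; is written immediately before the completed frame is popped with cryptodev_cache_ring_pop ().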
-rw-r--r--   src/plugins/dpdk/cryptodev/cryptodev_op_data_path.c  | 10
-rw-r--r--   src/plugins/dpdk/cryptodev/cryptodev_raw_data_path.c |  8
2 files changed, 7 insertions(+), 11 deletions(-)
diff --git a/src/plugins/dpdk/cryptodev/cryptodev_op_data_path.c b/src/plugins/dpdk/cryptodev/cryptodev_op_data_path.c
index d0f093e1414..a3e66b25653 100644
--- a/src/plugins/dpdk/cryptodev/cryptodev_op_data_path.c
+++ b/src/plugins/dpdk/cryptodev/cryptodev_op_data_path.c
@@ -461,8 +461,7 @@ error_exit:
 }
 
 static_always_inline u8
-cryptodev_frame_dequeue_internal (vlib_main_t *vm, u32 *nb_elts_processed,
-                                  u32 *enqueue_thread_idx)
+cryptodev_frame_dequeue_internal (vlib_main_t *vm, u32 *enqueue_thread_idx)
 {
   cryptodev_main_t *cmt = &cryptodev_main;
   cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
@@ -541,7 +540,6 @@ cryptodev_frame_dequeue_internal (vlib_main_t *vm, u32 *nb_elts_processed,
         (CRYPTODEV_CACHE_QUEUE_SIZE - ring->tail + ring->deq_tail) &
         CRYPTODEV_CACHE_QUEUE_MASK;
 
-      *nb_elts_processed = frame->n_elts;
       *enqueue_thread_idx = frame->enqueue_thread_index;
       dequeue_more = (fr_processed < CRYPTODEV_MAX_PROCESED_IN_CACHE_QUEUE);
     }
@@ -578,8 +576,7 @@ cryptodev_frame_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
 
   while (cet->inflight > 0 && dequeue_more)
     {
-      dequeue_more = cryptodev_frame_dequeue_internal (vm, nb_elts_processed,
-                                                       enqueue_thread_idx);
+      dequeue_more = cryptodev_frame_dequeue_internal (vm, enqueue_thread_idx);
     }
 
   if (PREDICT_TRUE (ring->frames[ring->enq_head].f != 0))
@@ -587,8 +584,9 @@ cryptodev_frame_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
 
   if (PREDICT_TRUE (ring_elt->f != 0))
     {
-      if (ring_elt->enq_elts_head == ring_elt->deq_elts_tail)
+      if (ring_elt->n_elts == ring_elt->deq_elts_tail)
         {
+          *nb_elts_processed = ring_elt->n_elts;
           vlib_node_set_interrupt_pending (
             vlib_get_main_by_index (vm->thread_index), cm->crypto_node_index);
           ret_frame = cryptodev_cache_ring_pop (ring);
diff --git a/src/plugins/dpdk/cryptodev/cryptodev_raw_data_path.c b/src/plugins/dpdk/cryptodev/cryptodev_raw_data_path.c
index 9ac0f940948..67ab9c89e67 100644
--- a/src/plugins/dpdk/cryptodev/cryptodev_raw_data_path.c
+++ b/src/plugins/dpdk/cryptodev/cryptodev_raw_data_path.c
@@ -463,8 +463,7 @@ cryptodev_post_dequeue (void *frame, u32 index, u8 is_op_success)
 }
 
 static_always_inline u8
-cryptodev_raw_dequeue_internal (vlib_main_t *vm, u32 *nb_elts_processed,
-                                u32 *enqueue_thread_idx)
+cryptodev_raw_dequeue_internal (vlib_main_t *vm, u32 *enqueue_thread_idx)
 {
   cryptodev_main_t *cmt = &cryptodev_main;
   cryptodev_engine_thread_t *cet = cmt->per_thread_data + vm->thread_index;
@@ -511,7 +510,6 @@ cryptodev_raw_dequeue_internal (vlib_main_t *vm, u32 *nb_elts_processed,
         (CRYPTODEV_CACHE_QUEUE_SIZE - ring->tail + ring->deq_tail) &
         CRYPTODEV_CACHE_QUEUE_MASK;
 
-      *nb_elts_processed = frame->n_elts;
       *enqueue_thread_idx = frame->enqueue_thread_index;
       dequeue_more = (fr_processed < CRYPTODEV_MAX_PROCESED_IN_CACHE_QUEUE);
     }
@@ -550,8 +548,7 @@ cryptodev_raw_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
   u8 dequeue_more = 1;
 
   while (cet->inflight > 0 && dequeue_more)
-    dequeue_more = cryptodev_raw_dequeue_internal (vm, nb_elts_processed,
-                                                   enqueue_thread_idx);
+    dequeue_more = cryptodev_raw_dequeue_internal (vm, enqueue_thread_idx);
 
   if (PREDICT_TRUE (ring->frames[ring->enq_head].f != 0))
     cryptodev_enqueue_frame_to_qat (vm, &ring->frames[ring->enq_head]);
@@ -559,6 +556,7 @@ cryptodev_raw_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
   if (PREDICT_TRUE (ring_elt->f != 0) &&
       (ring_elt->n_elts == ring_elt->deq_elts_tail))
     {
+      *nb_elts_processed = ring_elt->n_elts;
       vlib_node_set_interrupt_pending (
         vlib_get_main_by_index (vm->thread_index), cm->crypto_node_index);
       ret_frame = cryptodev_cache_ring_pop (ring);