Diffstat (limited to 'src/vnet/crypto/node.c')
-rw-r--r-- | src/vnet/crypto/node.c | 102
1 file changed, 60 insertions, 42 deletions
diff --git a/src/vnet/crypto/node.c b/src/vnet/crypto/node.c
index 51ee63d1d62..12e6033ad80 100644
--- a/src/vnet/crypto/node.c
+++ b/src/vnet/crypto/node.c
@@ -74,60 +74,78 @@ vnet_crypto_async_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
 static_always_inline u32
 crypto_dequeue_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
                       vnet_crypto_thread_t * ct,
-                      vnet_crypto_frame_dequeue_t * hdl,
-                      u32 n_cache, u32 * n_total)
+                      vnet_crypto_frame_dequeue_t * hdl, u32 n_cache,
+                      u32 * n_total)
 {
-  vnet_crypto_async_frame_t *cf = (hdl) (vm);
+  vnet_crypto_main_t *cm = &crypto_main;
+  u32 n_elts = 0;
+  u32 enqueue_thread_idx = ~0;
+  vnet_crypto_async_frame_t *cf = (hdl) (vm, &n_elts, &enqueue_thread_idx);
+  *n_total += n_elts;
 
-  while (cf)
+  while (cf || n_elts)
     {
-      vec_validate (ct->buffer_indice, n_cache + cf->n_elts);
-      vec_validate (ct->nexts, n_cache + cf->n_elts);
-      clib_memcpy_fast (ct->buffer_indice + n_cache, cf->buffer_indices,
-                        sizeof (u32) * cf->n_elts);
-      if (cf->state == VNET_CRYPTO_FRAME_STATE_SUCCESS)
-        {
-          clib_memcpy_fast (ct->nexts + n_cache, cf->next_node_index,
-                            sizeof (u16) * cf->n_elts);
-        }
-      else
+      if (cf)
         {
-          u32 i;
-          for (i = 0; i < cf->n_elts; i++)
+          vec_validate (ct->buffer_indice, n_cache + cf->n_elts);
+          vec_validate (ct->nexts, n_cache + cf->n_elts);
+          clib_memcpy_fast (ct->buffer_indice + n_cache, cf->buffer_indices,
+                            sizeof (u32) * cf->n_elts);
+          if (cf->state == VNET_CRYPTO_FRAME_STATE_SUCCESS)
+            {
+              clib_memcpy_fast (ct->nexts + n_cache, cf->next_node_index,
+                                sizeof (u16) * cf->n_elts);
+            }
+          else
             {
-              if (cf->elts[i].status != VNET_CRYPTO_OP_STATUS_COMPLETED)
+              u32 i;
+              for (i = 0; i < cf->n_elts; i++)
                 {
-                  ct->nexts[i + n_cache] = CRYPTO_DISPATCH_NEXT_ERR_DROP;
-                  vlib_node_increment_counter (vm, node->node_index,
-                                               cf->elts[i].status, 1);
+                  if (cf->elts[i].status != VNET_CRYPTO_OP_STATUS_COMPLETED)
+                    {
+                      ct->nexts[i + n_cache] = CRYPTO_DISPATCH_NEXT_ERR_DROP;
+                      vlib_node_increment_counter (vm, node->node_index,
+                                                   cf->elts[i].status, 1);
+                    }
+                  else
+                    ct->nexts[i + n_cache] = cf->next_node_index[i];
                 }
-              else
-                ct->nexts[i + n_cache] = cf->next_node_index[i];
             }
-        }
-      n_cache += cf->n_elts;
-      *n_total += cf->n_elts;
-      if (n_cache >= VLIB_FRAME_SIZE)
-        {
-          vlib_buffer_enqueue_to_next (vm, node, ct->buffer_indice, ct->nexts,
-                                       n_cache);
-          n_cache = 0;
-        }
-
-      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
-        {
-          u32 i;
+          n_cache += cf->n_elts;
+          if (n_cache >= VLIB_FRAME_SIZE)
+            {
+              vlib_buffer_enqueue_to_next (vm, node, ct->buffer_indice,
+                                           ct->nexts, n_cache);
+              n_cache = 0;
+            }
 
-          for (i = 0; i < cf->n_elts; i++)
+          if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
             {
-              vlib_buffer_t *b = vlib_get_buffer (vm, cf->buffer_indices[i]);
-              if (b->flags & VLIB_BUFFER_IS_TRACED)
-                vnet_crypto_async_add_trace (vm, node, b, cf->op,
-                                             cf->elts[i].status);
+              u32 i;
+
+              for (i = 0; i < cf->n_elts; i++)
+                {
+                  vlib_buffer_t *b = vlib_get_buffer (vm,
+                                                      cf->buffer_indices[i]);
+                  if (b->flags & VLIB_BUFFER_IS_TRACED)
+                    vnet_crypto_async_add_trace (vm, node, b, cf->op,
+                                                 cf->elts[i].status);
+                }
             }
+          vnet_crypto_async_free_frame (vm, cf);
+        }
+      /* signal enqueue-thread to dequeue the processed frame (n_elts>0) */
+      if (cm->dispatch_mode == VNET_CRYPTO_ASYNC_DISPATCH_INTERRUPT
+          && n_elts > 0)
+        {
+          vlib_node_set_interrupt_pending (vlib_mains[enqueue_thread_idx],
+                                           cm->crypto_node_index);
         }
-      vnet_crypto_async_free_frame (vm, cf);
-      cf = (hdl) (vm);
+
+      n_elts = 0;
+      enqueue_thread_idx = 0;
+      cf = (hdl) (vm, &n_elts, &enqueue_thread_idx);
+      *n_total += n_elts;
     }
   return n_cache;
 }
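
Note: the new call sites above, (hdl) (vm, &n_elts, &enqueue_thread_idx) replacing (hdl) (vm), imply that this change also widens the vnet_crypto_frame_dequeue_t callback type so an engine's dequeue handler can report how many elements it completed and which thread enqueued the frame. A minimal sketch of what that callback type could look like, inferred from the call sites in this diff rather than copied from crypto.h (parameter names are illustrative):

    /* Sketch only: signature inferred from this diff, not from crypto.h.
     * The handler returns a completed frame (or NULL), stores the number of
     * processed elements in *nb_elts_processed and the index of the thread
     * that enqueued the frame in *enqueue_thread_idx, so interrupt-mode
     * dispatch can wake that thread's crypto-dispatch node. */
    typedef vnet_crypto_async_frame_t *
      (vnet_crypto_frame_dequeue_t) (vlib_main_t * vm,
                                     u32 * nb_elts_processed,
                                     u32 * enqueue_thread_idx);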