From 10bb21fb13aa74dcb0c2c0841d41a698bb274fbe Mon Sep 17 00:00:00 2001
From: Benoît Ganne
Date: Wed, 8 Sep 2021 16:26:52 +0200
Subject: vlib: fix vlib_buffer_enqueue_to_next() overflow
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

vlib_buffer_enqueue_to_next() requires the 'buffers' and 'nexts' arrays
to allow an overflow of up to 63 elements.
 - add helper to compute the minimum size
 - fix occurrences in session and async crypto

Type: fix

Change-Id: If8d7eebc5bf9beba71ba194aec0f79b8eb6d5843
Signed-off-by: Benoît Ganne
---
 src/vlib/buffer_node.h          | 25 +++++++++++++++++++++++++
 src/vnet/crypto/node.c          |  8 ++++----
 src/vnet/session/session_node.c |  6 +++---
 3 files changed, 32 insertions(+), 7 deletions(-)

diff --git a/src/vlib/buffer_node.h b/src/vlib/buffer_node.h
index 9ca43d425fc..10ebd253c1b 100644
--- a/src/vlib/buffer_node.h
+++ b/src/vlib/buffer_node.h
@@ -335,6 +335,17 @@ generic_buffer_node_inline (vlib_main_t * vm,
   return frame->n_vectors;
 }
 
+/* Minimum size for the 'buffers' and 'nexts' arrays to be used when calling
+ * vlib_buffer_enqueue_to_next().
+ * Because of optimizations, vlib_buffer_enqueue_to_next() will access
+ * past 'count' elements in the 'buffers' and 'nexts' arrays, IOW it
+ * will overflow.
+ * Those overflow elements are ignored in the final result so they do not
+ * need to be properly initialized, however if the array is allocated right
+ * before the end of a page and the next page is not mapped, accessing the
+ * overflow elements will trigger a segfault. */
+#define VLIB_BUFFER_ENQUEUE_MIN_SIZE(n) round_pow2 ((n), 64)
+
 static_always_inline void
 vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
			     u32 * buffers, u16 * nexts, uword count)
@@ -344,6 +355,20 @@ vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
   (fn) (vm, node, buffers, nexts, count);
 }
 
+static_always_inline void
+vlib_buffer_enqueue_to_next_vec (vlib_main_t *vm, vlib_node_runtime_t *node,
+				 u32 **buffers, u16 **nexts, uword count)
+{
+  const u32 bl = vec_len (*buffers), nl = vec_len (*nexts);
+  const u32 c = VLIB_BUFFER_ENQUEUE_MIN_SIZE (count);
+  ASSERT (bl >= count && nl >= count);
+  vec_validate (*buffers, c);
+  vec_validate (*nexts, c);
+  vlib_buffer_enqueue_to_next (vm, node, *buffers, *nexts, count);
+  vec_set_len (*buffers, bl);
+  vec_set_len (*nexts, nl);
+}
+
 static_always_inline void
 vlib_buffer_enqueue_to_single_next (vlib_main_t * vm,
				    vlib_node_runtime_t * node, u32 * buffers,
diff --git a/src/vnet/crypto/node.c b/src/vnet/crypto/node.c
index 7f34ec10fff..e753f1ad1db 100644
--- a/src/vnet/crypto/node.c
+++ b/src/vnet/crypto/node.c
@@ -114,8 +114,8 @@ crypto_dequeue_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
       n_cache += cf->n_elts;
       if (n_cache >= VLIB_FRAME_SIZE)
	{
-	  vlib_buffer_enqueue_to_next (vm, node, ct->buffer_indices,
-				       ct->nexts, n_cache);
+	  vlib_buffer_enqueue_to_next_vec (vm, node, &ct->buffer_indices,
+					   &ct->nexts, n_cache);
	  n_cache = 0;
	}
 
@@ -168,8 +168,8 @@ VLIB_NODE_FN (crypto_dispatch_node) (vlib_main_t * vm,
     }
   /* *INDENT-ON* */
   if (n_cache)
-    vlib_buffer_enqueue_to_next (vm, node, ct->buffer_indices, ct->nexts,
-				 n_cache);
+    vlib_buffer_enqueue_to_next_vec (vm, node, &ct->buffer_indices, &ct->nexts,
+				     n_cache);
 
   return n_dispatched;
 }
diff --git a/src/vnet/session/session_node.c b/src/vnet/session/session_node.c
index 72df07b1d6d..981fa66eae1 100644
--- a/src/vnet/session/session_node.c
+++ b/src/vnet/session/session_node.c
@@ -1660,9 +1660,9 @@ static void
 session_flush_pending_tx_buffers (session_worker_t * wrk,
				   vlib_node_runtime_t * node)
 {
-  vlib_buffer_enqueue_to_next (wrk->vm, node, wrk->pending_tx_buffers,
-			       wrk->pending_tx_nexts,
-			       vec_len (wrk->pending_tx_nexts));
+  vlib_buffer_enqueue_to_next_vec (wrk->vm, node, &wrk->pending_tx_buffers,
+				   &wrk->pending_tx_nexts,
+				   vec_len (wrk->pending_tx_nexts));
   vec_reset_length (wrk->pending_tx_buffers);
   vec_reset_length (wrk->pending_tx_nexts);
 }
-- 
cgit 1.2.3-korg
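
Usage note (not part of the patch): a caller that dispatches from plain C
arrays can size them with the new VLIB_BUFFER_ENQUEUE_MIN_SIZE() helper so
that the speculative reads up to 63 elements past 'count' stay inside the
allocation. The sketch below is illustrative only; 'example_node_fn' and
MY_NEXT_INDEX are hypothetical names, not part of VPP or of this commit.

#include <vlib/vlib.h>
#include <vlib/buffer_node.h>

#define MY_NEXT_INDEX 0 /* hypothetical next-node index */

static uword
example_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
		 vlib_frame_t *frame)
{
  /* VLIB_FRAME_SIZE is a multiple of 64 by default, so the macro is a
   * no-op here, but spelling it out documents the overflow contract and
   * keeps the arrays safe if the requested size is ever unaligned. */
  u32 buffers[VLIB_BUFFER_ENQUEUE_MIN_SIZE (VLIB_FRAME_SIZE)];
  u16 nexts[VLIB_BUFFER_ENQUEUE_MIN_SIZE (VLIB_FRAME_SIZE)];
  u32 *from = vlib_frame_vector_args (frame);
  uword n = frame->n_vectors;

  clib_memcpy_fast (buffers, from, n * sizeof (buffers[0]));
  for (uword i = 0; i < n; i++)
    nexts[i] = MY_NEXT_INDEX;

  /* Only the first 'n' elements are meaningful; the padding beyond them
   * exists solely so the vectorized loads cannot fault. */
  vlib_buffer_enqueue_to_next (vm, node, buffers, nexts, n);
  return n;
}

Callers that keep their buffers in vec_-managed vectors, as the session and
async crypto nodes changed by this patch do, should instead use the new
vlib_buffer_enqueue_to_next_vec() wrapper, which grows the vectors to the
minimum size around the call and then restores their original lengths.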