author     Klement Sekera <ksekera@cisco.com>    2019-02-13 11:01:32 +0100
committer  Klement Sekera <ksekera@cisco.com>    2019-02-27 12:48:13 +0100
commit     372a33efe8b8cc941c6313a70d5050ddc6f26259 (patch)
tree       cb1bdd47909cddc4627b7e1e91fe6e8e6b19dbb2
parent     7f1cc2c2c105f487b02e4d46bff8638b914fd106 (diff)
buffer chain linearization
Rewrite the vlib_buffer_chain_linearize function so that it works as intended.
Linearize buffer chains coming out of reassembly to work around some dpdk-tx
issues. Note that this is not a complete workaround, as a sufficiently large
packet will still cause the resulting chain to be too long.

Drop features from the reassembly code which rely on knowing which and how
many buffers were freed during linearization: buffer counts and tracing
capabilities for these cases.

Change-Id: Ic65de53ecb5c78cd96b178033f6a576ab4060ed1
Signed-off-by: Klement Sekera <ksekera@cisco.com>
-rw-r--r--  src/vlib/buffer_funcs.h            226
-rw-r--r--  src/vlib/physmem_funcs.h             5
-rw-r--r--  src/vnet/dhcp/dhcp4_proxy_node.c    13
-rw-r--r--  src/vnet/ip/ip4_error.h              1
-rw-r--r--  src/vnet/ip/ip4_reassembly.c       198
-rw-r--r--  src/vnet/ip/ip6_reassembly.c       215
6 files changed, 216 insertions, 442 deletions
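
The rewrite changes the function's contract: vlib_buffer_chain_linearize now returns the number of buffers in the linearized chain (0 on buffer allocation failure) instead of the free space left in the last buffer, and callers query tailroom separately through the new vlib_buffer_space_left_at_end. A minimal caller sketch in C, assuming <vlib/vlib.h> pulls in both functions; the helper name below is hypothetical and not part of this patch:

#include <vlib/vlib.h>

/* hypothetical helper illustrating the new calling convention */
static inline int
linearize_and_get_tailroom (vlib_main_t * vm, vlib_buffer_t * b,
                            u32 * space_left)
{
  /* new semantics: 0 on buffer allocation failure, otherwise the
   * number of buffers in the resulting (linearized) chain */
  if (!vlib_buffer_chain_linearize (vm, b))
    return 0;
  /* the old return value (free space at the end) is now a separate query */
  *space_left = vlib_buffer_space_left_at_end (vm, b);
  return 1;
}

The same pattern appears in the dhcp4_proxy_node.c hunk further down, where a zero return is mapped to DHCP_PROXY_ERROR_PKT_TOO_BIG and a drop.
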
diff --git a/src/vlib/buffer_funcs.h b/src/vlib/buffer_funcs.h
index b561a91c394..0b15a23e8a9 100644
--- a/src/vlib/buffer_funcs.h
+++ b/src/vlib/buffer_funcs.h
@@ -42,6 +42,10 @@
#include <vppinfra/hash.h>
#include <vppinfra/fifo.h>
+#include <vlib/buffer.h>
+#include <vlib/physmem_funcs.h>
+#include <vlib/main.h>
+#include <vlib/node.h>
/** \file
vlib buffer access methods.
@@ -1130,131 +1134,141 @@ vlib_validate_buffer_set_in_use (vlib_buffer_t * b, u32 expected)
#endif
}
-/** minimum data size of first buffer in a buffer chain */
-#define VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE (256)
+always_inline u32
+vlib_buffer_space_left_at_end (vlib_main_t * vm, vlib_buffer_t * b)
+{
+ return b->data + VLIB_BUFFER_DATA_SIZE -
+ ((u8 *) vlib_buffer_get_current (b) + b->current_length);
+}
-/**
- * @brief compress buffer chain in a way where the first buffer is at least
- * VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE long
- *
- * @param[in] vm - vlib_main
- * @param[in,out] first - first buffer in chain
- * @param[in,out] discard_vector - vector of buffer indexes which were removed
- * from the chain
- */
-always_inline void
-vlib_buffer_chain_compress (vlib_main_t * vm,
- vlib_buffer_t * first, u32 ** discard_vector)
+always_inline u32
+vlib_buffer_chain_linearize (vlib_main_t * vm, vlib_buffer_t * b)
{
- if (first->current_length >= VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE ||
- !(first->flags & VLIB_BUFFER_NEXT_PRESENT))
+ vlib_buffer_t *db = b, *sb, *first = b;
+ int is_cloned = 0;
+ u32 bytes_left = 0, data_size;
+ u16 src_left, dst_left, n_buffers = 1;
+ u8 *dp, *sp;
+ u32 to_free = 0;
+
+ if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
+ return 1;
+
+ data_size = VLIB_BUFFER_DATA_SIZE;
+
+ dst_left = vlib_buffer_space_left_at_end (vm, b);
+
+ while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
{
- /* this is already big enough or not a chain */
- return;
+ b = vlib_get_buffer (vm, b->next_buffer);
+ if (b->n_add_refs > 0)
+ is_cloned = 1;
+ bytes_left += b->current_length;
+ n_buffers++;
}
- /* probe free list to find allocated buffer size to avoid overfill */
- vlib_buffer_free_list_index_t index;
- vlib_buffer_free_list_t *free_list =
- vlib_buffer_get_buffer_free_list (vm, first, &index);
-
- u32 want_first_size = clib_min (VLIB_BUFFER_CHAIN_MIN_FIRST_DATA_SIZE,
- free_list->n_data_bytes -
- first->current_data);
- do
+
+ /* if buffer is cloned, create completely new chain - unless everything fits
+ * into one buffer */
+ if (is_cloned && bytes_left >= dst_left)
{
- vlib_buffer_t *second = vlib_get_buffer (vm, first->next_buffer);
- u32 need = want_first_size - first->current_length;
- u32 amount_to_copy = clib_min (need, second->current_length);
- clib_memcpy_fast (((u8 *) vlib_buffer_get_current (first)) +
- first->current_length,
- vlib_buffer_get_current (second), amount_to_copy);
- first->current_length += amount_to_copy;
- second->current_data += amount_to_copy;
- second->current_length -= amount_to_copy;
- if (first->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID)
- {
- first->total_length_not_including_first_buffer -= amount_to_copy;
- }
- if (!second->current_length)
+ u32 len = 0;
+ u32 space_needed = bytes_left - dst_left;
+ u32 tail;
+
+ if (vlib_buffer_alloc (vm, &tail, 1) == 0)
+ return 0;
+
+ ++n_buffers;
+ len += data_size;
+ b = vlib_get_buffer (vm, tail);
+
+ while (len < space_needed)
{
- vec_add1 (*discard_vector, first->next_buffer);
- if (second->flags & VLIB_BUFFER_NEXT_PRESENT)
+ u32 bi;
+ if (vlib_buffer_alloc (vm, &bi, 1) == 0)
{
- first->next_buffer = second->next_buffer;
+ vlib_buffer_free_one (vm, tail);
+ return 0;
}
- else
- {
- first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
- }
- second->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
+ b->flags = VLIB_BUFFER_NEXT_PRESENT;
+ b->next_buffer = bi;
+ b = vlib_get_buffer (vm, bi);
+ len += data_size;
+ n_buffers++;
}
+ sb = vlib_get_buffer (vm, first->next_buffer);
+ to_free = first->next_buffer;
+ first->next_buffer = tail;
}
- while ((first->current_length < want_first_size) &&
- (first->flags & VLIB_BUFFER_NEXT_PRESENT));
-}
+ else
+ sb = vlib_get_buffer (vm, first->next_buffer);
-/**
- * @brief linearize buffer chain - the first buffer is filled, if needed,
- * buffers are allocated and filled, returns free space in last buffer or
- * negative on failure
- *
- * @param[in] vm - vlib_main
- * @param[in,out] first - first buffer in chain
- */
-always_inline int
-vlib_buffer_chain_linearize (vlib_main_t * vm, vlib_buffer_t * first)
-{
- vlib_buffer_t *b = first;
- vlib_buffer_free_list_t *fl =
- vlib_buffer_get_free_list (vm, vlib_buffer_get_free_list_index (b));
- u32 buf_len = fl->n_data_bytes;
- // free buffer chain starting from the second buffer
- int free_count = (b->flags & VLIB_BUFFER_NEXT_PRESENT) != 0;
- u32 chain_to_free = b->next_buffer;
-
- u32 len = vlib_buffer_length_in_chain (vm, b);
- u32 free_len = buf_len - b->current_data - b->current_length;
- int alloc_len = clib_max (len - free_len, 0); //use the free len in the first buffer
- int n_buffers = (alloc_len + buf_len - 1) / buf_len;
- u32 new_buffers[n_buffers];
+ src_left = sb->current_length;
+ sp = vlib_buffer_get_current (sb);
+ dp = vlib_buffer_get_tail (db);
- u32 n_alloc = vlib_buffer_alloc (vm, new_buffers, n_buffers);
- if (n_alloc != n_buffers)
+ while (bytes_left)
{
- vlib_buffer_free_no_next (vm, new_buffers, n_alloc);
- return -1;
+ u16 bytes_to_copy;
+
+ if (dst_left == 0)
+ {
+ if (db != first)
+ db->current_data = 0;
+ db->current_length = dp - (u8 *) vlib_buffer_get_current (db);
+ ASSERT (db->flags & VLIB_BUFFER_NEXT_PRESENT);
+ db = vlib_get_buffer (vm, db->next_buffer);
+ dst_left = data_size;
+ dp = db->data;
+ }
+
+ while (src_left == 0)
+ {
+ ASSERT (sb->flags & VLIB_BUFFER_NEXT_PRESENT);
+ sb = vlib_get_buffer (vm, sb->next_buffer);
+ src_left = sb->current_length;
+ sp = vlib_buffer_get_current (sb);
+ }
+
+ bytes_to_copy = clib_min (dst_left, src_left);
+
+ if (dp != sp)
+ {
+ if (sb == db)
+ bytes_to_copy = clib_min (bytes_to_copy, sp - dp);
+
+ clib_memcpy_fast (dp, sp, bytes_to_copy);
+ }
+
+ src_left -= bytes_to_copy;
+ dst_left -= bytes_to_copy;
+ dp += bytes_to_copy;
+ sp += bytes_to_copy;
+ bytes_left -= bytes_to_copy;
}
+ if (db != first)
+ db->current_data = 0;
+ db->current_length = dp - (u8 *) vlib_buffer_get_current (db);
- vlib_buffer_t *s = b;
- while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
+ if (is_cloned && to_free)
+ vlib_buffer_free_one (vm, to_free);
+ else
{
- s = vlib_get_buffer (vm, s->next_buffer);
- int d_free_len = buf_len - b->current_data - b->current_length;
- ASSERT (d_free_len >= 0);
- // chain buf and split write
- u32 copy_len = clib_min (d_free_len, s->current_length);
- u8 *d = vlib_buffer_put_uninit (b, copy_len);
- clib_memcpy (d, vlib_buffer_get_current (s), copy_len);
- int rest = s->current_length - copy_len;
- if (rest > 0)
+ if (db->flags & VLIB_BUFFER_NEXT_PRESENT)
+ vlib_buffer_free_one (vm, db->next_buffer);
+ db->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
+ b = first;
+ n_buffers = 1;
+ while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
{
- //prev buf is full
- ASSERT (vlib_buffer_get_tail (b) == b->data + buf_len);
- ASSERT (n_buffers > 0);
- b = vlib_buffer_chain_buffer (vm, b, new_buffers[--n_buffers]);
- //make full use of the new buffers
- b->current_data = 0;
- d = vlib_buffer_put_uninit (b, rest);
- clib_memcpy (d, vlib_buffer_get_current (s) + copy_len, rest);
+ b = vlib_get_buffer (vm, b->next_buffer);
+ ++n_buffers;
}
}
- vlib_buffer_free (vm, &chain_to_free, free_count);
- b->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
- if (b == first) /* no buffers addeed */
- b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
- ASSERT (len == vlib_buffer_length_in_chain (vm, first));
- ASSERT (n_buffers == 0);
- return buf_len - b->current_data - b->current_length;
+
+ first->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
+
+ return n_buffers;
}
#endif /* included_vlib_buffer_funcs_h */
diff --git a/src/vlib/physmem_funcs.h b/src/vlib/physmem_funcs.h
index 18daeebabd9..d93bae8fbe5 100644
--- a/src/vlib/physmem_funcs.h
+++ b/src/vlib/physmem_funcs.h
@@ -40,6 +40,11 @@
#ifndef included_vlib_physmem_funcs_h
#define included_vlib_physmem_funcs_h
+#include <vppinfra/clib.h>
+#include <vppinfra/clib_error.h>
+#include <vlib/physmem.h>
+#include <vlib/main.h>
+
clib_error_t *vlib_physmem_init (vlib_main_t * vm);
clib_error_t *vlib_physmem_shared_map_create (vlib_main_t * vm, char *name,
uword size, u32 log2_page_sz,
diff --git a/src/vnet/dhcp/dhcp4_proxy_node.c b/src/vnet/dhcp/dhcp4_proxy_node.c
index d5ddfdc86f2..f72927441ed 100644
--- a/src/vnet/dhcp/dhcp4_proxy_node.c
+++ b/src/vnet/dhcp/dhcp4_proxy_node.c
@@ -181,7 +181,14 @@ dhcp_proxy_to_server_input (vlib_main_t * vm,
goto do_trace;
}
- space_left = vlib_buffer_chain_linearize (vm, b0);
+ if (!vlib_buffer_chain_linearize (vm, b0))
+ {
+ error0 = DHCP_PROXY_ERROR_PKT_TOO_BIG;
+ next0 = DHCP_PROXY_TO_SERVER_INPUT_NEXT_DROP;
+ pkts_too_big++;
+ goto do_trace;
+ }
+ space_left = vlib_buffer_space_left_at_end (vm, b0);
/* cant parse chains...
* and we need some space for option 82*/
if ((b0->flags & VLIB_BUFFER_NEXT_PRESENT) != 0 ||
@@ -530,8 +537,8 @@ dhcp_proxy_to_client_input (vlib_main_t * vm,
if (1 /* dpm->insert_option_82 */ )
{
/* linearize needed to "unclone" and scan options */
- int space_left = vlib_buffer_chain_linearize (vm, b0);
- if ((b0->flags & VLIB_BUFFER_NEXT_PRESENT) != 0 || space_left < 0)
+ int rv = vlib_buffer_chain_linearize (vm, b0);
+ if ((b0->flags & VLIB_BUFFER_NEXT_PRESENT) != 0 || !rv)
{
error0 = DHCP_PROXY_ERROR_PKT_TOO_BIG;
goto drop_packet;
diff --git a/src/vnet/ip/ip4_error.h b/src/vnet/ip/ip4_error.h
index 338d91ec0ae..badcc6609e9 100644
--- a/src/vnet/ip/ip4_error.h
+++ b/src/vnet/ip/ip4_error.h
@@ -86,7 +86,6 @@
/* Errors signalled by ip4-reassembly */ \
_ (REASS_DUPLICATE_FRAGMENT, "duplicate/overlapping fragments") \
_ (REASS_LIMIT_REACHED, "drops due to concurrent reassemblies limit") \
- _ (REASS_TIMEOUT, "fragments dropped due to reassembly timeout") \
_ (REASS_MALFORMED_PACKET, "malformed packets") \
_ (REASS_INTERNAL_ERROR, "drops due to internal reassembly error")
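
The IP4_REASS_RC_NO_BUF code introduced in the reassembly hunks below maps a failed linearization onto the existing drop-everything path. A condensed, hedged sketch of that flow, taken from the ip4_reassembly.c hunks (all names are the patch's own):

  /* in ip4_reass_finalize: a failed linearization now surfaces as NO_BUF */
  if (!vlib_buffer_chain_linearize (vm, first_b))
    return IP4_REASS_RC_NO_BUF;

  /* in ip4_reassembly_inline: NO_BUF shares the internal-error cleanup -
   * drop the whole reassembly and continue with the next packet */
  switch (ip4_reass_update (vm, node, rm, rt, reass, &bi0, &next0, &error0,
                            is_feature))
    {
    case IP4_REASS_RC_OK:
      break;
    case IP4_REASS_RC_NO_BUF:
      /* fallthrough */
    case IP4_REASS_RC_INTERNAL_ERROR:
      ip4_reass_on_timeout (vm, rm, reass);
      ip4_reass_free (rm, rt, reass);
      goto next_packet;
    }

The ip6 path mirrors this with IP6_REASS_RC_NO_BUF and ip6_reass_drop_all.
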
diff --git a/src/vnet/ip/ip4_reassembly.c b/src/vnet/ip/ip4_reassembly.c
index 4bf39144ddb..b54279c7ab7 100644
--- a/src/vnet/ip/ip4_reassembly.c
+++ b/src/vnet/ip/ip4_reassembly.c
@@ -58,6 +58,7 @@ typedef enum
{
IP4_REASS_RC_OK,
IP4_REASS_RC_INTERNAL_ERROR,
+ IP4_REASS_RC_NO_BUF,
} ip4_reass_rc_t;
typedef struct
@@ -118,7 +119,6 @@ typedef struct
{
ip4_reass_t *pool;
u32 reass_n;
- u32 buffers_n;
u32 id_counter;
clib_spinlock_t lock;
} ip4_reass_per_thread_t;
@@ -292,11 +292,12 @@ ip4_reass_free (ip4_reass_main_t * rm, ip4_reass_per_thread_t * rt,
always_inline void
ip4_reass_on_timeout (vlib_main_t * vm, ip4_reass_main_t * rm,
- ip4_reass_t * reass, u32 ** vec_drop_timeout)
+ ip4_reass_t * reass)
{
u32 range_bi = reass->first_bi;
vlib_buffer_t *range_b;
vnet_buffer_opaque_t *range_vnb;
+ u32 *to_free = NULL;
while (~0 != range_bi)
{
range_b = vlib_get_buffer (vm, range_bi);
@@ -304,7 +305,7 @@ ip4_reass_on_timeout (vlib_main_t * vm, ip4_reass_main_t * rm,
u32 bi = range_bi;
while (~0 != bi)
{
- vec_add1 (*vec_drop_timeout, bi);
+ vec_add1 (to_free, bi);
vlib_buffer_t *b = vlib_get_buffer (vm, bi);
if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
{
@@ -318,12 +319,13 @@ ip4_reass_on_timeout (vlib_main_t * vm, ip4_reass_main_t * rm,
}
range_bi = range_vnb->ip.reass.next_range_bi;
}
+ vlib_buffer_free (vm, to_free, vec_len (to_free));
+ vec_free (to_free);
}
ip4_reass_t *
ip4_reass_find_or_create (vlib_main_t * vm, ip4_reass_main_t * rm,
- ip4_reass_per_thread_t * rt,
- ip4_reass_key_t * k, u32 ** vec_drop_timeout)
+ ip4_reass_per_thread_t * rt, ip4_reass_key_t * k)
{
ip4_reass_t *reass = NULL;
f64 now = vlib_time_now (rm->vlib_main);
@@ -336,7 +338,7 @@ ip4_reass_find_or_create (vlib_main_t * vm, ip4_reass_main_t * rm,
reass = pool_elt_at_index (rt->pool, value.value);
if (now > reass->last_heard + rm->timeout)
{
- ip4_reass_on_timeout (vm, rm, reass, vec_drop_timeout);
+ ip4_reass_on_timeout (vm, rm, reass);
ip4_reass_free (rm, rt, reass);
reass = NULL;
}
@@ -383,16 +385,14 @@ ip4_reass_find_or_create (vlib_main_t * vm, ip4_reass_main_t * rm,
always_inline ip4_reass_rc_t
ip4_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
ip4_reass_main_t * rm, ip4_reass_per_thread_t * rt,
- ip4_reass_t * reass, u32 * bi0, u32 * next0,
- u32 * error0, u32 ** vec_drop_compress,
- u32 ** vec_drop_overlap, bool is_feature)
+ ip4_reass_t * reass, u32 * bi0, u32 * next0, u32 * error0,
+ bool is_feature)
{
vlib_buffer_t *first_b = vlib_get_buffer (vm, reass->first_bi);
vlib_buffer_t *last_b = NULL;
u32 sub_chain_bi = reass->first_bi;
u32 total_length = 0;
u32 buf_cnt = 0;
- u32 dropped_cnt = 0;
do
{
u32 tmp_bi = sub_chain_bi;
@@ -435,8 +435,7 @@ ip4_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
if (trim_front > tmp->current_length)
{
/* drop whole buffer */
- vec_add1 (*vec_drop_compress, tmp_bi);
- ++dropped_cnt;
+ vlib_buffer_free_one (vm, tmp_bi);
trim_front -= tmp->current_length;
if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
{
@@ -478,12 +477,11 @@ ip4_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
}
else
{
- vec_add1 (*vec_drop_overlap, tmp_bi);
+ vlib_buffer_free_one (vm, tmp_bi);
if (reass->first_bi == tmp_bi)
{
return IP4_REASS_RC_INTERNAL_ERROR;
}
- ++dropped_cnt;
}
if (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)
{
@@ -506,7 +504,6 @@ ip4_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
return IP4_REASS_RC_INTERNAL_ERROR;
}
last_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
- rt->buffers_n -= buf_cnt - dropped_cnt;
if (total_length < first_b->current_length)
{
return IP4_REASS_RC_INTERNAL_ERROR;
@@ -518,9 +515,10 @@ ip4_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
ip->flags_and_fragment_offset = 0;
ip->length = clib_host_to_net_u16 (first_b->current_length + total_length);
ip->checksum = ip4_header_checksum (ip);
- u32 before = vec_len (*vec_drop_compress);
- vlib_buffer_chain_compress (vm, first_b, vec_drop_compress);
- rt->buffers_n += vec_len (*vec_drop_compress) - before;
+ if (!vlib_buffer_chain_linearize (vm, first_b))
+ {
+ return IP4_REASS_RC_NO_BUF;
+ }
if (PREDICT_FALSE (first_b->flags & VLIB_BUFFER_IS_TRACED))
{
@@ -568,25 +566,6 @@ ip4_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
return IP4_REASS_RC_OK;
}
-always_inline u32
-ip4_reass_get_buffer_chain_length (vlib_main_t * vm, vlib_buffer_t * b)
-{
- u32 len = 0;
- while (b)
- {
- ++len;
- if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT))
- {
- b = vlib_get_buffer (vm, b->next_buffer);
- }
- else
- {
- break;
- }
- }
- return len;
-}
-
always_inline ip4_reass_rc_t
ip4_reass_insert_range_in_chain (vlib_main_t * vm,
ip4_reass_main_t * rm,
@@ -618,7 +597,6 @@ ip4_reass_insert_range_in_chain (vlib_main_t * vm,
return IP4_REASS_RC_INTERNAL_ERROR;
}
reass->data_len += ip4_reass_buffer_get_data_len (new_next_b);
- rt->buffers_n += ip4_reass_get_buffer_chain_length (vm, new_next_b);
return IP4_REASS_RC_OK;
}
@@ -626,7 +604,6 @@ always_inline ip4_reass_rc_t
ip4_reass_remove_range_from_chain (vlib_main_t * vm,
vlib_node_runtime_t * node,
ip4_reass_main_t * rm,
- u32 ** vec_drop_overlap,
ip4_reass_t * reass, u32 prev_range_bi,
u32 discard_bi)
{
@@ -655,7 +632,7 @@ ip4_reass_remove_range_from_chain (vlib_main_t * vm,
reass->data_len -= ip4_reass_buffer_get_data_len (discard_b);
while (1)
{
- vec_add1 (*vec_drop_overlap, discard_bi);
+ vlib_buffer_free_one (vm, discard_bi);
if (PREDICT_FALSE (discard_b->flags & VLIB_BUFFER_IS_TRACED))
{
ip4_reass_add_trace (vm, node, rm, reass, discard_bi, RANGE_DISCARD,
@@ -678,9 +655,8 @@ ip4_reass_remove_range_from_chain (vlib_main_t * vm,
always_inline ip4_reass_rc_t
ip4_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
ip4_reass_main_t * rm, ip4_reass_per_thread_t * rt,
- ip4_reass_t * reass, u32 * bi0, u32 * next0,
- u32 * error0, u32 ** vec_drop_overlap,
- u32 ** vec_drop_compress, bool is_feature)
+ ip4_reass_t * reass, u32 * bi0, u32 * next0, u32 * error0,
+ bool is_feature)
{
ip4_reass_rc_t rc = IP4_REASS_RC_OK;
int consumed = 0;
@@ -849,8 +825,7 @@ ip4_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
u32 next_range_bi = candidate_vnb->ip.reass.next_range_bi;
// discard candidate range, probe next range
rc =
- ip4_reass_remove_range_from_chain (vm, node, rm,
- vec_drop_overlap, reass,
+ ip4_reass_remove_range_from_chain (vm, node, rm, reass,
prev_range_bi,
candidate_range_bi);
if (IP4_REASS_RC_OK != rc)
@@ -889,7 +864,6 @@ ip4_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
reass->data_len == reass->last_packet_octet + 1)
{
return ip4_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
- vec_drop_compress, vec_drop_overlap,
is_feature);
}
else
@@ -920,74 +894,10 @@ ip4_reassembly_inline (vlib_main_t * vm,
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
- static u32 *vec_drop_timeout = NULL; // indexes of buffers which timed out
- static u32 *vec_drop_overlap = NULL; // indexes of buffers which were discarded due to overlap
- static u32 *vec_drop_internal_error = NULL; // indexes of buffers which were discarded due to internal errors
- static u32 *vec_drop_compress = NULL; // indexes of buffers dicarded due to buffer compression
- while (n_left_from > 0 || vec_len (vec_drop_timeout) > 0
- || vec_len (vec_drop_overlap) > 0 || vec_len (vec_drop_compress) > 0
- || vec_len (vec_drop_internal_error) > 0)
+ while (n_left_from > 0)
{
vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
- while (vec_len (vec_drop_timeout) > 0 && n_left_to_next > 0)
- {
- u32 bi = vec_pop (vec_drop_timeout);
- vlib_buffer_t *b = vlib_get_buffer (vm, bi);
- b->error = node->errors[IP4_ERROR_REASS_TIMEOUT];
- to_next[0] = bi;
- to_next += 1;
- n_left_to_next -= 1;
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
- n_left_to_next, bi,
- IP4_REASSEMBLY_NEXT_DROP);
- IP4_REASS_DEBUG_BUFFER (bi, enqueue_drop_timeout);
- --rt->buffers_n;
- }
-
- while (vec_len (vec_drop_overlap) > 0 && n_left_to_next > 0)
- {
- u32 bi = vec_pop (vec_drop_overlap);
- vlib_buffer_t *b = vlib_get_buffer (vm, bi);
- b->error = node->errors[IP4_ERROR_REASS_DUPLICATE_FRAGMENT];
- to_next[0] = bi;
- to_next += 1;
- n_left_to_next -= 1;
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
- n_left_to_next, bi,
- IP4_REASSEMBLY_NEXT_DROP);
- IP4_REASS_DEBUG_BUFFER (bi, enqueue_drop_duplicate_fragment);
- --rt->buffers_n;
- }
-
- while (vec_len (vec_drop_compress) > 0 && n_left_to_next > 0)
- {
- u32 bi = vec_pop (vec_drop_compress);
- vlib_buffer_t *b = vlib_get_buffer (vm, bi);
- b->error = node->errors[IP4_ERROR_NONE];
- to_next[0] = bi;
- to_next += 1;
- n_left_to_next -= 1;
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
- n_left_to_next, bi,
- IP4_REASSEMBLY_NEXT_DROP);
- IP4_REASS_DEBUG_BUFFER (bi, enqueue_drop_compress);
- --rt->buffers_n;
- }
- while (vec_len (vec_drop_internal_error) > 0 && n_left_to_next > 0)
- {
- u32 bi = vec_pop (vec_drop_internal_error);
- vlib_buffer_t *b = vlib_get_buffer (vm, bi);
- b->error = node->errors[IP4_ERROR_REASS_INTERNAL_ERROR];
- to_next[0] = bi;
- to_next += 1;
- n_left_to_next -= 1;
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
- n_left_to_next, bi,
- IP4_REASSEMBLY_NEXT_DROP);
- IP4_REASS_DEBUG_BUFFER (bi, enqueue_drop_internal_error);
- --rt->buffers_n;
- }
while (n_left_from > 0 && n_left_to_next > 0)
{
u32 bi0;
@@ -1033,23 +943,22 @@ ip4_reassembly_inline (vlib_main_t * vm,
(u64) ip0->fragment_id << 32 | (u64) ip0->protocol << 48;
ip4_reass_t *reass =
- ip4_reass_find_or_create (vm, rm, rt, &k,
- &vec_drop_timeout);
+ ip4_reass_find_or_create (vm, rm, rt, &k);
if (reass)
{
switch (ip4_reass_update
(vm, node, rm, rt, reass, &bi0, &next0, &error0,
- &vec_drop_overlap, &vec_drop_compress,
is_feature))
{
case IP4_REASS_RC_OK:
/* nothing to do here */
break;
+ case IP4_REASS_RC_NO_BUF:
+ /* fallthrough */
case IP4_REASS_RC_INTERNAL_ERROR:
/* drop everything and start with a clean slate */
- ip4_reass_on_timeout (vm, rm, reass,
- &vec_drop_internal_error);
+ ip4_reass_on_timeout (vm, rm, reass);
ip4_reass_free (rm, rt, reass);
goto next_packet;
break;
@@ -1321,7 +1230,6 @@ ip4_reass_walk_expired (vlib_main_t * vm,
f64 now = vlib_time_now (vm);
ip4_reass_t *reass;
- u32 *vec_drop_timeout = NULL;
int *pool_indexes_to_free = NULL;
uword thread_index = 0;
@@ -1347,20 +1255,7 @@ ip4_reass_walk_expired (vlib_main_t * vm,
vec_foreach (i, pool_indexes_to_free)
{
ip4_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
- u32 before = vec_len (vec_drop_timeout);
- vlib_buffer_t *b = vlib_get_buffer (vm, reass->first_bi);
- if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
- {
- if (pool_is_free_index (vm->trace_main.trace_buffer_pool,
- b->trace_index))
- {
- /* the trace is gone, don't trace this buffer anymore */
- b->flags &= ~VLIB_BUFFER_IS_TRACED;
- }
- }
- ip4_reass_on_timeout (vm, rm, reass, &vec_drop_timeout);
- u32 after = vec_len (vec_drop_timeout);
- rt->buffers_n -= (after - before);
+ ip4_reass_on_timeout (vm, rm, reass);
ip4_reass_free (rm, rt, reass);
}
/* *INDENT-ON* */
@@ -1368,42 +1263,7 @@ ip4_reass_walk_expired (vlib_main_t * vm,
clib_spinlock_unlock (&rt->lock);
}
- while (vec_len (vec_drop_timeout) > 0)
- {
- vlib_frame_t *f = vlib_get_frame_to_node (vm, rm->ip4_drop_idx);
- u32 *to_next = vlib_frame_vector_args (f);
- u32 n_left_to_next = VLIB_FRAME_SIZE - f->n_vectors;
- int trace_frame = 0;
- while (vec_len (vec_drop_timeout) > 0 && n_left_to_next > 0)
- {
- u32 bi = vec_pop (vec_drop_timeout);
- vlib_buffer_t *b = vlib_get_buffer (vm, bi);
- if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
- {
- if (pool_is_free_index (vm->trace_main.trace_buffer_pool,
- b->trace_index))
- {
- /* the trace is gone, don't trace this buffer anymore */
- b->flags &= ~VLIB_BUFFER_IS_TRACED;
- }
- else
- {
- trace_frame = 1;
- }
- }
- b->error = node->errors[IP4_ERROR_REASS_TIMEOUT];
- to_next[0] = bi;
- ++f->n_vectors;
- to_next += 1;
- n_left_to_next -= 1;
- IP4_REASS_DEBUG_BUFFER (bi, enqueue_drop_timeout_walk);
- }
- f->frame_flags |= (trace_frame * VLIB_FRAME_TRACE);
- vlib_put_frame_to_node (vm, rm->ip4_drop_idx, f);
- }
-
vec_free (pool_indexes_to_free);
- vec_free (vec_drop_timeout);
if (event_data)
{
_vec_len (event_data) = 0;
@@ -1413,8 +1273,6 @@ ip4_reass_walk_expired (vlib_main_t * vm,
return 0;
}
-static vlib_node_registration_t ip4_reass_expire_node;
-
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_reass_expire_node, static) = {
.function = ip4_reass_walk_expired,
@@ -1489,7 +1347,6 @@ show_ip4_reass (vlib_main_t * vm, unformat_input_t * input,
}
u32 sum_reass_n = 0;
- u64 sum_buffers_n = 0;
ip4_reass_t *reass;
uword thread_index;
const uword nthreads = vlib_num_workers () + 1;
@@ -1506,7 +1363,6 @@ show_ip4_reass (vlib_main_t * vm, unformat_input_t * input,
/* *INDENT-ON* */
}
sum_reass_n += rt->reass_n;
- sum_buffers_n += rt->buffers_n;
clib_spinlock_unlock (&rt->lock);
}
vlib_cli_output (vm, "---------------------");
@@ -1515,8 +1371,6 @@ show_ip4_reass (vlib_main_t * vm, unformat_input_t * input,
vlib_cli_output (vm,
"Maximum configured concurrent IP4 reassemblies per worker-thread: %lu\n",
(long unsigned) rm->max_reass_n);
- vlib_cli_output (vm, "Buffers in use: %lu\n",
- (long unsigned) sum_buffers_n);
return 0;
}
diff --git a/src/vnet/ip/ip6_reassembly.c b/src/vnet/ip/ip6_reassembly.c
index ab5b42a2f00..e03b745ca3b 100644
--- a/src/vnet/ip/ip6_reassembly.c
+++ b/src/vnet/ip/ip6_reassembly.c
@@ -36,6 +36,7 @@ typedef enum
{
IP6_REASS_RC_OK,
IP6_REASS_RC_INTERNAL_ERROR,
+ IP6_REASS_RC_NO_BUF,
} ip6_reass_rc_t;
typedef struct
@@ -96,7 +97,6 @@ typedef struct
{
ip6_reass_t *pool;
u32 reass_n;
- u32 buffers_n;
u32 id_counter;
clib_spinlock_t lock;
} ip6_reass_per_thread_t;
@@ -285,11 +285,12 @@ ip6_reass_free (ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
always_inline void
ip6_reass_drop_all (vlib_main_t * vm, ip6_reass_main_t * rm,
- ip6_reass_t * reass, u32 ** vec_drop_bi)
+ ip6_reass_t * reass)
{
u32 range_bi = reass->first_bi;
vlib_buffer_t *range_b;
vnet_buffer_opaque_t *range_vnb;
+ u32 *to_free = NULL;
while (~0 != range_bi)
{
range_b = vlib_get_buffer (vm, range_bi);
@@ -297,7 +298,7 @@ ip6_reass_drop_all (vlib_main_t * vm, ip6_reass_main_t * rm,
u32 bi = range_bi;
while (~0 != bi)
{
- vec_add1 (*vec_drop_bi, bi);
+ vec_add1 (to_free, bi);
vlib_buffer_t *b = vlib_get_buffer (vm, bi);
if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
{
@@ -311,12 +312,14 @@ ip6_reass_drop_all (vlib_main_t * vm, ip6_reass_main_t * rm,
}
range_bi = range_vnb->ip.reass.next_range_bi;
}
+ vlib_buffer_free (vm, to_free, vec_len (to_free));
+ vec_free (to_free);
}
always_inline void
ip6_reass_on_timeout (vlib_main_t * vm, vlib_node_runtime_t * node,
ip6_reass_main_t * rm, ip6_reass_t * reass,
- u32 * icmp_bi, u32 ** vec_timeout)
+ u32 * icmp_bi)
{
if (~0 == reass->first_bi)
{
@@ -346,14 +349,13 @@ ip6_reass_on_timeout (vlib_main_t * vm, vlib_node_runtime_t * node,
ICMP6_time_exceeded_fragment_reassembly_time_exceeded,
0);
}
- ip6_reass_drop_all (vm, rm, reass, vec_timeout);
+ ip6_reass_drop_all (vm, rm, reass);
}
always_inline ip6_reass_t *
ip6_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
- ip6_reass_key_t * k, u32 * icmp_bi,
- u32 ** vec_timeout)
+ ip6_reass_key_t * k, u32 * icmp_bi)
{
ip6_reass_t *reass = NULL;
f64 now = vlib_time_now (rm->vlib_main);
@@ -370,7 +372,7 @@ ip6_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
reass = pool_elt_at_index (rt->pool, value.value);
if (now > reass->last_heard + rm->timeout)
{
- ip6_reass_on_timeout (vm, node, rm, reass, icmp_bi, vec_timeout);
+ ip6_reass_on_timeout (vm, node, rm, reass, icmp_bi);
ip6_reass_free (rm, rt, reass);
reass = NULL;
}
@@ -421,8 +423,8 @@ ip6_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
always_inline ip6_reass_rc_t
ip6_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
- ip6_reass_t * reass, u32 * bi0, u32 * next0,
- u32 * error0, u32 ** vec_drop_compress, bool is_feature)
+ ip6_reass_t * reass, u32 * bi0, u32 * next0, u32 * error0,
+ bool is_feature)
{
*bi0 = reass->first_bi;
*error0 = IP6_ERROR_NONE;
@@ -432,6 +434,8 @@ ip6_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
u32 total_length = 0;
u32 buf_cnt = 0;
u32 dropped_cnt = 0;
+ u32 *vec_drop_compress = NULL;
+ ip6_reass_rc_t rv = IP6_REASS_RC_OK;
do
{
u32 tmp_bi = sub_chain_bi;
@@ -440,7 +444,8 @@ ip6_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
!(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
{
- return IP6_REASS_RC_INTERNAL_ERROR;
+ rv = IP6_REASS_RC_INTERNAL_ERROR;
+ goto free_buffers_and_return;
}
u32 data_len = ip6_reass_buffer_get_data_len (tmp);
@@ -453,7 +458,8 @@ ip6_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
/* first buffer - keep ip6 header */
if (0 != ip6_reass_buffer_get_data_offset (tmp))
{
- return IP6_REASS_RC_INTERNAL_ERROR;
+ rv = IP6_REASS_RC_INTERNAL_ERROR;
+ goto free_buffers_and_return;
}
trim_front = 0;
trim_end = vlib_buffer_length_in_chain (vm, tmp) - data_len -
@@ -461,7 +467,8 @@ ip6_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
sizeof (*frag_hdr));
if (!(vlib_buffer_length_in_chain (vm, tmp) - trim_end > 0))
{
- return IP6_REASS_RC_INTERNAL_ERROR;
+ rv = IP6_REASS_RC_INTERNAL_ERROR;
+ goto free_buffers_and_return;
}
}
u32 keep_data =
@@ -474,12 +481,12 @@ ip6_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
if (trim_front > tmp->current_length)
{
/* drop whole buffer */
- vec_add1 (*vec_drop_compress, tmp_bi);
- ++dropped_cnt;
+ vec_add1 (vec_drop_compress, tmp_bi);
trim_front -= tmp->current_length;
if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
{
- return IP6_REASS_RC_INTERNAL_ERROR;
+ rv = IP6_REASS_RC_INTERNAL_ERROR;
+ goto free_buffers_and_return;
}
tmp->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
tmp_bi = tmp->next_buffer;
@@ -510,17 +517,19 @@ ip6_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
keep_data -= tmp->current_length;
if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
{
- return IP6_REASS_RC_INTERNAL_ERROR;
+ rv = IP6_REASS_RC_INTERNAL_ERROR;
+ goto free_buffers_and_return;
}
}
total_length += tmp->current_length;
}
else
{
- vec_add1 (*vec_drop_compress, tmp_bi);
+ vec_add1 (vec_drop_compress, tmp_bi);
if (reass->first_bi == tmp_bi)
{
- return IP6_REASS_RC_INTERNAL_ERROR;
+ rv = IP6_REASS_RC_INTERNAL_ERROR;
+ goto free_buffers_and_return;
}
++dropped_cnt;
}
@@ -542,13 +551,15 @@ ip6_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
if (!last_b)
{
- return IP6_REASS_RC_INTERNAL_ERROR;
+ rv = IP6_REASS_RC_INTERNAL_ERROR;
+ goto free_buffers_and_return;
}
last_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
vlib_buffer_t *first_b = vlib_get_buffer (vm, reass->first_bi);
if (total_length < first_b->current_length)
{
- return IP6_REASS_RC_INTERNAL_ERROR;
+ rv = IP6_REASS_RC_INTERNAL_ERROR;
+ goto free_buffers_and_return;
}
total_length -= first_b->current_length;
first_b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
@@ -570,7 +581,8 @@ ip6_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
}
if (!((u8 *) frag_hdr - (u8 *) ip == ip6_frag_hdr_offset))
{
- return IP6_REASS_RC_INTERNAL_ERROR;
+ rv = IP6_REASS_RC_INTERNAL_ERROR;
+ goto free_buffers_and_return;
}
memmove (frag_hdr, (u8 *) frag_hdr + sizeof (*frag_hdr),
first_b->current_length - ip6_frag_hdr_offset -
@@ -579,7 +591,11 @@ ip6_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
ip->payload_length =
clib_host_to_net_u16 (total_length + first_b->current_length -
sizeof (*ip));
- vlib_buffer_chain_compress (vm, first_b, vec_drop_compress);
+ if (!vlib_buffer_chain_linearize (vm, first_b))
+ {
+ rv = IP6_REASS_RC_NO_BUF;
+ goto free_buffers_and_return;
+ }
if (PREDICT_FALSE (first_b->flags & VLIB_BUFFER_IS_TRACED))
{
ip6_reass_add_trace (vm, node, rm, reass, reass->first_bi, FINALIZE, 0);
@@ -621,26 +637,10 @@ ip6_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
vnet_buffer (first_b)->ip.reass.estimated_mtu = reass->min_fragment_length;
ip6_reass_free (rm, rt, reass);
reass = NULL;
- return IP6_REASS_RC_OK;
-}
-
-always_inline u32
-ip6_reass_get_buffer_chain_length (vlib_main_t * vm, vlib_buffer_t * b)
-{
- u32 len = 0;
- while (b)
- {
- ++len;
- if (PREDICT_FALSE (b->flags & VLIB_BUFFER_NEXT_PRESENT))
- {
- b = vlib_get_buffer (vm, b->next_buffer);
- }
- else
- {
- break;
- }
- }
- return len;
+free_buffers_and_return:
+ vlib_buffer_free (vm, vec_drop_compress, vec_len (vec_drop_compress));
+ vec_free (vec_drop_compress);
+ return rv;
}
always_inline void
@@ -668,16 +668,13 @@ ip6_reass_insert_range_in_chain (vlib_main_t * vm, ip6_reass_main_t * rm,
reass->first_bi = new_next_bi;
}
reass->data_len += ip6_reass_buffer_get_data_len (new_next_b);
- rt->buffers_n += ip6_reass_get_buffer_chain_length (vm, new_next_b);
}
always_inline ip6_reass_rc_t
ip6_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
ip6_reass_main_t * rm, ip6_reass_per_thread_t * rt,
- ip6_reass_t * reass, u32 * bi0, u32 * next0,
- u32 * error0, ip6_frag_hdr_t * frag_hdr,
- u32 ** vec_drop_overlap, u32 ** vec_drop_compress,
- bool is_feature)
+ ip6_reass_t * reass, u32 * bi0, u32 * next0, u32 * error0,
+ ip6_frag_hdr_t * frag_hdr, bool is_feature)
{
int consumed = 0;
vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
@@ -761,7 +758,7 @@ ip6_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
else
{
// overlapping fragment - not allowed by RFC 8200
- ip6_reass_drop_all (vm, rm, reass, vec_drop_overlap);
+ ip6_reass_drop_all (vm, rm, reass);
ip6_reass_free (rm, rt, reass);
if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
{
@@ -784,7 +781,7 @@ ip6_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
reass->data_len == reass->last_packet_octet + 1)
{
return ip6_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
- vec_drop_compress, is_feature);
+ is_feature);
}
else
{
@@ -882,76 +879,15 @@ ip6_reassembly_inline (vlib_main_t * vm,
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
- static u32 *vec_timeout = NULL; // indexes of buffers which timed out
- static u32 *vec_drop_overlap = NULL; // indexes of buffers dropped due to overlap
- static u32 *vec_drop_internal_error = NULL; // indexes of buffers dropped due to internal errors
- static u32 *vec_drop_compress = NULL; // indexes of buffers dropped due to buffer compression
- while (n_left_from > 0 || vec_len (vec_timeout) > 0
- || vec_len (vec_drop_overlap) > 0 || vec_len (vec_drop_compress) > 0
- || vec_len (vec_drop_internal_error) > 0)
+ while (n_left_from > 0)
{
vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
- while (vec_len (vec_timeout) > 0 && n_left_to_next > 0)
- {
- u32 bi = vec_pop (vec_timeout);
- vlib_buffer_t *b = vlib_get_buffer (vm, bi);
- b->error = node->errors[IP6_ERROR_REASS_TIMEOUT];
- to_next[0] = bi;
- to_next += 1;
- n_left_to_next -= 1;
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
- n_left_to_next, bi,
- IP6_REASSEMBLY_NEXT_DROP);
- --rt->buffers_n;
- }
-
- while (vec_len (vec_drop_overlap) > 0 && n_left_to_next > 0)
- {
- u32 bi = vec_pop (vec_drop_overlap);
- vlib_buffer_t *b = vlib_get_buffer (vm, bi);
- b->error = node->errors[IP6_ERROR_REASS_OVERLAPPING_FRAGMENT];
- to_next[0] = bi;
- to_next += 1;
- n_left_to_next -= 1;
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
- n_left_to_next, bi,
- IP6_REASSEMBLY_NEXT_DROP);
- --rt->buffers_n;
- }
-
- while (vec_len (vec_drop_compress) > 0 && n_left_to_next > 0)
- {
- u32 bi = vec_pop (vec_drop_compress);
- vlib_buffer_t *b = vlib_get_buffer (vm, bi);
- b->error = node->errors[IP6_ERROR_NONE];
- to_next[0] = bi;
- to_next += 1;
- n_left_to_next -= 1;
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
- n_left_to_next, bi,
- IP6_REASSEMBLY_NEXT_DROP);
- --rt->buffers_n;
- }
- while (vec_len (vec_drop_internal_error) > 0 && n_left_to_next > 0)
- {
- u32 bi = vec_pop (vec_drop_internal_error);
- vlib_buffer_t *b = vlib_get_buffer (vm, bi);
- b->error = node->errors[IP6_ERROR_REASS_INTERNAL_ERROR];
- to_next[0] = bi;
- to_next += 1;
- n_left_to_next -= 1;
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
- n_left_to_next, bi,
- IP6_REASSEMBLY_NEXT_DROP);
- --rt->buffers_n;
- }
-
while (n_left_from > 0 && n_left_to_next > 0)
{
u32 bi0;
vlib_buffer_t *b0;
- u32 next0;
+ u32 next0 = IP6_REASSEMBLY_NEXT_DROP;
u32 error0 = IP6_ERROR_NONE;
u32 icmp_bi = ~0;
@@ -1001,22 +937,21 @@ ip6_reassembly_inline (vlib_main_t * vm,
sw_if_index[VLIB_RX] << 32 | frag_hdr->identification;
k.as_u64[5] = ip0->protocol;
ip6_reass_t *reass =
- ip6_reass_find_or_create (vm, node, rm, rt, &k, &icmp_bi,
- &vec_timeout);
+ ip6_reass_find_or_create (vm, node, rm, rt, &k, &icmp_bi);
if (reass)
{
switch (ip6_reass_update (vm, node, rm, rt, reass, &bi0, &next0,
- &error0, frag_hdr, &vec_drop_overlap,
- &vec_drop_compress, is_feature))
+ &error0, frag_hdr, is_feature))
{
case IP6_REASS_RC_OK:
/* nothing to do here */
break;
+ case IP6_REASS_RC_NO_BUF:
+ /* fallthrough */
case IP6_REASS_RC_INTERNAL_ERROR:
/* drop everything and start with a clean slate */
- ip6_reass_drop_all (vm, rm, reass,
- &vec_drop_internal_error);
+ ip6_reass_drop_all (vm, rm, reass);
ip6_reass_free (rm, rt, reass);
goto next_packet;
break;
@@ -1306,7 +1241,6 @@ ip6_reass_walk_expired (vlib_main_t * vm,
f64 now = vlib_time_now (vm);
ip6_reass_t *reass;
- u32 *vec_timeout = NULL;
int *pool_indexes_to_free = NULL;
uword thread_index = 0;
@@ -1334,7 +1268,6 @@ ip6_reass_walk_expired (vlib_main_t * vm,
{
ip6_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
u32 icmp_bi = ~0;
- u32 before = vec_len (vec_timeout);
vlib_buffer_t *b = vlib_get_buffer (vm, reass->first_bi);
if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
{
@@ -1345,13 +1278,10 @@ ip6_reass_walk_expired (vlib_main_t * vm,
b->flags &= ~VLIB_BUFFER_IS_TRACED;
}
}
- ip6_reass_on_timeout (vm, node, rm, reass, &icmp_bi, &vec_timeout);
- u32 after = vec_len (vec_timeout);
- rt->buffers_n -= (after - before);
+ ip6_reass_on_timeout (vm, node, rm, reass, &icmp_bi);
if (~0 != icmp_bi)
{
vec_add1 (vec_icmp_bi, icmp_bi);
- --rt->buffers_n;
}
ip6_reass_free (rm, rt, reass);
}
@@ -1360,39 +1290,6 @@ ip6_reass_walk_expired (vlib_main_t * vm,
clib_spinlock_unlock (&rt->lock);
}
- while (vec_len (vec_timeout) > 0)
- {
- vlib_frame_t *f = vlib_get_frame_to_node (vm, rm->ip6_drop_idx);
- u32 *to_next = vlib_frame_vector_args (f);
- u32 n_left_to_next = VLIB_FRAME_SIZE - f->n_vectors;
- int trace_frame = 0;
- while (vec_len (vec_timeout) > 0 && n_left_to_next > 0)
- {
- u32 bi = vec_pop (vec_timeout);
- vlib_buffer_t *b = vlib_get_buffer (vm, bi);
- if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
- {
- if (pool_is_free_index (vm->trace_main.trace_buffer_pool,
- b->trace_index))
- {
- /* the trace is gone, don't trace this buffer anymore */
- b->flags &= ~VLIB_BUFFER_IS_TRACED;
- }
- else
- {
- trace_frame = 1;
- }
- }
- b->error = node->errors[IP6_ERROR_REASS_TIMEOUT];
- to_next[0] = bi;
- ++f->n_vectors;
- to_next += 1;
- n_left_to_next -= 1;
- }
- f->frame_flags |= (trace_frame * VLIB_FRAME_TRACE);
- vlib_put_frame_to_node (vm, rm->ip6_drop_idx, f);
- }
-
while (vec_len (vec_icmp_bi) > 0)
{
vlib_frame_t *f =
@@ -1428,7 +1325,6 @@ ip6_reass_walk_expired (vlib_main_t * vm,
}
vec_free (pool_indexes_to_free);
- vec_free (vec_timeout);
vec_free (vec_icmp_bi);
if (event_data)
{
@@ -1533,7 +1429,6 @@ show_ip6_reass (vlib_main_t * vm, unformat_input_t * input,
/* *INDENT-ON* */
}
sum_reass_n += rt->reass_n;
- sum_buffers_n += rt->buffers_n;
clib_spinlock_unlock (&rt->lock);
}
vlib_cli_output (vm, "---------------------");