path: root/src/vlib
Diffstat (limited to 'src/vlib')
-rw-r--r--  src/vlib/buffer.c        19
-rw-r--r--  src/vlib/buffer.h         6
-rw-r--r--  src/vlib/buffer_funcs.h  11
3 files changed, 32 insertions, 4 deletions
diff --git a/src/vlib/buffer.c b/src/vlib/buffer.c
index a5c955c76e6..908368c080b 100644
--- a/src/vlib/buffer.c
+++ b/src/vlib/buffer.c
@@ -396,6 +396,8 @@ vlib_buffer_create_free_list_helper (vlib_main_t * vm,
hash_set (bm->free_list_by_size, f->n_data_bytes, f->index);
}
+ clib_spinlock_init (&f->global_buffers_lock);
+
for (i = 1; i < vec_len (vlib_mains); i++)
{
vlib_buffer_main_t *wbm = vlib_mains[i]->buffer_main;
@@ -509,6 +511,7 @@ fill_free_list (vlib_main_t * vm,
vlib_buffer_free_list_t * fl, uword min_free_buffers)
{
vlib_buffer_t *buffers, *b;
+ vlib_buffer_free_list_t *mfl;
int n, n_bytes, i;
u32 *bi;
u32 n_remaining, n_alloc, n_this_chunk;
@@ -518,6 +521,22 @@ fill_free_list (vlib_main_t * vm,
if (n <= 0)
return min_free_buffers;
+ mfl = vlib_buffer_get_free_list (vlib_mains[0], fl->index);
+ if (vec_len (mfl->global_buffers) > 0)
+ {
+ int n_copy, n_left;
+ clib_spinlock_lock (&mfl->global_buffers_lock);
+ n_copy = clib_min (vec_len (mfl->global_buffers), n);
+ n_left = vec_len (mfl->global_buffers) - n_copy;
+ vec_add_aligned (fl->buffers, mfl->global_buffers + n_left, n_copy,
+ CLIB_CACHE_LINE_BYTES);
+ _vec_len (mfl->global_buffers) = n_left;
+ clib_spinlock_unlock (&mfl->global_buffers_lock);
+ n = min_free_buffers - vec_len (fl->buffers);
+ if (n <= 0)
+ return min_free_buffers;
+ }
+
/* Always allocate round number of buffers. */
n = round_pow2 (n, CLIB_CACHE_LINE_BYTES / sizeof (u32));
diff --git a/src/vlib/buffer.h b/src/vlib/buffer.h
index 9047ca9a648..5504bf7c90b 100644
--- a/src/vlib/buffer.h
+++ b/src/vlib/buffer.h
@@ -350,6 +350,12 @@ typedef struct vlib_buffer_free_list_t
/* Vector of free buffers. Each element is a byte offset into I/O heap. */
u32 *buffers;
+ /* Global vector of free buffers, held only on the main thread's
+ free list. Buffers are returned to the global vector only when the
+ number of buffers on a thread's free list grows above a threshold. */
+ u32 *global_buffers;
+ clib_spinlock_t global_buffers_lock;
+
/* Memory chunks allocated for this free list
recorded here so they can be freed when free list
is deleted. */
diff --git a/src/vlib/buffer_funcs.h b/src/vlib/buffer_funcs.h
index 6a662416b29..78bf9317698 100644
--- a/src/vlib/buffer_funcs.h
+++ b/src/vlib/buffer_funcs.h
@@ -848,18 +848,21 @@ vlib_buffer_add_to_free_list (vlib_main_t * vm,
u32 buffer_index, u8 do_init)
{
vlib_buffer_t *b;
- u32 i;
b = vlib_get_buffer (vm, buffer_index);
if (PREDICT_TRUE (do_init))
vlib_buffer_init_for_free_list (b, f);
vec_add1_aligned (f->buffers, buffer_index, CLIB_CACHE_LINE_BYTES);
- if (vec_len (f->buffers) > 3 * VLIB_FRAME_SIZE)
+ if (vec_len (f->buffers) > 4 * VLIB_FRAME_SIZE)
{
+ vlib_buffer_free_list_t *mf;
+ mf = vlib_buffer_get_free_list (vlib_mains[0], f->index);
+ clib_spinlock_lock (&mf->global_buffers_lock);
/* keep last stored buffers, as they are more likely hot in the cache */
- for (i = 0; i < VLIB_FRAME_SIZE; i++)
- vm->os_physmem_free (vlib_get_buffer (vm, i));
+ vec_add_aligned (mf->global_buffers, f->buffers, VLIB_FRAME_SIZE,
+ CLIB_CACHE_LINE_BYTES);
vec_delete (f->buffers, VLIB_FRAME_SIZE, 0);
+ clib_spinlock_unlock (&mf->global_buffers_lock);
}
}
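
Taken together, the three hunks implement a two-level free list: each thread keeps a private vector of free buffer indices, and a single global vector lives on the main thread's free list, guarded by global_buffers_lock. A thread spills the oldest VLIB_FRAME_SIZE entries to the global vector once its local vector grows past 4 * VLIB_FRAME_SIZE, and fill_free_list refills from the tail of the global vector before falling back to allocating fresh memory. The sketch below is a minimal, self-contained illustration of that scheme, not VPP code: it uses plain C arrays and a pthread spinlock in place of vppinfra vectors and clib_spinlock, and the names global_pool_t, local_cache_t, CHUNK, and SPILL_THRESHOLD are invented for the example (compile with -pthread; bounds checks on the global pool are elided for brevity).

/* Illustrative sketch of the two-level free-list scheme; not VPP code. */
#include <pthread.h>
#include <string.h>

#define CHUNK           256              /* stand-in for VLIB_FRAME_SIZE */
#define SPILL_THRESHOLD (4 * CHUNK)
#define GLOBAL_MAX      65536
#define LOCAL_MAX       (SPILL_THRESHOLD + CHUNK)

typedef struct
{
  pthread_spinlock_t lock;   /* plays the role of global_buffers_lock */
  unsigned n;                /* entries currently in pool[] */
  unsigned pool[GLOBAL_MAX]; /* plays the role of global_buffers */
} global_pool_t;

typedef struct
{
  unsigned n;
  unsigned cache[LOCAL_MAX]; /* per-thread cache, accessed without a lock */
} local_cache_t;

static void
global_pool_init (global_pool_t *gp)
{
  /* mirrors the clib_spinlock_init call added to the free-list helper */
  pthread_spin_init (&gp->lock, PTHREAD_PROCESS_PRIVATE);
  gp->n = 0;
}

/* Free one buffer index into the thread-local cache; when the cache grows
   above the threshold, push the OLDEST chunk to the global pool, keeping
   the newest (likely cache-hot) entries local, as in
   vlib_buffer_add_to_free_list. */
static void
buffer_free (local_cache_t *lc, global_pool_t *gp, unsigned buffer_index)
{
  lc->cache[lc->n++] = buffer_index;
  if (lc->n > SPILL_THRESHOLD)
    {
      pthread_spin_lock (&gp->lock);
      memcpy (gp->pool + gp->n, lc->cache, CHUNK * sizeof (unsigned));
      gp->n += CHUNK;
      pthread_spin_unlock (&gp->lock);
      /* delete the spilled chunk from the front of the local cache */
      memmove (lc->cache, lc->cache + CHUNK,
               (lc->n - CHUNK) * sizeof (unsigned));
      lc->n -= CHUNK;
    }
}

/* Allocate one buffer index, refilling from the TAIL of the global pool
   first, as fill_free_list does, before falling back to the allocator. */
static int
buffer_alloc (local_cache_t *lc, global_pool_t *gp, unsigned *buffer_index)
{
  if (lc->n == 0)
    {
      pthread_spin_lock (&gp->lock);
      unsigned n_copy = gp->n < CHUNK ? gp->n : CHUNK;
      memcpy (lc->cache, gp->pool + gp->n - n_copy,
              n_copy * sizeof (unsigned));
      gp->n -= n_copy;
      pthread_spin_unlock (&gp->lock);
      lc->n = n_copy;
      if (lc->n == 0)
        return -1; /* a real implementation would allocate new memory here */
    }
  *buffer_index = lc->cache[--lc->n];
  return 0;
}

int
main (void)
{
  static global_pool_t gp;
  static local_cache_t lc;
  unsigned bi;

  global_pool_init (&gp);
  /* free enough buffers to force one spill, then take one back */
  for (unsigned i = 0; i < SPILL_THRESHOLD + 1; i++)
    buffer_free (&lc, &gp, i);
  return buffer_alloc (&lc, &gp, &bi);
}

Spilling from the head on free and refilling from the tail on alloc preserves the property the in-tree comment calls out: the most recently freed buffers stay on the local list and are reused first, while colder buffers migrate through the global pool to whichever thread runs dry.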