Diffstat (limited to 'src/vlib/buffer_funcs.h')
 -rw-r--r--  src/vlib/buffer_funcs.h | 62
 1 file changed, 40 insertions(+), 22 deletions(-)
diff --git a/src/vlib/buffer_funcs.h b/src/vlib/buffer_funcs.h
index 7480326ee3d..2ba9f1cb894 100644
--- a/src/vlib/buffer_funcs.h
+++ b/src/vlib/buffer_funcs.h
@@ -491,19 +491,19 @@ vlib_buffer_pool_get (vlib_main_t * vm, u8 buffer_pool_index, u32 * buffers,
   ASSERT (bp->buffers);

   clib_spinlock_lock (&bp->lock);
-  len = vec_len (bp->buffers);
+  len = bp->n_avail;
   if (PREDICT_TRUE (n_buffers < len))
     {
       len -= n_buffers;
       vlib_buffer_copy_indices (buffers, bp->buffers + len, n_buffers);
-      _vec_len (bp->buffers) = len;
+      bp->n_avail = len;
       clib_spinlock_unlock (&bp->lock);
       return n_buffers;
     }
   else
     {
       vlib_buffer_copy_indices (buffers, bp->buffers, len);
-      _vec_len (bp->buffers) = 0;
+      bp->n_avail = 0;
       clib_spinlock_unlock (&bp->lock);
       return len;
     }
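
A note on the pattern in the hunk above: the growable clib vector (vec_len / _vec_len) is replaced by a preallocated array plus an explicit n_avail count, so the locked fast path updates a plain counter instead of rewriting the vector header. Below is a minimal standalone sketch of the same idea; fixed_pool_t, pool_get_indices and the pthread mutex are illustrative stand-ins for vlib_buffer_pool_t, vlib_buffer_pool_get and clib_spinlock_t, not VPP API.

    #include <string.h>
    #include <pthread.h>

    typedef unsigned int u32;

    /* Fixed-capacity pool of buffer indices: the array is preallocated,
       and n_avail replaces vec_len (bp->buffers). */
    typedef struct
    {
      pthread_mutex_t lock;     /* stand-in for clib_spinlock_t */
      u32 *entries;             /* preallocated to 'capacity' slots */
      u32 capacity;
      u32 n_avail;              /* number of indices currently stored */
    } fixed_pool_t;

    /* Take up to n indices from the tail of the pool; returns how many
       were actually taken (mirrors vlib_buffer_pool_get above). */
    static u32
    pool_get_indices (fixed_pool_t * p, u32 * dst, u32 n)
    {
      u32 len;

      pthread_mutex_lock (&p->lock);
      len = p->n_avail;
      if (n < len)
        {
          len -= n;
          memcpy (dst, p->entries + len, n * sizeof (u32));
          p->n_avail = len;     /* plain store; no vector header to touch */
          pthread_mutex_unlock (&p->lock);
          return n;
        }
      /* pool nearly drained: hand out whatever is left */
      memcpy (dst, p->entries, len * sizeof (u32));
      p->n_avail = 0;
      pthread_mutex_unlock (&p->lock);
      return len;
    }
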
@@ -533,14 +533,26 @@ vlib_buffer_alloc_from_pool (vlib_main_t * vm, u32 * buffers, u32 n_buffers,

   dst = buffers;
   n_left = n_buffers;
-  len = vec_len (bpt->cached_buffers);
+  len = bpt->n_cached;

   /* per-thread cache contains enough buffers */
   if (len >= n_buffers)
     {
       src = bpt->cached_buffers + len - n_buffers;
       vlib_buffer_copy_indices (dst, src, n_buffers);
-      _vec_len (bpt->cached_buffers) -= n_buffers;
+      bpt->n_cached -= n_buffers;
+
+      if (CLIB_DEBUG > 0)
+        vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
+                                         VLIB_BUFFER_KNOWN_FREE);
+      return n_buffers;
+    }
+
+  /* alloc bigger than cache - take buffers directly from main pool */
+  if (n_buffers >= VLIB_BUFFER_POOL_PER_THREAD_CACHE_SZ)
+    {
+      n_buffers = vlib_buffer_pool_get (vm, buffer_pool_index, buffers,
+                                        n_buffers);

       if (CLIB_DEBUG > 0)
         vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
@@ -552,23 +564,22 @@ vlib_buffer_alloc_from_pool (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
   if (len)
     {
       vlib_buffer_copy_indices (dst, bpt->cached_buffers, len);
-      _vec_len (bpt->cached_buffers) = 0;
+      bpt->n_cached = 0;
       dst += len;
       n_left -= len;
     }

   len = round_pow2 (n_left, 32);
-  vec_validate_aligned (bpt->cached_buffers, len - 1, CLIB_CACHE_LINE_BYTES);
   len = vlib_buffer_pool_get (vm, buffer_pool_index, bpt->cached_buffers,
                               len);
-  _vec_len (bpt->cached_buffers) = len;
+  bpt->n_cached = len;

   if (len)
     {
       u32 n_copy = clib_min (len, n_left);
       src = bpt->cached_buffers + len - n_copy;
       vlib_buffer_copy_indices (dst, src, n_copy);
-      _vec_len (bpt->cached_buffers) -= n_copy;
+      bpt->n_cached -= n_copy;
       n_left -= n_copy;
     }

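
Taken together, the two hunks above leave the alloc path with three cases: serve the request entirely from the per-thread cache, bypass the cache for requests of VLIB_BUFFER_POOL_PER_THREAD_CACHE_SZ buffers or more, or drain the cache and refill it from the global pool. Note also the dropped vec_validate_aligned: cached_buffers is now a fixed-size array, so nothing needs resizing before the refill. A condensed sketch of that control flow, building on the illustrative fixed_pool_t above; CACHE_SZ and thread_cache_t are likewise assumed names:

    /* Stand-in for VLIB_BUFFER_POOL_PER_THREAD_CACHE_SZ; assumed to be a
       multiple of 32 so the rounded-up refill below always fits. */
    #define CACHE_SZ 512

    typedef struct
    {
      u32 cached[CACHE_SZ];     /* fixed array, no vec_validate_aligned */
      u32 n_cached;
    } thread_cache_t;

    static u32
    alloc_indices (fixed_pool_t * pool, thread_cache_t * tc, u32 * dst, u32 n)
    {
      u32 len = tc->n_cached;

      /* case 1: per-thread cache alone can satisfy the request */
      if (len >= n)
        {
          memcpy (dst, tc->cached + len - n, n * sizeof (u32));
          tc->n_cached -= n;
          return n;
        }

      /* case 2: request at least as big as the whole cache -
         take indices directly from the global pool */
      if (n >= CACHE_SZ)
        return pool_get_indices (pool, dst, n);

      /* case 3: drain whatever the cache holds... */
      u32 n_left = n - len;
      memcpy (dst, tc->cached, len * sizeof (u32));
      dst += len;
      tc->n_cached = 0;

      /* ...then refill it in multiples of 32, like round_pow2 (n_left, 32);
         n < CACHE_SZ here, so 'want' cannot overflow the cache array */
      u32 want = (n_left + 31) & ~31u;
      u32 got = pool_get_indices (pool, tc->cached, want);
      tc->n_cached = got;

      if (got)
        {
          u32 n_copy = got < n_left ? got : n_left;
          memcpy (dst, tc->cached + got - n_copy, n_copy * sizeof (u32));
          tc->n_cached -= n_copy;
          n_left -= n_copy;
        }
      return n - n_left;        /* may be partial if the pool ran dry */
    }
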
@@ -681,26 +692,33 @@ vlib_buffer_pool_put (vlib_main_t * vm, u8 buffer_pool_index,
                       u32 * buffers, u32 n_buffers)
 {
   vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, buffer_pool_index);
-  vlib_buffer_pool_thread_t *bpt =
-    vec_elt_at_index (bp->threads, vm->thread_index);
+  vlib_buffer_pool_thread_t *bpt = vec_elt_at_index (bp->threads,
+                                                     vm->thread_index);
+  u32 n_cached, n_empty;

   if (CLIB_DEBUG > 0)
     vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
                                      VLIB_BUFFER_KNOWN_ALLOCATED);

-  vec_add_aligned (bpt->cached_buffers, buffers, n_buffers,
-                   CLIB_CACHE_LINE_BYTES);
-
-  if (vec_len (bpt->cached_buffers) > 4 * VLIB_FRAME_SIZE)
+  n_cached = bpt->n_cached;
+  n_empty = VLIB_BUFFER_POOL_PER_THREAD_CACHE_SZ - n_cached;
+  if (n_buffers <= n_empty)
     {
-      clib_spinlock_lock (&bp->lock);
-      /* keep last stored buffers, as they are more likely hot in the cache */
-      vec_add_aligned (bp->buffers, bpt->cached_buffers, VLIB_FRAME_SIZE,
-                       CLIB_CACHE_LINE_BYTES);
-      vec_delete (bpt->cached_buffers, VLIB_FRAME_SIZE, 0);
-      bpt->n_alloc -= VLIB_FRAME_SIZE;
-      clib_spinlock_unlock (&bp->lock);
+      vlib_buffer_copy_indices (bpt->cached_buffers + n_cached,
+                                buffers, n_buffers);
+      bpt->n_cached = n_cached + n_buffers;
+      return;
     }
+
+  vlib_buffer_copy_indices (bpt->cached_buffers + n_cached,
+                            buffers + n_buffers - n_empty, n_empty);
+  bpt->n_cached = VLIB_BUFFER_POOL_PER_THREAD_CACHE_SZ;
+
+  clib_spinlock_lock (&bp->lock);
+  vlib_buffer_copy_indices (bp->buffers + bp->n_avail, buffers,
+                            n_buffers - n_empty);
+  bp->n_avail += n_buffers - n_empty;
+  clib_spinlock_unlock (&bp->lock);
 }

 static_always_inline void
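
The rewritten vlib_buffer_pool_put caps the per-thread cache at VLIB_BUFFER_POOL_PER_THREAD_CACHE_SZ: in the common case the whole batch is appended to the cache without taking any lock, and only the overflow is returned to the global pool under the spinlock. The split also preserves the intent of the deleted "keep last stored buffers" comment, since the tail of the batch (the most recently freed, likely cache-hot indices) is what stays thread-local. Continuing the same illustrative sketch:

    static void
    free_indices (fixed_pool_t * pool, thread_cache_t * tc,
                  u32 * buffers, u32 n)
    {
      u32 n_cached = tc->n_cached;
      u32 n_empty = CACHE_SZ - n_cached;

      /* common case: everything fits in the cache - no lock taken */
      if (n <= n_empty)
        {
          memcpy (tc->cached + n_cached, buffers, n * sizeof (u32));
          tc->n_cached = n_cached + n;
          return;
        }

      /* keep the tail of the batch (most recently freed) thread-local */
      memcpy (tc->cached + n_cached, buffers + n - n_empty,
              n_empty * sizeof (u32));
      tc->n_cached = CACHE_SZ;

      /* spill the head back to the global pool; only this path locks */
      pthread_mutex_lock (&pool->lock);
      memcpy (pool->entries + pool->n_avail, buffers,
              (n - n_empty) * sizeof (u32));
      pool->n_avail += n - n_empty;
      pthread_mutex_unlock (&pool->lock);
    }

Unlike the old code, which flushed a fixed VLIB_FRAME_SIZE chunk once the cache grew past 4 * VLIB_FRAME_SIZE, the new version never lets the cache exceed its fixed capacity, so no vec_delete or reshuffling is ever needed.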