author    Damjan Marion <damarion@cisco.com>  2017-02-05 23:44:42 +0100
committer Dave Barach <openvpp@barachs.net>   2017-02-06 13:54:50 +0000
commit    bd69a5f24c6e83e9101f203dd124864fb2877a17 (patch)
tree      7c0c896d5cd5abea4cbe75dbe979bbdaa3f60b3a /src
parent    63205141704cb1adafd1b5108f787e640eda71e9 (diff)
vlib: remove aligned/unaligned buffers scheme
Change-Id: I4433eaed3f4e201edc329c4842cbbf74beb19a9a
Signed-off-by: Damjan Marion <damarion@cisco.com>
Diffstat (limited to 'src')
-rw-r--r--   src/vlib/buffer.c               220
-rw-r--r--   src/vlib/buffer.h                13
-rw-r--r--   src/vlib/buffer_funcs.h          53
-rw-r--r--   src/vlib/threads.c                3
-rw-r--r--   src/vnet/devices/dpdk/buffer.c  131
-rw-r--r--   src/vnet/replication.c           23
6 files changed, 57 insertions(+), 386 deletions(-)
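
Before the diffs, a minimal sketch of the data-structure change this commit makes (types reduced; the real definitions are in src/vlib/buffer.h): the per-free-list pair of vectors used by the aligned/unaligned copy scheme collapses into a single vector of buffer indices, grown with cache-line alignment.

    /* Sketch only: reduced from vlib_buffer_free_list_t. */
    typedef unsigned int u32;

    /* Before: two vectors, so bulk copies could move naturally aligned
       vlib_copy_unit_t (u8x16 or u64) chunks of indices. */
    typedef struct
    {
      u32 *aligned_buffers;   /* multiples of BUFFERS_PER_COPY indices */
      u32 *unaligned_buffers; /* the leftovers */
    } free_list_before_t;

    /* After: one vector; vec_add*_aligned () grows it on
       CLIB_CACHE_LINE_BYTES boundaries and clib_memcpy () moves it. */
    typedef struct
    {
      u32 *buffers;
    } free_list_after_t;
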
diff --git a/src/vlib/buffer.c b/src/vlib/buffer.c
index ea4960e2a6a..95b4344f10b 100644
--- a/src/vlib/buffer.c
+++ b/src/vlib/buffer.c
@@ -304,63 +304,6 @@ vlib_buffer_validate_alloc_free (vlib_main_t * vm,
}
}
-#define BUFFERS_PER_COPY (sizeof (vlib_copy_unit_t) / sizeof (u32))
-
-/* Make sure we have at least given number of unaligned buffers. */
-void
-vlib_buffer_free_list_fill_unaligned (vlib_main_t * vm,
- vlib_buffer_free_list_t * free_list,
- uword n_unaligned_buffers)
-{
- word la = vec_len (free_list->aligned_buffers);
- word lu = vec_len (free_list->unaligned_buffers);
-
- /* Aligned come in aligned copy-sized chunks. */
- ASSERT (la % BUFFERS_PER_COPY == 0);
-
- ASSERT (la >= n_unaligned_buffers);
-
- while (lu < n_unaligned_buffers)
- {
- /* Copy 4 buffers from end of aligned vector to unaligned vector. */
- vec_add (free_list->unaligned_buffers,
- free_list->aligned_buffers + la - BUFFERS_PER_COPY,
- BUFFERS_PER_COPY);
- la -= BUFFERS_PER_COPY;
- lu += BUFFERS_PER_COPY;
- }
- _vec_len (free_list->aligned_buffers) = la;
-}
-
-/* After free aligned buffers may not contain even sized chunks. */
-void
-vlib_buffer_free_list_trim_aligned (vlib_buffer_free_list_t * f)
-{
- uword l, n_trim;
-
- /* Add unaligned to aligned before trim. */
- l = vec_len (f->unaligned_buffers);
- if (l > 0)
- {
- vec_add_aligned (f->aligned_buffers, f->unaligned_buffers, l,
- /* align */ sizeof (vlib_copy_unit_t));
-
- _vec_len (f->unaligned_buffers) = 0;
- }
-
- /* Remove unaligned buffers from end of aligned vector and save for next trim. */
- l = vec_len (f->aligned_buffers);
- n_trim = l % BUFFERS_PER_COPY;
- if (n_trim)
- {
- /* Trim aligned -> unaligned. */
- vec_add (f->unaligned_buffers, f->aligned_buffers + l - n_trim, n_trim);
-
- /* Remove from aligned. */
- _vec_len (f->aligned_buffers) = l - n_trim;
- }
-}
-
void
vlib_buffer_merge_free_lists (vlib_buffer_free_list_t * dst,
vlib_buffer_free_list_t * src)
@@ -368,23 +311,12 @@ vlib_buffer_merge_free_lists (vlib_buffer_free_list_t * dst,
uword l;
u32 *d;
- vlib_buffer_free_list_trim_aligned (src);
- vlib_buffer_free_list_trim_aligned (dst);
-
- l = vec_len (src->aligned_buffers);
- if (l > 0)
- {
- vec_add2_aligned (dst->aligned_buffers, d, l,
- /* align */ sizeof (vlib_copy_unit_t));
- clib_memcpy (d, src->aligned_buffers, l * sizeof (d[0]));
- vec_free (src->aligned_buffers);
- }
-
- l = vec_len (src->unaligned_buffers);
+ l = vec_len (src->buffers);
if (l > 0)
{
- vec_add (dst->unaligned_buffers, src->unaligned_buffers, l);
- vec_free (src->unaligned_buffers);
+ vec_add2_aligned (dst->buffers, d, l, CLIB_CACHE_LINE_BYTES);
+ clib_memcpy (d, src->buffers, l * sizeof (d[0]));
+ vec_free (src->buffers);
}
}
@@ -447,8 +379,7 @@ vlib_buffer_create_free_list_helper (vlib_main_t * vm,
ASSERT (f - bm->buffer_free_list_pool ==
wf - wbm->buffer_free_list_pool);
wf[0] = f[0];
- wf->aligned_buffers = 0;
- wf->unaligned_buffers = 0;
+ wf->buffers = 0;
wf->n_alloc = 0;
}
@@ -505,8 +436,7 @@ del_free_list (vlib_main_t * vm, vlib_buffer_free_list_t * f)
vm->os_physmem_free (f->buffer_memory_allocated[i]);
vec_free (f->name);
vec_free (f->buffer_memory_allocated);
- vec_free (f->unaligned_buffers);
- vec_free (f->aligned_buffers);
+ vec_free (f->buffers);
}
/* Add buffer free list. */
@@ -522,8 +452,7 @@ vlib_buffer_delete_free_list_internal (vlib_main_t * vm, u32 free_list_index)
f = vlib_buffer_get_free_list (vm, free_list_index);
- ASSERT (vec_len (f->unaligned_buffers) + vec_len (f->aligned_buffers) ==
- f->n_alloc);
+ ASSERT (vec_len (f->buffers) == f->n_alloc);
merge_index = vlib_buffer_get_free_list_with_size (vm, f->n_data_bytes);
if (merge_index != ~0 && merge_index != free_list_index)
{
@@ -558,15 +487,13 @@ fill_free_list (vlib_main_t * vm,
u32 *bi;
u32 n_remaining, n_alloc, n_this_chunk;
- vlib_buffer_free_list_trim_aligned (fl);
-
/* Already have enough free buffers on free list? */
- n = min_free_buffers - vec_len (fl->aligned_buffers);
+ n = min_free_buffers - vec_len (fl->buffers);
if (n <= 0)
return min_free_buffers;
/* Always allocate round number of buffers. */
- n = round_pow2 (n, BUFFERS_PER_COPY);
+ n = round_pow2 (n, CLIB_CACHE_LINE_BYTES / sizeof (u32));
/* Always allocate new buffers in reasonably large sized chunks. */
n = clib_max (n, fl->min_n_buffers_each_physmem_alloc);
@@ -594,8 +521,7 @@ fill_free_list (vlib_main_t * vm,
n_remaining -= n_this_chunk;
b = buffers;
- vec_add2_aligned (fl->aligned_buffers, bi, n_this_chunk,
- sizeof (vlib_copy_unit_t));
+ vec_add2_aligned (fl->buffers, bi, n_this_chunk, CLIB_CACHE_LINE_BYTES);
for (i = 0; i < n_this_chunk; i++)
{
bi[i] = vlib_get_buffer_index (vm, b);
@@ -621,121 +547,28 @@ fill_free_list (vlib_main_t * vm,
return n_alloc;
}
-always_inline uword
-copy_alignment (u32 * x)
-{
- return (pointer_to_uword (x) / sizeof (x[0])) % BUFFERS_PER_COPY;
-}
-
-
static u32
alloc_from_free_list (vlib_main_t * vm,
vlib_buffer_free_list_t * free_list,
u32 * alloc_buffers, u32 n_alloc_buffers)
{
- u32 *dst, *u_src;
- uword u_len, n_left;
- uword n_unaligned_start, n_unaligned_end, n_filled;
+ u32 *dst, *src;
+ uword len;
+ uword n_filled;
- n_left = n_alloc_buffers;
dst = alloc_buffers;
- n_unaligned_start = ((BUFFERS_PER_COPY - copy_alignment (dst))
- & (BUFFERS_PER_COPY - 1));
n_filled = fill_free_list (vm, free_list, n_alloc_buffers);
if (n_filled == 0)
return 0;
- n_left = n_filled < n_left ? n_filled : n_left;
- n_alloc_buffers = n_left;
-
- if (n_unaligned_start >= n_left)
- {
- n_unaligned_start = n_left;
- n_unaligned_end = 0;
- }
- else
- n_unaligned_end = copy_alignment (dst + n_alloc_buffers);
-
- vlib_buffer_free_list_fill_unaligned (vm, free_list,
- n_unaligned_start + n_unaligned_end);
-
- u_len = vec_len (free_list->unaligned_buffers);
- u_src = free_list->unaligned_buffers + u_len - 1;
+ len = vec_len (free_list->buffers);
+ ASSERT (len >= n_alloc_buffers);
- if (n_unaligned_start)
- {
- uword n_copy = n_unaligned_start;
- if (n_copy > n_left)
- n_copy = n_left;
- n_left -= n_copy;
-
- while (n_copy > 0)
- {
- *dst++ = *u_src--;
- n_copy--;
- u_len--;
- }
-
- /* Now dst should be aligned. */
- if (n_left > 0)
- ASSERT (pointer_to_uword (dst) % sizeof (vlib_copy_unit_t) == 0);
- }
-
- /* Aligned copy. */
- {
- vlib_copy_unit_t *d, *s;
- uword n_copy;
-
- if (vec_len (free_list->aligned_buffers) <
- ((n_left / BUFFERS_PER_COPY) * BUFFERS_PER_COPY))
- abort ();
-
- n_copy = n_left / BUFFERS_PER_COPY;
- n_left = n_left % BUFFERS_PER_COPY;
-
- /* Remove buffers from aligned free list. */
- _vec_len (free_list->aligned_buffers) -= n_copy * BUFFERS_PER_COPY;
-
- s = (vlib_copy_unit_t *) vec_end (free_list->aligned_buffers);
- d = (vlib_copy_unit_t *) dst;
-
- /* Fast path loop. */
- while (n_copy >= 4)
- {
- d[0] = s[0];
- d[1] = s[1];
- d[2] = s[2];
- d[3] = s[3];
- n_copy -= 4;
- s += 4;
- d += 4;
- }
-
- while (n_copy >= 1)
- {
- d[0] = s[0];
- n_copy -= 1;
- s += 1;
- d += 1;
- }
-
- dst = (void *) d;
- }
-
- /* Unaligned copy. */
- ASSERT (n_unaligned_end == n_left);
- while (n_left > 0)
- {
- *dst++ = *u_src--;
- n_left--;
- u_len--;
- }
+ src = free_list->buffers + len - n_alloc_buffers;
+ clib_memcpy (dst, src, n_alloc_buffers * sizeof (u32));
- if (!free_list->unaligned_buffers)
- ASSERT (u_len == 0);
- else
- _vec_len (free_list->unaligned_buffers) = u_len;
+ _vec_len (free_list->buffers) -= n_alloc_buffers;
/* Verify that buffers are known free. */
vlib_buffer_validate_alloc_free (vm, alloc_buffers,
@@ -831,8 +664,7 @@ again:
vlib_buffer_validate_alloc_free (vm, b,
n_left, VLIB_BUFFER_KNOWN_ALLOCATED);
- vec_add2_aligned (fl->aligned_buffers, f, n_left,
- /* align */ sizeof (vlib_copy_unit_t));
+ vec_add2_aligned (fl->buffers, f, n_left, CLIB_CACHE_LINE_BYTES);
n = next_to_free[i_next_to_free];
while (n_left >= 4)
@@ -890,7 +722,7 @@ again:
f -= 2;
n -= free_next0 + free_next1;
- _vec_len (fl->aligned_buffers) = f - fl->aligned_buffers;
+ _vec_len (fl->buffers) = f - fl->buffers;
fl0 = pool_elt_at_index (bm->buffer_free_list_pool, fi0);
fl1 = pool_elt_at_index (bm->buffer_free_list_pool, fi1);
@@ -924,8 +756,7 @@ again:
fl = pool_elt_at_index (bm->buffer_free_list_pool, fi);
}
- vec_add2_aligned (fl->aligned_buffers, f, n_left,
- /* align */ sizeof (vlib_copy_unit_t));
+ vec_add2_aligned (fl->buffers, f, n_left, CLIB_CACHE_LINE_BYTES);
}
while (n_left >= 1)
@@ -968,7 +799,7 @@ again:
f -= 1;
n -= free_next0;
- _vec_len (fl->aligned_buffers) = f - fl->aligned_buffers;
+ _vec_len (fl->buffers) = f - fl->buffers;
fl0 = pool_elt_at_index (bm->buffer_free_list_pool, fi0);
@@ -986,8 +817,7 @@ again:
fi = fi0;
fl = pool_elt_at_index (bm->buffer_free_list_pool, fi);
- vec_add2_aligned (fl->aligned_buffers, f, n_left,
- /* align */ sizeof (vlib_copy_unit_t));
+ vec_add2_aligned (fl->buffers, f, n_left, CLIB_CACHE_LINE_BYTES);
}
if (follow_buffer_next && ((n_left = n - next_to_free[i_next_to_free]) > 0))
@@ -997,7 +827,7 @@ again:
goto again;
}
- _vec_len (fl->aligned_buffers) = f - fl->aligned_buffers;
+ _vec_len (fl->buffers) = f - fl->buffers;
if (vec_len (announce_list))
{
@@ -1239,7 +1069,7 @@ format_vlib_buffer_free_list (u8 * s, va_list * va)
"#Alloc", "#Free");
size = sizeof (vlib_buffer_t) + f->n_data_bytes;
- n_free = vec_len (f->aligned_buffers) + vec_len (f->unaligned_buffers);
+ n_free = vec_len (f->buffers);
bytes_alloc = size * f->n_alloc;
bytes_free = size * n_free;
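
The largest simplification above is in alloc_from_free_list (): the old split aligned/unaligned copy, with its unrolled fast-path loop, becomes a single copy from the tail of the vector. A standalone sketch of that logic, using plain C and memcpy in place of the vppinfra vec macros (names here are illustrative, not vlib API):

    #include <stdint.h>
    #include <string.h>

    typedef uint32_t u32;

    /* Pop the last n indices off the free vector into dst and shrink
       the vector; *len plays the role of _vec_len (fl->buffers).  The
       real code ASSERTs len >= n after fill_free_list () tops it up. */
    static unsigned
    alloc_from_tail (u32 *free_vec, unsigned *len, u32 *dst, unsigned n)
    {
      if (n > *len)
        n = *len;
      memcpy (dst, free_vec + *len - n, n * sizeof (u32));
      *len -= n;
      return n;
    }
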
diff --git a/src/vlib/buffer.h b/src/vlib/buffer.h
index d270c08a602..fffb50c8fe5 100644
--- a/src/vlib/buffer.h
+++ b/src/vlib/buffer.h
@@ -50,12 +50,6 @@
#define VLIB_BUFFER_DATA_SIZE (2048)
#define VLIB_BUFFER_PRE_DATA_SIZE __PRE_DATA_SIZE
-#if defined (CLIB_HAVE_VEC128) || defined (__aarch64__)
-typedef u8x16 vlib_copy_unit_t;
-#else
-typedef u64 vlib_copy_unit_t;
-#endif
-
/** \file
vlib buffer structure definition and a few select
access methods. This structure and the buffer allocation
@@ -262,11 +256,8 @@ typedef struct vlib_buffer_free_list_t
/* Total number of buffers allocated from this free list. */
u32 n_alloc;
- /* Vector of free buffers. Each element is a byte offset into I/O heap.
- Aligned vectors always has naturally aligned vlib_copy_unit_t sized chunks
- of buffer indices. Unaligned vector has any left over. This is meant to
- speed up copy routines. */
- u32 *aligned_buffers, *unaligned_buffers;
+ /* Vector of free buffers. Each element is a byte offset into I/O heap. */
+ u32 *buffers;
/* Memory chunks allocated for this free list
recorded here so they can be freed when free list
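
One numeric consequence of the removed typedef: BUFFERS_PER_COPY was sizeof (vlib_copy_unit_t) / sizeof (u32), i.e. 4 with u8x16 or 2 with u64, while the new rounding unit CLIB_CACHE_LINE_BYTES / sizeof (u32) is 16 on 64-byte cache lines, so fill_free_list () now rounds allocations up to a full cache line of indices. A quick check, assuming 64-byte lines:

    #include <assert.h>
    #include <stdint.h>

    typedef uint32_t u32;
    typedef uint64_t u64;
    #define CLIB_CACHE_LINE_BYTES 64 /* assumed; platform-dependent */

    int
    main (void)
    {
      assert (16 / sizeof (u32) == 4);                     /* old, u8x16 */
      assert (sizeof (u64) / sizeof (u32) == 2);           /* old, u64 */
      assert (CLIB_CACHE_LINE_BYTES / sizeof (u32) == 16); /* new */
      return 0;
    }
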
diff --git a/src/vlib/buffer_funcs.h b/src/vlib/buffer_funcs.h
index 543a903c54d..fd051de53de 100644
--- a/src/vlib/buffer_funcs.h
+++ b/src/vlib/buffer_funcs.h
@@ -350,10 +350,6 @@ vlib_buffer_delete_free_list (vlib_main_t * vm, u32 free_list_index)
u32 vlib_buffer_get_or_create_free_list (vlib_main_t * vm, u32 n_data_bytes,
char *fmt, ...);
-
-/* After free aligned buffers may not contain even sized chunks. */
-void vlib_buffer_free_list_trim_aligned (vlib_buffer_free_list_t * f);
-
/* Merge two free lists */
void vlib_buffer_merge_free_lists (vlib_buffer_free_list_t * dst,
vlib_buffer_free_list_t * src);
@@ -664,23 +660,14 @@ unserialize_vlib_buffer_n_bytes (serialize_main_t * m)
return n;
}
-typedef union
-{
- vlib_buffer_t b;
- vlib_copy_unit_t i[sizeof (vlib_buffer_t) / sizeof (vlib_copy_unit_t)];
-}
-vlib_buffer_union_t;
-
/* Set a buffer quickly into "uninitialized" state. We want this to
be extremely cheap and arrange for all fields that need to be
initialized to be in the first 128 bits of the buffer. */
always_inline void
-vlib_buffer_init_for_free_list (vlib_buffer_t * _dst,
+vlib_buffer_init_for_free_list (vlib_buffer_t * dst,
vlib_buffer_free_list_t * fl)
{
- vlib_buffer_union_t *dst = (vlib_buffer_union_t *) _dst;
- vlib_buffer_union_t *src =
- (vlib_buffer_union_t *) & fl->buffer_init_template;
+ vlib_buffer_t *src = &fl->buffer_init_template;
/* Make sure vlib_buffer_t is cacheline aligned and sized */
ASSERT (STRUCT_OFFSET_OF (vlib_buffer_t, cacheline0) == 0);
@@ -692,21 +679,14 @@ vlib_buffer_init_for_free_list (vlib_buffer_t * _dst,
/* Make sure buffer template is sane. */
ASSERT (fl->index == fl->buffer_init_template.free_list_index);
- /* Copy template from src->current_data thru src->free_list_index */
- dst->i[0] = src->i[0];
- if (1 * sizeof (dst->i[0]) < 16)
- dst->i[1] = src->i[1];
- if (2 * sizeof (dst->i[0]) < 16)
- dst->i[2] = src->i[2];
-
/* Make sure it really worked. */
-#define _(f) ASSERT (dst->b.f == src->b.f)
+#define _(f) dst->f = src->f
_(current_data);
_(current_length);
_(flags);
_(free_list_index);
#undef _
- ASSERT (dst->b.total_length_not_including_first_buffer == 0);
+ ASSERT (dst->total_length_not_including_first_buffer == 0);
}
always_inline void
@@ -718,39 +698,28 @@ vlib_buffer_add_to_free_list (vlib_main_t * vm,
b = vlib_get_buffer (vm, buffer_index);
if (PREDICT_TRUE (do_init))
vlib_buffer_init_for_free_list (b, f);
- vec_add1_aligned (f->aligned_buffers, buffer_index,
- sizeof (vlib_copy_unit_t));
+ vec_add1_aligned (f->buffers, buffer_index, CLIB_CACHE_LINE_BYTES);
}
always_inline void
-vlib_buffer_init_two_for_free_list (vlib_buffer_t * _dst0,
- vlib_buffer_t * _dst1,
+vlib_buffer_init_two_for_free_list (vlib_buffer_t * dst0,
+ vlib_buffer_t * dst1,
vlib_buffer_free_list_t * fl)
{
- vlib_buffer_union_t *dst0 = (vlib_buffer_union_t *) _dst0;
- vlib_buffer_union_t *dst1 = (vlib_buffer_union_t *) _dst1;
- vlib_buffer_union_t *src =
- (vlib_buffer_union_t *) & fl->buffer_init_template;
+ vlib_buffer_t *src = &fl->buffer_init_template;
/* Make sure buffer template is sane. */
ASSERT (fl->index == fl->buffer_init_template.free_list_index);
- /* Copy template from src->current_data thru src->free_list_index */
- dst0->i[0] = dst1->i[0] = src->i[0];
- if (1 * sizeof (dst0->i[0]) < 16)
- dst0->i[1] = dst1->i[1] = src->i[1];
- if (2 * sizeof (dst0->i[0]) < 16)
- dst0->i[2] = dst1->i[2] = src->i[2];
-
/* Make sure it really worked. */
-#define _(f) ASSERT (dst0->b.f == src->b.f && dst1->b.f == src->b.f)
+#define _(f) dst0->f = src->f; dst1->f = src->f
_(current_data);
_(current_length);
_(flags);
_(free_list_index);
#undef _
- ASSERT (dst0->b.total_length_not_including_first_buffer == 0);
- ASSERT (dst1->b.total_length_not_including_first_buffer == 0);
+ ASSERT (dst0->total_length_not_including_first_buffer == 0);
+ ASSERT (dst1->total_length_not_including_first_buffer == 0);
}
#if CLIB_DEBUG > 0
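
In buffer_funcs.h the union-based 16-byte template copy disappears: the metadata fields are assigned directly, and the #define _(f) macro body flips from an ASSERT into the copy itself. A reduced, self-contained sketch of the resulting initializer (buf_meta_t is a hypothetical stand-in for the vlib_buffer_t fields the template initializes):

    #include <stdint.h>

    typedef struct
    {
      int16_t current_data;    /* stand-ins for the vlib_buffer_t */
      uint16_t current_length; /* metadata the template initializes */
      uint32_t flags;
      uint32_t free_list_index;
    } buf_meta_t;

    #define foreach_template_field \
      _ (current_data) _ (current_length) _ (flags) _ (free_list_index)

    /* Copy the template field by field, as the patched
       vlib_buffer_init_for_free_list () now does. */
    static inline void
    init_from_template (buf_meta_t *dst, const buf_meta_t *src)
    {
    #define _(f) dst->f = src->f;
      foreach_template_field
    #undef _
    }
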
diff --git a/src/vlib/threads.c b/src/vlib/threads.c
index b3bbd30ee10..e3ea3c9cb47 100644
--- a/src/vlib/threads.c
+++ b/src/vlib/threads.c
@@ -708,8 +708,7 @@ start_workers (vlib_main_t * vm)
== fl_clone - bm_clone->buffer_free_list_pool);
fl_clone[0] = fl_orig[0];
- fl_clone->aligned_buffers = 0;
- fl_clone->unaligned_buffers = 0;
+ fl_clone->buffers = 0;
fl_clone->n_alloc = 0;
}));
/* *INDENT-ON* */
diff --git a/src/vnet/devices/dpdk/buffer.c b/src/vnet/devices/dpdk/buffer.c
index 038f46d9859..43ceb91e773 100644
--- a/src/vnet/devices/dpdk/buffer.c
+++ b/src/vnet/devices/dpdk/buffer.c
@@ -79,8 +79,6 @@
STATIC_ASSERT (VLIB_BUFFER_PRE_DATA_SIZE == RTE_PKTMBUF_HEADROOM,
"VLIB_BUFFER_PRE_DATA_SIZE must be equal to RTE_PKTMBUF_HEADROOM");
-#define BUFFERS_PER_COPY (sizeof (vlib_copy_unit_t) / sizeof (u32))
-
static void
del_free_list (vlib_main_t * vm, vlib_buffer_free_list_t * f)
{
@@ -88,23 +86,15 @@ del_free_list (vlib_main_t * vm, vlib_buffer_free_list_t * f)
struct rte_mbuf *mb;
vlib_buffer_t *b;
- for (i = 0; i < vec_len (f->unaligned_buffers); i++)
+ for (i = 0; i < vec_len (f->buffers); i++)
{
- b = vlib_get_buffer (vm, f->unaligned_buffers[i]);
- mb = rte_mbuf_from_vlib_buffer (b);
- ASSERT (rte_mbuf_refcnt_read (mb) == 1);
- rte_pktmbuf_free (mb);
- }
- for (i = 0; i < vec_len (f->aligned_buffers); i++)
- {
- b = vlib_get_buffer (vm, f->aligned_buffers[i]);
+ b = vlib_get_buffer (vm, f->buffers[i]);
mb = rte_mbuf_from_vlib_buffer (b);
ASSERT (rte_mbuf_refcnt_read (mb) == 1);
rte_pktmbuf_free (mb);
}
vec_free (f->name);
- vec_free (f->unaligned_buffers);
- vec_free (f->aligned_buffers);
+ vec_free (f->buffers);
}
/* Add buffer free list. */
@@ -162,15 +152,13 @@ fill_free_list (vlib_main_t * vm,
if (PREDICT_FALSE (rmp == 0))
return 0;
- vlib_buffer_free_list_trim_aligned (fl);
-
/* Already have enough free buffers on free list? */
- n = min_free_buffers - vec_len (fl->aligned_buffers);
+ n = min_free_buffers - vec_len (fl->buffers);
if (n <= 0)
return min_free_buffers;
/* Always allocate round number of buffers. */
- n = round_pow2 (n, BUFFERS_PER_COPY);
+ n = round_pow2 (n, CLIB_CACHE_LINE_BYTES / sizeof (u32));
/* Always allocate new buffers in reasonably large sized chunks. */
n = clib_max (n, fl->min_n_buffers_each_physmem_alloc);
@@ -192,7 +180,7 @@ fill_free_list (vlib_main_t * vm,
b = vlib_buffer_from_rte_mbuf (mb);
bi = vlib_get_buffer_index (vm, b);
- vec_add1_aligned (fl->aligned_buffers, bi, sizeof (vlib_copy_unit_t));
+ vec_add1_aligned (fl->buffers, bi, CLIB_CACHE_LINE_BYTES);
n_alloc++;
n_remaining--;
@@ -207,120 +195,27 @@ fill_free_list (vlib_main_t * vm,
return n;
}
-always_inline uword
-copy_alignment (u32 * x)
-{
- return (pointer_to_uword (x) / sizeof (x[0])) % BUFFERS_PER_COPY;
-}
-
static u32
alloc_from_free_list (vlib_main_t * vm,
vlib_buffer_free_list_t * free_list,
u32 * alloc_buffers, u32 n_alloc_buffers)
{
- u32 *dst, *u_src;
- uword u_len, n_left;
- uword n_unaligned_start, n_unaligned_end, n_filled;
+ u32 *dst, *src;
+ uword len, n_filled;
- n_left = n_alloc_buffers;
dst = alloc_buffers;
- n_unaligned_start = ((BUFFERS_PER_COPY - copy_alignment (dst))
- & (BUFFERS_PER_COPY - 1));
n_filled = fill_free_list (vm, free_list, n_alloc_buffers);
if (n_filled == 0)
return 0;
- n_left = n_filled < n_left ? n_filled : n_left;
- n_alloc_buffers = n_left;
-
- if (n_unaligned_start >= n_left)
- {
- n_unaligned_start = n_left;
- n_unaligned_end = 0;
- }
- else
- n_unaligned_end = copy_alignment (dst + n_alloc_buffers);
-
- vlib_buffer_free_list_fill_unaligned (vm, free_list,
- n_unaligned_start + n_unaligned_end);
-
- u_len = vec_len (free_list->unaligned_buffers);
- u_src = free_list->unaligned_buffers + u_len - 1;
-
- if (n_unaligned_start)
- {
- uword n_copy = n_unaligned_start;
- if (n_copy > n_left)
- n_copy = n_left;
- n_left -= n_copy;
-
- while (n_copy > 0)
- {
- *dst++ = *u_src--;
- n_copy--;
- u_len--;
- }
-
- /* Now dst should be aligned. */
- if (n_left > 0)
- ASSERT (pointer_to_uword (dst) % sizeof (vlib_copy_unit_t) == 0);
- }
-
- /* Aligned copy. */
- {
- vlib_copy_unit_t *d, *s;
- uword n_copy;
-
- if (vec_len (free_list->aligned_buffers) <
- ((n_left / BUFFERS_PER_COPY) * BUFFERS_PER_COPY))
- abort ();
-
- n_copy = n_left / BUFFERS_PER_COPY;
- n_left = n_left % BUFFERS_PER_COPY;
-
- /* Remove buffers from aligned free list. */
- _vec_len (free_list->aligned_buffers) -= n_copy * BUFFERS_PER_COPY;
-
- s = (vlib_copy_unit_t *) vec_end (free_list->aligned_buffers);
- d = (vlib_copy_unit_t *) dst;
+ len = vec_len (free_list->buffers);
+ ASSERT (len >= n_alloc_buffers);
- /* Fast path loop. */
- while (n_copy >= 4)
- {
- d[0] = s[0];
- d[1] = s[1];
- d[2] = s[2];
- d[3] = s[3];
- n_copy -= 4;
- s += 4;
- d += 4;
- }
-
- while (n_copy >= 1)
- {
- d[0] = s[0];
- n_copy -= 1;
- s += 1;
- d += 1;
- }
-
- dst = (void *) d;
- }
-
- /* Unaligned copy. */
- ASSERT (n_unaligned_end == n_left);
- while (n_left > 0)
- {
- *dst++ = *u_src--;
- n_left--;
- u_len--;
- }
+ src = free_list->buffers + len - n_alloc_buffers;
+ clib_memcpy (dst, src, n_alloc_buffers * sizeof (u32));
- if (!free_list->unaligned_buffers)
- ASSERT (u_len == 0);
- else
- _vec_len (free_list->unaligned_buffers) = u_len;
+ _vec_len (free_list->buffers) -= n_alloc_buffers;
return n_alloc_buffers;
}
diff --git a/src/vnet/replication.c b/src/vnet/replication.c
index 561c86cdfa4..02755195ac9 100644
--- a/src/vnet/replication.c
+++ b/src/vnet/replication.c
@@ -168,32 +168,20 @@ replication_recycle_callback (vlib_main_t * vm, vlib_buffer_free_list_t * fl)
* Note: this could be sped up if the node index were stuffed into
* the freelist itself.
*/
- if (vec_len (fl->aligned_buffers) > 0)
+ if (vec_len (fl->buffers) > 0)
{
- bi0 = fl->aligned_buffers[0];
- b0 = vlib_get_buffer (vm, bi0);
- ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->recycle_count);
- feature_node_index = ctx->recycle_node_index;
- }
- else if (vec_len (fl->unaligned_buffers) > 0)
- {
- bi0 = fl->unaligned_buffers[0];
+ bi0 = fl->buffers[0];
b0 = vlib_get_buffer (vm, bi0);
ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->recycle_count);
feature_node_index = ctx->recycle_node_index;
}
- /* aligned, unaligned buffers */
+ /* buffers */
for (i = 0; i < 2; i++)
{
if (i == 0)
{
- from = fl->aligned_buffers;
- n_left_from = vec_len (from);
- }
- else
- {
- from = fl->unaligned_buffers;
+ from = fl->buffers;
n_left_from = vec_len (from);
}
@@ -245,8 +233,7 @@ replication_recycle_callback (vlib_main_t * vm, vlib_buffer_free_list_t * fl)
}
}
- vec_reset_length (fl->aligned_buffers);
- vec_reset_length (fl->unaligned_buffers);
+ vec_reset_length (fl->buffers);
if (f)
{