author     Damjan Marion <damarion@cisco.com>       2017-01-25 14:18:03 +0100
committer  Dave Barach <openvpp@barachs.net>        2017-02-28 21:12:48 +0000
commit     c47ed032c6d036a9f942fc9ced48874fad55b48c (patch)
tree       f983e46b265200797648f9d5ec6be51b659a41e5 /src
parent     05472b625fc401f1514a148f7122e6c3c571765a (diff)
vlib: add buffer cloning support
Change-Id: I50070611af15b2b4cc29664a8bee4f821ac3c835
Signed-off-by: Damjan Marion <damarion@cisco.com>
Diffstat (limited to 'src')
-rw-r--r--   src/scripts/vnet/mcast/ip4      |  19
-rw-r--r--   src/vlib/buffer.c               | 254
-rw-r--r--   src/vlib/buffer.h               |   4
-rw-r--r--   src/vlib/buffer_funcs.h         | 113
-rw-r--r--   src/vnet/devices/dpdk/buffer.c  |  41
-rw-r--r--   src/vnet/devices/dpdk/device.c  |  11
-rw-r--r--   src/vnet/dpo/replicate_dpo.c    |  76
-rw-r--r--   src/vnet/dpo/replicate_dpo.h    |   3
8 files changed, 256 insertions, 265 deletions
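
The heart of the change is a new inline, vlib_buffer_clone(), backed by an n_add_refs reference count added to the buffer metadata: each clone gets a private copy of the packet head and shares the rest of the chain until the last reference is freed. A minimal usage sketch follows; only vlib_buffer_clone() and n_add_refs come from this patch, while vm, bi0 and the clone count of 8 are assumed caller context.

/* Hypothetical caller of the new cloning API introduced by this commit. */
u32 clones[8];
u8 n_cloned;

/* Each clone gets a private copy of the first 128 bytes (enough to
 * rewrite headers); the remainder of the packet is shared and reference
 * counted via n_add_refs.  The source must not already be cloned. */
n_cloned = vlib_buffer_clone (vm, bi0, clones, 8, 128);

/* n_cloned may be less than requested if allocation fails; the clones
 * are enqueued and later released through the normal vlib_buffer_free()
 * path, which now honours the reference count. */
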
diff --git a/src/scripts/vnet/mcast/ip4 b/src/scripts/vnet/mcast/ip4
index 69f1ee00fa8..eb6bab27bc6 100644
--- a/src/scripts/vnet/mcast/ip4
+++ b/src/scripts/vnet/mcast/ip4
@@ -2,7 +2,7 @@ packet-generator new {
name x
limit 1
node ip4-input
- size 64-64
+ size 512-512
no-recycle
data {
ICMP: 1.0.0.2 -> 232.1.1.1
@@ -11,12 +11,15 @@ packet-generator new {
}
}
-trace add pg-input 100
-loop create
-loop create
-set int state loop0 up
-set int state loop1 up
+create packet-generator interface pg1
+create packet-generator interface pg2
+create packet-generator interface pg3
+
+set int state pg1 up
+set int state pg2 up
+set int state pg3 up
ip mroute add 232.1.1.1 via pg0 Accept
-ip mroute add 232.1.1.1 via loop0 Forward
-ip mroute add 232.1.1.1 via loop1 Forward
+ip mroute add 232.1.1.1 via pg1 Forward
+ip mroute add 232.1.1.1 via pg2 Forward
+ip mroute add 232.1.1.1 via pg3 Forward
diff --git a/src/vlib/buffer.c b/src/vlib/buffer.c
index 95b4344f10b..4f5eb09da91 100644
--- a/src/vlib/buffer.c
+++ b/src/vlib/buffer.c
@@ -68,8 +68,9 @@ format_vlib_buffer (u8 * s, va_list * args)
vlib_buffer_t *b = va_arg (*args, vlib_buffer_t *);
uword indent = format_get_indent (s);
- s = format (s, "current data %d, length %d, free-list %d",
- b->current_data, b->current_length, b->free_list_index);
+ s = format (s, "current data %d, length %d, free-list %d, clone-count %u",
+ b->current_data, b->current_length, b->free_list_index,
+ b->n_add_refs);
if (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID)
s = format (s, ", totlen-nifb %d",
@@ -84,8 +85,10 @@ format_vlib_buffer (u8 * s, va_list * args)
u32 next_buffer = b->next_buffer;
b = vlib_get_buffer (vm, next_buffer);
- s = format (s, "\n%Unext-buffer 0x%x, segment length %d",
- format_white_space, indent, next_buffer, b->current_length);
+ s =
+ format (s, "\n%Unext-buffer 0x%x, segment length %d, clone-count %u",
+ format_white_space, indent, next_buffer, b->current_length,
+ b->n_add_refs);
}
return s;
@@ -262,7 +265,7 @@ vlib_main_t **vlib_mains;
/* When dubugging validate that given buffers are either known allocated
or known free. */
-static void __attribute__ ((unused))
+static void
vlib_buffer_validate_alloc_free (vlib_main_t * vm,
u32 * buffers,
uword n_buffers,
@@ -362,6 +365,7 @@ vlib_buffer_create_free_list_helper (vlib_main_t * vm,
/* Setup free buffer template. */
f->buffer_init_template.free_list_index = f->index;
+ f->buffer_init_template.n_add_refs = 0;
if (is_public)
{
@@ -620,19 +624,11 @@ vlib_buffer_free_inline (vlib_main_t * vm,
{
vlib_buffer_main_t *bm = vm->buffer_main;
vlib_buffer_free_list_t *fl;
- static u32 *next_to_free[2]; /* smp bad */
- u32 i_next_to_free, *b, *n, *f, fi;
- uword n_left;
+ u32 fi;
int i;
- static vlib_buffer_free_list_t **announce_list;
- vlib_buffer_free_list_t *fl0 = 0, *fl1 = 0;
- u32 bi0 = (u32) ~ 0, bi1 = (u32) ~ 0, fi0, fi1 = (u32) ~ 0;
- u8 free0, free1 = 0, free_next0, free_next1;
u32 (*cb) (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
u32 follow_buffer_next);
- ASSERT (os_get_cpu_number () == 0);
-
cb = bm->buffer_free_callback;
if (PREDICT_FALSE (cb != 0))
@@ -641,203 +637,68 @@ vlib_buffer_free_inline (vlib_main_t * vm,
if (!n_buffers)
return;
- /* Use first buffer to get default free list. */
- {
- u32 bi0 = buffers[0];
- vlib_buffer_t *b0;
-
- b0 = vlib_get_buffer (vm, bi0);
- fl = vlib_buffer_get_buffer_free_list (vm, b0, &fi);
- if (fl->buffers_added_to_freelist_function)
- vec_add1 (announce_list, fl);
- }
-
- vec_validate (next_to_free[0], n_buffers - 1);
- vec_validate (next_to_free[1], n_buffers - 1);
-
- i_next_to_free = 0;
- n_left = n_buffers;
- b = buffers;
-
-again:
- /* Verify that buffers are known allocated. */
- vlib_buffer_validate_alloc_free (vm, b,
- n_left, VLIB_BUFFER_KNOWN_ALLOCATED);
-
- vec_add2_aligned (fl->buffers, f, n_left, CLIB_CACHE_LINE_BYTES);
-
- n = next_to_free[i_next_to_free];
- while (n_left >= 4)
- {
- vlib_buffer_t *b0, *b1, *binit0, *binit1, dummy_buffers[2];
-
- bi0 = b[0];
- bi1 = b[1];
-
- f[0] = bi0;
- f[1] = bi1;
- f += 2;
- b += 2;
- n_left -= 2;
-
- /* Prefetch buffers for next iteration. */
- vlib_prefetch_buffer_with_index (vm, b[0], WRITE);
- vlib_prefetch_buffer_with_index (vm, b[1], WRITE);
-
- b0 = vlib_get_buffer (vm, bi0);
- b1 = vlib_get_buffer (vm, bi1);
-
- free0 = (b0->flags & VLIB_BUFFER_RECYCLE) == 0;
- free1 = (b1->flags & VLIB_BUFFER_RECYCLE) == 0;
-
- /* Must be before init which will over-write buffer flags. */
- if (follow_buffer_next)
- {
- n[0] = b0->next_buffer;
- free_next0 = free0 && (b0->flags & VLIB_BUFFER_NEXT_PRESENT) != 0;
- n += free_next0;
-
- n[0] = b1->next_buffer;
- free_next1 = free1 && (b1->flags & VLIB_BUFFER_NEXT_PRESENT) != 0;
- n += free_next1;
- }
- else
- free_next0 = free_next1 = 0;
-
- /* Must be before init which will over-write buffer free list. */
- fi0 = b0->free_list_index;
- fi1 = b1->free_list_index;
-
- if (PREDICT_FALSE (fi0 != fi || fi1 != fi))
- goto slow_path_x2;
-
- binit0 = free0 ? b0 : &dummy_buffers[0];
- binit1 = free1 ? b1 : &dummy_buffers[1];
-
- vlib_buffer_init_two_for_free_list (binit0, binit1, fl);
- continue;
-
- slow_path_x2:
- /* Backup speculation. */
- f -= 2;
- n -= free_next0 + free_next1;
-
- _vec_len (fl->buffers) = f - fl->buffers;
-
- fl0 = pool_elt_at_index (bm->buffer_free_list_pool, fi0);
- fl1 = pool_elt_at_index (bm->buffer_free_list_pool, fi1);
-
- vlib_buffer_add_to_free_list (vm, fl0, bi0, free0);
- if (PREDICT_FALSE (fl0->buffers_added_to_freelist_function != 0))
- {
- int i;
- for (i = 0; i < vec_len (announce_list); i++)
- if (fl0 == announce_list[i])
- goto no_fl0;
- vec_add1 (announce_list, fl0);
- }
- no_fl0:
- if (PREDICT_FALSE (fl1->buffers_added_to_freelist_function != 0))
- {
- int i;
- for (i = 0; i < vec_len (announce_list); i++)
- if (fl1 == announce_list[i])
- goto no_fl1;
- vec_add1 (announce_list, fl1);
- }
-
- no_fl1:
- vlib_buffer_add_to_free_list (vm, fl1, bi1, free1);
-
- /* Possibly change current free list. */
- if (fi0 != fi && fi1 != fi)
- {
- fi = fi1;
- fl = pool_elt_at_index (bm->buffer_free_list_pool, fi);
- }
-
- vec_add2_aligned (fl->buffers, f, n_left, CLIB_CACHE_LINE_BYTES);
- }
-
- while (n_left >= 1)
+ for (i = 0; i < n_buffers; i++)
{
- vlib_buffer_t *b0, *binit0, dummy_buffers[1];
+ vlib_buffer_t *b;
+ u32 bi = buffers[i];
- bi0 = b[0];
- f[0] = bi0;
- f += 1;
- b += 1;
- n_left -= 1;
-
- b0 = vlib_get_buffer (vm, bi0);
+ b = vlib_get_buffer (vm, bi);
- free0 = (b0->flags & VLIB_BUFFER_RECYCLE) == 0;
+ fl = vlib_buffer_get_buffer_free_list (vm, b, &fi);
- /* Must be before init which will over-write buffer flags. */
- if (follow_buffer_next)
+ /* The only current use of this callback: multicast recycle */
+ if (PREDICT_FALSE (fl->buffers_added_to_freelist_function != 0))
{
- n[0] = b0->next_buffer;
- free_next0 = free0 && (b0->flags & VLIB_BUFFER_NEXT_PRESENT) != 0;
- n += free_next0;
+ int j;
+
+ vlib_buffer_add_to_free_list
+ (vm, fl, buffers[i], (b->flags & VLIB_BUFFER_RECYCLE) == 0);
+
+ for (j = 0; j < vec_len (bm->announce_list); j++)
+ {
+ if (fl == bm->announce_list[j])
+ goto already_announced;
+ }
+ vec_add1 (bm->announce_list, fl);
+ already_announced:
+ ;
}
else
- free_next0 = 0;
-
- /* Must be before init which will over-write buffer free list. */
- fi0 = b0->free_list_index;
-
- if (PREDICT_FALSE (fi0 != fi))
- goto slow_path_x1;
-
- binit0 = free0 ? b0 : &dummy_buffers[0];
-
- vlib_buffer_init_for_free_list (binit0, fl);
- continue;
-
- slow_path_x1:
- /* Backup speculation. */
- f -= 1;
- n -= free_next0;
-
- _vec_len (fl->buffers) = f - fl->buffers;
-
- fl0 = pool_elt_at_index (bm->buffer_free_list_pool, fi0);
-
- vlib_buffer_add_to_free_list (vm, fl0, bi0, free0);
- if (PREDICT_FALSE (fl0->buffers_added_to_freelist_function != 0))
{
- int i;
- for (i = 0; i < vec_len (announce_list); i++)
- if (fl0 == announce_list[i])
- goto no_fl00;
- vec_add1 (announce_list, fl0);
+ if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_RECYCLE) == 0))
+ {
+ u32 flags, next;
+
+ do
+ {
+ vlib_buffer_t *nb = vlib_get_buffer (vm, bi);
+ flags = nb->flags;
+ next = nb->next_buffer;
+ if (nb->n_add_refs)
+ nb->n_add_refs--;
+ else
+ {
+ vlib_buffer_validate_alloc_free (vm, &bi, 1,
+ VLIB_BUFFER_KNOWN_ALLOCATED);
+ vlib_buffer_add_to_free_list (vm, fl, bi, 1);
+ }
+ bi = next;
+ }
+ while (follow_buffer_next
+ && (flags & VLIB_BUFFER_NEXT_PRESENT));
+
+ }
}
-
- no_fl00:
- fi = fi0;
- fl = pool_elt_at_index (bm->buffer_free_list_pool, fi);
-
- vec_add2_aligned (fl->buffers, f, n_left, CLIB_CACHE_LINE_BYTES);
}
-
- if (follow_buffer_next && ((n_left = n - next_to_free[i_next_to_free]) > 0))
- {
- b = next_to_free[i_next_to_free];
- i_next_to_free ^= 1;
- goto again;
- }
-
- _vec_len (fl->buffers) = f - fl->buffers;
-
- if (vec_len (announce_list))
+ if (vec_len (bm->announce_list))
{
vlib_buffer_free_list_t *fl;
- for (i = 0; i < vec_len (announce_list); i++)
+ for (i = 0; i < vec_len (bm->announce_list); i++)
{
- fl = announce_list[i];
+ fl = bm->announce_list[i];
fl->buffers_added_to_freelist_function (vm, fl);
}
- _vec_len (announce_list) = 0;
+ _vec_len (bm->announce_list) = 0;
}
}
@@ -922,6 +783,7 @@ vlib_packet_template_init (vlib_main_t * vm,
fl->buffer_init_template.current_data = 0;
fl->buffer_init_template.current_length = n_packet_data_bytes;
fl->buffer_init_template.flags = 0;
+ fl->buffer_init_template.n_add_refs = 0;
vlib_worker_thread_barrier_release (vm);
}
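
The rewritten vlib_buffer_free_inline() above drops the old speculative two-buffer fast path in favour of a plain per-buffer walk; stripped of the recycle flag and the multicast-recycle announce callback, the rule it applies to each segment reduces to the sketch below.

/* Simplified per-segment free rule (sketch; RECYCLE handling and the
 * free-list announce callback are omitted). */
if (b->n_add_refs)
  b->n_add_refs--;                               /* clones still hold it */
else
  vlib_buffer_add_to_free_list (vm, fl, bi, 1);  /* last reference */
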
diff --git a/src/vlib/buffer.h b/src/vlib/buffer.h
index 8ea79502455..b4015b302c3 100644
--- a/src/vlib/buffer.h
+++ b/src/vlib/buffer.h
@@ -119,7 +119,9 @@ typedef struct
feature node
*/
- u8 dont_waste_me[3]; /**< Available space in the (precious)
+ u8 n_add_refs; /**< Number of additional references to this buffer. */
+
+ u8 dont_waste_me[2]; /**< Available space in the (precious)
first 32 octets of buffer metadata
Before allocating any of it, discussion required!
*/
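
The new n_add_refs field takes one of the three previously unused metadata bytes, so the accounting adds no cache footprint; a segment now carries 1 + n_add_refs outstanding references, and the u8 width caps it at 255 additional clones. Taking an extra reference is a single atomic, exactly what vlib_buffer_attach_clone() does per tail segment in the next file.

/* Take one additional reference on a shared segment (sketch). */
__sync_add_and_fetch (&b->n_add_refs, 1);
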
diff --git a/src/vlib/buffer_funcs.h b/src/vlib/buffer_funcs.h
index 0b583a61994..e0fde5f2194 100644
--- a/src/vlib/buffer_funcs.h
+++ b/src/vlib/buffer_funcs.h
@@ -530,6 +530,110 @@ vlib_buffer_copy (vlib_main_t * vm, vlib_buffer_t * b)
return fd;
}
+/** \brief Create multiple clones of buffer and store them in the supplied array
+
+ @param vm - (vlib_main_t *) vlib main data structure pointer
+ @param src_buffer - (u32) source buffer index
+ @param buffers - (u32 * ) buffer index array
+ @param n_buffers - (u8) number of buffer clones requested
+ @param head_end_offset - (u16) offset relative to current position
+ where packet head ends
+ @return - (u8) number of buffers actually cloned, may be
+ less than the number requested or zero
+*/
+
+always_inline u8
+vlib_buffer_clone (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
+ u8 n_buffers, u16 head_end_offset)
+{
+ u8 i;
+ vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);
+
+ ASSERT (s->n_add_refs == 0);
+ ASSERT (n_buffers);
+
+ if (s->current_length <= head_end_offset + CLIB_CACHE_LINE_BYTES * 2)
+ {
+ buffers[0] = src_buffer;
+ for (i = 1; i < n_buffers; i++)
+ {
+ vlib_buffer_t *d;
+ d = vlib_buffer_copy (vm, s);
+ if (d == 0)
+ return i;
+ buffers[i] = vlib_get_buffer_index (vm, d);
+
+ }
+ return n_buffers;
+ }
+
+ n_buffers = vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
+ s->free_list_index);
+ if (PREDICT_FALSE (n_buffers == 0))
+ {
+ buffers[0] = src_buffer;
+ return 1;
+ }
+
+ for (i = 0; i < n_buffers; i++)
+ {
+ vlib_buffer_t *d = vlib_get_buffer (vm, buffers[i]);
+ d->current_data = s->current_data;
+ d->current_length = head_end_offset;
+ d->free_list_index = s->free_list_index;
+ d->total_length_not_including_first_buffer =
+ s->total_length_not_including_first_buffer + s->current_length -
+ head_end_offset;
+ d->flags = s->flags | VLIB_BUFFER_NEXT_PRESENT;
+ d->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
+ clib_memcpy (d->opaque, s->opaque, sizeof (s->opaque));
+ clib_memcpy (vlib_buffer_get_current (d), vlib_buffer_get_current (s),
+ head_end_offset);
+ d->next_buffer = src_buffer;
+ }
+ vlib_buffer_advance (s, head_end_offset);
+ s->n_add_refs = n_buffers - 1;
+ while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
+ {
+ s = vlib_get_buffer (vm, s->next_buffer);
+ s->n_add_refs = n_buffers - 1;
+ }
+
+ return n_buffers;
+}
+
+/** \brief Attach cloned tail to the buffer
+
+ @param vm - (vlib_main_t *) vlib main data structure pointer
+ @param head - (vlib_buffer_t *) head buffer
+ @param tail - (Vlib buffer_t *) tail buffer to clone and attach to head
+*/
+
+always_inline void
+vlib_buffer_attach_clone (vlib_main_t * vm, vlib_buffer_t * head,
+ vlib_buffer_t * tail)
+{
+ ASSERT ((head->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
+ ASSERT (head->free_list_index == tail->free_list_index);
+
+ head->flags |= VLIB_BUFFER_NEXT_PRESENT;
+ head->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
+ head->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
+ head->flags |= (tail->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID);
+ head->next_buffer = vlib_get_buffer_index (vm, tail);
+ head->total_length_not_including_first_buffer = tail->current_length +
+ tail->total_length_not_including_first_buffer;
+
+next_segment:
+ __sync_add_and_fetch (&tail->n_add_refs, 1);
+
+ if (tail->flags & VLIB_BUFFER_NEXT_PRESENT)
+ {
+ tail = vlib_get_buffer (vm, tail->next_buffer);
+ goto next_segment;
+ }
+}
+
/* Initializes the buffer as an empty packet with no chained buffers. */
always_inline void
vlib_buffer_chain_init (vlib_buffer_t * first)
@@ -695,7 +799,8 @@ vlib_buffer_init_for_free_list (vlib_buffer_t * dst,
_(flags);
_(free_list_index);
#undef _
- ASSERT (dst->total_length_not_including_first_buffer == 0);
+ dst->total_length_not_including_first_buffer = 0;
+ ASSERT (dst->n_add_refs == 0);
}
always_inline void
@@ -727,8 +832,10 @@ vlib_buffer_init_two_for_free_list (vlib_buffer_t * dst0,
_(flags);
_(free_list_index);
#undef _
- ASSERT (dst0->total_length_not_including_first_buffer == 0);
- ASSERT (dst1->total_length_not_including_first_buffer == 0);
+ dst0->total_length_not_including_first_buffer = 0;
+ dst1->total_length_not_including_first_buffer = 0;
+ ASSERT (dst0->n_add_refs == 0);
+ ASSERT (dst1->n_add_refs == 0);
}
#if CLIB_DEBUG > 0
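
A hedged sketch of how the two new helpers combine: vlib_buffer_clone() hands out fresh head buffers that all chain to the shared, reference-counted source, while vlib_buffer_attach_clone() grafts an existing chain onto a separately built head. Here vm, bi0, head and tail are assumed caller context, and 64 bytes of private header is an arbitrary example value.

/* Fan a packet out to up to 4 destinations, rewriting only the first
 * 64 bytes per copy. */
u32 clones[4];
u8 i, n;

n = vlib_buffer_clone (vm, bi0, clones, 4, 64 /* private head bytes */);
for (i = 0; i < n; i++)
  {
    vlib_buffer_t *c = vlib_get_buffer (vm, clones[i]);
    /* rewrite the first 64 bytes of c here; data past the private head
     * is shared with the other clones and must be treated as read-only */
  }

/* Or attach an already built, possibly multi-segment tail to a fresh
 * head; every tail segment gets its n_add_refs bumped atomically. */
vlib_buffer_attach_clone (vm, head, tail);
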
diff --git a/src/vnet/devices/dpdk/buffer.c b/src/vnet/devices/dpdk/buffer.c
index 007093e493a..f95d4cb5c38 100644
--- a/src/vnet/devices/dpdk/buffer.c
+++ b/src/vnet/devices/dpdk/buffer.c
@@ -79,20 +79,46 @@
STATIC_ASSERT (VLIB_BUFFER_PRE_DATA_SIZE == RTE_PKTMBUF_HEADROOM,
"VLIB_BUFFER_PRE_DATA_SIZE must be equal to RTE_PKTMBUF_HEADROOM");
+static_always_inline void
+dpdk_rte_pktmbuf_free (vlib_main_t * vm, vlib_buffer_t * b)
+{
+ vlib_buffer_t *hb = b;
+ struct rte_mbuf *mb;
+ u32 next, flags;
+ mb = rte_mbuf_from_vlib_buffer (hb);
+
+next:
+ flags = b->flags;
+ next = b->next_buffer;
+ mb = rte_mbuf_from_vlib_buffer (b);
+
+ if (PREDICT_FALSE (b->n_add_refs))
+ {
+ rte_mbuf_refcnt_update (mb, b->n_add_refs);
+ b->n_add_refs = 0;
+ }
+
+ rte_pktmbuf_free_seg (mb);
+
+ if (flags & VLIB_BUFFER_NEXT_PRESENT)
+ {
+ b = vlib_get_buffer (vm, next);
+ goto next;
+ }
+}
+
static void
del_free_list (vlib_main_t * vm, vlib_buffer_free_list_t * f)
{
u32 i;
- struct rte_mbuf *mb;
vlib_buffer_t *b;
for (i = 0; i < vec_len (f->buffers); i++)
{
b = vlib_get_buffer (vm, f->buffers[i]);
- mb = rte_mbuf_from_vlib_buffer (b);
- ASSERT (rte_mbuf_refcnt_read (mb) == 1);
- rte_pktmbuf_free (mb);
+ dpdk_rte_pktmbuf_free (vm, b);
}
+
vec_free (f->name);
vec_free (f->buffers);
}
@@ -325,7 +351,6 @@ vlib_buffer_free_inline (vlib_main_t * vm,
for (i = 0; i < n_buffers; i++)
{
vlib_buffer_t *b;
- struct rte_mbuf *mb;
b = vlib_get_buffer (vm, buffers[i]);
@@ -351,11 +376,7 @@ vlib_buffer_free_inline (vlib_main_t * vm,
else
{
if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_RECYCLE) == 0))
- {
- mb = rte_mbuf_from_vlib_buffer (b);
- ASSERT (rte_mbuf_refcnt_read (mb) == 1);
- rte_pktmbuf_free (mb);
- }
+ dpdk_rte_pktmbuf_free (vm, b);
}
}
if (vec_len (bm->announce_list))
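
On the DPDK buffer manager side the same accounting is folded into the mbuf reference count: dpdk_rte_pktmbuf_free() walks the segment chain and transfers any outstanding vlib-side references before releasing each segment, so rte_pktmbuf_free_seg() only returns it to the pool once the last clone is gone. The essential hand-off, with the chain walk omitted, is:

/* Fold the vlib-side clone count into DPDK's own refcounting (sketch). */
if (PREDICT_FALSE (b->n_add_refs))
  {
    rte_mbuf_refcnt_update (mb, b->n_add_refs);
    b->n_add_refs = 0;
  }
rte_pktmbuf_free_seg (mb);   /* actually frees only when refcnt hits 0 */
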
diff --git a/src/vnet/devices/dpdk/device.c b/src/vnet/devices/dpdk/device.c
index c9d9a567b7a..17397900c59 100644
--- a/src/vnet/devices/dpdk/device.c
+++ b/src/vnet/devices/dpdk/device.c
@@ -168,13 +168,11 @@ dpdk_validate_rte_mbuf (vlib_main_t * vm, vlib_buffer_t * b,
{
b2 = vlib_get_buffer (vm, b2->next_buffer);
mb = rte_mbuf_from_vlib_buffer (b2);
- last_mb->next = mb;
- last_mb = mb;
rte_pktmbuf_reset (mb);
}
}
- first_mb = mb = rte_mbuf_from_vlib_buffer (b);
+ last_mb = first_mb = mb = rte_mbuf_from_vlib_buffer (b);
first_mb->nb_segs = 1;
mb->data_len = b->current_length;
mb->pkt_len = maybe_multiseg ? vlib_buffer_length_in_chain (vm, b) :
@@ -185,10 +183,17 @@ dpdk_validate_rte_mbuf (vlib_main_t * vm, vlib_buffer_t * b,
{
b = vlib_get_buffer (vm, b->next_buffer);
mb = rte_mbuf_from_vlib_buffer (b);
+ last_mb->next = mb;
+ last_mb = mb;
mb->data_len = b->current_length;
mb->pkt_len = b->current_length;
mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;
first_mb->nb_segs++;
+ if (PREDICT_FALSE (b->n_add_refs))
+ {
+ rte_mbuf_refcnt_update (mb, b->n_add_refs);
+ b->n_add_refs = 0;
+ }
}
}
diff --git a/src/vnet/dpo/replicate_dpo.c b/src/vnet/dpo/replicate_dpo.c
index a67b19c893f..a9f334be7cf 100644
--- a/src/vnet/dpo/replicate_dpo.c
+++ b/src/vnet/dpo/replicate_dpo.c
@@ -625,6 +625,7 @@ replicate_inline (vlib_main_t * vm,
vlib_frame_t * frame)
{
vlib_combined_counter_main_t * cm = &replicate_main.repm_counters;
+ replicate_main_t * rm = &replicate_main;
u32 n_left_from, * from, * to_next, next_index;
u32 cpu_index = os_get_cpu_number();
@@ -645,13 +646,11 @@ replicate_inline (vlib_main_t * vm,
const replicate_t *rep0;
vlib_buffer_t * b0, *c0;
const dpo_id_t *dpo0;
+ u8 num_cloned;
bi0 = from[0];
- to_next[0] = bi0;
from += 1;
- to_next += 1;
n_left_from -= 1;
- n_left_to_next -= 1;
b0 = vlib_get_buffer (vm, bi0);
repi0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
@@ -661,50 +660,21 @@ replicate_inline (vlib_main_t * vm,
cm, cpu_index, repi0, 1,
vlib_buffer_length_in_chain(vm, b0));
- /* ship the original to the first bucket */
- dpo0 = replicate_get_bucket_i(rep0, 0);
- next0 = dpo0->dpoi_next_node;
- vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+ vec_validate (rm->clones[cpu_index], rep0->rep_n_buckets - 1);
- if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
- {
- replicate_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
- t->rep_index = repi0;
- t->dpo = *dpo0;
- }
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, next0);
+ num_cloned = vlib_buffer_clone (vm, bi0, rm->clones[cpu_index], rep0->rep_n_buckets, 128);
- /* ship copies to the rest of the buckets */
- for (bucket = 1; bucket < rep0->rep_n_buckets; bucket++)
- {
- /*
- * After the enqueue of the first buffer, and of all subsequent
- * buffers in this loop, it is possible that we over-flow the
- * frame of the to-next node. When this happens we need to 'put'
- * that full frame to the node and get a fresh empty one.
- * Note that these are macros with side effects that change
- * to_next & n_left_to_next
- */
- if (PREDICT_FALSE(0 == n_left_to_next))
- {
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
- vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
- }
+ if (num_cloned != rep0->rep_n_buckets)
+ {
+ vlib_node_increment_counter
+ (vm, node->node_index,
+ REPLICATE_DPO_ERROR_BUFFER_ALLOCATION_FAILURE, 1);
+ }
- /* Make a copy. This can fail, so deal with it. */
- c0 = vlib_buffer_copy(vm, b0);
- if (PREDICT_FALSE (c0 == 0))
- {
- vlib_node_increment_counter
- (vm, node->node_index,
- REPLICATE_DPO_ERROR_BUFFER_ALLOCATION_FAILURE,
- 1);
- continue;
- }
-
- ci0 = vlib_get_buffer_index(vm, c0);
+ for (bucket = 0; bucket < num_cloned; bucket++)
+ {
+ ci0 = rm->clones[cpu_index][bucket];
+ c0 = vlib_get_buffer(vm, ci0);
to_next[0] = ci0;
to_next += 1;
@@ -724,7 +694,13 @@ replicate_inline (vlib_main_t * vm,
vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
to_next, n_left_to_next,
ci0, next0);
+ if (PREDICT_FALSE (n_left_to_next == 0))
+ {
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+ }
}
+ vec_reset_length (rm->clones[cpu_index]);
}
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
@@ -797,3 +773,15 @@ VLIB_REGISTER_NODE (ip6_replicate_node) = {
[0] = "error-drop",
},
};
+
+clib_error_t *
+replicate_dpo_init (vlib_main_t * vm)
+{
+ replicate_main_t * rm = &replicate_main;
+
+ vec_validate (rm->clones, vlib_num_workers());
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (replicate_dpo_init);
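
The replicate DPO is converted from copy-per-bucket to a single vlib_buffer_clone() call per packet, using a per-thread scratch vector (rm->clones[cpu_index]) so workers never contend on clone storage, and a 128-byte private head so each bucket can rewrite its VLIB_TX adjacency without touching shared data. A condensed view of the new inner loop, with tracing and counters omitted and the per-bucket DPO lookup taken from the surrounding, unchanged code:

/* Condensed sketch of the rewritten replicate path. */
vec_validate (rm->clones[cpu_index], rep0->rep_n_buckets - 1);
num_cloned = vlib_buffer_clone (vm, bi0, rm->clones[cpu_index],
                                rep0->rep_n_buckets, 128);
for (bucket = 0; bucket < num_cloned; bucket++)
  {
    ci0 = rm->clones[cpu_index][bucket];
    c0 = vlib_get_buffer (vm, ci0);
    dpo0 = replicate_get_bucket_i (rep0, bucket);
    vnet_buffer (c0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
    /* enqueue ci0 towards dpo0->dpoi_next_node as shown in the hunk */
  }
vec_reset_length (rm->clones[cpu_index]);
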
diff --git a/src/vnet/dpo/replicate_dpo.h b/src/vnet/dpo/replicate_dpo.h
index a564739c9f2..77273015c9e 100644
--- a/src/vnet/dpo/replicate_dpo.h
+++ b/src/vnet/dpo/replicate_dpo.h
@@ -32,6 +32,9 @@
typedef struct replicate_main_t_
{
vlib_combined_counter_main_t repm_counters;
+
+ /* per-cpu vector of cloned packets */
+ u32 **clones;
} replicate_main_t;
extern replicate_main_t replicate_main;