author     Dave Barach <dave@barachs.net>    2016-06-17 14:09:56 -0400
committer  Keith Burns <alagalah@gmail.com>  2016-06-18 01:01:36 +0000
commit     b5adaeab2fe89844db678a7a2d61d45b5d4cc4c8 (patch)
tree       0a892bdfe428756dde66fad98cc1b85fb516d020
parent     f289ca6bc6be76216a23320dab8aa4ce4b284c05 (diff)
Move pkt replication counter to the opaque2 cache line
Change-Id: I5e6edfd03ab41949be2c768dfe68aa824bbc1f38
Signed-off-by: Dave Barach <dave@barachs.net>
-rw-r--r--  vlib/vlib/buffer.c                  |  6
-rw-r--r--  vlib/vlib/buffer.h                  | 18
-rw-r--r--  vlib/vlib/dpdk_buffer.c             |  6
-rw-r--r--  vnet/vnet/devices/af_packet/node.c  |  1
-rw-r--r--  vnet/vnet/devices/dpdk/device.c     | 11
-rw-r--r--  vnet/vnet/devices/dpdk/node.c       |  2
-rw-r--r--  vnet/vnet/devices/netmap/node.c     |  1
-rw-r--r--  vnet/vnet/devices/ssvm/node.c       |  1
-rw-r--r--  vnet/vnet/mcast/mcast.c             |  6
-rw-r--r--  vnet/vnet/replication.c             | 11
-rw-r--r--  vnet/vnet/replication.h             |  3
-rw-r--r--  vnet/vnet/unix/tapcli.c             |  1
12 files changed, 33 insertions(+), 34 deletions(-)
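
The gist of the change, for readers skimming the diff: the per-buffer replication counter no longer lives as clone_count in the first (hot) cache line of buffer metadata. The free path now tests a VLIB_BUFFER_RECYCLE bit in flags, and the counter itself becomes recycle_count, carved out of opaque2 on the second cache line, which frees a word (dont_waste_me) in the first 32 octets. The following stand-alone sketch only illustrates that pattern; the struct is heavily abbreviated, the demo_* names are hypothetical, and the real layout is in vlib/vlib/buffer.h as shown in the diff below.

    #include <stdint.h>
    #include <stdio.h>

    /* Abbreviated stand-in for vlib_buffer_t; flag value taken from the diff. */
    #define VLIB_BUFFER_RECYCLE (1 << 5)

    typedef struct
    {
      /* Conceptually the first (hot) cache line, touched on every packet. */
      uint32_t flags;         /* VLIB_BUFFER_RECYCLE is now a single bit here      */
      uint32_t opaque[8];     /* feature-specific metadata                         */

      /* Conceptually the second cache line, touched on the replication path. */
      uint32_t recycle_count; /* replication counter / context id (was clone_count) */
      uint32_t opaque2[14];
    } demo_buffer_t;

    /* Producer side (mcast/replication code): mark the buffer for recycling. */
    static void
    demo_start_replication (demo_buffer_t * b, uint32_t n_replicas)
    {
      b->recycle_count = n_replicas;
      b->flags |= VLIB_BUFFER_RECYCLE;
    }

    /* Free path: the hot check reads only the flags word. */
    static int
    demo_buffer_should_free (const demo_buffer_t * b)
    {
      return (b->flags & VLIB_BUFFER_RECYCLE) == 0;
    }

    int
    main (void)
    {
      demo_buffer_t b = { 0 };

      printf ("free before replication? %d\n", demo_buffer_should_free (&b)); /* 1 */
      demo_start_replication (&b, 3);
      printf ("free during replication? %d\n", demo_buffer_should_free (&b)); /* 0 */
      return 0;
    }

The apparent benefit is that the buffer-free fast path no longer has to read a dedicated counter word that is almost always zero: the flag test piggybacks on flags, which the free path loads anyway, while the counter is only read by the recycle/replication nodes, which already touch the second cache line.
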
diff --git a/vlib/vlib/buffer.c b/vlib/vlib/buffer.c
index b7ae0a6fa34..f727e67a55a 100644
--- a/vlib/vlib/buffer.c
+++ b/vlib/vlib/buffer.c
@@ -901,8 +901,8 @@ vlib_buffer_free_inline (vlib_main_t * vm,
b0 = vlib_get_buffer (vm, bi0);
b1 = vlib_get_buffer (vm, bi1);
- free0 = b0->clone_count == 0;
- free1 = b1->clone_count == 0;
+ free0 = (b0->flags & VLIB_BUFFER_RECYCLE) == 0;
+ free1 = (b1->flags & VLIB_BUFFER_RECYCLE) == 0;
/* Must be before init which will over-write buffer flags. */
if (follow_buffer_next)
@@ -986,7 +986,7 @@ vlib_buffer_free_inline (vlib_main_t * vm,
b0 = vlib_get_buffer (vm, bi0);
- free0 = b0->clone_count == 0;
+ free0 = (b0->flags & VLIB_BUFFER_RECYCLE) == 0;
/* Must be before init which will over-write buffer flags. */
if (follow_buffer_next)
diff --git a/vlib/vlib/buffer.h b/vlib/vlib/buffer.h
index 52749572509..bc799bc6223 100644
--- a/vlib/vlib/buffer.h
+++ b/vlib/vlib/buffer.h
@@ -86,6 +86,7 @@ typedef struct {
<br> VLIB_BUFFER_NEXT_PRESENT: this is a multi-chunk buffer.
<br> VLIB_BUFFER_TOTAL_LENGTH_VALID: as it says
<br> VLIB_BUFFER_REPL_FAIL: packet replication failure
+ <br> VLIB_BUFFER_RECYCLE: as it says
<br> VLIB_BUFFER_FLAG_USER(n): user-defined bit N
*/
#define VLIB_BUFFER_IS_TRACED (1 << 0)
@@ -94,6 +95,7 @@ typedef struct {
#define VLIB_BUFFER_IS_RECYCLED (1 << 2)
#define VLIB_BUFFER_TOTAL_LENGTH_VALID (1 << 3)
#define VLIB_BUFFER_REPL_FAIL (1 << 4)
+#define VLIB_BUFFER_RECYCLE (1 << 5)
/* User defined buffer flags. */
#define LOG2_VLIB_BUFFER_FLAG_USER(n) (32 - (n))
@@ -112,14 +114,6 @@ typedef struct {
Only valid if VLIB_BUFFER_NEXT_PRESENT flag is set.
*/
- u32 clone_count; /**< Specifies whether this buffer should be
- reinitialized when freed. It will be reinitialized
- if the value is 0. This field can be used
- as a counter or for other state during packet
- replication. The buffer free function does not
- modify this value.
- */
-
vlib_error_t error; /**< Error code for buffers to be enqueued
to error handler.
*/
@@ -127,6 +121,11 @@ typedef struct {
visit enabled feature nodes
*/
+ u32 dont_waste_me; /**< Available space in the (precious)
+ first 32 octets of buffer metadata
+ Before allocating any of it, discussion required!
+ */
+
u32 opaque[8]; /**< Opaque data used by sub-graphs for their own purposes.
See .../vnet/vnet/buffer.h
*/
@@ -135,7 +134,8 @@ typedef struct {
u32 trace_index; /**< Specifies index into trace buffer
if VLIB_PACKET_IS_TRACED flag is set.
*/
- u32 opaque2[15]; /**< More opaque data, currently unused */
+ u32 recycle_count; /**< Used by L2 path recycle code */
+ u32 opaque2[14]; /**< More opaque data, currently unused */
/***** end of second cache line */
CLIB_CACHE_LINE_ALIGN_MARK(cacheline2);
diff --git a/vlib/vlib/dpdk_buffer.c b/vlib/vlib/dpdk_buffer.c
index ce0f32649aa..c0094938479 100644
--- a/vlib/vlib/dpdk_buffer.c
+++ b/vlib/vlib/dpdk_buffer.c
@@ -708,7 +708,9 @@ vlib_buffer_free_inline (vlib_main_t * vm,
{
int j;
- add_buffer_to_free_list (vm, fl, buffers[i], b->clone_count == 0);
+ add_buffer_to_free_list
+ (vm, fl, buffers[i],
+ (b->flags & VLIB_BUFFER_RECYCLE) == 0);
for (j = 0; j < vec_len (bm->announce_list); j++)
{
@@ -721,7 +723,7 @@ vlib_buffer_free_inline (vlib_main_t * vm,
}
else
{
- if (PREDICT_TRUE (b->clone_count == 0))
+ if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_RECYCLE) == 0))
{
mb = rte_mbuf_from_vlib_buffer(b);
ASSERT(rte_mbuf_refcnt_read(mb) == 1);
diff --git a/vnet/vnet/devices/af_packet/node.c b/vnet/vnet/devices/af_packet/node.c
index 0c608ea41bf..6e2ec46e7ef 100644
--- a/vnet/vnet/devices/af_packet/node.c
+++ b/vnet/vnet/devices/af_packet/node.c
@@ -183,7 +183,6 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
clib_memcpy (vlib_buffer_get_current (b0), (u8 *) tph + tph->tp_mac + offset, bytes_to_copy);
/* fill buffer header */
- b0->clone_count = 0;
b0->current_length = bytes_to_copy;
if (offset == 0)
diff --git a/vnet/vnet/devices/dpdk/device.c b/vnet/vnet/devices/dpdk/device.c
index f4d4fe779e1..b248f40f80f 100644
--- a/vnet/vnet/devices/dpdk/device.c
+++ b/vnet/vnet/devices/dpdk/device.c
@@ -672,10 +672,12 @@ dpdk_interface_tx (vlib_main_t * vm,
mb0 = rte_mbuf_from_vlib_buffer(b0);
mb1 = rte_mbuf_from_vlib_buffer(b1);
- any_clone = b0->clone_count | b1->clone_count;
+ any_clone = (b0->flags & VLIB_BUFFER_RECYCLE)
+ | (b1->flags & VLIB_BUFFER_RECYCLE);
if (PREDICT_FALSE(any_clone != 0))
{
- if (PREDICT_FALSE(b0->clone_count != 0))
+ if (PREDICT_FALSE
+ ((b0->flags & VLIB_BUFFER_RECYCLE) != 0))
{
struct rte_mbuf * mb0_new = dpdk_replicate_packet_mb (b0);
if (PREDICT_FALSE(mb0_new == 0))
@@ -688,7 +690,8 @@ dpdk_interface_tx (vlib_main_t * vm,
mb0 = mb0_new;
vec_add1 (dm->recycle[my_cpu], bi0);
}
- if (PREDICT_FALSE(b1->clone_count != 0))
+ if (PREDICT_FALSE
+ ((b1->flags & VLIB_BUFFER_RECYCLE) != 0))
{
struct rte_mbuf * mb1_new = dpdk_replicate_packet_mb (b1);
if (PREDICT_FALSE(mb1_new == 0))
@@ -772,7 +775,7 @@ dpdk_interface_tx (vlib_main_t * vm,
b0 = vlib_get_buffer (vm, bi0);
mb0 = rte_mbuf_from_vlib_buffer(b0);
- if (PREDICT_FALSE(b0->clone_count != 0))
+ if (PREDICT_FALSE((b0->flags & VLIB_BUFFER_RECYCLE) != 0))
{
struct rte_mbuf * mb0_new = dpdk_replicate_packet_mb (b0);
if (PREDICT_FALSE(mb0_new == 0))
diff --git a/vnet/vnet/devices/dpdk/node.c b/vnet/vnet/devices/dpdk/node.c
index 0d453085cf1..dc2a60e4aa2 100644
--- a/vnet/vnet/devices/dpdk/node.c
+++ b/vnet/vnet/devices/dpdk/node.c
@@ -439,7 +439,6 @@ static inline u32 dpdk_device_input ( dpdk_main_t * dm,
}
vlib_buffer_init_for_free_list (b0, fl);
- b0->clone_count = 0;
bi0 = vlib_get_buffer_index (vm, b0);
@@ -490,7 +489,6 @@ static inline u32 dpdk_device_input ( dpdk_main_t * dm,
b_seg = vlib_buffer_from_rte_mbuf(mb_seg);
vlib_buffer_init_for_free_list (b_seg, fl);
- b_seg->clone_count = 0;
ASSERT((b_seg->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
ASSERT(b_seg->current_data == 0);
diff --git a/vnet/vnet/devices/netmap/node.c b/vnet/vnet/devices/netmap/node.c
index f4c39e6259f..eae189c0e64 100644
--- a/vnet/vnet/devices/netmap/node.c
+++ b/vnet/vnet/devices/netmap/node.c
@@ -183,7 +183,6 @@ netmap_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
bytes_to_copy);
/* fill buffer header */
- b0->clone_count = 0;
b0->current_length = bytes_to_copy;
if (offset == 0)
diff --git a/vnet/vnet/devices/ssvm/node.c b/vnet/vnet/devices/ssvm/node.c
index 57b5fd22b5b..b182fef9373 100644
--- a/vnet/vnet/devices/ssvm/node.c
+++ b/vnet/vnet/devices/ssvm/node.c
@@ -166,7 +166,6 @@ ssvm_eth_device_input (ssvm_eth_main_t * em,
while (1)
{
vlib_buffer_init_for_free_list (b0, fl);
- b0->clone_count = 0;
b0->current_data = elt->current_data_hint;
b0->current_length = elt->length_this_buffer;
diff --git a/vnet/vnet/mcast/mcast.c b/vnet/vnet/mcast/mcast.c
index e9177c71f2c..55be89ae907 100644
--- a/vnet/vnet/mcast/mcast.c
+++ b/vnet/vnet/mcast/mcast.c
@@ -193,7 +193,8 @@ mcast_prep_node_fn (vlib_main_t * vm,
/*
* Make sure that intermediate "frees" don't screw up
*/
- b0->clone_count = vec_len (g0->members);
+ b0->recycle_count = vec_len (g0->members);
+ b0->flags |= VLIB_BUFFER_RECYCLE;
/* Set up for the recycle node */
vnet_buffer(b0)->mcast.mcast_current_index = 1;
@@ -394,11 +395,12 @@ mcast_recycle_node_fn (vlib_main_t * vm,
vnet_buffer(b0)->mcast.mcast_group_index);
/* No more replicas? */
- if (b0->clone_count == 1)
+ if (b0->recycle_count == 1)
{
/* Restore the original free list index */
b0->free_list_index =
vnet_buffer(b0)->mcast.original_free_list_index;
+ b0->flags &= ~(VLIB_BUFFER_RECYCLE);
}
current_member0 = vnet_buffer(b0)->mcast.mcast_current_index;
diff --git a/vnet/vnet/replication.c b/vnet/vnet/replication.c
index 4e12f0b89dd..999e1b12326 100644
--- a/vnet/vnet/replication.c
+++ b/vnet/vnet/replication.c
@@ -45,13 +45,13 @@ replication_prep (vlib_main_t * vm,
ctx_id = ctx - rm->contexts[cpu_number];
// Save state from vlib buffer
- ctx->saved_clone_count = b0->clone_count;
ctx->saved_free_list_index = b0->free_list_index;
ctx->current_data = b0->current_data;
// Set up vlib buffer hooks
- b0->clone_count = ctx_id;
+ b0->recycle_count = ctx_id;
b0->free_list_index = rm->recycle_list_index;
+ b0->flags |= VLIB_BUFFER_RECYCLE;
// Save feature state
ctx->recycle_node_index = recycle_node_index;
@@ -95,7 +95,7 @@ replication_recycle (vlib_main_t * vm,
ip4_header_t * ip;
// Get access to the replication context
- ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->clone_count);
+ ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->recycle_count);
// Restore vnet buffer state
clib_memcpy (vnet_buffer(b0), ctx->vnet_buffer, sizeof(vnet_buffer_opaque_t));
@@ -121,7 +121,6 @@ replication_recycle (vlib_main_t * vm,
if (is_last) {
// This is the last replication in the list.
// Restore original buffer free functionality.
- b0->clone_count = ctx->saved_clone_count;
b0->free_list_index = ctx->saved_free_list_index;
// Free context back to its pool
@@ -162,12 +161,12 @@ static void replication_recycle_callback (vlib_main_t *vm,
bi0 = fl->aligned_buffers[0];
b0 = vlib_get_buffer (vm, bi0);
ctx = pool_elt_at_index (rm->contexts[cpu_number],
- b0->clone_count);
+ b0->recycle_count);
feature_node_index = ctx->recycle_node_index;
} else if (vec_len (fl->unaligned_buffers) > 0) {
bi0 = fl->unaligned_buffers[0];
b0 = vlib_get_buffer (vm, bi0);
- ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->clone_count);
+ ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->recycle_count);
feature_node_index = ctx->recycle_node_index;
}
diff --git a/vnet/vnet/replication.h b/vnet/vnet/replication.h
index 9de5717f4d3..b16d5dc430d 100644
--- a/vnet/vnet/replication.h
+++ b/vnet/vnet/replication.h
@@ -36,7 +36,6 @@ typedef struct {
u32 recycle_node_index; // feature's recycle node index
// data saved from the start of replication and restored at the end of replication
- u32 saved_clone_count; // from vlib buffer
u32 saved_free_list_index; // from vlib buffer
// data saved from the original packet and restored for each replica
@@ -93,7 +92,7 @@ replication_get_ctx (vlib_buffer_t * b0)
replication_main_t * rm = &replication_main;
return replication_is_recycled (b0) ?
- pool_elt_at_index (rm->contexts[os_get_cpu_number()], b0->clone_count) :
+ pool_elt_at_index (rm->contexts[os_get_cpu_number()], b0->recycle_count) :
0;
}
diff --git a/vnet/vnet/unix/tapcli.c b/vnet/vnet/unix/tapcli.c
index 7856b05751f..bb280db9140 100644
--- a/vnet/vnet/unix/tapcli.c
+++ b/vnet/vnet/unix/tapcli.c
@@ -261,7 +261,6 @@ static uword tapcli_rx_iface(vlib_main_t * vm,
vec_validate (tm->iovecs, tm->mtu_buffers - 1);
for (j = 0; j < tm->mtu_buffers; j++) {
b = vlib_get_buffer (vm, tm->rx_buffers[i_rx - j]);
- b->clone_count = 0;
tm->iovecs[j].iov_base = b->data;
tm->iovecs[j].iov_len = buffer_size;
}