author     Damjan Marion <damarion@cisco.com>   2023-10-13 09:59:00 +0000
committer  Damjan Marion <damarion@cisco.com>   2023-10-17 17:44:41 +0000
commit     bf236630f518ae94bc2ef76f83b269b98dd109d4 (patch)
tree       91cf566e0eb108e919011c6d912b5b01a7a5c441
parent     0094fe0190b623dbef0e57b7f4032ba3cf5f36b0 (diff)
buffers: introduce vlib_buffer_template_t
Type: improvement

Change-Id: Ie86a5edf2ada21355543e9a0382052b16ff86927
Signed-off-by: Damjan Marion <damarion@cisco.com>
-rw-r--r--  src/plugins/dpdk/buffer.c  |  20
-rw-r--r--  src/vlib/buffer.c          |   5
-rw-r--r--  src/vlib/buffer.h          | 112
-rw-r--r--  src/vlib/buffer_funcs.h    |  33
4 files changed, 91 insertions(+), 79 deletions(-)
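At its core the patch splits the part of vlib_buffer_t that alloc/free must (re)initialize into a standalone 64-byte vlib_buffer_template_t, so resetting a buffer becomes a plain struct assignment rather than a call to the vlib_buffer_copy_template() helper. A minimal sketch of the before/after idiom, using hypothetical demo_* types rather than the real VPP definitions (the real field list is in the buffer.h hunk below):

#include <stdint.h>
#include <string.h>

typedef struct
{
  _Alignas (64) uint8_t meta[64]; /* stands in for the metadata initialized on alloc */
} demo_template_t;

typedef struct
{
  demo_template_t template; /* first cache line of the buffer */
  uint8_t rest[64];         /* metadata that alloc/free leaves untouched */
} demo_buffer_t;

/* old idiom: a helper that copies the leading 64 bytes of a "template"
 * buffer into another buffer (roughly what vlib_buffer_copy_template did) */
static inline void
demo_copy_template (demo_buffer_t *b, const demo_buffer_t *bt)
{
  memcpy (b, bt, sizeof (demo_template_t));
}

/* new idiom: the template is a type of its own, so callers just assign it */
static inline void
demo_reset_from_pool (demo_buffer_t *b, const demo_template_t *pool_template)
{
  b->template = *pool_template;
}

Since the template is exactly 64 bytes and 64-byte aligned, the assignment typically lowers to the same one or two vector moves the helper produced, but the intent is now visible in the type system.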
diff --git a/src/plugins/dpdk/buffer.c b/src/plugins/dpdk/buffer.c
index 8b4b4a926b3..2379a9aa7d0 100644
--- a/src/plugins/dpdk/buffer.c
+++ b/src/plugins/dpdk/buffer.c
@@ -135,7 +135,7 @@ dpdk_buffer_pool_init (vlib_main_t * vm, vlib_buffer_pool_t * bp)
{
vlib_buffer_t *b;
b = vlib_buffer_ptr_from_index (buffer_mem_start, bp->buffers[i], 0);
- vlib_buffer_copy_template (b, &bp->buffer_template);
+ b->template = bp->buffer_template;
}
/* map DMA pages if at least one physical device exists */
@@ -197,7 +197,7 @@ dpdk_ops_vpp_free (struct rte_mempool *mp)
#endif
static_always_inline void
-dpdk_ops_vpp_enqueue_one (vlib_buffer_t * bt, void *obj)
+dpdk_ops_vpp_enqueue_one (vlib_buffer_template_t *bt, void *obj)
{
/* Only non-replicated packets (b->ref_count == 1) expected */
@@ -205,7 +205,7 @@ dpdk_ops_vpp_enqueue_one (vlib_buffer_t * bt, void *obj)
vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb);
ASSERT (b->ref_count == 1);
ASSERT (b->buffer_pool_index == bt->buffer_pool_index);
- vlib_buffer_copy_template (b, bt);
+ b->template = *bt;
}
int
@@ -214,14 +214,14 @@ CLIB_MULTIARCH_FN (dpdk_ops_vpp_enqueue) (struct rte_mempool * mp,
{
const int batch_size = 32;
vlib_main_t *vm = vlib_get_main ();
- vlib_buffer_t bt;
+ vlib_buffer_template_t bt;
u8 buffer_pool_index = mp->pool_id;
vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, buffer_pool_index);
u32 bufs[batch_size];
u32 n_left = n;
void *const *obj = obj_table;
- vlib_buffer_copy_template (&bt, &bp->buffer_template);
+ bt = bp->buffer_template;
while (n_left >= 4)
{
@@ -263,9 +263,9 @@ CLIB_MULTIARCH_FN (dpdk_ops_vpp_enqueue) (struct rte_mempool * mp,
CLIB_MARCH_FN_REGISTRATION (dpdk_ops_vpp_enqueue);
static_always_inline void
-dpdk_ops_vpp_enqueue_no_cache_one (vlib_main_t * vm, struct rte_mempool *old,
+dpdk_ops_vpp_enqueue_no_cache_one (vlib_main_t *vm, struct rte_mempool *old,
struct rte_mempool *new, void *obj,
- vlib_buffer_t * bt)
+ vlib_buffer_template_t *bt)
{
struct rte_mbuf *mb = obj;
vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb);
@@ -273,7 +273,7 @@ dpdk_ops_vpp_enqueue_no_cache_one (vlib_main_t * vm, struct rte_mempool *old,
if (clib_atomic_sub_fetch (&b->ref_count, 1) == 0)
{
u32 bi = vlib_get_buffer_index (vm, b);
- vlib_buffer_copy_template (b, bt);
+ b->template = *bt;
vlib_buffer_pool_put (vm, bt->buffer_pool_index, &bi, 1);
return;
}
@@ -285,12 +285,12 @@ CLIB_MULTIARCH_FN (dpdk_ops_vpp_enqueue_no_cache) (struct rte_mempool * cmp,
unsigned n)
{
vlib_main_t *vm = vlib_get_main ();
- vlib_buffer_t bt;
+ vlib_buffer_template_t bt;
struct rte_mempool *mp;
mp = dpdk_mempool_by_buffer_pool_index[cmp->pool_id];
u8 buffer_pool_index = cmp->pool_id;
vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, buffer_pool_index);
- vlib_buffer_copy_template (&bt, &bp->buffer_template);
+ bt = bp->buffer_template;
while (n >= 4)
{
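The enqueue hunks above all follow the same burst pattern; here is a hedged, simplified sketch with hypothetical demo_* types standing in for the VPP/DPDK ones (no mbuf handling, no loop unrolling): copy the pool's template into a stack local once per burst, then reset every returned buffer by assignment.

#include <stdint.h>

typedef struct
{
  _Alignas (64) uint8_t meta[64]; /* simplified stand-in for the initialized metadata */
} demo_template_t;

typedef struct
{
  demo_template_t template; /* first cache line */
  uint8_t data[2048];
} demo_buffer_t;

typedef struct
{
  demo_template_t buffer_template; /* per-pool initial metadata, as in vlib_buffer_pool_t */
} demo_pool_t;

void
demo_pool_put_burst (const demo_pool_t *pool, demo_buffer_t **bufs, unsigned n)
{
  demo_template_t bt = pool->buffer_template; /* fetch the template once per burst */

  for (unsigned i = 0; i < n; i++)
    bufs[i]->template = bt; /* reset metadata with a 64-byte struct copy */
}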
diff --git a/src/vlib/buffer.c b/src/vlib/buffer.c
index 00686593c6b..e798743bc98 100644
--- a/src/vlib/buffer.c
+++ b/src/vlib/buffer.c
@@ -58,9 +58,6 @@ STATIC_ASSERT_FITS_IN (vlib_buffer_t, ref_count, 16);
STATIC_ASSERT_FITS_IN (vlib_buffer_t, buffer_pool_index, 16);
#endif
-/* Make sure that buffer template size is not accidentally changed */
-STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, template_end, 64);
-
u16 __vlib_buffer_external_hdr_size = 0;
uword
@@ -577,7 +574,7 @@ vlib_buffer_pool_create (vlib_main_t *vm, u32 data_size, u32 physmem_map_index,
continue;
b = (vlib_buffer_t *) (p + bm->ext_hdr_size);
- vlib_buffer_copy_template (b, &bp->buffer_template);
+ b->template = bp->buffer_template;
bi = vlib_get_buffer_index (vm, b);
bp->buffers[bp->n_avail++] = bi;
vlib_get_buffer (vm, bi);
diff --git a/src/vlib/buffer.h b/src/vlib/buffer.h
index 2a5af210330..4de2deab7d0 100644
--- a/src/vlib/buffer.h
+++ b/src/vlib/buffer.h
@@ -107,62 +107,78 @@ enum
#define VLIB_BUFFER_TRACE_TRAJECTORY 0
#endif /* VLIB_BUFFER_TRACE_TRAJECTORY */
+#define vlib_buffer_template_fields \
+ /** signed offset in data[], pre_data[] that we are currently \
+ * processing. If negative current header points into predata area. */ \
+ i16 current_data; \
+ \
+ /** Nbytes between current data and the end of this buffer. */ \
+ u16 current_length; \
+ /** buffer flags: \
+ <br> VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list \
+ index, <br> VLIB_BUFFER_IS_TRACED: trace this buffer. <br> \
+ VLIB_BUFFER_NEXT_PRESENT: this is a multi-chunk buffer. <br> \
+ VLIB_BUFFER_TOTAL_LENGTH_VALID: as it says <br> \
+ VLIB_BUFFER_EXT_HDR_VALID: buffer contains valid external buffer manager \
+ header, set to avoid adding it to a flow report <br> \
+ VLIB_BUFFER_FLAG_USER(n): user-defined bit N \
+ */ \
+ u32 flags; \
+ \
+ /** Generic flow identifier */ \
+ u32 flow_id; \
+ \
+ /** Reference count for this buffer. */ \
+ volatile u8 ref_count; \
+ \
+ /** index of buffer pool this buffer belongs to. */ \
+ u8 buffer_pool_index; \
+ \
+ /** Error code for buffers to be enqueued to error handler. */ \
+ vlib_error_t error; \
+ \
+ /** Next buffer for this linked-list of buffers. Only valid if \
+ * VLIB_BUFFER_NEXT_PRESENT flag is set. */ \
+ u32 next_buffer; \
+ \
+ /** The following fields can be in a union because once a packet enters \
+ * the punt path, it is no longer on a feature arc */ \
+ union \
+ { \
+ /** Used by feature subgraph arcs to visit enabled feature nodes */ \
+ u32 current_config_index; \
+ /* the reason the packet once punted */ \
+ u32 punt_reason; \
+ }; \
+ \
+ /** Opaque data used by sub-graphs for their own purposes. */ \
+ u32 opaque[10];
+
+typedef struct
+{
+ CLIB_ALIGN_MARK (align_mark, 64);
+ vlib_buffer_template_fields
+} vlib_buffer_template_t;
+
+STATIC_ASSERT_SIZEOF (vlib_buffer_template_t, 64);
+
/** VLIB buffer representation. */
typedef union
{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
struct
{
- CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
-
- /** signed offset in data[], pre_data[] that we are currently
- * processing. If negative current header points into predata area. */
- i16 current_data;
-
- /** Nbytes between current data and the end of this buffer. */
- u16 current_length;
-
- /** buffer flags:
- <br> VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index,
- <br> VLIB_BUFFER_IS_TRACED: trace this buffer.
- <br> VLIB_BUFFER_NEXT_PRESENT: this is a multi-chunk buffer.
- <br> VLIB_BUFFER_TOTAL_LENGTH_VALID: as it says
- <br> VLIB_BUFFER_EXT_HDR_VALID: buffer contains valid external buffer manager header,
- set to avoid adding it to a flow report
- <br> VLIB_BUFFER_FLAG_USER(n): user-defined bit N
- */
- u32 flags;
-
- /** Generic flow identifier */
- u32 flow_id;
-
- /** Reference count for this buffer. */
- volatile u8 ref_count;
-
- /** index of buffer pool this buffer belongs. */
- u8 buffer_pool_index;
-
- /** Error code for buffers to be enqueued to error handler. */
- vlib_error_t error;
-
- /** Next buffer for this linked-list of buffers. Only valid if
- * VLIB_BUFFER_NEXT_PRESENT flag is set. */
- u32 next_buffer;
-
- /** The following fields can be in a union because once a packet enters
- * the punt path, it is no longer on a feature arc */
union
{
- /** Used by feature subgraph arcs to visit enabled feature nodes */
- u32 current_config_index;
- /* the reason the packet once punted */
- u32 punt_reason;
+ struct
+ {
+ vlib_buffer_template_fields
+ };
+ vlib_buffer_template_t template;
};
- /** Opaque data used by sub-graphs for their own purposes. */
- u32 opaque[10];
-
- /** part of buffer metadata which is initialized on alloc ends here. */
- STRUCT_MARK (template_end);
+ /* Data above is initialized or zeroed on alloc, data below is not,
+ * and it is the application's responsibility to ensure it is valid */
/** start of 2nd half (2nd cacheline on systems where cacheline size is 64) */
CLIB_ALIGN_MARK (second_half, 64);
@@ -468,7 +484,7 @@ typedef struct
vlib_buffer_pool_thread_t *threads;
/* buffer metadata template */
- vlib_buffer_t buffer_template;
+ vlib_buffer_template_t buffer_template;
} vlib_buffer_pool_t;
#define VLIB_BUFFER_MAX_NUMA_NODES 32
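The buffer.h hunk above relies on expanding the same field list twice. A hedged, self-contained sketch of that aliasing arrangement, with hypothetical demo_* names and a simplified field set: the macro is expanded once inside the standalone template type and once inside an anonymous struct that shares a union with a template member, so b.buffer_pool_index and b.template.buffer_pool_index name the same bytes.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* one field list, expanded twice, in the spirit of vlib_buffer_template_fields
 * (the real field set is richer; this one just sums to 64 bytes) */
#define demo_buffer_template_fields                                          \
  int16_t current_data;                                                      \
  uint16_t current_length;                                                   \
  uint32_t flags;                                                            \
  uint8_t ref_count;                                                         \
  uint8_t buffer_pool_index;                                                 \
  uint16_t error;                                                            \
  uint32_t next_buffer;                                                      \
  uint32_t opaque[12];

typedef struct
{
  demo_buffer_template_fields
} demo_buffer_template_t;

static_assert (sizeof (demo_buffer_template_t) == 64, "template must stay 64 bytes");

typedef union
{
  struct
  {
    union
    {
      struct
      {
        demo_buffer_template_fields
      };
      demo_buffer_template_t template; /* aliases the flat fields above */
    };
    uint32_t trace_handle; /* metadata from here on is not reset by template assignment */
  };
  uint8_t as_u8[128];
} demo_buffer_t;

int
main (void)
{
  demo_buffer_t b = { 0 };
  demo_buffer_template_t bt = { .ref_count = 1, .buffer_pool_index = 3 };

  b.template = bt;                   /* one 64-byte struct copy ...            */
  assert (b.buffer_pool_index == 3); /* ... visible through the flat field view */
  assert (offsetof (demo_buffer_t, trace_handle) == 64);
  return 0;
}

Expanding one macro in both places keeps the two layouts identical by construction, and the STATIC_ASSERT_SIZEOF in the hunk pins the template to the first cache line, replacing the old template_end offset assert.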
diff --git a/src/vlib/buffer_funcs.h b/src/vlib/buffer_funcs.h
index 017a70f4fbf..e65a80bfad8 100644
--- a/src/vlib/buffer_funcs.h
+++ b/src/vlib/buffer_funcs.h
@@ -181,7 +181,6 @@ vlib_buffer_copy_indices_to_ring (u32 * ring, u32 * src, u32 start,
}
}
-STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, template_end, 64);
static_always_inline void
vlib_buffer_copy_template (vlib_buffer_t * b, vlib_buffer_t * bt)
{
@@ -762,7 +761,7 @@ vlib_buffer_free_inline (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
vlib_buffer_pool_t *bp = 0;
u8 buffer_pool_index = ~0;
u32 n_queue = 0, queue[queue_size + 4];
- vlib_buffer_t bt = { };
+ vlib_buffer_template_t bt = {};
#if defined(CLIB_HAVE_VEC128)
vlib_buffer_t bpi_mask = {.buffer_pool_index = ~0 };
vlib_buffer_t bpi_vec = {};
@@ -778,7 +777,7 @@ vlib_buffer_free_inline (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
vlib_buffer_t *b = vlib_get_buffer (vm, buffers[0]);
buffer_pool_index = b->buffer_pool_index;
bp = vlib_get_buffer_pool (vm, buffer_pool_index);
- vlib_buffer_copy_template (&bt, &bp->buffer_template);
+ bt = bp->buffer_template;
#if defined(CLIB_HAVE_VEC128)
bpi_vec.buffer_pool_index = buffer_pool_index;
#endif
@@ -870,14 +869,14 @@ vlib_buffer_free_inline (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
#if defined(CLIB_HAVE_VEC512)
vlib_buffer_copy_indices (queue + n_queue, buffers, 8);
- vlib_buffer_copy_template (b[0], &bt);
- vlib_buffer_copy_template (b[1], &bt);
- vlib_buffer_copy_template (b[2], &bt);
- vlib_buffer_copy_template (b[3], &bt);
- vlib_buffer_copy_template (b[4], &bt);
- vlib_buffer_copy_template (b[5], &bt);
- vlib_buffer_copy_template (b[6], &bt);
- vlib_buffer_copy_template (b[7], &bt);
+ b[0]->template = bt;
+ b[1]->template = bt;
+ b[2]->template = bt;
+ b[3]->template = bt;
+ b[4]->template = bt;
+ b[5]->template = bt;
+ b[6]->template = bt;
+ b[7]->template = bt;
n_queue += 8;
vlib_buffer_validate (vm, b[0]);
@@ -899,10 +898,10 @@ vlib_buffer_free_inline (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[7]);
#else
vlib_buffer_copy_indices (queue + n_queue, buffers, 4);
- vlib_buffer_copy_template (b[0], &bt);
- vlib_buffer_copy_template (b[1], &bt);
- vlib_buffer_copy_template (b[2], &bt);
- vlib_buffer_copy_template (b[3], &bt);
+ b[0]->template = bt;
+ b[1]->template = bt;
+ b[2]->template = bt;
+ b[3]->template = bt;
n_queue += 4;
vlib_buffer_validate (vm, b[0]);
@@ -952,7 +951,7 @@ vlib_buffer_free_inline (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
bpi_vec.buffer_pool_index = buffer_pool_index;
#endif
bp = vlib_get_buffer_pool (vm, buffer_pool_index);
- vlib_buffer_copy_template (&bt, &bp->buffer_template);
+ bt = bp->buffer_template;
}
vlib_buffer_validate (vm, b[0]);
@@ -961,7 +960,7 @@ vlib_buffer_free_inline (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
if (clib_atomic_sub_fetch (&b[0]->ref_count, 1) == 0)
{
- vlib_buffer_copy_template (b[0], &bt);
+ b[0]->template = bt;
queue[n_queue++] = bi;
}
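What the vlib_buffer_free_inline hunks above boil down to can be sketched as follows, again with hypothetical demo_* types, a single-threaded non-atomic reference count, and none of the SIMD batching: keep a local copy of the current pool's template and refresh it only when a buffer from a different pool shows up, and reinitialize the metadata of every buffer whose reference count drops to zero with a single struct assignment.

#include <stdint.h>

typedef struct
{
  _Alignas (64) uint8_t buffer_pool_index; /* simplified: only the fields the loop needs */
  uint8_t ref_count;
  uint8_t rest[62]; /* remaining initialized metadata, filling out 64 bytes */
} demo_template_t;

typedef struct
{
  demo_template_t template; /* first cache line */
  uint8_t data[2048];
} demo_buffer_t;

typedef struct
{
  demo_template_t buffer_template; /* per-pool initial metadata */
} demo_pool_t;

/* pools[] is indexed by buffer_pool_index; the local template is reloaded
 * only when consecutive buffers come from different pools */
void
demo_free_buffers (const demo_pool_t *pools, demo_buffer_t **b, unsigned n)
{
  demo_template_t bt = { 0 };
  int current_pool = -1;

  for (unsigned i = 0; i < n; i++)
    {
      uint8_t pi = b[i]->template.buffer_pool_index;

      if ((int) pi != current_pool)
        {
          bt = pools[pi].buffer_template; /* pool changed: reload its template */
          current_pool = pi;
        }

      if (--b[i]->template.ref_count == 0)
        b[i]->template = bt; /* last reference gone: reinitialize metadata */
    }
}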