author    Dave Barach <dave@barachs.net>    2016-08-08 09:51:21 -0400
committer Keith Burns <alagalah@gmail.com>  2016-08-08 15:25:14 +0000
commit    ba868bb7898edc46ad5f8cd4925af6c8b92e7c89 (patch)
tree      a6f4092a3373e0a8d4f4a67da6e60f78331fe37f /vnet/vnet/replication.c
parent    da62e1a1d84728094c427a4a7ec03e76e2df988c (diff)
VPP-311 Coding standards cleanup for vnet/vnet/*.[ch]
Change-Id: I08ed983f594072bc8c72202e77205a7789eea599
Signed-off-by: Dave Barach <dave@barachs.net>
Diffstat (limited to 'vnet/vnet/replication.c')
-rw-r--r--  vnet/vnet/replication.c  316
1 file changed, 167 insertions(+), 149 deletions(-)
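
For readers skimming the diff below, the change is almost entirely mechanical: C++-style // comments become /* */ block comments, a space is inserted before the parenthesis of calls and declarations, pointer declarations drop the space after the '*', and bodies are re-indented to GNU style. A minimal before/after sketch of the conventions being applied (the function here is invented for illustration, not taken from the patch):

/* Before the cleanup: C++ comments, K&R-ish layout */
static int example_before(int x) {
    // double the argument
    return x * 2;
}

/* After the cleanup: block comments, GNU-style layout */
static int
example_after (int x)
{
  /* double the argument */
  return x * 2;
}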
diff --git a/vnet/vnet/replication.c b/vnet/vnet/replication.c
index fc2cbd1d860..571be7d807c 100644
--- a/vnet/vnet/replication.c
+++ b/vnet/vnet/replication.c
@@ -27,57 +27,62 @@ replication_main_t replication_main;
replication_context_t *
replication_prep (vlib_main_t * vm,
- vlib_buffer_t * b0,
- u32 recycle_node_index,
- u32 l2_packet)
+ vlib_buffer_t * b0, u32 recycle_node_index, u32 l2_packet)
{
- replication_main_t * rm = &replication_main;
- replication_context_t * ctx;
+ replication_main_t *rm = &replication_main;
+ replication_context_t *ctx;
uword cpu_number = vm->cpu_index;
- ip4_header_t * ip;
+ ip4_header_t *ip;
u32 ctx_id;
- // Allocate a context, reserve context 0
- if (PREDICT_FALSE(rm->contexts[cpu_number] == 0))
+ /* Allocate a context, reserve context 0 */
+ if (PREDICT_FALSE (rm->contexts[cpu_number] == 0))
pool_get_aligned (rm->contexts[cpu_number], ctx, CLIB_CACHE_LINE_BYTES);
-
+
pool_get_aligned (rm->contexts[cpu_number], ctx, CLIB_CACHE_LINE_BYTES);
ctx_id = ctx - rm->contexts[cpu_number];
- // Save state from vlib buffer
+ /* Save state from vlib buffer */
ctx->saved_free_list_index = b0->free_list_index;
ctx->current_data = b0->current_data;
- // Set up vlib buffer hooks
+ /* Set up vlib buffer hooks */
b0->recycle_count = ctx_id;
b0->free_list_index = rm->recycle_list_index;
b0->flags |= VLIB_BUFFER_RECYCLE;
- // Save feature state
+ /* Save feature state */
ctx->recycle_node_index = recycle_node_index;
- // Save vnet state
- clib_memcpy (ctx->vnet_buffer, vnet_buffer(b0), sizeof(vnet_buffer_opaque_t));
+ /* Save vnet state */
+ clib_memcpy (ctx->vnet_buffer, vnet_buffer (b0),
+ sizeof (vnet_buffer_opaque_t));
- // Save packet contents
+ /* Save packet contents */
ctx->l2_packet = l2_packet;
- ip = (ip4_header_t *)vlib_buffer_get_current (b0);
- if (l2_packet) {
- // Save ethernet header
- ctx->l2_header[0] = ((u64 *)ip)[0];
- ctx->l2_header[1] = ((u64 *)ip)[1];
- ctx->l2_header[2] = ((u64 *)ip)[2];
- // set ip to the true ip header
- ip = (ip4_header_t *)(((u8 *)ip) + vnet_buffer(b0)->l2.l2_len);
- }
-
- // Copy L3 fields.
- // We need to save TOS for ip4 and ip6 packets. Fortunately the TOS field is
- // in the first two bytes of both the ip4 and ip6 headers.
- ctx->ip_tos = *((u16 *)(ip));
-
- // Save the ip4 checksum as well. We just blindly save the corresponding two
- // bytes even for ip6 packets.
+ ip = (ip4_header_t *) vlib_buffer_get_current (b0);
+ if (l2_packet)
+ {
+ /* Save ethernet header */
+ ctx->l2_header[0] = ((u64 *) ip)[0];
+ ctx->l2_header[1] = ((u64 *) ip)[1];
+ ctx->l2_header[2] = ((u64 *) ip)[2];
+ /* set ip to the true ip header */
+ ip = (ip4_header_t *) (((u8 *) ip) + vnet_buffer (b0)->l2.l2_len);
+ }
+
+ /*
+ * Copy L3 fields.
+ * We need to save TOS for ip4 and ip6 packets.
+ * Fortunately the TOS field is
+ * in the first two bytes of both the ip4 and ip6 headers.
+ */
+ ctx->ip_tos = *((u16 *) (ip));
+
+ /*
+ * Save the ip4 checksum as well. We just blindly save the corresponding two
+ * bytes even for ip6 packets.
+ */
ctx->ip4_checksum = ip->checksum;
return ctx;
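
The TOS save in the hunk above leans on header layout: the first 16 bits of an IPv4 header are the version/IHL byte followed by the TOS byte, and the first 16 bits of an IPv6 header are the version nibble plus the traffic class (IPv6's TOS equivalent), so one 16-bit save and restore covers the QoS bits of either IP version. A stand-alone sketch of the same trick, operating on raw header bytes rather than VPP's ip4_header_t (the function names are illustrative):

#include <stdint.h>
#include <string.h>

/* Save the first 16 bits of an IP header; for both v4 and v6 this
   captures the bits that QoS features may rewrite in flight. */
static uint16_t
save_qos_bits (const void *ip_header)
{
  uint16_t first16;
  memcpy (&first16, ip_header, sizeof (first16));
  return first16;
}

/* Put the saved bits back before the buffer is reused. */
static void
restore_qos_bits (void *ip_header, uint16_t saved)
{
  memcpy (ip_header, &saved, sizeof (saved));
}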
@@ -85,48 +90,51 @@ replication_prep (vlib_main_t * vm,
replication_context_t *
-replication_recycle (vlib_main_t * vm,
- vlib_buffer_t * b0,
- u32 is_last)
+replication_recycle (vlib_main_t * vm, vlib_buffer_t * b0, u32 is_last)
{
- replication_main_t * rm = &replication_main;
- replication_context_t * ctx;
+ replication_main_t *rm = &replication_main;
+ replication_context_t *ctx;
uword cpu_number = vm->cpu_index;
- ip4_header_t * ip;
+ ip4_header_t *ip;
- // Get access to the replication context
+ /* Get access to the replication context */
ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->recycle_count);
- // Restore vnet buffer state
- clib_memcpy (vnet_buffer(b0), ctx->vnet_buffer, sizeof(vnet_buffer_opaque_t));
+ /* Restore vnet buffer state */
+ clib_memcpy (vnet_buffer (b0), ctx->vnet_buffer,
+ sizeof (vnet_buffer_opaque_t));
- // Restore the packet start (current_data) and length
- vlib_buffer_advance(b0, ctx->current_data - b0->current_data);
+ /* Restore the packet start (current_data) and length */
+ vlib_buffer_advance (b0, ctx->current_data - b0->current_data);
- // Restore packet contents
- ip = (ip4_header_t *)vlib_buffer_get_current (b0);
- if (ctx->l2_packet) {
- // Restore ethernet header
- ((u64 *)ip)[0] = ctx->l2_header[0];
- ((u64 *)ip)[1] = ctx->l2_header[1];
- ((u64 *)ip)[2] = ctx->l2_header[2];
- // set ip to the true ip header
- ip = (ip4_header_t *)(((u8 *)ip) + vnet_buffer(b0)->l2.l2_len);
- }
+ /* Restore packet contents */
+ ip = (ip4_header_t *) vlib_buffer_get_current (b0);
+ if (ctx->l2_packet)
+ {
+ /* Restore ethernet header */
+ ((u64 *) ip)[0] = ctx->l2_header[0];
+ ((u64 *) ip)[1] = ctx->l2_header[1];
+ ((u64 *) ip)[2] = ctx->l2_header[2];
+ /* set ip to the true ip header */
+ ip = (ip4_header_t *) (((u8 *) ip) + vnet_buffer (b0)->l2.l2_len);
+ }
// Restore L3 fields
- *((u16 *)(ip)) = ctx->ip_tos;
+ *((u16 *) (ip)) = ctx->ip_tos;
ip->checksum = ctx->ip4_checksum;
- if (is_last) {
- // This is the last replication in the list.
- // Restore original buffer free functionality.
- b0->free_list_index = ctx->saved_free_list_index;
- b0->flags &= ~VLIB_BUFFER_RECYCLE;
-
- // Free context back to its pool
- pool_put (rm->contexts[cpu_number], ctx);
- }
+ if (is_last)
+ {
+ /*
+ * This is the last replication in the list.
+ * Restore original buffer free functionality.
+ */
+ b0->free_list_index = ctx->saved_free_list_index;
+ b0->flags &= ~VLIB_BUFFER_RECYCLE;
+
+ /* Free context back to its pool */
+ pool_put (rm->contexts[cpu_number], ctx);
+ }
return ctx;
}
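
Taken together, the two functions form a save/restore pair: replication_prep() stashes the buffer's header bytes and opaque state in a per-thread context and flags the buffer VLIB_BUFFER_RECYCLE, while replication_recycle() restores that state before each reuse and tears the context down on the final pass. A hedged sketch of the calling pattern, in the spirit of VPP's flood code (n_copies, dst, and send_copy are hypothetical; only the two replication calls come from this file):

/* Transmit the same buffer to n_copies destinations by reusing it. */
for (i = 0; i < n_copies; i++)
  {
    if (i == 0)
      replication_prep (vm, b0, recycle_node_index, 1 /* l2_packet */);
    else
      replication_recycle (vm, b0, i == (n_copies - 1) /* is_last */);
    send_copy (vm, b0, dst[i]);   /* hypothetical per-copy transmit */
  }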
@@ -137,133 +145,143 @@ replication_recycle (vlib_main_t * vm,
* fish pkts back from the recycle queue/freelist
* un-flatten the context chains
*/
-static void replication_recycle_callback (vlib_main_t *vm,
- vlib_buffer_free_list_t * fl)
+static void
+replication_recycle_callback (vlib_main_t * vm, vlib_buffer_free_list_t * fl)
{
- vlib_frame_t * f = 0;
+ vlib_frame_t *f = 0;
u32 n_left_from;
u32 n_left_to_next = 0;
u32 n_this_frame = 0;
- u32 * from;
- u32 * to_next = 0;
+ u32 *from;
+ u32 *to_next = 0;
u32 bi0, pi0;
vlib_buffer_t *b0;
int i;
- replication_main_t * rm = &replication_main;
- replication_context_t * ctx;
- u32 feature_node_index = 0;
+ replication_main_t *rm = &replication_main;
+ replication_context_t *ctx;
+ u32 feature_node_index = 0;
uword cpu_number = vm->cpu_index;
- // All buffers in the list are destined to the same recycle node.
- // Pull the recycle node index from the first buffer.
- // Note: this could be sped up if the node index were stuffed into
- // the freelist itself.
- if (vec_len (fl->aligned_buffers) > 0) {
- bi0 = fl->aligned_buffers[0];
- b0 = vlib_get_buffer (vm, bi0);
- ctx = pool_elt_at_index (rm->contexts[cpu_number],
- b0->recycle_count);
- feature_node_index = ctx->recycle_node_index;
- } else if (vec_len (fl->unaligned_buffers) > 0) {
- bi0 = fl->unaligned_buffers[0];
- b0 = vlib_get_buffer (vm, bi0);
- ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->recycle_count);
- feature_node_index = ctx->recycle_node_index;
- }
+ /*
+ * All buffers in the list are destined to the same recycle node.
+ * Pull the recycle node index from the first buffer.
+ * Note: this could be sped up if the node index were stuffed into
+ * the freelist itself.
+ */
+ if (vec_len (fl->aligned_buffers) > 0)
+ {
+ bi0 = fl->aligned_buffers[0];
+ b0 = vlib_get_buffer (vm, bi0);
+ ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->recycle_count);
+ feature_node_index = ctx->recycle_node_index;
+ }
+ else if (vec_len (fl->unaligned_buffers) > 0)
+ {
+ bi0 = fl->unaligned_buffers[0];
+ b0 = vlib_get_buffer (vm, bi0);
+ ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->recycle_count);
+ feature_node_index = ctx->recycle_node_index;
+ }
/* aligned, unaligned buffers */
- for (i = 0; i < 2; i++)
+ for (i = 0; i < 2; i++)
{
if (i == 0)
- {
- from = fl->aligned_buffers;
- n_left_from = vec_len (from);
- }
+ {
+ from = fl->aligned_buffers;
+ n_left_from = vec_len (from);
+ }
else
- {
- from = fl->unaligned_buffers;
- n_left_from = vec_len (from);
- }
-
+ {
+ from = fl->unaligned_buffers;
+ n_left_from = vec_len (from);
+ }
+
while (n_left_from > 0)
- {
- if (PREDICT_FALSE(n_left_to_next == 0))
- {
- if (f)
- {
- f->n_vectors = n_this_frame;
- vlib_put_frame_to_node (vm, feature_node_index, f);
- }
-
- f = vlib_get_frame_to_node (vm, feature_node_index);
- to_next = vlib_frame_vector_args (f);
- n_left_to_next = VLIB_FRAME_SIZE;
- n_this_frame = 0;
- }
-
- bi0 = from[0];
- if (PREDICT_TRUE(n_left_from > 1))
- {
- pi0 = from[1];
- vlib_prefetch_buffer_with_index(vm,pi0,LOAD);
- }
+ {
+ if (PREDICT_FALSE (n_left_to_next == 0))
+ {
+ if (f)
+ {
+ f->n_vectors = n_this_frame;
+ vlib_put_frame_to_node (vm, feature_node_index, f);
+ }
+
+ f = vlib_get_frame_to_node (vm, feature_node_index);
+ to_next = vlib_frame_vector_args (f);
+ n_left_to_next = VLIB_FRAME_SIZE;
+ n_this_frame = 0;
+ }
+
+ bi0 = from[0];
+ if (PREDICT_TRUE (n_left_from > 1))
+ {
+ pi0 = from[1];
+ vlib_prefetch_buffer_with_index (vm, pi0, LOAD);
+ }
b0 = vlib_get_buffer (vm, bi0);
- // Mark that this buffer was just recycled
- b0->flags |= VLIB_BUFFER_IS_RECYCLED;
+ /* Mark that this buffer was just recycled */
+ b0->flags |= VLIB_BUFFER_IS_RECYCLED;
- // If buffer is traced, mark frame as traced
- if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
- f->flags |= VLIB_FRAME_TRACE;
+ /* If buffer is traced, mark frame as traced */
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ f->flags |= VLIB_FRAME_TRACE;
- to_next[0] = bi0;
+ to_next[0] = bi0;
- from++;
- to_next++;
- n_this_frame++;
- n_left_to_next--;
- n_left_from--;
- }
+ from++;
+ to_next++;
+ n_this_frame++;
+ n_left_to_next--;
+ n_left_from--;
+ }
}
-
+
vec_reset_length (fl->aligned_buffers);
vec_reset_length (fl->unaligned_buffers);
if (f)
{
- ASSERT(n_this_frame);
+ ASSERT (n_this_frame);
f->n_vectors = n_this_frame;
vlib_put_frame_to_node (vm, feature_node_index, f);
}
}
-
-
-clib_error_t *replication_init (vlib_main_t *vm)
+clib_error_t *
+replication_init (vlib_main_t * vm)
{
- replication_main_t * rm = &replication_main;
- vlib_buffer_main_t * bm = vm->buffer_main;
- vlib_buffer_free_list_t * fl;
- __attribute__((unused)) replication_context_t * ctx;
- vlib_thread_main_t * tm = vlib_get_thread_main();
-
+ replication_main_t *rm = &replication_main;
+ vlib_buffer_main_t *bm = vm->buffer_main;
+ vlib_buffer_free_list_t *fl;
+ __attribute__ ((unused)) replication_context_t *ctx;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+
rm->vlib_main = vm;
- rm->vnet_main = vnet_get_main();
- rm->recycle_list_index =
- vlib_buffer_create_free_list (vm, 1024 /* fictional */,
- "replication-recycle");
+ rm->vnet_main = vnet_get_main ();
+ rm->recycle_list_index =
+ vlib_buffer_create_free_list (vm, 1024 /* fictional */ ,
+ "replication-recycle");
- fl = pool_elt_at_index (bm->buffer_free_list_pool,
- rm->recycle_list_index);
+ fl = pool_elt_at_index (bm->buffer_free_list_pool, rm->recycle_list_index);
fl->buffers_added_to_freelist_function = replication_recycle_callback;
- // Verify the replication context is the expected size
- ASSERT(sizeof(replication_context_t) == 128); // 2 cache lines
+ /* Verify the replication context is the expected size */
+ ASSERT (sizeof (replication_context_t) == 128); /* 2 cache lines */
vec_validate (rm->contexts, tm->n_vlib_mains - 1);
return 0;
}
VLIB_INIT_FUNCTION (replication_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
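
For context on replication_init() above: the mechanism hinges on a dedicated free list whose buffers_added_to_freelist_function hook is pointed at replication_recycle_callback, so a buffer carrying VLIB_BUFFER_RECYCLE that gets "freed" is intercepted and re-dispatched to its saved recycle node instead of returning to the buffer pool. A minimal model of that hook, with simplified stand-in types rather than VPP's vlib structures:

#include <stddef.h>

typedef struct free_list
{
  unsigned *buffers;      /* buffer indices parked on the list */
  size_t n_buffers;
  /* Fires after buffers land on the list; a replication-style user
     drains them back into the graph instead of truly freeing them. */
  void (*added_callback) (struct free_list *fl);
} free_list_t;

static void
free_list_add (free_list_t *fl, unsigned buffer_index)
{
  fl->buffers[fl->n_buffers++] = buffer_index;
  if (fl->added_callback)
    fl->added_callback (fl);
}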