author	Klement Sekera <ksekera@cisco.com>	2020-06-17 13:46:41 +0000
committer	Damjan Marion <dmarion@me.com>	2020-06-27 10:19:46 +0000
commit	9466e4fed6faaf9c161767e2bc42c9cb0cf2c7ec (patch)
tree	eea62e53387d82c0fdc683d0f16b5e7e9b009b49 /src/plugins/nat/nat_inlines.h
parent	78c61c33f323720f8225f53969bf196e6fc9e6dd (diff)
nat: replace speculative buffer enqueue model
Replace speculative buffer enqueue coding model with
vlib_get_buffers(...)/vlib_buffer_enqueue_to_next(...).

Type: improvement
Change-Id: I7dbfac2234a7bd754c599857eb1d5b601da5bc7c
Signed-off-by: Klement Sekera <ksekera@cisco.com>
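For orientation, a minimal sketch of the coding model this change adopts
(not part of the commit; the node function name and EXAMPLE_NEXT_DROP next
index are hypothetical): buffer indices are translated to pointers once
with vlib_get_buffers(), a per-packet next index is recorded in a local
nexts[] array, and the whole frame is handed off in a single call to
vlib_buffer_enqueue_to_next().

    static uword
    example_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
		     vlib_frame_t * frame)
    {
      u32 *from = vlib_frame_vector_args (frame);
      u32 n_left_from = frame->n_vectors;
      vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
      u16 nexts[VLIB_FRAME_SIZE], *next = nexts;

      /* resolve all buffer pointers in one pass */
      vlib_get_buffers (vm, from, bufs, n_left_from);

      while (n_left_from > 0)
	{
	  /* per-packet work goes here; pick a next node for b[0] */
	  next[0] = EXAMPLE_NEXT_DROP;	/* hypothetical next index */
	  b += 1;
	  next += 1;
	  n_left_from -= 1;
	}

      /* one batched enqueue replaces the per-packet speculative
         to_next bookkeeping of the old model */
      vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
      return frame->n_vectors;
    }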
Diffstat (limited to 'src/plugins/nat/nat_inlines.h')
-rw-r--r--	src/plugins/nat/nat_inlines.h	157
1 file changed, 68 insertions(+), 89 deletions(-)
diff --git a/src/plugins/nat/nat_inlines.h b/src/plugins/nat/nat_inlines.h
index 0254de34d49..01c866a0787 100644
--- a/src/plugins/nat/nat_inlines.h
+++ b/src/plugins/nat/nat_inlines.h
@@ -105,120 +105,99 @@ nat_pre_node_fn_inline (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * frame, u32 def_next)
{
- u32 n_left_from, *from, *to_next;
- u16 next_index;
+ u32 n_left_from, *from;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
- next_index = node->cached_next_index;
- while (n_left_from > 0)
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+ u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
+ vlib_get_buffers (vm, from, b, n_left_from);
+
+ while (n_left_from >= 2)
{
- u32 n_left_to_next;
+ u32 next0, next1;
+ u32 arc_next0, arc_next1;
+ vlib_buffer_t *b0, *b1;
- vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+ b0 = *b;
+ b++;
+ b1 = *b;
+ b++;
- while (n_left_from >= 4 && n_left_to_next >= 2)
+ /* Prefetch next iteration. */
+ if (PREDICT_TRUE (n_left_from >= 4))
{
- u32 next0, next1;
- u32 arc_next0, arc_next1;
- u32 bi0, bi1;
- vlib_buffer_t *b0, *b1;
-
- /* Prefetch next iteration. */
- {
- vlib_buffer_t *p2, *p3;
-
- p2 = vlib_get_buffer (vm, from[2]);
- p3 = vlib_get_buffer (vm, from[3]);
-
- vlib_prefetch_buffer_header (p2, LOAD);
- vlib_prefetch_buffer_header (p3, LOAD);
-
- CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, LOAD);
- }
+ vlib_buffer_t *p2, *p3;
- /* speculatively enqueue b0 and b1 to the current next frame */
- to_next[0] = bi0 = from[0];
- to_next[1] = bi1 = from[1];
- from += 2;
- to_next += 2;
- n_left_from -= 2;
- n_left_to_next -= 2;
+ p2 = *b;
+ p3 = *(b + 1);
- b0 = vlib_get_buffer (vm, bi0);
- b1 = vlib_get_buffer (vm, bi1);
+ vlib_prefetch_buffer_header (p2, LOAD);
+ vlib_prefetch_buffer_header (p3, LOAD);
- next0 = def_next;
- next1 = def_next;
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, LOAD);
+ }
- vnet_feature_next (&arc_next0, b0);
- vnet_feature_next (&arc_next1, b1);
+ next0 = def_next;
+ next1 = def_next;
- vnet_buffer2 (b0)->nat.arc_next = arc_next0;
- vnet_buffer2 (b1)->nat.arc_next = arc_next1;
+ vnet_feature_next (&arc_next0, b0);
+ vnet_feature_next (&arc_next1, b1);
- if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
- {
- if (b0->flags & VLIB_BUFFER_IS_TRACED)
- {
- nat_pre_trace_t *t =
- vlib_add_trace (vm, node, b0, sizeof (*t));
- t->next_index = next0;
- t->arc_next_index = arc_next0;
- }
- if (b1->flags & VLIB_BUFFER_IS_TRACED)
- {
- nat_pre_trace_t *t =
- vlib_add_trace (vm, node, b0, sizeof (*t));
- t->next_index = next1;
- t->arc_next_index = arc_next1;
- }
- }
+ vnet_buffer2 (b0)->nat.arc_next = arc_next0;
+ vnet_buffer2 (b1)->nat.arc_next = arc_next1;
- /* verify speculative enqueues, maybe switch current next frame */
- vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, bi1, next0, next1);
- }
-
- while (n_left_from > 0 && n_left_to_next > 0)
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
{
- u32 next0;
- u32 arc_next0;
- u32 bi0;
- vlib_buffer_t *b0;
-
- /* speculatively enqueue b0 to the current next frame */
- bi0 = from[0];
- to_next[0] = bi0;
- from += 1;
- to_next += 1;
- n_left_from -= 1;
- n_left_to_next -= 1;
-
- b0 = vlib_get_buffer (vm, bi0);
- next0 = def_next;
- vnet_feature_next (&arc_next0, b0);
- vnet_buffer2 (b0)->nat.arc_next = arc_next0;
-
- if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
- && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
{
nat_pre_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
t->next_index = next0;
t->arc_next_index = arc_next0;
}
+ if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ nat_pre_trace_t *t = vlib_add_trace (vm, node, b1, sizeof (*t));
+ t->next_index = next1;
+ t->arc_next_index = arc_next1;
+ }
+ }
+
+ n_left_from -= 2;
+ next[0] = next0;
+ next[1] = next1;
+ next += 2;
+ }
- /* verify speculative enqueue, maybe switch current next frame */
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, next0);
+ while (n_left_from > 0)
+ {
+ u32 next0;
+ u32 arc_next0;
+ vlib_buffer_t *b0;
+
+ b0 = *b;
+ b++;
+
+ next0 = def_next;
+ vnet_feature_next (&arc_next0, b0);
+ vnet_buffer2 (b0)->nat.arc_next = arc_next0;
+
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
+ && (b0->flags & VLIB_BUFFER_IS_TRACED)))
+ {
+ nat_pre_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
+ t->next_index = next0;
+ t->arc_next_index = arc_next0;
}
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ n_left_from--;
+ next[0] = next0;
+ next++;
}
+ vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
return frame->n_vectors;
}
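For contrast, the pattern being removed (condensed from the '-' lines
above) claimed space in the next frame up front and enqueued each buffer
speculatively, assuming its next index matched the current frame;
vlib_validate_buffer_enqueue_x1() then repaired the enqueue whenever the
speculation failed. A condensed sketch, with the per-packet work elided:

      u32 *to_next, n_left_to_next;
      u16 next_index = node->cached_next_index;

      while (n_left_from > 0)
	{
	  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
	  while (n_left_from > 0 && n_left_to_next > 0)
	    {
	      u32 bi0 = from[0];
	      u32 next0 = def_next;

	      /* speculative enqueue: assume next0 == next_index */
	      to_next[0] = bi0;
	      from += 1;
	      to_next += 1;
	      n_left_from -= 1;
	      n_left_to_next -= 1;

	      /* ... per-packet work may change next0 ... */

	      /* verify the speculation, switch frames if it was wrong */
	      vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					       to_next, n_left_to_next,
					       bi0, next0);
	    }
	  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
	}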