-rw-r--r--  vnet/vnet/l2/l2_input.c  |  81
-rw-r--r--  vnet/vnet/l2/l2_learn.c  | 113
-rw-r--r--  vnet/vnet/l2/l2_output.c | 288
3 files changed, 265 insertions(+), 217 deletions(-)
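
All three nodes get the same treatment: the dual-loop (two packets per iteration, prefetching two buffers ahead) becomes a quad-loop (four packets per iteration, prefetching four ahead), the per-interface config prefetches are dropped, and error counters are bumped through vlib_node_increment_counter() instead of indexing vm->error_main directly. The sketch below shows the bare quad-loop skeleton this pattern follows; it assumes the standard vlib APIs, and process_one() plus MY_NODE_ERROR_PKTS are hypothetical stand-ins for the node-specific per-packet work and error counter, not code from this patch.

/* Minimal quad-loop skeleton (a sketch, not any of the actual nodes).
   process_one() stands in for the node-specific per-packet work. */
#include <vlib/vlib.h>

static_always_inline void
process_one (vlib_main_t * vm, vlib_buffer_t * b, u32 * next)
{
  *next = 0;			/* node-specific dispatch goes here */
}

static uword
quad_loop_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
		   vlib_frame_t * frame)
{
  u32 n_left_from, *from, *to_next, next_index, n_left_to_next;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      /* Quad loop: 4 packets to process plus 4 to prefetch ahead. */
      while (n_left_from >= 8 && n_left_to_next >= 4)
	{
	  u32 bi0, bi1, bi2, bi3, next0, next1, next2, next3;
	  vlib_buffer_t *b0, *b1, *b2, *b3;

	  {			/* prefetch headers for the next iteration */
	    vlib_buffer_t *p4 = vlib_get_buffer (vm, from[4]);
	    vlib_buffer_t *p5 = vlib_get_buffer (vm, from[5]);
	    vlib_buffer_t *p6 = vlib_get_buffer (vm, from[6]);
	    vlib_buffer_t *p7 = vlib_get_buffer (vm, from[7]);
	    vlib_prefetch_buffer_header (p4, LOAD);
	    vlib_prefetch_buffer_header (p5, LOAD);
	    vlib_prefetch_buffer_header (p6, LOAD);
	    vlib_prefetch_buffer_header (p7, LOAD);
	  }

	  /* speculatively enqueue 4 buffers to the current next frame */
	  to_next[0] = bi0 = from[0];
	  to_next[1] = bi1 = from[1];
	  to_next[2] = bi2 = from[2];
	  to_next[3] = bi3 = from[3];
	  from += 4;
	  to_next += 4;
	  n_left_from -= 4;
	  n_left_to_next -= 4;

	  b0 = vlib_get_buffer (vm, bi0);
	  b1 = vlib_get_buffer (vm, bi1);
	  b2 = vlib_get_buffer (vm, bi2);
	  b3 = vlib_get_buffer (vm, bi3);

	  process_one (vm, b0, &next0);
	  process_one (vm, b1, &next1);
	  process_one (vm, b2, &next2);
	  process_one (vm, b3, &next3);

	  /* fix up any buffer whose next node differs from next_index */
	  vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, bi1, bi2, bi3,
					   next0, next1, next2, next3);
	}

      /* single loop: drain the remainder one packet at a time */
      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0, next0;
	  vlib_buffer_t *b0;

	  to_next[0] = bi0 = from[0];
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;

	  b0 = vlib_get_buffer (vm, bi0);
	  process_one (vm, b0, &next0);

	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Per-node counters now go through vlib_node_increment_counter()
     instead of indexing vm->error_main; the counter id below is a
     hypothetical example.
     vlib_node_increment_counter (vm, node->node_index,
				  MY_NODE_ERROR_PKTS, frame->n_vectors); */
  return frame->n_vectors;
}

Processing four packets per iteration amortizes the per-packet loop overhead and gives the prefetches issued for iteration N+1 more time to complete before those buffers are touched.
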
diff --git a/vnet/vnet/l2/l2_input.c b/vnet/vnet/l2/l2_input.c
index dfb7bce2f36..5d4a3761857 100644
--- a/vnet/vnet/l2/l2_input.c
+++ b/vnet/vnet/l2/l2_input.c
@@ -256,9 +256,6 @@ l2input_node_fn (vlib_main_t * vm,
u32 n_left_from, *from, *to_next;
l2input_next_t next_index;
l2input_main_t *msm = &l2input_main;
- vlib_node_t *n = vlib_get_node (vm, l2input_node.index);
- u32 node_counter_base_index = n->error_heap_index;
- vlib_error_main_t *em = &vm->error_main;
u32 cpu_index = os_get_cpu_number ();
from = vlib_frame_vector_args (frame);
@@ -272,40 +269,32 @@ l2input_node_fn (vlib_main_t * vm,
/* get space to enqueue frame to graph node "next_index" */
vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
- while (n_left_from >= 6 && n_left_to_next >= 2)
+ while (n_left_from >= 8 && n_left_to_next >= 4)
{
- u32 bi0, bi1;
- vlib_buffer_t *b0, *b1;
- u32 next0, next1;
- u32 sw_if_index0, sw_if_index1;
+ u32 bi0, bi1, bi2, bi3;
+ vlib_buffer_t *b0, *b1, *b2, *b3;
+ u32 next0, next1, next2, next3;
+ u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
/* Prefetch next iteration. */
{
- vlib_buffer_t *p2, *p3, *p4, *p5;
- u32 sw_if_index2, sw_if_index3;
+ vlib_buffer_t *p4, *p5, *p6, *p7;
- p2 = vlib_get_buffer (vm, from[2]);
- p3 = vlib_get_buffer (vm, from[3]);
p4 = vlib_get_buffer (vm, from[4]);
p5 = vlib_get_buffer (vm, from[5]);
+ p6 = vlib_get_buffer (vm, from[6]);
+ p7 = vlib_get_buffer (vm, from[7]);
/* Prefetch the buffer header and packet for the N+2 loop iteration */
vlib_prefetch_buffer_header (p4, LOAD);
vlib_prefetch_buffer_header (p5, LOAD);
+ vlib_prefetch_buffer_header (p6, LOAD);
+ vlib_prefetch_buffer_header (p7, LOAD);
CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
-
- /*
- * Prefetch the input config for the N+1 loop iteration
- * This depends on the buffer header above
- */
- sw_if_index2 = vnet_buffer (p2)->sw_if_index[VLIB_RX];
- sw_if_index3 = vnet_buffer (p3)->sw_if_index[VLIB_RX];
- CLIB_PREFETCH (&msm->configs[sw_if_index2], CLIB_CACHE_LINE_BYTES,
- LOAD);
- CLIB_PREFETCH (&msm->configs[sw_if_index3], CLIB_CACHE_LINE_BYTES,
- LOAD);
+ CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
/*
* Don't bother prefetching the bridge-domain config (which
@@ -319,19 +308,25 @@ l2input_node_fn (vlib_main_t * vm,
/* bi is "buffer index", b is pointer to the buffer */
to_next[0] = bi0 = from[0];
to_next[1] = bi1 = from[1];
- from += 2;
- to_next += 2;
- n_left_from -= 2;
- n_left_to_next -= 2;
+ to_next[2] = bi2 = from[2];
+ to_next[3] = bi3 = from[3];
+ from += 4;
+ to_next += 4;
+ n_left_from -= 4;
+ n_left_to_next -= 4;
b0 = vlib_get_buffer (vm, bi0);
b1 = vlib_get_buffer (vm, bi1);
+ b2 = vlib_get_buffer (vm, bi2);
+ b3 = vlib_get_buffer (vm, bi3);
if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
{
/* RX interface handles */
sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+ sw_if_index2 = vnet_buffer (b2)->sw_if_index[VLIB_RX];
+ sw_if_index3 = vnet_buffer (b3)->sw_if_index[VLIB_RX];
if (b0->flags & VLIB_BUFFER_IS_TRACED)
{
@@ -351,19 +346,40 @@ l2input_node_fn (vlib_main_t * vm,
clib_memcpy (t->src, h1->src_address, 6);
clib_memcpy (t->dst, h1->dst_address, 6);
}
+ if (b2->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ ethernet_header_t *h2 = vlib_buffer_get_current (b2);
+ l2input_trace_t *t =
+ vlib_add_trace (vm, node, b2, sizeof (*t));
+ t->sw_if_index = sw_if_index2;
+ clib_memcpy (t->src, h2->src_address, 6);
+ clib_memcpy (t->dst, h2->dst_address, 6);
+ }
+ if (b3->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ ethernet_header_t *h3 = vlib_buffer_get_current (b3);
+ l2input_trace_t *t =
+ vlib_add_trace (vm, node, b3, sizeof (*t));
+ t->sw_if_index = sw_if_index3;
+ clib_memcpy (t->src, h3->src_address, 6);
+ clib_memcpy (t->dst, h3->dst_address, 6);
+ }
}
- em->counters[node_counter_base_index + L2INPUT_ERROR_L2INPUT] += 2;
+ vlib_node_increment_counter (vm, l2input_node.index,
+ L2INPUT_ERROR_L2INPUT, 4);
classify_and_dispatch (vm, node, cpu_index, msm, b0, &next0);
-
classify_and_dispatch (vm, node, cpu_index, msm, b1, &next1);
+ classify_and_dispatch (vm, node, cpu_index, msm, b2, &next2);
+ classify_and_dispatch (vm, node, cpu_index, msm, b3, &next3);
/* verify speculative enqueues, maybe switch current next frame */
/* if next0==next1==next_index then nothing special needs to be done */
- vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
to_next, n_left_to_next,
- bi0, bi1, next0, next1);
+ bi0, bi1, bi2, bi3,
+ next0, next1, next2, next3);
}
while (n_left_from > 0 && n_left_to_next > 0)
@@ -394,7 +410,8 @@ l2input_node_fn (vlib_main_t * vm,
clib_memcpy (t->dst, h0->dst_address, 6);
}
- em->counters[node_counter_base_index + L2INPUT_ERROR_L2INPUT] += 1;
+ vlib_node_increment_counter (vm, l2input_node.index,
+ L2INPUT_ERROR_L2INPUT, 1);
classify_and_dispatch (vm, node, cpu_index, msm, b0, &next0);
diff --git a/vnet/vnet/l2/l2_learn.c b/vnet/vnet/l2/l2_learn.c
index e34fe7a83c8..9feb7289466 100644
--- a/vnet/vnet/l2/l2_learn.c
+++ b/vnet/vnet/l2/l2_learn.c
@@ -258,51 +258,65 @@ l2learn_node_fn (vlib_main_t * vm,
/* get space to enqueue frame to graph node "next_index" */
vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
- while (n_left_from >= 4 && n_left_to_next >= 2)
+ while (n_left_from >= 8 && n_left_to_next >= 4)
{
- u32 bi0, bi1;
- vlib_buffer_t *b0, *b1;
- u32 next0, next1;
- u32 sw_if_index0, sw_if_index1;
- ethernet_header_t *h0, *h1;
- l2fib_entry_key_t key0, key1;
- l2fib_entry_result_t result0, result1;
- u32 bucket0, bucket1;
+ u32 bi0, bi1, bi2, bi3;
+ vlib_buffer_t *b0, *b1, *b2, *b3;
+ u32 next0, next1, next2, next3;
+ u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
+ ethernet_header_t *h0, *h1, *h2, *h3;
+ l2fib_entry_key_t key0, key1, key2, key3;
+ l2fib_entry_result_t result0, result1, result2, result3;
+ u32 bucket0, bucket1, bucket2, bucket3;
/* Prefetch next iteration. */
{
- vlib_buffer_t *p2, *p3;
-
- p2 = vlib_get_buffer (vm, from[2]);
- p3 = vlib_get_buffer (vm, from[3]);
-
- vlib_prefetch_buffer_header (p2, LOAD);
- vlib_prefetch_buffer_header (p3, LOAD);
-
- CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, STORE);
- CLIB_PREFETCH (p3->data, CLIB_CACHE_LINE_BYTES, STORE);
+ vlib_buffer_t *p4, *p5, *p6, *p7;
+
+ p4 = vlib_get_buffer (vm, from[4]);
+ p5 = vlib_get_buffer (vm, from[5]);
+ p6 = vlib_get_buffer (vm, from[6]);
+ p7 = vlib_get_buffer (vm, from[7]);
+
+ vlib_prefetch_buffer_header (p4, LOAD);
+ vlib_prefetch_buffer_header (p5, LOAD);
+ vlib_prefetch_buffer_header (p6, LOAD);
+ vlib_prefetch_buffer_header (p7, LOAD);
+
+ CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
}
/* speculatively enqueue b0 and b1 to the current next frame */
/* bi is "buffer index", b is pointer to the buffer */
to_next[0] = bi0 = from[0];
to_next[1] = bi1 = from[1];
- from += 2;
- to_next += 2;
- n_left_from -= 2;
- n_left_to_next -= 2;
+ to_next[2] = bi2 = from[2];
+ to_next[3] = bi3 = from[3];
+ from += 4;
+ to_next += 4;
+ n_left_from -= 4;
+ n_left_to_next -= 4;
b0 = vlib_get_buffer (vm, bi0);
b1 = vlib_get_buffer (vm, bi1);
+ b2 = vlib_get_buffer (vm, bi2);
+ b3 = vlib_get_buffer (vm, bi3);
/* RX interface handles */
sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+ sw_if_index2 = vnet_buffer (b2)->sw_if_index[VLIB_RX];
+ sw_if_index3 = vnet_buffer (b3)->sw_if_index[VLIB_RX];
- /* Process 2 x pkts */
+ /* Process 4 x pkts */
h0 = vlib_buffer_get_current (b0);
h1 = vlib_buffer_get_current (b1);
+ h2 = vlib_buffer_get_current (b2);
+ h3 = vlib_buffer_get_current (b3);
if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
{
@@ -324,18 +338,42 @@ l2learn_node_fn (vlib_main_t * vm,
clib_memcpy (t->src, h1->src_address, 6);
clib_memcpy (t->dst, h1->dst_address, 6);
}
+ if (b2->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ l2learn_trace_t *t =
+ vlib_add_trace (vm, node, b2, sizeof (*t));
+ t->sw_if_index = sw_if_index2;
+ t->bd_index = vnet_buffer (b2)->l2.bd_index;
+ clib_memcpy (t->src, h2->src_address, 6);
+ clib_memcpy (t->dst, h2->dst_address, 6);
+ }
+ if (b3->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ l2learn_trace_t *t =
+ vlib_add_trace (vm, node, b3, sizeof (*t));
+ t->sw_if_index = sw_if_index3;
+ t->bd_index = vnet_buffer (b3)->l2.bd_index;
+ clib_memcpy (t->src, h3->src_address, 6);
+ clib_memcpy (t->dst, h3->dst_address, 6);
+ }
}
- /* process 2 pkts */
- em->counters[node_counter_base_index + L2LEARN_ERROR_L2LEARN] += 2;
+ /* process 4 pkts */
+ vlib_node_increment_counter (vm, l2learn_node.index,
+ L2LEARN_ERROR_L2LEARN, 4);
- l2fib_lookup_2 (msm->mac_table, &cached_key, &cached_result,
+ l2fib_lookup_4 (msm->mac_table, &cached_key, &cached_result,
h0->src_address,
h1->src_address,
+ h2->src_address,
+ h3->src_address,
vnet_buffer (b0)->l2.bd_index,
vnet_buffer (b1)->l2.bd_index,
- &key0,
- &key1, &bucket0, &bucket1, &result0, &result1);
+ vnet_buffer (b2)->l2.bd_index,
+ vnet_buffer (b3)->l2.bd_index,
+ &key0, &key1, &key2, &key3,
+ &bucket0, &bucket1, &bucket2, &bucket3,
+ &result0, &result1, &result2, &result3);
l2learn_process (node, msm, &em->counters[node_counter_base_index],
b0, sw_if_index0, &key0, &cached_key,
@@ -345,11 +383,20 @@ l2learn_node_fn (vlib_main_t * vm,
b1, sw_if_index1, &key1, &cached_key,
&bucket1, &result1, &next1);
+ l2learn_process (node, msm, &em->counters[node_counter_base_index],
+ b2, sw_if_index2, &key2, &cached_key,
+ &bucket2, &result2, &next2);
+
+ l2learn_process (node, msm, &em->counters[node_counter_base_index],
+ b3, sw_if_index3, &key3, &cached_key,
+ &bucket3, &result3, &next3);
+
/* verify speculative enqueues, maybe switch current next frame */
/* if next0==next1==next_index then nothing special needs to be done */
- vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
to_next, n_left_to_next,
- bi0, bi1, next0, next1);
+ bi0, bi1, bi2, bi3,
+ next0, next1, next2, next3);
}
while (n_left_from > 0 && n_left_to_next > 0)
@@ -388,7 +435,9 @@ l2learn_node_fn (vlib_main_t * vm,
}
/* process 1 pkt */
- em->counters[node_counter_base_index + L2LEARN_ERROR_L2LEARN] += 1;
+ vlib_node_increment_counter (vm, l2learn_node.index,
+ L2LEARN_ERROR_L2LEARN, 1);
+
l2fib_lookup_1 (msm->mac_table, &cached_key, &cached_result,
h0->src_address, vnet_buffer (b0)->l2.bd_index,
diff --git a/vnet/vnet/l2/l2_output.c b/vnet/vnet/l2/l2_output.c
index 22ce663b899..953fcb0222a 100644
--- a/vnet/vnet/l2/l2_output.c
+++ b/vnet/vnet/l2/l2_output.c
@@ -92,6 +92,51 @@ split_horizon_violation (u8 shg1, u8 shg2)
}
}
+static_always_inline void
+l2output_vtr (vlib_node_runtime_t * node, l2_output_config_t * config,
+ u32 feature_bitmap, vlib_buffer_t * b, u32 * next)
+{
+ if (PREDICT_FALSE (config->out_vtr_flag))
+ {
+ /* Perform pre-vtr EFP filter check if configured */
+ if (config->output_vtr.push_and_pop_bytes)
+ {
+ /*
+ * Perform output vlan tag rewrite and the pre-vtr EFP filter check.
+ * The EFP Filter only needs to be run if there is an output VTR
+ * configured. The flag for the post-vtr EFP Filter node is used
+ * to trigger the pre-vtr check as well.
+ */
+ u32 failed1 = (feature_bitmap & L2OUTPUT_FEAT_EFP_FILTER)
+ && (l2_efp_filter_process (b, &(config->input_vtr)));
+ u32 failed2 = l2_vtr_process (b, &(config->output_vtr));
+
+ if (PREDICT_FALSE (failed1 | failed2))
+ {
+ *next = L2OUTPUT_NEXT_DROP;
+ if (failed2)
+ {
+ b->error = node->errors[L2OUTPUT_ERROR_VTR_DROP];
+ }
+ if (failed1)
+ {
+ b->error = node->errors[L2OUTPUT_ERROR_EFP_DROP];
+ }
+ }
+ }
+ /* perform the PBB rewrite */
+ else if (config->output_pbb_vtr.push_and_pop_bytes)
+ {
+ u32 failed = l2_pbb_process (b, &(config->output_pbb_vtr));
+ if (PREDICT_FALSE (failed))
+ {
+ *next = L2OUTPUT_NEXT_DROP;
+ b->error = node->errors[L2OUTPUT_ERROR_VTR_DROP];
+ }
+ }
+ }
+}
+
static vlib_node_registration_t l2output_node;
@@ -102,9 +147,6 @@ l2output_node_fn (vlib_main_t * vm,
u32 n_left_from, *from, *to_next;
l2output_next_t next_index;
l2output_main_t *msm = &l2output_main;
- vlib_node_t *n = vlib_get_node (vm, l2output_node.index);
- u32 node_counter_base_index = n->error_heap_index;
- vlib_error_main_t *em = &vm->error_main;
u32 cached_sw_if_index;
u32 cached_next_index;
@@ -123,64 +165,61 @@ l2output_node_fn (vlib_main_t * vm,
/* get space to enqueue frame to graph node "next_index" */
vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
- while (n_left_from >= 6 && n_left_to_next >= 2)
+ while (n_left_from >= 8 && n_left_to_next >= 4)
{
- u32 bi0, bi1;
- vlib_buffer_t *b0, *b1;
- u32 next0, next1;
- u32 sw_if_index0, sw_if_index1;
- ethernet_header_t *h0, *h1;
- l2_output_config_t *config0, *config1;
+ u32 bi0, bi1, bi2, bi3;
+ vlib_buffer_t *b0, *b1, *b2, *b3;
+ u32 next0, next1, next2, next3;
+ u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
+ ethernet_header_t *h0, *h1, *h2, *h3;
+ l2_output_config_t *config0, *config1, *config2, *config3;
u32 feature_bitmap0, feature_bitmap1;
+ u32 feature_bitmap2, feature_bitmap3;
/* Prefetch next iteration. */
{
- vlib_buffer_t *p2, *p3, *p4, *p5;
- u32 sw_if_index2, sw_if_index3;
+ vlib_buffer_t *p4, *p5, *p6, *p7;
- p2 = vlib_get_buffer (vm, from[2]);
- p3 = vlib_get_buffer (vm, from[3]);
p4 = vlib_get_buffer (vm, from[4]);
p5 = vlib_get_buffer (vm, from[5]);
+ p6 = vlib_get_buffer (vm, from[6]);
+ p7 = vlib_get_buffer (vm, from[7]);
/* Prefetch the buffer header for the N+2 loop iteration */
vlib_prefetch_buffer_header (p4, LOAD);
vlib_prefetch_buffer_header (p5, LOAD);
- /*
- * Note: no need to prefetch packet data.
- * This node doesn't reference it.
- *
- * Prefetch the input config for the N+1 loop iteration
- * This depends on the buffer header above
- */
- sw_if_index2 = vnet_buffer (p2)->sw_if_index[VLIB_TX];
- sw_if_index3 = vnet_buffer (p3)->sw_if_index[VLIB_TX];
- CLIB_PREFETCH (&msm->configs[sw_if_index2], CLIB_CACHE_LINE_BYTES,
- LOAD);
- CLIB_PREFETCH (&msm->configs[sw_if_index3], CLIB_CACHE_LINE_BYTES,
- LOAD);
+ vlib_prefetch_buffer_header (p6, LOAD);
+ vlib_prefetch_buffer_header (p7, LOAD);
}
/* speculatively enqueue b0 and b1 to the current next frame */
/* bi is "buffer index", b is pointer to the buffer */
to_next[0] = bi0 = from[0];
to_next[1] = bi1 = from[1];
- from += 2;
- to_next += 2;
- n_left_from -= 2;
- n_left_to_next -= 2;
+ to_next[2] = bi2 = from[2];
+ to_next[3] = bi3 = from[3];
+ from += 4;
+ to_next += 4;
+ n_left_from -= 4;
+ n_left_to_next -= 4;
b0 = vlib_get_buffer (vm, bi0);
b1 = vlib_get_buffer (vm, bi1);
+ b2 = vlib_get_buffer (vm, bi2);
+ b3 = vlib_get_buffer (vm, bi3);
/* TX interface handles */
sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
+ sw_if_index2 = vnet_buffer (b2)->sw_if_index[VLIB_TX];
+ sw_if_index3 = vnet_buffer (b3)->sw_if_index[VLIB_TX];
if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
{
h0 = vlib_buffer_get_current (b0);
h1 = vlib_buffer_get_current (b1);
+ h2 = vlib_buffer_get_current (b2);
+ h3 = vlib_buffer_get_current (b3);
if (b0->flags & VLIB_BUFFER_IS_TRACED)
{
l2output_trace_t *t =
@@ -197,14 +236,32 @@ l2output_node_fn (vlib_main_t * vm,
clib_memcpy (t->src, h1->src_address, 6);
clib_memcpy (t->dst, h1->dst_address, 6);
}
+ if (b2->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ l2output_trace_t *t =
+ vlib_add_trace (vm, node, b2, sizeof (*t));
+ t->sw_if_index = sw_if_index2;
+ clib_memcpy (t->src, h2->src_address, 6);
+ clib_memcpy (t->dst, h2->dst_address, 6);
+ }
+ if (b3->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ l2output_trace_t *t =
+ vlib_add_trace (vm, node, b3, sizeof (*t));
+ t->sw_if_index = sw_if_index3;
+ clib_memcpy (t->src, h3->src_address, 6);
+ clib_memcpy (t->dst, h3->dst_address, 6);
+ }
}
- em->counters[node_counter_base_index + L2OUTPUT_ERROR_L2OUTPUT] +=
- 2;
+ vlib_node_increment_counter (vm, l2output_node.index,
+ L2OUTPUT_ERROR_L2OUTPUT, 4);
/* Get config for the output interface */
config0 = vec_elt_at_index (msm->configs, sw_if_index0);
config1 = vec_elt_at_index (msm->configs, sw_if_index1);
+ config2 = vec_elt_at_index (msm->configs, sw_if_index2);
+ config3 = vec_elt_at_index (msm->configs, sw_if_index3);
/*
* Get features from the config
@@ -212,6 +269,8 @@ l2output_node_fn (vlib_main_t * vm,
*/
feature_bitmap0 = config0->feature_bitmap;
feature_bitmap1 = config1->feature_bitmap;
+ feature_bitmap2 = config2->feature_bitmap;
+ feature_bitmap3 = config3->feature_bitmap;
/* Determine next node */
l2_output_dispatch (msm->vlib_main,
@@ -232,86 +291,35 @@ l2output_node_fn (vlib_main_t * vm,
&msm->next_nodes,
b1, sw_if_index1, feature_bitmap1, &next1);
- if (PREDICT_FALSE (config0->out_vtr_flag))
- {
- /* Perform pre-vtr EFP filter check if configured */
- if (config0->output_vtr.push_and_pop_bytes)
- {
- /*
- * Perform output vlan tag rewrite and the pre-vtr EFP filter check.
- * The EFP Filter only needs to be run if there is an output VTR
- * configured. The flag for the post-vtr EFP Filter node is used
- * to trigger the pre-vtr check as well.
- */
- u32 failed1 = (feature_bitmap0 & L2OUTPUT_FEAT_EFP_FILTER)
- && (l2_efp_filter_process (b0, &(config0->input_vtr)));
- u32 failed2 = l2_vtr_process (b0, &(config0->output_vtr));
-
- if (PREDICT_FALSE (failed1 | failed2))
- {
- next0 = L2OUTPUT_NEXT_DROP;
- if (failed2)
- {
- b0->error = node->errors[L2OUTPUT_ERROR_VTR_DROP];
- }
- if (failed1)
- {
- b0->error = node->errors[L2OUTPUT_ERROR_EFP_DROP];
- }
- }
- }
- // perform the PBB rewrite
- else if (config0->output_pbb_vtr.push_and_pop_bytes)
- {
- u32 failed =
- l2_pbb_process (b0, &(config0->output_pbb_vtr));
- if (PREDICT_FALSE (failed))
- {
- next0 = L2OUTPUT_NEXT_DROP;
- b0->error = node->errors[L2OUTPUT_ERROR_VTR_DROP];
- }
- }
- }
- if (PREDICT_FALSE (config1->out_vtr_flag))
- {
- /* Perform pre-vtr EFP filter check if configured */
- if (config1->output_vtr.push_and_pop_bytes)
- {
- u32 failed1 = (feature_bitmap1 & L2OUTPUT_FEAT_EFP_FILTER)
- && (l2_efp_filter_process (b1, &(config1->input_vtr)));
- u32 failed2 = l2_vtr_process (b1, &(config1->output_vtr));
-
- if (PREDICT_FALSE (failed1 | failed2))
- {
- next1 = L2OUTPUT_NEXT_DROP;
- if (failed2)
- {
- b1->error = node->errors[L2OUTPUT_ERROR_VTR_DROP];
- }
- if (failed1)
- {
- b1->error = node->errors[L2OUTPUT_ERROR_EFP_DROP];
- }
- }
- }
- // perform the PBB rewrite
- else if (config1->output_pbb_vtr.push_and_pop_bytes)
- {
- u32 failed =
- l2_pbb_process (b0, &(config1->output_pbb_vtr));
- if (PREDICT_FALSE (failed))
- {
- next1 = L2OUTPUT_NEXT_DROP;
- b1->error = node->errors[L2OUTPUT_ERROR_VTR_DROP];
- }
- }
- }
+ l2_output_dispatch (msm->vlib_main,
+ msm->vnet_main,
+ node,
+ l2output_node.index,
+ &cached_sw_if_index,
+ &cached_next_index,
+ &msm->next_nodes,
+ b2, sw_if_index2, feature_bitmap2, &next2);
+
+ l2_output_dispatch (msm->vlib_main,
+ msm->vnet_main,
+ node,
+ l2output_node.index,
+ &cached_sw_if_index,
+ &cached_next_index,
+ &msm->next_nodes,
+ b3, sw_if_index3, feature_bitmap3, &next3);
+
+ l2output_vtr (node, config0, feature_bitmap0, b0, &next0);
+ l2output_vtr (node, config1, feature_bitmap1, b1, &next1);
+ l2output_vtr (node, config2, feature_bitmap2, b2, &next2);
+ l2output_vtr (node, config3, feature_bitmap3, b3, &next3);
/*
* Perform the split horizon check
* The check can only fail for non-zero shg's
*/
- if (PREDICT_FALSE (config0->shg + config1->shg))
+ if (PREDICT_FALSE (config0->shg + config1->shg +
+ config2->shg + config3->shg))
{
/* one of the checks might fail, check both */
if (split_horizon_violation
@@ -326,13 +334,26 @@ l2output_node_fn (vlib_main_t * vm,
next1 = L2OUTPUT_NEXT_DROP;
b1->error = node->errors[L2OUTPUT_ERROR_SHG_DROP];
}
+ if (split_horizon_violation
+ (config2->shg, vnet_buffer (b2)->l2.shg))
+ {
+ next2 = L2OUTPUT_NEXT_DROP;
+ b2->error = node->errors[L2OUTPUT_ERROR_SHG_DROP];
+ }
+ if (split_horizon_violation
+ (config3->shg, vnet_buffer (b3)->l2.shg))
+ {
+ next3 = L2OUTPUT_NEXT_DROP;
+ b3->error = node->errors[L2OUTPUT_ERROR_SHG_DROP];
+ }
}
/* verify speculative enqueues, maybe switch current next frame */
/* if next0==next1==next_index then nothing special needs to be done */
- vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
+ vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
to_next, n_left_to_next,
- bi0, bi1, next0, next1);
+ bi0, bi1, bi2, bi3,
+ next0, next1, next2, next3);
}
while (n_left_from > 0 && n_left_to_next > 0)
@@ -368,8 +389,8 @@ l2output_node_fn (vlib_main_t * vm,
clib_memcpy (t->dst, h0->dst_address, 6);
}
- em->counters[node_counter_base_index +
- L2OUTPUT_ERROR_L2OUTPUT] += 1;
+ vlib_node_increment_counter (vm, l2output_node.index,
+ L2OUTPUT_ERROR_L2OUTPUT, 1);
/* Get config for the output interface */
config0 = vec_elt_at_index (msm->configs, sw_if_index0);
@@ -390,47 +411,8 @@ l2output_node_fn (vlib_main_t * vm,
&msm->next_nodes,
b0, sw_if_index0, feature_bitmap0, &next0);
- if (PREDICT_FALSE (config0->out_vtr_flag))
- {
- /*
- * Perform output vlan tag rewrite and the pre-vtr EFP filter check.
- * The EFP Filter only needs to be run if there is an output VTR
- * configured. The flag for the post-vtr EFP Filter node is used
- * to trigger the pre-vtr check as well.
- */
-
- if (config0->output_vtr.push_and_pop_bytes)
- {
- /* Perform pre-vtr EFP filter check if configured */
- u32 failed1 = (feature_bitmap0 & L2OUTPUT_FEAT_EFP_FILTER)
- && (l2_efp_filter_process (b0, &(config0->input_vtr)));
- u32 failed2 = l2_vtr_process (b0, &(config0->output_vtr));
-
- if (PREDICT_FALSE (failed1 | failed2))
- {
- next0 = L2OUTPUT_NEXT_DROP;
- if (failed2)
- {
- b0->error = node->errors[L2OUTPUT_ERROR_VTR_DROP];
- }
- if (failed1)
- {
- b0->error = node->errors[L2OUTPUT_ERROR_EFP_DROP];
- }
- }
- }
- // perform the PBB rewrite
- else if (config0->output_pbb_vtr.push_and_pop_bytes)
- {
- u32 failed =
- l2_pbb_process (b0, &(config0->output_pbb_vtr));
- if (PREDICT_FALSE (failed))
- {
- next0 = L2OUTPUT_NEXT_DROP;
- b0->error = node->errors[L2OUTPUT_ERROR_VTR_DROP];
- }
- }
- }
+ l2output_vtr (node, config0, feature_bitmap0, b0, &next0);
+
/* Perform the split horizon check */
if (PREDICT_FALSE
(split_horizon_violation