Diffstat (limited to 'vnet')
-rw-r--r--  vnet/vnet/nsh-gre/encap.c  81
1 file changed, 77 insertions, 4 deletions
diff --git a/vnet/vnet/nsh-gre/encap.c b/vnet/vnet/nsh-gre/encap.c
index 8521aeaef1a..78b02178263 100644
--- a/vnet/vnet/nsh-gre/encap.c
+++ b/vnet/vnet/nsh-gre/encap.c
@@ -64,13 +64,18 @@ nsh_gre_encap (vlib_main_t * vm,
u32 n_left_from, next_index, * from, * to_next;
nsh_gre_main_t * ngm = &nsh_gre_main;
vnet_main_t * vnm = ngm->vnet_main;
+ vnet_interface_main_t * im = &vnm->interface_main;
u32 pkts_encapsulated = 0;
u16 old_l0 = 0, old_l1 = 0;
+ u32 cpu_index = os_get_cpu_number();
+ u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
from = vlib_frame_vector_args (from_frame);
n_left_from = from_frame->n_vectors;
next_index = node->cached_next_index;
+ stats_sw_if_index = node->runtime_data[0];
+ stats_n_packets = stats_n_bytes = 0;
while (n_left_from > 0)
{
@@ -85,6 +90,7 @@ nsh_gre_encap (vlib_main_t * vm,
vlib_buffer_t * b0, * b1;
u32 next0 = NSH_GRE_ENCAP_NEXT_IP4_LOOKUP;
u32 next1 = NSH_GRE_ENCAP_NEXT_IP4_LOOKUP;
+ u32 sw_if_index0, sw_if_index1, len0, len1;
vnet_hw_interface_t * hi0, * hi1;
ip4_header_t * ip0, * ip1;
u64 * copy_src0, * copy_dst0;
@@ -119,7 +125,10 @@ nsh_gre_encap (vlib_main_t * vm,
b0 = vlib_get_buffer (vm, bi0);
b1 = vlib_get_buffer (vm, bi1);
- hi0 = vnet_get_sup_hw_interface
+ /* 1-wide cache? */
+ sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
+ sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
+ hi0 = vnet_get_sup_hw_interface
(vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);
hi1 = vnet_get_sup_hw_interface
(vnm, vnet_buffer(b1)->sw_if_index[VLIB_TX]);
@@ -183,9 +192,43 @@ nsh_gre_encap (vlib_main_t * vm,
/* Reset to look up tunnel partner in the configured FIB */
vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->encap_fib_index;
+ vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
+ vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
pkts_encapsulated += 2;
- if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ len0 = vlib_buffer_length_in_chain(vm, b0);
+ len1 = vlib_buffer_length_in_chain(vm, b1);
+ stats_n_packets += 2;
+ stats_n_bytes += len0 + len1;
+
+ /* Batch stats increments on the same nsh-gre tunnel so the counter
+ is not incremented per packet. Note that stats are still incremented
+ for deleted and admin-down tunnels where packets are dropped. It is not
+ worth checking for this rare case at the cost of normal-path performance. */
+ if (PREDICT_FALSE(
+ (sw_if_index0 != stats_sw_if_index)
+ || (sw_if_index1 != stats_sw_if_index))) {
+ stats_n_packets -= 2;
+ stats_n_bytes -= len0 + len1;
+ if (sw_if_index0 == sw_if_index1) {
+ if (stats_n_packets)
+ vlib_increment_combined_counter(
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
+ cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
+ stats_sw_if_index = sw_if_index0;
+ stats_n_packets = 2;
+ stats_n_bytes = len0 + len1;
+ } else {
+ vlib_increment_combined_counter(
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
+ cpu_index, sw_if_index0, 1, len0);
+ vlib_increment_combined_counter(
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
+ cpu_index, sw_if_index1, 1, len1);
+ }
+ }
+
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
{
nsh_gre_encap_trace_t *tr =
vlib_add_trace (vm, node, b0, sizeof (*tr));
@@ -209,6 +252,7 @@ nsh_gre_encap (vlib_main_t * vm,
u32 bi0;
vlib_buffer_t * b0;
u32 next0 = NSH_GRE_ENCAP_NEXT_IP4_LOOKUP;
+ u32 sw_if_index0, len0;
vnet_hw_interface_t * hi0;
ip4_header_t * ip0;
u64 * copy_src0, * copy_dst0;
@@ -226,7 +270,8 @@ nsh_gre_encap (vlib_main_t * vm,
b0 = vlib_get_buffer (vm, bi0);
/* 1-wide cache? */
- hi0 = vnet_get_sup_hw_interface
+ sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
+ hi0 = vnet_get_sup_hw_interface
(vnm, vnet_buffer(b0)->sw_if_index[VLIB_TX]);
t0 = pool_elt_at_index (ngm->tunnels, hi0->dev_instance);
@@ -262,9 +307,29 @@ nsh_gre_encap (vlib_main_t * vm,
/* Reset to look up tunnel partner in the configured FIB */
vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
+ vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
pkts_encapsulated ++;
- if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
+ len0 = vlib_buffer_length_in_chain(vm, b0);
+ stats_n_packets += 1;
+ stats_n_bytes += len0;
+
+ /* Batch stats increments on the same nsh-gre tunnel so the counter
+ is not incremented per packet. Note that stats are still incremented
+ for deleted and admin-down tunnels where packets are dropped. It is not
+ worth checking for this rare case at the cost of normal-path performance. */
+ if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index)) {
+ stats_n_packets -= 1;
+ stats_n_bytes -= len0;
+ if (stats_n_packets)
+ vlib_increment_combined_counter(
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
+ cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
+ stats_n_packets = 1;
+ stats_n_bytes = len0;
+ stats_sw_if_index = sw_if_index0;
+ }
+ if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
{
nsh_gre_encap_trace_t *tr =
vlib_add_trace (vm, node, b0, sizeof (*tr));
@@ -280,6 +345,14 @@ nsh_gre_encap (vlib_main_t * vm,
vlib_node_increment_counter (vm, node->node_index,
NSH_GRE_ENCAP_ERROR_ENCAPSULATED,
pkts_encapsulated);
+ /* Increment any remaining batch stats */
+ if (stats_n_packets) {
+ vlib_increment_combined_counter(
+ im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, cpu_index,
+ stats_sw_if_index, stats_n_packets, stats_n_bytes);
+ node->runtime_data[0] = stats_sw_if_index;
+ }
+
return from_frame->n_vectors;
}
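
The change above replaces per-packet counter updates with a per-frame batch: TX stats
accumulate in (stats_n_packets, stats_n_bytes) for the current stats_sw_if_index, are
flushed to the combined interface counters only when the tunnel interface changes, and
the last interface seen is carried across frames in node->runtime_data[0]. Below is a
minimal standalone sketch of that batching logic, not the VPP implementation: pkt_t and
flush_tx_counter() are hypothetical stand-ins for vlib_buffer_t and
vlib_increment_combined_counter().

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for a vlib buffer: only the fields the
 * batching logic needs (hypothetical, for illustration). */
typedef struct { uint32_t sw_if_index; uint32_t len; } pkt_t;

/* Hypothetical stand-in for vlib_increment_combined_counter(). */
static void
flush_tx_counter (uint32_t sw_if_index, uint32_t n_packets, uint64_t n_bytes)
{
  printf ("if %u: +%u packets, +%llu bytes\n",
          sw_if_index, n_packets, (unsigned long long) n_bytes);
}

static void
encap_frame (pkt_t *pkts, int n, uint32_t *cached_sw_if_index)
{
  /* Seed the 1-wide cache from the previous frame, as the node
   * does with node->runtime_data[0]. */
  uint32_t stats_sw_if_index = *cached_sw_if_index;
  uint32_t stats_n_packets = 0;
  uint64_t stats_n_bytes = 0;

  for (int i = 0; i < n; i++)
    {
      /* Optimistically accumulate into the running batch... */
      stats_n_packets += 1;
      stats_n_bytes += pkts[i].len;

      /* ...and only when the interface changes, back this packet
       * out, flush the old batch, and start a new one. */
      if (pkts[i].sw_if_index != stats_sw_if_index)
        {
          stats_n_packets -= 1;
          stats_n_bytes -= pkts[i].len;
          if (stats_n_packets)
            flush_tx_counter (stats_sw_if_index, stats_n_packets,
                              stats_n_bytes);
          stats_sw_if_index = pkts[i].sw_if_index;
          stats_n_packets = 1;
          stats_n_bytes = pkts[i].len;
        }
    }

  /* Flush the remainder and remember the interface for the next frame. */
  if (stats_n_packets)
    {
      flush_tx_counter (stats_sw_if_index, stats_n_packets, stats_n_bytes);
      *cached_sw_if_index = stats_sw_if_index;
    }
}

int
main (void)
{
  pkt_t pkts[] = { {5, 100}, {5, 60}, {7, 1500}, {7, 40}, {5, 9000} };
  uint32_t cache = 5;   /* last interface seen by the previous frame */
  encap_frame (pkts, 5, &cache);
  return 0;
}

In the common case of a long run of packets on one tunnel, the shared counter memory is
touched once per run instead of once per packet, which is why the flush sits behind
PREDICT_FALSE in the node itself.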