diff options
author | Damjan Marion <damarion@cisco.com> | 2016-06-10 19:26:54 +0200 |
---|---|---|
committer | Damjan Marion <damarion@cisco.com> | 2016-06-15 16:38:41 +0200 |
commit | 1f25d1a2ec19283635b039e527edcee0e9301748 (patch) | |
tree | 33263f2958cd3dc2a1a449f65860c8ae7ac125c6 | |
parent | a55cf2c1e5ee791e7bc1d657c75eb65255ce050f (diff) |
Fix double-enqueued packet in interface-output dual-loop, fixes VPP-116
When speculative enqueue fails and a buffer needs to be moved to a new
node queue the original buffer is not correctly removed from the
original queue, so the buffer gets sent for transmit and encryption at the
same time. This issue will only be hit with the double loop so low
throughput traffic like pings will not hit the issue. This code path is
also only hit when the feature flag is enabled so will not be hit by
normal traffic
The patch also reorganizes code to reduce the number of branches in the interface
output node loop.
Change-Id: I3653400e58bdfd833e6c42823bab51586128b54b
Signed-off-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
Signed-off-by: Damjan Marion <damarion@cisco.com>
-rw-r--r-- | vnet/vnet/buffer.h | 2 | ||||
-rw-r--r-- | vnet/vnet/interface_output.c | 59 |
2 files changed, 42 insertions, 19 deletions
diff --git a/vnet/vnet/buffer.h b/vnet/vnet/buffer.h index 7153f3541b1..050642276b5 100644 --- a/vnet/vnet/buffer.h +++ b/vnet/vnet/buffer.h @@ -61,6 +61,8 @@ #define ETH_BUFFER_VLAN_BITS (ETH_BUFFER_VLAN_1_DEEP | \ ETH_BUFFER_VLAN_2_DEEP) +#define LOG2_BUFFER_OUTPUT_FEAT_DONE LOG2_VLIB_BUFFER_FLAG_USER(5) +#define BUFFER_OUTPUT_FEAT_DONE (1 << LOG2_BUFFER_OUTPUT_FEAT_DONE) #define foreach_buffer_opaque_union_subtype \ _(ethernet) \ diff --git a/vnet/vnet/interface_output.c b/vnet/vnet/interface_output.c index a8dd451fe82..92c1380a6fc 100644 --- a/vnet/vnet/interface_output.c +++ b/vnet/vnet/interface_output.c @@ -393,11 +393,11 @@ vnet_interface_output_node (vlib_main_t * vm, return n_buffers; } - -uword -vnet_interface_output_node_no_flatten (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * frame) +always_inline uword +vnet_interface_output_node_no_flatten_inline (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame, + int with_features) { vnet_main_t * vnm = vnet_get_main(); vnet_interface_output_runtime_t * rt = (void *) node->runtime_data; @@ -465,6 +465,7 @@ vnet_interface_output_node_no_flatten (vlib_main_t * vm, u32 bi0, bi1; vlib_buffer_t * b0, * b1; u32 tx_swif0, tx_swif1; + u32 next0, next1; /* Prefetch next iteration. 
*/ vlib_prefetch_buffer_with_index (vm, from[2], LOAD); @@ -493,19 +494,16 @@ vnet_interface_output_node_no_flatten (vlib_main_t * vm, n_bytes += n_bytes_b0 + n_bytes_b1; n_packets += 2; - - if (PREDICT_FALSE(si->output_feature_bitmap && - vnet_buffer(b0)->output_features.bitmap != (1 << INTF_OUTPUT_FEAT_DONE))) + if (with_features) { - u32 next0; + b0->flags |= BUFFER_OUTPUT_FEAT_DONE; vnet_buffer(b0)->output_features.bitmap = si->output_feature_bitmap; count_trailing_zeros(next0, vnet_buffer(b0)->output_features.bitmap); vnet_buffer(b0)->output_features.bitmap &= ~(1 << next0); - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_tx, - n_left_to_tx, bi0, next0); } else { + next0 = VNET_INTERFACE_OUTPUT_NEXT_TX; vnet_buffer(b0)->output_features.bitmap = 0; if (PREDICT_FALSE(tx_swif0 != rt->sw_if_index)) @@ -520,18 +518,16 @@ vnet_interface_output_node_no_flatten (vlib_main_t * vm, } } - if (PREDICT_FALSE(si->output_feature_bitmap && - vnet_buffer(b1)->output_features.bitmap != (1 << INTF_OUTPUT_FEAT_DONE))) + if (with_features) { - u32 next1; + b1->flags |= BUFFER_OUTPUT_FEAT_DONE; vnet_buffer(b1)->output_features.bitmap = si->output_feature_bitmap; count_trailing_zeros(next1, vnet_buffer(b1)->output_features.bitmap); vnet_buffer(b1)->output_features.bitmap &= ~(1 << next1); - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_tx, - n_left_to_tx, bi1, next1); } else { + next1 = VNET_INTERFACE_OUTPUT_NEXT_TX; vnet_buffer(b1)->output_features.bitmap = 0; /* update vlan subif tx counts, if required */ @@ -546,7 +542,9 @@ vnet_interface_output_node_no_flatten (vlib_main_t * vm, n_bytes_b1); } } - + if (with_features) + vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_tx, + n_left_to_tx, bi0, bi1, next0, next1); } while (from + 1 <= from_end && n_left_to_tx >= 1) @@ -572,10 +570,10 @@ vnet_interface_output_node_no_flatten (vlib_main_t * vm, n_bytes += n_bytes_b0; n_packets += 1; - if (PREDICT_FALSE(si->output_feature_bitmap && - 
vnet_buffer(b0)->output_features.bitmap != (1 << INTF_OUTPUT_FEAT_DONE))) + if (with_features) { u32 next0; + b0->flags |= BUFFER_OUTPUT_FEAT_DONE; vnet_buffer(b0)->output_features.bitmap = si->output_feature_bitmap; count_trailing_zeros(next0, vnet_buffer(b0)->output_features.bitmap); vnet_buffer(b0)->output_features.bitmap &= ~(1 << next0); @@ -613,6 +611,29 @@ vnet_interface_output_node_no_flatten (vlib_main_t * vm, return n_buffers; } +uword +vnet_interface_output_node_no_flatten (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + vnet_main_t * vnm = vnet_get_main (); + vnet_interface_output_runtime_t * rt = (void *) node->runtime_data; + vnet_sw_interface_t * si; + si = vnet_get_sw_interface (vnm, rt->sw_if_index); + + if (PREDICT_FALSE(si->output_feature_bitmap)) + { + /* if first pakcet in the frame have BUFFER_OUTPUT_FEAT_DONE flag set + then whole frame is arriving from feature node */ + + u32 * from = vlib_frame_args (frame); + vlib_buffer_t * b = vlib_get_buffer (vm, from[0]); + + if ((b->flags & BUFFER_OUTPUT_FEAT_DONE) == 0) + return vnet_interface_output_node_no_flatten_inline (vm, node, frame, 1); + } + return vnet_interface_output_node_no_flatten_inline (vm, node, frame, 0); +} /* Use buffer's sw_if_index[VNET_TX] to choose output interface. */ static uword |