| author | Neale Ranns <nranns@cisco.com> | 2018-10-02 07:27:02 -0700 |
|---|---|---|
| committer | Marco Varlese <marco.varlese@suse.de> | 2018-10-12 07:35:46 +0000 |
| commit | 9a5f9c9a43ae6057fe760a59d40443a12af37f90 (patch) | |
| tree | 7a54d4ef075022cd3b1149c97a210315f0ec7513 /src/vnet | |
| parent | 9864f87b1bd410a6ef533f34b571e28500ee80f7 (diff) | |
L2-flood: no clone for 1 replication
Change-Id: If178dd38e7920f35588f5d821ff097168b078026
Signed-off-by: Neale Ranns <nranns@cisco.com>
(cherry picked from commit b9fa29d513bfad0d9f18e8ed8c2da3feaa6d3bf0)
Diffstat (limited to 'src/vnet')
-rw-r--r-- | src/vnet/l2/l2_flood.c | 128
1 file changed, 69 insertions(+), 59 deletions(-)
diff --git a/src/vnet/l2/l2_flood.c b/src/vnet/l2/l2_flood.c
index 97a4ff59da7..8908c4312d3 100644
--- a/src/vnet/l2/l2_flood.c
+++ b/src/vnet/l2/l2_flood.c
@@ -209,77 +209,87 @@ l2flood_node_fn (vlib_main_t * vm,
                                               bi0, L2FLOOD_NEXT_DROP);
               continue;
             }
-
-          vec_validate (msm->clones[thread_index], n_clones);
-          vec_reset_length (msm->clones[thread_index]);
-
-          /*
-           * the header offset needs to be large enough to incorporate
-           * all the L3 headers that could be touched when doing BVI
-           * processing. So take the current l2 length plus 2 * IPv6
-           * headers (for tunnel encap)
-           */
-          n_cloned = vlib_buffer_clone (vm, bi0,
-                                        msm->clones[thread_index],
-                                        n_clones,
-                                        (vnet_buffer (b0)->l2.l2_len +
-                                         sizeof (udp_header_t) +
-                                         2 * sizeof (ip6_header_t)));
-
-          if (PREDICT_FALSE (n_cloned != n_clones))
+          else if (n_clones > 1)
             {
-              b0->error = node->errors[L2FLOOD_ERROR_REPL_FAIL];
-            }
+              vec_validate (msm->clones[thread_index], n_clones);
+              vec_reset_length (msm->clones[thread_index]);
+
+              /*
+               * the header offset needs to be large enough to incorporate
+               * all the L3 headers that could be touched when doing BVI
+               * processing. So take the current l2 length plus 2 * IPv6
+               * headers (for tunnel encap)
+               */
+              n_cloned = vlib_buffer_clone (vm, bi0,
+                                            msm->clones[thread_index],
+                                            n_clones,
+                                            (vnet_buffer (b0)->l2.l2_len +
+                                             sizeof (udp_header_t) +
+                                             2 * sizeof (ip6_header_t)));
+
+              if (PREDICT_FALSE (n_cloned != n_clones))
+                {
+                  b0->error = node->errors[L2FLOOD_ERROR_REPL_FAIL];
+                }
 
-          /*
-           * for all but the last clone, these are not BVI bound
-           */
-          for (clone0 = 0; clone0 < n_cloned - 1; clone0++)
-            {
-              member = msm->members[thread_index][clone0];
-              ci0 = msm->clones[thread_index][clone0];
-              c0 = vlib_get_buffer (vm, ci0);
+              /*
+               * for all but the last clone, these are not BVI bound
+               */
+              for (clone0 = 0; clone0 < n_cloned - 1; clone0++)
+                {
+                  member = msm->members[thread_index][clone0];
+                  ci0 = msm->clones[thread_index][clone0];
+                  c0 = vlib_get_buffer (vm, ci0);
 
-              to_next[0] = ci0;
-              to_next += 1;
-              n_left_to_next -= 1;
+                  to_next[0] = ci0;
+                  to_next += 1;
+                  n_left_to_next -= 1;
 
-              if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
-                                 (b0->flags & VLIB_BUFFER_IS_TRACED)))
-                {
-                  ethernet_header_t *h0;
-                  l2flood_trace_t *t;
-
-                  if (c0 != b0)
-                    vlib_buffer_copy_trace_flag (vm, b0, ci0);
-
-                  t = vlib_add_trace (vm, node, c0, sizeof (*t));
-                  h0 = vlib_buffer_get_current (c0);
-                  t->sw_if_index = sw_if_index0;
-                  t->bd_index = vnet_buffer (c0)->l2.bd_index;
-                  clib_memcpy (t->src, h0->src_address, 6);
-                  clib_memcpy (t->dst, h0->dst_address, 6);
-                }
+                  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) &&
+                                     (b0->flags & VLIB_BUFFER_IS_TRACED)))
+                    {
+                      ethernet_header_t *h0;
+                      l2flood_trace_t *t;
+
+                      if (c0 != b0)
+                        vlib_buffer_copy_trace_flag (vm, b0, ci0);
+
+                      t = vlib_add_trace (vm, node, c0, sizeof (*t));
+                      h0 = vlib_buffer_get_current (c0);
+                      t->sw_if_index = sw_if_index0;
+                      t->bd_index = vnet_buffer (c0)->l2.bd_index;
+                      clib_memcpy (t->src, h0->src_address, 6);
+                      clib_memcpy (t->dst, h0->dst_address, 6);
+                    }
 
-              /* Do normal L2 forwarding */
-              vnet_buffer (c0)->sw_if_index[VLIB_TX] = member->sw_if_index;
+                  /* Do normal L2 forwarding */
+                  vnet_buffer (c0)->sw_if_index[VLIB_TX] =
+                    member->sw_if_index;
 
-              vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
-                                               to_next, n_left_to_next,
-                                               ci0, next0);
-              if (PREDICT_FALSE (0 == n_left_to_next))
-                {
-                  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
-                  vlib_get_next_frame (vm, node, next_index,
-                                       to_next, n_left_to_next);
+                  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+                                                   to_next, n_left_to_next,
+                                                   ci0, next0);
+                  if (PREDICT_FALSE (0 == n_left_to_next))
+                    {
+                      vlib_put_next_frame (vm, node, next_index,
+                                           n_left_to_next);
+                      vlib_get_next_frame (vm, node, next_index, to_next,
+                                           n_left_to_next);
+                    }
                 }
+              member = msm->members[thread_index][clone0];
+              ci0 = msm->clones[thread_index][clone0];
+            }
+          else
+            {
+              /* one clone */
+              ci0 = bi0;
+              member = msm->members[thread_index][0];
             }
 
           /*
            * the last clone that might go to a BVI
            */
-          member = msm->members[thread_index][clone0];
-          ci0 = msm->clones[thread_index][clone0];
           c0 = vlib_get_buffer (vm, ci0);
 
           to_next[0] = ci0;
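
What the patch changes, in isolation: flooding now takes three paths depending on the replication count — drop when there are no members, call the clone machinery only when more than one copy is needed, and reuse the original buffer untouched when exactly one member will receive it. The sketch below is a minimal, self-contained illustration of that branch structure only; `buffer_t`, `clone_buffer` and `flood` are hypothetical stand-ins, not VPP APIs.

```c
#include <stdio.h>

/* Hypothetical stand-in for a buffer descriptor; not VPP's vlib_buffer_t. */
typedef struct { int id; } buffer_t;

/* Pretend clone: copies the descriptor n_requested times and reports
 * how many copies were made (payload sharing is not modelled here). */
static int
clone_buffer (buffer_t *src, buffer_t *clones, int n_requested)
{
  for (int i = 0; i < n_requested; i++)
    clones[i] = *src;
  return n_requested;
}

/* Flood one packet to n_members bridge-domain members, mirroring the
 * branch structure the patch introduces (assumes n_members <= 16). */
static void
flood (buffer_t *b, int n_members)
{
  buffer_t clones[16];

  if (n_members == 0)
    {
      /* no members to flood to: drop */
      printf ("buffer %d: no members, drop\n", b->id);
    }
  else if (n_members > 1)
    {
      /* more than one replication: clone, forward all but the last,
       * then hand the last copy to the possible-BVI path */
      int n_cloned = clone_buffer (b, clones, n_members);
      for (int i = 0; i < n_cloned - 1; i++)
        printf ("buffer %d: clone %d -> member %d\n", b->id, i, i);
      printf ("buffer %d: clone %d -> member %d (last, may be BVI)\n",
              b->id, n_cloned - 1, n_cloned - 1);
    }
  else
    {
      /* exactly one replication: reuse the original buffer, no clone */
      printf ("buffer %d: single member, forwarded without cloning\n", b->id);
    }
}

int
main (void)
{
  buffer_t b = { .id = 42 };
  flood (&b, 0);
  flood (&b, 1);
  flood (&b, 3);
  return 0;
}
```

For the single-member case the `n_clones == 1` branch in the patch skips the `vec_validate`/`vlib_buffer_clone` work entirely and sends the original buffer down the same "last clone that might go to a BVI" path as before.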
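The clone call's header offset follows the reasoning in the source comment: it must cover every header that BVI processing might rewrite — the current L2 header plus a UDP header and up to two IPv6 headers of tunnel encap. A small stand-alone calculation of that offset, using the standard on-wire header sizes and an assumed 14-byte untagged Ethernet header rather than VPP's own `udp_header_t`/`ip6_header_t` types:

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in structs with the standard on-wire sizes: UDP = 8 bytes,
 * IPv6 = 40 bytes. These are illustrative, not VPP's definitions. */
typedef struct { uint16_t src, dst, length, checksum; } udp_hdr_t;
typedef struct
{
  uint32_t ver_tc_flow;
  uint16_t payload_len;
  uint8_t next_header, hop_limit;
  uint8_t src[16], dst[16];
} ip6_hdr_t;

int
main (void)
{
  /* Example: an untagged Ethernet frame carries a 14-byte L2 header;
   * in the patch this comes from vnet_buffer (b0)->l2.l2_len. */
  unsigned l2_len = 14;

  /* Same arithmetic as the clone call in the patch. */
  unsigned head_end_offset =
    l2_len + sizeof (udp_hdr_t) + 2 * sizeof (ip6_hdr_t);

  /* 14 + 8 + 2 * 40 = 102 bytes for this example frame */
  printf ("head_end_offset = %u bytes\n", head_end_offset);
  return 0;
}
```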