author     Steven <sluong@cisco.com>  2017-12-20 12:43:01 -0800
committer  Damjan Marion <dmarion.lists@gmail.com>  2018-03-21 21:02:15 +0000
commit     9cd2d7a5a4fafadb65d772c48109d55d1e19d425 (patch)
tree       4a9e0665be0096ee6bfc2235388f90b276b23814 /src/vnet/bonding/device.c
parent     43ebe29b6ea1107c30311cfb3dbd8190282903d0 (diff)
bond: Add bonding driver and LACP protocol
Add a bonding driver to support creation of a bond interface composed of multiple slave interfaces. The slave interfaces can be physical interfaces or any virtual interfaces, for example memif interfaces.

The syntax to create a bond interface is
  create bond mode <lacp | xor | active-backup | broadcast | round-robin>

To enslave an interface to the bond interface,
  enslave interface TenGigabitEthernet6/0/0 to BondEthernet0

Please see src/plugins/lacp/lacp_doc.md for more examples and additional options.

LACP is a control-plane protocol which manages and monitors the status of the slave interfaces. The protocol is part of the 802.3ad standard. This patch implements LACPv1; LACPv2 is not supported. To enable LACP on the bond interface, specify "mode lacp" when the bond interface is created. The syntax to enslave a slave interface is the same as for the other bonding modes.

Change-Id: I06581d3b87635972f9f0e1ec50b67560fc13e26c
Signed-off-by: Steven <sluong@cisco.com>
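As an illustration, a two-member LACP bond could be configured with the commands below (the second slave interface name is just an example):

  create bond mode lacp
  enslave interface TenGigabitEthernet6/0/0 to BondEthernet0
  enslave interface TenGigabitEthernet6/0/1 to BondEthernet0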
Diffstat (limited to 'src/vnet/bonding/device.c')
-rw-r--r--   src/vnet/bonding/device.c   610
1 file changed, 610 insertions, 0 deletions
diff --git a/src/vnet/bonding/device.c b/src/vnet/bonding/device.c
new file mode 100644
index 00000000000..8f9b3a95591
--- /dev/null
+++ b/src/vnet/bonding/device.c
@@ -0,0 +1,610 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#define _GNU_SOURCE
+#include <stdint.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/ip/ip6_hop_by_hop_packet.h>
+#include <vnet/bonding/node.h>
+
+#define foreach_bond_tx_error \
+ _(NONE, "no error") \
+ _(IF_DOWN, "interface down") \
+ _(NO_SLAVE, "no slave")
+
+typedef enum
+{
+#define _(f,s) BOND_TX_ERROR_##f,
+ foreach_bond_tx_error
+#undef _
+ BOND_TX_N_ERROR,
+} bond_tx_error_t;
+
+static char *bond_tx_error_strings[] = {
+#define _(n,s) s,
+ foreach_bond_tx_error
+#undef _
+};
+
+static u8 *
+format_bond_tx_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ bond_packet_trace_t *t = va_arg (*args, bond_packet_trace_t *);
+ vnet_hw_interface_t *hw, *hw1;
+ vnet_main_t *vnm = vnet_get_main ();
+
+ hw = vnet_get_sup_hw_interface (vnm, t->sw_if_index);
+ hw1 = vnet_get_sup_hw_interface (vnm, t->bond_sw_if_index);
+ s = format (s, "src %U, dst %U, %s -> %s",
+ format_ethernet_address, t->ethernet.src_address,
+ format_ethernet_address, t->ethernet.dst_address,
+ hw->name, hw1->name);
+
+ return s;
+}
+
+u8 *
+format_bond_interface_name (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ bond_main_t *bm = &bond_main;
+ bond_if_t *bif = pool_elt_at_index (bm->interfaces, dev_instance);
+
+ s = format (s, "BondEthernet%lu", bif->dev_instance);
+
+ return s;
+}
+
+static __clib_unused clib_error_t *
+bond_subif_add_del_function (vnet_main_t * vnm, u32 hw_if_index,
+ struct vnet_sw_interface_t *st, int is_add)
+{
+ /* Nothing for now */
+ return 0;
+}
+
+static clib_error_t *
+bond_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
+{
+ vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
+ uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
+ bond_main_t *bm = &bond_main;
+ bond_if_t *bif = pool_elt_at_index (bm->interfaces, hif->dev_instance);
+
+ bif->admin_up = is_up;
+ if (is_up && vec_len (bif->active_slaves))
+ vnet_hw_interface_set_flags (vnm, bif->hw_if_index,
+ VNET_HW_INTERFACE_FLAG_LINK_UP);
+ return 0;
+}
+
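+/* Broadcast mode: the caller transmits the original buffer on the first
+ * active slave (this function always returns index 0); a copy of the packet
+ * is enqueued here for every other active slave. */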
+static inline u32
+bond_load_balance_broadcast (vlib_main_t * vm, vlib_node_runtime_t * node,
+ bond_if_t * bif, vlib_buffer_t * b0)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vlib_buffer_t *c0;
+ int i;
+ u32 *to_next = 0;
+ u32 sw_if_index;
+ vlib_frame_t *f;
+
+ for (i = 1; i < vec_len (bif->active_slaves); i++)
+ {
+ sw_if_index = *vec_elt_at_index (bif->active_slaves, i);
+ f = vnet_get_frame_to_sw_interface (vnm, sw_if_index);
+ to_next = vlib_frame_vector_args (f);
+ to_next += f->n_vectors;
+ c0 = vlib_buffer_copy (vm, b0);
+ if (PREDICT_TRUE (c0 != 0))
+ {
+ vnet_buffer (c0)->sw_if_index[VLIB_TX] = sw_if_index;
+ to_next[0] = vlib_get_buffer_index (vm, c0);
+ f->n_vectors++;
+ vnet_put_frame_to_sw_interface (vnm, sw_if_index, f);
+ }
+ }
+
+ return 0;
+}
+
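+/* Select a slave by hashing the source and destination MAC addresses. */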
+static inline u32
+bond_load_balance_l2 (vlib_main_t * vm, vlib_node_runtime_t * node,
+ bond_if_t * bif, vlib_buffer_t * b0)
+{
+ ethernet_header_t *eth = (ethernet_header_t *) vlib_buffer_get_current (b0);
+ u32 a = 0, b = 0, c = 0, t1, t2;
+ u16 t11, t22;
+
+ memcpy (&t1, eth->src_address, sizeof (t1));
+ memcpy (&t11, &eth->src_address[4], sizeof (t11));
+ a = t1 ^ t11;
+
+ memcpy (&t2, eth->dst_address, sizeof (t2));
+ memcpy (&t22, &eth->dst_address[4], sizeof (t22));
+ b = t2 ^ t22;
+
+ hash_v3_mix32 (a, b, c);
+ hash_v3_finalize32 (a, b, c);
+
+ return c % vec_len (bif->active_slaves);
+}
+
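+/* Return a pointer to the ethertype field, skipping up to two VLAN tags
+ * if the frame is tagged. */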
+static inline u16 *
+bond_locate_ethertype (ethernet_header_t * eth)
+{
+ u16 *ethertype_p;
+ ethernet_vlan_header_t *vlan;
+
+ if (!ethernet_frame_is_tagged (clib_net_to_host_u16 (eth->type)))
+ {
+ ethertype_p = &eth->type;
+ }
+ else
+ {
+ vlan = (void *) (eth + 1);
+ ethertype_p = &vlan->type;
+ if (*ethertype_p == ntohs (ETHERNET_TYPE_VLAN))
+ {
+ vlan++;
+ ethertype_p = &vlan->type;
+ }
+ }
+ return ethertype_p;
+}
+
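+/* Select a slave by hashing the MAC addresses together with the IPv4 or
+ * IPv6 source/destination addresses; non-IP frames fall back to the L2
+ * hash. */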
+static inline u32
+bond_load_balance_l23 (vlib_main_t * vm, vlib_node_runtime_t * node,
+ bond_if_t * bif, vlib_buffer_t * b0)
+{
+ ethernet_header_t *eth = (ethernet_header_t *) vlib_buffer_get_current (b0);
+ u8 ip_version;
+ ip4_header_t *ip4;
+ u16 ethertype, *ethertype_p;
+
+ ethertype_p = bond_locate_ethertype (eth);
+ ethertype = *ethertype_p;
+
+ if ((ethertype != htons (ETHERNET_TYPE_IP4)) &&
+ (ethertype != htons (ETHERNET_TYPE_IP6)))
+ return (bond_load_balance_l2 (vm, node, bif, b0));
+
+ ip4 = (ip4_header_t *) (ethertype_p + 1);
+ ip_version = (ip4->ip_version_and_header_length >> 4);
+
+ if (ip_version == 0x4)
+ {
+ u16 t11, t22;
+ u32 a = 0, b = 0, c = 0, t1, t2;
+
+ memcpy (&t1, eth->src_address, sizeof (t1));
+ memcpy (&t11, &eth->src_address[4], sizeof (t11));
+ a = t1 ^ t11;
+
+ memcpy (&t2, eth->dst_address, sizeof (t2));
+ memcpy (&t22, &eth->dst_address[4], sizeof (t22));
+ b = t2 ^ t22;
+
+ c = ip4->src_address.data_u32 ^ ip4->dst_address.data_u32;
+
+ hash_v3_mix32 (a, b, c);
+ hash_v3_finalize32 (a, b, c);
+
+ return c % vec_len (bif->active_slaves);
+ }
+ else if (ip_version == 0x6)
+ {
+ u64 a, b, c;
+ u64 t1 = 0, t2 = 0;
+ ip6_header_t *ip6 = (ip6_header_t *) (eth + 1);
+
+ memcpy (&t1, eth->src_address, sizeof (eth->src_address));
+ memcpy (&t2, eth->dst_address, sizeof (eth->dst_address));
+ a = t1 ^ t2;
+
+ b = (ip6->src_address.as_u64[0] ^ ip6->src_address.as_u64[1]);
+ c = (ip6->dst_address.as_u64[0] ^ ip6->dst_address.as_u64[1]);
+
+ hash_mix64 (a, b, c);
+ return c % vec_len (bif->active_slaves);
+ }
+ return (bond_load_balance_l2 (vm, node, bif, b0));
+}
+
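+/* Select a slave by hashing the IPv4 or IPv6 source/destination addresses
+ * and, when present, the TCP/UDP ports; non-IP frames fall back to the L2
+ * hash. */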
+static inline u32
+bond_load_balance_l34 (vlib_main_t * vm, vlib_node_runtime_t * node,
+ bond_if_t * bif, vlib_buffer_t * b0)
+{
+ ethernet_header_t *eth = (ethernet_header_t *) vlib_buffer_get_current (b0);
+ u8 ip_version;
+ uword is_tcp_udp = 0;
+ ip4_header_t *ip4;
+ u16 ethertype, *ethertype_p;
+
+ ethertype_p = bond_locate_ethertype (eth);
+ ethertype = *ethertype_p;
+
+ if ((ethertype != htons (ETHERNET_TYPE_IP4)) &&
+ (ethertype != htons (ETHERNET_TYPE_IP6)))
+ return (bond_load_balance_l2 (vm, node, bif, b0));
+
+ ip4 = (ip4_header_t *) (ethertype_p + 1);
+ ip_version = (ip4->ip_version_and_header_length >> 4);
+
+ if (ip_version == 0x4)
+ {
+ u32 a = 0, b = 0, c = 0, t1, t2;
+ tcp_header_t *tcp = (void *) (ip4 + 1);
+ is_tcp_udp = (ip4->protocol == IP_PROTOCOL_TCP) ||
+ (ip4->protocol == IP_PROTOCOL_UDP);
+
+ a = ip4->src_address.data_u32 ^ ip4->dst_address.data_u32;
+
+ t1 = is_tcp_udp ? tcp->src : 0;
+ t2 = is_tcp_udp ? tcp->dst : 0;
+ b = t1 + (t2 << 16);
+
+ hash_v3_mix32 (a, b, c);
+ hash_v3_finalize32 (a, b, c);
+
+ return c % vec_len (bif->active_slaves);
+ }
+ else if (ip_version == 0x6)
+ {
+ u64 a, b, c;
+ u64 t1, t2;
+ ip6_header_t *ip6 = (ip6_header_t *) (eth + 1);
+ tcp_header_t *tcp = (void *) (ip6 + 1);
+
+ if (PREDICT_TRUE ((ip6->protocol == IP_PROTOCOL_TCP) ||
+ (ip6->protocol == IP_PROTOCOL_UDP)))
+ {
+ is_tcp_udp = 1;
+ tcp = (void *) (ip6 + 1);
+ }
+ else if (ip6->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
+ {
+ ip6_hop_by_hop_header_t *hbh =
+ (ip6_hop_by_hop_header_t *) (ip6 + 1);
+ if ((hbh->protocol == IP_PROTOCOL_TCP)
+ || (hbh->protocol == IP_PROTOCOL_UDP))
+ {
+ is_tcp_udp = 1;
+ tcp = (tcp_header_t *) ((u8 *) hbh + ((hbh->length + 1) << 3));
+ }
+ }
+ a = (ip6->src_address.as_u64[0] ^ ip6->src_address.as_u64[1]);
+ b = (ip6->dst_address.as_u64[0] ^ ip6->dst_address.as_u64[1]);
+
+ t1 = is_tcp_udp ? tcp->src : 0;
+ t2 = is_tcp_udp ? tcp->dst : 0;
+ c = (t2 << 16) | t1;
+ hash_mix64 (a, b, c);
+
+ return c % vec_len (bif->active_slaves);
+ }
+
+ return (bond_load_balance_l2 (vm, node, bif, b0));
+}
+
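+/* Rotate through the active slaves, one packet at a time. */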
+static inline u32
+bond_load_balance_round_robin (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ bond_if_t * bif, vlib_buffer_t * b0)
+{
+ bif->lb_rr_last_index++;
+ bif->lb_rr_last_index %= vec_len (bif->active_slaves);
+
+ return bif->lb_rr_last_index;
+}
+
+static inline u32
+bond_load_balance_active_backup (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ bond_if_t * bif, vlib_buffer_t * b0)
+{
+ /* The first interface is the active slave; the rest are backups */
+ return 0;
+}
+
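+/* Dispatch table of load-balance functions, indexed by the bond
+ * interface's configured algorithm (bif->lb). */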
+static bond_load_balance_func_t bond_load_balance_table[] = {
+#define _(v,f,s, p) { bond_load_balance_##p },
+ foreach_bond_lb_algo
+#undef _
+};
+
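+/* Device TX function: distribute the packets in the frame across the
+ * bond's active slaves using the configured load-balance algorithm. */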
+static uword
+bond_tx_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ vnet_interface_output_runtime_t *rund = (void *) node->runtime_data;
+ bond_main_t *bm = &bond_main;
+ bond_if_t *bif = pool_elt_at_index (bm->interfaces, rund->dev_instance);
+ u32 bi0, bi1, bi2, bi3;
+ vlib_buffer_t *b0, *b1, *b2, *b3;
+ u32 *from = vlib_frame_vector_args (frame);
+ u32 n_left_from;
+ ethernet_header_t *eth;
+ u32 next0 = 0, next1 = 0, next2 = 0, next3 = 0;
+ u32 port, port1, port2, port3;
+ u32 sw_if_index, sw_if_index1, sw_if_index2, sw_if_index3;
+ bond_packet_trace_t *t0;
+ uword n_trace = vlib_get_trace_count (vm, node);
+ u16 thread_index = vlib_get_thread_index ();
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 *to_next, *to_next1, *to_next2, *to_next3;
+ u32 sif_if_index, sif_if_index1, sif_if_index2, sif_if_index3;
+ vlib_frame_t *f, *f1, *f2, *f3;
+
+ if (PREDICT_FALSE (bif->admin_up == 0))
+ {
+ vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
+ vlib_increment_simple_counter (vnet_main.interface_main.sw_if_counters +
+ VNET_INTERFACE_COUNTER_DROP,
+ thread_index, bif->sw_if_index,
+ frame->n_vectors);
+ vlib_error_count (vm, node->node_index, BOND_TX_ERROR_IF_DOWN,
+ frame->n_vectors);
+ return frame->n_vectors;
+ }
+
+ if (PREDICT_FALSE (vec_len (bif->active_slaves) == 0))
+ {
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+ vlib_increment_combined_counter
+ (vnet_main.interface_main.combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_TX, thread_index, bif->sw_if_index,
+ frame->n_vectors, b0->current_length);
+
+ vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
+ vlib_increment_simple_counter (vnet_main.interface_main.sw_if_counters +
+ VNET_INTERFACE_COUNTER_DROP,
+ thread_index, bif->sw_if_index,
+ frame->n_vectors);
+ vlib_error_count (vm, node->node_index, BOND_TX_ERROR_NO_SLAVE,
+ frame->n_vectors);
+ return frame->n_vectors;
+ }
+
+ /* Number of buffers / pkts */
+ n_left_from = frame->n_vectors;
+
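+ /* Quad loop: select a slave and enqueue four packets per iteration. */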
+ while (n_left_from >= 8)
+ {
+ // Prefetch next iteration
+ {
+ vlib_buffer_t *p4, *p5, *p6, *p7;
+
+ p4 = vlib_get_buffer (vm, from[4]);
+ p5 = vlib_get_buffer (vm, from[5]);
+ p6 = vlib_get_buffer (vm, from[6]);
+ p7 = vlib_get_buffer (vm, from[7]);
+
+ vlib_prefetch_buffer_header (p4, STORE);
+ vlib_prefetch_buffer_header (p5, STORE);
+ vlib_prefetch_buffer_header (p6, STORE);
+ vlib_prefetch_buffer_header (p7, STORE);
+
+ CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ bi0 = from[0];
+ bi1 = from[1];
+ bi2 = from[2];
+ bi3 = from[3];
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b1 = vlib_get_buffer (vm, bi1);
+ b2 = vlib_get_buffer (vm, bi2);
+ b3 = vlib_get_buffer (vm, bi3);
+
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b2);
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b3);
+
+ sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
+ sw_if_index2 = vnet_buffer (b2)->sw_if_index[VLIB_TX];
+ sw_if_index3 = vnet_buffer (b3)->sw_if_index[VLIB_TX];
+
+ port =
+ (bond_load_balance_table[bif->lb]).load_balance (vm, node, bif, b0);
+ port1 =
+ (bond_load_balance_table[bif->lb]).load_balance (vm, node, bif, b1);
+ port2 =
+ (bond_load_balance_table[bif->lb]).load_balance (vm, node, bif, b2);
+ port3 =
+ (bond_load_balance_table[bif->lb]).load_balance (vm, node, bif, b3);
+
+ sif_if_index = *vec_elt_at_index (bif->active_slaves, port);
+ sif_if_index1 = *vec_elt_at_index (bif->active_slaves, port1);
+ sif_if_index2 = *vec_elt_at_index (bif->active_slaves, port2);
+ sif_if_index3 = *vec_elt_at_index (bif->active_slaves, port3);
+
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = sif_if_index;
+ vnet_buffer (b1)->sw_if_index[VLIB_TX] = sif_if_index1;
+ vnet_buffer (b2)->sw_if_index[VLIB_TX] = sif_if_index2;
+ vnet_buffer (b3)->sw_if_index[VLIB_TX] = sif_if_index3;
+
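+ /* Enqueue each buffer on an output frame for its selected slave. */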
+ f = vnet_get_frame_to_sw_interface (vnm, sif_if_index);
+ f1 = vnet_get_frame_to_sw_interface (vnm, sif_if_index1);
+ f2 = vnet_get_frame_to_sw_interface (vnm, sif_if_index2);
+ f3 = vnet_get_frame_to_sw_interface (vnm, sif_if_index3);
+
+ to_next = vlib_frame_vector_args (f);
+ to_next1 = vlib_frame_vector_args (f1);
+ to_next2 = vlib_frame_vector_args (f2);
+ to_next3 = vlib_frame_vector_args (f3);
+
+ to_next += f->n_vectors;
+ to_next1 += f1->n_vectors;
+ to_next2 += f2->n_vectors;
+ to_next3 += f3->n_vectors;
+
+ to_next[0] = vlib_get_buffer_index (vm, b0);
+ to_next1[0] = vlib_get_buffer_index (vm, b1);
+ to_next2[0] = vlib_get_buffer_index (vm, b2);
+ to_next3[0] = vlib_get_buffer_index (vm, b3);
+
+ f->n_vectors++;
+ f1->n_vectors++;
+ f2->n_vectors++;
+ f3->n_vectors++;
+
+ vnet_put_frame_to_sw_interface (vnm, sif_if_index, f);
+ vnet_put_frame_to_sw_interface (vnm, sif_if_index1, f1);
+ vnet_put_frame_to_sw_interface (vnm, sif_if_index2, f2);
+ vnet_put_frame_to_sw_interface (vnm, sif_if_index3, f3);
+
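+ /* Trace as many of these packets as the node's trace count allows. */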
+ if (PREDICT_FALSE (n_trace > 0))
+ {
+ vlib_trace_buffer (vm, node, next0, b0, 0 /* follow_chain */ );
+ vlib_set_trace_count (vm, node, --n_trace);
+ t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
+ eth = (ethernet_header_t *) vlib_buffer_get_current (b0);
+ t0->ethernet = *eth;
+ t0->sw_if_index = sw_if_index;
+ t0->bond_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+
+ if (PREDICT_TRUE (n_trace > 0))
+ {
+ vlib_trace_buffer (vm, node, next1, b1, 0 /* follow_chain */ );
+ vlib_set_trace_count (vm, node, --n_trace);
+ t0 = vlib_add_trace (vm, node, b1, sizeof (*t0));
+ eth = (ethernet_header_t *) vlib_buffer_get_current (b1);
+ t0->ethernet = *eth;
+ t0->sw_if_index = sw_if_index1;
+ t0->bond_sw_if_index = vnet_buffer (b1)->sw_if_index[VLIB_TX];
+
+ if (PREDICT_TRUE (n_trace > 0))
+ {
+ vlib_trace_buffer (vm, node, next2, b2,
+ 0 /* follow_chain */ );
+ vlib_set_trace_count (vm, node, --n_trace);
+ t0 = vlib_add_trace (vm, node, b2, sizeof (*t0));
+ eth = (ethernet_header_t *) vlib_buffer_get_current (b2);
+ t0->ethernet = *eth;
+ t0->sw_if_index = sw_if_index2;
+ t0->bond_sw_if_index =
+ vnet_buffer (b2)->sw_if_index[VLIB_TX];
+
+ if (PREDICT_TRUE (n_trace > 0))
+ {
+ vlib_trace_buffer (vm, node, next3, b3,
+ 0 /* follow_chain */ );
+ vlib_set_trace_count (vm, node, --n_trace);
+ t0 = vlib_add_trace (vm, node, b3, sizeof (*t0));
+ eth =
+ (ethernet_header_t *) vlib_buffer_get_current (b3);
+ t0->ethernet = *eth;
+ t0->sw_if_index = sw_if_index3;
+ t0->bond_sw_if_index =
+ vnet_buffer (b3)->sw_if_index[VLIB_TX];
+ }
+ }
+ }
+ }
+
+ from += 4;
+ n_left_from -= 4;
+ }
+
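+ /* Single loop: handle the remaining packets one at a time. */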
+ while (n_left_from > 0)
+ {
+ // Prefetch next iteration
+ if (n_left_from > 1)
+ {
+ vlib_buffer_t *p2;
+
+ p2 = vlib_get_buffer (vm, from[1]);
+ vlib_prefetch_buffer_header (p2, STORE);
+ CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
+
+ sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+
+ port =
+ (bond_load_balance_table[bif->lb]).load_balance (vm, node, bif, b0);
+ sif_if_index = *vec_elt_at_index (bif->active_slaves, port);
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = sif_if_index;
+ f = vnet_get_frame_to_sw_interface (vnm, sif_if_index);
+ to_next = vlib_frame_vector_args (f);
+ to_next += f->n_vectors;
+
+ to_next[0] = vlib_get_buffer_index (vm, b0);
+ f->n_vectors++;
+ vnet_put_frame_to_sw_interface (vnm, sif_if_index, f);
+
+ if (PREDICT_FALSE (n_trace > 0))
+ {
+ vlib_trace_buffer (vm, node, next0, b0, 0 /* follow_chain */ );
+ vlib_set_trace_count (vm, node, --n_trace);
+ t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
+ eth = (ethernet_header_t *) vlib_buffer_get_current (b0);
+ t0->ethernet = *eth;
+ t0->sw_if_index = sw_if_index;
+ t0->bond_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+ }
+
+ from += 1;
+ n_left_from -= 1;
+ }
+
+ vlib_increment_simple_counter (vnet_main.interface_main.sw_if_counters
+ + VNET_INTERFACE_COUNTER_TX, thread_index,
+ bif->sw_if_index, frame->n_vectors);
+
+ return frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VNET_DEVICE_CLASS (bond_dev_class) = {
+ .name = "bond",
+ .tx_function = bond_tx_fn,
+ .tx_function_n_errors = BOND_TX_N_ERROR,
+ .tx_function_error_strings = bond_tx_error_strings,
+ .format_device_name = format_bond_interface_name,
+ .admin_up_down_function = bond_interface_admin_up_down,
+ .subif_add_del_function = bond_subif_add_del_function,
+ .format_tx_trace = format_bond_tx_trace,
+};
+
+VLIB_DEVICE_TX_FUNCTION_MULTIARCH (bond_dev_class, bond_tx_fn)
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */