summaryrefslogtreecommitdiffstats
path: root/src/vnet/bonding/device.c
blob: 53123dd7c4f1041fd04e5fb7f146868d4d1aefa4 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63

@media only all and (prefers-color-scheme: dark) {
.highlight .hll { background-color: #49483e }
.highlight .c { color: #75715e } /* Comment */
.highlight .err { color: #960050; background-color: #1e0010 } /* Error */
.highlight .k { color: #66d9ef } /* Keyword */
.highlight .l { color: #ae81ff } /* Literal */
.highlight .n { color: #f8f8f2 } /* Name */
.highlight .o { color: #f92672 } /* Operator */
.highlight .p { color: #f8f8f2 } /* Punctuation */
.highlight .ch { color: #75715e } /* Comment.Hashbang */
.highlight .cm { color: #75715e } /* Comment.Multiline */
.highlight .cp { color: #75715e } /* Comment.Preproc */
.highlight .cpf { color: #75715e } /* Comment.PreprocFile */
.highlight .c1 { color: #75715e } /* Comment.Single */
.highlight .cs { color: #75715e } /* Comment.Special */
.highlight .gd { color: #f92672 } /* Generic.Deleted */
.highlight .ge { font-style: italic } /* Generic.Emph */
.highlight .gi { color: #a6e22e } /* Generic.Inserted */
.highlight .gs { font-weight: bold } /* Generic.Strong */
.highlight .gu { color: #75715e } /* Generic.Subheading */
.highlight .kc { color: #66d9ef } /* Keyword.Constant */
.highlight .kd { color: #66d9ef } /* Keyword.Declaration */
.highlight .kn { color: #f92672 } /* Keyword.Namespace */
.highlight .kp { color: #66d9ef } /* Keyword.Pseudo */
.highlight .kr { color: #66d9ef } /* Keyword.Reserved */
.highlight .kt { color: #66d9ef } /* Keyword.Type */
.highlight .ld { color: #e6db74 } /* Literal.Date */
.highlight .m { color: #ae81ff } /* Literal.Number */
.highlight .s { color: #e6db74 } /* Literal.String */
.highlight .na { color: #a6e22e } /* Name.Attribute */
.highlight .nb { color: #f8f8f2 } /* Name.Builtin */
.highlight .nc { color: #a6e22e } /* Name.Class */
.highlight .no { color: #66d9ef } /* Name.Constant */
.highlight .nd { color: #a6e22e } /* Name.Decorator */
.highlight .ni { color: #f8f8f2 } /* Name.Entity */
.highlight .ne { color: #a6e22e } /* Name.Exception */
.highlight .nf { color: #a6e22e } /* Name.Function */
.highlight .nl { color: #f8f8f2 } /* Name.Label */
.highlight .nn { color: #f8f8f2 } /* Name.Namespace */
.highlight .nx { color: #a6e22e } /* Name.Other */
.highlight .py { color: #f8f8f2 } /* Name.Property */
.highlight .nt { color: #f92672 } /* Name.Tag */
.highlight .nv { color: #f8f8f2 } /* Name.Variable */
.highlight .ow { color: #f92672 } /* Operator.Word */
.highlight .w { color: #f8f8f2 } /* Text.Whitespace */
.highlight .mb { color: #ae81ff } /* Literal.Number.Bin */
.highlight .mf { color: #ae81ff } /* Literal.Number.Float */
.highlight .mh { color: #ae81ff } /* Literal.Number.Hex */
.highlight .mi { color: #ae81ff } /* Literal.Number.Integer */
.highlight .mo { color: #ae81ff } /* Literal.Number.Oct */
.highlight .sa { color: #e6db74 } /* Literal.String.Affix */
.highlight .sb { color: #e6db74 } /* Literal.String.Backtick */
.highlight .sc { color: #e6db74 } /* Literal.String.Char */
.highlight .dl { color: #e6db74 } /* Literal.String.Delimiter */
.highlight .sd { color: #e6db74 } /* Literal.String.Doc */
.highlight .s2 { color: #e6db74 } /* Literal.String.Double */
.highlight .se { color: #ae81ff } /* Literal.String.Escape */
.highlight .sh { color: #e6db74 } /* Literal.String.Heredoc */
.highlight .si { color: #e6db74 } /* Literal.String.Interpol */
.highlight .sx { color: #e6db74 } /* Literal.String.Other */
.highlight .sr { color: #e6db74 } /* Literal.String.Regex */
.highlight .s1 { color: #e6db74 } /* Literal.String.Single */
.highlight .ss { color: #e6db74 } /* Literal.String.Symbol */
.highlight .bp { color: #f8f8f2 } /* Name.Builtin.Pseudo */
.highlight .fm { color: #a6e22e } /* Name.Function.Magic */
.highlight .vc { color: #f8f8f2 } /* Name.Variable.Class */
.highlight .vg { color: #f8f8f2 } /* Name.Variable.Global */
.highlight .vi { color: #f8f8f2 } /* Name.Variable.Instance */
.highlight .vm { color: #f8f8f2 } /* Name.Variable.Magic */
.highlight .il { color: #ae81ff } /* Literal.Number.Integer.Long */
}
@media (prefers-color-scheme: light) {
.highlight .hll { background-color: #ffffcc }
.highlight .c { color: #888888 } /* Comment */
.highlight .err { color: #a61717; background-color: #e3d2d2 } /* Error */
.highlight .k { color: #008800; font-weight: bold } /* Keyword */
.highlight .ch { color: #888888 } /* Comment.Hashbang */
.highlight .cm { color: #888888 } /* Comment.Multiline */
.highlight .cp { color: #cc0000; font-weight: bold } /* Comment.Preproc */
.highlight .cpf { color: #888888 } /* Comment.PreprocFile */
.highlight .c1 { color: #888888 } /* Comment.Single */
.highlight .cs { color: #cc0000; font-weight: bold; background-color: #fff0f0 } /* Comment.Special */
.highlight .gd { color: #000000; background-color: #ffdddd } /* Generic.Deleted */
.highlight .ge { font-style: italic } /* Generic.Emph */
.highlight .gr { color: #aa0000 } /* Generic.Error */
.highlight .gh { color: #333333 } /* Generic.Heading */
.highlight .gi { color: #000000; background-color: #ddffdd } /* Generic.Inserted */
.highlight .go { color: #888888 } /* Generic.Output */
.highlight .gp { color: #555555 } /* Generic.Prompt */
.highlight .gs { font-weight: bold } /* Generic.Strong */
.highlight .gu { color: #666666 } /* Generic.Subheading */
.highlight .gt { color: #aa0000 } /* Generic.Traceback */
.highlight .kc { color: #008800; font-weight: bold } /* Keyword.Constant */
.highlight .kd { color: #008800; font-weight: bold } /* Keyword.Declaration */
.highlight .kn { color: #008800; font-weight: bold } /* Keyword.Namespace */
.highlight .kp { color: #008800 } /* Keyword.Pseudo */
.highlight .kr { color: #008800; font-weight: bold } /* Keyword.Reserved */
.highlight .kt { color: #888888; font-weight: bold } /* Keyword.Type */
.highlight .m { color: #0000DD; font-weight: bold } /* Literal.Number */
.highlight .s { color: #dd2200; background-color: #fff0f0 } /* Literal.String */
.highlight .na { color: #336699 } /* Name.Attribute */
.highlight .nb { color: #003388 } /* Name.Builtin */
.highlight .nc { color: #bb0066; font-weight: bold } /* Name.Class */
.highlight .no { color: #003366; font-weight: bold } /* Name.Constant */
.highlight .nd { color: #555555 } /* Name.Decorator */
.highlight .ne { color: #bb0066; font-weight: bold } /* Name.Exception */
.highlight .nf { color: #0066bb; font-weight: bold } /* Name.Function */
.highlight .nl { color: #336699; font-style: italic } /* Name.Label */
.highlight .nn { color: #bb0066; font-weight: bold } /* Name.Namespace */
.highlight .py { color: #336699; font-weight: bold } /* Name.Property */
.highlight .nt { color: #bb0066; font-weight: bold } /* Name.Tag */
.highlight .nv { color: #336699 } /* Name.Variable */
.highlight .ow { color: #008800 } /* Operator.Word */
.highlight .w { color: #bbbbbb } /* Text.Whitespace */
.highlight .mb { color: #0000DD; font-weight: bold } /* Literal.Number.Bin */
.highlight .mf { color: #0000DD; font-weight: bold } /* Literal.Number.Float */
.highlight .mh { color: #0000DD; font-weight: bold } /* Literal.Number.Hex */
.highlight .mi { color: #0000DD; font-weight: bold } /* Literal.Number.Integer */
.highlight .mo { color: #0000DD; font-weight: bold } /* Literal.Number.Oct */
.highlight .sa { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Affix */
.highlight .sb { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Backtick */
.highlight .sc { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Char */
.highlight .dl { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Delimiter */
.highlight .sd { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Doc */
.highlight .s2 { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Double */
.highlight .se { color: #0044dd; background-color: #fff0f0 } /* Literal.String.Escape */
.highlight .sh { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Heredoc */
.highlight .si { color: #3333bb; background-color: #fff0f0 } /* Literal.String.Interpol */
.highlight .sx { color: #22bb22; background-color: #f0fff0 } /* Literal.String.Other */
.highlight .sr { color: #008800; background-color: #fff0ff } /* Literal.String.Regex */
.highlight .s1 { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Single */
.highlight .ss { color: #aa6600; background-color: #fff0f0 } /* Literal.String.Symbol */
.highlight .bp { color: #003388 } /* Name.Builtin.Pseudo */
.highlight .fm { color: #0066bb; font-weight: bold } /* Name.Function.Magic */
.highlight .vc { color: #336699 } /* Name.Variable.Class */
.highlight .vg { color: #dd7700 } /* Name.Variable.Global */
.highlight .vi { color: #3333bb } /* Name.Variable.Instance */
.highlight .vm { color: #336699 } /* Name.Variable.Magic */
.highlight .il { color: #0000DD; font-weight: bold } /* Literal.Number.Integer.Long */
}
# SRv6: Segment Routing for IPv6    {#srv6_doc}

This is a memo intended to contain documentation of the VPP SRv6 implementation.
Everything that is not directly obvious should come here.
For any feedback on content that should be explained please mailto:pcamaril@cisco.com

## Segment Routing

Segment routing is a network technology focused on addressing the limitations of existing IP and Multiprotocol Label Switching (MPLS) networks in terms of simplicity, scale, and ease of operation. It is a foundation for application engineered routing as it prepares the networks for new business models where applications can control the network behavior.

Segment routing seeks the right balance between distributed intelligence and centralized optimization and programming. It was built for the software-defined networking (SDN) era.

Segment routing enhances packet forwarding behavior by enabling a network to transport unicast packets through a specific forwarding path, different from the normal path that a packet usually takes (IGP shortest path or BGP best path). This capability benefits many use cases, and one can build those specific paths based on application requirements.

Segment routing uses the source routing paradigm. A node, usually a router but also a switch, a trusted server, or a virtual forwarder running on a hypervisor, steers a packet through an ordered list of instructions, called segments. A segment can represent any instruction, topological or service-based. A segment can have a local semantic to a segment-routing node or global within a segment-routing network. Segment routing allows an operator to enforce a flow through any topological path and service chain while maintaining per-flow state only at the ingress node to the segment-routing network. Segment routing also supports equal-cost multipath (ECMP) by design.

Segment routing can operate with either an MPLS or an IPv6 data plane. All the currently available MPLS services, such as Layer 3 VPN (L3VPN), L2VPN (Virtual Private Wire Service [VPWS], Virtual Private LAN Services [VPLS], Ethernet VPN [E-VPN], and Provider Backbone Bridging Ethernet VPN [PBB-EVPN]), can run on top of a segment-routing transport network.

**The implementation of Segment Routing in VPP covers both the IPv6 data plane (SRv6) as well as the MPLS data plane (SR-MPLS). This page contains the SRv6 documentation.**

## Segment Routing terminology

* Segment Routing Header (SRH): IPv6 routing extension header of type 'Segment Routing'. (draft-ietf-6man-segment-routing-header-05)
* SegmentID (SID): is an IPv6 address.
* Segment List (SL) (SID List): is the sequence of SIDs that the packet will traverse.
* SR Policy: defines the SRH that will be applied to a packet. A packet steered into an SR policy may either receive the SRH by IPv6 header encapsulation (as recommended in draft-ietf-6man-rfc2460bis) or it could be inserted within an existing IPv6 header. An SR policy is uniquely identified by its Binding SID and associated with a weighted set of Segment Lists. In case several SID lists are defined, traffic steered into the policy is unevenly load-balanced among them according to their respective weights.
* Local SID: is a SID associated with a processing function on the local node, which may go from advancing to the next SID in the SRH, to complex user-defined behaviors. When a FIB lookup, either in the main FIB or in a specific VRF, returns a match on a local SID, the associated function is performed.
* BindingSID: a BindingSID is a SID (only one) associated one-one with an SR Policy. If a packet arrives with an IPv6 DA corresponding to a BindingSID, then the SR policy will be applied to such packet.

## SRv6 Features in VPP

The <a href="https://datatracker.ietf.org/doc/draft-filsfils-spring-srv6-network-programming/">SRv6 Network Programming (*draft-filsfils-spring-srv6-network-programming*)</a> defines the SRv6 architecture.

VPP supports the following SRv6 LocalSID functions: End, End.X, End.DX6, End.DT6, End.DX4, End.DT4, End.DX2, End.B6, End.B6.Encaps.

For further information and how to configure each specific function: @subpage srv6_localsid_doc


The <a href="https://datatracker.ietf.org/doc/draft-filsfils-spring-segment-routing-policy/">Segment Routing Policy (*draft-filsfils-spring-segment-routing-policy*)</a> defines SR Policies.

VPP supports SRv6 Policies with T.Insert and T.Encaps behaviors.

For further information on how to create SR Policies: @subpage srv6_policy_doc

For further information on how to steer traffic into SR Policies: @subpage srv6_steering_doc

## SRv6 LocalSID development framework

One of the *key* concepts of SRv6 is network programmability. This is why an SRv6 LocalSID is associated with a specific function.

However, the true way to enable network programmability is to allow any developer to **easily** create their own SRv6 LocalSID function. That is why we have added API calls so that any developer can code their own SRv6 LocalSID behaviors as plugins and add them to the running SRv6 code.

The principle is that the developer only codes the behavior — the graph node. However, all the FIB handling, SR LocalSID instantiation, and so on are done by the VPP SRv6 code.

For more information please refer to: @subpage srv6_plugin_doc

Available SRv6 plugins include:

- @subpage srv6_as_plugin_doc
- @subpage srv6_ad_plugin_doc
- @subpage srv6_am_plugin_doc
- @subpage srv6_mobile_plugin_doc
535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773
/*
 *------------------------------------------------------------------
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#define _GNU_SOURCE
#include <stdint.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/ip/ip6_hop_by_hop_packet.h>
#include <vnet/bonding/node.h>
#include <vppinfra/lb_hash_hash.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/arp_packet.h>

/* X-macro listing per-packet TX error conditions for the bond device.
 * Each _(tag, description) entry expands to an enum member below and a
 * matching human-readable string for the error/counter infrastructure. */
#define foreach_bond_tx_error     \
  _(NONE, "no error")             \
  _(IF_DOWN, "interface down")    \
  _(NO_SLAVE, "no slave")

/* BOND_TX_ERROR_NONE, BOND_TX_ERROR_IF_DOWN, BOND_TX_ERROR_NO_SLAVE,
 * plus BOND_TX_N_ERROR as the total count. */
typedef enum
{
#define _(f,s) BOND_TX_ERROR_##f,
  foreach_bond_tx_error
#undef _
    BOND_TX_N_ERROR,
} bond_tx_error_t;

/* Error description strings, indexed by bond_tx_error_t. */
static char *bond_tx_error_strings[] = {
#define _(n,s) s,
  foreach_bond_tx_error
#undef _
};

/*
 * Render a bond TX packet trace: source/destination MACs plus the
 * slave and bond interface names the packet traversed.
 */
static u8 *
format_bond_tx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  bond_packet_trace_t *t = va_arg (*args, bond_packet_trace_t *);
  vnet_main_t *vnm = vnet_get_main ();
  vnet_hw_interface_t *slave_hw =
    vnet_get_sup_hw_interface (vnm, t->sw_if_index);
  vnet_hw_interface_t *bond_hw =
    vnet_get_sup_hw_interface (vnm, t->bond_sw_if_index);

  /* "src <mac>, dst <mac>, <slave-if> -> <bond-if>" */
  return format (s, "src %U, dst %U, %s -> %s",
		 format_ethernet_address, t->ethernet.src_address,
		 format_ethernet_address, t->ethernet.dst_address,
		 slave_hw->name, bond_hw->name);
}

/*
 * Format the interface name for a bond device instance, e.g.
 * "BondEthernet0".
 *
 * @param s            vec to append to
 * @param args         single u32 dev_instance argument
 * @return             the (possibly reallocated) vec
 */
u8 *
format_bond_interface_name (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  bond_main_t *bm = &bond_main;
  bond_if_t *bif = pool_elt_at_index (bm->interfaces, dev_instance);

  /* dev_instance is a u32, so use %u: the old "%lu" told format() to
   * pull a long off the va_list while only an int-promoted u32 was
   * passed, a width mismatch on LP64 targets. */
  s = format (s, "BondEthernet%u", bif->dev_instance);

  return s;
}

/*
 * L2 mode change callback for a bond interface.  When the first L2
 * interface is configured on the bond, put every slave into
 * accept-all (promiscuous) mode and redirect their RX to
 * ethernet-input so bridged traffic is seen.
 */
static __clib_unused clib_error_t *
bond_set_l2_mode_function (vnet_main_t * vnm,
			   struct vnet_hw_interface_t *bif_hw,
			   i32 l2_if_adjust)
{
  bond_if_t *bif = bond_get_master_by_sw_if_index (bif_hw->sw_if_index);
  u32 *sw_idx;
  struct vnet_hw_interface_t *slave_hw;

  if (bif == 0)
    return 0;

  /* Only act when the very first L2 interface is added on this port. */
  if (l2_if_adjust != 1 || bif_hw->l2_if_count != 1)
    return 0;

  vec_foreach (sw_idx, bif->slaves)
  {
    slave_hw = vnet_get_sup_hw_interface (vnm, *sw_idx);
    /* Slaves must accept frames for any L2 address when bridging. */
    ethernet_set_flags (vnm, slave_hw->hw_if_index,
			ETHERNET_INTERFACE_FLAG_ACCEPT_ALL);

    /* Ensure all packets go to ethernet-input. */
    ethernet_set_rx_redirect (vnm, slave_hw, 1);
  }

  return 0;
}

/*
 * Sub-interface add/delete callback for bond interfaces.  Currently a
 * no-op placeholder; no per-subif state is maintained.
 */
static __clib_unused clib_error_t *
bond_subif_add_del_function (vnet_main_t * vnm, u32 hw_if_index,
			     struct vnet_sw_interface_t *st, int is_add)
{
  return 0;			/* nothing to do yet */
}

/*
 * Admin up/down handler for the bond interface.  Records the new admin
 * state and raises the hardware link flag only when the bond is up AND
 * at least one slave is active.  (Link-down propagation is handled
 * elsewhere, when slaves change state.)
 */
static clib_error_t *
bond_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  bond_main_t *bm = &bond_main;
  bond_if_t *bif = pool_elt_at_index (bm->interfaces, hif->dev_instance);

  bif->admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ? 1 : 0;
  if (bif->admin_up && vec_len (bif->active_slaves) > 0)
    vnet_hw_interface_set_flags (vnm, bif->hw_if_index,
				 VNET_HW_INTERFACE_FLAG_LINK_UP);
  return 0;
}

/* Broadcast "load balance": replicate b0 onto every active slave.
 * Copies are queued directly onto per-thread, per-port TX frames for
 * slaves 1..N-1; the caller transmits the original buffer b0 on the
 * returned port index (always 0). */
static_always_inline u32
bond_load_balance_broadcast (vlib_main_t * vm, vlib_node_runtime_t * node,
			     bond_if_t * bif, vlib_buffer_t * b0,
			     uword slave_count)
{
  vnet_main_t *vnm = vnet_get_main ();
  vlib_buffer_t *c0;
  int port;
  u32 *to_next = 0;
  u32 sw_if_index;
  vlib_frame_t *f;
  u16 thread_index = vm->thread_index;

  /* port starts at 1: slave 0 gets the original buffer via the caller. */
  for (port = 1; port < slave_count; port++)
    {
      sw_if_index = *vec_elt_at_index (bif->active_slaves, port);
      /* Lazily allocate a TX frame for this (thread, port) pair; it is
	 flushed elsewhere by the TX path. */
      if (bif->per_thread_info[thread_index].frame[port] == 0)
	bif->per_thread_info[thread_index].frame[port] =
	  vnet_get_frame_to_sw_interface (vnm, sw_if_index);
      f = bif->per_thread_info[thread_index].frame[port];
      to_next = vlib_frame_vector_args (f);
      to_next += f->n_vectors;
      c0 = vlib_buffer_copy (vm, b0);
      /* NOTE(review): if the copy fails (buffer exhaustion) this slave
	 is silently skipped — no drop counter is incremented here. */
      if (PREDICT_TRUE (c0 != 0))
	{
	  vnet_buffer (c0)->sw_if_index[VLIB_TX] = sw_if_index;
	  to_next[0] = vlib_get_buffer_index (vm, c0);
	  f->n_vectors++;
	}
    }

  return 0;
}

/*
 * Layer-2 load balancing: hash on the Ethernet addresses.  Reads 8
 * unaligned bytes starting at the destination MAC (dst MAC + first 2
 * src bytes) and 4 unaligned bytes covering src bytes 2..5, then folds
 * them with lb_hash_hash_2_tuples.
 */
static_always_inline u32
bond_load_balance_l2 (vlib_main_t * vm, vlib_node_runtime_t * node,
		      bond_if_t * bif, vlib_buffer_t * b0, uword slave_count)
{
  ethernet_header_t *eth = (ethernet_header_t *) vlib_buffer_get_current (b0);
  u64 dst_bits = clib_mem_unaligned ((u64 *) & eth->dst_address[0], u64);
  u32 src_bits = clib_mem_unaligned ((u32 *) & eth->src_address[2], u32);
  u32 hash = lb_hash_hash_2_tuples (dst_bits, src_bits);

  /* Power-of-two slave counts can use a mask instead of a divide. */
  if (BOND_MODULO_SHORTCUT (slave_count))
    return hash & (slave_count - 1);
  return hash % slave_count;
}

/*
 * Locate the effective ethertype field of a frame, skipping up to two
 * VLAN (802.1Q / QinQ) tags.
 *
 * @param eth   pointer to the start of the Ethernet header
 * @return      pointer to the (network-order) ethertype of the payload;
 *              the L3 header begins immediately after this field.
 */
static_always_inline u16 *
bond_locate_ethertype (ethernet_header_t * eth)
{
  u16 *ethertype_p;
  ethernet_vlan_header_t *vlan;

  if (!ethernet_frame_is_tagged (clib_net_to_host_u16 (eth->type)))
    {
      ethertype_p = &eth->type;
    }
  else
    {
      vlan = (void *) (eth + 1);
      ethertype_p = &vlan->type;
      /* QinQ: skip a second VLAN tag if present.  Use the file's
       * clib_net_to_host_u16 convention rather than ntohs. */
      if (*ethertype_p == clib_net_to_host_u16 (ETHERNET_TYPE_VLAN))
	{
	  vlan++;
	  ethertype_p = &vlan->type;
	}
    }
  return ethertype_p;
}

/*
 * Layer-2+3 load balancing: hash on both MAC addresses plus the IPv4
 * or IPv6 source/destination addresses.  Non-IP frames fall back to
 * pure L2 hashing.
 *
 * @return slave index in [0, slave_count)
 */
static_always_inline u32
bond_load_balance_l23 (vlib_main_t * vm, vlib_node_runtime_t * node,
		       bond_if_t * bif, vlib_buffer_t * b0, uword slave_count)
{
  ethernet_header_t *eth = (ethernet_header_t *) vlib_buffer_get_current (b0);
  u8 ip_version;
  ip4_header_t *ip4;
  u16 ethertype, *ethertype_p;
  u32 *mac1, *mac2, *mac3;
  u32 a, c;

  ethertype_p = bond_locate_ethertype (eth);
  ethertype = clib_mem_unaligned (ethertype_p, u16);

  if ((ethertype != htons (ETHERNET_TYPE_IP4)) &&
      (ethertype != htons (ETHERNET_TYPE_IP6)))
    return (bond_load_balance_l2 (vm, node, bif, b0, slave_count));

  /* The L3 header always starts right after the located ethertype,
   * which correctly accounts for any VLAN tags. */
  ip4 = (ip4_header_t *) (ethertype_p + 1);
  ip_version = (ip4->ip_version_and_header_length >> 4);

  /* Fold dst MAC (8 bytes incl. 2 src bytes) and src MAC bytes 2..5
   * into one 32-bit value; identical for the v4 and v6 paths. */
  mac1 = (u32 *) & eth->dst_address[0];
  mac2 = (u32 *) & eth->dst_address[4];
  mac3 = (u32 *) & eth->src_address[2];
  a = clib_mem_unaligned (mac1, u32) ^ clib_mem_unaligned (mac2, u32) ^
    clib_mem_unaligned (mac3, u32);

  if (ip_version == 0x4)
    {
      c =
	lb_hash_hash_2_tuples (clib_mem_unaligned (&ip4->address_pair, u64),
			       a);
      if (BOND_MODULO_SHORTCUT (slave_count))
	return (c & (slave_count - 1));
      else
	return c % slave_count;
    }
  else if (ip_version == 0x6)
    {
      /* BUGFIX: previously (eth + 1), which mis-locates the IPv6
       * header on VLAN-tagged frames; ethertype_p + 1 matches the
       * IPv4 path above. */
      ip6_header_t *ip6 = (ip6_header_t *) (ethertype_p + 1);

      c =
	lb_hash_hash (clib_mem_unaligned
		      (&ip6->src_address.as_uword[0], uword),
		      clib_mem_unaligned (&ip6->src_address.as_uword[1],
					  uword),
		      clib_mem_unaligned (&ip6->dst_address.as_uword[0],
					  uword),
		      clib_mem_unaligned (&ip6->dst_address.as_uword[1],
					  uword), a);
      if (BOND_MODULO_SHORTCUT (slave_count))
	return (c & (slave_count - 1));
      else
	return c % slave_count;
    }
  return (bond_load_balance_l2 (vm, node, bif, b0, slave_count));
}

/*
 * Layer-3+4 load balancing: hash on IP source/destination addresses
 * plus TCP/UDP ports (ports contribute 0 for other protocols).
 * Non-IP frames fall back to pure L2 hashing.
 *
 * @return slave index in [0, slave_count)
 */
static_always_inline u32
bond_load_balance_l34 (vlib_main_t * vm, vlib_node_runtime_t * node,
		       bond_if_t * bif, vlib_buffer_t * b0, uword slave_count)
{
  ethernet_header_t *eth = (ethernet_header_t *) vlib_buffer_get_current (b0);
  u8 ip_version;
  uword is_tcp_udp;
  ip4_header_t *ip4;
  u16 ethertype, *ethertype_p;

  ethertype_p = bond_locate_ethertype (eth);
  ethertype = clib_mem_unaligned (ethertype_p, u16);

  if ((ethertype != htons (ETHERNET_TYPE_IP4)) &&
      (ethertype != htons (ETHERNET_TYPE_IP6)))
    return (bond_load_balance_l2 (vm, node, bif, b0, slave_count));

  /* L3 header starts right after the located ethertype (VLAN-safe). */
  ip4 = (ip4_header_t *) (ethertype_p + 1);
  ip_version = (ip4->ip_version_and_header_length >> 4);

  if (ip_version == 0x4)
    {
      u32 a, c, t1, t2;
      /* NOTE(review): assumes a 20-byte IPv4 header (no options) when
       * locating the L4 header — confirm acceptable for this path. */
      tcp_header_t *tcp = (void *) (ip4 + 1);

      is_tcp_udp = (ip4->protocol == IP_PROTOCOL_TCP) ||
	(ip4->protocol == IP_PROTOCOL_UDP);
      t1 = is_tcp_udp ? clib_mem_unaligned (&tcp->src, u16) : 0;
      t2 = is_tcp_udp ? clib_mem_unaligned (&tcp->dst, u16) : 0;
      a = t1 ^ t2;
      c =
	lb_hash_hash_2_tuples (clib_mem_unaligned (&ip4->address_pair, u64),
			       a);
      if (BOND_MODULO_SHORTCUT (slave_count))
	return (c & (slave_count - 1));
      else
	return c % slave_count;
    }
  else if (ip_version == 0x6)
    {
      u64 a;
      u32 c, t1, t2;
      /* BUGFIX: previously (eth + 1), which mis-locates the IPv6
       * header on VLAN-tagged frames; ethertype_p + 1 matches the
       * IPv4 path above. */
      ip6_header_t *ip6 = (ip6_header_t *) (ethertype_p + 1);
      tcp_header_t *tcp = (void *) (ip6 + 1);

      is_tcp_udp = 0;
      if (PREDICT_TRUE ((ip6->protocol == IP_PROTOCOL_TCP) ||
			(ip6->protocol == IP_PROTOCOL_UDP)))
	{
	  is_tcp_udp = 1;
	  tcp = (void *) (ip6 + 1);
	}
      else if (ip6->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS)
	{
	  /* Hop-by-hop options: the L4 header lives after the HBH
	   * extension, whose length field is in 8-byte units minus 1. */
	  ip6_hop_by_hop_header_t *hbh =
	    (ip6_hop_by_hop_header_t *) (ip6 + 1);
	  if ((hbh->protocol == IP_PROTOCOL_TCP)
	      || (hbh->protocol == IP_PROTOCOL_UDP))
	    {
	      is_tcp_udp = 1;
	      tcp = (tcp_header_t *) ((u8 *) hbh + ((hbh->length + 1) << 3));
	    }
	}
      t1 = is_tcp_udp ? clib_mem_unaligned (&tcp->src, u16) : 0;
      t2 = is_tcp_udp ? clib_mem_unaligned (&tcp->dst, u16) : 0;
      a = t1 ^ t2;
      c =
	lb_hash_hash (clib_mem_unaligned
		      (&ip6->src_address.as_uword[0], uword),
		      clib_mem_unaligned (&ip6->src_address.as_uword[1],
					  uword),
		      clib_mem_unaligned (&ip6->dst_address.as_uword[0],
					  uword),
		      clib_mem_unaligned (&ip6->dst_address.as_uword[1],
					  uword), a);
      if (BOND_MODULO_SHORTCUT (slave_count))
	return (c & (slave_count - 1));
      else
	return c % slave_count;
    }

  return (bond_load_balance_l2 (vm, node, bif, b0, slave_count));
}

/*
 * Round-robin load balancing: advance the per-bond rotation index and
 * wrap it to the active slave count.  NOTE(review): caller holds
 * bif->lockp around this path, so the shared index update is serialized.
 */
static_always_inline u32
bond_load_balance_round_robin (vlib_main_t * vm,
			       vlib_node_runtime_t * node,
			       bond_if_t * bif, vlib_buffer_t * b0,
			       uword slave_count)
{
  u32 next = bif->lb_rr_last_index + 1;

  /* Mask when slave_count is a power of two, else modulo. */
  if (BOND_MODULO_SHORTCUT (slave_count))
    next &= slave_count - 1;
  else
    next %= slave_count;

  bif->lb_rr_last_index = next;
  return next;
}

/*
 * Active-backup "load balancing": slot 0 of active_slaves is always
 * the active port; everything else is standby, so all traffic is
 * steered to the first slave.
 */
static_always_inline u32
bond_load_balance_active_backup (vlib_main_t * vm,
				 vlib_node_runtime_t * node,
				 bond_if_t * bif, vlib_buffer_t * b0,
				 uword slave_count)
{
  return 0;			/* always the first (active) slave */
}

/* Dispatch table mapping each load-balance algorithm (expanded from
 * foreach_bond_lb_algo) to its bond_load_balance_* handler; indexed by
 * the bond's configured LB mode. */
static bond_load_balance_func_t bond_load_balance_table[] = {
#define _(v,f,s, p) { bond_load_balance_##p },
  foreach_bond_lb_algo
#undef _
};

/*
 * TX node function for the bond interface.
 *
 * Distributes the buffers of the incoming frame across the bond's active
 * slave interfaces using the load-balance algorithm selected by bif->lb.
 * Buffers are batched into one per-slave, per-thread frame, then each
 * non-empty frame is handed to its slave's interface output node.
 *
 * Returns frame->n_vectors (number of packets consumed) in all cases,
 * including the drop paths.
 */
static uword
bond_tx_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
	    vlib_frame_t * frame)
{
  vnet_interface_output_runtime_t *rund = (void *) node->runtime_data;
  bond_main_t *bm = &bond_main;
  bond_if_t *bif = pool_elt_at_index (bm->interfaces, rund->dev_instance);
  u32 bi0, bi1, bi2, bi3;
  vlib_buffer_t *b0, *b1, *b2, *b3;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from;
  ethernet_header_t *eth;
  u32 port;
  u32 sw_if_index, sw_if_index1, sw_if_index2, sw_if_index3;
  bond_packet_trace_t *t0;
  uword n_trace = vlib_get_trace_count (vm, node);
  u16 thread_index = vm->thread_index;
  vnet_main_t *vnm = vnet_get_main ();
  u32 *to_next;
  u32 sif_if_index, sif_if_index1, sif_if_index2, sif_if_index3;
  vlib_frame_t *f;
  uword slave_count;

  /* Bond interface administratively down: drop the whole frame,
     bump the drop counter and record the if-down error. */
  if (PREDICT_FALSE (bif->admin_up == 0))
    {
      vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
      vlib_increment_simple_counter (vnet_main.interface_main.sw_if_counters +
				     VNET_INTERFACE_COUNTER_DROP,
				     thread_index, bif->sw_if_index,
				     frame->n_vectors);
      vlib_error_count (vm, node->node_index, BOND_TX_ERROR_IF_DOWN,
			frame->n_vectors);
      return frame->n_vectors;
    }

  /* The spinlock (when initialized) protects bif state, including the
     active-slaves vector and the per-thread frame cache, for the rest of
     this function. */
  clib_spinlock_lock_if_init (&bif->lockp);
  slave_count = vec_len (bif->active_slaves);
  /* No active slaves: count, drop everything and record the error. */
  if (PREDICT_FALSE (slave_count == 0))
    {
      bi0 = from[0];
      b0 = vlib_get_buffer (vm, bi0);
      /* NOTE(review): the TX combined counter is bumped by
         frame->n_vectors packets but only b0->current_length bytes,
         i.e. the first buffer's length stands in for all of them —
         TODO confirm this is intended. */
      vlib_increment_combined_counter
	(vnet_main.interface_main.combined_sw_if_counters
	 + VNET_INTERFACE_COUNTER_TX, thread_index, bif->sw_if_index,
	 frame->n_vectors, b0->current_length);

      vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
      vlib_increment_simple_counter (vnet_main.interface_main.sw_if_counters +
				     VNET_INTERFACE_COUNTER_DROP,
				     thread_index, bif->sw_if_index,
				     frame->n_vectors);
      vlib_error_count (vm, node->node_index, BOND_TX_ERROR_NO_SLAVE,
			frame->n_vectors);
      clib_spinlock_unlock_if_init (&bif->lockp);
      return frame->n_vectors;
    }

  /* Make sure this thread's frame-pointer array has a slot for every
     active slave (slots are lazily filled with frames below). */
  vec_validate_aligned (bif->per_thread_info[thread_index].frame, slave_count,
			CLIB_CACHE_LINE_BYTES);

  /* Number of buffers / pkts */
  n_left_from = frame->n_vectors;

  while (n_left_from > 0)
    {
      /* Quad loop: process four packets per iteration. */
      while (n_left_from >= 4)
	{
	  u32 next0 = 0, next1 = 0, next2 = 0, next3 = 0;
	  u32 port0 = 0, port1 = 0, port2 = 0, port3 = 0;

	  // Prefetch next iteration
	  if (n_left_from >= 8)
	    {
	      vlib_buffer_t *p4, *p5, *p6, *p7;

	      p4 = vlib_get_buffer (vm, from[4]);
	      p5 = vlib_get_buffer (vm, from[5]);
	      p6 = vlib_get_buffer (vm, from[6]);
	      p7 = vlib_get_buffer (vm, from[7]);

	      vlib_prefetch_buffer_header (p4, LOAD);
	      vlib_prefetch_buffer_header (p5, LOAD);
	      vlib_prefetch_buffer_header (p6, LOAD);
	      vlib_prefetch_buffer_header (p7, LOAD);

	      CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, LOAD);
	      CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, LOAD);
	      CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, LOAD);
	      CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, LOAD);
	    }

	  bi0 = from[0];
	  bi1 = from[1];
	  bi2 = from[2];
	  bi3 = from[3];

	  b0 = vlib_get_buffer (vm, bi0);
	  b1 = vlib_get_buffer (vm, bi1);
	  b2 = vlib_get_buffer (vm, bi2);
	  b3 = vlib_get_buffer (vm, bi3);

	  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
	  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
	  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b2);
	  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b3);

	  /* Save the bond sw_if_index for the trace before it is
	     overwritten with the slave's sw_if_index below. */
	  sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
	  sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
	  sw_if_index2 = vnet_buffer (b2)->sw_if_index[VLIB_TX];
	  sw_if_index3 = vnet_buffer (b3)->sw_if_index[VLIB_TX];

	  /* With a single slave every packet goes to port 0; skip the
	     (potentially costly) hash computation. */
	  if (PREDICT_TRUE (slave_count != 1))
	    {
	      port0 =
		(bond_load_balance_table[bif->lb]).load_balance (vm, node,
								 bif, b0,
								 slave_count);
	      port1 =
		(bond_load_balance_table[bif->lb]).load_balance (vm, node,
								 bif, b1,
								 slave_count);
	      port2 =
		(bond_load_balance_table[bif->lb]).load_balance (vm, node,
								 bif, b2,
								 slave_count);
	      port3 =
		(bond_load_balance_table[bif->lb]).load_balance (vm, node,
								 bif, b3,
								 slave_count);
	    }

	  /* Map the chosen port index to the slave's sw_if_index. */
	  sif_if_index = *vec_elt_at_index (bif->active_slaves, port0);
	  sif_if_index1 = *vec_elt_at_index (bif->active_slaves, port1);
	  sif_if_index2 = *vec_elt_at_index (bif->active_slaves, port2);
	  sif_if_index3 = *vec_elt_at_index (bif->active_slaves, port3);

	  /* Retarget each buffer to its slave interface. */
	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = sif_if_index;
	  vnet_buffer (b1)->sw_if_index[VLIB_TX] = sif_if_index1;
	  vnet_buffer (b2)->sw_if_index[VLIB_TX] = sif_if_index2;
	  vnet_buffer (b3)->sw_if_index[VLIB_TX] = sif_if_index3;

	  /* Lazily allocate an output frame for each slave the first
	     time it is used on this thread. */
	  if (PREDICT_FALSE ((bif->per_thread_info[thread_index].frame[port0]
			      == 0)))
	    bif->per_thread_info[thread_index].frame[port0] =
	      vnet_get_frame_to_sw_interface (vnm, sif_if_index);

	  if (PREDICT_FALSE ((bif->per_thread_info[thread_index].frame[port1]
			      == 0)))
	    bif->per_thread_info[thread_index].frame[port1] =
	      vnet_get_frame_to_sw_interface (vnm, sif_if_index1);

	  if (PREDICT_FALSE ((bif->per_thread_info[thread_index].frame[port2]
			      == 0)))
	    bif->per_thread_info[thread_index].frame[port2] =
	      vnet_get_frame_to_sw_interface (vnm, sif_if_index2);

	  if (PREDICT_FALSE ((bif->per_thread_info[thread_index].frame[port3]
			      == 0)))
	    bif->per_thread_info[thread_index].frame[port3] =
	      vnet_get_frame_to_sw_interface (vnm, sif_if_index3);

	  /* Append each buffer index to its slave's pending frame. */
	  f = bif->per_thread_info[thread_index].frame[port0];
	  to_next = vlib_frame_vector_args (f);
	  to_next += f->n_vectors;
	  to_next[0] = vlib_get_buffer_index (vm, b0);
	  f->n_vectors++;

	  f = bif->per_thread_info[thread_index].frame[port1];
	  to_next = vlib_frame_vector_args (f);
	  to_next += f->n_vectors;
	  to_next[0] = vlib_get_buffer_index (vm, b1);
	  f->n_vectors++;

	  f = bif->per_thread_info[thread_index].frame[port2];
	  to_next = vlib_frame_vector_args (f);
	  to_next += f->n_vectors;
	  to_next[0] = vlib_get_buffer_index (vm, b2);
	  f->n_vectors++;

	  f = bif->per_thread_info[thread_index].frame[port3];
	  to_next = vlib_frame_vector_args (f);
	  to_next += f->n_vectors;
	  to_next[0] = vlib_get_buffer_index (vm, b3);
	  f->n_vectors++;

	  /* Packet tracing: the checks are nested, so b1..b3 are traced
	     only while trace slots remain after tracing the preceding
	     buffers of this quad. */
	  if (PREDICT_FALSE (n_trace > 0))
	    {
	      vlib_trace_buffer (vm, node, next0, b0, 0 /* follow_chain */ );
	      vlib_set_trace_count (vm, node, --n_trace);
	      t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
	      eth = (ethernet_header_t *) vlib_buffer_get_current (b0);
	      t0->ethernet = *eth;
	      t0->sw_if_index = sw_if_index;
	      t0->bond_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];

	      if (PREDICT_TRUE (n_trace > 0))
		{
		  vlib_trace_buffer (vm, node, next1, b1,
				     0 /* follow_chain */ );
		  vlib_set_trace_count (vm, node, --n_trace);
		  t0 = vlib_add_trace (vm, node, b1, sizeof (*t0));
		  eth = (ethernet_header_t *) vlib_buffer_get_current (b1);
		  t0->ethernet = *eth;
		  t0->sw_if_index = sw_if_index1;
		  t0->bond_sw_if_index =
		    vnet_buffer (b1)->sw_if_index[VLIB_TX];

		  if (PREDICT_TRUE (n_trace > 0))
		    {
		      vlib_trace_buffer (vm, node, next2, b2,
					 0 /* follow_chain */ );
		      vlib_set_trace_count (vm, node, --n_trace);
		      t0 = vlib_add_trace (vm, node, b2, sizeof (*t0));
		      eth =
			(ethernet_header_t *) vlib_buffer_get_current (b2);
		      t0->ethernet = *eth;
		      t0->sw_if_index = sw_if_index2;
		      t0->bond_sw_if_index =
			vnet_buffer (b2)->sw_if_index[VLIB_TX];

		      if (PREDICT_TRUE (n_trace > 0))
			{
			  vlib_trace_buffer (vm, node, next3, b3,
					     0 /* follow_chain */ );
			  vlib_set_trace_count (vm, node, --n_trace);
			  t0 = vlib_add_trace (vm, node, b3, sizeof (*t0));
			  eth =
			    (ethernet_header_t *)
			    vlib_buffer_get_current (b3);
			  t0->ethernet = *eth;
			  t0->sw_if_index = sw_if_index3;
			  t0->bond_sw_if_index =
			    vnet_buffer (b3)->sw_if_index[VLIB_TX];
			}
		    }
		}
	    }
	  from += 4;
	  n_left_from -= 4;
	}

      /* Single loop: handle the 0-3 remaining packets. */
      while (n_left_from > 0)
	{
	  u32 next0 = 0;
	  u32 port0 = 0;

	  // Prefetch next iteration
	  if (n_left_from > 1)
	    {
	      vlib_buffer_t *p2;

	      p2 = vlib_get_buffer (vm, from[1]);
	      vlib_prefetch_buffer_header (p2, LOAD);
	      CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, LOAD);
	    }

	  bi0 = from[0];
	  b0 = vlib_get_buffer (vm, bi0);

	  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);

	  /* Save bond sw_if_index for the trace before retargeting. */
	  sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];

	  if (PREDICT_TRUE (slave_count != 1))
	    port0 =
	      (bond_load_balance_table[bif->lb]).load_balance (vm, node, bif,
							       b0,
							       slave_count);
	  sif_if_index = *vec_elt_at_index (bif->active_slaves, port0);
	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = sif_if_index;
	  if (PREDICT_FALSE
	      ((bif->per_thread_info[thread_index].frame[port0] == 0)))
	    bif->per_thread_info[thread_index].frame[port0] =
	      vnet_get_frame_to_sw_interface (vnm, sif_if_index);
	  f = bif->per_thread_info[thread_index].frame[port0];
	  to_next = vlib_frame_vector_args (f);
	  to_next += f->n_vectors;
	  to_next[0] = vlib_get_buffer_index (vm, b0);
	  f->n_vectors++;

	  if (PREDICT_FALSE (n_trace > 0))
	    {
	      vlib_trace_buffer (vm, node, next0, b0, 0 /* follow_chain */ );
	      vlib_set_trace_count (vm, node, --n_trace);
	      t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
	      eth = (ethernet_header_t *) vlib_buffer_get_current (b0);
	      t0->ethernet = *eth;
	      t0->sw_if_index = sw_if_index;
	      t0->bond_sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
	    }

	  from += 1;
	  n_left_from -= 1;
	}
    }

  /* Flush: hand every non-empty pending frame to its slave interface
     and clear the per-thread cache slot. */
  for (port = 0; port < slave_count; port++)
    {
      f = bif->per_thread_info[thread_index].frame[port];
      if (f == 0)
	continue;

      sw_if_index = *vec_elt_at_index (bif->active_slaves, port);
      vnet_put_frame_to_sw_interface (vnm, sw_if_index, f);
      bif->per_thread_info[thread_index].frame[port] = 0;
    }

  /* Per-packet TX counter for the bond interface itself. */
  vlib_increment_simple_counter (vnet_main.interface_main.sw_if_counters
				 + VNET_INTERFACE_COUNTER_TX, thread_index,
				 bif->sw_if_index, frame->n_vectors);

  clib_spinlock_unlock_if_init (&bif->lockp);
  return frame->n_vectors;
}

/*
 * Per-sw-interface callback invoked while walking a bond hw interface
 * after an active-slave switch: announce the interface's addresses by
 * sending a gratuitous ARP (IPv4) and an unsolicited neighbor
 * advertisement (IPv6).  Always continues the walk.
 */
static walk_rc_t
bond_active_interface_switch_cb (vnet_main_t * vnm, u32 sw_if_index,
				 void *arg)
{
  vlib_main_t *vm = bond_main.vlib_main;

  send_ip4_garp (vm, sw_if_index);
  send_ip6_na (vm, sw_if_index);

  return WALK_CONTINUE;
}

/*
 * Bond process node: waits for BOND_SEND_GARP_NA events carrying hw
 * interface indices, and for each one walks all software interfaces of
 * that hw interface to send GARP/NA announcements (see
 * bond_active_interface_switch_cb).  Never returns.
 */
static uword
bond_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
{
  vnet_main_t *vnm = vnet_get_main ();
  uword event_type, *event_data = 0;

  for (;;)
    {
      uword i;

      vlib_process_wait_for_event (vm);
      event_type = vlib_process_get_events (vm, &event_data);
      ASSERT (event_type == BOND_SEND_GARP_NA);

      /* Each event datum is a hw_if_index; walk its subinterfaces. */
      for (i = 0; i < vec_len (event_data); i++)
	vnet_hw_interface_walk_sw (vnm, (u32) event_data[i],
				   bond_active_interface_switch_cb, 0);

      /* Keep the vector allocated; just empty it for the next batch. */
      vec_reset_length (event_data);
    }
  return 0;
}

/* *INDENT-OFF* */
/* Register bond_process as a VPP process node so it runs as a
   cooperative thread in the main loop, handling GARP/NA events. */
VLIB_REGISTER_NODE (bond_process_node) = {
  .function = bond_process,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "bond-process",
};
/* *INDENT-ON* */

/* *INDENT-OFF* */
/* Device class registration for bond interfaces: wires the TX function,
   TX error strings, and the admin/L2/subif/trace callbacks implemented
   elsewhere in this file and in the bonding CLI/API code. */
VNET_DEVICE_CLASS (bond_dev_class) = {
  .name = "bond",
  .tx_function = bond_tx_fn,
  .tx_function_n_errors = BOND_TX_N_ERROR,
  .tx_function_error_strings = bond_tx_error_strings,
  .format_device_name = format_bond_interface_name,
  .set_l2_mode_function = bond_set_l2_mode_function,
  .admin_up_down_function = bond_interface_admin_up_down,
  .subif_add_del_function = bond_subif_add_del_function,
  .format_tx_trace = format_bond_tx_trace,
};

/* Generate per-CPU-architecture variants of bond_tx_fn and select the
   best one at runtime. */
VLIB_DEVICE_TX_FUNCTION_MULTIARCH (bond_dev_class, bond_tx_fn)
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */