path: root/src/vnet/vxlan/decap.c
blob: 764dfca4820e5aefbcebef8bde16a74abd6c7525 (plain)

#!/usr/bin/env python3
"""GSO functional tests"""

#
# Add tests for:
# - GSO
# - Verify that sending a jumbo frame without GSO enabled is handled correctly
# - Verify that sending a jumbo frame with GSO enabled is handled correctly
# - Verify that sending a jumbo frame with GSO enabled only on the ingress
#   interface is handled correctly
#
import unittest

from scapy.packet import Raw
from scapy.layers.inet6 import IPv6, Ether, IP, UDP, ICMPv6PacketTooBig
from scapy.layers.inet import TCP, ICMP
from scapy.layers.vxlan import VXLAN
from scapy.data import ETH_P_IP, ETH_P_IPV6, ETH_P_ARP

from framework import VppTestCase, VppTestRunner
from vpp_object import VppObject
from vpp_interface import VppInterface
from vpp_ip import DpoProto
from vpp_ip_route import VppIpRoute, VppRoutePath, FibPathProto
from socket import AF_INET, AF_INET6, inet_pton
from util import reassemble4


""" Test_gso is a subclass of VPPTestCase classes.
    GSO tests.
"""


class TestGSO(VppTestCase):
    """ GSO Test Case """

    def __init__(self, *args):
        VppTestCase.__init__(self, *args)

    @classmethod
    def setUpClass(cls):
        super(TestGSO, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(TestGSO, cls).tearDownClass()

    def setUp(self):
        super(TestGSO, self).setUp()

    def tearDown(self):
        super(TestGSO, self).tearDown()
        if not self.vpp_dead:
            for i in self.pg_interfaces:
                i.unconfig_ip4()
                i.unconfig_ip6()
                i.admin_down()

    def test_gso(self):
        """ GSO test """
        #
        # Send a jumbo frame with GSO disabled and the DF bit set
        #
        self.create_pg_interfaces(range(2))
        for i in self.pg_interfaces:
            i.admin_up()
            i.config_ip4()
            i.config_ip6()
            i.disable_ipv6_ra()
            i.resolve_arp()
            i.resolve_ndp()

        p4 = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
              IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4,
                 flags='DF') /
              TCP(sport=1234, dport=1234) /
              Raw(b'\xa5' * 65200))

        rxs = self.send_and_expect(self.pg0, [p4], self.pg0)

        for rx in rxs:
            self.assertEqual(rx[Ether].src, self.pg0.local_mac)
            self.assertEqual(rx[Ether].dst, self.pg0.remote_mac)
            self.assertEqual(rx[IP].src, self.pg0.local_ip4)
            self.assertEqual(rx[IP].dst, self.pg0.remote_ip4)
            self.assertEqual(rx[ICMP].type, 3)  # "dest-unreach"
            self.assertEqual(rx[ICMP].code, 4)  # "fragmentation-needed"

        #
        # Send a jumbo frame with GSO enabled and the DF bit set;
        # both input and output interfaces support GSO
        #
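        # (The extra arguments to create_pg_interfaces below presumably
        # create these interfaces with GSO enabled and a gso_size of 1460.)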
        self.create_pg_interfaces(range(2, 4), 1, 1460)
        for i in self.pg_interfaces:
            i.admin_up()
            i.config_ip4()
            i.config_ip6()
            i.disable_ipv6_ra()
            i.resolve_arp()
            i.resolve_ndp()

        p41 = (Ether(src=self.pg2.remote_mac, dst=self.pg2.local_mac) /
               IP(src=self.pg2.remote_ip4, dst=self.pg3.remote_ip4,
                  flags='DF') /
               TCP(sport=1234, dport=1234) /
               Raw(b'\xa5' * 65200))

        rxs = self.send_and_expect(self.pg2, [p41], self.pg3)

        for rx in rxs:
            self.assertEqual(rx[Ether].src, self.pg3.local_mac)
            self.assertEqual(rx[Ether].dst, self.pg3.remote_mac)
            self.assertEqual(rx[IP].src, self.pg2.remote_ip4)
            self.assertEqual(rx[IP].dst, self.pg3.remote_ip4)
            self.assertEqual(rx[IP].len, 65240)  # 65200 + 20 (IP) + 20 (TCP)
            self.assertEqual(rx[TCP].sport, 1234)
            self.assertEqual(rx[TCP].dport, 1234)

        #
        # Send a jumbo frame with GSO enabled only on the input interface
        # and the DF bit set. The GSO packet will be chunked into segments
        # of gso_size payload bytes.
        #
        self.create_pg_interfaces(range(4, 5))
        for i in self.pg_interfaces:
            i.admin_up()
            i.config_ip4()
            i.config_ip6()
            i.disable_ipv6_ra()
            i.resolve_arp()
            i.resolve_ndp()

        self.vapi.feature_gso_enable_disable(self.pg4.sw_if_index)
        p42 = (Ether(src=self.pg2.remote_mac, dst=self.pg2.local_mac) /
               IP(src=self.pg2.remote_ip4, dst=self.pg4.remote_ip4,
                  flags='DF') /
               TCP(sport=1234, dport=1234) /
               Raw(b'\xa5' * 65200))

        rxs = self.send_and_expect(self.pg2, [p42], self.pg4, 45)
        size = 0
        for rx in rxs:
            self.assertEqual(rx[Ether].src, self.pg4.local_mac)
            self.assertEqual(rx[Ether].dst, self.pg4.remote_mac)
            self.assertEqual(rx[IP].src, self.pg2.remote_ip4)
            self.assertEqual(rx[IP].dst, self.pg4.remote_ip4)
            self.assertEqual(rx[TCP].sport, 1234)
            self.assertEqual(rx[TCP].dport, 1234)

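        # The last chunk's TCP sequence number equals the payload bytes sent
        # before it (the first chunk starts at seq 0); adding that chunk's
        # own payload (IP length minus the 20-byte IP and TCP headers) gives
        # the total payload, which must equal the original 65200 bytes.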
        size = rxs[44][TCP].seq + rxs[44][IP].len - 20 - 20
        self.assertEqual(size, 65200)

        #
        # Send a jumbo frame with GSO enabled only on the input interface
        # and the DF bit unset. The GSO packet will be fragmented.
        #
        self.vapi.sw_interface_set_mtu(self.pg1.sw_if_index, [576, 0, 0, 0])

        p43 = (Ether(src=self.pg2.remote_mac, dst=self.pg2.local_mac) /
               IP(src=self.pg2.remote_ip4, dst=self.pg1.remote_ip4) /
               TCP(sport=1234, dport=1234) /
               Raw(b'\xa5' * 65200))

        rxs = self.send_and_expect(self.pg2, [p43], self.pg1, 119)

/*
 * decap.c: vxlan tunnel decap packet processing
 *
 * Copyright (c) 2013 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/vxlan/vxlan.h>

#ifndef CLIB_MARCH_VARIANT
vlib_node_registration_t vxlan4_input_node;
vlib_node_registration_t vxlan6_input_node;
#endif

typedef struct
{
  u32 next_index;
  u32 tunnel_index;
  u32 error;
  u32 vni;
} vxlan_rx_trace_t;

static u8 *
format_vxlan_rx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_rx_trace_t *t = va_arg (*args, vxlan_rx_trace_t *);

  if (t->tunnel_index == ~0)
    return format (s, "VXLAN decap error - tunnel for vni %d does not exist",
		   t->vni);
  return format (s, "VXLAN decap from vxlan_tunnel%d vni %d next %d error %d",
		 t->tunnel_index, t->vni, t->next_index, t->error);
}

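/* Return the FIB index to use for the tunnel lookup: if an earlier node has
 * stored a FIB index in sw_if_index[VLIB_TX], use it directly; otherwise map
 * the RX sw_if_index through the per-AF fib_index_by_sw_if_index table. */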
always_inline u32
buf_fib_index (vlib_buffer_t * b, u32 is_ip4)
{
  u32 sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_TX];
  if (sw_if_index != (u32) ~ 0)
    return sw_if_index;

  u32 *fib_index_by_sw_if_index = is_ip4 ?
    ip4_main.fib_index_by_sw_if_index : ip6_main.fib_index_by_sw_if_index;
  sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];

  return vec_elt (fib_index_by_sw_if_index, sw_if_index);
}

typedef vxlan4_tunnel_key_t last_tunnel_cache4;

static const vxlan_decap_info_t decap_not_found = {
  .sw_if_index = ~0,
  .next_index = VXLAN_INPUT_NEXT_DROP,
  .error = VXLAN_ERROR_NO_SUCH_TUNNEL
};

static const vxlan_decap_info_t decap_bad_flags = {
  .sw_if_index = ~0,
  .next_index = VXLAN_INPUT_NEXT_DROP,
  .error = VXLAN_ERROR_BAD_FLAGS
};

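/* Look up decap info for an IPv4 VXLAN packet.  The unicast key is
 * (DIP, SIP, encap FIB index, VNI); a one-entry cache of the last hit is
 * consulted before the bihash.  If the lookup misses and the DIP is a
 * multicast group, fall back to a two-step lookup: the mcast entry keyed by
 * the group address, then the unicast entry keyed by the mcast tunnel's
 * local (src) IP.  Multicast hits do not update the cache. */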
always_inline vxlan_decap_info_t
vxlan4_find_tunnel (vxlan_main_t * vxm, last_tunnel_cache4 * cache,
		    u32 fib_index, ip4_header_t * ip4_0,
		    vxlan_header_t * vxlan0, u32 * stats_sw_if_index)
{
  if (PREDICT_FALSE (vxlan0->flags != VXLAN_FLAGS_I))
    return decap_bad_flags;

  /* Make sure a VXLAN tunnel exists for the packet's S/D IP, VRF, and VNI */
  u32 dst = ip4_0->dst_address.as_u32;
  u32 src = ip4_0->src_address.as_u32;
  vxlan4_tunnel_key_t key4 = {
    .key[0] = ((u64) dst << 32) | src,
    .key[1] = ((u64) fib_index << 32) | vxlan0->vni_reserved,
  };

  if (PREDICT_TRUE
      (key4.key[0] == cache->key[0] && key4.key[1] == cache->key[1]))
    {
      /* cache hit */
      vxlan_decap_info_t di = {.as_u64 = cache->value };
      *stats_sw_if_index = di.sw_if_index;
      return di;
    }

  int rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_tunnel_by_key, &key4);
  if (PREDICT_TRUE (rv == 0))
    {
      *cache = key4;
      vxlan_decap_info_t di = {.as_u64 = key4.value };
      *stats_sw_if_index = di.sw_if_index;
      return di;
    }

  /* try multicast */
  if (PREDICT_TRUE (!ip4_address_is_multicast (&ip4_0->dst_address)))
    return decap_not_found;

  /* search for mcast decap info by mcast address */
  key4.key[0] = dst;
  rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_tunnel_by_key, &key4);
  if (rv != 0)
    return decap_not_found;

  /* search for unicast tunnel using the mcast tunnel local(src) ip */
  vxlan_decap_info_t mdi = {.as_u64 = key4.value };
  key4.key[0] = ((u64) mdi.local_ip.as_u32 << 32) | src;
  rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_tunnel_by_key, &key4);
  if (PREDICT_FALSE (rv != 0))
    return decap_not_found;

  /* mcast traffic does not update the cache */
  *stats_sw_if_index = mdi.sw_if_index;
  vxlan_decap_info_t di = {.as_u64 = key4.value };
  return di;
}

typedef vxlan6_tunnel_key_t last_tunnel_cache6;

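/* IPv6 variant: the key is (SIP, encap FIB index, VNI) with a one-entry
 * cache.  The packet DIP must match the tunnel's local (src) address;
 * otherwise, for a multicast DIP, a second lookup keyed by DIP and VNI
 * locates the mcast tunnel used for stats. */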
always_inline vxlan_decap_info_t
vxlan6_find_tunnel (vxlan_main_t * vxm, last_tunnel_cache6 * cache,
		    u32 fib_index, ip6_header_t * ip6_0,
		    vxlan_header_t * vxlan0, u32 * stats_sw_if_index)
{
  if (PREDICT_FALSE (vxlan0->flags != VXLAN_FLAGS_I))
    return decap_bad_flags;

  /* Make sure a VXLAN tunnel exists for the packet's SIP and VNI */
  vxlan6_tunnel_key_t key6 = {
    .key[0] = ip6_0->src_address.as_u64[0],
    .key[1] = ip6_0->src_address.as_u64[1],
    .key[2] = (((u64) fib_index) << 32) | vxlan0->vni_reserved,
  };

  if (PREDICT_FALSE
      (clib_bihash_key_compare_24_8 (key6.key, cache->key) == 0))
    {
      int rv =
	clib_bihash_search_inline_24_8 (&vxm->vxlan6_tunnel_by_key, &key6);
      if (PREDICT_FALSE (rv != 0))
	return decap_not_found;

      *cache = key6;
    }
  vxlan_tunnel_t *t0 = pool_elt_at_index (vxm->tunnels, cache->value);

  /* Validate VXLAN tunnel SIP against packet DIP */
  if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address, &t0->src.ip6)))
    *stats_sw_if_index = t0->sw_if_index;
  else
    {
      /* try multicast */
      if (PREDICT_TRUE (!ip6_address_is_multicast (&ip6_0->dst_address)))
	return decap_not_found;

      /* Make sure an mcast VXLAN tunnel exists for the packet's DIP and VNI */
      key6.key[0] = ip6_0->dst_address.as_u64[0];
      key6.key[1] = ip6_0->dst_address.as_u64[1];
      int rv =
	clib_bihash_search_inline_24_8 (&vxm->vxlan6_tunnel_by_key, &key6);
      if (PREDICT_FALSE (rv != 0))
	return decap_not_found;

      vxlan_tunnel_t *mcast_t0 = pool_elt_at_index (vxm->tunnels, key6.value);
      *stats_sw_if_index = mcast_t0->sw_if_index;
    }

  vxlan_decap_info_t di = {
    .sw_if_index = t0->sw_if_index,
    .next_index = t0->decap_next_index,
  };
  return di;
}

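/* Main decap loop: process packets two at a time (with prefetch of the next
 * pair), then one at a time.  For each packet, pop the VXLAN header, find
 * the tunnel, set sw_if_index[VLIB_RX] to the tunnel interface so L2
 * learning works, update the combined RX counters, and enqueue to the
 * tunnel's decap next node; lookup failures are counted and dropped. */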
always_inline uword
vxlan_input (vlib_main_t * vm,
	     vlib_node_runtime_t * node,
	     vlib_frame_t * from_frame, u32 is_ip4)
{
  vxlan_main_t *vxm = &vxlan_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  vlib_combined_counter_main_t *rx_counter =
    im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX;
  last_tunnel_cache4 last4;
  last_tunnel_cache6 last6;
  u32 pkts_dropped = 0;
  u32 thread_index = vlib_get_thread_index ();

  if (is_ip4)
    clib_memset (&last4, 0xff, sizeof last4);
  else
    clib_memset (&last6, 0xff, sizeof last6);

  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_left_from = from_frame->n_vectors;

  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  u32 stats_if0 = ~0, stats_if1 = ~0;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  while (n_left_from >= 4)
    {
      /* Prefetch next iteration. */
      vlib_prefetch_buffer_header (b[2], LOAD);
      vlib_prefetch_buffer_header (b[3], LOAD);

      /* udp leaves current_data pointing at the vxlan header */
      void *cur0 = vlib_buffer_get_current (b[0]);
      void *cur1 = vlib_buffer_get_current (b[1]);
      vxlan_header_t *vxlan0 = cur0;
      vxlan_header_t *vxlan1 = cur1;


      ip4_header_t *ip4_0, *ip4_1;
      ip6_header_t *ip6_0, *ip6_1;
      if (is_ip4)
	{
	  ip4_0 = cur0 - sizeof (udp_header_t) - sizeof (ip4_header_t);
	  ip4_1 = cur1 - sizeof (udp_header_t) - sizeof (ip4_header_t);
	}
      else
	{
	  ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t);
	  ip6_1 = cur1 - sizeof (udp_header_t) - sizeof (ip6_header_t);
	}

      /* pop vxlan */
      vlib_buffer_advance (b[0], sizeof *vxlan0);
      vlib_buffer_advance (b[1], sizeof *vxlan1);

      u32 fi0 = buf_fib_index (b[0], is_ip4);
      u32 fi1 = buf_fib_index (b[1], is_ip4);

      vxlan_decap_info_t di0 = is_ip4 ?
	vxlan4_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan0, &stats_if0) :
	vxlan6_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan0, &stats_if0);
      vxlan_decap_info_t di1 = is_ip4 ?
	vxlan4_find_tunnel (vxm, &last4, fi1, ip4_1, vxlan1, &stats_if1) :
	vxlan6_find_tunnel (vxm, &last6, fi1, ip6_1, vxlan1, &stats_if1);

      /* Prefetch next iteration. */
      CLIB_PREFETCH (b[2]->data, CLIB_CACHE_LINE_BYTES, LOAD);
      CLIB_PREFETCH (b[3]->data, CLIB_CACHE_LINE_BYTES, LOAD);

      u32 len0 = vlib_buffer_length_in_chain (vm, b[0]);
      u32 len1 = vlib_buffer_length_in_chain (vm, b[1]);

      next[0] = di0.next_index;
      next[1] = di1.next_index;

      u8 any_error = di0.error | di1.error;
      if (PREDICT_TRUE (any_error == 0))
	{
	  /* Required to make the l2 tag push / pop code work on l2 subifs */
	  vnet_update_l2_len (b[0]);
	  vnet_update_l2_len (b[1]);
	  /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
	  vnet_buffer (b[0])->sw_if_index[VLIB_RX] = di0.sw_if_index;
	  vnet_buffer (b[1])->sw_if_index[VLIB_RX] = di1.sw_if_index;
	  vlib_increment_combined_counter (rx_counter, thread_index,
					   stats_if0, 1, len0);
	  vlib_increment_combined_counter (rx_counter, thread_index,
					   stats_if1, 1, len1);
	}
      else
	{
	  if (di0.error == 0)
	    {
	      vnet_update_l2_len (b[0]);
	      vnet_buffer (b[0])->sw_if_index[VLIB_RX] = di0.sw_if_index;
	      vlib_increment_combined_counter (rx_counter, thread_index,
					       stats_if0, 1, len0);
	    }
	  else
	    {
	      b[0]->error = node->errors[di0.error];
	      pkts_dropped++;
	    }

	  if (di1.error == 0)
	    {
	      vnet_update_l2_len (b[1]);
	      vnet_buffer (b[1])->sw_if_index[VLIB_RX] = di1.sw_if_index;
	      vlib_increment_combined_counter (rx_counter, thread_index,
					       stats_if1, 1, len1);
	    }
	  else
	    {
	      b[1]->error = node->errors[di1.error];
	      pkts_dropped++;
	    }
	}

      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  vxlan_rx_trace_t *tr =
	    vlib_add_trace (vm, node, b[0], sizeof (*tr));
	  tr->next_index = next[0];
	  tr->error = di0.error;
	  tr->tunnel_index = di0.sw_if_index == ~0 ?
	    ~0 : vxm->tunnel_index_by_sw_if_index[di0.sw_if_index];
	  tr->vni = vnet_get_vni (vxlan0);
	}
      if (PREDICT_FALSE (b[1]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  vxlan_rx_trace_t *tr =
	    vlib_add_trace (vm, node, b[1], sizeof (*tr));
	  tr->next_index = next[1];
	  tr->error = di1.error;
	  tr->tunnel_index = di1.sw_if_index == ~0 ?
	    ~0 : vxm->tunnel_index_by_sw_if_index[di1.sw_if_index];
	  tr->vni = vnet_get_vni (vxlan1);
	}
      b += 2;
      next += 2;
      n_left_from -= 2;
    }

  while (n_left_from > 0)
    {
      /* udp leaves current_data pointing at the vxlan header */
      void *cur0 = vlib_buffer_get_current (b[0]);
      vxlan_header_t *vxlan0 = cur0;
      ip4_header_t *ip4_0;
      ip6_header_t *ip6_0;
      if (is_ip4)
	ip4_0 = cur0 - sizeof (udp_header_t) - sizeof (ip4_header_t);
      else
	ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t);

      /* pop vxlan */
      vlib_buffer_advance (b[0], sizeof (*vxlan0));

      u32 fi0 = buf_fib_index (b[0], is_ip4);

      vxlan_decap_info_t di0 = is_ip4 ?
	vxlan4_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan0, &stats_if0) :
	vxlan6_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan0, &stats_if0);

      uword len0 = vlib_buffer_length_in_chain (vm, b[0]);

      next[0] = di0.next_index;

      /* Validate VXLAN tunnel encap-fib index against packet */
      if (di0.error == 0)
	{
	  /* Required to make the l2 tag push / pop code work on l2 subifs */
	  vnet_update_l2_len (b[0]);

	  /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
	  vnet_buffer (b[0])->sw_if_index[VLIB_RX] = di0.sw_if_index;

	  vlib_increment_combined_counter (rx_counter, thread_index,
					   stats_if0, 1, len0);
	}
      else
	{
	  b[0]->error = node->errors[di0.error];
	  pkts_dropped++;
	}

      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  vxlan_rx_trace_t *tr
	    = vlib_add_trace (vm, node, b[0], sizeof (*tr));
	  tr->next_index = next[0];
	  tr->error = di0.error;
	  tr->tunnel_index = di0.sw_if_index == ~0 ?
	    ~0 : vxm->tunnel_index_by_sw_if_index[di0.sw_if_index];
	  tr->vni = vnet_get_vni (vxlan0);
	}
      b += 1;
      next += 1;
      n_left_from -= 1;
    }
  vlib_buffer_enqueue_to_next (vm, node, from, nexts, from_frame->n_vectors);
  /* Do we still need this now that tunnel tx stats are kept? */
  u32 node_idx = is_ip4 ? vxlan4_input_node.index : vxlan6_input_node.index;
  vlib_node_increment_counter (vm, node_idx, VXLAN_ERROR_DECAPSULATED,
			       from_frame->n_vectors - pkts_dropped);

  return from_frame->n_vectors;
}

VLIB_NODE_FN (vxlan4_input_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return vxlan_input (vm, node, from_frame, /* is_ip4 */ 1);
}

VLIB_NODE_FN (vxlan6_input_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return vxlan_input (vm, node, from_frame, /* is_ip4 */ 0);
}

static char *vxlan_error_strings[] = {
#define vxlan_error(n,s) s,
#include <vnet/vxlan/vxlan_error.def>
#undef vxlan_error
};

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vxlan4_input_node) =
{
  .name = "vxlan4-input",
  .vector_size = sizeof (u32),
  .n_errors = VXLAN_N_ERROR,
  .error_strings = vxlan_error_strings,
  .n_next_nodes = VXLAN_INPUT_N_NEXT,
  .format_trace = format_vxlan_rx_trace,
  .next_nodes = {
#define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
    foreach_vxlan_input_next
#undef _
  },
};

VLIB_REGISTER_NODE (vxlan6_input_node) =
{
  .name = "vxlan6-input",
  .vector_size = sizeof (u32),
  .n_errors = VXLAN_N_ERROR,
  .error_strings = vxlan_error_strings,
  .n_next_nodes = VXLAN_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
    foreach_vxlan_input_next
#undef _
  },
  .format_trace = format_vxlan_rx_trace,
};
/* *INDENT-ON* */

typedef enum
{
  IP_VXLAN_BYPASS_NEXT_DROP,
  IP_VXLAN_BYPASS_NEXT_VXLAN,
  IP_VXLAN_BYPASS_N_NEXT,
} ip_vxlan_bypass_next_t;

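/* ip4/ip6 feature node: intercept UDP packets addressed to the VXLAN port
 * and to a local VTEP, verify the UDP length and checksum, advance past the
 * IP and UDP headers and hand the packets directly to vxlan4/6-input,
 * bypassing ip-local/udp-local.  All other packets continue along the
 * feature arc.  The last matching VTEP address is cached in addr4/addr6. */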
always_inline uword
ip_vxlan_bypass_inline (vlib_main_t * vm,
			vlib_node_runtime_t * node,
			vlib_frame_t * frame, u32 is_ip4)
{
  vxlan_main_t *vxm = &vxlan_main;
  u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
  vlib_node_runtime_t *error_node =
    vlib_node_get_runtime (vm, ip4_input_node.index);
  ip4_address_t addr4;		/* last IPv4 address matching a local VTEP address */
  ip6_address_t addr6;		/* last IPv6 address matching a local VTEP address */

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  if (node->flags & VLIB_NODE_FLAG_TRACE)
    ip4_forward_next_trace (vm, node, frame, VLIB_TX);

  if (is_ip4)
    addr4.data_u32 = ~0;
  else
    ip6_address_set_zero (&addr6);

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  vlib_buffer_t *b0, *b1;
	  ip4_header_t *ip40, *ip41;
	  ip6_header_t *ip60, *ip61;
	  udp_header_t *udp0, *udp1;
	  u32 bi0, ip_len0, udp_len0, flags0, next0;
	  u32 bi1, ip_len1, udp_len1, flags1, next1;
	  i32 len_diff0, len_diff1;
	  u8 error0, good_udp0, proto0;
	  u8 error1, good_udp1, proto1;

	  /* Prefetch next iteration. */
	  {
	    vlib_buffer_t *p2, *p3;

	    p2 = vlib_get_buffer (vm, from[2]);
	    p3 = vlib_get_buffer (vm, from[3]);

	    vlib_prefetch_buffer_header (p2, LOAD);
	    vlib_prefetch_buffer_header (p3, LOAD);

	    CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
	    CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
	  }

	  bi0 = to_next[0] = from[0];
	  bi1 = to_next[1] = from[1];
	  from += 2;
	  n_left_from -= 2;
	  to_next += 2;
	  n_left_to_next -= 2;

	  b0 = vlib_get_buffer (vm, bi0);
	  b1 = vlib_get_buffer (vm, bi1);
	  if (is_ip4)
	    {
	      ip40 = vlib_buffer_get_current (b0);
	      ip41 = vlib_buffer_get_current (b1);
	    }
	  else
	    {
	      ip60 = vlib_buffer_get_current (b0);
	      ip61 = vlib_buffer_get_current (b1);
	    }

	  /* Setup packet for next IP feature */
	  vnet_feature_next (&next0, b0);
	  vnet_feature_next (&next1, b1);

	  if (is_ip4)
	    {
	      /* Treat IP frag packets as "experimental" protocol for now
	         until support of IP frag reassembly is implemented */
	      proto0 = ip4_is_fragment (ip40) ? 0xfe : ip40->protocol;
	      proto1 = ip4_is_fragment (ip41) ? 0xfe : ip41->protocol;
	    }
	  else
	    {
	      proto0 = ip60->protocol;
	      proto1 = ip61->protocol;
	    }

	  /* Process packet 0 */
	  if (proto0 != IP_PROTOCOL_UDP)
	    goto exit0;		/* not UDP packet */

	  if (is_ip4)
	    udp0 = ip4_next_header (ip40);
	  else
	    udp0 = ip6_next_header (ip60);

	  if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
	    goto exit0;		/* not VXLAN packet */

	  /* Validate DIP against VTEPs */
	  if (is_ip4)
	    {
	      if (addr4.as_u32 != ip40->dst_address.as_u32)
		{
		  if (!hash_get (vxm->vtep4, ip40->dst_address.as_u32))
		    goto exit0;	/* no local VTEP for VXLAN packet */
		  addr4 = ip40->dst_address;
		}
	    }
	  else
	    {
	      if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
		{
		  if (!hash_get_mem (vxm->vtep6, &ip60->dst_address))
		    goto exit0;	/* no local VTEP for VXLAN packet */
		  addr6 = ip60->dst_address;
		}
	    }

	  flags0 = b0->flags;
	  good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

	  /* Don't verify UDP checksum for packets with explicit zero checksum. */
	  good_udp0 |= udp0->checksum == 0;

	  /* Verify UDP length */
	  if (is_ip4)
	    ip_len0 = clib_net_to_host_u16 (ip40->length);
	  else
	    ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
	  udp_len0 = clib_net_to_host_u16 (udp0->length);
	  len_diff0 = ip_len0 - udp_len0;

	  /* Verify UDP checksum */
	  if (PREDICT_FALSE (!good_udp0))
	    {
	      if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
		{
		  if (is_ip4)
		    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
		  else
		    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
		  good_udp0 =
		    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
		}
	    }

	  if (is_ip4)
	    {
	      error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
	      error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
	    }
	  else
	    {
	      error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
	      error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
	    }

	  next0 = error0 ?
	    IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
	  b0->error = error0 ? error_node->errors[error0] : 0;

	  /* vxlan-input node expects current_data at the VXLAN header */
	  if (is_ip4)
	    vlib_buffer_advance (b0,
				 sizeof (ip4_header_t) +
				 sizeof (udp_header_t));
	  else
	    vlib_buffer_advance (b0,
				 sizeof (ip6_header_t) +
				 sizeof (udp_header_t));

	exit0:
	  /* Process packet 1 */
	  if (proto1 != IP_PROTOCOL_UDP)
	    goto exit1;		/* not UDP packet */

	  if (is_ip4)
	    udp1 = ip4_next_header (ip41);
	  else
	    udp1 = ip6_next_header (ip61);

	  if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
	    goto exit1;		/* not VXLAN packet */

	  /* Validate DIP against VTEPs */
	  if (is_ip4)
	    {
	      if (addr4.as_u32 != ip41->dst_address.as_u32)
		{
		  if (!hash_get (vxm->vtep4, ip41->dst_address.as_u32))
		    goto exit1;	/* no local VTEP for VXLAN packet */
		  addr4 = ip41->dst_address;
		}
	    }
	  else
	    {
	      if (!ip6_address_is_equal (&addr6, &ip61->dst_address))
		{
		  if (!hash_get_mem (vxm->vtep6, &ip61->dst_address))
		    goto exit1;	/* no local VTEP for VXLAN packet */
		  addr6 = ip61->dst_address;
		}
	    }

	  flags1 = b1->flags;
	  good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

	  /* Don't verify UDP checksum for packets with explicit zero checksum. */
	  good_udp1 |= udp1->checksum == 0;

	  /* Verify UDP length */
	  if (is_ip4)
	    ip_len1 = clib_net_to_host_u16 (ip41->length);
	  else
	    ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
	  udp_len1 = clib_net_to_host_u16 (udp1->length);
	  len_diff1 = ip_len1 - udp_len1;

	  /* Verify UDP checksum */
	  if (PREDICT_FALSE (!good_udp1))
	    {
	      if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
		{
		  if (is_ip4)
		    flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
		  else
		    flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
		  good_udp1 =
		    (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
		}
	    }

	  if (is_ip4)
	    {
	      error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
	      error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
	    }
	  else
	    {
	      error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
	      error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
	    }

	  next1 = error1 ?
	    IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
	  b1->error = error1 ? error_node->errors[error1] : 0;

	  /* vxlan-input node expects current_data at the VXLAN header */
	  if (is_ip4)
	    vlib_buffer_advance (b1,
				 sizeof (ip4_header_t) +
				 sizeof (udp_header_t));
	  else
	    vlib_buffer_advance (b1,
				 sizeof (ip6_header_t) +
				 sizeof (udp_header_t));

	exit1:
	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, bi1, next0, next1);
	}

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  vlib_buffer_t *b0;
	  ip4_header_t *ip40;
	  ip6_header_t *ip60;
	  udp_header_t *udp0;
	  u32 bi0, ip_len0, udp_len0, flags0, next0;
	  i32 len_diff0;
	  u8 error0, good_udp0, proto0;

	  bi0 = to_next[0] = from[0];
	  from += 1;
	  n_left_from -= 1;
	  to_next += 1;
	  n_left_to_next -= 1;

	  b0 = vlib_get_buffer (vm, bi0);
	  if (is_ip4)
	    ip40 = vlib_buffer_get_current (b0);
	  else
	    ip60 = vlib_buffer_get_current (b0);

	  /* Setup packet for next IP feature */
	  vnet_feature_next (&next0, b0);

	  if (is_ip4)
	    /* Treat IP4 frag packets as "experimental" protocol for now
	       until support of IP frag reassembly is implemented */
	    proto0 = ip4_is_fragment (ip40) ? 0xfe : ip40->protocol;
	  else
	    proto0 = ip60->protocol;

	  if (proto0 != IP_PROTOCOL_UDP)
	    goto exit;		/* not UDP packet */

	  if (is_ip4)
	    udp0 = ip4_next_header (ip40);
	  else
	    udp0 = ip6_next_header (ip60);

	  if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
	    goto exit;		/* not VXLAN packet */

	  /* Validate DIP against VTEPs */
	  if (is_ip4)
	    {
	      if (addr4.as_u32 != ip40->dst_address.as_u32)
		{
		  if (!hash_get (vxm->vtep4, ip40->dst_address.as_u32))
		    goto exit;	/* no local VTEP for VXLAN packet */
		  addr4 = ip40->dst_address;
		}
	    }
	  else
	    {
	      if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
		{
		  if (!hash_get_mem (vxm->vtep6, &ip60->dst_address))
		    goto exit;	/* no local VTEP for VXLAN packet */
		  addr6 = ip60->dst_address;
		}
	    }

	  flags0 = b0->flags;
	  good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;

	  /* Don't verify UDP checksum for packets with explicit zero checksum. */
	  good_udp0 |= udp0->checksum == 0;

	  /* Verify UDP length */
	  if (is_ip4)
	    ip_len0 = clib_net_to_host_u16 (ip40->length);
	  else
	    ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
	  udp_len0 = clib_net_to_host_u16 (udp0->length);
	  len_diff0 = ip_len0 - udp_len0;

	  /* Verify UDP checksum */
	  if (PREDICT_FALSE (!good_udp0))
	    {
	      if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
		{
		  if (is_ip4)
		    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
		  else
		    flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
		  good_udp0 =
		    (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
		}
	    }

	  if (is_ip4)
	    {
	      error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
	      error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
	    }
	  else
	    {
	      error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
	      error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
	    }

	  next0 = error0 ?
	    IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
	  b0->error = error0 ? error_node->errors[error0] : 0;

	  /* vxlan-input node expects current_data at the VXLAN header */
	  if (is_ip4)
	    vlib_buffer_advance (b0,
				 sizeof (ip4_header_t) +
				 sizeof (udp_header_t));
	  else
	    vlib_buffer_advance (b0,
				 sizeof (ip6_header_t) +
				 sizeof (udp_header_t));

	exit:
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

VLIB_NODE_FN (ip4_vxlan_bypass_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * frame)
{
  return ip_vxlan_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_vxlan_bypass_node) =
{
  .name = "ip4-vxlan-bypass",
  .vector_size = sizeof (u32),
  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
	  [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
	  [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan4-input",
  },
  .format_buffer = format_ip4_header,
  .format_trace = format_ip4_forward_next_trace,
};

/* *INDENT-ON* */

/* Dummy init function to get us linked in. */
static clib_error_t *
ip4_vxlan_bypass_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (ip4_vxlan_bypass_init);

VLIB_NODE_FN (ip6_vxlan_bypass_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * frame)
{
  return ip_vxlan_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip6_vxlan_bypass_node) =
{
  .name = "ip6-vxlan-bypass",
  .vector_size = sizeof (u32),
  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
  .next_nodes = {
    [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
    [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan6-input",
  },
  .format_buffer = format_ip6_header,
  .format_trace = format_ip6_forward_next_trace,
};

/* *INDENT-ON* */

/* Dummy init function to get us linked in. */
static clib_error_t *
ip6_vxlan_bypass_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (ip6_vxlan_bypass_init);

#define foreach_vxlan_flow_input_next        \
_(DROP, "error-drop")                           \
_(L2_INPUT, "l2-input")

typedef enum
{
#define _(s,n) VXLAN_FLOW_NEXT_##s,
  foreach_vxlan_flow_input_next
#undef _
    VXLAN_FLOW_N_NEXT,
} vxlan_flow_input_next_t;

#define foreach_vxlan_flow_error					\
  _(NONE, "no error")							\
  _(IP_CHECKSUM_ERROR, "Rx ip checksum errors")				\
  _(IP_HEADER_ERROR, "Rx ip header errors")				\
  _(UDP_CHECKSUM_ERROR, "Rx udp checksum errors")				\
  _(UDP_LENGTH_ERROR, "Rx udp length errors")

typedef enum
{
#define _(f,s) VXLAN_FLOW_ERROR_##f,
  foreach_vxlan_flow_error
#undef _
    VXLAN_FLOW_N_ERROR,
} vxlan_flow_error_t;

static char *vxlan_flow_error_strings[] = {
#define _(n,s) s,
  foreach_vxlan_flow_error
#undef _
};


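/* Software UDP checksum validation for the flow-offload path: if the NIC has
 * not computed the L4 checksum, rewind the buffer over the IP/UDP/VXLAN
 * headers, validate with ip4_tcp_udp_validate_checksum, then restore the
 * buffer position.  Returns non-zero if the checksum is correct. */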
static_always_inline u8
vxlan_validate_udp_csum (vlib_main_t * vm, vlib_buffer_t * b)
{
  u32 flags = b->flags;
  enum
  { offset =
      sizeof (ip4_header_t) + sizeof (udp_header_t) + sizeof (vxlan_header_t),
  };

  /* Verify UDP checksum */
  if ((flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
    {
      vlib_buffer_advance (b, -offset);
      flags = ip4_tcp_udp_validate_checksum (vm, b);
      vlib_buffer_advance (b, offset);
    }

  return (flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
}

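/* Fast check based on the RX checksum flags: returns non-zero only when the
 * NIC did not mark the UDP checksum correct and the checksum is not the
 * explicit zero value, i.e. when software validation is still required. */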
static_always_inline u8
vxlan_check_udp_csum (vlib_main_t * vm, vlib_buffer_t * b)
{
  ip4_vxlan_header_t *hdr = vlib_buffer_get_current (b) - sizeof *hdr;
  udp_header_t *udp = &hdr->udp;
  /* Don't verify UDP checksum for packets with explicit zero checksum. */
  u8 good_csum = (b->flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0 ||
    udp->checksum == 0;

  return !good_csum;
}

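/* Sanity-check the outer IPv4 header sitting just before the current data:
 * fail if the IP total length exceeds the data actually present, the TTL is
 * zero, or the header is not a plain 20-byte IPv4 header (0x45). */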
static_always_inline u8
vxlan_check_ip (vlib_buffer_t * b, u16 payload_len)
{
  ip4_vxlan_header_t *hdr = vlib_buffer_get_current (b) - sizeof *hdr;
  u16 ip_len = clib_net_to_host_u16 (hdr->ip4.length);
  u16 expected = payload_len + sizeof *hdr;
  return ip_len > expected || hdr->ip4.ttl == 0
    || hdr->ip4.ip_version_and_header_length != 0x45;
}

static_always_inline u8
vxlan_check_ip_udp_len (vlib_buffer_t * b)
{
  ip4_vxlan_header_t *hdr = vlib_buffer_get_current (b) - sizeof *hdr;
  u16 ip_len = clib_net_to_host_u16 (hdr->ip4.length);
  u16 udp_len = clib_net_to_host_u16 (hdr->udp.length);
  return udp_len > ip_len;
}

static_always_inline u8
vxlan_err_code (u8 ip_err0, u8 udp_err0, u8 csum_err0)
{
  u8 error0 = VXLAN_FLOW_ERROR_NONE;
  if (ip_err0)
    error0 = VXLAN_FLOW_ERROR_IP_HEADER_ERROR;
  if (udp_err0)
    error0 = VXLAN_FLOW_ERROR_UDP_LENGTH_ERROR;
  if (csum_err0)
    error0 = VXLAN_FLOW_ERROR_UDP_CHECKSUM_ERROR;
  return error0;
}

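/* RX flow-offload input node: packets arrive with b->flow_id already set, so
 * the tunnel is located by index (flow_id - flow_id_start) with no bihash
 * lookup.  Only outer IP/UDP sanity and checksum checks are performed before
 * the packets are counted and handed to l2-input (or dropped on error). */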
VLIB_NODE_FN (vxlan4_flow_input_node) (vlib_main_t * vm,
				       vlib_node_runtime_t * node,
				       vlib_frame_t * f)
{
  enum
  { payload_offset = sizeof (ip4_vxlan_header_t) };

  vxlan_main_t *vxm = &vxlan_main;
  vnet_interface_main_t *im = &vnet_main.interface_main;
  vlib_combined_counter_main_t *rx_counter[VXLAN_FLOW_N_NEXT] = {
    [VXLAN_FLOW_NEXT_DROP] =
      im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_DROP,
    [VXLAN_FLOW_NEXT_L2_INPUT] =
      im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
  };
  u32 thread_index = vlib_get_thread_index ();

  u32 *from = vlib_frame_vector_args (f);
  u32 n_left_from = f->n_vectors;
  u32 next_index = VXLAN_FLOW_NEXT_L2_INPUT;

  while (n_left_from > 0)
    {
      u32 n_left_to_next, *to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 3 && n_left_to_next > 3)
	{
	  u32 bi0 = to_next[0] = from[0];
	  u32 bi1 = to_next[1] = from[1];
	  u32 bi2 = to_next[2] = from[2];
	  u32 bi3 = to_next[3] = from[3];
	  from += 4;
	  n_left_from -= 4;
	  to_next += 4;
	  n_left_to_next -= 4;

	  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
	  vlib_buffer_t *b1 = vlib_get_buffer (vm, bi1);
	  vlib_buffer_t *b2 = vlib_get_buffer (vm, bi2);
	  vlib_buffer_t *b3 = vlib_get_buffer (vm, bi3);

	  vlib_buffer_advance (b0, payload_offset);
	  vlib_buffer_advance (b1, payload_offset);
	  vlib_buffer_advance (b2, payload_offset);
	  vlib_buffer_advance (b3, payload_offset);

	  u16 len0 = vlib_buffer_length_in_chain (vm, b0);
	  u16 len1 = vlib_buffer_length_in_chain (vm, b1);
	  u16 len2 = vlib_buffer_length_in_chain (vm, b2);
	  u16 len3 = vlib_buffer_length_in_chain (vm, b3);

	  u32 next0 = VXLAN_FLOW_NEXT_L2_INPUT, next1 =
	    VXLAN_FLOW_NEXT_L2_INPUT, next2 =
	    VXLAN_FLOW_NEXT_L2_INPUT, next3 = VXLAN_FLOW_NEXT_L2_INPUT;

	  u8 ip_err0 = vxlan_check_ip (b0, len0);
	  u8 ip_err1 = vxlan_check_ip (b1, len1);
	  u8 ip_err2 = vxlan_check_ip (b2, len2);
	  u8 ip_err3 = vxlan_check_ip (b3, len3);
	  u8 ip_err = ip_err0 | ip_err1 | ip_err2 | ip_err3;

	  u8 udp_err0 = vxlan_check_ip_udp_len (b0);
	  u8 udp_err1 = vxlan_check_ip_udp_len (b1);
	  u8 udp_err2 = vxlan_check_ip_udp_len (b2);
	  u8 udp_err3 = vxlan_check_ip_udp_len (b3);
	  u8 udp_err = udp_err0 | udp_err1 | udp_err2 | udp_err3;

	  u8 csum_err0 = vxlan_check_udp_csum (vm, b0);
	  u8 csum_err1 = vxlan_check_udp_csum (vm, b1);
	  u8 csum_err2 = vxlan_check_udp_csum (vm, b2);
	  u8 csum_err3 = vxlan_check_udp_csum (vm, b3);
	  u8 csum_err = csum_err0 | csum_err1 | csum_err2 | csum_err3;

	  if (PREDICT_FALSE (csum_err))
	    {
	      if (csum_err0)
		csum_err0 = !vxlan_validate_udp_csum (vm, b0);
	      if (csum_err1)
		csum_err1 = !vxlan_validate_udp_csum (vm, b1);
	      if (csum_err2)
		csum_err2 = !vxlan_validate_udp_csum (vm, b2);
	      if (csum_err3)
		csum_err3 = !vxlan_validate_udp_csum (vm, b3);
	      csum_err = csum_err0 | csum_err1 | csum_err2 | csum_err3;
	    }

	  if (PREDICT_FALSE (ip_err || udp_err || csum_err))
	    {
	      if (ip_err0 || udp_err0 || csum_err0)
		{
		  next0 = VXLAN_FLOW_NEXT_DROP;
		  u8 error0 = vxlan_err_code (ip_err0, udp_err0, csum_err0);
		  b0->error = node->errors[error0];
		}
	      if (ip_err1 || udp_err1 || csum_err1)
		{
		  next1 = VXLAN_FLOW_NEXT_DROP;
		  u8 error1 = vxlan_err_code (ip_err1, udp_err1, csum_err1);
		  b1->error = node->errors[error1];
		}
	      if (ip_err2 || udp_err2 || csum_err2)
		{
		  next2 = VXLAN_FLOW_NEXT_DROP;
		  u8 error2 = vxlan_err_code (ip_err2, udp_err2, csum_err2);
		  b2->error = node->errors[error2];
		}
	      if (ip_err3 || udp_err3 || csum_err3)
		{
		  next3 = VXLAN_FLOW_NEXT_DROP;
		  u8 error3 = vxlan_err_code (ip_err3, udp_err3, csum_err3);
		  b3->error = node->errors[error3];
		}
	    }

	  vnet_update_l2_len (b0);
	  vnet_update_l2_len (b1);
	  vnet_update_l2_len (b2);
	  vnet_update_l2_len (b3);

	  ASSERT (b0->flow_id != 0);
	  ASSERT (b1->flow_id != 0);
	  ASSERT (b2->flow_id != 0);
	  ASSERT (b3->flow_id != 0);

	  u32 t_index0 = b0->flow_id - vxm->flow_id_start;
	  u32 t_index1 = b1->flow_id - vxm->flow_id_start;
	  u32 t_index2 = b2->flow_id - vxm->flow_id_start;
	  u32 t_index3 = b3->flow_id - vxm->flow_id_start;

	  vxlan_tunnel_t *t0 = &vxm->tunnels[t_index0];
	  vxlan_tunnel_t *t1 = &vxm->tunnels[t_index1];
	  vxlan_tunnel_t *t2 = &vxm->tunnels[t_index2];
	  vxlan_tunnel_t *t3 = &vxm->tunnels[t_index3];

	  /* flow id consumed */
	  b0->flow_id = 0;
	  b1->flow_id = 0;
	  b2->flow_id = 0;
	  b3->flow_id = 0;

	  u32 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX] =
	    t0->sw_if_index;
	  u32 sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX] =
	    t1->sw_if_index;
	  u32 sw_if_index2 = vnet_buffer (b2)->sw_if_index[VLIB_RX] =
	    t2->sw_if_index;
	  u32 sw_if_index3 = vnet_buffer (b3)->sw_if_index[VLIB_RX] =
	    t3->sw_if_index;

	  vlib_increment_combined_counter (rx_counter[next0], thread_index,
					   sw_if_index0, 1, len0);
	  vlib_increment_combined_counter (rx_counter[next1], thread_index,
					   sw_if_index1, 1, len1);
	  vlib_increment_combined_counter (rx_counter[next2], thread_index,
					   sw_if_index2, 1, len2);
	  vlib_increment_combined_counter (rx_counter[next3], thread_index,
					   sw_if_index3, 1, len3);

	  u32 flags = b0->flags | b1->flags | b2->flags | b3->flags;

	  if (PREDICT_FALSE (flags & VLIB_BUFFER_IS_TRACED))
	    {
	      if (b0->flags & VLIB_BUFFER_IS_TRACED)
		{
		  vxlan_rx_trace_t *tr =
		    vlib_add_trace (vm, node, b0, sizeof *tr);
		  u8 error0 = vxlan_err_code (ip_err0, udp_err0, csum_err0);
		  tr->next_index = next0;
		  tr->error = error0;
		  tr->tunnel_index = t_index0;
		  tr->vni = t0->vni;
		}
	      if (b1->flags & VLIB_BUFFER_IS_TRACED)
		{
		  vxlan_rx_trace_t *tr =
		    vlib_add_trace (vm, node, b1, sizeof *tr);
		  u8 error1 = vxlan_err_code (ip_err1, udp_err1, csum_err1);
		  tr->next_index = next1;
		  tr->error = error1;
		  tr->tunnel_index = t_index1;
		  tr->vni = t1->vni;
		}
	      if (b2->flags & VLIB_BUFFER_IS_TRACED)
		{
		  vxlan_rx_trace_t *tr =
		    vlib_add_trace (vm, node, b2, sizeof *tr);
		  u8 error2 = vxlan_err_code (ip_err2, udp_err2, csum_err2);
		  tr->next_index = next2;
		  tr->error = error2;
		  tr->tunnel_index = t_index2;
		  tr->vni = t2->vni;
		}
	      if (b3->flags & VLIB_BUFFER_IS_TRACED)
		{
		  vxlan_rx_trace_t *tr =
		    vlib_add_trace (vm, node, b3, sizeof *tr);
		  u8 error3 = vxlan_err_code (ip_err3, udp_err3, csum_err3);
		  tr->next_index = next3;
		  tr->error = error3;
		  tr->tunnel_index = t_index3;
		  tr->vni = t3->vni;
		}
	    }
	  vlib_validate_buffer_enqueue_x4
	    (vm, node, next_index, to_next, n_left_to_next,
	     bi0, bi1, bi2, bi3, next0, next1, next2, next3);
	}
      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0 = to_next[0] = from[0];
	  from++;
	  n_left_from--;
	  to_next++;
	  n_left_to_next--;

	  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
	  vlib_buffer_advance (b0, payload_offset);

	  u16 len0 = vlib_buffer_length_in_chain (vm, b0);
	  u32 next0 = VXLAN_FLOW_NEXT_L2_INPUT;

	  u8 ip_err0 = vxlan_check_ip (b0, len0);
	  u8 udp_err0 = vxlan_check_ip_udp_len (b0);
	  u8 csum_err0 = vxlan_check_udp_csum (vm, b0);

	  if (csum_err0)
	    csum_err0 = !vxlan_validate_udp_csum (vm, b0);
	  if (ip_err0 || udp_err0 || csum_err0)
	    {
	      next0 = VXLAN_FLOW_NEXT_DROP;
	      u8 error0 = vxlan_err_code (ip_err0, udp_err0, csum_err0);
	      b0->error = node->errors[error0];
	    }

	  vnet_update_l2_len (b0);

	  ASSERT (b0->flow_id != 0);
	  u32 t_index0 = b0->flow_id - vxm->flow_id_start;
	  vxlan_tunnel_t *t0 = &vxm->tunnels[t_index0];
	  b0->flow_id = 0;

	  u32 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX] =
	    t0->sw_if_index;
	  vlib_increment_combined_counter (rx_counter[next0], thread_index,
					   sw_if_index0, 1, len0);

	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      vxlan_rx_trace_t *tr =
		vlib_add_trace (vm, node, b0, sizeof *tr);
	      u8 error0 = vxlan_err_code (ip_err0, udp_err0, csum_err0);
	      tr->next_index = next0;
	      tr->error = error0;
	      tr->tunnel_index = t_index0;
	      tr->vni = t0->vni;
	    }
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return f->n_vectors;
}

/* *INDENT-OFF* */
#ifndef CLIB_MULTIARCH_VARIANT
VLIB_REGISTER_NODE (vxlan4_flow_input_node) = {
  .name = "vxlan-flow-input",
  .type = VLIB_NODE_TYPE_INTERNAL,
  .vector_size = sizeof (u32),

  .format_trace = format_vxlan_rx_trace,

  .n_errors = VXLAN_FLOW_N_ERROR,
  .error_strings = vxlan_flow_error_strings,

  .n_next_nodes = VXLAN_FLOW_N_NEXT,
  .next_nodes = {
#define _(s,n) [VXLAN_FLOW_NEXT_##s] = n,
    foreach_vxlan_flow_input_next
#undef _
  },
};
#endif
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */