summaryrefslogtreecommitdiffstats
path: root/src/scripts
AgeCommit message (Expand)AuthorFilesLines
2019-03-26Convert GRE nodes to new buffer APIs and multiarchBenoît Ganne2-0/+124
2019-03-26Simplify adjacency rewrite codeBenoît Ganne1-0/+67
2019-03-04IPSEC: script to bounce IPSEC traffic through a pipe to test encrypt and decrpytNeale Ranns1-0/+66
2019-02-20pg: remove no-recycle optionDamjan Marion39-77/+0
2019-01-29cmake: fix out-of-git-tree buildDamjan Marion1-1/+1
2019-01-20Rework of debian packagingDamjan Marion1-0/+37
2018-08-17CMake as an alternative to autotools (experimental)Damjan Marion1-0/+28
2018-06-15NAT44: endpoint dependent mode (VPP-1273)Matus Fabian2-1/+49
2018-03-12License text cleanupDave Barach1-0/+13
2017-12-13NAT64: multi-thread support (VPP-891)Matus Fabian2-0/+86
2017-10-30Remove old Python vppctl scriptChris Luke1-134/+0
2017-10-16udp: refactor udp codeFlorin Coras2-25/+72
2017-08-23NAT: Rename snat plugin to nat (VPP-955)Matus Fabian4-10/+10
2017-08-04SNAT: fix address and port allocation for multiple worker threads (VPP-925)Matus Fabian1-5/+12
2017-05-09Add support for tcp/session buffer chainsFlorin Coras1-2/+17
2017-05-05First commit SR MPLSPablo Camarillo7-118/+11
2017-04-19Fix "make dist" to include version number, docouple it from rpm packagingDamjan Marion1-8/+4
2017-04-13Session layer refactoringFlorin Coras1-1/+2
2017-03-13VPP-659 Improve tcp/session debugging and testingFlorin Coras1-0/+4
2017-03-10VPP-659 TCP improvementsFlorin Coras3-3/+28
2017-03-07DHCP Multiple Servers (VPP-602, VPP-605)Neale Ranns1-1/+2
2017-03-07CGN: Deterministic NAT (VPP-623)Matus Fabian1-0/+108
2017-03-04Cleanup URI code and TCP bugfixingFlorin Coras2-0/+66
2017-03-01VPP-598: tcp stack initial commitDave Barach5-8/+91
2017-02-28vlib: add buffer cloning supportDamjan Marion1-8/+11
2017-02-21dhcp: multiple additionsNeale Ranns1-0/+21
2017-02-02Fix SR multicast post mfib commitNeale Ranns1-0/+58
2017-01-27IP Multicast FIB (mfib)Neale Ranns1-0/+22
2017-01-27Add multi-vpp support back into pythonic vppctlEd Warnicke1-7/+20
2017-01-25[re]Enable per-Adjacency/neighbour countersNeale Ranns1-2/+16
2017-01-21Fix issue in rpm versioning for release buildsDamjan Marion1-1/+1
2017-01-13vppctl: new bash completion for vppctl commandsPadraig Connolly1-0/+30
2017-01-10Revert "vppctl: bash completion for vppctl commands"Damjan Marion1-30/+0
2017-01-09vppctl: bash completion for vppctl commandsPadraig Connolly1-0/+30
2017-01-03fix version.h generation for out-of-tree buildsDamjan Marion1-0/+54
2016-12-28Reorganize source tree to use single autotools instanceDamjan Marion78-0/+3766
olor-scheme: light) { .highlight .hll { background-color: #ffffcc } .highlight .c { color: #888888 } /* Comment */ .highlight .err { color: #a61717; background-color: #e3d2d2 } /* Error */ .highlight .k { color: #008800; font-weight: bold } /* Keyword */ .highlight .ch { color: #888888 } /* Comment.Hashbang */ .highlight .cm { color: #888888 } /* Comment.Multiline */ .highlight .cp { color: #cc0000; font-weight: bold } /* Comment.Preproc */ .highlight .cpf { color: #888888 } /* Comment.PreprocFile */ .highlight .c1 { color: #888888 } /* Comment.Single */ .highlight .cs { color: #cc0000; font-weight: bold; background-color: #fff0f0 } /* Comment.Special */ .highlight .gd { color: #000000; background-color: #ffdddd } /* Generic.Deleted */ .highlight .ge { font-style: italic } /* Generic.Emph */ .highlight .gr { color: #aa0000 } /* Generic.Error */ .highlight .gh { color: #333333 } /* Generic.Heading */ .highlight .gi { color: #000000; background-color: #ddffdd } /* Generic.Inserted */ .highlight .go { color: #888888 } /* Generic.Output */ .highlight .gp { color: #555555 } /* Generic.Prompt */ .highlight .gs { font-weight: bold } /* Generic.Strong */ .highlight .gu { color: #666666 } /* Generic.Subheading */ .highlight .gt { color: #aa0000 } /* Generic.Traceback */ .highlight .kc { color: #008800; font-weight: bold } /* Keyword.Constant */ .highlight .kd { color: #008800; font-weight: bold } /* Keyword.Declaration */ .highlight .kn { color: #008800; font-weight: bold } /* Keyword.Namespace */ .highlight .kp { color: #008800 } /* Keyword.Pseudo */ .highlight .kr { color: #008800; font-weight: bold } /* Keyword.Reserved */ .highlight .kt { color: #888888; font-weight: bold } /* Keyword.Type */ .highlight .m { color: #0000DD; font-weight: bold } /* Literal.Number */ .highlight .s { color: #dd2200; background-color: #fff0f0 } /* Literal.String */ .highlight .na { color: #336699 } /* Name.Attribute */ .highlight .nb { color: #003388 } /* Name.Builtin */ .highlight .nc { color: #bb0066; font-weight: bold } /* Name.Class */ .highlight .no { color: #003366; font-weight: bold } /* Name.Constant */ .highlight .nd { color: #555555 } /* Name.Decorator */ .highlight .ne { color: #bb0066; font-weight: bold } /* Name.Exception */ .highlight .nf { color: #0066bb; font-weight: bold } /* Name.Function */ .highlight .nl { color: #336699; font-style: italic } /* Name.Label */ .highlight .nn { color: #bb0066; font-weight: bold } /* Name.Namespace */ .highlight .py { color: #336699; font-weight: bold } /* Name.Property */ .highlight .nt { color: #bb0066; font-weight: bold } /* Name.Tag */ .highlight .nv { color: #336699 } /* Name.Variable */ .highlight .ow { color: #008800 } /* Operator.Word */ .highlight .w { color: #bbbbbb } /* Text.Whitespace */ .highlight .mb { color: #0000DD; font-weight: bold } /* Literal.Number.Bin */ .highlight .mf { color: #0000DD; font-weight: bold } /* Literal.Number.Float */ .highlight .mh { color: #0000DD; font-weight: bold } /* Literal.Number.Hex */ .highlight .mi { color: #0000DD; font-weight: bold } /* Literal.Number.Integer */ .highlight .mo { color: #0000DD; font-weight: bold } /* Literal.Number.Oct */ .highlight .sa { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Affix */ .highlight .sb { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Backtick */ .highlight .sc { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Char */ .highlight .dl { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Delimiter */ .highlight .sd { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Doc */ .highlight .s2 { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Double */ .highlight .se { color: #0044dd; background-color: #fff0f0 } /* Literal.String.Escape */ .highlight .sh { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Heredoc */ .highlight .si { color: #3333bb; background-color: #fff0f0 } /* Literal.String.Interpol */ .highlight .sx { color: #22bb22; background-color: #f0fff0 } /* Literal.String.Other */ .highlight .sr { color: #008800; background-color: #fff0ff } /* Literal.String.Regex */ .highlight .s1 { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Single */ .highlight .ss { color: #aa6600; background-color: #fff0f0 } /* Literal.String.Symbol */ .highlight .bp { color: #003388 } /* Name.Builtin.Pseudo */ .highlight .fm { color: #0066bb; font-weight: bold } /* Name.Function.Magic */ .highlight .vc { color: #336699 } /* Name.Variable.Class */ .highlight .vg { color: #dd7700 } /* Name.Variable.Global */ .highlight .vi { color: #3333bb } /* Name.Variable.Instance */ .highlight .vm { color: #336699 } /* Name.Variable.Magic */ .highlight .il { color: #0000DD; font-weight: bold } /* Literal.Number.Integer.Long */ }
#!/usr/bin/env python3

import unittest

from framework import VppTestCase, VppTestRunner
from vpp_sub_interface import VppDot1QSubint
from vpp_ip import DpoProto
from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsRoute, \
    VppMplsLabel, VppMplsTable, FibPathProto

import scapy.compat
from scapy.packet import Raw
from scapy.layers.l2 import Ether, Dot1Q
from scapy.layers.inet import IP, UDP
from scapy.layers.inet6 import IPv6
from scapy.contrib.mpls import MPLS
from vpp_papi import VppEnum
from vpp_qos import VppQosRecord, VppQosEgressMap, VppQosMark, VppQosStore

NUM_PKTS = 67


class TestQOS(VppTestCase):
    """ QOS Test Case """

    # Note: Since the enums aren't created dynamically until after
    #       the papi client attaches to VPP, we put it in a property to
    #       ensure it is the value at runtime, not at module load time.
    @property
    def QOS_SOURCE(self):
        return VppEnum.vl_api_qos_source_t

    @classmethod
    def setUpClass(cls):
        super(TestQOS, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(TestQOS, cls).tearDownClass()

    def setUp(self):
        super(TestQOS, self).setUp()

        self.create_pg_interfaces(range(5))

        tbl = VppMplsTable(self, 0)
        tbl.add_vpp_config()

        for i in self.pg_interfaces:
            i.admin_up()
            i.config_ip4()
            i.resolve_arp()
            i.config_ip6()
            i.resolve_ndp()
            i.enable_mpls()

    def tearDown(self):
        for i in self.pg_interfaces:
            i.unconfig_ip4()
            i.unconfig_ip6()
            i.disable_mpls()

        super(TestQOS, self).tearDown()

    def test_qos_ip(self):
        """ QoS Mark/Record/Store IP """

        #
        # for table 1 map the n=0xff possible values of input QoS mark,
        # n to 1-n
        #
        output = [scapy.compat.chb(0)] * 256
        for i in range(0, 255):
            output[i] = scapy.compat.chb(255 - i)
        os = b''.join(output)
        rows = [{'outputs': os},
                {'outputs': os},
                {'outputs': os},
                {'outputs': os}]

        qem1 = VppQosEgressMap(self, 1, rows).add_vpp_config()

        #
        # For table 2 (and up) use the value n for everything
        #
        output = [scapy.compat.chb(2)] * 256
        os = b''.join(output)
        rows = [{'outputs': os},
                {'outputs': os},
                {'outputs': os},
                {'outputs': os}]

        qem2 = VppQosEgressMap(self, 2, rows).add_vpp_config()

        output = [scapy.compat.chb(3)] * 256
        os = b''.join(output)
        rows = [{'outputs': os},
                {'outputs': os},
                {'outputs': os},
                {'outputs': os}]

        qem3 = VppQosEgressMap(self, 3, rows).add_vpp_config()

        output = [scapy.compat.chb(4)] * 256
        os = b''.join(output)
        rows = [{'outputs': os},
                {'outputs': os},
                {'outputs': os},
                {'outputs': os}]

        qem4 = VppQosEgressMap(self, 4, rows).add_vpp_config()
        qem5 = VppQosEgressMap(self, 5, rows).add_vpp_config()
        qem6 = VppQosEgressMap(self, 6, rows).add_vpp_config()
        qem7 = VppQosEgressMap(self, 7, rows).add_vpp_config()

        self.assertTrue(qem7.query_vpp_config())
        self.logger.info(self.vapi.cli("sh qos eg map"))

        #
        # Bind interface pgN to table n
        #
        qm1 = VppQosMark(self, self.pg1, qem1,
                         self.QOS_SOURCE.QOS_API_SOURCE_IP).add_vpp_config()
        qm2 = VppQosMark(self, self.pg2, qem2,
                         self.QOS_SOURCE.QOS_API_SOURCE_IP).add_vpp_config()
        qm3 = VppQosMark(self, self.pg3, qem3,
                         self.QOS_SOURCE.QOS_API_SOURCE_IP).add_vpp_config()
        qm4 = VppQosMark(self, self.pg4, qem4,
                         self.QOS_SOURCE.QOS_API_SOURCE_IP).add_vpp_config()
        self.assertTrue(qm3.query_vpp_config())

        self.logger.info(self.vapi.cli("sh qos mark"))

        #
        # packets ingress on Pg0
        #
        p_v4 = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
                IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4, tos=1) /
                UDP(sport=1234, dport=1234) /
                Raw(scapy.compat.chb(100) * NUM_PKTS))
        p_v6 = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
                IPv6(src=self.pg0.remote_ip6, dst=self.pg1.remote_ip6,
                     tc=1) /
                UDP(sport=1234, dport=1234) /
                Raw(scapy.compat.chb(100) * NUM_PKTS))

        #
        # Since we have not yet enabled the recording of the input QoS
        # from the input iP header, the egress packet's ToS will be unchanged
        #
        rx = self.send_and_expect(self.pg0, p_v4 * NUM_PKTS, self.pg1)
        for p in rx:
            self.assertEqual(p[IP].tos, 1)
        rx = self.send_and_expect(self.pg0, p_v6 * NUM_PKTS, self.pg1)
        for p in rx:
            self.assertEqual(p[IPv6].tc, 1)

        #
        # Enable QoS recording on IP input for pg0
        #
        qr1 = VppQosRecord(self, self.pg0,
                           self.QOS_SOURCE.QOS_API_SOURCE_IP)
        qr1.add_vpp_config()
        self.logger.info(self.vapi.cli("sh qos record"))

        #
        # send the same packets, this time expect the input TOS of 1
        # to be mapped to pg1's egress value of 254
        #
        rx = self.send_and_expect(self.pg0, p_v4 * NUM_PKTS, self.pg1)
        for p in rx:
            self.assertEqual(p[IP].tos, 254)
        rx = self.send_and_expect(self.pg0, p_v6 * NUM_PKTS, self.pg1)
        for p in rx:
            self.assertEqual(p[IPv6].tc, 254)

        #
        # different input ToS to test the mapping
        #
        p_v4[IP].tos = 127
        rx = self.send_and_expect(self.pg0, p_v4 * NUM_PKTS, self.pg1)
        for p in rx:
            self.assertEqual(p[IP].tos, 128)
        p_v6[IPv6].tc = 127
        rx = self.send_and_expect(self.pg0, p_v6 * NUM_PKTS, self.pg1)
        for p in rx:
            self.assertEqual(p[IPv6].tc, 128)

        p_v4[IP].tos = 254
        rx = self.send_and_expect(self.pg0, p_v4 * NUM_PKTS, self.pg1)
        for p in rx:
            self.assertEqual(p[IP].tos, 1)
        p_v6[IPv6].tc = 254
        rx = self.send_and_expect(self.pg0, p_v6 * NUM_PKTS, self.pg1)
        for p in rx:
            self.assertEqual(p[IPv6].tc, 1)

        #
        # send packets out the other interfaces to test the maps are
        # correctly applied
        #
        p_v4[IP].dst = self.pg2.remote_ip4
        rx = self.send_and_expect(self.pg0, p_v4 * NUM_PKTS, self.pg2)
        for p in rx:
            self.assertEqual(p[IP].tos, 2)

        p_v4[IP].dst = self.pg3.remote_ip4
        rx = self.send_and_expect(self.pg0, p_v4 * NUM_PKTS, self.pg3)
        for p in rx:
            self.assertEqual(p[IP].tos, 3)

        p_v6[IPv6].dst = self.pg3.remote_ip6
        rx = self.send_and_expect(self.pg0, p_v6 * NUM_PKTS, self.pg3)
        for p in rx:
            self.assertEqual(p[IPv6].tc, 3)

        #
        # remove the map on pg2 and pg3, now expect an unchanged IP tos
        #
        qm2.remove_vpp_config()
        qm3.remove_vpp_config()
        self.logger.info(self.vapi.cli("sh qos mark"))

        self.assertFalse(qm3.query_vpp_config())
        self.logger.info(self.vapi.cli("sh int feat pg2"))

        p_v4[IP].dst = self.pg2.remote_ip4
        rx = self.send_and_expect(self.pg0, p_v4 * NUM_PKTS, self.pg2)
        for p in rx:
            self.assertEqual(p[IP].tos, 254)

        p_v4[IP].dst = self.pg3.remote_ip4
        rx = self.send_and_expect(self.pg0, p_v4 * NUM_PKTS, self.pg3)
        for p in rx:
            self.assertEqual(p[IP].tos, 254)

        #
        # still mapping out of pg1
        #
        p_v4[IP].dst = self.pg1.remote_ip4
        rx = self.send_and_expect(self.pg0, p_v4 * NUM_PKTS, self.pg1)
        for p in rx:
            self.assertEqual(p[IP].tos, 1)

        #
        # disable the input recording on pg0
        #
        self.assertTrue(qr1.query_vpp_config())
        qr1.remove_vpp_config()

        #
        # back to an unchanged TOS value
        #
        rx = self.send_and_expect(self.pg0, p_v4 * NUM_PKTS, self.pg1)
        for p in rx:
            self.assertEqual(p[IP].tos, 254)

        #
        # enable QoS stroe instead of record
        #
        qst1 = VppQosStore(self, self.pg0,
                           self.QOS_SOURCE.QOS_API_SOURCE_IP,
                           5).add_vpp_config()
        self.logger.info(self.vapi.cli("sh qos store"))

        p_v4[IP].dst = self.pg1.remote_ip4
        rx = self.send_and_expect(self.pg0, p_v4 * NUM_PKTS, self.pg1)
        for p in rx:
            self.assertEqual(p[IP].tos, 250)

        #
        # disable the input storing on pg0
        #
        self.assertTrue(qst1.query_vpp_config())
        qst1.remove_vpp_config()

        #
        # back to an unchanged TOS value
        #
        rx = self.send_and_expect(self.pg0, p_v4 * NUM_PKTS, self.pg1)
        for p in rx:
            self.assertEqual(p[IP].tos, 254)

        #
        # disable the egress map on pg1 and pg4
        #
        qm1.remove_vpp_config()
        qm4.remove_vpp_config()

        #
        # unchanged Tos on pg1
        #
        rx = self.send_and_expect(self.pg0, p_v4 * NUM_PKTS, self.pg1)
        for p in rx:
            self.assertEqual(p[IP].tos, 254)

    def test_qos_mpls(self):
        """ QoS Mark/Record MPLS """

        #
        # 255 QoS for all input values
        #
        from_ext = 7
        from_ip = 6
        from_mpls = 5
        from_vlan = 4
        output = [scapy.compat.chb(from_ext)] * 256
        os1 = b''.join(output)
        output = [scapy.compat.chb(from_vlan)] * 256
        os2 = b''.join(output)
        output = [scapy.compat.chb(from_mpls)] * 256
        os3 = b''.join(output)
        output = [scapy.compat.chb(from_ip)] * 256
        os4 = b''.join(output)
        rows = [{'outputs': os1},
                {'outputs': os2},
                {'outputs': os3},
                {'outputs': os4}]

        qem1 = VppQosEgressMap(self, 1, rows).add_vpp_config()

        #
        # a route with 1 MPLS label
        #
        route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
                                    [VppRoutePath(self.pg1.remote_ip4,
                                                  self.pg1.sw_if_index,
                                                  labels=[32])])
        route_10_0_0_1.add_vpp_config()

        #
        # a route with 3 MPLS labels
        #
        route_10_0_0_3 = VppIpRoute(self, "10.0.0.3", 32,
                                    [VppRoutePath(self.pg1.remote_ip4,
                                                  self.pg1.sw_if_index,
                                                  labels=[63, 33, 34])])
        route_10_0_0_3.add_vpp_config()

        #
        # enable IP QoS recording on the input Pg0 and MPLS egress marking
        # on Pg1
        #
        qr1 = VppQosRecord(self, self.pg0,
                           self.QOS_SOURCE.QOS_API_SOURCE_IP).add_vpp_config()
        qm1 = VppQosMark(self, self.pg1, qem1,
                         self.QOS_SOURCE.QOS_API_SOURCE_MPLS).add_vpp_config()

        #
        # packet that will get one label added and 3 labels added resp.
        #
        p_1 = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
               IP(src=self.pg0.remote_ip4, dst="10.0.0.1", tos=1) /
               UDP(sport=1234, dport=1234) /
               Raw(scapy.compat.chb(100) * NUM_PKTS))
        p_3 = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
               IP(src=self.pg0.remote_ip4, dst="10.0.0.3", tos=1) /
               UDP(sport=1234, dport=1234) /
               Raw(scapy.compat.chb(100) * NUM_PKTS))

        rx = self.send_and_expect(self.pg0, p_1 * NUM_PKTS, self.pg1)

        #
        # only 3 bits of ToS value in MPLS make sure tos is correct
        # and the label and EOS bit have not been corrupted
        #
        for p in rx:
            self.assertEqual(p[MPLS].cos, from_ip)
            self.assertEqual(p[MPLS].label, 32)
            self.assertEqual(p[MPLS].s, 1)
        rx = self.send_and_expect(self.pg0, p_3 * NUM_PKTS, self.pg1)
        for p in rx:
            self.assertEqual(p[MPLS].cos, from_ip)
            self.assertEqual(p[MPLS].label, 63)
            self.assertEqual(p[MPLS].s, 0)
            h = p[MPLS].payload
            self.assertEqual(h[MPLS].cos, from_ip)
            self.assertEqual(h[MPLS].label, 33)
            self.assertEqual(h[MPLS].s, 0)
            h = h[MPLS].payload
            self.assertEqual(h[MPLS].cos, from_ip)
            self.assertEqual(h[MPLS].label, 34)
            self.assertEqual(h[MPLS].s, 1)

        #
        # enable MPLS QoS recording on the input Pg0 and IP egress marking
        # on Pg1
        #
        qr2 = VppQosRecord(
            self, self.pg0,
            self.QOS_SOURCE.QOS_API_SOURCE_MPLS).add_vpp_config()
        qm2 = VppQosMark(
            self, self.pg1, qem1,
            self.QOS_SOURCE.QOS_API_SOURCE_IP).add_vpp_config()

        #
        # MPLS x-connect - COS according to pg1 map
        #
        route_32_eos = VppMplsRoute(self, 32, 1,
                                    [VppRoutePath(self.pg1.remote_ip4,
                                                  self.pg1.sw_if_index,
                                                  labels=[VppMplsLabel(33)])])
        route_32_eos.add_vpp_config()

        p_m1 = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
                MPLS(label=32, cos=3, ttl=2) /
                IP(src=self.pg0.remote_ip4, dst="10.0.0.1", tos=1) /
                UDP(sport=1234, dport=1234) /
                Raw(scapy.compat.chb(100) * NUM_PKTS))

        rx = self.send_and_expect(self.pg0, p_m1 * NUM_PKTS, self.pg1)
        for p in rx:
            self.assertEqual(p[MPLS].cos, from_mpls)
            self.assertEqual(p[MPLS].label, 33)
            self.assertEqual(p[MPLS].s, 1)

        #
        # MPLS deag - COS is copied from MPLS to IP
        #
        route_33_eos = VppMplsRoute(self, 33, 1,
                                    [VppRoutePath("0.0.0.0",
                                                  0xffffffff,
                                                  nh_table_id=0)])
        route_33_eos.add_vpp_config()

        route_10_0_0_4 = VppIpRoute(self, "10.0.0.4", 32,
                                    [VppRoutePath(self.pg1.remote_ip4,
                                                  self.pg1.sw_if_index)])
        route_10_0_0_4.add_vpp_config()

        p_m2 = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
                MPLS(label=33, ttl=2, cos=3) /
                IP(src=self.pg0.remote_ip4, dst="10.0.0.4", tos=1) /
                UDP(sport=1234, dport=1234) /
                Raw(scapy.compat.chb(100) * NUM_PKTS))

        rx = self.send_and_expect(self.pg0, p_m2 * NUM_PKTS, self.pg1)

        for p in rx:
            self.assertEqual(p[IP].tos, from_mpls)

    def test_qos_vlan(self):
        """QoS mark/record VLAN """

        #
        # QoS for all input values
        #
        output = [scapy.compat.chb(0)] * 256
        for i in range(0, 255):
            output[i] = scapy.compat.chb(255 - i)
        os = b''.join(output)
        rows = [{'outputs': os},
                {'outputs': os},
                {'outputs': os},
                {'outputs': os}]

        qem1 = VppQosEgressMap(self, 1, rows).add_vpp_config()

        sub_if = VppDot1QSubint(self, self.pg0, 11)

        sub_if.admin_up()
        sub_if.config_ip4()
        sub_if.resolve_arp()
        sub_if.config_ip6()
        sub_if.resolve_ndp()

        #
        # enable VLAN QoS recording/marking on the input Pg0 subinterface and
        #
        qr_v = VppQosRecord(
            self, sub_if,
            self.QOS_SOURCE.QOS_API_SOURCE_VLAN).add_vpp_config()
        qm_v = VppQosMark(
            self, sub_if, qem1,
            self.QOS_SOURCE.QOS_API_SOURCE_VLAN).add_vpp_config()

        #
        # IP marking/recording on pg1
        #
        qr_ip = VppQosRecord(
            self, self.pg1,
            self.QOS_SOURCE.QOS_API_SOURCE_IP).add_vpp_config()
        qm_ip = VppQosMark(
            self, self.pg1, qem1,
            self.QOS_SOURCE.QOS_API_SOURCE_IP).add_vpp_config()

        #
        # a routes to/from sub-interface
        #
        route_10_0_0_1 = VppIpRoute(self, "10.0.0.1", 32,
                                    [VppRoutePath(sub_if.remote_ip4,
                                                  sub_if.sw_if_index)])
        route_10_0_0_1.add_vpp_config()
        route_10_0_0_2 = VppIpRoute(self, "10.0.0.2", 32,
                                    [VppRoutePath(self.pg1.remote_ip4,
                                                  self.pg1.sw_if_index)])
        route_10_0_0_2.add_vpp_config()
        route_2001_1 = VppIpRoute(self, "2001::1", 128,
                                  [VppRoutePath(sub_if.remote_ip6,
                                                sub_if.sw_if_index)])
        route_2001_1.add_vpp_config()
        route_2001_2 = VppIpRoute(self, "2001::2", 128,
                                  [VppRoutePath(self.pg1.remote_ip6,
                                                self.pg1.sw_if_index)])
        route_2001_2.add_vpp_config()

        p_v1 = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
                Dot1Q(vlan=11, prio=1) /
                IP(src="1.1.1.1", dst="10.0.0.2", tos=1) /
                UDP(sport=1234, dport=1234) /
                Raw(scapy.compat.chb(100) * NUM_PKTS))

        p_v2 = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
                IP(src="1.1.1.1", dst="10.0.0.1", tos=1) /
                UDP(sport=1234, dport=1234) /
                Raw(scapy.compat.chb(100) * NUM_PKTS))

        p_v3 = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
                Dot1Q(vlan=11, prio=1, id=1) /
                IP(src="1.1.1.1", dst="10.0.0.2", tos=2) /
                UDP(sport=1234, dport=1234) /
                Raw(scapy.compat.chb(100) * NUM_PKTS))

        rx = self.send_and_expect(self.pg1, p_v2 * NUM_PKTS, self.pg0)

        for p in rx:
            self.assertEqual(p[Dot1Q].prio, 7)
            self.assertEqual(p[Dot1Q].id, 0)

        rx = self.send_and_expect(self.pg0, p_v3 * NUM_PKTS, self.pg1)

        for p in rx:
            self.assertEqual(p[IP].tos, 252)

        rx = self.send_and_expect(self.pg0, p_v1 * NUM_PKTS, self.pg1)

        for p in rx:
            self.assertEqual(p[IP].tos, 253)

        p_v1 = (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
                Dot1Q(vlan=11, prio=2) /
                IPv6(src="2001::1", dst="2001::2", tc=1) /
                UDP(sport=1234, dport=1234) /
                Raw(scapy.compat.chb(100) * NUM_PKTS))

        p_v2 = (Ether(src=self.pg1.remote_mac, dst=self.pg1.local_mac) /
                IPv6(src="3001::1", dst="2001::1", tc=1) /
                UDP(sport=1234, dport=1234) /
                Raw(scapy.compat.chb(100) * NUM_PKTS))

        rx = self.send_and_expect(self.pg1, p_v2 * NUM_PKTS, self.pg0)

        for p in rx:
            self.assertEqual(p[Dot1Q].prio, 7)
            self.assertEqual(p[Dot1Q].id, 0)

        rx = self.send_and_expect(self.pg0, p_v1 * NUM_PKTS, self.pg1)

        for p in rx:
            self.assertEqual(p[IPv6].tc, 251)

        #
        # cleanup
        #
        sub_if.unconfig_ip4()
        sub_if.unconfig_ip6()


if __name__ == '__main__':
    unittest.main(testRunner=VppTestRunner)
int i; vlib_set_queue_signal_callback (vm, memclnt_queue_callback); if ((rv = memory_api_init (am->region_name)) < 0) { clib_warning ("memory_api_init returned %d, wait for godot...", rv); vlib_process_suspend (vm, 1e70); } shm = am->shmem_hdr; ASSERT (shm); q = shm->vl_input_queue; ASSERT (q); e = vlib_call_init_exit_functions (vm, vm->api_init_function_registrations, 1 /* call_once */ ); if (e) clib_error_report (e); sleep_time = 20.0; dead_client_scan_time = vlib_time_now (vm) + 20.0; /* * Send plugin message range messages for each plugin we loaded */ for (i = 0; i < vec_len (am->msg_ranges); i++) { vl_api_msg_range_t *rp = am->msg_ranges + i; send_one_plugin_msg_ids_msg (rp->name, rp->first_msg_id, rp->last_msg_id); } /* $$$ pay attention to frame size, control CPU usage */ while (1) { uword event_type __attribute__ ((unused)); i8 *headp; int need_broadcast; /* * There's a reason for checking the queue before * sleeping. If the vlib application crashes, it's entirely * possible for a client to enqueue a connect request * during the process restart interval. * * Unless some force of physics causes the new incarnation * of the application to process the request, the client will * sit and wait for Godot... */ vector_rate = vlib_last_vector_length_per_node (vm); start_time = vlib_time_now (vm); while (1) { pthread_mutex_lock (&q->mutex); if (q->cursize == 0) { vm->api_queue_nonempty = 0; pthread_mutex_unlock (&q->mutex); if (TRACE_VLIB_MEMORY_QUEUE) { /* *INDENT-OFF* */ ELOG_TYPE_DECLARE (e) = { .format = "q-underflow: len %d", .format_args = "i4", }; /* *INDENT-ON* */ struct { u32 len; } *ed; ed = ELOG_DATA (&vm->elog_main, e); ed->len = 0; } sleep_time = 20.0; break; } headp = (i8 *) (q->data + sizeof (uword) * q->head); clib_memcpy (&mp, headp, sizeof (uword)); q->head++; need_broadcast = (q->cursize == q->maxsize / 2); q->cursize--; if (PREDICT_FALSE (q->head == q->maxsize)) q->head = 0; pthread_mutex_unlock (&q->mutex); if (need_broadcast) (void) pthread_cond_broadcast (&q->condvar); vl_msg_api_handler_with_vm_node (am, (void *) mp, vm, node); /* Allow no more than 10us without a pause */ if (vlib_time_now (vm) > start_time + 10e-6) { int index = SLEEP_400_US; if (vector_rate > 40.0) sleep_time = 400e-6; else if (vector_rate > 20.0) { index = SLEEP_200_US; sleep_time = 200e-6; } else if (vector_rate >= 1.0) { index = SLEEP_100_US; sleep_time = 100e-6; } else { index = SLEEP_10_US; sleep_time = 10e-6; } vector_rate_histogram[index] += 1; break; } } event_type = vlib_process_wait_for_event_or_clock (vm, sleep_time); vm->queue_signal_pending = 0; vlib_process_get_events (vm, 0 /* event_data */ ); if (vlib_time_now (vm) > dead_client_scan_time) { vl_api_registration_t **regpp; vl_api_registration_t *regp; unix_shared_memory_queue_t *q; static u32 *dead_indices; static u32 *confused_indices; vec_reset_length (dead_indices); vec_reset_length (confused_indices); /* *INDENT-OFF* */ pool_foreach (regpp, am->vl_clients, ({ regp = *regpp; if (regp) { q = regp->vl_input_queue; if (kill (q->consumer_pid, 0) < 0) { vec_add1(dead_indices, regpp - am->vl_clients); } } else { clib_warning ("NULL client registration index %d", regpp - am->vl_clients); vec_add1 (confused_indices, regpp - am->vl_clients); } })); /* *INDENT-ON* */ /* This should "never happen," but if it does, fix it... */ if (PREDICT_FALSE (vec_len (confused_indices) > 0)) { int i; for (i = 0; i < vec_len (confused_indices); i++) { pool_put_index (am->vl_clients, confused_indices[i]); } } if (PREDICT_FALSE (vec_len (dead_indices) > 0)) { int i; svm_region_t *svm; void *oldheap; /* Allow the application to clean up its registrations */ for (i = 0; i < vec_len (dead_indices); i++) { regpp = pool_elt_at_index (am->vl_clients, dead_indices[i]); if (regpp) { u32 handle; handle = vl_msg_api_handle_from_index_and_epoch (dead_indices[i], shm->application_restarts); (void) call_reaper_functions (handle); } } svm = am->vlib_rp; pthread_mutex_lock (&svm->mutex); oldheap = svm_push_data_heap (svm); for (i = 0; i < vec_len (dead_indices); i++) { regpp = pool_elt_at_index (am->vl_clients, dead_indices[i]); if (regpp) { /* Poison the old registration */ memset (*regpp, 0xF3, sizeof (**regpp)); clib_mem_free (*regpp); /* no dangling references, please */ *regpp = 0; } else { svm_pop_heap (oldheap); clib_warning ("Duplicate free, client index %d", regpp - am->vl_clients); oldheap = svm_push_data_heap (svm); } } svm_client_scan_this_region_nolock (am->vlib_rp); pthread_mutex_unlock (&svm->mutex); svm_pop_heap (oldheap); for (i = 0; i < vec_len (dead_indices); i++) pool_put_index (am->vl_clients, dead_indices[i]); } dead_client_scan_time = vlib_time_now (vm) + 20.0; } if (TRACE_VLIB_MEMORY_QUEUE) { /* *INDENT-OFF* */ ELOG_TYPE_DECLARE (e) = { .format = "q-awake: len %d", .format_args = "i4", }; /* *INDENT-ON* */ struct { u32 len; } *ed; ed = ELOG_DATA (&vm->elog_main, e); ed->len = q->cursize; } } return 0; } static clib_error_t * vl_api_show_histogram_command (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cli_cmd) { u64 total_counts = 0; int i; for (i = 0; i < SLEEP_N_BUCKETS; i++) { total_counts += vector_rate_histogram[i]; } if (total_counts == 0) { vlib_cli_output (vm, "No control-plane activity."); return 0; } #define _(n) \ do { \ f64 percent; \ percent = ((f64) vector_rate_histogram[SLEEP_##n##_US]) \ / (f64) total_counts; \ percent *= 100.0; \ vlib_cli_output (vm, "Sleep %3d us: %llu, %.2f%%",n, \ vector_rate_histogram[SLEEP_##n##_US], \ percent); \ } while (0); foreach_histogram_bucket; #undef _ return 0; } /* *INDENT-OFF* */ VLIB_CLI_COMMAND (cli_show_api_histogram_command, static) = { .path = "show api histogram", .short_help = "show api histogram", .function = vl_api_show_histogram_command, }; /* *INDENT-ON* */ static clib_error_t * vl_api_clear_histogram_command (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cli_cmd) { int i; for (i = 0; i < SLEEP_N_BUCKETS; i++) vector_rate_histogram[i] = 0; return 0; } /* *INDENT-OFF* */ VLIB_CLI_COMMAND (cli_clear_api_histogram_command, static) = { .path = "clear api histogram", .short_help = "clear api histogram", .function = vl_api_clear_histogram_command, }; /* *INDENT-ON* */ /* *INDENT-OFF* */ VLIB_REGISTER_NODE (memclnt_node,static) = { .function = memclnt_process, .type = VLIB_NODE_TYPE_PROCESS, .name = "api-rx-from-ring", .state = VLIB_NODE_STATE_DISABLED, }; /* *INDENT-ON* */ static void memclnt_queue_callback (vlib_main_t * vm) { static volatile int *cursizep; if (PREDICT_FALSE (cursizep == 0)) { api_main_t *am = &api_main; vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr; unix_shared_memory_queue_t *q; if (shmem_hdr == 0) return; q = shmem_hdr->vl_input_queue; if (q == 0) return; cursizep = &q->cursize; } if (*cursizep >= 1) { vm->queue_signal_pending = 1; vm->api_queue_nonempty = 1; vlib_process_signal_event (vm, memclnt_node.index, /* event_type */ 0, /* event_data */ 0); } } void vl_enable_disable_memory_api (vlib_main_t * vm, int enable) { vlib_node_set_state (vm, memclnt_node.index, (enable ? VLIB_NODE_STATE_POLLING : VLIB_NODE_STATE_DISABLED)); } static uword api_rx_from_node (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { uword n_packets = frame->n_vectors; uword n_left_from; u32 *from; static u8 *long_msg; vec_validate (long_msg, 4095); n_left_from = frame->n_vectors; from = vlib_frame_args (frame); while (n_left_from > 0) { u32 bi0; vlib_buffer_t *b0; void *msg; uword msg_len; bi0 = from[0]; b0 = vlib_get_buffer (vm, bi0); from += 1; n_left_from -= 1; msg = b0->data + b0->current_data; msg_len = b0->current_length; if (b0->flags & VLIB_BUFFER_NEXT_PRESENT) { ASSERT (long_msg != 0); _vec_len (long_msg) = 0; vec_add (long_msg, msg, msg_len); while (b0->flags & VLIB_BUFFER_NEXT_PRESENT) { b0 = vlib_get_buffer (vm, b0->next_buffer); msg = b0->data + b0->current_data; msg_len = b0->current_length; vec_add (long_msg, msg, msg_len); } msg = long_msg; } vl_msg_api_handler_no_trace_no_free (msg); } /* Free what we've been given. */ vlib_buffer_free (vm, vlib_frame_args (frame), n_packets); return n_packets; } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (api_rx_from_node_node,static) = { .function = api_rx_from_node, .type = VLIB_NODE_TYPE_INTERNAL, .vector_size = 4, .name = "api-rx-from-node", }; /* *INDENT-ON* */ static clib_error_t * setup_memclnt_exit (vlib_main_t * vm) { atexit (vl_unmap_shmem); return 0; } VLIB_INIT_FUNCTION (setup_memclnt_exit); static clib_error_t * vl_api_ring_command (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cli_cmd) { int i; ring_alloc_t *ap; vl_shmem_hdr_t *shmem_hdr; api_main_t *am = &api_main; shmem_hdr = am->shmem_hdr; if (shmem_hdr == 0) { vlib_cli_output (vm, "Shared memory segment not initialized...\n"); return 0; } vlib_cli_output (vm, "%8s %8s %8s %8s %8s\n", "Owner", "Size", "Nitems", "Hits", "Misses"); ap = shmem_hdr->vl_rings; for (i = 0; i < vec_len (shmem_hdr->vl_rings); i++) { vlib_cli_output (vm, "%8s %8d %8d %8d %8d\n", "vlib", ap->size, ap->nitems, ap->hits, ap->misses); ap++; } ap = shmem_hdr->client_rings; for (i = 0; i < vec_len (shmem_hdr->client_rings); i++) { vlib_cli_output (vm, "%8s %8d %8d %8d %8d\n", "clnt", ap->size, ap->nitems, ap->hits, ap->misses); ap++; } vlib_cli_output (vm, "%d ring miss fallback allocations\n", am->ring_misses); vlib_cli_output (vm, "%d application restarts, %d reclaimed msgs, %d garbage collects\n", shmem_hdr->application_restarts, shmem_hdr->restart_reclaims, shmem_hdr->garbage_collects); return 0; } void dump_socket_clients (vlib_main_t * vm, api_main_t * am) __attribute__ ((weak)); void dump_socket_clients (vlib_main_t * vm, api_main_t * am) { } static clib_error_t * vl_api_client_command (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cli_cmd) { vl_api_registration_t **regpp, *regp; unix_shared_memory_queue_t *q; char *health; api_main_t *am = &api_main; u32 *confused_indices = 0; if (!pool_elts (am->vl_clients)) goto socket_clients; vlib_cli_output (vm, "Shared memory clients"); vlib_cli_output (vm, "%16s %8s %14s %18s %s", "Name", "PID", "Queue Length", "Queue VA", "Health"); /* *INDENT-OFF* */ pool_foreach (regpp, am->vl_clients, ({ regp = *regpp; if (regp) { q = regp->vl_input_queue; if (kill (q->consumer_pid, 0) < 0) { health = "DEAD"; } else { health = "alive"; } vlib_cli_output (vm, "%16s %8d %14d 0x%016llx %s\n", regp->name, q->consumer_pid, q->cursize, q, health); } else { clib_warning ("NULL client registration index %d", regpp - am->vl_clients); vec_add1 (confused_indices, regpp - am->vl_clients); } })); /* *INDENT-ON* */ /* This should "never happen," but if it does, fix it... */ if (PREDICT_FALSE (vec_len (confused_indices) > 0)) { int i; for (i = 0; i < vec_len (confused_indices); i++) { pool_put_index (am->vl_clients, confused_indices[i]); } } vec_free (confused_indices); if (am->missing_clients) vlib_cli_output (vm, "%u messages with missing clients", am->missing_clients); socket_clients: dump_socket_clients (vm, am); return 0; } static clib_error_t * vl_api_status_command (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cli_cmd) { api_main_t *am = &api_main; // check if rx_trace and tx_trace are not null pointers if (am->rx_trace == 0) { vlib_cli_output (vm, "RX Trace disabled\n"); } else { if (am->rx_trace->enabled == 0) vlib_cli_output (vm, "RX Trace disabled\n"); else vlib_cli_output (vm, "RX Trace enabled\n"); } if (am->tx_trace == 0) { vlib_cli_output (vm, "TX Trace disabled\n"); } else { if (am->tx_trace->enabled == 0) vlib_cli_output (vm, "TX Trace disabled\n"); else vlib_cli_output (vm, "TX Trace enabled\n"); } return 0; } /* *INDENT-OFF* */ VLIB_CLI_COMMAND (cli_show_api_command, static) = { .path = "show api", .short_help = "Show API information", }; /* *INDENT-ON* */ /* *INDENT-OFF* */ VLIB_CLI_COMMAND (cli_show_api_ring_command, static) = { .path = "show api ring-stats", .short_help = "Message ring statistics", .function = vl_api_ring_command, }; /* *INDENT-ON* */ /* *INDENT-OFF* */ VLIB_CLI_COMMAND (cli_show_api_clients_command, static) = { .path = "show api clients", .short_help = "Client information", .function = vl_api_client_command, }; /* *INDENT-ON* */ /* *INDENT-OFF* */ VLIB_CLI_COMMAND (cli_show_api_status_command, static) = { .path = "show api status", .short_help = "Show API trace status", .function = vl_api_status_command, }; /* *INDENT-ON* */ static clib_error_t * vl_api_message_table_command (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cli_cmd) { api_main_t *am = &api_main; int i; int verbose = 0; if (unformat (input, "verbose")) verbose = 1; if (verbose == 0) vlib_cli_output (vm, "%-4s %s", "ID", "Name"); else vlib_cli_output (vm, "%-4s %-40s %6s %7s", "ID", "Name", "Bounce", "MP-safe"); for (i = 1; i < vec_len (am->msg_names); i++) { if (verbose == 0) { vlib_cli_output (vm, "%-4d %s", i, am->msg_names[i] ? am->msg_names[i] : " [no handler]"); } else { vlib_cli_output (vm, "%-4d %-40s %6d %7d", i, am->msg_names[i] ? am->msg_names[i] : " [no handler]", am->message_bounce[i], am->is_mp_safe[i]); } } return 0; } /* *INDENT-OFF* */ VLIB_CLI_COMMAND (cli_show_api_message_table_command, static) = { .path = "show api message-table", .short_help = "Message Table", .function = vl_api_message_table_command, }; /* *INDENT-ON* */ static clib_error_t * vl_api_trace_command (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cli_cmd) { u32 nitems = 1024; vl_api_trace_which_t which = VL_API_TRACE_RX; api_main_t *am = &api_main; while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { if (unformat (input, "rx nitems %u", &nitems) || unformat (input, "rx")) goto configure; else if (unformat (input, "tx nitems %u", &nitems) || unformat (input, "tx")) { which = VL_API_TRACE_RX; goto configure; } else if (unformat (input, "on rx")) { vl_msg_api_trace_onoff (am, VL_API_TRACE_RX, 1); } else if (unformat (input, "on tx")) { vl_msg_api_trace_onoff (am, VL_API_TRACE_TX, 1); } else if (unformat (input, "on")) { vl_msg_api_trace_onoff (am, VL_API_TRACE_RX, 1); } else if (unformat (input, "off")) { vl_msg_api_trace_onoff (am, VL_API_TRACE_RX, 0); vl_msg_api_trace_onoff (am, VL_API_TRACE_TX, 0); } else if (unformat (input, "free")) { vl_msg_api_trace_onoff (am, VL_API_TRACE_RX, 0); vl_msg_api_trace_onoff (am, VL_API_TRACE_TX, 0); vl_msg_api_trace_free (am, VL_API_TRACE_RX); vl_msg_api_trace_free (am, VL_API_TRACE_TX); } else if (unformat (input, "debug on")) { am->msg_print_flag = 1; } else if (unformat (input, "debug off")) { am->msg_print_flag = 0; } else return clib_error_return (0, "unknown input `%U'", format_unformat_error, input); } return 0; configure: if (vl_msg_api_trace_configure (am, which, nitems)) { vlib_cli_output (vm, "warning: trace configure error (%d, %d)", which, nitems); } return 0; } /* *INDENT-OFF* */ VLIB_CLI_COMMAND (trace, static) = { .path = "set api-trace", .short_help = "API trace", .function = vl_api_trace_command, }; /* *INDENT-ON* */ clib_error_t * vlibmemory_init (vlib_main_t * vm) { api_main_t *am = &api_main; svm_map_region_args_t _a, *a = &_a; memset (a, 0, sizeof (*a)); a->root_path = am->root_path; a->name = SVM_GLOBAL_REGION_NAME; a->baseva = (am->global_baseva != 0) ? am->global_baseva : SVM_GLOBAL_REGION_BASEVA; a->size = (am->global_size != 0) ? am->global_size : SVM_GLOBAL_REGION_SIZE; a->flags = SVM_FLAGS_NODATA; a->uid = am->api_uid; a->gid = am->api_gid; a->pvt_heap_size = (am->global_pvt_heap_size != 0) ? am->global_pvt_heap_size : SVM_PVT_MHEAP_SIZE; svm_region_init_args (a); return 0; } VLIB_INIT_FUNCTION (vlibmemory_init); void vl_set_memory_region_name (const char *name) { api_main_t *am = &api_main; am->region_name = name; } static int range_compare (vl_api_msg_range_t * a0, vl_api_msg_range_t * a1) { int len0, len1, clen; len0 = vec_len (a0->name); len1 = vec_len (a1->name); clen = len0 < len1 ? len0 : len1; return (strncmp ((char *) a0->name, (char *) a1->name, clen)); } static u8 * format_api_msg_range (u8 * s, va_list * args) { vl_api_msg_range_t *rp = va_arg (*args, vl_api_msg_range_t *); if (rp == 0) s = format (s, "%-20s%9s%9s", "Name", "First-ID", "Last-ID"); else s = format (s, "%-20s%9d%9d", rp->name, rp->first_msg_id, rp->last_msg_id); return s; } static clib_error_t * vl_api_show_plugin_command (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cli_cmd) { api_main_t *am = &api_main; vl_api_msg_range_t *rp = 0; int i; if (vec_len (am->msg_ranges) == 0) { vlib_cli_output (vm, "No plugin API message ranges configured..."); return 0; } rp = vec_dup (am->msg_ranges); vec_sort_with_function (rp, range_compare); vlib_cli_output (vm, "Plugin API message ID ranges...\n"); vlib_cli_output (vm, "%U", format_api_msg_range, 0 /* header */ ); for (i = 0; i < vec_len (rp); i++) vlib_cli_output (vm, "%U", format_api_msg_range, rp + i); vec_free (rp); return 0; } /* *INDENT-OFF* */ VLIB_CLI_COMMAND (cli_show_api_plugin_command, static) = { .path = "show api plugin", .short_help = "show api plugin", .function = vl_api_show_plugin_command, }; /* *INDENT-ON* */ static void vl_api_rpc_call_t_handler (vl_api_rpc_call_t * mp) { vl_api_rpc_call_reply_t *rmp; int (*fp) (void *); i32 rv = 0; vlib_main_t *vm = vlib_get_main (); if (mp->function == 0) { rv = -1; clib_warning ("rpc NULL function pointer"); } else { if (mp->need_barrier_sync) vlib_worker_thread_barrier_sync (vm); fp = uword_to_pointer (mp->function, int (*)(void *)); rv = fp (mp->data); if (mp->need_barrier_sync) vlib_worker_thread_barrier_release (vm); } if (mp->send_reply) { unix_shared_memory_queue_t *q = vl_api_client_index_to_input_queue (mp->client_index); if (q) { rmp = vl_msg_api_alloc_as_if_client (sizeof (*rmp)); rmp->_vl_msg_id = ntohs (VL_API_RPC_CALL_REPLY); rmp->context = mp->context; rmp->retval = rv; vl_msg_api_send_shmem (q, (u8 *) & rmp); } } if (mp->multicast) { clib_warning ("multicast not yet implemented..."); } } static void vl_api_rpc_call_reply_t_handler (vl_api_rpc_call_reply_t * mp) { clib_warning ("unimplemented"); } void vl_api_rpc_call_main_thread (void *fp, u8 * data, u32 data_length) { vl_api_rpc_call_t *mp; api_main_t *am = &api_main; vl_shmem_hdr_t *shmem_hdr = am->shmem_hdr; unix_shared_memory_queue_t *q; /* Main thread: call the function directly */ if (vlib_get_thread_index () == 0) { vlib_main_t *vm = vlib_get_main (); void (*call_fp) (void *); vlib_worker_thread_barrier_sync (vm); call_fp = fp; call_fp (data); vlib_worker_thread_barrier_release (vm); return; } /* Any other thread, actually do an RPC call... */ mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + data_length); memset (mp, 0, sizeof (*mp)); clib_memcpy (mp->data, data, data_length); mp->_vl_msg_id = ntohs (VL_API_RPC_CALL); mp->function = pointer_to_uword (fp); mp->need_barrier_sync = 1; /* * Use the "normal" control-plane mechanism for the main thread. * Well, almost. if the main input queue is full, we cannot * block. Otherwise, we can expect a barrier sync timeout. */ q = shmem_hdr->vl_input_queue; while (pthread_mutex_trylock (&q->mutex)) vlib_worker_thread_barrier_check (); while (PREDICT_FALSE (unix_shared_memory_queue_is_full (q))) { pthread_mutex_unlock (&q->mutex); vlib_worker_thread_barrier_check (); while (pthread_mutex_trylock (&q->mutex)) vlib_worker_thread_barrier_check (); } vl_msg_api_send_shmem_nolock (q, (u8 *) & mp); pthread_mutex_unlock (&q->mutex); } static void vl_api_trace_plugin_msg_ids_t_handler (vl_api_trace_plugin_msg_ids_t * mp) { api_main_t *am = &api_main; vl_api_msg_range_t *rp; uword *p; /* Noop (except for tracing) during normal operation */ if (am->replay_in_progress == 0) return; p = hash_get_mem (am->msg_range_by_name, mp->plugin_name); if (p == 0) { clib_warning ("WARNING: traced plugin '%s' not in current image", mp->plugin_name); return; } rp = vec_elt_at_index (am->msg_ranges, p[0]); if (rp->first_msg_id != clib_net_to_host_u16 (mp->first_msg_id)) { clib_warning ("WARNING: traced plugin '%s' first message id %d not %d", mp->plugin_name, clib_net_to_host_u16 (mp->first_msg_id), rp->first_msg_id); } if (rp->last_msg_id != clib_net_to_host_u16 (mp->last_msg_id)) { clib_warning ("WARNING: traced plugin '%s' last message id %d not %d", mp->plugin_name, clib_net_to_host_u16 (mp->last_msg_id), rp->last_msg_id); } } #define foreach_rpc_api_msg \ _(RPC_CALL,rpc_call) \ _(RPC_CALL_REPLY,rpc_call_reply) #define foreach_plugin_trace_msg \ _(TRACE_PLUGIN_MSG_IDS,trace_plugin_msg_ids) static clib_error_t * rpc_api_hookup (vlib_main_t * vm) { api_main_t *am = &api_main; #define _(N,n) \ vl_msg_api_set_handlers(VL_API_##N, #n, \ vl_api_##n##_t_handler, \ vl_noop_handler, \ vl_noop_handler, \ vl_api_##n##_t_print, \ sizeof(vl_api_##n##_t), 0 /* do not trace */); foreach_rpc_api_msg; #undef _ #define _(N,n) \ vl_msg_api_set_handlers(VL_API_##N, #n, \ vl_api_##n##_t_handler, \ vl_noop_handler, \ vl_noop_handler, \ vl_api_##n##_t_print, \ sizeof(vl_api_##n##_t), 1 /* do trace */); foreach_plugin_trace_msg; #undef _ /* No reason to halt the parade to create a trace record... */ am->is_mp_safe[VL_API_TRACE_PLUGIN_MSG_IDS] = 1; return 0; } VLIB_API_INIT_FUNCTION (rpc_api_hookup); typedef enum { DUMP, CUSTOM_DUMP, REPLAY, INITIALIZERS, } vl_api_replay_t; u8 * format_vl_msg_api_trace_status (u8 * s, va_list * args) { api_main_t *am = va_arg (*args, api_main_t *); vl_api_trace_which_t which = va_arg (*args, vl_api_trace_which_t); vl_api_trace_t *tp; char *trace_name; switch (which) { case VL_API_TRACE_TX: tp = am->tx_trace; trace_name = "TX trace"; break; case VL_API_TRACE_RX: tp = am->rx_trace; trace_name = "RX trace"; break; default: abort (); } if (tp == 0) { s = format (s, "%s: not yet configured.\n", trace_name); return s; } s = format (s, "%s: used %d of %d items, %s enabled, %s wrapped\n", trace_name, vec_len (tp->traces), tp->nitems, tp->enabled ? "is" : "is not", tp->wrapped ? "has" : "has not"); return s; } void vl_msg_api_custom_dump_configure (api_main_t * am) __attribute__ ((weak)); void vl_msg_api_custom_dump_configure (api_main_t * am) { } static void vl_msg_api_process_file (vlib_main_t * vm, u8 * filename, u32 first_index, u32 last_index, vl_api_replay_t which) { vl_api_trace_file_header_t *hp; int i, fd; struct stat statb; size_t file_size; u8 *msg; u8 endian_swap_needed = 0; api_main_t *am = &api_main; u8 *tmpbuf = 0; u32 nitems; void **saved_print_handlers = 0; fd = open ((char *) filename, O_RDONLY); if (fd < 0) { vlib_cli_output (vm, "Couldn't open %s\n", filename); return; } if (fstat (fd, &statb) < 0) { vlib_cli_output (vm, "Couldn't stat %s\n", filename); close (fd); return; } if (!(statb.st_mode & S_IFREG) || (statb.st_size < sizeof (*hp))) { vlib_cli_output (vm, "File not plausible: %s\n", filename); close (fd); return; } file_size = statb.st_size; file_size = (file_size + 4095) & ~(4096); hp = mmap (0, file_size, PROT_READ, MAP_PRIVATE, fd, 0); if (hp == (vl_api_trace_file_header_t *) MAP_FAILED) { vlib_cli_output (vm, "mmap failed: %s\n", filename); close (fd); return; } close (fd); if ((clib_arch_is_little_endian && hp->endian == VL_API_BIG_ENDIAN) || (clib_arch_is_big_endian && hp->endian == VL_API_LITTLE_ENDIAN)) endian_swap_needed = 1; if (endian_swap_needed) nitems = ntohl (hp->nitems); else nitems = hp->nitems; if (last_index == (u32) ~ 0) { last_index = nitems - 1; } if (first_index >= nitems || last_index >= nitems) { vlib_cli_output (vm, "Range (%d, %d) outside file range (0, %d)\n", first_index, last_index, nitems - 1); munmap (hp, file_size); return; } if (hp->wrapped) vlib_cli_output (vm, "Note: wrapped/incomplete trace, results may vary\n"); if (which == CUSTOM_DUMP) { saved_print_handlers = (void **) vec_dup (am->msg_print_handlers); vl_msg_api_custom_dump_configure (am); } msg = (u8 *) (hp + 1); for (i = 0; i < first_index; i++) { trace_cfg_t *cfgp; int size; u16 msg_id; size = clib_host_to_net_u32 (*(u32 *) msg); msg += sizeof (u32); if (clib_arch_is_little_endian) msg_id = ntohs (*((u16 *) msg)); else msg_id = *((u16 *) msg); cfgp = am->api_trace_cfg + msg_id; if (!cfgp) { vlib_cli_output (vm, "Ugh: msg id %d no trace config\n", msg_id); munmap (hp, file_size); return; } msg += size; } if (which == REPLAY) am->replay_in_progress = 1; for (; i <= last_index; i++) { trace_cfg_t *cfgp; u16 *msg_idp; u16 msg_id; int size; if (which == DUMP) vlib_cli_output (vm, "---------- trace %d -----------\n", i); size = clib_host_to_net_u32 (*(u32 *) msg); msg += sizeof (u32); if (clib_arch_is_little_endian) msg_id = ntohs (*((u16 *) msg)); else msg_id = *((u16 *) msg); cfgp = am->api_trace_cfg + msg_id; if (!cfgp) { vlib_cli_output (vm, "Ugh: msg id %d no trace config\n", msg_id); munmap (hp, file_size); vec_free (tmpbuf); am->replay_in_progress = 0; return; } /* Copy the buffer (from the read-only mmap'ed file) */ vec_validate (tmpbuf, size - 1 + sizeof (uword)); clib_memcpy (tmpbuf + sizeof (uword), msg, size); memset (tmpbuf, 0xf, sizeof (uword)); /* * Endian swap if needed. All msg data is supposed to be * in network byte order. All msg handlers are supposed to * know that. The generic message dumpers don't know that. * One could fix apigen, I suppose. */ if ((which == DUMP && clib_arch_is_little_endian) || endian_swap_needed) { void (*endian_fp) (void *); if (msg_id >= vec_len (am->msg_endian_handlers) || (am->msg_endian_handlers[msg_id] == 0)) { vlib_cli_output (vm, "Ugh: msg id %d no endian swap\n", msg_id); munmap (hp, file_size); vec_free (tmpbuf); am->replay_in_progress = 0; return; } endian_fp = am->msg_endian_handlers[msg_id]; (*endian_fp) (tmpbuf + sizeof (uword)); } /* msg_id always in network byte order */ if (clib_arch_is_little_endian) { msg_idp = (u16 *) (tmpbuf + sizeof (uword)); *msg_idp = msg_id; } switch (which) { case CUSTOM_DUMP: case DUMP: if (msg_id < vec_len (am->msg_print_handlers) && am->msg_print_handlers[msg_id]) { u8 *(*print_fp) (void *, void *); print_fp = (void *) am->msg_print_handlers[msg_id]; (*print_fp) (tmpbuf + sizeof (uword), vm); } else { vlib_cli_output (vm, "Skipping msg id %d: no print fcn\n", msg_id); break; } break; case INITIALIZERS: if (msg_id < vec_len (am->msg_print_handlers) && am->msg_print_handlers[msg_id]) { u8 *s; int j; u8 *(*print_fp) (void *, void *); print_fp = (void *) am->msg_print_handlers[msg_id]; vlib_cli_output (vm, "/*"); (*print_fp) (tmpbuf + sizeof (uword), vm); vlib_cli_output (vm, "*/\n"); s = format (0, "static u8 * vl_api_%s_%d[%d] = {", am->msg_names[msg_id], i, am->api_trace_cfg[msg_id].size); for (j = 0; j < am->api_trace_cfg[msg_id].size; j++) { if ((j & 7) == 0) s = format (s, "\n "); s = format (s, "0x%02x,", tmpbuf[sizeof (uword) + j]); } s = format (s, "\n};\n%c", 0); vlib_cli_output (vm, (char *) s); vec_free (s); } break; case REPLAY: if (msg_id < vec_len (am->msg_print_handlers) && am->msg_print_handlers[msg_id] && cfgp->replay_enable) { void (*handler) (void *); handler = (void *) am->msg_handlers[msg_id]; if (!am->is_mp_safe[msg_id]) vl_msg_api_barrier_sync (); (*handler) (tmpbuf + sizeof (uword)); if (!am->is_mp_safe[msg_id]) vl_msg_api_barrier_release (); } else { if (cfgp->replay_enable) vlib_cli_output (vm, "Skipping msg id %d: no handler\n", msg_id); break; } break; } _vec_len (tmpbuf) = 0; msg += size; } if (saved_print_handlers) { clib_memcpy (am->msg_print_handlers, saved_print_handlers, vec_len (am->msg_print_handlers) * sizeof (void *)); vec_free (saved_print_handlers); } munmap (hp, file_size); vec_free (tmpbuf); am->replay_in_progress = 0; } static clib_error_t * api_trace_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { u32 nitems = 256 << 10; api_main_t *am = &api_main; vl_api_trace_which_t which = VL_API_TRACE_RX; u8 *filename; u32 first = 0; u32 last = (u32) ~ 0; FILE *fp; int rv; while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { if (unformat (input, "on") || unformat (input, "enable")) { if (unformat (input, "nitems %d", &nitems)) ; vl_msg_api_trace_configure (am, which, nitems); vl_msg_api_trace_onoff (am, which, 1 /* on */ ); } else if (unformat (input, "off")) { vl_msg_api_trace_onoff (am, which, 0); } else if (unformat (input, "save %s", &filename)) { u8 *chroot_filename; if (strstr ((char *) filename, "..") || index ((char *) filename, '/')) { vlib_cli_output (vm, "illegal characters in filename '%s'", filename); return 0; } chroot_filename = format (0, "/tmp/%s%c", filename, 0); vec_free (filename); fp = fopen ((char *) chroot_filename, "w"); if (fp == NULL) { vlib_cli_output (vm, "Couldn't create %s\n", chroot_filename); return 0; } rv = vl_msg_api_trace_save (am, which, fp); fclose (fp); if (rv == -1) vlib_cli_output (vm, "API Trace data not present\n"); else if (rv == -2) vlib_cli_output (vm, "File for writing is closed\n"); else if (rv == -10) vlib_cli_output (vm, "Error while writing header to file\n"); else if (rv == -11) vlib_cli_output (vm, "Error while writing trace to file\n"); else if (rv == -12) vlib_cli_output (vm, "Error while writing end of buffer trace to file\n"); else if (rv == -13) vlib_cli_output (vm, "Error while writing start of buffer trace to file\n"); else if (rv < 0) vlib_cli_output (vm, "Unkown error while saving: %d", rv); else vlib_cli_output (vm, "API trace saved to %s\n", chroot_filename); vec_free (chroot_filename); } else if (unformat (input, "dump %s", &filename)) { vl_msg_api_process_file (vm, filename, first, last, DUMP); } else if (unformat (input, "custom-dump %s", &filename)) { vl_msg_api_process_file (vm, filename, first, last, CUSTOM_DUMP); } else if (unformat (input, "replay %s", &filename)) { vl_msg_api_process_file (vm, filename, first, last, REPLAY); } else if (unformat (input, "initializers %s", &filename)) { vl_msg_api_process_file (vm, filename, first, last, INITIALIZERS); } else if (unformat (input, "tx")) { which = VL_API_TRACE_TX; } else if (unformat (input, "first %d", &first)) { ; } else if (unformat (input, "last %d", &last)) { ; } else if (unformat (input, "status")) { vlib_cli_output (vm, "%U", format_vl_msg_api_trace_status, am, which); } else if (unformat (input, "free")) { vl_msg_api_trace_onoff (am, which, 0); vl_msg_api_trace_free (am, which); } else if (unformat (input, "post-mortem-on")) vl_msg_api_post_mortem_dump_enable_disable (1 /* enable */ ); else if (unformat (input, "post-mortem-off")) vl_msg_api_post_mortem_dump_enable_disable (0 /* enable */ ); else return clib_error_return (0, "unknown input `%U'", format_unformat_error, input); } return 0; } /* *INDENT-OFF* */ VLIB_CLI_COMMAND (api_trace_command, static) = { .path = "api trace", .short_help = "api trace [on|off][dump|save|replay <file>][status][free][post-mortem-on]", .function = api_trace_command_fn, }; /* *INDENT-ON* */ static clib_error_t * api_config_fn (vlib_main_t * vm, unformat_input_t * input) { u32 nitems = 256 << 10; vl_api_trace_which_t which = VL_API_TRACE_RX; api_main_t *am = &api_main; while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { if (unformat (input, "on") || unformat (input, "enable")) { if (unformat (input, "nitems %d", &nitems)) ; vl_msg_api_trace_configure (am, which, nitems); vl_msg_api_trace_onoff (am, which, 1 /* on */ ); vl_msg_api_post_mortem_dump_enable_disable (1 /* enable */ ); } else return clib_error_return (0, "unknown input `%U'", format_unformat_error, input); } return 0; } VLIB_CONFIG_FUNCTION (api_config_fn, "api-trace"); static clib_error_t * api_queue_config_fn (vlib_main_t * vm, unformat_input_t * input) { api_main_t *am = &api_main; u32 nitems; while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { if (unformat (input, "length %d", &nitems) || (unformat (input, "len %d", &nitems))) { if (nitems >= 1024) am->vlib_input_queue_length = nitems; else clib_warning ("vlib input queue length %d too small, ignored", nitems); } else return clib_error_return (0, "unknown input `%U'", format_unformat_error, input); } return 0; } VLIB_CONFIG_FUNCTION (api_queue_config_fn, "api-queue"); /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */