| author | Klement Sekera <ksekera@cisco.com> | 2016-09-29 14:43:44 +0200 |
|---|---|---|
| committer | Dave Barach <openvpp@barachs.net> | 2016-12-07 19:54:12 +0000 |
| commit | 0e3c0de1ed87f3cdf16e26e05e39ea6eebeafb18 (patch) | |
| tree | dd5f11c08311a95ce56eb8c33d4e8681940bf877 | |
| parent | d171d48edc1672564db8bb920586b8ea220df14c (diff) | |
BFD: basic asynchronous session up/down
This adds basic, work-in-progress BFD session handling. Only
asynchronous mode is supported at the moment, and setting the session
flags does not work yet.
Change-Id: Idba27f721b5c35be5a66a6d202a63d23ff7ecf6f
Signed-off-by: Klement Sekera <ksekera@cisco.com>
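
For orientation, a minimal sketch of how the session object added in this change (test/bfd.py) is driven from a test case. It mirrors the new test_bfd.py; the `BFDUsageSketch` class name is invented for illustration, and the usual VppTestCase scaffolding (pg interface creation, IPv4 config and ARP resolution in setUpClass) is assumed to already be in place:

```python
from socket import AF_INET

from bfd import VppBFDUDPSession
from framework import VppTestCase


class BFDUsageSketch(VppTestCase):
    """ Sketch: asynchronous BFD session over pg0 (IPv4) """

    def test_session_lifecycle(self):
        # ask VPP to deliver BFD state-change events to the test client
        self.vapi.want_bfd_events()
        # describe the session: VPP-side interface plus peer address
        session = VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip4,
                                   af=AF_INET)
        session.add_vpp_config()     # issues bfd_udp_add
        # per the commit message, flag handling is still work in progress
        session.admin_up()           # issues bfd_session_set_flags(..., 1)
        self.logger.debug("Session state is %s" % str(session.state))
        session.remove_vpp_config()  # issues bfd_udp_del
        self.vapi.want_bfd_events(enable_disable=0)
```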
30 files changed, 3504 insertions, 201 deletions
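
The change also glues a scapy `BFD` layer to UDP destination port 3784 (test/bfd.py) and uses it from `BFDTestSession` to emulate the peer; as a quick orientation before the diff below, a sketch of hand-crafting an Init control frame the way the new `create_packet` helper in test_bfd.py does. `make_bfd_init_frame` and its arguments are illustrative names, and the `interface` object is assumed to be a framework pg interface with local/remote MAC and IPv4 attributes:

```python
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP

from bfd import BFD, BFDDiagCode, BFDState


def make_bfd_init_frame(interface, my_disc, your_disc):
    """Build a BFD Init control frame as the emulated peer would send it."""
    return (Ether(src=interface.remote_mac, dst=interface.local_mac) /
            IP(src=interface.remote_ip4, dst=interface.local_ip4, ttl=255) /
            UDP(sport=50000, dport=BFD.udp_dport) /  # 3784 per RFC 5881
            BFD(state=BFDState.init,
                diag=BFDDiagCode.no_diagnostic,
                detect_mult=3,
                my_discriminator=my_disc,
                your_discriminator=your_disc,
                desired_min_tx_interval=500000,
                required_min_rx_interval=500000))
```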
@@ -214,6 +214,8 @@ build-vpp-api: $(BR)/.bootstrap.ok VPP_PYTHON_PREFIX=$(BR)/python + + #$(if $(filter-out $(3),retest),make -C $(BR) PLATFORM=$(1) TAG=$(2) vpp-install ,) define test $(if $(filter-out $(3),retest),make -C $(BR) PLATFORM=$(1) TAG=$(2) vpp-api-install plugins-install vpp-install,) make -C test \ diff --git a/test/bfd.py b/test/bfd.py new file mode 100644 index 00000000000..beacd80f687 --- /dev/null +++ b/test/bfd.py @@ -0,0 +1,216 @@ +from socket import AF_INET, AF_INET6 +from scapy.all import * +from scapy.packet import * +from scapy.fields import * +from framework import * +from vpp_object import * +from util import NumericConstant + + +class BFDDiagCode(NumericConstant): + """ BFD Diagnostic Code """ + no_diagnostic = 0 + control_detection_time_expired = 1 + echo_function_failed = 2 + neighbor_signaled_session_down = 3 + forwarding_plane_reset = 4 + path_down = 5 + concatenated_path_down = 6 + administratively_down = 7 + reverse_concatenated_path_down = 8 + + desc_dict = { + no_diagnostic: "No diagnostic", + control_detection_time_expired: "Control Detection Time Expired", + echo_function_failed: "Echo Function Failed", + neighbor_signaled_session_down: "Neighbor Signaled Session Down", + forwarding_plane_reset: "Forwarding Plane Reset", + path_down: "Path Down", + concatenated_path_down: "Concatenated Path Down", + administratively_down: "Administratively Down", + reverse_concatenated_path_down: "Reverse Concatenated Path Down", + } + + def __init__(self, value): + NumericConstant.__init__(self, value) + + +class BFDState(NumericConstant): + """ BFD State """ + admin_down = 0 + down = 1 + init = 2 + up = 3 + + desc_dict = { + admin_down: "AdminDown", + down: "Down", + init: "Init", + up: "Up", + } + + def __init__(self, value): + NumericConstant.__init__(self, value) + + +class BFD(Packet): + + udp_dport = 3784 #: BFD destination port per RFC 5881 + udp_sport_min = 49152 #: BFD source port min value per RFC 5881 + udp_sport_max = 65535 #: BFD source port max value per RFC 5881 + + name = "BFD" + + fields_desc = [ + BitField("version", 1, 3), + BitEnumField("diag", 0, 5, BFDDiagCode.desc_dict), + BitEnumField("state", 0, 2, BFDState.desc_dict), + FlagsField("flags", 0, 6, ['P', 'F', 'C', 'A', 'D', 'M']), + XByteField("detect_mult", 0), + XByteField("length", 24), + BitField("my_discriminator", 0, 32), + BitField("your_discriminator", 0, 32), + BitField("desired_min_tx_interval", 0, 32), + BitField("required_min_rx_interval", 0, 32), + BitField("required_min_echo_rx_interval", 0, 32)] + + def mysummary(self): + return self.sprintf("BFD(my_disc=%BFD.my_discriminator%," + "your_disc=%BFD.your_discriminator%)") + +# glue the BFD packet class to scapy parser +bind_layers(UDP, BFD, dport=BFD.udp_dport) + + +class VppBFDUDPSession(VppObject): + """ Represents BFD UDP session in VPP """ + + @property + def test(self): + """ Test which created this session """ + return self._test + + @property + def interface(self): + """ Interface on which this session lives """ + return self._interface + + @property + def af(self): + """ Address family - AF_INET or AF_INET6 """ + return self._af + + @property + def bs_index(self): + """ BFD session index from VPP """ + if self._bs_index is not None: + return self._bs_index + raise NotConfiguredException("not configured") + + @property + def local_addr(self): + """ BFD session local address (VPP address) """ + if self._local_addr is None: + return self._interface.local_ip4 + return self._local_addr + + @property + def local_addr_n(self): + """ 
BFD session local address (VPP address) - raw, suitable for API """ + if self._local_addr is None: + return self._interface.local_ip4n + return self._local_addr_n + + @property + def peer_addr(self): + """ BFD session peer address """ + return self._peer_addr + + @property + def peer_addr_n(self): + """ BFD session peer address - raw, suitable for API """ + return self._peer_addr_n + + @property + def state(self): + """ BFD session state """ + result = self.test.vapi.bfd_udp_session_dump() + session = None + for s in result: + if s.sw_if_index == self.interface.sw_if_index: + if self.af == AF_INET \ + and s.is_ipv6 == 0 \ + and self.interface.local_ip4n == s.local_addr[:4] \ + and self.interface.remote_ip4n == s.peer_addr[:4]: + session = s + break + if session is None: + raise Exception( + "Could not find BFD session in VPP response: %s" % repr(result)) + return session.state + + @property + def desired_min_tx(self): + return self._desired_min_tx + + @property + def required_min_rx(self): + return self._required_min_rx + + @property + def detect_mult(self): + return self._detect_mult + + def __init__(self, test, interface, peer_addr, local_addr=None, af=AF_INET): + self._test = test + self._interface = interface + self._af = af + self._local_addr = local_addr + self._peer_addr = peer_addr + self._peer_addr_n = socket.inet_pton(af, peer_addr) + self._bs_index = None + self._desired_min_tx = 200000 # 0.2s + self._required_min_rx = 200000 # 0.2s + self._detect_mult = 3 # 3 packets need to be missed + + def add_vpp_config(self): + is_ipv6 = 1 if AF_INET6 == self.af else 0 + result = self.test.vapi.bfd_udp_add( + self._interface.sw_if_index, + self.desired_min_tx, + self.required_min_rx, + self.detect_mult, + self.local_addr_n, + self.peer_addr_n, + is_ipv6=is_ipv6) + self._bs_index = result.bs_index + + def query_vpp_config(self): + result = self.test.vapi.bfd_udp_session_dump() + session = None + for s in result: + if s.sw_if_index == self.interface.sw_if_index: + if self.af == AF_INET \ + and s.is_ipv6 == 0 \ + and self.interface.local_ip4n == s.local_addr[:4] \ + and self.interface.remote_ip4n == s.peer_addr[:4]: + session = s + break + if session is None: + return False + return True + + def remove_vpp_config(self): + if hasattr(self, '_bs_index'): + is_ipv6 = 1 if AF_INET6 == self._af else 0 + self.test.vapi.bfd_udp_del( + self._interface.sw_if_index, + self.local_addr_n, + self.peer_addr_n, + is_ipv6=is_ipv6) + + def object_id(self): + return "bfd-udp-%d" % self.bs_index + + def admin_up(self): + self.test.vapi.bfd_session_set_flags(self.bs_index, 1) diff --git a/test/framework.py b/test/framework.py index b3cbb08a4b5..aa4f2fdf8fc 100644 --- a/test/framework.py +++ b/test/framework.py @@ -462,6 +462,34 @@ class VppTestCase(unittest.TestCase): if info.dst == dst_index: return info + def assert_equal(self, real_value, expected_value, name_or_class=None): + if name_or_class is None: + self.assertEqual(real_value, expected_value, msg) + return + try: + msg = "Invalid %s: %d('%s') does not match expected value %d('%s')" + msg = msg % (getdoc(name_or_class).strip(), + real_value, str(name_or_class(real_value)), + expected_value, str(name_or_class(expected_value))) + except: + msg = "Invalid %s: %s does not match expected value %s" % ( + name_or_class, real_value, expected_value) + + self.assertEqual(real_value, expected_value, msg) + + def assert_in_range( + self, + real_value, + expected_min, + expected_max, + name=None): + if name is None: + msg = None + else: + msg = "Invalid %s: %s out of 
range <%s,%s>" % ( + name, real_value, expected_min, expected_max) + self.assertTrue(expected_min <= real_value <= expected_max, msg) + class VppTestResult(unittest.TestResult): """ diff --git a/test/hook.py b/test/hook.py index 90e9bbf56b0..f3e5f8808bd 100644 --- a/test/hook.py +++ b/test/hook.py @@ -100,9 +100,13 @@ class PollHook(Hook): signaldict = dict( (k, v) for v, k in reversed(sorted(signal.__dict__.items())) if v.startswith('SIG') and not v.startswith('SIG_')) + + if self.testcase.vpp.returncode in signaldict: + s = signaldict[abs(self.testcase.vpp.returncode)] + else: + s = "unknown" msg = "VPP subprocess died unexpectedly with returncode %d [%s]" % ( - self.testcase.vpp.returncode, - signaldict[abs(self.testcase.vpp.returncode)]) + self.testcase.vpp.returncode, s) self.logger.critical(msg) core_path = self.testcase.tempdir + '/core' if os.path.isfile(core_path): @@ -110,27 +114,27 @@ class PollHook(Hook): self.testcase.vpp_dead = True raise VppDiedError(msg) - def after_api(self, api_name, api_args): + def before_api(self, api_name, api_args): """ - Check if VPP died after executing an API + Check if VPP died before executing an API :param api_name: name of the API :param api_args: tuple containing the API arguments :raises VppDiedError: exception if VPP is not running anymore """ - super(PollHook, self).after_api(api_name, api_args) + super(PollHook, self).before_api(api_name, api_args) self.poll_vpp() - def after_cli(self, cli): + def before_cli(self, cli): """ - Check if VPP died after executing a CLI + Check if VPP died before executing a CLI :param cli: CLI string :raises Exception: exception if VPP is not running anymore """ - super(PollHook, self).after_cli(cli) + super(PollHook, self).before_cli(cli) self.poll_vpp() diff --git a/test/template_bd.py b/test/template_bd.py index 6c6fb3da200..01e8b855004 100644 --- a/test/template_bd.py +++ b/test/template_bd.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -from abc import abstractmethod +from abc import abstractmethod, ABCMeta from scapy.layers.l2 import Ether, Raw from scapy.layers.inet import IP, UDP @@ -8,6 +8,7 @@ from scapy.layers.inet import IP, UDP class BridgeDomain(object): """ Bridge domain abstraction """ + __metaclass__ = ABCMeta @property def frame_pg0_to_pg1(self): diff --git a/test/test_bfd.py b/test/test_bfd.py new file mode 100644 index 00000000000..20d3aea5e0c --- /dev/null +++ b/test/test_bfd.py @@ -0,0 +1,273 @@ +#!/usr/bin/env python + +import unittest +import time +from random import randint +from bfd import * +from framework import * +from util import ppp + + +class BFDCLITestCase(VppTestCase): + """Bidirectional Forwarding Detection (BFD) - CLI""" + + @classmethod + def setUpClass(cls): + super(BFDCLITestCase, cls).setUpClass() + + try: + cls.create_pg_interfaces([0]) + cls.pg0.config_ip4() + cls.pg0.resolve_arp() + + except Exception: + super(BFDCLITestCase, cls).tearDownClass() + raise + + def test_add_bfd(self): + """ create a BFD session """ + session = VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip4) + session.add_vpp_config() + self.logger.debug("Session state is %s" % str(session.state)) + session.remove_vpp_config() + session = VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip4) + session.add_vpp_config() + self.logger.debug("Session state is %s" % str(session.state)) + session.remove_vpp_config() + + def test_double_add(self): + """ create the same BFD session twice (negative case) """ + session = VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip4) + session.add_vpp_config() + try: + 
session.add_vpp_config() + except: + session.remove_vpp_config() + return + session.remove_vpp_config() + raise Exception("Expected failure while adding duplicate " + "configuration") + + +def create_packet(interface, ttl=255, src_port=50000, **kwargs): + p = (Ether(src=interface.remote_mac, dst=interface.local_mac) / + IP(src=interface.remote_ip4, dst=interface.local_ip4, ttl=ttl) / + UDP(sport=src_port, dport=BFD.udp_dport) / + BFD(*kwargs)) + return p + + +def verify_ip(test, packet, local_ip, remote_ip): + """ Verify correctness of IP layer. """ + ip = packet[IP] + test.assert_equal(ip.src, local_ip, "IP source address") + test.assert_equal(ip.dst, remote_ip, "IP destination address") + test.assert_equal(ip.ttl, 255, "IP TTL") + + +def verify_udp(test, packet): + """ Verify correctness of UDP layer. """ + udp = packet[UDP] + test.assert_equal(udp.dport, BFD.udp_dport, "UDP destination port") + test.assert_in_range(udp.sport, BFD.udp_sport_min, BFD.udp_sport_max, + "UDP source port") + + +class BFDTestSession(object): + + def __init__(self, test, interface, detect_mult=3): + self.test = test + self.interface = interface + self.bfd_values = { + 'my_discriminator': 0, + 'desired_min_tx_interval': 500000, + 'detect_mult': detect_mult, + 'diag': BFDDiagCode.no_diagnostic, + } + + def update(self, **kwargs): + self.bfd_values.update(kwargs) + + def create_packet(self): + packet = create_packet(self.interface) + for name, value in self.bfd_values.iteritems(): + packet[BFD].setfieldval(name, value) + return packet + + def send_packet(self): + p = self.create_packet() + self.test.logger.debug(ppp("Sending packet:", p)) + self.test.pg0.add_stream([p]) + self.test.pg_start() + + def verify_packet(self, packet): + """ Verify correctness of BFD layer. """ + bfd = packet[BFD] + self.test.assert_equal(bfd.version, 1, "BFD version") + self.test.assert_equal(bfd.your_discriminator, + self.bfd_values['my_discriminator'], + "BFD - your discriminator") + + +class BFDTestCase(VppTestCase): + """Bidirectional Forwarding Detection (BFD)""" + + @classmethod + def setUpClass(cls): + super(BFDTestCase, cls).setUpClass() + try: + cls.create_pg_interfaces([0, 1]) + cls.pg0.config_ip4() + cls.pg0.generate_remote_hosts() + cls.pg0.configure_ipv4_neighbors() + cls.pg0.admin_up() + cls.pg0.resolve_arp() + + except Exception: + super(BFDTestCase, cls).tearDownClass() + raise + + def setUp(self): + self.vapi.want_bfd_events() + self.vpp_session = VppBFDUDPSession(self, self.pg0, self.pg0.remote_ip4) + self.vpp_session.add_vpp_config() + self.vpp_session.admin_up() + self.test_session = BFDTestSession(self, self.pg0) + + def tearDown(self): + self.vapi.want_bfd_events(enable_disable=0) + if not self.vpp_dead: + self.vpp_session.remove_vpp_config() + super(BFDTestCase, self).tearDown() + + def verify_event(self, event, expected_state): + """ Verify correctness of event values. 
""" + e = event + self.logger.debug("Event: %s" % repr(e)) + self.assert_equal(e.bs_index, self.vpp_session.bs_index, + "BFD session index") + self.assert_equal(e.sw_if_index, self.vpp_session.interface.sw_if_index, + "BFD interface index") + is_ipv6 = 0 + if self.vpp_session.af == AF_INET6: + is_ipv6 = 1 + self.assert_equal(e.is_ipv6, is_ipv6, "is_ipv6") + if self.vpp_session.af == AF_INET: + self.assert_equal(e.local_addr[:4], self.vpp_session.local_addr_n, + "Local IPv4 address") + self.assert_equal(e.peer_addr[:4], self.vpp_session.peer_addr_n, + "Peer IPv4 address") + else: + self.assert_equal(e.local_addr, self.vpp_session.local_addr_n, + "Local IPv6 address") + self.assert_equal(e.peer_addr, self.vpp_session.peer_addr_n, + "Peer IPv6 address") + self.assert_equal(e.state, expected_state, BFDState) + + def wait_for_bfd_packet(self, timeout=1): + p = self.pg0.wait_for_packet(timeout=timeout) + bfd = p[BFD] + if bfd is None: + raise Exception(ppp("Unexpected or invalid BFD packet:", p)) + if bfd.payload: + raise Exception(ppp("Unexpected payload in BFD packet:", bfd)) + verify_ip(self, p, self.pg0.local_ip4, self.pg0.remote_ip4) + verify_udp(self, p) + self.test_session.verify_packet(p) + return p + + def test_slow_timer(self): + """ Slow timer """ + + self.pg_enable_capture([self.pg0]) + expected_packets = 10 + self.logger.info("Waiting for %d BFD packets" % expected_packets) + self.wait_for_bfd_packet() + for i in range(expected_packets): + before = time.time() + self.wait_for_bfd_packet() + after = time.time() + self.assert_in_range( + after - before, 0.75, 1, "time between slow packets") + before = after + + def test_zero_remote_min_rx(self): + """ Zero RemoteMinRxInterval """ + self.pg_enable_capture([self.pg0]) + p = self.wait_for_bfd_packet() + self.test_session.update(my_discriminator=randint(0, 40000000), + your_discriminator=p[BFD].my_discriminator, + state=BFDState.init, + required_min_rx_interval=0) + self.test_session.send_packet() + e = self.vapi.wait_for_event(1, "bfd_udp_session_details") + self.verify_event(e, expected_state=BFDState.up) + + try: + p = self.pg0.wait_for_packet(timeout=1) + except: + return + raise Exception(ppp("Received unexpected BFD packet:", p)) + + def bfd_conn_up(self): + self.pg_enable_capture([self.pg0]) + self.logger.info("Waiting for slow hello") + p = self.wait_for_bfd_packet() + self.logger.info("Sending Init") + self.test_session.update(my_discriminator=randint(0, 40000000), + your_discriminator=p[BFD].my_discriminator, + state=BFDState.init, + required_min_rx_interval=500000) + self.test_session.send_packet() + self.logger.info("Waiting for event") + e = self.vapi.wait_for_event(1, "bfd_udp_session_details") + self.verify_event(e, expected_state=BFDState.up) + self.logger.info("Session is Up") + self.test_session.update(state=BFDState.up) + + def test_conn_up(self): + """ Basic connection up """ + self.bfd_conn_up() + + def test_hold_up(self): + """ Hold BFD up """ + self.bfd_conn_up() + for i in range(5): + self.wait_for_bfd_packet() + self.test_session.send_packet() + + def test_conn_down(self): + """ Session down after inactivity """ + self.bfd_conn_up() + self.wait_for_bfd_packet() + self.assert_equal( + 0, len(self.vapi.collect_events()), + "number of bfd events") + self.wait_for_bfd_packet() + self.assert_equal( + 0, len(self.vapi.collect_events()), + "number of bfd events") + e = self.vapi.wait_for_event(1, "bfd_udp_session_details") + self.verify_event(e, expected_state=BFDState.down) + + @unittest.skip("this test is not working 
yet") + def test_large_required_min_rx(self): + self.bfd_conn_up() + interval = 5000000 + self.test_session.update(required_min_rx_interval=interval) + self.test_session.send_packet() + now = time.time() + count = 1 + while time.time() < now + interval / 1000000: + try: + p = self.wait_for_bfd_packet() + if count > 1: + self.logger.error(ppp("Received unexpected packet:", p)) + count += 1 + except: + pass + self.assert_equal(count, 1, "number of packets received") + + +if __name__ == '__main__': + unittest.main(testRunner=VppTestRunner) diff --git a/test/util.py b/test/util.py index 643377f5740..f6c6acd41e9 100644 --- a/test/util.py +++ b/test/util.py @@ -1,5 +1,6 @@ import socket import sys +from abc import abstractmethod, ABCMeta from cStringIO import StringIO @@ -14,6 +15,27 @@ def ppp(headline, packet): return o.getvalue() +class NumericConstant(object): + __metaclass__ = ABCMeta + + desc_dict = {} + + @abstractmethod + def __init__(self, value): + self._value = value + + def __int__(self): + return self._value + + def __long__(self): + return self._value + + def __str__(self): + if self._value in self.desc_dict: + return self.desc_dict[self._value] + return "" + + class Host(object): """ Generic test host "connected" to VPPs interface. """ diff --git a/test/vpp_object.py b/test/vpp_object.py new file mode 100644 index 00000000000..2b71fc1fd39 --- /dev/null +++ b/test/vpp_object.py @@ -0,0 +1,79 @@ +from abc import ABCMeta, abstractmethod + + +class VppObject(object): + """ Abstract vpp object """ + __metaclass__ = ABCMeta + + def __init__(self): + VppObjectRegistry().register(self) + + @abstractmethod + def add_vpp_config(self): + """ Add the configuration for this object to vpp. """ + pass + + @abstractmethod + def query_vpp_config(self): + """Query the vpp configuration. + + :return: True if the object is configured""" + pass + + @abstractmethod + def remove_vpp_config(self): + """ Remove the configuration for this object from vpp. """ + pass + + @abstractmethod + def object_id(self): + """ Return a unique string representing this object. """ + pass + + +class VppObjectRegistry(object): + """ Class which handles automatic configuration cleanup. """ + _shared_state = {} + + def __init__(self): + self.__dict__ = self._shared_state + if not hasattr(self, "_object_registry"): + self._object_registry = [] + if not hasattr(self, "_object_dict"): + self._object_dict = dict() + + def register(self, o): + """ Register an object in the registry. """ + if not o.unique_id() in self._object_dict: + self._object_registry.append(o) + self._object_dict[o.unique_id()] = o + else: + print "not adding duplicate %s" % o + + def remove_vpp_config(self, logger): + """ + Remove configuration (if present) from vpp and then remove all objects + from the registry. 
+ """ + if not self._object_registry: + logger.info("No objects registered for auto-cleanup.") + return + logger.info("Removing VPP configuration for registered objects") + for o in reversed(self._object_registry): + if o.query_vpp_config(): + logger.info("Removing %s", o) + o.remove_vpp_config() + else: + logger.info("Skipping %s, configuration not present", o) + failed = [] + for o in self._object_registry: + if o.query_vpp_config(): + failed.append(o) + self._object_registry = [] + self._object_dict = dict() + if failed: + logger.error("Couldn't remove configuration for object(s):") + for x in failed: + logger.error(repr(x)) + raise Exception("Couldn't remove configuration for object(s): %s" % + (", ".join(str(x) for x in failed))) diff --git a/test/vpp_papi_provider.py b/test/vpp_papi_provider.py index 2f931803e14..5e80a03e2c5 100644 --- a/test/vpp_papi_provider.py +++ b/test/vpp_papi_provider.py @@ -1,7 +1,12 @@ -import os, fnmatch -import array +import os +import fnmatch +import time from hook import Hook +# Sphinx creates auto-generated documentation by importing the python source +# files and collecting the docstrings from them. The NO_VPP_PAPI flag allows the +# vpp_papi_provider.py file to be importable without having to build the whole +# vpp api if the user only wishes to generate the test documentation. do_import = True try: no_vpp_papi = os.getenv("NO_VPP_PAPI") @@ -17,9 +22,11 @@ if do_import: MPLS_IETF_MAX_LABEL = 0xfffff MPLS_LABEL_INVALID = MPLS_IETF_MAX_LABEL + 1 + class L2_VTR_OP: L2_POP_1 = 3 + class VppPapiProvider(object): """VPP-api provider using vpp-papi @@ -35,12 +42,13 @@ class VppPapiProvider(object): self.test_class = test_class jsonfiles = [] - install_dir=os.getenv('VPP_TEST_INSTALL_PATH') + install_dir = os.getenv('VPP_TEST_INSTALL_PATH') for root, dirnames, filenames in os.walk(install_dir): for filename in fnmatch.filter(filenames, '*.api.json'): jsonfiles.append(os.path.join(root, filename)) self.papi = VPP(jsonfiles) + self._events = list() def register_hook(self, hook): """Replace hook registration with new hook @@ -50,9 +58,35 @@ class VppPapiProvider(object): """ self.hook = hook + def collect_events(self): + e = self._events + self._events = list() + return e + + def wait_for_event(self, timeout, name=None): + limit = time.time() + timeout + while time.time() < limit: + if self._events: + e = self._events.pop(0) + if name and type(e).__name__ != name: + raise Exception( + "Unexpected event received: %s, expected: %s" % + (type(e).__name__, name)) + return e + time.sleep(0) # yield + if name is not None: + raise Exception("Event %s did not occur within timeout" % name) + raise Exception("Event did not occur within timeout") + + def __call__(self, name, event): + # FIXME use the name instead of relying on type(e).__name__ ? + # FIXME #2 if this throws, it is eaten silently, Ole? 
+ self._events.append(event) + def connect(self): """Connect the API to VPP""" self.papi.connect(self.name, self.shm_prefix) + self.papi.register_event_callback(self) def disconnect(self): """Disconnect the API from VPP""" @@ -73,7 +107,7 @@ class VppPapiProvider(object): if hasattr(reply, 'retval') and reply.retval != expected_retval: msg = "API call failed, expected retval == %d, got %s" % ( expected_retval, repr(reply)) - self.test_class.test_instance.logger.error(msg) + self.test_class.logger.error(msg) raise Exception(msg) self.hook.after_api(api_fn.__name__, api_args) return reply @@ -116,7 +150,7 @@ class VppPapiProvider(object): """ return self.api(self.papi.pg_create_interface, - { "interface_id" : pg_index }) + {"interface_id": pg_index}) def sw_interface_dump(self, filter=None): """ @@ -125,7 +159,7 @@ class VppPapiProvider(object): """ if filter is not None: - args = {"name_filter_valid" : 1, "name_filter" : filter} + args = {"name_filter_valid": 1, "name_filter": filter} else: args = {} return self.api(self.papi.sw_interface_dump, args) @@ -140,8 +174,8 @@ class VppPapiProvider(object): """ return self.api(self.papi.sw_interface_set_table, - { 'sw_if_index' : sw_if_index, 'is_ipv6' : is_ipv6, - 'vrf_id' : table_id}) + {'sw_if_index': sw_if_index, 'is_ipv6': is_ipv6, + 'vrf_id': table_id}) def sw_interface_add_del_address(self, sw_if_index, addr, addr_len, is_ipv6=0, is_add=1, del_all=0): @@ -156,12 +190,12 @@ class VppPapiProvider(object): """ return self.api(self.papi.sw_interface_add_del_address, - { 'sw_if_index' : sw_if_index, - 'is_add' : is_add, - 'is_ipv6' : is_ipv6, - 'del_all' : del_all, - 'address_length' : addr_len, - 'address' : addr}) + {'sw_if_index': sw_if_index, + 'is_add': is_add, + 'is_ipv6': is_ipv6, + 'del_all': del_all, + 'address_length': addr_len, + 'address': addr}) def sw_interface_enable_disable_mpls(self, sw_if_index, is_enable=1): @@ -172,12 +206,12 @@ class VppPapiProvider(object): """ return self.api(self.papi.sw_interface_set_mpls_enable, - {'sw_if_index' : sw_if_index, - 'enable' : is_enable }) + {'sw_if_index': sw_if_index, + 'enable': is_enable}) def sw_interface_ra_suppress(self, sw_if_index): return self.api(self.papi.sw_interface_ip6nd_ra_config, - {'sw_if_index' : sw_if_index }) + {'sw_if_index': sw_if_index}) def vxlan_add_del_tunnel( self, @@ -202,14 +236,14 @@ class VppPapiProvider(object): """ return self.api(self.papi.vxlan_add_del_tunnel, - {'is_add' : is_add, - 'is_ipv6' : is_ipv6, - 'src_address' : src_addr, - 'dst_address' : dst_addr, - 'mcast_sw_if_index' : mcast_sw_if_index, - 'encap_vrf_id' : encap_vrf_id, - 'decap_next_index' : decap_next_index, - 'vni' : vni}) + {'is_add': is_add, + 'is_ipv6': is_ipv6, + 'src_address': src_addr, + 'dst_address': dst_addr, + 'mcast_sw_if_index': mcast_sw_if_index, + 'encap_vrf_id': encap_vrf_id, + 'decap_next_index': decap_next_index, + 'vni': vni}) def bridge_domain_add_del(self, bd_id, flood=1, uu_flood=1, forward=1, learn=1, arp_term=0, is_add=1): @@ -229,13 +263,13 @@ class VppPapiProvider(object): :param int is_add: Add or delete flag. 
(Default value = 1) """ return self.api(self.papi.bridge_domain_add_del, - { 'bd_id' : bd_id, - 'flood' : flood, - 'uu_flood' : uu_flood, - 'forward' : forward, - 'learn' : learn, - 'arp_term' : arp_term, - 'is_add' : is_add}) + {'bd_id': bd_id, + 'flood': flood, + 'uu_flood': uu_flood, + 'forward': forward, + 'learn': learn, + 'arp_term': arp_term, + 'is_add': is_add}) def l2fib_add_del(self, mac, bd_id, sw_if_index, is_add=1, static_mac=0, filter_mac=0, bvi_mac=0): @@ -254,13 +288,13 @@ class VppPapiProvider(object): interface. (Default value = 0) """ return self.api(self.papi.l2fib_add_del, - { 'mac' : self._convert_mac(mac), - 'bd_id' : bd_id, - 'sw_if_index' : sw_if_index, - 'is_add' : is_add, - 'static_mac' : static_mac, - 'filter_mac' : filter_mac, - 'bvi_mac' : bvi_mac }) + {'mac': self._convert_mac(mac), + 'bd_id': bd_id, + 'sw_if_index': sw_if_index, + 'is_add': is_add, + 'static_mac': static_mac, + 'filter_mac': filter_mac, + 'bvi_mac': bvi_mac}) def sw_interface_set_l2_bridge(self, sw_if_index, bd_id, shg=0, bvi=0, enable=1): @@ -274,11 +308,11 @@ class VppPapiProvider(object): :param int enable: Add or remove interface. (Default value = 1) """ return self.api(self.papi.sw_interface_set_l2_bridge, - { 'rx_sw_if_index' : sw_if_index, - 'bd_id' : bd_id, - 'shg' : shg, - 'bvi' : bvi, - 'enable' : enable }) + {'rx_sw_if_index': sw_if_index, + 'bd_id': bd_id, + 'shg': shg, + 'bvi': bvi, + 'enable': enable}) def bridge_flags(self, bd_id, is_set, feature_bitmap): """Enable/disable required feature of the bridge domain with defined ID. @@ -293,9 +327,9 @@ class VppPapiProvider(object): - arp-term (1 << 4). """ return self.api(self.papi.bridge_flags, - {'bd_id' : bd_id, - 'is_set' : is_set, - 'feature_bitmap' : feature_bitmap }) + {'bd_id': bd_id, + 'is_set': is_set, + 'feature_bitmap': feature_bitmap}) def bridge_domain_dump(self, bd_id=0): """ @@ -305,7 +339,7 @@ class VppPapiProvider(object): :return: Dictionary of bridge domain(s) data. 
""" return self.api(self.papi.bridge_domain_dump, - {'bd_id' : bd_id }) + {'bd_id': bd_id}) def sw_interface_set_l2_xconnect(self, rx_sw_if_index, tx_sw_if_index, enable): @@ -319,11 +353,17 @@ class VppPapiProvider(object): """ return self.api(self.papi.sw_interface_set_l2_xconnect, - { 'rx_sw_if_index' : rx_sw_if_index, - 'tx_sw_if_index' : tx_sw_if_index, - 'enable' : enable }) + {'rx_sw_if_index': rx_sw_if_index, + 'tx_sw_if_index': tx_sw_if_index, + 'enable': enable}) - def sw_interface_set_l2_tag_rewrite(self, sw_if_index, vtr_oper, push=0, tag1=0, tag2=0): + def sw_interface_set_l2_tag_rewrite( + self, + sw_if_index, + vtr_oper, + push=0, + tag1=0, + tag2=0): """L2 interface vlan tag rewrite configure request :param client_index - opaque cookie to identify the sender :param context - sender context, to match reply w/ request @@ -335,11 +375,11 @@ class VppPapiProvider(object): """ return self.api(self.papi.l2_interface_vlan_tag_rewrite, - { 'sw_if_index' : sw_if_index, - 'vtr_op' : vtr_oper, - 'push_dot1q' : push, - 'tag1' : tag1, - 'tag2' : tag2 }) + {'sw_if_index': sw_if_index, + 'vtr_op': vtr_oper, + 'push_dot1q': push, + 'tag1': tag1, + 'tag2': tag2}) def sw_interface_set_flags(self, sw_if_index, admin_up_down, link_up_down=0, deleted=0): @@ -352,10 +392,10 @@ class VppPapiProvider(object): """ return self.api(self.papi.sw_interface_set_flags, - { 'sw_if_index' : sw_if_index, - 'admin_up_down' : admin_up_down, - 'link_up_down' : link_up_down, - 'deleted' : deleted }) + {'sw_if_index': sw_if_index, + 'admin_up_down': admin_up_down, + 'link_up_down': link_up_down, + 'deleted': deleted}) def create_subif(self, sw_if_index, sub_id, outer_vlan, inner_vlan, no_tags=0, one_tag=0, two_tags=0, dot1ad=0, exact_match=0, @@ -379,18 +419,18 @@ class VppPapiProvider(object): """ return self.api( self.papi.create_subif, - { 'sw_if_index' : sw_if_index, - 'sub_id' : sub_id, - 'no_tags' : no_tags, - 'one_tag' : one_tag, - 'two_tags' : two_tags, - 'dot1ad' : dot1ad, - 'exact_match' : exact_match, - 'default_sub' : default_sub, - 'outer_vlan_id_any' : outer_vlan_id_any, - 'inner_vlan_id_any' : inner_vlan_id_any, - 'outer_vlan_id' : outer_vlan, - 'inner_vlan_id' : inner_vlan }) + {'sw_if_index': sw_if_index, + 'sub_id': sub_id, + 'no_tags': no_tags, + 'one_tag': one_tag, + 'two_tags': two_tags, + 'dot1ad': dot1ad, + 'exact_match': exact_match, + 'default_sub': default_sub, + 'outer_vlan_id_any': outer_vlan_id_any, + 'inner_vlan_id_any': inner_vlan_id_any, + 'outer_vlan_id': outer_vlan, + 'inner_vlan_id': inner_vlan}) def delete_subif(self, sw_if_index): """Delete subinterface @@ -398,7 +438,7 @@ class VppPapiProvider(object): :param sw_if_index: """ return self.api(self.papi.delete_subif, - { 'sw_if_index' : sw_if_index }) + {'sw_if_index': sw_if_index}) def create_vlan_subif(self, sw_if_index, vlan): """ @@ -408,8 +448,8 @@ class VppPapiProvider(object): """ return self.api(self.papi.create_vlan_subif, - {'sw_if_index' : sw_if_index, - 'vlan_id' : vlan }) + {'sw_if_index': sw_if_index, + 'vlan_id': vlan}) def create_loopback(self, mac=''): """ @@ -417,7 +457,7 @@ class VppPapiProvider(object): :param mac: (Optional) """ return self.api(self.papi.create_loopback, - { 'mac_address' : mac }) + {'mac_address': mac}) def ip_add_del_route( self, @@ -428,9 +468,9 @@ class VppPapiProvider(object): table_id=0, next_hop_table_id=0, next_hop_weight=1, - next_hop_n_out_labels = 0, - next_hop_out_label_stack = [], - next_hop_via_label = MPLS_LABEL_INVALID, + next_hop_n_out_labels=0, + 
next_hop_out_label_stack=[], + next_hop_via_label=MPLS_LABEL_INVALID, create_vrf_if_needed=0, is_resolve_host=0, is_resolve_attached=0, @@ -470,29 +510,29 @@ class VppPapiProvider(object): return self.api( self.papi.ip_add_del_route, - { 'next_hop_sw_if_index' : next_hop_sw_if_index, - 'table_id' : table_id, - 'classify_table_index' : classify_table_index, - 'next_hop_table_id' : next_hop_table_id, - 'create_vrf_if_needed' : create_vrf_if_needed, - 'is_add' : is_add, - 'is_drop' : is_drop, - 'is_unreach' : is_unreach, - 'is_prohibit' : is_prohibit, - 'is_ipv6' : is_ipv6, - 'is_local' : is_local, - 'is_classify' : is_classify, - 'is_multipath' : is_multipath, - 'is_resolve_host' : is_resolve_host, - 'is_resolve_attached' : is_resolve_attached, - 'not_last' : not_last, - 'next_hop_weight' : next_hop_weight, - 'dst_address_length' : dst_address_length, - 'dst_address' : dst_address, - 'next_hop_address' : next_hop_address, - 'next_hop_n_out_labels' : next_hop_n_out_labels, - 'next_hop_via_label' : next_hop_via_label, - 'next_hop_out_label_stack' : next_hop_out_label_stack }) + {'next_hop_sw_if_index': next_hop_sw_if_index, + 'table_id': table_id, + 'classify_table_index': classify_table_index, + 'next_hop_table_id': next_hop_table_id, + 'create_vrf_if_needed': create_vrf_if_needed, + 'is_add': is_add, + 'is_drop': is_drop, + 'is_unreach': is_unreach, + 'is_prohibit': is_prohibit, + 'is_ipv6': is_ipv6, + 'is_local': is_local, + 'is_classify': is_classify, + 'is_multipath': is_multipath, + 'is_resolve_host': is_resolve_host, + 'is_resolve_attached': is_resolve_attached, + 'not_last': not_last, + 'next_hop_weight': next_hop_weight, + 'dst_address_length': dst_address_length, + 'dst_address': dst_address, + 'next_hop_address': next_hop_address, + 'next_hop_n_out_labels': next_hop_n_out_labels, + 'next_hop_via_label': next_hop_via_label, + 'next_hop_out_label_stack': next_hop_out_label_stack}) def ip_neighbor_add_del(self, sw_if_index, @@ -516,13 +556,13 @@ class VppPapiProvider(object): return self.api( self.papi.ip_neighbor_add_del, - { 'vrf_id' : vrf_id, - 'sw_if_index' : sw_if_index, - 'is_add' : is_add, - 'is_ipv6' : is_ipv6, - 'is_static' : is_static, - 'mac_address' : mac_address, - 'dst_address' : dst_address + {'vrf_id': vrf_id, + 'sw_if_index': sw_if_index, + 'is_add': is_add, + 'is_ipv6': is_ipv6, + 'is_static': is_static, + 'mac_address': mac_address, + 'dst_address': dst_address } ) @@ -536,9 +576,9 @@ class VppPapiProvider(object): """ return self.api(self.papi.sw_interface_span_enable_disable, - { 'sw_if_index_from' : sw_if_index_from, - 'sw_if_index_to' : sw_if_index_to, - 'state' : state }) + {'sw_if_index_from': sw_if_index_from, + 'sw_if_index_to': sw_if_index_to, + 'state': state}) def gre_tunnel_add_del(self, src_address, @@ -559,12 +599,12 @@ class VppPapiProvider(object): return self.api( self.papi.gre_add_del_tunnel, - { 'is_add' : is_add, - 'is_ipv6' : is_ip6, - 'teb' : is_teb, - 'src_address' : src_address, - 'dst_address' : dst_address, - 'outer_fib_id' : outer_fib_id } + {'is_add': is_add, + 'is_ipv6': is_ip6, + 'teb': is_teb, + 'src_address': src_address, + 'dst_address': dst_address, + 'outer_fib_id': outer_fib_id} ) def mpls_route_add_del( @@ -577,9 +617,9 @@ class VppPapiProvider(object): table_id=0, next_hop_table_id=0, next_hop_weight=1, - next_hop_n_out_labels = 0, - next_hop_out_label_stack = [], - next_hop_via_label = MPLS_LABEL_INVALID, + next_hop_n_out_labels=0, + next_hop_out_label_stack=[], + next_hop_via_label=MPLS_LABEL_INVALID, create_vrf_if_needed=0, 
is_resolve_host=0, is_resolve_attached=0, @@ -615,24 +655,24 @@ class VppPapiProvider(object): return self.api( self.papi.mpls_route_add_del, - { 'mr_label' : label, - 'mr_eos' : eos, - 'mr_table_id' : table_id, - 'mr_classify_table_index' : classify_table_index, - 'mr_create_table_if_needed' : create_vrf_if_needed, - 'mr_is_add' : is_add, - 'mr_is_classify' : is_classify, - 'mr_is_multipath' : is_multipath, - 'mr_is_resolve_host' : is_resolve_host, - 'mr_is_resolve_attached' : is_resolve_attached, - 'mr_next_hop_proto_is_ip4' : next_hop_proto_is_ip4, - 'mr_next_hop_weight' : next_hop_weight, - 'mr_next_hop' : next_hop_address, - 'mr_next_hop_n_out_labels' : next_hop_n_out_labels, - 'mr_next_hop_sw_if_index' : next_hop_sw_if_index, - 'mr_next_hop_table_id' : next_hop_table_id, - 'mr_next_hop_via_label' : next_hop_via_label, - 'mr_next_hop_out_label_stack' : next_hop_out_label_stack }) + {'mr_label': label, + 'mr_eos': eos, + 'mr_table_id': table_id, + 'mr_classify_table_index': classify_table_index, + 'mr_create_table_if_needed': create_vrf_if_needed, + 'mr_is_add': is_add, + 'mr_is_classify': is_classify, + 'mr_is_multipath': is_multipath, + 'mr_is_resolve_host': is_resolve_host, + 'mr_is_resolve_attached': is_resolve_attached, + 'mr_next_hop_proto_is_ip4': next_hop_proto_is_ip4, + 'mr_next_hop_weight': next_hop_weight, + 'mr_next_hop': next_hop_address, + 'mr_next_hop_n_out_labels': next_hop_n_out_labels, + 'mr_next_hop_sw_if_index': next_hop_sw_if_index, + 'mr_next_hop_table_id': next_hop_table_id, + 'mr_next_hop_via_label': next_hop_via_label, + 'mr_next_hop_out_label_stack': next_hop_out_label_stack}) def mpls_ip_bind_unbind( self, @@ -648,14 +688,14 @@ class VppPapiProvider(object): """ return self.api( self.papi.mpls_ip_bind_unbind, - {'mb_mpls_table_id' : table_id, - 'mb_label' : label, - 'mb_ip_table_id' : ip_table_id, - 'mb_create_table_if_needed' : create_vrf_if_needed, - 'mb_is_bind' : is_bind, - 'mb_is_ip4' : is_ip4, - 'mb_address_length' : dst_address_length, - 'mb_address' : dst_address}) + {'mb_mpls_table_id': table_id, + 'mb_label': label, + 'mb_ip_table_id': ip_table_id, + 'mb_create_table_if_needed': create_vrf_if_needed, + 'mb_is_bind': is_bind, + 'mb_is_ip4': is_ip4, + 'mb_address_length': dst_address_length, + 'mb_address': dst_address}) def mpls_tunnel_add_del( self, @@ -665,9 +705,9 @@ class VppPapiProvider(object): next_hop_sw_if_index=0xFFFFFFFF, next_hop_table_id=0, next_hop_weight=1, - next_hop_n_out_labels = 0, - next_hop_out_label_stack = [], - next_hop_via_label = MPLS_LABEL_INVALID, + next_hop_n_out_labels=0, + next_hop_out_label_stack=[], + next_hop_via_label=MPLS_LABEL_INVALID, create_vrf_if_needed=0, is_add=1, l2_only=0): @@ -696,19 +736,16 @@ class VppPapiProvider(object): """ return self.api( self.papi.mpls_tunnel_add_del, - {'mt_sw_if_index' : tun_sw_if_index, - 'mt_is_add' : is_add, - 'mt_l2_only' : l2_only, - 'mt_next_hop_proto_is_ip4' : next_hop_proto_is_ip4, - 'mt_next_hop_weight' : next_hop_weight, - 'mt_next_hop' : next_hop_address, - 'mt_next_hop_n_out_labels' : next_hop_n_out_labels, - 'mt_next_hop_sw_if_index' :next_hop_sw_if_index, - 'mt_next_hop_table_id' : next_hop_table_id, - 'mt_next_hop_out_label_stack' : next_hop_out_label_stack }) - - return self.api(vpp_papi.sw_interface_span_enable_disable, - (sw_if_index_from, sw_if_index_to, enable)) + {'mt_sw_if_index': tun_sw_if_index, + 'mt_is_add': is_add, + 'mt_l2_only': l2_only, + 'mt_next_hop_proto_is_ip4': next_hop_proto_is_ip4, + 'mt_next_hop_weight': next_hop_weight, + 'mt_next_hop': 
next_hop_address, + 'mt_next_hop_n_out_labels': next_hop_n_out_labels, + 'mt_next_hop_sw_if_index': next_hop_sw_if_index, + 'mt_next_hop_table_id': next_hop_table_id, + 'mt_next_hop_out_label_stack': next_hop_out_label_stack}) def snat_interface_add_del_feature( self, @@ -723,9 +760,9 @@ class VppPapiProvider(object): """ return self.api( self.papi.snat_interface_add_del_feature, - {'is_add' : is_add, - 'is_inside' : is_inside, - 'sw_if_index' : sw_if_index}) + {'is_add': is_add, + 'is_inside': is_inside, + 'sw_if_index': sw_if_index}) def snat_add_static_mapping( self, @@ -750,14 +787,14 @@ class VppPapiProvider(object): """ return self.api( self.papi.snat_add_static_mapping, - {'is_add' : is_add, - 'is_ip4' : is_ip4, - 'addr_only' : addr_only, - 'local_ip_address' : local_ip, - 'external_ip_address' : external_ip, - 'local_port' : local_port, - 'external_port' : external_port, - 'vrf_id' : vrf_id}) + {'is_add': is_add, + 'is_ip4': is_ip4, + 'addr_only': addr_only, + 'local_ip_address': local_ip, + 'external_ip_address': external_ip, + 'local_port': local_port, + 'external_port': external_port, + 'vrf_id': vrf_id}) def snat_add_address_range( self, @@ -774,10 +811,10 @@ class VppPapiProvider(object): """ return self.api( self.papi.snat_add_address_range, - {'is_ip4' : is_ip4, - 'first_ip_address' : first_ip_address, - 'last_ip_address' : last_ip_address, - 'is_add' : is_add}) + {'is_ip4': is_ip4, + 'first_ip_address': first_ip_address, + 'last_ip_address': last_ip_address, + 'is_add': is_add}) def snat_address_dump(self): """Dump S-NAT addresses @@ -796,3 +833,43 @@ class VppPapiProvider(object): :return: Dictionary of S-NAT static mappings """ return self.api(self.papi.snat_static_mapping_dump, {}) + + def control_ping(self): + self.api(self.papi.control_ping) + + def bfd_udp_add(self, sw_if_index, desired_min_tx, required_min_rx, + detect_mult, local_addr, peer_addr, is_ipv6=0): + return self.api(self.papi.bfd_udp_add, + { + 'sw_if_index': sw_if_index, + 'desired_min_tx': desired_min_tx, + 'required_min_rx': required_min_rx, + 'local_addr': local_addr, + 'peer_addr': peer_addr, + 'is_ipv6': is_ipv6, + 'detect_mult': detect_mult, + }) + + def bfd_udp_del(self, sw_if_index, local_addr, peer_addr, is_ipv6=0): + return self.api(self.papi.bfd_udp_del, + { + 'sw_if_index': sw_if_index, + 'local_addr': local_addr, + 'peer_addr': peer_addr, + 'is_ipv6': is_ipv6, + }) + + def bfd_udp_session_dump(self): + return self.api(self.papi.bfd_udp_session_dump, {}) + + def bfd_session_set_flags(self, bs_idx, admin_up_down): + return self.api(self.papi.bfd_session_set_flags, { + 'bs_index': bs_idx, + 'admin_up_down': admin_up_down, + }) + + def want_bfd_events(self, enable_disable=1): + return self.api(self.papi.want_bfd_events, { + 'enable_disable': enable_disable, + 'pid': os.getpid(), + }) diff --git a/test/vpp_pg_interface.py b/test/vpp_pg_interface.py index 533c4603a2e..012f5768353 100644 --- a/test/vpp_pg_interface.py +++ b/test/vpp_pg_interface.py @@ -1,10 +1,10 @@ import os import time -from scapy.utils import wrpcap, rdpcap +from scapy.utils import wrpcap, rdpcap, PcapReader from vpp_interface import VppInterface from scapy.layers.l2 import Ether, ARP -from scapy.layers.inet6 import IPv6, ICMPv6ND_NS, ICMPv6ND_NA, \ +from scapy.layers.inet6 import IPv6, ICMPv6ND_NS, ICMPv6ND_NA,\ ICMPv6NDOptSrcLLAddr, ICMPv6NDOptDstLLAddr from util import ppp @@ -93,6 +93,7 @@ class VppPGInterface(VppInterface): pass # FIXME this should be an API, but no such exists atm self.test.vapi.cli(self.capture_cli) + 
self._pcap_reader = None def add_stream(self, pkts): """ @@ -132,6 +133,41 @@ class VppPGInterface(VppInterface): return [] return output + def wait_for_packet(self, timeout): + """ + Wait for next packet captured with a timeout + + :param timeout: How long to wait for the packet + + :returns: Captured packet if no packet arrived within timeout + :raises Exception: if no packet arrives within timeout + """ + limit = time.time() + timeout + if self._pcap_reader is None: + self.test.logger.debug("Waiting for the capture file to appear") + while time.time() < limit: + if os.path.isfile(self.out_path): + break + time.sleep(0) # yield + if os.path.isfile(self.out_path): + self.test.logger.debug("Capture file appeared after %fs" % + (time.time() - (limit - timeout))) + self._pcap_reader = PcapReader(self.out_path) + else: + self.test.logger.debug("Timeout - capture file still nowhere") + raise Exception("Packet didn't arrive within timeout") + + self.test.logger.debug("Waiting for packet") + while time.time() < limit: + p = self._pcap_reader.recv() + if p is not None: + self.test.logger.debug("Packet received after %fs", + (time.time() - (limit - timeout))) + return p + time.sleep(0) # yield + self.test.logger.debug("Timeout - no packets received") + raise Exception("Packet didn't arrive within timeout") + def create_arp_req(self): """Create ARP request applicable for this interface""" return (Ether(dst="ff:ff:ff:ff:ff:ff", src=self.remote_mac) / diff --git a/vlib-api/vlibapi/api_helper_macros.h b/vlib-api/vlibapi/api_helper_macros.h index a14756565af..16f34cfc762 100644 --- a/vlib-api/vlibapi/api_helper_macros.h +++ b/vlib-api/vlibapi/api_helper_macros.h @@ -197,7 +197,8 @@ _(to_netconf_server) \ _(from_netconf_server) \ _(to_netconf_client) \ _(from_netconf_client) \ -_(oam_events) +_(oam_events) \ +_(bfd_events) /* WARNING: replicated in vpp/stats.h */ typedef struct diff --git a/vnet/Makefile.am b/vnet/Makefile.am index eb7f8ef1a01..4e4b2c0acb3 100644 --- a/vnet/Makefile.am +++ b/vnet/Makefile.am @@ -350,6 +350,22 @@ nobase_include_HEADERS += \ vnet/ip/udp_packet.h ######################################## +# Bidirectional Forwarding Detection +######################################## + +nobase_include_HEADERS += \ + vnet/bfd/bfd_protocol.h \ + vnet/bfd/bfd_main.h \ + vnet/bfd/bfd_api.h \ + vnet/bfd/bfd_udp.h + +libvnet_la_SOURCES += \ + vnet/bfd/bfd_api.h \ + vnet/bfd/bfd_udp.c \ + vnet/bfd/bfd_main.c \ + vnet/bfd/bfd_protocol.c + +######################################## # Layer 3 protocol: IPSec ######################################## if WITH_IPSEC diff --git a/vnet/vnet/api_errno.h b/vnet/vnet/api_errno.h index 50d6f731035..65e3e59121e 100644 --- a/vnet/vnet/api_errno.h +++ b/vnet/vnet/api_errno.h @@ -90,7 +90,9 @@ _(EXCEEDED_NUMBER_OF_PORTS_CAPACITY, -96, "Operation would exceed capacity of nu _(INVALID_ADDRESS_FAMILY, -97, "Invalid address family") \ _(INVALID_SUB_SW_IF_INDEX, -98, "Invalid sub-interface sw_if_index") \ _(TABLE_TOO_BIG, -99, "Table too big") \ -_(CANNOT_ENABLE_DISABLE_FEATURE, -100, "Cannot enable/disable feature") +_(CANNOT_ENABLE_DISABLE_FEATURE, -100, "Cannot enable/disable feature") \ +_(BFD_EEXIST, -101, "Duplicate BFD session") \ +_(BFD_NOENT, -102, "No such BFD session") typedef enum { diff --git a/vnet/vnet/bfd/bfd_api.h b/vnet/vnet/bfd/bfd_api.h new file mode 100644 index 00000000000..cfcd04f3f50 --- /dev/null +++ b/vnet/vnet/bfd/bfd_api.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2011-2016 Cisco and/or its affiliates. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * @file + * @brief BFD global declarations + */ +#ifndef __included_bfd_api_h__ +#define __included_bfd_api_h__ + +#include <vnet/api_errno.h> +#include <vnet/vnet.h> +#include <vnet/ip/ip6_packet.h> +#include <vnet/bfd/bfd_udp.h> + +vnet_api_error_t bfd_udp_add_session (u32 sw_if_index, u32 desired_min_tx_us, + u32 required_min_rx_us, u8 detect_mult, + const ip46_address_t * local_addr, + const ip46_address_t * peer_addr); + +vnet_api_error_t bfd_udp_del_session (u32 sw_if_index, + const ip46_address_t * local_addr, + const ip46_address_t * peer_addr); + +vnet_api_error_t bfd_session_set_flags (u32 bs_index, u8 admin_up_down); + +#endif /* __included_bfd_api_h__ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/vnet/vnet/bfd/bfd_debug.h b/vnet/vnet/bfd/bfd_debug.h new file mode 100644 index 00000000000..c11e6d9f499 --- /dev/null +++ b/vnet/vnet/bfd/bfd_debug.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2011-2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * @file + * @brief BFD global declarations + */ +#ifndef __included_bfd_debug_h__ +#define __included_bfd_debug_h__ + +/* controls debug prints */ +#define BFD_DEBUG (0) + +#if BFD_DEBUG +#define BFD_DEBUG_FILE_DEF \ + static const char *__file = NULL; \ + if (!__file) \ + { \ + __file = strrchr (__FILE__, '/'); \ + if (__file) \ + { \ + ++__file; \ + } \ + else \ + { \ + __file = __FILE__; \ + } \ + } + +#define BFD_DBG(fmt, ...) \ + do \ + { \ + BFD_DEBUG_FILE_DEF \ + u8 *_s = NULL; \ + vlib_main_t *vm = vlib_get_main (); \ + _s = format (_s, "%6.02f:DBG:%s:%d:%s():" fmt, vlib_time_now (vm), \ + __file, __LINE__, __func__, ##__VA_ARGS__); \ + printf ("%s\n", _s); \ + vec_free (_s); \ + } \ + while (0); + +#define BFD_ERR(fmt, ...) \ + do \ + { \ + BFD_DEBUG_FILE_DEF \ + u8 *_s = NULL; \ + vlib_main_t *vm = vlib_get_main (); \ + _s = format (_s, "%6.02f:ERR:%s:%d:%s():" fmt, vlib_time_now (vm), \ + __file, __LINE__, __func__, ##__VA_ARGS__); \ + printf ("%s\n", _s); \ + vec_free (_s); \ + } \ + while (0); + +#else +#define BFD_DBG(...) +#define BFD_ERR(...) 
+#endif + +#endif /* __included_bfd_debug_h__ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/vnet/vnet/bfd/bfd_doc.md b/vnet/vnet/bfd/bfd_doc.md new file mode 100644 index 00000000000..1333ed77b7e --- /dev/null +++ b/vnet/vnet/bfd/bfd_doc.md @@ -0,0 +1 @@ +TODO diff --git a/vnet/vnet/bfd/bfd_main.c b/vnet/vnet/bfd/bfd_main.c new file mode 100644 index 00000000000..a72d6fed9bb --- /dev/null +++ b/vnet/vnet/bfd/bfd_main.c @@ -0,0 +1,928 @@ +/* + * Copyright (c) 2011-2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * @file + * @brief BFD nodes implementation + */ + +#include <vppinfra/random.h> +#include <vppinfra/error.h> +#include <vppinfra/hash.h> +#include <vnet/ethernet/ethernet.h> +#include <vnet/ethernet/packet.h> +#include <vnet/bfd/bfd_debug.h> +#include <vnet/bfd/bfd_protocol.h> +#include <vnet/bfd/bfd_main.h> + +static u64 +bfd_us_to_clocks (bfd_main_t * bm, u64 us) +{ + return bm->cpu_cps * ((f64) us / USEC_PER_SECOND); +} + +static vlib_node_registration_t bfd_process_node; + +typedef enum +{ +#define F(t, n) BFD_OUTPUT_##t, + foreach_bfd_transport (F) +#undef F + BFD_OUTPUT_N_NEXT, +} bfd_output_next_t; + +static u32 bfd_next_index_by_transport[] = { +#define F(t, n) [BFD_TRANSPORT_##t] = BFD_OUTPUT_##t, + foreach_bfd_transport (F) +#undef F +}; + +/* + * We actually send all bfd pkts to the "error" node after scanning + * them, so the graph node has only one next-index. The "error-drop" + * node automatically bumps our per-node packet counters for us. 
+ */ +typedef enum +{ + BFD_INPUT_NEXT_NORMAL, + BFD_INPUT_N_NEXT, +} bfd_input_next_t; + +static void bfd_on_state_change (bfd_main_t * bm, bfd_session_t * bs, + u64 now); + +static void +bfd_set_defaults (bfd_main_t * bm, bfd_session_t * bs) +{ + bs->local_state = BFD_STATE_down; + bs->local_diag = BFD_DIAG_CODE_no_diag; + bs->remote_state = BFD_STATE_down; + bs->local_demand = 0; + bs->remote_discr = 0; + bs->desired_min_tx_us = BFD_DEFAULT_DESIRED_MIN_TX_US; + bs->desired_min_tx_clocks = bfd_us_to_clocks (bm, bs->desired_min_tx_us); + bs->remote_min_rx_us = 1; + bs->remote_demand = 0; +} + +static void +bfd_set_diag (bfd_session_t * bs, bfd_diag_code_e code) +{ + if (bs->local_diag != code) + { + BFD_DBG ("set local_diag, bs_idx=%d: '%d:%s'", bs->bs_idx, code, + bfd_diag_code_string (code)); + bs->local_diag = code; + } +} + +static void +bfd_set_state (bfd_main_t * bm, bfd_session_t * bs, bfd_state_e new_state) +{ + if (bs->local_state != new_state) + { + BFD_DBG ("Change state, bs_idx=%d: %s->%s", bs->bs_idx, + bfd_state_string (bs->local_state), + bfd_state_string (new_state)); + bs->local_state = new_state; + bfd_on_state_change (bm, bs, clib_cpu_time_now ()); + } +} + +static void +bfd_recalc_tx_interval (bfd_main_t * bm, bfd_session_t * bs) +{ + if (!bs->local_demand) + { + bs->transmit_interval_clocks = + clib_max (bs->desired_min_tx_clocks, bs->remote_min_rx_clocks); + } + else + { + /* TODO */ + } + BFD_DBG ("Recalculated transmit interval %lu clocks/%.2fs", + bs->transmit_interval_clocks, + bs->transmit_interval_clocks / bm->cpu_cps); +} + +static void +bfd_calc_next_tx (bfd_main_t * bm, bfd_session_t * bs, u64 now) +{ + if (!bs->local_demand) + { + if (bs->local_detect_mult > 1) + { + /* common case - 75-100% of transmit interval */ + bs->tx_timeout_clocks = now + + (1 - .25 * (random_f64 (&bm->random_seed))) * + bs->transmit_interval_clocks; + } + else + { + /* special case - 75-90% of transmit interval */ + bs->tx_timeout_clocks = + now + + (.9 - .15 * (random_f64 (&bm->random_seed))) * + bs->transmit_interval_clocks; + } + } + else + { + /* TODO */ + } + if (bs->tx_timeout_clocks) + { + BFD_DBG ("Next transmit in %lu clocks/%.02fs@%lu", + bs->tx_timeout_clocks - now, + (bs->tx_timeout_clocks - now) / bm->cpu_cps, + bs->tx_timeout_clocks); + } +} + +static void +bfd_recalc_detection_time (bfd_main_t * bm, bfd_session_t * bs) +{ + if (!bs->local_demand) + { + bs->detection_time_clocks = + bs->remote_detect_mult * + bfd_us_to_clocks (bm, clib_max (bs->required_min_rx_us, + bs->remote_desired_min_tx_us)); + } + else + { + bs->detection_time_clocks = + bs->local_detect_mult * + bfd_us_to_clocks (bm, + clib_max (bs->desired_min_tx_us, + bs->remote_min_rx_us)); + } + BFD_DBG ("Recalculated detection time %lu clocks/%.2fs", + bs->detection_time_clocks, + bs->detection_time_clocks / bm->cpu_cps); +} + +static void +bfd_set_timer (bfd_main_t * bm, bfd_session_t * bs, u64 now) +{ + u64 next = 0; + u64 rx_timeout = 0; + if (BFD_STATE_up == bs->local_state) + { + rx_timeout = bs->last_rx_clocks + bs->detection_time_clocks; + } + if (bs->tx_timeout_clocks && rx_timeout) + { + next = clib_min (bs->tx_timeout_clocks, rx_timeout); + } + else if (bs->tx_timeout_clocks) + { + next = bs->tx_timeout_clocks; + } + else if (rx_timeout) + { + next = rx_timeout; + } + BFD_DBG ("bs_idx=%u, tx_timeout=%lu, rx_timeout=%lu, next=%s", bs->bs_idx, + bs->tx_timeout_clocks, rx_timeout, + next == bs->tx_timeout_clocks ? 
"tx" : "rx"); + if (next && (next < bs->wheel_time_clocks || !bs->wheel_time_clocks)) + { + if (bs->wheel_time_clocks) + { + timing_wheel_delete (&bm->wheel, bs->bs_idx); + } + bs->wheel_time_clocks = next; + BFD_DBG ("timing_wheel_insert(%p, %lu (%ld clocks/%.2fs in the " + "future), %u);", + &bm->wheel, bs->wheel_time_clocks, + (i64) bs->wheel_time_clocks - clib_cpu_time_now (), + (i64) (bs->wheel_time_clocks - clib_cpu_time_now ()) / + bm->cpu_cps, bs->bs_idx); + timing_wheel_insert (&bm->wheel, bs->wheel_time_clocks, bs->bs_idx); + } +} + +static void +bfd_set_desired_min_tx (bfd_main_t * bm, bfd_session_t * bs, u64 now, + u32 desired_min_tx_us) +{ + bs->desired_min_tx_us = desired_min_tx_us; + bs->desired_min_tx_clocks = bfd_us_to_clocks (bm, bs->desired_min_tx_us); + BFD_DBG ("Set desired min tx to %uus/%lu clocks/%.2fs", + bs->desired_min_tx_us, bs->desired_min_tx_clocks, + bs->desired_min_tx_clocks / bm->cpu_cps); + bfd_recalc_detection_time (bm, bs); + bfd_recalc_tx_interval (bm, bs); + bfd_calc_next_tx (bm, bs, now); + bfd_set_timer (bm, bs, now); +} + +void +bfd_session_start (bfd_main_t * bm, bfd_session_t * bs) +{ + BFD_DBG ("%U", format_bfd_session, bs); + bfd_recalc_tx_interval (bm, bs); + vlib_process_signal_event (bm->vlib_main, bm->bfd_process_node_index, + BFD_EVENT_NEW_SESSION, bs->bs_idx); +} + +vnet_api_error_t +bfd_del_session (uword bs_idx) +{ + const bfd_main_t *bm = &bfd_main; + if (!pool_is_free_index (bm->sessions, bs_idx)) + { + bfd_session_t *bs = pool_elt_at_index (bm->sessions, bs_idx); + pool_put (bm->sessions, bs); + return 0; + } + else + { + BFD_ERR ("no such session"); + return VNET_API_ERROR_BFD_NOENT; + } + return 0; +} + +const char * +bfd_diag_code_string (bfd_diag_code_e diag) +{ +#define F(n, t, s) \ + case BFD_DIAG_CODE_NAME (t): \ + return s; + switch (diag) + { + foreach_bfd_diag_code (F)} + return "UNKNOWN"; +#undef F +} + +const char * +bfd_state_string (bfd_state_e state) +{ +#define F(n, t, s) \ + case BFD_STATE_NAME (t): \ + return s; + switch (state) + { + foreach_bfd_state (F)} + return "UNKNOWN"; +#undef F +} + +vnet_api_error_t +bfd_session_set_flags (u32 bs_idx, u8 admin_up_down) +{ + bfd_main_t *bm = &bfd_main; + if (pool_is_free_index (bm->sessions, bs_idx)) + { + BFD_ERR ("invalid bs_idx=%u", bs_idx); + return VNET_API_ERROR_BFD_NOENT; + } + bfd_session_t *bs = pool_elt_at_index (bm->sessions, bs_idx); + if (admin_up_down) + { + bfd_set_state (bm, bs, BFD_STATE_down); + } + else + { + bfd_set_diag (bs, BFD_DIAG_CODE_neighbor_sig_down); + bfd_set_state (bm, bs, BFD_STATE_admin_down); + } + return 0; +} + +u8 * +bfd_input_format_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + const bfd_input_trace_t *t = va_arg (*args, bfd_input_trace_t *); + const bfd_pkt_t *pkt = (bfd_pkt_t *) t->data; + if (t->len > STRUCT_SIZE_OF (bfd_pkt_t, head)) + { + s = format (s, "BFD v%u, diag=%u(%s), state=%u(%s),\n" + " flags=(P:%u, F:%u, C:%u, A:%u, D:%u, M:%u), detect_mult=%u, " + "length=%u\n", + bfd_pkt_get_version (pkt), bfd_pkt_get_diag_code (pkt), + bfd_diag_code_string (bfd_pkt_get_diag_code (pkt)), + bfd_pkt_get_state (pkt), + bfd_state_string (bfd_pkt_get_state (pkt)), + bfd_pkt_get_poll (pkt), bfd_pkt_get_final (pkt), + bfd_pkt_get_control_plane_independent (pkt), + bfd_pkt_get_auth_present (pkt), bfd_pkt_get_demand (pkt), + bfd_pkt_get_multipoint (pkt), pkt->head.detect_mult, + pkt->head.length); + if (t->len >= sizeof 
(bfd_pkt_t) + && pkt->head.length >= sizeof (bfd_pkt_t)) + { + s = format (s, " my discriminator: %u\n", pkt->my_disc); + s = format (s, " your discriminator: %u\n", pkt->your_disc); + s = format (s, " desired min tx interval: %u\n", + clib_net_to_host_u32 (pkt->des_min_tx)); + s = format (s, " required min rx interval: %u\n", + clib_net_to_host_u32 (pkt->req_min_rx)); + s = format (s, " required min echo rx interval: %u\n", + clib_net_to_host_u32 (pkt->req_min_echo_rx)); + } + } + + return s; +} + +static void +bfd_on_state_change (bfd_main_t * bm, bfd_session_t * bs, u64 now) +{ + BFD_DBG ("State changed: %U", format_bfd_session, bs); + bfd_event (bm, bs); + switch (bs->local_state) + { + case BFD_STATE_admin_down: + bfd_set_desired_min_tx (bm, bs, now, + clib_max (bs->config_desired_min_tx_us, + BFD_DEFAULT_DESIRED_MIN_TX_US)); + break; + case BFD_STATE_down: + bfd_set_desired_min_tx (bm, bs, now, + clib_max (bs->config_desired_min_tx_us, + BFD_DEFAULT_DESIRED_MIN_TX_US)); + break; + case BFD_STATE_init: + bfd_set_desired_min_tx (bm, bs, now, + clib_max (bs->config_desired_min_tx_us, + BFD_DEFAULT_DESIRED_MIN_TX_US)); + break; + case BFD_STATE_up: + bfd_set_desired_min_tx (bm, bs, now, bs->config_desired_min_tx_us); + break; + } +} + +static void +bfd_add_transport_layer (vlib_main_t * vm, vlib_buffer_t * b, + bfd_session_t * bs) +{ + switch (bs->transport) + { + case BFD_TRANSPORT_UDP4: + /* fallthrough */ + case BFD_TRANSPORT_UDP6: + BFD_DBG ("Transport bfd via udp, bs_idx=%u", bs->bs_idx); + bfd_add_udp_transport (vm, b, &bs->udp); + break; + } +} + +static vlib_buffer_t * +bfd_create_frame (vlib_main_t * vm, vlib_node_runtime_t * rt, + bfd_session_t * bs) +{ + u32 bi; + if (vlib_buffer_alloc (vm, &bi, 1) != 1) + { + clib_warning ("buffer allocation failure"); + return NULL; + } + + vlib_buffer_t *b = vlib_get_buffer (vm, bi); + ASSERT (b->current_data == 0); + + u32 *to_next; + u32 n_left_to_next; + + vlib_get_next_frame (vm, rt, bfd_next_index_by_transport[bs->transport], + to_next, n_left_to_next); + + to_next[0] = bi; + n_left_to_next -= 1; + + vlib_put_next_frame (vm, rt, bfd_next_index_by_transport[bs->transport], + n_left_to_next); + return b; +} + +static void +bfd_init_control_frame (vlib_buffer_t * b, bfd_session_t * bs) +{ + bfd_pkt_t *pkt = vlib_buffer_get_current (b); + const u32 bfd_length = 24; + memset (pkt, 0, sizeof (*pkt)); + + bfd_pkt_set_version (pkt, 1); + bfd_pkt_set_diag_code (pkt, bs->local_diag); + bfd_pkt_set_state (pkt, bs->local_state); + if (bs->local_demand && BFD_STATE_up == bs->local_state && + BFD_STATE_up == bs->remote_state) + { + bfd_pkt_set_demand (pkt); + } + pkt->head.detect_mult = bs->local_detect_mult; + pkt->head.length = clib_host_to_net_u32 (bfd_length); + pkt->my_disc = bs->local_discr; + pkt->your_disc = bs->remote_discr; + pkt->des_min_tx = clib_host_to_net_u32 (bs->desired_min_tx_us); + pkt->req_min_rx = clib_host_to_net_u32 (bs->required_min_rx_us); + pkt->req_min_echo_rx = clib_host_to_net_u32 (0); /* FIXME */ + b->current_length = bfd_length; +} + +static void +bfd_send_periodic (vlib_main_t * vm, vlib_node_runtime_t * rt, + bfd_main_t * bm, bfd_session_t * bs, u64 now) +{ + if (!bs->remote_min_rx_us) + { + BFD_DBG + ("bfd.RemoteMinRxInterval is zero, not sending periodic control " + "frame"); + return; + } + /* FIXME + A system MUST NOT periodically transmit BFD Control packets if Demand + mode is active on the remote system (bfd.RemoteDemandMode is 1, + bfd.SessionState is Up, and bfd.RemoteSessionState is Up) and a Poll + 
Sequence is not being transmitted. + */ + if (now >= bs->tx_timeout_clocks) + { + BFD_DBG ("Send periodic control frame for bs_idx=%lu", bs->bs_idx); + vlib_buffer_t *b = bfd_create_frame (vm, rt, bs); + if (!b) + { + return; + } + bfd_init_control_frame (b, bs); + bfd_add_transport_layer (vm, b, bs); + bfd_calc_next_tx (bm, bs, now); + } + else + { + BFD_DBG ("No need to send control frame now"); + } + bfd_set_timer (bm, bs, now); +} + +void +bfd_send_final (vlib_main_t * vm, vlib_buffer_t * b, bfd_session_t * bs) +{ + BFD_DBG ("Send final control frame for bs_idx=%lu", bs->bs_idx); + bfd_init_control_frame (b, bs); + bfd_pkt_set_final (vlib_buffer_get_current (b)); + bfd_add_transport_layer (vm, b, bs); +} + +static void +bfd_check_rx_timeout (bfd_main_t * bm, bfd_session_t * bs, u64 now) +{ + if (bs->last_rx_clocks + bs->detection_time_clocks < now) + { + BFD_DBG ("Rx timeout, session goes down"); + bfd_set_diag (bs, BFD_DIAG_CODE_det_time_exp); + bfd_set_state (bm, bs, BFD_STATE_down); + } +} + +void +bfd_on_timeout (vlib_main_t * vm, vlib_node_runtime_t * rt, bfd_main_t * bm, + bfd_session_t * bs, u64 now) +{ + BFD_DBG ("Timeout for bs_idx=%lu", bs->bs_idx); + switch (bs->local_state) + { + case BFD_STATE_admin_down: + BFD_ERR ("Unexpected timeout when in %s state", + bfd_state_string (bs->local_state)); + abort (); + break; + case BFD_STATE_down: + bfd_send_periodic (vm, rt, bm, bs, now); + break; + case BFD_STATE_init: + BFD_ERR ("Unexpected timeout when in %s state", + bfd_state_string (bs->local_state)); + abort (); + break; + case BFD_STATE_up: + bfd_check_rx_timeout (bm, bs, now); + bfd_send_periodic (vm, rt, bm, bs, now); + break; + } +} + +/* + * bfd input routine + */ +bfd_error_t +bfd_input (vlib_main_t * vm, vlib_buffer_t * b0, u32 bi0) +{ + // bfd_main_t *bm = &bfd_main; + bfd_error_t e; + + /* find our interface */ + bfd_session_t *s = NULL; + // bfd_get_intf (lm, vnet_buffer (b0)->sw_if_index[VLIB_RX]); + + if (!s) + { + /* bfd disabled on this interface, we're done */ + return BFD_ERROR_DISABLED; + } + + /* Actually scan the packet */ + e = BFD_ERROR_NONE; // bfd_packet_scan (lm, n, vlib_buffer_get_current (b0)); + + return e; +} + +/* + * bfd process node function + */ +static uword +bfd_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f) +{ + bfd_main_t *bm = &bfd_main; + u32 *expired = 0; + uword event_type, *event_data = 0; + + /* So we can send events to the bfd process */ + bm->bfd_process_node_index = bfd_process_node.index; + + while (1) + { + u64 now = clib_cpu_time_now (); + u64 next_expire = timing_wheel_next_expiring_elt_time (&bm->wheel); + BFD_DBG ("timing_wheel_next_expiring_elt_time(%p) returns %lu", + &bm->wheel, next_expire); + if ((i64) next_expire < 0) + { + BFD_DBG ("wait for event without timeout"); + (void) vlib_process_wait_for_event (vm); + } + else + { + f64 timeout = ((i64) next_expire - (i64) now) / bm->cpu_cps; + BFD_DBG ("wait for event with timeout %.02f", timeout); + ASSERT (timeout > 0); + (void) vlib_process_wait_for_event_or_clock (vm, timeout); + } + event_type = vlib_process_get_events (vm, &event_data); + now = clib_cpu_time_now (); + switch (event_type) + { + case ~0: /* no events => timeout */ + /* nothing to do here */ + break; + case BFD_EVENT_RESCHEDULE: + /* nothing to do here - reschedule is done automatically after + * each event or timeout */ + break; + case BFD_EVENT_NEW_SESSION: + do + { + bfd_session_t *bs = + pool_elt_at_index (bm->sessions, *event_data); + bfd_send_periodic (vm, rt, bm, bs, now); + } 
+ while (0); + break; + default: + clib_warning ("BUG: event type 0x%wx", event_type); + break; + } + BFD_DBG ("advancing wheel, now is %lu", now); + BFD_DBG ("timing_wheel_advance (%p, %lu, %p, 0);", &bm->wheel, now, + expired); + expired = timing_wheel_advance (&bm->wheel, now, expired, 0); + BFD_DBG ("Expired %d elements", vec_len (expired)); + u32 *p = NULL; + vec_foreach (p, expired) + { + const u32 bs_idx = *p; + if (!pool_is_free_index (bm->sessions, bs_idx)) + { + bfd_session_t *bs = pool_elt_at_index (bm->sessions, bs_idx); + bs->wheel_time_clocks = 0; /* no longer scheduled */ + bfd_on_timeout (vm, rt, bm, bs, now); + } + } + if (expired) + { + _vec_len (expired) = 0; + } + if (event_data) + { + _vec_len (event_data) = 0; + } + } + + return 0; +} + +/* + * bfd process node declaration + */ +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (bfd_process_node, static) = { + .function = bfd_process, + .type = VLIB_NODE_TYPE_PROCESS, + .name = "bfd-process", + .n_next_nodes = BFD_OUTPUT_N_NEXT, + .next_nodes = + { +#define F(t, n) [BFD_OUTPUT_##t] = n, + foreach_bfd_transport (F) +#undef F + }, +}; +/* *INDENT-ON* */ + +static clib_error_t * +bfd_sw_interface_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags) +{ + // bfd_main_t *bm = &bfd_main; + // vnet_hw_interface_t *hi = vnet_get_sup_hw_interface (vnm, sw_if_index); + if (!(flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)) + { + /* TODO */ + } + return 0; +} + +VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (bfd_sw_interface_up_down); + +static clib_error_t * +bfd_hw_interface_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags) +{ + // bfd_main_t *bm = &bfd_main; + if (flags & VNET_HW_INTERFACE_FLAG_LINK_UP) + { + /* TODO */ + } + return 0; +} + +VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION (bfd_hw_interface_up_down); + +/* + * setup function + */ +static clib_error_t * +bfd_main_init (vlib_main_t * vm) +{ + bfd_main_t *bm = &bfd_main; + bm->random_seed = random_default_seed (); + bm->vlib_main = vm; + bm->vnet_main = vnet_get_main (); + memset (&bm->wheel, 0, sizeof (bm->wheel)); + bm->cpu_cps = 2590000000; // vm->clib_time.clocks_per_second; + BFD_DBG ("cps is %.2f", bm->cpu_cps); + const u64 now = clib_cpu_time_now (); + timing_wheel_init (&bm->wheel, now, bm->cpu_cps); + + return 0; +} + +VLIB_INIT_FUNCTION (bfd_main_init); + +bfd_session_t * +bfd_get_session (bfd_main_t * bm, bfd_transport_t t) +{ + bfd_session_t *result; + pool_get (bm->sessions, result); + result->bs_idx = result - bm->sessions; + result->transport = t; + result->local_discr = random_u32 (&bm->random_seed); + bfd_set_defaults (bm, result); + hash_set (bm->session_by_disc, result->local_discr, result->bs_idx); + return result; +} + +void +bfd_put_session (bfd_main_t * bm, bfd_session_t * bs) +{ + hash_unset (bm->session_by_disc, bs->local_discr); + pool_put (bm->sessions, bs); +} + +bfd_session_t * +bfd_find_session_by_idx (bfd_main_t * bm, uword bs_idx) +{ + if (!pool_is_free_index (bm->sessions, bs_idx)) + { + return pool_elt_at_index (bm->sessions, bs_idx); + } + return NULL; +} + +bfd_session_t * +bfd_find_session_by_disc (bfd_main_t * bm, u32 disc) +{ + uword *p = hash_get (bfd_main.session_by_disc, disc); + if (p) + { + return pool_elt_at_index (bfd_main.sessions, *p); + } + return NULL; +} + +/** + * @brief verify bfd packet - common checks + * + * @param pkt + * + * @return 1 if bfd packet is valid + */ +int +bfd_verify_pkt_common (const bfd_pkt_t * pkt) +{ + if (1 != bfd_pkt_get_version (pkt)) + { + BFD_ERR ("BFD verification failed - unexpected version: '%d'", + 
bfd_pkt_get_version (pkt)); + return 0; + } + if (pkt->head.length < sizeof (bfd_pkt_t) || + (bfd_pkt_get_auth_present (pkt) && + pkt->head.length < sizeof (bfd_pkt_with_auth_t))) + { + BFD_ERR ("BFD verification failed - unexpected length: '%d' (auth " + "present: %d)", + pkt->head.length, bfd_pkt_get_auth_present (pkt)); + return 0; + } + if (!pkt->head.detect_mult) + { + BFD_ERR ("BFD verification failed - unexpected detect-mult: '%d'", + pkt->head.detect_mult); + return 0; + } + if (bfd_pkt_get_multipoint (pkt)) + { + BFD_ERR ("BFD verification failed - unexpected multipoint: '%d'", + bfd_pkt_get_multipoint (pkt)); + return 0; + } + if (!pkt->my_disc) + { + BFD_ERR ("BFD verification failed - unexpected my-disc: '%d'", + pkt->my_disc); + return 0; + } + if (!pkt->your_disc) + { + const u8 pkt_state = bfd_pkt_get_state (pkt); + if (pkt_state != BFD_STATE_down && pkt_state != BFD_STATE_admin_down) + { + BFD_ERR ("BFD verification failed - unexpected state: '%s' " + "(your-disc is zero)", bfd_state_string (pkt_state)); + return 0; + } + } + return 1; +} + +/** + * @brief verify bfd packet - authentication + * + * @param pkt + * + * @return 1 if bfd packet is valid + */ +int +bfd_verify_pkt_session (const bfd_pkt_t * pkt, u16 pkt_size, + const bfd_session_t * bs) +{ + const bfd_pkt_with_auth_t *with_auth = (bfd_pkt_with_auth_t *) pkt; + if (!bfd_pkt_get_auth_present (pkt)) + { + if (pkt_size > sizeof (*pkt)) + { + BFD_ERR ("BFD verification failed - unexpected packet size '%d' " + "(auth not present)", pkt_size); + return 0; + } + } + else + { + if (!with_auth->auth.type) + { + BFD_ERR ("BFD verification failed - unexpected auth type: '%d'", + with_auth->auth.type); + return 0; + } + /* TODO FIXME - implement the actual verification */ + } + return 1; +} + +void +bfd_consume_pkt (bfd_main_t * bm, const bfd_pkt_t * pkt, u32 bs_idx) +{ + bfd_session_t *bs = bfd_find_session_by_idx (bm, bs_idx); + if (!bs) + { + return; + } + BFD_DBG ("Scanning bfd packet, bs_idx=%d", bs->bs_idx); + bs->remote_discr = pkt->my_disc; + bs->remote_state = bfd_pkt_get_state (pkt); + bs->remote_demand = bfd_pkt_get_demand (pkt); + bs->remote_min_rx_us = clib_net_to_host_u32 (pkt->req_min_rx); + bs->remote_min_rx_clocks = bfd_us_to_clocks (bm, bs->remote_min_rx_us); + BFD_DBG ("Set remote min rx to %lu clocks/%.2fs", bs->remote_min_rx_clocks, + bs->remote_min_rx_clocks / bm->cpu_cps); + bs->remote_desired_min_tx_us = clib_net_to_host_u32 (pkt->des_min_tx); + bs->remote_detect_mult = pkt->head.detect_mult; + bfd_recalc_detection_time (bm, bs); + bs->last_rx_clocks = clib_cpu_time_now (); + /* FIXME + If the Required Min Echo RX Interval field is zero, the + transmission of Echo packets, if any, MUST cease. + + If a Poll Sequence is being transmitted by the local system and + the Final (F) bit in the received packet is set, the Poll Sequence + MUST be terminated. 
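+
+     A minimal sketch of the Final (F) bit handling described above
+     (the poll_in_progress flag is hypothetical - this patch does not
+     track Poll Sequences yet, only bfd_pkt_get_final() exists):
+
+       if (bs->poll_in_progress && bfd_pkt_get_final (pkt))
+         {
+           bs->poll_in_progress = 0;
+         }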
+ */ + /* FIXME 6.8.2 */ + /* FIXME 6.8.4 */ + if (BFD_STATE_admin_down == bs->local_state) + return; + if (BFD_STATE_admin_down == bs->remote_state) + { + bfd_set_diag (bs, BFD_DIAG_CODE_neighbor_sig_down); + bfd_set_state (bm, bs, BFD_STATE_down); + } + else if (BFD_STATE_down == bs->local_state) + { + if (BFD_STATE_down == bs->remote_state) + { + bfd_set_state (bm, bs, BFD_STATE_init); + } + else if (BFD_STATE_init == bs->remote_state) + { + bfd_set_state (bm, bs, BFD_STATE_up); + } + } + else if (BFD_STATE_init == bs->local_state) + { + if (BFD_STATE_up == bs->remote_state || + BFD_STATE_init == bs->remote_state) + { + bfd_set_state (bm, bs, BFD_STATE_up); + } + } + else /* BFD_STATE_up == bs->local_state */ + { + if (BFD_STATE_down == bs->remote_state) + { + bfd_set_diag (bs, BFD_DIAG_CODE_neighbor_sig_down); + bfd_set_state (bm, bs, BFD_STATE_down); + } + } +} + +u8 * +format_bfd_session (u8 * s, va_list * args) +{ + const bfd_session_t *bs = va_arg (*args, bfd_session_t *); + return format (s, "BFD(%u): bfd.SessionState=%s, " + "bfd.RemoteSessionState=%s, " + "bfd.LocalDiscr=%u, " + "bfd.RemoteDiscr=%u, " + "bfd.LocalDiag=%s, " + "bfd.DesiredMinTxInterval=%u, " + "bfd.RequiredMinRxInterval=%u, " + "bfd.RemoteMinRxInterval=%u, " + "bfd.DemandMode=%s, " + "bfd.RemoteDemandMode=%s, " + "bfd.DetectMult=%u, ", + bs->bs_idx, bfd_state_string (bs->local_state), + bfd_state_string (bs->remote_state), bs->local_discr, + bs->remote_discr, bfd_diag_code_string (bs->local_diag), + bs->desired_min_tx_us, bs->required_min_rx_us, + bs->remote_min_rx_us, (bs->local_demand ? "yes" : "no"), + (bs->remote_demand ? "yes" : "no"), bs->local_detect_mult); +} + +bfd_main_t bfd_main; + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/vnet/vnet/bfd/bfd_main.h b/vnet/vnet/bfd/bfd_main.h new file mode 100644 index 00000000000..727903bd286 --- /dev/null +++ b/vnet/vnet/bfd/bfd_main.h @@ -0,0 +1,218 @@ +/* + * Copyright (c) 2011-2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +/** + * @file + * @brief BFD global declarations + */ +#ifndef __included_bfd_main_h__ +#define __included_bfd_main_h__ + +#include <vppinfra/timing_wheel.h> +#include <vnet/vnet.h> +#include <vnet/bfd/bfd_protocol.h> +#include <vnet/bfd/bfd_udp.h> + +#define foreach_bfd_transport(F) \ + F (UDP4, "ip4-rewrite") \ + F (UDP6, "ip6-rewrite") + +typedef enum +{ +#define F(t, n) BFD_TRANSPORT_##t, + foreach_bfd_transport (F) +#undef F +} bfd_transport_t; + +#define foreach_bfd_mode(F) \ + F (asynchronous) \ + F (demand) + +typedef enum +{ +#define F(x) BFD_MODE_##x, + foreach_bfd_mode (F) +#undef F +} bfd_mode_e; + +typedef struct +{ + /* index in bfd_main.sessions pool */ + uword bs_idx; + + /* session state */ + bfd_state_e local_state; + + /* local diagnostics */ + bfd_diag_code_e local_diag; + + /* remote session state */ + bfd_state_e remote_state; + + /* local discriminator */ + u32 local_discr; + + /* remote discriminator */ + u32 remote_discr; + + /* configured desired min tx interval (microseconds) */ + u32 config_desired_min_tx_us; + + /* desired min tx interval (microseconds) */ + u32 desired_min_tx_us; + + /* desired min tx interval (clocks) */ + u64 desired_min_tx_clocks; + + /* required min rx interval */ + u32 required_min_rx_us; + + /* remote min rx interval (microseconds) */ + u32 remote_min_rx_us; + + /* remote min rx interval (clocks) */ + u64 remote_min_rx_clocks; + + /* remote desired min tx interval */ + u32 remote_desired_min_tx_us; + + /* 1 if in demand mode, 0 otherwise */ + u8 local_demand; + + /* 1 if remote system sets demand mode, 0 otherwise */ + u8 remote_demand; + + u8 local_detect_mult; + u8 remote_detect_mult; + + /* set to value of timer in timing wheel, 0 if not set */ + u64 wheel_time_clocks; + + /* transmit interval */ + u64 transmit_interval_clocks; + + /* next time at which to transmit a packet */ + u64 tx_timeout_clocks; + + /* timestamp of last packet received */ + u64 last_rx_clocks; + + /* detection time */ + u64 detection_time_clocks; + + /* transport type for this session */ + bfd_transport_t transport; + + union + { + bfd_udp_session_t udp; + }; +} bfd_session_t; + +typedef struct +{ + u32 client_index; + u32 client_pid; +} event_subscriber_t; + +typedef struct +{ + /* pool of bfd sessions context data */ + bfd_session_t *sessions; + + /* timing wheel for scheduling timeouts */ + timing_wheel_t wheel; + + /* hashmap - bfd session by discriminator */ + u32 *session_by_disc; + + /* background process node index */ + u32 bfd_process_node_index; + + /* convenience variables */ + vlib_main_t *vlib_main; + vnet_main_t *vnet_main; + + /* cpu clocks per second */ + f64 cpu_cps; + + /* for generating random numbers */ + u32 random_seed; + + /* pool of event subscribers */ + //event_subscriber_t *subscribers; + +} bfd_main_t; + +extern bfd_main_t bfd_main; + +/* Packet counters */ +#define foreach_bfd_error(F) \ + F (NONE, "good bfd packets (processed)") \ + F (BAD, "invalid bfd packets") \ + F (DISABLED, "bfd packets received on disabled interfaces") + +typedef enum +{ +#define F(sym, str) BFD_ERROR_##sym, + foreach_bfd_error (F) +#undef F + BFD_N_ERROR, +} bfd_error_t; + +/* bfd packet trace capture */ +typedef struct +{ + u32 len; + u8 data[400]; +} bfd_input_trace_t; + +enum +{ + BFD_EVENT_RESCHEDULE = 1, + BFD_EVENT_NEW_SESSION, +} bfd_process_event_e; + +bfd_error_t bfd_input (vlib_main_t * vm, vlib_buffer_t * b0, u32 bi0); +u8 *bfd_input_format_trace (u8 * s, va_list * args); + +bfd_session_t *bfd_get_session (bfd_main_t * bm, bfd_transport_t 
t); +void bfd_put_session (bfd_main_t * bm, bfd_session_t * bs); +bfd_session_t *bfd_find_session_by_idx (bfd_main_t * bm, uword bs_idx); +bfd_session_t *bfd_find_session_by_disc (bfd_main_t * bm, u32 disc); +void bfd_session_start (bfd_main_t * bm, bfd_session_t * bs); +void bfd_consume_pkt (bfd_main_t * bm, const bfd_pkt_t * bfd, u32 bs_idx); +int bfd_verify_pkt_common (const bfd_pkt_t * pkt); +int bfd_verify_pkt_session (const bfd_pkt_t * pkt, u16 pkt_size, + const bfd_session_t * bs); +void bfd_event (bfd_main_t * bm, bfd_session_t * bs); +void bfd_send_final (vlib_main_t * vm, vlib_buffer_t * b, bfd_session_t * bs); +u8 *format_bfd_session (u8 * s, va_list * args); + + +#define USEC_PER_MS 1000LL +#define USEC_PER_SECOND (1000 * USEC_PER_MS) + +/* default, slow transmission interval for BFD packets, per spec at least 1s */ +#define BFD_DEFAULT_DESIRED_MIN_TX_US USEC_PER_SECOND + +#endif /* __included_bfd_main_h__ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/vnet/vnet/bfd/bfd_protocol.c b/vnet/vnet/bfd/bfd_protocol.c new file mode 100644 index 00000000000..ede9536f3cf --- /dev/null +++ b/vnet/vnet/bfd/bfd_protocol.c @@ -0,0 +1,74 @@ +#include <vnet/bfd/bfd_protocol.h> + +u8 bfd_pkt_get_version (const bfd_pkt_t *pkt) +{ + return pkt->head.vers_diag >> 5; +} + +void bfd_pkt_set_version (bfd_pkt_t *pkt, int version) +{ + pkt->head.vers_diag = + (version << 5) | (pkt->head.vers_diag & ((1 << 5) - 1)); +} + +u8 bfd_pkt_get_diag_code (const bfd_pkt_t *pkt) +{ + return pkt->head.vers_diag & ((1 << 5) - 1); +} + +void bfd_pkt_set_diag_code (bfd_pkt_t *pkt, int value) +{ + pkt->head.vers_diag = + (pkt->head.vers_diag & ~((1 << 5) - 1)) | (value & ((1 << 5) - 1)); +} + +u8 bfd_pkt_get_state (const bfd_pkt_t *pkt) +{ + return pkt->head.sta_flags >> 6; +} + +void bfd_pkt_set_state (bfd_pkt_t *pkt, int value) +{ + pkt->head.sta_flags = (value << 6) | (pkt->head.sta_flags & ((1 << 6) - 1)); +} + +u8 bfd_pkt_get_poll (const bfd_pkt_t *pkt) +{ + return (pkt->head.sta_flags >> 5) & 1; +} + +void bfd_pkt_set_final (bfd_pkt_t *pkt) { pkt->head.sta_flags |= 1 << 5; } + +u8 bfd_pkt_get_final (const bfd_pkt_t *pkt) +{ + return (pkt->head.sta_flags >> 4) & 1; +} + +void bfd_pkt_set_poll (bfd_pkt_t *pkt); +u8 bfd_pkt_get_control_plane_independent (const bfd_pkt_t *pkt) +{ + return (pkt->head.sta_flags >> 3) & 1; +} + +void bfd_pkt_set_control_plane_independent (bfd_pkt_t *pkt); + +u8 bfd_pkt_get_auth_present (const bfd_pkt_t *pkt) +{ + return (pkt->head.sta_flags >> 2) & 1; +} + +void bfd_pkt_set_auth_present (bfd_pkt_t *pkt); + +u8 bfd_pkt_get_demand (const bfd_pkt_t *pkt) +{ + return (pkt->head.sta_flags >> 1) & 1; +} + +void bfd_pkt_set_demand (bfd_pkt_t *pkt) { pkt->head.sta_flags |= 1 << 1; } + +u8 bfd_pkt_get_multipoint (const bfd_pkt_t *pkt) +{ + return pkt->head.sta_flags & 1; +} + +void bfd_pkt_set_multipoint (bfd_pkt_t *pkt); diff --git a/vnet/vnet/bfd/bfd_protocol.h b/vnet/vnet/bfd/bfd_protocol.h new file mode 100644 index 00000000000..cf751b3b89a --- /dev/null +++ b/vnet/vnet/bfd/bfd_protocol.h @@ -0,0 +1,154 @@ +/* + * Copyright (c) 2011-2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef __included_bfd_protocol_h__ +#define __included_bfd_protocol_h__ +/** + * @file + * @brief BFD protocol declarations + */ + +#include <vppinfra/types.h> +#include <vppinfra/clib.h> + +/* *INDENT-OFF* */ +typedef CLIB_PACKED (struct { + /* + An optional Authentication Section MAY be present: + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Auth Type | Auth Len | Authentication Data... | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + u8 type; + u8 len; + u8 data[0]; +}) bfd_auth_t; +/* *INDENT-ON* */ + +/* *INDENT-OFF* */ +typedef CLIB_PACKED (struct { + /* + The Mandatory Section of a BFD Control packet has the following + format: + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |Vers | Diag |Sta|P|F|C|A|D|M| Detect Mult | Length | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | My Discriminator | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Your Discriminator | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Desired Min TX Interval | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Required Min RX Interval | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Required Min Echo RX Interval | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + */ + struct + { + u8 vers_diag; + u8 sta_flags; + u8 detect_mult; + u8 length; + } head; + u32 my_disc; + u32 your_disc; + u32 des_min_tx; + u32 req_min_rx; + u32 req_min_echo_rx; +}) bfd_pkt_t; +/* *INDENT-ON* */ + +/* *INDENT-OFF* */ +typedef CLIB_PACKED (struct { + bfd_pkt_t pkt; + bfd_auth_t auth; +}) bfd_pkt_with_auth_t; +/* *INDENT-ON* */ + +u8 bfd_pkt_get_version (const bfd_pkt_t * pkt); +void bfd_pkt_set_version (bfd_pkt_t * pkt, int version); +u8 bfd_pkt_get_diag_code (const bfd_pkt_t * pkt); +void bfd_pkt_set_diag_code (bfd_pkt_t * pkt, int value); +u8 bfd_pkt_get_state (const bfd_pkt_t * pkt); +void bfd_pkt_set_state (bfd_pkt_t * pkt, int value); +u8 bfd_pkt_get_poll (const bfd_pkt_t * pkt); +void bfd_pkt_set_final (bfd_pkt_t * pkt); +u8 bfd_pkt_get_final (const bfd_pkt_t * pkt); +void bfd_pkt_set_poll (bfd_pkt_t * pkt); +u8 bfd_pkt_get_control_plane_independent (const bfd_pkt_t * pkt); +void bfd_pkt_set_control_plane_independent (bfd_pkt_t * pkt); +u8 bfd_pkt_get_auth_present (const bfd_pkt_t * pkt); +void bfd_pkt_set_auth_present (bfd_pkt_t * pkt); +u8 bfd_pkt_get_demand (const bfd_pkt_t * pkt); +void bfd_pkt_set_demand (bfd_pkt_t * pkt); +u8 bfd_pkt_get_multipoint (const bfd_pkt_t * pkt); +void bfd_pkt_set_multipoint (bfd_pkt_t * pkt); + +/* BFD diagnostic codes */ +#define foreach_bfd_diag_code(F) \ + F (0, no_diag, "No Diagnostic") \ + F (1, det_time_exp, "Control Detection Time Expired") \ + F (2, echo_failed, "Echo Function Failed") \ + F (3, neighbor_sig_down, "Neighbor Signaled Session Down") \ + F (4, fwd_plain_reset, "Forwarding Plane Reset") 
\ + F (5, path_down, "Path Down") \ + F (6, concat_path_down, "Concatenated Path Down") \ + F (7, admin_down, "Administratively Down") \ + F (8, reverse_concat_path_down, "Reverse Concatenated Path Down") + +#define BFD_DIAG_CODE_NAME(t) BFD_DIAG_CODE_##t + +typedef enum +{ +#define F(n, t, s) BFD_DIAG_CODE_NAME (t) = n, + foreach_bfd_diag_code (F) +#undef F +} bfd_diag_code_e; + +const char *bfd_diag_code_string (bfd_diag_code_e diag); + +/* BFD state values */ +#define foreach_bfd_state(F) \ + F (0, admin_down, "AdminDown") \ + F (1, down, "Down") \ + F (2, init, "Init") \ + F (3, up, "Up") + +#define BFD_STATE_NAME(t) BFD_STATE_##t + +typedef enum +{ +#define F(n, t, s) BFD_STATE_NAME (t) = n, + foreach_bfd_state (F) +#undef F +} bfd_state_e; + +const char *bfd_state_string (bfd_state_e state); + +#endif /* __included_bfd_protocol_h__ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/vnet/vnet/bfd/bfd_udp.c b/vnet/vnet/bfd/bfd_udp.c new file mode 100644 index 00000000000..ded3342559c --- /dev/null +++ b/vnet/vnet/bfd/bfd_udp.c @@ -0,0 +1,610 @@ +#include <vppinfra/types.h> +#include <vlibmemory/api.h> +#include <vlib/vlib.h> +#include <vlib/buffer.h> +#include <vnet/ip/format.h> +#include <vnet/ethernet/packet.h> +#include <vnet/ip/udp_packet.h> +#include <vnet/ip/lookup.h> +#include <vnet/ip/icmp46_packet.h> +#include <vnet/ip/ip4.h> +#include <vnet/ip/ip6.h> +#include <vnet/ip/udp.h> +#include <vnet/ip/ip6_packet.h> +#include <vnet/adj/adj.h> +#include <vnet/adj/adj_nbr.h> +#include <vnet/bfd/bfd_debug.h> +#include <vnet/bfd/bfd_udp.h> +#include <vnet/bfd/bfd_main.h> +#include <vnet/bfd/bfd_api.h> + +typedef struct +{ + bfd_main_t *bfd_main; + /* hashmap - bfd session index by bfd key - used for CLI/API lookup, where + * discriminator is unknown */ + mhash_t bfd_session_idx_by_bfd_key; +} bfd_udp_main_t; + +bfd_udp_main_t bfd_udp_main; + +void bfd_udp_transport_to_buffer (vlib_main_t *vm, vlib_buffer_t *b, + bfd_udp_session_t *bus) +{ + udp_header_t *udp; + u16 udp_length, ip_length; + bfd_udp_key_t *key = &bus->key; + + b->flags |= VNET_BUFFER_LOCALLY_ORIGINATED; + if (ip46_address_is_ip4 (&key->local_addr)) + { + ip4_header_t *ip4; + const size_t data_size = sizeof (*ip4) + sizeof (*udp); + vlib_buffer_advance (b, -data_size); + ip4 = vlib_buffer_get_current (b); + udp = (udp_header_t *)(ip4 + 1); + memset (ip4, 0, data_size); + ip4->ip_version_and_header_length = 0x45; + ip4->ttl = 255; + ip4->protocol = IP_PROTOCOL_UDP; + ip4->src_address.as_u32 = key->local_addr.ip4.as_u32; + ip4->dst_address.as_u32 = key->peer_addr.ip4.as_u32; + + udp->src_port = clib_host_to_net_u16 (50000); /* FIXME */ + udp->dst_port = clib_host_to_net_u16 (UDP_DST_PORT_bfd4); + + /* fix ip length, checksum and udp length */ + ip_length = vlib_buffer_length_in_chain (vm, b); + + ip4->length = clib_host_to_net_u16 (ip_length); + ip4->checksum = ip4_header_checksum (ip4); + + udp_length = ip_length - (sizeof (*ip4)); + udp->length = clib_host_to_net_u16 (udp_length); + } + else + { + BFD_ERR ("not implemented"); + abort (); + } +} + +void bfd_add_udp_transport (vlib_main_t *vm, vlib_buffer_t *b, + bfd_udp_session_t *bus) +{ + vnet_buffer (b)->ip.adj_index[VLIB_RX] = bus->adj_index; + vnet_buffer (b)->ip.adj_index[VLIB_TX] = bus->adj_index; + bfd_udp_transport_to_buffer (vm, b, bus); +} + +static bfd_session_t *bfd_lookup_session (bfd_udp_main_t *bum, + const bfd_udp_key_t *key) +{ + uword *p = mhash_get 
(&bum->bfd_session_idx_by_bfd_key, key); + if (p) + { + return bfd_find_session_by_idx (bum->bfd_main, *p); + } + return 0; +} + +static vnet_api_error_t +bfd_udp_add_session_internal (bfd_udp_main_t *bum, u32 sw_if_index, + u32 desired_min_tx_us, u32 required_min_rx_us, + u8 detect_mult, const ip46_address_t *local_addr, + const ip46_address_t *peer_addr) +{ + vnet_sw_interface_t *sw_if = + vnet_get_sw_interface (vnet_get_main (), sw_if_index); + /* get a pool entry and if we end up not needing it, give it back */ + bfd_transport_t t = BFD_TRANSPORT_UDP4; + if (!ip46_address_is_ip4 (local_addr)) + { + t = BFD_TRANSPORT_UDP6; + } + bfd_session_t *bs = bfd_get_session (bum->bfd_main, t); + bfd_udp_session_t *bus = &bs->udp; + memset (bus, 0, sizeof (*bus)); + bfd_udp_key_t *key = &bus->key; + key->sw_if_index = sw_if->sw_if_index; + key->local_addr.as_u64[0] = local_addr->as_u64[0]; + key->local_addr.as_u64[1] = local_addr->as_u64[1]; + key->peer_addr.as_u64[0] = peer_addr->as_u64[0]; + key->peer_addr.as_u64[1] = peer_addr->as_u64[1]; + const bfd_session_t *tmp = bfd_lookup_session (bum, key); + if (tmp) + { + BFD_ERR ("duplicate bfd-udp session, existing bs_idx=%d", tmp->bs_idx); + bfd_put_session (bum->bfd_main, bs); + return VNET_API_ERROR_BFD_EEXIST; + } + key->sw_if_index = sw_if->sw_if_index; + mhash_set (&bum->bfd_session_idx_by_bfd_key, key, bs->bs_idx, NULL); + BFD_DBG ("session created, bs_idx=%u, sw_if_index=%d, local=%U, peer=%U", + bs->bs_idx, key->sw_if_index, format_ip46_address, &key->local_addr, + IP46_TYPE_ANY, format_ip46_address, &key->peer_addr, IP46_TYPE_ANY); + if (BFD_TRANSPORT_UDP4 == t) + { + bus->adj_index = adj_nbr_add_or_lock (FIB_PROTOCOL_IP4, VNET_LINK_IP4, + &key->peer_addr, key->sw_if_index); + BFD_DBG ("adj_nbr_add_or_lock(FIB_PROTOCOL_IP4, VNET_LINK_IP4, %U, %d) " + "returns %d", + format_ip46_address, &key->peer_addr, IP46_TYPE_ANY, + key->sw_if_index, bus->adj_index); + } + else + { + bus->adj_index = adj_nbr_add_or_lock (FIB_PROTOCOL_IP6, VNET_LINK_IP6, + &key->peer_addr, key->sw_if_index); + BFD_DBG ("adj_nbr_add_or_lock(FIB_PROTOCOL_IP6, VNET_LINK_IP6, %U, %d) " + "returns %d", + format_ip46_address, &key->peer_addr, IP46_TYPE_ANY, + key->sw_if_index, bus->adj_index); + } + bs->config_desired_min_tx_us = desired_min_tx_us; + bs->required_min_rx_us = required_min_rx_us; + bs->local_detect_mult = detect_mult; + bfd_session_start (bum->bfd_main, bs); + return 0; +} + +static vnet_api_error_t +bfd_udp_validate_api_input (u32 sw_if_index, const ip46_address_t *local_addr, + const ip46_address_t *peer_addr) +{ + vnet_sw_interface_t *sw_if = + vnet_get_sw_interface (vnet_get_main (), sw_if_index); + u8 local_ip_valid = 0; + ip_interface_address_t *ia = NULL; + if (!sw_if) + { + BFD_ERR ("got NULL sw_if"); + return VNET_API_ERROR_INVALID_SW_IF_INDEX; + } + if (ip46_address_is_ip4 (local_addr)) + { + if (!ip46_address_is_ip4 (peer_addr)) + { + BFD_ERR ("IP family mismatch"); + return VNET_API_ERROR_INVALID_ARGUMENT; + } + ip4_main_t *im = &ip4_main; + + /* *INDENT-OFF* */ + foreach_ip_interface_address ( + &im->lookup_main, ia, sw_if_index, 0 /* honor unnumbered */, ({ + ip4_address_t *x = + ip_interface_address_get_address (&im->lookup_main, ia); + if (x->as_u32 == local_addr->ip4.as_u32) + { + /* valid address for this interface */ + local_ip_valid = 1; + break; + } + })); + /* *INDENT-ON* */ + } + else + { + if (ip46_address_is_ip4 (peer_addr)) + { + BFD_ERR ("IP family mismatch"); + return VNET_API_ERROR_INVALID_ARGUMENT; + } + ip6_main_t *im = &ip6_main; 
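+      /* walk the IPv6 addresses configured on this interface - the
+       * session's local address must match one of them (mirrors the
+       * IPv4 branch above) */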
+ /* *INDENT-OFF* */ + foreach_ip_interface_address ( + &im->lookup_main, ia, sw_if_index, 0 /* honor unnumbered */, ({ + ip6_address_t *x = + ip_interface_address_get_address (&im->lookup_main, ia); + if (local_addr->ip6.as_u64[0] == x->as_u64[0] && + local_addr->ip6.as_u64[1] == x->as_u64[1]) + { + /* valid address for this interface */ + local_ip_valid = 1; + break; + } + })); + /* *INDENT-ON* */ + } + + if (!local_ip_valid) + { + BFD_ERR ("address not found on interface"); + return VNET_API_ERROR_ADDRESS_NOT_FOUND_FOR_INTERFACE; + } + + return 0; +} + +vnet_api_error_t bfd_udp_add_session (u32 sw_if_index, u32 desired_min_tx_us, + u32 required_min_rx_us, u8 detect_mult, + const ip46_address_t *local_addr, + const ip46_address_t *peer_addr) +{ + vnet_api_error_t rv = + bfd_udp_validate_api_input (sw_if_index, local_addr, peer_addr); + if (rv) + { + return rv; + } + if (detect_mult < 1) + { + BFD_ERR ("detect_mult < 1"); + return VNET_API_ERROR_INVALID_ARGUMENT; + } + if (desired_min_tx_us < 1) + { + BFD_ERR ("desired_min_tx_us < 1"); + return VNET_API_ERROR_INVALID_ARGUMENT; + } + return bfd_udp_add_session_internal (&bfd_udp_main, sw_if_index, + desired_min_tx_us, required_min_rx_us, + detect_mult, local_addr, peer_addr); +} + +vnet_api_error_t bfd_udp_del_session (u32 sw_if_index, + const ip46_address_t *local_addr, + const ip46_address_t *peer_addr) +{ + vnet_api_error_t rv = + bfd_udp_validate_api_input (sw_if_index, local_addr, peer_addr); + if (rv) + { + return rv; + } + bfd_udp_main_t *bum = &bfd_udp_main; + vnet_sw_interface_t *sw_if = + vnet_get_sw_interface (vnet_get_main (), sw_if_index); + bfd_udp_key_t key; + memset (&key, 0, sizeof (key)); + key.sw_if_index = sw_if->sw_if_index; + key.local_addr.as_u64[0] = local_addr->as_u64[0]; + key.local_addr.as_u64[1] = local_addr->as_u64[1]; + key.peer_addr.as_u64[0] = peer_addr->as_u64[0]; + key.peer_addr.as_u64[1] = peer_addr->as_u64[1]; + bfd_session_t *tmp = bfd_lookup_session (bum, &key); + if (tmp) + { + BFD_DBG ("free bfd-udp session, bs_idx=%d", tmp->bs_idx); + mhash_unset (&bum->bfd_session_idx_by_bfd_key, &key, NULL); + adj_unlock (tmp->udp.adj_index); + bfd_put_session (bum->bfd_main, tmp); + } + else + { + BFD_ERR ("no such session"); + return VNET_API_ERROR_BFD_NOENT; + } + return 0; +} + +typedef enum { + BFD_UDP_INPUT_NEXT_NORMAL, + BFD_UDP_INPUT_NEXT_REPLY, + BFD_UDP_INPUT_N_NEXT, +} bfd_udp_input_next_t; + +/* Packet counters */ +#define foreach_bfd_udp_error(F) \ + F (NONE, "good bfd packets (processed)") \ + F (BAD, "invalid bfd packets") \ + F (DISABLED, "bfd packets received on disabled interfaces") + +#define F(sym, string) static char BFD_UDP_ERR_##sym##_STR[] = string; +foreach_bfd_udp_error (F); +#undef F + +static char *bfd_udp_error_strings[] = { +#define F(sym, string) BFD_UDP_ERR_##sym##_STR, + foreach_bfd_udp_error (F) +#undef F +}; + +typedef enum { +#define F(sym, str) BFD_UDP_ERROR_##sym, + foreach_bfd_udp_error (F) +#undef F + BFD_UDP_N_ERROR, +} bfd_udp_error_t; + +static void bfd_udp4_find_headers (vlib_buffer_t *b, const ip4_header_t **ip4, + const udp_header_t **udp) +{ + *ip4 = vnet_buffer (b)->ip.header; + *udp = (udp_header_t *)((*ip4) + 1); +} + +static bfd_udp_error_t bfd_udp4_verify_transport (const ip4_header_t *ip4, + const udp_header_t *udp, + const bfd_session_t *bs) +{ + const bfd_udp_session_t *bus = &bs->udp; + const bfd_udp_key_t *key = &bus->key; + if (ip4->src_address.as_u32 != key->peer_addr.ip4.as_u32) + { + BFD_ERR ("IP src addr mismatch, got %U, expected %U", 
format_ip4_address, + ip4->src_address.as_u32, format_ip4_address, + key->peer_addr.ip4.as_u32); + return BFD_UDP_ERROR_BAD; + } + if (ip4->dst_address.as_u32 != key->local_addr.ip4.as_u32) + { + BFD_ERR ("IP dst addr mismatch, got %U, expected %U", format_ip4_address, + ip4->dst_address.as_u32, format_ip4_address, + key->local_addr.ip4.as_u32); + return BFD_UDP_ERROR_BAD; + } + const u8 expected_ttl = 255; + if (ip4->ttl != expected_ttl) + { + BFD_ERR ("IP unexpected TTL value %d, expected %d", ip4->ttl, + expected_ttl); + return BFD_UDP_ERROR_BAD; + } + if (clib_net_to_host_u16 (udp->src_port) < 49152 || + clib_net_to_host_u16 (udp->src_port) > 65535) + { + BFD_ERR ("Invalid UDP src port %d, out of range <49152,65535>", + udp->src_port); + } + return BFD_UDP_ERROR_NONE; +} + +typedef struct +{ + u32 bs_idx; + bfd_pkt_t pkt; +} bfd_rpc_update_t; + +static void bfd_rpc_update_session_cb (const bfd_rpc_update_t *a) +{ + bfd_consume_pkt (bfd_udp_main.bfd_main, &a->pkt, a->bs_idx); +} + +static void bfd_rpc_update_session (u32 bs_idx, const bfd_pkt_t *pkt) +{ + /* packet length was already verified to be correct by the caller */ + const u32 data_size = sizeof (bfd_rpc_update_t) - + STRUCT_SIZE_OF (bfd_rpc_update_t, pkt) + + pkt->head.length; + u8 data[data_size]; + bfd_rpc_update_t *update = (bfd_rpc_update_t *)data; + update->bs_idx = bs_idx; + clib_memcpy (&update->pkt, pkt, pkt->head.length); + vl_api_rpc_call_main_thread (bfd_rpc_update_session_cb, data, data_size); +} + +static bfd_udp_error_t bfd_udp4_scan (vlib_main_t *vm, vlib_node_runtime_t *rt, + vlib_buffer_t *b, bfd_session_t **bs_out) +{ + const bfd_pkt_t *pkt = vlib_buffer_get_current (b); + if (sizeof (*pkt) > b->current_length) + { + BFD_ERR ( + "Payload size %d too small to hold bfd packet of minimum size %d", + b->current_length, sizeof (*pkt)); + return BFD_UDP_ERROR_BAD; + } + const ip4_header_t *ip4; + const udp_header_t *udp; + bfd_udp4_find_headers (b, &ip4, &udp); + if (!ip4 || !udp) + { + BFD_ERR ("Couldn't find ip4 or udp header"); + return BFD_UDP_ERROR_BAD; + } + if (!bfd_verify_pkt_common (pkt)) + { + return BFD_UDP_ERROR_BAD; + } + bfd_session_t *bs = NULL; + if (pkt->your_disc) + { + BFD_DBG ("Looking up BFD session using discriminator %u", + pkt->your_disc); + bs = bfd_find_session_by_disc (bfd_udp_main.bfd_main, pkt->your_disc); + } + else + { + bfd_udp_key_t key; + memset (&key, 0, sizeof (key)); + key.sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX]; + key.local_addr.ip4.as_u32 = ip4->dst_address.as_u32; + key.peer_addr.ip4.as_u32 = ip4->src_address.as_u32; + BFD_DBG ("Looking up BFD session using key (sw_if_index=%u, local=%U, " + "peer=%U)", + key.sw_if_index, format_ip4_address, key.local_addr.ip4.as_u8, + format_ip4_address, key.peer_addr.ip4.as_u8); + bs = bfd_lookup_session (&bfd_udp_main, &key); + } + if (!bs) + { + BFD_ERR ("BFD session lookup failed - no session matches BFD pkt"); + return BFD_UDP_ERROR_BAD; + } + BFD_DBG ("BFD session found, bs_idx=%d", bs->bs_idx); + if (!bfd_verify_pkt_session (pkt, b->current_length, bs)) + { + return BFD_UDP_ERROR_BAD; + } + bfd_udp_error_t err; + if (BFD_UDP_ERROR_NONE != (err = bfd_udp4_verify_transport (ip4, udp, bs))) + { + return err; + } + bfd_rpc_update_session (bs->bs_idx, pkt); + *bs_out = bs; + return BFD_UDP_ERROR_NONE; +} + +static bfd_udp_error_t bfd_udp6_scan (vlib_main_t *vm, vlib_buffer_t *b) +{ + /* TODO */ + return BFD_UDP_ERROR_BAD; +} + +/* + * Process a frame of bfd packets + * Expect 1 packet / frame + */ +static uword bfd_udp_input 
(vlib_main_t *vm, vlib_node_runtime_t *rt, + vlib_frame_t *f, int is_ipv6) +{ + u32 n_left_from, *from; + bfd_input_trace_t *t0; + + from = vlib_frame_vector_args (f); /* array of buffer indices */ + n_left_from = f->n_vectors; /* number of buffer indices */ + + while (n_left_from > 0) + { + u32 bi0; + vlib_buffer_t *b0; + u32 next0, error0; + + bi0 = from[0]; + b0 = vlib_get_buffer (vm, bi0); + + bfd_session_t *bs = NULL; + + /* If this pkt is traced, snapshot the data */ + if (b0->flags & VLIB_BUFFER_IS_TRACED) + { + int len; + t0 = vlib_add_trace (vm, rt, b0, sizeof (*t0)); + len = (b0->current_length < sizeof (t0->data)) ? b0->current_length + : sizeof (t0->data); + t0->len = len; + clib_memcpy (t0->data, vlib_buffer_get_current (b0), len); + } + + /* scan this bfd pkt. error0 is the counter index to bmp */ + if (is_ipv6) + { + error0 = bfd_udp6_scan (vm, b0); + } + else + { + error0 = bfd_udp4_scan (vm, rt, b0, &bs); + } + b0->error = rt->errors[error0]; + + next0 = BFD_UDP_INPUT_NEXT_NORMAL; + if (BFD_UDP_ERROR_NONE == error0) + { + /* if everything went fine, check for poll bit, if present, re-use + the buffer and based on (now update) session parameters, send the + final packet back */ + const bfd_pkt_t *pkt = vlib_buffer_get_current (b0); + if (bfd_pkt_get_poll (pkt)) + { + bfd_send_final (vm, b0, bs); + next0 = BFD_UDP_INPUT_NEXT_REPLY; + } + } + vlib_set_next_frame_buffer (vm, rt, next0, bi0); + + from += 1; + n_left_from -= 1; + } + + return f->n_vectors; +} + +static uword bfd_udp4_input (vlib_main_t *vm, vlib_node_runtime_t *rt, + vlib_frame_t *f) +{ + return bfd_udp_input (vm, rt, f, 0); +} + +/* + * bfd input graph node declaration + */ +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (bfd_udp4_input_node, static) = { + .function = bfd_udp4_input, + .name = "bfd-udp4-input", + .vector_size = sizeof (u32), + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = BFD_UDP_N_ERROR, + .error_strings = bfd_udp_error_strings, + + .format_trace = bfd_input_format_trace, + + .n_next_nodes = BFD_UDP_INPUT_N_NEXT, + .next_nodes = + { + [BFD_UDP_INPUT_NEXT_NORMAL] = "error-drop", + [BFD_UDP_INPUT_NEXT_REPLY] = "ip4-lookup", + }, +}; +/* *INDENT-ON* */ + +static uword bfd_udp6_input (vlib_main_t *vm, vlib_node_runtime_t *rt, + vlib_frame_t *f) +{ + return bfd_udp_input (vm, rt, f, 1); +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (bfd_udp6_input_node, static) = { + .function = bfd_udp6_input, + .name = "bfd-udp6-input", + .vector_size = sizeof (u32), + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = BFD_UDP_N_ERROR, + .error_strings = bfd_udp_error_strings, + + .format_trace = bfd_input_format_trace, + + .n_next_nodes = BFD_UDP_INPUT_N_NEXT, + .next_nodes = + { + [BFD_UDP_INPUT_NEXT_NORMAL] = "error-drop", + [BFD_UDP_INPUT_NEXT_REPLY] = "ip6-lookup", + }, +}; +/* *INDENT-ON* */ + +static clib_error_t *bfd_sw_interface_up_down (vnet_main_t *vnm, + u32 sw_if_index, u32 flags) +{ + // vnet_hw_interface_t *hi = vnet_get_sup_hw_interface (vnm, sw_if_index); + if (!(flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)) + { + /* TODO */ + } + return 0; +} + +VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (bfd_sw_interface_up_down); + +static clib_error_t *bfd_hw_interface_up_down (vnet_main_t *vnm, + u32 hw_if_index, u32 flags) +{ + if (flags & VNET_HW_INTERFACE_FLAG_LINK_UP) + { + /* TODO */ + } + return 0; +} + +VNET_HW_INTERFACE_LINK_UP_DOWN_FUNCTION (bfd_hw_interface_up_down); + +/* + * setup function + */ +static clib_error_t *bfd_udp_init (vlib_main_t *vm) +{ + mhash_init (&bfd_udp_main.bfd_session_idx_by_bfd_key, 
sizeof (uword), + sizeof (bfd_udp_key_t)); + bfd_udp_main.bfd_main = &bfd_main; + udp_register_dst_port (vm, UDP_DST_PORT_bfd4, bfd_udp4_input_node.index, 1); + udp_register_dst_port (vm, UDP_DST_PORT_bfd6, bfd_udp6_input_node.index, 0); + return 0; +} + +VLIB_INIT_FUNCTION (bfd_udp_init); diff --git a/vnet/vnet/bfd/bfd_udp.h b/vnet/vnet/bfd/bfd_udp.h new file mode 100644 index 00000000000..51f5327be01 --- /dev/null +++ b/vnet/vnet/bfd/bfd_udp.h @@ -0,0 +1,56 @@ +/* * Copyright (c) 2011-2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * @file + * @brief BFD global declarations + */ + +#ifndef __included_bfd_udp_h__ +#define __included_bfd_udp_h__ + +#include <vppinfra/clib.h> +#include <vnet/adj/adj_types.h> +#include <vnet/ip/ip6_packet.h> + +#define BFD_UDP_KEY_BODY + +/* *INDENT-OFF* */ +typedef CLIB_PACKED (struct { + + u32 sw_if_index; + ip46_address_t local_addr; + ip46_address_t peer_addr; + +}) bfd_udp_key_t; +/* *INDENT-ON* */ + +typedef struct +{ + bfd_udp_key_t key; + + adj_index_t adj_index; +} bfd_udp_session_t; + +void bfd_add_udp_transport (vlib_main_t * vm, vlib_buffer_t * b, + bfd_udp_session_t * bs); + +#endif /* __included_bfd_udp_h__ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/vnet/vnet/bfd/dir.dox b/vnet/vnet/bfd/dir.dox new file mode 100644 index 00000000000..ed656b52074 --- /dev/null +++ b/vnet/vnet/bfd/dir.dox @@ -0,0 +1,18 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +/** + @dir vnet/vnet/bfd + @brief Bidirectional Forwarding Detection (BFD) implementation +*/ diff --git a/vnet/vnet/buffer.h b/vnet/vnet/buffer.h index 898c94ee7b9..82806bba986 100644 --- a/vnet/vnet/buffer.h +++ b/vnet/vnet/buffer.h @@ -143,7 +143,11 @@ typedef struct u8 code; u32 data; } icmp; + + /* IP header - saved by ip*_local nodes */ + void *header; }; + } ip; /* diff --git a/vnet/vnet/ip/ip4_forward.c b/vnet/vnet/ip/ip4_forward.c index fc7b34963fe..38729c8ecfb 100644 --- a/vnet/vnet/ip/ip4_forward.c +++ b/vnet/vnet/ip/ip4_forward.c @@ -1472,9 +1472,12 @@ ip4_local (vlib_main_t * vm, ip0 = vlib_buffer_get_current (p0); ip1 = vlib_buffer_get_current (p1); - fib_index0 = vec_elt (im->fib_index_by_sw_if_index, - vnet_buffer(p0)->sw_if_index[VLIB_RX]); - fib_index1 = vec_elt (im->fib_index_by_sw_if_index, + vnet_buffer (p0)->ip.header = ip0; + vnet_buffer (p1)->ip.header = ip1; + + fib_index0 = vec_elt (im->fib_index_by_sw_if_index, + vnet_buffer (p0)->sw_if_index[VLIB_RX]); + fib_index1 = vec_elt (im->fib_index_by_sw_if_index, vnet_buffer(p1)->sw_if_index[VLIB_RX]); mtrie0 = &ip4_fib_get (fib_index0)->mtrie; @@ -1679,6 +1682,8 @@ ip4_local (vlib_main_t * vm, ip0 = vlib_buffer_get_current (p0); + vnet_buffer (p0)->ip.header = ip0; + fib_index0 = vec_elt (im->fib_index_by_sw_if_index, vnet_buffer(p0)->sw_if_index[VLIB_RX]); @@ -3294,4 +3299,3 @@ VLIB_CLI_COMMAND (set_ip_classify_command, static) = { .function = set_ip_classify_command_fn, }; /* *INDENT-ON* */ - diff --git a/vnet/vnet/ip/udp.h b/vnet/vnet/ip/udp.h index 4de30f1d16f..f8ff777e275 100644 --- a/vnet/vnet/ip/udp.h +++ b/vnet/vnet/ip/udp.h @@ -37,6 +37,7 @@ typedef enum { _ (67, dhcp_to_server) \ _ (68, dhcp_to_client) \ _ (500, ikev2) \ +_ (3784, bfd4) \ _ (4341, lisp_gpe) \ _ (4342, lisp_cp) \ _ (4739, ipfix) \ @@ -49,6 +50,7 @@ _ (6633, vpath_3) #define foreach_udp6_dst_port \ _ (547, dhcpv6_to_server) \ _ (546, dhcpv6_to_client) \ +_ (3784, bfd6) \ _ (4341, lisp_gpe6) \ _ (4342, lisp_cp6) \ _ (4790, vxlan6_gpe) \ diff --git a/vpp/vpp-api/api.c b/vpp/vpp-api/api.c index b7753092fc4..a5ac060a8c7 100644 --- a/vpp/vpp-api/api.c +++ b/vpp/vpp-api/api.c @@ -117,6 +117,8 @@ #include <vpp-api/vpe_msg_enum.h> #include <vnet/span/span.h> +#include <vnet/bfd/bfd_main.h> +#include <vnet/bfd/bfd_api.h> #include <vnet/fib/ip6_fib.h> #include <vnet/fib/ip4_fib.h> #include <vnet/fib/fib_api.h> @@ -302,7 +304,12 @@ _(PUNT, punt) \ _(FLOW_CLASSIFY_SET_INTERFACE, flow_classify_set_interface) \ _(FLOW_CLASSIFY_DUMP, flow_classify_dump) \ _(IPSEC_SPD_DUMP, ipsec_spd_dump) \ -_(FEATURE_ENABLE_DISABLE, feature_enable_disable) +_(FEATURE_ENABLE_DISABLE, feature_enable_disable) \ +_(BFD_UDP_ADD, bfd_udp_add) \ +_(BFD_UDP_DEL, bfd_udp_del) \ +_(BFD_UDP_SESSION_DUMP, bfd_udp_session_dump) \ +_(BFD_SESSION_SET_FLAGS, bfd_session_set_flags) \ +_(WANT_BFD_EVENTS, want_bfd_events) #define QUOTE_(x) #x #define QUOTE(x) QUOTE_(x) @@ -343,6 +350,7 @@ vl_api_memclnt_delete_callback (u32 client_index) } pub_sub_handler (oam_events, OAM_EVENTS); +pub_sub_handler (bfd_events, BFD_EVENTS); #define RESOLUTION_EVENT 1 #define RESOLUTION_PENDING_EVENT 2 @@ -6766,6 +6774,163 @@ static void BAD_SW_IF_INDEX_LABEL; REPLY_MACRO (VL_API_L2_INTERFACE_PBB_TAG_REWRITE_REPLY); + +} + +static void +vl_api_bfd_udp_add_t_handler (vl_api_bfd_udp_add_t * mp) +{ + vl_api_bfd_udp_add_reply_t *rmp; + int rv; + + VALIDATE_SW_IF_INDEX (mp); + + ip46_address_t local_addr; + memset (&local_addr, 0, sizeof (local_addr)); + ip46_address_t peer_addr; + memset (&peer_addr, 0, sizeof 
(peer_addr)); + if (mp->is_ipv6) + { + clib_memcpy (&local_addr.ip6, mp->local_addr, sizeof (local_addr.ip6)); + clib_memcpy (&peer_addr.ip6, mp->peer_addr, sizeof (peer_addr.ip6)); + } + else + { + clib_memcpy (&local_addr.ip4, mp->local_addr, sizeof (local_addr.ip4)); + clib_memcpy (&peer_addr.ip4, mp->peer_addr, sizeof (peer_addr.ip4)); + } + + rv = bfd_udp_add_session (clib_net_to_host_u32 (mp->sw_if_index), + clib_net_to_host_u32 (mp->desired_min_tx), + clib_net_to_host_u32 (mp->required_min_rx), + mp->detect_mult, &local_addr, &peer_addr); + + BAD_SW_IF_INDEX_LABEL; + REPLY_MACRO (VL_API_BFD_UDP_ADD_REPLY); +} + +static void +vl_api_bfd_udp_del_t_handler (vl_api_bfd_udp_del_t * mp) +{ + vl_api_bfd_udp_del_reply_t *rmp; + int rv; + + VALIDATE_SW_IF_INDEX (mp); + + ip46_address_t local_addr; + memset (&local_addr, 0, sizeof (local_addr)); + ip46_address_t peer_addr; + memset (&peer_addr, 0, sizeof (peer_addr)); + if (mp->is_ipv6) + { + clib_memcpy (&local_addr.ip6, mp->local_addr, sizeof (local_addr.ip6)); + clib_memcpy (&peer_addr.ip6, mp->peer_addr, sizeof (peer_addr.ip6)); + } + else + { + clib_memcpy (&local_addr.ip4, mp->local_addr, sizeof (local_addr.ip4)); + clib_memcpy (&peer_addr.ip4, mp->peer_addr, sizeof (peer_addr.ip4)); + } + + rv = + bfd_udp_del_session (clib_net_to_host_u32 (mp->sw_if_index), &local_addr, + &peer_addr); + + BAD_SW_IF_INDEX_LABEL; + REPLY_MACRO (VL_API_BFD_UDP_DEL_REPLY); +} + +void +send_bfd_udp_session_details (unix_shared_memory_queue_t * q, u32 context, + bfd_session_t * bs) +{ + if (bs->transport != BFD_TRANSPORT_UDP4 && + bs->transport != BFD_TRANSPORT_UDP6) + { + return; + } + + vl_api_bfd_udp_session_details_t *mp = vl_msg_api_alloc (sizeof (*mp)); + memset (mp, 0, sizeof (*mp)); + mp->_vl_msg_id = ntohs (VL_API_BFD_UDP_SESSION_DETAILS); + mp->context = context; + mp->bs_index = clib_host_to_net_u32 (bs->bs_idx); + mp->state = bs->local_state; + bfd_udp_session_t *bus = &bs->udp; + bfd_udp_key_t *key = &bus->key; + mp->sw_if_index = clib_host_to_net_u32 (key->sw_if_index); + mp->is_ipv6 = !(ip46_address_is_ip4 (&key->local_addr)); + if (mp->is_ipv6) + { + clib_memcpy (mp->local_addr, &key->local_addr, + sizeof (key->local_addr)); + clib_memcpy (mp->peer_addr, &key->peer_addr, sizeof (key->peer_addr)); + } + else + { + clib_memcpy (mp->local_addr, &key->local_addr.ip4.data, + sizeof (&key->local_addr.ip4.data)); + clib_memcpy (mp->peer_addr, &key->peer_addr.ip4.data, + sizeof (&key->peer_addr.ip4.data)); + } + + vl_msg_api_send_shmem (q, (u8 *) & mp); +} + +void +bfd_event (bfd_main_t * bm, bfd_session_t * bs) +{ + vpe_api_main_t *vam = &vpe_api_main; + vpe_client_registration_t *reg; + unix_shared_memory_queue_t *q; + /* *INDENT-OFF* */ + pool_foreach (reg, vam->bfd_events_registrations, ({ + q = vl_api_client_index_to_input_queue (reg->client_index); + if (q) + { + switch (bs->transport) + { + case BFD_TRANSPORT_UDP4: + /* fallthrough */ + case BFD_TRANSPORT_UDP6: + send_bfd_udp_session_details (q, 0, bs); + } + } + })); + /* *INDENT-ON* */ +} + +static void +vl_api_bfd_udp_session_dump_t_handler (vl_api_bfd_udp_session_dump_t * mp) +{ + unix_shared_memory_queue_t *q; + + q = vl_api_client_index_to_input_queue (mp->client_index); + + if (q == 0) + return; + + bfd_session_t *bs = NULL; + /* *INDENT-OFF* */ + pool_foreach (bs, bfd_main.sessions, ({ + if (bs->transport == BFD_TRANSPORT_UDP4 || + bs->transport == BFD_TRANSPORT_UDP6) + send_bfd_udp_session_details (q, mp->context, bs); + })); + /* *INDENT-ON* */ +} + +static void 
+vl_api_bfd_session_set_flags_t_handler (vl_api_bfd_session_set_flags_t * mp) +{ + vl_api_bfd_session_set_flags_reply_t *rmp; + int rv; + + rv = + bfd_session_set_flags (clib_net_to_host_u32 (mp->bs_index), + mp->admin_up_down); + + REPLY_MACRO (VL_API_BFD_SESSION_SET_FLAGS_REPLY); } static void @@ -7090,6 +7255,7 @@ vpe_api_init (vlib_main_t * vm) am->to_netconf_client_registration_hash = hash_create (0, sizeof (uword)); am->from_netconf_client_registration_hash = hash_create (0, sizeof (uword)); am->oam_events_registration_hash = hash_create (0, sizeof (uword)); + am->bfd_events_registration_hash = hash_create (0, sizeof (uword)); vl_api_init (vm); vl_set_memory_region_name ("/vpe-api"); diff --git a/vpp/vpp-api/vpe.api b/vpp/vpp-api/vpe.api index 1c33f70bd94..b13c2606667 100644 --- a/vpp/vpp-api/vpe.api +++ b/vpp/vpp-api/vpe.api @@ -4656,6 +4656,191 @@ define feature_enable_disable_reply i32 retval; }; +/** \brief Configure BFD feature + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param slow_timer - slow timer (seconds) + @param min_tx - desired min tx interval + @param min_rx - desired min rx interval + @param detect_mult - desired detection multiplier +*/ +define bfd_set_config { + u32 client_index; + u32 context; + u32 slow_timer; + u32 min_tx; + u32 min_rx; + u8 detect_mult; +}; + +/** \brief Configure BFD feature response + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define bfd_set_config_reply { + u32 context; + i32 retval; +}; + +/** \brief Get BFD configuration +*/ +define bfd_get_config { + u32 client_index; + u32 context; +}; + +/** \brief Get BFD configuration response + @param context - sender context, to match reply w/ request + @param retval - return code for the request + @param slow_timer - slow timer (seconds) + @param min_tx - desired min tx interval + @param min_rx - desired min rx interval + @param detect_mult - desired detection multiplier +*/ +define bfd_get_config_reply { + u32 client_index; + u32 context; + u32 slow_timer; + u32 min_tx; + u32 min_rx; + u8 detect_mult; +}; + +/** \brief Add UDP BFD session on interface + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - sw index of the interface + @param desired_min_tx - desired min transmit interval (microseconds) + @param required_min_rx - required min receive interval (microseconds) + @param detect_mult - detect multiplier (# of packets missed between connection goes down) + @param local_addr - local address + @param peer_addr - peer address + @param is_ipv6 - local_addr, peer_addr are IPv6 if non-zero, otherwise IPv4 +*/ +define bfd_udp_add { + u32 client_index; + u32 context; + u32 sw_if_index; + u32 desired_min_tx; + u32 required_min_rx; + u8 local_addr[16]; + u8 peer_addr[16]; + u8 is_ipv6; + u8 detect_mult; +}; + +/** \brief Add UDP BFD session response + @param context - sender context, to match reply w/ request + @param retval - return code for the request + @param bs_index - index of the session created +*/ +define bfd_udp_add_reply { + u32 context; + i32 retval; + u32 bs_index; +}; + +/** \brief Delete UDP BFD session on interface + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param sw_if_index - sw index of the interface + @param local_addr - local address + @param peer_addr - peer address + 
@param is_ipv6 - local_addr, peer_addr are IPv6 if non-zero, otherwise IPv4 +*/ +define bfd_udp_del { + u32 client_index; + u32 context; + u32 sw_if_index; + u8 local_addr[16]; + u8 peer_addr[16]; + u8 is_ipv6; +}; + +/** \brief Delete UDP BFD session response + @param context - sender context, to match reply w/ request + @param retval - return code for the request +*/ +define bfd_udp_del_reply { + u32 context; + i32 retval; +}; + +/** \brief Get all BFD sessions + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request +*/ +define bfd_udp_session_dump { + u32 client_index; + u32 context; +}; + +/** \brief BFD session details structure + @param context - sender context, to match reply w/ request + @param bs_index - index of the session + @param sw_if_index - sw index of the interface + @param local_addr - local address + @param peer_addr - peer address + @param is_ipv6 - local_addr, peer_addr are IPv6 if non-zero, otherwise IPv4 + @param state - session state +*/ +define bfd_udp_session_details { + u32 context; + u32 bs_index; + u32 sw_if_index; + u8 local_addr[16]; + u8 peer_addr[16]; + u8 is_ipv6; + u8 state; +}; + +/** \brief Set flags of BFD session + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param bs_index - index of the bfd session to set flags on + @param admin_up_down - set the admin state, 1 = up, 0 = down +*/ +define bfd_session_set_flags { + u32 client_index; + u32 context; + u32 bs_index; + u8 admin_up_down; +}; + +/** \brief Reply to bfd_session_set_flags + @param context - sender context which was passed in the request + @param retval - return code of the set flags request +*/ +define bfd_session_set_flags_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Register for BFD events + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param enable_disable - 1 => register for events, 0 => cancel registration + @param pid - sender's pid +*/ +define want_bfd_events +{ + u32 client_index; + u32 context; + u32 enable_disable; + u32 pid; +}; + +/** \brief Reply for BFD events registration + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define want_bfd_events_reply +{ + u32 context; + i32 retval; +}; + /* * Local Variables: * eval: (c-set-style "gnu") diff --git a/vppinfra/vppinfra/mhash.c b/vppinfra/vppinfra/mhash.c index 84517e191bd..c917e164cd9 100644 --- a/vppinfra/vppinfra/mhash.c +++ b/vppinfra/vppinfra/mhash.c @@ -223,7 +223,7 @@ mhash_init (mhash_t * h, uword n_value_bytes, uword n_key_bytes) } static uword -mhash_set_tmp_key (mhash_t * h, void *key) +mhash_set_tmp_key (mhash_t * h, const void *key) { u8 *key_tmp; int my_cpu = os_get_cpu_number (); @@ -251,7 +251,7 @@ mhash_set_tmp_key (mhash_t * h, void *key) } hash_pair_t * -mhash_get_pair (mhash_t * h, void *key) +mhash_get_pair (mhash_t * h, const void *key) { uword ikey; mhash_sanitize_hash_user (h); diff --git a/vppinfra/vppinfra/mhash.h b/vppinfra/vppinfra/mhash.h index 8ce8454b0a4..102adf4eb83 100644 --- a/vppinfra/vppinfra/mhash.h +++ b/vppinfra/vppinfra/mhash.h @@ -101,13 +101,13 @@ mhash_key_to_mem (mhash_t * h, uword key) return vec_elt_at_index (h->key_vector_or_heap, key); } -hash_pair_t *mhash_get_pair (mhash_t * h, void *key); +hash_pair_t *mhash_get_pair (mhash_t * h, const void *key); uword mhash_set_mem (mhash_t * h, void *key, uword * 
new_value, uword * old_value);
 uword mhash_unset (mhash_t * h, void *key, uword * old_value);
 
 always_inline uword *
-mhash_get (mhash_t * h, void *key)
+mhash_get (mhash_t * h, const void *key)
 {
   hash_pair_t *p = mhash_get_pair (h, key);
   return p ? &p->value[0] : 0;
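
To make the new API messages more concrete, here is a rough Python sketch of driving bfd_udp_add from a test case. The test.vapi.bfd_udp_add wrapper name and its keyword arguments are assumptions for illustration only (they mirror the message fields documented above, not a confirmed PAPI signature); intervals are in microseconds, and IPv4 addresses are zero-padded to fill the 16-byte local_addr/peer_addr fields.

# Hypothetical usage sketch -- the vapi wrapper name and kwargs are assumed,
# not taken from this patch; field names follow the bfd_udp_add definition.
from socket import AF_INET, inet_pton


def add_ipv4_bfd_session(test, intf):
    """Create a BFD UDP session towards intf's IPv4 neighbor, return bs_index."""
    local = inet_pton(AF_INET, intf.local_ip4).ljust(16, b'\x00')
    peer = inet_pton(AF_INET, intf.remote_ip4).ljust(16, b'\x00')
    reply = test.vapi.bfd_udp_add(sw_if_index=intf.sw_if_index,
                                  desired_min_tx=100000,   # 100 ms, in usec
                                  required_min_rx=100000,  # 100 ms, in usec
                                  detect_mult=3,           # missed packets before down
                                  local_addr=local,
                                  peer_addr=peer,
                                  is_ipv6=0)
    return reply.bs_index  # bfd_udp_add_reply carries the new session index

A matching bfd_udp_del call with the same sw_if_index and address pair would tear the session back down.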
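
In the same spirit, a hedged sketch of combining want_bfd_events with bfd_udp_session_dump to wait for a session to reach Up. Again, the vapi wrapper names and reply field access are assumptions; BFDState comes from the test/bfd.py added by this patch (BFDState.up == 3). A real test would more likely block on the asynchronous event delivery that want_bfd_events enables; the polling loop just keeps the sketch self-contained.

# Hypothetical sketch -- vapi wrapper names and kwargs are assumed for
# illustration; only the message and field names come from vpe.api above.
import os
import time

from bfd import BFDState  # defined in test/bfd.py of this patch


def wait_for_session_up(test, bs_index, timeout=5.0):
    """Register for BFD events, then poll the session dump until state is Up."""
    test.vapi.want_bfd_events(enable_disable=1, pid=os.getpid())
    deadline = time.time() + timeout
    while time.time() < deadline:
        for details in test.vapi.bfd_udp_session_dump():
            if details.bs_index == bs_index and details.state == BFDState.up:
                return True
        time.sleep(0.1)
    return False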