From 5d73eecd63018db69b10bf56adeec9cc5cf92790 Mon Sep 17 00:00:00 2001 From: Pablo Camarillo Date: Mon, 24 Apr 2017 17:51:56 +0200 Subject: First commit SR MPLS Change-Id: I961685a2a0e4c314049444c64eb6ccf877c278dd Signed-off-by: Pablo Camarillo --- src/vnet/srv6/dir.dox | 25 + src/vnet/srv6/ietf_draft_05.txt | 1564 ++++++++++++++++++ src/vnet/srv6/sr.api | 168 ++ src/vnet/srv6/sr.c | 57 + src/vnet/srv6/sr.h | 325 ++++ src/vnet/srv6/sr_api.c | 244 +++ src/vnet/srv6/sr_doc.md | 55 + src/vnet/srv6/sr_localsid.c | 1492 +++++++++++++++++ src/vnet/srv6/sr_localsid.md | 58 + src/vnet/srv6/sr_packet.h | 159 ++ src/vnet/srv6/sr_policy.md | 56 + src/vnet/srv6/sr_policy_rewrite.c | 3227 +++++++++++++++++++++++++++++++++++++ src/vnet/srv6/sr_steering.c | 573 +++++++ src/vnet/srv6/sr_steering.md | 11 + 14 files changed, 8014 insertions(+) create mode 100755 src/vnet/srv6/dir.dox create mode 100755 src/vnet/srv6/ietf_draft_05.txt create mode 100644 src/vnet/srv6/sr.api create mode 100755 src/vnet/srv6/sr.c create mode 100755 src/vnet/srv6/sr.h create mode 100644 src/vnet/srv6/sr_api.c create mode 100644 src/vnet/srv6/sr_doc.md create mode 100755 src/vnet/srv6/sr_localsid.c create mode 100644 src/vnet/srv6/sr_localsid.md create mode 100755 src/vnet/srv6/sr_packet.h create mode 100644 src/vnet/srv6/sr_policy.md create mode 100755 src/vnet/srv6/sr_policy_rewrite.c create mode 100755 src/vnet/srv6/sr_steering.c create mode 100644 src/vnet/srv6/sr_steering.md (limited to 'src/vnet/srv6') diff --git a/src/vnet/srv6/dir.dox b/src/vnet/srv6/dir.dox new file mode 100755 index 00000000..3f539a58 --- /dev/null +++ b/src/vnet/srv6/dir.dox @@ -0,0 +1,25 @@ +/* + * + * Copyright (c) 2013 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + @dir + @brief Segment Routing code + + An implementation of Segment Routing as per: + draft-ietf-6man-segment-routing-header-05 + + @see ietf_draft_05.txt + +*/ \ No newline at end of file diff --git a/src/vnet/srv6/ietf_draft_05.txt b/src/vnet/srv6/ietf_draft_05.txt new file mode 100755 index 00000000..e9bff04f --- /dev/null +++ b/src/vnet/srv6/ietf_draft_05.txt @@ -0,0 +1,1564 @@ +Network Working Group S. Previdi, Ed. +Internet-Draft C. Filsfils +Intended status: Standards Track Cisco Systems, Inc. +Expires: August 5, 2017 B. Field + Comcast + I. Leung + Rogers Communications + J. Linkova + Google + E. Aries + Facebook + T. Kosugi + NTT + E. Vyncke + Cisco Systems, Inc. + D. Lebrun + Universite Catholique de Louvain + February 1, 2017 + + + IPv6 Segment Routing Header (SRH) + draft-ietf-6man-segment-routing-header-05 + +Abstract + + Segment Routing (SR) allows a node to steer a packet through a + controlled set of instructions, called segments, by prepending an SR + header to the packet. A segment can represent any instruction, + topological or service-based. SR allows to enforce a flow through + any path (topological, or application/service based) while + maintaining per-flow state only at the ingress node to the SR domain. 
+ + Segment Routing can be applied to the IPv6 data plane with the + addition of a new type of Routing Extension Header. This draft + describes the Segment Routing Extension Header Type and how it is + used by SR capable nodes. + +Requirements Language + + The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", + "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this + document are to be interpreted as described in RFC 2119 [RFC2119]. + +Status of This Memo + + This Internet-Draft is submitted in full conformance with the + provisions of BCP 78 and BCP 79. + + + + +Previdi, et al. Expires August 5, 2017 [Page 1] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + + Internet-Drafts are working documents of the Internet Engineering + Task Force (IETF). Note that other groups may also distribute + working documents as Internet-Drafts. The list of current Internet- + Drafts is at http://datatracker.ietf.org/drafts/current/. + + Internet-Drafts are draft documents valid for a maximum of six months + and may be updated, replaced, or obsoleted by other documents at any + time. It is inappropriate to use Internet-Drafts as reference + material or to cite them other than as "work in progress." + + This Internet-Draft will expire on August 5, 2017. + +Copyright Notice + + Copyright (c) 2017 IETF Trust and the persons identified as the + document authors. All rights reserved. + + This document is subject to BCP 78 and the IETF Trust's Legal + Provisions Relating to IETF Documents + (http://trustee.ietf.org/license-info) in effect on the date of + publication of this document. Please review these documents + carefully, as they describe your rights and restrictions with respect + to this document. Code Components extracted from this document must + include Simplified BSD License text as described in Section 4.e of + the Trust Legal Provisions and are provided without warranty as + described in the Simplified BSD License. + +Table of Contents + + 1. Segment Routing Documents . . . . . . . . . . . . . . . . . . 3 + 2. Introduction . . . . . . . . . . . . . . . . . . . . . . . . 3 + 2.1. Data Planes supporting Segment Routing . . . . . . . . . 4 + 2.2. Segment Routing (SR) Domain . . . . . . . . . . . . . . . 4 + 2.2.1. SR Domain in a Service Provider Network . . . . . . . 5 + 2.2.2. SR Domain in a Overlay Network . . . . . . . . . . . 6 + 3. Segment Routing Extension Header (SRH) . . . . . . . . . . . 7 + 3.1. SRH TLVs . . . . . . . . . . . . . . . . . . . . . . . . 9 + 3.1.1. Ingress Node TLV . . . . . . . . . . . . . . . . . . 10 + 3.1.2. Egress Node TLV . . . . . . . . . . . . . . . . . . . 11 + 3.1.3. Opaque Container TLV . . . . . . . . . . . . . . . . 11 + 3.1.4. Padding TLV . . . . . . . . . . . . . . . . . . . . . 12 + 3.1.5. HMAC TLV . . . . . . . . . . . . . . . . . . . . . . 13 + 3.2. SRH and RFC2460 behavior . . . . . . . . . . . . . . . . 14 + 4. SRH Procedures . . . . . . . . . . . . . . . . . . . . . . . 14 + 4.1. Source SR Node . . . . . . . . . . . . . . . . . . . . . 14 + 4.2. Transit Node . . . . . . . . . . . . . . . . . . . . . . 15 + 4.3. SR Segment Endpoint Node . . . . . . . . . . . . . . . . 16 + 5. Security Considerations . . . . . . . . . . . . . . . . . . . 16 + + + +Previdi, et al. Expires August 5, 2017 [Page 2] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + + 5.1. Threat model . . . . . . . . . . . . . . . . . . . . . . 17 + 5.1.1. Source routing threats . . . . . . . . . . . . . . . 17 + 5.1.2. 
Applicability of RFC 5095 to SRH . . . . . . . . . . 17 + 5.1.3. Service stealing threat . . . . . . . . . . . . . . . 18 + 5.1.4. Topology disclosure . . . . . . . . . . . . . . . . . 18 + 5.1.5. ICMP Generation . . . . . . . . . . . . . . . . . . . 18 + 5.2. Security fields in SRH . . . . . . . . . . . . . . . . . 19 + 5.2.1. Selecting a hash algorithm . . . . . . . . . . . . . 20 + 5.2.2. Performance impact of HMAC . . . . . . . . . . . . . 21 + 5.2.3. Pre-shared key management . . . . . . . . . . . . . . 21 + 5.3. Deployment Models . . . . . . . . . . . . . . . . . . . . 22 + 5.3.1. Nodes within the SR domain . . . . . . . . . . . . . 22 + 5.3.2. Nodes outside of the SR domain . . . . . . . . . . . 22 + 5.3.3. SR path exposure . . . . . . . . . . . . . . . . . . 23 + 5.3.4. Impact of BCP-38 . . . . . . . . . . . . . . . . . . 23 + 6. IANA Considerations . . . . . . . . . . . . . . . . . . . . . 24 + 7. Manageability Considerations . . . . . . . . . . . . . . . . 24 + 8. Contributors . . . . . . . . . . . . . . . . . . . . . . . . 24 + 9. Acknowledgements . . . . . . . . . . . . . . . . . . . . . . 24 + 10. References . . . . . . . . . . . . . . . . . . . . . . . . . 25 + 10.1. Normative References . . . . . . . . . . . . . . . . . . 25 + 10.2. Informative References . . . . . . . . . . . . . . . . . 25 + Authors' Addresses . . . . . . . . . . . . . . . . . . . . . . . 27 + +1. Segment Routing Documents + + Segment Routing terminology is defined in + [I-D.ietf-spring-segment-routing]. + + Segment Routing use cases are described in [RFC7855] and + [I-D.ietf-spring-ipv6-use-cases]. + + Segment Routing protocol extensions are defined in + [I-D.ietf-isis-segment-routing-extensions], and + [I-D.ietf-ospf-ospfv3-segment-routing-extensions]. + +2. Introduction + + Segment Routing (SR), defined in [I-D.ietf-spring-segment-routing], + allows a node to steer a packet through a controlled set of + instructions, called segments, by prepending an SR header to the + packet. A segment can represent any instruction, topological or + service-based. SR allows to enforce a flow through any path + (topological or service/application based) while maintaining per-flow + state only at the ingress node to the SR domain. Segments can be + derived from different components: IGP, BGP, Services, Contexts, + Locators, etc. The list of segment forming the path is called the + Segment List and is encoded in the packet header. + + + +Previdi, et al. Expires August 5, 2017 [Page 3] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + + SR allows the use of strict and loose source based routing paradigms + without requiring any additional signaling protocols in the + infrastructure hence delivering an excellent scalability property. + + The source based routing model described in + [I-D.ietf-spring-segment-routing] is inherited from the ones proposed + by [RFC1940] and [RFC2460]. The source based routing model offers + the support for explicit routing capability. + +2.1. Data Planes supporting Segment Routing + + Segment Routing (SR), can be instantiated over MPLS + ([I-D.ietf-spring-segment-routing-mpls]) and IPv6. This document + defines its instantiation over the IPv6 data-plane based on the use- + cases defined in [I-D.ietf-spring-ipv6-use-cases]. + + This document defines a new type of Routing Header (originally + defined in [RFC2460]) called the Segment Routing Header (SRH) in + order to convey the Segment List in the packet header as defined in + [I-D.ietf-spring-segment-routing]. 
Mechanisms through which segment + are known and advertised are outside the scope of this document. + + A segment is materialized by an IPv6 address. A segment identifies a + topological instruction or a service instruction. A segment can be + either: + + o global: a global segment represents an instruction supported by + all nodes in the SR domain and it is instantiated through an IPv6 + address globally known in the SR domain. + + o local: a local segment represents an instruction supported only by + the node who originates it and it is instantiated through an IPv6 + address that is known only by the local node. + +2.2. Segment Routing (SR) Domain + + We define the concept of the Segment Routing Domain (SR Domain) as + the set of nodes participating into the source based routing model. + These nodes may be connected to the same physical infrastructure + (e.g.: a Service Provider's network) as well as nodes remotely + connected to each other (e.g.: an enterprise VPN or an overlay). + + A non-exhaustive list of examples of SR Domains is: + + o The network of an operator, service provider, content provider, + enterprise including nodes, links and Autonomous Systems. + + + + + +Previdi, et al. Expires August 5, 2017 [Page 4] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + + o A set of nodes connected as an overlay over one or more transit + providers. The overlay nodes exchange SR-enabled traffic with + segments belonging solely to the overlay routers (the SR domain). + None of the segments in the SR-enabled packets exchanged by the + overlay belong to the transit networks + + The source based routing model through its instantiation of the + Segment Routing Header (SRH) defined in this document equally applies + to all the above examples. + + It is assumed in this document that the SRH is added to the packet by + its source, consistently with the source routing model defined in + [RFC2460]. For example: + + o At the node originating the packet (host, server). + + o At the ingress node of an SR domain where the ingress node + receives an IPv6 packet and encapsulates it into an outer IPv6 + header followed by a Segment Routing header. + +2.2.1. SR Domain in a Service Provider Network + + The following figure illustrates an SR domain consisting of an + operator's network infrastructure. + + (-------------------------- Operator 1 -----------------------) + ( ) + ( (-----AS 1-----) (-------AS 2-------) (----AS 3-------) ) + ( ( ) ( ) ( ) ) + A1--(--(--11---13--14-)--(-21---22---23--24-)--(-31---32---34--)--)--Z1 + ( ( /|\ /|\ /| ) ( |\ /|\ /|\ /| ) ( |\ /|\ /| \ ) ) + A2--(--(/ | \/ | \/ | ) ( | \/ | \/ | \/ | ) ( | \/ | \/ | \)--)--Z2 + ( ( | /\ | /\ | ) ( | /\ | /\ | /\ | ) ( | /\ | /\ | ) ) + ( ( |/ \|/ \| ) ( |/ \|/ \|/ \| ) ( |/ \|/ \| ) ) + A3--(--(--15---17--18-)--(-25---26---27--28-)--(-35---36---38--)--)--Z3 + ( ( ) ( ) ( ) ) + ( (--------------) (------------------) (---------------) ) + ( ) + (-------------------------------------------------------------) + + Figure 1: Service Provider SR Domain + + Figure 1 describes an operator network including several ASes and + delivering connectivity between endpoints. In this scenario, Segment + Routing is used within the operator networks and across the ASes + boundaries (all being under the control of the same operator). In + this case segment routing can be used in order to address use cases + such as end-to-end traffic engineering, fast re-route, egress peer + + + +Previdi, et al. 
Expires August 5, 2017 [Page 5] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + + engineering, data-center traffic engineering as described in + [RFC7855], [I-D.ietf-spring-ipv6-use-cases] and + [I-D.ietf-spring-resiliency-use-cases]. + + Typically, an IPv6 packet received at ingress (i.e.: from outside the + SR domain), is classified according to network operator policies and + such classification results into an outer header with an SRH applied + to the incoming packet. The SRH contains the list of segment + representing the path the packet must take inside the SR domain. + Thus, the SA of the packet is the ingress node, the DA (due to SRH + procedures described in Section 4) is set as the first segment of the + path and the last segment of the path is the egress node of the SR + domain. + + The path may include intra-AS as well as inter-AS segments. It has + to be noted that all nodes within the SR domain are under control of + the same administration. When the packet reaches the egress point of + the SR domain, the outer header and its SRH are removed so that the + destination of the packet is unaware of the SR domain the packet has + traversed. + + The outer header with the SRH is no different from any other + tunneling encapsulation mechanism and allows a network operator to + implement traffic engineering mechanisms so to efficiently steer + traffic across his infrastructure. + +2.2.2. SR Domain in a Overlay Network + + The following figure illustrates an SR domain consisting of an + overlay network over multiple operator's networks. + + (--Operator 1---) (-----Operator 2-----) (--Operator 3---) + ( ) ( ) ( ) + A1--(--11---13--14--)--(--21---22---23--24--)--(-31---32---34--)--C1 + ( /|\ /|\ /| ) ( |\ /|\ /|\ /| ) ( |\ /|\ /| \ ) + A2--(/ | \/ | \/ | ) ( | \/ | \/ | \/ | ) ( | \/ | \/ | \)--C2 + ( | /\ | /\ | ) ( | /\ | /\ | /\ | ) ( | /\ | /\ | ) + ( |/ \|/ \| ) ( |/ \|/ \|/ \| ) ( |/ \|/ \| ) + A3--(--15---17--18--)--(--25---26---27--28--)--(-35---36---38--)--C3 + ( ) ( | | | ) ( ) + (---------------) (--|----|---------|--) (---------------) + | | | + B1 B2 B3 + + Figure 2: Overlay SR Domain + + + + + + +Previdi, et al. Expires August 5, 2017 [Page 6] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + + Figure 2 describes an overlay consisting of nodes connected to three + different network operators and forming a single overlay network + where Segment routing packets are exchanged. + + The overlay consists of nodes A1, A2, A3, B1, B2, B3, C1, C2 and C3. + These nodes are connected to their respective network operator and + form an overlay network. + + Each node may originate packets with an SRH which contains, in the + segment list of the SRH or in the DA, segments identifying other + overlay nodes. This implies that packets with an SRH may traverse + operator's networks but, obviously, these SRHs cannot contain an + address/segment of the transit operators 1, 2 and 3. The SRH + originated by the overlay can only contain address/segment under the + administration of the overlay (e.g. address/segments supported by A1, + A2, A3, B1, B2, B3, C1,C2 or C3). + + In this model, the operator network nodes are transit nodes and, + according to [RFC2460], MUST NOT inspect the routing extension header + since they are not the DA of the packet. 
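+
+   For illustration, the transit rule above can be sketched in C-like
+   pseudocode (type and function names are illustrative only and do
+   not refer to any particular implementation):
+
+      void
+      ipv6_input (struct node *n, struct ipv6_pkt *p)
+      {
+        /* Per RFC2460, only the node owning the packet's Destination
+           Address may inspect and act on the routing header (SRH). */
+        if (!address_is_local (n, p->dst_addr))
+          {
+            /* Transit node: plain IPv6 forwarding, no SRH inspection. */
+            ipv6_fib_forward (n, p);
+            return;
+          }
+        /* DA is local: this node is the segment endpoint and processes
+           the SRH as described in Section 4.3. */
+        srh_process (n, p);
+      }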
+ + It is a common practice in operators networks to filter out, at + ingress, any packet whose DA is the address of an internal node and + it is also possible that an operator would filter out any packet + destined to an internal address and having an extension header in it. + + This common practice does not impact the SR-enabled traffic between + the overlay nodes as the intermediate transit networks never see a + destination address belonging to their infrastructure. These SR- + enabled overlay packets will thus never be filtered by the transit + operators. + + In all cases, transit packets (i.e.: packets whose DA is outside the + domain of the operator's network) will be forwarded accordingly + without introducing any security concern in the operator's network. + This is similar to tunneled packets. + +3. Segment Routing Extension Header (SRH) + + A new type of the Routing Header (originally defined in [RFC2460]) is + defined: the Segment Routing Header (SRH) which has a new Routing + Type, (suggested value 4) to be assigned by IANA. + + The Segment Routing Header (SRH) is defined as follows: + + + + + + + +Previdi, et al. Expires August 5, 2017 [Page 7] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Next Header | Hdr Ext Len | Routing Type | Segments Left | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | First Segment | Flags | RESERVED | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | | + | Segment List[0] (128 bits IPv6 address) | + | | + | | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | | + | | + ... + | | + | | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | | + | Segment List[n] (128 bits IPv6 address) | + | | + | | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + // // + // Optional Type Length Value objects (variable) // + // // + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + where: + + o Next Header: 8-bit selector. Identifies the type of header + immediately following the SRH. + + o Hdr Ext Len: 8-bit unsigned integer, is the length of the SRH + header in 8-octet units, not including the first 8 octets. + + o Routing Type: TBD, to be assigned by IANA (suggested value: 4). + + o Segments Left. Defined in [RFC2460], it contains the index, in + the Segment List, of the next segment to inspect. Segments Left + is decremented at each segment. + + o First Segment: contains the index, in the Segment List, of the + first segment of the path which is in fact the last element of the + Segment List. + + o Flags: 8 bits of flags. Following flags are defined: + + + + +Previdi, et al. Expires August 5, 2017 [Page 8] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + + 0 1 2 3 4 5 6 7 + +-+-+-+-+-+-+-+-+ + |U|P|O|A|H| U | + +-+-+-+-+-+-+-+-+ + + U: Unused and for future use. SHOULD be unset on transmission + and MUST be ignored on receipt. + + P-flag: Protected flag. Set when the packet has been rerouted + through FRR mechanism by an SR endpoint node. + + O-flag: OAM flag. When set, it indicates that this packet is + an operations and management (OAM) packet. + + A-flag: Alert flag. If present, it means important Type Length + Value (TLV) objects are present. See Section 3.1 for details + on TLVs objects. + + H-flag: HMAC flag. 
If set, the HMAC TLV is present and is + encoded as the last TLV of the SRH. In other words, the last + 36 octets of the SRH represent the HMAC information. See + Section 3.1.5 for details on the HMAC TLV. + + o RESERVED: SHOULD be unset on transmission and MUST be ignored on + receipt. + + o Segment List[n]: 128 bit IPv6 addresses representing the nth + segment in the Segment List. The Segment List is encoded starting + from the last segment of the path. I.e., the first element of the + segment list (Segment List [0]) contains the last segment of the + path while the last segment of the Segment List (Segment List[n]) + contains the first segment of the path. The index contained in + "Segments Left" identifies the current active segment. + + o Type Length Value (TLV) are described in Section 3.1. + +3.1. SRH TLVs + + This section defines TLVs of the Segment Routing Header. + + Type Length Value (TLV) contain optional information that may be used + by the node identified in the DA of the packet. It has to be noted + that the information carried in the TLVs is not intended to be used + by the routing layer. Typically, TLVs carry information that is + consumed by other components (e.g.: OAM) than the routing function. + + Each TLV has its own length, format and semantic. The code-point + allocated (by IANA) to each TLV defines both the format and the + + + +Previdi, et al. Expires August 5, 2017 [Page 9] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + + semantic of the information carried in the TLV. Multiple TLVs may be + encoded in the same SRH. + + The "Length" field of the TLV is primarily used to skip the TLV while + inspecting the SRH in case the node doesn't support or recognize the + TLV codepoint. The "Length" defines the TLV length in octets and not + including the "Type" and "Length" fields. + + The primary scope of TLVs is to give the receiver of the packet + information related to the source routed path (e.g.: where the packet + entered in the SR domain and where it is expected to exit). + + Additional TLVs may be defined in the future. + +3.1.1. Ingress Node TLV + + The Ingress Node TLV is optional and identifies the node this packet + traversed when entered the SR domain. The Ingress Node TLV has + following format: + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Type | Length | RESERVED | Flags | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | | + | Ingress Node (16 octets) | + | | + | | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + where: + + o Type: to be assigned by IANA (suggested value 1). + + o Length: 18. + + o RESERVED: 8 bits. SHOULD be unset on transmission and MUST be + ignored on receipt. + + o Flags: 8 bits. No flags are defined in this document. + + o Ingress Node: 128 bits. Defines the node where the packet is + expected to enter the SR domain. In the encapsulation case + described in Section 2.2.1, this information corresponds to the SA + of the encapsulating header. + + + + + +Previdi, et al. Expires August 5, 2017 [Page 10] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + +3.1.2. Egress Node TLV + + The Egress Node TLV is optional and identifies the node this packet + is expected to traverse when exiting the SR domain. 
The Egress Node + TLV has following format: + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Type | Length | RESERVED | Flags | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | | + | Egress Node (16 octets) | + | | + | | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + where: + + o Type: to be assigned by IANA (suggested value 2). + + o Length: 18. + + o RESERVED: 8 bits. SHOULD be unset on transmission and MUST be + ignored on receipt. + + o Flags: 8 bits. No flags are defined in this document. + + o Egress Node: 128 bits. Defines the node where the packet is + expected to exit the SR domain. In the encapsulation case + described in Section 2.2.1, this information corresponds to the + last segment of the SRH in the encapsulating header. + +3.1.3. Opaque Container TLV + + The Opaque Container TLV is optional and has the following format: + + + + + + + + + + + + + + + +Previdi, et al. Expires August 5, 2017 [Page 11] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Type | Length | RESERVED | Flags | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | | + | Opaque Container (16 octets) | + | | + | | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + where: + + o Type: to be assigned by IANA (suggested value 3). + + o Length: 18. + + o RESERVED: 8 bits. SHOULD be unset on transmission and MUST be + ignored on receipt. + + o Flags: 8 bits. No flags are defined in this document. + + o Opaque Container: 128 bits of opaque data not relevant for the + routing layer. Typically, this information is consumed by a non- + routing component of the node receiving the packet (i.e.: the node + in the DA). + +3.1.4. Padding TLV + + The Padding TLV is optional and with the purpose of aligning the SRH + on a 8 octet boundary. The Padding TLV has the following format: + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Type | Length | Padding (variable) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + // Padding (variable) // + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + where: + + o Type: to be assigned by IANA (suggested value 4). + + o Length: 1 to 7 + + + + + + +Previdi, et al. Expires August 5, 2017 [Page 12] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + + o Padding: from 1 to 7 octets of padding. Padding bits have no + semantic. They SHOULD be set to 0 on transmission and MUST be + ignored on receipt. + + The following applies to the Padding TLV: + + o Padding TLV is optional and MAY only appear once in the SRH. If + present, it MUST have a length between 1 and 7 octets. + + o The Padding TLV is used in order to align the SRH total length on + the 8 octet boundary. + + o When present, the Padding TLV MUST appear as the last TLV before + the HMAC TLV (if HMAC TLV is present). + + o When present, the Padding TLV MUST have a length from 1 to 7 in + order to align the SRH total lenght on a 8-octet boundary. + + o When a router inspecting the SRH encounters the Padding TLV, it + MUST assume that no other TLV (other than the HMAC) follow the + Padding TLV. + +3.1.5. 
HMAC TLV + + HMAC TLV is optional and contains the HMAC information. The HMAC TLV + has the following format: + + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Type | Length | RESERVED | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | HMAC Key ID (4 octets) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | // + | HMAC (32 octets) // + | // + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + where: + + o Type: to be assigned by IANA (suggested value 5). + + o Length: 38. + + o RESERVED: 2 octets. SHOULD be unset on transmission and MUST be + ignored on receipt. + + + + +Previdi, et al. Expires August 5, 2017 [Page 13] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + + o HMAC Key ID: 4 octets. + + o HMAC: 32 octets. + + o HMAC and HMAC Key ID usage is described in Section 5 + + The Following applies to the HMAC TLV: + + o When present, the HMAC TLV MUST be encoded as the last TLV of the + SRH. + + o If the HMAC TLV is present, the SRH H-Flag (Figure 4) MUST be set. + + o When the H-flag is set in the SRH, the router inspecting the SRH + MUST find the HMAC TLV in the last 38 octets of the SRH. + +3.2. SRH and RFC2460 behavior + + The SRH being a new type of the Routing Header, it also has the same + properties: + + SHOULD only appear once in the packet. + + Only the router whose address is in the DA field of the packet + header MUST inspect the SRH. + + Therefore, Segment Routing in IPv6 networks implies that the segment + identifier (i.e.: the IPv6 address of the segment) is moved into the + DA of the packet. + + The DA of the packet changes at each segment termination/completion + and therefore the final DA of the packet MUST be encoded as the last + segment of the path. + +4. SRH Procedures + + In this section we describe the different procedures on the SRH. + +4.1. Source SR Node + + A Source SR Node can be any node originating an IPv6 packet with its + IPv6 and Segment Routing Headers. This include either: + + A host originating an IPv6 packet. + + An SR domain ingress router encapsulating a received IPv6 packet + into an outer IPv6 header followed by an SRH. + + + + +Previdi, et al. Expires August 5, 2017 [Page 14] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + + The mechanism through which a Segment List is derived is outside of + the scope of this document. As an example, the Segment List may be + obtained through: + + Local path computation. + + Local configuration. + + Interaction with a centralized controller delivering the path. + + Any other mechanism. + + The following are the steps of the creation of the SRH: + + Next Header and Hdr Ext Len fields are set according to [RFC2460]. + + Routing Type field is set as TBD (to be allocated by IANA, + suggested value 4). + + The Segment List is built with the FIRST segment of the path + encoded in the LAST element of the Segment List. Subsequent + segments are encoded on top of the first segment. Finally, the + LAST segment of the path is encoded in the FIRST element of the + Segment List. In other words, the Segment List is encoded in the + reverse order of the path. + + The final DA of the packet is encoded as the last segment of the + path (encoded in the first element of the Segment List). + + The DA of the packet is set with the value of the first segment + (found in the last element of the segment list). 
+ + The Segments Left field is set to n-1 where n is the number of + elements in the Segment List. + + The First Segment field is set to n-1 where n is the number of + elements in the Segment List. + + The packet is sent out towards the first segment (i.e.: + represented in the packet DA). + + HMAC TLV may be set according to Section 5. + +4.2. Transit Node + + According to [RFC2460], the only node who is allowed to inspect the + Routing Extension Header (and therefore the SRH), is the node + corresponding to the DA of the packet. Any other transit node MUST + + + +Previdi, et al. Expires August 5, 2017 [Page 15] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + + NOT inspect the underneath routing header and MUST forward the packet + towards the DA and according to the IPv6 routing table. + + In the example case described in Section 2.2.2, when SR capable nodes + are connected through an overlay spanning multiple third-party + infrastructure, it is safe to send SRH packets (i.e.: packet having a + Segment Routing Header) between each other overlay/SR-capable nodes + as long as the segment list does not include any of the transit + provider nodes. In addition, as a generic security measure, any + service provider will block any packet destined to one of its + internal routers, especially if these packets have an extended header + in it. + +4.3. SR Segment Endpoint Node + + The SR segment endpoint node is the node whose address is in the DA. + The segment endpoint node inspects the SRH and does: + + 1. IF DA = myself (segment endpoint) + 2. IF Segments Left > 0 THEN + decrement Segments Left + update DA with Segment List[Segments Left] + 3. ELSE continue IPv6 processing of the packet + End of processing. + 4. Forward the packet out + +5. Security Considerations + + This section analyzes the security threat model, the security issues + and proposed solutions related to the new Segment Routing Header. + + The Segment Routing Header (SRH) is simply another type of the + routing header as described in RFC 2460 [RFC2460] and is: + + o Added by an SR edge router when entering the segment routing + domain or by the originating host itself. The source host can + even be outside the SR domain; + + o inspected and acted upon when reaching the destination address of + the IP header per RFC 2460 [RFC2460]. + + Per RFC2460 [RFC2460], routers on the path that simply forward an + IPv6 packet (i.e. the IPv6 destination address is none of theirs) + will never inspect and process the content of the SRH. Routers whose + one interface IPv6 address equals the destination address field of + the IPv6 packet MUST parse the SRH and, if supported and if the local + configuration allows it, MUST act accordingly to the SRH content. + + + + +Previdi, et al. Expires August 5, 2017 [Page 16] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + + According to RFC2460 [RFC2460], the default behavior of a non SR- + capable router upon receipt of an IPv6 packet with SRH destined to an + address of its, is to: + + o ignore the SRH completely if the Segment Left field is 0 and + proceed to process the next header in the IPv6 packet; + + o discard the IPv6 packet if Segment Left field is greater than 0, + it MAY send a Parameter Problem ICMP message back to the Source + Address. + +5.1. Threat model + +5.1.1. 
Source routing threats + + Using an SRH is similar to source routing, therefore it has some + well-known security issues as described in RFC4942 [RFC4942] section + 2.1.1 and RFC5095 [RFC5095]: + + o amplification attacks: where a packet could be forged in such a + way to cause looping among a set of SR-enabled routers causing + unnecessary traffic, hence a Denial of Service (DoS) against + bandwidth; + + o reflection attack: where a hacker could force an intermediate node + to appear as the immediate attacker, hence hiding the real + attacker from naive forensic; + + o bypass attack: where an intermediate node could be used as a + stepping stone (for example in a De-Militarized Zone) to attack + another host (for example in the datacenter or any back-end + server). + +5.1.2. Applicability of RFC 5095 to SRH + + First of all, the reader must remember this specific part of section + 1 of RFC5095 [RFC5095], "A side effect is that this also eliminates + benign RH0 use-cases; however, such applications may be facilitated + by future Routing Header specifications.". In short, it is not + forbidden to create new secure type of Routing Header; for example, + RFC 6554 (RPL) [RFC6554] also creates a new Routing Header type for a + specific application confined in a single network. + + In the segment routing architecture described in + [I-D.ietf-spring-segment-routing] there are basically two kinds of + nodes (routers and hosts): + + + + + +Previdi, et al. Expires August 5, 2017 [Page 17] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + + o nodes within the SR domain, which is within one single + administrative domain, i.e., where all nodes are trusted anyway + else the damage caused by those nodes could be worse than + amplification attacks: traffic interception, man-in-the-middle + attacks, more server DoS by dropping packets, and so on. + + o nodes outside of the SR domain, which is outside of the + administrative segment routing domain hence they cannot be trusted + because there is no physical security for those nodes, i.e., they + can be replaced by hostile nodes or can be coerced in wrong + behaviors. + + The main use case for SR consists of the single administrative domain + where only trusted nodes with SR enabled and configured participate + in SR: this is the same model as in RFC6554 [RFC6554]. All non- + trusted nodes do not participate as either SR processing is not + enabled by default or because they only process SRH from nodes within + their domain. + + Moreover, all SR nodes ignore SRH created by outsiders based on + topology information (received on a peering or internal interface) or + on presence and validity of the HMAC field. Therefore, if + intermediate nodes ONLY act on valid and authorized SRH (such as + within a single administrative domain), then there is no security + threat similar to RH-0. Hence, the RFC 5095 [RFC5095] attacks are + not applicable. + +5.1.3. Service stealing threat + + Segment routing is used for added value services, there is also a + need to prevent non-participating nodes to use those services; this + is called 'service stealing prevention'. + +5.1.4. Topology disclosure + + The SRH may also contains IPv6 addresses of some intermediate SR- + nodes in the path towards the destination, this obviously reveals + those addresses to the potentially hostile attackers if those + attackers are able to intercept packets containing SRH. 
On the other + hand, if the attacker can do a traceroute whose probes will be + forwarded along the SR path, then there is little learned by + intercepting the SRH itself. + +5.1.5. ICMP Generation + + Per section 4.4 of RFC2460 [RFC2460], when destination nodes (i.e. + where the destination address is one of theirs) receive a Routing + Header with unsupported Routing Type, the required behavior is: + + + +Previdi, et al. Expires August 5, 2017 [Page 18] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + + o If Segments Left is zero, the node must ignore the Routing header + and proceed to process the next header in the packet. + + o If Segments Left is non-zero, the node must discard the packet and + send an ICMP Parameter Problem, Code 0, message to the packet's + Source Address, pointing to the unrecognized Routing Type. + + This required behavior could be used by an attacker to force the + generation of ICMP message by any node. The attacker could send + packets with SRH (with Segment Left set to 0) destined to a node not + supporting SRH. Per RFC2460 [RFC2460], the destination node could + generate an ICMP message, causing a local CPU utilization and if the + source of the offending packet with SRH was spoofed could lead to a + reflection attack without any amplification. + + It must be noted that this is a required behavior for any unsupported + Routing Type and not limited to SRH packets. So, it is not specific + to SRH and the usual rate limiting for ICMP generation is required + anyway for any IPv6 implementation and has been implemented and + deployed for many years. + +5.2. Security fields in SRH + + This section summarizes the use of specific fields in the SRH. They + are based on a key-hashed message authentication code (HMAC). + + The security-related fields in the SRH are instantiated by the HMAC + TLV, containing: + + o HMAC Key-id, 32 bits wide; + + o HMAC, 256 bits wide (optional, exists only if HMAC Key-id is not + 0). + + The HMAC field is the output of the HMAC computation (per RFC 2104 + [RFC2104]) using a pre-shared key identified by HMAC Key-id and of + the text which consists of the concatenation of: + + o the source IPv6 address; + + o First Segment field; + + o an octet of bit flags; + + o HMAC Key-id; + + o all addresses in the Segment List. + + + + +Previdi, et al. Expires August 5, 2017 [Page 19] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + + The purpose of the HMAC TLV is to verify the validity, the integrity + and the authorization of the SRH itself. If an outsider of the SR + domain does not have access to a current pre-shared secret, then it + cannot compute the right HMAC field and the first SR router on the + path processing the SRH and configured to check the validity of the + HMAC will simply reject the packet. + + The HMAC TLV is located at the end of the SRH simply because only the + router on the ingress of the SR domain needs to process it, then all + other SR nodes can ignore it (based on local policy) because they + trust the upstream router. This is to speed up forwarding operations + because SR routers which do not validate the SRH do not need to parse + the SRH until the end. + + The HMAC Key-id field allows for the simultaneous existence of + several hash algorithms (SHA-256, SHA3-256 ... or future ones) as + well as pre-shared keys. 
The HMAC Key-id field is opaque, i.e., it + has neither syntax nor semantic except as an index to the right + combination of pre-shared key and hash algorithm and except that a + value of 0 means that there is no HMAC field. Having an HMAC Key-id + field allows for pre-shared key roll-over when two pre-shared keys + are supported for a while when all SR nodes converged to a fresher + pre-shared key. It could also allow for interoperation among + different SR domains if allowed by local policy and assuming a + collision-free HMAC Key Id allocation. + + When a specific SRH is linked to a time-related service (such as + turbo-QoS for a 1-hour period) where the DA, Segment ID (SID) are + identical, then it is important to refresh the shared-secret + frequently as the HMAC validity period expires only when the HMAC + Key-id and its associated shared-secret expires. + +5.2.1. Selecting a hash algorithm + + The HMAC field in the HMAC TLV is 256 bit wide. Therefore, the HMAC + MUST be based on a hash function whose output is at least 256 bits. + If the output of the hash function is 256, then this output is simply + inserted in the HMAC field. If the output of the hash function is + larger than 256 bits, then the output value is truncated to 256 by + taking the least-significant 256 bits and inserting them in the HMAC + field. + + SRH implementations can support multiple hash functions but MUST + implement SHA-2 [FIPS180-4] in its SHA-256 variant. + + NOTE: SHA-1 is currently used by some early implementations used for + quick interoperations testing, the 160-bit hash value must then be + + + + +Previdi, et al. Expires August 5, 2017 [Page 20] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + + right-hand padded with 96 bits set to 0. The authors understand that + this is not secure but is ok for limited tests. + +5.2.2. Performance impact of HMAC + + While adding an HMAC to each and every SR packet increases the + security, it has a performance impact. Nevertheless, it must be + noted that: + + o the HMAC field is used only when SRH is added by a device (such as + a home set-up box) which is outside of the segment routing domain. + If the SRH is added by a router in the trusted segment routing + domain, then, there is no need for an HMAC field, hence no + performance impact. + + o when present, the HMAC field MUST only be checked and validated by + the first router of the segment routing domain, this router is + named 'validating SR router'. Downstream routers may not inspect + the HMAC field. + + o this validating router can also have a cache of to improve the performance. It is not the + same use case as in IPsec where HMAC value was unique per packet, + in SRH, the HMAC value is unique per flow. + + o Last point, hash functions such as SHA-2 have been optimized for + security and performance and there are multiple implementations + with good performance. + + With the above points in mind, the performance impact of using HMAC + is minimized. + +5.2.3. Pre-shared key management + + The field HMAC Key-id allows for: + + o key roll-over: when there is a need to change the key (the hash + pre-shared secret), then multiple pre-shared keys can be used + simultaneously. The validating routing can have a table of for the currently active and future + keys. 
+ + o different algorithms: by extending the previous table to , the validating router + can also support simultaneously several hash algorithms (see + section Section 5.2.1) + + The pre-shared secret distribution can be done: + + + +Previdi, et al. Expires August 5, 2017 [Page 21] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + + o in the configuration of the validating routers, either by static + configuration or any SDN oriented approach; + + o dynamically using a trusted key distribution such as [RFC6407] + + The intent of this document is NOT to define yet-another-key- + distribution-protocol. + +5.3. Deployment Models + +5.3.1. Nodes within the SR domain + + An SR domain is defined as a set of interconnected routers where all + routers at the perimeter are configured to add and act on SRH. Some + routers inside the SR domain can also act on SRH or simply forward + IPv6 packets. + + The routers inside an SR domain can be trusted to generate SRH and to + process SRH received on interfaces that are part of the SR domain. + These nodes MUST drop all SRH packets received on an interface that + is not part of the SR domain and containing an SRH whose HMAC field + cannot be validated by local policies. This includes obviously + packet with an SRH generated by a non-cooperative SR domain. + + If the validation fails, then these packets MUST be dropped, ICMP + error messages (parameter problem) SHOULD be generated (but rate + limited) and SHOULD be logged. + +5.3.2. Nodes outside of the SR domain + + Nodes outside of the SR domain cannot be trusted for physical + security; hence, they need to request by some trusted means (outside + of the scope of this document) a complete SRH for each new connection + (i.e. new destination address). The received SRH MUST include an + HMAC TLV which is computed correctly (see Section 5.2). + + When an outside node sends a packet with an SRH and towards an SR + domain ingress node, the packet MUST contain the HMAC TLV (with a + Key-id and HMAC fields) and the the destination address MUST be an + address of an SR domain ingress node . + + The ingress SR router, i.e., the router with an interface address + equals to the destination address, MUST verify the HMAC TLV. + + If the validation is successful, then the packet is simply forwarded + as usual for an SR packet. As long as the packet travels within the + SR domain, no further HMAC check needs to be done. Subsequent + + + + +Previdi, et al. Expires August 5, 2017 [Page 22] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + + routers in the SR domain MAY verify the HMAC TLV when they process + the SRH (i.e. when they are the destination). + + If the validation fails, then this packet MUST be dropped, an ICMP + error message (parameter problem) SHOULD be generated (but rate + limited) and SHOULD be logged. + +5.3.3. SR path exposure + + As the intermediate SR nodes addresses appears in the SRH, if this + SRH is visible to an outsider then he/she could reuse this knowledge + to launch an attack on the intermediate SR nodes or get some insider + knowledge on the topology. This is especially applicable when the + path between the source node and the first SR domain ingress router + is on the public Internet. + + The first remark is to state that 'security by obscurity' is never + enough; in other words, the security policy of the SR domain MUST + assume that the internal topology and addressing is known by the + attacker. 
A simple traceroute will also give the same information + (with even more information as all intermediate nodes between SID + will also be exposed). IPsec Encapsulating Security Payload + [RFC4303] cannot be use to protect the SRH as per RFC4303 the ESP + header must appear after any routing header (including SRH). + + To prevent a user to leverage the gained knowledge by intercepting + SRH, it it recommended to apply an infrastructure Access Control List + (iACL) at the edge of the SR domain. This iACL will drop all packets + from outside the SR-domain whose destination is any address of any + router inside the domain. This security policy should be tuned for + local operations. + +5.3.4. Impact of BCP-38 + + BCP-38 [RFC2827], also known as "Network Ingress Filtering", checks + whether the source address of packets received on an interface is + valid for this interface. The use of loose source routing such as + SRH forces packets to follow a path which differs from the expected + routing. Therefore, if BCP-38 was implemented in all routers inside + the SR domain, then SR packets could be received by an interface + which is not expected one and the packets could be dropped. + + As an SR domain is usually a subset of one administrative domain, and + as BCP-38 is only deployed at the ingress routers of this + administrative domain and as packets arriving at those ingress + routers have been normally forwarded using the normal routing + information, then there is no reason why this ingress router should + + + + +Previdi, et al. Expires August 5, 2017 [Page 23] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + + drop the SRH packet based on BCP-38. Routers inside the domain + commonly do not apply BCP-38; so, this is not a problem. + +6. IANA Considerations + + This document makes the following registrations in the Internet + Protocol Version 6 (IPv6) Parameters "Routing Type" registry + maintained by IANA: + + Suggested Description Reference + Value + ---------------------------------------------------------- + 4 Segment Routing Header (SRH) This document + + In addition, this document request IANA to create and maintain a new + Registry: "Segment Routing Header Type-Value Objects". The following + code-points are requested from the registry: + + Registry: Segment Routing Header Type-Value Objects + + Suggested Description Reference + Value + ----------------------------------------------------- + 1 Ingress Node TLV This document + 2 Egress Node TLV This document + 3 Opaque Container TLV This document + 4 Padding TLV This document + 5 HMAC TLV This document + +7. Manageability Considerations + + TBD + +8. Contributors + + Dave Barach, John Leddy, John Brzozowski, Pierre Francois, Nagendra + Kumar, Mark Townsley, Christian Martin, Roberta Maglione, James + Connolly, Aloys Augustin contributed to the content of this document. + +9. Acknowledgements + + The authors would like to thank Ole Troan, Bob Hinden, Fred Baker, + Brian Carpenter, Alexandru Petrescu and Punit Kumar Jaiswal for their + comments to this document. + + + + + + + +Previdi, et al. Expires August 5, 2017 [Page 24] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + +10. References + +10.1. Normative References + + [FIPS180-4] + National Institute of Standards and Technology, "FIPS + 180-4 Secure Hash Standard (SHS)", March 2012, + . + + [RFC2119] Bradner, S., "Key words for use in RFCs to Indicate + Requirement Levels", BCP 14, RFC 2119, + DOI 10.17487/RFC2119, March 1997, + . 
+ + [RFC2460] Deering, S. and R. Hinden, "Internet Protocol, Version 6 + (IPv6) Specification", RFC 2460, DOI 10.17487/RFC2460, + December 1998, . + + [RFC4303] Kent, S., "IP Encapsulating Security Payload (ESP)", + RFC 4303, DOI 10.17487/RFC4303, December 2005, + . + + [RFC5095] Abley, J., Savola, P., and G. Neville-Neil, "Deprecation + of Type 0 Routing Headers in IPv6", RFC 5095, + DOI 10.17487/RFC5095, December 2007, + . + + [RFC6407] Weis, B., Rowles, S., and T. Hardjono, "The Group Domain + of Interpretation", RFC 6407, DOI 10.17487/RFC6407, + October 2011, . + +10.2. Informative References + + [I-D.ietf-isis-segment-routing-extensions] + Previdi, S., Filsfils, C., Bashandy, A., Gredler, H., + Litkowski, S., Decraene, B., and j. jefftant@gmail.com, + "IS-IS Extensions for Segment Routing", draft-ietf-isis- + segment-routing-extensions-09 (work in progress), October + 2016. + + [I-D.ietf-ospf-ospfv3-segment-routing-extensions] + Psenak, P., Previdi, S., Filsfils, C., Gredler, H., + Shakir, R., Henderickx, W., and J. Tantsura, "OSPFv3 + Extensions for Segment Routing", draft-ietf-ospf-ospfv3- + segment-routing-extensions-07 (work in progress), October + 2016. + + + + +Previdi, et al. Expires August 5, 2017 [Page 25] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + + [I-D.ietf-spring-ipv6-use-cases] + Brzozowski, J., Leddy, J., Townsley, W., Filsfils, C., and + R. Maglione, "IPv6 SPRING Use Cases", draft-ietf-spring- + ipv6-use-cases-08 (work in progress), January 2017. + + [I-D.ietf-spring-resiliency-use-cases] + Filsfils, C., Previdi, S., Decraene, B., and R. Shakir, + "Resiliency use cases in SPRING networks", draft-ietf- + spring-resiliency-use-cases-08 (work in progress), October + 2016. + + [I-D.ietf-spring-segment-routing] + Filsfils, C., Previdi, S., Decraene, B., Litkowski, S., + and R. Shakir, "Segment Routing Architecture", draft-ietf- + spring-segment-routing-10 (work in progress), November + 2016. + + [I-D.ietf-spring-segment-routing-mpls] + Filsfils, C., Previdi, S., Bashandy, A., Decraene, B., + Litkowski, S., Horneffer, M., Shakir, R., + jefftant@gmail.com, j., and E. Crabbe, "Segment Routing + with MPLS data plane", draft-ietf-spring-segment-routing- + mpls-06 (work in progress), January 2017. + + [RFC1940] Estrin, D., Li, T., Rekhter, Y., Varadhan, K., and D. + Zappala, "Source Demand Routing: Packet Format and + Forwarding Specification (Version 1)", RFC 1940, + DOI 10.17487/RFC1940, May 1996, + . + + [RFC2104] Krawczyk, H., Bellare, M., and R. Canetti, "HMAC: Keyed- + Hashing for Message Authentication", RFC 2104, + DOI 10.17487/RFC2104, February 1997, + . + + [RFC2827] Ferguson, P. and D. Senie, "Network Ingress Filtering: + Defeating Denial of Service Attacks which employ IP Source + Address Spoofing", BCP 38, RFC 2827, DOI 10.17487/RFC2827, + May 2000, . + + [RFC4942] Davies, E., Krishnan, S., and P. Savola, "IPv6 Transition/ + Co-existence Security Considerations", RFC 4942, + DOI 10.17487/RFC4942, September 2007, + . + + + + + + + +Previdi, et al. Expires August 5, 2017 [Page 26] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + + [RFC6554] Hui, J., Vasseur, JP., Culler, D., and V. Manral, "An IPv6 + Routing Header for Source Routes with the Routing Protocol + for Low-Power and Lossy Networks (RPL)", RFC 6554, + DOI 10.17487/RFC6554, March 2012, + . + + [RFC7855] Previdi, S., Ed., Filsfils, C., Ed., Decraene, B., + Litkowski, S., Horneffer, M., and R. 
Shakir, "Source + Packet Routing in Networking (SPRING) Problem Statement + and Requirements", RFC 7855, DOI 10.17487/RFC7855, May + 2016, . + +Authors' Addresses + + Stefano Previdi (editor) + Cisco Systems, Inc. + Via Del Serafico, 200 + Rome 00142 + Italy + + Email: sprevidi@cisco.com + + + Clarence Filsfils + Cisco Systems, Inc. + Brussels + BE + + Email: cfilsfil@cisco.com + + + Brian Field + Comcast + 4100 East Dry Creek Road + Centennial, CO 80122 + US + + Email: Brian_Field@cable.comcast.com + + + Ida Leung + Rogers Communications + 8200 Dixie Road + Brampton, ON L6T 0C1 + CA + + Email: Ida.Leung@rci.rogers.com + + + + +Previdi, et al. Expires August 5, 2017 [Page 27] + +Internet-Draft IPv6 Segment Routing Header (SRH) February 2017 + + + Jen Linkova + Google + 1600 Amphitheatre Parkway + Mountain View, CA 94043 + US + + Email: furry@google.com + + + Ebben Aries + Facebook + US + + Email: exa@fb.com + + + Tomoya Kosugi + NTT + 3-9-11, Midori-Cho Musashino-Shi, + Tokyo 180-8585 + JP + + Email: kosugi.tomoya@lab.ntt.co.jp + + + Eric Vyncke + Cisco Systems, Inc. + De Kleetlaann 6A + Diegem 1831 + Belgium + + Email: evyncke@cisco.com + + + David Lebrun + Universite Catholique de Louvain + Place Ste Barbe, 2 + Louvain-la-Neuve, 1348 + Belgium + + Email: david.lebrun@uclouvain.be + + + + + + + + + + +Previdi, et al. Expires August 5, 2017 [Page 28] \ No newline at end of file diff --git a/src/vnet/srv6/sr.api b/src/vnet/srv6/sr.api new file mode 100644 index 00000000..9e900741 --- /dev/null +++ b/src/vnet/srv6/sr.api @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2015-2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** \brief IPv6 SR LocalSID add/del request + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_del Boolean of whether its a delete instruction + @param localsid_addr IPv6 address of the localsid + @param end_psp Boolean of whether decapsulation is allowed in this function + @param behavior Type of behavior (function) for this localsid + @param sw_if_index Only for L2/L3 xconnect. OIF. In VRF variant the fib_table. + @param vlan_index Only for L2 xconnect. Outgoing VLAN tag. + @param fib_table FIB table in which we should install the localsid entry + @param nh_addr Next Hop IPv4/IPv6 address. Only for L2/L3 xconnect. +*/ +autoreply define sr_localsid_add_del +{ + u32 client_index; + u32 context; + u8 is_del; + u8 localsid_addr[16]; + u8 end_psp; + u8 behavior; + u32 sw_if_index; + u32 vlan_index; + u32 fib_table; + u8 nh_addr[16]; +}; + +/** \brief IPv6 SR policy add + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param bsid is the bindingSID of the SR Policy + @param weight is the weight of the sid list. optional. + @param is_encap is the behavior of the SR policy. (0.SRH insert // 1.Encapsulation) + @param type is the type of the SR policy. 
(0.Default // 1.Spray) + @param fib_table is the VRF where to install the FIB entry for the BSID + @param segments is a vector of IPv6 address composing the segment list +*/ +autoreply define sr_policy_add +{ + u32 client_index; + u32 context; + u8 bsid_addr[16]; + u32 weight; + u8 is_encap; + u8 type; + u32 fib_table; + u8 n_segments; + u8 segments[0]; +}; + +/** \brief IPv6 SR policy modification + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param bsid is the bindingSID of the SR Policy + @param sr_policy_index is the index of the SR policy + @param fib_table is the VRF where to install the FIB entry for the BSID + @param operation is the operation to perform (among the top ones) + @param segments is a vector of IPv6 address composing the segment list + @param sl_index is the index of the Segment List to modify/delete + @param weight is the weight of the sid list. optional. + @param is_encap Mode. Encapsulation or SRH insertion. +*/ +autoreply define sr_policy_mod +{ + u32 client_index; + u32 context; + u8 bsid_addr[16]; + u32 sr_policy_index; + u32 fib_table; + u8 operation; + u32 sl_index; + u32 weight; + u8 n_segments; + u8 segments[0]; +}; + +/** \brief IPv6 SR policy deletion + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param bsid is the bindingSID of the SR Policy + @param index is the index of the SR policy +*/ +autoreply define sr_policy_del +{ + u32 client_index; + u32 context; + u8 bsid_addr[16]; + u32 sr_policy_index; +}; + +/** \brief IPv6 SR steering add/del + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param is_del + @param bsid is the bindingSID of the SR Policy (alt to sr_policy_index) + @param sr_policy is the index of the SR Policy (alt to bsid) + @param table_id is the VRF where to install the FIB entry for the BSID + @param prefix is the IPv4/v6 address for L3 traffic type + @param mask_width is the mask for L3 traffic type + @param sw_if_index is the incoming interface for L2 traffic + @param traffic_type describes the type of traffic +*/ +autoreply define sr_steering_add_del +{ + u32 client_index; + u32 context; + u8 is_del; + u8 bsid_addr[16]; + u32 sr_policy_index; + u32 table_id; + u8 prefix_addr[16]; + u32 mask_width; + u32 sw_if_index; + u8 traffic_type; +}; + +/** \brief Dump the list of SR LocalSIDs + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request +*/ +/**define sr_localsids_dump +{ + u32 client_index; + u32 context; +};*/ + +/** \brief Details about a single SR LocalSID + @param context - returned sender context, to match reply w/ request + @param localsid_addr IPv6 address of the localsid + @param behavior Type of behavior (function) for this localsid + @param end_psp Boolean of whether decapsulation is allowed in this function + @param sw_if_index Only for L2/L3 xconnect. OIF. In VRF variant the fib_table. + @param vlan_index Only for L2 xconnect. Outgoing VLAN tag. + @param fib_table FIB table in which we should install the localsid entry + @param nh_addr Next Hop IPv4/IPv6 address. Only for L2/L3 xconnect. 
+*/ +/**manual_endian define sr_localsid_details +{ + u32 context; + u8 localsid_addr[16]; + u8 behavior; + u8 end_psp; + u32 sw_if_index; + u32 vlan_index; + u32 fib_table; + u8 nh_addr[16]; +};*/ + +/* + * fd.io coding-style-patch-verification: ON + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/srv6/sr.c b/src/vnet/srv6/sr.c new file mode 100755 index 00000000..eb4f09e7 --- /dev/null +++ b/src/vnet/srv6/sr.c @@ -0,0 +1,57 @@ +/* + * sr.c: ipv6 segment routing + * + * Copyright (c) 2013 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file + * @brief Segment Routing initialization + * + */ + +#include +#include +#include +#include +#include +#include + +ip6_sr_main_t sr_main; + +/** + * @brief no-op lock function. + * The lifetime of the SR entry is managed by the control plane + */ +void +sr_dpo_lock (dpo_id_t * dpo) +{ +} + +/** + * @brief no-op unlock function. + * The lifetime of the SR entry is managed by the control plane + */ +void +sr_dpo_unlock (dpo_id_t * dpo) +{ +} + +/* +* fd.io coding-style-patch-verification: ON +* +* Local Variables: +* eval: (c-set-style "gnu") +* End: +*/ diff --git a/src/vnet/srv6/sr.h b/src/vnet/srv6/sr.h new file mode 100755 index 00000000..2014a23e --- /dev/null +++ b/src/vnet/srv6/sr.h @@ -0,0 +1,325 @@ +/* + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file + * @brief Segment Routing data structures definitions + * + */ + +#ifndef included_vnet_srv6_h +#define included_vnet_srv6_h + +#include +#include +#include +#include + +#include +#include + +#define IPv6_DEFAULT_HEADER_LENGTH 40 +#define IPv6_DEFAULT_HOP_LIMIT 64 +#define IPv6_DEFAULT_MAX_MASK_WIDTH 128 + +#define SR_BEHAVIOR_END 1 +#define SR_BEHAVIOR_X 2 +#define SR_BEHAVIOR_D_FIRST 3 /* Unused. 
Separator in between regular and D */ +#define SR_BEHAVIOR_DX2 4 +#define SR_BEHAVIOR_DX6 5 +#define SR_BEHAVIOR_DX4 6 +#define SR_BEHAVIOR_DT6 7 +#define SR_BEHAVIOR_DT4 8 +#define SR_BEHAVIOR_LAST 9 /* Must always be the last one */ + +#define SR_STEER_L2 2 +#define SR_STEER_IPV4 4 +#define SR_STEER_IPV6 6 + +#define SR_FUNCTION_SIZE 4 +#define SR_ARGUMENT_SIZE 4 + +#define SR_SEGMENT_LIST_WEIGHT_DEFAULT 1 + +/** + * @brief SR Segment List (SID list) + */ +typedef struct +{ + ip6_address_t *segments; /**< SIDs (key) */ + + u32 weight; /**< SID list weight (wECMP / UCMP) */ + + u8 *rewrite; /**< Precomputed rewrite header */ + u8 *rewrite_bsid; /**< Precomputed rewrite header for bindingSID */ + + dpo_id_t bsid_dpo; /**< DPO for Encaps/Insert for BSID */ + dpo_id_t ip6_dpo; /**< DPO for Encaps/Insert IPv6 */ + dpo_id_t ip4_dpo; /**< DPO for Encaps IPv6 */ +} ip6_sr_sl_t; + +/* SR policy types */ +#define SR_POLICY_TYPE_DEFAULT 0 +#define SR_POLICY_TYPE_SPRAY 1 +/** + * @brief SR Policy + */ +typedef struct +{ + u32 *segments_lists; /**< SID lists indexes (vector) */ + + ip6_address_t bsid; /**< BindingSID (key) */ + + u8 type; /**< Type (default is 0) */ + /* SR Policy specific DPO */ + /* IF Type = DEFAULT Then Load Balancer DPO among SID lists */ + /* IF Type = SPRAY then Spray DPO with all SID lists */ + dpo_id_t bsid_dpo; /**< SR Policy specific DPO - BSID */ + dpo_id_t ip4_dpo; /**< SR Policy specific DPO - IPv6 */ + dpo_id_t ip6_dpo; /**< SR Policy specific DPO - IPv4 */ + + u32 fib_table; /**< FIB table */ + + u8 is_encap; /**< Mode (0 is SRH insert, 1 Encaps) */ +} ip6_sr_policy_t; + +/** + * @brief SR LocalSID + */ +typedef struct +{ + ip6_address_t localsid; /**< LocalSID IPv6 address */ + + char end_psp; /**< Combined with End.PSP? */ + + u16 behavior; /**< Behavior associated to this localsid */ + + union + { + u32 sw_if_index; /**< xconnect only */ + u32 vrf_index; /**< vrf only */ + }; + + u32 fib_table; /**< FIB table where localsid is registered */ + + u32 vlan_index; /**< VLAN tag (not an index) */ + + ip46_address_t next_hop; /**< Next_hop for xconnect usage only */ + + u32 nh_adj; /**< Next_adj for xconnect usage only */ + + void *plugin_mem; /**< Memory to be used by the plugin callback functions */ +} ip6_sr_localsid_t; + +typedef int (sr_plugin_callback_t) (ip6_sr_localsid_t * localsid); + +/** + * @brief SR LocalSID behavior registration + */ +typedef struct +{ + u16 sr_localsid_function_number; /**< SR LocalSID plugin function (>SR_BEHAVIOR_LAST) */ + + u8 *function_name; /**< Function name. (key). */ + + u8 *keyword_str; /**< Behavior keyword (i.e. End.X) */ + + u8 *def_str; /**< Behavior definition (i.e. Endpoint with cross-connect) */ + + u8 *params_str; /**< Behavior parameters (i.e. 
) */ + + dpo_type_t dpo; /**< DPO type registration */ + + format_function_t *ls_format; /**< LocalSID format function */ + + unformat_function_t *ls_unformat; /**< LocalSID unformat function */ + + sr_plugin_callback_t *creation; /**< Function within plugin that will be called after localsid creation*/ + + sr_plugin_callback_t *removal; /**< Function within plugin that will be called before localsid removal */ +} sr_localsid_fn_registration_t; + +/** + * @brief Steering db key + * + * L3 is IPv4/IPv6 + mask + * L2 is sf_if_index + vlan + */ +typedef struct +{ + union + { + struct + { + ip46_address_t prefix; /**< IP address of the prefix */ + u32 mask_width; /**< Mask width of the prefix */ + u32 fib_table; /**< VRF of the prefix */ + } l3; + struct + { + u32 sw_if_index; /**< Incoming software interface */ + } l2; + }; + u8 traffic_type; /**< Traffic type (IPv4, IPv6, L2) */ + u8 padding[3]; +} sr_steering_key_t; + +typedef struct +{ + sr_steering_key_t classify; /**< Traffic classification */ + u32 sr_policy; /**< SR Policy index */ +} ip6_sr_steering_policy_t; + +/** + * @brief Segment Routing main datastructure + */ +typedef struct +{ + /* L2-input -> SR rewrite next index */ + u32 l2_sr_policy_rewrite_index; + + /* SR SID lists */ + ip6_sr_sl_t *sid_lists; + + /* SRv6 policies */ + ip6_sr_policy_t *sr_policies; + + /* Hash table mapping BindingSID to SRv6 policy */ + mhash_t sr_policies_index_hash; + + /* Pool of SR localsid instances */ + ip6_sr_localsid_t *localsids; + + /* Hash table mapping LOC:FUNC to SR LocalSID instance */ + mhash_t sr_localsids_index_hash; + + /* Pool of SR steer policies instances */ + ip6_sr_steering_policy_t *steer_policies; + + /* Hash table mapping steering rules to SR steer instance */ + mhash_t sr_steer_policies_hash; + + /* L2 steering ifaces - sr_policies */ + u32 *sw_iface_sr_policies; + + /* Spray DPO */ + dpo_type_t sr_pr_spray_dpo_type; + + /* Plugin functions */ + sr_localsid_fn_registration_t *plugin_functions; + + /* Find plugin function by name */ + uword *plugin_functions_by_key; + + /* Counters */ + vlib_combined_counter_main_t sr_ls_valid_counters; + vlib_combined_counter_main_t sr_ls_invalid_counters; + + /* SR Policies FIBs */ + u32 fib_table_ip6; + u32 fib_table_ip4; + + /* convenience */ + vlib_main_t *vlib_main; + vnet_main_t *vnet_main; +} ip6_sr_main_t; + +extern ip6_sr_main_t sr_main; + +extern vlib_node_registration_t sr_policy_rewrite_encaps_node; +extern vlib_node_registration_t sr_policy_rewrite_insert_node; +extern vlib_node_registration_t sr_localsid_node; +extern vlib_node_registration_t sr_localsid_d_node; + +extern void sr_dpo_lock (dpo_id_t * dpo); +extern void sr_dpo_unlock (dpo_id_t * dpo); + +extern int +sr_localsid_register_function (vlib_main_t * vm, u8 * fn_name, + u8 * keyword_str, u8 * def_str, + u8 * params_str, dpo_type_t * dpo, + format_function_t * ls_format, + unformat_function_t * ls_unformat, + sr_plugin_callback_t * creation_fn, + sr_plugin_callback_t * removal_fn); + +extern int +sr_policy_add (ip6_address_t * bsid, ip6_address_t * segments, + u32 weight, u8 behavior, u32 fib_table, u8 is_encap); +extern int +sr_policy_mod (ip6_address_t * bsid, u32 index, u32 fib_table, + u8 operation, ip6_address_t * segments, u32 sl_index, + u32 weight); +extern int sr_policy_del (ip6_address_t * bsid, u32 index); + +extern int +sr_cli_localsid (char is_del, ip6_address_t * localsid_addr, + char end_psp, u8 behavior, u32 sw_if_index, + u32 vlan_index, u32 fib_table, ip46_address_t * nh_addr, + void *ls_plugin_mem); + 
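+/*
+ * Illustrative call sketch for sr_cli_localsid() declared above (comment
+ * only; the LocalSID address value is a hypothetical placeholder):
+ *
+ *   ip6_address_t ls_addr;      // e.g. cafe::1, parsed elsewhere
+ *   ip46_address_t nh = { 0 };  // next-hop unused for a plain End SID
+ *   sr_cli_localsid (0, &ls_addr, 0, SR_BEHAVIOR_END,
+ *                    (u32) ~0, (u32) ~0, 0, &nh, NULL);
+ *   // is_del=0, end_psp=0, no iface/VLAN, FIB table 0, no plugin memory
+ */
+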
+extern int +sr_steering_policy (int is_del, ip6_address_t * bsid, u32 sr_policy_index, + u32 table_id, ip46_address_t * prefix, u32 mask_width, + u32 sw_if_index, u8 traffic_type); + +/** + * @brief SR rewrite string computation for SRH insertion (inline) + * + * @param sl is a vector of IPv6 addresses composing the Segment List + * + * @return precomputed rewrite string for SRH insertion + */ +static inline u8 * +ip6_sr_compute_rewrite_string_insert (ip6_address_t * sl) +{ + ip6_sr_header_t *srh; + ip6_address_t *addrp, *this_address; + u32 header_length = 0; + u8 *rs = NULL; + + header_length = 0; + header_length += sizeof (ip6_sr_header_t); + header_length += (vec_len (sl) + 1) * sizeof (ip6_address_t); + + vec_validate (rs, header_length - 1); + + srh = (ip6_sr_header_t *) rs; + srh->type = ROUTING_HEADER_TYPE_SR; + srh->segments_left = vec_len (sl); + srh->first_segment = vec_len (sl); + srh->length = ((sizeof (ip6_sr_header_t) + + ((vec_len (sl) + 1) * sizeof (ip6_address_t))) / 8) - 1; + srh->flags = 0x00; + srh->reserved = 0x0000; + addrp = srh->segments + vec_len (sl); + vec_foreach (this_address, sl) + { + clib_memcpy (addrp->as_u8, this_address->as_u8, sizeof (ip6_address_t)); + addrp--; + } + return rs; +} + + +#endif /* included_vnet_sr_h */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/srv6/sr_api.c b/src/vnet/srv6/sr_api.c new file mode 100644 index 00000000..925b50a1 --- /dev/null +++ b/src/vnet/srv6/sr_api.c @@ -0,0 +1,244 @@ +/* + *------------------------------------------------------------------ + * sr_api.c - ipv6 segment routing api + * + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + *------------------------------------------------------------------ + */ + +#include +#include +#include + +#include +#include +#include + +#include + +#define vl_typedefs /* define message structures */ +#include +#undef vl_typedefs + +#define vl_endianfun /* define message structures */ +#include +#undef vl_endianfun + +/* instantiate all the print functions we know about */ +#define vl_print(handle, ...) 
vlib_cli_output (handle, __VA_ARGS__) +#define vl_printfun +#include +#undef vl_printfun + +#include + +#define foreach_vpe_api_msg \ +_(SR_LOCALSID_ADD_DEL, sr_localsid_add_del) \ +_(SR_POLICY_DEL, sr_policy_del) \ +_(SR_STEERING_ADD_DEL, sr_steering_add_del) +//_(SR_LOCALSIDS, sr_localsids_dump) +//_(SR_LOCALSID_BEHAVIORS, sr_localsid_behaviors_dump) + +static void vl_api_sr_localsid_add_del_t_handler + (vl_api_sr_localsid_add_del_t * mp) +{ + vl_api_sr_localsid_add_del_reply_t *rmp; + int rv = 0; +/* + * int sr_cli_localsid (char is_del, ip6_address_t *localsid_addr, + * char end_psp, u8 behavior, u32 sw_if_index, u32 vlan_index, u32 fib_table, + * ip46_address_t *nh_addr, void *ls_plugin_mem) + */ + rv = sr_cli_localsid (mp->is_del, + (ip6_address_t *) & mp->localsid_addr, + mp->end_psp, + mp->behavior, + ntohl (mp->sw_if_index), + ntohl (mp->vlan_index), + ntohl (mp->fib_table), + (ip46_address_t *) & mp->nh_addr, NULL); + + REPLY_MACRO (VL_API_SR_LOCALSID_ADD_DEL_REPLY); +} + +static void +vl_api_sr_policy_add_t_handler (vl_api_sr_policy_add_t * mp) +{ + vl_api_sr_policy_add_reply_t *rmp; + ip6_address_t *segments = 0, *seg; + ip6_address_t *this_address = (ip6_address_t *) mp->segments; + + int i; + for (i = 0; i < mp->n_segments; i++) + { + vec_add2 (segments, seg, 1); + clib_memcpy (seg->as_u8, this_address->as_u8, sizeof (*this_address)); + this_address++; + } + +/* + * sr_policy_add (ip6_address_t *bsid, ip6_address_t *segments, + * u32 weight, u8 behavior, u32 fib_table, u8 is_encap) + */ + int rv = 0; + rv = sr_policy_add ((ip6_address_t *) & mp->bsid_addr, + segments, + ntohl (mp->weight), + mp->type, ntohl (mp->fib_table), mp->is_encap); + + REPLY_MACRO (VL_API_SR_POLICY_ADD_REPLY); +} + +static void +vl_api_sr_policy_mod_t_handler (vl_api_sr_policy_mod_t * mp) +{ + vl_api_sr_policy_mod_reply_t *rmp; + + ip6_address_t *segments = 0, *seg; + ip6_address_t *this_address = (ip6_address_t *) mp->segments; + + int i; + for (i = 0; i < mp->n_segments; i++) + { + vec_add2 (segments, seg, 1); + clib_memcpy (seg->as_u8, this_address->as_u8, sizeof (*this_address)); + this_address++; + } + + int rv = 0; +/* + * int + * sr_policy_mod(ip6_address_t *bsid, u32 index, u32 fib_table, + * u8 operation, ip6_address_t *segments, u32 sl_index, + * u32 weight, u8 is_encap) + */ + rv = sr_policy_mod ((ip6_address_t *) & mp->bsid_addr, + ntohl (mp->sr_policy_index), + ntohl (mp->fib_table), + mp->operation, + segments, ntohl (mp->sl_index), ntohl (mp->weight)); + + REPLY_MACRO (VL_API_SR_POLICY_MOD_REPLY); +} + +static void +vl_api_sr_policy_del_t_handler (vl_api_sr_policy_del_t * mp) +{ + vl_api_sr_policy_del_reply_t *rmp; + int rv = 0; +/* + * int + * sr_policy_del (ip6_address_t *bsid, u32 index) + */ + rv = sr_policy_del ((ip6_address_t *) & mp->bsid_addr, + ntohl (mp->sr_policy_index)); + + REPLY_MACRO (VL_API_SR_POLICY_DEL_REPLY); +} + +static void vl_api_sr_steering_add_del_t_handler + (vl_api_sr_steering_add_del_t * mp) +{ + vl_api_sr_steering_add_del_reply_t *rmp; + int rv = 0; +/* + * int + * sr_steering_policy(int is_del, ip6_address_t *bsid, u32 sr_policy_index, + * u32 table_id, ip46_address_t *prefix, u32 mask_width, u32 sw_if_index, + * u8 traffic_type) + */ + rv = sr_steering_policy (mp->is_del, + (ip6_address_t *) & mp->bsid_addr, + ntohl (mp->sr_policy_index), + ntohl (mp->table_id), + (ip46_address_t *) & mp->prefix_addr, + ntohl (mp->mask_width), + ntohl (mp->sw_if_index), mp->traffic_type); + + REPLY_MACRO (VL_API_SR_STEERING_ADD_DEL_REPLY); +} + +/* + * sr_api_hookup + * Add 
vpe's API message handlers to the table. + * vlib has alread mapped shared memory and + * added the client registration handlers. + * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process() + */ +#define vl_msg_name_crc_list +#include +#undef vl_msg_name_crc_list + +static void +setup_message_id_table (api_main_t * am) +{ +#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id); + foreach_vl_msg_name_crc_sr; +#undef _ +} + +static clib_error_t * +sr_api_hookup (vlib_main_t * vm) +{ + api_main_t *am = &api_main; + +#define _(N,n) \ + vl_msg_api_set_handlers(VL_API_##N, #n, \ + vl_api_##n##_t_handler, \ + vl_noop_handler, \ + vl_api_##n##_t_endian, \ + vl_api_##n##_t_print, \ + sizeof(vl_api_##n##_t), 1); + foreach_vpe_api_msg; +#undef _ + + /* + * Manually register the sr policy add msg, so we trace + * enough bytes to capture a typical segment list + */ + vl_msg_api_set_handlers (VL_API_SR_POLICY_ADD, + "sr_policy_add", + vl_api_sr_policy_add_t_handler, + vl_noop_handler, + vl_api_sr_policy_add_t_endian, + vl_api_sr_policy_add_t_print, 256, 1); + + /* + * Manually register the sr policy mod msg, so we trace + * enough bytes to capture a typical segment list + */ + vl_msg_api_set_handlers (VL_API_SR_POLICY_MOD, + "sr_policy_mod", + vl_api_sr_policy_mod_t_handler, + vl_noop_handler, + vl_api_sr_policy_mod_t_endian, + vl_api_sr_policy_mod_t_print, 256, 1); + + /* + * Set up the (msg_name, crc, message-id) table + */ + setup_message_id_table (am); + + return 0; +} + +VLIB_API_INIT_FUNCTION (sr_api_hookup); + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/srv6/sr_doc.md b/src/vnet/srv6/sr_doc.md new file mode 100644 index 00000000..5cdfc906 --- /dev/null +++ b/src/vnet/srv6/sr_doc.md @@ -0,0 +1,55 @@ +# SRv6: Segment Routing for IPv6 {#srv6_doc} + +This is a memo intended to contain documentation of the VPP SRv6 implementation. +Everything that is not directly obvious should come here. +For any feedback on content that should be explained please mailto:pcamaril@cisco.com + +## Segment Routing + +Segment routing is a network technology focused on addressing the limitations of existing IP and Multiprotocol Label Switching (MPLS) networks in terms of simplicity, scale, and ease of operation. It is a foundation for application engineered routing as it prepares the networks for new business models where applications can control the network behavior. + +Segment routing seeks the right balance between distributed intelligence and centralized optimization and programming. It was built for the software-defined networking (SDN) era. + +Segment routing enhances packet forwarding behavior by enabling a network to transport unicast packets through a specific forwarding path, different from the normal path that a packet usually takes (IGP shortest path or BGP best path). This capability benefits many use cases, and one can build those specific paths based on application requirements. + +Segment routing uses the source routing paradigm. A node, usually a router but also a switch, a trusted server, or a virtual forwarder running on a hypervisor, steers a packet through an ordered list of instructions, called segments. A segment can represent any instruction, topological or service-based. A segment can have a local semantic to a segment-routing node or global within a segment-routing network. 
Segment routing allows an operator to enforce a flow through any topological path and service chain while maintaining per-flow state only at the ingress node to the segment-routing network. Segment routing also supports equal-cost multipath (ECMP) by design.
+
+Segment routing can operate with either an MPLS or an IPv6 data plane. All the currently available MPLS services, such as Layer 3 VPN (L3VPN), L2VPN (Virtual Private Wire Service [VPWS], Virtual Private LAN Services [VPLS], Ethernet VPN [E-VPN], and Provider Backbone Bridging Ethernet VPN [PBB-EVPN]), can run on top of a segment-routing transport network.
+
+**The implementation of Segment Routing in VPP covers both the IPv6 data plane (SRv6) as well as the MPLS data plane (SR-MPLS). This page contains the SRv6 documentation.**
+
+## Segment Routing terminology
+
+* Segment Routing Header (SRH): IPv6 routing extension header of type 'Segment Routing'. (draft-ietf-6man-segment-routing-header-05)
+* SegmentID (SID): is an IPv6 address.
+* Segment List (SL) (SID List): is the sequence of SIDs that the packet will traverse.
+* SR Policy: defines the SRH that will be applied to a packet. A packet steered into an SR policy may either receive the SRH by IPv6 header encapsulation (as recommended in draft-ietf-6man-rfc2460bis) or have the SRH inserted within its existing IPv6 header. An SR policy is uniquely identified by its Binding SID and associated with a weighted set of Segment Lists. In case several SID lists are defined, traffic steered into the policy is load-balanced among them according to their respective weights.
+* Local SID: is a SID associated with a processing function on the local node, which may range from advancing to the next SID in the SRH to complex user-defined behaviors. When a FIB lookup, either in the main FIB or in a specific VRF, returns a match on a local SID, the associated function is performed.
+* BindingSID: a BindingSID is a SID (only one) associated one-to-one with an SR Policy. If a packet arrives with an IPv6 DA corresponding to a BindingSID, then the SR policy will be applied to that packet.
+
+## SRv6 Features in VPP
+
+The SRv6 Network Programming (*draft-filsfils-spring-srv6-network-programming*) defines the SRv6 architecture.
+
+VPP supports the following SRv6 LocalSID functions: End, End.X, End.DX6, End.DT6, End.DX4, End.DT4, End.DX2, End.B6, End.B6.Encaps.
+
+For further information on each specific function and how to configure it: @subpage srv6_localsid_doc
+
+
+The Segment Routing Policy (*draft-filsfils-spring-segment-routing-policy*) defines SR Policies.
+
+VPP supports SRv6 Policies with T.Insert and T.Encaps behaviors.
+
+For further information on how to create SR Policies: @subpage srv6_policy_doc
+
+For further information on how to steer traffic into SR Policies: @subpage srv6_steering_doc
+
+## SRv6 LocalSID development framework
+
+One of the *'key'* concepts about SRv6 is network programmability. This is why an SRv6 LocalSID is associated with a specific function.
+
+However, truly enabling network programmability means allowing any developer to **easily** create their own SRv6 LocalSID functions. That is why we have added API calls so that any developer can code their own SRv6 LocalSID behaviors as plugins and add them to the running SRv6 code.
+
+The principle is that the developer only codes the behavior (the graph node); all the FIB handling, SR LocalSID instantiation and so on are done by the VPP SRv6 code.
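+
+A minimal registration sketch is shown below. It only illustrates the shape of the call to `sr_localsid_register_function()` declared in `sr.h`; the behavior name, CLI keyword, DPO type and the format/unformat/callback helpers are hypothetical placeholders that a real plugin would provide.
+
+```c
+/* Provided elsewhere by the plugin (hypothetical names) */
+extern dpo_type_t my_dpo_type;
+extern format_function_t my_ls_format;
+extern unformat_function_t my_ls_unformat;
+
+static int my_creation_fn (ip6_sr_localsid_t * ls) { return 0; }
+static int my_removal_fn (ip6_sr_localsid_t * ls) { return 0; }
+
+static clib_error_t *
+my_behavior_init (vlib_main_t * vm)
+{
+  /* Register the behavior; VPP then handles FIB and LocalSID instantiation */
+  sr_localsid_register_function (vm,
+                                 (u8 *) "my_behavior",
+                                 (u8 *) "end.my",
+                                 (u8 *) "Endpoint with my custom processing",
+                                 (u8 *) "<no parameters>",
+                                 &my_dpo_type,
+                                 my_ls_format, my_ls_unformat,
+                                 my_creation_fn, my_removal_fn);
+  return 0;
+}
+```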
+ +For more information please refer to: @subpage srv6_plugin_doc diff --git a/src/vnet/srv6/sr_localsid.c b/src/vnet/srv6/sr_localsid.c new file mode 100755 index 00000000..bdc66386 --- /dev/null +++ b/src/vnet/srv6/sr_localsid.c @@ -0,0 +1,1492 @@ +/* + * sr_localsid.c: ipv6 segment routing Endpoint behaviors + * + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file + * @brief Processing of packets with a SRH + * + * CLI to define new Segment Routing End processing functions. + * Graph node to support such functions. + * + * Each function associates an SRv6 segment (IPv6 address) with an specific + * Segment Routing function. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +/** + * @brief Dynamically added SR localsid DPO type + */ +static dpo_type_t sr_localsid_dpo_type; +static dpo_type_t sr_localsid_d_dpo_type; + +/** + * @brief SR localsid add/del + * + * Function to add or delete SR LocalSIDs. + * + * @param is_del Boolean of whether its a delete instruction + * @param localsid_addr IPv6 address of the localsid + * @param is_decap Boolean of whether decapsulation is allowed in this function + * @param behavior Type of behavior (function) for this localsid + * @param sw_if_index Only for L2/L3 xconnect. OIF. In VRF variant the fib_table. + * @param vlan_index Only for L2 xconnect. Outgoing VLAN tag. + * @param fib_table FIB table in which we should install the localsid entry + * @param nh_addr Next Hop IPv4/IPv6 address. Only for L2/L3 xconnect. + * + * @return 0 on success, error otherwise. 
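+ *         As implemented below, a successful delete returns 1; the negative
+ *         error codes are mapped to CLI messages by sr_cli_localsid_command_fn.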
+ */ +int +sr_cli_localsid (char is_del, ip6_address_t * localsid_addr, + char end_psp, u8 behavior, u32 sw_if_index, u32 vlan_index, + u32 fib_table, ip46_address_t * nh_addr, void *ls_plugin_mem) +{ + ip6_sr_main_t *sm = &sr_main; + uword *p; + int rv; + + ip6_sr_localsid_t *ls = 0; + + dpo_id_t dpo = DPO_INVALID; + + /* Search for the item */ + p = mhash_get (&sm->sr_localsids_index_hash, localsid_addr); + + if (p) + { + if (is_del) + { + /* Retrieve localsid */ + ls = pool_elt_at_index (sm->localsids, p[0]); + /* Delete FIB entry */ + fib_prefix_t pfx = { + .fp_proto = FIB_PROTOCOL_IP6, + .fp_len = 128, + .fp_addr = { + .ip6 = *localsid_addr, + } + }; + + fib_table_entry_delete (fib_table_find (FIB_PROTOCOL_IP6, + fib_table), + &pfx, FIB_SOURCE_SR); + + /* In case it is a Xconnect iface remove the (OIF, NHOP) adj */ + if (ls->behavior == SR_BEHAVIOR_X || ls->behavior == SR_BEHAVIOR_DX6 + || ls->behavior == SR_BEHAVIOR_DX4) + adj_unlock (ls->nh_adj); + + if (ls->behavior >= SR_BEHAVIOR_LAST) + { + sr_localsid_fn_registration_t *plugin = 0; + plugin = pool_elt_at_index (sm->plugin_functions, + ls->behavior - SR_BEHAVIOR_LAST); + + /* Callback plugin removal function */ + rv = plugin->removal (ls); + } + + /* Delete localsid registry */ + pool_put (sm->localsids, ls); + mhash_unset (&sm->sr_localsids_index_hash, localsid_addr, NULL); + return 1; + } + else /* create with function already existing; complain */ + return -1; + } + else + /* delete; localsid does not exist; complain */ + if (is_del) + return -2; + + /* Check whether there exists a FIB entry with such address */ + fib_prefix_t pfx = { + .fp_proto = FIB_PROTOCOL_IP6, + .fp_len = 128, + }; + + pfx.fp_addr.as_u64[0] = localsid_addr->as_u64[0]; + pfx.fp_addr.as_u64[1] = localsid_addr->as_u64[1]; + + /* Lookup the FIB index associated to the table id provided */ + u32 fib_index = fib_table_find (FIB_PROTOCOL_IP6, fib_table); + if (fib_index == ~0) + return -3; + + /* Lookup the localsid in such FIB table */ + fib_node_index_t fei = fib_table_lookup_exact_match (fib_index, &pfx); + if (FIB_NODE_INDEX_INVALID != fei) + return -4; //There is an entry for such address (the localsid addr) + + /* Create a new localsid registry */ + pool_get (sm->localsids, ls); + memset (ls, 0, sizeof (*ls)); + + clib_memcpy (&ls->localsid, localsid_addr, sizeof (ip6_address_t)); + ls->end_psp = end_psp; + ls->behavior = behavior; + ls->nh_adj = (u32) ~ 0; + ls->fib_table = fib_table; + switch (behavior) + { + case SR_BEHAVIOR_END: + break; + case SR_BEHAVIOR_X: + ls->sw_if_index = sw_if_index; + clib_memcpy (&ls->next_hop.ip6, &nh_addr->ip6, sizeof (ip6_address_t)); + break; + case SR_BEHAVIOR_DX4: + ls->sw_if_index = sw_if_index; + clib_memcpy (&ls->next_hop.ip4, &nh_addr->ip4, sizeof (ip4_address_t)); + break; + case SR_BEHAVIOR_DX6: + ls->sw_if_index = sw_if_index; + clib_memcpy (&ls->next_hop.ip6, &nh_addr->ip6, sizeof (ip6_address_t)); + break; + case SR_BEHAVIOR_DT6: + ls->vrf_index = sw_if_index; + break; + case SR_BEHAVIOR_DX2: + ls->sw_if_index = sw_if_index; + ls->vlan_index = vlan_index; + break; + } + + /* Figure out the adjacency magic for Xconnect variants */ + if (ls->behavior == SR_BEHAVIOR_X || ls->behavior == SR_BEHAVIOR_DX4 + || ls->behavior == SR_BEHAVIOR_DX6) + { + adj_index_t nh_adj_index = ADJ_INDEX_INVALID; + + /* Retrieve the adjacency corresponding to the (OIF, next_hop) */ + if (ls->behavior == SR_BEHAVIOR_DX6 || ls->behavior == SR_BEHAVIOR_X) + nh_adj_index = adj_nbr_add_or_lock (FIB_PROTOCOL_IP6, VNET_LINK_IP6, + nh_addr, 
sw_if_index); + + else if (ls->behavior == SR_BEHAVIOR_DX4) + nh_adj_index = adj_nbr_add_or_lock (FIB_PROTOCOL_IP4, VNET_LINK_IP4, + nh_addr, sw_if_index); + + /* Check for ADJ creation error. If so panic */ + if (nh_adj_index == ADJ_INDEX_INVALID) + { + pool_put (sm->localsids, ls); + return -5; + } + + ls->nh_adj = nh_adj_index; + } + + /* Set DPO */ + if (ls->behavior == SR_BEHAVIOR_END || ls->behavior == SR_BEHAVIOR_X) + dpo_set (&dpo, sr_localsid_dpo_type, DPO_PROTO_IP6, ls - sm->localsids); + else if (ls->behavior > SR_BEHAVIOR_D_FIRST + && ls->behavior < SR_BEHAVIOR_LAST) + dpo_set (&dpo, sr_localsid_d_dpo_type, DPO_PROTO_IP6, ls - sm->localsids); + else if (ls->behavior >= SR_BEHAVIOR_LAST) + { + sr_localsid_fn_registration_t *plugin = 0; + plugin = pool_elt_at_index (sm->plugin_functions, + ls->behavior - SR_BEHAVIOR_LAST); + /* Copy the unformat memory result */ + ls->plugin_mem = ls_plugin_mem; + /* Callback plugin creation function */ + rv = plugin->creation (ls); + if (rv) + { + pool_put (sm->localsids, ls); + return -6; + } + dpo_set (&dpo, plugin->dpo, DPO_PROTO_IP6, ls - sm->localsids); + } + + /* Set hash key for searching localsid by address */ + mhash_set (&sm->sr_localsids_index_hash, localsid_addr, ls - sm->localsids, + NULL); + + fib_table_entry_special_dpo_add (fib_index, &pfx, FIB_SOURCE_SR, + FIB_ENTRY_FLAG_EXCLUSIVE, &dpo); + dpo_reset (&dpo); + + /* Set counter to zero */ + vlib_validate_combined_counter (&(sm->sr_ls_valid_counters), + ls - sm->localsids); + vlib_validate_combined_counter (&(sm->sr_ls_invalid_counters), + ls - sm->localsids); + + vlib_zero_combined_counter (&(sm->sr_ls_valid_counters), + ls - sm->localsids); + vlib_zero_combined_counter (&(sm->sr_ls_invalid_counters), + ls - sm->localsids); + + return 0; +} + +/** + * @brief SR LocalSID CLI function. 
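+ * Example (illustrative addresses): "sr localsid address cafe::1 behavior end psp"
+ * creates an End SID with PSP; "sr localsid del address cafe::1" removes it.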
+ * + * @see sr_cli_localsid + */ +static clib_error_t * +sr_cli_localsid_command_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + vnet_main_t *vnm = vnet_get_main (); + ip6_sr_main_t *sm = &sr_main; + u32 sw_if_index = (u32) ~ 0, vlan_index = (u32) ~ 0, fib_index = 0; + int is_del = 0; + int end_psp = 0; + ip6_address_t resulting_address; + ip46_address_t next_hop; + char address_set = 0; + char behavior = 0; + void *ls_plugin_mem = 0; + + int rv; + + memset (&resulting_address, 0, sizeof (ip6_address_t)); + ip46_address_reset (&next_hop); + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "del")) + is_del = 1; + else if (!address_set + && unformat (input, "address %U", unformat_ip6_address, + &resulting_address)) + address_set = 1; + else if (!address_set + && unformat (input, "addr %U", unformat_ip6_address, + &resulting_address)) + address_set = 1; + else if (unformat (input, "fib-table %u", &fib_index)); + else if (vlan_index == (u32) ~ 0 + && unformat (input, "vlan %u", &vlan_index)); + else if (!behavior && unformat (input, "behavior")) + { + if (unformat (input, "end.x %U %U", + unformat_vnet_sw_interface, vnm, &sw_if_index, + unformat_ip6_address, &next_hop.ip6)) + behavior = SR_BEHAVIOR_X; + else if (unformat (input, "end.dx6 %U %U", + unformat_vnet_sw_interface, vnm, &sw_if_index, + unformat_ip6_address, &next_hop.ip6)) + behavior = SR_BEHAVIOR_DX6; + else if (unformat (input, "end.dx4 %U %U", + unformat_vnet_sw_interface, vnm, &sw_if_index, + unformat_ip4_address, &next_hop.ip4)) + behavior = SR_BEHAVIOR_DX4; + else if (unformat (input, "end.dx2 %U", + unformat_vnet_sw_interface, vnm, &sw_if_index)) + behavior = SR_BEHAVIOR_DX2; + else if (unformat (input, "end.dt6 %u", &sw_if_index)) + behavior = SR_BEHAVIOR_DT6; + else if (unformat (input, "end.dt4 %u", &sw_if_index)) + behavior = SR_BEHAVIOR_DT4; + else + { + /* Loop over all the plugin behavior format functions */ + sr_localsid_fn_registration_t *plugin = 0, **vec_plugins = 0; + sr_localsid_fn_registration_t **plugin_it = 0; + + /* Create a vector out of the plugin pool as recommended */ + /* *INDENT-OFF* */ + pool_foreach (plugin, sm->plugin_functions, + { + vec_add1 (vec_plugins, plugin); + }); + /* *INDENT-ON* */ + + vec_foreach (plugin_it, vec_plugins) + { + if (unformat + (input, "%U", (*plugin_it)->ls_unformat, &ls_plugin_mem)) + { + behavior = (*plugin_it)->sr_localsid_function_number; + break; + } + } + } + + if (!behavior) + { + if (unformat (input, "end")) + behavior = SR_BEHAVIOR_END; + else + break; + } + } + else if (!end_psp && unformat (input, "psp")) + end_psp = 1; + else + break; + } + + if (!behavior && end_psp) + behavior = SR_BEHAVIOR_END; + + if (!address_set) + return clib_error_return (0, + "Error: SRv6 LocalSID address is mandatory."); + if (!is_del && !behavior) + return clib_error_return (0, + "Error: SRv6 LocalSID behavior is mandatory."); + if (vlan_index != (u32) ~ 0) + return clib_error_return (0, + "Error: SRv6 End.DX2 with rewrite VLAN tag not supported by now."); + if (end_psp && !(behavior == SR_BEHAVIOR_END || behavior == SR_BEHAVIOR_X)) + return clib_error_return (0, + "Error: SRv6 PSP only compatible with End and End.X"); + + rv = sr_cli_localsid (is_del, &resulting_address, end_psp, behavior, + sw_if_index, vlan_index, fib_index, &next_hop, + ls_plugin_mem); + + switch (rv) + { + case 0: + break; + case 1: + return 0; + case -1: + return clib_error_return (0, + "Identical localsid already exists. 
Requested localsid not created."); + case -2: + return clib_error_return (0, + "The requested localsid could not be deleted. SR localsid not found"); + case -3: + return clib_error_return (0, "FIB table %u does not exist", fib_index); + case -4: + return clib_error_return (0, "There is already one FIB entry for the" + "requested localsid non segment routing related"); + case -5: + return clib_error_return (0, + "Could not create ARP/ND entry for such next_hop. Internal error."); + case -6: + return clib_error_return (0, + "Error on the plugin based localsid creation."); + default: + return clib_error_return (0, "BUG: sr localsid returns %d", rv); + } + return 0; +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (sr_localsid_command, static) = { + .path = "sr localsid", + .short_help = "sr localsid (del) address XX:XX::YY:YY" + "(fib-table 8) behavior STRING", + .long_help = + "Create SR LocalSID and binds it to a particular behavior\n" + "Arguments:\n" + "\tlocalSID IPv6_addr(128b) LocalSID IPv6 address\n" + "\t(fib-table X) Optional. VRF where to install SRv6 localsid\n" + "\tbehavior STRING Specifies the behavior\n" + "\n\tBehaviors:\n" + "\tEnd\t-> Endpoint.\n" + "\tEnd.X\t-> Endpoint with decapsulation and Layer-3 cross-connect.\n" + "\t\tParameters: ' '\n" + "\tEnd.DX2\t-> Endpoint with decapsulation and Layer-2 cross-connect.\n" + "\t\tParameters: ''\n" + "\tEnd.DX6\t-> Endpoint with decapsulation and IPv6 cross-connect.\n" + "\t\tParameters: ' '\n" + "\tEnd.DX4\t-> Endpoint with decapsulation and IPv4 cross-connect.\n" + "\t\tParameters: ' '\n" + "\tEnd.DT6\t-> Endpoint with decapsulation and specific IPv6 table lookup.\n" + "\t\tParameters: ''\n" + "\tEnd.DT4\t-> Endpoint with decapsulation and specific IPv4 table lookup.\n" + "\t\tParameters: ''\n", + .function = sr_cli_localsid_command_fn, +}; +/* *INDENT-ON* */ + +/** + * @brief CLI function to 'show' all SR LocalSIDs on console. 
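+ * Invoked by the CLI command "show sr localsids".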
+ */ +static clib_error_t * +show_sr_localsid_command_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + vnet_main_t *vnm = vnet_get_main (); + ip6_sr_main_t *sm = &sr_main; + ip6_sr_localsid_t **localsid_list = 0; + ip6_sr_localsid_t *ls; + int i; + + vlib_cli_output (vm, "SRv6 - My LocalSID Table:"); + vlib_cli_output (vm, "========================="); + /* *INDENT-OFF* */ + pool_foreach (ls, sm->localsids, ({ vec_add1 (localsid_list, ls); })); + /* *INDENT-ON* */ + for (i = 0; i < vec_len (localsid_list); i++) + { + ls = localsid_list[i]; + switch (ls->behavior) + { + case SR_BEHAVIOR_END: + vlib_cli_output (vm, "\tAddress: \t%U\n\tBehavior: \tEnd", + format_ip6_address, &ls->localsid); + break; + case SR_BEHAVIOR_X: + vlib_cli_output (vm, + "\tAddress: \t%U\n\tBehavior: \tX (Endpoint with Layer-3 cross-connect)" + "\n\tIface: \t%U\n\tNext hop: \t%U", + format_ip6_address, &ls->localsid, + format_vnet_sw_if_index_name, vnm, ls->sw_if_index, + format_ip6_address, &ls->next_hop.ip6); + break; + case SR_BEHAVIOR_DX4: + vlib_cli_output (vm, + "\tAddress: \t%U\n\tBehavior: \tDX4 (Endpoint with decapsulation and IPv4 cross-connect)" + "\n\tIface: \t%U\n\tNext hop: \t%U", + format_ip6_address, &ls->localsid, + format_vnet_sw_if_index_name, vnm, ls->sw_if_index, + format_ip4_address, &ls->next_hop.ip4); + break; + case SR_BEHAVIOR_DX6: + vlib_cli_output (vm, + "\tAddress: \t%U\n\tBehavior: \tDX6 (Endpoint with decapsulation and IPv6 cross-connect)" + "\n\tIface: \t%U\n\tNext hop: \t%U", + format_ip6_address, &ls->localsid, + format_vnet_sw_if_index_name, vnm, ls->sw_if_index, + format_ip6_address, &ls->next_hop.ip6); + break; + case SR_BEHAVIOR_DX2: + if (ls->vlan_index == (u32) ~ 0) + vlib_cli_output (vm, + "\tAddress: \t%U\n\tBehavior: \tDX2 (Endpoint with decapulation and Layer-2 cross-connect)" + "\n\tIface: \t%U", format_ip6_address, + &ls->localsid, format_vnet_sw_if_index_name, vnm, + ls->sw_if_index); + else + vlib_cli_output (vm, + "Unsupported yet. (DX2 with egress VLAN rewrite)"); + break; + case SR_BEHAVIOR_DT6: + vlib_cli_output (vm, + "\tAddress: \t%U\n\tBehavior: \tDT6 (Endpoint with decapsulation and specific IPv6 table lookup)" + "\n\tTable: %u", format_ip6_address, &ls->localsid, + ls->fib_table); + break; + case SR_BEHAVIOR_DT4: + vlib_cli_output (vm, + "\tAddress: \t%U\n\tBehavior: \tDT4 (Endpoint with decapsulation and specific IPv4 table lookup)" + "\n\tTable: \t%u", format_ip6_address, + &ls->localsid, ls->fib_table); + break; + default: + if (ls->behavior >= SR_BEHAVIOR_LAST) + { + sr_localsid_fn_registration_t *plugin = + pool_elt_at_index (sm->plugin_functions, + ls->behavior - SR_BEHAVIOR_LAST); + + vlib_cli_output (vm, "\tAddress: \t%U\n" + "\tBehavior: \t%s (%s)\n\t%U", + format_ip6_address, &ls->localsid, + plugin->keyword_str, plugin->def_str, + plugin->ls_format, ls->plugin_mem); + } + else + //Should never get here... 
+ vlib_cli_output (vm, "Internal error"); + break; + } + if (ls->end_psp) + vlib_cli_output (vm, "\tPSP: \tTrue\n"); + + /* Print counters */ + vlib_counter_t valid, invalid; + vlib_get_combined_counter (&(sm->sr_ls_valid_counters), i, &valid); + vlib_get_combined_counter (&(sm->sr_ls_invalid_counters), i, &invalid); + vlib_cli_output (vm, "\tGood traffic: \t[%Ld packets : %Ld bytes]\n", + valid.packets, valid.bytes); + vlib_cli_output (vm, "\tBad traffic: \t[%Ld packets : %Ld bytes]\n", + invalid.packets, invalid.bytes); + vlib_cli_output (vm, "--------------------"); + } + return 0; +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (show_sr_localsid_command, static) = { + .path = "show sr localsids", + .short_help = "show sr localsids", + .function = show_sr_localsid_command_fn, +}; +/* *INDENT-ON* */ + +/** + * @brief Function to 'clear' ALL SR localsid counters + */ +static clib_error_t * +clear_sr_localsid_counters_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + ip6_sr_main_t *sm = &sr_main; + + vlib_clear_combined_counters (&(sm->sr_ls_valid_counters)); + vlib_clear_combined_counters (&(sm->sr_ls_invalid_counters)); + + return 0; +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (clear_sr_localsid_counters_command, static) = { + .path = "clear sr localsid counters", + .short_help = "clear sr localsid counters", + .function = clear_sr_localsid_counters_command_fn, +}; +/* *INDENT-ON* */ + +/************************ SR LocalSID graphs node ****************************/ +/** + * @brief SR localsid node trace + */ +typedef struct +{ + u32 localsid_index; + ip6_address_t src, out_dst; + u8 sr[256]; + u8 num_segments; + u8 segments_left; + //With SRv6 header update include flags here. +} sr_localsid_trace_t; + +#define foreach_sr_localsid_error \ +_(NO_INNER_HEADER, "(SR-Error) No inner IP header") \ +_(NO_MORE_SEGMENTS, "(SR-Error) No more segments") \ +_(NO_SRH, "(SR-Error) No SR header") \ +_(NO_PSP, "(SR-Error) PSP Not available (segments left > 0)") \ +_(NOT_LS, "(SR-Error) Decaps not available (segments left > 0)") \ +_(L2, "(SR-Error) SRv6 decapsulated a L2 frame without dest") + +typedef enum +{ +#define _(sym,str) SR_LOCALSID_ERROR_##sym, + foreach_sr_localsid_error +#undef _ + SR_LOCALSID_N_ERROR, +} sr_localsid_error_t; + +static char *sr_localsid_error_strings[] = { +#define _(sym,string) string, + foreach_sr_localsid_error +#undef _ +}; + +#define foreach_sr_localsid_next \ +_(ERROR, "error-drop") \ +_(IP6_LOOKUP, "ip6-lookup") \ +_(IP4_LOOKUP, "ip4-lookup") \ +_(IP6_REWRITE, "ip6-rewrite") \ +_(IP4_REWRITE, "ip4-rewrite") \ +_(INTERFACE_OUTPUT, "interface-output") + +typedef enum +{ +#define _(s,n) SR_LOCALSID_NEXT_##s, + foreach_sr_localsid_next +#undef _ + SR_LOCALSID_N_NEXT, +} sr_localsid_next_t; + +/** + * @brief SR LocalSID graph node trace function + * + * @see sr_localsid + */ +u8 * +format_sr_localsid_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + ip6_sr_main_t *sm = &sr_main; + sr_localsid_trace_t *t = va_arg (*args, sr_localsid_trace_t *); + + ip6_sr_localsid_t *ls = + pool_elt_at_index (sm->localsids, t->localsid_index); + + s = + format (s, "SR-LOCALSID:\n\tLocalsid: %U\n", format_ip6_address, + &ls->localsid); + switch (ls->behavior) + { + case SR_BEHAVIOR_END: + s = format (s, "\tBehavior: End\n"); + break; + case SR_BEHAVIOR_DX6: + s = format (s, "\tBehavior: Decapsulation with IPv6 L3 xconnect\n"); + break; + 
case SR_BEHAVIOR_DX4: + s = format (s, "\tBehavior: Decapsulation with IPv4 L3 xconnect\n"); + break; + case SR_BEHAVIOR_X: + s = format (s, "\tBehavior: IPv6 L3 xconnect\n"); + break; + case SR_BEHAVIOR_DT6: + s = format (s, "\tBehavior: Decapsulation with IPv6 Table lookup\n"); + break; + case SR_BEHAVIOR_DT4: + s = format (s, "\tBehavior: Decapsulation with IPv4 Table lookup\n"); + break; + case SR_BEHAVIOR_DX2: + s = format (s, "\tBehavior: Decapsulation with L2 xconnect\n"); + break; + default: + s = format (s, "\tBehavior: defined in plugin\n"); //TODO + break; + } + if (t->num_segments != 0xFF) + { + if (t->num_segments > 0) + { + s = format (s, "\tSegments left: %d\n", t->num_segments); + s = format (s, "\tSID list: [in ietf order]"); + int i = 0; + for (i = 0; i < t->num_segments; i++) + { + s = format (s, "\n\t-> %U", format_ip6_address, + (ip6_address_t *) & t->sr[i * + sizeof (ip6_address_t)]); + } + } + } + return s; +} + +/** + * @brief Function doing End processing. + */ +static_always_inline void +end_srh_processing (vlib_node_runtime_t * node, + vlib_buffer_t * b0, + ip6_header_t * ip0, + ip6_sr_header_t * sr0, + ip6_sr_localsid_t * ls0, u32 * next0) +{ + ip6_address_t *new_dst0; + + if (PREDICT_TRUE (sr0->type == ROUTING_HEADER_TYPE_SR)) + { + if (PREDICT_TRUE (sr0->segments_left != 0)) + { + sr0->segments_left -= 1; + new_dst0 = (ip6_address_t *) (sr0->segments); + new_dst0 += sr0->segments_left; + ip0->dst_address.as_u64[0] = new_dst0->as_u64[0]; + ip0->dst_address.as_u64[1] = new_dst0->as_u64[1]; + + if (ls0->behavior == SR_BEHAVIOR_X) + { + vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ls0->nh_adj; + *next0 = SR_LOCALSID_NEXT_IP6_REWRITE; + } + } + else + { + *next0 = SR_LOCALSID_NEXT_ERROR; + b0->error = node->errors[SR_LOCALSID_ERROR_NO_MORE_SEGMENTS]; + } + } + else + { + /* Error. Routing header of type != SR */ + *next0 = SR_LOCALSID_NEXT_ERROR; + b0->error = node->errors[SR_LOCALSID_ERROR_NO_SRH]; + } +} + +/* + * @brief Function doing SRH processing for D* variants + */ +//FixME. I must crosscheck that next_proto matches the localsid +static_always_inline void +end_decaps_srh_processing (vlib_node_runtime_t * node, + vlib_buffer_t * b0, + ip6_header_t * ip0, + ip6_sr_header_t * sr0, + ip6_sr_localsid_t * ls0, u32 * next0) +{ + /* Compute the size of the IPv6 header with all Ext. headers */ + u8 next_proto; + ip6_ext_header_t *next_ext_header; + u16 total_size = 0; + + next_proto = ip0->protocol; + next_ext_header = (void *) (ip0 + 1); + total_size = sizeof (ip6_header_t); + while (ip6_ext_hdr (next_proto)) + { + total_size += ip6_ext_header_len (next_ext_header); + next_proto = next_ext_header->next_hdr; + next_ext_header = ip6_ext_next_header (next_ext_header); + } + + /* Ensure this is the last segment. Otherwise drop. */ + if (sr0 && sr0->segments_left != 0) + { + *next0 = SR_LOCALSID_NEXT_ERROR; + b0->error = node->errors[SR_LOCALSID_ERROR_NOT_LS]; + return; + } + + switch (next_proto) + { + case IP_PROTOCOL_IPV6: + /* Encap-End IPv6. Pop outer IPv6 header. */ + if (ls0->behavior == SR_BEHAVIOR_DX6) + { + vlib_buffer_advance (b0, total_size); + vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ls0->nh_adj; + *next0 = SR_LOCALSID_NEXT_IP6_REWRITE; + return; + } + else if (ls0->behavior == SR_BEHAVIOR_DT6) + { + vlib_buffer_advance (b0, total_size); + vnet_buffer (b0)->sw_if_index[VLIB_TX] = ls0->fib_table; + return; + } + break; + case IP_PROTOCOL_IP_IN_IP: + /* Encap-End IPv4. 
Pop outer IPv6 header */ + if (ls0->behavior == SR_BEHAVIOR_DX4) + { + vlib_buffer_advance (b0, total_size); + vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ls0->nh_adj; + *next0 = SR_LOCALSID_NEXT_IP4_REWRITE; + return; + } + else if (ls0->behavior == SR_BEHAVIOR_DT4) + { + vlib_buffer_advance (b0, total_size); + vnet_buffer (b0)->sw_if_index[VLIB_TX] = ls0->fib_table; + *next0 = SR_LOCALSID_NEXT_IP4_LOOKUP; + return; + } + break; + case IP_PROTOCOL_IP6_NONXT: + /* L2 encaps */ + if (ls0->behavior == SR_BEHAVIOR_DX2) + { + vlib_buffer_advance (b0, total_size); + vnet_buffer (b0)->sw_if_index[VLIB_TX] = ls0->sw_if_index; + *next0 = SR_LOCALSID_NEXT_INTERFACE_OUTPUT; + return; + } + break; + } + *next0 = SR_LOCALSID_NEXT_ERROR; + b0->error = node->errors[SR_LOCALSID_ERROR_NO_INNER_HEADER]; + return; +} + +/** + * @brief Function doing End processing with PSP + */ +static_always_inline void +end_psp_srh_processing (vlib_node_runtime_t * node, + vlib_buffer_t * b0, + ip6_header_t * ip0, + ip6_ext_header_t * prev0, + ip6_sr_header_t * sr0, + ip6_sr_localsid_t * ls0, u32 * next0) +{ + u32 new_l0, sr_len; + u64 *copy_dst0, *copy_src0; + u32 copy_len_u64s0 = 0; + int i; + + if (PREDICT_TRUE (sr0->type == ROUTING_HEADER_TYPE_SR)) + { + if (PREDICT_TRUE (sr0->segments_left == 1)) + { + ip0->dst_address.as_u64[0] = sr0->segments->as_u64[0]; + ip0->dst_address.as_u64[1] = sr0->segments->as_u64[1]; + + /* Remove the SRH taking care of the rest of IPv6 ext header */ + if (prev0) + prev0->next_hdr = sr0->protocol; + else + ip0->protocol = sr0->protocol; + + sr_len = ip6_ext_header_len (sr0); + vlib_buffer_advance (b0, sr_len); + new_l0 = clib_net_to_host_u16 (ip0->payload_length) - sr_len; + ip0->payload_length = clib_host_to_net_u16 (new_l0); + copy_src0 = (u64 *) ip0; + copy_dst0 = copy_src0 + (sr0->length + 1); + /* number of 8 octet units to copy + * By default in absence of extension headers it is equal to length of ip6 header + * With extension headers it number of 8 octet units of ext headers preceding + * SR header + */ + copy_len_u64s0 = + (((u8 *) sr0 - (u8 *) ip0) - sizeof (ip6_header_t)) >> 3; + copy_dst0[4 + copy_len_u64s0] = copy_src0[4 + copy_len_u64s0]; + copy_dst0[3 + copy_len_u64s0] = copy_src0[3 + copy_len_u64s0]; + copy_dst0[2 + copy_len_u64s0] = copy_src0[2 + copy_len_u64s0]; + copy_dst0[1 + copy_len_u64s0] = copy_src0[1 + copy_len_u64s0]; + copy_dst0[0 + copy_len_u64s0] = copy_src0[0 + copy_len_u64s0]; + + for (i = copy_len_u64s0 - 1; i >= 0; i--) + { + copy_dst0[i] = copy_src0[i]; + } + + if (ls0->behavior == SR_BEHAVIOR_X) + { + vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ls0->nh_adj; + *next0 = SR_LOCALSID_NEXT_IP6_REWRITE; + } + return; + } + } + /* Error. Routing header of type != SR */ + *next0 = SR_LOCALSID_NEXT_ERROR; + b0->error = node->errors[SR_LOCALSID_ERROR_NO_PSP]; +} + +/** + * @brief SR LocalSID graph node. 
Supports all default SR Endpoint variants + */ +static uword +sr_localsid_d_fn (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + u32 n_left_from, next_index, *from, *to_next; + ip6_sr_main_t *sm = &sr_main; + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + next_index = node->cached_next_index; + u32 thread_index = vlib_get_thread_index (); + + while (n_left_from > 0) + { + u32 n_left_to_next; + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + /* Quad - Loop */ + while (n_left_from >= 8 && n_left_to_next >= 4) + { + u32 bi0, bi1, bi2, bi3; + vlib_buffer_t *b0, *b1, *b2, *b3; + ip6_header_t *ip0, *ip1, *ip2, *ip3; + ip6_ext_header_t *prev0, *prev1, *prev2, *prev3; + ip6_sr_header_t *sr0, *sr1, *sr2, *sr3; + u32 next0, next1, next2, next3; + next0 = next1 = next2 = next3 = SR_LOCALSID_NEXT_IP6_LOOKUP; + ip6_sr_localsid_t *ls0, *ls1, *ls2, *ls3; + + /* Prefetch next iteration. */ + { + vlib_buffer_t *p4, *p5, *p6, *p7; + + p4 = vlib_get_buffer (vm, from[4]); + p5 = vlib_get_buffer (vm, from[5]); + p6 = vlib_get_buffer (vm, from[6]); + p7 = vlib_get_buffer (vm, from[7]); + + /* Prefetch the buffer header and packet for the N+4 loop iteration */ + vlib_prefetch_buffer_header (p4, LOAD); + vlib_prefetch_buffer_header (p5, LOAD); + vlib_prefetch_buffer_header (p6, LOAD); + vlib_prefetch_buffer_header (p7, LOAD); + + CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE); + } + + to_next[0] = bi0 = from[0]; + to_next[1] = bi1 = from[1]; + to_next[2] = bi2 = from[2]; + to_next[3] = bi3 = from[3]; + from += 4; + to_next += 4; + n_left_from -= 4; + n_left_to_next -= 4; + + b0 = vlib_get_buffer (vm, bi0); + b1 = vlib_get_buffer (vm, bi1); + b2 = vlib_get_buffer (vm, bi2); + b3 = vlib_get_buffer (vm, bi3); + + ls0 = + pool_elt_at_index (sm->localsids, + vnet_buffer (b0)->ip.adj_index[VLIB_TX]); + ls1 = + pool_elt_at_index (sm->localsids, + vnet_buffer (b0)->ip.adj_index[VLIB_TX]); + ls2 = + pool_elt_at_index (sm->localsids, + vnet_buffer (b0)->ip.adj_index[VLIB_TX]); + ls3 = + pool_elt_at_index (sm->localsids, + vnet_buffer (b0)->ip.adj_index[VLIB_TX]); + + ip0 = vlib_buffer_get_current (b0); + ip1 = vlib_buffer_get_current (b1); + ip2 = vlib_buffer_get_current (b2); + ip3 = vlib_buffer_get_current (b3); + + ip6_ext_header_find_t (ip0, prev0, sr0, IP_PROTOCOL_IPV6_ROUTE); + ip6_ext_header_find_t (ip1, prev1, sr1, IP_PROTOCOL_IPV6_ROUTE); + ip6_ext_header_find_t (ip2, prev2, sr2, IP_PROTOCOL_IPV6_ROUTE); + ip6_ext_header_find_t (ip3, prev3, sr3, IP_PROTOCOL_IPV6_ROUTE); + + end_decaps_srh_processing (node, b0, ip0, sr0, ls0, &next0); + end_decaps_srh_processing (node, b1, ip1, sr1, ls1, &next1); + end_decaps_srh_processing (node, b2, ip2, sr2, ls2, &next2); + end_decaps_srh_processing (node, b3, ip3, sr3, ls3, &next3); + + //TODO: trace. + + vlib_increment_combined_counter + (((next0 == + SR_LOCALSID_NEXT_ERROR) ? &(sm->sr_ls_invalid_counters) : + &(sm->sr_ls_valid_counters)), thread_index, ls0 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b0)); + + vlib_increment_combined_counter + (((next1 == + SR_LOCALSID_NEXT_ERROR) ? 
&(sm->sr_ls_invalid_counters) : + &(sm->sr_ls_valid_counters)), thread_index, ls1 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b1)); + + vlib_increment_combined_counter + (((next2 == + SR_LOCALSID_NEXT_ERROR) ? &(sm->sr_ls_invalid_counters) : + &(sm->sr_ls_valid_counters)), thread_index, ls2 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b2)); + + vlib_increment_combined_counter + (((next3 == + SR_LOCALSID_NEXT_ERROR) ? &(sm->sr_ls_invalid_counters) : + &(sm->sr_ls_valid_counters)), thread_index, ls3 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b3)); + + vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next, + n_left_to_next, bi0, bi1, bi2, bi3, + next0, next1, next2, next3); + } + + /* Single loop for potentially the last three packets */ + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t *b0; + ip6_header_t *ip0; + ip6_ext_header_t *prev0; + ip6_sr_header_t *sr0; + u32 next0 = SR_LOCALSID_NEXT_IP6_LOOKUP; + ip6_sr_localsid_t *ls0; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + ip0 = vlib_buffer_get_current (b0); + + /* Lookup the SR End behavior based on IP DA (adj) */ + ls0 = + pool_elt_at_index (sm->localsids, + vnet_buffer (b0)->ip.adj_index[VLIB_TX]); + + /* Find SRH as well as previous header */ + ip6_ext_header_find_t (ip0, prev0, sr0, IP_PROTOCOL_IPV6_ROUTE); + + /* SRH processing and End variants */ + end_decaps_srh_processing (node, b0, ip0, sr0, ls0, &next0); + + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_localsid_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + tr->num_segments = 0; + tr->localsid_index = ls0 - sm->localsids; + + if (ip0 == vlib_buffer_get_current (b0)) + { + clib_memcpy (tr->src.as_u8, ip0->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->out_dst.as_u8, ip0->dst_address.as_u8, + sizeof (tr->out_dst.as_u8)); + if (ip0->protocol == IP_PROTOCOL_IPV6_ROUTE + && sr0->type == ROUTING_HEADER_TYPE_SR) + { + clib_memcpy (tr->sr, sr0->segments, sr0->length * 8); + tr->num_segments = + sr0->length * 8 / sizeof (ip6_address_t); + tr->segments_left = sr0->segments_left; + } + } + else + tr->num_segments = 0xFF; + } + + /* Increase the counters */ + vlib_increment_combined_counter + (((next0 == + SR_LOCALSID_NEXT_ERROR) ? &(sm->sr_ls_invalid_counters) : + &(sm->sr_ls_valid_counters)), thread_index, ls0 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b0)); + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, bi0, next0); + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + return from_frame->n_vectors; +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (sr_localsid_d_node) = { + .function = sr_localsid_d_fn, + .name = "sr-localsid-d", + .vector_size = sizeof (u32), + .format_trace = format_sr_localsid_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + .n_errors = SR_LOCALSID_N_ERROR, + .error_strings = sr_localsid_error_strings, + .n_next_nodes = SR_LOCALSID_N_NEXT, + .next_nodes = { +#define _(s,n) [SR_LOCALSID_NEXT_##s] = n, + foreach_sr_localsid_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +/** + * @brief SR LocalSID graph node. 
Supports all default SR Endpoint variants
+ */
+static uword
+sr_localsid_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+                vlib_frame_t * from_frame)
+{
+  u32 n_left_from, next_index, *from, *to_next;
+  ip6_sr_main_t *sm = &sr_main;
+  from = vlib_frame_vector_args (from_frame);
+  n_left_from = from_frame->n_vectors;
+  next_index = node->cached_next_index;
+  u32 thread_index = vlib_get_thread_index ();
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      /* Quad - Loop */
+      while (n_left_from >= 8 && n_left_to_next >= 4)
+        {
+          u32 bi0, bi1, bi2, bi3;
+          vlib_buffer_t *b0, *b1, *b2, *b3;
+          ip6_header_t *ip0, *ip1, *ip2, *ip3;
+          ip6_sr_header_t *sr0, *sr1, *sr2, *sr3;
+          ip6_ext_header_t *prev0, *prev1, *prev2, *prev3;
+          u32 next0, next1, next2, next3;
+          next0 = next1 = next2 = next3 = SR_LOCALSID_NEXT_IP6_LOOKUP;
+          ip6_sr_localsid_t *ls0, *ls1, *ls2, *ls3;
+
+          /* Prefetch next iteration. */
+          {
+            vlib_buffer_t *p4, *p5, *p6, *p7;
+
+            p4 = vlib_get_buffer (vm, from[4]);
+            p5 = vlib_get_buffer (vm, from[5]);
+            p6 = vlib_get_buffer (vm, from[6]);
+            p7 = vlib_get_buffer (vm, from[7]);
+
+            /* Prefetch the buffer header and packet for the N+2 loop iteration */
+            vlib_prefetch_buffer_header (p4, LOAD);
+            vlib_prefetch_buffer_header (p5, LOAD);
+            vlib_prefetch_buffer_header (p6, LOAD);
+            vlib_prefetch_buffer_header (p7, LOAD);
+
+            CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
+            CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
+            CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
+            CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
+          }
+
+          to_next[0] = bi0 = from[0];
+          to_next[1] = bi1 = from[1];
+          to_next[2] = bi2 = from[2];
+          to_next[3] = bi3 = from[3];
+          from += 4;
+          to_next += 4;
+          n_left_from -= 4;
+          n_left_to_next -= 4;
+
+          b0 = vlib_get_buffer (vm, bi0);
+          b1 = vlib_get_buffer (vm, bi1);
+          b2 = vlib_get_buffer (vm, bi2);
+          b3 = vlib_get_buffer (vm, bi3);
+
+          ip0 = vlib_buffer_get_current (b0);
+          ip1 = vlib_buffer_get_current (b1);
+          ip2 = vlib_buffer_get_current (b2);
+          ip3 = vlib_buffer_get_current (b3);
+
+          ip6_ext_header_find_t (ip0, prev0, sr0, IP_PROTOCOL_IPV6_ROUTE);
+          ip6_ext_header_find_t (ip1, prev1, sr1, IP_PROTOCOL_IPV6_ROUTE);
+          ip6_ext_header_find_t (ip2, prev2, sr2, IP_PROTOCOL_IPV6_ROUTE);
+          ip6_ext_header_find_t (ip3, prev3, sr3, IP_PROTOCOL_IPV6_ROUTE);
+
+          ls0 =
+            pool_elt_at_index (sm->localsids,
+                               vnet_buffer (b0)->ip.adj_index[VLIB_TX]);
+          ls1 =
+            pool_elt_at_index (sm->localsids,
+                               vnet_buffer (b1)->ip.adj_index[VLIB_TX]);
+          ls2 =
+            pool_elt_at_index (sm->localsids,
+                               vnet_buffer (b2)->ip.adj_index[VLIB_TX]);
+          ls3 =
+            pool_elt_at_index (sm->localsids,
+                               vnet_buffer (b3)->ip.adj_index[VLIB_TX]);
+
+          if (ls0->end_psp)
+            end_psp_srh_processing (node, b0, ip0, prev0, sr0, ls0, &next0);
+          else
+            end_srh_processing (node, b0, ip0, sr0, ls0, &next0);
+
+          if (ls1->end_psp)
+            end_psp_srh_processing (node, b1, ip1, prev1, sr1, ls1, &next1);
+          else
+            end_srh_processing (node, b1, ip1, sr1, ls1, &next1);
+
+          if (ls2->end_psp)
+            end_psp_srh_processing (node, b2, ip2, prev2, sr2, ls2, &next2);
+          else
+            end_srh_processing (node, b2, ip2, sr2, ls2, &next2);
+
+          if (ls3->end_psp)
+            end_psp_srh_processing (node, b3, ip3, prev3, sr3, ls3, &next3);
+          else
+            end_srh_processing (node, b3, ip3, sr3, ls3, &next3);
+
+          //TODO: proper trace.
+
+          vlib_increment_combined_counter
+            (((next0 ==
+               SR_LOCALSID_NEXT_ERROR) ?
&(sm->sr_ls_invalid_counters) : + &(sm->sr_ls_valid_counters)), thread_index, ls0 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b0)); + + vlib_increment_combined_counter + (((next1 == + SR_LOCALSID_NEXT_ERROR) ? &(sm->sr_ls_invalid_counters) : + &(sm->sr_ls_valid_counters)), thread_index, ls1 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b1)); + + vlib_increment_combined_counter + (((next2 == + SR_LOCALSID_NEXT_ERROR) ? &(sm->sr_ls_invalid_counters) : + &(sm->sr_ls_valid_counters)), thread_index, ls2 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b2)); + + vlib_increment_combined_counter + (((next3 == + SR_LOCALSID_NEXT_ERROR) ? &(sm->sr_ls_invalid_counters) : + &(sm->sr_ls_valid_counters)), thread_index, ls3 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b3)); + + vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next, + n_left_to_next, bi0, bi1, bi2, bi3, + next0, next1, next2, next3); + } + + /* Single loop for potentially the last three packets */ + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t *b0; + ip6_header_t *ip0 = 0; + ip6_ext_header_t *prev0; + ip6_sr_header_t *sr0; + u32 next0 = SR_LOCALSID_NEXT_IP6_LOOKUP; + ip6_sr_localsid_t *ls0; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + ip0 = vlib_buffer_get_current (b0); + ip6_ext_header_find_t (ip0, prev0, sr0, IP_PROTOCOL_IPV6_ROUTE); + + /* Lookup the SR End behavior based on IP DA (adj) */ + ls0 = + pool_elt_at_index (sm->localsids, + vnet_buffer (b0)->ip.adj_index[VLIB_TX]); + + /* SRH processing */ + if (ls0->end_psp) + end_psp_srh_processing (node, b0, ip0, prev0, sr0, ls0, &next0); + else + end_srh_processing (node, b0, ip0, sr0, ls0, &next0); + + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_localsid_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + tr->num_segments = 0; + tr->localsid_index = ls0 - sm->localsids; + + if (ip0 == vlib_buffer_get_current (b0)) + { + clib_memcpy (tr->src.as_u8, ip0->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->out_dst.as_u8, ip0->dst_address.as_u8, + sizeof (tr->out_dst.as_u8)); + if (ip0->protocol == IP_PROTOCOL_IPV6_ROUTE + && sr0->type == ROUTING_HEADER_TYPE_SR) + { + clib_memcpy (tr->sr, sr0->segments, sr0->length * 8); + tr->num_segments = + sr0->length * 8 / sizeof (ip6_address_t); + tr->segments_left = sr0->segments_left; + } + } + else + { + tr->num_segments = 0xFF; + } + } + + vlib_increment_combined_counter + (((next0 == + SR_LOCALSID_NEXT_ERROR) ? 
&(sm->sr_ls_invalid_counters) : + &(sm->sr_ls_valid_counters)), thread_index, ls0 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b0)); + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, bi0, next0); + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + return from_frame->n_vectors; +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (sr_localsid_node) = { + .function = sr_localsid_fn, + .name = "sr-localsid", + .vector_size = sizeof (u32), + .format_trace = format_sr_localsid_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + .n_errors = SR_LOCALSID_N_ERROR, + .error_strings = sr_localsid_error_strings, + .n_next_nodes = SR_LOCALSID_N_NEXT, + .next_nodes = { +#define _(s,n) [SR_LOCALSID_NEXT_##s] = n, + foreach_sr_localsid_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +static u8 * +format_sr_dpo (u8 * s, va_list * args) +{ + index_t index = va_arg (*args, index_t); + CLIB_UNUSED (u32 indent) = va_arg (*args, u32); + + return (format (s, "SR: localsid_index:[%d]", index)); +} + +const static dpo_vft_t sr_loc_vft = { + .dv_lock = sr_dpo_lock, + .dv_unlock = sr_dpo_unlock, + .dv_format = format_sr_dpo, +}; + +const static char *const sr_loc_ip6_nodes[] = { + "sr-localsid", + NULL, +}; + +const static char *const *const sr_loc_nodes[DPO_PROTO_NUM] = { + [DPO_PROTO_IP6] = sr_loc_ip6_nodes, +}; + +const static char *const sr_loc_d_ip6_nodes[] = { + "sr-localsid-d", + NULL, +}; + +const static char *const *const sr_loc_d_nodes[DPO_PROTO_NUM] = { + [DPO_PROTO_IP6] = sr_loc_d_ip6_nodes, +}; + + +/*************************** SR LocalSID plugins ******************************/ +/** + * @brief SR LocalSID plugin registry + */ +int +sr_localsid_register_function (vlib_main_t * vm, u8 * fn_name, + u8 * keyword_str, u8 * def_str, + u8 * params_str, dpo_type_t * dpo, + format_function_t * ls_format, + unformat_function_t * ls_unformat, + sr_plugin_callback_t * creation_fn, + sr_plugin_callback_t * removal_fn) +{ + ip6_sr_main_t *sm = &sr_main; + uword *p; + + sr_localsid_fn_registration_t *plugin; + + /* Did this function exist? 
If so update it */ + p = hash_get_mem (sm->plugin_functions_by_key, fn_name); + if (p) + { + plugin = pool_elt_at_index (sm->plugin_functions, p[0]); + } + /* Else create a new one and set hash key */ + else + { + pool_get (sm->plugin_functions, plugin); + hash_set_mem (sm->plugin_functions_by_key, fn_name, + plugin - sm->plugin_functions); + } + + memset (plugin, 0, sizeof (*plugin)); + + plugin->sr_localsid_function_number = (plugin - sm->plugin_functions); + plugin->sr_localsid_function_number += SR_BEHAVIOR_LAST; + plugin->ls_format = ls_format; + plugin->ls_unformat = ls_unformat; + plugin->creation = creation_fn; + plugin->removal = removal_fn; + clib_memcpy (&plugin->dpo, dpo, sizeof (dpo_type_t)); + plugin->function_name = format (0, "%s%c", fn_name, 0); + plugin->keyword_str = format (0, "%s%c", keyword_str, 0); + plugin->def_str = format (0, "%s%c", def_str, 0); + plugin->params_str = format (0, "%s%c", params_str, 0); + + return plugin->sr_localsid_function_number; +} + +/** + * @brief CLI function to 'show' all available SR LocalSID behaviors + */ +static clib_error_t * +show_sr_localsid_behaviors_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + ip6_sr_main_t *sm = &sr_main; + sr_localsid_fn_registration_t *plugin; + sr_localsid_fn_registration_t **plugins_vec = 0; + int i; + + vlib_cli_output (vm, + "SR LocalSIDs behaviors:\n-----------------------\n\n"); + + /* *INDENT-OFF* */ + pool_foreach (plugin, sm->plugin_functions, + ({ vec_add1 (plugins_vec, plugin); })); + /* *INDENT-ON* */ + + /* Print static behaviors */ + vlib_cli_output (vm, "Default behaviors:\n" + "\tEnd\t-> Endpoint.\n" + "\tEnd.X\t-> Endpoint with decapsulation and Layer-3 cross-connect.\n" + "\t\tParameters: ' '\n" + "\tEnd.DX2\t-> Endpoint with decapsulation and Layer-2 cross-connect.\n" + "\t\tParameters: ''\n" + "\tEnd.DX6\t-> Endpoint with decapsulation and IPv6 cross-connect.\n" + "\t\tParameters: ' '\n" + "\tEnd.DX4\t-> Endpoint with decapsulation and IPv4 cross-connect.\n" + "\t\tParameters: ' '\n" + "\tEnd.DT6\t-> Endpoint with decapsulation and specific IPv6 table lookup.\n" + "\t\tParameters: ''\n" + "\tEnd.DT4\t-> Endpoint with decapsulation and specific IPv4 table lookup.\n" + "\t\tParameters: ''\n"); + vlib_cli_output (vm, "Plugin behaviors:\n"); + for (i = 0; i < vec_len (plugins_vec); i++) + { + plugin = plugins_vec[i]; + vlib_cli_output (vm, "\t%s\t-> %s.\n", plugin->keyword_str, + plugin->def_str); + vlib_cli_output (vm, "\t\tParameters: '%s'\n", plugin->params_str); + } + return 0; +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (show_sr_localsid_behaviors_command, static) = { + .path = "show sr localsids behaviors", + .short_help = "show sr localsids behaviors", + .function = show_sr_localsid_behaviors_command_fn, +}; +/* *INDENT-ON* */ + +/** + * @brief SR LocalSID initialization + */ +clib_error_t * +sr_localsids_init (vlib_main_t * vm) +{ + /* Init memory for function keys */ + ip6_sr_main_t *sm = &sr_main; + mhash_init (&sm->sr_localsids_index_hash, sizeof (uword), + sizeof (ip6_address_t)); + /* Init SR behaviors DPO type */ + sr_localsid_dpo_type = dpo_register_new_type (&sr_loc_vft, sr_loc_nodes); + /* Init SR behaviors DPO type */ + sr_localsid_d_dpo_type = + dpo_register_new_type (&sr_loc_vft, sr_loc_d_nodes); + /* Init memory for localsid plugins */ + sm->plugin_functions_by_key = hash_create_string (0, sizeof (uword)); + return 0; +} + +VLIB_INIT_FUNCTION (sr_localsids_init); +/* +* fd.io coding-style-patch-verification: ON +* +* Local 
Variables:
+* eval: (c-set-style "gnu")
+* End:
+*/
diff --git a/src/vnet/srv6/sr_localsid.md b/src/vnet/srv6/sr_localsid.md
new file mode 100644
index 00000000..340af4a3
--- /dev/null
+++ b/src/vnet/srv6/sr_localsid.md
@@ -0,0 +1,58 @@
+# SR LocalSIDs {#srv6_localsid_doc}
+
+A local SID is associated with a Segment Routing behavior -or function- on the current node.
+
+The most basic behavior is called End. It simply activates the next SID in the current packet by decrementing the Segments Left value and updating the IPv6 DA.
+
+A local End SID is instantiated using the following CLI:
+
+    sr localsid (del) address XX::YY behavior end
+
+This creates a new entry in the main FIB for IPv6 address XX::YY. All packets whose IPv6 DA matches this FIB entry are redirected to the sr-localsid node, where they are processed as described above.
+
+Other examples of local SIDs are the following:
+
+    sr localsid (del) address XX::YY behavior end
+    sr localsid (del) address XX::YY behavior end.x GE0/1/0 2001::a
+    sr localsid (del) address XX::YY behavior end.dx6 GE0/1/0 2001::a
+    sr localsid (del) address XX::YY behavior end.dx4 GE0/1/0 10.0.0.1
+    sr localsid (del) address XX::YY behavior end.dx2 GigabitE0/11/0
+    sr localsid (del) address XX::YY behavior end.dt6 5
+    sr localsid (del) address XX::YY behavior end.dt4 5
+
+Note that all of these behaviors match the definitions of the SRv6 architecture (*draft-filsfils-spring-srv6-network-programming*). Please refer to that document for a detailed description of each behavior.
+
+Note also that you can configure the PSP flavor of the End and End.X behaviors by typing:
+
+    sr localsid (del) address XX::YY behavior end psp
+    sr localsid (del) address XX::YY behavior end.x GE0/1/0 2001::a psp
+
+Help on the available local SID behaviors and their usage can be obtained with:
+
+    help sr localsid
+
+Alternatively, they can be obtained with:
+
+    show sr localsids behaviors
+
+The difference between these two commands is that the first one only displays the SR LocalSID behaviors built into VPP, while the latter also lists the behaviors added through the SR LocalSID Development Framework.
+
+
+VPP keeps a 'My LocalSID Table' where it stores all the instantiated SR local SIDs together with their parameters. Every time a new local SID is instantiated, a new entry is added to this table. In addition, counters for correctly and incorrectly processed traffic are maintained for each local SID. The counters store both the number of packets and bytes.
+
+The contents of the 'My LocalSID Table' are shown with:
+
+    vpp# show sr localsid
+    SRv6 - My LocalSID Table:
+    =========================
+    Address:        c3::1
+    Behavior:       DX6 (Endpoint with decapsulation and IPv6 cross-connect)
+    Iface:          GigabitEthernet0/5/0
+    Next hop:       b:c3::b
+    Good traffic:   [51277 packets : 5332808 bytes]
+    Bad traffic:    [0 packets : 0 bytes]
+    --------------------
+
+The traffic counters can be reset with:
+
+    vpp# clear sr localsid counters
diff --git a/src/vnet/srv6/sr_packet.h b/src/vnet/srv6/sr_packet.h
new file mode 100755
index 00000000..7af4ad4d
--- /dev/null
+++ b/src/vnet/srv6/sr_packet.h
@@ -0,0 +1,159 @@
+#ifndef included_vnet_sr_packet_h
+#define included_vnet_sr_packet_h
+
+#include
+
+/*
+ * ipv6 segment-routing header format
+ *
+ * Copyright (c) 2013 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * The Segment Routing Header (SRH) is defined as follows: + * + * 0 1 2 3 + * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Next Header | Hdr Ext Len | Routing Type | Segments Left | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | First Segment | Flags | RESERVED | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | + * | Segment List[0] (128 bits IPv6 address) | + * | | + * | | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | + * | | + * ... + * | | + * | | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | | + * | Segment List[n] (128 bits IPv6 address) | + * | | + * | | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * // // + * // Optional Type Length Value objects (variable) // + * // // + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * where: + * + * o Next Header: 8-bit selector. Identifies the type of header + * immediately following the SRH. + * + * o Hdr Ext Len: 8-bit unsigned integer, is the length of the SRH + * header in 8-octet units, not including the first 8 octets. + * + * o Routing Type: TBD, to be assigned by IANA (suggested value: 4). + * + * o Segments Left. Defined in [RFC2460], it contains the index, in + * the Segment List, of the next segment to inspect. Segments Left + * is decremented at each segment. + * + * o First Segment: contains the index, in the Segment List, of the + * first segment of the path which is in fact the last element of the + * Segment List. + * + * o Flags: 8 bits of flags. Following flags are defined: + * + * 0 1 2 3 4 5 6 7 + * +-+-+-+-+-+-+-+-+ + * |U|P|O|A|H| U | + * +-+-+-+-+-+-+-+-+ + * + * U: Unused and for future use. SHOULD be unset on transmission + * and MUST be ignored on receipt. + * + * P-flag: Protected flag. Set when the packet has been rerouted + * through FRR mechanism by an SR endpoint node. + * + * O-flag: OAM flag. When set, it indicates that this packet is + * an operations and management (OAM) packet. + * + * A-flag: Alert flag. If present, it means important Type Length + * Value (TLV) objects are present. See Section 3.1 for details + * on TLVs objects. + * + * H-flag: HMAC flag. If set, the HMAC TLV is present and is + * encoded as the last TLV of the SRH. In other words, the last + * 36 octets of the SRH represent the HMAC information. See + * Section 3.1.5 for details on the HMAC TLV. + * + * o RESERVED: SHOULD be unset on transmission and MUST be ignored on + * receipt. + * + * o Segment List[n]: 128 bit IPv6 addresses representing the nth + * segment in the Segment List. The Segment List is encoded starting + * from the last segment of the path. I.e., the first element of the + * segment list (Segment List [0]) contains the last segment of the + * path while the last segment of the Segment List (Segment List[n]) + * contains the first segment of the path. 
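+ *    As a non-normative illustration with a hypothetical segment path
+ *    S1 -> S2 -> S3: the SRH carries Segment List[0]=S3,
+ *    Segment List[1]=S2 and Segment List[2]=S1, with First Segment = 2.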
The index contained in + * "Segments Left" identifies the current active segment. + * + * o Type Length Value (TLV) are described in Section 3.1. + * + */ + +#ifndef IPPROTO_IPV6_ROUTE +#define IPPROTO_IPV6_ROUTE 43 +#endif + +#define ROUTING_HEADER_TYPE_SR 4 + +typedef struct +{ + /* Protocol for next header. */ + u8 protocol; + /* + * Length of routing header in 8 octet units, + * not including the first 8 octets + */ + u8 length; + + /* Type of routing header; type 4 = segement routing */ + u8 type; + + /* Next segment in the segment list */ + u8 segments_left; + + /* Pointer to the first segment in the header */ + u8 first_segment; + + /* Flag bits */ +#define IP6_SR_HEADER_FLAG_PROTECTED (0x40) +#define IP6_SR_HEADER_FLAG_OAM (0x20) +#define IP6_SR_HEADER_FLAG_ALERT (0x10) +#define IP6_SR_HEADER_FLAG_HMAC (0x80) + + /* values 0x0, 0x4 - 0x7 are reserved */ + u8 flags; + u16 reserved; + + /* The segment elts */ + ip6_address_t segments[0]; +} __attribute__ ((packed)) ip6_sr_header_t; + +/* +* fd.io coding-style-patch-verification: ON +* +* Local Variables: +* eval: (c-set-style "gnu") +* End: +*/ + +#endif /* included_vnet_sr_packet_h */ diff --git a/src/vnet/srv6/sr_policy.md b/src/vnet/srv6/sr_policy.md new file mode 100644 index 00000000..521b8461 --- /dev/null +++ b/src/vnet/srv6/sr_policy.md @@ -0,0 +1,56 @@ +# Creating a SR Policy {#srv6_policy_doc} + +An SR Policy is defined by a Binding SID and a weighted set of Segment Lists. + +A new SR policy is created with a first SID list using: + + sr policy add bsid 2001::1 next A1:: next B1:: next C1:: (weight 5) (fib-table 3) + +* The weight parameter is only used if more than one SID list is associated with the policy. +* The fib-table parameter specifies in which table (VRF) the Binding SID is to be installed. + +An SR policy is deleted with: + + sr policy del bsid 2001::1 + sr policy del index 1 + +The existing SR policies are listed with: + + show sr policies + +## Adding/Removing SID Lists from an SR policy + +An additional SID list is associated with an existing SR policy with: + + sr policy mod bsid 2001::1 add sl next A2:: next B2:: next C2:: (weight 3) + sr policy mod index 3 add sl next A2:: next B2:: next C2:: (weight 3) + +Conversely, a SID list can be removed from an SR policy with: + + sr policy mod bsid 2001::1 del sl index 1 + sr policy mod index 3 del sl index 1 + +Note that this cannot be used to remove the last SID list of a policy. + +The weight of a SID list can also be modified with: + + sr policy mod bsid 2001::1 mod sl index 1 weight 4 + sr policy mod index 3 mod sl index 1 weight 4 + +## SR Policies: Spray policies + +Spray policies are a specific type of SR policies where the packet is replicated on all the SID lists, rather than load-balanced among them. + +SID list weights are ignored with this type of policies. + +A Spray policy is instantiated by appending the keyword **spray** to a regular SR policy command, as in: + + sr policy add bsid 2001::1 next A1:: next B1:: next C1:: spray + +Spray policies are used for removing multicast state from a network core domain, and instead send a linear unicast copy to every access node. The last SID in each list accesses the multicast tree within the access node. + +## Encapsulation SR policies + +In case the user decides to create an SR policy an IPv6 Source Address must be specified for the encapsulated traffic. 
In order to do so the user might use the following command: + + set sr encaps source addr XXXX::YYYY diff --git a/src/vnet/srv6/sr_policy_rewrite.c b/src/vnet/srv6/sr_policy_rewrite.c new file mode 100755 index 00000000..7a37a66b --- /dev/null +++ b/src/vnet/srv6/sr_policy_rewrite.c @@ -0,0 +1,3227 @@ +/* + * sr_policy_rewrite.c: ipv6 sr policy creation + * + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @file + * @brief SR policy creation and application + * + * Create an SR policy. + * An SR policy can be either of 'default' type or 'spray' type + * An SR policy has attached a list of SID lists. + * In case the SR policy is a default one it will load balance among them. + * An SR policy has associated a BindingSID. + * In case any packet arrives with IPv6 DA == BindingSID then the SR policy + * associated to such bindingSID will be applied to such packet. + * + * SR policies can be applied either by using IPv6 encapsulation or + * SRH insertion. Both methods can be found on this file. + * + * Traffic input usually is IPv6 packets. However it is possible to have + * IPv4 packets or L2 frames. (that are encapsulated into IPv6 with SRH) + * + * This file provides the appropiates VPP graph nodes to do any of these + * methods. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +/** + * @brief SR policy rewrite trace + */ +typedef struct +{ + ip6_address_t src, dst; +} sr_policy_rewrite_trace_t; + +/* Graph arcs */ +#define foreach_sr_policy_rewrite_next \ +_(IP6_LOOKUP, "ip6-lookup") \ +_(ERROR, "error-drop") + +typedef enum +{ +#define _(s,n) SR_POLICY_REWRITE_NEXT_##s, + foreach_sr_policy_rewrite_next +#undef _ + SR_POLICY_REWRITE_N_NEXT, +} sr_policy_rewrite_next_t; + +/* SR rewrite errors */ +#define foreach_sr_policy_rewrite_error \ +_(INTERNAL_ERROR, "Segment Routing undefined error") \ +_(BSID_ZERO, "BSID with SL = 0") \ +_(COUNTER_TOTAL, "SR steered IPv6 packets") \ +_(COUNTER_ENCAP, "SR: Encaps packets") \ +_(COUNTER_INSERT, "SR: SRH inserted packets") \ +_(COUNTER_BSID, "SR: BindingSID steered packets") + +typedef enum +{ +#define _(sym,str) SR_POLICY_REWRITE_ERROR_##sym, + foreach_sr_policy_rewrite_error +#undef _ + SR_POLICY_REWRITE_N_ERROR, +} sr_policy_rewrite_error_t; + +static char *sr_policy_rewrite_error_strings[] = { +#define _(sym,string) string, + foreach_sr_policy_rewrite_error +#undef _ +}; + +/** + * @brief Dynamically added SR SL DPO type + */ +static dpo_type_t sr_pr_encaps_dpo_type; +static dpo_type_t sr_pr_insert_dpo_type; +static dpo_type_t sr_pr_bsid_encaps_dpo_type; +static dpo_type_t sr_pr_bsid_insert_dpo_type; + +/** + * @brief IPv6 SA for encapsulated packets + */ +static ip6_address_t sr_pr_encaps_src; + +/******************* SR rewrite set encaps IPv6 source addr *******************/ +/* Note: This is temporal. 
We don't know whether to follow this path or + take the ip address of a loopback interface or even the OIF */ + +static clib_error_t * +set_sr_src_command_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat + (input, "addr %U", unformat_ip6_address, &sr_pr_encaps_src)) + return 0; + else + return clib_error_return (0, "No address specified"); + } + return clib_error_return (0, "No address specified"); +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (set_sr_src_command, static) = { + .path = "set sr encaps source", + .short_help = "set sr encaps source addr ", + .function = set_sr_src_command_fn, +}; +/* *INDENT-ON* */ + +/*********************** SR rewrite string computation ************************/ +/** + * @brief SR rewrite string computation for IPv6 encapsulation (inline) + * + * @param sl is a vector of IPv6 addresses composing the Segment List + * + * @return precomputed rewrite string for encapsulation + */ +static inline u8 * +compute_rewrite_encaps (ip6_address_t * sl) +{ + ip6_header_t *iph; + ip6_sr_header_t *srh; + ip6_address_t *addrp, *this_address; + u32 header_length = 0; + u8 *rs = NULL; + + header_length = 0; + header_length += IPv6_DEFAULT_HEADER_LENGTH; + if (vec_len (sl) > 1) + { + header_length += sizeof (ip6_sr_header_t); + header_length += vec_len (sl) * sizeof (ip6_address_t); + } + + vec_validate (rs, header_length - 1); + + iph = (ip6_header_t *) rs; + iph->ip_version_traffic_class_and_flow_label = + clib_host_to_net_u32 (0 | ((6 & 0xF) << 28)); + iph->src_address.as_u64[0] = sr_pr_encaps_src.as_u64[0]; + iph->src_address.as_u64[1] = sr_pr_encaps_src.as_u64[1]; + iph->payload_length = header_length - IPv6_DEFAULT_HEADER_LENGTH; + iph->protocol = IP_PROTOCOL_IPV6; + iph->hop_limit = IPv6_DEFAULT_HOP_LIMIT; + + srh = (ip6_sr_header_t *) (iph + 1); + iph->protocol = IP_PROTOCOL_IPV6_ROUTE; + srh->protocol = IP_PROTOCOL_IPV6; + srh->type = ROUTING_HEADER_TYPE_SR; + srh->segments_left = vec_len (sl) - 1; + srh->first_segment = vec_len (sl) - 1; + srh->length = ((sizeof (ip6_sr_header_t) + + (vec_len (sl) * sizeof (ip6_address_t))) / 8) - 1; + srh->flags = 0x00; + srh->reserved = 0x00; + addrp = srh->segments + vec_len (sl) - 1; + vec_foreach (this_address, sl) + { + clib_memcpy (addrp->as_u8, this_address->as_u8, sizeof (ip6_address_t)); + addrp--; + } + iph->dst_address.as_u64[0] = sl->as_u64[0]; + iph->dst_address.as_u64[1] = sl->as_u64[1]; + return rs; +} + +/** + * @brief SR rewrite string computation for SRH insertion (inline) + * + * @param sl is a vector of IPv6 addresses composing the Segment List + * + * @return precomputed rewrite string for SRH insertion + */ +static inline u8 * +compute_rewrite_insert (ip6_address_t * sl) +{ + ip6_sr_header_t *srh; + ip6_address_t *addrp, *this_address; + u32 header_length = 0; + u8 *rs = NULL; + + header_length = 0; + header_length += sizeof (ip6_sr_header_t); + header_length += (vec_len (sl) + 1) * sizeof (ip6_address_t); + + vec_validate (rs, header_length - 1); + + srh = (ip6_sr_header_t *) rs; + srh->type = ROUTING_HEADER_TYPE_SR; + srh->segments_left = vec_len (sl); + srh->first_segment = vec_len (sl); + srh->length = ((sizeof (ip6_sr_header_t) + + ((vec_len (sl) + 1) * sizeof (ip6_address_t))) / 8) - 1; + srh->flags = 0x00; + srh->reserved = 0x0000; + addrp = srh->segments + vec_len (sl); + vec_foreach (this_address, sl) + { + clib_memcpy (addrp->as_u8, this_address->as_u8, sizeof (ip6_address_t)); + addrp--; + 
} + return rs; +} + +/** + * @brief SR rewrite string computation for SRH insertion with BSID (inline) + * + * @param sl is a vector of IPv6 addresses composing the Segment List + * + * @return precomputed rewrite string for SRH insertion with BSID + */ +static inline u8 * +compute_rewrite_bsid (ip6_address_t * sl) +{ + ip6_sr_header_t *srh; + ip6_address_t *addrp, *this_address; + u32 header_length = 0; + u8 *rs = NULL; + + header_length = 0; + header_length += sizeof (ip6_sr_header_t); + header_length += vec_len (sl) * sizeof (ip6_address_t); + + vec_validate (rs, header_length - 1); + + srh = (ip6_sr_header_t *) rs; + srh->type = ROUTING_HEADER_TYPE_SR; + srh->segments_left = vec_len (sl) - 1; + srh->first_segment = vec_len (sl) - 1; + srh->length = ((sizeof (ip6_sr_header_t) + + (vec_len (sl) * sizeof (ip6_address_t))) / 8) - 1; + srh->flags = 0x00; + srh->reserved = 0x0000; + addrp = srh->segments + vec_len (sl) - 1; + vec_foreach (this_address, sl) + { + clib_memcpy (addrp->as_u8, this_address->as_u8, sizeof (ip6_address_t)); + addrp--; + } + return rs; +} + +/*************************** SR LB helper functions **************************/ +/** + * @brief Creates a Segment List and adds it to an SR policy + * + * Creates a Segment List and adds it to the SR policy. Notice that the SL are + * not necessarily unique. Hence there might be two Segment List within the + * same SR Policy with exactly the same segments and same weight. + * + * @param sr_policy is the SR policy where the SL will be added + * @param sl is a vector of IPv6 addresses composing the Segment List + * @param weight is the weight of the SegmentList (for load-balancing purposes) + * @param is_encap represents the mode (SRH insertion vs Encapsulation) + * + * @return pointer to the just created segment list + */ +static inline ip6_sr_sl_t * +create_sl (ip6_sr_policy_t * sr_policy, ip6_address_t * sl, u32 weight, + u8 is_encap) +{ + ip6_sr_main_t *sm = &sr_main; + ip6_sr_sl_t *segment_list; + + pool_get (sm->sid_lists, segment_list); + memset (segment_list, 0, sizeof (*segment_list)); + + vec_add1 (sr_policy->segments_lists, segment_list - sm->sid_lists); + + /* Fill in segment list */ + segment_list->weight = + (weight != (u32) ~ 0 ? 
weight : SR_SEGMENT_LIST_WEIGHT_DEFAULT); + segment_list->segments = vec_dup (sl); + + if (is_encap) + { + segment_list->rewrite = compute_rewrite_encaps (sl); + segment_list->rewrite_bsid = segment_list->rewrite; + } + else + { + segment_list->rewrite = compute_rewrite_insert (sl); + segment_list->rewrite_bsid = compute_rewrite_bsid (sl); + } + + /* Create DPO */ + dpo_reset (&segment_list->bsid_dpo); + dpo_reset (&segment_list->ip6_dpo); + dpo_reset (&segment_list->ip4_dpo); + + if (is_encap) + { + dpo_set (&segment_list->ip6_dpo, sr_pr_encaps_dpo_type, DPO_PROTO_IP6, + segment_list - sm->sid_lists); + dpo_set (&segment_list->ip4_dpo, sr_pr_encaps_dpo_type, DPO_PROTO_IP4, + segment_list - sm->sid_lists); + dpo_set (&segment_list->bsid_dpo, sr_pr_bsid_encaps_dpo_type, + DPO_PROTO_IP6, segment_list - sm->sid_lists); + } + else + { + dpo_set (&segment_list->ip6_dpo, sr_pr_insert_dpo_type, DPO_PROTO_IP6, + segment_list - sm->sid_lists); + dpo_set (&segment_list->bsid_dpo, sr_pr_bsid_insert_dpo_type, + DPO_PROTO_IP6, segment_list - sm->sid_lists); + } + + return segment_list; +} + +/** + * @brief Updates the Load Balancer after an SR Policy change + * + * @param sr_policy is the modified SR Policy + */ +static inline void +update_lb (ip6_sr_policy_t * sr_policy) +{ + flow_hash_config_t fhc; + u32 *sl_index; + ip6_sr_sl_t *segment_list; + ip6_sr_main_t *sm = &sr_main; + load_balance_path_t path; + path.path_index = FIB_NODE_INDEX_INVALID; + load_balance_path_t *ip4_path_vector = 0; + load_balance_path_t *ip6_path_vector = 0; + load_balance_path_t *b_path_vector = 0; + + /* In case LB does not exist, create it */ + if (!dpo_id_is_valid (&sr_policy->bsid_dpo)) + { + fib_prefix_t pfx = { + .fp_proto = FIB_PROTOCOL_IP6, + .fp_len = 128, + .fp_addr = { + .ip6 = sr_policy->bsid, + } + }; + + /* Add FIB entry for BSID */ + fhc = fib_table_get_flow_hash_config (sr_policy->fib_table, + dpo_proto_to_fib (DPO_PROTO_IP6)); + + dpo_set (&sr_policy->bsid_dpo, DPO_LOAD_BALANCE, DPO_PROTO_IP6, + load_balance_create (0, DPO_PROTO_IP6, fhc)); + + dpo_set (&sr_policy->ip6_dpo, DPO_LOAD_BALANCE, DPO_PROTO_IP6, + load_balance_create (0, DPO_PROTO_IP6, fhc)); + + /* Update FIB entry's to point to the LB DPO in the main FIB and hidden one */ + fib_table_entry_special_dpo_update (fib_table_find (FIB_PROTOCOL_IP6, + sr_policy->fib_table), + &pfx, FIB_SOURCE_SR, + FIB_ENTRY_FLAG_EXCLUSIVE, + &sr_policy->bsid_dpo); + + fib_table_entry_special_dpo_update (sm->fib_table_ip6, + &pfx, + FIB_SOURCE_SR, + FIB_ENTRY_FLAG_EXCLUSIVE, + &sr_policy->ip6_dpo); + + if (sr_policy->is_encap) + { + dpo_set (&sr_policy->ip4_dpo, DPO_LOAD_BALANCE, DPO_PROTO_IP4, + load_balance_create (0, DPO_PROTO_IP4, fhc)); + + fib_table_entry_special_dpo_update (sm->fib_table_ip4, + &pfx, + FIB_SOURCE_SR, + FIB_ENTRY_FLAG_EXCLUSIVE, + &sr_policy->ip4_dpo); + } + + } + + /* Create the LB path vector */ + //path_vector = vec_new(load_balance_path_t, vec_len(sr_policy->segments_lists)); + vec_foreach (sl_index, sr_policy->segments_lists) + { + segment_list = pool_elt_at_index (sm->sid_lists, *sl_index); + path.path_dpo = segment_list->bsid_dpo; + path.path_weight = segment_list->weight; + vec_add1 (b_path_vector, path); + path.path_dpo = segment_list->ip6_dpo; + vec_add1 (ip6_path_vector, path); + if (sr_policy->is_encap) + { + path.path_dpo = segment_list->ip4_dpo; + vec_add1 (ip4_path_vector, path); + } + } + + /* Update LB multipath */ + load_balance_multipath_update (&sr_policy->bsid_dpo, b_path_vector, + LOAD_BALANCE_FLAG_NONE); + 
load_balance_multipath_update (&sr_policy->ip6_dpo, ip6_path_vector, + LOAD_BALANCE_FLAG_NONE); + if (sr_policy->is_encap) + load_balance_multipath_update (&sr_policy->ip4_dpo, ip4_path_vector, + LOAD_BALANCE_FLAG_NONE); + + /* Cleanup */ + vec_free (b_path_vector); + vec_free (ip6_path_vector); + vec_free (ip4_path_vector); + +} + +/** + * @brief Updates the Replicate DPO after an SR Policy change + * + * @param sr_policy is the modified SR Policy (type spray) + */ +static inline void +update_replicate (ip6_sr_policy_t * sr_policy) +{ + u32 *sl_index; + ip6_sr_sl_t *segment_list; + ip6_sr_main_t *sm = &sr_main; + load_balance_path_t path; + path.path_index = FIB_NODE_INDEX_INVALID; + load_balance_path_t *b_path_vector = 0; + load_balance_path_t *ip6_path_vector = 0; + load_balance_path_t *ip4_path_vector = 0; + + /* In case LB does not exist, create it */ + if (!dpo_id_is_valid (&sr_policy->bsid_dpo)) + { + dpo_set (&sr_policy->bsid_dpo, DPO_REPLICATE, + DPO_PROTO_IP6, replicate_create (0, DPO_PROTO_IP6)); + + dpo_set (&sr_policy->ip6_dpo, DPO_REPLICATE, + DPO_PROTO_IP6, replicate_create (0, DPO_PROTO_IP6)); + + /* Update FIB entry's DPO to point to SR without LB */ + fib_prefix_t pfx = { + .fp_proto = FIB_PROTOCOL_IP6, + .fp_len = 128, + .fp_addr = { + .ip6 = sr_policy->bsid, + } + }; + fib_table_entry_special_dpo_update (fib_table_find (FIB_PROTOCOL_IP6, + sr_policy->fib_table), + &pfx, FIB_SOURCE_SR, + FIB_ENTRY_FLAG_EXCLUSIVE, + &sr_policy->bsid_dpo); + + fib_table_entry_special_dpo_update (sm->fib_table_ip6, + &pfx, + FIB_SOURCE_SR, + FIB_ENTRY_FLAG_EXCLUSIVE, + &sr_policy->ip6_dpo); + + if (sr_policy->is_encap) + { + dpo_set (&sr_policy->ip4_dpo, DPO_REPLICATE, DPO_PROTO_IP4, + replicate_create (0, DPO_PROTO_IP4)); + + fib_table_entry_special_dpo_update (sm->fib_table_ip4, + &pfx, + FIB_SOURCE_SR, + FIB_ENTRY_FLAG_EXCLUSIVE, + &sr_policy->ip4_dpo); + } + + } + + /* Create the replicate path vector */ + path.path_weight = 1; + vec_foreach (sl_index, sr_policy->segments_lists) + { + segment_list = pool_elt_at_index (sm->sid_lists, *sl_index); + path.path_dpo = segment_list->bsid_dpo; + vec_add1 (b_path_vector, path); + path.path_dpo = segment_list->ip6_dpo; + vec_add1 (ip6_path_vector, path); + if (sr_policy->is_encap) + { + path.path_dpo = segment_list->ip4_dpo; + vec_add1 (ip4_path_vector, path); + } + } + + /* Update replicate multipath */ + replicate_multipath_update (&sr_policy->bsid_dpo, b_path_vector); + replicate_multipath_update (&sr_policy->ip6_dpo, ip6_path_vector); + if (sr_policy->is_encap) + replicate_multipath_update (&sr_policy->ip4_dpo, ip4_path_vector); +} + +/******************************* SR rewrite API *******************************/ +/* Three functions for handling sr policies: + * -> sr_policy_add + * -> sr_policy_del + * -> sr_policy_mod + * All of them are API. CLI function on sr_policy_command_fn */ + +/** + * @brief Create a new SR policy + * + * @param bsid is the bindingSID of the SR Policy + * @param segments is a vector of IPv6 address composing the segment list + * @param weight is the weight of the sid list. optional. + * @param behavior is the behavior of the SR policy. 
(default//spray) + * @param fib_table is the VRF where to install the FIB entry for the BSID + * @param is_encap (bool) whether SR policy should behave as Encap/SRH Insertion + * + * @return 0 if correct, else error + */ +int +sr_policy_add (ip6_address_t * bsid, ip6_address_t * segments, + u32 weight, u8 behavior, u32 fib_table, u8 is_encap) +{ + ip6_sr_main_t *sm = &sr_main; + ip6_sr_policy_t *sr_policy = 0; + uword *p; + + /* Search for existing keys (BSID) */ + p = mhash_get (&sm->sr_policies_index_hash, bsid); + if (p) + { + /* Add SR policy that already exists; complain */ + return -12; + } + + /* Search collision in FIB entries */ + /* Explanation: It might be possible that some other entity has already + * created a route for the BSID. This in theory is impossible, but in + * practise we could see it. Assert it and scream if needed */ + fib_prefix_t pfx = { + .fp_proto = FIB_PROTOCOL_IP6, + .fp_len = 128, + .fp_addr = { + .ip6 = *bsid, + } + }; + + /* Lookup the FIB index associated to the table selected */ + u32 fib_index = fib_table_find (FIB_PROTOCOL_IP6, + (fib_table != (u32) ~ 0 ? fib_table : 0)); + if (fib_index == ~0) + return -13; + + /* Lookup whether there exists an entry for the BSID */ + fib_node_index_t fei = fib_table_lookup_exact_match (fib_index, &pfx); + if (FIB_NODE_INDEX_INVALID != fei) + return -12; //There is an entry for such lookup + + /* Add an SR policy object */ + pool_get (sm->sr_policies, sr_policy); + memset (sr_policy, 0, sizeof (*sr_policy)); + clib_memcpy (&sr_policy->bsid, bsid, sizeof (ip6_address_t)); + sr_policy->type = behavior; + sr_policy->fib_table = (fib_table != (u32) ~ 0 ? fib_table : 0); //Is default FIB 0 ? + sr_policy->is_encap = is_encap; + + /* Copy the key */ + mhash_set (&sm->sr_policies_index_hash, bsid, sr_policy - sm->sr_policies, + NULL); + + /* Create a segment list and add the index to the SR policy */ + create_sl (sr_policy, segments, weight, is_encap); + + /* If FIB doesnt exist, create them */ + if (sm->fib_table_ip6 == (u32) ~ 0) + { + sm->fib_table_ip6 = fib_table_create_and_lock (FIB_PROTOCOL_IP6, + "SRv6 steering of IP6 prefixes through BSIDs"); + sm->fib_table_ip4 = fib_table_create_and_lock (FIB_PROTOCOL_IP6, + "SRv6 steering of IP4 prefixes through BSIDs"); + } + + /* Create IPv6 FIB for the BindingSID attached to the DPO of the only SL */ + if (sr_policy->type == SR_POLICY_TYPE_DEFAULT) + update_lb (sr_policy); + else if (sr_policy->type == SR_POLICY_TYPE_SPRAY) + update_replicate (sr_policy); + return 0; +} + +/** + * @brief Delete a SR policy + * + * @param bsid is the bindingSID of the SR Policy + * @param index is the index of the SR policy + * + * @return 0 if correct, else error + */ +int +sr_policy_del (ip6_address_t * bsid, u32 index) +{ + ip6_sr_main_t *sm = &sr_main; + ip6_sr_policy_t *sr_policy = 0; + ip6_sr_sl_t *segment_list; + u32 *sl_index; + uword *p; + + if (bsid) + { + p = mhash_get (&sm->sr_policies_index_hash, bsid); + if (p) + sr_policy = pool_elt_at_index (sm->sr_policies, p[0]); + else + return -1; + } + else + { + sr_policy = pool_elt_at_index (sm->sr_policies, index); + if (!sr_policy) + return -1; + } + + /* Remove BindingSID FIB entry */ + fib_prefix_t pfx = { + .fp_proto = FIB_PROTOCOL_IP6, + .fp_len = 128, + .fp_addr = { + .ip6 = sr_policy->bsid, + } + , + }; + + fib_table_entry_special_remove (fib_table_find (FIB_PROTOCOL_IP6, + sr_policy->fib_table), + &pfx, FIB_SOURCE_SR); + + fib_table_entry_special_remove (sm->fib_table_ip6, &pfx, FIB_SOURCE_SR); + + if (sr_policy->is_encap) + 
fib_table_entry_special_remove (sm->fib_table_ip4, &pfx, FIB_SOURCE_SR); + + if (dpo_id_is_valid (&sr_policy->bsid_dpo)) + { + dpo_reset (&sr_policy->bsid_dpo); + dpo_reset (&sr_policy->ip4_dpo); + dpo_reset (&sr_policy->ip6_dpo); + } + + /* Clean SID Lists */ + vec_foreach (sl_index, sr_policy->segments_lists) + { + segment_list = pool_elt_at_index (sm->sid_lists, *sl_index); + vec_free (segment_list->segments); + vec_free (segment_list->rewrite); + vec_free (segment_list->rewrite_bsid); + pool_put_index (sm->sid_lists, *sl_index); + } + + /* Remove SR policy entry */ + mhash_unset (&sm->sr_policies_index_hash, &sr_policy->bsid, NULL); + pool_put (sm->sr_policies, sr_policy); + + /* If FIB empty unlock it */ + if (!pool_elts (sm->sr_policies) && !pool_elts (sm->steer_policies)) + { + fib_table_unlock (sm->fib_table_ip6, FIB_PROTOCOL_IP6); + fib_table_unlock (sm->fib_table_ip4, FIB_PROTOCOL_IP6); + sm->fib_table_ip6 = (u32) ~ 0; + sm->fib_table_ip4 = (u32) ~ 0; + } + + return 0; +} + +/** + * @brief Modify an existing SR policy + * + * The possible modifications are adding a new Segment List, modifying an + * existing Segment List (modify the weight only) and delete a given + * Segment List from the SR Policy. + * + * @param bsid is the bindingSID of the SR Policy + * @param index is the index of the SR policy + * @param fib_table is the VRF where to install the FIB entry for the BSID + * @param operation is the operation to perform (among the top ones) + * @param segments is a vector of IPv6 address composing the segment list + * @param sl_index is the index of the Segment List to modify/delete + * @param weight is the weight of the sid list. optional. + * @param is_encap Mode. Encapsulation or SRH insertion. + * + * @return 0 if correct, else error + */ +int +sr_policy_mod (ip6_address_t * bsid, u32 index, u32 fib_table, + u8 operation, ip6_address_t * segments, u32 sl_index, + u32 weight) +{ + ip6_sr_main_t *sm = &sr_main; + ip6_sr_policy_t *sr_policy = 0; + ip6_sr_sl_t *segment_list; + u32 *sl_index_iterate; + uword *p; + + if (bsid) + { + p = mhash_get (&sm->sr_policies_index_hash, bsid); + if (p) + sr_policy = pool_elt_at_index (sm->sr_policies, p[0]); + else + return -1; + } + else + { + sr_policy = pool_elt_at_index (sm->sr_policies, index); + if (!sr_policy) + return -1; + } + + if (operation == 1) /* Add SR List to an existing SR policy */ + { + /* Create the new SL */ + segment_list = + create_sl (sr_policy, segments, weight, sr_policy->is_encap); + + /* Create a new LB DPO */ + if (sr_policy->type == SR_POLICY_TYPE_DEFAULT) + update_lb (sr_policy); + else if (sr_policy->type == SR_POLICY_TYPE_SPRAY) + update_replicate (sr_policy); + } + else if (operation == 2) /* Delete SR List from an existing SR policy */ + { + /* Check that currently there are more than one SID list */ + if (vec_len (sr_policy->segments_lists) == 1) + return -21; + + /* Check that the SR list does exist and is assigned to the sr policy */ + vec_foreach (sl_index_iterate, sr_policy->segments_lists) + if (*sl_index_iterate == sl_index) + break; + + if (*sl_index_iterate != sl_index) + return -22; + + /* Remove the lucky SR list that is being kicked out */ + segment_list = pool_elt_at_index (sm->sid_lists, sl_index); + vec_free (segment_list->segments); + vec_free (segment_list->rewrite); + vec_free (segment_list->rewrite_bsid); + pool_put_index (sm->sid_lists, sl_index); + vec_del1 (sr_policy->segments_lists, + sl_index_iterate - sr_policy->segments_lists); + + /* Create a new LB DPO */ + if (sr_policy->type 
== SR_POLICY_TYPE_DEFAULT) + update_lb (sr_policy); + else if (sr_policy->type == SR_POLICY_TYPE_SPRAY) + update_replicate (sr_policy); + } + else if (operation == 3) /* Modify the weight of an existing SR List */ + { + /* Find the corresponding SL */ + vec_foreach (sl_index_iterate, sr_policy->segments_lists) + if (*sl_index_iterate == sl_index) + break; + + if (*sl_index_iterate != sl_index) + return -32; + + /* Change the weight */ + segment_list = pool_elt_at_index (sm->sid_lists, sl_index); + segment_list->weight = weight; + + /* Update LB */ + if (sr_policy->type == SR_POLICY_TYPE_DEFAULT) + update_lb (sr_policy); + } + else /* Incorrect op. */ + return -1; + + return 0; +} + +/** + * @brief CLI for 'sr policies' command family + */ +static clib_error_t * +sr_policy_command_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + int rv = -1; + char is_del = 0, is_add = 0, is_mod = 0; + char policy_set = 0; + ip6_address_t bsid, next_address; + u32 sr_policy_index = (u32) ~ 0, sl_index = (u32) ~ 0; + u32 weight = (u32) ~ 0, fib_table = (u32) ~ 0; + ip6_address_t *segments = 0, *this_seg; + u8 operation = 0; + char is_encap = 1; + char is_spray = 0; + + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (!is_add && !is_mod && !is_del && unformat (input, "add")) + is_add = 1; + else if (!is_add && !is_mod && !is_del && unformat (input, "del")) + is_del = 1; + else if (!is_add && !is_mod && !is_del && unformat (input, "mod")) + is_mod = 1; + else if (!policy_set + && unformat (input, "bsid %U", unformat_ip6_address, &bsid)) + policy_set = 1; + else if (!is_add && !policy_set + && unformat (input, "index %d", &sr_policy_index)) + policy_set = 1; + else if (unformat (input, "weight %d", &weight)); + else + if (unformat (input, "next %U", unformat_ip6_address, &next_address)) + { + vec_add2 (segments, this_seg, 1); + clib_memcpy (this_seg->as_u8, next_address.as_u8, + sizeof (*this_seg)); + } + else if (unformat (input, "add sl")) + operation = 1; + else if (unformat (input, "del sl index %d", &sl_index)) + operation = 2; + else if (unformat (input, "mod sl index %d", &sl_index)) + operation = 3; + else if (fib_table == (u32) ~ 0 + && unformat (input, "fib-table %d", &fib_table)); + else if (unformat (input, "encap")) + is_encap = 1; + else if (unformat (input, "insert")) + is_encap = 0; + else if (unformat (input, "spray")) + is_spray = 1; + else + break; + } + + if (!is_add && !is_mod && !is_del) + return clib_error_return (0, "Incorrect CLI"); + + if (!policy_set) + return clib_error_return (0, "No SR policy BSID or index specified"); + + if (is_add) + { + if (vec_len (segments) == 0) + return clib_error_return (0, "No Segment List specified"); + rv = sr_policy_add (&bsid, segments, weight, + (is_spray ? SR_POLICY_TYPE_SPRAY : + SR_POLICY_TYPE_DEFAULT), fib_table, is_encap); + } + else if (is_del) + rv = sr_policy_del ((sr_policy_index != (u32) ~ 0 ? NULL : &bsid), + sr_policy_index); + else if (is_mod) + { + if (!operation) + return clib_error_return (0, "No SL modification specified"); + if (operation != 1 && sl_index == (u32) ~ 0) + return clib_error_return (0, "No Segment List index specified"); + if (operation == 1 && vec_len (segments) == 0) + return clib_error_return (0, "No Segment List specified"); + if (operation == 3 && weight == (u32) ~ 0) + return clib_error_return (0, "No new weight for the SL specified"); + rv = sr_policy_mod ((sr_policy_index != (u32) ~ 0 ? 
NULL : &bsid), + sr_policy_index, fib_table, operation, segments, + sl_index, weight); + } + + switch (rv) + { + case 0: + break; + case 1: + return 0; + case -12: + return clib_error_return (0, + "There is already a FIB entry for the BindingSID address.\n" + "The SR policy could not be created."); + case -13: + return clib_error_return (0, "The specified FIB table does not exist."); + case -21: + return clib_error_return (0, + "The selected SR policy only contains ONE segment list. " + "Please remove the SR policy instead"); + case -22: + return clib_error_return (0, + "Could not delete the segment list. " + "It is not associated with that SR policy."); + case -32: + return clib_error_return (0, + "Could not modify the segment list. " + "The given SL is not associated with such SR policy."); + default: + return clib_error_return (0, "BUG: sr policy returns %d", rv); + } + return 0; +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (sr_policy_command, static) = { + .path = "sr policy", + .short_help = "sr policy [add||del||mod] [bsid 2001::1||index 5] " + "next A:: next B:: next C:: (weight 1) (fib-table 2) (encap|insert)", + .long_help = + "Manipulation of SR policies.\n" + "A Segment Routing policy may contain several SID lists. Each SID list has\n" + "an associated weight (default 1), which will result in wECMP (uECMP).\n" + "Segment Routing policies might be of type encapsulation or srh insertion\n" + "Each SR policy will be associated with a unique BindingSID.\n" + "A BindingSID is a locally allocated SegmentID. For every packet that arrives\n" + "with IPv6_DA:BSID such traffic will be steered into the SR policy.\n" + "The add command will create a SR policy with its first segment list (sl)\n" + "The mod command allows you to add, remove, or modify the existing segment lists\n" + "within an SR policy.\n" + "The del command allows you to delete a SR policy along with all its associated\n" + "SID lists.\n", + .function = sr_policy_command_fn, +}; +/* *INDENT-ON* */ + +/** + * @brief CLI to display onscreen all the SR policies + */ +static clib_error_t * +show_sr_policies_command_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + ip6_sr_main_t *sm = &sr_main; + u32 *sl_index; + ip6_sr_sl_t *segment_list = 0; + ip6_sr_policy_t *sr_policy = 0; + ip6_sr_policy_t **vec_policies = 0; + ip6_address_t *addr; + u8 *s; + int i = 0; + + vlib_cli_output (vm, "SR policies:"); + + /* *INDENT-OFF* */ + pool_foreach (sr_policy, sm->sr_policies, + {vec_add1 (vec_policies, sr_policy); } ); + /* *INDENT-ON* */ + + vec_foreach_index (i, vec_policies) + { + sr_policy = vec_policies[i]; + vlib_cli_output (vm, "[%u].-\tBSID: %U", + (u32) (sr_policy - sm->sr_policies), + format_ip6_address, &sr_policy->bsid); + vlib_cli_output (vm, "\tBehavior: %s", + (sr_policy->is_encap ? "Encapsulation" : + "SRH insertion")); + vlib_cli_output (vm, "\tType: %s", + (sr_policy->type == + SR_POLICY_TYPE_DEFAULT ? "Default" : "Spray")); + vlib_cli_output (vm, "\tFIB table: %u", + (sr_policy->fib_table != + (u32) ~ 0 ? 
sr_policy->fib_table : 0)); + vlib_cli_output (vm, "\tSegment Lists:"); + vec_foreach (sl_index, sr_policy->segments_lists) + { + s = NULL; + s = format (s, "\t[%u].- ", *sl_index); + segment_list = pool_elt_at_index (sm->sid_lists, *sl_index); + s = format (s, "< "); + vec_foreach (addr, segment_list->segments) + { + s = format (s, "%U, ", format_ip6_address, addr); + } + s = format (s, "\b\b > "); + s = format (s, "weight: %u", segment_list->weight); + vlib_cli_output (vm, " %s", s); + } + vlib_cli_output (vm, "-----------"); + } + return 0; +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (show_sr_policies_command, static) = { + .path = "show sr policies", + .short_help = "show sr policies", + .function = show_sr_policies_command_fn, +}; +/* *INDENT-ON* */ + +/*************************** SR rewrite graph node ****************************/ +/** + * @brief Trace for the SR Policy Rewrite graph node + */ +static u8 * +format_sr_policy_rewrite_trace (u8 * s, va_list * args) +{ + //TODO + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + sr_policy_rewrite_trace_t *t = va_arg (*args, sr_policy_rewrite_trace_t *); + + s = format + (s, "SR-policy-rewrite: src %U dst %U", + format_ip6_address, &t->src, format_ip6_address, &t->dst); + + return s; +} + +/** + * @brief IPv6 encapsulation processing as per RFC2473 + */ +static_always_inline void +encaps_processing_v6 (vlib_node_runtime_t * node, + vlib_buffer_t * b0, + ip6_header_t * ip0, ip6_header_t * ip0_encap) +{ + u32 new_l0; + + ip0_encap->hop_limit -= 1; + new_l0 = + ip0->payload_length + sizeof (ip6_header_t) + + clib_net_to_host_u16 (ip0_encap->payload_length); + ip0->payload_length = clib_host_to_net_u16 (new_l0); + ip0->ip_version_traffic_class_and_flow_label = + ip0_encap->ip_version_traffic_class_and_flow_label; +} + +/** + * @brief Graph node for applying a SR policy into an IPv6 packet. Encapsulation + */ +static uword +sr_policy_rewrite_encaps (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + ip6_sr_main_t *sm = &sr_main; + u32 n_left_from, next_index, *from, *to_next; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + next_index = node->cached_next_index; + + int encap_pkts = 0, bsid_pkts = 0; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + /* Quad - Loop */ + while (n_left_from >= 8 && n_left_to_next >= 4) + { + u32 bi0, bi1, bi2, bi3; + vlib_buffer_t *b0, *b1, *b2, *b3; + u32 next0, next1, next2, next3; + next0 = next1 = next2 = next3 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP; + ip6_header_t *ip0, *ip1, *ip2, *ip3; + ip6_header_t *ip0_encap, *ip1_encap, *ip2_encap, *ip3_encap; + ip6_sr_sl_t *sl0, *sl1, *sl2, *sl3; + + /* Prefetch next iteration. 
*/ + { + vlib_buffer_t *p4, *p5, *p6, *p7; + + p4 = vlib_get_buffer (vm, from[4]); + p5 = vlib_get_buffer (vm, from[5]); + p6 = vlib_get_buffer (vm, from[6]); + p7 = vlib_get_buffer (vm, from[7]); + + /* Prefetch the buffer header and packet for the N+2 loop iteration */ + vlib_prefetch_buffer_header (p4, LOAD); + vlib_prefetch_buffer_header (p5, LOAD); + vlib_prefetch_buffer_header (p6, LOAD); + vlib_prefetch_buffer_header (p7, LOAD); + + CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE); + } + + to_next[0] = bi0 = from[0]; + to_next[1] = bi1 = from[1]; + to_next[2] = bi2 = from[2]; + to_next[3] = bi3 = from[3]; + from += 4; + to_next += 4; + n_left_from -= 4; + n_left_to_next -= 4; + + b0 = vlib_get_buffer (vm, bi0); + b1 = vlib_get_buffer (vm, bi1); + b2 = vlib_get_buffer (vm, bi2); + b3 = vlib_get_buffer (vm, bi3); + + sl0 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b0)->ip.adj_index[VLIB_TX]); + sl1 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b1)->ip.adj_index[VLIB_TX]); + sl2 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b2)->ip.adj_index[VLIB_TX]); + sl3 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b3)->ip.adj_index[VLIB_TX]); + + ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl0->rewrite)); + ASSERT (b1->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl1->rewrite)); + ASSERT (b2->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl2->rewrite)); + ASSERT (b3->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl3->rewrite)); + + ip0_encap = vlib_buffer_get_current (b0); + ip1_encap = vlib_buffer_get_current (b1); + ip2_encap = vlib_buffer_get_current (b2); + ip3_encap = vlib_buffer_get_current (b3); + + clib_memcpy (((u8 *) ip0_encap) - vec_len (sl0->rewrite), + sl0->rewrite, vec_len (sl0->rewrite)); + clib_memcpy (((u8 *) ip1_encap) - vec_len (sl1->rewrite), + sl1->rewrite, vec_len (sl1->rewrite)); + clib_memcpy (((u8 *) ip2_encap) - vec_len (sl2->rewrite), + sl2->rewrite, vec_len (sl2->rewrite)); + clib_memcpy (((u8 *) ip3_encap) - vec_len (sl3->rewrite), + sl3->rewrite, vec_len (sl3->rewrite)); + + vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite)); + vlib_buffer_advance (b1, -(word) vec_len (sl1->rewrite)); + vlib_buffer_advance (b2, -(word) vec_len (sl2->rewrite)); + vlib_buffer_advance (b3, -(word) vec_len (sl3->rewrite)); + + ip0 = vlib_buffer_get_current (b0); + ip1 = vlib_buffer_get_current (b1); + ip2 = vlib_buffer_get_current (b2); + ip3 = vlib_buffer_get_current (b3); + + encaps_processing_v6 (node, b0, ip0, ip0_encap); + encaps_processing_v6 (node, b1, ip1, ip1_encap); + encaps_processing_v6 (node, b2, ip2, ip2_encap); + encaps_processing_v6 (node, b3, ip3, ip3_encap); + + if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE))) + { + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip0->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip0->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + + if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b1, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip1->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip1->dst_address.as_u8, + sizeof 
(tr->dst.as_u8)); + } + + if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b2, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip2->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip2->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + + if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b3, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip3->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip3->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + } + + encap_pkts += 4; + vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next, + n_left_to_next, bi0, bi1, bi2, bi3, + next0, next1, next2, next3); + } + + /* Single loop for potentially the last three packets */ + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t *b0; + ip6_header_t *ip0 = 0, *ip0_encap = 0; + ip6_sr_sl_t *sl0; + u32 next0 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + b0 = vlib_get_buffer (vm, bi0); + + sl0 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b0)->ip.adj_index[VLIB_TX]); + ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl0->rewrite)); + + ip0_encap = vlib_buffer_get_current (b0); + + clib_memcpy (((u8 *) ip0_encap) - vec_len (sl0->rewrite), + sl0->rewrite, vec_len (sl0->rewrite)); + vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite)); + + ip0 = vlib_buffer_get_current (b0); + + encaps_processing_v6 (node, b0, ip0, ip0_encap); + + if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) && + PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip0->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip0->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + + encap_pkts++; + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + /* Update counters */ + vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_node.index, + SR_POLICY_REWRITE_ERROR_COUNTER_TOTAL, + encap_pkts); + vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_node.index, + SR_POLICY_REWRITE_ERROR_COUNTER_BSID, + bsid_pkts); + + return from_frame->n_vectors; +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (sr_policy_rewrite_encaps_node) = { + .function = sr_policy_rewrite_encaps, + .name = "sr-pl-rewrite-encaps", + .vector_size = sizeof (u32), + .format_trace = format_sr_policy_rewrite_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + .n_errors = SR_POLICY_REWRITE_N_ERROR, + .error_strings = sr_policy_rewrite_error_strings, + .n_next_nodes = SR_POLICY_REWRITE_N_NEXT, + .next_nodes = { +#define _(s,n) [SR_POLICY_REWRITE_NEXT_##s] = n, + foreach_sr_policy_rewrite_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +/** + * @brief IPv4 encapsulation processing as per RFC2473 + */ +static_always_inline void +encaps_processing_v4 (vlib_node_runtime_t * node, + vlib_buffer_t * b0, + ip6_header_t * ip0, ip4_header_t * ip0_encap) +{ + u32 new_l0; + ip6_sr_header_t *sr0; + + u32 checksum0; + + /* Inner IPv4: Decrement TTL & update checksum */ + ip0_encap->ttl -= 1; + checksum0 = ip0_encap->checksum + clib_host_to_net_u16 (0x0100); + checksum0 += checksum0 >= 0xffff; + 
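/* Incremental checksum update for the TTL decrement (RFC 1141): the adjustment is added to the stored checksum and the end-around carry is folded before the result is written back */ +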
ip0_encap->checksum = checksum0; + + /* Outer IPv6: Update length, FL, proto */ + new_l0 = ip0->payload_length + clib_net_to_host_u16 (ip0_encap->length); + ip0->payload_length = clib_host_to_net_u16 (new_l0); + ip0->ip_version_traffic_class_and_flow_label = + clib_host_to_net_u32 (0 | ((6 & 0xF) << 28) | + ((ip0_encap->tos & 0xFF) << 20)); + sr0 = (void *) (ip0 + 1); + sr0->protocol = IP_PROTOCOL_IP_IN_IP; +} + +/** + * @brief Graph node for applying a SR policy into an IPv4 packet. Encapsulation + */ +static uword +sr_policy_rewrite_encaps_v4 (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + ip6_sr_main_t *sm = &sr_main; + u32 n_left_from, next_index, *from, *to_next; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + next_index = node->cached_next_index; + + int encap_pkts = 0, bsid_pkts = 0; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + /* Quad - Loop */ + while (n_left_from >= 8 && n_left_to_next >= 4) + { + u32 bi0, bi1, bi2, bi3; + vlib_buffer_t *b0, *b1, *b2, *b3; + u32 next0, next1, next2, next3; + next0 = next1 = next2 = next3 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP; + ip6_header_t *ip0, *ip1, *ip2, *ip3; + ip4_header_t *ip0_encap, *ip1_encap, *ip2_encap, *ip3_encap; + ip6_sr_sl_t *sl0, *sl1, *sl2, *sl3; + + /* Prefetch next iteration. */ + { + vlib_buffer_t *p4, *p5, *p6, *p7; + + p4 = vlib_get_buffer (vm, from[4]); + p5 = vlib_get_buffer (vm, from[5]); + p6 = vlib_get_buffer (vm, from[6]); + p7 = vlib_get_buffer (vm, from[7]); + + /* Prefetch the buffer header and packet for the N+2 loop iteration */ + vlib_prefetch_buffer_header (p4, LOAD); + vlib_prefetch_buffer_header (p5, LOAD); + vlib_prefetch_buffer_header (p6, LOAD); + vlib_prefetch_buffer_header (p7, LOAD); + + CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE); + } + + to_next[0] = bi0 = from[0]; + to_next[1] = bi1 = from[1]; + to_next[2] = bi2 = from[2]; + to_next[3] = bi3 = from[3]; + from += 4; + to_next += 4; + n_left_from -= 4; + n_left_to_next -= 4; + + b0 = vlib_get_buffer (vm, bi0); + b1 = vlib_get_buffer (vm, bi1); + b2 = vlib_get_buffer (vm, bi2); + b3 = vlib_get_buffer (vm, bi3); + + sl0 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b0)->ip.adj_index[VLIB_TX]); + sl1 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b1)->ip.adj_index[VLIB_TX]); + sl2 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b2)->ip.adj_index[VLIB_TX]); + sl3 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b3)->ip.adj_index[VLIB_TX]); + ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl0->rewrite)); + ASSERT (b1->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl1->rewrite)); + ASSERT (b2->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl2->rewrite)); + ASSERT (b3->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl3->rewrite)); + + ip0_encap = vlib_buffer_get_current (b0); + ip1_encap = vlib_buffer_get_current (b1); + ip2_encap = vlib_buffer_get_current (b2); + ip3_encap = vlib_buffer_get_current (b3); + + clib_memcpy (((u8 *) ip0_encap) - vec_len (sl0->rewrite), + sl0->rewrite, vec_len (sl0->rewrite)); + clib_memcpy (((u8 *) ip1_encap) - vec_len (sl1->rewrite), + sl1->rewrite, vec_len (sl1->rewrite)); + clib_memcpy (((u8 *) ip2_encap) - vec_len 
(sl2->rewrite), + sl2->rewrite, vec_len (sl2->rewrite)); + clib_memcpy (((u8 *) ip3_encap) - vec_len (sl3->rewrite), + sl3->rewrite, vec_len (sl3->rewrite)); + + vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite)); + vlib_buffer_advance (b1, -(word) vec_len (sl1->rewrite)); + vlib_buffer_advance (b2, -(word) vec_len (sl2->rewrite)); + vlib_buffer_advance (b3, -(word) vec_len (sl3->rewrite)); + + ip0 = vlib_buffer_get_current (b0); + ip1 = vlib_buffer_get_current (b1); + ip2 = vlib_buffer_get_current (b2); + ip3 = vlib_buffer_get_current (b3); + + encaps_processing_v4 (node, b0, ip0, ip0_encap); + encaps_processing_v4 (node, b1, ip1, ip1_encap); + encaps_processing_v4 (node, b2, ip2, ip2_encap); + encaps_processing_v4 (node, b3, ip3, ip3_encap); + + if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE))) + { + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip0->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip0->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + + if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b1, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip1->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip1->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + + if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b2, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip2->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip2->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + + if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b3, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip3->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip3->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + } + + encap_pkts += 4; + vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next, + n_left_to_next, bi0, bi1, bi2, bi3, + next0, next1, next2, next3); + } + + /* Single loop for potentially the last three packets */ + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t *b0; + ip6_header_t *ip0 = 0; + ip4_header_t *ip0_encap = 0; + ip6_sr_sl_t *sl0; + u32 next0 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + b0 = vlib_get_buffer (vm, bi0); + + sl0 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b0)->ip.adj_index[VLIB_TX]); + ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl0->rewrite)); + + ip0_encap = vlib_buffer_get_current (b0); + + clib_memcpy (((u8 *) ip0_encap) - vec_len (sl0->rewrite), + sl0->rewrite, vec_len (sl0->rewrite)); + vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite)); + + ip0 = vlib_buffer_get_current (b0); + + encaps_processing_v4 (node, b0, ip0, ip0_encap); + + if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) && + PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip0->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip0->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + + encap_pkts++; + vlib_validate_buffer_enqueue_x1 (vm, node, 
next_index, to_next, + n_left_to_next, bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + /* Update counters */ + vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_node.index, + SR_POLICY_REWRITE_ERROR_COUNTER_TOTAL, + encap_pkts); + vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_node.index, + SR_POLICY_REWRITE_ERROR_COUNTER_BSID, + bsid_pkts); + + return from_frame->n_vectors; +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (sr_policy_rewrite_encaps_v4_node) = { + .function = sr_policy_rewrite_encaps_v4, + .name = "sr-pl-rewrite-encaps-v4", + .vector_size = sizeof (u32), + .format_trace = format_sr_policy_rewrite_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + .n_errors = SR_POLICY_REWRITE_N_ERROR, + .error_strings = sr_policy_rewrite_error_strings, + .n_next_nodes = SR_POLICY_REWRITE_N_NEXT, + .next_nodes = { +#define _(s,n) [SR_POLICY_REWRITE_NEXT_##s] = n, + foreach_sr_policy_rewrite_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +always_inline u32 +ip_flow_hash (void *data) +{ + ip4_header_t *iph = (ip4_header_t *) data; + + if ((iph->ip_version_and_header_length & 0xF0) == 0x40) + return ip4_compute_flow_hash (iph, IP_FLOW_HASH_DEFAULT); + else + return ip6_compute_flow_hash ((ip6_header_t *) iph, IP_FLOW_HASH_DEFAULT); +} + +always_inline u64 +mac_to_u64 (u8 * m) +{ + return (*((u64 *) m) & 0xffffffffffff); +} + +always_inline u32 +l2_flow_hash (vlib_buffer_t * b0) +{ + ethernet_header_t *eh; + u64 a, b, c; + uword is_ip, eh_size; + u16 eh_type; + + eh = vlib_buffer_get_current (b0); + eh_type = clib_net_to_host_u16 (eh->type); + eh_size = ethernet_buffer_header_size (b0); + + is_ip = (eh_type == ETHERNET_TYPE_IP4 || eh_type == ETHERNET_TYPE_IP6); + + /* since we have 2 cache lines, use them */ + if (is_ip) + a = ip_flow_hash ((u8 *) vlib_buffer_get_current (b0) + eh_size); + else + a = eh->type; + + b = mac_to_u64 ((u8 *) eh->dst_address); + c = mac_to_u64 ((u8 *) eh->src_address); + hash_mix64 (a, b, c); + + return (u32) c; +} + +/** + * @brief Graph node for applying a SR policy into a L2 frame + */ +static uword +sr_policy_rewrite_encaps_l2 (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + ip6_sr_main_t *sm = &sr_main; + u32 n_left_from, next_index, *from, *to_next; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + next_index = node->cached_next_index; + + int encap_pkts = 0, bsid_pkts = 0; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + /* Quad - Loop */ + while (n_left_from >= 8 && n_left_to_next >= 4) + { + u32 bi0, bi1, bi2, bi3; + vlib_buffer_t *b0, *b1, *b2, *b3; + u32 next0, next1, next2, next3; + next0 = next1 = next2 = next3 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP; + ethernet_header_t *en0, *en1, *en2, *en3; + ip6_header_t *ip0, *ip1, *ip2, *ip3; + ip6_sr_header_t *sr0, *sr1, *sr2, *sr3; + ip6_sr_policy_t *sp0, *sp1, *sp2, *sp3; + ip6_sr_sl_t *sl0, *sl1, *sl2, *sl3; + + /* Prefetch next iteration. 
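Buffers 4-7 have their headers and the first cache line of packet data brought into cache while buffers 0-3 are being rewritten, hiding the memory latency of the quad loop.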
*/ + { + vlib_buffer_t *p4, *p5, *p6, *p7; + + p4 = vlib_get_buffer (vm, from[4]); + p5 = vlib_get_buffer (vm, from[5]); + p6 = vlib_get_buffer (vm, from[6]); + p7 = vlib_get_buffer (vm, from[7]); + + /* Prefetch the buffer header and packet for the N+2 loop iteration */ + vlib_prefetch_buffer_header (p4, LOAD); + vlib_prefetch_buffer_header (p5, LOAD); + vlib_prefetch_buffer_header (p6, LOAD); + vlib_prefetch_buffer_header (p7, LOAD); + + CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE); + } + + to_next[0] = bi0 = from[0]; + to_next[1] = bi1 = from[1]; + to_next[2] = bi2 = from[2]; + to_next[3] = bi3 = from[3]; + from += 4; + to_next += 4; + n_left_from -= 4; + n_left_to_next -= 4; + + b0 = vlib_get_buffer (vm, bi0); + b1 = vlib_get_buffer (vm, bi1); + b2 = vlib_get_buffer (vm, bi2); + b3 = vlib_get_buffer (vm, bi3); + + sp0 = pool_elt_at_index (sm->sr_policies, + sm->sw_iface_sr_policies[vnet_buffer + (b0)->sw_if_index + [VLIB_RX]]); + + sp1 = pool_elt_at_index (sm->sr_policies, + sm->sw_iface_sr_policies[vnet_buffer + (b1)->sw_if_index + [VLIB_RX]]); + + sp2 = pool_elt_at_index (sm->sr_policies, + sm->sw_iface_sr_policies[vnet_buffer + (b2)->sw_if_index + [VLIB_RX]]); + + sp3 = pool_elt_at_index (sm->sr_policies, + sm->sw_iface_sr_policies[vnet_buffer + (b3)->sw_if_index + [VLIB_RX]]); + + if (vec_len (sp0->segments_lists) == 1) + vnet_buffer (b0)->ip.adj_index[VLIB_TX] = sp0->segments_lists[0]; + else + { + vnet_buffer (b0)->ip.flow_hash = l2_flow_hash (b0); + vnet_buffer (b0)->ip.adj_index[VLIB_TX] = + sp0->segments_lists[(vnet_buffer (b0)->ip.flow_hash & + (vec_len (sp0->segments_lists) - 1))]; + } + + if (vec_len (sp1->segments_lists) == 1) + vnet_buffer (b1)->ip.adj_index[VLIB_TX] = sp1->segments_lists[1]; + else + { + vnet_buffer (b1)->ip.flow_hash = l2_flow_hash (b1); + vnet_buffer (b1)->ip.adj_index[VLIB_TX] = + sp1->segments_lists[(vnet_buffer (b1)->ip.flow_hash & + (vec_len (sp1->segments_lists) - 1))]; + } + + if (vec_len (sp2->segments_lists) == 1) + vnet_buffer (b2)->ip.adj_index[VLIB_TX] = sp2->segments_lists[2]; + else + { + vnet_buffer (b2)->ip.flow_hash = l2_flow_hash (b2); + vnet_buffer (b2)->ip.adj_index[VLIB_TX] = + sp2->segments_lists[(vnet_buffer (b2)->ip.flow_hash & + (vec_len (sp2->segments_lists) - 1))]; + } + + if (vec_len (sp3->segments_lists) == 1) + vnet_buffer (b3)->ip.adj_index[VLIB_TX] = sp3->segments_lists[3]; + else + { + vnet_buffer (b3)->ip.flow_hash = l2_flow_hash (b3); + vnet_buffer (b3)->ip.adj_index[VLIB_TX] = + sp3->segments_lists[(vnet_buffer (b3)->ip.flow_hash & + (vec_len (sp3->segments_lists) - 1))]; + } + + sl0 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b0)->ip.adj_index[VLIB_TX]); + sl1 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b1)->ip.adj_index[VLIB_TX]); + sl2 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b2)->ip.adj_index[VLIB_TX]); + sl3 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b3)->ip.adj_index[VLIB_TX]); + + ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl0->rewrite)); + ASSERT (b1->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl1->rewrite)); + ASSERT (b2->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl2->rewrite)); + ASSERT (b3->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl3->rewrite)); + + en0 = vlib_buffer_get_current (b0); + en1 = vlib_buffer_get_current (b1); + 
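/* en0..en3 point at the original Ethernet headers; the SL rewrite (outer IPv6 header and SRH) is copied immediately in front of them below and each buffer is advanced back over it, so the original frame is carried intact behind the new headers */ +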
en2 = vlib_buffer_get_current (b2); + en3 = vlib_buffer_get_current (b3); + + clib_memcpy (((u8 *) en0) - vec_len (sl0->rewrite), sl0->rewrite, + vec_len (sl0->rewrite)); + clib_memcpy (((u8 *) en1) - vec_len (sl1->rewrite), sl1->rewrite, + vec_len (sl1->rewrite)); + clib_memcpy (((u8 *) en2) - vec_len (sl2->rewrite), sl2->rewrite, + vec_len (sl2->rewrite)); + clib_memcpy (((u8 *) en3) - vec_len (sl3->rewrite), sl3->rewrite, + vec_len (sl3->rewrite)); + + vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite)); + vlib_buffer_advance (b1, -(word) vec_len (sl1->rewrite)); + vlib_buffer_advance (b2, -(word) vec_len (sl2->rewrite)); + vlib_buffer_advance (b3, -(word) vec_len (sl3->rewrite)); + + ip0 = vlib_buffer_get_current (b0); + ip1 = vlib_buffer_get_current (b1); + ip2 = vlib_buffer_get_current (b2); + ip3 = vlib_buffer_get_current (b3); + + ip0->payload_length = + clib_host_to_net_u16 (b0->current_length - sizeof (ip6_header_t)); + ip1->payload_length = + clib_host_to_net_u16 (b1->current_length - sizeof (ip6_header_t)); + ip2->payload_length = + clib_host_to_net_u16 (b2->current_length - sizeof (ip6_header_t)); + ip3->payload_length = + clib_host_to_net_u16 (b3->current_length - sizeof (ip6_header_t)); + + sr0 = (void *) (ip0 + 1); + sr1 = (void *) (ip1 + 1); + sr2 = (void *) (ip2 + 1); + sr3 = (void *) (ip3 + 1); + + sr0->protocol = sr1->protocol = sr2->protocol = sr3->protocol = + IP_PROTOCOL_IP6_NONXT; + + /* Which Traffic class and flow label do I set ? */ + //ip0->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32(0|((6&0xF)<<28)|((ip0_encap->tos&0xFF)<<20)); + + if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE))) + { + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip0->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip0->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + + if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b1, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip1->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip1->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + + if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b2, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip2->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip2->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + + if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b3, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip3->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip3->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + } + + encap_pkts += 4; + vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next, + n_left_to_next, bi0, bi1, bi2, bi3, + next0, next1, next2, next3); + } + + /* Single loop for potentially the last three packets */ + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t *b0; + ip6_header_t *ip0 = 0; + ip6_sr_header_t *sr0; + ethernet_header_t *en0; + ip6_sr_policy_t *sp0; + ip6_sr_sl_t *sl0; + u32 next0 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + b0 = vlib_get_buffer (vm, bi0); + + /* Find the SR 
policy */ + sp0 = pool_elt_at_index (sm->sr_policies, + sm->sw_iface_sr_policies[vnet_buffer + (b0)->sw_if_index + [VLIB_RX]]); + + /* In case there is more than one SL, LB among them */ + if (vec_len (sp0->segments_lists) == 1) + vnet_buffer (b0)->ip.adj_index[VLIB_TX] = sp0->segments_lists[0]; + else + { + vnet_buffer (b0)->ip.flow_hash = l2_flow_hash (b0); + vnet_buffer (b0)->ip.adj_index[VLIB_TX] = + sp0->segments_lists[(vnet_buffer (b0)->ip.flow_hash & + (vec_len (sp0->segments_lists) - 1))]; + } + sl0 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b0)->ip.adj_index[VLIB_TX]); + ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl0->rewrite)); + + en0 = vlib_buffer_get_current (b0); + + clib_memcpy (((u8 *) en0) - vec_len (sl0->rewrite), sl0->rewrite, + vec_len (sl0->rewrite)); + + vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite)); + + ip0 = vlib_buffer_get_current (b0); + + ip0->payload_length = + clib_host_to_net_u16 (b0->current_length - sizeof (ip6_header_t)); + + sr0 = (void *) (ip0 + 1); + sr0->protocol = IP_PROTOCOL_IP6_NONXT; + + if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) && + PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip0->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip0->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + + encap_pkts++; + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + /* Update counters */ + vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_node.index, + SR_POLICY_REWRITE_ERROR_COUNTER_TOTAL, + encap_pkts); + vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_node.index, + SR_POLICY_REWRITE_ERROR_COUNTER_BSID, + bsid_pkts); + + return from_frame->n_vectors; +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (sr_policy_rewrite_encaps_l2_node) = { + .function = sr_policy_rewrite_encaps_l2, + .name = "sr-pl-rewrite-encaps-l2", + .vector_size = sizeof (u32), + .format_trace = format_sr_policy_rewrite_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + .n_errors = SR_POLICY_REWRITE_N_ERROR, + .error_strings = sr_policy_rewrite_error_strings, + .n_next_nodes = SR_POLICY_REWRITE_N_NEXT, + .next_nodes = { +#define _(s,n) [SR_POLICY_REWRITE_NEXT_##s] = n, + foreach_sr_policy_rewrite_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +/** + * @brief Graph node for applying a SR policy into a packet. SRH insertion. + */ +static uword +sr_policy_rewrite_insert (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + ip6_sr_main_t *sm = &sr_main; + u32 n_left_from, next_index, *from, *to_next; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + next_index = node->cached_next_index; + + int insert_pkts = 0, bsid_pkts = 0; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + /* Quad - Loop */ + while (n_left_from >= 8 && n_left_to_next >= 4) + { + u32 bi0, bi1, bi2, bi3; + vlib_buffer_t *b0, *b1, *b2, *b3; + u32 next0, next1, next2, next3; + next0 = next1 = next2 = next3 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP; + ip6_header_t *ip0, *ip1, *ip2, *ip3; + ip6_sr_header_t *sr0, *sr1, *sr2, *sr3; + ip6_sr_sl_t *sl0, *sl1, *sl2, *sl3; + u16 new_l0, new_l1, new_l2, new_l3; + + /* Prefetch next iteration. 
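In the insert path the existing IPv6 header is copied backwards to open room for the SRH and is then updated in place, hence the STORE hint on the packet data.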
*/ + { + vlib_buffer_t *p4, *p5, *p6, *p7; + + p4 = vlib_get_buffer (vm, from[4]); + p5 = vlib_get_buffer (vm, from[5]); + p6 = vlib_get_buffer (vm, from[6]); + p7 = vlib_get_buffer (vm, from[7]); + + /* Prefetch the buffer header and packet for the N+2 loop iteration */ + vlib_prefetch_buffer_header (p4, LOAD); + vlib_prefetch_buffer_header (p5, LOAD); + vlib_prefetch_buffer_header (p6, LOAD); + vlib_prefetch_buffer_header (p7, LOAD); + + CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE); + } + + to_next[0] = bi0 = from[0]; + to_next[1] = bi1 = from[1]; + to_next[2] = bi2 = from[2]; + to_next[3] = bi3 = from[3]; + from += 4; + to_next += 4; + n_left_from -= 4; + n_left_to_next -= 4; + + b0 = vlib_get_buffer (vm, bi0); + b1 = vlib_get_buffer (vm, bi1); + b2 = vlib_get_buffer (vm, bi2); + b3 = vlib_get_buffer (vm, bi3); + + sl0 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b0)->ip.adj_index[VLIB_TX]); + sl1 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b1)->ip.adj_index[VLIB_TX]); + sl2 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b2)->ip.adj_index[VLIB_TX]); + sl3 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b3)->ip.adj_index[VLIB_TX]); + ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl0->rewrite)); + ASSERT (b1->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl1->rewrite)); + ASSERT (b2->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl2->rewrite)); + ASSERT (b3->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl3->rewrite)); + + ip0 = vlib_buffer_get_current (b0); + ip1 = vlib_buffer_get_current (b1); + ip2 = vlib_buffer_get_current (b2); + ip3 = vlib_buffer_get_current (b3); + + if (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) + sr0 = + (ip6_sr_header_t *) (((void *) (ip0 + 1)) + + ip6_ext_header_len (ip0 + 1)); + else + sr0 = (ip6_sr_header_t *) (ip0 + 1); + + if (ip1->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) + sr1 = + (ip6_sr_header_t *) (((void *) (ip1 + 1)) + + ip6_ext_header_len (ip1 + 1)); + else + sr1 = (ip6_sr_header_t *) (ip1 + 1); + + if (ip2->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) + sr2 = + (ip6_sr_header_t *) (((void *) (ip2 + 1)) + + ip6_ext_header_len (ip2 + 1)); + else + sr2 = (ip6_sr_header_t *) (ip2 + 1); + + if (ip3->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) + sr3 = + (ip6_sr_header_t *) (((void *) (ip3 + 1)) + + ip6_ext_header_len (ip3 + 1)); + else + sr3 = (ip6_sr_header_t *) (ip3 + 1); + + clib_memcpy ((u8 *) ip0 - vec_len (sl0->rewrite), (u8 *) ip0, + (void *) sr0 - (void *) ip0); + clib_memcpy ((u8 *) ip1 - vec_len (sl1->rewrite), (u8 *) ip1, + (void *) sr1 - (void *) ip1); + clib_memcpy ((u8 *) ip2 - vec_len (sl2->rewrite), (u8 *) ip2, + (void *) sr2 - (void *) ip2); + clib_memcpy ((u8 *) ip3 - vec_len (sl3->rewrite), (u8 *) ip3, + (void *) sr3 - (void *) ip3); + + clib_memcpy (((u8 *) sr0 - vec_len (sl0->rewrite)), sl0->rewrite, + vec_len (sl0->rewrite)); + clib_memcpy (((u8 *) sr1 - vec_len (sl1->rewrite)), sl1->rewrite, + vec_len (sl1->rewrite)); + clib_memcpy (((u8 *) sr2 - vec_len (sl2->rewrite)), sl2->rewrite, + vec_len (sl2->rewrite)); + clib_memcpy (((u8 *) sr3 - vec_len (sl3->rewrite)), sl3->rewrite, + vec_len (sl3->rewrite)); + + vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite)); + vlib_buffer_advance (b1, -(word) vec_len (sl1->rewrite)); + vlib_buffer_advance (b2, 
-(word) vec_len (sl2->rewrite)); + vlib_buffer_advance (b3, -(word) vec_len (sl3->rewrite)); + + ip0 = ((void *) ip0) - vec_len (sl0->rewrite); + ip1 = ((void *) ip1) - vec_len (sl1->rewrite); + ip2 = ((void *) ip2) - vec_len (sl2->rewrite); + ip3 = ((void *) ip3) - vec_len (sl3->rewrite); + + ip0->hop_limit -= 1; + ip1->hop_limit -= 1; + ip2->hop_limit -= 1; + ip3->hop_limit -= 1; + + new_l0 = + clib_net_to_host_u16 (ip0->payload_length) + + vec_len (sl0->rewrite); + new_l1 = + clib_net_to_host_u16 (ip1->payload_length) + + vec_len (sl1->rewrite); + new_l2 = + clib_net_to_host_u16 (ip2->payload_length) + + vec_len (sl2->rewrite); + new_l3 = + clib_net_to_host_u16 (ip3->payload_length) + + vec_len (sl3->rewrite); + + ip0->payload_length = clib_host_to_net_u16 (new_l0); + ip1->payload_length = clib_host_to_net_u16 (new_l1); + ip2->payload_length = clib_host_to_net_u16 (new_l2); + ip3->payload_length = clib_host_to_net_u16 (new_l3); + + sr0 = ((void *) sr0) - vec_len (sl0->rewrite); + sr1 = ((void *) sr1) - vec_len (sl1->rewrite); + sr2 = ((void *) sr2) - vec_len (sl2->rewrite); + sr3 = ((void *) sr3) - vec_len (sl3->rewrite); + + sr0->segments->as_u64[0] = ip0->dst_address.as_u64[0]; + sr0->segments->as_u64[1] = ip0->dst_address.as_u64[1]; + sr1->segments->as_u64[0] = ip1->dst_address.as_u64[0]; + sr1->segments->as_u64[1] = ip1->dst_address.as_u64[1]; + sr2->segments->as_u64[0] = ip2->dst_address.as_u64[0]; + sr2->segments->as_u64[1] = ip2->dst_address.as_u64[1]; + sr3->segments->as_u64[0] = ip3->dst_address.as_u64[0]; + sr3->segments->as_u64[1] = ip3->dst_address.as_u64[1]; + + ip0->dst_address.as_u64[0] = + (sr0->segments + sr0->segments_left)->as_u64[0]; + ip0->dst_address.as_u64[1] = + (sr0->segments + sr0->segments_left)->as_u64[1]; + ip1->dst_address.as_u64[0] = + (sr1->segments + sr1->segments_left)->as_u64[0]; + ip1->dst_address.as_u64[1] = + (sr1->segments + sr1->segments_left)->as_u64[1]; + ip2->dst_address.as_u64[0] = + (sr2->segments + sr2->segments_left)->as_u64[0]; + ip2->dst_address.as_u64[1] = + (sr2->segments + sr2->segments_left)->as_u64[1]; + ip3->dst_address.as_u64[0] = + (sr3->segments + sr3->segments_left)->as_u64[0]; + ip3->dst_address.as_u64[1] = + (sr3->segments + sr3->segments_left)->as_u64[1]; + + ip6_ext_header_t *ip_ext; + if (ip0 + 1 == (void *) sr0) + { + sr0->protocol = ip0->protocol; + ip0->protocol = IP_PROTOCOL_IPV6_ROUTE; + } + else + { + ip_ext = (void *) (ip0 + 1); + sr0->protocol = ip_ext->next_hdr; + ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE; + } + + if (ip1 + 1 == (void *) sr1) + { + sr1->protocol = ip1->protocol; + ip1->protocol = IP_PROTOCOL_IPV6_ROUTE; + } + else + { + ip_ext = (void *) (ip2 + 1); + sr2->protocol = ip_ext->next_hdr; + ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE; + } + + if (ip2 + 1 == (void *) sr2) + { + sr2->protocol = ip2->protocol; + ip2->protocol = IP_PROTOCOL_IPV6_ROUTE; + } + else + { + ip_ext = (void *) (ip2 + 1); + sr2->protocol = ip_ext->next_hdr; + ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE; + } + + if (ip3 + 1 == (void *) sr3) + { + sr3->protocol = ip3->protocol; + ip3->protocol = IP_PROTOCOL_IPV6_ROUTE; + } + else + { + ip_ext = (void *) (ip3 + 1); + sr3->protocol = ip_ext->next_hdr; + ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE; + } + + insert_pkts += 4; + + if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE))) + { + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip0->src_address.as_u8, + sizeof 
(tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip0->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + + if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b1, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip1->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip1->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + + if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b2, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip2->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip2->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + + if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b3, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip3->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip3->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + } + + vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next, + n_left_to_next, bi0, bi1, bi2, bi3, + next0, next1, next2, next3); + } + + /* Single loop for potentially the last three packets */ + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t *b0; + ip6_header_t *ip0 = 0; + ip6_sr_header_t *sr0 = 0; + ip6_sr_sl_t *sl0; + u32 next0 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP; + u16 new_l0 = 0; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + sl0 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b0)->ip.adj_index[VLIB_TX]); + ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl0->rewrite)); + + ip0 = vlib_buffer_get_current (b0); + + if (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) + sr0 = + (ip6_sr_header_t *) (((void *) (ip0 + 1)) + + ip6_ext_header_len (ip0 + 1)); + else + sr0 = (ip6_sr_header_t *) (ip0 + 1); + + clib_memcpy ((u8 *) ip0 - vec_len (sl0->rewrite), (u8 *) ip0, + (void *) sr0 - (void *) ip0); + clib_memcpy (((u8 *) sr0 - vec_len (sl0->rewrite)), sl0->rewrite, + vec_len (sl0->rewrite)); + + vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite)); + + ip0 = ((void *) ip0) - vec_len (sl0->rewrite); + ip0->hop_limit -= 1; + new_l0 = + clib_net_to_host_u16 (ip0->payload_length) + + vec_len (sl0->rewrite); + ip0->payload_length = clib_host_to_net_u16 (new_l0); + + sr0 = ((void *) sr0) - vec_len (sl0->rewrite); + sr0->segments->as_u64[0] = ip0->dst_address.as_u64[0]; + sr0->segments->as_u64[1] = ip0->dst_address.as_u64[1]; + + ip0->dst_address.as_u64[0] = + (sr0->segments + sr0->segments_left)->as_u64[0]; + ip0->dst_address.as_u64[1] = + (sr0->segments + sr0->segments_left)->as_u64[1]; + + if (ip0 + 1 == (void *) sr0) + { + sr0->protocol = ip0->protocol; + ip0->protocol = IP_PROTOCOL_IPV6_ROUTE; + } + else + { + ip6_ext_header_t *ip_ext = (void *) (ip0 + 1); + sr0->protocol = ip_ext->next_hdr; + ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE; + } + + if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) && + PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip0->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip0->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + + insert_pkts++; + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, bi0, 
next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + /* Update counters */ + vlib_node_increment_counter (vm, sr_policy_rewrite_insert_node.index, + SR_POLICY_REWRITE_ERROR_COUNTER_TOTAL, + insert_pkts); + vlib_node_increment_counter (vm, sr_policy_rewrite_insert_node.index, + SR_POLICY_REWRITE_ERROR_COUNTER_BSID, + bsid_pkts); + return from_frame->n_vectors; +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (sr_policy_rewrite_insert_node) = { + .function = sr_policy_rewrite_insert, + .name = "sr-pl-rewrite-insert", + .vector_size = sizeof (u32), + .format_trace = format_sr_policy_rewrite_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + .n_errors = SR_POLICY_REWRITE_N_ERROR, + .error_strings = sr_policy_rewrite_error_strings, + .n_next_nodes = SR_POLICY_REWRITE_N_NEXT, + .next_nodes = { +#define _(s,n) [SR_POLICY_REWRITE_NEXT_##s] = n, + foreach_sr_policy_rewrite_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +/** + * @brief Graph node for applying a SR policy into a packet. BSID - SRH insertion. + */ +static uword +sr_policy_rewrite_b_insert (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + ip6_sr_main_t *sm = &sr_main; + u32 n_left_from, next_index, *from, *to_next; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + next_index = node->cached_next_index; + + int insert_pkts = 0, bsid_pkts = 0; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + /* Quad - Loop */ + while (n_left_from >= 8 && n_left_to_next >= 4) + { + u32 bi0, bi1, bi2, bi3; + vlib_buffer_t *b0, *b1, *b2, *b3; + u32 next0, next1, next2, next3; + next0 = next1 = next2 = next3 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP; + ip6_header_t *ip0, *ip1, *ip2, *ip3; + ip6_sr_header_t *sr0, *sr1, *sr2, *sr3; + ip6_sr_sl_t *sl0, *sl1, *sl2, *sl3; + u16 new_l0, new_l1, new_l2, new_l3; + + /* Prefetch next iteration. 
*/ + { + vlib_buffer_t *p4, *p5, *p6, *p7; + + p4 = vlib_get_buffer (vm, from[4]); + p5 = vlib_get_buffer (vm, from[5]); + p6 = vlib_get_buffer (vm, from[6]); + p7 = vlib_get_buffer (vm, from[7]); + + /* Prefetch the buffer header and packet for the N+2 loop iteration */ + vlib_prefetch_buffer_header (p4, LOAD); + vlib_prefetch_buffer_header (p5, LOAD); + vlib_prefetch_buffer_header (p6, LOAD); + vlib_prefetch_buffer_header (p7, LOAD); + + CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE); + } + + to_next[0] = bi0 = from[0]; + to_next[1] = bi1 = from[1]; + to_next[2] = bi2 = from[2]; + to_next[3] = bi3 = from[3]; + from += 4; + to_next += 4; + n_left_from -= 4; + n_left_to_next -= 4; + + b0 = vlib_get_buffer (vm, bi0); + b1 = vlib_get_buffer (vm, bi1); + b2 = vlib_get_buffer (vm, bi2); + b3 = vlib_get_buffer (vm, bi3); + + sl0 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b0)->ip.adj_index[VLIB_TX]); + sl1 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b1)->ip.adj_index[VLIB_TX]); + sl2 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b2)->ip.adj_index[VLIB_TX]); + sl3 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b3)->ip.adj_index[VLIB_TX]); + ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl0->rewrite_bsid)); + ASSERT (b1->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl1->rewrite_bsid)); + ASSERT (b2->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl2->rewrite_bsid)); + ASSERT (b3->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl3->rewrite_bsid)); + + ip0 = vlib_buffer_get_current (b0); + ip1 = vlib_buffer_get_current (b1); + ip2 = vlib_buffer_get_current (b2); + ip3 = vlib_buffer_get_current (b3); + + if (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) + sr0 = + (ip6_sr_header_t *) (((void *) (ip0 + 1)) + + ip6_ext_header_len (ip0 + 1)); + else + sr0 = (ip6_sr_header_t *) (ip0 + 1); + + if (ip1->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) + sr1 = + (ip6_sr_header_t *) (((void *) (ip1 + 1)) + + ip6_ext_header_len (ip1 + 1)); + else + sr1 = (ip6_sr_header_t *) (ip1 + 1); + + if (ip2->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) + sr2 = + (ip6_sr_header_t *) (((void *) (ip2 + 1)) + + ip6_ext_header_len (ip2 + 1)); + else + sr2 = (ip6_sr_header_t *) (ip2 + 1); + + if (ip3->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) + sr3 = + (ip6_sr_header_t *) (((void *) (ip3 + 1)) + + ip6_ext_header_len (ip3 + 1)); + else + sr3 = (ip6_sr_header_t *) (ip3 + 1); + + clib_memcpy ((u8 *) ip0 - vec_len (sl0->rewrite_bsid), (u8 *) ip0, + (void *) sr0 - (void *) ip0); + clib_memcpy ((u8 *) ip1 - vec_len (sl1->rewrite_bsid), (u8 *) ip1, + (void *) sr1 - (void *) ip1); + clib_memcpy ((u8 *) ip2 - vec_len (sl2->rewrite_bsid), (u8 *) ip2, + (void *) sr2 - (void *) ip2); + clib_memcpy ((u8 *) ip3 - vec_len (sl3->rewrite_bsid), (u8 *) ip3, + (void *) sr3 - (void *) ip3); + + clib_memcpy (((u8 *) sr0 - vec_len (sl0->rewrite_bsid)), + sl0->rewrite_bsid, vec_len (sl0->rewrite_bsid)); + clib_memcpy (((u8 *) sr1 - vec_len (sl1->rewrite_bsid)), + sl1->rewrite_bsid, vec_len (sl1->rewrite_bsid)); + clib_memcpy (((u8 *) sr2 - vec_len (sl2->rewrite_bsid)), + sl2->rewrite_bsid, vec_len (sl2->rewrite_bsid)); + clib_memcpy (((u8 *) sr3 - vec_len (sl3->rewrite_bsid)), + sl3->rewrite_bsid, vec_len (sl3->rewrite_bsid)); + + vlib_buffer_advance (b0, -(word) vec_len 
(sl0->rewrite_bsid)); + vlib_buffer_advance (b1, -(word) vec_len (sl1->rewrite_bsid)); + vlib_buffer_advance (b2, -(word) vec_len (sl2->rewrite_bsid)); + vlib_buffer_advance (b3, -(word) vec_len (sl3->rewrite_bsid)); + + ip0 = ((void *) ip0) - vec_len (sl0->rewrite_bsid); + ip1 = ((void *) ip1) - vec_len (sl1->rewrite_bsid); + ip2 = ((void *) ip2) - vec_len (sl2->rewrite_bsid); + ip3 = ((void *) ip3) - vec_len (sl3->rewrite_bsid); + + ip0->hop_limit -= 1; + ip1->hop_limit -= 1; + ip2->hop_limit -= 1; + ip3->hop_limit -= 1; + + new_l0 = + clib_net_to_host_u16 (ip0->payload_length) + + vec_len (sl0->rewrite_bsid); + new_l1 = + clib_net_to_host_u16 (ip1->payload_length) + + vec_len (sl1->rewrite_bsid); + new_l2 = + clib_net_to_host_u16 (ip2->payload_length) + + vec_len (sl2->rewrite_bsid); + new_l3 = + clib_net_to_host_u16 (ip3->payload_length) + + vec_len (sl3->rewrite_bsid); + + ip0->payload_length = clib_host_to_net_u16 (new_l0); + ip1->payload_length = clib_host_to_net_u16 (new_l1); + ip2->payload_length = clib_host_to_net_u16 (new_l2); + ip3->payload_length = clib_host_to_net_u16 (new_l3); + + sr0 = ((void *) sr0) - vec_len (sl0->rewrite_bsid); + sr1 = ((void *) sr1) - vec_len (sl1->rewrite_bsid); + sr2 = ((void *) sr2) - vec_len (sl2->rewrite_bsid); + sr3 = ((void *) sr3) - vec_len (sl3->rewrite_bsid); + + ip0->dst_address.as_u64[0] = + (sr0->segments + sr0->segments_left)->as_u64[0]; + ip0->dst_address.as_u64[1] = + (sr0->segments + sr0->segments_left)->as_u64[1]; + ip1->dst_address.as_u64[0] = + (sr1->segments + sr1->segments_left)->as_u64[0]; + ip1->dst_address.as_u64[1] = + (sr1->segments + sr1->segments_left)->as_u64[1]; + ip2->dst_address.as_u64[0] = + (sr2->segments + sr2->segments_left)->as_u64[0]; + ip2->dst_address.as_u64[1] = + (sr2->segments + sr2->segments_left)->as_u64[1]; + ip3->dst_address.as_u64[0] = + (sr3->segments + sr3->segments_left)->as_u64[0]; + ip3->dst_address.as_u64[1] = + (sr3->segments + sr3->segments_left)->as_u64[1]; + + ip6_ext_header_t *ip_ext; + if (ip0 + 1 == (void *) sr0) + { + sr0->protocol = ip0->protocol; + ip0->protocol = IP_PROTOCOL_IPV6_ROUTE; + } + else + { + ip_ext = (void *) (ip0 + 1); + sr0->protocol = ip_ext->next_hdr; + ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE; + } + + if (ip1 + 1 == (void *) sr1) + { + sr1->protocol = ip1->protocol; + ip1->protocol = IP_PROTOCOL_IPV6_ROUTE; + } + else + { + ip_ext = (void *) (ip2 + 1); + sr2->protocol = ip_ext->next_hdr; + ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE; + } + + if (ip2 + 1 == (void *) sr2) + { + sr2->protocol = ip2->protocol; + ip2->protocol = IP_PROTOCOL_IPV6_ROUTE; + } + else + { + ip_ext = (void *) (ip2 + 1); + sr2->protocol = ip_ext->next_hdr; + ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE; + } + + if (ip3 + 1 == (void *) sr3) + { + sr3->protocol = ip3->protocol; + ip3->protocol = IP_PROTOCOL_IPV6_ROUTE; + } + else + { + ip_ext = (void *) (ip3 + 1); + sr3->protocol = ip_ext->next_hdr; + ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE; + } + + insert_pkts += 4; + + if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE))) + { + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip0->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip0->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + + if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b1, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, 
ip1->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip1->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + + if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b2, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip2->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip2->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + + if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b3, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip3->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip3->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + } + + vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next, + n_left_to_next, bi0, bi1, bi2, bi3, + next0, next1, next2, next3); + } + + /* Single loop for potentially the last three packets */ + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t *b0; + ip6_header_t *ip0 = 0; + ip6_sr_header_t *sr0 = 0; + ip6_sr_sl_t *sl0; + u32 next0 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP; + u16 new_l0 = 0; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + sl0 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b0)->ip.adj_index[VLIB_TX]); + ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl0->rewrite_bsid)); + + ip0 = vlib_buffer_get_current (b0); + + if (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) + sr0 = + (ip6_sr_header_t *) (((void *) (ip0 + 1)) + + ip6_ext_header_len (ip0 + 1)); + else + sr0 = (ip6_sr_header_t *) (ip0 + 1); + + clib_memcpy ((u8 *) ip0 - vec_len (sl0->rewrite_bsid), (u8 *) ip0, + (void *) sr0 - (void *) ip0); + clib_memcpy (((u8 *) sr0 - vec_len (sl0->rewrite_bsid)), + sl0->rewrite_bsid, vec_len (sl0->rewrite_bsid)); + + vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite_bsid)); + + ip0 = ((void *) ip0) - vec_len (sl0->rewrite_bsid); + ip0->hop_limit -= 1; + new_l0 = + clib_net_to_host_u16 (ip0->payload_length) + + vec_len (sl0->rewrite_bsid); + ip0->payload_length = clib_host_to_net_u16 (new_l0); + + sr0 = ((void *) sr0) - vec_len (sl0->rewrite_bsid); + + ip0->dst_address.as_u64[0] = + (sr0->segments + sr0->segments_left)->as_u64[0]; + ip0->dst_address.as_u64[1] = + (sr0->segments + sr0->segments_left)->as_u64[1]; + + if (ip0 + 1 == (void *) sr0) + { + sr0->protocol = ip0->protocol; + ip0->protocol = IP_PROTOCOL_IPV6_ROUTE; + } + else + { + ip6_ext_header_t *ip_ext = (void *) (ip0 + 1); + sr0->protocol = ip_ext->next_hdr; + ip_ext->next_hdr = IP_PROTOCOL_IPV6_ROUTE; + } + + if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) && + PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip0->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip0->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + + insert_pkts++; + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + /* Update counters */ + vlib_node_increment_counter (vm, sr_policy_rewrite_insert_node.index, + SR_POLICY_REWRITE_ERROR_COUNTER_TOTAL, + insert_pkts); + vlib_node_increment_counter (vm, sr_policy_rewrite_insert_node.index, + SR_POLICY_REWRITE_ERROR_COUNTER_BSID, 
+ bsid_pkts); + return from_frame->n_vectors; +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (sr_policy_rewrite_b_insert_node) = { + .function = sr_policy_rewrite_b_insert, + .name = "sr-pl-rewrite-b-insert", + .vector_size = sizeof (u32), + .format_trace = format_sr_policy_rewrite_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + .n_errors = SR_POLICY_REWRITE_N_ERROR, + .error_strings = sr_policy_rewrite_error_strings, + .n_next_nodes = SR_POLICY_REWRITE_N_NEXT, + .next_nodes = { +#define _(s,n) [SR_POLICY_REWRITE_NEXT_##s] = n, + foreach_sr_policy_rewrite_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +/** + * @brief Function BSID encapsulation + */ +static_always_inline void +end_bsid_encaps_srh_processing (vlib_node_runtime_t * node, + vlib_buffer_t * b0, + ip6_header_t * ip0, + ip6_sr_header_t * sr0, u32 * next0) +{ + ip6_address_t *new_dst0; + + if (PREDICT_FALSE (!sr0)) + goto error_bsid_encaps; + + if (PREDICT_TRUE (sr0->type == ROUTING_HEADER_TYPE_SR)) + { + if (PREDICT_TRUE (sr0->segments_left != 0)) + { + sr0->segments_left -= 1; + new_dst0 = (ip6_address_t *) (sr0->segments); + new_dst0 += sr0->segments_left; + ip0->dst_address.as_u64[0] = new_dst0->as_u64[0]; + ip0->dst_address.as_u64[1] = new_dst0->as_u64[1]; + return; + } + } + +error_bsid_encaps: + *next0 = SR_POLICY_REWRITE_NEXT_ERROR; + b0->error = node->errors[SR_POLICY_REWRITE_ERROR_BSID_ZERO]; +} + +/** + * @brief Graph node for applying a SR policy BSID - Encapsulation + */ +static uword +sr_policy_rewrite_b_encaps (vlib_main_t * vm, vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + ip6_sr_main_t *sm = &sr_main; + u32 n_left_from, next_index, *from, *to_next; + + from = vlib_frame_vector_args (from_frame); + n_left_from = from_frame->n_vectors; + + next_index = node->cached_next_index; + + int encap_pkts = 0, bsid_pkts = 0; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + /* Quad - Loop */ + while (n_left_from >= 8 && n_left_to_next >= 4) + { + u32 bi0, bi1, bi2, bi3; + vlib_buffer_t *b0, *b1, *b2, *b3; + u32 next0, next1, next2, next3; + next0 = next1 = next2 = next3 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP; + ip6_header_t *ip0, *ip1, *ip2, *ip3; + ip6_header_t *ip0_encap, *ip1_encap, *ip2_encap, *ip3_encap; + ip6_sr_header_t *sr0, *sr1, *sr2, *sr3; + ip6_ext_header_t *prev0, *prev1, *prev2, *prev3; + ip6_sr_sl_t *sl0, *sl1, *sl2, *sl3; + + /* Prefetch next iteration. 
*/ + { + vlib_buffer_t *p4, *p5, *p6, *p7; + + p4 = vlib_get_buffer (vm, from[4]); + p5 = vlib_get_buffer (vm, from[5]); + p6 = vlib_get_buffer (vm, from[6]); + p7 = vlib_get_buffer (vm, from[7]); + + /* Prefetch the buffer header and packet for the N+2 loop iteration */ + vlib_prefetch_buffer_header (p4, LOAD); + vlib_prefetch_buffer_header (p5, LOAD); + vlib_prefetch_buffer_header (p6, LOAD); + vlib_prefetch_buffer_header (p7, LOAD); + + CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE); + CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE); + } + + to_next[0] = bi0 = from[0]; + to_next[1] = bi1 = from[1]; + to_next[2] = bi2 = from[2]; + to_next[3] = bi3 = from[3]; + from += 4; + to_next += 4; + n_left_from -= 4; + n_left_to_next -= 4; + + b0 = vlib_get_buffer (vm, bi0); + b1 = vlib_get_buffer (vm, bi1); + b2 = vlib_get_buffer (vm, bi2); + b3 = vlib_get_buffer (vm, bi3); + + sl0 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b0)->ip.adj_index[VLIB_TX]); + sl1 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b1)->ip.adj_index[VLIB_TX]); + sl2 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b2)->ip.adj_index[VLIB_TX]); + sl3 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b3)->ip.adj_index[VLIB_TX]); + ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl0->rewrite)); + ASSERT (b1->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl1->rewrite)); + ASSERT (b2->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl2->rewrite)); + ASSERT (b3->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl3->rewrite)); + + ip0_encap = vlib_buffer_get_current (b0); + ip1_encap = vlib_buffer_get_current (b1); + ip2_encap = vlib_buffer_get_current (b2); + ip3_encap = vlib_buffer_get_current (b3); + + ip6_ext_header_find_t (ip0_encap, prev0, sr0, + IP_PROTOCOL_IPV6_ROUTE); + ip6_ext_header_find_t (ip1_encap, prev1, sr1, + IP_PROTOCOL_IPV6_ROUTE); + ip6_ext_header_find_t (ip2_encap, prev2, sr2, + IP_PROTOCOL_IPV6_ROUTE); + ip6_ext_header_find_t (ip3_encap, prev3, sr3, + IP_PROTOCOL_IPV6_ROUTE); + + end_bsid_encaps_srh_processing (node, b0, ip0_encap, sr0, &next0); + end_bsid_encaps_srh_processing (node, b1, ip1_encap, sr1, &next1); + end_bsid_encaps_srh_processing (node, b2, ip2_encap, sr2, &next2); + end_bsid_encaps_srh_processing (node, b3, ip3_encap, sr3, &next3); + + clib_memcpy (((u8 *) ip0_encap) - vec_len (sl0->rewrite), + sl0->rewrite, vec_len (sl0->rewrite)); + clib_memcpy (((u8 *) ip1_encap) - vec_len (sl1->rewrite), + sl1->rewrite, vec_len (sl1->rewrite)); + clib_memcpy (((u8 *) ip2_encap) - vec_len (sl2->rewrite), + sl2->rewrite, vec_len (sl2->rewrite)); + clib_memcpy (((u8 *) ip3_encap) - vec_len (sl3->rewrite), + sl3->rewrite, vec_len (sl3->rewrite)); + + vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite)); + vlib_buffer_advance (b1, -(word) vec_len (sl1->rewrite)); + vlib_buffer_advance (b2, -(word) vec_len (sl2->rewrite)); + vlib_buffer_advance (b3, -(word) vec_len (sl3->rewrite)); + + ip0 = vlib_buffer_get_current (b0); + ip1 = vlib_buffer_get_current (b1); + ip2 = vlib_buffer_get_current (b2); + ip3 = vlib_buffer_get_current (b3); + + encaps_processing_v6 (node, b0, ip0, ip0_encap); + encaps_processing_v6 (node, b1, ip1, ip1_encap); + encaps_processing_v6 (node, b2, ip2, ip2_encap); + encaps_processing_v6 (node, b3, ip3, ip3_encap); + + if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE))) + { + if 
(PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip0->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip0->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + + if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b1, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip1->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip1->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + + if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b2, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip2->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip2->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + + if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b3, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip3->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip3->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + } + + encap_pkts += 4; + vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next, + n_left_to_next, bi0, bi1, bi2, bi3, + next0, next1, next2, next3); + } + + /* Single loop for potentially the last three packets */ + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t *b0; + ip6_header_t *ip0 = 0, *ip0_encap = 0; + ip6_ext_header_t *prev0; + ip6_sr_header_t *sr0; + ip6_sr_sl_t *sl0; + u32 next0 = SR_POLICY_REWRITE_NEXT_IP6_LOOKUP; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + b0 = vlib_get_buffer (vm, bi0); + + sl0 = + pool_elt_at_index (sm->sid_lists, + vnet_buffer (b0)->ip.adj_index[VLIB_TX]); + ASSERT (b0->current_data + VLIB_BUFFER_PRE_DATA_SIZE >= + vec_len (sl0->rewrite)); + + ip0_encap = vlib_buffer_get_current (b0); + ip6_ext_header_find_t (ip0_encap, prev0, sr0, + IP_PROTOCOL_IPV6_ROUTE); + end_bsid_encaps_srh_processing (node, b0, ip0_encap, sr0, &next0); + + clib_memcpy (((u8 *) ip0_encap) - vec_len (sl0->rewrite), + sl0->rewrite, vec_len (sl0->rewrite)); + vlib_buffer_advance (b0, -(word) vec_len (sl0->rewrite)); + + ip0 = vlib_buffer_get_current (b0); + + encaps_processing_v6 (node, b0, ip0, ip0_encap); + + if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) && + PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_policy_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, ip0->src_address.as_u8, + sizeof (tr->src.as_u8)); + clib_memcpy (tr->dst.as_u8, ip0->dst_address.as_u8, + sizeof (tr->dst.as_u8)); + } + + encap_pkts++; + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + /* Update counters */ + vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_node.index, + SR_POLICY_REWRITE_ERROR_COUNTER_TOTAL, + encap_pkts); + vlib_node_increment_counter (vm, sr_policy_rewrite_encaps_node.index, + SR_POLICY_REWRITE_ERROR_COUNTER_BSID, + bsid_pkts); + + return from_frame->n_vectors; +} + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE (sr_policy_rewrite_b_encaps_node) = { + .function = sr_policy_rewrite_b_encaps, + .name = "sr-pl-rewrite-b-encaps", + .vector_size = sizeof (u32), + .format_trace = 
format_sr_policy_rewrite_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + .n_errors = SR_POLICY_REWRITE_N_ERROR, + .error_strings = sr_policy_rewrite_error_strings, + .n_next_nodes = SR_POLICY_REWRITE_N_NEXT, + .next_nodes = { +#define _(s,n) [SR_POLICY_REWRITE_NEXT_##s] = n, + foreach_sr_policy_rewrite_next +#undef _ + }, +}; +/* *INDENT-ON* */ + +/*************************** SR Segment Lists DPOs ****************************/ +static u8 * +format_sr_segment_list_dpo (u8 * s, va_list * args) +{ + ip6_sr_main_t *sm = &sr_main; + ip6_address_t *addr; + ip6_sr_sl_t *sl; + + index_t index = va_arg (*args, index_t); + CLIB_UNUSED (u32 indent) = va_arg (*args, u32); + s = format (s, "SR: Segment List index:[%d]", index); + s = format (s, "\n\tSegments:"); + + sl = pool_elt_at_index (sm->sid_lists, index); + + s = format (s, "< "); + vec_foreach (addr, sl->segments) + { + s = format (s, "%U, ", format_ip6_address, addr); + } + s = format (s, "\b\b > - "); + s = format (s, "Weight: %u", sl->weight); + + return s; +} + +const static dpo_vft_t sr_policy_rewrite_vft = { + .dv_lock = sr_dpo_lock, + .dv_unlock = sr_dpo_unlock, + .dv_format = format_sr_segment_list_dpo, +}; + +const static char *const sr_pr_encaps_ip6_nodes[] = { + "sr-pl-rewrite-encaps", + NULL, +}; + +const static char *const sr_pr_encaps_ip4_nodes[] = { + "sr-pl-rewrite-encaps-v4", + NULL, +}; + +const static char *const *const sr_pr_encaps_nodes[DPO_PROTO_NUM] = { + [DPO_PROTO_IP6] = sr_pr_encaps_ip6_nodes, + [DPO_PROTO_IP4] = sr_pr_encaps_ip4_nodes, +}; + +const static char *const sr_pr_insert_ip6_nodes[] = { + "sr-pl-rewrite-insert", + NULL, +}; + +const static char *const *const sr_pr_insert_nodes[DPO_PROTO_NUM] = { + [DPO_PROTO_IP6] = sr_pr_insert_ip6_nodes, +}; + +const static char *const sr_pr_bsid_insert_ip6_nodes[] = { + "sr-pl-rewrite-b-insert", + NULL, +}; + +const static char *const *const sr_pr_bsid_insert_nodes[DPO_PROTO_NUM] = { + [DPO_PROTO_IP6] = sr_pr_bsid_insert_ip6_nodes, +}; + +const static char *const sr_pr_bsid_encaps_ip6_nodes[] = { + "sr-pl-rewrite-b-encaps", + NULL, +}; + +const static char *const *const sr_pr_bsid_encaps_nodes[DPO_PROTO_NUM] = { + [DPO_PROTO_IP6] = sr_pr_bsid_encaps_ip6_nodes, +}; + +/********************* SR Policy Rewrite initialization ***********************/ +/** + * @brief SR Policy Rewrite initialization + */ +clib_error_t * +sr_policy_rewrite_init (vlib_main_t * vm) +{ + ip6_sr_main_t *sm = &sr_main; + + /* Init memory for sr policy keys (bsid <-> ip6_address_t) */ + mhash_init (&sm->sr_policies_index_hash, sizeof (uword), + sizeof (ip6_address_t)); + + /* Init SR VPO DPOs type */ + sr_pr_encaps_dpo_type = + dpo_register_new_type (&sr_policy_rewrite_vft, sr_pr_encaps_nodes); + + sr_pr_insert_dpo_type = + dpo_register_new_type (&sr_policy_rewrite_vft, sr_pr_insert_nodes); + + sr_pr_bsid_encaps_dpo_type = + dpo_register_new_type (&sr_policy_rewrite_vft, sr_pr_bsid_encaps_nodes); + + sr_pr_bsid_insert_dpo_type = + dpo_register_new_type (&sr_policy_rewrite_vft, sr_pr_bsid_insert_nodes); + + /* Register the L2 encaps node used in HW redirect */ + sm->l2_sr_policy_rewrite_index = sr_policy_rewrite_encaps_node.index; + + sm->fib_table_ip6 = (u32) ~ 0; + sm->fib_table_ip4 = (u32) ~ 0; + + return 0; +} + +VLIB_INIT_FUNCTION (sr_policy_rewrite_init); + + +/* +* fd.io coding-style-patch-verification: ON +* +* Local Variables: +* eval: (c-set-style "gnu") +* End: +*/ diff --git a/src/vnet/srv6/sr_steering.c b/src/vnet/srv6/sr_steering.c new file mode 100755 index 00000000..a7903751 --- /dev/null 
+++ b/src/vnet/srv6/sr_steering.c
@@ -0,0 +1,573 @@
+/*
+ * sr_steering.c: ipv6 segment routing steering into SR policy
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file
+ * @brief Packet steering into SR Policies
+ *
+ * This file is in charge of handling the FIB appropriately to steer packets
+ * through SR Policies as defined in 'sr_policy_rewrite.c'. Notice that here
+ * we are only doing steering. SR policy application is done in
+ * sr_policy_rewrite.c
+ *
+ * Supports:
+ *  - Steering of IPv6 traffic Destination Address based
+ *  - Steering of IPv4 traffic Destination Address based
+ *  - Steering of L2 frames, interface based (sw interface)
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+/**
+ * @brief Steer L2 and L3 traffic through a given SR policy
+ *
+ * @param is_del non-zero to delete the steering policy
+ * @param bsid is the bindingSID of the SR Policy (alt to sr_policy_index)
+ * @param sr_policy_index is the index of the SR Policy (alt to bsid)
+ * @param table_id is the VRF where to install the FIB entry for the BSID
+ * @param prefix is the IPv4/v6 address for L3 traffic type
+ * @param mask_width is the mask for L3 traffic type
+ * @param sw_if_index is the incoming interface for L2 traffic
+ * @param traffic_type describes the type of traffic
+ *
+ * @return 0 if correct, else error
+ */
+int
+sr_steering_policy (int is_del, ip6_address_t * bsid, u32 sr_policy_index,
+                    u32 table_id, ip46_address_t * prefix, u32 mask_width,
+                    u32 sw_if_index, u8 traffic_type)
+{
+  ip6_sr_main_t *sm = &sr_main;
+  sr_steering_key_t key;
+  ip6_sr_steering_policy_t *steer_pl;
+  fib_prefix_t pfx = { 0 };
+
+  ip6_sr_policy_t *sr_policy = 0;
+  uword *p = 0;
+
+  memset (&key, 0, sizeof (sr_steering_key_t));
+
+  /* Compute the steer policy key */
+  if (traffic_type == SR_STEER_IPV4 || traffic_type == SR_STEER_IPV6)
+    {
+      key.l3.prefix.as_u64[0] = prefix->as_u64[0];
+      key.l3.prefix.as_u64[1] = prefix->as_u64[1];
+      key.l3.mask_width = mask_width;
+      key.l3.fib_table = (table_id != (u32) ~ 0 ? table_id : 0);
+    }
+  else if (traffic_type == SR_STEER_L2)
+    {
+      key.l2.sw_if_index = sw_if_index;
+
+      /* Sanitise the SW_IF_INDEX */
+      if (pool_is_free_index (sm->vnet_main->interface_main.sw_interfaces,
+                              sw_if_index))
+        return -3;
+
+      vnet_sw_interface_t *sw =
+        vnet_get_sw_interface (sm->vnet_main, sw_if_index);
+      if (sw->type != VNET_SW_INTERFACE_TYPE_HARDWARE)
+        return -3;
+    }
+  else
+    return -1;
+
+  key.traffic_type = traffic_type;
+
+  /* Search for the item */
+  p = mhash_get (&sm->sr_steer_policies_hash, &key);
+
+  if (p)
+    {
+      /* Retrieve Steer Policy function */
+      steer_pl = pool_elt_at_index (sm->steer_policies, p[0]);
+
+      if (is_del)
+        {
+          if (steer_pl->classify.traffic_type == SR_STEER_IPV6)
+            {
+              /* Remove FIB entry */
+              pfx.fp_proto = FIB_PROTOCOL_IP6;
+              pfx.fp_len = steer_pl->classify.l3.mask_width;
+              pfx.fp_addr.ip6 = steer_pl->classify.l3.prefix.ip6;
+
+              fib_table_entry_delete (fib_table_find
+                                      (FIB_PROTOCOL_IP6,
+                                       steer_pl->classify.l3.fib_table),
+                                      &pfx, FIB_SOURCE_SR);
+            }
+          else if (steer_pl->classify.traffic_type == SR_STEER_IPV4)
+            {
+              /* Remove FIB entry */
+              pfx.fp_proto = FIB_PROTOCOL_IP4;
+              pfx.fp_len = steer_pl->classify.l3.mask_width;
+              pfx.fp_addr.ip4 = steer_pl->classify.l3.prefix.ip4;
+
+              fib_table_entry_delete (fib_table_find
+                                      (FIB_PROTOCOL_IP4,
+                                       steer_pl->classify.l3.fib_table), &pfx,
+                                      FIB_SOURCE_SR);
+            }
+          else if (steer_pl->classify.traffic_type == SR_STEER_L2)
+            {
+              /* Remove HW redirection */
+              vnet_feature_enable_disable ("device-input",
+                                           "sr-pl-rewrite-encaps-l2",
+                                           sw_if_index, 0, 0, 0);
+              sm->sw_iface_sr_policies[sw_if_index] = ~(u32) 0;
+
+              /* Remove promiscuous mode from interface */
+              vnet_main_t *vnm = vnet_get_main ();
+              ethernet_main_t *em = &ethernet_main;
+              ethernet_interface_t *eif =
+                ethernet_get_interface (em, sw_if_index);
+
+              if (!eif)
+                goto cleanup_error_redirection;
+
+              ethernet_set_flags (vnm, sw_if_index, 0);
+            }
+
+          /* Delete SR steering policy entry */
+          pool_put (sm->steer_policies, steer_pl);
+          mhash_unset (&sm->sr_steer_policies_hash, &key, NULL);
+
+          /* If no more SR policies or steering policies */
+          if (!pool_elts (sm->sr_policies) && !pool_elts (sm->steer_policies))
+            {
+              fib_table_unlock (sm->fib_table_ip6, FIB_PROTOCOL_IP6);
+              fib_table_unlock (sm->fib_table_ip4, FIB_PROTOCOL_IP4);
+              sm->fib_table_ip6 = (u32) ~ 0;
+              sm->fib_table_ip4 = (u32) ~ 0;
+            }
+
+          return 1;
+        }
+      else      /* It means user requested to update an existing SR steering policy */
+        {
+          /* Retrieve SR steering policy */
+          if (bsid)
+            {
+              p = mhash_get (&sm->sr_policies_index_hash, bsid);
+              if (p)
+                sr_policy = pool_elt_at_index (sm->sr_policies, p[0]);
+              else
+                return -2;
+            }
+          else
+            sr_policy = pool_elt_at_index (sm->sr_policies, sr_policy_index);
+
+          if (!sr_policy)
+            return -2;
+
+          steer_pl->sr_policy = sr_policy - sm->sr_policies;
+
+          /* Remove old FIB/hw redirection and create a new one */
+          if (steer_pl->classify.traffic_type == SR_STEER_IPV6)
+            {
+              /* Remove FIB entry */
+              pfx.fp_proto = FIB_PROTOCOL_IP6;
+              pfx.fp_len = steer_pl->classify.l3.mask_width;
+              pfx.fp_addr.ip6 = steer_pl->classify.l3.prefix.ip6;
+
+              fib_table_entry_delete (fib_table_find
+                                      (FIB_PROTOCOL_IP6,
+                                       steer_pl->classify.l3.fib_table),
+                                      &pfx, FIB_SOURCE_SR);
+
+              /* Create a new one */
+              goto update_fib;
+            }
+          else if (steer_pl->classify.traffic_type == SR_STEER_IPV4)
+            {
+              /* Remove FIB entry */
+              pfx.fp_proto = FIB_PROTOCOL_IP4;
+              pfx.fp_len = steer_pl->classify.l3.mask_width;
+              pfx.fp_addr.ip4 = steer_pl->classify.l3.prefix.ip4;
+
+              fib_table_entry_delete (fib_table_find
+                                      (FIB_PROTOCOL_IP4,
+                                       steer_pl->classify.l3.fib_table),
+                                      &pfx, FIB_SOURCE_SR);
+
+              /* Create a new one */
+              goto update_fib;
+            }
+          else if (steer_pl->classify.traffic_type == SR_STEER_L2)
+            {
+              /* Update L2-HW redirection */
+              goto update_fib;
+            }
+        }
+    }
+  else
+    /* delete; steering policy does not exist; complain */
+  if (is_del)
+    return -4;
+
+  /* Retrieve SR policy */
+  if (bsid)
+    {
+      p = mhash_get (&sm->sr_policies_index_hash, bsid);
+      if (p)
+        sr_policy = pool_elt_at_index (sm->sr_policies, p[0]);
+      else
+        return -2;
+    }
+  else
+    sr_policy = pool_elt_at_index (sm->sr_policies, sr_policy_index);
+
+  /* Create a new steering policy */
+  pool_get (sm->steer_policies, steer_pl);
+  memset (steer_pl, 0, sizeof (*steer_pl));
+
+  if (traffic_type == SR_STEER_IPV4 || traffic_type == SR_STEER_IPV6)
+    {
+      clib_memcpy (&steer_pl->classify.l3.prefix, prefix,
+                   sizeof (ip46_address_t));
+      steer_pl->classify.l3.mask_width = mask_width;
+      steer_pl->classify.l3.fib_table =
+        (table_id != (u32) ~ 0 ? table_id : 0);
+      steer_pl->classify.traffic_type = traffic_type;
+    }
+  else if (traffic_type == SR_STEER_L2)
+    {
+      steer_pl->classify.l2.sw_if_index = sw_if_index;
+      steer_pl->classify.traffic_type = traffic_type;
+    }
+  else
+    {
+      /* Incorrect API usage. Should never get here */
+      pool_put (sm->steer_policies, steer_pl);
+      mhash_unset (&sm->sr_steer_policies_hash, &key, NULL);
+      return -1;
+    }
+  steer_pl->sr_policy = sr_policy - sm->sr_policies;
+
+  /* Create and store key */
+  mhash_set (&sm->sr_steer_policies_hash, &key, steer_pl - sm->steer_policies,
+             NULL);
+
+  if (traffic_type == SR_STEER_L2)
+    {
+      if (!sr_policy->is_encap)
+        goto cleanup_error_encap;
+
+      if (vnet_feature_enable_disable
+          ("device-input", "sr-pl-rewrite-encaps-l2", sw_if_index, 1, 0, 0))
+        goto cleanup_error_redirection;
+
+      /* Set promiscuous mode on interface */
+      vnet_main_t *vnm = vnet_get_main ();
+      ethernet_main_t *em = &ethernet_main;
+      ethernet_interface_t *eif = ethernet_get_interface (em, sw_if_index);
+
+      if (!eif)
+        goto cleanup_error_redirection;
+
+      ethernet_set_flags (vnm, sw_if_index,
+                          ETHERNET_INTERFACE_FLAG_ACCEPT_ALL);
+    }
+  else if (traffic_type == SR_STEER_IPV4)
+    if (!sr_policy->is_encap)
+      goto cleanup_error_encap;
+
+update_fib:
+  /* FIB API calls - Recursive route through the BindingSID */
+  if (traffic_type == SR_STEER_IPV6)
+    {
+      pfx.fp_proto = FIB_PROTOCOL_IP6;
+      pfx.fp_len = steer_pl->classify.l3.mask_width;
+      pfx.fp_addr.ip6 = steer_pl->classify.l3.prefix.ip6;
+
+      fib_table_entry_path_add (fib_table_find (FIB_PROTOCOL_IP6,
+                                                (table_id !=
+                                                 (u32) ~ 0 ?
+                                                 table_id : 0)),
+                                &pfx, FIB_SOURCE_SR,
+                                FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT,
+                                FIB_PROTOCOL_IP6,
+                                (ip46_address_t *) & sr_policy->bsid, ~0,
+                                sm->fib_table_ip6, 1, NULL,
+                                FIB_ROUTE_PATH_FLAG_NONE);
+    }
+  else if (traffic_type == SR_STEER_IPV4)
+    {
+      pfx.fp_proto = FIB_PROTOCOL_IP4;
+      pfx.fp_len = steer_pl->classify.l3.mask_width;
+      pfx.fp_addr.ip4 = steer_pl->classify.l3.prefix.ip4;
+
+      fib_table_entry_path_add (fib_table_find (FIB_PROTOCOL_IP4,
+                                                (table_id !=
+                                                 (u32) ~ 0 ?
+ table_id : 0)), + &pfx, FIB_SOURCE_SR, + FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT, + FIB_PROTOCOL_IP6, + (ip46_address_t *) & sr_policy->bsid, ~0, + sm->fib_table_ip4, 1, NULL, + FIB_ROUTE_PATH_FLAG_NONE); + } + else if (traffic_type == SR_STEER_L2) + { + if (sw_if_index < vec_len (sm->sw_iface_sr_policies)) + sm->sw_iface_sr_policies[sw_if_index] = steer_pl->sr_policy; + else + { + vec_resize (sm->sw_iface_sr_policies, + (pool_len (sm->vnet_main->interface_main.sw_interfaces) + - vec_len (sm->sw_iface_sr_policies))); + sm->sw_iface_sr_policies[sw_if_index] = steer_pl->sr_policy; + } + } + + return 0; + +cleanup_error_encap: + pool_put (sm->steer_policies, steer_pl); + mhash_unset (&sm->sr_steer_policies_hash, &key, NULL); + return -5; + +cleanup_error_redirection: + pool_put (sm->steer_policies, steer_pl); + mhash_unset (&sm->sr_steer_policies_hash, &key, NULL); + return -3; +} + +static clib_error_t * +sr_steer_policy_command_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + vnet_main_t *vnm = vnet_get_main (); + + int is_del = 0; + + ip46_address_t prefix; + u32 dst_mask_width = 0; + u32 sw_if_index = (u32) ~ 0; + u8 traffic_type = 0; + u32 fib_table = (u32) ~ 0; + + ip6_address_t bsid; + u32 sr_policy_index = (u32) ~ 0; + + u8 sr_policy_set = 0; + + memset (&prefix, 0, sizeof (ip46_address_t)); + + int rv; + while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (input, "del")) + is_del = 1; + else if (!traffic_type + && unformat (input, "l3 %U/%d", unformat_ip6_address, + &prefix.ip6, &dst_mask_width)) + traffic_type = SR_STEER_IPV6; + else if (!traffic_type + && unformat (input, "l3 %U/%d", unformat_ip4_address, + &prefix.ip4, &dst_mask_width)) + traffic_type = SR_STEER_IPV4; + else if (!traffic_type + && unformat (input, "l2 %U", unformat_vnet_sw_interface, vnm, + &sw_if_index)) + traffic_type = SR_STEER_L2; + else if (!sr_policy_set + && unformat (input, "via sr policy index %d", + &sr_policy_index)) + sr_policy_set = 1; + else if (!sr_policy_set + && unformat (input, "via sr policy bsid %U", + unformat_ip6_address, &bsid)) + sr_policy_set = 1; + else if (fib_table == (u32) ~ 0 + && unformat (input, "fib-table %d", &fib_table)); + else + break; + } + + if (!traffic_type) + return clib_error_return (0, "No L2/L3 traffic specified"); + if (!sr_policy_set) + return clib_error_return (0, "No SR policy specified"); + + /* Make sure that the prefixes are clean */ + if (traffic_type == SR_STEER_IPV4) + { + u32 mask = + (dst_mask_width ? (0xFFFFFFFFu >> (32 - dst_mask_width)) : 0); + prefix.ip4.as_u32 &= mask; + } + else if (traffic_type == SR_STEER_IPV6) + { + ip6_address_t mask; + ip6_address_mask_from_width (&mask, dst_mask_width); + ip6_address_mask (&prefix.ip6, &mask); + } + + rv = + sr_steering_policy (is_del, (sr_policy_index == ~(u32) 0 ? &bsid : NULL), + sr_policy_index, fib_table, &prefix, dst_mask_width, + sw_if_index, traffic_type); + + switch (rv) + { + case 0: + break; + case 1: + return 0; + case -1: + return clib_error_return (0, "Incorrect API usage."); + case -2: + return clib_error_return (0, + "The requested SR policy could not be located. Review the BSID/index."); + case -3: + return clib_error_return (0, + "Unable to do SW redirect. 
Incorrect interface."); + case -4: + return clib_error_return (0, + "The requested SR steering policy could not be deleted."); + case -5: + return clib_error_return (0, + "The SR policy is not an encapsulation one."); + default: + return clib_error_return (0, "BUG: sr steer policy returns %d", rv); + } + return 0; +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (sr_steer_policy_command, static) = { + .path = "sr steer", + .short_help = "sr steer (del) [l3 |l2 ]" + "via sr policy [index |bsid ]" + "(fib-table )", + .long_help = + "\tSteer a L2 or L3 traffic through an existing SR policy.\n" + "\tExamples:\n" + "\t\tsr steer l3 2001::/64 via sr_policy index 5\n" + "\t\tsr steer l3 2001::/64 via sr_policy bsid 2010::9999:1\n" + "\t\tsr steer l2 GigabitEthernet0/5/0 via sr_policy index 5\n" + "\t\tsr steer del l3 2001::/64 via sr_policy index 5\n", + .function = sr_steer_policy_command_fn, +}; +/* *INDENT-ON* */ + +static clib_error_t * +show_sr_steering_policies_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + ip6_sr_main_t *sm = &sr_main; + ip6_sr_steering_policy_t **steer_policies = 0; + ip6_sr_steering_policy_t *steer_pl; + + vnet_main_t *vnm = vnet_get_main (); + + ip6_sr_policy_t *pl = 0; + int i; + + vlib_cli_output (vm, "SR steering policies:"); + /* *INDENT-OFF* */ + pool_foreach (steer_pl, sm->steer_policies, ({vec_add1(steer_policies, steer_pl);})); + /* *INDENT-ON* */ + vlib_cli_output (vm, "Traffic\t\tSR policy BSID"); + for (i = 0; i < vec_len (steer_policies); i++) + { + steer_pl = steer_policies[i]; + pl = pool_elt_at_index (sm->sr_policies, steer_pl->sr_policy); + if (steer_pl->classify.traffic_type == SR_STEER_L2) + { + vlib_cli_output (vm, "L2 %U\t%U", + format_vnet_sw_if_index_name, vnm, + steer_pl->classify.l2.sw_if_index, + format_ip6_address, &pl->bsid); + } + else if (steer_pl->classify.traffic_type == SR_STEER_IPV4) + { + vlib_cli_output (vm, "L3 %U/%d\t%U", + format_ip4_address, + &steer_pl->classify.l3.prefix.ip4, + steer_pl->classify.l3.mask_width, + format_ip6_address, &pl->bsid); + } + else if (steer_pl->classify.traffic_type == SR_STEER_IPV6) + { + vlib_cli_output (vm, "L3 %U/%d\t%U", + format_ip6_address, + &steer_pl->classify.l3.prefix.ip6, + steer_pl->classify.l3.mask_width, + format_ip6_address, &pl->bsid); + } + } + return 0; +} + +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (show_sr_steering_policies_command, static) = { + .path = "show sr steering policies", + .short_help = "show sr steering policies", + .function = show_sr_steering_policies_command_fn, +}; +/* *INDENT-ON* */ + +clib_error_t * +sr_steering_init (vlib_main_t * vm) +{ + ip6_sr_main_t *sm = &sr_main; + + /* Init memory for function keys */ + mhash_init (&sm->sr_steer_policies_hash, sizeof (uword), + sizeof (sr_steering_key_t)); + + sm->sw_iface_sr_policies = 0; + + sm->vnet_main = vnet_get_main (); + + return 0; +} + +/* *INDENT-OFF* */ +VLIB_INIT_FUNCTION (sr_steering_init); +/* *INDENT-ON* */ + +/* *INDENT-OFF* */ +VNET_FEATURE_INIT (sr_pl_rewrite_encaps_l2, static) = +{ + .arc_name = "device-input", + .node_name = "sr-pl-rewrite-encaps-l2", + .runs_before = VNET_FEATURES ("ethernet-input"), +}; +/* *INDENT-ON* */ + +/* +* fd.io coding-style-patch-verification: ON +* +* Local Variables: +* eval: (c-set-style "gnu") +* End: +*/ diff --git a/src/vnet/srv6/sr_steering.md b/src/vnet/srv6/sr_steering.md new file mode 100644 index 00000000..cf446f81 --- /dev/null +++ b/src/vnet/srv6/sr_steering.md @@ -0,0 +1,11 @@ +# Steering packets into a SR Policy 
{#srv6_steering_doc} + +To steer packets in Transit into an SR policy (T.Insert, T.Encaps and T.Encaps.L2 behaviors), the user needs to create an 'sr steering policy'. + + sr steer l3 2001::/64 via sr policy index 1 + sr steer l3 2001::/64 via sr policy bsid cafe::1 + sr steer l3 2001::/64 via sr policy bsid cafe::1 fib-table 3 + sr steer l3 10.0.0.0/16 via sr policy bsid cafe::1 + sr steer l2 TenGE0/1/0 via sr policy bsid cafe::1 + +Disclaimer: The T.Encaps.L2 will steer L2 frames into an SR Policy. Notice that creating an SR steering policy for L2 frames will actually automatically *put the interface into promiscous mode*. -- cgit 1.2.3-korg From 7a4e0925f58f04cd31e4c37def959600d888940c Mon Sep 17 00:00:00 2001 From: Pablo Camarillo Date: Tue, 6 Jun 2017 15:18:12 +0200 Subject: VPP-872 and End.T function for SRv6 Fixes VPP-872 and adds support for End.T Change-Id: I3c32cb6e412f37babe1abd293c0b6b49367fc2a9 Signed-off-by: Pablo Camarillo --- src/examples/srv6-sample-localsid/node.c | 103 +++++++++++++---- src/vnet/srv6/sr.h | 15 +-- src/vnet/srv6/sr_localsid.c | 191 +++++++++++++++---------------- 3 files changed, 183 insertions(+), 126 deletions(-) (limited to 'src/vnet/srv6') diff --git a/src/examples/srv6-sample-localsid/node.c b/src/examples/srv6-sample-localsid/node.c index e83e2352..3ac7108b 100644 --- a/src/examples/srv6-sample-localsid/node.c +++ b/src/examples/srv6-sample-localsid/node.c @@ -59,43 +59,102 @@ typedef enum { } srv6_localsid_sample_next_t; /** - * @brief Function doing End processing. + * @brief Function doing End processing. */ -//Fixme: support OAM (hop-by-hop header) here! static_always_inline void end_srh_processing (vlib_node_runtime_t * node, - vlib_buffer_t * b0, - ip6_header_t * ip0, - ip6_sr_header_t * sr0, - u32 * next0) + vlib_buffer_t * b0, + ip6_header_t * ip0, + ip6_sr_header_t * sr0, + ip6_sr_localsid_t * ls0, + u32 * next0, + u8 psp, + ip6_ext_header_t * prev0) { ip6_address_t *new_dst0; - if(PREDICT_TRUE(ip0->protocol == IP_PROTOCOL_IPV6_ROUTE)) + if (PREDICT_TRUE (sr0->type == ROUTING_HEADER_TYPE_SR)) { - if(PREDICT_TRUE(sr0->type == ROUTING_HEADER_TYPE_SR)) + if (sr0->segments_left == 1 && psp) { - if(PREDICT_TRUE(sr0->segments_left != 0)) + u32 new_l0, sr_len; + u64 *copy_dst0, *copy_src0; + u32 copy_len_u64s0 = 0; + + ip0->dst_address.as_u64[0] = sr0->segments->as_u64[0]; + ip0->dst_address.as_u64[1] = sr0->segments->as_u64[1]; + + /* Remove the SRH taking care of the rest of IPv6 ext header */ + if (prev0) + prev0->next_hdr = sr0->protocol; + else + ip0->protocol = sr0->protocol; + + sr_len = ip6_ext_header_len (sr0); + vlib_buffer_advance (b0, sr_len); + new_l0 = clib_net_to_host_u16 (ip0->payload_length) - sr_len; + ip0->payload_length = clib_host_to_net_u16 (new_l0); + copy_src0 = (u64 *) ip0; + copy_dst0 = copy_src0 + (sr0->length + 1); + /* number of 8 octet units to copy + * By default in absence of extension headers it is equal to length of ip6 header + * With extension headers it number of 8 octet units of ext headers preceding + * SR header + */ + copy_len_u64s0 = + (((u8 *) sr0 - (u8 *) ip0) - sizeof (ip6_header_t)) >> 3; + copy_dst0[4 + copy_len_u64s0] = copy_src0[4 + copy_len_u64s0]; + copy_dst0[3 + copy_len_u64s0] = copy_src0[3 + copy_len_u64s0]; + copy_dst0[2 + copy_len_u64s0] = copy_src0[2 + copy_len_u64s0]; + copy_dst0[1 + copy_len_u64s0] = copy_src0[1 + copy_len_u64s0]; + copy_dst0[0 + copy_len_u64s0] = copy_src0[0 + copy_len_u64s0]; + + int i; + for (i = copy_len_u64s0 - 1; i >= 0; i--) { - sr0->segments_left -= 1; - new_dst0 = 
(ip6_address_t *)(sr0->segments); - new_dst0 += sr0->segments_left; - ip0->dst_address.as_u64[0] = new_dst0->as_u64[0]; - ip0->dst_address.as_u64[1] = new_dst0->as_u64[1]; + copy_dst0[i] = copy_src0[i]; } - else + + if (ls0->behavior == SR_BEHAVIOR_X) + { + vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ls0->nh_adj; + *next0 = SR_LOCALSID_NEXT_IP6_REWRITE; + } + else if(ls0->behavior == SR_BEHAVIOR_T) { - *next0 = SRV6_SAMPLE_LOCALSID_NEXT_ERROR; - b0->error = node->errors[SRV6_LOCALSID_COUNTER_NO_SRH]; + vnet_buffer (b0)->sw_if_index[VLIB_TX] = ls0->vrf_index; + } + } + else if (PREDICT_TRUE(sr0->segments_left > 0)) + { + sr0->segments_left -= 1; + new_dst0 = (ip6_address_t *) (sr0->segments); + new_dst0 += sr0->segments_left; + ip0->dst_address.as_u64[0] = new_dst0->as_u64[0]; + ip0->dst_address.as_u64[1] = new_dst0->as_u64[1]; + + if (ls0->behavior == SR_BEHAVIOR_X) + { + vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ls0->nh_adj; + *next0 = SR_LOCALSID_NEXT_IP6_REWRITE; + } + else if(ls0->behavior == SR_BEHAVIOR_T) + { + vnet_buffer (b0)->sw_if_index[VLIB_TX] = ls0->vrf_index; } } else { - /* Error. Routing header of type != SR */ - *next0 = SRV6_SAMPLE_LOCALSID_NEXT_ERROR; - b0->error = node->errors[SRV6_LOCALSID_COUNTER_NO_SRH]; + *next0 = SR_LOCALSID_NEXT_ERROR; + b0->error = node->errors[SR_LOCALSID_ERROR_NO_MORE_SEGMENTS]; } } + else + { + /* Error. Routing header of type != SR */ + *next0 = SR_LOCALSID_NEXT_ERROR; + b0->error = node->errors[SR_LOCALSID_ERROR_NO_SRH]; + } } /* @@ -129,6 +188,7 @@ srv6_localsid_sample_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_fram vlib_buffer_t * b0; ip6_header_t * ip0 = 0; ip6_sr_header_t * sr0; + ip6_ext_header_t *prev0 u32 next0 = SRV6_SAMPLE_LOCALSID_NEXT_IP6LOOKUP; ip6_sr_localsid_t *ls0; srv6_localsid_sample_per_sid_memory_t *ls0_mem; @@ -149,7 +209,8 @@ srv6_localsid_sample_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_fram ls0_mem = ls0->plugin_mem; /* SRH processing */ - end_srh_processing (node, b0, ip0, sr0, &next0); + ip6_ext_header_find_t (ip0, prev0, sr0, IP_PROTOCOL_IPV6_ROUTE); + end_decaps_srh_processing (node, b0, ip0, sr0, ls0, &next0); /* ==================================================================== */ /* INSERT CODE HERE */ diff --git a/src/vnet/srv6/sr.h b/src/vnet/srv6/sr.h index 2014a23e..d0f42869 100755 --- a/src/vnet/srv6/sr.h +++ b/src/vnet/srv6/sr.h @@ -36,13 +36,14 @@ #define SR_BEHAVIOR_END 1 #define SR_BEHAVIOR_X 2 -#define SR_BEHAVIOR_D_FIRST 3 /* Unused. Separator in between regular and D */ -#define SR_BEHAVIOR_DX2 4 -#define SR_BEHAVIOR_DX6 5 -#define SR_BEHAVIOR_DX4 6 -#define SR_BEHAVIOR_DT6 7 -#define SR_BEHAVIOR_DT4 8 -#define SR_BEHAVIOR_LAST 9 /* Must always be the last one */ +#define SR_BEHAVIOR_T 3 +#define SR_BEHAVIOR_D_FIRST 4 /* Unused. 
Separator in between regular and D */ +#define SR_BEHAVIOR_DX2 5 +#define SR_BEHAVIOR_DX6 6 +#define SR_BEHAVIOR_DX4 7 +#define SR_BEHAVIOR_DT6 8 +#define SR_BEHAVIOR_DT4 9 +#define SR_BEHAVIOR_LAST 10 /* Must always be the last one */ #define SR_STEER_L2 2 #define SR_STEER_IPV4 4 diff --git a/src/vnet/srv6/sr_localsid.c b/src/vnet/srv6/sr_localsid.c index bdc66386..adeb5c03 100755 --- a/src/vnet/srv6/sr_localsid.c +++ b/src/vnet/srv6/sr_localsid.c @@ -115,7 +115,7 @@ sr_cli_localsid (char is_del, ip6_address_t * localsid_addr, /* Delete localsid registry */ pool_put (sm->localsids, ls); mhash_unset (&sm->sr_localsids_index_hash, localsid_addr, NULL); - return 1; + return 0; } else /* create with function already existing; complain */ return -1; @@ -161,6 +161,9 @@ sr_cli_localsid (char is_del, ip6_address_t * localsid_addr, ls->sw_if_index = sw_if_index; clib_memcpy (&ls->next_hop.ip6, &nh_addr->ip6, sizeof (ip6_address_t)); break; + case SR_BEHAVIOR_T: + ls->vrf_index = sw_if_index; + break; case SR_BEHAVIOR_DX4: ls->sw_if_index = sw_if_index; clib_memcpy (&ls->next_hop.ip4, &nh_addr->ip4, sizeof (ip4_address_t)); @@ -172,6 +175,9 @@ sr_cli_localsid (char is_del, ip6_address_t * localsid_addr, case SR_BEHAVIOR_DT6: ls->vrf_index = sw_if_index; break; + case SR_BEHAVIOR_DT4: + ls->vrf_index = sw_if_index; + break; case SR_BEHAVIOR_DX2: ls->sw_if_index = sw_if_index; ls->vlan_index = vlan_index; @@ -294,6 +300,8 @@ sr_cli_localsid_command_fn (vlib_main_t * vm, unformat_input_t * input, unformat_vnet_sw_interface, vnm, &sw_if_index, unformat_ip6_address, &next_hop.ip6)) behavior = SR_BEHAVIOR_X; + else if (unformat (input, "end.t %u", &sw_if_index)) + behavior = SR_BEHAVIOR_T; else if (unformat (input, "end.dx6 %U %U", unformat_vnet_sw_interface, vnm, &sw_if_index, unformat_ip6_address, &next_hop.ip6)) @@ -461,6 +469,13 @@ show_sr_localsid_command_fn (vlib_main_t * vm, unformat_input_t * input, format_vnet_sw_if_index_name, vnm, ls->sw_if_index, format_ip6_address, &ls->next_hop.ip6); break; + case SR_BEHAVIOR_T: + vlib_cli_output (vm, + "\tAddress: \t%U\n\tBehavior: \tT (Endpoint with specific IPv6 table lookup)" + "\n\tTable: \t%u", + format_ip6_address, &ls->localsid, + format_vnet_sw_if_index_name, vnm, ls->vrf_index); + break; case SR_BEHAVIOR_DX4: vlib_cli_output (vm, "\tAddress: \t%U\n\tBehavior: \tDX4 (Endpoint with decapsulation and IPv4 cross-connect)" @@ -492,13 +507,13 @@ show_sr_localsid_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_output (vm, "\tAddress: \t%U\n\tBehavior: \tDT6 (Endpoint with decapsulation and specific IPv6 table lookup)" "\n\tTable: %u", format_ip6_address, &ls->localsid, - ls->fib_table); + ls->vrf_index); break; case SR_BEHAVIOR_DT4: vlib_cli_output (vm, "\tAddress: \t%U\n\tBehavior: \tDT4 (Endpoint with decapsulation and specific IPv4 table lookup)" "\n\tTable: \t%u", format_ip6_address, - &ls->localsid, ls->fib_table); + &ls->localsid, ls->vrf_index); break; default: if (ls->behavior >= SR_BEHAVIOR_LAST) @@ -651,6 +666,9 @@ format_sr_localsid_trace (u8 * s, va_list * args) case SR_BEHAVIOR_X: s = format (s, "\tBehavior: IPv6 L3 xconnect\n"); break; + case SR_BEHAVIOR_T: + s = format (s, "\tBehavior: IPv6 specific table lookup\n"); + break; case SR_BEHAVIOR_DT6: s = format (s, "\tBehavior: Decapsulation with IPv6 Table lookup\n"); break; @@ -690,13 +708,64 @@ end_srh_processing (vlib_node_runtime_t * node, vlib_buffer_t * b0, ip6_header_t * ip0, ip6_sr_header_t * sr0, - ip6_sr_localsid_t * ls0, u32 * next0) + ip6_sr_localsid_t * ls0, + 
u32 * next0, u8 psp, ip6_ext_header_t * prev0) { ip6_address_t *new_dst0; if (PREDICT_TRUE (sr0->type == ROUTING_HEADER_TYPE_SR)) { - if (PREDICT_TRUE (sr0->segments_left != 0)) + if (sr0->segments_left == 1 && psp) + { + u32 new_l0, sr_len; + u64 *copy_dst0, *copy_src0; + u32 copy_len_u64s0 = 0; + + ip0->dst_address.as_u64[0] = sr0->segments->as_u64[0]; + ip0->dst_address.as_u64[1] = sr0->segments->as_u64[1]; + + /* Remove the SRH taking care of the rest of IPv6 ext header */ + if (prev0) + prev0->next_hdr = sr0->protocol; + else + ip0->protocol = sr0->protocol; + + sr_len = ip6_ext_header_len (sr0); + vlib_buffer_advance (b0, sr_len); + new_l0 = clib_net_to_host_u16 (ip0->payload_length) - sr_len; + ip0->payload_length = clib_host_to_net_u16 (new_l0); + copy_src0 = (u64 *) ip0; + copy_dst0 = copy_src0 + (sr0->length + 1); + /* number of 8 octet units to copy + * By default in absence of extension headers it is equal to length of ip6 header + * With extension headers it number of 8 octet units of ext headers preceding + * SR header + */ + copy_len_u64s0 = + (((u8 *) sr0 - (u8 *) ip0) - sizeof (ip6_header_t)) >> 3; + copy_dst0[4 + copy_len_u64s0] = copy_src0[4 + copy_len_u64s0]; + copy_dst0[3 + copy_len_u64s0] = copy_src0[3 + copy_len_u64s0]; + copy_dst0[2 + copy_len_u64s0] = copy_src0[2 + copy_len_u64s0]; + copy_dst0[1 + copy_len_u64s0] = copy_src0[1 + copy_len_u64s0]; + copy_dst0[0 + copy_len_u64s0] = copy_src0[0 + copy_len_u64s0]; + + int i; + for (i = copy_len_u64s0 - 1; i >= 0; i--) + { + copy_dst0[i] = copy_src0[i]; + } + + if (ls0->behavior == SR_BEHAVIOR_X) + { + vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ls0->nh_adj; + *next0 = SR_LOCALSID_NEXT_IP6_REWRITE; + } + else if (ls0->behavior == SR_BEHAVIOR_T) + { + vnet_buffer (b0)->sw_if_index[VLIB_TX] = ls0->vrf_index; + } + } + else if (PREDICT_TRUE (sr0->segments_left > 0)) { sr0->segments_left -= 1; new_dst0 = (ip6_address_t *) (sr0->segments); @@ -709,6 +778,10 @@ end_srh_processing (vlib_node_runtime_t * node, vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ls0->nh_adj; *next0 = SR_LOCALSID_NEXT_IP6_REWRITE; } + else if (ls0->behavior == SR_BEHAVIOR_T) + { + vnet_buffer (b0)->sw_if_index[VLIB_TX] = ls0->vrf_index; + } } else { @@ -727,7 +800,6 @@ end_srh_processing (vlib_node_runtime_t * node, /* * @brief Function doing SRH processing for D* variants */ -//FixME. 
I must crosscheck that next_proto matches the localsid static_always_inline void end_decaps_srh_processing (vlib_node_runtime_t * node, vlib_buffer_t * b0, @@ -772,7 +844,7 @@ end_decaps_srh_processing (vlib_node_runtime_t * node, else if (ls0->behavior == SR_BEHAVIOR_DT6) { vlib_buffer_advance (b0, total_size); - vnet_buffer (b0)->sw_if_index[VLIB_TX] = ls0->fib_table; + vnet_buffer (b0)->sw_if_index[VLIB_TX] = ls0->vrf_index; return; } break; @@ -788,7 +860,7 @@ end_decaps_srh_processing (vlib_node_runtime_t * node, else if (ls0->behavior == SR_BEHAVIOR_DT4) { vlib_buffer_advance (b0, total_size); - vnet_buffer (b0)->sw_if_index[VLIB_TX] = ls0->fib_table; + vnet_buffer (b0)->sw_if_index[VLIB_TX] = ls0->vrf_index; *next0 = SR_LOCALSID_NEXT_IP4_LOOKUP; return; } @@ -809,72 +881,6 @@ end_decaps_srh_processing (vlib_node_runtime_t * node, return; } -/** - * @brief Function doing End processing with PSP - */ -static_always_inline void -end_psp_srh_processing (vlib_node_runtime_t * node, - vlib_buffer_t * b0, - ip6_header_t * ip0, - ip6_ext_header_t * prev0, - ip6_sr_header_t * sr0, - ip6_sr_localsid_t * ls0, u32 * next0) -{ - u32 new_l0, sr_len; - u64 *copy_dst0, *copy_src0; - u32 copy_len_u64s0 = 0; - int i; - - if (PREDICT_TRUE (sr0->type == ROUTING_HEADER_TYPE_SR)) - { - if (PREDICT_TRUE (sr0->segments_left == 1)) - { - ip0->dst_address.as_u64[0] = sr0->segments->as_u64[0]; - ip0->dst_address.as_u64[1] = sr0->segments->as_u64[1]; - - /* Remove the SRH taking care of the rest of IPv6 ext header */ - if (prev0) - prev0->next_hdr = sr0->protocol; - else - ip0->protocol = sr0->protocol; - - sr_len = ip6_ext_header_len (sr0); - vlib_buffer_advance (b0, sr_len); - new_l0 = clib_net_to_host_u16 (ip0->payload_length) - sr_len; - ip0->payload_length = clib_host_to_net_u16 (new_l0); - copy_src0 = (u64 *) ip0; - copy_dst0 = copy_src0 + (sr0->length + 1); - /* number of 8 octet units to copy - * By default in absence of extension headers it is equal to length of ip6 header - * With extension headers it number of 8 octet units of ext headers preceding - * SR header - */ - copy_len_u64s0 = - (((u8 *) sr0 - (u8 *) ip0) - sizeof (ip6_header_t)) >> 3; - copy_dst0[4 + copy_len_u64s0] = copy_src0[4 + copy_len_u64s0]; - copy_dst0[3 + copy_len_u64s0] = copy_src0[3 + copy_len_u64s0]; - copy_dst0[2 + copy_len_u64s0] = copy_src0[2 + copy_len_u64s0]; - copy_dst0[1 + copy_len_u64s0] = copy_src0[1 + copy_len_u64s0]; - copy_dst0[0 + copy_len_u64s0] = copy_src0[0 + copy_len_u64s0]; - - for (i = copy_len_u64s0 - 1; i >= 0; i--) - { - copy_dst0[i] = copy_src0[i]; - } - - if (ls0->behavior == SR_BEHAVIOR_X) - { - vnet_buffer (b0)->ip.adj_index[VLIB_TX] = ls0->nh_adj; - *next0 = SR_LOCALSID_NEXT_IP6_REWRITE; - } - return; - } - } - /* Error. Routing header of type != SR */ - *next0 = SR_LOCALSID_NEXT_ERROR; - b0->error = node->errors[SR_LOCALSID_ERROR_NO_PSP]; -} - /** * @brief SR LocalSID graph node. 
Supports all default SR Endpoint variants */ @@ -1180,25 +1186,14 @@ sr_localsid_fn (vlib_main_t * vm, vlib_node_runtime_t * node, pool_elt_at_index (sm->localsids, vnet_buffer (b0)->ip.adj_index[VLIB_TX]); - if (ls0->end_psp) - end_psp_srh_processing (node, b0, ip0, prev0, sr0, ls0, &next0); - else - end_srh_processing (node, b0, ip0, sr0, ls0, &next0); - - if (ls1->end_psp) - end_psp_srh_processing (node, b1, ip1, prev1, sr1, ls1, &next1); - else - end_srh_processing (node, b1, ip1, sr1, ls1, &next1); - - if (ls2->end_psp) - end_psp_srh_processing (node, b2, ip2, prev2, sr2, ls2, &next2); - else - end_srh_processing (node, b2, ip2, sr2, ls2, &next2); - - if (ls3->end_psp) - end_psp_srh_processing (node, b3, ip3, prev3, sr3, ls3, &next3); - else - end_srh_processing (node, b3, ip3, sr3, ls3, &next3); + end_srh_processing (node, b0, ip0, sr0, ls0, &next0, ls0->end_psp, + prev0); + end_srh_processing (node, b1, ip1, sr1, ls1, &next1, ls1->end_psp, + prev1); + end_srh_processing (node, b2, ip2, sr2, ls2, &next2, ls2->end_psp, + prev2); + end_srh_processing (node, b3, ip3, sr3, ls3, &next3, ls3->end_psp, + prev3); //TODO: proper trace. @@ -1259,10 +1254,8 @@ sr_localsid_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vnet_buffer (b0)->ip.adj_index[VLIB_TX]); /* SRH processing */ - if (ls0->end_psp) - end_psp_srh_processing (node, b0, ip0, prev0, sr0, ls0, &next0); - else - end_srh_processing (node, b0, ip0, sr0, ls0, &next0); + end_srh_processing (node, b0, ip0, sr0, ls0, &next0, ls0->end_psp, + prev0); if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) { @@ -1431,8 +1424,10 @@ show_sr_localsid_behaviors_command_fn (vlib_main_t * vm, /* Print static behaviors */ vlib_cli_output (vm, "Default behaviors:\n" "\tEnd\t-> Endpoint.\n" - "\tEnd.X\t-> Endpoint with decapsulation and Layer-3 cross-connect.\n" + "\tEnd.X\t-> Endpoint with Layer-3 cross-connect.\n" "\t\tParameters: ' '\n" + "\tEnd.T\t-> Endpoint with specific IPv6 table lookup.\n" + "\t\tParameters: ''\n" "\tEnd.DX2\t-> Endpoint with decapsulation and Layer-2 cross-connect.\n" "\t\tParameters: ''\n" "\tEnd.DX6\t-> Endpoint with decapsulation and IPv6 cross-connect.\n" -- cgit 1.2.3-korg From da78f957e46c686434149d332a477d7ea055d76a Mon Sep 17 00:00:00 2001 From: Neale Ranns Date: Wed, 24 May 2017 09:15:43 -0700 Subject: L2 over MPLS [support for VPWS/VPLS] - switch to using dpo_proto_t rather than fib_protocol_t in fib_paths so that we can describe L2 paths - VLIB nodes to handle pop/push of MPLS labels to L2 Change-Id: Id050d06a11fd2c9c1c81ce5a0654e6c5ae6afa6e Signed-off-by: Neale Ranns --- src/plugins/gtpu/gtpu.c | 2 +- src/plugins/snat/snat.c | 2 +- src/vat/api_format.c | 17 +- src/vnet/dhcp/client.c | 6 +- src/vnet/dhcp/dhcp6_proxy_node.c | 2 +- src/vnet/dpo/dpo.c | 19 ++ src/vnet/dpo/dpo.h | 11 +- src/vnet/dpo/interface_dpo.c | 30 +++ src/vnet/dpo/mpls_label_dpo.c | 45 +++- src/vnet/ethernet/arp.c | 4 +- src/vnet/fib/fib_api.h | 4 +- src/vnet/fib/fib_entry.c | 8 +- src/vnet/fib/fib_entry_src.c | 16 +- src/vnet/fib/fib_entry_src.h | 4 +- src/vnet/fib/fib_entry_src_api.c | 2 +- src/vnet/fib/fib_entry_src_default_route.c | 2 +- src/vnet/fib/fib_entry_src_interface.c | 2 +- src/vnet/fib/fib_entry_src_lisp.c | 8 +- src/vnet/fib/fib_entry_src_mpls.c | 4 +- src/vnet/fib/fib_entry_src_rr.c | 15 +- src/vnet/fib/fib_entry_src_special.c | 2 +- src/vnet/fib/fib_path.c | 79 +++---- src/vnet/fib/fib_path.h | 11 +- src/vnet/fib/fib_path_ext.c | 3 + src/vnet/fib/fib_path_list.c | 4 +- src/vnet/fib/fib_path_list.h | 4 +- 
src/vnet/fib/fib_table.c | 6 +- src/vnet/fib/fib_table.h | 6 +- src/vnet/fib/fib_test.c | 338 ++++++++++++++--------------- src/vnet/fib/fib_types.h | 8 +- src/vnet/interface_format.c | 12 +- src/vnet/ip/ip4_forward.c | 6 +- src/vnet/ip/ip6_forward.c | 4 +- src/vnet/ip/ip6_neighbor.c | 10 +- src/vnet/ip/ip_api.c | 38 ++-- src/vnet/ip/lookup.c | 18 +- src/vnet/lisp-gpe/lisp_gpe.c | 13 +- src/vnet/lisp-gpe/lisp_gpe_api.c | 10 +- src/vnet/lisp-gpe/lisp_gpe_fwd_entry.c | 5 +- src/vnet/mfib/ip6_mfib.c | 6 +- src/vnet/mfib/mfib_entry.c | 10 +- src/vnet/mfib/mfib_test.c | 14 +- src/vnet/mpls/mpls.api | 4 +- src/vnet/mpls/mpls.c | 36 +-- src/vnet/mpls/mpls_api.c | 21 +- src/vnet/mpls/mpls_tunnel.c | 75 ++++--- src/vnet/mpls/mpls_tunnel.h | 38 ++-- src/vnet/srmpls/sr_mpls_policy.c | 6 +- src/vnet/srmpls/sr_mpls_steering.c | 2 +- src/vnet/srv6/sr_steering.c | 4 +- src/vnet/vxlan-gpe/vxlan_gpe.c | 2 +- src/vnet/vxlan/vxlan.c | 2 +- src/vpp/app/vpe_cli.c | 2 +- test/test_bfd.py | 6 +- test/test_gre.py | 24 +- test/test_ip6.py | 26 +-- test/test_map.py | 19 +- test/test_mpls.py | 318 ++++++++++++++++++++++----- test/test_p2p_ethernet.py | 16 +- test/vpp_ip_route.py | 24 +- test/vpp_mpls_tunnel_interface.py | 6 +- test/vpp_papi_provider.py | 4 +- 62 files changed, 889 insertions(+), 556 deletions(-) (limited to 'src/vnet/srv6') diff --git a/src/plugins/gtpu/gtpu.c b/src/plugins/gtpu/gtpu.c index 84745bd8..3dfb4210 100755 --- a/src/plugins/gtpu/gtpu.c +++ b/src/plugins/gtpu/gtpu.c @@ -534,7 +534,7 @@ int vnet_gtpu_add_del_tunnel fib_node_index_t mfei; adj_index_t ai; fib_route_path_t path = { - .frp_proto = fp, + .frp_proto = fib_proto_to_dpo (fp), .frp_addr = zero_addr, .frp_sw_if_index = 0xffffffff, .frp_fib_index = ~0, diff --git a/src/plugins/snat/snat.c b/src/plugins/snat/snat.c index 9fbc1e54..f196b5c2 100644 --- a/src/plugins/snat/snat.c +++ b/src/plugins/snat/snat.c @@ -135,7 +135,7 @@ snat_add_del_addr_to_fib (ip4_address_t * addr, u8 p_len, u32 sw_if_index, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_LOCAL | FIB_ENTRY_FLAG_EXCLUSIVE), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, sw_if_index, ~0, diff --git a/src/vat/api_format.c b/src/vat/api_format.c index f97cdeef..009cf173 100644 --- a/src/vat/api_format.c +++ b/src/vat/api_format.c @@ -7498,7 +7498,7 @@ api_mpls_route_add_del (vat_main_t * vam) mpls_label_t *next_hop_out_label_stack = NULL; mpls_label_t local_label = MPLS_LABEL_INVALID; u8 is_eos = 0; - u8 next_hop_proto_is_ip4 = 1; + dpo_proto_t next_hop_proto = DPO_PROTO_IP4; /* Parse args required to build the message */ while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT) @@ -7517,13 +7517,13 @@ api_mpls_route_add_del (vat_main_t * vam) &v4_next_hop_address)) { next_hop_set = 1; - next_hop_proto_is_ip4 = 1; + next_hop_proto = DPO_PROTO_IP4; } else if (unformat (i, "via %U", unformat_ip6_address, &v6_next_hop_address)) { next_hop_set = 1; - next_hop_proto_is_ip4 = 0; + next_hop_proto = DPO_PROTO_IP6; } else if (unformat (i, "weight %d", &next_hop_weight)) ; @@ -7548,12 +7548,12 @@ api_mpls_route_add_del (vat_main_t * vam) else if (unformat (i, "lookup-in-ip4-table %d", &next_hop_table_id)) { next_hop_set = 1; - next_hop_proto_is_ip4 = 1; + next_hop_proto = DPO_PROTO_IP4; } else if (unformat (i, "lookup-in-ip6-table %d", &next_hop_table_id)) { next_hop_set = 1; - next_hop_proto_is_ip4 = 0; + next_hop_proto = DPO_PROTO_IP6; } else if (unformat (i, "next-hop-table %d", &next_hop_table_id)) ; @@ -7599,7 +7599,7 @@ api_mpls_route_add_del (vat_main_t * vam) mp->mr_create_table_if_needed = 
create_table_if_needed; mp->mr_is_add = is_add; - mp->mr_next_hop_proto_is_ip4 = next_hop_proto_is_ip4; + mp->mr_next_hop_proto = next_hop_proto; mp->mr_is_classify = is_classify; mp->mr_is_multipath = is_multipath; mp->mr_is_resolve_host = resolve_host; @@ -7622,13 +7622,14 @@ api_mpls_route_add_del (vat_main_t * vam) if (next_hop_set) { - if (next_hop_proto_is_ip4) + if (DPO_PROTO_IP4 == next_hop_proto) { clib_memcpy (mp->mr_next_hop, &v4_next_hop_address, sizeof (v4_next_hop_address)); } - else + else if (DPO_PROTO_IP6 == next_hop_proto) + { clib_memcpy (mp->mr_next_hop, &v6_next_hop_address, diff --git a/src/vnet/dhcp/client.c b/src/vnet/dhcp/client.c index cfe62a6f..dd5e99f2 100644 --- a/src/vnet/dhcp/client.c +++ b/src/vnet/dhcp/client.c @@ -296,7 +296,7 @@ int dhcp_client_for_us (u32 bi, vlib_buffer_t * b, &all_0s, FIB_SOURCE_DHCP, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh, c->sw_if_index, ~0, @@ -605,7 +605,7 @@ dhcp_bound_state (dhcp_client_main_t * dcm, dhcp_client_t * c, f64 now) c->sw_if_index), &all_0s, FIB_SOURCE_DHCP, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh, c->sw_if_index, ~0, @@ -900,7 +900,7 @@ int dhcp_client_add_del (dhcp_client_add_del_args_t * a) c->sw_if_index), &all_0s, FIB_SOURCE_DHCP, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh, c->sw_if_index, ~0, diff --git a/src/vnet/dhcp/dhcp6_proxy_node.c b/src/vnet/dhcp/dhcp6_proxy_node.c index e109cc4c..9c2f5220 100644 --- a/src/vnet/dhcp/dhcp6_proxy_node.c +++ b/src/vnet/dhcp/dhcp6_proxy_node.c @@ -857,7 +857,7 @@ dhcp6_proxy_set_server (ip46_address_t *addr, else { const fib_route_path_t path_for_us = { - .frp_proto = FIB_PROTOCOL_IP6, + .frp_proto = DPO_PROTO_IP6, .frp_addr = zero_addr, .frp_sw_if_index = 0xffffffff, .frp_fib_index = ~0, diff --git a/src/vnet/dpo/dpo.c b/src/vnet/dpo/dpo.c index 389f995b..aa770838 100644 --- a/src/vnet/dpo/dpo.c +++ b/src/vnet/dpo/dpo.c @@ -109,6 +109,25 @@ vnet_link_to_dpo_proto (vnet_link_t linkt) return (0); } +vnet_link_t +dpo_proto_to_link (dpo_proto_t dp) +{ + switch (dp) + { + case DPO_PROTO_IP6: + return (VNET_LINK_IP6); + case DPO_PROTO_IP4: + return (VNET_LINK_IP4); + case DPO_PROTO_MPLS: + return (VNET_LINK_MPLS); + case DPO_PROTO_ETHERNET: + return (VNET_LINK_ETHERNET); + case DPO_PROTO_NSH: + return (VNET_LINK_NSH); + } + return (~0); +} + u8 * format_dpo_type (u8 * s, va_list * args) { diff --git a/src/vnet/dpo/dpo.h b/src/vnet/dpo/dpo.h index 5aa4e2d2..42fc51d4 100644 --- a/src/vnet/dpo/dpo.h +++ b/src/vnet/dpo/dpo.h @@ -59,14 +59,10 @@ typedef u32 index_t; */ typedef enum dpo_proto_t_ { -#if CLIB_DEBUG > 0 - DPO_PROTO_IP4 = 1, -#else DPO_PROTO_IP4 = 0, -#endif DPO_PROTO_IP6, - DPO_PROTO_ETHERNET, DPO_PROTO_MPLS, + DPO_PROTO_ETHERNET, DPO_PROTO_NSH, } __attribute__((packed)) dpo_proto_t; @@ -272,6 +268,11 @@ extern u8 *format_dpo_type(u8 * s, va_list * args); */ extern u8 *format_dpo_proto(u8 * s, va_list * args); +/** + * @brief format a DPO protocol + */ +extern vnet_link_t dpo_proto_to_link(dpo_proto_t dp); + /** * @brief * Set and stack a DPO. 
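
[Editor's note] The dpo.c/dpo.h hunks above add dpo_proto_to_link(), the inverse of the existing vnet_link_to_dpo_proto(), so that a fib_path keyed by dpo_proto_t (now including DPO_PROTO_ETHERNET for the new L2 paths) can still derive the link type needed when building adjacencies. The sketch below is an editorial illustration, not part of this patch; the include path and the exact round-trip behaviour are assumptions based only on the hunks shown here.

/* Illustrative sketch only -- not part of the patch.
 * Shows the intended round-trip between the newly added
 * dpo_proto_to_link() and the pre-existing vnet_link_to_dpo_proto()
 * for the protocols a fib_path can now carry. */
#include <vnet/dpo/dpo.h>

static void
dpo_proto_link_roundtrip_check (void)
{
  const dpo_proto_t protos[] = {
    DPO_PROTO_IP4, DPO_PROTO_IP6, DPO_PROTO_MPLS, DPO_PROTO_ETHERNET
  };
  int i;

  for (i = 0; i < 4; i++)
    {
      /* map the DPO protocol to its VNET link type ... */
      vnet_link_t linkt = dpo_proto_to_link (protos[i]);
      /* ... and mapping back should yield the protocol we started from */
      ASSERT (vnet_link_to_dpo_proto (linkt) == protos[i]);
    }
}

Later hunks in this same commit (for example fib_path_attached_next_hop_get_adj() in fib_path.c) rely on exactly this mapping, using dpo_proto_to_link() and dpo_proto_to_fib() on the path's fp_nh_proto instead of the former fib_protocol_t field.
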
diff --git a/src/vnet/dpo/interface_dpo.c b/src/vnet/dpo/interface_dpo.c index 8d700c23..780bfa2a 100644 --- a/src/vnet/dpo/interface_dpo.c +++ b/src/vnet/dpo/interface_dpo.c @@ -195,11 +195,17 @@ const static char* const interface_dpo_ip6_nodes[] = "interface-dpo-ip4", NULL, }; +const static char* const interface_dpo_l2_nodes[] = +{ + "interface-dpo-l2", + NULL, +}; const static char* const * const interface_dpo_nodes[DPO_PROTO_NUM] = { [DPO_PROTO_IP4] = interface_dpo_ip4_nodes, [DPO_PROTO_IP6] = interface_dpo_ip6_nodes, + [DPO_PROTO_ETHERNET] = interface_dpo_l2_nodes, [DPO_PROTO_MPLS] = NULL, }; @@ -382,6 +388,14 @@ interface_dpo_ip6 (vlib_main_t * vm, return (interface_dpo_inline(vm, node, from_frame)); } +static uword +interface_dpo_l2 (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * from_frame) +{ + return (interface_dpo_inline(vm, node, from_frame)); +} + VLIB_REGISTER_NODE (interface_dpo_ip4_node) = { .function = interface_dpo_ip4, .name = "interface-dpo-ip4", @@ -414,3 +428,19 @@ VLIB_REGISTER_NODE (interface_dpo_ip6_node) = { VLIB_NODE_FUNCTION_MULTIARCH (interface_dpo_ip6_node, interface_dpo_ip6) +VLIB_REGISTER_NODE (interface_dpo_l2_node) = { + .function = interface_dpo_l2, + .name = "interface-dpo-l2", + .vector_size = sizeof (u32), + .format_trace = format_interface_dpo_trace, + + .n_next_nodes = 2, + .next_nodes = { + [INTERFACE_DPO_DROP] = "error-drop", + [INTERFACE_DPO_INPUT] = "l2-input", + }, +}; + +VLIB_NODE_FUNCTION_MULTIARCH (interface_dpo_l2_node, + interface_dpo_l2) + diff --git a/src/vnet/dpo/mpls_label_dpo.c b/src/vnet/dpo/mpls_label_dpo.c index 1c451a51..b178a902 100644 --- a/src/vnet/dpo/mpls_label_dpo.c +++ b/src/vnet/dpo/mpls_label_dpo.c @@ -192,7 +192,8 @@ mpls_label_imposition_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame, u8 payload_is_ip4, - u8 payload_is_ip6) + u8 payload_is_ip6, + u8 payload_is_ethernet) { u32 n_left_from, next_index, * from, * to_next; @@ -320,6 +321,13 @@ mpls_label_imposition_inline (vlib_main_t * vm, ttl2 = ip2->hop_limit; ttl3 = ip3->hop_limit; } + else if (payload_is_ethernet) + { + /* + * nothing to chang ein the ethernet header + */ + ttl0 = ttl1 = ttl2 = ttl3 = 255; + } else { /* @@ -551,7 +559,7 @@ mpls_label_imposition (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { - return (mpls_label_imposition_inline(vm, node, frame, 0, 0)); + return (mpls_label_imposition_inline(vm, node, frame, 0, 0, 0)); } VLIB_REGISTER_NODE (mpls_label_imposition_node) = { @@ -573,7 +581,7 @@ ip4_mpls_label_imposition (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { - return (mpls_label_imposition_inline(vm, node, frame, 1, 0)); + return (mpls_label_imposition_inline(vm, node, frame, 1, 0, 0)); } VLIB_REGISTER_NODE (ip4_mpls_label_imposition_node) = { @@ -595,7 +603,7 @@ ip6_mpls_label_imposition (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { - return (mpls_label_imposition_inline(vm, node, frame, 0, 1)); + return (mpls_label_imposition_inline(vm, node, frame, 0, 1, 0)); } VLIB_REGISTER_NODE (ip6_mpls_label_imposition_node) = { @@ -612,6 +620,28 @@ VLIB_REGISTER_NODE (ip6_mpls_label_imposition_node) = { VLIB_NODE_FUNCTION_MULTIARCH (ip6_mpls_label_imposition_node, ip6_mpls_label_imposition) +static uword +ethernet_mpls_label_imposition (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + return (mpls_label_imposition_inline(vm, node, frame, 0, 0, 1)); +} + +VLIB_REGISTER_NODE 
(ethernet_mpls_label_imposition_node) = { + .function = ethernet_mpls_label_imposition, + .name = "ethernet-mpls-label-imposition", + .vector_size = sizeof (u32), + + .format_trace = format_mpls_label_imposition_trace, + .n_next_nodes = 1, + .next_nodes = { + [0] = "error-drop", + } +}; +VLIB_NODE_FUNCTION_MULTIARCH (ethernet_mpls_label_imposition_node, + ethernet_mpls_label_imposition) + static void mpls_label_dpo_mem_show (void) { @@ -643,11 +673,18 @@ const static char* const mpls_label_imp_mpls_nodes[] = "mpls-label-imposition", NULL, }; +const static char* const mpls_label_imp_ethernet_nodes[] = +{ + "ethernet-mpls-label-imposition", + NULL, +}; + const static char* const * const mpls_label_imp_nodes[DPO_PROTO_NUM] = { [DPO_PROTO_IP4] = mpls_label_imp_ip4_nodes, [DPO_PROTO_IP6] = mpls_label_imp_ip6_nodes, [DPO_PROTO_MPLS] = mpls_label_imp_mpls_nodes, + [DPO_PROTO_ETHERNET] = mpls_label_imp_ethernet_nodes, }; diff --git a/src/vnet/ethernet/arp.c b/src/vnet/ethernet/arp.c index 4d9edaf5..8a394006 100644 --- a/src/vnet/ethernet/arp.c +++ b/src/vnet/ethernet/arp.c @@ -588,7 +588,7 @@ vnet_arp_set_ip4_over_ethernet_internal (vnet_main_t * vnm, e->fib_entry_index = fib_table_entry_path_add (fib_index, &pfx, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP4, &pfx.fp_addr, + DPO_PROTO_IP4, &pfx.fp_addr, e->sw_if_index, ~0, 1, NULL, FIB_ROUTE_PATH_FLAG_NONE); } @@ -1621,7 +1621,7 @@ arp_entry_free (ethernet_arp_interface_t * eai, ethernet_arp_ip4_entry_t * e) fib_table_entry_path_remove (fib_index, &pfx, FIB_SOURCE_ADJ, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx.fp_addr, e->sw_if_index, ~0, 1, FIB_ROUTE_PATH_FLAG_NONE); diff --git a/src/vnet/fib/fib_api.h b/src/vnet/fib/fib_api.h index 73d76a42..d07d6cae 100644 --- a/src/vnet/fib/fib_api.h +++ b/src/vnet/fib/fib_api.h @@ -21,7 +21,7 @@ int add_del_route_check (fib_protocol_t table_proto, u32 table_id, u32 next_hop_sw_if_index, - fib_protocol_t next_hop_table_proto, + dpo_proto_t next_hop_table_proto, u32 next_hop_table_id, u8 create_missing_tables, u8 is_rpf_id, @@ -43,7 +43,7 @@ add_del_route_t_handler (u8 is_multipath, u8 is_rpf_id, u32 fib_index, const fib_prefix_t * prefix, - u8 next_hop_proto_is_ip4, + dpo_proto_t next_hop_proto, const ip46_address_t * next_hop, u32 next_hop_sw_if_index, u8 next_hop_fib_index, diff --git a/src/vnet/fib/fib_entry.c b/src/vnet/fib/fib_entry.c index d7ff1c8c..2027f2be 100644 --- a/src/vnet/fib/fib_entry.c +++ b/src/vnet/fib/fib_entry.c @@ -58,12 +58,18 @@ fib_entry_get_index (const fib_entry_t * fib_entry) return (fib_entry - fib_entry_pool); } -static fib_protocol_t +fib_protocol_t fib_entry_get_proto (const fib_entry_t * fib_entry) { return (fib_entry->fe_prefix.fp_proto); } +dpo_proto_t +fib_entry_get_dpo_proto (const fib_entry_t * fib_entry) +{ + return (fib_proto_to_dpo(fib_entry->fe_prefix.fp_proto)); +} + fib_forward_chain_type_t fib_entry_get_default_chain_type (const fib_entry_t *fib_entry) { diff --git a/src/vnet/fib/fib_entry_src.c b/src/vnet/fib/fib_entry_src.c index ff73cbf9..173df74f 100644 --- a/src/vnet/fib/fib_entry_src.c +++ b/src/vnet/fib/fib_entry_src.c @@ -29,12 +29,6 @@ */ static fib_entry_src_vft_t fib_entry_src_vft[FIB_SOURCE_MAX]; -static fib_protocol_t -fib_entry_get_proto (const fib_entry_t * fib_entry) -{ - return (fib_entry->fe_prefix.fp_proto); -} - void fib_entry_src_register (fib_source_t source, const fib_entry_src_vft_t *vft) @@ -861,7 +855,7 @@ fib_entry_src_action_add (fib_entry_t *fib_entry, fib_entry_src_vft[source].fesv_add(esrc, fib_entry, flags, - 
fib_entry_get_proto(fib_entry), + fib_entry_get_dpo_proto(fib_entry), dpo); } @@ -914,7 +908,7 @@ fib_entry_src_action_update (fib_entry_t *fib_entry, fib_entry_src_vft[source].fesv_add(esrc, fib_entry, flags, - fib_entry_get_proto(fib_entry), + fib_entry_get_dpo_proto(fib_entry), dpo); } @@ -1106,8 +1100,7 @@ fib_entry_src_action_path_add (fib_entry_t *fib_entry, source, flags, drop_dpo_get( - fib_proto_to_dpo( - fib_entry_get_proto(fib_entry)))); + fib_entry_get_dpo_proto(fib_entry))); esrc = fib_entry_src_find(fib_entry, source, NULL); } @@ -1166,8 +1159,7 @@ fib_entry_src_action_path_swap (fib_entry_t *fib_entry, source, flags, drop_dpo_get( - fib_proto_to_dpo( - fib_entry_get_proto(fib_entry)))); + fib_entry_get_dpo_proto(fib_entry))); esrc = fib_entry_src_find(fib_entry, source, NULL); } diff --git a/src/vnet/fib/fib_entry_src.h b/src/vnet/fib/fib_entry_src.h index 640c174d..35c43936 100644 --- a/src/vnet/fib/fib_entry_src.h +++ b/src/vnet/fib/fib_entry_src.h @@ -73,7 +73,7 @@ typedef void (*fib_entry_src_deactivate_t)(fib_entry_src_t *src, typedef void (*fib_entry_src_add_t)(fib_entry_src_t *src, const fib_entry_t *entry, fib_entry_flag_t flags, - fib_protocol_t proto, + dpo_proto_t proto, const dpo_id_t *dpo); /** @@ -277,6 +277,8 @@ extern void fib_entry_src_mk_lb (fib_entry_t *fib_entry, fib_forward_chain_type_t fct, dpo_id_t *dpo_lb); +extern fib_protocol_t fib_entry_get_proto(const fib_entry_t * fib_entry); +extern dpo_proto_t fib_entry_get_dpo_proto(const fib_entry_t * fib_entry); /* * Per-source registration. declared here so we save a separate .h file for each diff --git a/src/vnet/fib/fib_entry_src_api.c b/src/vnet/fib/fib_entry_src_api.c index f895886b..1cdcfbde 100644 --- a/src/vnet/fib/fib_entry_src_api.c +++ b/src/vnet/fib/fib_entry_src_api.c @@ -131,7 +131,7 @@ static void fib_entry_src_api_add (fib_entry_src_t *src, const fib_entry_t *entry, fib_entry_flag_t flags, - fib_protocol_t proto, + dpo_proto_t proto, const dpo_id_t *dpo) { if (FIB_ENTRY_FLAG_NONE != flags) diff --git a/src/vnet/fib/fib_entry_src_default_route.c b/src/vnet/fib/fib_entry_src_default_route.c index 9f4e7c36..431abb66 100644 --- a/src/vnet/fib/fib_entry_src_default_route.c +++ b/src/vnet/fib/fib_entry_src_default_route.c @@ -35,7 +35,7 @@ static void fib_entry_src_default_route_add (fib_entry_src_t *src, const fib_entry_t *entry, fib_entry_flag_t flags, - fib_protocol_t proto, + dpo_proto_t proto, const dpo_id_t *dpo) { src->fes_pl = fib_path_list_create_special(proto, diff --git a/src/vnet/fib/fib_entry_src_interface.c b/src/vnet/fib/fib_entry_src_interface.c index bb87818f..6c087f34 100644 --- a/src/vnet/fib/fib_entry_src_interface.c +++ b/src/vnet/fib/fib_entry_src_interface.c @@ -35,7 +35,7 @@ static void fib_entry_src_interface_add (fib_entry_src_t *src, const fib_entry_t *entry, fib_entry_flag_t flags, - fib_protocol_t proto, + dpo_proto_t proto, const dpo_id_t *dpo) { src->fes_pl = fib_path_list_create_special( diff --git a/src/vnet/fib/fib_entry_src_lisp.c b/src/vnet/fib/fib_entry_src_lisp.c index 7f8b91bb..e72dce63 100644 --- a/src/vnet/fib/fib_entry_src_lisp.c +++ b/src/vnet/fib/fib_entry_src_lisp.c @@ -79,10 +79,10 @@ fib_entry_src_lisp_path_remove (fib_entry_src_t *src, static void fib_entry_src_lisp_add (fib_entry_src_t *src, - const fib_entry_t *entry, - fib_entry_flag_t flags, - fib_protocol_t proto, - const dpo_id_t *dpo) + const fib_entry_t *entry, + fib_entry_flag_t flags, + dpo_proto_t proto, + const dpo_id_t *dpo) { if (FIB_ENTRY_FLAG_NONE != flags) { diff --git 
a/src/vnet/fib/fib_entry_src_mpls.c b/src/vnet/fib/fib_entry_src_mpls.c index 14c7310f..a616458f 100644 --- a/src/vnet/fib/fib_entry_src_mpls.c +++ b/src/vnet/fib/fib_entry_src_mpls.c @@ -57,13 +57,13 @@ static void fib_entry_src_mpls_add (fib_entry_src_t *src, const fib_entry_t *entry, fib_entry_flag_t flags, - fib_protocol_t proto, + dpo_proto_t proto, const dpo_id_t *dpo) { src->fes_pl = fib_path_list_create_special(proto, FIB_PATH_LIST_FLAG_DROP, - drop_dpo_get(fib_proto_to_dpo(proto))); + drop_dpo_get(proto)); } static void diff --git a/src/vnet/fib/fib_entry_src_rr.c b/src/vnet/fib/fib_entry_src_rr.c index d66ef7b1..1153f3f1 100644 --- a/src/vnet/fib/fib_entry_src_rr.c +++ b/src/vnet/fib/fib_entry_src_rr.c @@ -35,7 +35,7 @@ fib_entry_src_rr_resolve_via_connected (fib_entry_src_t *src, const fib_entry_t *cover) { const fib_route_path_t path = { - .frp_proto = fib_entry->fe_prefix.fp_proto, + .frp_proto = fib_proto_to_dpo(fib_entry->fe_prefix.fp_proto), .frp_addr = fib_entry->fe_prefix.fp_addr, .frp_sw_if_index = fib_entry_get_resolving_interface( fib_entry_get_index(cover)), @@ -90,18 +90,17 @@ fib_entry_src_rr_use_covers_pl (fib_entry_src_t *src, const fib_entry_t *cover) { fib_node_index_t *entries = NULL; - fib_protocol_t proto; + dpo_proto_t proto; - proto = fib_entry->fe_prefix.fp_proto; + proto = fib_proto_to_dpo(fib_entry->fe_prefix.fp_proto); vec_add1(entries, fib_entry_get_index(fib_entry)); if (fib_path_list_recursive_loop_detect(cover->fe_parent, &entries)) { - src->fes_pl = fib_path_list_create_special( - proto, - FIB_PATH_LIST_FLAG_DROP, - drop_dpo_get(fib_proto_to_dpo(proto))); + src->fes_pl = fib_path_list_create_special(proto, + FIB_PATH_LIST_FLAG_DROP, + drop_dpo_get(proto)); } else { @@ -126,7 +125,7 @@ fib_entry_src_rr_activate (fib_entry_src_t *src, */ if (FIB_PROTOCOL_MPLS == fib_entry->fe_prefix.fp_proto) { - src->fes_pl = fib_path_list_create_special(FIB_PROTOCOL_MPLS, + src->fes_pl = fib_path_list_create_special(DPO_PROTO_MPLS, FIB_PATH_LIST_FLAG_DROP, NULL); fib_path_list_lock(src->fes_pl); diff --git a/src/vnet/fib/fib_entry_src_special.c b/src/vnet/fib/fib_entry_src_special.c index 75605d7f..e979e18f 100644 --- a/src/vnet/fib/fib_entry_src_special.c +++ b/src/vnet/fib/fib_entry_src_special.c @@ -43,7 +43,7 @@ static void fib_entry_src_special_add (fib_entry_src_t *src, const fib_entry_t *entry, fib_entry_flag_t flags, - fib_protocol_t proto, + dpo_proto_t proto, const dpo_id_t *dpo) { src->fes_pl = diff --git a/src/vnet/fib/fib_path.c b/src/vnet/fib/fib_path.c index 3a67a544..58050ccb 100644 --- a/src/vnet/fib/fib_path.c +++ b/src/vnet/fib/fib_path.c @@ -193,7 +193,7 @@ typedef struct fib_path_t_ { * next-hop's address. 
We can't derive this from the address itself * since the address can be all zeros */ - fib_protocol_t fp_nh_proto; + dpo_proto_t fp_nh_proto; /** * UCMP [unnormalised] weigth @@ -381,7 +381,7 @@ format_fib_path (u8 * s, va_list * args) s = format (s, " index:%d ", fib_path_get_index(path)); s = format (s, "pl-index:%d ", path->fp_pl_index); - s = format (s, "%U ", format_fib_protocol, path->fp_nh_proto); + s = format (s, "%U ", format_dpo_proto, path->fp_nh_proto); s = format (s, "weight=%d ", path->fp_weight); s = format (s, "pref=%d ", path->fp_preference); s = format (s, "%s: ", fib_path_type_names[path->fp_type]); @@ -454,7 +454,7 @@ format_fib_path (u8 * s, va_list * args) } break; case FIB_PATH_TYPE_RECURSIVE: - if (FIB_PROTOCOL_MPLS == path->fp_nh_proto) + if (DPO_PROTO_MPLS == path->fp_nh_proto) { s = format (s, "via %U %U", format_mpls_unicast_label, @@ -552,14 +552,14 @@ fib_path_attached_next_hop_get_adj (fib_path_t *path, * the subnet address (the attached route) links to the * auto-adj (see below), we want that adj here too. */ - return (adj_nbr_add_or_lock(path->fp_nh_proto, + return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto), link, &zero_addr, path->attached_next_hop.fp_interface)); } else { - return (adj_nbr_add_or_lock(path->fp_nh_proto, + return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto), link, &path->attached_next_hop.fp_nh, path->attached_next_hop.fp_interface)); @@ -575,10 +575,10 @@ fib_path_attached_next_hop_set (fib_path_t *path) */ dpo_set(&path->fp_dpo, DPO_ADJACENCY, - fib_proto_to_dpo(path->fp_nh_proto), + path->fp_nh_proto, fib_path_attached_next_hop_get_adj( path, - fib_proto_to_link(path->fp_nh_proto))); + dpo_proto_to_link(path->fp_nh_proto))); /* * become a child of the adjacency so we receive updates @@ -607,14 +607,14 @@ fib_path_attached_get_adj (fib_path_t *path, * point-2-point interfaces do not require a glean, since * there is nothing to ARP. Install a rewrite/nbr adj instead */ - return (adj_nbr_add_or_lock(path->fp_nh_proto, + return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto), link, &zero_addr, path->attached.fp_interface)); } else { - return (adj_glean_add_or_lock(path->fp_nh_proto, + return (adj_glean_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto), path->attached.fp_interface, NULL)); } @@ -650,7 +650,7 @@ fib_path_recursive_adj_update (fib_path_t *path, if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP) { path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED; - dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto))); + dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto)); } else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST) { @@ -668,7 +668,7 @@ fib_path_recursive_adj_update (fib_path_t *path, if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR) { path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED; - dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto))); + dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto)); /* * PIC edge trigger. let the load-balance maps know @@ -685,7 +685,7 @@ fib_path_recursive_adj_update (fib_path_t *path, if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib))) { path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED; - dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto))); + dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto)); /* * PIC edge trigger. 
let the load-balance maps know @@ -699,7 +699,7 @@ fib_path_recursive_adj_update (fib_path_t *path, if (!fib_entry_is_resolved(path->fp_via_fib)) { path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED; - dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto))); + dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto)); /* * PIC edge trigger. let the load-balance maps know @@ -720,9 +720,7 @@ fib_path_recursive_adj_update (fib_path_t *path, */ dpo_copy(dpo, &via_dpo); - FIB_PATH_DBG(path, "recursive update: %U", - fib_get_lookup_main(path->fp_nh_proto), - &path->fp_dpo, 2); + FIB_PATH_DBG(path, "recursive update:"); dpo_reset(&via_dpo); } @@ -804,13 +802,8 @@ fib_path_unresolve (fib_path_t *path) static fib_forward_chain_type_t fib_path_to_chain_type (const fib_path_t *path) { - switch (path->fp_nh_proto) + if (DPO_PROTO_MPLS == path->fp_nh_proto) { - case FIB_PROTOCOL_IP4: - return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4); - case FIB_PROTOCOL_IP6: - return (FIB_FORW_CHAIN_TYPE_UNICAST_IP6); - case FIB_PROTOCOL_MPLS: if (FIB_PATH_TYPE_RECURSIVE == path->fp_type && MPLS_EOS == path->recursive.fp_nh.fp_eos) { @@ -821,7 +814,10 @@ fib_path_to_chain_type (const fib_path_t *path) return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS); } } - return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4); + else + { + return (fib_forw_chain_type_from_dpo_proto(path->fp_nh_proto)); + } } /* @@ -927,7 +923,7 @@ FIXME comment ai = fib_path_attached_next_hop_get_adj( path, - fib_proto_to_link(path->fp_nh_proto)); + dpo_proto_to_link(path->fp_nh_proto)); path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED; if (if_is_up && adj_is_up(ai)) @@ -935,9 +931,7 @@ FIXME comment path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED; } - dpo_set(&path->fp_dpo, DPO_ADJACENCY, - fib_proto_to_dpo(path->fp_nh_proto), - ai); + dpo_set(&path->fp_dpo, DPO_ADJACENCY, path->fp_nh_proto, ai); adj_unlock(ai); if (!if_is_up) @@ -1141,7 +1135,7 @@ fib_path_create (fib_node_index_t pl_index, else { path->fp_type = FIB_PATH_TYPE_RECURSIVE; - if (FIB_PROTOCOL_MPLS == path->fp_nh_proto) + if (DPO_PROTO_MPLS == path->fp_nh_proto) { path->recursive.fp_nh.fp_local_label = rpath->frp_local_label; path->recursive.fp_nh.fp_eos = rpath->frp_eos; @@ -1167,7 +1161,7 @@ fib_path_create (fib_node_index_t pl_index, */ fib_node_index_t fib_path_create_special (fib_node_index_t pl_index, - fib_protocol_t nh_proto, + dpo_proto_t nh_proto, fib_path_cfg_flags_t flags, const dpo_id_t *dpo) { @@ -1433,7 +1427,7 @@ fib_path_cmp_w_route_path (fib_node_index_t path_index, res = (path->attached.fp_interface - rpath->frp_sw_if_index); break; case FIB_PATH_TYPE_RECURSIVE: - if (FIB_PROTOCOL_MPLS == path->fp_nh_proto) + if (DPO_PROTO_MPLS == path->fp_nh_proto) { res = path->recursive.fp_nh.fp_local_label - rpath->frp_local_label; @@ -1535,8 +1529,7 @@ fib_path_recursive_loop_detect (fib_node_index_t path_index, FIB_PATH_DBG(path, "recursive loop formed"); path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP; - dpo_copy(&path->fp_dpo, - drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto))); + dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto)); } else { @@ -1590,8 +1583,7 @@ fib_path_resolve (fib_node_index_t path_index) */ if (fib_path_is_permanent_drop(path)) { - dpo_copy(&path->fp_dpo, - drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto))); + dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto)); path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED; return (fib_path_is_resolved(path_index)); } @@ -1612,9 +1604,9 @@ fib_path_resolve (fib_node_index_t path_index) } 
dpo_set(&path->fp_dpo, DPO_ADJACENCY, - fib_proto_to_dpo(path->fp_nh_proto), + path->fp_nh_proto, fib_path_attached_get_adj(path, - fib_proto_to_link(path->fp_nh_proto))); + dpo_proto_to_link(path->fp_nh_proto))); /* * become a child of the adjacency so we receive updates @@ -1639,7 +1631,7 @@ fib_path_resolve (fib_node_index_t path_index) ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib); - if (FIB_PROTOCOL_MPLS == path->fp_nh_proto) + if (DPO_PROTO_MPLS == path->fp_nh_proto) { fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label, path->recursive.fp_nh.fp_eos, @@ -1680,8 +1672,7 @@ fib_path_resolve (fib_node_index_t path_index) /* * Resolve via the drop */ - dpo_copy(&path->fp_dpo, - drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto))); + dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto)); break; case FIB_PATH_TYPE_DEAG: { @@ -1696,7 +1687,7 @@ fib_path_resolve (fib_node_index_t path_index) LOOKUP_UNICAST); lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id, - fib_proto_to_dpo(path->fp_nh_proto), + path->fp_nh_proto, cast, LOOKUP_INPUT_DST_ADDR, LOOKUP_TABLE_FROM_CONFIG, @@ -1707,7 +1698,7 @@ fib_path_resolve (fib_node_index_t path_index) /* * Resolve via a receive DPO. */ - receive_dpo_add_or_lock(fib_proto_to_dpo(path->fp_nh_proto), + receive_dpo_add_or_lock(path->fp_nh_proto, path->receive.fp_interface, &path->receive.fp_addr, &path->fp_dpo); @@ -1716,7 +1707,7 @@ fib_path_resolve (fib_node_index_t path_index) /* * Resolve via a receive DPO. */ - interface_dpo_add_or_lock(fib_proto_to_dpo(path->fp_nh_proto), + interface_dpo_add_or_lock(path->fp_nh_proto, path->intf_rx.fp_interface, &path->fp_dpo); break; @@ -2035,7 +2026,7 @@ fib_path_contribute_forwarding (fib_node_index_t path_index, /* * Create the adj needed for sending IP multicast traffic */ - ai = adj_mcast_add_or_lock(path->fp_nh_proto, + ai = adj_mcast_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto), fib_forw_chain_type_to_link_type(fct), path->attached.fp_interface); dpo_set(dpo, DPO_ADJACENCY, @@ -2187,7 +2178,7 @@ fib_path_encode (fib_node_index_t path_list_index, return (FIB_PATH_LIST_WALK_CONTINUE); } -fib_protocol_t +dpo_proto_t fib_path_get_proto (fib_node_index_t path_index) { fib_path_t *path; diff --git a/src/vnet/fib/fib_path.h b/src/vnet/fib/fib_path.h index a34cb43f..f986e437 100644 --- a/src/vnet/fib/fib_path.h +++ b/src/vnet/fib/fib_path.h @@ -78,6 +78,11 @@ typedef enum fib_path_cfg_attribute_t_ { * The path is an interface recieve */ FIB_PATH_CFG_ATTRIBUTE_LOCAL, + /** + * The path is L2. i.e. the parameters therein are to be interpreted as + * pertaining to L2 config. + */ + FIB_PATH_CFG_ATTRIBUTE_L2, /** * Marker. Add new types before this one, then update it. 
*/ @@ -98,6 +103,7 @@ typedef enum fib_path_cfg_attribute_t_ { [FIB_PATH_CFG_ATTRIBUTE_ATTACHED] = "attached", \ [FIB_PATH_CFG_ATTRIBUTE_INTF_RX] = "interface-rx", \ [FIB_PATH_CFG_ATTRIBUTE_RPF_ID] = "rpf-id", \ + [FIB_PATH_CFG_ATTRIBUTE_L2] = "l2", \ } #define FOR_EACH_FIB_PATH_CFG_ATTRIBUTE(_item) \ @@ -118,6 +124,7 @@ typedef enum fib_path_cfg_flags_t_ { FIB_PATH_CFG_FLAG_ATTACHED = (1 << FIB_PATH_CFG_ATTRIBUTE_ATTACHED), FIB_PATH_CFG_FLAG_INTF_RX = (1 << FIB_PATH_CFG_ATTRIBUTE_INTF_RX), FIB_PATH_CFG_FLAG_RPF_ID = (1 << FIB_PATH_CFG_ATTRIBUTE_RPF_ID), + FIB_PATH_CFG_FLAG_L2 = (1 << FIB_PATH_CFG_ATTRIBUTE_L2), } __attribute__ ((packed)) fib_path_cfg_flags_t; @@ -131,7 +138,7 @@ extern u8 * format_fib_path(u8 * s, va_list * args); extern fib_node_index_t fib_path_create(fib_node_index_t pl_index, const fib_route_path_t *path); extern fib_node_index_t fib_path_create_special(fib_node_index_t pl_index, - fib_protocol_t nh_proto, + dpo_proto_t nh_proto, fib_path_cfg_flags_t flags, const dpo_id_t *dpo); @@ -148,7 +155,7 @@ extern int fib_path_is_recursive_constrained(fib_node_index_t path_index); extern int fib_path_is_exclusive(fib_node_index_t path_index); extern int fib_path_is_deag(fib_node_index_t path_index); extern int fib_path_is_looped(fib_node_index_t path_index); -extern fib_protocol_t fib_path_get_proto(fib_node_index_t path_index); +extern dpo_proto_t fib_path_get_proto(fib_node_index_t path_index); extern void fib_path_destroy(fib_node_index_t path_index); extern uword fib_path_hash(fib_node_index_t path_index); extern load_balance_path_t * fib_path_append_nh_for_multipath_hash( diff --git a/src/vnet/fib/fib_path_ext.c b/src/vnet/fib/fib_path_ext.c index 26f2b9b6..4438671b 100644 --- a/src/vnet/fib/fib_path_ext.c +++ b/src/vnet/fib/fib_path_ext.c @@ -191,6 +191,9 @@ fib_path_ext_stack (fib_path_ext_t *path_ext, case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS: parent_fct = child_fct; break; + case FIB_FORW_CHAIN_TYPE_ETHERNET: + parent_fct = FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS; + break; default: return (nhs); break; diff --git a/src/vnet/fib/fib_path_list.c b/src/vnet/fib/fib_path_list.c index 7a9c328c..f30fd7ea 100644 --- a/src/vnet/fib/fib_path_list.c +++ b/src/vnet/fib/fib_path_list.c @@ -611,7 +611,7 @@ fib_path_list_get_resolving_interface (fib_node_index_t path_list_index) return (sw_if_index); } -fib_protocol_t +dpo_proto_t fib_path_list_get_proto (fib_node_index_t path_list_index) { fib_path_list_t *path_list; @@ -753,7 +753,7 @@ fib_path_list_flags_2_path_flags (fib_path_list_flags_t plf) } fib_node_index_t -fib_path_list_create_special (fib_protocol_t nh_proto, +fib_path_list_create_special (dpo_proto_t nh_proto, fib_path_list_flags_t flags, const dpo_id_t *dpo) { diff --git a/src/vnet/fib/fib_path_list.h b/src/vnet/fib/fib_path_list.h index b4b6985b..a54b79e2 100644 --- a/src/vnet/fib/fib_path_list.h +++ b/src/vnet/fib/fib_path_list.h @@ -106,7 +106,7 @@ typedef enum fib_path_list_flags_t_ { extern fib_node_index_t fib_path_list_create(fib_path_list_flags_t flags, const fib_route_path_t *paths); -extern fib_node_index_t fib_path_list_create_special(fib_protocol_t nh_proto, +extern fib_node_index_t fib_path_list_create_special(dpo_proto_t nh_proto, fib_path_list_flags_t flags, const dpo_id_t *dpo); @@ -150,7 +150,7 @@ extern int fib_path_list_recursive_loop_detect(fib_node_index_t path_list_index, extern u32 fib_path_list_get_resolving_interface(fib_node_index_t path_list_index); extern int fib_path_list_is_looped(fib_node_index_t path_list_index); extern int 
fib_path_list_is_popular(fib_node_index_t path_list_index); -extern fib_protocol_t fib_path_list_get_proto(fib_node_index_t path_list_index); +extern dpo_proto_t fib_path_list_get_proto(fib_node_index_t path_list_index); extern u8 * fib_path_list_format(fib_node_index_t pl_index, u8 * s); extern index_t fib_path_list_lb_map_add_or_lock(fib_node_index_t pl_index, diff --git a/src/vnet/fib/fib_table.c b/src/vnet/fib/fib_table.c index 5aa02dd0..6b6cc5cb 100644 --- a/src/vnet/fib/fib_table.c +++ b/src/vnet/fib/fib_table.c @@ -505,7 +505,7 @@ fib_table_entry_path_add (u32 fib_index, const fib_prefix_t *prefix, fib_source_t source, fib_entry_flag_t flags, - fib_protocol_t next_hop_proto, + dpo_proto_t next_hop_proto, const ip46_address_t *next_hop, u32 next_hop_sw_if_index, u32 next_hop_fib_index, @@ -664,7 +664,7 @@ void fib_table_entry_path_remove (u32 fib_index, const fib_prefix_t *prefix, fib_source_t source, - fib_protocol_t next_hop_proto, + dpo_proto_t next_hop_proto, const ip46_address_t *next_hop, u32 next_hop_sw_if_index, u32 next_hop_fib_index, @@ -755,7 +755,7 @@ fib_table_entry_update_one_path (u32 fib_index, const fib_prefix_t *prefix, fib_source_t source, fib_entry_flag_t flags, - fib_protocol_t next_hop_proto, + dpo_proto_t next_hop_proto, const ip46_address_t *next_hop, u32 next_hop_sw_if_index, u32 next_hop_fib_index, diff --git a/src/vnet/fib/fib_table.h b/src/vnet/fib/fib_table.h index a65fea74..579740e9 100644 --- a/src/vnet/fib/fib_table.h +++ b/src/vnet/fib/fib_table.h @@ -288,7 +288,7 @@ extern fib_node_index_t fib_table_entry_path_add(u32 fib_index, const fib_prefix_t *prefix, fib_source_t source, fib_entry_flag_t flags, - fib_protocol_t next_hop_proto, + dpo_proto_t next_hop_proto, const ip46_address_t *next_hop, u32 next_hop_sw_if_index, u32 next_hop_fib_index, @@ -364,7 +364,7 @@ extern fib_node_index_t fib_table_entry_path_add2(u32 fib_index, extern void fib_table_entry_path_remove(u32 fib_index, const fib_prefix_t *prefix, fib_source_t source, - fib_protocol_t next_hop_proto, + dpo_proto_t next_hop_proto, const ip46_address_t *next_hop, u32 next_hop_sw_if_index, u32 next_hop_fib_index, @@ -471,7 +471,7 @@ extern fib_node_index_t fib_table_entry_update_one_path(u32 fib_index, const fib_prefix_t *prefix, fib_source_t source, fib_entry_flag_t flags, - fib_protocol_t next_hop_proto, + dpo_proto_t next_hop_proto, const ip46_address_t *next_hop, u32 next_hop_sw_if_index, u32 next_hop_fib_index, diff --git a/src/vnet/fib/fib_test.c b/src/vnet/fib/fib_test.c index 4c891667..59d5da2a 100644 --- a/src/vnet/fib/fib_test.c +++ b/src/vnet/fib/fib_test.c @@ -833,7 +833,7 @@ fib_test_v4 (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_ATTACHED), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -861,7 +861,7 @@ fib_test_v4 (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_LOCAL), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -911,7 +911,7 @@ fib_test_v4 (void) fib_table_entry_path_add(fib_index, &pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -963,7 +963,7 @@ fib_test_v4 (void) pfx.fp_len = 0; fib_table_entry_path_remove(fib_index, &pfx, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // non-recursive path, so no FIB index @@ -1029,7 +1029,7 @@ fib_test_v4 (void) &pfx_11_11_11_11_s_32, 
FIB_SOURCE_API, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_10_10_10_1_s_32.fp_addr, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -1095,7 +1095,7 @@ fib_test_v4 (void) &pfx_10_10_10_1_s_32, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_10_10_10_1_s_32.fp_addr, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -1110,7 +1110,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_11_11_11_11_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_10_10_10_1_s_32.fp_addr, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -1144,7 +1144,7 @@ fib_test_v4 (void) &pfx_10_10_10_2_s_32, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_10_10_10_2_s_32.fp_addr, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -1181,7 +1181,7 @@ fib_test_v4 (void) &pfx_1_1_1_1_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -1214,7 +1214,7 @@ fib_test_v4 (void) &pfx_1_1_2_0_s_24, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -1241,7 +1241,7 @@ fib_test_v4 (void) &pfx_1_1_2_0_s_24, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_2, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -1280,7 +1280,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_1_1_2_0_s_24, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_2, tm->hw[0]->sw_if_index, ~0, @@ -1327,7 +1327,7 @@ fib_test_v4 (void) &bgp_100_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_1_1_1_1, ~0, // no index provided. fib_index, // nexthop in same fib as route @@ -1363,7 +1363,7 @@ fib_test_v4 (void) &bgp_101_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_1_1_1_1, ~0, // no index provided. fib_index, // nexthop in same fib as route @@ -1487,7 +1487,7 @@ fib_test_v4 (void) &bgp_200_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_2_s_32.fp_addr, ~0, // no index provided. 
fib_index, // nexthop in same fib as route @@ -1534,7 +1534,7 @@ fib_test_v4 (void) &pfx_1_2_3_4_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, @@ -1545,7 +1545,7 @@ fib_test_v4 (void) &pfx_1_2_3_4_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_12_12_12_12, tm->hw[1]->sw_if_index, ~0, @@ -1586,7 +1586,7 @@ fib_test_v4 (void) &pfx_1_2_3_5_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_12_12_12_12, tm->hw[1]->sw_if_index, ~0, @@ -1597,7 +1597,7 @@ fib_test_v4 (void) &pfx_1_2_3_5_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, @@ -1669,7 +1669,7 @@ fib_test_v4 (void) &pfx_6_6_6_6_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -1688,7 +1688,7 @@ fib_test_v4 (void) &pfx_6_6_6_6_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_2, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -1770,7 +1770,7 @@ fib_test_v4 (void) &pfx_6_6_6_6_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_12_12_12_12, tm->hw[1]->sw_if_index, ~0, // invalid fib index @@ -1915,7 +1915,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_6_6_6_6_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_12_12_12_12, tm->hw[1]->sw_if_index, ~0, // invalid fib index @@ -1995,7 +1995,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_6_6_6_6_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_2, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -2026,7 +2026,7 @@ fib_test_v4 (void) &bgp_44_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_2_3_4_s_32.fp_addr, ~0, fib_index, @@ -2037,7 +2037,7 @@ fib_test_v4 (void) &bgp_44_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_2_3_5_s_32.fp_addr, ~0, fib_index, @@ -2107,7 +2107,7 @@ fib_test_v4 (void) &bgp_201_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_200_s_32.fp_addr, ~0, // no index provided. 
fib_index, // nexthop in same fib as route @@ -2151,7 +2151,7 @@ fib_test_v4 (void) &pfx_1_1_1_0_s_24, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -2209,7 +2209,7 @@ fib_test_v4 (void) &pfx_1_1_1_0_s_28, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_2, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -2244,7 +2244,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_1_1_1_0_s_28, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_2, tm->hw[0]->sw_if_index, ~0, @@ -2275,7 +2275,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_1_1_1_0_s_24, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, @@ -2316,7 +2316,7 @@ fib_test_v4 (void) &pfx_1_1_1_2_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -2351,7 +2351,7 @@ fib_test_v4 (void) &bgp_201_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_2_s_32.fp_addr, ~0, fib_index, @@ -2362,7 +2362,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &bgp_201_pfx, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_2_s_32.fp_addr, ~0, fib_index, @@ -2375,7 +2375,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &bgp_201_pfx, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_200_s_32.fp_addr, ~0, // no index provided. fib_index, @@ -2405,7 +2405,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &bgp_200_pfx, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_2_s_32.fp_addr, ~0, // no index provided. fib_index, @@ -2446,7 +2446,7 @@ fib_test_v4 (void) &bgp_102, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_1_s_32.fp_addr, ~0, // no index provided. fib_index, // same as route @@ -2457,7 +2457,7 @@ fib_test_v4 (void) &bgp_102, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_2_s_32.fp_addr, ~0, // no index provided. fib_index, // same as route's FIB @@ -2483,7 +2483,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &bgp_102, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_1_s_32.fp_addr, ~0, // no index provided. fib_index, // same as route's FIB @@ -2492,7 +2492,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &bgp_102, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_2_s_32.fp_addr, ~0, // no index provided. fib_index, // same as route's FIB @@ -2507,7 +2507,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &bgp_100_pfx, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_1_s_32.fp_addr, ~0, // no index provided. fib_index, // same as route's FIB @@ -2516,7 +2516,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &bgp_101_pfx, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_1_s_32.fp_addr, ~0, // no index provided. fib_index, // same as route's FIB @@ -2546,7 +2546,7 @@ fib_test_v4 (void) &bgp_200_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, ~0, // no index provided. fib_index, // Same as route's FIB @@ -2593,7 +2593,7 @@ fib_test_v4 (void) &bgp_201_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_3, ~0, // no index provided. 
fib_index, @@ -2639,7 +2639,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &bgp_200_pfx, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, ~0, // no index provided. fib_index, // same as route's FIB @@ -2648,7 +2648,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &bgp_201_pfx, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_3, ~0, // no index provided. fib_index, // same as route's FIB @@ -2707,7 +2707,7 @@ fib_test_v4 (void) &pfx_5_5_5_5_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_5_5_5_6_s_32.fp_addr, ~0, // no index provided. fib_index, @@ -2718,7 +2718,7 @@ fib_test_v4 (void) &pfx_5_5_5_6_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_5_5_5_7_s_32.fp_addr, ~0, // no index provided. fib_index, @@ -2729,7 +2729,7 @@ fib_test_v4 (void) &pfx_5_5_5_7_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_5_5_5_5_s_32.fp_addr, ~0, // no index provided. fib_index, @@ -2768,7 +2768,7 @@ fib_test_v4 (void) &pfx_5_5_5_6_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, @@ -2801,7 +2801,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_5_5_5_6_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, @@ -2826,7 +2826,7 @@ fib_test_v4 (void) &pfx_5_5_5_5_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -2868,7 +2868,7 @@ fib_test_v4 (void) &pfx_5_5_5_5_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_5_5_5_6_s_32.fp_addr, ~0, // no index provided. fib_index, @@ -2892,7 +2892,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_5_5_5_5_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_5_5_5_6_s_32.fp_addr, ~0, // no index provided. fib_index, // same as route's FIB @@ -2901,7 +2901,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_5_5_5_6_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_5_5_5_7_s_32.fp_addr, ~0, // no index provided. fib_index, // same as route's FIB @@ -2910,7 +2910,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_5_5_5_7_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_5_5_5_5_s_32.fp_addr, ~0, // no index provided. fib_index, // same as route's FIB @@ -2919,7 +2919,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_5_5_5_6_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_2, ~0, // no index provided. fib_index, // same as route's FIB @@ -2943,7 +2943,7 @@ fib_test_v4 (void) &pfx_5_5_5_6_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_5_5_5_6_s_32.fp_addr, ~0, // no index provided. fib_index, @@ -2957,7 +2957,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_5_5_5_6_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_5_5_5_6_s_32.fp_addr, ~0, // no index provided. 
fib_index, // same as route's FIB @@ -2991,7 +2991,7 @@ fib_test_v4 (void) &pfx_23_23_23_0_s_24, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_23_23_23_23_s_32.fp_addr, ~0, // recursive fib_index, @@ -3021,7 +3021,7 @@ fib_test_v4 (void) &pfx_0_0_0_0_s_0, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_23_23_23_23_s_32.fp_addr, ~0, // recursive fib_index, @@ -3051,7 +3051,7 @@ fib_test_v4 (void) &bgp_200_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_1_1_1_1, ~0, fib_index, @@ -3081,7 +3081,7 @@ fib_test_v4 (void) &pfx_1_1_1_0_s_28, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -3099,7 +3099,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_1_1_1_1_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -3116,7 +3116,7 @@ fib_test_v4 (void) &pfx_1_1_1_1_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -3140,7 +3140,7 @@ fib_test_v4 (void) &pfx_1_1_1_3_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_2, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -3152,7 +3152,7 @@ fib_test_v4 (void) &bgp_200_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_3_s_32.fp_addr, ~0, fib_index, @@ -3177,7 +3177,7 @@ fib_test_v4 (void) &bgp_78s[ii], FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_3_s_32.fp_addr, ~0, fib_index, @@ -3188,7 +3188,7 @@ fib_test_v4 (void) &bgp_78s[ii], FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_1_1_1_1, ~0, fib_index, @@ -3238,7 +3238,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_1_1_1_1_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -3277,7 +3277,7 @@ fib_test_v4 (void) &pfx_1_1_1_1_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -3307,7 +3307,7 @@ fib_test_v4 (void) &bgp_200_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_2_s_32.fp_addr, ~0, fib_index, @@ -3320,7 +3320,7 @@ fib_test_v4 (void) &bgp_78s[ii], FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_2_s_32.fp_addr, ~0, fib_index, @@ -3354,7 +3354,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_1_1_1_1_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, @@ -3391,7 +3391,7 @@ fib_test_v4 (void) &pfx_1_1_1_1_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, @@ -3412,7 +3412,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &bgp_200_pfx, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_2_s_32.fp_addr, ~0, fib_index, @@ -3421,7 +3421,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &bgp_200_pfx, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_1_1_1_1, ~0, fib_index, @@ -3430,7 +3430,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &bgp_200_pfx, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, 
&pfx_1_1_1_3_s_32.fp_addr, ~0, fib_index, @@ -3481,7 +3481,7 @@ fib_test_v4 (void) &pfx_4_4_4_4_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, @@ -3492,7 +3492,7 @@ fib_test_v4 (void) &pfx_4_4_4_4_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_2, tm->hw[0]->sw_if_index, ~0, @@ -3503,7 +3503,7 @@ fib_test_v4 (void) &pfx_4_4_4_4_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_3, tm->hw[0]->sw_if_index, ~0, @@ -3539,7 +3539,7 @@ fib_test_v4 (void) for (ii = 0; ii < 4; ii++) { fib_route_path_t r_path = { - .frp_proto = FIB_PROTOCOL_IP4, + .frp_proto = DPO_PROTO_IP4, .frp_addr = { .ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a02 + ii), }, @@ -3588,7 +3588,7 @@ fib_test_v4 (void) &pfx_4_4_4_4_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &zero_addr, ~0, fib_index, @@ -3648,7 +3648,7 @@ fib_test_v4 (void) &pfx_34_34_1_1_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, 0, @@ -3659,7 +3659,7 @@ fib_test_v4 (void) &pfx_34_1_1_1_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_34_34_1_1_s_32.fp_addr, ~0, fib_index, @@ -3670,7 +3670,7 @@ fib_test_v4 (void) &pfx_34_1_1_1_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_34_34_1_1_s_32.fp_addr, ~0, fib_index, @@ -3691,7 +3691,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_1_1_1_2_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, @@ -3700,7 +3700,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_1_1_1_1_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, @@ -3709,7 +3709,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_1_1_2_0_s_24, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, @@ -3751,7 +3751,7 @@ fib_test_v4 (void) &pfx_4_1_1_1_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &zero_addr, tm->hw[0]->sw_if_index, fib_index, @@ -3805,7 +3805,7 @@ fib_test_v4 (void) &pfx_2001_s_64, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, fib_index, @@ -3863,7 +3863,7 @@ fib_test_v4 (void) &pfx_12_10_10_2_s_32, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_12_10_10_2_s_32.fp_addr, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -3897,7 +3897,7 @@ fib_test_v4 (void) &pfx_10_10_10_127_s_32, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_10_10_10_127_s_32.fp_addr, tm->hw[1]->sw_if_index, ~0, // invalid fib index @@ -3945,7 +3945,7 @@ fib_test_v4 (void) &pfx_10_10_10_3_s_32, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_3, tm->hw[0]->sw_if_index, fib_index, @@ -3956,7 +3956,7 @@ fib_test_v4 (void) &pfx_10_10_10_3_s_32, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_12_12_12_12, tm->hw[1]->sw_if_index, fib_index, @@ -3975,7 +3975,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_10_10_10_3_s_32, FIB_SOURCE_ADJ, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_3, tm->hw[0]->sw_if_index, fib_index, @@ -3992,7 +3992,7 @@ fib_test_v4 (void) &pfx_10_10_10_3_s_32, 
FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_3, tm->hw[0]->sw_if_index, fib_index, @@ -4011,7 +4011,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_10_10_10_3_s_32, FIB_SOURCE_ADJ, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_12_12_12_12, tm->hw[1]->sw_if_index, fib_index, @@ -4030,7 +4030,7 @@ fib_test_v4 (void) fib_table_entry_path_remove(fib_index, &pfx_10_10_10_3_s_32, FIB_SOURCE_ADJ, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_3, tm->hw[0]->sw_if_index, fib_index, @@ -4269,7 +4269,7 @@ fib_test_v6 (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_ATTACHED), - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, NULL, tm->hw[0]->sw_if_index, ~0, @@ -4300,7 +4300,7 @@ fib_test_v6 (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_LOCAL), - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -4345,7 +4345,7 @@ fib_test_v6 (void) fib_table_entry_path_add(fib_index, &pfx_0_0, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, &nh_2001_2, tm->hw[0]->sw_if_index, ~0, @@ -4389,7 +4389,7 @@ fib_test_v6 (void) */ fib_table_entry_path_remove(fib_index, &pfx_0_0, FIB_SOURCE_API, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, &nh_2001_2, tm->hw[0]->sw_if_index, ~0, @@ -4466,7 +4466,7 @@ fib_test_v6 (void) &pfx_2001_1_2_s_128, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, &pfx_2001_1_2_s_128.fp_addr, tm->hw[0]->sw_if_index, ~0, @@ -4505,7 +4505,7 @@ fib_test_v6 (void) &pfx_2001_1_3_s_128, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, &pfx_2001_1_3_s_128.fp_addr, tm->hw[0]->sw_if_index, ~0, @@ -4559,7 +4559,7 @@ fib_test_v6 (void) &pfx_2001_a_s_64, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, &nh_2001_2, tm->hw[0]->sw_if_index, ~0, @@ -4573,7 +4573,7 @@ fib_test_v6 (void) &pfx_2001_b_s_64, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, &nh_2001_2, tm->hw[0]->sw_if_index, ~0, @@ -4608,7 +4608,7 @@ fib_test_v6 (void) &pfx_1_1_1_1_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, &nh_2001_2, tm->hw[0]->sw_if_index, ~0, @@ -4646,7 +4646,7 @@ fib_test_v6 (void) &pfx_2001_c_s_64, FIB_SOURCE_CLI, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, NULL, tm->hw[0]->sw_if_index, ~0, @@ -4663,7 +4663,7 @@ fib_test_v6 (void) fib_table_entry_path_remove(fib_index, &pfx_2001_c_s_64, FIB_SOURCE_CLI, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, NULL, tm->hw[0]->sw_if_index, ~0, @@ -4748,7 +4748,7 @@ fib_test_v6 (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_ATTACHED), - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, NULL, tm->hw[1]->sw_if_index, ~0, @@ -4767,7 +4767,7 @@ fib_test_v6 (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_LOCAL), - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -5095,7 +5095,7 @@ fib_test_ae (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_ATTACHED), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, @@ -5111,7 +5111,7 @@ fib_test_ae (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_LOCAL), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -5140,7 +5140,7 @@ fib_test_ae (void) &pfx_10_10_10_1_s_32, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_10_10_10_1_s_32.fp_addr, 
tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -5167,7 +5167,7 @@ fib_test_ae (void) &local_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -5209,7 +5209,7 @@ fib_test_ae (void) &pfx_10_10_10_2_s_32, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_10_10_10_2_s_32.fp_addr, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -5243,7 +5243,7 @@ fib_test_ae (void) &local_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -5280,7 +5280,7 @@ fib_test_ae (void) &pfx_10_10_10_3_s_32, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_10_10_10_3_s_32.fp_addr, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -5352,7 +5352,7 @@ fib_test_ae (void) &local_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_10_10_10_2_s_32.fp_addr, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -5375,7 +5375,7 @@ fib_test_ae (void) &local_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -5407,7 +5407,7 @@ fib_test_ae (void) &pfx_10_0_0_0_s_8, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_10_10_10_3_s_32.fp_addr, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -5463,7 +5463,7 @@ fib_test_ae (void) &local_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_10_10_10_1_s_32.fp_addr, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -5499,7 +5499,7 @@ fib_test_ae (void) &local_pfx, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -5538,7 +5538,7 @@ fib_test_ae (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_ATTACHED), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, @@ -5626,7 +5626,7 @@ fib_test_pref (void) * 2 high, 2 medium and 2 low preference non-recursive paths */ fib_route_path_t nr_path_hi_1 = { - .frp_proto = FIB_PROTOCOL_IP4, + .frp_proto = DPO_PROTO_IP4, .frp_sw_if_index = tm->hw[0]->sw_if_index, .frp_fib_index = ~0, .frp_weight = 1, @@ -5637,7 +5637,7 @@ fib_test_pref (void) }, }; fib_route_path_t nr_path_hi_2 = { - .frp_proto = FIB_PROTOCOL_IP4, + .frp_proto = DPO_PROTO_IP4, .frp_sw_if_index = tm->hw[0]->sw_if_index, .frp_fib_index = ~0, .frp_weight = 1, @@ -5648,7 +5648,7 @@ fib_test_pref (void) }, }; fib_route_path_t nr_path_med_1 = { - .frp_proto = FIB_PROTOCOL_IP4, + .frp_proto = DPO_PROTO_IP4, .frp_sw_if_index = tm->hw[1]->sw_if_index, .frp_fib_index = ~0, .frp_weight = 1, @@ -5659,7 +5659,7 @@ fib_test_pref (void) }, }; fib_route_path_t nr_path_med_2 = { - .frp_proto = FIB_PROTOCOL_IP4, + .frp_proto = DPO_PROTO_IP4, .frp_sw_if_index = tm->hw[1]->sw_if_index, .frp_fib_index = ~0, .frp_weight = 1, @@ -5670,7 +5670,7 @@ fib_test_pref (void) }, }; fib_route_path_t nr_path_low_1 = { - .frp_proto = FIB_PROTOCOL_IP4, + .frp_proto = DPO_PROTO_IP4, .frp_sw_if_index = tm->hw[2]->sw_if_index, .frp_fib_index = ~0, .frp_weight = 1, @@ -5681,7 +5681,7 @@ fib_test_pref (void) }, }; fib_route_path_t nr_path_low_2 = { - .frp_proto = FIB_PROTOCOL_IP4, + .frp_proto = DPO_PROTO_IP4, .frp_sw_if_index = tm->hw[2]->sw_if_index, .frp_fib_index = ~0, .frp_weight = 1, @@ -5897,7 +5897,7 @@ fib_test_pref (void) }, }; fib_route_path_t r_path_hi = { - .frp_proto = 
FIB_PROTOCOL_IP4, + .frp_proto = DPO_PROTO_IP4, .frp_sw_if_index = ~0, .frp_fib_index = 0, .frp_weight = 1, @@ -5906,7 +5906,7 @@ fib_test_pref (void) .frp_addr = pfx_1_1_1_1_s_32.fp_addr, }; fib_route_path_t r_path_med = { - .frp_proto = FIB_PROTOCOL_IP4, + .frp_proto = DPO_PROTO_IP4, .frp_sw_if_index = ~0, .frp_fib_index = 0, .frp_weight = 1, @@ -5915,7 +5915,7 @@ fib_test_pref (void) .frp_addr = pfx_1_1_1_2_s_32.fp_addr, }; fib_route_path_t r_path_low = { - .frp_proto = FIB_PROTOCOL_IP4, + .frp_proto = DPO_PROTO_IP4, .frp_sw_if_index = ~0, .frp_fib_index = 0, .frp_weight = 1, @@ -6099,7 +6099,7 @@ fib_test_label (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_ATTACHED), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, @@ -6115,7 +6115,7 @@ fib_test_label (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_LOCAL), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -6145,7 +6145,7 @@ fib_test_label (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_ATTACHED), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[1]->sw_if_index, ~0, @@ -6161,7 +6161,7 @@ fib_test_label (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_LOCAL), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[1]->sw_if_index, ~0, // invalid fib index @@ -6243,7 +6243,7 @@ fib_test_label (void) &pfx_1_1_1_1_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -6282,7 +6282,7 @@ fib_test_label (void) &pfx_1_1_1_1_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_11_1, tm->hw[1]->sw_if_index, ~0, // invalid fib index @@ -6360,7 +6360,7 @@ fib_test_label (void) &pfx_1_1_1_1_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_11_2, tm->hw[1]->sw_if_index, ~0, // invalid fib index @@ -6440,7 +6440,7 @@ fib_test_label (void) &pfx_2_2_2_2_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_1_s_32.fp_addr, ~0, fib_index, @@ -6612,7 +6612,7 @@ fib_test_label (void) fib_table_entry_path_remove(fib_index, &pfx_1_1_1_1_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -6669,7 +6669,7 @@ fib_test_label (void) fib_table_entry_path_remove(fib_index, &pfx_1_1_1_1_s_32, FIB_SOURCE_API, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_11_1, tm->hw[1]->sw_if_index, ~0, // invalid fib index @@ -6711,7 +6711,7 @@ fib_test_label (void) &pfx_1_1_1_1_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -6842,7 +6842,7 @@ fib_test_label (void) &pfx_1_1_1_2_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -6884,7 +6884,7 @@ fib_test_label (void) &pfx_2_2_2_2_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_2_s_32.fp_addr, ~0, fib_index, @@ -6912,7 +6912,7 @@ fib_test_label (void) &pfx_1_1_1_2_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_11_1, tm->hw[1]->sw_if_index, ~0, // invalid fib index @@ -6945,7 +6945,7 @@ fib_test_label (void) &pfx_1_1_1_2_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_11_1, 
tm->hw[1]->sw_if_index, ~0, // invalid fib index @@ -6987,7 +6987,7 @@ fib_test_label (void) &pfx_2_2_2_3_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_1_s_32.fp_addr, ~0, fib_index, @@ -7031,7 +7031,7 @@ fib_test_label (void) &pfx_2_2_2_4_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx_1_1_1_1_s_32.fp_addr, ~0, fib_index, @@ -7081,7 +7081,7 @@ fib_test_label (void) &pfx_2_2_5_5_s_32, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_11_1, tm->hw[1]->sw_if_index, ~0, // invalid fib index @@ -7689,7 +7689,7 @@ fib_test_bfd (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_ATTACHED), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -7706,7 +7706,7 @@ fib_test_bfd (void) FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_LOCAL), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -7780,7 +7780,7 @@ fib_test_bfd (void) &pfx_10_10_10_1_s_32, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -7819,7 +7819,7 @@ fib_test_bfd (void) &pfx_10_10_10_2_s_32, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_2, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -7851,7 +7851,7 @@ fib_test_bfd (void) &pfx_10_10_10_2_s_32, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_2, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -7907,7 +7907,7 @@ fib_test_bfd (void) &pfx_200_0_0_0_s_24, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_2, ~0, // recursive 0, // default fib index @@ -7926,7 +7926,7 @@ fib_test_bfd (void) &pfx_200_0_0_0_s_24, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, ~0, // recursive 0, // default fib index @@ -8065,7 +8065,7 @@ fib_test_bfd (void) &pfx_5_5_5_5_s_32, FIB_SOURCE_CLI, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -8096,7 +8096,7 @@ fib_test_bfd (void) &pfx_5_5_5_5_s_32, FIB_SOURCE_CLI, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_2, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -8234,7 +8234,7 @@ lfib_test (void) &pfx, FIB_SOURCE_CLI, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &zero_addr, ~0, fib_index, @@ -8285,7 +8285,7 @@ lfib_test (void) &pfx, FIB_SOURCE_CLI, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &zero_addr, ~0, lfib_index, @@ -8363,7 +8363,7 @@ lfib_test (void) &pfx_1200, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -8389,7 +8389,7 @@ lfib_test (void) }, }; fib_route_path_t *rpaths = NULL, rpath = { - .frp_proto = FIB_PROTOCOL_MPLS, + .frp_proto = DPO_PROTO_MPLS, .frp_local_label = 1200, .frp_eos = MPLS_NON_EOS, .frp_sw_if_index = ~0, // recurive @@ -8545,7 +8545,7 @@ lfib_test (void) &pfx_2500, FIB_SOURCE_API, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -8590,7 +8590,7 @@ lfib_test (void) &pfx_3500, FIB_SOURCE_API, FIB_ENTRY_FLAG_MULTICAST, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -8610,7 +8610,7 @@ 
lfib_test (void) &pfx_3500, FIB_SOURCE_API, FIB_ENTRY_FLAG_MULTICAST, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -8637,7 +8637,7 @@ lfib_test (void) &pfx_3500, FIB_SOURCE_API, FIB_ENTRY_FLAG_MULTICAST, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, NULL, 5, // rpf-id 0, // default table diff --git a/src/vnet/fib/fib_types.h b/src/vnet/fib/fib_types.h index a209ff3c..f11a55da 100644 --- a/src/vnet/fib/fib_types.h +++ b/src/vnet/fib/fib_types.h @@ -32,9 +32,9 @@ typedef u32 fib_node_index_t; * Protocol Type. packed so it consumes a u8 only */ typedef enum fib_protocol_t_ { - FIB_PROTOCOL_IP4 = 0, - FIB_PROTOCOL_IP6, - FIB_PROTOCOL_MPLS, + FIB_PROTOCOL_IP4 = DPO_PROTO_IP4, + FIB_PROTOCOL_IP6 = DPO_PROTO_IP6, + FIB_PROTOCOL_MPLS = DPO_PROTO_MPLS, } __attribute__ ((packed)) fib_protocol_t; #define FIB_PROTOCOLS { \ @@ -338,7 +338,7 @@ typedef struct fib_route_path_t_ { * The protocol of the address below. We need this since the all * zeros address is ambiguous. */ - fib_protocol_t frp_proto; + dpo_proto_t frp_proto; union { /** diff --git a/src/vnet/interface_format.c b/src/vnet/interface_format.c index df7e9388..5694bb2f 100644 --- a/src/vnet/interface_format.c +++ b/src/vnet/interface_format.c @@ -165,9 +165,15 @@ format_vnet_sw_if_index_name (u8 * s, va_list * args) { vnet_main_t *vnm = va_arg (*args, vnet_main_t *); u32 sw_if_index = va_arg (*args, u32); - return format (s, "%U", - format_vnet_sw_interface_name, vnm, - vnet_get_sw_interface (vnm, sw_if_index)); + vnet_sw_interface_t *si; + + si = vnet_get_sw_interface_safe (vnm, sw_if_index); + + if (NULL == si) + { + return format (s, "DELETED"); + } + return format (s, "%U", format_vnet_sw_interface_name, vnm, si); } u8 * diff --git a/src/vnet/ip/ip4_forward.c b/src/vnet/ip/ip4_forward.c index ee17ea88..7a8d7a0c 100755 --- a/src/vnet/ip/ip4_forward.c +++ b/src/vnet/ip/ip4_forward.c @@ -724,7 +724,7 @@ ip4_add_interface_routes (u32 sw_if_index, FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_ATTACHED), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, /* No next-hop address */ NULL, sw_if_index, @@ -767,7 +767,7 @@ ip4_add_interface_routes (u32 sw_if_index, fib_table_entry_update_one_path (fib_index, &net_pfx, FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_ATTACHED), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &net_pfx.fp_addr, sw_if_index, // invalid FIB index @@ -803,7 +803,7 @@ ip4_add_interface_routes (u32 sw_if_index, FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_LOCAL), - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &pfx.fp_addr, sw_if_index, // invalid FIB index diff --git a/src/vnet/ip/ip6_forward.c b/src/vnet/ip/ip6_forward.c index bc66416e..8ae08a01 100644 --- a/src/vnet/ip/ip6_forward.c +++ b/src/vnet/ip/ip6_forward.c @@ -355,7 +355,7 @@ ip6_add_interface_routes (vnet_main_t * vnm, u32 sw_if_index, FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_ATTACHED), - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, /* No next-hop address */ NULL, sw_if_index, /* invalid FIB index */ @@ -390,7 +390,7 @@ ip6_add_interface_routes (vnet_main_t * vnm, u32 sw_if_index, FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_CONNECTED | FIB_ENTRY_FLAG_LOCAL), - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, &pfx.fp_addr, sw_if_index, ~0, 1, NULL, FIB_ROUTE_PATH_FLAG_NONE); diff --git a/src/vnet/ip/ip6_neighbor.c b/src/vnet/ip/ip6_neighbor.c index e8eebd4e..6a9139ab 100644 --- a/src/vnet/ip/ip6_neighbor.c +++ b/src/vnet/ip/ip6_neighbor.c @@ -284,7 +284,7 @@ ip6_neighbor_sw_interface_up_down (vnet_main_t * vnm, 
(ip6_fib_table_get_index_for_sw_if_index (n->key.sw_if_index), &pfx, FIB_SOURCE_ADJ, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, &pfx.fp_addr, n->key.sw_if_index, ~0, 1, FIB_ROUTE_PATH_FLAG_NONE); pool_put (nm->neighbor_pool, n); @@ -645,7 +645,7 @@ vnet_set_ip6_ethernet_neighbor (vlib_main_t * vm, n->fib_entry_index = fib_table_entry_path_add (fib_index, &pfx, FIB_SOURCE_ADJ, FIB_ENTRY_FLAG_ATTACHED, - FIB_PROTOCOL_IP6, &pfx.fp_addr, + DPO_PROTO_IP6, &pfx.fp_addr, n->key.sw_if_index, ~0, 1, NULL, FIB_ROUTE_PATH_FLAG_NONE); } @@ -776,7 +776,7 @@ vnet_unset_ip6_ethernet_neighbor (vlib_main_t * vm, (ip6_fib_table_get_index_for_sw_if_index (n->key.sw_if_index), &pfx, FIB_SOURCE_ADJ, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, &pfx.fp_addr, n->key.sw_if_index, ~0, 1, FIB_ROUTE_PATH_FLAG_NONE); } pool_put (nm->neighbor_pool, n); @@ -4110,7 +4110,7 @@ ip6_neighbor_proxy_add_del (u32 sw_if_index, ip6_address_t * addr, u8 is_del) fib_table_entry_path_remove (fib_index, &pfx, FIB_SOURCE_IP6_ND_PROXY, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, &nh, sw_if_index, ~0, 1, FIB_ROUTE_PATH_FLAG_NONE); @@ -4124,7 +4124,7 @@ ip6_neighbor_proxy_add_del (u32 sw_if_index, ip6_address_t * addr, u8 is_del) &pfx, FIB_SOURCE_IP6_ND_PROXY, FIB_ENTRY_FLAG_NONE, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, &nh, sw_if_index, ~0, 1, NULL, FIB_ROUTE_PATH_FLAG_NONE); diff --git a/src/vnet/ip/ip_api.c b/src/vnet/ip/ip_api.c index 4cbf75a3..0676a387 100644 --- a/src/vnet/ip/ip_api.c +++ b/src/vnet/ip/ip_api.c @@ -156,9 +156,9 @@ copy_fib_next_hop (fib_route_path_encode_t * api_rpath, void *fp_arg) int is_ip4; vl_api_fib_path_t *fp = (vl_api_fib_path_t *) fp_arg; - if (api_rpath->rpath.frp_proto == FIB_PROTOCOL_IP4) + if (api_rpath->rpath.frp_proto == DPO_PROTO_IP4) fp->afi = IP46_TYPE_IP4; - else if (api_rpath->rpath.frp_proto == FIB_PROTOCOL_IP6) + else if (api_rpath->rpath.frp_proto == DPO_PROTO_IP6) fp->afi = IP46_TYPE_IP6; else { @@ -714,7 +714,7 @@ add_del_route_t_handler (u8 is_multipath, u8 is_rpf_id, u32 fib_index, const fib_prefix_t * prefix, - u8 next_hop_proto_is_ip4, + dpo_proto_t next_hop_proto, const ip46_address_t * next_hop, u32 next_hop_sw_if_index, u8 next_hop_fib_index, @@ -726,8 +726,7 @@ add_del_route_t_handler (u8 is_multipath, vnet_classify_main_t *cm = &vnet_classify_main; fib_route_path_flags_t path_flags = FIB_ROUTE_PATH_FLAG_NONE; fib_route_path_t path = { - .frp_proto = (next_hop_proto_is_ip4 ? - FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6), + .frp_proto = next_hop_proto, .frp_addr = (NULL == next_hop ? 
zero_addr : *next_hop), .frp_sw_if_index = next_hop_sw_if_index, .frp_fib_index = next_hop_fib_index, @@ -740,7 +739,7 @@ add_del_route_t_handler (u8 is_multipath, if (MPLS_LABEL_INVALID != next_hop_via_label) { - path.frp_proto = FIB_PROTOCOL_MPLS; + path.frp_proto = DPO_PROTO_MPLS; path.frp_local_label = next_hop_via_label; path.frp_eos = MPLS_NON_EOS; } @@ -855,7 +854,7 @@ int add_del_route_check (fib_protocol_t table_proto, u32 table_id, u32 next_hop_sw_if_index, - fib_protocol_t next_hop_table_proto, + dpo_proto_t next_hop_table_proto, u32 next_hop_table_id, u8 create_missing_tables, u8 is_rpf_id, u32 * fib_index, u32 * next_hop_fib_index) @@ -887,11 +886,18 @@ add_del_route_check (fib_protocol_t table_proto, } else { + fib_protocol_t fib_nh_proto; + + if (next_hop_table_proto > DPO_PROTO_MPLS) + return (0); + + fib_nh_proto = dpo_proto_to_fib (next_hop_table_proto); + if (is_rpf_id) - *next_hop_fib_index = mfib_table_find (next_hop_table_proto, + *next_hop_fib_index = mfib_table_find (fib_nh_proto, ntohl (next_hop_table_id)); else - *next_hop_fib_index = fib_table_find (next_hop_table_proto, + *next_hop_fib_index = fib_table_find (fib_nh_proto, ntohl (next_hop_table_id)); if (~0 == *next_hop_fib_index) @@ -900,12 +906,12 @@ add_del_route_check (fib_protocol_t table_proto, { if (is_rpf_id) *next_hop_fib_index = - mfib_table_find_or_create_and_lock (next_hop_table_proto, + mfib_table_find_or_create_and_lock (fib_nh_proto, ntohl (next_hop_table_id)); else *next_hop_fib_index = - fib_table_find_or_create_and_lock (next_hop_table_proto, + fib_table_find_or_create_and_lock (fib_nh_proto, ntohl (next_hop_table_id)); } @@ -930,7 +936,7 @@ ip4_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp) rv = add_del_route_check (FIB_PROTOCOL_IP4, mp->table_id, mp->next_hop_sw_if_index, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, mp->next_hop_table_id, mp->create_vrf_if_needed, 0, &fib_index, &next_hop_fib_index); @@ -970,7 +976,7 @@ ip4_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp) mp->classify_table_index, mp->is_resolve_host, mp->is_resolve_attached, 0, 0, - fib_index, &pfx, 1, + fib_index, &pfx, DPO_PROTO_IP4, &nh, ntohl (mp->next_hop_sw_if_index), next_hop_fib_index, @@ -990,7 +996,7 @@ ip6_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp) rv = add_del_route_check (FIB_PROTOCOL_IP6, mp->table_id, mp->next_hop_sw_if_index, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, mp->next_hop_table_id, mp->create_vrf_if_needed, 0, &fib_index, &next_hop_fib_index); @@ -1030,7 +1036,7 @@ ip6_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp) mp->classify_table_index, mp->is_resolve_host, mp->is_resolve_attached, 0, 0, - fib_index, &pfx, 0, + fib_index, &pfx, DPO_PROTO_IP6, &nh, ntohl (mp->next_hop_sw_if_index), next_hop_fib_index, mp->next_hop_weight, @@ -1106,7 +1112,7 @@ mroute_add_del_handler (u8 is_add, fib_route_path_t path = { .frp_sw_if_index = next_hop_sw_if_index, - .frp_proto = prefix->fp_proto, + .frp_proto = fib_proto_to_dpo (prefix->fp_proto), }; if (is_local) diff --git a/src/vnet/ip/lookup.c b/src/vnet/ip/lookup.c index 533d010a..41e46070 100755 --- a/src/vnet/ip/lookup.c +++ b/src/vnet/ip/lookup.c @@ -423,7 +423,7 @@ vnet_ip_route_cmd (vlib_main_t * vm, { rpath.frp_weight = 1; rpath.frp_eos = MPLS_NON_EOS; - rpath.frp_proto = FIB_PROTOCOL_MPLS; + rpath.frp_proto = DPO_PROTO_MPLS; rpath.frp_sw_if_index = ~0; vec_add1 (rpaths, rpath); } @@ -449,7 +449,7 @@ vnet_ip_route_cmd (vlib_main_t * vm, &rpath.frp_sw_if_index)) { rpath.frp_weight = 1; - rpath.frp_proto = FIB_PROTOCOL_IP4; + 
rpath.frp_proto = DPO_PROTO_IP4; vec_add1 (rpaths, rpath); } @@ -460,7 +460,7 @@ vnet_ip_route_cmd (vlib_main_t * vm, &rpath.frp_sw_if_index)) { rpath.frp_weight = 1; - rpath.frp_proto = FIB_PROTOCOL_IP6; + rpath.frp_proto = DPO_PROTO_IP6; vec_add1 (rpaths, rpath); } else if (unformat (line_input, "weight %u", &weight)) @@ -479,7 +479,7 @@ vnet_ip_route_cmd (vlib_main_t * vm, { rpath.frp_weight = 1; rpath.frp_sw_if_index = ~0; - rpath.frp_proto = FIB_PROTOCOL_IP4; + rpath.frp_proto = DPO_PROTO_IP4; vec_add1 (rpaths, rpath); } else if (unformat (line_input, "via %U next-hop-table %d", @@ -488,7 +488,7 @@ vnet_ip_route_cmd (vlib_main_t * vm, { rpath.frp_weight = 1; rpath.frp_sw_if_index = ~0; - rpath.frp_proto = FIB_PROTOCOL_IP6; + rpath.frp_proto = DPO_PROTO_IP6; vec_add1 (rpaths, rpath); } else if (unformat (line_input, "via %U", @@ -501,7 +501,7 @@ vnet_ip_route_cmd (vlib_main_t * vm, rpath.frp_fib_index = table_id; rpath.frp_weight = 1; rpath.frp_sw_if_index = ~0; - rpath.frp_proto = FIB_PROTOCOL_IP4; + rpath.frp_proto = DPO_PROTO_IP4; vec_add1 (rpaths, rpath); } else if (unformat (line_input, "via %U", @@ -510,13 +510,13 @@ vnet_ip_route_cmd (vlib_main_t * vm, rpath.frp_fib_index = table_id; rpath.frp_weight = 1; rpath.frp_sw_if_index = ~0; - rpath.frp_proto = FIB_PROTOCOL_IP6; + rpath.frp_proto = DPO_PROTO_IP6; vec_add1 (rpaths, rpath); } else if (unformat (line_input, "lookup in table %d", &rpath.frp_fib_index)) { - rpath.frp_proto = pfx.fp_proto; + rpath.frp_proto = fib_proto_to_dpo (pfx.fp_proto); rpath.frp_sw_if_index = ~0; vec_add1 (rpaths, rpath); } @@ -526,7 +526,7 @@ vnet_ip_route_cmd (vlib_main_t * vm, &rpath.frp_sw_if_index)) { rpath.frp_weight = 1; - rpath.frp_proto = prefixs[0].fp_proto; + rpath.frp_proto = fib_proto_to_dpo (prefixs[0].fp_proto); vec_add1 (rpaths, rpath); } else if (vec_len (prefixs) > 0 && diff --git a/src/vnet/lisp-gpe/lisp_gpe.c b/src/vnet/lisp-gpe/lisp_gpe.c index 0acc7349..018895ad 100644 --- a/src/vnet/lisp-gpe/lisp_gpe.c +++ b/src/vnet/lisp-gpe/lisp_gpe.c @@ -454,7 +454,7 @@ vnet_gpe_add_del_native_fwd_rpath (vnet_gpe_native_fwd_rpath_args_t * a) fib_route_path_t *rpath; u8 ip_version; - ip_version = a->rpath.frp_proto == FIB_PROTOCOL_IP4 ? IP4 : IP6; + ip_version = a->rpath.frp_proto == DPO_PROTO_IP4 ? 
IP4 : IP6; if (a->is_add) { @@ -511,7 +511,7 @@ gpe_native_forward_command_fn (vlib_main_t * vm, unformat_input_t * input, &rpath.frp_sw_if_index)) { rpath.frp_weight = 1; - rpath.frp_proto = FIB_PROTOCOL_IP4; + rpath.frp_proto = DPO_PROTO_IP4; } else if (unformat (line_input, "via %U %U", unformat_ip6_address, @@ -520,21 +520,21 @@ gpe_native_forward_command_fn (vlib_main_t * vm, unformat_input_t * input, &rpath.frp_sw_if_index)) { rpath.frp_weight = 1; - rpath.frp_proto = FIB_PROTOCOL_IP6; + rpath.frp_proto = DPO_PROTO_IP6; } else if (unformat (line_input, "via %U", unformat_ip4_address, &rpath.frp_addr.ip4)) { rpath.frp_weight = 1; rpath.frp_sw_if_index = ~0; - rpath.frp_proto = FIB_PROTOCOL_IP4; + rpath.frp_proto = DPO_PROTO_IP4; } else if (unformat (line_input, "via %U", unformat_ip6_address, &rpath.frp_addr.ip6)) { rpath.frp_weight = 1; rpath.frp_sw_if_index = ~0; - rpath.frp_proto = FIB_PROTOCOL_IP6; + rpath.frp_proto = DPO_PROTO_IP6; } else { @@ -549,7 +549,8 @@ gpe_native_forward_command_fn (vlib_main_t * vm, unformat_input_t * input, } else { - rpath.frp_fib_index = fib_table_find (rpath.frp_proto, table_id); + rpath.frp_fib_index = + fib_table_find (dpo_proto_to_fib (rpath.frp_proto), table_id); if ((u32) ~ 0 == rpath.frp_fib_index) { error = clib_error_return (0, "Nonexistent table id %d", table_id); diff --git a/src/vnet/lisp-gpe/lisp_gpe_api.c b/src/vnet/lisp-gpe/lisp_gpe_api.c index f1663699..4367a719 100644 --- a/src/vnet/lisp-gpe/lisp_gpe_api.c +++ b/src/vnet/lisp-gpe/lisp_gpe_api.c @@ -455,10 +455,10 @@ static void clib_memcpy (&a->rpath.frp_addr.ip6, mp->nh_addr, sizeof (ip6_address_t)); a->is_add = mp->is_add; - a->rpath.frp_proto = mp->is_ip4 ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6; - a->rpath.frp_fib_index = fib_table_find (a->rpath.frp_proto, - clib_net_to_host_u32 - (mp->table_id)); + a->rpath.frp_proto = mp->is_ip4 ? DPO_PROTO_IP4 : DPO_PROTO_IP6; + a->rpath.frp_fib_index = + fib_table_find (dpo_proto_to_fib (a->rpath.frp_proto), + clib_net_to_host_u32 (mp->table_id)); if (~0 == a->rpath.frp_fib_index) { rv = VNET_API_ERROR_INVALID_VALUE; @@ -484,7 +484,7 @@ gpe_native_fwd_rpaths_copy (vl_api_gpe_native_fwd_rpath_t * dst, vec_foreach (e, src) { memset (&dst[i], 0, sizeof (*dst)); - table = fib_table_get (e->frp_fib_index, e->frp_proto); + table = fib_table_get (e->frp_fib_index, dpo_proto_to_fib (e->frp_proto)); dst[i].fib_index = table->ft_table_id; dst[i].nh_sw_if_index = e->frp_sw_if_index; dst[i].is_ip4 = is_ip4; diff --git a/src/vnet/lisp-gpe/lisp_gpe_fwd_entry.c b/src/vnet/lisp-gpe/lisp_gpe_fwd_entry.c index 395b493a..ac048149 100644 --- a/src/vnet/lisp-gpe/lisp_gpe_fwd_entry.c +++ b/src/vnet/lisp-gpe/lisp_gpe_fwd_entry.c @@ -225,6 +225,7 @@ lisp_gpe_mk_fib_paths (const lisp_fwd_path_t * paths) { const lisp_gpe_adjacency_t *ladj; fib_route_path_t *rpaths = NULL; + fib_protocol_t fp; u8 best_priority; u32 ii; @@ -239,9 +240,9 @@ lisp_gpe_mk_fib_paths (const lisp_fwd_path_t * paths) ladj = lisp_gpe_adjacency_get (paths[ii].lisp_adj); - ip_address_to_46 (&ladj->remote_rloc, - &rpaths[ii].frp_addr, &rpaths[ii].frp_proto); + ip_address_to_46 (&ladj->remote_rloc, &rpaths[ii].frp_addr, &fp); + rpaths[ii].frp_proto = fib_proto_to_dpo (fp); rpaths[ii].frp_sw_if_index = ladj->sw_if_index; rpaths[ii].frp_weight = (paths[ii].weight ? 
paths[ii].weight : 1); } diff --git a/src/vnet/mfib/ip6_mfib.c b/src/vnet/mfib/ip6_mfib.c index 5c6f8126..5e48e919 100644 --- a/src/vnet/mfib/ip6_mfib.c +++ b/src/vnet/mfib/ip6_mfib.c @@ -158,7 +158,7 @@ ip6_create_mfib_with_table_id (u32 table_id) .fp_proto = FIB_PROTOCOL_IP6, }; const fib_route_path_t path_for_us = { - .frp_proto = FIB_PROTOCOL_IP6, + .frp_proto = DPO_PROTO_IP6, .frp_addr = zero_addr, .frp_sw_if_index = 0xffffffff, .frp_fib_index = ~0, @@ -222,7 +222,7 @@ ip6_mfib_table_destroy (ip6_mfib_t *mfib) .fp_proto = FIB_PROTOCOL_IP6, }; const fib_route_path_t path_for_us = { - .frp_proto = FIB_PROTOCOL_IP6, + .frp_proto = DPO_PROTO_IP6, .frp_addr = zero_addr, .frp_sw_if_index = 0xffffffff, .frp_fib_index = ~0, @@ -259,7 +259,7 @@ void ip6_mfib_interface_enable_disable (u32 sw_if_index, int is_enable) { const fib_route_path_t path = { - .frp_proto = FIB_PROTOCOL_IP6, + .frp_proto = DPO_PROTO_IP6, .frp_addr = zero_addr, .frp_sw_if_index = sw_if_index, .frp_fib_index = ~0, diff --git a/src/vnet/mfib/mfib_entry.c b/src/vnet/mfib/mfib_entry.c index cf25b67a..b37f8825 100644 --- a/src/vnet/mfib/mfib_entry.c +++ b/src/vnet/mfib/mfib_entry.c @@ -764,18 +764,16 @@ mfib_entry_update (fib_node_index_t mfib_entry_index, * entry */ fib_node_index_t old_pl_index; - fib_protocol_t fp; + dpo_proto_t dp; dpo_id_t dpo = DPO_INVALID; - fp = mfib_entry_get_proto(mfib_entry); + dp = fib_proto_to_dpo(mfib_entry_get_proto(mfib_entry)); old_pl_index = msrc->mfes_pl; - dpo_set(&dpo, DPO_REPLICATE, - fib_proto_to_dpo(fp), - repi); + dpo_set(&dpo, DPO_REPLICATE, dp, repi); msrc->mfes_pl = - fib_path_list_create_special(fp, + fib_path_list_create_special(dp, FIB_PATH_LIST_FLAG_EXCLUSIVE, &dpo); diff --git a/src/vnet/mfib/mfib_test.c b/src/vnet/mfib/mfib_test.c index 7c92ae99..57787eca 100644 --- a/src/vnet/mfib/mfib_test.c +++ b/src/vnet/mfib/mfib_test.c @@ -387,7 +387,7 @@ mfib_test_i (fib_protocol_t PROTO, fib_route_path_t path_via_if0 = { - .frp_proto = PROTO, + .frp_proto = fib_proto_to_dpo(PROTO), .frp_addr = zero_addr, .frp_sw_if_index = tm->hw[0]->sw_if_index, .frp_fib_index = ~0, @@ -411,7 +411,7 @@ mfib_test_i (fib_protocol_t PROTO, MFIB_ITF_FLAG_ACCEPT)); fib_route_path_t path_via_if1 = { - .frp_proto = PROTO, + .frp_proto = fib_proto_to_dpo(PROTO), .frp_addr = zero_addr, .frp_sw_if_index = tm->hw[1]->sw_if_index, .frp_fib_index = ~0, @@ -419,7 +419,7 @@ mfib_test_i (fib_protocol_t PROTO, .frp_flags = 0, }; fib_route_path_t path_via_if2 = { - .frp_proto = PROTO, + .frp_proto = fib_proto_to_dpo(PROTO), .frp_addr = zero_addr, .frp_sw_if_index = tm->hw[2]->sw_if_index, .frp_fib_index = ~0, @@ -427,7 +427,7 @@ mfib_test_i (fib_protocol_t PROTO, .frp_flags = 0, }; fib_route_path_t path_via_if3 = { - .frp_proto = PROTO, + .frp_proto = fib_proto_to_dpo(PROTO), .frp_addr = zero_addr, .frp_sw_if_index = tm->hw[3]->sw_if_index, .frp_fib_index = ~0, @@ -435,7 +435,7 @@ mfib_test_i (fib_protocol_t PROTO, .frp_flags = 0, }; fib_route_path_t path_for_us = { - .frp_proto = PROTO, + .frp_proto = fib_proto_to_dpo(PROTO), .frp_addr = zero_addr, .frp_sw_if_index = 0xffffffff, .frp_fib_index = ~0, @@ -1121,7 +1121,7 @@ mfib_test_i (fib_protocol_t PROTO, &pfx_3500, FIB_SOURCE_API, FIB_ENTRY_FLAG_MULTICAST, - FIB_PROTOCOL_IP4, + DPO_PROTO_IP4, &nh_10_10_10_1, tm->hw[0]->sw_if_index, ~0, // invalid fib index @@ -1138,7 +1138,7 @@ mfib_test_i (fib_protocol_t PROTO, * An (S,G) that resolves via the mLDP head-end */ fib_route_path_t path_via_mldp = { - .frp_proto = FIB_PROTOCOL_MPLS, + .frp_proto = DPO_PROTO_MPLS, 
.frp_local_label = pfx_3500.fp_label, .frp_eos = MPLS_EOS, .frp_sw_if_index = 0xffffffff, diff --git a/src/vnet/mpls/mpls.api b/src/vnet/mpls/mpls.api index 67f1045d..5973a0a6 100644 --- a/src/vnet/mpls/mpls.api +++ b/src/vnet/mpls/mpls.api @@ -156,7 +156,7 @@ manual_endian manual_print define mpls_tunnel_details @param mr_is_interface_rx - Interface Receive path @param mr_is_interface_rx - RPF-ID Receive path. The next-hop interface is used as the RPF-ID - @param mr_next_hop_proto_is_ip4 - The next-hop is IPV4 + @param mr_next_hop_proto - The next-hop protocol, of type dpo_proto_t @param mr_next_hop_weight - The weight, for UCMP @param mr_next_hop[16] - the nextop address @param mr_next_hop_sw_if_index - the next-hop SW interface @@ -182,7 +182,7 @@ autoreply define mpls_route_add_del u8 mr_is_resolve_attached; u8 mr_is_interface_rx; u8 mr_is_rpf_id; - u8 mr_next_hop_proto_is_ip4; + u8 mr_next_hop_proto; u8 mr_next_hop_weight; u8 mr_next_hop_preference; u8 mr_next_hop[16]; diff --git a/src/vnet/mpls/mpls.c b/src/vnet/mpls/mpls.c index 068d31f4..266ba42c 100644 --- a/src/vnet/mpls/mpls.c +++ b/src/vnet/mpls/mpls.c @@ -261,7 +261,7 @@ vnet_mpls_local_label (vlib_main_t * vm, &rpath.frp_sw_if_index, &rpath.frp_weight)) { - rpath.frp_proto = FIB_PROTOCOL_IP4; + rpath.frp_proto = DPO_PROTO_IP4; vec_add1(rpaths, rpath); } @@ -272,7 +272,7 @@ vnet_mpls_local_label (vlib_main_t * vm, &rpath.frp_sw_if_index, &rpath.frp_weight)) { - rpath.frp_proto = FIB_PROTOCOL_IP6; + rpath.frp_proto = DPO_PROTO_IP6; vec_add1(rpaths, rpath); } @@ -283,7 +283,7 @@ vnet_mpls_local_label (vlib_main_t * vm, &rpath.frp_sw_if_index)) { rpath.frp_weight = 1; - rpath.frp_proto = FIB_PROTOCOL_IP4; + rpath.frp_proto = DPO_PROTO_IP4; vec_add1(rpaths, rpath); } else if (unformat (line_input, "rx-ip4 %U", @@ -291,7 +291,7 @@ vnet_mpls_local_label (vlib_main_t * vm, &rpath.frp_sw_if_index)) { rpath.frp_weight = 1; - rpath.frp_proto = FIB_PROTOCOL_IP4; + rpath.frp_proto = DPO_PROTO_IP4; rpath.frp_flags = FIB_ROUTE_PATH_INTF_RX; vec_add1(rpaths, rpath); } @@ -302,7 +302,7 @@ vnet_mpls_local_label (vlib_main_t * vm, &rpath.frp_sw_if_index)) { rpath.frp_weight = 1; - rpath.frp_proto = FIB_PROTOCOL_IP6; + rpath.frp_proto = DPO_PROTO_IP6; vec_add1(rpaths, rpath); } else if (unformat (line_input, "via %U next-hop-table %d", @@ -312,7 +312,7 @@ vnet_mpls_local_label (vlib_main_t * vm, { rpath.frp_weight = 1; rpath.frp_sw_if_index = ~0; - rpath.frp_proto = FIB_PROTOCOL_IP4; + rpath.frp_proto = DPO_PROTO_IP4; vec_add1(rpaths, rpath); } else if (unformat (line_input, "via %U next-hop-table %d", @@ -322,7 +322,7 @@ vnet_mpls_local_label (vlib_main_t * vm, { rpath.frp_weight = 1; rpath.frp_sw_if_index = ~0; - rpath.frp_proto = FIB_PROTOCOL_IP6; + rpath.frp_proto = DPO_PROTO_IP6; vec_add1(rpaths, rpath); } else if (unformat (line_input, "via %U", @@ -336,7 +336,7 @@ vnet_mpls_local_label (vlib_main_t * vm, rpath.frp_fib_index = table_id; rpath.frp_weight = 1; rpath.frp_sw_if_index = ~0; - rpath.frp_proto = FIB_PROTOCOL_IP4; + rpath.frp_proto = DPO_PROTO_IP4; vec_add1(rpaths, rpath); } else if (unformat (line_input, "via %U", @@ -346,7 +346,7 @@ vnet_mpls_local_label (vlib_main_t * vm, rpath.frp_fib_index = table_id; rpath.frp_weight = 1; rpath.frp_sw_if_index = ~0; - rpath.frp_proto = FIB_PROTOCOL_IP6; + rpath.frp_proto = DPO_PROTO_IP6; vec_add1(rpaths, rpath); } else if (unformat (line_input, "%d", &local_label)) @@ -355,7 +355,7 @@ vnet_mpls_local_label (vlib_main_t * vm, "ip4-lookup-in-table %d", &rpath.frp_fib_index)) { - 
rpath.frp_proto = FIB_PROTOCOL_IP4; + rpath.frp_proto = DPO_PROTO_IP4; rpath.frp_sw_if_index = FIB_NODE_INDEX_INVALID; pfx.fp_payload_proto = DPO_PROTO_IP4; vec_add1(rpaths, rpath); @@ -364,7 +364,7 @@ vnet_mpls_local_label (vlib_main_t * vm, "ip6-lookup-in-table %d", &rpath.frp_fib_index)) { - rpath.frp_proto = FIB_PROTOCOL_IP6; + rpath.frp_proto = DPO_PROTO_IP6; rpath.frp_sw_if_index = FIB_NODE_INDEX_INVALID; vec_add1(rpaths, rpath); pfx.fp_payload_proto = DPO_PROTO_IP6; @@ -373,11 +373,21 @@ vnet_mpls_local_label (vlib_main_t * vm, "mpls-lookup-in-table %d", &rpath.frp_fib_index)) { - rpath.frp_proto = FIB_PROTOCOL_MPLS; + rpath.frp_proto = DPO_PROTO_MPLS; rpath.frp_sw_if_index = FIB_NODE_INDEX_INVALID; pfx.fp_payload_proto = DPO_PROTO_MPLS; vec_add1(rpaths, rpath); } + else if (unformat (line_input, + "l2-input-on %U", + unformat_vnet_sw_interface, vnm, + &rpath.frp_sw_if_index)) + { + rpath.frp_proto = DPO_PROTO_ETHERNET; + pfx.fp_payload_proto = DPO_PROTO_ETHERNET; + rpath.frp_flags = FIB_ROUTE_PATH_INTF_RX; + vec_add1(rpaths, rpath); + } else if (unformat (line_input, "out-label %U", unformat_mpls_unicast_label, &out_label)) @@ -440,7 +450,7 @@ vnet_mpls_local_label (vlib_main_t * vm, pfx.fp_proto = FIB_PROTOCOL_MPLS; pfx.fp_len = 21; pfx.fp_label = local_label; - pfx.fp_payload_proto = fib_proto_to_dpo(rpaths[0].frp_proto); + pfx.fp_payload_proto = rpaths[0].frp_proto; /* * the CLI parsing stored table Ids, swap to FIB indicies diff --git a/src/vnet/mpls/mpls_api.c b/src/vnet/mpls/mpls_api.c index 92fb24a6..737299e6 100644 --- a/src/vnet/mpls/mpls_api.c +++ b/src/vnet/mpls/mpls_api.c @@ -144,14 +144,7 @@ mpls_route_add_del_t_handler (vnet_main_t * vnm, }; if (pfx.fp_eos) { - if (mp->mr_next_hop_proto_is_ip4) - { - pfx.fp_payload_proto = DPO_PROTO_IP4; - } - else - { - pfx.fp_payload_proto = DPO_PROTO_IP6; - } + pfx.fp_payload_proto = mp->mr_next_hop_proto; } else { @@ -161,7 +154,7 @@ mpls_route_add_del_t_handler (vnet_main_t * vnm, rv = add_del_route_check (FIB_PROTOCOL_MPLS, mp->mr_table_id, mp->mr_next_hop_sw_if_index, - dpo_proto_to_fib (pfx.fp_payload_proto), + pfx.fp_payload_proto, mp->mr_next_hop_table_id, mp->mr_create_table_if_needed, mp->mr_is_rpf_id, @@ -173,9 +166,9 @@ mpls_route_add_del_t_handler (vnet_main_t * vnm, ip46_address_t nh; memset (&nh, 0, sizeof (nh)); - if (mp->mr_next_hop_proto_is_ip4) + if (DPO_PROTO_IP4 == mp->mr_next_hop_proto) memcpy (&nh.ip4, mp->mr_next_hop, sizeof (nh.ip4)); - else + else if (DPO_PROTO_IP6 == mp->mr_next_hop_proto) memcpy (&nh.ip6, mp->mr_next_hop, sizeof (nh.ip6)); n_labels = mp->mr_next_hop_n_out_labels; @@ -202,7 +195,7 @@ mpls_route_add_del_t_handler (vnet_main_t * vnm, mp->mr_is_interface_rx, mp->mr_is_rpf_id, fib_index, &pfx, - mp->mr_next_hop_proto_is_ip4, + mp->mr_next_hop_proto, &nh, ntohl (mp->mr_next_hop_sw_if_index), next_hop_fib_index, mp->mr_next_hop_weight, @@ -243,13 +236,13 @@ vl_api_mpls_tunnel_add_del_t_handler (vl_api_mpls_tunnel_add_del_t * mp) if (mp->mt_next_hop_proto_is_ip4) { - rpath.frp_proto = FIB_PROTOCOL_IP4; + rpath.frp_proto = DPO_PROTO_IP4; clib_memcpy (&rpath.frp_addr.ip4, mp->mt_next_hop, sizeof (rpath.frp_addr.ip4)); } else { - rpath.frp_proto = FIB_PROTOCOL_IP6; + rpath.frp_proto = DPO_PROTO_IP6; clib_memcpy (&rpath.frp_addr.ip6, mp->mt_next_hop, sizeof (rpath.frp_addr.ip6)); } diff --git a/src/vnet/mpls/mpls_tunnel.c b/src/vnet/mpls/mpls_tunnel.c index c025cc58..6452a60b 100644 --- a/src/vnet/mpls/mpls_tunnel.c +++ b/src/vnet/mpls/mpls_tunnel.c @@ -171,7 +171,7 @@ mpls_tunnel_mk_lb 
(mpls_tunnel_t *mt, vec_validate(ctx.next_hops, fib_path_list_get_n_paths(mt->mt_path_list)); vec_reset_length(ctx.next_hops); - lb_proto = vnet_link_to_dpo_proto(linkt); + lb_proto = fib_forw_chain_type_to_dpo_proto(fct); fib_path_list_walk(mt->mt_path_list, mpls_tunnel_collect_forwarding, @@ -313,12 +313,34 @@ mpls_tunnel_restack (mpls_tunnel_t *mt) /* * walk all the adjacencies on the MPLS interface and restack them */ - FOR_EACH_FIB_PROTOCOL(proto) + if (mt->mt_flags & MPLS_TUNNEL_FLAG_L2) { - adj_nbr_walk(mt->mt_sw_if_index, - proto, - mpls_adj_walk_cb, - NULL); + /* + * Stack a load-balance that drops, whilst we have no paths + */ + vnet_hw_interface_t * hi; + dpo_id_t dpo = DPO_INVALID; + + mpls_tunnel_mk_lb(mt, + VNET_LINK_MPLS, + FIB_FORW_CHAIN_TYPE_ETHERNET, + &dpo); + + hi = vnet_get_hw_interface(vnet_get_main(), mt->mt_hw_if_index); + dpo_stack_from_node(hi->tx_node_index, + &mt->mt_l2_lb, + &dpo); + dpo_reset(&dpo); + } + else + { + FOR_EACH_FIB_PROTOCOL(proto) + { + adj_nbr_walk(mt->mt_sw_if_index, + proto, + mpls_adj_walk_cb, + NULL); + } } } @@ -495,7 +517,7 @@ mpls_tunnel_tx (vlib_main_t * vm, b0 = vlib_get_buffer(vm, bi0); - vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mt->mt_l2_adj; + vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mt->mt_l2_lb.dpoi_index; if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) { @@ -506,7 +528,7 @@ mpls_tunnel_tx (vlib_main_t * vm, vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, - bi0, mt->mt_l2_tx_arc); + bi0, mt->mt_l2_lb.dpoi_next_node); } vlib_put_next_frame (vm, node, next_index, n_left_to_next); @@ -565,8 +587,7 @@ vnet_mpls_tunnel_del (u32 sw_if_index) if (FIB_NODE_INDEX_INVALID != mt->mt_path_list) fib_path_list_child_remove(mt->mt_path_list, mt->mt_sibling_index); - if (ADJ_INDEX_INVALID != mt->mt_l2_adj) - adj_unlock(mt->mt_l2_adj); + dpo_reset(&mt->mt_l2_lb); vec_add1 (mpls_tunnel_free_hw_if_indices, mt->mt_hw_if_index); pool_put(mpls_tunnel_pool, mt); @@ -587,12 +608,13 @@ vnet_mpls_tunnel_create (u8 l2_only, memset (mt, 0, sizeof (*mt)); mti = mt - mpls_tunnel_pool; fib_node_init(&mt->mt_node, FIB_NODE_TYPE_MPLS_TUNNEL); - mt->mt_l2_adj = ADJ_INDEX_INVALID; mt->mt_path_list = FIB_NODE_INDEX_INVALID; mt->mt_sibling_index = FIB_NODE_INDEX_INVALID; if (is_multicast) mt->mt_flags |= MPLS_TUNNEL_FLAG_MCAST; + if (l2_only) + mt->mt_flags |= MPLS_TUNNEL_FLAG_L2; /* * Create a new, or re=use and old, tunnel HW interface @@ -614,7 +636,7 @@ vnet_mpls_tunnel_create (u8 l2_only, mti, mpls_tunnel_hw_interface_class.index, mti); - hi = vnet_get_hw_interface(vnm, mt->mt_hw_if_index); + hi = vnet_get_hw_interface (vnm, mt->mt_hw_if_index); } /* @@ -624,19 +646,6 @@ vnet_mpls_tunnel_create (u8 l2_only, vec_validate_init_empty(mpls_tunnel_db, mt->mt_sw_if_index, ~0); mpls_tunnel_db[mt->mt_sw_if_index] = mti; - if (l2_only) - { - mt->mt_l2_adj = - adj_nbr_add_or_lock(fib_path_list_get_proto(mt->mt_path_list), - VNET_LINK_ETHERNET, - &zero_addr, - mt->mt_sw_if_index); - - mt->mt_l2_tx_arc = vlib_node_add_named_next(vlib_get_main(), - hi->tx_node_index, - "adj-l2-midchain"); - } - return (mt->mt_sw_if_index); } @@ -803,7 +812,7 @@ vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm, &rpath.frp_sw_if_index)) { rpath.frp_weight = 1; - rpath.frp_proto = FIB_PROTOCOL_IP4; + rpath.frp_proto = DPO_PROTO_IP4; } else if (unformat (line_input, "via %U %U", @@ -813,7 +822,7 @@ vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm, &rpath.frp_sw_if_index)) { rpath.frp_weight = 1; - rpath.frp_proto = FIB_PROTOCOL_IP6; + rpath.frp_proto 
= DPO_PROTO_IP6; } else if (unformat (line_input, "via %U", unformat_ip6_address, @@ -822,7 +831,7 @@ vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm, rpath.frp_fib_index = 0; rpath.frp_weight = 1; rpath.frp_sw_if_index = ~0; - rpath.frp_proto = FIB_PROTOCOL_IP6; + rpath.frp_proto = DPO_PROTO_IP6; } else if (unformat (line_input, "via %U", unformat_ip4_address, @@ -831,7 +840,7 @@ vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm, rpath.frp_fib_index = 0; rpath.frp_weight = 1; rpath.frp_sw_if_index = ~0; - rpath.frp_proto = FIB_PROTOCOL_IP4; + rpath.frp_proto = DPO_PROTO_IP4; } else if (unformat (line_input, "l2-only")) l2_only = 1; @@ -915,6 +924,14 @@ format_mpls_tunnel (u8 * s, va_list * args) s = format(s, "%U", format_fib_path_ext_list, &mt->mt_path_exts); s = format(s, "\n"); + if (mt->mt_flags & MPLS_TUNNEL_FLAG_L2) + { + s = format(s, " forwarding: %U\n", + format_fib_forw_chain_type, + FIB_FORW_CHAIN_TYPE_ETHERNET); + s = format(s, " %U\n", format_dpo_id, &mt->mt_l2_lb, 2); + } + return (s); } diff --git a/src/vnet/mpls/mpls_tunnel.h b/src/vnet/mpls/mpls_tunnel.h index 4cb0a860..285817c3 100644 --- a/src/vnet/mpls/mpls_tunnel.h +++ b/src/vnet/mpls/mpls_tunnel.h @@ -22,15 +22,20 @@ typedef enum mpls_tunnel_attribute_t_ { MPLS_TUNNEL_ATTRIBUTE_FIRST = 0, + /** + * @brief The tunnel is L2 only + */ + MPLS_TUNNEL_ATTRIBUTE_L2 = MPLS_TUNNEL_ATTRIBUTE_FIRST, /** * @brief The tunnel has an underlying multicast LSP */ - MPLS_TUNNEL_ATTRIBUTE_MCAST = MPLS_TUNNEL_ATTRIBUTE_FIRST, + MPLS_TUNNEL_ATTRIBUTE_MCAST, MPLS_TUNNEL_ATTRIBUTE_LAST = MPLS_TUNNEL_ATTRIBUTE_MCAST, } mpls_tunnel_attribute_t; #define MPLS_TUNNEL_ATTRIBUTES { \ [MPLS_TUNNEL_ATTRIBUTE_MCAST] = "multicast", \ + [MPLS_TUNNEL_ATTRIBUTE_L2] = "L2", \ } #define FOR_EACH_MPLS_TUNNEL_ATTRIBUTE(_item) \ for (_item = MPLS_TUNNEL_ATTRIBUTE_FIRST; \ @@ -39,6 +44,7 @@ typedef enum mpls_tunnel_attribute_t_ typedef enum mpls_tunnel_flag_t_ { MPLS_TUNNEL_FLAG_NONE = 0, + MPLS_TUNNEL_FLAG_L2 = (1 << MPLS_TUNNEL_ATTRIBUTE_L2), MPLS_TUNNEL_FLAG_MCAST = (1 << MPLS_TUNNEL_ATTRIBUTE_MCAST), } __attribute__ ((packed)) mpls_tunnel_flags_t; @@ -60,14 +66,19 @@ typedef struct mpls_tunnel_t_ /** * @brief If the tunnel is an L2 tunnel, this is the link type ETHERNET - * adjacency + * load-balance + */ + dpo_id_t mt_l2_lb; + + /** + * @brief The HW interface index of the tunnel interfaces */ - adj_index_t mt_l2_adj; + u32 mt_hw_if_index; /** - * @brief on a L2 tunnel this is the VLIB arc from the L2-tx to the l2-midchain + * @brief The SW interface index of the tunnel interfaces */ - u32 mt_l2_tx_arc; + u32 mt_sw_if_index; /** * @brief The path-list over which the tunnel's destination is reachable @@ -83,23 +94,6 @@ typedef struct mpls_tunnel_t_ * A vector of path extensions o hold the label stack for each path */ fib_path_ext_list_t mt_path_exts; - - /** - * @brief Flag to indicate the tunnel is only for L2 traffic, that is - * this tunnel belongs in a bridge domain. 
- */ - u8 mt_l2_only; - - /** - * @brief The HW interface index of the tunnel interfaces - */ - u32 mt_hw_if_index; - - /** - * @brief The SW interface index of the tunnel interfaces - */ - u32 mt_sw_if_index; - } mpls_tunnel_t; /** diff --git a/src/vnet/srmpls/sr_mpls_policy.c b/src/vnet/srmpls/sr_mpls_policy.c index 5ebbc60d..db4ad2a7 100755 --- a/src/vnet/srmpls/sr_mpls_policy.c +++ b/src/vnet/srmpls/sr_mpls_policy.c @@ -75,7 +75,7 @@ create_sl (mpls_sr_policy_t * sr_policy, mpls_label_t * sl, u32 weight) segment_list->segments = vec_dup (sl); fib_route_path_t path = { - .frp_proto = FIB_PROTOCOL_MPLS, + .frp_proto = DPO_PROTO_MPLS, .frp_sw_if_index = ~0, .frp_fib_index = 0, .frp_weight = segment_list->weight, @@ -203,7 +203,7 @@ sr_mpls_policy_del (mpls_label_t bsid, u32 index) segment_list = pool_elt_at_index (sm->sid_lists, *sl_index); fib_route_path_t path = { - .frp_proto = FIB_PROTOCOL_MPLS, + .frp_proto = DPO_PROTO_MPLS, .frp_sw_if_index = ~0, .frp_fib_index = 0, .frp_weight = segment_list->weight, @@ -308,7 +308,7 @@ sr_mpls_policy_mod (mpls_label_t bsid, u32 index, u8 operation, mpls_eos_bit_t eos; fib_route_path_t path = { - .frp_proto = FIB_PROTOCOL_MPLS, + .frp_proto = DPO_PROTO_MPLS, .frp_sw_if_index = ~0, .frp_fib_index = 0, .frp_weight = segment_list->weight, diff --git a/src/vnet/srmpls/sr_mpls_steering.c b/src/vnet/srmpls/sr_mpls_steering.c index 37707049..3a9aea2d 100755 --- a/src/vnet/srmpls/sr_mpls_steering.c +++ b/src/vnet/srmpls/sr_mpls_steering.c @@ -218,7 +218,7 @@ sr_mpls_steering_policy (int is_del, mpls_label_t bsid, u32 sr_policy_index, update_fib:; fib_route_path_t path = { - .frp_proto = FIB_PROTOCOL_MPLS, + .frp_proto = DPO_PROTO_MPLS, .frp_local_label = sr_policy->bsid, .frp_eos = MPLS_EOS, .frp_sw_if_index = ~0, diff --git a/src/vnet/srv6/sr_steering.c b/src/vnet/srv6/sr_steering.c index a7903751..704adaa7 100755 --- a/src/vnet/srv6/sr_steering.c +++ b/src/vnet/srv6/sr_steering.c @@ -310,7 +310,7 @@ update_fib: table_id : 0)), &pfx, FIB_SOURCE_SR, FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, (ip46_address_t *) & sr_policy->bsid, ~0, sm->fib_table_ip6, 1, NULL, FIB_ROUTE_PATH_FLAG_NONE); @@ -327,7 +327,7 @@ update_fib: table_id : 0)), &pfx, FIB_SOURCE_SR, FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT, - FIB_PROTOCOL_IP6, + DPO_PROTO_IP6, (ip46_address_t *) & sr_policy->bsid, ~0, sm->fib_table_ip4, 1, NULL, FIB_ROUTE_PATH_FLAG_NONE); diff --git a/src/vnet/vxlan-gpe/vxlan_gpe.c b/src/vnet/vxlan-gpe/vxlan_gpe.c index 97bb1b15..462c79a0 100644 --- a/src/vnet/vxlan-gpe/vxlan_gpe.c +++ b/src/vnet/vxlan-gpe/vxlan_gpe.c @@ -638,7 +638,7 @@ int vnet_vxlan_gpe_add_del_tunnel fib_node_index_t mfei; adj_index_t ai; fib_route_path_t path = { - .frp_proto = fp, + .frp_proto = fib_proto_to_dpo(fp), .frp_addr = zero_addr, .frp_sw_if_index = 0xffffffff, .frp_fib_index = ~0, diff --git a/src/vnet/vxlan/vxlan.c b/src/vnet/vxlan/vxlan.c index 1b3df2a8..dc973372 100644 --- a/src/vnet/vxlan/vxlan.c +++ b/src/vnet/vxlan/vxlan.c @@ -505,7 +505,7 @@ int vnet_vxlan_add_del_tunnel fib_node_index_t mfei; adj_index_t ai; fib_route_path_t path = { - .frp_proto = fp, + .frp_proto = fib_proto_to_dpo(fp), .frp_addr = zero_addr, .frp_sw_if_index = 0xffffffff, .frp_fib_index = ~0, diff --git a/src/vpp/app/vpe_cli.c b/src/vpp/app/vpe_cli.c index 94bdc84c..fcc496ad 100644 --- a/src/vpp/app/vpe_cli.c +++ b/src/vpp/app/vpe_cli.c @@ -98,7 +98,7 @@ virtual_ip_cmd_fn_command_fn (vlib_main_t * vm, vec_add2 (rpaths, rpath, 1); - rpath->frp_proto = FIB_PROTOCOL_IP4; + rpath->frp_proto = 
DPO_PROTO_IP4; rpath->frp_addr = next_hops[i]; rpath->frp_sw_if_index = sw_if_index; rpath->frp_fib_index = ~0; diff --git a/test/test_bfd.py b/test/test_bfd.py index be42cdad..4cb6d379 100644 --- a/test/test_bfd.py +++ b/test/test_bfd.py @@ -20,7 +20,7 @@ from vpp_pg_interface import CaptureTimeoutError, is_ipv6_misc from vpp_lo_interface import VppLoInterface from util import ppp from vpp_papi_provider import UnexpectedApiReturnValueError -from vpp_ip_route import VppIpRoute, VppRoutePath +from vpp_ip_route import VppIpRoute, VppRoutePath, DpoProto USEC_IN_SEC = 1000000 @@ -1678,12 +1678,12 @@ class BFDFIBTestCase(VppTestCase): ip_2001_s_64 = VppIpRoute(self, "2001::", 64, [VppRoutePath(self.pg0.remote_ip6, self.pg0.sw_if_index, - is_ip6=1)], + proto=DPO_PROTO_IP6)], is_ip6=1) ip_2002_s_64 = VppIpRoute(self, "2002::", 64, [VppRoutePath(self.pg0.remote_ip6, 0xffffffff, - is_ip6=1)], + proto=DPO_PROTO_IP6)], is_ip6=1) ip_2001_s_64.add_vpp_config() ip_2002_s_64.add_vpp_config() diff --git a/test/test_gre.py b/test/test_gre.py index 18b67dbd..1afc44fb 100644 --- a/test/test_gre.py +++ b/test/test_gre.py @@ -6,7 +6,7 @@ from logging import * from framework import VppTestCase, VppTestRunner from vpp_sub_interface import VppDot1QSubint from vpp_gre_interface import VppGreInterface, VppGre6Interface -from vpp_ip_route import VppIpRoute, VppRoutePath +from vpp_ip_route import VppIpRoute, VppRoutePath, DpoProto from vpp_papi_provider import L2_VTR_OP from scapy.packet import Raw @@ -516,11 +516,12 @@ class TestGRE(VppTestCase): gre_if.admin_up() gre_if.config_ip6() - route_via_tun = VppIpRoute(self, "4004::1", 128, - [VppRoutePath("0::0", - gre_if.sw_if_index, - is_ip6=1)], - is_ip6=1) + route_via_tun = VppIpRoute( + self, "4004::1", 128, + [VppRoutePath("0::0", + gre_if.sw_if_index, + proto=DpoProto.DPO_PROTO_IP6)], + is_ip6=1) route_via_tun.add_vpp_config() @@ -542,11 +543,12 @@ class TestGRE(VppTestCase): # # Add a route that resolves the tunnel's destination # - route_tun_dst = VppIpRoute(self, "1002::1", 128, - [VppRoutePath(self.pg2.remote_ip6, - self.pg2.sw_if_index, - is_ip6=1)], - is_ip6=1) + route_tun_dst = VppIpRoute( + self, "1002::1", 128, + [VppRoutePath(self.pg2.remote_ip6, + self.pg2.sw_if_index, + proto=DpoProto.DPO_PROTO_IP6)], + is_ip6=1) route_tun_dst.add_vpp_config() # diff --git a/test/test_ip6.py b/test/test_ip6.py index 593f6868..285ce181 100644 --- a/test/test_ip6.py +++ b/test/test_ip6.py @@ -8,7 +8,7 @@ from vpp_sub_interface import VppSubInterface, VppDot1QSubint from vpp_pg_interface import is_ipv6_misc from vpp_ip_route import VppIpRoute, VppRoutePath, find_route, VppIpMRoute, \ VppMRoutePath, MRouteItfFlags, MRouteEntryFlags, VppMplsIpBind, \ - VppMplsRoute + VppMplsRoute, DpoProto from vpp_neighbor import find_nbr, VppNeighbor from scapy.packet import Raw @@ -490,7 +490,7 @@ class TestIPv6(TestIPv6ND): inet=AF_INET6)) def test_ns_duplicates(self): - """ ARP Duplicates""" + """ ND Duplicates""" # # Generate some hosts on the LAN @@ -537,7 +537,7 @@ class TestIPv6(TestIPv6ND): # # remove the duplicate on pg1 - # packet stream shoud generate ARPs out of pg1 + # packet stream shoud generate NSs out of pg1 # ns_pg1.remove_vpp_config() @@ -1347,10 +1347,10 @@ class TestIP6LoadBalance(VppTestCase): route_3000_1 = VppIpRoute(self, "3000::1", 128, [VppRoutePath(self.pg1.remote_ip6, self.pg1.sw_if_index, - is_ip6=1), + proto=DpoProto.DPO_PROTO_IP6), VppRoutePath(self.pg2.remote_ip6, self.pg2.sw_if_index, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) 
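        # A minimal sketch of the new-style path construction (illustrative
        # only; it assumes the VppRoutePath and DpoProto imports already at
        # the top of this file, and the object below is never programmed
        # into VPP).  The path protocol is now passed explicitly with
        # DpoProto, mirroring the dpo_proto_t carried in
        # fib_route_path_t.frp_proto, instead of the old per-path is_ip6
        # flag.
        sketch_path6 = VppRoutePath(self.pg1.remote_ip6,
                                    self.pg1.sw_if_index,
                                    proto=DpoProto.DPO_PROTO_IP6)
        # Omitting proto= keeps the default, DpoProto.DPO_PROTO_IP4.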
route_3000_1.add_vpp_config() @@ -1367,11 +1367,11 @@ class TestIP6LoadBalance(VppTestCase): [VppRoutePath(self.pg1.remote_ip6, self.pg1.sw_if_index, labels=[67], - is_ip6=1), + proto=DpoProto.DPO_PROTO_IP6), VppRoutePath(self.pg2.remote_ip6, self.pg2.sw_if_index, labels=[67], - is_ip6=1)]) + proto=DpoProto.DPO_PROTO_IP6)]) route_67.add_vpp_config() # @@ -1441,20 +1441,20 @@ class TestIP6LoadBalance(VppTestCase): route_3000_2 = VppIpRoute(self, "3000::2", 128, [VppRoutePath(self.pg3.remote_ip6, self.pg3.sw_if_index, - is_ip6=1), + proto=DpoProto.DPO_PROTO_IP6), VppRoutePath(self.pg4.remote_ip6, self.pg4.sw_if_index, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route_3000_2.add_vpp_config() route_4000_1 = VppIpRoute(self, "4000::1", 128, [VppRoutePath("3000::1", 0xffffffff, - is_ip6=1), + proto=DpoProto.DPO_PROTO_IP6), VppRoutePath("3000::2", 0xffffffff, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route_4000_1.add_vpp_config() @@ -1485,14 +1485,14 @@ class TestIP6LoadBalance(VppTestCase): route_5000_2 = VppIpRoute(self, "5000::2", 128, [VppRoutePath(self.pg3.remote_ip6, self.pg3.sw_if_index, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route_5000_2.add_vpp_config() route_6000_1 = VppIpRoute(self, "6000::1", 128, [VppRoutePath("5000::2", 0xffffffff, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route_6000_1.add_vpp_config() diff --git a/test/test_map.py b/test/test_map.py index 9ac3948a..bbf4aec2 100644 --- a/test/test_map.py +++ b/test/test_map.py @@ -4,7 +4,7 @@ import unittest import socket from framework import VppTestCase, VppTestRunner -from vpp_ip_route import VppIpRoute, VppRoutePath +from vpp_ip_route import VppIpRoute, VppRoutePath, DpoProto from scapy.layers.l2 import Ether, Raw from scapy.layers.inet import IP, UDP, ICMP @@ -75,7 +75,7 @@ class TestMAP(VppTestCase): map_br_pfx_len, [VppRoutePath(self.pg1.remote_ip6, self.pg1.sw_if_index, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) map_route.add_vpp_config() @@ -138,13 +138,12 @@ class TestMAP(VppTestCase): # Add a route to 4001::1. 
Expect the encapped traffic to be # sent via that routes next-hop # - pre_res_route = VppIpRoute(self, - "4001::1", - 128, - [VppRoutePath(self.pg1.remote_hosts[2].ip6, - self.pg1.sw_if_index, - is_ip6=1)], - is_ip6=1) + pre_res_route = VppIpRoute( + self, "4001::1", 128, + [VppRoutePath(self.pg1.remote_hosts[2].ip6, + self.pg1.sw_if_index, + proto=DpoProto.DPO_PROTO_IP6)], + is_ip6=1) pre_res_route.add_vpp_config() self.send_and_assert_encapped(v4, map_src, @@ -156,7 +155,7 @@ class TestMAP(VppTestCase): # pre_res_route.modify([VppRoutePath(self.pg1.remote_hosts[3].ip6, self.pg1.sw_if_index, - is_ip6=1)]) + proto=DpoProto.DPO_PROTO_IP6)]) pre_res_route.add_vpp_config() self.send_and_assert_encapped(v4, map_src, diff --git a/test/test_mpls.py b/test/test_mpls.py index e3d013af..b2226a74 100644 --- a/test/test_mpls.py +++ b/test/test_mpls.py @@ -6,7 +6,7 @@ import socket from framework import VppTestCase, VppTestRunner from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsRoute, \ VppMplsIpBind, VppIpMRoute, VppMRoutePath, \ - MRouteItfFlags, MRouteEntryFlags + MRouteItfFlags, MRouteEntryFlags, DpoProto from vpp_mpls_tunnel_interface import VppMPLSTunnelInterface from scapy.packet import Raw @@ -16,6 +16,38 @@ from scapy.layers.inet6 import IPv6 from scapy.contrib.mpls import MPLS +def verify_filter(capture, sent): + if not len(capture) == len(sent): + # filter out any IPv6 RAs from the capture + for p in capture: + if p.haslayer(IPv6): + capture.remove(p) + return capture + + +def verify_mpls_stack(tst, rx, mpls_labels, ttl=255, num=0): + # the rx'd packet has the MPLS label popped + eth = rx[Ether] + tst.assertEqual(eth.type, 0x8847) + + rx_mpls = rx[MPLS] + + for ii in range(len(mpls_labels)): + tst.assertEqual(rx_mpls.label, mpls_labels[ii]) + tst.assertEqual(rx_mpls.cos, 0) + if ii == num: + tst.assertEqual(rx_mpls.ttl, ttl) + else: + tst.assertEqual(rx_mpls.ttl, 255) + if ii == len(mpls_labels) - 1: + tst.assertEqual(rx_mpls.s, 1) + else: + # not end of stack + tst.assertEqual(rx_mpls.s, 0) + # pop the label to expose the next + rx_mpls = rx_mpls[MPLS].payload + + class TestMPLS(VppTestCase): """ MPLS Test Case """ @@ -120,18 +152,9 @@ class TestMPLS(VppTestCase): pkts.append(p) return pkts - @staticmethod - def verify_filter(capture, sent): - if not len(capture) == len(sent): - # filter out any IPv6 RAs from the capture - for p in capture: - if p.haslayer(IPv6): - capture.remove(p) - return capture - def verify_capture_ip4(self, src_if, capture, sent, ping_resp=0): try: - capture = self.verify_filter(capture, sent) + capture = verify_filter(capture, sent) self.assertEqual(len(capture), len(sent)) @@ -158,33 +181,10 @@ class TestMPLS(VppTestCase): except: raise - def verify_mpls_stack(self, rx, mpls_labels, ttl=255, num=0): - # the rx'd packet has the MPLS label popped - eth = rx[Ether] - self.assertEqual(eth.type, 0x8847) - - rx_mpls = rx[MPLS] - - for ii in range(len(mpls_labels)): - self.assertEqual(rx_mpls.label, mpls_labels[ii]) - self.assertEqual(rx_mpls.cos, 0) - if ii == num: - self.assertEqual(rx_mpls.ttl, ttl) - else: - self.assertEqual(rx_mpls.ttl, 255) - - if ii == len(mpls_labels) - 1: - self.assertEqual(rx_mpls.s, 1) - else: - # not end of stack - self.assertEqual(rx_mpls.s, 0) - # pop the label to expose the next - rx_mpls = rx_mpls[MPLS].payload - def verify_capture_labelled_ip4(self, src_if, capture, sent, mpls_labels): try: - capture = self.verify_filter(capture, sent) + capture = verify_filter(capture, sent) self.assertEqual(len(capture), len(sent)) @@ -195,8 
+195,8 @@ class TestMPLS(VppTestCase): rx_ip = rx[IP] # the MPLS TTL is copied from the IP - self.verify_mpls_stack( - rx, mpls_labels, rx_ip.ttl, len(mpls_labels) - 1) + verify_mpls_stack(self, rx, mpls_labels, rx_ip.ttl, + len(mpls_labels) - 1) self.assertEqual(rx_ip.src, tx_ip.src) self.assertEqual(rx_ip.dst, tx_ip.dst) @@ -211,7 +211,7 @@ class TestMPLS(VppTestCase): if top is None: top = len(mpls_labels) - 1 try: - capture = self.verify_filter(capture, sent) + capture = verify_filter(capture, sent) self.assertEqual(len(capture), len(sent)) @@ -222,8 +222,7 @@ class TestMPLS(VppTestCase): rx_ip = rx[IP] # the MPLS TTL is 255 since it enters a new tunnel - self.verify_mpls_stack( - rx, mpls_labels, ttl, top) + verify_mpls_stack(self, rx, mpls_labels, ttl, top) self.assertEqual(rx_ip.src, tx_ip.src) self.assertEqual(rx_ip.dst, tx_ip.dst) @@ -236,13 +235,13 @@ class TestMPLS(VppTestCase): def verify_capture_labelled(self, src_if, capture, sent, mpls_labels, ttl=254, num=0): try: - capture = self.verify_filter(capture, sent) + capture = verify_filter(capture, sent) self.assertEqual(len(capture), len(sent)) for i in range(len(capture)): rx = capture[i] - self.verify_mpls_stack(rx, mpls_labels, ttl, num) + verify_mpls_stack(self, rx, mpls_labels, ttl, num) except: raise @@ -1049,7 +1048,7 @@ class TestMPLS(VppTestCase): self.pg1.sw_if_index, nh_table_id=1, rpf_id=55, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_multicast=1) route_34_eos.add_vpp_config() @@ -1440,19 +1439,20 @@ class TestMPLSPIC(VppTestCase): for ii in range(64): dst = "3000::%d" % ii local_label = 1600 + ii - vpn_routes.append(VppIpRoute(self, dst, 128, - [VppRoutePath(self.pg2.remote_ip6, - 0xffffffff, - nh_table_id=1, - is_resolve_attached=1, - is_ip6=1), - VppRoutePath(self.pg3.remote_ip6, - 0xffffffff, - nh_table_id=1, - is_ip6=1, - is_resolve_attached=1)], - table_id=1, - is_ip6=1)) + vpn_routes.append(VppIpRoute( + self, dst, 128, + [VppRoutePath(self.pg2.remote_ip6, + 0xffffffff, + nh_table_id=1, + is_resolve_attached=1, + proto=DpoProto.DPO_PROTO_IP6), + VppRoutePath(self.pg3.remote_ip6, + 0xffffffff, + nh_table_id=1, + proto=DpoProto.DPO_PROTO_IP6, + is_resolve_attached=1)], + table_id=1, + is_ip6=1)) vpn_routes[ii].add_vpp_config() vpn_bindings.append(VppMplsIpBind(self, local_label, dst, 128, @@ -1525,5 +1525,211 @@ class TestMPLSPIC(VppTestCase): self.assertNotEqual(0, len(rx1)) +class TestMPLSL2(VppTestCase): + """ MPLS-L2 """ + + def setUp(self): + super(TestMPLSL2, self).setUp() + + # create 2 pg interfaces + self.create_pg_interfaces(range(2)) + + # use pg0 as the core facing interface + self.pg0.admin_up() + self.pg0.config_ip4() + self.pg0.resolve_arp() + self.pg0.enable_mpls() + + # use the other 2 for customer facg L2 links + for i in self.pg_interfaces[1:]: + i.admin_up() + + def tearDown(self): + super(TestMPLSL2, self).tearDown() + for i in self.pg_interfaces[1:]: + i.admin_down() + + self.pg0.disable_mpls() + self.pg0.unconfig_ip4() + self.pg0.admin_down() + + def verify_capture_tunneled_ethernet(self, capture, sent, mpls_labels, + ttl=255, top=None): + if top is None: + top = len(mpls_labels) - 1 + + capture = verify_filter(capture, sent) + + self.assertEqual(len(capture), len(sent)) + + for i in range(len(capture)): + tx = sent[i] + rx = capture[i] + + # the MPLS TTL is 255 since it enters a new tunnel + verify_mpls_stack(self, rx, mpls_labels, ttl, top) + + tx_eth = tx[Ether] + rx_eth = Ether(str(rx[MPLS].payload)) + + self.assertEqual(rx_eth.src, tx_eth.src) + self.assertEqual(rx_eth.dst, 
tx_eth.dst) + + def test_vpws(self): + """ Virtual Private Wire Service """ + + # + # Create an MPLS tunnel that pushes 1 label + # + mpls_tun_1 = VppMPLSTunnelInterface(self, + [VppRoutePath(self.pg0.remote_ip4, + self.pg0.sw_if_index, + labels=[42])], + is_l2=1) + mpls_tun_1.add_vpp_config() + mpls_tun_1.admin_up() + + # + # Create a label entry to for 55 that does L2 input to the tunnel + # + route_55_eos = VppMplsRoute( + self, 55, 1, + [VppRoutePath("0.0.0.0", + mpls_tun_1.sw_if_index, + is_interface_rx=1, + proto=DpoProto.DPO_PROTO_ETHERNET)]) + route_55_eos.add_vpp_config() + + # + # Cross-connect the tunnel with one of the customers L2 interfaces + # + self.vapi.sw_interface_set_l2_xconnect(self.pg1.sw_if_index, + mpls_tun_1.sw_if_index, + enable=1) + self.vapi.sw_interface_set_l2_xconnect(mpls_tun_1.sw_if_index, + self.pg1.sw_if_index, + enable=1) + + # + # inject a packet from the core + # + pcore = (Ether(dst=self.pg0.local_mac, + src=self.pg0.remote_mac) / + MPLS(label=55, ttl=64) / + Ether(dst="00:00:de:ad:ba:be", + src="00:00:de:ad:be:ef") / + IP(src="10.10.10.10", dst="11.11.11.11") / + UDP(sport=1234, dport=1234) / + Raw('\xa5' * 100)) + + self.pg0.add_stream(pcore * 65) + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx0 = self.pg1.get_capture(65) + tx = pcore[MPLS].payload + + self.assertEqual(rx0[0][Ether].dst, tx[Ether].dst) + self.assertEqual(rx0[0][Ether].src, tx[Ether].src) + + # + # Inject a packet from the custoer/L2 side + # + self.pg1.add_stream(tx * 65) + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx0 = self.pg0.get_capture(65) + + self.verify_capture_tunneled_ethernet(rx0, tx*65, [42]) + + def test_vpls(self): + """ Virtual Private LAN Service """ + # + # Create an L2 MPLS tunnel + # + mpls_tun = VppMPLSTunnelInterface(self, + [VppRoutePath(self.pg0.remote_ip4, + self.pg0.sw_if_index, + labels=[42])], + is_l2=1) + mpls_tun.add_vpp_config() + mpls_tun.admin_up() + + # + # Create a label entry to for 55 that does L2 input to the tunnel + # + route_55_eos = VppMplsRoute( + self, 55, 1, + [VppRoutePath("0.0.0.0", + mpls_tun.sw_if_index, + is_interface_rx=1, + proto=DpoProto.DPO_PROTO_ETHERNET)]) + route_55_eos.add_vpp_config() + + # + # add to tunnel to the customers bridge-domain + # + self.vapi.sw_interface_set_l2_bridge(mpls_tun.sw_if_index, + bd_id=1) + self.vapi.sw_interface_set_l2_bridge(self.pg1.sw_if_index, + bd_id=1) + + # + # Packet from the customer interface and from the core + # + p_cust = (Ether(dst="00:00:de:ad:ba:be", + src="00:00:de:ad:be:ef") / + IP(src="10.10.10.10", dst="11.11.11.11") / + UDP(sport=1234, dport=1234) / + Raw('\xa5' * 100)) + p_core = (Ether(src="00:00:de:ad:ba:be", + dst="00:00:de:ad:be:ef") / + IP(dst="10.10.10.10", src="11.11.11.11") / + UDP(sport=1234, dport=1234) / + Raw('\xa5' * 100)) + + # + # The BD is learning, so send in one of each packet to learn + # + p_core_encap = (Ether(dst=self.pg0.local_mac, + src=self.pg0.remote_mac) / + MPLS(label=55, ttl=64) / + p_core) + + self.pg1.add_stream(p_cust) + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + self.pg0.add_stream(p_core_encap) + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + # we've learnt this so expect it be be forwarded + rx0 = self.pg1.get_capture(1) + + self.assertEqual(rx0[0][Ether].dst, p_core[Ether].dst) + self.assertEqual(rx0[0][Ether].src, p_core[Ether].src) + + # + # now a stream in each direction + # + self.pg1.add_stream(p_cust * 65) + self.pg_enable_capture(self.pg_interfaces) + 
self.pg_start() + + rx0 = self.pg0.get_capture(65) + + self.verify_capture_tunneled_ethernet(rx0, p_cust*65, [42]) + + # + # remove interfaces from customers bridge-domain + # + self.vapi.sw_interface_set_l2_bridge(mpls_tun.sw_if_index, + bd_id=1, + enable=0) + self.vapi.sw_interface_set_l2_bridge(self.pg1.sw_if_index, + bd_id=1, + enable=0) + if __name__ == '__main__': unittest.main(testRunner=VppTestRunner) diff --git a/test/test_p2p_ethernet.py b/test/test_p2p_ethernet.py index 37a1d18b..8688f7e6 100644 --- a/test/test_p2p_ethernet.py +++ b/test/test_p2p_ethernet.py @@ -11,7 +11,7 @@ from scapy.layers.inet6 import IPv6 from framework import VppTestCase, VppTestRunner, running_extended_tests from vpp_sub_interface import VppP2PSubint -from vpp_ip_route import VppIpRoute, VppRoutePath +from vpp_ip_route import VppIpRoute, VppRoutePath, DpoProto from util import mactobinary @@ -219,7 +219,7 @@ class P2PEthernetIPV6(VppTestCase): route_8000 = VppIpRoute(self, "8000::", 64, [VppRoutePath(self.pg0.remote_ip6, self.pg0.sw_if_index, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route_8000.add_vpp_config() @@ -239,7 +239,7 @@ class P2PEthernetIPV6(VppTestCase): route_9001 = VppIpRoute(self, "9001::", 64, [VppRoutePath(self.pg1.remote_ip6, self.pg1.sw_if_index, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route_9001.add_vpp_config() @@ -264,7 +264,7 @@ class P2PEthernetIPV6(VppTestCase): route_3 = VppIpRoute(self, "9000::", 64, [VppRoutePath(self.pg1._remote_hosts[0].ip6, self.pg1.sw_if_index, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route_3.add_vpp_config() @@ -289,7 +289,7 @@ class P2PEthernetIPV6(VppTestCase): route_9001 = VppIpRoute(self, "9000::", 64, [VppRoutePath(self.pg1._remote_hosts[0].ip6, self.pg1.sw_if_index, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route_9001.add_vpp_config() @@ -310,19 +310,19 @@ class P2PEthernetIPV6(VppTestCase): route_8000 = VppIpRoute(self, "8000::", 64, [VppRoutePath(self.pg0.remote_ip6, self.pg0.sw_if_index, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route_8000.add_vpp_config() route_8001 = VppIpRoute(self, "8001::", 64, [VppRoutePath(self.p2p_sub_ifs[0].remote_ip6, self.p2p_sub_ifs[0].sw_if_index, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route_8001.add_vpp_config() route_8002 = VppIpRoute(self, "8002::", 64, [VppRoutePath(self.p2p_sub_ifs[1].remote_ip6, self.p2p_sub_ifs[1].sw_if_index, - is_ip6=1)], + proto=DpoProto.DPO_PROTO_IP6)], is_ip6=1) route_8002.add_vpp_config() diff --git a/test/vpp_ip_route.py b/test/vpp_ip_route.py index badb3102..2c489e3c 100644 --- a/test/vpp_ip_route.py +++ b/test/vpp_ip_route.py @@ -29,6 +29,14 @@ class MRouteEntryFlags: MFIB_ENTRY_FLAG_INHERIT_ACCEPT = 8 +class DpoProto: + DPO_PROTO_IP4 = 0 + DPO_PROTO_IP6 = 1 + DPO_PROTO_MPLS = 2 + DPO_PROTO_ETHERNET = 3 + DPO_PROTO_NSH = 4 + + def find_route(test, ip_addr, len, table_id=0, inet=AF_INET): if inet == AF_INET: s = 4 @@ -55,22 +63,24 @@ class VppRoutePath(object): nh_table_id=0, labels=[], nh_via_label=MPLS_LABEL_INVALID, - is_ip6=0, rpf_id=0, is_interface_rx=0, is_resolve_host=0, - is_resolve_attached=0): + is_resolve_attached=0, + proto=DpoProto.DPO_PROTO_IP4): self.nh_itf = nh_sw_if_index self.nh_table_id = nh_table_id self.nh_via_label = nh_via_label self.nh_labels = labels self.weight = 1 self.rpf_id = rpf_id - self.is_ip4 = 1 if is_ip6 == 0 else 0 - if self.is_ip4: + self.proto = proto + if self.proto is DpoProto.DPO_PROTO_IP6: + self.nh_addr = inet_pton(AF_INET6, nh_addr) + elif 
self.proto is DpoProto.DPO_PROTO_IP4: self.nh_addr = inet_pton(AF_INET, nh_addr) else: - self.nh_addr = inet_pton(AF_INET6, nh_addr) + self.nh_addr = inet_pton(AF_INET6, "::") self.is_resolve_host = is_resolve_host self.is_resolve_attached = is_resolve_attached self.is_interface_rx = is_interface_rx @@ -401,7 +411,7 @@ class VppMplsRoute(VppObject): self._test.vapi.mpls_route_add_del( self.local_label, self.eos_bit, - path.is_ip4, + path.proto, path.nh_addr, path.nh_itf, is_multicast=self.is_multicast, @@ -420,7 +430,7 @@ class VppMplsRoute(VppObject): for path in self.paths: self._test.vapi.mpls_route_add_del(self.local_label, self.eos_bit, - 1, + path.proto, path.nh_addr, path.nh_itf, is_rpf_id=path.is_rpf_id, diff --git a/test/vpp_mpls_tunnel_interface.py b/test/vpp_mpls_tunnel_interface.py index f2001574..0542b05c 100644 --- a/test/vpp_mpls_tunnel_interface.py +++ b/test/vpp_mpls_tunnel_interface.py @@ -9,13 +9,14 @@ class VppMPLSTunnelInterface(VppInterface): VPP MPLS Tunnel interface """ - def __init__(self, test, paths, is_multicast=0): + def __init__(self, test, paths, is_multicast=0, is_l2=0): """ Create MPLS Tunnel interface """ self._sw_if_index = 0 super(VppMPLSTunnelInterface, self).__init__(test) self._test = test self.t_paths = paths self.is_multicast = is_multicast + self.is_l2 = is_l2 def add_vpp_config(self): self._sw_if_index = 0xffffffff @@ -29,7 +30,8 @@ class VppMPLSTunnelInterface(VppInterface): path.weight, next_hop_out_label_stack=path.nh_labels, next_hop_n_out_labels=len(path.nh_labels), - is_multicast=self.is_multicast) + is_multicast=self.is_multicast, + l2_only=self.is_l2) self._sw_if_index = reply.sw_if_index def remove_vpp_config(self): diff --git a/test/vpp_papi_provider.py b/test/vpp_papi_provider.py index 801a6c2d..3ba2ad4a 100644 --- a/test/vpp_papi_provider.py +++ b/test/vpp_papi_provider.py @@ -921,7 +921,7 @@ class VppPapiProvider(object): self, label, eos, - next_hop_proto_is_ip4, + next_hop_proto, next_hop_address, next_hop_sw_if_index=0xFFFFFFFF, table_id=0, @@ -982,7 +982,7 @@ class VppPapiProvider(object): 'mr_is_resolve_attached': is_resolve_attached, 'mr_is_interface_rx': is_interface_rx, 'mr_is_rpf_id': is_rpf_id, - 'mr_next_hop_proto_is_ip4': next_hop_proto_is_ip4, + 'mr_next_hop_proto': next_hop_proto, 'mr_next_hop_weight': next_hop_weight, 'mr_next_hop': next_hop_address, 'mr_next_hop_n_out_labels': next_hop_n_out_labels, -- cgit 1.2.3-korg From 910744394f2529ecac0fa91a16237777e023c5e6 Mon Sep 17 00:00:00 2001 From: Kris Michielsen Date: Thu, 22 Jun 2017 13:00:20 +0200 Subject: SRv6 tests Change-Id: Ib1d2fc5a83d9d007a0468591a73881675f1bec9b Signed-off-by: Kris Michielsen --- src/vnet/srv6/sr_localsid.c | 225 +++- src/vnet/srv6/sr_policy_rewrite.c | 6 +- src/vnet/srv6/sr_steering.c | 2 +- test/patches/scapy-2.3.3/inet6.py.patch | 185 +++ test/test_srv6.py | 1997 +++++++++++++++++++++++++++++++ test/vpp_papi_provider.py | 118 ++ test/vpp_srv6.py | 238 ++++ 7 files changed, 2739 insertions(+), 32 deletions(-) create mode 100644 test/patches/scapy-2.3.3/inet6.py.patch create mode 100644 test/test_srv6.py create mode 100644 test/vpp_srv6.py (limited to 'src/vnet/srv6') diff --git a/src/vnet/srv6/sr_localsid.c b/src/vnet/srv6/sr_localsid.c index adeb5c03..1be68334 100755 --- a/src/vnet/srv6/sr_localsid.c +++ b/src/vnet/srv6/sr_localsid.c @@ -587,12 +587,11 @@ VLIB_CLI_COMMAND (clear_sr_localsid_counters_command, static) = { */ typedef struct { - u32 localsid_index; - ip6_address_t src, out_dst; + ip6_address_t localsid; + u16 behavior; u8 sr[256]; 
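  /* The trace entry now stores the LocalSID address and behavior by value
   * (fields 'localsid' and 'behavior' above) rather than an index into the
   * localsid pool, so format_sr_localsid_trace() no longer needs to look
   * the LocalSID up at format time.  'sr' holds a copy of the SRH segment
   * list; 'num_segments' and 'segments_left' below are filled in from the
   * SRH when the packet is traced, with num_segments set to 0xFF when the
   * IPv6 header is not at the buffer's current data pointer. */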
u8 num_segments; u8 segments_left; - //With SRv6 header update include flags here. } sr_localsid_trace_t; #define foreach_sr_localsid_error \ @@ -643,16 +642,12 @@ format_sr_localsid_trace (u8 * s, va_list * args) { CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); - ip6_sr_main_t *sm = &sr_main; sr_localsid_trace_t *t = va_arg (*args, sr_localsid_trace_t *); - ip6_sr_localsid_t *ls = - pool_elt_at_index (sm->localsids, t->localsid_index); - s = format (s, "SR-LOCALSID:\n\tLocalsid: %U\n", format_ip6_address, - &ls->localsid); - switch (ls->behavior) + &t->localsid); + switch (t->behavior) { case SR_BEHAVIOR_END: s = format (s, "\tBehavior: End\n"); @@ -686,7 +681,7 @@ format_sr_localsid_trace (u8 * s, va_list * args) { if (t->num_segments > 0) { - s = format (s, "\tSegments left: %d\n", t->num_segments); + s = format (s, "\tSegments left: %d\n", t->segments_left); s = format (s, "\tSID list: [in ietf order]"); int i = 0; for (i = 0; i < t->num_segments; i++) @@ -882,7 +877,7 @@ end_decaps_srh_processing (vlib_node_runtime_t * node, } /** - * @brief SR LocalSID graph node. Supports all default SR Endpoint variants + * @brief SR LocalSID graph node. Supports all default SR Endpoint variants with decaps */ static uword sr_localsid_d_fn (vlib_main_t * vm, vlib_node_runtime_t * node, @@ -975,7 +970,97 @@ sr_localsid_d_fn (vlib_main_t * vm, vlib_node_runtime_t * node, end_decaps_srh_processing (node, b2, ip2, sr2, ls2, &next2); end_decaps_srh_processing (node, b3, ip3, sr3, ls3, &next3); - //TODO: trace. + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_localsid_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + tr->num_segments = 0; + clib_memcpy (tr->localsid.as_u8, ls0->localsid.as_u8, + sizeof (tr->localsid.as_u8)); + tr->behavior = ls0->behavior; + if (ip0 == vlib_buffer_get_current (b0)) + { + if (ip0->protocol == IP_PROTOCOL_IPV6_ROUTE + && sr0->type == ROUTING_HEADER_TYPE_SR) + { + clib_memcpy (tr->sr, sr0->segments, sr0->length * 8); + tr->num_segments = + sr0->length * 8 / sizeof (ip6_address_t); + tr->segments_left = sr0->segments_left; + } + } + else + tr->num_segments = 0xFF; + } + + if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_localsid_trace_t *tr = + vlib_add_trace (vm, node, b1, sizeof (*tr)); + tr->num_segments = 0; + clib_memcpy (tr->localsid.as_u8, ls1->localsid.as_u8, + sizeof (tr->localsid.as_u8)); + tr->behavior = ls1->behavior; + if (ip1 == vlib_buffer_get_current (b1)) + { + if (ip1->protocol == IP_PROTOCOL_IPV6_ROUTE + && sr1->type == ROUTING_HEADER_TYPE_SR) + { + clib_memcpy (tr->sr, sr1->segments, sr1->length * 8); + tr->num_segments = + sr1->length * 8 / sizeof (ip6_address_t); + tr->segments_left = sr1->segments_left; + } + } + else + tr->num_segments = 0xFF; + } + + if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_localsid_trace_t *tr = + vlib_add_trace (vm, node, b2, sizeof (*tr)); + tr->num_segments = 0; + clib_memcpy (tr->localsid.as_u8, ls2->localsid.as_u8, + sizeof (tr->localsid.as_u8)); + tr->behavior = ls2->behavior; + if (ip2 == vlib_buffer_get_current (b2)) + { + if (ip2->protocol == IP_PROTOCOL_IPV6_ROUTE + && sr2->type == ROUTING_HEADER_TYPE_SR) + { + clib_memcpy (tr->sr, sr2->segments, sr2->length * 8); + tr->num_segments = + sr2->length * 8 / sizeof (ip6_address_t); + tr->segments_left = sr2->segments_left; + } + } + else + tr->num_segments = 0xFF; + } + + if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED)) + 
{ + sr_localsid_trace_t *tr = + vlib_add_trace (vm, node, b3, sizeof (*tr)); + tr->num_segments = 0; + clib_memcpy (tr->localsid.as_u8, ls3->localsid.as_u8, + sizeof (tr->localsid.as_u8)); + tr->behavior = ls3->behavior; + if (ip3 == vlib_buffer_get_current (b3)) + { + if (ip3->protocol == IP_PROTOCOL_IPV6_ROUTE + && sr3->type == ROUTING_HEADER_TYPE_SR) + { + clib_memcpy (tr->sr, sr3->segments, sr3->length * 8); + tr->num_segments = + sr3->length * 8 / sizeof (ip6_address_t); + tr->segments_left = sr3->segments_left; + } + } + else + tr->num_segments = 0xFF; + } vlib_increment_combined_counter (((next0 == @@ -1043,14 +1128,11 @@ sr_localsid_d_fn (vlib_main_t * vm, vlib_node_runtime_t * node, sr_localsid_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr)); tr->num_segments = 0; - tr->localsid_index = ls0 - sm->localsids; - + clib_memcpy (tr->localsid.as_u8, ls0->localsid.as_u8, + sizeof (tr->localsid.as_u8)); + tr->behavior = ls0->behavior; if (ip0 == vlib_buffer_get_current (b0)) { - clib_memcpy (tr->src.as_u8, ip0->src_address.as_u8, - sizeof (tr->src.as_u8)); - clib_memcpy (tr->out_dst.as_u8, ip0->dst_address.as_u8, - sizeof (tr->out_dst.as_u8)); if (ip0->protocol == IP_PROTOCOL_IPV6_ROUTE && sr0->type == ROUTING_HEADER_TYPE_SR) { @@ -1098,7 +1180,7 @@ VLIB_REGISTER_NODE (sr_localsid_d_node) = { /* *INDENT-ON* */ /** - * @brief SR LocalSID graph node. Supports all default SR Endpoint variants + * @brief SR LocalSID graph node. Supports all default SR Endpoint without decaps */ static uword sr_localsid_fn (vlib_main_t * vm, vlib_node_runtime_t * node, @@ -1195,7 +1277,97 @@ sr_localsid_fn (vlib_main_t * vm, vlib_node_runtime_t * node, end_srh_processing (node, b3, ip3, sr3, ls3, &next3, ls3->end_psp, prev3); - //TODO: proper trace. + if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_localsid_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + tr->num_segments = 0; + clib_memcpy (tr->localsid.as_u8, ls0->localsid.as_u8, + sizeof (tr->localsid.as_u8)); + tr->behavior = ls0->behavior; + if (ip0 == vlib_buffer_get_current (b0)) + { + if (ip0->protocol == IP_PROTOCOL_IPV6_ROUTE + && sr0->type == ROUTING_HEADER_TYPE_SR) + { + clib_memcpy (tr->sr, sr0->segments, sr0->length * 8); + tr->num_segments = + sr0->length * 8 / sizeof (ip6_address_t); + tr->segments_left = sr0->segments_left; + } + } + else + tr->num_segments = 0xFF; + } + + if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_localsid_trace_t *tr = + vlib_add_trace (vm, node, b1, sizeof (*tr)); + tr->num_segments = 0; + clib_memcpy (tr->localsid.as_u8, ls1->localsid.as_u8, + sizeof (tr->localsid.as_u8)); + tr->behavior = ls1->behavior; + if (ip1 == vlib_buffer_get_current (b1)) + { + if (ip1->protocol == IP_PROTOCOL_IPV6_ROUTE + && sr1->type == ROUTING_HEADER_TYPE_SR) + { + clib_memcpy (tr->sr, sr1->segments, sr1->length * 8); + tr->num_segments = + sr1->length * 8 / sizeof (ip6_address_t); + tr->segments_left = sr1->segments_left; + } + } + else + tr->num_segments = 0xFF; + } + + if (PREDICT_FALSE (b2->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_localsid_trace_t *tr = + vlib_add_trace (vm, node, b2, sizeof (*tr)); + tr->num_segments = 0; + clib_memcpy (tr->localsid.as_u8, ls2->localsid.as_u8, + sizeof (tr->localsid.as_u8)); + tr->behavior = ls2->behavior; + if (ip2 == vlib_buffer_get_current (b2)) + { + if (ip2->protocol == IP_PROTOCOL_IPV6_ROUTE + && sr2->type == ROUTING_HEADER_TYPE_SR) + { + clib_memcpy (tr->sr, sr2->segments, sr2->length * 8); + tr->num_segments = + sr2->length * 8 / sizeof 
(ip6_address_t); + tr->segments_left = sr2->segments_left; + } + } + else + tr->num_segments = 0xFF; + } + + if (PREDICT_FALSE (b3->flags & VLIB_BUFFER_IS_TRACED)) + { + sr_localsid_trace_t *tr = + vlib_add_trace (vm, node, b3, sizeof (*tr)); + tr->num_segments = 0; + clib_memcpy (tr->localsid.as_u8, ls3->localsid.as_u8, + sizeof (tr->localsid.as_u8)); + tr->behavior = ls3->behavior; + if (ip3 == vlib_buffer_get_current (b3)) + { + if (ip3->protocol == IP_PROTOCOL_IPV6_ROUTE + && sr3->type == ROUTING_HEADER_TYPE_SR) + { + clib_memcpy (tr->sr, sr3->segments, sr3->length * 8); + tr->num_segments = + sr3->length * 8 / sizeof (ip6_address_t); + tr->segments_left = sr3->segments_left; + } + } + else + tr->num_segments = 0xFF; + } vlib_increment_combined_counter (((next0 == @@ -1262,14 +1434,11 @@ sr_localsid_fn (vlib_main_t * vm, vlib_node_runtime_t * node, sr_localsid_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr)); tr->num_segments = 0; - tr->localsid_index = ls0 - sm->localsids; - + clib_memcpy (tr->localsid.as_u8, ls0->localsid.as_u8, + sizeof (tr->localsid.as_u8)); + tr->behavior = ls0->behavior; if (ip0 == vlib_buffer_get_current (b0)) { - clib_memcpy (tr->src.as_u8, ip0->src_address.as_u8, - sizeof (tr->src.as_u8)); - clib_memcpy (tr->out_dst.as_u8, ip0->dst_address.as_u8, - sizeof (tr->out_dst.as_u8)); if (ip0->protocol == IP_PROTOCOL_IPV6_ROUTE && sr0->type == ROUTING_HEADER_TYPE_SR) { @@ -1280,9 +1449,7 @@ sr_localsid_fn (vlib_main_t * vm, vlib_node_runtime_t * node, } } else - { - tr->num_segments = 0xFF; - } + tr->num_segments = 0xFF; } vlib_increment_combined_counter diff --git a/src/vnet/srv6/sr_policy_rewrite.c b/src/vnet/srv6/sr_policy_rewrite.c index 7a37a66b..f427bbf3 100755 --- a/src/vnet/srv6/sr_policy_rewrite.c +++ b/src/vnet/srv6/sr_policy_rewrite.c @@ -672,7 +672,8 @@ sr_policy_del (ip6_address_t * bsid, u32 index) segment_list = pool_elt_at_index (sm->sid_lists, *sl_index); vec_free (segment_list->segments); vec_free (segment_list->rewrite); - vec_free (segment_list->rewrite_bsid); + if (!sr_policy->is_encap) + vec_free (segment_list->rewrite_bsid); pool_put_index (sm->sid_lists, *sl_index); } @@ -766,7 +767,8 @@ sr_policy_mod (ip6_address_t * bsid, u32 index, u32 fib_table, segment_list = pool_elt_at_index (sm->sid_lists, sl_index); vec_free (segment_list->segments); vec_free (segment_list->rewrite); - vec_free (segment_list->rewrite_bsid); + if (!sr_policy->is_encap) + vec_free (segment_list->rewrite_bsid); pool_put_index (sm->sid_lists, sl_index); vec_del1 (sr_policy->segments_lists, sl_index_iterate - sr_policy->segments_lists); diff --git a/src/vnet/srv6/sr_steering.c b/src/vnet/srv6/sr_steering.c index 704adaa7..57fe21f6 100755 --- a/src/vnet/srv6/sr_steering.c +++ b/src/vnet/srv6/sr_steering.c @@ -165,7 +165,7 @@ sr_steering_policy (int is_del, ip6_address_t * bsid, u32 sr_policy_index, sm->fib_table_ip4 = (u32) ~ 0; } - return 1; + return 0; } else /* It means user requested to update an existing SR steering policy */ { diff --git a/test/patches/scapy-2.3.3/inet6.py.patch b/test/patches/scapy-2.3.3/inet6.py.patch new file mode 100644 index 00000000..f98e7091 --- /dev/null +++ b/test/patches/scapy-2.3.3/inet6.py.patch @@ -0,0 +1,185 @@ +diff --git a/scapy/layers/inet6.py b/scapy/layers/inet6.py +--- a/scapy/layers/inet6.py 2017-06-01 14:04:18.160881034 +0200 ++++ b/scapy/layers/inet6.py 2017-06-02 09:08:40.133800208 +0200 +@@ -369,6 +369,8 @@ + return Raw + elif self.nh == 135 and len(p) > 3: # Mobile IPv6 + return _mip6_mhtype2cls.get(ord(p[2]), 
MIP6MH_Generic) ++ elif self.nh == 43 and ord(p[2]) == 4: # Segment Routing header ++ return IPv6ExtHdrSegmentRouting + else: + return get_cls(ipv6nhcls.get(self.nh,"Raw"), "Raw") + +@@ -430,6 +432,14 @@ + sd = strxor(sd, a) + sd = inet_ntop(socket.AF_INET6, sd) + ++ if self.nh == 43 and isinstance(self.payload, IPv6ExtHdrSegmentRouting): ++ # With segment routing header (rh == 4), the destination is ++ # the first address of the IPv6 addresses list ++ try: ++ sd = self.addresses[0] ++ except IndexError: ++ sd = self.dst ++ + if self.nh == 44 and isinstance(self.payload, IPv6ExtHdrFragment): + nh = self.payload.nh + +@@ -489,6 +499,8 @@ + return self.payload.answers(other.payload.payload) + elif other.nh == 43 and isinstance(other.payload, IPv6ExtHdrRouting): + return self.payload.answers(other.payload.payload) # Buggy if self.payload is a IPv6ExtHdrRouting ++ elif other.nh == 43 and isinstance(other.payload, IPv6ExtHdrSegmentRouting): ++ return self.payload.answers(other.payload.payload) # Buggy if self.payload is a IPv6ExtHdrRouting + elif other.nh == 60 and isinstance(other.payload, IPv6ExtHdrDestOpt): + return self.payload.payload.answers(other.payload.payload) + elif self.nh == 60 and isinstance(self.payload, IPv6ExtHdrDestOpt): # BU in reply to BRR, for instance +@@ -919,6 +931,148 @@ + pkt = pkt[:3]+struct.pack("B", len(self.addresses))+pkt[4:] + return _IPv6ExtHdr.post_build(self, pkt, pay) + ++######################### Segment Routing Header ############################ ++ ++# This implementation is based on draft 06, available at: ++# https://tools.ietf.org/html/draft-ietf-6man-segment-routing-header-06 ++ ++class IPv6ExtHdrSegmentRoutingTLV(Packet): ++ name = "IPv6 Option Header Segment Routing - Generic TLV" ++ fields_desc = [ ByteField("type", 0), ++ ByteField("len", 0), ++ ByteField("reserved", 0), ++ ByteField("flags", 0), ++ StrLenField("value", "", length_from=lambda pkt: pkt.len) ] ++ ++ def extract_padding(self, p): ++ return "",p ++ ++ registered_sr_tlv = {} ++ @classmethod ++ def register_variant(cls): ++ cls.registered_sr_tlv[cls.type.default] = cls ++ ++ @classmethod ++ def dispatch_hook(cls, pkt=None, *args, **kargs): ++ if pkt: ++ tmp_type = ord(pkt[0]) ++ return cls.registered_sr_tlv.get(tmp_type, cls) ++ return cls ++ ++ ++class IPv6ExtHdrSegmentRoutingTLVIngressNode(IPv6ExtHdrSegmentRoutingTLV): ++ name = "IPv6 Option Header Segment Routing - Ingress Node TLV" ++ fields_desc = [ ByteField("type", 1), ++ ByteField("len", 18), ++ ByteField("reserved", 0), ++ ByteField("flags", 0), ++ IP6Field("ingress_node", "::1") ] ++ ++ ++class IPv6ExtHdrSegmentRoutingTLVEgressNode(IPv6ExtHdrSegmentRoutingTLV): ++ name = "IPv6 Option Header Segment Routing - Egress Node TLV" ++ fields_desc = [ ByteField("type", 2), ++ ByteField("len", 18), ++ ByteField("reserved", 0), ++ ByteField("flags", 0), ++ IP6Field("egress_node", "::1") ] ++ ++ ++class IPv6ExtHdrSegmentRoutingTLVPadding(IPv6ExtHdrSegmentRoutingTLV): ++ name = "IPv6 Option Header Segment Routing - Padding TLV" ++ fields_desc = [ ByteField("type", 4), ++ FieldLenField("len", None, length_of="padding", fmt="B"), ++ StrLenField("padding", b"\x00", length_from=lambda pkt: pkt.len) ] ++ ++ ++class IPv6ExtHdrSegmentRouting(_IPv6ExtHdr): ++ # 0 1 2 3 ++ # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 ++ #+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ++ #| Next Header | Hdr Ext Len | Routing Type | Segments Left | ++ #+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ++ #| 
Last Entry | Flags | Tag | ++ #+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ++ #| | ++ #| Segment List[0] (128 bits IPv6 address) | ++ #| | ++ #| | ++ #+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ++ #| | ++ #| | ++ # ... ++ #| | ++ #| | ++ #+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ++ #| | ++ #| Segment List[n] (128 bits IPv6 address) | ++ #| | ++ #| | ++ #+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ++ #// // ++ #// Optional Type Length Value objects (variable) // ++ #// // ++ #+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ++ # ++ # 0 1 2 3 4 5 6 7 ++ # +-+-+-+-+-+-+-+-+ ++ # |U|P|O|A|H| U | ++ # +-+-+-+-+-+-+-+-+ ++ ++ name = "IPv6 Segment Routing Extension Header" ++ fields_desc = [ ByteEnumField("nh", 59, ipv6nh), ++ ByteField("len", None), ++ ByteField("type", 4), ++ ByteField("segleft", None), ++ ByteField("lastentry", None), ++ BitField("unused1", 0, 1), ++ BitField("protected", 0, 1), ++ BitField("oam", 0, 1), ++ BitField("alert", 0, 1), ++ BitField("hmac", 0, 1), ++ BitField("unused2", 0, 3), ++ ShortField("tag", 0), ++ IP6ListField("addresses", ["::1"], ++ count_from=lambda pkt: pkt.lastentry+1), ++ PacketListField("tlv_objects", [], IPv6ExtHdrSegmentRoutingTLV, ++ length_from=lambda pkt: 8*pkt.len - 16*(pkt.lastentry+1)) ] ++ ++ overload_fields = { IPv6: { "nh": 43 } } ++ ++ def post_build(self, pkt, pay): ++ ++ if self.len is None: ++ ++ # The extension must be align on 8 bytes ++ tmp_mod = (len(pkt) - 8) % 8 ++ if tmp_mod == 1: ++ warning("IPv6ExtHdrSegmentRouting(): can't pad 1 byte !") ++ elif tmp_mod >= 2: ++ #Add the padding extension ++ tmp_pad = b"\x00" * (tmp_mod-2) ++ tlv = IPv6ExtHdrSegmentRoutingTLVPadding(padding=tmp_pad) ++ pkt += str(tlv) ++ ++ tmp_len = (len(pkt) - 8) / 8 ++ pkt = pkt[:1] + struct.pack("B", tmp_len)+ pkt[2:] ++ ++ if self.segleft is None: ++ tmp_len = len(self.addresses) ++ if tmp_len: ++ tmp_len -= 1 ++ pkt = pkt[:3] + struct.pack("B", tmp_len) + pkt[4:] ++ ++ if self.lastentry is None: ++ #km - changed to contain n-1 ++ tmp_len = len(self.addresses) ++ if tmp_len: ++ tmp_len -= 1 ++ #pkt = pkt[:4] + struct.pack("B", len(self.addresses)) + pkt[5:] ++ pkt = pkt[:4] + struct.pack("B", tmp_len) + pkt[5:] ++ ++ return _IPv6ExtHdr.post_build(self, pkt, pay) ++ ++ + ########################### Fragmentation Header ############################ + + class IPv6ExtHdrFragment(_IPv6ExtHdr): diff --git a/test/test_srv6.py b/test/test_srv6.py new file mode 100644 index 00000000..a31b30eb --- /dev/null +++ b/test/test_srv6.py @@ -0,0 +1,1997 @@ +#!/usr/bin/env python + +import unittest +from socket import AF_INET6 + +from framework import VppTestCase, VppTestRunner +from vpp_ip_route import VppIpRoute, VppRoutePath, DpoProto +from vpp_srv6 import SRv6LocalSIDBehaviors, VppSRv6LocalSID, VppSRv6Policy, \ + SRv6PolicyType, VppSRv6Steering, SRv6PolicySteeringTypes + +from scapy.packet import Raw +from scapy.layers.l2 import Ether, Dot1Q +from scapy.layers.inet6 import IPv6, UDP, IPv6ExtHdrSegmentRouting +from scapy.layers.inet import IP, UDP + +from scapy.utils import inet_pton, inet_ntop + +from util import ppp + + +class TestSRv6(VppTestCase): + """ SRv6 Test Case """ + + @classmethod + def setUpClass(self): + super(TestSRv6, self).setUpClass() + + def setUp(self): + """ Perform test setup before each test case. 
+ """ + super(TestSRv6, self).setUp() + + # packet sizes, inclusive L2 overhead + self.pg_packet_sizes = [64, 512, 1518, 9018] + + # reset packet_infos + self.reset_packet_infos() + + def tearDown(self): + """ Clean up test setup after each test case. + """ + self.teardown_interfaces() + + super(TestSRv6, self).tearDown() + + def configure_interface(self, + interface, + ipv6=False, ipv4=False, + ipv6_table_id=0, ipv4_table_id=0): + """ Configure interface. + :param ipv6: configure IPv6 on interface + :param ipv4: configure IPv4 on interface + :param ipv6_table_id: FIB table_id for IPv6 + :param ipv4_table_id: FIB table_id for IPv4 + """ + self.logger.debug("Configuring interface %s" % (interface.name)) + if ipv6: + self.logger.debug("Configuring IPv6") + interface.set_table_ip6(ipv6_table_id) + interface.config_ip6() + interface.resolve_ndp(timeout=5) + if ipv4: + self.logger.debug("Configuring IPv4") + interface.set_table_ip4(ipv4_table_id) + interface.config_ip4() + interface.resolve_arp() + interface.admin_up() + + def setup_interfaces(self, ipv6=[], ipv4=[], + ipv6_table_id=[], ipv4_table_id=[]): + """ Create and configure interfaces. + + :param ipv6: list of interface IPv6 capabilities + :param ipv4: list of interface IPv4 capabilities + :param ipv6_table_id: list of intf IPv6 FIB table_ids + :param ipv4_table_id: list of intf IPv4 FIB table_ids + :returns: List of created interfaces. + """ + # how many interfaces? + if len(ipv6): + count = len(ipv6) + else: + count = len(ipv4) + self.logger.debug("Creating and configuring %d interfaces" % (count)) + + # fill up ipv6 and ipv4 lists if needed + # not enabled (False) is the default + if len(ipv6) < count: + ipv6 += (count - len(ipv6)) * [False] + if len(ipv4) < count: + ipv4 += (count - len(ipv4)) * [False] + + # fill up table_id lists if needed + # table_id 0 (global) is the default + if len(ipv6_table_id) < count: + ipv6_table_id += (count - len(ipv6_table_id)) * [0] + if len(ipv4_table_id) < count: + ipv4_table_id += (count - len(ipv4_table_id)) * [0] + + # create 'count' pg interfaces + self.create_pg_interfaces(range(count)) + + # setup all interfaces + for i in range(count): + intf = self.pg_interfaces[i] + self.configure_interface(intf, + ipv6[i], ipv4[i], + ipv6_table_id[i], ipv4_table_id[i]) + + if any(ipv6): + self.logger.debug(self.vapi.cli("show ip6 neighbors")) + if any(ipv4): + self.logger.debug(self.vapi.cli("show ip arp")) + self.logger.debug(self.vapi.cli("show interface")) + self.logger.debug(self.vapi.cli("show hardware")) + + return self.pg_interfaces + + def teardown_interfaces(self): + """ Unconfigure and bring down interface. + """ + self.logger.debug("Tearing down interfaces") + # tear down all interfaces + # AFAIK they cannot be deleted + for i in self.pg_interfaces: + self.logger.debug("Tear down interface %s" % (i.name)) + i.admin_down() + i.unconfig() + + def test_SRv6_T_Encaps(self): + """ Test SRv6 Transit.Encaps behavior for IPv6. + """ + # send traffic to one destination interface + # source and destination are IPv6 only + self.setup_interfaces(ipv6=[True, True]) + + # configure FIB entries + route = VppIpRoute(self, "a4::", 64, + [VppRoutePath(self.pg1.remote_ip6, + self.pg1.sw_if_index, + proto=DpoProto.DPO_PROTO_IP6)], + is_ip6=1) + route.add_vpp_config() + + # configure encaps IPv6 source address + # needs to be done before SR Policy config + # TODO: API? 
+ self.vapi.cli("set sr encaps source addr a3::") + + bsid = 'a3::9999:1' + # configure SRv6 Policy + # Note: segment list order: first -> last + sr_policy = VppSRv6Policy( + self, bsid=bsid, + is_encap=1, + sr_type=SRv6PolicyType.SR_POLICY_TYPE_DEFAULT, + weight=1, fib_table=0, + segments=['a4::', 'a5::', 'a6::c7'], + source='a3::') + sr_policy.add_vpp_config() + self.sr_policy = sr_policy + + # log the sr policies + self.logger.info(self.vapi.cli("show sr policies")) + + # steer IPv6 traffic to a7::/64 into SRv6 Policy + # use the bsid of the above self.sr_policy + pol_steering = VppSRv6Steering( + self, + bsid=self.sr_policy.bsid, + prefix="a7::", mask_width=64, + traffic_type=SRv6PolicySteeringTypes.SR_STEER_IPV6, + sr_policy_index=0, table_id=0, + sw_if_index=0) + pol_steering.add_vpp_config() + + # log the sr steering policies + self.logger.info(self.vapi.cli("show sr steering policies")) + + # create packets + count = len(self.pg_packet_sizes) + dst_inner = 'a7::1234' + pkts = [] + + # create IPv6 packets without SRH + packet_header = self.create_packet_header_IPv6(dst_inner) + # create traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, + self.pg_packet_sizes, count)) + + # create IPv6 packets with SRH + # packets with segments-left 1, active segment a7:: + packet_header = self.create_packet_header_IPv6_SRH( + sidlist=['a8::', 'a7::', 'a6::'], + segleft=1) + # create traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, + self.pg_packet_sizes, count)) + + # create IPv6 packets with SRH and IPv6 + # packets with segments-left 1, active segment a7:: + packet_header = self.create_packet_header_IPv6_SRH_IPv6( + dst_inner, + sidlist=['a8::', 'a7::', 'a6::'], + segleft=1) + # create traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, + self.pg_packet_sizes, count)) + + # send packets and verify received packets + self.send_and_verify_pkts(self.pg0, pkts, self.pg1, + self.compare_rx_tx_packet_T_Encaps) + + # log the localsid counters + self.logger.info(self.vapi.cli("show sr localsid")) + + # remove SR steering + pol_steering.remove_vpp_config() + self.logger.info(self.vapi.cli("show sr steering policies")) + + # remove SR Policies + self.sr_policy.remove_vpp_config() + self.logger.info(self.vapi.cli("show sr policies")) + + # remove FIB entries + # done by tearDown + + # cleanup interfaces + self.teardown_interfaces() + + def test_SRv6_T_Insert(self): + """ Test SRv6 Transit.Insert behavior (IPv6 only). + """ + # send traffic to one destination interface + # source and destination are IPv6 only + self.setup_interfaces(ipv6=[True, True]) + + # configure FIB entries + route = VppIpRoute(self, "a4::", 64, + [VppRoutePath(self.pg1.remote_ip6, + self.pg1.sw_if_index, + proto=DpoProto.DPO_PROTO_IP6)], + is_ip6=1) + route.add_vpp_config() + + # configure encaps IPv6 source address + # needs to be done before SR Policy config + # TODO: API? 
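+ # Note: with is_encap=0 below the policy inserts the SRH into the
+ # original IPv6 header instead of pushing an outer one, so this encaps
+ # source address is not expected to appear in the output;
+ # compare_rx_tx_packet_T_Insert verifies that ip.src is unchanged.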
+ self.vapi.cli("set sr encaps source addr a3::") + + bsid = 'a3::9999:1' + # configure SRv6 Policy + # Note: segment list order: first -> last + sr_policy = VppSRv6Policy( + self, bsid=bsid, + is_encap=0, + sr_type=SRv6PolicyType.SR_POLICY_TYPE_DEFAULT, + weight=1, fib_table=0, + segments=['a4::', 'a5::', 'a6::c7'], + source='a3::') + sr_policy.add_vpp_config() + self.sr_policy = sr_policy + + # log the sr policies + self.logger.info(self.vapi.cli("show sr policies")) + + # steer IPv6 traffic to a7::/64 into SRv6 Policy + # use the bsid of the above self.sr_policy + pol_steering = VppSRv6Steering( + self, + bsid=self.sr_policy.bsid, + prefix="a7::", mask_width=64, + traffic_type=SRv6PolicySteeringTypes.SR_STEER_IPV6, + sr_policy_index=0, table_id=0, + sw_if_index=0) + pol_steering.add_vpp_config() + + # log the sr steering policies + self.logger.info(self.vapi.cli("show sr steering policies")) + + # create packets + count = len(self.pg_packet_sizes) + dst_inner = 'a7::1234' + pkts = [] + + # create IPv6 packets without SRH + packet_header = self.create_packet_header_IPv6(dst_inner) + # create traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, + self.pg_packet_sizes, count)) + + # create IPv6 packets with SRH + # packets with segments-left 1, active segment a7:: + packet_header = self.create_packet_header_IPv6_SRH( + sidlist=['a8::', 'a7::', 'a6::'], + segleft=1) + # create traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, + self.pg_packet_sizes, count)) + + # send packets and verify received packets + self.send_and_verify_pkts(self.pg0, pkts, self.pg1, + self.compare_rx_tx_packet_T_Insert) + + # log the localsid counters + self.logger.info(self.vapi.cli("show sr localsid")) + + # remove SR steering + pol_steering.remove_vpp_config() + self.logger.info(self.vapi.cli("show sr steering policies")) + + # remove SR Policies + self.sr_policy.remove_vpp_config() + self.logger.info(self.vapi.cli("show sr policies")) + + # remove FIB entries + # done by tearDown + + # cleanup interfaces + self.teardown_interfaces() + + def test_SRv6_T_Encaps_IPv4(self): + """ Test SRv6 Transit.Encaps behavior for IPv4. + """ + # send traffic to one destination interface + # source interface is IPv4 only + # destination interface is IPv6 only + self.setup_interfaces(ipv6=[False, True], ipv4=[True, False]) + + # configure FIB entries + route = VppIpRoute(self, "a4::", 64, + [VppRoutePath(self.pg1.remote_ip6, + self.pg1.sw_if_index, + proto=DpoProto.DPO_PROTO_IP6)], + is_ip6=1) + route.add_vpp_config() + + # configure encaps IPv6 source address + # needs to be done before SR Policy config + # TODO: API? 
+ self.vapi.cli("set sr encaps source addr a3::") + + bsid = 'a3::9999:1' + # configure SRv6 Policy + # Note: segment list order: first -> last + sr_policy = VppSRv6Policy( + self, bsid=bsid, + is_encap=1, + sr_type=SRv6PolicyType.SR_POLICY_TYPE_DEFAULT, + weight=1, fib_table=0, + segments=['a4::', 'a5::', 'a6::c7'], + source='a3::') + sr_policy.add_vpp_config() + self.sr_policy = sr_policy + + # log the sr policies + self.logger.info(self.vapi.cli("show sr policies")) + + # steer IPv4 traffic to 7.1.1.0/24 into SRv6 Policy + # use the bsid of the above self.sr_policy + pol_steering = VppSRv6Steering( + self, + bsid=self.sr_policy.bsid, + prefix="7.1.1.0", mask_width=24, + traffic_type=SRv6PolicySteeringTypes.SR_STEER_IPV4, + sr_policy_index=0, table_id=0, + sw_if_index=0) + pol_steering.add_vpp_config() + + # log the sr steering policies + self.logger.info(self.vapi.cli("show sr steering policies")) + + # create packets + count = len(self.pg_packet_sizes) + dst_inner = '7.1.1.123' + pkts = [] + + # create IPv4 packets + packet_header = self.create_packet_header_IPv4(dst_inner) + # create traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, + self.pg_packet_sizes, count)) + + # send packets and verify received packets + self.send_and_verify_pkts(self.pg0, pkts, self.pg1, + self.compare_rx_tx_packet_T_Encaps_IPv4) + + # log the localsid counters + self.logger.info(self.vapi.cli("show sr localsid")) + + # remove SR steering + pol_steering.remove_vpp_config() + self.logger.info(self.vapi.cli("show sr steering policies")) + + # remove SR Policies + self.sr_policy.remove_vpp_config() + self.logger.info(self.vapi.cli("show sr policies")) + + # remove FIB entries + # done by tearDown + + # cleanup interfaces + self.teardown_interfaces() + + @unittest.skip("VPP crashes after running this test") + def test_SRv6_T_Encaps_L2(self): + """ Test SRv6 Transit.Encaps behavior for L2. + """ + # send traffic to one destination interface + # source interface is IPv4 only TODO? + # destination interface is IPv6 only + self.setup_interfaces(ipv6=[False, True], ipv4=[False, False]) + + # configure FIB entries + route = VppIpRoute(self, "a4::", 64, + [VppRoutePath(self.pg1.remote_ip6, + self.pg1.sw_if_index, + proto=DpoProto.DPO_PROTO_IP6)], + is_ip6=1) + route.add_vpp_config() + + # configure encaps IPv6 source address + # needs to be done before SR Policy config + # TODO: API? 
+ self.vapi.cli("set sr encaps source addr a3::") + + bsid = 'a3::9999:1' + # configure SRv6 Policy + # Note: segment list order: first -> last + sr_policy = VppSRv6Policy( + self, bsid=bsid, + is_encap=1, + sr_type=SRv6PolicyType.SR_POLICY_TYPE_DEFAULT, + weight=1, fib_table=0, + segments=['a4::', 'a5::', 'a6::c7'], + source='a3::') + sr_policy.add_vpp_config() + self.sr_policy = sr_policy + + # log the sr policies + self.logger.info(self.vapi.cli("show sr policies")) + + # steer L2 traffic into SRv6 Policy + # use the bsid of the above self.sr_policy + pol_steering = VppSRv6Steering( + self, + bsid=self.sr_policy.bsid, + prefix="::", mask_width=0, + traffic_type=SRv6PolicySteeringTypes.SR_STEER_L2, + sr_policy_index=0, table_id=0, + sw_if_index=self.pg0.sw_if_index) + pol_steering.add_vpp_config() + + # log the sr steering policies + self.logger.info(self.vapi.cli("show sr steering policies")) + + # create packets + count = len(self.pg_packet_sizes) + pkts = [] + + # create L2 packets without dot1q header + packet_header = self.create_packet_header_L2() + # create traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, + self.pg_packet_sizes, count)) + + # create L2 packets with dot1q header + packet_header = self.create_packet_header_L2(vlan=123) + # create traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, + self.pg_packet_sizes, count)) + + # send packets and verify received packets + self.send_and_verify_pkts(self.pg0, pkts, self.pg1, + self.compare_rx_tx_packet_T_Encaps_L2) + + # log the localsid counters + self.logger.info(self.vapi.cli("show sr localsid")) + + # remove SR steering + pol_steering.remove_vpp_config() + self.logger.info(self.vapi.cli("show sr steering policies")) + + # remove SR Policies + self.sr_policy.remove_vpp_config() + self.logger.info(self.vapi.cli("show sr policies")) + + # remove FIB entries + # done by tearDown + + # cleanup interfaces + self.teardown_interfaces() + + def test_SRv6_End(self): + """ Test SRv6 End (without PSP) behavior. 
+ """ + # send traffic to one destination interface + # source and destination interfaces are IPv6 only + self.setup_interfaces(ipv6=[True, True]) + + # configure FIB entries + route = VppIpRoute(self, "a4::", 64, + [VppRoutePath(self.pg1.remote_ip6, + self.pg1.sw_if_index, + proto=DpoProto.DPO_PROTO_IP6)], + is_ip6=1) + route.add_vpp_config() + + # configure SRv6 localSID End without PSP behavior + localsid = VppSRv6LocalSID( + self, localsid_addr='A3::0', + behavior=SRv6LocalSIDBehaviors.SR_BEHAVIOR_END, + nh_addr='::', + end_psp=0, + sw_if_index=0, + vlan_index=0, + fib_table=0) + localsid.add_vpp_config() + # log the localsids + self.logger.debug(self.vapi.cli("show sr localsid")) + + # create IPv6 packets with SRH (SL=2, SL=1, SL=0) + # send one packet per SL value per packet size + # SL=0 packet with localSID End with USP needs 2nd SRH + count = len(self.pg_packet_sizes) + dst_inner = 'a4::1234' + pkts = [] + + # packets with segments-left 2, active segment a3:: + packet_header = self.create_packet_header_IPv6_SRH_IPv6( + dst_inner, + sidlist=['a5::', 'a4::', 'a3::'], + segleft=2) + # create traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, + self.pg_packet_sizes, count)) + + # packets with segments-left 1, active segment a3:: + packet_header = self.create_packet_header_IPv6_SRH_IPv6( + dst_inner, + sidlist=['a4::', 'a3::', 'a2::'], + segleft=1) + # add to traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, + self.pg_packet_sizes, count)) + + # TODO: test behavior with SL=0 packet (needs 2*SRH?) + + # send packets and verify received packets + self.send_and_verify_pkts(self.pg0, pkts, self.pg1, + self.compare_rx_tx_packet_End) + + # log the localsid counters + self.logger.info(self.vapi.cli("show sr localsid")) + + # remove SRv6 localSIDs + localsid.remove_vpp_config() + + # remove FIB entries + # done by tearDown + + # cleanup interfaces + self.teardown_interfaces() + + def test_SRv6_End_with_PSP(self): + """ Test SRv6 End with PSP behavior. 
+ """ + # send traffic to one destination interface + # source and destination interfaces are IPv6 only + self.setup_interfaces(ipv6=[True, True]) + + # configure FIB entries + route = VppIpRoute(self, "a4::", 64, + [VppRoutePath(self.pg1.remote_ip6, + self.pg1.sw_if_index, + proto=DpoProto.DPO_PROTO_IP6)], + is_ip6=1) + route.add_vpp_config() + + # configure SRv6 localSID End with PSP behavior + localsid = VppSRv6LocalSID( + self, localsid_addr='A3::0', + behavior=SRv6LocalSIDBehaviors.SR_BEHAVIOR_END, + nh_addr='::', + end_psp=1, + sw_if_index=0, + vlan_index=0, + fib_table=0) + localsid.add_vpp_config() + # log the localsids + self.logger.debug(self.vapi.cli("show sr localsid")) + + # create IPv6 packets with SRH (SL=2, SL=1) + # send one packet per SL value per packet size + # SL=0 packet with localSID End with PSP is dropped + count = len(self.pg_packet_sizes) + dst_inner = 'a4::1234' + pkts = [] + + # packets with segments-left 2, active segment a3:: + packet_header = self.create_packet_header_IPv6_SRH_IPv6( + dst_inner, + sidlist=['a5::', 'a4::', 'a3::'], + segleft=2) + # create traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, + self.pg_packet_sizes, count)) + + # packets with segments-left 1, active segment a3:: + packet_header = self.create_packet_header_IPv6_SRH_IPv6( + dst_inner, + sidlist=['a4::', 'a3::', 'a2::'], + segleft=1) + # add to traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, + self.pg_packet_sizes, count)) + + # send packets and verify received packets + self.send_and_verify_pkts(self.pg0, pkts, self.pg1, + self.compare_rx_tx_packet_End_PSP) + + # log the localsid counters + self.logger.info(self.vapi.cli("show sr localsid")) + + # remove SRv6 localSIDs + localsid.remove_vpp_config() + + # remove FIB entries + # done by tearDown + + # cleanup interfaces + self.teardown_interfaces() + + def test_SRv6_End_X(self): + """ Test SRv6 End.X (without PSP) behavior. 
+ """ + # create three interfaces (1 source, 2 destinations) + # source and destination interfaces are IPv6 only + self.setup_interfaces(ipv6=[True, True, True]) + + # configure FIB entries + # a4::/64 via pg1 and pg2 + route = VppIpRoute(self, "a4::", 64, + [VppRoutePath(self.pg1.remote_ip6, + self.pg1.sw_if_index, + proto=DpoProto.DPO_PROTO_IP6), + VppRoutePath(self.pg2.remote_ip6, + self.pg2.sw_if_index, + proto=DpoProto.DPO_PROTO_IP6)], + is_ip6=1) + route.add_vpp_config() + self.logger.debug(self.vapi.cli("show ip6 fib")) + + # configure SRv6 localSID End.X without PSP behavior + # End.X points to interface pg1 + localsid = VppSRv6LocalSID( + self, localsid_addr='A3::C4', + behavior=SRv6LocalSIDBehaviors.SR_BEHAVIOR_X, + nh_addr=self.pg1.remote_ip6, + end_psp=0, + sw_if_index=self.pg1.sw_if_index, + vlan_index=0, + fib_table=0) + localsid.add_vpp_config() + # log the localsids + self.logger.debug(self.vapi.cli("show sr localsid")) + + # create IPv6 packets with SRH (SL=2, SL=1) + # send one packet per SL value per packet size + # SL=0 packet with localSID End with PSP is dropped + count = len(self.pg_packet_sizes) + dst_inner = 'a4::1234' + pkts = [] + + # packets with segments-left 2, active segment a3::c4 + packet_header = self.create_packet_header_IPv6_SRH_IPv6( + dst_inner, + sidlist=['a5::', 'a4::', 'a3::c4'], + segleft=2) + # create traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, + self.pg_packet_sizes, count)) + + # packets with segments-left 1, active segment a3::c4 + packet_header = self.create_packet_header_IPv6_SRH_IPv6( + dst_inner, + sidlist=['a4::', 'a3::c4', 'a2::'], + segleft=1) + # add to traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, + self.pg_packet_sizes, count)) + + # send packets and verify received packets + # using same comparison function as End (no PSP) + self.send_and_verify_pkts(self.pg0, pkts, self.pg1, + self.compare_rx_tx_packet_End) + + # assert nothing was received on the other interface (pg2) + self.pg2.assert_nothing_captured("mis-directed packet(s)") + + # log the localsid counters + self.logger.info(self.vapi.cli("show sr localsid")) + + # remove SRv6 localSIDs + localsid.remove_vpp_config() + + # remove FIB entries + # done by tearDown + + # cleanup interfaces + self.teardown_interfaces() + + def test_SRv6_End_X_with_PSP(self): + """ Test SRv6 End.X with PSP behavior. 
+ """ + # create three interfaces (1 source, 2 destinations) + # source and destination interfaces are IPv6 only + self.setup_interfaces(ipv6=[True, True, True]) + + # configure FIB entries + # a4::/64 via pg1 and pg2 + route = VppIpRoute(self, "a4::", 64, + [VppRoutePath(self.pg1.remote_ip6, + self.pg1.sw_if_index, + proto=DpoProto.DPO_PROTO_IP6), + VppRoutePath(self.pg2.remote_ip6, + self.pg2.sw_if_index, + proto=DpoProto.DPO_PROTO_IP6)], + is_ip6=1) + route.add_vpp_config() + + # configure SRv6 localSID End with PSP behavior + localsid = VppSRv6LocalSID( + self, localsid_addr='A3::C4', + behavior=SRv6LocalSIDBehaviors.SR_BEHAVIOR_X, + nh_addr=self.pg1.remote_ip6, + end_psp=1, + sw_if_index=self.pg1.sw_if_index, + vlan_index=0, + fib_table=0) + localsid.add_vpp_config() + # log the localsids + self.logger.debug(self.vapi.cli("show sr localsid")) + + # create IPv6 packets with SRH (SL=2, SL=1) + # send one packet per SL value per packet size + # SL=0 packet with localSID End with PSP is dropped + count = len(self.pg_packet_sizes) + dst_inner = 'a4::1234' + pkts = [] + + # packets with segments-left 2, active segment a3:: + packet_header = self.create_packet_header_IPv6_SRH_IPv6( + dst_inner, + sidlist=['a5::', 'a4::', 'a3::c4'], + segleft=2) + # create traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, + self.pg_packet_sizes, count)) + + # packets with segments-left 1, active segment a3:: + packet_header = self.create_packet_header_IPv6_SRH_IPv6( + dst_inner, + sidlist=['a4::', 'a3::c4', 'a2::'], + segleft=1) + # add to traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, + self.pg_packet_sizes, count)) + + # send packets and verify received packets + # using same comparison function as End with PSP + self.send_and_verify_pkts(self.pg0, pkts, self.pg1, + self.compare_rx_tx_packet_End_PSP) + + # assert nothing was received on the other interface (pg2) + self.pg2.assert_nothing_captured("mis-directed packet(s)") + + # log the localsid counters + self.logger.info(self.vapi.cli("show sr localsid")) + + # remove SRv6 localSIDs + localsid.remove_vpp_config() + + # remove FIB entries + # done by tearDown + + # cleanup interfaces + self.teardown_interfaces() + + def test_SRv6_End_DX6(self): + """ Test SRv6 End.DX6 behavior. 
+ """ + # send traffic to one destination interface + # source and destination interfaces are IPv6 only + self.setup_interfaces(ipv6=[True, True]) + + # configure SRv6 localSID End.DX6 behavior + localsid = VppSRv6LocalSID( + self, localsid_addr='a3::c4', + behavior=SRv6LocalSIDBehaviors.SR_BEHAVIOR_DX6, + nh_addr=self.pg1.remote_ip6, + end_psp=0, + sw_if_index=self.pg1.sw_if_index, + vlan_index=0, + fib_table=0) + localsid.add_vpp_config() + # log the localsids + self.logger.debug(self.vapi.cli("show sr localsid")) + + # create IPv6 packets with SRH (SL=0) + # send one packet per packet size + count = len(self.pg_packet_sizes) + dst_inner = 'a4::1234' # inner header destination address + pkts = [] + + # packets with SRH, segments-left 0, active segment a3::c4 + packet_header = self.create_packet_header_IPv6_SRH_IPv6( + dst_inner, + sidlist=['a3::c4', 'a2::', 'a1::'], + segleft=0) + # add to traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, + self.pg_packet_sizes, count)) + + # packets without SRH, IPv6 in IPv6 + # outer IPv6 dest addr is the localsid End.DX6 + packet_header = self.create_packet_header_IPv6_IPv6( + dst_inner, + dst_outer='a3::c4') + # add to traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, + self.pg_packet_sizes, count)) + + # send packets and verify received packets + self.send_and_verify_pkts(self.pg0, pkts, self.pg1, + self.compare_rx_tx_packet_End_DX6) + + # log the localsid counters + self.logger.info(self.vapi.cli("show sr localsid")) + + # remove SRv6 localSIDs + localsid.remove_vpp_config() + + # cleanup interfaces + self.teardown_interfaces() + + def test_SRv6_End_DT6(self): + """ Test SRv6 End.DT6 behavior. + """ + # create three interfaces (1 source, 2 destinations) + # all interfaces are IPv6 only + # source interface in global FIB (0) + # destination interfaces in global and vrf + vrf_1 = 1 + self.setup_interfaces(ipv6=[True, True, True], + ipv6_table_id=[0, 0, vrf_1]) + + # configure FIB entries + # a4::/64 is reachable + # via pg1 in table 0 (global) + # and via pg2 in table vrf_1 + route0 = VppIpRoute(self, "a4::", 64, + [VppRoutePath(self.pg1.remote_ip6, + self.pg1.sw_if_index, + proto=DpoProto.DPO_PROTO_IP6, + nh_table_id=0)], + table_id=0, + is_ip6=1) + route0.add_vpp_config() + route1 = VppIpRoute(self, "a4::", 64, + [VppRoutePath(self.pg2.remote_ip6, + self.pg2.sw_if_index, + proto=DpoProto.DPO_PROTO_IP6, + nh_table_id=vrf_1)], + table_id=vrf_1, + is_ip6=1) + route1.add_vpp_config() + self.logger.debug(self.vapi.cli("show ip6 fib")) + + # configure SRv6 localSID End.DT6 behavior + # Note: + # fib_table: where the localsid is installed + # sw_if_index: in T-variants of localsid this is the vrf table_id + localsid = VppSRv6LocalSID( + self, localsid_addr='a3::c4', + behavior=SRv6LocalSIDBehaviors.SR_BEHAVIOR_DT6, + nh_addr='::', + end_psp=0, + sw_if_index=vrf_1, + vlan_index=0, + fib_table=0) + localsid.add_vpp_config() + # log the localsids + self.logger.debug(self.vapi.cli("show sr localsid")) + + # create IPv6 packets with SRH (SL=0) + # send one packet per packet size + count = len(self.pg_packet_sizes) + dst_inner = 'a4::1234' # inner header destination address + pkts = [] + + # packets with SRH, segments-left 0, active segment a3::c4 + packet_header = self.create_packet_header_IPv6_SRH_IPv6( + dst_inner, + sidlist=['a3::c4', 'a2::', 'a1::'], + segleft=0) + # add to traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg2, packet_header, + 
self.pg_packet_sizes, count)) + + # packets without SRH, IPv6 in IPv6 + # outer IPv6 dest addr is the localsid End.DT6 + packet_header = self.create_packet_header_IPv6_IPv6( + dst_inner, + dst_outer='a3::c4') + # add to traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg2, packet_header, + self.pg_packet_sizes, count)) + + # send packets and verify received packets + # using same comparison function as End.DX6 + self.send_and_verify_pkts(self.pg0, pkts, self.pg2, + self.compare_rx_tx_packet_End_DX6) + + # assert nothing was received on the other interface (pg2) + self.pg1.assert_nothing_captured("mis-directed packet(s)") + + # log the localsid counters + self.logger.info(self.vapi.cli("show sr localsid")) + + # remove SRv6 localSIDs + localsid.remove_vpp_config() + + # remove FIB entries + # done by tearDown + + # cleanup interfaces + self.teardown_interfaces() + + def test_SRv6_End_DX4(self): + """ Test SRv6 End.DX4 behavior. + """ + # send traffic to one destination interface + # source interface is IPv6 only + # destination interface is IPv4 only + self.setup_interfaces(ipv6=[True, False], ipv4=[False, True]) + + # configure SRv6 localSID End.DX4 behavior + localsid = VppSRv6LocalSID( + self, localsid_addr='a3::c4', + behavior=SRv6LocalSIDBehaviors.SR_BEHAVIOR_DX4, + nh_addr=self.pg1.remote_ip4, + end_psp=0, + sw_if_index=self.pg1.sw_if_index, + vlan_index=0, + fib_table=0) + localsid.add_vpp_config() + # log the localsids + self.logger.debug(self.vapi.cli("show sr localsid")) + + # send one packet per packet size + count = len(self.pg_packet_sizes) + dst_inner = '4.1.1.123' # inner header destination address + pkts = [] + + # packets with SRH, segments-left 0, active segment a3::c4 + packet_header = self.create_packet_header_IPv6_SRH_IPv4( + dst_inner, + sidlist=['a3::c4', 'a2::', 'a1::'], + segleft=0) + # add to traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, + self.pg_packet_sizes, count)) + + # packets without SRH, IPv4 in IPv6 + # outer IPv6 dest addr is the localsid End.DX4 + packet_header = self.create_packet_header_IPv6_IPv4( + dst_inner, + dst_outer='a3::c4') + # add to traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, + self.pg_packet_sizes, count)) + + # send packets and verify received packets + self.send_and_verify_pkts(self.pg0, pkts, self.pg1, + self.compare_rx_tx_packet_End_DX4) + + # log the localsid counters + self.logger.info(self.vapi.cli("show sr localsid")) + + # remove SRv6 localSIDs + localsid.remove_vpp_config() + + # cleanup interfaces + self.teardown_interfaces() + + def test_SRv6_End_DT4(self): + """ Test SRv6 End.DT4 behavior. 
+ """ + # create three interfaces (1 source, 2 destinations) + # source interface is IPv6-only + # destination interfaces are IPv4 only + # source interface in global FIB (0) + # destination interfaces in global and vrf + vrf_1 = 1 + self.setup_interfaces(ipv6=[True, False, False], + ipv4=[False, True, True], + ipv6_table_id=[0, 0, 0], + ipv4_table_id=[0, 0, vrf_1]) + + # configure FIB entries + # 4.1.1.0/24 is reachable + # via pg1 in table 0 (global) + # and via pg2 in table vrf_1 + route0 = VppIpRoute(self, "4.1.1.0", 24, + [VppRoutePath(self.pg1.remote_ip4, + self.pg1.sw_if_index, + nh_table_id=0)], + table_id=0, + is_ip6=0) + route0.add_vpp_config() + route1 = VppIpRoute(self, "4.1.1.0", 24, + [VppRoutePath(self.pg2.remote_ip4, + self.pg2.sw_if_index, + nh_table_id=vrf_1)], + table_id=vrf_1, + is_ip6=0) + route1.add_vpp_config() + self.logger.debug(self.vapi.cli("show ip fib")) + + # configure SRv6 localSID End.DT6 behavior + # Note: + # fib_table: where the localsid is installed + # sw_if_index: in T-variants of localsid: vrf table_id + localsid = VppSRv6LocalSID( + self, localsid_addr='a3::c4', + behavior=SRv6LocalSIDBehaviors.SR_BEHAVIOR_DT4, + nh_addr='::', + end_psp=0, + sw_if_index=vrf_1, + vlan_index=0, + fib_table=0) + localsid.add_vpp_config() + # log the localsids + self.logger.debug(self.vapi.cli("show sr localsid")) + + # create IPv6 packets with SRH (SL=0) + # send one packet per packet size + count = len(self.pg_packet_sizes) + dst_inner = '4.1.1.123' # inner header destination address + pkts = [] + + # packets with SRH, segments-left 0, active segment a3::c4 + packet_header = self.create_packet_header_IPv6_SRH_IPv4( + dst_inner, + sidlist=['a3::c4', 'a2::', 'a1::'], + segleft=0) + # add to traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg2, packet_header, + self.pg_packet_sizes, count)) + + # packets without SRH, IPv6 in IPv6 + # outer IPv6 dest addr is the localsid End.DX4 + packet_header = self.create_packet_header_IPv6_IPv4( + dst_inner, + dst_outer='a3::c4') + # add to traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg2, packet_header, + self.pg_packet_sizes, count)) + + # send packets and verify received packets + # using same comparison function as End.DX4 + self.send_and_verify_pkts(self.pg0, pkts, self.pg2, + self.compare_rx_tx_packet_End_DX4) + + # assert nothing was received on the other interface (pg2) + self.pg1.assert_nothing_captured("mis-directed packet(s)") + + # log the localsid counters + self.logger.info(self.vapi.cli("show sr localsid")) + + # remove SRv6 localSIDs + localsid.remove_vpp_config() + + # remove FIB entries + # done by tearDown + + # cleanup interfaces + self.teardown_interfaces() + + def test_SRv6_End_DX2(self): + """ Test SRv6 End.DX2 behavior. 
+ """ + # send traffic to one destination interface + # source interface is IPv6 only + self.setup_interfaces(ipv6=[True, False], ipv4=[False, False]) + + # configure SRv6 localSID End.DX2 behavior + localsid = VppSRv6LocalSID( + self, localsid_addr='a3::c4', + behavior=SRv6LocalSIDBehaviors.SR_BEHAVIOR_DX2, + nh_addr='::', + end_psp=0, + sw_if_index=self.pg1.sw_if_index, + vlan_index=0, + fib_table=0) + localsid.add_vpp_config() + # log the localsids + self.logger.debug(self.vapi.cli("show sr localsid")) + + # send one packet per packet size + count = len(self.pg_packet_sizes) + pkts = [] + + # packets with SRH, segments-left 0, active segment a3::c4 + # L2 has no dot1q header + packet_header = self.create_packet_header_IPv6_SRH_L2( + sidlist=['a3::c4', 'a2::', 'a1::'], + segleft=0, + vlan=0) + # add to traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, + self.pg_packet_sizes, count)) + + # packets with SRH, segments-left 0, active segment a3::c4 + # L2 has dot1q header + packet_header = self.create_packet_header_IPv6_SRH_L2( + sidlist=['a3::c4', 'a2::', 'a1::'], + segleft=0, + vlan=123) + # add to traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, + self.pg_packet_sizes, count)) + + # packets without SRH, L2 in IPv6 + # outer IPv6 dest addr is the localsid End.DX2 + # L2 has no dot1q header + packet_header = self.create_packet_header_IPv6_L2( + dst_outer='a3::c4', + vlan=0) + # add to traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, + self.pg_packet_sizes, count)) + + # packets without SRH, L2 in IPv6 + # outer IPv6 dest addr is the localsid End.DX2 + # L2 has dot1q header + packet_header = self.create_packet_header_IPv6_L2( + dst_outer='a3::c4', + vlan=123) + # add to traffic stream pg0->pg1 + pkts.extend(self.create_stream(self.pg0, self.pg1, packet_header, + self.pg_packet_sizes, count)) + + # send packets and verify received packets + self.send_and_verify_pkts(self.pg0, pkts, self.pg1, + self.compare_rx_tx_packet_End_DX2) + + # log the localsid counters + self.logger.info(self.vapi.cli("show sr localsid")) + + # remove SRv6 localSIDs + localsid.remove_vpp_config() + + # cleanup interfaces + self.teardown_interfaces() + + def compare_rx_tx_packet_T_Encaps(self, tx_pkt, rx_pkt): + """ Compare input and output packet after passing T.Encaps + + :param tx_pkt: transmitted packet + :param rx_pkt: received packet + """ + # T.Encaps updates the headers as follows: + # SR Policy seglist (S3, S2, S1) + # SR Policy source C + # IPv6: + # in: IPv6(A, B2) + # out: IPv6(C, S1)SRH(S3, S2, S1; SL=2)IPv6(A, B2) + # IPv6 + SRH: + # in: IPv6(A, B2)SRH(B3, B2, B1; SL=1) + # out: IPv6(C, S1)SRH(S3, S2, S1; SL=2)IPv6(a, B2)SRH(B3, B2, B1; SL=1) + + # get first (outer) IPv6 header of rx'ed packet + rx_ip = rx_pkt.getlayer(IPv6) + rx_srh = None + + tx_ip = tx_pkt.getlayer(IPv6) + + # expected segment-list + seglist = self.sr_policy.segments + # reverse list to get order as in SRH + tx_seglist = seglist[::-1] + + # get source address of SR Policy + sr_policy_source = self.sr_policy.source + + # rx'ed packet should have SRH + self.assertTrue(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting)) + # get SRH + rx_srh = rx_pkt.getlayer(IPv6ExtHdrSegmentRouting) + + # received ip.src should be equal to SR Policy source + self.assertEqual(rx_ip.src, sr_policy_source) + # received ip.dst should be equal to expected sidlist[lastentry] + self.assertEqual(rx_ip.dst, tx_seglist[-1]) + # rx'ed seglist should 
be equal to expected seglist + self.assertEqual(rx_srh.addresses, tx_seglist) + # segleft should be equal to size expected seglist-1 + self.assertEqual(rx_srh.segleft, len(tx_seglist)-1) + # segleft should be equal to lastentry + self.assertEqual(rx_srh.segleft, rx_srh.lastentry) + + # the whole rx'ed pkt beyond SRH should be equal to tx'ed pkt + # except for the hop-limit field + # -> update tx'ed hlim to the expected hlim + tx_ip.hlim = tx_ip.hlim - 1 + + self.assertEqual(rx_srh.payload, tx_ip) + + self.logger.debug("packet verification: SUCCESS") + + def compare_rx_tx_packet_T_Encaps_IPv4(self, tx_pkt, rx_pkt): + """ Compare input and output packet after passing T.Encaps for IPv4 + + :param tx_pkt: transmitted packet + :param rx_pkt: received packet + """ + # T.Encaps for IPv4 updates the headers as follows: + # SR Policy seglist (S3, S2, S1) + # SR Policy source C + # IPv4: + # in: IPv4(A, B2) + # out: IPv6(C, S1)SRH(S3, S2, S1; SL=2)IPv4(A, B2) + + # get first (outer) IPv6 header of rx'ed packet + rx_ip = rx_pkt.getlayer(IPv6) + rx_srh = None + + tx_ip = tx_pkt.getlayer(IP) + + # expected segment-list + seglist = self.sr_policy.segments + # reverse list to get order as in SRH + tx_seglist = seglist[::-1] + + # get source address of SR Policy + sr_policy_source = self.sr_policy.source + + # checks common to cases tx with and without SRH + # rx'ed packet should have SRH and IPv4 header + self.assertTrue(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting)) + self.assertTrue(rx_ip.payload.haslayer(IP)) + # get SRH + rx_srh = rx_pkt.getlayer(IPv6ExtHdrSegmentRouting) + + # received ip.src should be equal to SR Policy source + self.assertEqual(rx_ip.src, sr_policy_source) + # received ip.dst should be equal to sidlist[lastentry] + self.assertEqual(rx_ip.dst, tx_seglist[-1]) + # rx'ed seglist should be equal to seglist + self.assertEqual(rx_srh.addresses, tx_seglist) + # segleft should be equal to size seglist-1 + self.assertEqual(rx_srh.segleft, len(tx_seglist)-1) + # segleft should be equal to lastentry + self.assertEqual(rx_srh.segleft, rx_srh.lastentry) + + # the whole rx'ed pkt beyond SRH should be equal to tx'ed pkt + # except for the ttl field and ip checksum + # -> adjust tx'ed ttl to expected ttl + tx_ip.ttl = tx_ip.ttl - 1 + # -> set tx'ed ip checksum to None and let scapy recompute + tx_ip.chksum = None + # read back the pkt (with str()) to force computing these fields + # probably other ways to accomplish this are possible + tx_ip = IP(str(tx_ip)) + + self.assertEqual(rx_srh.payload, tx_ip) + + self.logger.debug("packet verification: SUCCESS") + + def compare_rx_tx_packet_T_Encaps_L2(self, tx_pkt, rx_pkt): + """ Compare input and output packet after passing T.Encaps for L2 + + :param tx_pkt: transmitted packet + :param rx_pkt: received packet + """ + # T.Encaps for L2 updates the headers as follows: + # SR Policy seglist (S3, S2, S1) + # SR Policy source C + # L2: + # in: L2 + # out: IPv6(C, S1)SRH(S3, S2, S1; SL=2)L2 + + # get first (outer) IPv6 header of rx'ed packet + rx_ip = rx_pkt.getlayer(IPv6) + rx_srh = None + + tx_ether = tx_pkt.getlayer(Ether) + + # expected segment-list + seglist = self.sr_policy.segments + # reverse list to get order as in SRH + tx_seglist = seglist[::-1] + + # get source address of SR Policy + sr_policy_source = self.sr_policy.source + + # rx'ed packet should have SRH + self.assertTrue(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting)) + # get SRH + rx_srh = rx_pkt.getlayer(IPv6ExtHdrSegmentRouting) + + # received ip.src should be equal to SR Policy source + 
self.assertEqual(rx_ip.src, sr_policy_source) + # received ip.dst should be equal to sidlist[lastentry] + self.assertEqual(rx_ip.dst, tx_seglist[-1]) + # rx'ed seglist should be equal to seglist + self.assertEqual(rx_srh.addresses, tx_seglist) + # segleft should be equal to size seglist-1 + self.assertEqual(rx_srh.segleft, len(tx_seglist)-1) + # segleft should be equal to lastentry + self.assertEqual(rx_srh.segleft, rx_srh.lastentry) + # nh should be "No Next Header" (59) + self.assertEqual(rx_srh.nh, 59) + + # the whole rx'ed pkt beyond SRH should be equal to tx'ed pkt + self.assertEqual(Ether(str(rx_srh.payload)), tx_ether) + + self.logger.debug("packet verification: SUCCESS") + + def compare_rx_tx_packet_T_Insert(self, tx_pkt, rx_pkt): + """ Compare input and output packet after passing T.Insert + + :param tx_pkt: transmitted packet + :param rx_pkt: received packet + """ + # T.Insert updates the headers as follows: + # IPv6: + # in: IPv6(A, B2) + # out: IPv6(A, S1)SRH(B2, S3, S2, S1; SL=3) + # IPv6 + SRH: + # in: IPv6(A, B2)SRH(B3, B2, B1; SL=1) + # out: IPv6(A, S1)SRH(B2, S3, S2, S1; SL=3)SRH(B3, B2, B1; SL=1) + + # get first (outer) IPv6 header of rx'ed packet + rx_ip = rx_pkt.getlayer(IPv6) + rx_srh = None + rx_ip2 = None + rx_srh2 = None + rx_ip3 = None + rx_udp = rx_pkt[UDP] + + tx_ip = tx_pkt.getlayer(IPv6) + tx_srh = None + tx_ip2 = None + # some packets have been tx'ed with an SRH, some without it + # get SRH if tx'ed packet has it + if tx_pkt.haslayer(IPv6ExtHdrSegmentRouting): + tx_srh = tx_pkt.getlayer(IPv6ExtHdrSegmentRouting) + tx_ip2 = tx_pkt.getlayer(IPv6, 2) + tx_udp = tx_pkt[UDP] + + # expected segment-list (make copy of SR Policy segment list) + seglist = self.sr_policy.segments[:] + # expected seglist has initial dest addr as last segment + seglist.append(tx_ip.dst) + # reverse list to get order as in SRH + tx_seglist = seglist[::-1] + + # get source address of SR Policy + sr_policy_source = self.sr_policy.source + + # checks common to cases tx with and without SRH + # rx'ed packet should have SRH and only one IPv6 header + self.assertTrue(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting)) + self.assertFalse(rx_ip.payload.haslayer(IPv6)) + # get SRH + rx_srh = rx_pkt.getlayer(IPv6ExtHdrSegmentRouting) + + # rx'ed ip.src should be equal to tx'ed ip.src + self.assertEqual(rx_ip.src, tx_ip.src) + # rx'ed ip.dst should be equal to sidlist[lastentry] + self.assertEqual(rx_ip.dst, tx_seglist[-1]) + + # rx'ed seglist should be equal to expected seglist + self.assertEqual(rx_srh.addresses, tx_seglist) + # segleft should be equal to size(expected seglist)-1 + self.assertEqual(rx_srh.segleft, len(tx_seglist)-1) + # segleft should be equal to lastentry + self.assertEqual(rx_srh.segleft, rx_srh.lastentry) + + if tx_srh: # packet was tx'ed with SRH + # packet should have 2nd SRH + self.assertTrue(rx_srh.payload.haslayer(IPv6ExtHdrSegmentRouting)) + # get 2nd SRH + rx_srh2 = rx_pkt.getlayer(IPv6ExtHdrSegmentRouting, 2) + + # rx'ed srh2.addresses should be equal to tx'ed srh.addresses + self.assertEqual(rx_srh2.addresses, tx_srh.addresses) + # rx'ed srh2.segleft should be equal to tx'ed srh.segleft + self.assertEqual(rx_srh2.segleft, tx_srh.segleft) + # rx'ed srh2.lastentry should be equal to tx'ed srh.lastentry + self.assertEqual(rx_srh2.lastentry, tx_srh.lastentry) + + else: # packet was tx'ed without SRH + # rx packet should have no other SRH + self.assertFalse(rx_srh.payload.haslayer(IPv6ExtHdrSegmentRouting)) + + # UDP layer should be unchanged + self.assertEqual(rx_udp, tx_udp) + + 
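+ # Worked example (values used in test_SRv6_T_Insert): with policy
+ # segments ['a4::', 'a5::', 'a6::c7'] and original dst 'a7::1234',
+ # the expected SRH carries ['a7::1234', 'a6::c7', 'a5::', 'a4::']
+ # with segleft=3, and ip.dst becomes 'a4::' (the active segment).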
self.logger.debug("packet verification: SUCCESS") + + def compare_rx_tx_packet_End(self, tx_pkt, rx_pkt): + """ Compare input and output packet after passing End (without PSP) + + :param tx_pkt: transmitted packet + :param rx_pkt: received packet + """ + # End (no PSP) updates the headers as follows: + # IPv6 + SRH: + # in: IPv6(A, S1)SRH(S3, S2, S1; SL=2) + # out: IPv6(A, S2)SRH(S3, S2, S1; SL=1) + + # get first (outer) IPv6 header of rx'ed packet + rx_ip = rx_pkt.getlayer(IPv6) + rx_srh = None + rx_ip2 = None + rx_udp = rx_pkt[UDP] + + tx_ip = tx_pkt.getlayer(IPv6) + # we know the packet has been tx'ed + # with an inner IPv6 header and an SRH + tx_ip2 = tx_pkt.getlayer(IPv6, 2) + tx_srh = tx_pkt.getlayer(IPv6ExtHdrSegmentRouting) + tx_udp = tx_pkt[UDP] + + # common checks, regardless of tx segleft value + # rx'ed packet should have 2nd IPv6 header + self.assertTrue(rx_ip.payload.haslayer(IPv6)) + # get second (inner) IPv6 header + rx_ip2 = rx_pkt.getlayer(IPv6, 2) + + if tx_ip.segleft > 0: + # SRH should NOT have been popped: + # End SID without PSP does not pop SRH if segleft>0 + self.assertTrue(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting)) + rx_srh = rx_pkt.getlayer(IPv6ExtHdrSegmentRouting) + + # received ip.src should be equal to expected ip.src + self.assertEqual(rx_ip.src, tx_ip.src) + # sidlist should be unchanged + self.assertEqual(rx_srh.addresses, tx_srh.addresses) + # segleft should have been decremented + self.assertEqual(rx_srh.segleft, tx_srh.segleft-1) + # received ip.dst should be equal to sidlist[segleft] + self.assertEqual(rx_ip.dst, rx_srh.addresses[rx_srh.segleft]) + # lastentry should be unchanged + self.assertEqual(rx_srh.lastentry, tx_srh.lastentry) + # inner IPv6 packet (ip2) should be unchanged + self.assertEqual(rx_ip2.src, tx_ip2.src) + self.assertEqual(rx_ip2.dst, tx_ip2.dst) + # else: # tx_ip.segleft == 0 + # TODO: Does this work with 2 SRHs in ingress packet? 
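+ # A sketch of such an SL=0 ingress packet, built with the 2-SRH
+ # helper defined further below (the second sid list is only
+ # illustrative, not taken from this test):
+ # self.create_packet_header_IPv6_SRH_SRH_IPv6(
+ # 'a4::1234',
+ # sidlist1=['a3::', 'a2::', 'a1::'], segleft1=0,
+ # sidlist2=['b3::', 'b2::', 'b1::'], segleft2=2)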
+ + # UDP layer should be unchanged + self.assertEqual(rx_udp, tx_udp) + + self.logger.debug("packet verification: SUCCESS") + + def compare_rx_tx_packet_End_PSP(self, tx_pkt, rx_pkt): + """ Compare input and output packet after passing End with PSP + + :param tx_pkt: transmitted packet + :param rx_pkt: received packet + """ + # End (PSP) updates the headers as follows: + # IPv6 + SRH (SL>1): + # in: IPv6(A, S1)SRH(S3, S2, S1; SL=2) + # out: IPv6(A, S2)SRH(S3, S2, S1; SL=1) + # IPv6 + SRH (SL=1): + # in: IPv6(A, S2)SRH(S3, S2, S1; SL=1) + # out: IPv6(A, S3) + + # get first (outer) IPv6 header of rx'ed packet + rx_ip = rx_pkt.getlayer(IPv6) + rx_srh = None + rx_ip2 = None + rx_udp = rx_pkt[UDP] + + tx_ip = tx_pkt.getlayer(IPv6) + # we know the packet has been tx'ed + # with an inner IPv6 header and an SRH + tx_ip2 = tx_pkt.getlayer(IPv6, 2) + tx_srh = tx_pkt.getlayer(IPv6ExtHdrSegmentRouting) + tx_udp = tx_pkt[UDP] + + # common checks, regardless of tx segleft value + self.assertTrue(rx_ip.payload.haslayer(IPv6)) + rx_ip2 = rx_pkt.getlayer(IPv6, 2) + # inner IPv6 packet (ip2) should be unchanged + self.assertEqual(rx_ip2.src, tx_ip2.src) + self.assertEqual(rx_ip2.dst, tx_ip2.dst) + + if tx_ip.segleft > 1: + # SRH should NOT have been popped: + # End SID with PSP does not pop SRH if segleft>1 + # rx'ed packet should have SRH + self.assertTrue(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting)) + rx_srh = rx_pkt.getlayer(IPv6ExtHdrSegmentRouting) + + # received ip.src should be equal to expected ip.src + self.assertEqual(rx_ip.src, tx_ip.src) + # sidlist should be unchanged + self.assertEqual(rx_srh.addresses, tx_srh.addresses) + # segleft should have been decremented + self.assertEqual(rx_srh.segleft, tx_srh.segleft-1) + # received ip.dst should be equal to sidlist[segleft] + self.assertEqual(rx_ip.dst, rx_srh.addresses[rx_srh.segleft]) + # lastentry should be unchanged + self.assertEqual(rx_srh.lastentry, tx_srh.lastentry) + + else: # tx_ip.segleft <= 1 + # SRH should have been popped: + # End SID with PSP and segleft=1 pops SRH + # the two IPv6 headers are still present + # outer IPv6 header has DA == last segment of popped SRH + # SRH should not be present + self.assertFalse(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting)) + # outer IPv6 header ip.src should be equal to tx'ed ip.src + self.assertEqual(rx_ip.src, tx_ip.src) + # outer IPv6 header ip.dst should be = to tx'ed sidlist[segleft-1] + self.assertEqual(rx_ip.dst, tx_srh.addresses[tx_srh.segleft-1]) + + # UDP layer should be unchanged + self.assertEqual(rx_udp, tx_udp) + + self.logger.debug("packet verification: SUCCESS") + + def compare_rx_tx_packet_End_DX6(self, tx_pkt, rx_pkt): + """ Compare input and output packet after passing End.DX6 + + :param tx_pkt: transmitted packet + :param rx_pkt: received packet + """ + # End.DX6 updates the headers as follows: + # IPv6 + SRH (SL=0): + # in: IPv6(A, S3)SRH(S3, S2, S1; SL=0)IPv6(B, D) + # out: IPv6(B, D) + # IPv6: + # in: IPv6(A, S3)IPv6(B, D) + # out: IPv6(B, D) + + # get first (outer) IPv6 header of rx'ed packet + rx_ip = rx_pkt.getlayer(IPv6) + + tx_ip = tx_pkt.getlayer(IPv6) + tx_ip2 = tx_pkt.getlayer(IPv6, 2) + + # verify if rx'ed packet has no SRH + self.assertFalse(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting)) + + # the whole rx_ip pkt should be equal to tx_ip2 + # except for the hlim field + # -> adjust tx'ed hlim to expected hlim + tx_ip2.hlim = tx_ip2.hlim - 1 + + self.assertEqual(rx_ip, tx_ip2) + + self.logger.debug("packet verification: SUCCESS") + + def compare_rx_tx_packet_End_DX4(self, 
tx_pkt, rx_pkt): + """ Compare input and output packet after passing End.DX4 + + :param tx_pkt: transmitted packet + :param rx_pkt: received packet + """ + # End.DX4 updates the headers as follows: + # IPv6 + SRH (SL=0): + # in: IPv6(A, S3)SRH(S3, S2, S1; SL=0)IPv4(B, D) + # out: IPv4(B, D) + # IPv6: + # in: IPv6(A, S3)IPv4(B, D) + # out: IPv4(B, D) + + # get IPv4 header of rx'ed packet + rx_ip = rx_pkt.getlayer(IP) + + tx_ip = tx_pkt.getlayer(IPv6) + tx_ip2 = tx_pkt.getlayer(IP) + + # verify if rx'ed packet has no SRH + self.assertFalse(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting)) + + # the whole rx_ip pkt should be equal to tx_ip2 + # except for the ttl field and ip checksum + # -> adjust tx'ed ttl to expected ttl + tx_ip2.ttl = tx_ip2.ttl - 1 + # -> set tx'ed ip checksum to None and let scapy recompute + tx_ip2.chksum = None + # read back the pkt (with str()) to force computing these fields + # probably other ways to accomplish this are possible + tx_ip2 = IP(str(tx_ip2)) + + self.assertEqual(rx_ip, tx_ip2) + + self.logger.debug("packet verification: SUCCESS") + + def compare_rx_tx_packet_End_DX2(self, tx_pkt, rx_pkt): + """ Compare input and output packet after passing End.DX2 + + :param tx_pkt: transmitted packet + :param rx_pkt: received packet + """ + # End.DX2 updates the headers as follows: + # IPv6 + SRH (SL=0): + # in: IPv6(A, S3)SRH(S3, S2, S1; SL=0)L2 + # out: L2 + # IPv6: + # in: IPv6(A, S3)L2 + # out: L2 + + # get IPv4 header of rx'ed packet + rx_eth = rx_pkt.getlayer(Ether) + + tx_ip = tx_pkt.getlayer(IPv6) + # we can't just get the 2nd Ether layer + # get the Raw content and dissect it as Ether + tx_eth1 = Ether(str(tx_pkt[Raw])) + + # verify if rx'ed packet has no SRH + self.assertFalse(rx_pkt.haslayer(IPv6ExtHdrSegmentRouting)) + + # the whole rx_eth pkt should be equal to tx_eth1 + self.assertEqual(rx_eth, tx_eth1) + + self.logger.debug("packet verification: SUCCESS") + + def create_stream(self, src_if, dst_if, packet_header, packet_sizes, + count): + """Create SRv6 input packet stream for defined interface. 
+ + :param VppInterface src_if: Interface to create packet stream for + :param VppInterface dst_if: destination interface of packet stream + :param packet_header: Layer3 scapy packet headers, + L2 is added when not provided, + Raw(payload) with packet_info is added + :param list packet_sizes: packet stream pckt sizes,sequentially applied + to packets in stream have + :param int count: number of packets in packet stream + :return: list of packets + """ + self.logger.info("Creating packets") + pkts = [] + for i in range(0, count-1): + payload_info = self.create_packet_info(src_if, dst_if) + self.logger.debug( + "Creating packet with index %d" % (payload_info.index)) + payload = self.info_to_payload(payload_info) + # add L2 header if not yet provided in packet_header + if packet_header.getlayer(0).name == 'Ethernet': + p = (packet_header / + Raw(payload)) + else: + p = (Ether(dst=src_if.local_mac, src=src_if.remote_mac) / + packet_header / + Raw(payload)) + size = packet_sizes[i % len(packet_sizes)] + self.logger.debug("Packet size %d" % (size)) + self.extend_packet(p, size) + # we need to store the packet with the automatic fields computed + # read back the dumped packet (with str()) + # to force computing these fields + # probably other ways are possible + p = Ether(str(p)) + payload_info.data = p.copy() + self.logger.debug(ppp("Created packet:", p)) + pkts.append(p) + self.logger.info("Done creating packets") + return pkts + + def send_and_verify_pkts(self, input, pkts, output, compare_func): + """Send packets and verify received packets using compare_func + + :param input: ingress interface of DUT + :param pkts: list of packets to transmit + :param output: egress interface of DUT + :param compare_func: function to compare in and out packets + """ + # add traffic stream to input interface + input.add_stream(pkts) + + # enable capture on all interfaces + self.pg_enable_capture(self.pg_interfaces) + + # start traffic + self.logger.info("Starting traffic") + self.pg_start() + + # get output capture + self.logger.info("Getting packet capture") + capture = output.get_capture() + + # assert nothing was captured on input interface + input.assert_nothing_captured() + + # verify captured packets + self.verify_captured_pkts(output, capture, compare_func) + + def create_packet_header_IPv6(self, dst): + """Create packet header: IPv6 header, UDP header + + :param dst: IPv6 destination address + + IPv6 source address is 1234::1 + UDP source port and destination port are 1234 + """ + + p = (IPv6(src='1234::1', dst=dst) / + UDP(sport=1234, dport=1234)) + return p + + def create_packet_header_IPv6_SRH(self, sidlist, segleft): + """Create packet header: IPv6 header with SRH, UDP header + + :param list sidlist: segment list + :param int segleft: segments-left field value + + IPv6 destination address is set to sidlist[segleft] + IPv6 source addresses are 1234::1 and 4321::1 + UDP source port and destination port are 1234 + """ + + p = (IPv6(src='1234::1', dst=sidlist[segleft]) / + IPv6ExtHdrSegmentRouting(addresses=sidlist) / + UDP(sport=1234, dport=1234)) + return p + + def create_packet_header_IPv6_SRH_IPv6(self, dst, sidlist, segleft): + """Create packet header: IPv6 encapsulated in SRv6: + IPv6 header with SRH, IPv6 header, UDP header + + :param ipv6address dst: inner IPv6 destination address + :param list sidlist: segment list of outer IPv6 SRH + :param int segleft: segments-left field of outer IPv6 SRH + + Outer IPv6 destination address is set to sidlist[segleft] + IPv6 source addresses are 1234::1 and 
4321::1 + UDP source port and destination port are 1234 + """ + + p = (IPv6(src='1234::1', dst=sidlist[segleft]) / + IPv6ExtHdrSegmentRouting(addresses=sidlist, + segleft=segleft, nh=41) / + IPv6(src='4321::1', dst=dst) / + UDP(sport=1234, dport=1234)) + return p + + def create_packet_header_IPv6_IPv6(self, dst_inner, dst_outer): + """Create packet header: IPv6 encapsulated in IPv6: + IPv6 header, IPv6 header, UDP header + + :param ipv6address dst_inner: inner IPv6 destination address + :param ipv6address dst_outer: outer IPv6 destination address + + IPv6 source addresses are 1234::1 and 4321::1 + UDP source port and destination port are 1234 + """ + + p = (IPv6(src='1234::1', dst=dst_outer) / + IPv6(src='4321::1', dst=dst_inner) / + UDP(sport=1234, dport=1234)) + return p + + def create_packet_header_IPv6_SRH_SRH_IPv6(self, dst, sidlist1, segleft1, + sidlist2, segleft2): + """Create packet header: IPv6 encapsulated in SRv6 with 2 SRH: + IPv6 header with SRH, 2nd SRH, IPv6 header, UDP header + + :param ipv6address dst: inner IPv6 destination address + :param list sidlist1: segment list of outer IPv6 SRH + :param int segleft1: segments-left field of outer IPv6 SRH + :param list sidlist2: segment list of inner IPv6 SRH + :param int segleft2: segments-left field of inner IPv6 SRH + + Outer IPv6 destination address is set to sidlist[segleft] + IPv6 source addresses are 1234::1 and 4321::1 + UDP source port and destination port are 1234 + """ + + p = (IPv6(src='1234::1', dst=sidlist1[segleft1]) / + IPv6ExtHdrSegmentRouting(addresses=sidlist1, + segleft=segleft1, nh=43) / + IPv6ExtHdrSegmentRouting(addresses=sidlist2, + segleft=segleft2, nh=41) / + IPv6(src='4321::1', dst=dst) / + UDP(sport=1234, dport=1234)) + return p + + def create_packet_header_IPv4(self, dst): + """Create packet header: IPv4 header, UDP header + + :param dst: IPv4 destination address + + IPv4 source address is 123.1.1.1 + UDP source port and destination port are 1234 + """ + + p = (IP(src='123.1.1.1', dst=dst) / + UDP(sport=1234, dport=1234)) + return p + + def create_packet_header_IPv6_IPv4(self, dst_inner, dst_outer): + """Create packet header: IPv4 encapsulated in IPv6: + IPv6 header, IPv4 header, UDP header + + :param ipv4address dst_inner: inner IPv4 destination address + :param ipv6address dst_outer: outer IPv6 destination address + + IPv6 source address is 1234::1 + IPv4 source address is 123.1.1.1 + UDP source port and destination port are 1234 + """ + + p = (IPv6(src='1234::1', dst=dst_outer) / + IP(src='123.1.1.1', dst=dst_inner) / + UDP(sport=1234, dport=1234)) + return p + + def create_packet_header_IPv6_SRH_IPv4(self, dst, sidlist, segleft): + """Create packet header: IPv4 encapsulated in SRv6: + IPv6 header with SRH, IPv4 header, UDP header + + :param ipv4address dst: inner IPv4 destination address + :param list sidlist: segment list of outer IPv6 SRH + :param int segleft: segments-left field of outer IPv6 SRH + + Outer IPv6 destination address is set to sidlist[segleft] + IPv6 source address is 1234::1 + IPv4 source address is 123.1.1.1 + UDP source port and destination port are 1234 + """ + + p = (IPv6(src='1234::1', dst=sidlist[segleft]) / + IPv6ExtHdrSegmentRouting(addresses=sidlist, + segleft=segleft, nh=4) / + IP(src='123.1.1.1', dst=dst) / + UDP(sport=1234, dport=1234)) + return p + + def create_packet_header_L2(self, vlan=0): + """Create packet header: L2 header + + :param vlan: if vlan!=0 then add 802.1q header + """ + # Note: the dst addr ('00:55:44:33:22:11') is used in + # the compare function 
compare_rx_tx_packet_T_Encaps_L2 + # to detect presence of L2 in SRH payload + p = Ether(src='00:11:22:33:44:55', dst='00:55:44:33:22:11') + etype = 0x8137 # IPX + if vlan: + # add 802.1q layer + p /= Dot1Q(vlan=vlan, type=etype) + else: + p.type = etype + return p + + def create_packet_header_IPv6_SRH_L2(self, sidlist, segleft, vlan=0): + """Create packet header: L2 encapsulated in SRv6: + IPv6 header with SRH, L2 + + :param list sidlist: segment list of outer IPv6 SRH + :param int segleft: segments-left field of outer IPv6 SRH + :param vlan: L2 vlan; if vlan!=0 then add 802.1q header + + Outer IPv6 destination address is set to sidlist[segleft] + IPv6 source address is 1234::1 + """ + eth = Ether(src='00:11:22:33:44:55', dst='00:55:44:33:22:11') + etype = 0x8137 # IPX + if vlan: + # add 802.1q layer + eth /= Dot1Q(vlan=vlan, type=etype) + else: + eth.type = etype + + p = (IPv6(src='1234::1', dst=sidlist[segleft]) / + IPv6ExtHdrSegmentRouting(addresses=sidlist, + segleft=segleft, nh=59) / + eth) + return p + + def create_packet_header_IPv6_L2(self, dst_outer, vlan=0): + """Create packet header: L2 encapsulated in IPv6: + IPv6 header, L2 + + :param ipv6address dst_outer: outer IPv6 destination address + :param vlan: L2 vlan; if vlan!=0 then add 802.1q header + """ + eth = Ether(src='00:11:22:33:44:55', dst='00:55:44:33:22:11') + etype = 0x8137 # IPX + if vlan: + # add 802.1q layer + eth /= Dot1Q(vlan=vlan, type=etype) + else: + eth.type = etype + + p = (IPv6(src='1234::1', dst=dst_outer, nh=59) / eth) + return p + + def get_payload_info(self, packet): + """ Extract the payload_info from the packet + """ + # in most cases, payload_info is in packet[Raw] + # but packet[Raw] gives the complete payload + # (incl L2 header) for the T.Encaps L2 case + try: + payload_info = self.payload_to_info(str(packet[Raw])) + + except: + # remote L2 header from packet[Raw]: + # take packet[Raw], convert it to an Ether layer + # and then extract Raw from it + payload_info = self.payload_to_info( + str(Ether(str(packet[Raw]))[Raw])) + + return payload_info + + def verify_captured_pkts(self, dst_if, capture, compare_func): + """ + Verify captured packet stream for specified interface. 
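A typical calling sequence, sketched for illustration only (the interface
names pg0/pg1, the stream-builder name create_stream, the packet sizes and
count, and the choice of compare function are assumptions about the
surrounding test class, not guarantees of this module):

    # hypothetical test body combining the helpers defined above
    pkt_hdr = self.create_packet_header_L2()
    pkts = self.create_stream(self.pg0, self.pg1, pkt_hdr,
                              [64, 512, 1518], count=32)
    self.send_and_verify_pkts(self.pg0, pkts, self.pg1,
                              self.compare_rx_tx_packet_T_Encaps_L2)

send_and_verify_pkts captures on the egress interface and then invokes this
method to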
+ Compare ingress with egress packets using the specified compare fn + + :param dst_if: egress interface of DUT + :param capture: captured packets + :param compare_func: function to compare in and out packet + """ + self.logger.info("Verifying capture on interface %s using function %s" + % (dst_if.name, compare_func.func_name)) + + last_info = dict() + for i in self.pg_interfaces: + last_info[i.sw_if_index] = None + dst_sw_if_index = dst_if.sw_if_index + + for packet in capture: + try: + # extract payload_info from packet's payload + payload_info = self.get_payload_info(packet) + packet_index = payload_info.index + + self.logger.debug("Verifying packet with index %d" + % (packet_index)) + # packet should have arrived on the expected interface + self.assertEqual(payload_info.dst, dst_sw_if_index) + self.logger.debug( + "Got packet on interface %s: src=%u (idx=%u)" % + (dst_if.name, payload_info.src, packet_index)) + + # search for payload_info with same src and dst if_index + # this will give us the transmitted packet + next_info = self.get_next_packet_info_for_interface2( + payload_info.src, dst_sw_if_index, + last_info[payload_info.src]) + last_info[payload_info.src] = next_info + # next_info should not be None + self.assertTrue(next_info is not None) + # index of tx and rx packets should be equal + self.assertEqual(packet_index, next_info.index) + # data field of next_info contains the tx packet + txed_packet = next_info.data + + self.logger.debug(ppp("Transmitted packet:", + txed_packet)) # ppp=Pretty Print Packet + + self.logger.debug(ppp("Received packet:", packet)) + + # compare rcvd packet with expected packet using compare_func + compare_func(txed_packet, packet) + + except: + print packet.command() + self.logger.error(ppp("Unexpected or invalid packet:", packet)) + raise + + # have all expected packets arrived? + for i in self.pg_interfaces: + remaining_packet = self.get_next_packet_info_for_interface2( + i.sw_if_index, dst_sw_if_index, last_info[i.sw_if_index]) + self.assertTrue(remaining_packet is None, + "Interface %s: Packet expected from interface %s " + "didn't arrive" % (dst_if.name, i.name)) + + +if __name__ == '__main__': + unittest.main(testRunner=VppTestRunner) diff --git a/test/vpp_papi_provider.py b/test/vpp_papi_provider.py index 1daa2a9e..95de0be6 100644 --- a/test/vpp_papi_provider.py +++ b/test/vpp_papi_provider.py @@ -2108,3 +2108,121 @@ class VppPapiProvider(object): 'client_ip': client_ip, 'decap_vrf_id': decap_vrf_id, 'client_mac': client_mac}) + + def sr_localsid_add_del(self, + localsid_addr, + behavior, + nh_addr, + is_del=0, + end_psp=0, + sw_if_index=0xFFFFFFFF, + vlan_index=0, + fib_table=0, + ): + """ Add/del IPv6 SR local-SID. 
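A minimal usage sketch, called from a test case via self.vapi (illustrative
only; the SID value is an example, and addresses are passed in packed binary
form, as the VppSRv6LocalSID wrapper in test/vpp_srv6.py does via inet_pton):

    # add an 'End' localsid a3:: (behavior END = 1, see the values below);
    # nh_addr is not used by this behavior, so an all-zero address is passed
    self.vapi.sr_localsid_add_del(
        inet_pton(AF_INET6, 'a3::'),
        behavior=1,
        nh_addr=inet_pton(AF_INET6, '::'))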
+ + :param localsid_addr: + :param behavior: END=1; END.X=2; END.DX2=4; END.DX6=5; + :param behavior: END.DX4=6; END.DT6=7; END.DT4=8 + :param nh_addr: + :param is_del: (Default value = 0) + :param end_psp: (Default value = 0) + :param sw_if_index: (Default value = 0xFFFFFFFF) + :param vlan_index: (Default value = 0) + :param fib_table: (Default value = 0) + """ + return self.api( + self.papi.sr_localsid_add_del, + {'is_del': is_del, + 'localsid_addr': localsid_addr, + 'end_psp': end_psp, + 'behavior': behavior, + 'sw_if_index': sw_if_index, + 'vlan_index': vlan_index, + 'fib_table': fib_table, + 'nh_addr': nh_addr + } + ) + + def sr_policy_add( + self, + bsid_addr, + weight=1, + is_encap=1, + type=0, + fib_table=0, + n_segments=0, + segments=[]): + """ + :param bsid_addr: bindingSID of the SR Policy + :param weight: weight of the sid list. optional. (default: 1) + :param is_encap: (bool) whether SR policy should Encap or SRH insert \ + (default: Encap) + :param type: type/behavior of the SR policy. (default or spray) \ + (default: default) + :param fib_table: VRF where to install the FIB entry for the BSID \ + (default: 0) + :param n_segments: number of segments \ + (default: 0) + :param segments: a vector of IPv6 address composing the segment list \ + (default: []) + """ + return self.api( + self.papi.sr_policy_add, + {'bsid_addr': bsid_addr, + 'weight': weight, + 'is_encap': is_encap, + 'type': type, + 'fib_table': fib_table, + 'n_segments': n_segments, + 'segments': segments + } + ) + + def sr_policy_del( + self, + bsid_addr, + sr_policy_index=0): + """ + :param bsid: bindingSID of the SR Policy + :param sr_policy_index: index of the sr policy (default: 0) + """ + return self.api( + self.papi.sr_policy_del, + {'bsid_addr': bsid_addr, + 'sr_policy_index': sr_policy_index + }) + + def sr_steering_add_del( + self, + is_del, + bsid_addr, + sr_policy_index, + table_id, + prefix_addr, + mask_width, + sw_if_index, + traffic_type): + """ + Steer traffic L2 and L3 traffic through a given SR policy + + :param is_del: delete or add + :param bsid_addr: bindingSID of the SR Policy (alt to sr_policy_index) + :param sr_policy: is the index of the SR Policy (alt to bsid) + :param table_id: is the VRF where to install the FIB entry for the BSID + :param prefix_addr: is the IPv4/v6 address for L3 traffic type + :param mask_width: is the mask for L3 traffic type + :param sw_if_index: is the incoming interface for L2 traffic + :param traffic_type: type of traffic (IPv4: 4, IPv6: 6, L2: 2) + """ + return self.api( + self.papi.sr_steering_add_del, + {'is_del': is_del, + 'bsid_addr': bsid_addr, + 'sr_policy_index': sr_policy_index, + 'table_id': table_id, + 'prefix_addr': prefix_addr, + 'mask_width': mask_width, + 'sw_if_index': sw_if_index, + 'traffic_type': traffic_type + }) diff --git a/test/vpp_srv6.py b/test/vpp_srv6.py new file mode 100644 index 00000000..28ff4b85 --- /dev/null +++ b/test/vpp_srv6.py @@ -0,0 +1,238 @@ +""" + SRv6 LocalSIDs + + object abstractions for representing SRv6 localSIDs in VPP +""" + +from vpp_object import * +from socket import inet_pton, inet_ntop, AF_INET, AF_INET6 + + +class SRv6LocalSIDBehaviors(): + # from src/vnet/srv6/sr.h + SR_BEHAVIOR_END = 1 + SR_BEHAVIOR_X = 2 + SR_BEHAVIOR_T = 3 + SR_BEHAVIOR_D_FIRST = 4 # Unused. 
Separator in between regular and D + SR_BEHAVIOR_DX2 = 5 + SR_BEHAVIOR_DX6 = 6 + SR_BEHAVIOR_DX4 = 7 + SR_BEHAVIOR_DT6 = 8 + SR_BEHAVIOR_DT4 = 9 + SR_BEHAVIOR_LAST = 10 # Must always be the last one + + +class SRv6PolicyType(): + # from src/vnet/srv6/sr.h + SR_POLICY_TYPE_DEFAULT = 0 + SR_POLICY_TYPE_SPRAY = 1 + + +class SRv6PolicySteeringTypes(): + # from src/vnet/srv6/sr.h + SR_STEER_L2 = 2 + SR_STEER_IPV4 = 4 + SR_STEER_IPV6 = 6 + + +class VppSRv6LocalSID(VppObject): + """ + SRv6 LocalSID + """ + + def __init__(self, test, localsid_addr, behavior, nh_addr, end_psp, + sw_if_index, vlan_index, fib_table): + self._test = test + self.localsid_addr = localsid_addr + # keep binary format in _localsid_addr + self._localsid_addr = inet_pton(AF_INET6, self.localsid_addr) + self.behavior = behavior + self.nh_addr = nh_addr + # keep binary format in _nh_addr + if ':' in nh_addr: + # IPv6 + self._nh_addr = inet_pton(AF_INET6, nh_addr) + else: + # IPv4 + # API expects 16 octets (128 bits) + # last 4 octets are used for IPv4 + # --> prepend 12 octets + self._nh_addr = ('\x00' * 12) + inet_pton(AF_INET, nh_addr) + self.end_psp = end_psp + self.sw_if_index = sw_if_index + self.vlan_index = vlan_index + self.fib_table = fib_table + self._configured = False + + def add_vpp_config(self): + self._test.vapi.sr_localsid_add_del( + self._localsid_addr, + self.behavior, + self._nh_addr, + is_del=0, + end_psp=self.end_psp, + sw_if_index=self.sw_if_index, + vlan_index=self.vlan_index, + fib_table=self.fib_table) + self._configured = True + + def remove_vpp_config(self): + self._test.vapi.sr_localsid_add_del( + self._localsid_addr, + self.behavior, + self._nh_addr, + is_del=1, + end_psp=self.end_psp, + sw_if_index=self.sw_if_index, + vlan_index=self.vlan_index, + fib_table=self.fib_table) + self._configured = False + + def query_vpp_config(self): + # sr_localsids_dump API is disabled + # use _configured flag for now + return self._configured + + def __str__(self): + return self.object_id() + + def object_id(self): + return ("%d;%s,%d" + % (self.fib_table, + self.localsid_addr, + self.behavior)) + + +class VppSRv6Policy(VppObject): + """ + SRv6 Policy + """ + + def __init__(self, test, bsid, + is_encap, sr_type, weight, fib_table, + segments, source): + self._test = test + self.bsid = bsid + # keep binary format in _bsid + self._bsid = inet_pton(AF_INET6, bsid) + self.is_encap = is_encap + self.sr_type = sr_type + self.weight = weight + self.fib_table = fib_table + self.segments = segments + # keep binary format in _segments + self._segments = [] + for seg in segments: + self._segments.extend(inet_pton(AF_INET6, seg)) + self.n_segments = len(segments) + # source not passed to API + # self.source = inet_pton(AF_INET6, source) + self.source = source + self._configured = False + + def add_vpp_config(self): + self._test.vapi.sr_policy_add( + self._bsid, + self.weight, + self.is_encap, + self.sr_type, + self.fib_table, + self.n_segments, + self._segments) + self._configured = True + + def remove_vpp_config(self): + self._test.vapi.sr_policy_del( + self._bsid) + self._configured = False + + def query_vpp_config(self): + # no API to query SR Policies + # use _configured flag for now + return self._configured + + def __str__(self): + return self.object_id() + + def object_id(self): + return ("%d;%s-><%s>;%d" + % (self.sr_type, + self.bsid, + ','.join(self.segments), + self.is_encap)) + + +class VppSRv6Steering(VppObject): + """ + SRv6 Steering + """ + + def __init__(self, test, + bsid, + prefix, + mask_width, + 
traffic_type, + sr_policy_index, + table_id, + sw_if_index): + self._test = test + self.bsid = bsid + # keep binary format in _bsid + self._bsid = inet_pton(AF_INET6, bsid) + self.prefix = prefix + # keep binary format in _prefix + if ':' in prefix: + # IPv6 + self._prefix = inet_pton(AF_INET6, prefix) + else: + # IPv4 + # API expects 16 octets (128 bits) + # last 4 octets are used for IPv4 + # --> prepend 12 octets + self._prefix = ('\x00' * 12) + inet_pton(AF_INET, prefix) + self.mask_width = mask_width + self.traffic_type = traffic_type + self.sr_policy_index = sr_policy_index + self.sw_if_index = sw_if_index + self.table_id = table_id + self._configured = False + + def add_vpp_config(self): + self._test.vapi.sr_steering_add_del( + 0, + self._bsid, + self.sr_policy_index, + self.table_id, + self._prefix, + self.mask_width, + self.sw_if_index, + self.traffic_type) + self._configured = True + + def remove_vpp_config(self): + self._test.vapi.sr_steering_add_del( + 1, + self._bsid, + self.sr_policy_index, + self.table_id, + self._prefix, + self.mask_width, + self.sw_if_index, + self.traffic_type) + self._configured = False + + def query_vpp_config(self): + # no API to query steering entries + # use _configured flag for now + return self._configured + + def __str__(self): + return self.object_id() + + def object_id(self): + return ("%d;%d;%s/%d->%s" + % (self.table_id, + self.traffic_type, + self.prefix, + self.mask_width, + self.bsid)) -- cgit 1.2.3-korg From 1500254bee11355bbd69cc1dd9705be4f002f2bd Mon Sep 17 00:00:00 2001 From: Neale Ranns Date: Sun, 10 Sep 2017 04:39:11 -0700 Subject: FIB table add/delete API part 2; - this adds the code to create an IP and MPLS table via the API. - but the enforcement that the table must be created before it is used is still missing, this is so that CSIT can pass. 
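An illustrative sketch of the per-source locking pattern this change
introduces (fragment only; the table id and source are example values, the
signatures are those added in this patch):

    /* a client creates or references a table under its own source... */
    u32 fib_index;

    fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4,
                                                    11 /* table id */,
                                                    FIB_SOURCE_CLI);

    /* ... and later releases it under the same source. When a source's
     * lock count drops to zero its routes are flushed from the table;
     * the table itself is destroyed only once all sources have released
     * their locks. */
    fib_table_unlock (fib_index, FIB_PROTOCOL_IP4, FIB_SOURCE_CLI);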
Change-Id: Id124d884ade6cb7da947225200e3bb193454c555 Signed-off-by: Neale Ranns --- src/plugins/nat/nat.c | 17 +- src/plugins/nat/nat64.c | 13 +- src/vnet/classify/vnet_classify.c | 16 +- src/vnet/dhcp/dhcp4_proxy_node.c | 9 +- src/vnet/dhcp/dhcp6_proxy_node.c | 9 +- src/vnet/dhcp/dhcp_proxy.c | 19 ++- src/vnet/dpo/lookup_dpo.c | 20 ++- src/vnet/dpo/mpls_label_dpo.c | 12 +- src/vnet/ethernet/arp.c | 127 +++++++++++---- src/vnet/fib/fib_api.h | 1 - src/vnet/fib/fib_entry.c | 15 +- src/vnet/fib/fib_entry.h | 1 + src/vnet/fib/fib_entry_src_mpls.c | 7 +- src/vnet/fib/fib_table.c | 43 +++-- src/vnet/fib/fib_table.h | 32 +++- src/vnet/fib/fib_test.c | 27 ++-- src/vnet/fib/ip4_fib.c | 41 +++-- src/vnet/fib/ip4_fib.h | 5 +- src/vnet/fib/ip6_fib.c | 41 +++-- src/vnet/fib/ip6_fib.h | 5 +- src/vnet/fib/mpls_fib.c | 16 +- src/vnet/fib/mpls_fib.h | 5 +- src/vnet/interface_api.c | 177 ++++++++++++++++---- src/vnet/ip/ip.h | 7 + src/vnet/ip/ip4.h | 13 ++ src/vnet/ip/ip4_forward.c | 101 +----------- src/vnet/ip/ip4_source_and_port_range_check.c | 11 +- src/vnet/ip/ip6.h | 13 ++ src/vnet/ip/ip6_forward.c | 103 +----------- src/vnet/ip/ip6_neighbor.c | 108 +++++++++---- src/vnet/ip/ip_api.c | 122 +++++++++++--- src/vnet/ip/lookup.c | 225 ++++++++++++++++++++++++++ src/vnet/lisp-gpe/interface.c | 11 +- src/vnet/lisp-gpe/lisp_gpe_fwd_entry.c | 9 +- src/vnet/lisp-gpe/lisp_gpe_sub_interface.c | 11 +- src/vnet/mfib/ip4_mfib.c | 12 +- src/vnet/mfib/ip4_mfib.h | 5 +- src/vnet/mfib/ip6_mfib.c | 12 +- src/vnet/mfib/ip6_mfib.h | 5 +- src/vnet/mfib/mfib_entry.c | 11 ++ src/vnet/mfib/mfib_entry.h | 2 + src/vnet/mfib/mfib_table.c | 88 ++++++++-- src/vnet/mfib/mfib_table.h | 29 +++- src/vnet/mfib/mfib_test.c | 11 +- src/vnet/mfib/mfib_types.h | 8 +- src/vnet/mpls/interface.c | 26 ++- src/vnet/mpls/mpls.c | 76 ++++++++- src/vnet/mpls/mpls.h | 16 +- src/vnet/mpls/mpls_api.c | 66 ++++++-- src/vnet/srv6/sr_policy_rewrite.c | 6 +- src/vnet/srv6/sr_steering.c | 6 +- src/vpp/api/api.c | 5 +- src/vpp/api/custom_dump.c | 3 - test/test_dhcp.py | 24 ++- test/test_gre.py | 8 +- test/test_ip4.py | 11 +- test/test_ip4_vrf_multi_instance.py | 4 +- test/test_ip6.py | 7 +- test/test_ip6_vrf_multi_instance.py | 4 +- test/test_ip_mcast.py | 98 ++++++++++- test/test_mpls.py | 48 +++++- test/test_nat.py | 13 ++ test/test_neighbor.py | 66 +++++++- test/vpp_ip_route.py | 73 +++++++++ test/vpp_papi_provider.py | 46 ++++-- 65 files changed, 1643 insertions(+), 538 deletions(-) (limited to 'src/vnet/srv6') diff --git a/src/plugins/nat/nat.c b/src/plugins/nat/nat.c index aa7ef10a..8aecac6d 100644 --- a/src/plugins/nat/nat.c +++ b/src/plugins/nat/nat.c @@ -167,7 +167,8 @@ void snat_add_address (snat_main_t *sm, ip4_address_t *addr, u32 vrf_id) ap->addr = *addr; if (vrf_id != ~0) ap->fib_index = - fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, vrf_id); + fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, vrf_id, + FIB_SOURCE_PLUGIN_HI); else ap->fib_index = ~0; #define _(N, i, n, s) \ @@ -625,7 +626,8 @@ int nat44_add_del_lb_static_mapping (ip4_address_t e_addr, u16 e_port, return VNET_API_ERROR_INVALID_VALUE; fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, - vrf_id); + vrf_id, + FIB_SOURCE_PLUGIN_HI); /* Find external address in allocated addresses and reserve port for address and port pair mapping when dynamic translations enabled */ @@ -754,7 +756,7 @@ int nat44_add_del_lb_static_mapping (ip4_address_t e_addr, u16 e_port, if (!m) return VNET_API_ERROR_NO_SUCH_ENTRY; - fib_table_unlock (m->fib_index, FIB_PROTOCOL_IP4); + 
fib_table_unlock (m->fib_index, FIB_PROTOCOL_IP4, FIB_SOURCE_PLUGIN_HI); /* Free external address port */ if (!sm->static_mapping_only) @@ -874,7 +876,8 @@ int snat_del_address (snat_main_t *sm, ip4_address_t addr, u8 delete_sm) } if (a->fib_index != ~0) - fib_table_unlock(a->fib_index, FIB_PROTOCOL_IP4); + fib_table_unlock(a->fib_index, FIB_PROTOCOL_IP4, + FIB_SOURCE_PLUGIN_HI); /* Delete sessions using address */ if (a->busy_tcp_ports || a->busy_udp_ports || a->busy_icmp_ports) @@ -2151,10 +2154,12 @@ snat_config (vlib_main_t * vm, unformat_input_t * input) sm->max_translations_per_user = max_translations_per_user; sm->outside_vrf_id = outside_vrf_id; sm->outside_fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, - outside_vrf_id); + outside_vrf_id, + FIB_SOURCE_PLUGIN_HI); sm->inside_vrf_id = inside_vrf_id; sm->inside_fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, - inside_vrf_id); + inside_vrf_id, + FIB_SOURCE_PLUGIN_HI); sm->static_mapping_only = static_mapping_only; sm->static_mapping_connection_tracking = static_mapping_connection_tracking; diff --git a/src/plugins/nat/nat64.c b/src/plugins/nat/nat64.c index b04901fa..bfcfa9b3 100644 --- a/src/plugins/nat/nat64.c +++ b/src/plugins/nat/nat64.c @@ -107,7 +107,8 @@ nat64_add_del_pool_addr (ip4_address_t * addr, u32 vrf_id, u8 is_add) a->fib_index = 0; if (vrf_id != ~0) a->fib_index = - fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, vrf_id); + fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, vrf_id, + FIB_SOURCE_PLUGIN_HI); #define _(N, i, n, s) \ clib_bitmap_alloc (a->busy_##n##_port_bitmap, 65535); foreach_snat_protocol @@ -119,7 +120,8 @@ nat64_add_del_pool_addr (ip4_address_t * addr, u32 vrf_id, u8 is_add) return VNET_API_ERROR_NO_SUCH_ENTRY; if (a->fib_index) - fib_table_unlock (a->fib_index, FIB_PROTOCOL_IP6); + fib_table_unlock (a->fib_index, FIB_PROTOCOL_IP6, + FIB_SOURCE_PLUGIN_HI); #define _(N, id, n, s) \ clib_bitmap_free (a->busy_##n##_port_bitmap); @@ -353,8 +355,8 @@ nat64_add_del_static_bib_entry (ip6_address_t * in_addr, { nat64_main_t *nm = &nat64_main; nat64_db_bib_entry_t *bibe; - u32 fib_index = - fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, vrf_id); + u32 fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, vrf_id, + FIB_SOURCE_PLUGIN_HI); snat_protocol_t p = ip_proto_to_snat_proto (proto); ip46_address_t addr; int i; @@ -644,7 +646,8 @@ nat64_add_del_prefix (ip6_address_t * prefix, u8 plen, u32 vrf_id, u8 is_add) { vec_add2 (nm->pref64, p, 1); p->fib_index = - fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, vrf_id); + fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, vrf_id, + FIB_SOURCE_PLUGIN_HI); p->vrf_id = vrf_id; } diff --git a/src/vnet/classify/vnet_classify.c b/src/vnet/classify/vnet_classify.c index 879fba3c..57d86748 100644 --- a/src/vnet/classify/vnet_classify.c +++ b/src/vnet/classify/vnet_classify.c @@ -368,10 +368,10 @@ vnet_classify_entry_claim_resource (vnet_classify_entry_t *e) switch (e->action) { case CLASSIFY_ACTION_SET_IP4_FIB_INDEX: - fib_table_lock (e->metadata, FIB_PROTOCOL_IP4); + fib_table_lock (e->metadata, FIB_PROTOCOL_IP4, FIB_SOURCE_CLASSIFY); break; case CLASSIFY_ACTION_SET_IP6_FIB_INDEX: - fib_table_lock (e->metadata, FIB_PROTOCOL_IP6); + fib_table_lock (e->metadata, FIB_PROTOCOL_IP6, FIB_SOURCE_CLASSIFY); break; } } @@ -382,10 +382,10 @@ vnet_classify_entry_release_resource (vnet_classify_entry_t *e) switch (e->action) { case CLASSIFY_ACTION_SET_IP4_FIB_INDEX: - fib_table_unlock (e->metadata, FIB_PROTOCOL_IP4); + 
fib_table_unlock (e->metadata, FIB_PROTOCOL_IP4, FIB_SOURCE_CLASSIFY); break; case CLASSIFY_ACTION_SET_IP6_FIB_INDEX: - fib_table_unlock (e->metadata, FIB_PROTOCOL_IP6); + fib_table_unlock (e->metadata, FIB_PROTOCOL_IP6, FIB_SOURCE_CLASSIFY); break; } } @@ -2096,9 +2096,13 @@ int vnet_classify_add_del_session (vnet_classify_main_t * cm, e->flags = 0; e->action = action; if (e->action == CLASSIFY_ACTION_SET_IP4_FIB_INDEX) - e->metadata = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, metadata); + e->metadata = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, + metadata, + FIB_SOURCE_CLASSIFY); else if (e->action == CLASSIFY_ACTION_SET_IP6_FIB_INDEX) - e->metadata = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, metadata); + e->metadata = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, + metadata, + FIB_SOURCE_CLASSIFY); else e->metadata = 0; diff --git a/src/vnet/dhcp/dhcp4_proxy_node.c b/src/vnet/dhcp/dhcp4_proxy_node.c index 1b59cdea..339a7885 100644 --- a/src/vnet/dhcp/dhcp4_proxy_node.c +++ b/src/vnet/dhcp/dhcp4_proxy_node.c @@ -785,7 +785,8 @@ dhcp4_proxy_set_server (ip46_address_t *addr, return VNET_API_ERROR_INVALID_SRC_ADDRESS; rx_fib_index = fib_table_find_or_create_and_lock(FIB_PROTOCOL_IP4, - rx_table_id); + rx_table_id, + FIB_SOURCE_DHCP); if (is_del) { @@ -795,7 +796,7 @@ dhcp4_proxy_set_server (ip46_address_t *addr, fib_table_entry_special_remove(rx_fib_index, &all_1s, FIB_SOURCE_DHCP); - fib_table_unlock (rx_fib_index, FIB_PROTOCOL_IP4); + fib_table_unlock (rx_fib_index, FIB_PROTOCOL_IP4, FIB_SOURCE_DHCP); } } else @@ -808,10 +809,10 @@ dhcp4_proxy_set_server (ip46_address_t *addr, &all_1s, FIB_SOURCE_DHCP, FIB_ENTRY_FLAG_LOCAL); - fib_table_lock (rx_fib_index, FIB_PROTOCOL_IP4); + fib_table_lock (rx_fib_index, FIB_PROTOCOL_IP4, FIB_SOURCE_DHCP); } } - fib_table_unlock (rx_fib_index, FIB_PROTOCOL_IP4); + fib_table_unlock (rx_fib_index, FIB_PROTOCOL_IP4, FIB_SOURCE_DHCP); return (rc); } diff --git a/src/vnet/dhcp/dhcp6_proxy_node.c b/src/vnet/dhcp/dhcp6_proxy_node.c index 9c2f5220..ce7a8fca 100644 --- a/src/vnet/dhcp/dhcp6_proxy_node.c +++ b/src/vnet/dhcp/dhcp6_proxy_node.c @@ -841,7 +841,8 @@ dhcp6_proxy_set_server (ip46_address_t *addr, return VNET_API_ERROR_INVALID_SRC_ADDRESS; rx_fib_index = mfib_table_find_or_create_and_lock(FIB_PROTOCOL_IP6, - rx_table_id); + rx_table_id, + MFIB_SOURCE_DHCP); if (is_del) { @@ -851,7 +852,7 @@ dhcp6_proxy_set_server (ip46_address_t *addr, mfib_table_entry_delete(rx_fib_index, &all_dhcp_servers, MFIB_SOURCE_DHCP); - mfib_table_unlock(rx_fib_index, FIB_PROTOCOL_IP6); + mfib_table_unlock(rx_fib_index, FIB_PROTOCOL_IP6, MFIB_SOURCE_DHCP); } } else @@ -885,11 +886,11 @@ dhcp6_proxy_set_server (ip46_address_t *addr, MFIB_SOURCE_DHCP, MFIB_RPF_ID_NONE, MFIB_ENTRY_FLAG_ACCEPT_ALL_ITF); - mfib_table_lock(rx_fib_index, FIB_PROTOCOL_IP6); + mfib_table_lock(rx_fib_index, FIB_PROTOCOL_IP6, MFIB_SOURCE_DHCP); } } - mfib_table_unlock(rx_fib_index, FIB_PROTOCOL_IP6); + mfib_table_unlock(rx_fib_index, FIB_PROTOCOL_IP6, MFIB_SOURCE_DHCP); return (rc); } diff --git a/src/vnet/dhcp/dhcp_proxy.c b/src/vnet/dhcp/dhcp_proxy.c index ba7f354e..1784906b 100644 --- a/src/vnet/dhcp/dhcp_proxy.c +++ b/src/vnet/dhcp/dhcp_proxy.c @@ -29,9 +29,9 @@ dhcp_proxy_rx_table_lock (fib_protocol_t proto, u32 fib_index) { if (FIB_PROTOCOL_IP4 == proto) - fib_table_lock(fib_index, proto); + fib_table_lock(fib_index, proto, FIB_SOURCE_DHCP); else - mfib_table_lock(fib_index, proto); + mfib_table_lock(fib_index, proto, MFIB_SOURCE_DHCP); } static void @@ -39,9 
+39,9 @@ dhcp_proxy_rx_table_unlock (fib_protocol_t proto, u32 fib_index) { if (FIB_PROTOCOL_IP4 == proto) - fib_table_unlock(fib_index, proto); + fib_table_unlock(fib_index, proto, FIB_SOURCE_DHCP); else - mfib_table_unlock(fib_index, proto); + mfib_table_unlock(fib_index, proto, MFIB_SOURCE_DHCP); } u32 @@ -169,7 +169,7 @@ dhcp_proxy_server_del (fib_protocol_t proto, if (~0 != index) { server = &proxy->dhcp_servers[index]; - fib_table_unlock (server->server_fib_index, proto); + fib_table_unlock (server->server_fib_index, proto, FIB_SOURCE_DHCP); vec_del1(proxy->dhcp_servers, index); @@ -228,7 +228,8 @@ dhcp_proxy_server_add (fib_protocol_t proto, dhcp_server_t server = { .dhcp_server = *addr, .server_fib_index = fib_table_find_or_create_and_lock(proto, - server_table_id), + server_table_id, + FIB_SOURCE_DHCP), }; vec_add1(proxy->dhcp_servers, server); @@ -297,9 +298,11 @@ int dhcp_proxy_set_vss (fib_protocol_t proto, int rc = 0; if (proto == FIB_PROTOCOL_IP4) - rx_fib_index = fib_table_find_or_create_and_lock(proto, tbl_id); + rx_fib_index = fib_table_find_or_create_and_lock(proto, tbl_id, + FIB_SOURCE_DHCP); else - rx_fib_index = mfib_table_find_or_create_and_lock(proto, tbl_id); + rx_fib_index = mfib_table_find_or_create_and_lock(proto, tbl_id, + MFIB_SOURCE_DHCP); v = dhcp_get_vss_info(dm, rx_fib_index, proto); if (NULL != v) diff --git a/src/vnet/dpo/lookup_dpo.c b/src/vnet/dpo/lookup_dpo.c index 26363a2f..af189eda 100644 --- a/src/vnet/dpo/lookup_dpo.c +++ b/src/vnet/dpo/lookup_dpo.c @@ -135,11 +135,15 @@ lookup_dpo_add_or_lock_w_fib_index (fib_node_index_t fib_index, { if (LOOKUP_UNICAST == cast) { - fib_table_lock(fib_index, dpo_proto_to_fib(proto)); + fib_table_lock(fib_index, + dpo_proto_to_fib(proto), + FIB_SOURCE_RR); } else { - mfib_table_lock(fib_index, dpo_proto_to_fib(proto)); + mfib_table_lock(fib_index, + dpo_proto_to_fib(proto), + MFIB_SOURCE_RR); } } lookup_dpo_add_or_lock_i(fib_index, proto, cast, input, table_config, dpo); @@ -161,13 +165,15 @@ lookup_dpo_add_or_lock_w_table_id (u32 table_id, { fib_index = fib_table_find_or_create_and_lock(dpo_proto_to_fib(proto), - table_id); + table_id, + FIB_SOURCE_RR); } else { fib_index = mfib_table_find_or_create_and_lock(dpo_proto_to_fib(proto), - table_id); + table_id, + MFIB_SOURCE_RR); } } @@ -238,12 +244,14 @@ lookup_dpo_unlock (dpo_id_t *dpo) if (LOOKUP_UNICAST == lkd->lkd_cast) { fib_table_unlock(lkd->lkd_fib_index, - dpo_proto_to_fib(lkd->lkd_proto)); + dpo_proto_to_fib(lkd->lkd_proto), + FIB_SOURCE_RR); } else { mfib_table_unlock(lkd->lkd_fib_index, - dpo_proto_to_fib(lkd->lkd_proto)); + dpo_proto_to_fib(lkd->lkd_proto), + MFIB_SOURCE_RR); } } pool_put(lookup_dpo_pool, lkd); diff --git a/src/vnet/dpo/mpls_label_dpo.c b/src/vnet/dpo/mpls_label_dpo.c index b178a902..2a6e7dd5 100644 --- a/src/vnet/dpo/mpls_label_dpo.c +++ b/src/vnet/dpo/mpls_label_dpo.c @@ -105,10 +105,18 @@ format_mpls_label_dpo (u8 *s, va_list *args) mpls_label_dpo_t *mld; u32 ii; - mld = mpls_label_dpo_get(index); - s = format(s, "mpls-label:[%d]:", index); + if (pool_is_free_index(mpls_label_dpo_pool, index)) + { + /* + * the packet trace can be printed after the DPO has been deleted + */ + return (s); + } + + mld = mpls_label_dpo_get(index); + for (ii = 0; ii < mld->mld_n_labels; ii++) { hdr.label_exp_s_ttl = diff --git a/src/vnet/ethernet/arp.c b/src/vnet/ethernet/arp.c index c84ff47b..08e91373 100644 --- a/src/vnet/ethernet/arp.c +++ b/src/vnet/ethernet/arp.c @@ -522,6 +522,24 @@ arp_update_adjacency (vnet_main_t * vnm, u32 sw_if_index, u32 ai) } } 
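A condensed sketch of the callback plumbing used by the hunks below
(illustrative only; it assumes the arp_table_bind handler added further down
and mirrors the ip4_table_bind_callback_t type this patch introduces in
ip4.h):

    /* register for interface-to-FIB-table binding change notifications */
    ip4_table_bind_callback_t cbt = {
      .function = arp_table_bind,  /* re-installs adj-fibs in the new table */
      .function_opaque = 0,
    };
    vec_add1 (ip4_main.table_bind_callbacks, cbt);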
+static void +arp_adj_fib_add (ethernet_arp_ip4_entry_t * e, uint32_t fib_index) +{ + fib_prefix_t pfx = { + .fp_len = 32, + .fp_proto = FIB_PROTOCOL_IP4, + .fp_addr.ip4 = e->ip4_address, + }; + + e->fib_entry_index = + fib_table_entry_path_add (fib_index, &pfx, FIB_SOURCE_ADJ, + FIB_ENTRY_FLAG_ATTACHED, + DPO_PROTO_IP4, &pfx.fp_addr, + e->sw_if_index, ~0, 1, NULL, + FIB_ROUTE_PATH_FLAG_NONE); + fib_table_lock (fib_index, FIB_PROTOCOL_IP4, FIB_SOURCE_ADJ); +} + int vnet_arp_set_ip4_over_ethernet_internal (vnet_main_t * vnm, vnet_arp_set_ip4_over_ethernet_rpc_args_t @@ -576,21 +594,9 @@ vnet_arp_set_ip4_over_ethernet_internal (vnet_main_t * vnm, if (!is_no_fib_entry) { - fib_prefix_t pfx = { - .fp_len = 32, - .fp_proto = FIB_PROTOCOL_IP4, - .fp_addr.ip4 = a->ip4, - }; - u32 fib_index; - - fib_index = - ip4_fib_table_get_index_for_sw_if_index (e->sw_if_index); - e->fib_entry_index = - fib_table_entry_path_add (fib_index, &pfx, FIB_SOURCE_ADJ, - FIB_ENTRY_FLAG_ATTACHED, - DPO_PROTO_IP4, &pfx.fp_addr, - e->sw_if_index, ~0, 1, NULL, - FIB_ROUTE_PATH_FLAG_NONE); + arp_adj_fib_add (e, + ip4_fib_table_get_index_for_sw_if_index + (e->sw_if_index)); } else { @@ -1561,6 +1567,65 @@ arp_add_del_interface_address (ip4_main_t * im, } } +void +arp_adj_fib_remove (ethernet_arp_ip4_entry_t * e, uint32_t fib_index) +{ + if (FIB_NODE_INDEX_INVALID != e->fib_entry_index) + { + fib_prefix_t pfx = { + .fp_len = 32, + .fp_proto = FIB_PROTOCOL_IP4, + .fp_addr.ip4 = e->ip4_address, + }; + u32 fib_index; + + fib_index = ip4_fib_table_get_index_for_sw_if_index (e->sw_if_index); + + fib_table_entry_path_remove (fib_index, &pfx, + FIB_SOURCE_ADJ, + DPO_PROTO_IP4, + &pfx.fp_addr, + e->sw_if_index, ~0, 1, + FIB_ROUTE_PATH_FLAG_NONE); + fib_table_unlock (fib_index, FIB_PROTOCOL_IP4, FIB_SOURCE_ADJ); + } +} + +static void +arp_table_bind (ip4_main_t * im, + uword opaque, + u32 sw_if_index, u32 new_fib_index, u32 old_fib_index) +{ + ethernet_arp_main_t *am = ðernet_arp_main; + ethernet_arp_interface_t *eai; + ethernet_arp_ip4_entry_t *e; + hash_pair_t *pair; + + /* + * the IP table that the interface is bound to has changed. + * reinstall all the adj fibs. 
+ */ + + if (vec_len (am->ethernet_arp_by_sw_if_index) <= sw_if_index) + return; + + eai = &am->ethernet_arp_by_sw_if_index[sw_if_index]; + + /* *INDENT-OFF* */ + hash_foreach_pair (pair, eai->arp_entries, + ({ + e = pool_elt_at_index(am->ip4_entry_pool, + pair->value[0]); + /* + * remove the adj-fib from the old table and add to the new + */ + arp_adj_fib_remove(e, old_fib_index); + arp_adj_fib_add(e, new_fib_index); + })); + /* *INDENT-ON* */ + +} + static clib_error_t * ethernet_arp_init (vlib_main_t * vm) { @@ -1606,6 +1671,11 @@ ethernet_arp_init (vlib_main_t * vm) cb.function_opaque = 0; vec_add1 (im->add_del_interface_address_callbacks, cb); + ip4_table_bind_callback_t cbt; + cbt.function = arp_table_bind; + cbt.function_opaque = 0; + vec_add1 (im->table_bind_callbacks, cbt); + return 0; } @@ -1616,24 +1686,9 @@ arp_entry_free (ethernet_arp_interface_t * eai, ethernet_arp_ip4_entry_t * e) { ethernet_arp_main_t *am = ðernet_arp_main; - if (FIB_NODE_INDEX_INVALID != e->fib_entry_index) - { - fib_prefix_t pfx = { - .fp_len = 32, - .fp_proto = FIB_PROTOCOL_IP4, - .fp_addr.ip4 = e->ip4_address, - }; - u32 fib_index; - - fib_index = ip4_fib_table_get_index_for_sw_if_index (e->sw_if_index); - - fib_table_entry_path_remove (fib_index, &pfx, - FIB_SOURCE_ADJ, - DPO_PROTO_IP4, - &pfx.fp_addr, - e->sw_if_index, ~0, 1, - FIB_ROUTE_PATH_FLAG_NONE); - } + arp_adj_fib_remove (e, + ip4_fib_table_get_index_for_sw_if_index + (e->sw_if_index)); hash_unset (eai->arp_entries, e->ip4_address.as_u32); pool_put (am->ip4_entry_pool, e); } @@ -1693,7 +1748,11 @@ vnet_arp_flush_ip4_over_ethernet_internal (vnet_main_t * vnm, * does in response to interface events. unset is only done * by the control plane. */ - if (e->flags & ETHERNET_ARP_IP4_ENTRY_FLAG_DYNAMIC) + if (e->flags & ETHERNET_ARP_IP4_ENTRY_FLAG_STATIC) + { + e->flags &= ETHERNET_ARP_IP4_ENTRY_FLAG_DYNAMIC; + } + else if (e->flags & ETHERNET_ARP_IP4_ENTRY_FLAG_DYNAMIC) { arp_entry_free (eai, e); } diff --git a/src/vnet/fib/fib_api.h b/src/vnet/fib/fib_api.h index d07d6cae..f5a107ca 100644 --- a/src/vnet/fib/fib_api.h +++ b/src/vnet/fib/fib_api.h @@ -23,7 +23,6 @@ add_del_route_check (fib_protocol_t table_proto, u32 next_hop_sw_if_index, dpo_proto_t next_hop_table_proto, u32 next_hop_table_id, - u8 create_missing_tables, u8 is_rpf_id, u32 * fib_index, u32 * next_hop_fib_index); diff --git a/src/vnet/fib/fib_entry.c b/src/vnet/fib/fib_entry.c index 2027f2be..4cb6cf60 100644 --- a/src/vnet/fib/fib_entry.c +++ b/src/vnet/fib/fib_entry.c @@ -89,6 +89,17 @@ fib_entry_get_default_chain_type (const fib_entry_t *fib_entry) return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4); } +u8 * +format_fib_source (u8 * s, va_list * args) +{ + fib_source_t source = va_arg (*args, int); + + s = format (s, "\n src:%s ", + fib_source_names[source]); + + return (s); +} + u8 * format_fib_entry (u8 * s, va_list * args) { @@ -114,8 +125,8 @@ format_fib_entry (u8 * s, va_list * args) FOR_EACH_SRC_ADDED(fib_entry, src, source, ({ - s = format (s, "\n src:%s ", - fib_source_names[source]); + s = format (s, "\n src:%U ", + format_fib_source, source); s = fib_entry_src_format(fib_entry, source, s); s = format (s, " refs:%d ", src->fes_ref_count); if (FIB_ENTRY_FLAG_NONE != src->fes_entry_flags) { diff --git a/src/vnet/fib/fib_entry.h b/src/vnet/fib/fib_entry.h index 93b8016d..2f6e37fe 100644 --- a/src/vnet/fib/fib_entry.h +++ b/src/vnet/fib/fib_entry.h @@ -431,6 +431,7 @@ typedef struct fib_entry_t_ { #define FIB_ENTRY_FORMAT_DETAIL2 (0x2) extern u8 *format_fib_entry (u8 * s, va_list * args); 
+extern u8 *format_fib_source (u8 * s, va_list * args); extern fib_node_index_t fib_entry_create_special(u32 fib_index, const fib_prefix_t *prefix, diff --git a/src/vnet/fib/fib_entry_src_mpls.c b/src/vnet/fib/fib_entry_src_mpls.c index a616458f..6fdd5c0a 100644 --- a/src/vnet/fib/fib_entry_src_mpls.c +++ b/src/vnet/fib/fib_entry_src_mpls.c @@ -94,7 +94,9 @@ fib_entry_src_mpls_set_data (fib_entry_src_t *src, fib_table_entry_delete_index(src->mpls.fesm_lfes[eos], FIB_SOURCE_SPECIAL); } - fib_table_unlock(MPLS_FIB_DEFAULT_TABLE_ID, FIB_PROTOCOL_MPLS); + fib_table_unlock(MPLS_FIB_DEFAULT_TABLE_ID, + FIB_PROTOCOL_MPLS, + FIB_SOURCE_MPLS); src->mpls.fesm_label = label; } else @@ -113,7 +115,8 @@ fib_entry_src_mpls_set_data (fib_entry_src_t *src, { fib_index = fib_table_find_or_create_and_lock(FIB_PROTOCOL_MPLS, - MPLS_FIB_DEFAULT_TABLE_ID); + MPLS_FIB_DEFAULT_TABLE_ID, + FIB_SOURCE_MPLS); } else { diff --git a/src/vnet/fib/fib_table.c b/src/vnet/fib/fib_table.c index 6b6cc5cb..75d15628 100644 --- a/src/vnet/fib/fib_table.c +++ b/src/vnet/fib/fib_table.c @@ -1039,7 +1039,8 @@ fib_table_find (fib_protocol_t proto, u32 fib_table_find_or_create_and_lock (fib_protocol_t proto, - u32 table_id) + u32 table_id, + fib_source_t src) { fib_table_t *fib_table; fib_node_index_t fi; @@ -1047,13 +1048,13 @@ fib_table_find_or_create_and_lock (fib_protocol_t proto, switch (proto) { case FIB_PROTOCOL_IP4: - fi = ip4_fib_table_find_or_create_and_lock(table_id); + fi = ip4_fib_table_find_or_create_and_lock(table_id, src); break; case FIB_PROTOCOL_IP6: - fi = ip6_fib_table_find_or_create_and_lock(table_id); + fi = ip6_fib_table_find_or_create_and_lock(table_id, src); break; case FIB_PROTOCOL_MPLS: - fi = mpls_fib_table_find_or_create_and_lock(table_id); + fi = mpls_fib_table_find_or_create_and_lock(table_id, src); break; default: return (~0); @@ -1070,6 +1071,7 @@ fib_table_find_or_create_and_lock (fib_protocol_t proto, u32 fib_table_create_and_lock (fib_protocol_t proto, + fib_source_t src, const char *const fmt, ...) { @@ -1082,13 +1084,13 @@ fib_table_create_and_lock (fib_protocol_t proto, switch (proto) { case FIB_PROTOCOL_IP4: - fi = ip4_fib_table_create_and_lock(); + fi = ip4_fib_table_create_and_lock(src); break; case FIB_PROTOCOL_IP6: - fi = ip6_fib_table_create_and_lock(); + fi = ip6_fib_table_create_and_lock(src); break; case FIB_PROTOCOL_MPLS: - fi = mpls_fib_table_create_and_lock(); + fi = mpls_fib_table_create_and_lock(src); break; default: return (~0); @@ -1143,26 +1145,43 @@ fib_table_walk (u32 fib_index, void fib_table_unlock (u32 fib_index, - fib_protocol_t proto) + fib_protocol_t proto, + fib_source_t source) { fib_table_t *fib_table; fib_table = fib_table_get(fib_index, proto); - fib_table->ft_locks--; + fib_table->ft_locks[source]--; + fib_table->ft_locks[FIB_TABLE_TOTAL_LOCKS]--; - if (0 == fib_table->ft_locks) + if (0 == fib_table->ft_locks[source]) { + /* + * The source no longer needs the table. 
flush any routes + * from it just in case + */ + fib_table_flush(fib_index, proto, source); + } + + if (0 == fib_table->ft_locks[FIB_TABLE_TOTAL_LOCKS]) + { + /* + * no more locak from any source - kill it + */ fib_table_destroy(fib_table); } } + void fib_table_lock (u32 fib_index, - fib_protocol_t proto) + fib_protocol_t proto, + fib_source_t source) { fib_table_t *fib_table; fib_table = fib_table_get(fib_index, proto); - fib_table->ft_locks++; + fib_table->ft_locks[source]++; + fib_table->ft_locks[FIB_TABLE_TOTAL_LOCKS]++; } u32 diff --git a/src/vnet/fib/fib_table.h b/src/vnet/fib/fib_table.h index 579740e9..6b7011b3 100644 --- a/src/vnet/fib/fib_table.h +++ b/src/vnet/fib/fib_table.h @@ -22,6 +22,12 @@ #include #include +/** + * Keep a lock per-source and a total + */ +#define FIB_TABLE_N_LOCKS (FIB_SOURCE_MAX+1) +#define FIB_TABLE_TOTAL_LOCKS FIB_SOURCE_MAX + /** * @brief * A protocol Independent FIB table @@ -34,9 +40,9 @@ typedef struct fib_table_t_ fib_protocol_t ft_proto; /** - * number of locks on the table + * per-source number of locks on the table */ - u16 ft_locks; + u16 ft_locks[FIB_TABLE_N_LOCKS]; /** * Table ID (hash key) for this FIB. @@ -628,9 +634,13 @@ extern u32 fib_table_find(fib_protocol_t proto, u32 table_id); * * @return fib_index * The index of the FIB + * + * @param source + * The ID of the client/source. */ extern u32 fib_table_find_or_create_and_lock(fib_protocol_t proto, - u32 table_id); + u32 table_id, + fib_source_t source); /** * @brief @@ -643,10 +653,14 @@ extern u32 fib_table_find_or_create_and_lock(fib_protocol_t proto, * @param fmt * A string to describe the table * + * @param source + * The ID of the client/source. + * * @return fib_index * The index of the FIB */ extern u32 fib_table_create_and_lock(fib_protocol_t proto, + fib_source_t source, const char *const fmt, ...); @@ -704,9 +718,13 @@ extern void fib_table_set_flow_hash_config(u32 fib_index, * * @paran proto * The protocol of the FIB (and thus the entries therein) + * + * @param source + * The ID of the client/source. */ extern void fib_table_unlock(u32 fib_index, - fib_protocol_t proto); + fib_protocol_t proto, + fib_source_t source); /** * @brief @@ -718,9 +736,13 @@ extern void fib_table_unlock(u32 fib_index, * * @paran proto * The protocol of the FIB (and thus the entries therein) + * + * @param source + * The ID of the client/source. 
*/ extern void fib_table_lock(u32 fib_index, - fib_protocol_t proto); + fib_protocol_t proto, + fib_source_t source); /** * @brief diff --git a/src/vnet/fib/fib_test.c b/src/vnet/fib/fib_test.c index 6867cca8..572d7f0d 100644 --- a/src/vnet/fib/fib_test.c +++ b/src/vnet/fib/fib_test.c @@ -739,7 +739,8 @@ fib_test_v4 (void) lb_count = pool_elts(load_balance_pool); /* Find or create FIB table 11 */ - fib_index = fib_table_find_or_create_and_lock(FIB_PROTOCOL_IP4, 11); + fib_index = fib_table_find_or_create_and_lock(FIB_PROTOCOL_IP4, 11, + FIB_SOURCE_API); for (ii = 0; ii < 4; ii++) { @@ -4150,7 +4151,7 @@ fib_test_v4 (void) FIB_SOURCE_INTERFACE)), "NO INterface Source'd prefixes"); - fib_table_unlock(fib_index, FIB_PROTOCOL_IP4); + fib_table_unlock(fib_index, FIB_PROTOCOL_IP4, FIB_SOURCE_API); FIB_TEST((0 == fib_path_list_db_size()), "path list DB population:%d", fib_path_list_db_size()); @@ -4201,7 +4202,8 @@ fib_test_v6 (void) dpo_drop = drop_dpo_get(DPO_PROTO_IP6); /* Find or create FIB table 11 */ - fib_index = fib_table_find_or_create_and_lock(FIB_PROTOCOL_IP6, 11); + fib_index = fib_table_find_or_create_and_lock(FIB_PROTOCOL_IP6, 11, + FIB_SOURCE_API); for (ii = 0; ii < 4; ii++) { @@ -5025,7 +5027,7 @@ fib_test_v6 (void) /* * now remove the VRF */ - fib_table_unlock(fib_index, FIB_PROTOCOL_IP6); + fib_table_unlock(fib_index, FIB_PROTOCOL_IP6, FIB_SOURCE_API); FIB_TEST((0 == fib_path_list_db_size()), "path list DB population:%d", fib_path_list_db_size()); @@ -5157,7 +5159,9 @@ fib_test_ae (void) */ u32 import_fib_index1; - import_fib_index1 = fib_table_find_or_create_and_lock(FIB_PROTOCOL_IP4, 11); + import_fib_index1 = fib_table_find_or_create_and_lock(FIB_PROTOCOL_IP4, + 11, + FIB_SOURCE_CLI); /* * Add an attached route in the import FIB @@ -5233,7 +5237,8 @@ fib_test_ae (void) */ u32 import_fib_index2; - import_fib_index2 = fib_table_find_or_create_and_lock(FIB_PROTOCOL_IP4, 12); + import_fib_index2 = fib_table_find_or_create_and_lock(FIB_PROTOCOL_IP4, 12, + FIB_SOURCE_CLI); /* * Add an attached route in the import FIB @@ -5595,8 +5600,8 @@ fib_test_ae (void) &local_pfx, FIB_SOURCE_API); - fib_table_unlock(import_fib_index1, FIB_PROTOCOL_IP4); - fib_table_unlock(import_fib_index2, FIB_PROTOCOL_IP4); + fib_table_unlock(import_fib_index1, FIB_PROTOCOL_IP4, FIB_SOURCE_CLI); + fib_table_unlock(import_fib_index2, FIB_PROTOCOL_IP4, FIB_SOURCE_CLI); FIB_TEST((0 == adj_nbr_db_size()), "ADJ DB size is %d", adj_nbr_db_size()); @@ -8168,9 +8173,10 @@ lfib_test (void) /* * MPLS enable an interface so we get the MPLS table created */ + mpls_table_create(MPLS_FIB_DEFAULT_TABLE_ID, FIB_SOURCE_API); mpls_sw_interface_enable_disable(&mpls_main, tm->hw[0]->sw_if_index, - 1); + 1, 1); ip46_address_t nh_10_10_10_1 = { .ip4.as_u32 = clib_host_to_net_u32(0x0a0a0a01), @@ -8662,7 +8668,8 @@ lfib_test (void) */ mpls_sw_interface_enable_disable(&mpls_main, tm->hw[0]->sw_if_index, - 0); + 0, 1); + mpls_table_delete(MPLS_FIB_DEFAULT_TABLE_ID, FIB_SOURCE_API); FIB_TEST(lb_count == pool_elts(load_balance_pool), "Load-balance resources freed %d of %d", diff --git a/src/vnet/fib/ip4_fib.c b/src/vnet/fib/ip4_fib.c index d563bafd..865e2dd5 100644 --- a/src/vnet/fib/ip4_fib.c +++ b/src/vnet/fib/ip4_fib.c @@ -101,7 +101,8 @@ static const ip4_fib_table_special_prefix_t ip4_specials[] = { static u32 -ip4_create_fib_with_table_id (u32 table_id) +ip4_create_fib_with_table_id (u32 table_id, + fib_source_t src) { fib_table_t *fib_table; ip4_fib_t *v4_fib; @@ -128,7 +129,7 @@ ip4_create_fib_with_table_id (u32 table_id) 
v4_fib->fwd_classify_table_index = ~0; v4_fib->rev_classify_table_index = ~0; - fib_table_lock(fib_table->ft_index, FIB_PROTOCOL_IP4); + fib_table_lock(fib_table->ft_index, FIB_PROTOCOL_IP4, src); ip4_mtrie_init(&v4_fib->mtrie); @@ -198,23 +199,24 @@ ip4_fib_table_destroy (u32 fib_index) u32 -ip4_fib_table_find_or_create_and_lock (u32 table_id) +ip4_fib_table_find_or_create_and_lock (u32 table_id, + fib_source_t src) { u32 index; index = ip4_fib_index_from_table_id(table_id); if (~0 == index) - return ip4_create_fib_with_table_id(table_id); + return ip4_create_fib_with_table_id(table_id, src); - fib_table_lock(index, FIB_PROTOCOL_IP4); + fib_table_lock(index, FIB_PROTOCOL_IP4, src); return (index); } u32 -ip4_fib_table_create_and_lock (void) +ip4_fib_table_create_and_lock (fib_source_t src) { - return (ip4_create_fib_with_table_id(~0)); + return (ip4_create_fib_with_table_id(~0, src)); } u32 @@ -525,17 +527,32 @@ ip4_show_fib (vlib_main_t * vm, pool_foreach (fib_table, im4->fibs, ({ ip4_fib_t *fib = pool_elt_at_index(im4->v4_fibs, fib_table->ft_index); + fib_source_t source; + u8 *s = NULL; if (table_id >= 0 && table_id != (int)fib->table_id) continue; if (fib_index != ~0 && fib_index != (int)fib->index) continue; - vlib_cli_output (vm, "%U, fib_index:%d, flow hash:[%U] locks:%d", - format_fib_table_name, fib->index, FIB_PROTOCOL_IP4, - fib->index, - format_ip_flow_hash_config, fib_table->ft_flow_hash_config, - fib_table->ft_locks); + s = format(s, "%U, fib_index:%d, flow hash:[%U] locks:[", + format_fib_table_name, fib->index, + FIB_PROTOCOL_IP4, + fib->index, + format_ip_flow_hash_config, + fib_table->ft_flow_hash_config); + FOR_EACH_FIB_SOURCE(source) + { + if (0 != fib_table->ft_locks[source]) + { + s = format(s, "%U:%d, ", + format_fib_source, source, + fib_table->ft_locks[source]); + } + } + s = format (s, "]"); + vlib_cli_output (vm, "%V", s); + vec_free(s); /* Show summary? */ if (! verbose) diff --git a/src/vnet/fib/ip4_fib.h b/src/vnet/fib/ip4_fib.h index 006163b4..495b45cc 100644 --- a/src/vnet/fib/ip4_fib.h +++ b/src/vnet/fib/ip4_fib.h @@ -127,8 +127,9 @@ ip4_fib_lookup (ip4_main_t * im, u32 sw_if_index, ip4_address_t * dst) * @returns A pointer to the retrieved or created fib. 
* */ -extern u32 ip4_fib_table_find_or_create_and_lock(u32 table_id); -extern u32 ip4_fib_table_create_and_lock(void); +extern u32 ip4_fib_table_find_or_create_and_lock(u32 table_id, + fib_source_t src); +extern u32 ip4_fib_table_create_and_lock(fib_source_t src); static inline diff --git a/src/vnet/fib/ip6_fib.c b/src/vnet/fib/ip6_fib.c index 8fde6f9f..3ddb8453 100644 --- a/src/vnet/fib/ip6_fib.c +++ b/src/vnet/fib/ip6_fib.c @@ -50,7 +50,8 @@ vnet_ip6_fib_init (u32 fib_index) } static u32 -create_fib_with_table_id (u32 table_id) +create_fib_with_table_id (u32 table_id, + fib_source_t src) { fib_table_t *fib_table; ip6_fib_t *v6_fib; @@ -77,29 +78,30 @@ create_fib_with_table_id (u32 table_id) fib_table->ft_flow_hash_config = IP_FLOW_HASH_DEFAULT; vnet_ip6_fib_init(fib_table->ft_index); - fib_table_lock(fib_table->ft_index, FIB_PROTOCOL_IP6); + fib_table_lock(fib_table->ft_index, FIB_PROTOCOL_IP6, src); return (fib_table->ft_index); } u32 -ip6_fib_table_find_or_create_and_lock (u32 table_id) +ip6_fib_table_find_or_create_and_lock (u32 table_id, + fib_source_t src) { uword * p; p = hash_get (ip6_main.fib_index_by_table_id, table_id); if (NULL == p) - return create_fib_with_table_id(table_id); + return create_fib_with_table_id(table_id, src); - fib_table_lock(p[0], FIB_PROTOCOL_IP6); + fib_table_lock(p[0], FIB_PROTOCOL_IP6, src); return (p[0]); } u32 -ip6_fib_table_create_and_lock (void) +ip6_fib_table_create_and_lock (fib_source_t src) { - return (create_fib_with_table_id(~0)); + return (create_fib_with_table_id(~0, src)); } void @@ -588,16 +590,33 @@ ip6_show_fib (vlib_main_t * vm, pool_foreach (fib_table, im6->fibs, ({ + fib_source_t source; + u8 *s = NULL; + fib = pool_elt_at_index(im6->v6_fibs, fib_table->ft_index); if (table_id >= 0 && table_id != (int)fib->table_id) continue; if (fib_index != ~0 && fib_index != (int)fib->index) continue; - vlib_cli_output (vm, "%s, fib_index:%d, flow hash:[%U] locks:%d", - fib_table->ft_desc, fib->index, - format_ip_flow_hash_config, fib_table->ft_flow_hash_config, - fib_table->ft_locks); + s = format(s, "%U, fib_index:%d, flow hash:[%U] locks:[", + format_fib_table_name, fib->index, + FIB_PROTOCOL_IP6, + fib->index, + format_ip_flow_hash_config, + fib_table->ft_flow_hash_config); + FOR_EACH_FIB_SOURCE(source) + { + if (0 != fib_table->ft_locks[source]) + { + s = format(s, "%U:%d, ", + format_fib_source, source, + fib_table->ft_locks[source]); + } + } + s = format (s, "]"); + vlib_cli_output (vm, "%V", s); + vec_free(s); /* Show summary? */ if (! verbose) diff --git a/src/vnet/fib/ip6_fib.h b/src/vnet/fib/ip6_fib.h index aad8305c..9728eecc 100644 --- a/src/vnet/fib/ip6_fib.h +++ b/src/vnet/fib/ip6_fib.h @@ -144,8 +144,9 @@ ip6_src_lookup_for_packet (ip6_main_t * im, * \returns A pointer to the retrieved or created fib. 
* */ -extern u32 ip6_fib_table_find_or_create_and_lock(u32 table_id); -extern u32 ip6_fib_table_create_and_lock(void); +extern u32 ip6_fib_table_find_or_create_and_lock(u32 table_id, + fib_source_t src); +extern u32 ip6_fib_table_create_and_lock(fib_source_t src); static inline ip6_fib_t * ip6_fib_get (fib_node_index_t index) diff --git a/src/vnet/fib/mpls_fib.c b/src/vnet/fib/mpls_fib.c index ca6271fe..4eeef7ab 100644 --- a/src/vnet/fib/mpls_fib.c +++ b/src/vnet/fib/mpls_fib.c @@ -83,7 +83,8 @@ mpls_fib_index_from_table_id (u32 table_id) } static u32 -mpls_fib_create_with_table_id (u32 table_id) +mpls_fib_create_with_table_id (u32 table_id, + fib_source_t src) { dpo_id_t dpo = DPO_INVALID; fib_table_t *fib_table; @@ -107,7 +108,7 @@ mpls_fib_create_with_table_id (u32 table_id) fib_table->ft_table_id = table_id; fib_table->ft_flow_hash_config = MPLS_FLOW_HASH_DEFAULT; - fib_table_lock(fib_table->ft_index, FIB_PROTOCOL_MPLS); + fib_table_lock(fib_table->ft_index, FIB_PROTOCOL_MPLS, src); if (INDEX_INVALID == mpls_fib_drop_dpo_index) { @@ -220,22 +221,23 @@ mpls_fib_create_with_table_id (u32 table_id) } u32 -mpls_fib_table_find_or_create_and_lock (u32 table_id) +mpls_fib_table_find_or_create_and_lock (u32 table_id, + fib_source_t src) { u32 index; index = mpls_fib_index_from_table_id(table_id); if (~0 == index) - return mpls_fib_create_with_table_id(table_id); + return mpls_fib_create_with_table_id(table_id, src); - fib_table_lock(index, FIB_PROTOCOL_MPLS); + fib_table_lock(index, FIB_PROTOCOL_MPLS, src); return (index); } u32 -mpls_fib_table_create_and_lock (void) +mpls_fib_table_create_and_lock (fib_source_t src) { - return (mpls_fib_create_with_table_id(~0)); + return (mpls_fib_create_with_table_id(~0, src)); } void diff --git a/src/vnet/fib/mpls_fib.h b/src/vnet/fib/mpls_fib.h index dfb8b7fc..29cd1d20 100644 --- a/src/vnet/fib/mpls_fib.h +++ b/src/vnet/fib/mpls_fib.h @@ -59,8 +59,9 @@ mpls_fib_get (fib_node_index_t index) return (pool_elt_at_index(mpls_main.mpls_fibs, index)); } -extern u32 mpls_fib_table_find_or_create_and_lock(u32 table_id); -extern u32 mpls_fib_table_create_and_lock(void); +extern u32 mpls_fib_table_find_or_create_and_lock(u32 table_id, + fib_source_t src); +extern u32 mpls_fib_table_create_and_lock(fib_source_t src); // extern mpls_fib_t * mpls_fib_find(u32 table_id); extern u32 mpls_fib_index_from_table_id(u32 table_id); diff --git a/src/vnet/interface_api.c b/src/vnet/interface_api.c index 113728cd..419fef94 100644 --- a/src/vnet/interface_api.c +++ b/src/vnet/interface_api.c @@ -320,68 +320,189 @@ stats_dsunlock (void) static void vl_api_sw_interface_set_table_t_handler (vl_api_sw_interface_set_table_t * mp) { - int rv = 0; - u32 table_id = ntohl (mp->vrf_id); - u32 sw_if_index = ntohl (mp->sw_if_index); vl_api_sw_interface_set_table_reply_t *rmp; - CLIB_UNUSED (ip_interface_address_t * ia); - u32 fib_index; + u32 sw_if_index = ntohl (mp->sw_if_index); + u32 table_id = ntohl (mp->vrf_id); + int rv = 0; VALIDATE_SW_IF_INDEX (mp); stats_dslock_with_hint (1 /* release hint */ , 4 /* tag */ ); if (mp->is_ipv6) + rv = ip_table_bind (FIB_PROTOCOL_IP6, sw_if_index, table_id, 1); + else + rv = ip_table_bind (FIB_PROTOCOL_IP4, sw_if_index, table_id, 1); + + stats_dsunlock (); + + BAD_SW_IF_INDEX_LABEL; + + REPLY_MACRO (VL_API_SW_INTERFACE_SET_TABLE_REPLY); +} + +int +ip_table_bind (fib_protocol_t fproto, + uint32_t sw_if_index, uint32_t table_id, u8 is_api) +{ + CLIB_UNUSED (ip_interface_address_t * ia); + u32 fib_index, mfib_index; + fib_source_t src; + mfib_source_t 
msrc; + + if (is_api) + { + src = FIB_SOURCE_API; + msrc = MFIB_SOURCE_API; + } + else + { + src = FIB_SOURCE_CLI; + msrc = MFIB_SOURCE_CLI; + } + + /* + * This is temporary whilst I do the song and dance with the CSIT version + */ + if (0 != table_id) { + fib_index = fib_table_find_or_create_and_lock (fproto, table_id, src); + mfib_index = + mfib_table_find_or_create_and_lock (fproto, table_id, msrc); + } + else + { + fib_index = 0; + mfib_index = 0; + } + + /* + * This if table does not exist = error is what we want in the end. + */ + /* fib_index = fib_table_find (fproto, table_id); */ + /* mfib_index = mfib_table_find (fproto, table_id); */ + + /* if (~0 == fib_index || ~0 == mfib_index) */ + /* { */ + /* return (VNET_API_ERROR_NO_SUCH_FIB); */ + /* } */ + + if (FIB_PROTOCOL_IP6 == fproto) + { + /* + * If the interface already has in IP address, then a change int + * VRF is not allowed. The IP address applied must first be removed. + * We do not do that automatically here, since VPP has no knowledge + * of whether thoses subnets are valid in the destination VRF. + */ /* *INDENT-OFF* */ foreach_ip_interface_address (&ip6_main.lookup_main, ia, sw_if_index, 1 /* honor unnumbered */ , ({ - rv = VNET_API_ERROR_ADDRESS_FOUND_FOR_INTERFACE; - goto done; + return (VNET_API_ERROR_ADDRESS_FOUND_FOR_INTERFACE); })); /* *INDENT-ON* */ - fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, - table_id); vec_validate (ip6_main.fib_index_by_sw_if_index, sw_if_index); - ip6_main.fib_index_by_sw_if_index[sw_if_index] = fib_index; - - fib_index = mfib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, - table_id); vec_validate (ip6_main.mfib_index_by_sw_if_index, sw_if_index); - ip6_main.mfib_index_by_sw_if_index[sw_if_index] = fib_index; + + /* + * tell those that are interested that the binding is changing. + */ + ip6_table_bind_callback_t *cb; + vec_foreach (cb, ip6_main.table_bind_callbacks) + cb->function (&ip6_main, cb->function_opaque, + sw_if_index, + fib_index, + ip6_main.fib_index_by_sw_if_index[sw_if_index]); + + if (0 == table_id) + { + /* reset back to default */ + if (0 != ip6_main.fib_index_by_sw_if_index[sw_if_index]) + fib_table_unlock (ip6_main.fib_index_by_sw_if_index[sw_if_index], + FIB_PROTOCOL_IP6, src); + if (0 != ip6_main.mfib_index_by_sw_if_index[sw_if_index]) + mfib_table_unlock (ip6_main.mfib_index_by_sw_if_index + [sw_if_index], FIB_PROTOCOL_IP6, msrc); + + } + else + { + /* we need to lock the table now it's inuse */ + fib_table_lock (fib_index, FIB_PROTOCOL_IP6, src); + mfib_table_lock (mfib_index, FIB_PROTOCOL_IP6, msrc); + } + + ip6_main.fib_index_by_sw_if_index[sw_if_index] = fib_index; + ip6_main.mfib_index_by_sw_if_index[sw_if_index] = mfib_index; } else { + /* + * If the interface already has in IP address, then a change int + * VRF is not allowed. The IP address applied must first be removed. + * We do not do that automatically here, since VPP has no knowledge + * of whether thoses subnets are valid in the destination VRF. 
+ */ /* *INDENT-OFF* */ foreach_ip_interface_address (&ip4_main.lookup_main, ia, sw_if_index, 1 /* honor unnumbered */ , ({ - rv = VNET_API_ERROR_ADDRESS_FOUND_FOR_INTERFACE; - goto done; + return (VNET_API_ERROR_ADDRESS_FOUND_FOR_INTERFACE); })); /* *INDENT-ON* */ - fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, - table_id); vec_validate (ip4_main.fib_index_by_sw_if_index, sw_if_index); - ip4_main.fib_index_by_sw_if_index[sw_if_index] = fib_index; - - fib_index = mfib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, - table_id); vec_validate (ip4_main.mfib_index_by_sw_if_index, sw_if_index); - ip4_main.mfib_index_by_sw_if_index[sw_if_index] = fib_index; - } -done: - stats_dsunlock (); + /* + * tell those that are interested that the binding is changing. + */ + ip4_table_bind_callback_t *cb; + vec_foreach (cb, ip4_main.table_bind_callbacks) + cb->function (&ip4_main, cb->function_opaque, + sw_if_index, + fib_index, + ip4_main.fib_index_by_sw_if_index[sw_if_index]); + + if (0 == table_id) + { + /* reset back to default */ + if (0 != ip4_main.fib_index_by_sw_if_index[sw_if_index]) + fib_table_unlock (ip4_main.fib_index_by_sw_if_index[sw_if_index], + FIB_PROTOCOL_IP4, src); + if (0 != ip4_main.mfib_index_by_sw_if_index[sw_if_index]) + mfib_table_unlock (ip4_main.mfib_index_by_sw_if_index + [sw_if_index], FIB_PROTOCOL_IP4, msrc); - BAD_SW_IF_INDEX_LABEL; + } + else + { + /* we need to lock the table now it's inuse */ + fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, + table_id, src); - REPLY_MACRO (VL_API_SW_INTERFACE_SET_TABLE_REPLY); + mfib_index = mfib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, + table_id, msrc); + } + + ip4_main.fib_index_by_sw_if_index[sw_if_index] = fib_index; + ip4_main.mfib_index_by_sw_if_index[sw_if_index] = mfib_index; + } + + /* + * Temporary. undo the locks from the find and create at the staart + */ + if (0 != table_id) + { + fib_table_unlock (fib_index, fproto, src); + mfib_table_unlock (mfib_index, fproto, msrc); + } + + return (0); } static void diff --git a/src/vnet/ip/ip.h b/src/vnet/ip/ip.h index 70b4ccd8..7aae73ff 100644 --- a/src/vnet/ip/ip.h +++ b/src/vnet/ip/ip.h @@ -184,6 +184,13 @@ void ip_del_all_interface_addresses (vlib_main_t * vm, u32 sw_if_index); extern vlib_node_registration_t ip4_inacl_node; extern vlib_node_registration_t ip6_inacl_node; +void ip_table_create (fib_protocol_t fproto, uint32_t table_id, u8 is_api); + +void ip_table_delete (fib_protocol_t fproto, uint32_t table_id, u8 is_api); + +int ip_table_bind (fib_protocol_t fproto, + uint32_t sw_if_index, uint32_t table_id, u8 is_api); + #endif /* included_ip_main_h */ /* diff --git a/src/vnet/ip/ip4.h b/src/vnet/ip/ip4.h index 8f9a8e27..decb840b 100644 --- a/src/vnet/ip/ip4.h +++ b/src/vnet/ip/ip4.h @@ -72,6 +72,16 @@ typedef struct uword function_opaque; } ip4_add_del_interface_address_callback_t; +typedef void (ip4_table_bind_function_t) + (struct ip4_main_t * im, + uword opaque, u32 sw_if_index, u32 new_fib_index, u32 old_fib_index); + +typedef struct +{ + ip4_table_bind_function_t *function; + uword function_opaque; +} ip4_table_bind_callback_t; + /** * @brief IPv4 main type. * @@ -117,6 +127,9 @@ typedef struct ip4_main_t ip4_add_del_interface_address_callback_t * add_del_interface_address_callbacks; + /** Functions to call when interface to table biding changes. */ + ip4_table_bind_callback_t *table_bind_callbacks; + /** Template used to generate IP4 ARP packets. 
*/ vlib_packet_template_t ip4_arp_request_packet_template; diff --git a/src/vnet/ip/ip4_forward.c b/src/vnet/ip/ip4_forward.c index 2d48e8a9..ec4287bb 100755 --- a/src/vnet/ip/ip4_forward.c +++ b/src/vnet/ip/ip4_forward.c @@ -1198,8 +1198,10 @@ ip4_lookup_init (vlib_main_t * vm) ip_lookup_init (&im->lookup_main, /* is_ip6 */ 0); /* Create FIB with index 0 and table id of 0. */ - fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, 0); - mfib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, 0); + fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, 0, + FIB_SOURCE_DEFAULT_ROUTE); + mfib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, 0, + MFIB_SOURCE_DEFAULT_ROUTE); { pg_node_t *pn; @@ -2794,101 +2796,6 @@ VLIB_REGISTER_NODE (ip4_midchain_node) = { VLIB_NODE_FUNCTION_MULTIARCH (ip4_midchain_node, ip4_midchain); /* *INDENT-ON */ -static clib_error_t * -add_del_interface_table (vlib_main_t * vm, - unformat_input_t * input, vlib_cli_command_t * cmd) -{ - vnet_main_t *vnm = vnet_get_main (); - ip_interface_address_t *ia; - clib_error_t *error = 0; - u32 sw_if_index, table_id; - - sw_if_index = ~0; - - if (!unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index)) - { - error = clib_error_return (0, "unknown interface `%U'", - format_unformat_error, input); - goto done; - } - - if (unformat (input, "%d", &table_id)) - ; - else - { - error = clib_error_return (0, "expected table id `%U'", - format_unformat_error, input); - goto done; - } - - /* - * If the interface already has in IP address, then a change int - * VRF is not allowed. The IP address applied must first be removed. - * We do not do that automatically here, since VPP has no knowledge - * of whether thoses subnets are valid in the destination VRF. - */ - /* *INDENT-OFF* */ - foreach_ip_interface_address (&ip4_main.lookup_main, - ia, sw_if_index, - 1 /* honor unnumbered */, - ({ - ip4_address_t * a; - - a = ip_interface_address_get_address (&ip4_main.lookup_main, ia); - error = clib_error_return (0, "interface %U has address %U", - format_vnet_sw_if_index_name, vnm, - sw_if_index, - format_ip4_address, a); - goto done; - })); - /* *INDENT-ON* */ - -{ - ip4_main_t *im = &ip4_main; - u32 fib_index; - - fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, table_id); - - vec_validate (im->fib_index_by_sw_if_index, sw_if_index); - im->fib_index_by_sw_if_index[sw_if_index] = fib_index; - - fib_index = mfib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, table_id); - vec_validate (im->mfib_index_by_sw_if_index, sw_if_index); - im->mfib_index_by_sw_if_index[sw_if_index] = fib_index; -} - -done: -return error; -} - -/*? - * Place the indicated interface into the supplied IPv4 FIB table (also known - * as a VRF). If the FIB table does not exist, this command creates it. To - * display the current IPv4 FIB table, use the command 'show ip fib'. - * FIB table will only be displayed if a route has been added to the table, or - * an IP Address is assigned to an interface in the table (which adds a route - * automatically). - * - * @note IP addresses added after setting the interface IP table are added to - * the indicated FIB table. If an IP address is added prior to changing the - * table then this is an error. The control plane must remove these addresses - * first and then change the table. VPP will not automatically move the - * addresses from the old to the new table as it does not know the validity - * of such a change. 
- * - * @cliexpar - * Example of how to add an interface to an IPv4 FIB table (where 2 is the table-id): - * @cliexcmd{set interface ip table GigabitEthernet2/0/0 2} - ?*/ -/* *INDENT-OFF* */ -VLIB_CLI_COMMAND (set_interface_ip_table_command, static) = -{ - .path = "set interface ip table", - .function = add_del_interface_table, - .short_help = "set interface ip table ", -}; -/* *INDENT-ON* */ - int ip4_lookup_validate (ip4_address_t * a, u32 fib_index0) { diff --git a/src/vnet/ip/ip4_source_and_port_range_check.c b/src/vnet/ip/ip4_source_and_port_range_check.c index ae836a11..9aa880ae 100644 --- a/src/vnet/ip/ip4_source_and_port_range_check.c +++ b/src/vnet/ip/ip4_source_and_port_range_check.c @@ -1126,6 +1126,14 @@ ip6_source_and_port_range_check_add_del (ip6_address_t * address, u16 * low_ports, u16 * high_ports, int is_add) { + uint32_t fib_index; + + fib_index = fib_table_find (FIB_PROTOCOL_IP4, vrf_id); + + ASSERT (~0 != fib_index); + + fib_table_unlock (fib_index, FIB_PROTOCOL_IP4, FIB_SOURCE_CLASSIFY); + return 0; } @@ -1138,7 +1146,8 @@ ip4_source_and_port_range_check_add_del (ip4_address_t * address, { u32 fib_index; - fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, vrf_id); + fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, vrf_id, + FIB_SOURCE_CLASSIFY); if (is_add == 0) { diff --git a/src/vnet/ip/ip6.h b/src/vnet/ip/ip6.h index fa922725..8aef53a9 100644 --- a/src/vnet/ip/ip6.h +++ b/src/vnet/ip/ip6.h @@ -103,6 +103,16 @@ typedef struct uword function_opaque; } ip6_add_del_interface_address_callback_t; +typedef void (ip6_table_bind_function_t) + (struct ip6_main_t * im, + uword opaque, u32 sw_if_index, u32 new_fib_index, u32 old_fib_index); + +typedef struct +{ + ip6_table_bind_function_t *function; + uword function_opaque; +} ip6_table_bind_callback_t; + /** * Enumeration of the FIB table instance types */ @@ -183,6 +193,9 @@ typedef struct ip6_main_t ip6_add_del_interface_address_callback_t * add_del_interface_address_callbacks; + /** Functions to call when interface to table biding changes. */ + ip6_table_bind_callback_t *table_bind_callbacks; + /* Template used to generate IP6 neighbor solicitation packets. */ vlib_packet_template_t discover_neighbor_packet_template; diff --git a/src/vnet/ip/ip6_forward.c b/src/vnet/ip/ip6_forward.c index 5832bd0b..1002f6b6 100644 --- a/src/vnet/ip/ip6_forward.c +++ b/src/vnet/ip/ip6_forward.c @@ -2999,8 +2999,10 @@ ip6_lookup_init (vlib_main_t * vm) im->lookup_table_nbuckets, im->lookup_table_size); /* Create FIB with index 0 and table id of 0. 
*/ - fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, 0); - mfib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, 0); + fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, 0, + FIB_SOURCE_DEFAULT_ROUTE); + mfib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, 0, + MFIB_SOURCE_DEFAULT_ROUTE); { pg_node_t *pn; @@ -3045,103 +3047,6 @@ ip6_lookup_init (vlib_main_t * vm) VLIB_INIT_FUNCTION (ip6_lookup_init); -static clib_error_t * -add_del_ip6_interface_table (vlib_main_t * vm, - unformat_input_t * input, - vlib_cli_command_t * cmd) -{ - vnet_main_t *vnm = vnet_get_main (); - ip_interface_address_t *ia; - clib_error_t *error = 0; - u32 sw_if_index, table_id; - - sw_if_index = ~0; - - if (!unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index)) - { - error = clib_error_return (0, "unknown interface `%U'", - format_unformat_error, input); - goto done; - } - - if (unformat (input, "%d", &table_id)) - ; - else - { - error = clib_error_return (0, "expected table id `%U'", - format_unformat_error, input); - goto done; - } - - /* - * If the interface already has in IP address, then a change int - * VRF is not allowed. The IP address applied must first be removed. - * We do not do that automatically here, since VPP has no knowledge - * of whether thoses subnets are valid in the destination VRF. - */ - /* *INDENT-OFF* */ - foreach_ip_interface_address (&ip6_main.lookup_main, - ia, sw_if_index, - 1 /* honor unnumbered */, - ({ - ip4_address_t * a; - - a = ip_interface_address_get_address (&ip6_main.lookup_main, ia); - error = clib_error_return (0, "interface %U has address %U", - format_vnet_sw_if_index_name, vnm, - sw_if_index, - format_ip6_address, a); - goto done; - })); - /* *INDENT-ON* */ - - { - u32 fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, - table_id); - - vec_validate (ip6_main.fib_index_by_sw_if_index, sw_if_index); - ip6_main.fib_index_by_sw_if_index[sw_if_index] = fib_index; - - fib_index = mfib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, - table_id); - - vec_validate (ip6_main.mfib_index_by_sw_if_index, sw_if_index); - ip6_main.mfib_index_by_sw_if_index[sw_if_index] = fib_index; - } - - -done: - return error; -} - -/*? - * Place the indicated interface into the supplied IPv6 FIB table (also known - * as a VRF). If the FIB table does not exist, this command creates it. To - * display the current IPv6 FIB table, use the command 'show ip6 fib'. - * FIB table will only be displayed if a route has been added to the table, or - * an IP Address is assigned to an interface in the table (which adds a route - * automatically). - * - * @note IP addresses added after setting the interface IP table are added to - * the indicated FIB table. If an IP address is added prior to changing the - * table then this is an error. The control plane must remove these addresses - * first and then change the table. VPP will not automatically move the - * addresses from the old to the new table as it does not know the validity - * of such a change. 
- * - * @cliexpar - * Example of how to add an interface to an IPv6 FIB table (where 2 is the table-id): - * @cliexcmd{set interface ip6 table GigabitEthernet2/0/0 2} - ?*/ -/* *INDENT-OFF* */ -VLIB_CLI_COMMAND (set_interface_ip6_table_command, static) = -{ - .path = "set interface ip6 table", - .function = add_del_ip6_interface_table, - .short_help = "set interface ip6 table " -}; -/* *INDENT-ON* */ - void ip6_link_local_address_from_ethernet_mac_address (ip6_address_t * ip, u8 * mac) diff --git a/src/vnet/ip/ip6_neighbor.c b/src/vnet/ip/ip6_neighbor.c index 62cf23ac..56f33ac8 100644 --- a/src/vnet/ip/ip6_neighbor.c +++ b/src/vnet/ip/ip6_neighbor.c @@ -250,6 +250,26 @@ format_ip6_neighbor_ip6_entry (u8 * s, va_list * va) return s; } +static void +ip6_neighbor_adj_fib_remove (ip6_neighbor_t * n, uint32_t fib_index) +{ + if (FIB_NODE_INDEX_INVALID != n->fib_entry_index) + { + fib_prefix_t pfx = { + .fp_len = 128, + .fp_proto = FIB_PROTOCOL_IP6, + .fp_addr.ip6 = n->key.ip6_address, + }; + fib_table_entry_path_remove (fib_index, + &pfx, + FIB_SOURCE_ADJ, + DPO_PROTO_IP6, + &pfx.fp_addr, + n->key.sw_if_index, ~0, + 1, FIB_ROUTE_PATH_FLAG_NONE); + } +} + static clib_error_t * ip6_neighbor_sw_interface_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags) @@ -273,22 +293,10 @@ ip6_neighbor_sw_interface_up_down (vnet_main_t * vnm, { n = pool_elt_at_index (nm->neighbor_pool, to_delete[i]); mhash_unset (&nm->neighbor_index_by_key, &n->key, 0); - if (FIB_NODE_INDEX_INVALID != n->fib_entry_index) - { - fib_prefix_t pfx = { - .fp_len = 128, - .fp_proto = FIB_PROTOCOL_IP6, - .fp_addr.ip6 = n->key.ip6_address, - }; - fib_table_entry_path_remove - (ip6_fib_table_get_index_for_sw_if_index (n->key.sw_if_index), - &pfx, - FIB_SOURCE_ADJ, - DPO_PROTO_IP6, - &pfx.fp_addr, - n->key.sw_if_index, ~0, 1, FIB_ROUTE_PATH_FLAG_NONE); - pool_put (nm->neighbor_pool, n); - } + ip6_neighbor_adj_fib_remove (n, + ip6_fib_table_get_index_for_sw_if_index + (n->key.sw_if_index)); + pool_put (nm->neighbor_pool, n); } vec_free (to_delete); } @@ -579,6 +587,24 @@ ip6_ethernet_update_adjacency (vnet_main_t * vnm, u32 sw_if_index, u32 ai) } } + +static void +ip6_neighbor_adj_fib_add (ip6_neighbor_t * n, uint32_t fib_index) +{ + fib_prefix_t pfx = { + .fp_len = 128, + .fp_proto = FIB_PROTOCOL_IP6, + .fp_addr.ip6 = n->key.ip6_address, + }; + + n->fib_entry_index = + fib_table_entry_path_add (fib_index, &pfx, FIB_SOURCE_ADJ, + FIB_ENTRY_FLAG_ATTACHED, + DPO_PROTO_IP6, &pfx.fp_addr, + n->key.sw_if_index, ~0, 1, NULL, + FIB_ROUTE_PATH_FLAG_NONE); +} + int vnet_set_ip6_ethernet_neighbor (vlib_main_t * vm, u32 sw_if_index, @@ -633,21 +659,9 @@ vnet_set_ip6_ethernet_neighbor (vlib_main_t * vm, */ if (!is_no_fib_entry) { - fib_prefix_t pfx = { - .fp_len = 128, - .fp_proto = FIB_PROTOCOL_IP6, - .fp_addr.ip6 = k.ip6_address, - }; - u32 fib_index; - - fib_index = - ip6_fib_table_get_index_for_sw_if_index (n->key.sw_if_index); - n->fib_entry_index = - fib_table_entry_path_add (fib_index, &pfx, FIB_SOURCE_ADJ, - FIB_ENTRY_FLAG_ATTACHED, - DPO_PROTO_IP6, &pfx.fp_addr, - n->key.sw_if_index, ~0, 1, NULL, - FIB_ROUTE_PATH_FLAG_NONE); + ip6_neighbor_adj_fib_add (n, + ip6_fib_table_get_index_for_sw_if_index + (n->key.sw_if_index)); } else { @@ -3843,6 +3857,33 @@ ip6_set_neighbor_limit (u32 neighbor_limit) return 0; } +static void +ip6_neighbor_table_bind (ip6_main_t * im, + uword opaque, + u32 sw_if_index, + u32 new_fib_index, u32 old_fib_index) +{ + ip6_neighbor_main_t *nm = &ip6_neighbor_main; + ip6_neighbor_t *n = NULL; + u32 i, *to_re_add = 
0; + + /* *INDENT-OFF* */ + pool_foreach (n, nm->neighbor_pool, + ({ + if (n->key.sw_if_index == sw_if_index) + vec_add1 (to_re_add, n - nm->neighbor_pool); + })); + /* *INDENT-ON* */ + + for (i = 0; i < vec_len (to_re_add); i++) + { + n = pool_elt_at_index (nm->neighbor_pool, to_re_add[i]); + ip6_neighbor_adj_fib_remove (n, old_fib_index); + ip6_neighbor_adj_fib_add (n, new_fib_index); + } + vec_free (to_re_add); +} + static clib_error_t * ip6_neighbor_init (vlib_main_t * vm) { @@ -3874,6 +3915,11 @@ ip6_neighbor_init (vlib_main_t * vm) cb.function_opaque = 0; vec_add1 (im->add_del_interface_address_callbacks, cb); + ip6_table_bind_callback_t cbt; + cbt.function = ip6_neighbor_table_bind; + cbt.function_opaque = 0; + vec_add1 (im->table_bind_callbacks, cbt); + mhash_init (&nm->pending_resolutions_by_address, /* value size */ sizeof (uword), /* key size */ sizeof (ip6_address_t)); diff --git a/src/vnet/ip/ip_api.c b/src/vnet/ip/ip_api.c index bba65ab4..384ec3e0 100644 --- a/src/vnet/ip/ip_api.c +++ b/src/vnet/ip/ip_api.c @@ -699,12 +699,58 @@ vl_api_ip_neighbor_add_del_t_handler (vl_api_ip_neighbor_add_del_t * mp, REPLY_MACRO (VL_API_IP_NEIGHBOR_ADD_DEL_REPLY); } +void +ip_table_delete (fib_protocol_t fproto, u32 table_id, u8 is_api) +{ + u32 fib_index, mfib_index; + + /* + * ignore action on the default table - this is always present + * and cannot be added nor deleted from the API + */ + if (0 != table_id) + { + /* + * The API holds only one lock on the table. + * i.e. it can be added many times via the API but needs to be + * deleted only once. + * The FIB index for unicast and multicast is not necessarily the + * same, since internal VPP systesm (like LISP and SR) create + * their own unicast tables. + */ + fib_index = fib_table_find (fproto, table_id); + mfib_index = mfib_table_find (fproto, table_id); + + if (~0 != fib_index) + { + fib_table_unlock (fib_index, fproto, + (is_api ? FIB_SOURCE_API : FIB_SOURCE_CLI)); + } + if (~0 != mfib_index) + { + mfib_table_unlock (mfib_index, fproto, + (is_api ? MFIB_SOURCE_API : MFIB_SOURCE_CLI)); + } + } +} + void vl_api_ip_table_add_del_t_handler (vl_api_ip_table_add_del_t * mp) { vl_api_ip_table_add_del_reply_t *rmp; + fib_protocol_t fproto = (mp->is_ipv6 ? 
FIB_PROTOCOL_IP6 : FIB_PROTOCOL_IP4); + u32 table_id = ntohl (mp->table_id); int rv = 0; + if (mp->is_add) + { + ip_table_create (fproto, table_id, 1); + } + else + { + ip_table_delete (fproto, table_id, 1); + } + REPLY_MACRO (VL_API_IP_TABLE_ADD_DEL_REPLY); } @@ -866,18 +912,21 @@ add_del_route_check (fib_protocol_t table_proto, u32 next_hop_sw_if_index, dpo_proto_t next_hop_table_proto, u32 next_hop_table_id, - u8 create_missing_tables, u8 is_rpf_id, u32 * fib_index, u32 * next_hop_fib_index) { vnet_main_t *vnm = vnet_get_main (); + /* Temporaray whilst I do the CSIT dance */ + u8 create_missing_tables = 1; + *fib_index = fib_table_find (table_proto, ntohl (table_id)); if (~0 == *fib_index) { if (create_missing_tables) { *fib_index = fib_table_find_or_create_and_lock (table_proto, - ntohl (table_id)); + ntohl (table_id), + FIB_SOURCE_API); } else { @@ -918,12 +967,14 @@ add_del_route_check (fib_protocol_t table_proto, *next_hop_fib_index = mfib_table_find_or_create_and_lock (fib_nh_proto, ntohl - (next_hop_table_id)); + (next_hop_table_id), + MFIB_SOURCE_API); else *next_hop_fib_index = fib_table_find_or_create_and_lock (fib_nh_proto, ntohl - (next_hop_table_id)); + (next_hop_table_id), + FIB_SOURCE_API); } else { @@ -948,8 +999,7 @@ ip4_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp) mp->next_hop_sw_if_index, DPO_PROTO_IP4, mp->next_hop_table_id, - mp->create_vrf_if_needed, 0, - &fib_index, &next_hop_fib_index); + 0, &fib_index, &next_hop_fib_index); if (0 != rv) return (rv); @@ -1008,8 +1058,7 @@ ip6_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp) mp->next_hop_sw_if_index, DPO_PROTO_IP6, mp->next_hop_table_id, - mp->create_vrf_if_needed, 0, - &fib_index, &next_hop_fib_index); + 0, &fib_index, &next_hop_fib_index); if (0 != rv) return (rv); @@ -1074,27 +1123,57 @@ vl_api_ip_add_del_route_t_handler (vl_api_ip_add_del_route_t * mp) REPLY_MACRO (VL_API_IP_ADD_DEL_ROUTE_REPLY); } +void +ip_table_create (fib_protocol_t fproto, u32 table_id, u8 is_api) +{ + u32 fib_index, mfib_index; + + /* + * ignore action on the default table - this is always present + * and cannot be added nor deleted from the API + */ + if (0 != table_id) + { + /* + * The API holds only one lock on the table. + * i.e. it can be added many times via the API but needs to be + * deleted only once. + * The FIB index for unicast and multicast is not necessarily the + * same, since internal VPP systesm (like LISP and SR) create + * their own unicast tables. + */ + fib_index = fib_table_find (fproto, table_id); + mfib_index = mfib_table_find (fproto, table_id); + + if (~0 == fib_index) + { + fib_table_find_or_create_and_lock (fproto, table_id, + (is_api ? + FIB_SOURCE_API : + FIB_SOURCE_CLI)); + } + if (~0 == mfib_index) + { + mfib_table_find_or_create_and_lock (fproto, table_id, + (is_api ? 
+ MFIB_SOURCE_API : + MFIB_SOURCE_CLI)); + } + } +} + static int add_del_mroute_check (fib_protocol_t table_proto, u32 table_id, - u32 next_hop_sw_if_index, - u8 is_local, u8 create_missing_tables, u32 * fib_index) + u32 next_hop_sw_if_index, u8 is_local, u32 * fib_index) { vnet_main_t *vnm = vnet_get_main (); *fib_index = mfib_table_find (table_proto, ntohl (table_id)); if (~0 == *fib_index) { - if (create_missing_tables) - { - *fib_index = mfib_table_find_or_create_and_lock (table_proto, - ntohl (table_id)); - } - else - { - /* No such VRF, and we weren't asked to create one */ - return VNET_API_ERROR_NO_SUCH_FIB; - } + /* No such table */ + return VNET_API_ERROR_NO_SUCH_FIB; } if (~0 != ntohl (next_hop_sw_if_index)) @@ -1163,8 +1242,7 @@ api_mroute_add_del_t_handler (vl_api_ip_mroute_add_del_t * mp) rv = add_del_mroute_check (fproto, mp->table_id, mp->next_hop_sw_if_index, - mp->is_local, - mp->create_vrf_if_needed, &fib_index); + mp->is_local, &fib_index); if (0 != rv) return (rv); diff --git a/src/vnet/ip/lookup.c b/src/vnet/ip/lookup.c index 5537bb04..667c6791 100755 --- a/src/vnet/ip/lookup.c +++ b/src/vnet/ip/lookup.c @@ -687,6 +687,78 @@ done: return error; } +clib_error_t * +vnet_ip_table_cmd (vlib_main_t * vm, + unformat_input_t * main_input, + vlib_cli_command_t * cmd, fib_protocol_t fproto) +{ + unformat_input_t _line_input, *line_input = &_line_input; + clib_error_t *error = NULL; + u32 table_id, is_add; + + is_add = 1; + table_id = ~0; + + /* Get a line of input. */ + if (!unformat_user (main_input, unformat_line_input, line_input)) + return 0; + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (line_input, "%d", &table_id)) + ; + else if (unformat (line_input, "del")) + is_add = 0; + else if (unformat (line_input, "add")) + is_add = 1; + else + { + error = unformat_parse_error (line_input); + goto done; + } + } + + if (~0 == table_id) + { + error = clib_error_return (0, "No table id"); + goto done; + } + else if (0 == table_id) + { + error = clib_error_return (0, "Can't change the default table"); + goto done; + } + else + { + if (is_add) + { + ip_table_create (fproto, table_id, 0); + } + else + { + ip_table_delete (fproto, table_id, 0); + } + } + +done: + unformat_free (line_input); + return error; +} + +clib_error_t * +vnet_ip4_table_cmd (vlib_main_t * vm, + unformat_input_t * main_input, vlib_cli_command_t * cmd) +{ + return (vnet_ip_table_cmd (vm, main_input, cmd, FIB_PROTOCOL_IP4)); +} + +clib_error_t * +vnet_ip6_table_cmd (vlib_main_t * vm, + unformat_input_t * main_input, vlib_cli_command_t * cmd) +{ + return (vnet_ip_table_cmd (vm, main_input, cmd, FIB_PROTOCOL_IP6)); +} + /* *INDENT-OFF* */ VLIB_CLI_COMMAND (vlib_cli_ip_command, static) = { .path = "ip", @@ -749,6 +821,159 @@ VLIB_CLI_COMMAND (ip_route_command, static) = { .function = vnet_ip_route_cmd, .is_mp_safe = 1, }; + +/* *INDENT-ON* */ +/*? + * This command is used to add or delete IPv4 Tables. All + * Tables must be explicitly added before that can be used. Creating a + * table will add both unicast and multicast FIBs + * + ?*/ +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (ip4_table_command, static) = { + .path = "ip table", + .short_help = "ip table [add|del] ", + .function = vnet_ip4_table_cmd, + .is_mp_safe = 1, +}; +/* *INDENT-ON* */ + +/* *INDENT-ON* */ +/*? + * This command is used to add or delete IPv4 Tables. All + * Tables must be explicitly added before that can be used. 
Creating a + * table will add both unicast and multicast FIBs + * + ?*/ +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (ip6_table_command, static) = { + .path = "ip6 table", + .short_help = "ip6 table [add|del] ", + .function = vnet_ip6_table_cmd, + .is_mp_safe = 1, +}; + +static clib_error_t * +ip_table_bind_cmd (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd, + fib_protocol_t fproto) +{ + vnet_main_t *vnm = vnet_get_main (); + clib_error_t *error = 0; + u32 sw_if_index, table_id; + int rv; + + sw_if_index = ~0; + + if (!unformat_user (input, unformat_vnet_sw_interface, vnm, &sw_if_index)) + { + error = clib_error_return (0, "unknown interface `%U'", + format_unformat_error, input); + goto done; + } + + if (unformat (input, "%d", &table_id)) + ; + else + { + error = clib_error_return (0, "expected table id `%U'", + format_unformat_error, input); + goto done; + } + + rv = ip_table_bind (fproto, sw_if_index, table_id, 0); + + if (VNET_API_ERROR_ADDRESS_FOUND_FOR_INTERFACE == rv) + { + error = clib_error_return (0, "IP addresses are still present on %U", + format_vnet_sw_if_index_name, + vnet_get_main(), + sw_if_index); + } + else if (VNET_API_ERROR_NO_SUCH_FIB == rv) + { + error = clib_error_return (0, "no such table %d", table_id); + } + else if (0 != rv) + { + error = clib_error_return (0, "unknown error"); + } + + done: + return error; +} + +static clib_error_t * +ip4_table_bind_cmd (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + return (ip_table_bind_cmd (vm , input, cmd, FIB_PROTOCOL_IP4)); +} + +static clib_error_t * +ip6_table_bind_cmd (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + return (ip_table_bind_cmd (vm , input, cmd, FIB_PROTOCOL_IP6)); +} + +/*? + * Place the indicated interface into the supplied IPv4 FIB table (also known + * as a VRF). If the FIB table does not exist, this command creates it. To + * display the current IPv4 FIB table, use the command 'show ip fib'. + * FIB table will only be displayed if a route has been added to the table, or + * an IP Address is assigned to an interface in the table (which adds a route + * automatically). + * + * @note IP addresses added after setting the interface IP table are added to + * the indicated FIB table. If an IP address is added prior to changing the + * table then this is an error. The control plane must remove these addresses + * first and then change the table. VPP will not automatically move the + * addresses from the old to the new table as it does not know the validity + * of such a change. + * + * @cliexpar + * Example of how to add an interface to an IPv4 FIB table (where 2 is the table-id): + * @cliexcmd{set interface ip table GigabitEthernet2/0/0 2} + ?*/ +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (set_interface_ip_table_command, static) = +{ + .path = "set interface ip table", + .function = ip4_table_bind_cmd, + .short_help = "set interface ip table ", +}; +/* *INDENT-ON* */ + +/*? + * Place the indicated interface into the supplied IPv6 FIB table (also known + * as a VRF). If the FIB table does not exist, this command creates it. To + * display the current IPv6 FIB table, use the command 'show ip6 fib'. + * FIB table will only be displayed if a route has been added to the table, or + * an IP Address is assigned to an interface in the table (which adds a route + * automatically). + * + * @note IP addresses added after setting the interface IP table are added to + * the indicated FIB table. 
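The commands above make table creation explicit: "ip table add" and "ip table del" drive ip_table_create() and ip_table_delete(), while "set interface ip table" goes through ip_table_bind(). A condensed sketch of the programmatic equivalent, assuming only the declarations added to ip.h in this patch; the function name and error handling are illustrative:

    #include <vnet/ip/ip.h>

    static int
    example_vrf_setup (u32 sw_if_index, u32 table_id)
    {
      int rv;

      /* Create the IPv4 unicast and multicast FIBs for this table id.
       * is_api = 0 attributes the lock to FIB_SOURCE_CLI. */
      ip_table_create (FIB_PROTOCOL_IP4, table_id, 0);

      /* Bind the interface; this fails if addresses are still configured. */
      rv = ip_table_bind (FIB_PROTOCOL_IP4, sw_if_index, table_id, 0);
      if (rv)
        return rv;

      return 0;
    }

Teardown is the mirror image: rebind the interface to table 0, then call ip_table_delete() to drop the creator's lock.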
If an IP address is added prior to changing the + * table then this is an error. The control plane must remove these addresses + * first and then change the table. VPP will not automatically move the + * addresses from the old to the new table as it does not know the validity + * of such a change. + * + * @cliexpar + * Example of how to add an interface to an IPv6 FIB table (where 2 is the table-id): + * @cliexcmd{set interface ip6 table GigabitEthernet2/0/0 2} + ?*/ +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (set_interface_ip6_table_command, static) = +{ + .path = "set interface ip6 table", + .function = ip6_table_bind_cmd, + .short_help = "set interface ip6 table " +}; /* *INDENT-ON* */ clib_error_t * diff --git a/src/vnet/lisp-gpe/interface.c b/src/vnet/lisp-gpe/interface.c index e832c23f..a0c05e85 100644 --- a/src/vnet/lisp-gpe/interface.c +++ b/src/vnet/lisp-gpe/interface.c @@ -505,12 +505,14 @@ lisp_gpe_iface_set_table (u32 sw_if_index, u32 table_id) { fib_node_index_t fib_index; - fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, table_id); + fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, table_id, + FIB_SOURCE_LISP); vec_validate (ip4_main.fib_index_by_sw_if_index, sw_if_index); ip4_main.fib_index_by_sw_if_index[sw_if_index] = fib_index; ip4_sw_interface_enable_disable (sw_if_index, 1); - fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, table_id); + fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, table_id, + FIB_SOURCE_LISP); vec_validate (ip6_main.fib_index_by_sw_if_index, sw_if_index); ip6_main.fib_index_by_sw_if_index[sw_if_index] = fib_index; ip6_sw_interface_enable_disable (sw_if_index, 1); @@ -530,7 +532,7 @@ lisp_gpe_tenant_del_default_routes (u32 table_id) fib_index = fib_table_find (prefix.fp_proto, table_id); fib_table_entry_special_remove (fib_index, &prefix, FIB_SOURCE_LISP); - fib_table_unlock (fib_index, prefix.fp_proto); + fib_table_unlock (fib_index, prefix.fp_proto, FIB_SOURCE_LISP); } } @@ -549,7 +551,8 @@ lisp_gpe_tenant_add_default_routes (u32 table_id) /* * Add a deafult route that results in a control plane punt DPO */ - fib_index = fib_table_find_or_create_and_lock (prefix.fp_proto, table_id); + fib_index = fib_table_find_or_create_and_lock (prefix.fp_proto, table_id, + FIB_SOURCE_LISP); fib_table_entry_special_dpo_add (fib_index, &prefix, FIB_SOURCE_LISP, FIB_ENTRY_FLAG_EXCLUSIVE, lisp_cp_dpo_get (fib_proto_to_dpo diff --git a/src/vnet/lisp-gpe/lisp_gpe_fwd_entry.c b/src/vnet/lisp-gpe/lisp_gpe_fwd_entry.c index d7d3cb86..0a8dc039 100644 --- a/src/vnet/lisp-gpe/lisp_gpe_fwd_entry.c +++ b/src/vnet/lisp-gpe/lisp_gpe_fwd_entry.c @@ -66,6 +66,7 @@ ip_dst_fib_add_route (u32 dst_fib_index, const ip_prefix_t * dst_prefix) /* create a new src FIB. 
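fib_table_create_and_lock() also gains a source argument, placed before the printf-style table description, as the call just below shows. A condensed sketch of the new calling convention for a feature that owns a private, un-numbered table; the description string and the choice of FIB_SOURCE_LISP follow the surrounding code and are illustrative:

    #include <vnet/fib/fib_table.h>

    static u32
    example_create_private_fib (u32 owner_index)
    {
      /* Create an internal table (no user-visible table id) and attribute
       * the lock to the creating source. */
      return fib_table_create_and_lock (FIB_PROTOCOL_IP4,
                                        FIB_SOURCE_LISP,
                                        "example private FIB [%d]",
                                        owner_index);
    }

When the owner is finished with the table it releases it with fib_table_unlock() using the same source, as the ip_src_dst_fib_del_route hunk below does.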
*/ src_fib_index = fib_table_create_and_lock (dst_fib_prefix.fp_proto, + FIB_SOURCE_LISP, "LISP-src for [%d,%U]", dst_fib_index, format_fib_prefix, &dst_fib_prefix); @@ -180,7 +181,8 @@ ip_src_dst_fib_del_route (u32 src_fib_index, */ fib_table_entry_special_remove (dst_fib_index, &dst_fib_prefix, FIB_SOURCE_LISP); - fib_table_unlock (src_fib_index, src_fib_prefix.fp_proto); + fib_table_unlock (src_fib_index, src_fib_prefix.fp_proto, + FIB_SOURCE_LISP); } } @@ -544,7 +546,8 @@ add_ip_fwd_entry (lisp_gpe_main_t * lgm, lfe->tenant = lisp_gpe_tenant_find_or_create (lfe->key->vni); lfe->eid_table_id = a->table_id; lfe->eid_fib_index = fib_table_find_or_create_and_lock (fproto, - lfe->eid_table_id); + lfe->eid_table_id, + FIB_SOURCE_LISP); lfe->is_src_dst = a->is_src_dst; if (LISP_GPE_FWD_ENTRY_TYPE_NEGATIVE != lfe->type) @@ -578,7 +581,7 @@ del_ip_fwd_entry_i (lisp_gpe_main_t * lgm, lisp_gpe_fwd_entry_t * lfe) fproto = (IP4 == ip_prefix_version (&fid_addr_ippref (&lfe->key->rmt)) ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6); - fib_table_unlock (lfe->eid_fib_index, fproto); + fib_table_unlock (lfe->eid_fib_index, fproto, FIB_SOURCE_LISP); hash_unset_mem (lgm->lisp_gpe_fwd_entries, lfe->key); clib_mem_free (lfe->key); diff --git a/src/vnet/lisp-gpe/lisp_gpe_sub_interface.c b/src/vnet/lisp-gpe/lisp_gpe_sub_interface.c index b234d9dc..26664f53 100644 --- a/src/vnet/lisp-gpe/lisp_gpe_sub_interface.c +++ b/src/vnet/lisp-gpe/lisp_gpe_sub_interface.c @@ -89,13 +89,15 @@ lisp_gpe_sub_interface_set_table (u32 sw_if_index, u32 table_id) { fib_node_index_t fib_index; - fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, table_id); + fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4, table_id, + FIB_SOURCE_LISP); ASSERT (FIB_NODE_INDEX_INVALID != fib_index); vec_validate (ip4_main.fib_index_by_sw_if_index, sw_if_index); ip4_main.fib_index_by_sw_if_index[sw_if_index] = fib_index; - fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, table_id); + fib_index = fib_table_find_or_create_and_lock (FIB_PROTOCOL_IP6, table_id, + FIB_SOURCE_LISP); ASSERT (FIB_NODE_INDEX_INVALID != fib_index); vec_validate (ip6_main.fib_index_by_sw_if_index, sw_if_index); @@ -105,9 +107,13 @@ lisp_gpe_sub_interface_set_table (u32 sw_if_index, u32 table_id) static void lisp_gpe_sub_interface_unset_table (u32 sw_if_index, u32 table_id) { + fib_table_unlock (ip4_main.fib_index_by_sw_if_index[sw_if_index], + FIB_PROTOCOL_IP4, FIB_SOURCE_LISP); ip4_main.fib_index_by_sw_if_index[sw_if_index] = 0; ip4_sw_interface_enable_disable (sw_if_index, 0); + fib_table_unlock (ip6_main.fib_index_by_sw_if_index[sw_if_index], + FIB_PROTOCOL_IP6, FIB_SOURCE_LISP); ip6_main.fib_index_by_sw_if_index[sw_if_index] = 0; ip6_sw_interface_enable_disable (sw_if_index, 0); } @@ -185,6 +191,7 @@ lisp_gpe_sub_interface_unlock (index_t l3si) l3s = lisp_gpe_sub_interface_get_i (l3si); + ASSERT (0 != l3s->locks); l3s->locks--; if (0 == l3s->locks) diff --git a/src/vnet/mfib/ip4_mfib.c b/src/vnet/mfib/ip4_mfib.c index 1849a3a4..b2482580 100644 --- a/src/vnet/mfib/ip4_mfib.c +++ b/src/vnet/mfib/ip4_mfib.c @@ -33,7 +33,8 @@ static const mfib_prefix_t ip4_specials[] = { }; static u32 -ip4_create_mfib_with_table_id (u32 table_id) +ip4_create_mfib_with_table_id (u32 table_id, + mfib_source_t src) { mfib_table_t *mfib_table; @@ -53,7 +54,7 @@ ip4_create_mfib_with_table_id (u32 table_id) mfib_table->v4.table_id = table_id; - mfib_table_lock(mfib_table->mft_index, FIB_PROTOCOL_IP4); + mfib_table_lock(mfib_table->mft_index, FIB_PROTOCOL_IP4, 
src); /* * add the special entries into the new FIB @@ -113,14 +114,15 @@ ip4_mfib_table_destroy (ip4_mfib_t *mfib) } u32 -ip4_mfib_table_find_or_create_and_lock (u32 table_id) +ip4_mfib_table_find_or_create_and_lock (u32 table_id, + mfib_source_t src) { u32 index; index = ip4_mfib_index_from_table_id(table_id); if (~0 == index) - return ip4_create_mfib_with_table_id(table_id); - mfib_table_lock(index, FIB_PROTOCOL_IP4); + return ip4_create_mfib_with_table_id(table_id, src); + mfib_table_lock(index, FIB_PROTOCOL_IP4, src); return (index); } diff --git a/src/vnet/mfib/ip4_mfib.h b/src/vnet/mfib/ip4_mfib.h index ea682651..e31fb744 100644 --- a/src/vnet/mfib/ip4_mfib.h +++ b/src/vnet/mfib/ip4_mfib.h @@ -72,8 +72,9 @@ ip4_mfib_get (u32 index) * @returns A pointer to the retrieved or created fib. * */ -extern u32 ip4_mfib_table_find_or_create_and_lock(u32 table_id); -extern u32 ip4_mfib_table_create_and_lock(void); +extern u32 ip4_mfib_table_find_or_create_and_lock(u32 table_id, + mfib_source_t src); +extern u32 ip4_mfib_table_create_and_lock(mfib_source_t src); static inline u32 ip4_mfib_index_from_table_id (u32 table_id) diff --git a/src/vnet/mfib/ip6_mfib.c b/src/vnet/mfib/ip6_mfib.c index 5e48e919..e4861330 100644 --- a/src/vnet/mfib/ip6_mfib.c +++ b/src/vnet/mfib/ip6_mfib.c @@ -151,7 +151,8 @@ static const ip6_mfib_special_t ip6_mfib_specials[] = static u32 -ip6_create_mfib_with_table_id (u32 table_id) +ip6_create_mfib_with_table_id (u32 table_id, + mfib_source_t src) { mfib_table_t *mfib_table; mfib_prefix_t pfx = { @@ -182,7 +183,7 @@ ip6_create_mfib_with_table_id (u32 table_id) mfib_table->v6.table_id = table_id; - mfib_table_lock(mfib_table->mft_index, FIB_PROTOCOL_IP6); + mfib_table_lock(mfib_table->mft_index, FIB_PROTOCOL_IP6, src); mfib_table->v6.rhead = clib_mem_alloc_aligned (sizeof(*mfib_table->v6.rhead), @@ -297,14 +298,15 @@ ip6_mfib_interface_enable_disable (u32 sw_if_index, int is_enable) } u32 -ip6_mfib_table_find_or_create_and_lock (u32 table_id) +ip6_mfib_table_find_or_create_and_lock (u32 table_id, + mfib_source_t src) { u32 index; index = ip6_mfib_index_from_table_id(table_id); if (~0 == index) - return ip6_create_mfib_with_table_id(table_id); - mfib_table_lock(index, FIB_PROTOCOL_IP6); + return ip6_create_mfib_with_table_id(table_id, src); + mfib_table_lock(index, FIB_PROTOCOL_IP6, src); return (index); } diff --git a/src/vnet/mfib/ip6_mfib.h b/src/vnet/mfib/ip6_mfib.h index adaa7ec2..ea81b553 100644 --- a/src/vnet/mfib/ip6_mfib.h +++ b/src/vnet/mfib/ip6_mfib.h @@ -79,8 +79,9 @@ ip6_mfib_get (u32 index) * @returns A pointer to the retrieved or created fib. 
* */ -extern u32 ip6_mfib_table_find_or_create_and_lock(u32 table_id); -extern u32 ip6_mfib_table_create_and_lock(void); +extern u32 ip6_mfib_table_find_or_create_and_lock(u32 table_id, + mfib_source_t src); +extern u32 ip6_mfib_table_create_and_lock(mfib_source_t src); static inline diff --git a/src/vnet/mfib/mfib_entry.c b/src/vnet/mfib/mfib_entry.c index 804e10ab..2302b9a1 100644 --- a/src/vnet/mfib/mfib_entry.c +++ b/src/vnet/mfib/mfib_entry.c @@ -334,6 +334,17 @@ mfib_entry_get_best_src (const mfib_entry_t *mfib_entry) return (bsrc); } +int +mfib_entry_is_sourced (fib_node_index_t mfib_entry_index, + mfib_source_t source) +{ + mfib_entry_t *mfib_entry; + + mfib_entry = mfib_entry_get(mfib_entry_index); + + return (NULL != mfib_entry_src_find(mfib_entry, source, NULL)); +} + static void mfib_entry_src_flush (mfib_entry_src_t *msrc) { diff --git a/src/vnet/mfib/mfib_entry.h b/src/vnet/mfib/mfib_entry.h index d4377878..96ee49f7 100644 --- a/src/vnet/mfib/mfib_entry.h +++ b/src/vnet/mfib/mfib_entry.h @@ -130,6 +130,8 @@ extern void mfib_entry_unlock(fib_node_index_t fib_entry_index); extern void mfib_entry_get_prefix(fib_node_index_t fib_entry_index, mfib_prefix_t *pfx); extern u32 mfib_entry_get_fib_index(fib_node_index_t fib_entry_index); +extern int mfib_entry_is_sourced(fib_node_index_t fib_entry_index, + mfib_source_t source); extern void mfib_entry_contribute_forwarding( fib_node_index_t mfib_entry_index, diff --git a/src/vnet/mfib/mfib_table.c b/src/vnet/mfib/mfib_table.c index 7ffe8941..e5550adc 100644 --- a/src/vnet/mfib/mfib_table.c +++ b/src/vnet/mfib/mfib_table.c @@ -424,7 +424,8 @@ mfib_table_find (fib_protocol_t proto, u32 mfib_table_find_or_create_and_lock (fib_protocol_t proto, - u32 table_id) + u32 table_id, + mfib_source_t src) { mfib_table_t *mfib_table; fib_node_index_t fi; @@ -432,10 +433,10 @@ mfib_table_find_or_create_and_lock (fib_protocol_t proto, switch (proto) { case FIB_PROTOCOL_IP4: - fi = ip4_mfib_table_find_or_create_and_lock(table_id); + fi = ip4_mfib_table_find_or_create_and_lock(table_id, src); break; case FIB_PROTOCOL_IP6: - fi = ip6_mfib_table_find_or_create_and_lock(table_id); + fi = ip6_mfib_table_find_or_create_and_lock(table_id, src); break; case FIB_PROTOCOL_MPLS: default: @@ -451,6 +452,59 @@ mfib_table_find_or_create_and_lock (fib_protocol_t proto, return (fi); } +/** + * @brief Table flush context. Store the indicies of matching FIB entries + * that need to be removed. 
+ */ +typedef struct mfib_table_flush_ctx_t_ +{ + /** + * The list of entries to flush + */ + fib_node_index_t *mftf_entries; + + /** + * The source we are flushing + */ + mfib_source_t mftf_source; +} mfib_table_flush_ctx_t; + +static int +mfib_table_flush_cb (fib_node_index_t mfib_entry_index, + void *arg) +{ + mfib_table_flush_ctx_t *ctx = arg; + + if (mfib_entry_is_sourced(mfib_entry_index, ctx->mftf_source)) + { + vec_add1(ctx->mftf_entries, mfib_entry_index); + } + return (1); +} + +void +mfib_table_flush (u32 mfib_index, + fib_protocol_t proto, + mfib_source_t source) +{ + fib_node_index_t *mfib_entry_index; + mfib_table_flush_ctx_t ctx = { + .mftf_entries = NULL, + .mftf_source = source, + }; + + mfib_table_walk(mfib_index, proto, + mfib_table_flush_cb, + &ctx); + + vec_foreach(mfib_entry_index, ctx.mftf_entries) + { + mfib_table_entry_delete_index(*mfib_entry_index, source); + } + + vec_free(ctx.mftf_entries); +} + static void mfib_table_destroy (mfib_table_t *mfib_table) { @@ -472,27 +526,43 @@ mfib_table_destroy (mfib_table_t *mfib_table) void mfib_table_unlock (u32 fib_index, - fib_protocol_t proto) + fib_protocol_t proto, + mfib_source_t source) { mfib_table_t *mfib_table; mfib_table = mfib_table_get(fib_index, proto); - mfib_table->mft_locks--; + mfib_table->mft_locks[source]--; + mfib_table->mft_locks[MFIB_TABLE_TOTAL_LOCKS]--; + + if (0 == mfib_table->mft_locks[source]) + { + /* + * The source no longer needs the table. flush any routes + * from it just in case + */ + mfib_table_flush(fib_index, proto, source); + } - if (0 == mfib_table->mft_locks) + if (0 == mfib_table->mft_locks[MFIB_TABLE_TOTAL_LOCKS]) { - mfib_table_destroy(mfib_table); + /* + * no more locak from any source - kill it + */ + mfib_table_destroy(mfib_table); } } void mfib_table_lock (u32 fib_index, - fib_protocol_t proto) + fib_protocol_t proto, + mfib_source_t source) { mfib_table_t *mfib_table; mfib_table = mfib_table_get(fib_index, proto); - mfib_table->mft_locks++; + mfib_table->mft_locks[source]++; + mfib_table->mft_locks[MFIB_TABLE_TOTAL_LOCKS]++; } void diff --git a/src/vnet/mfib/mfib_table.h b/src/vnet/mfib/mfib_table.h index 83aa04ef..c6b0b097 100644 --- a/src/vnet/mfib/mfib_table.h +++ b/src/vnet/mfib/mfib_table.h @@ -22,6 +22,12 @@ #include +/** + * Keep a lock per-source and a total + */ +#define MFIB_TABLE_N_LOCKS (MFIB_N_SOURCES+1) +#define MFIB_TABLE_TOTAL_LOCKS MFIB_N_SOURCES + /** * @brief * A protocol Independent IP multicast FIB table @@ -47,7 +53,7 @@ typedef struct mfib_table_t_ /** * number of locks on the table */ - u16 mft_locks; + u16 mft_locks[MFIB_TABLE_N_LOCKS]; /** * Table ID (hash key) for this FIB. @@ -259,7 +265,8 @@ extern fib_node_index_t mfib_table_entry_special_add(u32 fib_index, * the source to flush */ extern void mfib_table_flush(u32 fib_index, - fib_protocol_t proto); + fib_protocol_t proto, + mfib_source_t source); /** * @brief @@ -307,9 +314,13 @@ extern u32 mfib_table_find(fib_protocol_t proto, u32 table_id); * * @return fib_index * The index of the FIB + * + * @param source + * The ID of the client/source. */ extern u32 mfib_table_find_or_create_and_lock(fib_protocol_t proto, - u32 table_id); + u32 table_id, + mfib_source_t source); /** @@ -321,9 +332,13 @@ extern u32 mfib_table_find_or_create_and_lock(fib_protocol_t proto, * * @paran proto * The protocol of the FIB (and thus the entries therein) + * + * @param source + * The ID of the client/source. 
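Both mfib_table_lock() and mfib_table_unlock() are now keyed by source: the table keeps one counter per source in mft_locks[] plus a running total, flushes a source's entries when that source drops its last lock, and destroys the table only when the total reaches zero. A short usage sketch following the pattern used in mfib_test.c later in this patch; the function name is illustrative and the include path assumed:

    #include <vnet/mfib/mfib_table.h>

    static void
    example_mfib_table_usage (u32 table_id)
    {
      u32 mfib_index;

      /* Take (or create) the table with a lock attributed to the API source. */
      mfib_index = mfib_table_find_or_create_and_lock (FIB_PROTOCOL_IP4,
                                                       table_id,
                                                       MFIB_SOURCE_API);

      /* ... add (S,G) entries sourced by MFIB_SOURCE_API ... */

      /* Dropping the last MFIB_SOURCE_API lock flushes that source's entries;
       * the table itself is destroyed only when all sources have unlocked. */
      mfib_table_unlock (mfib_index, FIB_PROTOCOL_IP4, MFIB_SOURCE_API);
    }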
*/ extern void mfib_table_unlock(u32 fib_index, - fib_protocol_t proto); + fib_protocol_t proto, + mfib_source_t source); /** * @brief @@ -335,9 +350,13 @@ extern void mfib_table_unlock(u32 fib_index, * * @paran proto * The protocol of the FIB (and thus the entries therein) + * + * @param source + * The ID of the client/source. */ extern void mfib_table_lock(u32 fib_index, - fib_protocol_t proto); + fib_protocol_t proto, + mfib_source_t source); /** * @brief diff --git a/src/vnet/mfib/mfib_test.c b/src/vnet/mfib/mfib_test.c index 57787eca..3055844d 100644 --- a/src/vnet/mfib/mfib_test.c +++ b/src/vnet/mfib/mfib_test.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -366,7 +367,7 @@ mfib_test_i (fib_protocol_t PROTO, MFIB_TEST(3 == adj_mcast_db_size(), "3 MCAST adjs"); /* Find or create FIB table 11 */ - fib_index = mfib_table_find_or_create_and_lock(PROTO, 11); + fib_index = mfib_table_find_or_create_and_lock(PROTO, 11, MFIB_SOURCE_API); mfib_prefix_t pfx_dft = { .fp_len = 0, @@ -1113,9 +1114,10 @@ mfib_test_i (fib_protocol_t PROTO, /* * MPLS enable an interface so we get the MPLS table created */ + mpls_table_create(MPLS_FIB_DEFAULT_TABLE_ID, FIB_SOURCE_API); mpls_sw_interface_enable_disable(&mpls_main, tm->hw[0]->sw_if_index, - 1); + 1, 0); lfei = fib_table_entry_update_one_path(0, // default MPLS Table &pfx_3500, @@ -1192,7 +1194,7 @@ mfib_test_i (fib_protocol_t PROTO, /* * Unlock the table - it's the last lock so should be gone thereafter */ - mfib_table_unlock(fib_index, PROTO); + mfib_table_unlock(fib_index, PROTO, MFIB_SOURCE_API); MFIB_TEST((FIB_NODE_INDEX_INVALID == mfib_table_find(PROTO, fib_index)), @@ -1207,7 +1209,8 @@ mfib_test_i (fib_protocol_t PROTO, */ mpls_sw_interface_enable_disable(&mpls_main, tm->hw[0]->sw_if_index, - 0); + 0, 0); + mpls_table_delete(MPLS_FIB_DEFAULT_TABLE_ID, FIB_SOURCE_API); /* * test we've leaked no resources diff --git a/src/vnet/mfib/mfib_types.h b/src/vnet/mfib/mfib_types.h index 863fad16..50aede04 100644 --- a/src/vnet/mfib/mfib_types.h +++ b/src/vnet/mfib/mfib_types.h @@ -166,9 +166,10 @@ typedef enum mfib_source_t_ MFIB_SOURCE_VXLAN, MFIB_SOURCE_DHCP, MFIB_SOURCE_SRv6, - MFIB_SOURCE_DEFAULT_ROUTE, MFIB_SOURCE_GTPU, MFIB_SOURCE_VXLAN_GPE, + MFIB_SOURCE_RR, + MFIB_SOURCE_DEFAULT_ROUTE, } mfib_source_t; #define MFIB_SOURCE_NAMES { \ @@ -178,11 +179,14 @@ typedef enum mfib_source_t_ [MFIB_SOURCE_DHCP] = "DHCP", \ [MFIB_SOURCE_VXLAN] = "VXLAN", \ [MFIB_SOURCE_SRv6] = "SRv6", \ - [MFIB_SOURCE_DEFAULT_ROUTE] = "Default Route", \ [MFIB_SOURCE_GTPU] = "GTPU", \ [MFIB_SOURCE_VXLAN_GPE] = "VXLAN-GPE", \ + [MFIB_SOURCE_RR] = "Recursive-resolution", \ + [MFIB_SOURCE_DEFAULT_ROUTE] = "Default Route", \ } +#define MFIB_N_SOURCES (MFIB_SOURCE_DEFAULT_ROUTE) + /** * \brief Compare two prefixes for equality */ diff --git a/src/vnet/mpls/interface.c b/src/vnet/mpls/interface.c index a085aaa2..d7c8e7d3 100644 --- a/src/vnet/mpls/interface.c +++ b/src/vnet/mpls/interface.c @@ -35,25 +35,33 @@ mpls_sw_interface_is_enabled (u32 sw_if_index) return (mm->mpls_enabled_by_sw_if_index[sw_if_index]); } -void +int mpls_sw_interface_enable_disable (mpls_main_t * mm, u32 sw_if_index, - u8 is_enable) + u8 is_enable, + u8 is_api) { fib_node_index_t lfib_index; vec_validate_init_empty (mm->mpls_enabled_by_sw_if_index, sw_if_index, 0); + lfib_index = fib_table_find(FIB_PROTOCOL_MPLS, + MPLS_FIB_DEFAULT_TABLE_ID); + + if (~0 == lfib_index) + return VNET_API_ERROR_NO_SUCH_FIB; + /* * enable/disable only on the 1<->0 transition */ if (is_enable) { if (1 
!= ++mm->mpls_enabled_by_sw_if_index[sw_if_index]) - return; + return (0); + + fib_table_lock(lfib_index, FIB_PROTOCOL_MPLS, + (is_api? FIB_SOURCE_API: FIB_SOURCE_CLI)); - lfib_index = fib_table_find_or_create_and_lock(FIB_PROTOCOL_MPLS, - MPLS_FIB_DEFAULT_TABLE_ID); vec_validate(mm->fib_index_by_sw_if_index, 0); mm->fib_index_by_sw_if_index[sw_if_index] = lfib_index; } @@ -61,15 +69,17 @@ mpls_sw_interface_enable_disable (mpls_main_t * mm, { ASSERT(mm->mpls_enabled_by_sw_if_index[sw_if_index] > 0); if (0 != --mm->mpls_enabled_by_sw_if_index[sw_if_index]) - return; + return (0); fib_table_unlock(mm->fib_index_by_sw_if_index[sw_if_index], - FIB_PROTOCOL_MPLS); + FIB_PROTOCOL_MPLS, + (is_api? FIB_SOURCE_API: FIB_SOURCE_CLI)); } vnet_feature_enable_disable ("mpls-input", "mpls-not-enabled", sw_if_index, !is_enable, 0, 0); + return (0); } static clib_error_t * @@ -101,7 +111,7 @@ mpls_interface_enable_disable (vlib_main_t * vm, goto done; } - mpls_sw_interface_enable_disable(&mpls_main, sw_if_index, enable); + mpls_sw_interface_enable_disable(&mpls_main, sw_if_index, enable, 0); done: return error; diff --git a/src/vnet/mpls/mpls.c b/src/vnet/mpls/mpls.c index 5021ac23..7bdfd8c7 100644 --- a/src/vnet/mpls/mpls.c +++ b/src/vnet/mpls/mpls.c @@ -536,6 +536,78 @@ VLIB_CLI_COMMAND (mpls_local_label_command, static) = { .short_help = "Create/Delete MPL local labels", }; +clib_error_t * +vnet_mpls_table_cmd (vlib_main_t * vm, + unformat_input_t * main_input, + vlib_cli_command_t * cmdo) +{ + unformat_input_t _line_input, *line_input = &_line_input; + clib_error_t *error = NULL; + u32 table_id, is_add; + + is_add = 1; + table_id = ~0; + + /* Get a line of input. */ + if (!unformat_user (main_input, unformat_line_input, line_input)) + return 0; + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (line_input, "%d", &table_id)) + ; + else if (unformat (line_input, "del")) + is_add = 0; + else if (unformat (line_input, "add")) + is_add = 1; + else + { + error = unformat_parse_error (line_input); + goto done; + } + } + + if (~0 == table_id) + { + error = clib_error_return (0, "No table id"); + goto done; + } + else if (0 == table_id) + { + error = clib_error_return (0, "Can't change the default table"); + goto done; + } + else + { + if (is_add) + { + mpls_table_create (table_id, 0); + } + else + { + mpls_table_delete (table_id, 0); + } + } + + done: + unformat_free (line_input); + return error; +} + +/* *INDENT-ON* */ +/*? + * This command is used to add or delete MPLS Tables. All + * Tables must be explicitly added before that can be used, + * Including the default table. 
+ ?*/ +/* *INDENT-OFF* */ +VLIB_CLI_COMMAND (ip6_table_command, static) = { + .path = "mpla table", + .short_help = "mpls table [add|del] ", + .function = vnet_mpls_table_cmd, + .is_mp_safe = 1, +}; + int mpls_fib_reset_labels (u32 fib_id) { @@ -546,12 +618,8 @@ mpls_fib_reset_labels (u32 fib_id) static clib_error_t * mpls_init (vlib_main_t * vm) { - mpls_main_t * mm = &mpls_main; clib_error_t * error; - mm->vlib_main = vm; - mm->vnet_main = vnet_get_main(); - if ((error = vlib_call_init_function (vm, ip_main_init))) return error; diff --git a/src/vnet/mpls/mpls.h b/src/vnet/mpls/mpls.h index b0125e60..31cb1746 100644 --- a/src/vnet/mpls/mpls.h +++ b/src/vnet/mpls/mpls.h @@ -56,10 +56,6 @@ typedef struct { /* IP4 enabled count by software interface */ u8 * mpls_enabled_by_sw_if_index; - - /* convenience */ - vlib_main_t * vlib_main; - vnet_main_t * vnet_main; } mpls_main_t; extern mpls_main_t mpls_main; @@ -77,8 +73,6 @@ extern vlib_node_registration_t mpls_midchain_node; /* Parse mpls protocol as 0xXXXX or protocol name. In either host or network byte order. */ -unformat_function_t unformat_mpls_protocol_host_byte_order; -unformat_function_t unformat_mpls_protocol_net_byte_order; unformat_function_t unformat_mpls_label_net_byte_order; unformat_function_t unformat_mpls_unicast_label; @@ -86,9 +80,10 @@ unformat_function_t unformat_mpls_unicast_label; unformat_function_t unformat_mpls_header; unformat_function_t unformat_pg_mpls_header; -void mpls_sw_interface_enable_disable (mpls_main_t * mm, - u32 sw_if_index, - u8 is_enable); +int mpls_sw_interface_enable_disable (mpls_main_t * mm, + u32 sw_if_index, + u8 is_enable, + u8 is_api); u8 mpls_sw_interface_is_enabled (u32 sw_if_index); @@ -103,4 +98,7 @@ mpls_fib_index_cmp(void * a1, void * a2); int mpls_label_cmp(void * a1, void * a2); +void mpls_table_create(uint32_t table_id, u8 is_api); +void mpls_table_delete(uint32_t table_id, u8 is_api); + #endif /* included_vnet_mpls_h */ diff --git a/src/vnet/mpls/mpls_api.c b/src/vnet/mpls/mpls_api.c index a44b1a25..38f5b014 100644 --- a/src/vnet/mpls/mpls_api.c +++ b/src/vnet/mpls/mpls_api.c @@ -58,6 +58,29 @@ _(MPLS_FIB_DUMP, mpls_fib_dump) extern void stats_dslock_with_hint (int hint, int tag); extern void stats_dsunlock (void); +void +mpls_table_delete (u32 table_id, u8 is_api) +{ + u32 fib_index; + + /* + * The MPLS defult table must also be explicitly created via the API. + * So in contrast to IP, it gets no special treatment here. + * + * The API holds only one lock on the table. + * i.e. it can be added many times via the API but needs to be + * deleted only once. + */ + fib_index = fib_table_find (FIB_PROTOCOL_MPLS, table_id); + + if (~0 != fib_index) + { + fib_table_unlock (fib_index, + FIB_PROTOCOL_MPLS, + (is_api ? FIB_SOURCE_API : FIB_SOURCE_CLI)); + } +} + void vl_api_mpls_table_add_del_t_handler (vl_api_mpls_table_add_del_t * mp) { @@ -68,6 +91,13 @@ vl_api_mpls_table_add_del_t_handler (vl_api_mpls_table_add_del_t * mp) vnm = vnet_get_main (); vnm->api_errno = 0; + if (mp->mt_is_add) + mpls_table_create (ntohl (mp->mt_table_id), 1); + else + mpls_table_delete (ntohl (mp->mt_table_id), 1); + + rv = (rv == 0) ? 
vnm->api_errno : rv; + REPLY_MACRO (VL_API_MPLS_TABLE_ADD_DEL_REPLY); } @@ -82,14 +112,7 @@ mpls_ip_bind_unbind_handler (vnet_main_t * vnm, if (~0 == mpls_fib_index) { - if (mp->mb_create_table_if_needed) - { - mpls_fib_index = - fib_table_find_or_create_and_lock (FIB_PROTOCOL_MPLS, - ntohl (mp->mb_mpls_table_id)); - } - else - return VNET_API_ERROR_NO_SUCH_FIB; + return VNET_API_ERROR_NO_SUCH_FIB; } ip_fib_index = fib_table_find ((mp->mb_is_ip4 ? @@ -170,7 +193,6 @@ mpls_route_add_del_t_handler (vnet_main_t * vnm, mp->mr_next_hop_sw_if_index, pfx.fp_payload_proto, mp->mr_next_hop_table_id, - mp->mr_create_table_if_needed, mp->mr_is_rpf_id, &fib_index, &next_hop_fib_index); @@ -235,6 +257,32 @@ vl_api_mpls_route_add_del_t_handler (vl_api_mpls_route_add_del_t * mp) REPLY_MACRO (VL_API_MPLS_ROUTE_ADD_DEL_REPLY); } +void +mpls_table_create (u32 table_id, u8 is_api) +{ + u32 fib_index; + + /* + * The MPLS defult table must also be explicitly created via the API. + * So in contrast to IP, it gets no special treatment here. + */ + + /* + * The API holds only one lock on the table. + * i.e. it can be added many times via the API but needs to be + * deleted only once. + */ + fib_index = fib_table_find (FIB_PROTOCOL_MPLS, table_id); + + if (~0 == fib_index) + { + fib_table_find_or_create_and_lock (FIB_PROTOCOL_MPLS, + table_id, + (is_api ? + FIB_SOURCE_API : FIB_SOURCE_CLI)); + } +} + static void vl_api_mpls_tunnel_add_del_t_handler (vl_api_mpls_tunnel_add_del_t * mp) { diff --git a/src/vnet/srv6/sr_policy_rewrite.c b/src/vnet/srv6/sr_policy_rewrite.c index f427bbf3..2f90993a 100755 --- a/src/vnet/srv6/sr_policy_rewrite.c +++ b/src/vnet/srv6/sr_policy_rewrite.c @@ -595,8 +595,10 @@ sr_policy_add (ip6_address_t * bsid, ip6_address_t * segments, if (sm->fib_table_ip6 == (u32) ~ 0) { sm->fib_table_ip6 = fib_table_create_and_lock (FIB_PROTOCOL_IP6, + FIB_SOURCE_SR, "SRv6 steering of IP6 prefixes through BSIDs"); sm->fib_table_ip4 = fib_table_create_and_lock (FIB_PROTOCOL_IP6, + FIB_SOURCE_SR, "SRv6 steering of IP4 prefixes through BSIDs"); } @@ -684,8 +686,8 @@ sr_policy_del (ip6_address_t * bsid, u32 index) /* If FIB empty unlock it */ if (!pool_elts (sm->sr_policies) && !pool_elts (sm->steer_policies)) { - fib_table_unlock (sm->fib_table_ip6, FIB_PROTOCOL_IP6); - fib_table_unlock (sm->fib_table_ip4, FIB_PROTOCOL_IP6); + fib_table_unlock (sm->fib_table_ip6, FIB_PROTOCOL_IP6, FIB_SOURCE_SR); + fib_table_unlock (sm->fib_table_ip4, FIB_PROTOCOL_IP6, FIB_SOURCE_SR); sm->fib_table_ip6 = (u32) ~ 0; sm->fib_table_ip4 = (u32) ~ 0; } diff --git a/src/vnet/srv6/sr_steering.c b/src/vnet/srv6/sr_steering.c index 57fe21f6..cf4e81ab 100755 --- a/src/vnet/srv6/sr_steering.c +++ b/src/vnet/srv6/sr_steering.c @@ -159,8 +159,10 @@ sr_steering_policy (int is_del, ip6_address_t * bsid, u32 sr_policy_index, /* If no more SR policies or steering policies */ if (!pool_elts (sm->sr_policies) && !pool_elts (sm->steer_policies)) { - fib_table_unlock (sm->fib_table_ip6, FIB_PROTOCOL_IP6); - fib_table_unlock (sm->fib_table_ip4, FIB_PROTOCOL_IP6); + fib_table_unlock (sm->fib_table_ip6, + FIB_PROTOCOL_IP6, FIB_SOURCE_SR); + fib_table_unlock (sm->fib_table_ip4, + FIB_PROTOCOL_IP6, FIB_SOURCE_SR); sm->fib_table_ip6 = (u32) ~ 0; sm->fib_table_ip4 = (u32) ~ 0; } diff --git a/src/vpp/api/api.c b/src/vpp/api/api.c index f9c3129c..044ddb5b 100644 --- a/src/vpp/api/api.c +++ b/src/vpp/api/api.c @@ -699,8 +699,9 @@ static void VALIDATE_SW_IF_INDEX (mp); - mpls_sw_interface_enable_disable (&mpls_main, - ntohl (mp->sw_if_index), mp->enable); 
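This hunk changes the API handler to capture the return value of mpls_sw_interface_enable_disable(), which can now fail because the MPLS FIB is no longer created on demand. A minimal sketch of the resulting calling convention for an in-tree caller, assuming the declarations from mpls.h and the MPLS_FIB_DEFAULT_TABLE_ID definition in mpls_fib.h; the function name is hypothetical:

    #include <vnet/mpls/mpls.h>
    #include <vnet/fib/mpls_fib.h>

    static int
    example_enable_mpls (u32 sw_if_index)
    {
      /* The default MPLS table must now be created explicitly.
       * is_api = 0 attributes the lock to FIB_SOURCE_CLI. */
      mpls_table_create (MPLS_FIB_DEFAULT_TABLE_ID, 0);

      /* Enabling MPLS on an interface returns VNET_API_ERROR_NO_SUCH_FIB
       * if the default table does not exist. */
      return mpls_sw_interface_enable_disable (&mpls_main, sw_if_index,
                                               1 /* enable */, 0 /* is_api */);
    }

The mfib_test.c hunk earlier follows the same pattern, creating the default table before enabling MPLS on the test interface and deleting it again after the disable.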
+ rv = mpls_sw_interface_enable_disable (&mpls_main, + ntohl (mp->sw_if_index), + mp->enable, 1); BAD_SW_IF_INDEX_LABEL; REPLY_MACRO (VL_API_SW_INTERFACE_SET_MPLS_ENABLE_REPLY); diff --git a/src/vpp/api/custom_dump.c b/src/vpp/api/custom_dump.c index 1353fe28..be74b83a 100644 --- a/src/vpp/api/custom_dump.c +++ b/src/vpp/api/custom_dump.c @@ -560,9 +560,6 @@ static void *vl_api_ip_add_del_route_t_print if (mp->table_id != 0) s = format (s, "vrf %d ", ntohl (mp->table_id)); - if (mp->create_vrf_if_needed) - s = format (s, "create-vrf "); - if (mp->next_hop_weight != 1) s = format (s, "weight %d ", mp->next_hop_weight); diff --git a/test/test_dhcp.py b/test/test_dhcp.py index 6fc29182..fe97f6c9 100644 --- a/test/test_dhcp.py +++ b/test/test_dhcp.py @@ -6,7 +6,7 @@ import struct from framework import VppTestCase, VppTestRunner from vpp_neighbor import VppNeighbor -from vpp_ip_route import find_route +from vpp_ip_route import find_route, VppIpTable from util import mk_ll_addr from scapy.layers.l2 import Ether, getmacbyip, ARP @@ -34,9 +34,19 @@ class TestDHCP(VppTestCase): # create 3 pg interfaces self.create_pg_interfaces(range(4)) + self.tables = [] # pg0 and 1 are IP configured in VRF 0 and 1. # pg2 and 3 are non IP-configured in VRF 0 and 1 + table_id = 0 + for table_id in range(1, 4): + tbl4 = VppIpTable(self, table_id) + tbl4.add_vpp_config() + self.tables.append(tbl4) + tbl6 = VppIpTable(self, table_id, is_ip6=1) + tbl6.add_vpp_config() + self.tables.append(tbl6) + table_id = 0 for i in self.pg_interfaces[:2]: i.admin_up() @@ -56,11 +66,15 @@ class TestDHCP(VppTestCase): table_id += 1 def tearDown(self): - super(TestDHCP, self).tearDown() - for i in self.pg_interfaces: + for i in self.pg_interfaces[:2]: i.unconfig_ip4() i.unconfig_ip6() + + for i in self.pg_interfaces: + i.set_table_ip4(0) + i.set_table_ip6(0) i.admin_down() + super(TestDHCP, self).tearDown() def send_and_assert_no_replies(self, intf, pkts, remark): intf.add_stream(pkts) @@ -667,6 +681,8 @@ class TestDHCP(VppTestCase): "DHCP cleanup VRF 0") self.send_and_assert_no_replies(self.pg3, pkts_disc_vrf1, "DHCP cleanup VRF 1") + self.pg2.unconfig_ip4() + self.pg3.unconfig_ip4() def test_dhcp6_proxy(self): """ DHCPv6 Proxy""" @@ -1045,6 +1061,8 @@ class TestDHCP(VppTestCase): server_table_id=0, is_ipv6=1, is_add=0) + self.pg2.unconfig_ip6() + self.pg3.unconfig_ip6() def test_dhcp_client(self): """ DHCP Client""" diff --git a/test/test_gre.py b/test/test_gre.py index 1afc44fb..9046b05f 100644 --- a/test/test_gre.py +++ b/test/test_gre.py @@ -6,7 +6,7 @@ from logging import * from framework import VppTestCase, VppTestRunner from vpp_sub_interface import VppDot1QSubint from vpp_gre_interface import VppGreInterface, VppGre6Interface -from vpp_ip_route import VppIpRoute, VppRoutePath, DpoProto +from vpp_ip_route import VppIpRoute, VppRoutePath, DpoProto, VppIpTable from vpp_papi_provider import L2_VTR_OP from scapy.packet import Raw @@ -30,6 +30,9 @@ class TestGRE(VppTestCase): # create 3 pg interfaces - set one in a non-default table. 
self.create_pg_interfaces(range(3)) + + self.tbl = VppIpTable(self, 1) + self.tbl.add_vpp_config() self.pg1.set_table_ip4(1) for i in self.pg_interfaces: @@ -43,11 +46,12 @@ class TestGRE(VppTestCase): self.pg2.resolve_ndp() def tearDown(self): - super(TestGRE, self).tearDown() for i in self.pg_interfaces: i.unconfig_ip4() i.unconfig_ip6() i.admin_down() + self.pg1.set_table_ip4(0) + super(TestGRE, self).tearDown() def create_stream_ip4(self, src_if, src_ip, dst_ip): pkts = [] diff --git a/test/test_ip4.py b/test/test_ip4.py index 7a7098c3..55d16735 100644 --- a/test/test_ip4.py +++ b/test/test_ip4.py @@ -6,7 +6,8 @@ import unittest from framework import VppTestCase, VppTestRunner from vpp_sub_interface import VppSubInterface, VppDot1QSubint, VppDot1ADSubint from vpp_ip_route import VppIpRoute, VppRoutePath, VppIpMRoute, \ - VppMRoutePath, MRouteItfFlags, MRouteEntryFlags, VppMplsIpBind + VppMRoutePath, MRouteItfFlags, MRouteEntryFlags, VppMplsIpBind, \ + VppMplsTable from scapy.packet import Raw from scapy.layers.l2 import Ether, Dot1Q, ARP @@ -774,6 +775,8 @@ class TestIPLoadBalance(VppTestCase): super(TestIPLoadBalance, self).setUp() self.create_pg_interfaces(range(5)) + mpls_tbl = VppMplsTable(self, 0) + mpls_tbl.add_vpp_config() for i in self.pg_interfaces: i.admin_up() @@ -782,11 +785,11 @@ class TestIPLoadBalance(VppTestCase): i.enable_mpls() def tearDown(self): - super(TestIPLoadBalance, self).tearDown() for i in self.pg_interfaces: i.disable_mpls() i.unconfig_ip4() i.admin_down() + super(TestIPLoadBalance, self).tearDown() def send_and_expect_load_balancing(self, input, pkts, outputs): input.add_stream(pkts) @@ -966,6 +969,8 @@ class TestIPVlan0(VppTestCase): super(TestIPVlan0, self).setUp() self.create_pg_interfaces(range(2)) + mpls_tbl = VppMplsTable(self, 0) + mpls_tbl.add_vpp_config() for i in self.pg_interfaces: i.admin_up() @@ -974,11 +979,11 @@ class TestIPVlan0(VppTestCase): i.enable_mpls() def tearDown(self): - super(TestIPVlan0, self).tearDown() for i in self.pg_interfaces: i.disable_mpls() i.unconfig_ip4() i.admin_down() + super(TestIPVlan0, self).tearDown() def send_and_expect(self, input, pkts, output): input.add_stream(pkts) diff --git a/test/test_ip4_vrf_multi_instance.py b/test/test_ip4_vrf_multi_instance.py index b73ac948..5a8d6760 100644 --- a/test/test_ip4_vrf_multi_instance.py +++ b/test/test_ip4_vrf_multi_instance.py @@ -172,9 +172,10 @@ class TestIp4VrfMultiInst(VppTestCase): pg_if = self.pg_if_by_vrf_id[vrf_id][0] dest_addr = pg_if.remote_hosts[0].ip4n dest_addr_len = 24 + self.vapi.ip_table_add_del(vrf_id, is_add=1) self.vapi.ip_add_del_route( dest_addr, dest_addr_len, pg_if.local_ip4n, - table_id=vrf_id, create_vrf_if_needed=1, is_multipath=1) + table_id=vrf_id, is_multipath=1) self.logger.info("IPv4 VRF ID %d created" % vrf_id) if vrf_id not in self.vrf_list: self.vrf_list.append(vrf_id) @@ -216,6 +217,7 @@ class TestIp4VrfMultiInst(VppTestCase): self.logger.info("IPv4 VRF ID %d reset" % vrf_id) self.logger.debug(self.vapi.ppcli("show ip fib")) self.logger.debug(self.vapi.ppcli("show ip arp")) + self.vapi.ip_table_add_del(vrf_id, is_add=0) def create_stream(self, src_if, packet_sizes): """ diff --git a/test/test_ip6.py b/test/test_ip6.py index 285ce181..aad3713c 100644 --- a/test/test_ip6.py +++ b/test/test_ip6.py @@ -8,7 +8,7 @@ from vpp_sub_interface import VppSubInterface, VppDot1QSubint from vpp_pg_interface import is_ipv6_misc from vpp_ip_route import VppIpRoute, VppRoutePath, find_route, VppIpMRoute, \ VppMRoutePath, MRouteItfFlags, 
MRouteEntryFlags, VppMplsIpBind, \ - VppMplsRoute, DpoProto + VppMplsRoute, DpoProto, VppMplsTable from vpp_neighbor import find_nbr, VppNeighbor from scapy.packet import Raw @@ -1260,6 +1260,9 @@ class TestIP6LoadBalance(VppTestCase): self.create_pg_interfaces(range(5)) + mpls_tbl = VppMplsTable(self, 0) + mpls_tbl.add_vpp_config() + for i in self.pg_interfaces: i.admin_up() i.config_ip6() @@ -1267,11 +1270,11 @@ class TestIP6LoadBalance(VppTestCase): i.enable_mpls() def tearDown(self): - super(TestIP6LoadBalance, self).tearDown() for i in self.pg_interfaces: i.unconfig_ip6() i.admin_down() i.disable_mpls() + super(TestIP6LoadBalance, self).tearDown() def send_and_expect_load_balancing(self, input, pkts, outputs): input.add_stream(pkts) diff --git a/test/test_ip6_vrf_multi_instance.py b/test/test_ip6_vrf_multi_instance.py index af80b5ba..769cb2e5 100644 --- a/test/test_ip6_vrf_multi_instance.py +++ b/test/test_ip6_vrf_multi_instance.py @@ -187,9 +187,10 @@ class TestIP6VrfMultiInst(VppTestCase): pg_if = self.pg_if_by_vrf_id[vrf_id][0] dest_addr = pg_if.remote_hosts[0].ip6n dest_addr_len = 64 + self.vapi.ip_table_add_del(vrf_id, is_add=1, is_ipv6=1) self.vapi.ip_add_del_route( dest_addr, dest_addr_len, pg_if.local_ip6n, is_ipv6=1, - table_id=vrf_id, create_vrf_if_needed=1, is_multipath=1) + table_id=vrf_id, is_multipath=1) self.logger.info("IPv6 VRF ID %d created" % vrf_id) if vrf_id not in self.vrf_list: self.vrf_list.append(vrf_id) @@ -232,6 +233,7 @@ class TestIP6VrfMultiInst(VppTestCase): self.logger.info("IPv6 VRF ID %d reset" % vrf_id) self.logger.debug(self.vapi.ppcli("show ip6 fib")) self.logger.debug(self.vapi.ppcli("show ip6 neighbors")) + self.vapi.ip_table_add_del(vrf_id, is_add=0, is_ipv6=1) def create_stream(self, src_if, packet_sizes): """ diff --git a/test/test_ip_mcast.py b/test/test_ip_mcast.py index 276555d6..7cad683c 100644 --- a/test/test_ip_mcast.py +++ b/test/test_ip_mcast.py @@ -5,7 +5,7 @@ import unittest from framework import VppTestCase, VppTestRunner from vpp_sub_interface import VppSubInterface, VppDot1QSubint, VppDot1ADSubint from vpp_ip_route import VppIpMRoute, VppMRoutePath, VppMFibSignal, \ - MRouteItfFlags, MRouteEntryFlags + MRouteItfFlags, MRouteEntryFlags, VppIpTable from scapy.packet import Raw from scapy.layers.l2 import Ether @@ -44,16 +44,37 @@ class TestIPMcast(VppTestCase): super(TestIPMcast, self).setUp() # create 8 pg interfaces - self.create_pg_interfaces(range(8)) + self.create_pg_interfaces(range(9)) # setup interfaces - for i in self.pg_interfaces: + for i in self.pg_interfaces[:8]: i.admin_up() i.config_ip4() i.config_ip6() i.resolve_arp() i.resolve_ndp() + # one more in a vrf + tbl4 = VppIpTable(self, 10) + tbl4.add_vpp_config() + self.pg8.set_table_ip4(10) + self.pg8.config_ip4() + + tbl6 = VppIpTable(self, 10, is_ip6=1) + tbl6.add_vpp_config() + self.pg8.set_table_ip6(10) + self.pg8.config_ip6() + + def tearDown(self): + for i in self.pg_interfaces: + i.unconfig_ip4() + i.unconfig_ip6() + i.admin_down() + + self.pg8.set_table_ip4(0) + self.pg8.set_table_ip6(0) + super(TestIPMcast, self).tearDown() + def create_stream_ip4(self, src_if, src_ip, dst_ip, payload_size=0): pkts = [] # default to small packet sizes @@ -663,6 +684,77 @@ class TestIPMcast(VppTestCase): # route_232_1_1_1.remove_vpp_config() + def test_ip_mcast_vrf(self): + """ IP Multicast Replication in non-default table""" + + # + # An (S,G). 
+ # one accepting interface, pg0, 2 forwarding interfaces + # + route_1_1_1_1_232_1_1_1 = VppIpMRoute( + self, + "1.1.1.1", + "232.1.1.1", 64, + MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE, + [VppMRoutePath(self.pg8.sw_if_index, + MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT), + VppMRoutePath(self.pg1.sw_if_index, + MRouteItfFlags.MFIB_ITF_FLAG_FORWARD), + VppMRoutePath(self.pg2.sw_if_index, + MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)], + table_id=10) + route_1_1_1_1_232_1_1_1.add_vpp_config() + + # + # a stream that matches the route for (1.1.1.1,232.1.1.1) + # small packets + # + self.vapi.cli("clear trace") + tx = self.create_stream_ip4(self.pg8, "1.1.1.1", "232.1.1.1") + self.pg8.add_stream(tx) + + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + # We expect replications on Pg1 & 2 + self.verify_capture_ip4(self.pg1, tx) + self.verify_capture_ip4(self.pg2, tx) + + def test_ip6_mcast_vrf(self): + """ IPv6 Multicast Replication in non-default table""" + + # + # An (S,G). + # one accepting interface, pg0, 2 forwarding interfaces + # + route_2001_ff01_1 = VppIpMRoute( + self, + "2001::1", + "ff01::1", 256, + MRouteEntryFlags.MFIB_ENTRY_FLAG_NONE, + [VppMRoutePath(self.pg8.sw_if_index, + MRouteItfFlags.MFIB_ITF_FLAG_ACCEPT), + VppMRoutePath(self.pg1.sw_if_index, + MRouteItfFlags.MFIB_ITF_FLAG_FORWARD), + VppMRoutePath(self.pg2.sw_if_index, + MRouteItfFlags.MFIB_ITF_FLAG_FORWARD)], + table_id=10, + is_ip6=1) + route_2001_ff01_1.add_vpp_config() + + # + # a stream that matches the route for (2001::1, ff00::1) + # + self.vapi.cli("clear trace") + tx = self.create_stream_ip6(self.pg8, "2001::1", "ff01::1") + self.pg8.add_stream(tx) + + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + # We expect replications on Pg1, 2, + self.verify_capture_ip6(self.pg1, tx) + self.verify_capture_ip6(self.pg2, tx) if __name__ == '__main__': unittest.main(testRunner=VppTestRunner) diff --git a/test/test_mpls.py b/test/test_mpls.py index b2226a74..460a32d1 100644 --- a/test/test_mpls.py +++ b/test/test_mpls.py @@ -6,7 +6,7 @@ import socket from framework import VppTestCase, VppTestRunner from vpp_ip_route import VppIpRoute, VppRoutePath, VppMplsRoute, \ VppMplsIpBind, VppIpMRoute, VppMRoutePath, \ - MRouteItfFlags, MRouteEntryFlags, DpoProto + MRouteItfFlags, MRouteEntryFlags, DpoProto, VppIpTable, VppMplsTable from vpp_mpls_tunnel_interface import VppMPLSTunnelInterface from scapy.packet import Raw @@ -60,9 +60,23 @@ class TestMPLS(VppTestCase): # setup both interfaces # assign them different tables. 
table_id = 0 + self.tables = [] + + tbl = VppMplsTable(self, 0) + tbl.add_vpp_config() + self.tables.append(tbl) for i in self.pg_interfaces: i.admin_up() + + if table_id != 0: + tbl = VppIpTable(self, table_id) + tbl.add_vpp_config() + self.tables.append(tbl) + tbl = VppIpTable(self, table_id, is_ip6=1) + tbl.add_vpp_config() + self.tables.append(tbl) + i.set_table_ip4(table_id) i.set_table_ip6(table_id) i.config_ip4() @@ -73,12 +87,15 @@ class TestMPLS(VppTestCase): table_id += 1 def tearDown(self): - super(TestMPLS, self).tearDown() for i in self.pg_interfaces: i.unconfig_ip4() i.unconfig_ip6() i.ip6_disable() + i.set_table_ip4(0) + i.set_table_ip6(0) + i.disable_mpls() i.admin_down() + super(TestMPLS, self).tearDown() # the default of 64 matches the IP packet TTL default def create_stream_labelled_ip4( @@ -1092,6 +1109,9 @@ class TestMPLSDisabled(VppTestCase): # create 2 pg interfaces self.create_pg_interfaces(range(2)) + self.tbl = VppMplsTable(self, 0) + self.tbl.add_vpp_config() + # PG0 is MPLS enalbed self.pg0.admin_up() self.pg0.config_ip4() @@ -1102,11 +1122,13 @@ class TestMPLSDisabled(VppTestCase): self.pg1.admin_up() def tearDown(self): - super(TestMPLSDisabled, self).tearDown() for i in self.pg_interfaces: i.unconfig_ip4() i.admin_down() + self.pg0.disable_mpls() + super(TestMPLSDisabled, self).tearDown() + def send_and_assert_no_replies(self, intf, pkts, remark): intf.add_stream(pkts) self.pg_enable_capture(self.pg_interfaces) @@ -1174,6 +1196,13 @@ class TestMPLSPIC(VppTestCase): # create 2 pg interfaces self.create_pg_interfaces(range(4)) + mpls_tbl = VppMplsTable(self, 0) + mpls_tbl.add_vpp_config() + tbl4 = VppIpTable(self, 1) + tbl4.add_vpp_config() + tbl6 = VppIpTable(self, 1, is_ip6=1) + tbl6.add_vpp_config() + # core links self.pg0.admin_up() self.pg0.config_ip4() @@ -1201,14 +1230,15 @@ class TestMPLSPIC(VppTestCase): self.pg3.resolve_ndp() def tearDown(self): - super(TestMPLSPIC, self).tearDown() self.pg0.disable_mpls() + self.pg1.disable_mpls() for i in self.pg_interfaces: i.unconfig_ip4() i.unconfig_ip6() i.set_table_ip4(0) i.set_table_ip6(0) i.admin_down() + super(TestMPLSPIC, self).tearDown() def test_mpls_ibgp_pic(self): """ MPLS iBGP PIC edge convergence @@ -1534,24 +1564,30 @@ class TestMPLSL2(VppTestCase): # create 2 pg interfaces self.create_pg_interfaces(range(2)) + # create the default MPLS table + self.tables = [] + tbl = VppMplsTable(self, 0) + tbl.add_vpp_config() + self.tables.append(tbl) + # use pg0 as the core facing interface self.pg0.admin_up() self.pg0.config_ip4() self.pg0.resolve_arp() self.pg0.enable_mpls() - # use the other 2 for customer facg L2 links + # use the other 2 for customer facing L2 links for i in self.pg_interfaces[1:]: i.admin_up() def tearDown(self): - super(TestMPLSL2, self).tearDown() for i in self.pg_interfaces[1:]: i.admin_down() self.pg0.disable_mpls() self.pg0.unconfig_ip4() self.pg0.admin_down() + super(TestMPLSL2, self).tearDown() def verify_capture_tunneled_ethernet(self, capture, sent, mpls_labels, ttl=255, top=None): diff --git a/test/test_nat.py b/test/test_nat.py index 1f2d17ab..73e9e217 100644 --- a/test/test_nat.py +++ b/test/test_nat.py @@ -549,6 +549,8 @@ class TestNAT44(MethodHolder): cls.pg0.configure_ipv4_neighbors() cls.overlapping_interfaces = list(list(cls.pg_interfaces[4:7])) + cls.vapi.ip_table_add_del(10, is_add=1) + cls.vapi.ip_table_add_del(20, is_add=1) cls.pg4._local_ip4 = "172.16.255.1" cls.pg4._local_ip4n = socket.inet_pton(socket.AF_INET, i.local_ip4) @@ -1797,6 +1799,8 @@ class 
TestNAT44(MethodHolder): self.pg0.unconfig_ip4() self.pg1.unconfig_ip4() + self.vapi.ip_table_add_del(vrf_id1, is_add=1) + self.vapi.ip_table_add_del(vrf_id2, is_add=1) self.pg0.set_table_ip4(vrf_id1) self.pg1.set_table_ip4(vrf_id2) self.pg0.config_ip4() @@ -1825,6 +1829,13 @@ class TestNAT44(MethodHolder): capture = self.pg2.get_capture(len(pkts)) self.verify_capture_out(capture, nat_ip2) + self.pg0.unconfig_ip4() + self.pg1.unconfig_ip4() + self.pg0.set_table_ip4(0) + self.pg1.set_table_ip4(0) + self.vapi.ip_table_add_del(vrf_id1, is_add=0) + self.vapi.ip_table_add_del(vrf_id2, is_add=0) + def test_vrf_feature_independent(self): """ NAT44 tenant VRF independent address pool mode """ @@ -3042,6 +3053,8 @@ class TestNAT64(MethodHolder): cls.ip6_interfaces.append(cls.pg_interfaces[2]) cls.ip4_interfaces = list(cls.pg_interfaces[1:2]) + cls.vapi.ip_table_add_del(cls.vrf1_id, is_add=1, is_ipv6=1) + cls.pg_interfaces[2].set_table_ip6(cls.vrf1_id) cls.pg0.generate_remote_hosts(2) diff --git a/test/test_neighbor.py b/test/test_neighbor.py index 1c7cc267..68dde2fb 100644 --- a/test/test_neighbor.py +++ b/test/test_neighbor.py @@ -5,7 +5,8 @@ from socket import AF_INET, AF_INET6, inet_pton from framework import VppTestCase, VppTestRunner from vpp_neighbor import VppNeighbor, find_nbr -from vpp_ip_route import VppIpRoute, VppRoutePath, find_route +from vpp_ip_route import VppIpRoute, VppRoutePath, find_route, \ + VppIpTable from scapy.packet import Raw from scapy.layers.l2 import Ether, ARP, Dot1Q @@ -39,11 +40,13 @@ class ARPTestCase(VppTestCase): self.pg1.config_ip6() # pg3 in a different VRF + self.tbl = VppIpTable(self, 1) + self.tbl.add_vpp_config() + self.pg3.set_table_ip4(1) self.pg3.config_ip4() def tearDown(self): - super(ARPTestCase, self).tearDown() self.pg0.unconfig_ip4() self.pg0.unconfig_ip6() @@ -51,10 +54,13 @@ class ARPTestCase(VppTestCase): self.pg1.unconfig_ip6() self.pg3.unconfig_ip4() + self.pg3.set_table_ip4(0) for i in self.pg_interfaces: i.admin_down() + super(ARPTestCase, self).tearDown() + def verify_arp_req(self, rx, smac, sip, dip): ether = rx[Ether] self.assertEqual(ether.dst, "ff:ff:ff:ff:ff:ff") @@ -1080,6 +1086,62 @@ class ARPTestCase(VppTestCase): self.pg0.remote_ip4, self.pg1.remote_hosts[1].ip4) + def test_arp_static(self): + """ ARP Static""" + self.pg2.generate_remote_hosts(3) + + # + # Add a static ARP entry + # + static_arp = VppNeighbor(self, + self.pg2.sw_if_index, + self.pg2.remote_hosts[1].mac, + self.pg2.remote_hosts[1].ip4, + is_static=1) + static_arp.add_vpp_config() + + # + # Add the connected prefix to the interface + # + self.pg2.config_ip4() + + # + # We should now find the adj-fib + # + self.assertTrue(find_nbr(self, + self.pg2.sw_if_index, + self.pg2.remote_hosts[1].ip4, + is_static=1)) + self.assertTrue(find_route(self, + self.pg2.remote_hosts[1].ip4, + 32)) + + # + # remove the connected + # + self.pg2.unconfig_ip4() + + # + # put the interface into table 1 + # + self.pg2.set_table_ip4(1) + + # + # configure the same connected and expect to find the + # adj fib in the new table + # + self.pg2.config_ip4() + self.assertTrue(find_route(self, + self.pg2.remote_hosts[1].ip4, + 32, + table_id=1)) + + # + # clean-up + # + self.pg2.unconfig_ip4() + self.pg2.set_table_ip4(0) + if __name__ == '__main__': unittest.main(testRunner=VppTestRunner) diff --git a/test/vpp_ip_route.py b/test/vpp_ip_route.py index 2c489e3c..b7993793 100644 --- a/test/vpp_ip_route.py +++ b/test/vpp_ip_route.py @@ -54,6 +54,46 @@ def find_route(test, ip_addr, len, table_id=0, 
inet=AF_INET):
     return False
+
+class VppIpTable(VppObject):
+
+    def __init__(self,
+                 test,
+                 table_id,
+                 is_ip6=0):
+        self._test = test
+        self.table_id = table_id
+        self.is_ip6 = is_ip6
+
+    def add_vpp_config(self):
+        self._test.vapi.ip_table_add_del(
+            self.table_id,
+            is_ipv6=self.is_ip6,
+            is_add=1)
+        self._test.registry.register(self, self._test.logger)
+
+    def remove_vpp_config(self):
+        self._test.vapi.ip_table_add_del(
+            self.table_id,
+            is_ipv6=self.is_ip6,
+            is_add=0)
+
+    def query_vpp_config(self):
+        # find the default route
+        return find_route(self._test,
+                          "::" if self.is_ip6 else "0.0.0.0",
+                          0,
+                          self.table_id,
+                          inet=AF_INET6 if self.is_ip6 == 1 else AF_INET)
+
+    def __str__(self):
+        return self.object_id()
+
+    def object_id(self):
+        return ("table-%s-%d" %
+                ("v6" if self.is_ip6 == 1 else "v4",
+                 self.table_id))
+
+
 class VppRoutePath(object):
 
     def __init__(
@@ -391,6 +431,39 @@ class VppMplsIpBind(VppObject):
                           self.dest_addr_len))
+
+class VppMplsTable(VppObject):
+
+    def __init__(self,
+                 test,
+                 table_id):
+        self._test = test
+        self.table_id = table_id
+
+    def add_vpp_config(self):
+        self._test.vapi.mpls_table_add_del(
+            self.table_id,
+            is_add=1)
+        self._test.registry.register(self, self._test.logger)
+
+    def remove_vpp_config(self):
+        self._test.vapi.mpls_table_add_del(
+            self.table_id,
+            is_add=0)
+
+    def query_vpp_config(self):
+        # find the default route
+        dump = self._test.vapi.mpls_fib_dump()
+        if len(dump):
+            return True
+        return False
+
+    def __str__(self):
+        return self.object_id()
+
+    def object_id(self):
+        return ("table-mpls-%d" % (self.table_id))
+
+
 class VppMplsRoute(VppObject):
     """ MPLS Route/LSP
diff --git a/test/vpp_papi_provider.py b/test/vpp_papi_provider.py
index b70da026..519aff80 100644
--- a/test/vpp_papi_provider.py
+++ b/test/vpp_papi_provider.py
@@ -652,6 +652,24 @@ class VppPapiProvider(object):
         return self.api(self.papi.delete_loopback,
                         {'sw_if_index': sw_if_index, })
 
+    def ip_table_add_del(self,
+                         table_id,
+                         is_add=1,
+                         is_ipv6=0):
+        """
+
+        :param table_id
+        :param is_add: (Default value = 1)
+        :param is_ipv6: (Default value = 0)
+
+        """
+
+        return self.api(
+            self.papi.ip_table_add_del,
+            {'table_id': table_id,
+             'is_add': is_add,
+             'is_ipv6': is_ipv6})
+
     def ip_add_del_route(
             self,
             dst_address,
@@ -664,7 +682,6 @@ class VppPapiProvider(object):
             next_hop_n_out_labels=0,
             next_hop_out_label_stack=[],
             next_hop_via_label=MPLS_LABEL_INVALID,
-            create_vrf_if_needed=0,
             is_resolve_host=0,
             is_resolve_attached=0,
             classify_table_index=0xFFFFFFFF,
@@ -687,7 +704,6 @@ class VppPapiProvider(object):
         :param vrf_id: (Default value = 0)
         :param lookup_in_vrf: (Default value = 0)
         :param classify_table_index: (Default value = 0xFFFFFFFF)
-        :param create_vrf_if_needed: (Default value = 0)
         :param is_add: (Default value = 1)
         :param is_drop: (Default value = 0)
         :param is_ipv6: (Default value = 0)
@@ -707,7 +723,6 @@ class VppPapiProvider(object):
             'table_id': table_id,
             'classify_table_index': classify_table_index,
             'next_hop_table_id': next_hop_table_id,
-            'create_vrf_if_needed': create_vrf_if_needed,
             'is_add': is_add,
             'is_drop': is_drop,
             'is_unreach': is_unreach,
@@ -912,6 +927,22 @@ class VppPapiProvider(object):
     def mpls_fib_dump(self):
         return self.api(self.papi.mpls_fib_dump, {})
 
+    def mpls_table_add_del(
+            self,
+            table_id,
+            is_add=1):
+        """
+
+        :param table_id
+        :param is_add: (Default value = 1)
+
+        """
+
+        return self.api(
+            self.papi.mpls_table_add_del,
+            {'mt_table_id': table_id,
+             'mt_is_add': is_add})
+
     def mpls_route_add_del(
             self,
             label,
@@ -925,7 +956,6 @@
class VppPapiProvider(object): next_hop_n_out_labels=0, next_hop_out_label_stack=[], next_hop_via_label=MPLS_LABEL_INVALID, - create_vrf_if_needed=0, is_resolve_host=0, is_resolve_attached=0, is_interface_rx=0, @@ -947,7 +977,6 @@ class VppPapiProvider(object): :param vrf_id: (Default value = 0) :param lookup_in_vrf: (Default value = 0) :param classify_table_index: (Default value = 0xFFFFFFFF) - :param create_vrf_if_needed: (Default value = 0) :param is_add: (Default value = 1) :param is_drop: (Default value = 0) :param is_ipv6: (Default value = 0) @@ -968,7 +997,6 @@ class VppPapiProvider(object): 'mr_eos': eos, 'mr_table_id': table_id, 'mr_classify_table_index': classify_table_index, - 'mr_create_table_if_needed': create_vrf_if_needed, 'mr_is_add': is_add, 'mr_is_classify': is_classify, 'mr_is_multipath': is_multipath, @@ -994,7 +1022,6 @@ class VppPapiProvider(object): table_id=0, ip_table_id=0, is_ip4=1, - create_vrf_if_needed=0, is_bind=1): """ """ @@ -1003,7 +1030,6 @@ class VppPapiProvider(object): {'mb_mpls_table_id': table_id, 'mb_label': label, 'mb_ip_table_id': ip_table_id, - 'mb_create_table_if_needed': create_vrf_if_needed, 'mb_is_bind': is_bind, 'mb_is_ip4': is_ip4, 'mb_address_length': dst_address_length, @@ -1020,7 +1046,6 @@ class VppPapiProvider(object): next_hop_n_out_labels=0, next_hop_out_label_stack=[], next_hop_via_label=MPLS_LABEL_INVALID, - create_vrf_if_needed=0, is_add=1, l2_only=0, is_multicast=0): @@ -1034,7 +1059,6 @@ class VppPapiProvider(object): :param vrf_id: (Default value = 0) :param lookup_in_vrf: (Default value = 0) :param classify_table_index: (Default value = 0xFFFFFFFF) - :param create_vrf_if_needed: (Default value = 0) :param is_add: (Default value = 1) :param is_drop: (Default value = 0) :param is_ipv6: (Default value = 0) @@ -1844,7 +1868,6 @@ class VppPapiProvider(object): i_flags, rpf_id=0, table_id=0, - create_vrf_if_needed=0, is_add=1, is_ipv6=0, is_local=0): @@ -1857,7 +1880,6 @@ class VppPapiProvider(object): 'itf_flags': i_flags, 'table_id': table_id, 'rpf_id': rpf_id, - 'create_vrf_if_needed': create_vrf_if_needed, 'is_add': is_add, 'is_ipv6': is_ipv6, 'is_local': is_local, -- cgit 1.2.3-korg From 879ace3d3b238ec8db80f52ebfd556aa6f12b4c7 Mon Sep 17 00:00:00 2001 From: Chris Luke Date: Tue, 26 Sep 2017 13:15:16 -0400 Subject: Various fixes for issues found by Coverity (VPP-972) 174267: Revisit this string termination issue 174816: Add check for NULL when trace is enabled 177211: Add notation that mutex is not required here 177117: Added check for log2_page_size == 0 and returns an error if so 163697,163698: Added missing sw_if_index validation Change-Id: I5a76fcf6505c785bfb3269e353360031c6a0fd0f Signed-off-by: Chris Luke --- src/uri/sock_test_server.c | 8 ++++++-- src/vnet/srv6/sr_api.c | 10 ++++++++++ src/vnet/tcp/tcp_input.c | 3 ++- src/vpp-api/vapi/vapi.c | 1 + src/vppinfra/linux/mem.c | 6 ++++++ 5 files changed, 25 insertions(+), 3 deletions(-) (limited to 'src/vnet/srv6') diff --git a/src/uri/sock_test_server.c b/src/uri/sock_test_server.c index 29adea25..35046aa0 100644 --- a/src/uri/sock_test_server.c +++ b/src/uri/sock_test_server.c @@ -514,7 +514,7 @@ main (int argc, char **argv) continue; } - else if (((char *) conn->buf)[0] != 0) + else if (isascii (conn->buf[0])) { // If it looks vaguely like a string, make sure it's terminated ((char *) conn->buf)[rx_bytes < @@ -536,8 +536,12 @@ main (int argc, char **argv) continue; } - if (isascii (conn->buf[0]) && strlen ((const char *) conn->buf)) + if (isascii (conn->buf[0])) { + // If it 
looks vaguely like a string, make sure it's terminated
+          ((char *) conn->buf)[rx_bytes <
+                               conn->buf_size ? rx_bytes :
+                               conn->buf_size - 1] = 0;
           if (xtra)
             fprintf (stderr,
                      "ERROR: FIFO not drained in previous test!\n"
diff --git a/src/vnet/srv6/sr_api.c b/src/vnet/srv6/sr_api.c
index 925b50a1..623f672a 100644
--- a/src/vnet/srv6/sr_api.c
+++ b/src/vnet/srv6/sr_api.c
@@ -60,6 +60,9 @@ static void vl_api_sr_localsid_add_del_t_handler
    * char end_psp, u8 behavior, u32 sw_if_index, u32 vlan_index, u32 fib_table,
    * ip46_address_t *nh_addr, void *ls_plugin_mem)
    */
+
+  VALIDATE_SW_IF_INDEX (mp);
+
   rv = sr_cli_localsid (mp->is_del,
                         (ip6_address_t *) & mp->localsid_addr,
                         mp->end_psp,
@@ -69,6 +72,8 @@ static void vl_api_sr_localsid_add_del_t_handler
                         ntohl (mp->fib_table),
                         (ip46_address_t *) & mp->nh_addr, NULL);
+  BAD_SW_IF_INDEX_LABEL;
+
   REPLY_MACRO (VL_API_SR_LOCALSID_ADD_DEL_REPLY);
 }
@@ -158,6 +163,9 @@ static void vl_api_sr_steering_add_del_t_handler
    * u32 table_id, ip46_address_t *prefix, u32 mask_width, u32 sw_if_index,
    * u8 traffic_type)
    */
+
+  VALIDATE_SW_IF_INDEX (mp);
+
   rv = sr_steering_policy (mp->is_del,
                            (ip6_address_t *) & mp->bsid_addr,
                            ntohl (mp->sr_policy_index),
@@ -166,6 +174,8 @@ static void vl_api_sr_steering_add_del_t_handler
                            ntohl (mp->mask_width),
                            ntohl (mp->sw_if_index), mp->traffic_type);
+  BAD_SW_IF_INDEX_LABEL;
+
   REPLY_MACRO (VL_API_SR_STEERING_ADD_DEL_REPLY);
 }
diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c
index 62dcdc5e..63d6fd87 100644
--- a/src/vnet/tcp/tcp_input.c
+++ b/src/vnet/tcp/tcp_input.c
@@ -2163,7 +2163,8 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
         drop:
           b0->error = error0 ? node->errors[error0] : 0;
 
-          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+          if (PREDICT_FALSE
+              ((b0->flags & VLIB_BUFFER_IS_TRACED) && tcp0 != 0))
             {
               t0 = vlib_add_trace (vm, node, b0, sizeof (*t0));
               clib_memcpy (&t0->tcp_header, tcp0, sizeof (t0->tcp_header));
diff --git a/src/vpp-api/vapi/vapi.c b/src/vpp-api/vapi/vapi.c
index 59415e03..3150d2b4 100644
--- a/src/vpp-api/vapi/vapi.c
+++ b/src/vpp-api/vapi/vapi.c
@@ -305,6 +305,7 @@ vapi_connect (vapi_ctx_t ctx, const char *name,
         }
       ctx->requests = tmp;
       memset (ctx->requests, 0, size);
+      /* coverity[MISSING_LOCK] - 177211 requests_mutex is not needed here */
      ctx->requests_start = ctx->requests_count = 0;
      if (chroot_prefix)
        {
diff --git a/src/vppinfra/linux/mem.c b/src/vppinfra/linux/mem.c
index df46763a..2d8f593d 100644
--- a/src/vppinfra/linux/mem.c
+++ b/src/vppinfra/linux/mem.c
@@ -132,6 +132,12 @@ clib_mem_vm_ext_alloc (clib_mem_vm_alloc_t * a)
             }
         }
       log2_page_size = clib_mem_vm_get_log2_page_size (fd);
+
+      if (log2_page_size == 0)
+        {
+          err = clib_error_return_unix (0, "cannot determine page size");
+          goto error;
+        }
     }
   else                          /* not CLIB_MEM_VM_F_SHARED */
     {
-- cgit 1.2.3-korg
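
Taken together, the changes above replace the old create_vrf_if_needed / mb_create_table_if_needed behaviour with an explicit table lifecycle: a FIB table is created before anything binds or routes against it, and it is deleted once, after its last user is gone. The following sketch shows that pattern for a unit test, using the VppIpTable and VppMplsTable helpers added in test/vpp_ip_route.py; the test class name, interface usage and VRF id are illustrative only and are not taken from the patches above.

    from framework import VppTestCase
    from vpp_ip_route import VppIpTable, VppMplsTable


    class TestExplicitTables(VppTestCase):
        """ Sketch: tables are created up front and torn down symmetrically """

        def setUp(self):
            super(TestExplicitTables, self).setUp()
            self.create_pg_interfaces(range(2))

            # the tables must exist before an interface or route refers to them
            self.tables = []
            for tbl in (VppIpTable(self, 1),            # IPv4 VRF 1
                        VppIpTable(self, 1, is_ip6=1),  # IPv6 VRF 1
                        VppMplsTable(self, 0)):         # default MPLS table
                tbl.add_vpp_config()
                self.tables.append(tbl)

            self.pg0.admin_up()
            self.pg0.set_table_ip4(1)   # bind only after the table exists
            self.pg0.config_ip4()

        def tearDown(self):
            # unconfigure and rebind to table 0 first, so the table
            # objects registered above can be removed cleanly
            self.pg0.unconfig_ip4()
            self.pg0.set_table_ip4(0)
            self.pg0.admin_down()
            super(TestExplicitTables, self).tearDown()

This is the same ordering the edits above impose on the existing tests: interfaces are unconfigured and returned to table 0 before super().tearDown() runs.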
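
Where a test drives the binary API directly, as test_ip4_vrf_multi_instance.py and test_nat.py now do, the same lifecycle becomes a pair of explicit ip_table_add_del calls around the route programming. A sketch of that call sequence inside a VppTestCase method; the table id, prefix and next hop below are placeholders, not values from the patches:

    import socket

    # create the IPv4 table explicitly before any route refers to it
    self.vapi.ip_table_add_del(10, is_add=1)

    dest_addr = socket.inet_pton(socket.AF_INET, "10.0.0.0")
    dest_addr_len = 24
    next_hop = socket.inet_pton(socket.AF_INET, "192.168.1.1")

    # routes can now target table 10; there is no create_vrf_if_needed fallback
    self.vapi.ip_add_del_route(dest_addr, dest_addr_len, next_hop,
                               table_id=10, is_multipath=1)

    # ... exercise the datapath ...

    # tear down in reverse: routes out of the table first, then the table itself
    self.vapi.ip_add_del_route(dest_addr, dest_addr_len, next_hop,
                               table_id=10, is_add=0)
    self.vapi.ip_table_add_del(10, is_add=0)

The MPLS side is symmetrical: mpls_table_add_del(0, is_add=1) creates the default MPLS table, which the new mpls_table_create() comment notes is no longer created implicitly; it can be added many times through the API but needs to be deleted only once.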