author     Tetsuya Murakami <tetsuya.mrk@gmail.com>    2020-01-09 14:22:04 -0800
committer  Ole Trøan <otroan@employees.org>            2020-01-14 18:16:21 +0000
commit     57584d99dd8a8524db90c67c88525d58879d9b8e
tree       7e9429d91c753a2f61603bf40cb5f449fe7184f9 /src/plugins/srv6-mobile
parent     ba4a5bf884516769211e75d11884a1e458323a21
srv6-mobile: add GTP4.DT and GTP6.DT functions to the SRv6 Mobile plugin

Type: feature
Signed-off-by: Tetsuya Murakami <tetsuya.mrk@gmail.com>
Change-Id: I573a0c27bd463dd56a4d11b940941b8a8c826e08
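The new behaviors are driven through the existing SR policy and localsid CLIs and are exercised by the gtp4_dt and gtp6_dt test cases added to extra/runner.py in this change. As a minimal sketch, the snippet below mirrors how the new test_gtp6_dt case programs the End.M.GTP6.DT46 behavior; `c1` is assumed to be a started Container instance from runner.py with its pg0/pg1 packet-generator interfaces already created.

```python
# Sketch based on the new test_gtp6_dt case in extra/runner.py (assumed
# helpers: Container.vppctl_exec and Container.set_ip_pgroute from that script).
c1.vppctl_exec("set sr encaps source addr A1::1")

# Decapsulate GTP-U over UDP/IPv6 destined to D::/64; inner packets are
# looked up in fib-table 0 (local-fib-table 0, per the test's configuration).
c1.vppctl_exec(
    "sr localsid prefix D::/64 behavior end.m.gtp6.dt46 "
    "fib-table 0 local-fib-table 0")

# Forward the decapsulated traffic out of the second packet-generator port.
c1.vppctl_exec("set ip neighbor pg1 1.0.0.1 aa:bb:cc:dd:ee:22")
c1.set_ip_pgroute("pg1", "1.0.0.1", "172.200.0.1/32")
```

The GTP4.DT counterpart is set up analogously in test_gtp4_dt through an SR policy (`sr policy add bsid D5:: behavior t.m.gtp4.dt4 fib-table 0`) plus an `sr steer l3` entry.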
Diffstat (limited to 'src/plugins/srv6-mobile')
-rw-r--r--  src/plugins/srv6-mobile/CMakeLists.txt                  2
-rw-r--r--  src/plugins/srv6-mobile/README.md                     178
-rw-r--r--  src/plugins/srv6-mobile/extra/README.md               209
-rwxr-xr-x  src/plugins/srv6-mobile/extra/runner.py               477
-rw-r--r--  src/plugins/srv6-mobile/extra/runner_doc.md           105
-rw-r--r--  src/plugins/srv6-mobile/extra/topo-init.png           bin 0 -> 121503 bytes
-rw-r--r--  src/plugins/srv6-mobile/extra/topo-test_gtp4d.png     bin 0 -> 138014 bytes
-rw-r--r--  src/plugins/srv6-mobile/extra/topo-test_gtp6.png      bin 0 -> 131731 bytes
-rw-r--r--  src/plugins/srv6-mobile/extra/topo-test_gtp6d.png     bin 0 -> 127820 bytes
-rw-r--r--  src/plugins/srv6-mobile/extra/topo-test_gtp6ip6.png   bin 0 -> 116633 bytes
-rw-r--r--  src/plugins/srv6-mobile/gtp4_dt.c                     200
-rw-r--r--  src/plugins/srv6-mobile/gtp6_dt.c                     194
-rw-r--r--  src/plugins/srv6-mobile/mobile.h                       71
-rw-r--r--  src/plugins/srv6-mobile/mobile_plugin_doc.md          139
-rw-r--r--  src/plugins/srv6-mobile/node.c                        864
15 files changed, 2110 insertions, 329 deletions
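Both new DT test cases build a GTP-U encapsulated ICMP packet with scapy, send it on one packet-generator interface, and capture the decapsulated inner packet on the other. The following is a hedged, self-contained reconstruction of the packet used by test_gtp6_dt; the addresses, ports, and TEID come from the test case in the diff below, while the explicit scapy import paths are an assumption made here for readability.

```python
#!/usr/bin/env python3
# Reconstruction of the GTP-U test packet sent by test_gtp6_dt in
# extra/runner.py; addresses, ports, and TEID are taken from that test.
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP, ICMP
from scapy.layers.inet6 import IPv6
from scapy.contrib.gtp import GTP_U_Header

pkt = (
    Ether(src="aa:bb:cc:dd:ee:02", dst="aa:bb:cc:dd:ee:01") /
    IPv6(src="C::2", dst="D::2") /              # outer transport; dst matches the D::/64 localsid
    UDP(sport=2152, dport=2152) /               # GTP-U port
    GTP_U_Header(gtp_type="g_pdu", teid=200) /
    IP(src="172.100.0.1", dst="172.200.0.1") /  # inner packet looked up after decapsulation
    ICMP()
)

pkt.show2()  # dump the fully built packet, as the test does before sending
```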
diff --git a/src/plugins/srv6-mobile/CMakeLists.txt b/src/plugins/srv6-mobile/CMakeLists.txt index ebd03f7fbf4..5a9945c2e4f 100644 --- a/src/plugins/srv6-mobile/CMakeLists.txt +++ b/src/plugins/srv6-mobile/CMakeLists.txt @@ -15,9 +15,11 @@ add_vpp_plugin(srv6mobile SOURCES gtp4_e.c gtp4_d.c + gtp4_dt.c gtp6_e.c gtp6_d.c gtp6_d_di.c + gtp6_dt.c node.c INSTALL_HEADERS diff --git a/src/plugins/srv6-mobile/README.md b/src/plugins/srv6-mobile/README.md deleted file mode 100644 index 18628806cbc..00000000000 --- a/src/plugins/srv6-mobile/README.md +++ /dev/null @@ -1,178 +0,0 @@ -SRv6 Mobile User Plane Plugin for VPP -======================== - -## Introduction - -This plugin module can provide the stateless mobile user plane protocols translation between GTP-U and SRv6. -The functions of the translation take advantage of SRv6 network programmability. -[SRv6 Mobile User Plane](https://tools.ietf.org/html/draft-ietf-dmm-srv6-mobile-uplane) defines the user plane protocol using SRv6 -including following stateless translation functions: - -- **T.M.GTP4.D:** - GTP-U over UDP/IPv4 -> SRv6 -- **End.M.GTP4.E:** - SRv6 -> GTP-U over UDP/IPv4 -- **End.M.GTP6.D:** - GTP-U over UDP/IPv6 -> SRv6 -- **End.M.GTP6.E:** - SRv6 -> GTP-U over UDP/IPv6 - -These functions benefit user plane(overlay) to be able to utilize data plane(underlay) networks properly. And also it benefits -data plane to be able to handle user plane in routing paradigm. - -## Getting started -To play with SRv6 Mobile User Plane on VPP, you need to install following packages: - - docker - python3 - pip3 - - Python packages (use pip): - docker - scapy - jinja2 - - -### Quick-start - -1. Build up the docker container image as following: - -``` -$ git clone https://github.com/filvarga/srv6-mobile.git -$ cd ./srv6-mobile/extras/ietf105 -$ ./runner.py infra build - -$ docker images -REPOSITORY TAG IMAGE ID CREATED SIZE -ietf105-image latest 577e786b7ec6 2 days ago 5.57GB -ubuntu 18.04 4c108a37151f 4 weeks ago 64.2MB - -``` - -The runner script [runner.py](test/runner.py) has features to automate configurations and procedures for the test. - -2. Instantiate test Scenario - -Let's try following command to instantiate a topology: - -``` -$ ./runner.py infra start -``` - -This command instantiates 4 VPP containers with following topology: - -![Topology Diagram](test/topo-init.png) - -You can check the instantiated docker instances with "docker ps". - - -``` -$ docker ps -CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -44cb98994500 ietf105-image "/bin/sh -c 'vpp -c …" About a minute ago Up About a minute hck-vpp-4 -6d65fff8aee9 ietf105-image "/bin/sh -c 'vpp -c …" About a minute ago Up About a minute hck-vpp-3 -ad123b516b24 ietf105-image "/bin/sh -c 'vpp -c …" About a minute ago Up About a minute hck-vpp-2 -5efed405b96a ietf105-image "/bin/sh -c 'vpp -c …" About a minute ago Up About a minute hck-vpp-1 - -``` - -You can login to and configure each instantiated container. - -``` -$ ./runner.py cmd vppctl 0 - -Verified image: None -connecting to: hck-vpp-1 - _______ _ _ _____ ___ - __/ __/ _ \ (_)__ | | / / _ \/ _ \ - _/ _// // / / / _ \ | |/ / ___/ ___/ - /_/ /____(_)_/\___/ |___/_/ /_/ - -vpp# -``` - -## Test Scenarios -### SRv6 Drop-in between GTP-U tunnel - -This test scenario introduces SRv6 path between GTP-U tunnel transparently. A GTP-U packet sent out from one end to another is translated to SRv6 and then back to GTP-U. All GTP-U tunnel identifiers are preserved in IPv6 header and SRH. 
- - -#### GTP-U over UDP/IPv4 case - -This case uses SRv6 end functions, T.M.GTP4.D and End.M.GTP4.E. - -![Topology Diagram](test/topo-test_gtp4d.png) - -VPP1 is configured with "T.M.GTP4.D", and VPP4 is configured with "End.M.GTP4.E". Others are configured with "End". The packet generator sends a GTP-U packet over UDP/IPv4 toward the packet capture. VPP1 translates it to SRv6 toward D4::TEID with SR policy <D2::, D3::> in SRH. VPP4 translates the SRv6 packet to the original GTP-U packet and send out to the packet capture. - -To start this case with IPv4 payload over GTP-U, you can run: - -``` -$ ./runner.py test tmap -``` - -If you want to use IPv6 payload instead of IPv4, you can run: - -``` -$ ./runner.py test tmap_ipv6 -``` - - -#### GTP-U over UDP/IPv6 case - -This case uses SRv6 end functions, End.M.GTP6.D.Di and End.M.GTP6.E. - -![Topology Diagram](test/topo-test_gtp6d.png) - -VPP1 is configured with "End.M.GTP6.D.Di", and VPP4 is configured with "End.M.GTP4.E". Others are configured with "End". The packet generator sends a GTP-U packet over UDP/IPv6 toward D:: of the packet capture. VPP1 translates it to SRv6 toward D:: with SR policy <D2::, D3::, D4::TEID> in SRH. VPP4 translates the SRv6 packet to the original GTP-U packet and send out to the packet capture. - -To start this case with IPv4 payload over GTP-U, you can run: - -``` -$ ./runner.py test gtp6_drop_in -``` - -If you want to use IPv6 payload instead of IPv4, you can run: - -``` -$ ./runner.py test gtp6_drop_in_ipv6 -``` - - -### GTP-U to SRv6 - -This test scenario demonstrates GTP-U to SRv6 translation. A GTP-U packet sent out from one end to another is translated to SRv6. - -#### GTP-U over UDP/IPv6 case - -##### IPv4 payload - -This case uses SRv6 end functions, End.M.GTP6.D and End.DT4. - -![Topology Diagram](test/topo-test_gtp6.png) - -VPP1 is configured with "End.M.GTP6.D", and VPP4 is configured with "End.DT4". Others are configured with "End". The packet generator sends a GTP-U packet over UDP/IPv6 toward D::2. VPP1 translates it to SRv6 toward the IPv6 destination consists of D4:: and TEID of GTP-U with SR policy <D2::, D3::> in SRH. VPP4 decapsulates the SRv6 packet and lookup the table for the inner IPv4 packet and send out to the packet capture. - -To start this case, you can run: - -``` -$ ./runner.py test gtp6 -``` - -##### IPv6 payload - -This case uses SRv6 end functions, End.M.GTP6.D and End.DT6. - - -![Topology Diagram](test/topo-test_gtp6ip6.png) - -The configurations are same with IPv4 payload case, except D4:: is configured as "End.DT6" in VPP4. VPP4 decapsulates the SRv6 packet and lookup the table for the inner IPv6 packet and send out to the packet capture. - -If you want to use IPv6 payload instead of IPv4, you can run: - -``` -$ ./runner.py test gtp6_ipv6 -``` - -## More information -TBD diff --git a/src/plugins/srv6-mobile/extra/README.md b/src/plugins/srv6-mobile/extra/README.md index 3b24dea6fd6..ed6bb40f7ff 100644 --- a/src/plugins/srv6-mobile/extra/README.md +++ b/src/plugins/srv6-mobile/extra/README.md @@ -1,173 +1,168 @@ -# What's `runner.py` doing? 
+Test and Demonstrate SRv6 Mobile User Plane Plugin +======================== -## Common configurations -### VPP1 -``` -create host-interface name eth1 -set int ip addr host-eth1 A1::1/120 -set int state host-eth1 up -ip route add ::/0 via host-eth1 A1::2 -``` +## Getting started +To play with SRv6 Mobile User Plane on VPP, you need to install following packages: + docker + python3 + pip3 -### VPP2 + Python packages (use pip): + docker + scapy + jinja2 -``` -create host-interface name eth1 -set int ip addr host-eth1 A1::2/120 -create host-interface name eth2 -set int ip addr host-eth2 A2::1/120 -set int state host-eth1 up -set int state host-eth2 up -ip route add ::/0 via host-eth2 A2::2 -``` +### Quick-start -### VPP3 +1. Build up the docker container image as following: ``` -create host-interface name eth1 -set int ip addr host-eth1 A2::2/120 -create host-interface name eth2 -set int ip addr host-eth2 A3::1/120 -set int state host-eth1 up -set int state host-eth2 up -ip route add ::/0 via host-eth1 A2::1 -``` +$ git clone https://github.com/filvarga/srv6-mobile.git +$ cd ./srv6-mobile/src/plugins/srv6-mobile/extra +$ ./runner.py infra build -### VPP4 +$ docker images +REPOSITORY TAG IMAGE ID CREATED SIZE +srv6m-image latest 577e786b7ec6 2 days ago 8GB +ubuntu 18.04 4c108a37151f 4 weeks ago 64.2MB ``` -create host-interface name eth1 -set int ip addr host-eth1 A3::2/120 -set int state host-eth1 up -ip route add ::/0 via host-eth1 A3::1 -``` +The runner script [runner.py](runner.py) has features to automate configurations and procedures for the test. -## Drop-in for GTP-U over IPv4 +2. Instantiate test Scenario -What's happened when you run `test tmap`: +Let's try following command to instantiate a topology: - $ ./runner.py test tmap +``` +$ ./runner.py infra start +``` +This command instantiates 4 VPP containers with following topology: -Setting up a virtual interface of packet generator: +![Topology Diagram](topo-init.png) + +You can check the instantiated docker instances with "docker ps". -#### VPP1 ``` -create packet-generator interface pg0 -set int mac address pg0 aa:bb:cc:dd:ee:01 -set int ip addr pg0 172.16.0.1/30 -set ip arp pg0 172.16.0.2/30 aa:bb:cc:dd:ee:02 +$ docker ps +CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES +44cb98994500 srv6m-image "/bin/sh -c 'vpp -c …" About a minute ago Up About a minute hck-vpp-4 +6d65fff8aee9 srv6m-image "/bin/sh -c 'vpp -c …" About a minute ago Up About a minute hck-vpp-3 +ad123b516b24 srv6m-image "/bin/sh -c 'vpp -c …" About a minute ago Up About a minute hck-vpp-2 +5efed405b96a srv6m-image "/bin/sh -c 'vpp -c …" About a minute ago Up About a minute hck-vpp-1 + ``` -#### VPP4 +You can login to and configure each instantiated container. ``` -create packet-generator interface pg0 -set int mac address pg0 aa:bb:cc:dd:ee:11 -set int ip addr pg0 1.0.0.2/30 -set ip arp pg0 1.0.0.1 aa:bb:cc:dd:ee:22 +$ ./runner.py cmd vppctl 0 + +Verified image: None +connecting to: hck-vpp-1 + _______ _ _ _____ ___ + __/ __/ _ \ (_)__ | | / / _ \/ _ \ + _/ _// // / / / _ \ | |/ / ___/ ___/ + /_/ /____(_)_/\___/ |___/_/ /_/ + +vpp# ``` -SRv6 and IP routing settings: +## Test Scenarios +### SRv6 Drop-in between GTP-U tunnel -#### VPP1 +This test scenario introduces SRv6 path between GTP-U tunnel transparently. A GTP-U packet sent out from one end to another is translated to SRv6 and then back to GTP-U. All GTP-U tunnel identifiers are preserved in IPv6 header and SRH. 
-``` -sr policy add bsid D1:: next D2:: next D3:: gtp4_removal sr_prefix D4::/32 v6src_prefix C1::/64 -sr steer l3 172.20.0.1/32 via bsid D1:: -``` +#### GTP-U over UDP/IPv4 case + +This case uses SRv6 end functions, T.M.GTP4.D and End.M.GTP4.E. -#### VPP2 +![Topology Diagram](topo-test_gtp4d.png) + +VPP1 is configured with "T.M.GTP4.D", and VPP4 is configured with "End.M.GTP4.E". Others are configured with "End". The packet generator sends a GTP-U packet over UDP/IPv4 toward the packet capture. VPP1 translates it to SRv6 toward D4::TEID with SR policy <D2::, D3::> in SRH. VPP4 translates the SRv6 packet to the original GTP-U packet and send out to the packet capture. + +To start this case with IPv4 payload over GTP-U, you can run: ``` -sr localsid address D2:: behavior end -ip route add D3::/128 via host-eth2 A2::2 +$ ./runner.py test gtp4 ``` -#### VPP3 +If you want to use IPv6 payload instead of IPv4, you can run: ``` -sr localsid address D3:: behavior end -ip route add D4::/32 via host-eth2 A3::2 +$ ./runner.py test gtp4_ipv6 ``` -#### VPP4 +If you use the latest scapy codes from the master branch, you can test the functions with GTP-U packet in 5G format: ``` -sr localsid prefix D4::/32 behavior end.m.gtp4.e v4src_position 64 -ip route add 172.20.0.1/32 via pg0 1.0.0.1 +$ ./runner.py test gtp4_5g ``` +#### GTP-U over UDP/IPv6 case -## Packet generator and testing +This case uses SRv6 end functions, End.M.GTP6.D.Di and End.M.GTP6.E. - Example how to build custom SRv6 packet in scapy and ipaddress pkgs +![Topology Diagram](topo-test_gtp6d.png) - s = '\x11' * 4 + IPv4Address(u"192.168.192.10").packed + '\x11' * 8 - ip6 = IPv6Address(s) - IPv6(dst=ip6, src=ip6) +VPP1 is configured with "End.M.GTP6.D.Di", and VPP4 is configured with "End.M.GTP4.E". Others are configured with "End". The packet generator sends a GTP-U packet over UDP/IPv6 toward D:: of the packet capture. VPP1 translates it to SRv6 toward D:: with SR policy <D2::, D3::, D4::TEID> in SRH. VPP4 translates the SRv6 packet to the original GTP-U packet and send out to the packet capture. +To start this case with IPv4 payload over GTP-U, you can run: -## end.m.gtp4.e +``` +$ ./runner.py test gtp6_drop_in +``` - First set behavior so our localsid node is called with the packet - matching C1::1 in fib table - sr localsid address C1::1 behavior end.m.gtp4.ess +If you want to use IPv6 payload instead of IPv4, you can run: - show sr localsids behaviors - show sr localsid +``` +$ ./runner.py test gtp6_drop_in_ipv6 +``` - We should send a well formated packet to C::1 destination address - that contains the correct spec as for end.m.gtp4.e with encapsulated - ipv4 src and dst address and teid with port for the conversion to - GTPU IPv4 packet +### GTP-U to SRv6 -## additional commands +This test scenario demonstrates GTP-U to SRv6 translation. A GTP-U packet sent out from one end to another is translated to SRv6. - gdb - breakpoint +#### GTP-U over UDP/IPv6 case - break sr_policy_rewrite.c:1620 +##### IPv4 payload - break src/plugins/srv6-end/node.c:84 +This case uses SRv6 end functions, End.M.GTP6.D and End.DT4. - TMAP - Linux: +![Topology Diagram](topo-test_gtp6.png) - ip link add tmp1 type veth peer name tmp2 - ip link set dev tmp1 up - ip link set dev tmp2 up - ip addr add 172.20.0.2/24 dev tmp2 +VPP1 is configured with "End.M.GTP6.D", and VPP4 is configured with "End.DT4". Others are configured with "End". The packet generator sends a GTP-U packet over UDP/IPv6 toward D::2. 
VPP1 translates it to SRv6 toward the IPv6 destination consists of D4:: and TEID of GTP-U with SR policy <D2::, D3::> in SRH. VPP4 decapsulates the SRv6 packet and lookup the table for the inner IPv4 packet and send out to the packet capture. - create host-interface name tmp1 - set int mac address host-tmp1 02:fe:98:c6:c8:7b - set interface ip address host-tmp1 172.20.0.1/24 - set interface state host-tmp1 up +To start this case, you can run: - VPP - set sr encaps source addr C1:: - sr policy add bsid D1::999:2 next D2:: next D3:: gtp4_removal sr-prefix fc34:5678::/64 local-prefix C1::/64 - sr steer l3 172.21.0.0/24 via bsid d1::999:2 +``` +$ ./runner.py test gtp6 +``` - END - Linux - create host-interface name tmp1 - set int mac address host-tmp1 02:fe:98:c6:c8:7b - set interface ip address host-tmp1 A1::1/64 - set interface state host-tmp1 up +##### IPv6 payload - VPP - sr localsid address 1111:1111:c0a8:c00a:1122:1111:1111:1111 behavior end.m.gtp4.e +This case uses SRv6 end functions, End.M.GTP6.D and End.DT6. + + +![Topology Diagram](topo-test_gtp6ip6.png) + +The configurations are same with IPv4 payload case, except D4:: is configured as "End.DT6" in VPP4. VPP4 decapsulates the SRv6 packet and lookup the table for the inner IPv6 packet and send out to the packet capture. + +If you want to use IPv6 payload instead of IPv4, you can run: + +``` +$ ./runner.py test gtp6_ipv6 +``` - trace add af-packet-input 10 +## More information - sr localsid address C3:: behavior end.m.gtp4.e - sr localsid address 2001:200:0:1ce1:3000:757f:0:2 behavior end.m.gtp4.e +- @subpage runner_doc.md diff --git a/src/plugins/srv6-mobile/extra/runner.py b/src/plugins/srv6-mobile/extra/runner.py index 79ec2d007c5..c438fb161b7 100755 --- a/src/plugins/srv6-mobile/extra/runner.py +++ b/src/plugins/srv6-mobile/extra/runner.py @@ -135,7 +135,7 @@ class Container(object): self.vppctl_exec("set int mac address pg0 {}".format(local_mac)) self.vppctl_exec("set int ip addr pg0 {}".format(local_ip)) self.vppctl_exec( - "set ip6 neighbor pg0 {} {}".format(remote_ip, remote_mac)) + "set ip neighbor pg0 {} {}".format(remote_ip, remote_mac)) self.vppctl_exec("set int state pg0 up") def pg_create_interface4(self, local_ip, remote_ip, local_mac, remote_mac): @@ -145,7 +145,7 @@ class Container(object): self.vppctl_exec("create packet-generator interface pg0") self.vppctl_exec("set int mac address pg0 {}".format(local_mac)) self.vppctl_exec("set int ip addr pg0 {}".format(local_ip)) - self.vppctl_exec("set ip arp pg0 {} {}".format(remote_ip, remote_mac)) + self.vppctl_exec("set ip neighbor pg0 {} {}".format(remote_ip, remote_mac)) self.vppctl_exec("set int state pg0 up") def pg_create_interface6(self, local_ip, remote_ip, local_mac, remote_mac): @@ -154,10 +154,30 @@ class Container(object): time.sleep(2) self.vppctl_exec("create packet-generator interface pg0") self.vppctl_exec("set int mac address pg0 {}".format(local_mac)) - self.vppctl_exec("set int ip6 addr pg0 {}".format(local_ip)) - self.vppctl_exec("set ip6 arp pg0 {} {}".format(remote_ip, remote_mac)) + self.vppctl_exec("set int ip addr pg0 {}".format(local_ip)) + self.vppctl_exec("set ip neighbor pg0 {} {}".format(remote_ip, remote_mac)) self.vppctl_exec("set int state pg0 up") + def pg_create_interface4_name(self, ifname, local_ip, remote_ip, local_mac, remote_mac): + # remote_ip can't have subnet mask + + time.sleep(2) + self.vppctl_exec("create packet-generator interface {}".format(ifname)) + self.vppctl_exec("set int mac address {} {}".format(ifname, local_mac)) + 
self.vppctl_exec("set int ip addr {} {}".format(ifname, local_ip)) + self.vppctl_exec("set ip neighbor {} {} {}".format(ifname, remote_ip, remote_mac)) + self.vppctl_exec("set int state {} up".format(ifname)) + + def pg_create_interface6_name(self, ifname, local_ip, remote_ip, local_mac, remote_mac): + # remote_ip can't have subnet mask + + time.sleep(2) + self.vppctl_exec("create packet-generator interface {}".format(ifname)) + self.vppctl_exec("set int mac address {} {}".format(ifname, local_mac)) + self.vppctl_exec("set int ip addr {} {}".format(ifname, local_ip)) + self.vppctl_exec("set ip neighbor {} {} {}".format(ifname, remote_ip, remote_mac)) + self.vppctl_exec("set int state {} up".format(ifname)) + def pg_enable(self): # start packet generator self.vppctl_exec("packet-generator enable") @@ -176,6 +196,13 @@ class Container(object): "packet-generator capture pg0 pcap {}".format( self.pg_output_file_in)) + def pg_start_capture_name(self, ifname): + if exists(self.pg_output_file): + remove(self.pg_output_file) + self.vppctl_exec( + "packet-generator capture {} pcap {}".format( + ifname, self.pg_output_file_in)) + def pg_read_packets(self): return rdpcap(self.pg_output_file) @@ -184,6 +211,11 @@ class Container(object): "ip route add {} via host-{} {}".format( subnet, out_if_name, next_hop_ip)) + def set_ipv6_route2(self, out_if_name, next_hop_ip, subnet): + self.vppctl_exec( + "ip route add {} via {} {}".format( + subnet, out_if_name, next_hop_ip)) + def set_ip_pgroute(self, out_if_name, next_hop_ip, subnet): self.vppctl_exec("ip route add {} via {} {}".format( subnet, out_if_name, next_hop_ip)) @@ -1089,6 +1121,142 @@ class Program(object): for p in c4.pg_read_packets(): p.show2() + def test_gtp4_reply(self): + # TESTS: + # trace add af-packet-input 10 + # pg interface on c1 172.20.0.1 + # pg interface on c4 B::1/120 + + self.start_containers() + + c1 = self.containers.get(self.get_name(self.instance_names[0])) + c2 = self.containers.get(self.get_name(self.instance_names[1])) + c3 = self.containers.get(self.get_name(self.instance_names[2])) + c4 = self.containers.get(self.get_name(self.instance_names[-1])) + + c1.pg_create_interface4( + local_ip="172.16.0.1/30", + remote_ip="172.16.0.2/30", + local_mac="aa:bb:cc:dd:ee:01", + remote_mac="aa:bb:cc:dd:ee:02") + c4.pg_create_interface4( + local_ip="1.0.0.2/30", + remote_ip="1.0.0.1", + local_mac="aa:bb:cc:dd:ee:11", + remote_mac="aa:bb:cc:dd:ee:22") + + c1.vppctl_exec("set sr encaps source addr A1::1") + c1.vppctl_exec("sr policy add bsid D4:: next D2:: next D3::") + c1.vppctl_exec("sr policy add bsid D5:: behavior t.m.gtp4.d D4::/32 v6src_prefix C1::/64 nhtype ipv4") + c1.vppctl_exec("sr steer l3 172.20.0.1/32 via bsid D5::") + + c2.vppctl_exec("sr localsid address D2:: behavior end") + + c3.vppctl_exec("sr localsid address D3:: behavior end") + + c4.vppctl_exec( + "sr localsid prefix D4::/32 " + "behavior end.m.gtp4.e v4src_position 64") + + c2.set_ipv6_route("eth2", "A2::2", "D3::/128") + c2.set_ipv6_route("eth1", "A1::1", "C::/120") + c3.set_ipv6_route("eth2", "A3::2", "D4::/32") + c3.set_ipv6_route("eth1", "A2::1", "C::/120") + c4.set_ip_pgroute("pg0", "1.0.0.1", "172.20.0.1/32") + + p = (Ether(src="aa:bb:cc:dd:ee:02", dst="aa:bb:cc:dd:ee:01") / + IP(src="172.20.0.2", dst="172.20.0.1") / + UDP(sport=2152, dport=2152) / + GTP_U_Header(gtp_type="echo_response", S=1, teid=200, seq=200)) + + print("Sending packet on {}:".format(c1.name)) + p.show2() + + c1.enable_trace(10) + c4.enable_trace(10) + + c4.pg_start_capture() + + 
c1.pg_create_stream(p) + c1.pg_enable() + + # timeout (sleep) if needed + print("Sleeping") + time.sleep(5) + + print("Receiving packet on {}:".format(c4.name)) + for p in c4.pg_read_packets(): + p.show2() + + def test_gtp4_error(self): + # TESTS: + # trace add af-packet-input 10 + # pg interface on c1 172.20.0.1 + # pg interface on c4 B::1/120 + + self.start_containers() + + c1 = self.containers.get(self.get_name(self.instance_names[0])) + c2 = self.containers.get(self.get_name(self.instance_names[1])) + c3 = self.containers.get(self.get_name(self.instance_names[2])) + c4 = self.containers.get(self.get_name(self.instance_names[-1])) + + c1.pg_create_interface4( + local_ip="172.16.0.1/30", + remote_ip="172.16.0.2/30", + local_mac="aa:bb:cc:dd:ee:01", + remote_mac="aa:bb:cc:dd:ee:02") + c4.pg_create_interface4( + local_ip="1.0.0.2/30", + remote_ip="1.0.0.1", + local_mac="aa:bb:cc:dd:ee:11", + remote_mac="aa:bb:cc:dd:ee:22") + + c1.vppctl_exec("set sr encaps source addr A1::1") + c1.vppctl_exec("sr policy add bsid D4:: next D2:: next D3::") + c1.vppctl_exec("sr policy add bsid D5:: behavior t.m.gtp4.d D4::/32 v6src_prefix C1::/64 nhtype ipv4") + c1.vppctl_exec("sr steer l3 172.20.0.1/32 via bsid D5::") + + c2.vppctl_exec("sr localsid address D2:: behavior end") + + c3.vppctl_exec("sr localsid address D3:: behavior end") + + c4.vppctl_exec( + "sr localsid prefix D4::/32 " + "behavior end.m.gtp4.e v4src_position 64") + + c2.set_ipv6_route("eth2", "A2::2", "D3::/128") + c2.set_ipv6_route("eth1", "A1::1", "C::/120") + c3.set_ipv6_route("eth2", "A3::2", "D4::/32") + c3.set_ipv6_route("eth1", "A2::1", "C::/120") + c4.set_ip_pgroute("pg0", "1.0.0.1", "172.20.0.1/32") + + p = (Ether(src="aa:bb:cc:dd:ee:02", dst="aa:bb:cc:dd:ee:01") / + IP(src="172.20.0.2", dst="172.20.0.1") / + UDP(sport=2152, dport=2152) / + GTP_U_Header(gtp_type="error_indication", S=1, teid=200, seq=200)/ + IE_TEIDI(TEIDI=65535)/IE_GSNAddress(address="1.1.1.1")/ + IE_PrivateExtension(extention_value="z")) + + print("Sending packet on {}:".format(c1.name)) + p.show2() + + c1.enable_trace(10) + c4.enable_trace(10) + + c4.pg_start_capture() + + c1.pg_create_stream(p) + c1.pg_enable() + + # timeout (sleep) if needed + print("Sleeping") + time.sleep(5) + + print("Receiving packet on {}:".format(c4.name)) + for p in c4.pg_read_packets(): + p.show2() + def test_gtp4_ipv6(self): # TESTS: # trace add af-packet-input 10 @@ -1452,6 +1620,154 @@ class Program(object): for p in c4.pg_read_packets(): p.show2() + def test_gtp6_drop_in_reply(self): + # TESTS: + # trace add af-packet-input 10 + # pg interface on c1 172.20.0.1 + # pg interface on c4 B::1/120 + + self.start_containers() + + print("Deleting the old containers...") + time.sleep(30) + print("Starting the new containers...") + + c1 = self.containers.get(self.get_name(self.instance_names[0])) + c2 = self.containers.get(self.get_name(self.instance_names[1])) + c3 = self.containers.get(self.get_name(self.instance_names[2])) + c4 = self.containers.get(self.get_name(self.instance_names[-1])) + + c1.pg_create_interface( + local_ip="C::1/120", + remote_ip="C::2", + local_mac="aa:bb:cc:dd:ee:01", + remote_mac="aa:bb:cc:dd:ee:02") + c4.pg_create_interface( + local_ip="B::1/120", + remote_ip="B::2", + local_mac="aa:bb:cc:dd:ee:11", + remote_mac="aa:bb:cc:dd:ee:22") + + c1.vppctl_exec("set sr encaps source addr A1::1") + c1.vppctl_exec("sr policy add bsid D4:: next D2:: next D3::") + + c1.vppctl_exec( + "sr localsid prefix D::/64 behavior end.m.gtp6.d.di D4::/64") + + c2.vppctl_exec("sr localsid 
address D2:: behavior end") + + c3.vppctl_exec("sr localsid address D3:: behavior end") + + c4.vppctl_exec("sr localsid prefix D4::/64 behavior end.m.gtp6.e") + + c2.set_ipv6_route("eth2", "A2::2", "D3::/128") + c2.set_ipv6_route("eth1", "A1::1", "C::/120") + c3.set_ipv6_route("eth2", "A3::2", "D4::/32") + c3.set_ipv6_route("eth1", "A2::1", "C::/120") + c4.set_ip_pgroute("pg0", "B::2", "D::2/128") + + print("Waiting...") + time.sleep(30) + + p = (Ether(src="aa:bb:cc:dd:ee:02", dst="aa:bb:cc:dd:ee:01") / + IPv6(src="C::2", dst="D::2") / + UDP(sport=2152, dport=2152) / + GTP_U_Header(gtp_type="echo_response", S=1, teid=200, seq=300)) + + print("Sending packet on {}:".format(c1.name)) + p.show2() + + c1.enable_trace(10) + c4.enable_trace(10) + + c4.pg_start_capture() + + c1.pg_create_stream(p) + c1.pg_enable() + + # timeout (sleep) if needed + print("Sleeping") + time.sleep(5) + + print("Receiving packet on {}:".format(c4.name)) + for p in c4.pg_read_packets(): + p.show2() + + def test_gtp6_drop_in_error(self): + # TESTS: + # trace add af-packet-input 10 + # pg interface on c1 172.20.0.1 + # pg interface on c4 B::1/120 + + self.start_containers() + + print("Deleting the old containers...") + time.sleep(30) + print("Starting the new containers...") + + c1 = self.containers.get(self.get_name(self.instance_names[0])) + c2 = self.containers.get(self.get_name(self.instance_names[1])) + c3 = self.containers.get(self.get_name(self.instance_names[2])) + c4 = self.containers.get(self.get_name(self.instance_names[-1])) + + c1.pg_create_interface( + local_ip="C::1/120", + remote_ip="C::2", + local_mac="aa:bb:cc:dd:ee:01", + remote_mac="aa:bb:cc:dd:ee:02") + c4.pg_create_interface( + local_ip="B::1/120", + remote_ip="B::2", + local_mac="aa:bb:cc:dd:ee:11", + remote_mac="aa:bb:cc:dd:ee:22") + + c1.vppctl_exec("set sr encaps source addr A1::1") + c1.vppctl_exec("sr policy add bsid D4:: next D2:: next D3::") + + c1.vppctl_exec( + "sr localsid prefix D::/64 behavior end.m.gtp6.d.di D4::/64") + + c2.vppctl_exec("sr localsid address D2:: behavior end") + + c3.vppctl_exec("sr localsid address D3:: behavior end") + + c4.vppctl_exec("sr localsid prefix D4::/64 behavior end.m.gtp6.e") + + c2.set_ipv6_route("eth2", "A2::2", "D3::/128") + c2.set_ipv6_route("eth1", "A1::1", "C::/120") + c3.set_ipv6_route("eth2", "A3::2", "D4::/32") + c3.set_ipv6_route("eth1", "A2::1", "C::/120") + c4.set_ip_pgroute("pg0", "B::2", "D::2/128") + + print("Waiting...") + time.sleep(30) + + p = (Ether(src="aa:bb:cc:dd:ee:02", dst="aa:bb:cc:dd:ee:01") / + IPv6(src="C::2", dst="D::2") / + UDP(sport=2152, dport=2152) / + GTP_U_Header(gtp_type="error_indication", S=1, teid=200, seq=300)/ + IE_TEIDI(TEIDI=65535)/IE_GSNAddress(address="1.1.1.1")/ + IE_PrivateExtension(extention_value="z")) + + print("Sending packet on {}:".format(c1.name)) + p.show2() + + c1.enable_trace(10) + c4.enable_trace(10) + + c4.pg_start_capture() + + c1.pg_create_stream(p) + c1.pg_enable() + + # timeout (sleep) if needed + print("Sleeping") + time.sleep(5) + + print("Receiving packet on {}:".format(c4.name)) + for p in c4.pg_read_packets(): + p.show2() + def test_gtp6_drop_in_ipv6(self): # TESTS: # trace add af-packet-input 10 @@ -1641,6 +1957,7 @@ class Program(object): c3.vppctl_exec("sr localsid address D3:: behavior end") + c4.vppctl_exec("set ip neighbor pg0 1.0.0.1 aa:bb:cc:dd:ee:22") c4.vppctl_exec("sr localsid prefix D4::/64 behavior end.dt4 2") c2.set_ipv6_route("eth2", "A2::2", "D3::/128") @@ -1716,6 +2033,7 @@ class Program(object): c3.vppctl_exec("sr 
localsid address D3:: behavior end") + c4.vppctl_exec("set ip neighbor pg0 1.0.0.1 aa:bb:cc:dd:ee:22") c4.vppctl_exec("sr localsid prefix D4::/64 behavior end.dt4 2") c2.set_ipv6_route("eth2", "A2::2", "D3::/128") @@ -1792,6 +2110,7 @@ class Program(object): c3.vppctl_exec("sr localsid address D3:: behavior end") + c4.vppctl_exec("set ip neighbor pg0 B::2 aa:bb:cc:dd:ee:22") c4.vppctl_exec("sr localsid prefix D4::/64 behavior end.dt6 2") c2.set_ipv6_route("eth2", "A2::2", "D3::/128") @@ -1867,6 +2186,7 @@ class Program(object): c3.vppctl_exec("sr localsid address D3:: behavior end") + c4.vppctl_exec("set ip neighbor pg0 B::2 aa:bb:cc:dd:ee:22") c4.vppctl_exec("sr localsid prefix D4::/64 behavior end.dt6 2") c2.set_ipv6_route("eth2", "A2::2", "D3::/128") @@ -1905,6 +2225,133 @@ class Program(object): for p in c4.pg_read_packets(): p.show2() + def test_gtp6_dt(self): + # TESTS: + # trace add af-packet-input 10 + # pg interface on c1 172.20.0.1 + # pg interface on c4 B::1/120 + + self.start_containers() + + print("Deleting the old containers...") + time.sleep(30) + print("Starting the new containers...") + + c1 = self.containers.get(self.get_name(self.instance_names[0])) + + c1.pg_create_interface6_name( + ifname="pg0", + local_ip="C::1/120", + remote_ip="C::2", + local_mac="aa:bb:cc:dd:ee:01", + remote_mac="aa:bb:cc:dd:ee:02") + + c1.pg_create_interface4_name( + ifname="pg1", + local_ip="1.0.0.2/30", + remote_ip="1.0.0.1", + local_mac="aa:bb:cc:dd:ee:11", + remote_mac="aa:bb:cc:dd:ee:22") + + c1.vppctl_exec("set sr encaps source addr A1::1") + + c1.vppctl_exec( + "sr localsid prefix D::/64 behavior end.m.gtp6.dt46 fib-table 0 local-fib-table 0") + + c1.vppctl_exec("set ip neighbor pg1 1.0.0.1 aa:bb:cc:dd:ee:22") + c1.set_ip_pgroute("pg1", "1.0.0.1", "172.200.0.1/32") + + print("Waiting...") + time.sleep(30) + + p = (Ether(src="aa:bb:cc:dd:ee:02", dst="aa:bb:cc:dd:ee:01") / + IPv6(src="C::2", dst="D::2") / + UDP(sport=2152, dport=2152) / + GTP_U_Header(gtp_type="g_pdu", teid=200) / + IP(src="172.100.0.1", dst="172.200.0.1") / + ICMP()) + + print("Sending packet on {}:".format(c1.name)) + p.show2() + + c1.enable_trace(10) + + c1.pg_start_capture_name(ifname="pg1") + + c1.pg_create_stream(p) + c1.pg_enable() + + # timeout (sleep) if needed + print("Sleeping") + time.sleep(5) + + print("Receiving packet on {}:".format(c1.name)) + for p in c1.pg_read_packets(): + p.show2() + + def test_gtp4_dt(self): + # TESTS: + # trace add af-packet-input 10 + # pg interface on c1 172.20.0.1 + # pg interface on c4 B::1/120 + + self.start_containers() + + print("Deleting the old containers...") + time.sleep(30) + print("Starting the new containers...") + + c1 = self.containers.get(self.get_name(self.instance_names[0])) + + c1.pg_create_interface4_name( + ifname="pg0", + local_ip="172.16.0.1/30", + remote_ip="172.16.0.2", + local_mac="aa:bb:cc:dd:ee:01", + remote_mac="aa:bb:cc:dd:ee:02") + + c1.pg_create_interface4_name( + ifname="pg1", + local_ip="1.0.0.2/30", + remote_ip="1.0.0.1", + local_mac="aa:bb:cc:dd:ee:11", + remote_mac="aa:bb:cc:dd:ee:22") + + c1.vppctl_exec("set sr encaps source addr A1::1") + c1.vppctl_exec("sr policy add bsid D5:: behavior t.m.gtp4.dt4 fib-table 0") + c1.vppctl_exec("sr steer l3 172.20.0.1/32 via bsid D5::") + + c1.vppctl_exec("set ip neighbor pg1 1.0.0.1 aa:bb:cc:dd:ee:22") + c1.set_ip_pgroute("pg1", "1.0.0.1", "172.200.0.1/32") + + print("Waiting...") + time.sleep(30) + + p = (Ether(src="aa:bb:cc:dd:ee:02", dst="aa:bb:cc:dd:ee:01") / + IP(src="172.20.0.2", dst="172.20.0.1") / + 
UDP(sport=2152, dport=2152) / + GTP_U_Header(gtp_type="g_pdu", teid=200) / + IP(src="172.100.0.1", dst="172.200.0.1") / + ICMP()) + + print("Sending packet on {}:".format(c1.name)) + p.show2() + + c1.enable_trace(10) + + c1.pg_start_capture_name(ifname="pg1") + + c1.pg_create_stream(p) + c1.pg_enable() + + # timeout (sleep) if needed + print("Sleeping") + time.sleep(5) + + print("Receiving packet on {}:".format(c1.name)) + for p in c1.pg_read_packets(): + p.show2() + def status_containers(self): print("Instances:") @@ -2008,17 +2455,23 @@ def get_args(): "gtp4_usid", "gtp4_5g", "gtp4_echo", + "gtp4_reply", + "gtp4_error", "gtp4_ipv6", "gtp4_ipv6_5g", "gtp6_drop_in", "gtp6_drop_in_5g", "gtp6_drop_in_echo", + "gtp6_drop_in_reply", + "gtp6_drop_in_error", "gtp6_drop_in_ipv6", "gtp6_drop_in_ipv6_5g", "gtp6", "gtp6_5g", "gtp6_ipv6", - "gtp6_ipv6_5g"]) + "gtp6_ipv6_5g", + "gtp6_dt", + "gtp4_dt"]) args = parser.parse_args() if not hasattr(args, "op") or not args.op: @@ -2038,7 +2491,7 @@ def main(op=None, prefix=None, verbose=None, image = "srv6m-release-image" elif image == 'debug': image = "srv6m-image" - else + else: image = "srv6m-image" print("Target image: {}".format(image)) @@ -2080,6 +2533,10 @@ def main(op=None, prefix=None, verbose=None, program.test_gtp4_5g() elif op == 'gtp4_echo': program.test_gtp4_echo() + elif op == 'gtp4_reply': + program.test_gtp4_reply() + elif op == 'gtp4_error': + program.test_gtp4_error() elif op == 'gtp4_ipv6': program.test_gtp4_ipv6() elif op == 'gtp4_ipv6_5g': @@ -2090,6 +2547,10 @@ def main(op=None, prefix=None, verbose=None, program.test_gtp6_drop_in_5g() elif op == 'gtp6_drop_in_echo': program.test_gtp6_drop_in_echo() + elif op == 'gtp6_drop_in_reply': + program.test_gtp6_drop_in_reply() + elif op == 'gtp6_drop_in_error': + program.test_gtp6_drop_in_error() elif op == 'gtp6_drop_in_ipv6': program.test_gtp6_drop_in_ipv6() elif op == 'gtp6_drop_in_ipv6_5g': @@ -2102,6 +2563,10 @@ def main(op=None, prefix=None, verbose=None, program.test_gtp6_ipv6() elif op == 'gtp6_ipv6_5g': program.test_gtp6_ipv6_5g() + elif op == 'gtp6_dt': + program.test_gtp6_dt() + elif op == 'gtp4_dt': + program.test_gtp4_dt() except Exception: program.logger.exception("") diff --git a/src/plugins/srv6-mobile/extra/runner_doc.md b/src/plugins/srv6-mobile/extra/runner_doc.md new file mode 100644 index 00000000000..a6fb0277378 --- /dev/null +++ b/src/plugins/srv6-mobile/extra/runner_doc.md @@ -0,0 +1,105 @@ +# What's `runner.py` doing? + +## Common configurations + +### VPP1 +``` +create host-interface name eth1 +set int ip addr host-eth1 A1::1/120 +set int state host-eth1 up +ip route add ::/0 via host-eth1 A1::2 +``` + + +### VPP2 + +``` +create host-interface name eth1 +set int ip addr host-eth1 A1::2/120 +create host-interface name eth2 +set int ip addr host-eth2 A2::1/120 +set int state host-eth1 up +set int state host-eth2 up +ip route add ::/0 via host-eth2 A2::2 +``` + + +### VPP3 + +``` +create host-interface name eth1 +set int ip addr host-eth1 A2::2/120 +create host-interface name eth2 +set int ip addr host-eth2 A3::1/120 +set int state host-eth1 up +set int state host-eth2 up +ip route add ::/0 via host-eth1 A2::1 +``` + +### VPP4 + +``` +create host-interface name eth1 +set int ip addr host-eth1 A3::2/120 +set int state host-eth1 up +ip route add ::/0 via host-eth1 A3::1 +``` + + +## Drop-in for GTP-U over IPv4 + +Drop-in mode is handy to test both GTP-U-to-SRv6 and SRv6-to-GTP-U functions at same time. 
Let's see what's happened when you run `test gtp4`: + + $ ./runner.py test gtp4 + + +Setting up a virtual interface of packet generator: + +#### VPP1 + +``` +create packet-generator interface pg0 +set int mac address pg0 aa:bb:cc:dd:ee:01 +set int ip addr pg0 172.16.0.1/30 +set ip arp pg0 172.16.0.2/30 aa:bb:cc:dd:ee:02 +``` + +#### VPP4 + +``` +create packet-generator interface pg0 +set int mac address pg0 aa:bb:cc:dd:ee:11 +set int ip addr pg0 1.0.0.2/30 +set ip arp pg0 1.0.0.1 aa:bb:cc:dd:ee:22 +``` + +SRv6 and IP routing settings: + +#### VPP1 + +``` +sr policy add bsid D4:: next D2:: next D3:: +sr policy add bsid D5:: behavior t.m.gtp4.d D4::/32 v6src_prefix C1::/64 nhtype ipv4 +sr steer l3 172.20.0.1/32 via bsid D5:: +``` + +#### VPP2 + +``` +sr localsid address D2:: behavior end +ip route add D3::/128 via host-eth2 A2::2 +``` + +#### VPP3 + +``` +sr localsid address D3:: behavior end +ip route add D4::/32 via host-eth2 A3::2 +``` + +#### VPP4 + +``` +sr localsid prefix D4::/32 behavior end.m.gtp4.e v4src_position 64 +ip route add 172.20.0.1/32 via pg0 1.0.0.1 +``` diff --git a/src/plugins/srv6-mobile/extra/topo-init.png b/src/plugins/srv6-mobile/extra/topo-init.png Binary files differindex e69de29bb2d..dc9603ba290 100644 --- a/src/plugins/srv6-mobile/extra/topo-init.png +++ b/src/plugins/srv6-mobile/extra/topo-init.png diff --git a/src/plugins/srv6-mobile/extra/topo-test_gtp4d.png b/src/plugins/srv6-mobile/extra/topo-test_gtp4d.png Binary files differindex e69de29bb2d..d60beb23093 100644 --- a/src/plugins/srv6-mobile/extra/topo-test_gtp4d.png +++ b/src/plugins/srv6-mobile/extra/topo-test_gtp4d.png diff --git a/src/plugins/srv6-mobile/extra/topo-test_gtp6.png b/src/plugins/srv6-mobile/extra/topo-test_gtp6.png Binary files differindex e69de29bb2d..2cad260215a 100644 --- a/src/plugins/srv6-mobile/extra/topo-test_gtp6.png +++ b/src/plugins/srv6-mobile/extra/topo-test_gtp6.png diff --git a/src/plugins/srv6-mobile/extra/topo-test_gtp6d.png b/src/plugins/srv6-mobile/extra/topo-test_gtp6d.png Binary files differindex e69de29bb2d..78b083daa8a 100644 --- a/src/plugins/srv6-mobile/extra/topo-test_gtp6d.png +++ b/src/plugins/srv6-mobile/extra/topo-test_gtp6d.png diff --git a/src/plugins/srv6-mobile/extra/topo-test_gtp6ip6.png b/src/plugins/srv6-mobile/extra/topo-test_gtp6ip6.png Binary files differindex e69de29bb2d..fe78f673787 100644 --- a/src/plugins/srv6-mobile/extra/topo-test_gtp6ip6.png +++ b/src/plugins/srv6-mobile/extra/topo-test_gtp6ip6.png diff --git a/src/plugins/srv6-mobile/gtp4_dt.c b/src/plugins/srv6-mobile/gtp4_dt.c new file mode 100644 index 00000000000..76525c21161 --- /dev/null +++ b/src/plugins/srv6-mobile/gtp4_dt.c @@ -0,0 +1,200 @@ +/* + * srv6_t_m_gtp4_dt.c + * + * Copyright (c) 2019 Arrcus Inc and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <vnet/vnet.h> +#include <vnet/adj/adj.h> +#include <vnet/fib/fib_table.h> +#include <vnet/plugin/plugin.h> +#include <vpp/app/version.h> +#include <srv6-mobile/mobile.h> + +srv6_t_main_v4_dt_t srv6_t_main_v4_dt; + +static void +clb_dpo_lock_srv6_t_m_gtp4_dt (dpo_id_t * dpo) +{ +} + +static void +clb_dpo_unlock_srv6_t_m_gtp4_dt (dpo_id_t * dpo) +{ +} + +static u8 * +clb_dpo_format_srv6_t_m_gtp4_dt (u8 * s, va_list * args) +{ + index_t index = va_arg (*args, index_t); + CLIB_UNUSED (u32 indent) = va_arg (*args, u32); + + return (format (s, "SR: dynamic_proxy_index:[%u]", index)); +} + +const static dpo_vft_t dpo_vft = { + .dv_lock = clb_dpo_lock_srv6_t_m_gtp4_dt, + .dv_unlock = clb_dpo_unlock_srv6_t_m_gtp4_dt, + .dv_format = clb_dpo_format_srv6_t_m_gtp4_dt, +}; + +const static char *const srv6_t_m_gtp4_dt_nodes[] = { + "srv6-t-m-gtp4-dt", + NULL, +}; + +const static char *const srv6_t_m_gtp4_dt_v6_nodes[] = { + "error-drop", + NULL, +}; + +const static char *const *const dpo_nodes[DPO_PROTO_NUM] = { + [DPO_PROTO_IP6] = srv6_t_m_gtp4_dt_v6_nodes, + [DPO_PROTO_IP4] = srv6_t_m_gtp4_dt_nodes, +}; + +static u8 fn_name[] = "SRv6-T.M.GTP4.DT-plugin"; +static u8 keyword_str[] = "t.m.gtp4.dt"; +static u8 def_str[] = "Transit function with DT for IPv4/GTP tunnel"; +static u8 param_str[] = "fib-index <index> [local-fib-table <index>]"; + +static u8 * +clb_format_srv6_t_m_gtp4_dt (u8 * s, va_list * args) +{ + srv6_t_gtp4_dt_param_t *ls_mem = va_arg (*args, void *); + + s = format (s, "SRv6 Transit gtp4.dt\n\t"); + + if (ls_mem->type == SRV6_GTP4_DT4) + s = format (s, " Type GTP4.DT4 fib-table %u\n", ls_mem->fib4_index); + else if (ls_mem->type == SRV6_GTP4_DT6) + s = format (s, " Type GTP4.DT6, fib-table %u, local-fib-table %u\n", + ls_mem->fib6_index, ls_mem->local_fib_index); + else if (ls_mem->type == SRV6_GTP4_DT46) + s = format (s, " Type GTP4.DT46, fib-table %u, local-fib-table %u\n", + ls_mem->fib6_index, ls_mem->local_fib_index); + else + s = format (s, "\n"); + + return s; +} + +static uword +clb_unformat_srv6_t_m_gtp4_dt (unformat_input_t * input, va_list * args) +{ + void **plugin_mem_p = va_arg (*args, void **); + srv6_t_gtp4_dt_param_t *ls_mem; + u32 fib_index = 0, local_fib_index = 0; + u32 type; + + if (unformat (input, "t.m.gtp4.dt4 fib-table %u", &fib_index)) + { + type = SRV6_GTP4_DT4; + } + else if (unformat (input, "t.m.gtp4.dt6 fib-table %u local-fib-table %u", + &fib_index, &local_fib_index)) + { + type = SRV6_GTP4_DT6; + } + else if (unformat (input, "t.m.gtp4.dt46 fib-table %u local-fib-table %u", + &fib_index, &local_fib_index)) + { + type = SRV6_GTP4_DT46; + } + else + { + return 0; + } + + ls_mem = clib_mem_alloc_aligned_at_offset (sizeof *ls_mem, 0, 0, 1); + clib_memset (ls_mem, 0, sizeof *ls_mem); + *plugin_mem_p = ls_mem; + + ls_mem->fib4_index = fib_table_find (FIB_PROTOCOL_IP4, fib_index); + ls_mem->fib6_index = fib_table_find (FIB_PROTOCOL_IP6, fib_index); + ls_mem->local_fib_index = + fib_table_find (FIB_PROTOCOL_IP6, local_fib_index); + + ls_mem->type = type; + + return 1; +} + +static int +clb_creation_srv6_t_m_gtp4_dt (ip6_sr_policy_t * sr_policy) +{ + return 0; +} + +static int +clb_removal_srv6_t_m_gtp4_dt (ip6_sr_policy_t * sr_policy) +{ + srv6_t_gtp4_dt_param_t *ls_mem; + + ls_mem = (srv6_t_gtp4_dt_param_t *) sr_policy->plugin_mem; + + clib_mem_free (ls_mem); + + return 0; +} + +static clib_error_t * +srv6_t_m_gtp4_dt_init (vlib_main_t * vm) +{ + srv6_t_main_v4_dt_t *sm = &srv6_t_main_v4_dt; + dpo_type_t dpo_type; + vlib_node_t *node; + int rc; + 
+ sm->vlib_main = vm; + sm->vnet_main = vnet_get_main (); + + node = vlib_get_node_by_name (vm, (u8 *) "srv6-t-m-gtp4-dt"); + sm->t_m_gtp4_dt_node_index = node->index; + + node = vlib_get_node_by_name (vm, (u8 *) "error-drop"); + sm->error_node_index = node->index; + + dpo_type = dpo_register_new_type (&dpo_vft, dpo_nodes); + + rc = sr_policy_register_function (vm, fn_name, keyword_str, def_str, param_str, 128, //prefix len + &dpo_type, + clb_format_srv6_t_m_gtp4_dt, + clb_unformat_srv6_t_m_gtp4_dt, + clb_creation_srv6_t_m_gtp4_dt, + clb_removal_srv6_t_m_gtp4_dt); + if (rc < 0) + clib_error_return (0, "SRv6 Transit GTP4.DT Policy function" + "couldn't be registered"); + return 0; +} + +/* *INDENT-OFF* */ +VNET_FEATURE_INIT (srv6_t_m_gtp4_dt, static) = +{ + .arc_name = "ip4-unicast", + .node_name = "srv6-t-m-gtp4-dt", + .runs_before = 0, +}; + +VLIB_INIT_FUNCTION (srv6_t_m_gtp4_dt_init); +/* *INDENT-ON* */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/plugins/srv6-mobile/gtp6_dt.c b/src/plugins/srv6-mobile/gtp6_dt.c new file mode 100644 index 00000000000..7b3cf0ac88f --- /dev/null +++ b/src/plugins/srv6-mobile/gtp6_dt.c @@ -0,0 +1,194 @@ +/* + * srv6_end_m_gtp6_dt.c + * + * Copyright (c) 2019 Arrcus Inc and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <vnet/vnet.h> +#include <vnet/adj/adj.h> +#include <vnet/fib/fib_table.h> +#include <vnet/plugin/plugin.h> +#include <vpp/app/version.h> +#include <srv6-mobile/mobile.h> + +srv6_end_main_v6_dt_t srv6_end_main_v6_dt; + +static void +clb_dpo_lock_srv6_end_m_gtp6_dt (dpo_id_t * dpo) +{ +} + +static void +clb_dpo_unlock_srv6_end_m_gtp6_dt (dpo_id_t * dpo) +{ +} + +static u8 * +clb_dpo_format_srv6_end_m_gtp6_dt (u8 * s, va_list * args) +{ + index_t index = va_arg (*args, index_t); + CLIB_UNUSED (u32 indent) = va_arg (*args, u32); + + return (format (s, "SR: dynamic_proxy_index:[%u]", index)); +} + +const static dpo_vft_t dpo_vft = { + .dv_lock = clb_dpo_lock_srv6_end_m_gtp6_dt, + .dv_unlock = clb_dpo_unlock_srv6_end_m_gtp6_dt, + .dv_format = clb_dpo_format_srv6_end_m_gtp6_dt, +}; + +const static char *const srv6_end_m_gtp6_dt_nodes[] = { + "srv6-end-m-gtp6-dt", + NULL, +}; + +const static char *const *const dpo_nodes[DPO_PROTO_NUM] = { + [DPO_PROTO_IP6] = srv6_end_m_gtp6_dt_nodes, +}; + +static u8 fn_name[] = "SRv6-End.M.GTP6.DT-plugin"; +static u8 keyword_str[] = "end.m.gtp6.dt"; +static u8 def_str[] = "Endpoint function with DT for IPv6/GTP tunnel"; +static u8 param_str[] = "fib-index <index> [local-fib-table <index>]"; + +static u8 * +clb_format_srv6_end_m_gtp6_dt (u8 * s, va_list * args) +{ + srv6_end_gtp6_dt_param_t *ls_mem = va_arg (*args, void *); + + s = format (s, "SRv6 End gtp6.dt\n\t"); + + if (ls_mem->type == SRV6_GTP6_DT4) + s = format (s, " Type GTP6.DT4 fib-table %u\n", ls_mem->fib4_index); + else if (ls_mem->type == SRV6_GTP6_DT6) + s = format (s, " Type GTP6.DT6, fib-table %u, local-fib-table %u\n", + ls_mem->fib6_index, ls_mem->local_fib_index); + else if (ls_mem->type == SRV6_GTP6_DT46) + s = format (s, " Type GTP6.DT46, fib-table %u, local-fib-table %u\n", + ls_mem->fib6_index, ls_mem->local_fib_index); + else + s = format (s, "\n"); + + return s; +} + +static uword +clb_unformat_srv6_end_m_gtp6_dt (unformat_input_t * input, va_list * args) +{ + void **plugin_mem_p = va_arg (*args, void **); + srv6_end_gtp6_dt_param_t *ls_mem; + u32 fib_index = 0, local_fib_index = 0; + u32 type; + + if (unformat (input, "end.m.gtp6.dt4 fib-table %u", &fib_index)) + { + type = SRV6_GTP6_DT4; + } + else if (unformat (input, "end.m.gtp6.dt6 fib-table %u local-fib-table %u", + &fib_index, &local_fib_index)) + { + type = SRV6_GTP6_DT6; + } + else if (unformat (input, "end.m.gtp6.dt46 fib-table %u local-fib-table %u", + &fib_index, &local_fib_index)) + { + type = SRV6_GTP6_DT46; + } + else + { + return 0; + } + + ls_mem = clib_mem_alloc_aligned_at_offset (sizeof *ls_mem, 0, 0, 1); + clib_memset (ls_mem, 0, sizeof *ls_mem); + *plugin_mem_p = ls_mem; + + ls_mem->fib4_index = fib_table_find (FIB_PROTOCOL_IP4, fib_index); + ls_mem->fib6_index = fib_table_find (FIB_PROTOCOL_IP6, fib_index); + ls_mem->local_fib_index = + fib_table_find (FIB_PROTOCOL_IP6, local_fib_index); + + ls_mem->type = type; + + return 1; +} + +static int +clb_creation_srv6_end_m_gtp6_dt (ip6_sr_localsid_t * localsid) +{ + return 0; +} + +static int +clb_removal_srv6_end_m_gtp6_dt (ip6_sr_localsid_t * localsid) +{ + srv6_end_gtp6_dt_param_t *ls_mem; + + ls_mem = localsid->plugin_mem; + + clib_mem_free (ls_mem); + + return 0; +} + +static clib_error_t * +srv6_end_m_gtp6_dt_init (vlib_main_t * vm) +{ + srv6_end_main_v6_dt_t *sm = &srv6_end_main_v6_dt; + dpo_type_t dpo_type; + vlib_node_t *node; + int rc; + + sm->vlib_main = vm; + sm->vnet_main = vnet_get_main (); + + node = vlib_get_node_by_name (vm, (u8 *) 
"srv6-end-m-gtp6-dt"); + sm->end_m_gtp6_dt_node_index = node->index; + + node = vlib_get_node_by_name (vm, (u8 *) "error-drop"); + sm->error_node_index = node->index; + + dpo_type = dpo_register_new_type (&dpo_vft, dpo_nodes); + + rc = sr_localsid_register_function (vm, fn_name, keyword_str, def_str, param_str, 128, //prefix len + &dpo_type, + clb_format_srv6_end_m_gtp6_dt, + clb_unformat_srv6_end_m_gtp6_dt, + clb_creation_srv6_end_m_gtp6_dt, + clb_removal_srv6_end_m_gtp6_dt); + if (rc < 0) + clib_error_return (0, "SRv6 Endpoint GTP6.DT LocalSID function" + "couldn't be registered"); + return 0; +} + +/* *INDENT-OFF* */ +VNET_FEATURE_INIT (srv6_end_m_gtp6_dt, static) = +{ + .arc_name = "ip6-unicast", + .node_name = "srv6-end-m-gtp6-dt", + .runs_before = 0, +}; + +VLIB_INIT_FUNCTION (srv6_end_m_gtp6_dt_init); +/* *INDENT-ON* */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/plugins/srv6-mobile/mobile.h b/src/plugins/srv6-mobile/mobile.h index 5a086b66c6e..eb1836e0968 100644 --- a/src/plugins/srv6-mobile/mobile.h +++ b/src/plugins/srv6-mobile/mobile.h @@ -42,6 +42,11 @@ #define SRV6_GTP6_DT6 2 #define SRV6_GTP6_DT46 3 +#define SRV6_GTP4_UNKNOW 0 +#define SRV6_GTP4_DT4 1 +#define SRV6_GTP4_DT6 2 +#define SRV6_GTP4_DT46 3 + #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ #define BITALIGN2(A,B) A; B #define BITALIGN3(A,B,C) A; B; C @@ -59,6 +64,19 @@ #define SRH_TAG_ERROR_INDICATION 0x0002 #define SRH_TAG_END_MARKER 0x0001 +#define GTPU_RECOVERY_IE_TYPE 0x0e + +#define GTPU_IE_MAX_SIZ 256 +#define SRH_TLV_USER_PLANE_CONTAINER 0x0a /* tentative */ + +/* *INDENT-OFF* */ +typedef struct +{ + u8 type; + u8 restart_counter; +} __attribute__ ((packed)) gtpu_recovery_ie; +/* *INDENT-ON* */ + /* *INDENT-OFF* */ typedef struct { @@ -145,6 +163,17 @@ typedef struct #define GTPU_PT_GTP (1<<4) +/* *INDENT-OFF* */ +typedef struct +{ + u8 type; + u8 length; + u8 value[0]; +} __attribute__ ((packed)) user_plane_sub_tlv_t; +/* *INDENT-ON* */ + +#define USER_PLANE_SUB_TLV_IE 0x01 + typedef struct srv6_end_gtp6_param_s { u8 nhtype; @@ -153,6 +182,24 @@ typedef struct srv6_end_gtp6_param_s u32 sr_prefixlen; } srv6_end_gtp6_param_t; +typedef struct srv6_end_gtp6_dt_param_s +{ + u8 type; + + u32 fib4_index; + u32 fib6_index; + u32 local_fib_index; +} srv6_end_gtp6_dt_param_t; + +typedef struct srv6_t_gtp4_dt_param_s +{ + u8 type; + + u32 fib4_index; + u32 fib6_index; + u32 local_fib_index; +} srv6_t_gtp4_dt_param_t; + typedef struct srv6_end_gtp4_param_s { u8 nhtype; @@ -238,6 +285,30 @@ typedef struct srv6_end_main_v6_decap_di_s extern srv6_end_main_v6_decap_di_t srv6_end_main_v6_decap_di; extern vlib_node_registration_t srv6_end_m_gtp6_d_di; +typedef struct srv6_end_main_v6_dt_s +{ + vlib_main_t *vlib_main; + vnet_main_t *vnet_main; + + u32 end_m_gtp6_dt_node_index; + u32 error_node_index; +} srv6_end_main_v6_dt_t; + +extern srv6_end_main_v6_dt_t srv6_end_main_v6_dt; +extern vlib_node_registration_t srv6_end_m_gtp6_dt; + +typedef struct srv6_t_main_v4_dt_s +{ + vlib_main_t *vlib_main; + vnet_main_t *vnet_main; + + u32 t_m_gtp4_dt_node_index; + u32 error_node_index; +} srv6_t_main_v4_dt_t; + +extern srv6_t_main_v4_dt_t srv6_t_main_v4_dt; +extern vlib_node_registration_t srv6_t_m_gtp4_dt; + #endif /* __included_srv6_end_h__ */ /* diff --git a/src/plugins/srv6-mobile/mobile_plugin_doc.md b/src/plugins/srv6-mobile/mobile_plugin_doc.md new file mode 100644 index 00000000000..fd45aca637c --- /dev/null +++ 
b/src/plugins/srv6-mobile/mobile_plugin_doc.md @@ -0,0 +1,139 @@ +SRv6 Mobile User Plane Plugin for VPP +======================== + +# Introduction + +This plugin module can provide the stateless mobile user plane protocols translation between GTP-U and SRv6. The functions of the translation take advantage of SRv6 network programmability. + +[SRv6 Mobile User Plane](https://tools.ietf.org/html/draft-ietf-dmm-srv6-mobile-uplane) defines the user plane protocol using SRv6 +including following stateless translation functions: + +- **T.M.GTP4.D:** + GTP-U over UDP/IPv4 -> SRv6 +- **End.M.GTP4.E:** + SRv6 -> GTP-U over UDP/IPv4 +- **End.M.GTP6.D:** + GTP-U over UDP/IPv6 -> SRv6 +- **End.M.GTP6.E:** + SRv6 -> GTP-U over UDP/IPv6 + +These functions benefit user plane(overlay) to be able to utilize data plane(underlay) networks properly. And also it benefits data plane to be able to handle user plane in routing paradigm. + +Noted that the prefix of function names follow naming convention of SRv6 network programming. "T" means transit function, "End" means end function, "M" means Mobility specific function. The suffix "D" and "E" mean that "decapsulation" and "encapsulation" respectively. + + +# Implementation + +All SRv6 mobile functions are implemented as VPP plugin modules. The plugin modules leverage the sr_policy and sr_localsid mechanisms. + +# Configurations + +## GTP-U to SRv6 + +The GTP-U tunnel and flow identifiers of a receiving packet are mapped to a Segment Identifier(SID) of sending SRv6 packets. + +### IPv4 infrastructure case + +In case that **IPv4** networks are the infrastructure of GTP-U, T.M.GTP4.D function translates the receiving GTP-U packets to SRv6 packets. + +A T.M.GTP4.D function is associated with the following mandatory parameters: + +- SID: A SRv6 SID to represents the function +- DST-PREFIX: Prefix of remote SRv6 segment. The destination address or last SID of out packets consists of the prefix followed by dst IPv4 address, QFI and TEID of the receiving packets. +- SRC-PREFIX: Prefix for src address of sending packets. The src IPv6 address consists of the prefix followed by the src IPv4 address of the receiving packets. + +The following command instantiates a new T.M.GTP4.D function. + +``` +sr policy add bsid SID behavior t.m.gtp4.d DST-PREFIX v6src_prefix SRC-PREFIX [nhtype {ipv4|ipv6|non-ip}] +``` + +For example, the below command configures the SID 2001:db8::1 with `t.m.gtp4.d` behavior for translating receiving GTP-U over IPv4 packets to SRv6 packets with next-header type is IPv4. + +``` +sr policy add bsid 2001:db8::1 behavior t.m.gtp4.d D1::/32 v6src_prefix A1::/64 nhtype ipv4 +``` + +It should be interesting how a SRv6 BSID works to decapsulate the receiving GTP-U packets over IPv4 header. To utilize ```t.m.gtp4.d``` function, you need to configure some SR steering policy like: + +``` +sr steer l3 172.20.0.1/32 via bsid 2001:db8::1 +``` + +The above steering policy with the BSID of `t.m.gtp4.d` would work properly for the GTP-U packets destined to 172.20.0.1. + +If you have a SID(s) list of SR policy which the configured gtp4.d function to be applied, the SR Policy can be configured as following: + +``` +sr policy add bsid D1:: next A1:: next B1:: next C1:: +``` + +### IPv6 infrastructure case + +In case that GTP-U is deployed over **IPv6** infrastructure, you don't need to configure T.M.GTP4.D function and associated SR steering policy. Instead of that, you just need to configure a localsid of End.M.GTP6.D segment. 
+ +An End.M.GTP6.D segment is associated with the following mandatory parameters: + +- SID-PREFIX: SRv6 SID prefix to represent the function. In this function, it should be the dst address of receiving GTP-U packets. +- DST-PREFIX: Prefix of remote SRv6 Segment. The destination address or last SID of output packets consists of the prefix followed by QFI and TEID of the receiving packets. + +The following command instantiates a new End.M.GTP6.D function. + +``` +sr localsid prefix SID-PREFIX behavior end.m.gtp6.d DST-PREFIX [nhtype {ipv4|ipv6|non-ip}] +``` +For example, the below command configures the SID prefix 2001:db8::/64 with `end.m.gtp6.d` behavior for translating receiving GTP-U over IPv6 packets which have IPv6 destination addresses within 2001:db8::/64 to SRv6 packets. The dst IPv6 address of the outgoing packets consists of D4::/64 followed by QFI and TEID. + +``` +sr localsid prefix 2001:db8::/64 behavior end.m.gtp6.d D4::/64 +``` + +In another case, the translated packets from GTP-U over IPv6 to SRv6 will be re-translated back to GTP-U, which is so called 'Drop-In' mode. + +In Drop-In mode, an additional IPv6 specific end segment is required, named End.M.GTP6.D.Di. It is because that unlike `end.m.gtp6.d`, it needs to preserve original IPv6 dst address as the last SID in the SRH. + +Regardless of that difference exists, the required configuration parameters are same as `end.m.gtp6.d`. + +The following command instantiates a new End.M.GTP6.D.Di function. + +``` +sr localsid prefix 2001:db8::/64 behavior end.m.gtp6.d.di D4::/64 +``` + + +## SRv6 to GTP-U + +The SRv6 Mobile functions on SRv6 to GTP-U direction are End.M.GTP4.E and End.M.GTP6.D. + +In this direction with GTP-U over IPv4 infrastructure, an End.M.GTP4.E segment is associated with the following mandatory parameters: + +- SID-PREFIX: SRv6 SID prefix to represent the function. +- V4SRC-ADDR-POSITION: Integer number indicates bit position where IPv4 src address embedded. + +The following command instantiates a new End.M.GTP4.E function. + +``` +sr localsid prefix SID-PREFIX behavior end.m.gtp4.e v4src_position V4SRC-ADDR-POSITION +``` + +For example, the below command configures the SID prefix 2001:db8::/32 with `end.m.gtp4.e` behavior for translating the receiving SRv6 packets to GTP-U packets encapsulated with UDP/IPv4 header. All the GTP-U tunnel and flow identifiers are extracted from the active SID in the receiving packets. The src IPv4 address of sending GTP-U packets is extracted from the configured bit position in the src IPv6 address. + +``` +sr localsid prefix 2001:db8::/32 behavior end.m.gtp4.e v4src_position 64 +``` + +In IPv6 infrastructure case, an End.M.GTP6.E segment is associated with the following mandatory parameters: + +- SID-PREFIX: SRv6 SID prefix to represent the function. + +The following command instantiates a new End.M.GTP6.E function. + +``` +sr localsid prefix SID-PREFIX behavior end.m.gtp6.e +``` + +For example, the below command configures the SID prefix 2001:db8::/64 with `end.m.gtp6.e` behavior for translating the receiving SRv6 packets to GTP-U packets encapsulated with UDP/IPv6 header. While the last SID indicates GTP-U dst IPv6 address, 32-bits GTP-U TEID and 6-bits QFI are extracted from the active SID in the receiving packets. 
+ +``` +sr localsid prefix 2001:db8::/64 behavior end.m.gtp6.e +``` diff --git a/src/plugins/srv6-mobile/node.c b/src/plugins/srv6-mobile/node.c index de3bc4a6f0d..0120c67284b 100644 --- a/src/plugins/srv6-mobile/node.c +++ b/src/plugins/srv6-mobile/node.c @@ -129,6 +129,14 @@ format_srv6_end_rewrite_trace6 (u8 * s, va_list * args) _(M_GTP6_D_DI_PACKETS, "srv6 End.M.GTP6.D.DI packets") \ _(M_GTP6_D_DI_BAD_PACKETS, "srv6 End.M.GTP6.D.DI bad packets") +#define foreach_srv6_end_v6_dt_error \ + _(M_GTP6_DT_PACKETS, "srv6 End.M.GTP6.DT packets") \ + _(M_GTP6_DT_BAD_PACKETS, "srv6 End.M.GTP6.DT bad packets") + +#define foreach_srv6_t_v4_dt_error \ + _(M_GTP4_DT_PACKETS, "srv6 T.M.GTP4.DT packets") \ + _(M_GTP4_DT_BAD_PACKETS, "srv6 T.M.GTP4.DT bad packets") + typedef enum { #define _(sym,str) SRV6_END_ERROR_##sym, @@ -169,6 +177,22 @@ typedef enum SRV6_END_N_V6_D_DI_ERROR, } srv6_end_error_v6_d_di_t; +typedef enum +{ +#define _(sym,str) SRV6_END_ERROR_##sym, + foreach_srv6_end_v6_dt_error +#undef _ + SRV6_END_N_V6_DT_ERROR, +} srv6_end_error_v6_dt_t; + +typedef enum +{ +#define _(sym,str) SRV6_T_ERROR_##sym, + foreach_srv6_t_v4_dt_error +#undef _ + SRV6_T_N_V4_DT_ERROR, +} srv6_t_error_v4_dt_t; + static char *srv6_end_error_v4_strings[] = { #define _(sym,string) string, foreach_srv6_end_v4_error @@ -199,6 +223,18 @@ static char *srv6_end_error_v6_d_di_strings[] = { #undef _ }; +static char *srv6_end_error_v6_dt_strings[] = { +#define _(sym,string) string, + foreach_srv6_end_v6_dt_error +#undef _ +}; + +static char *srv6_t_error_v4_dt_strings[] = { +#define _(sym,string) string, + foreach_srv6_t_v4_dt_error +#undef _ +}; + typedef enum { SRV6_END_M_GTP4_E_NEXT_DROP, @@ -234,6 +270,22 @@ typedef enum SRV6_END_M_GTP6_D_DI_N_NEXT, } srv6_end_m_gtp6_d_di_next_t; +typedef enum +{ + SRV6_END_M_GTP6_DT_NEXT_DROP, + SRV6_END_M_GTP6_DT_NEXT_LOOKUP4, + SRV6_END_M_GTP6_DT_NEXT_LOOKUP6, + SRV6_END_M_GTP6_DT_N_NEXT, +} srv6_end_m_gtp6_dt_next_t; + +typedef enum +{ + SRV6_T_M_GTP4_DT_NEXT_DROP, + SRV6_T_M_GTP4_DT_NEXT_LOOKUP4, + SRV6_T_M_GTP4_DT_NEXT_LOOKUP6, + SRV6_T_M_GTP4_DT_N_NEXT, +} srv6_t_m_gtp4_dt_next_t; + static inline u16 hash_uword_to_u16 (uword * key) { @@ -345,32 +397,14 @@ VLIB_NODE_FN (srv6_end_m_gtp4_e) (vlib_main_t * vm, uword key; u16 port; ip4_address_t dst4; + u16 ie_size = 0; + u8 ie_buf[GTPU_IE_MAX_SIZ]; void *p; - // we need to be sure there is enough space before - // ip6srv0 header, there is some extra space - // in the pre_data area for this kind of - // logic - - // jump over variable length data - // not sure about the length if (ip6srv0->ip.protocol == IPPROTO_IPV6_ROUTE) { tag = ip6srv0->sr.tag; - - vlib_buffer_advance (b0, - (word) sizeof (ip6srv_combo_header_t) + - ip6srv0->sr.length * 8); } - else - { - vlib_buffer_advance (b0, (word) sizeof (ip6_header_t)); - } - - // get length of encapsulated IPv6 packet (the remaining part) - p = vlib_buffer_get_current (b0); - - len0 = vlib_buffer_length_in_chain (vm, b0); offset = ls0->localsid_len / 8; shift = ls0->localsid_len % 8; @@ -443,6 +477,58 @@ VLIB_NODE_FN (srv6_end_m_gtp4_e) (vlib_main_t * vm, { hdrlen = sizeof (gtpu_exthdr_t); } + + if (PREDICT_FALSE (gtpu_type == GTPU_TYPE_ECHO_REPLY)) + { + hdrlen += sizeof (gtpu_recovery_ie); + } + + if (PREDICT_FALSE (gtpu_type == GTPU_TYPE_ERROR_INDICATION)) + { + ip6_sr_tlv_t *tlv; + u16 ext_len; + + ext_len = ip6srv0->sr.length * 8; + + if (ext_len > + sizeof (ip6_address_t) * (ip6srv0->sr.last_entry + 1)) + { + tlv = + (ip6_sr_tlv_t *) ((u8 *) & ip6srv0->sr + + sizeof 
(ip6_sr_header_t) + + sizeof (ip6_address_t) * + (ip6srv0->sr.last_entry + 1)); + + if (tlv->type == SRH_TLV_USER_PLANE_CONTAINER) + { + user_plane_sub_tlv_t *sub_tlv; + + sub_tlv = (user_plane_sub_tlv_t *) tlv->value; + + ie_size = sub_tlv->length; + clib_memcpy_fast (ie_buf, sub_tlv->value, ie_size); + + hdrlen += ie_size; + } + } + } + + if (ip6srv0->ip.protocol == IPPROTO_IPV6_ROUTE) + { + vlib_buffer_advance (b0, + (word) sizeof (ip6srv_combo_header_t) + + ip6srv0->sr.length * 8); + } + else + { + vlib_buffer_advance (b0, (word) sizeof (ip6_header_t)); + } + + // get length of encapsulated IPv6 packet (the remaining part) + p = vlib_buffer_get_current (b0); + + len0 = vlib_buffer_length_in_chain (vm, b0); + len0 += hdrlen; hdrlen += sizeof (ip4_gtpu_header_t); @@ -499,6 +585,28 @@ VLIB_NODE_FN (srv6_end_m_gtp4_e) (vlib_main_t * vm, hdr0->gtpu.ext->seq = seq; hdr0->gtpu.ext->npdu_num = 0; hdr0->gtpu.ext->nextexthdr = 0; + + if (gtpu_type == GTPU_TYPE_ECHO_REPLY) + { + gtpu_recovery_ie *recovery; + + recovery = + (gtpu_recovery_ie *) ((u8 *) hdr0 + + (hdrlen - + sizeof (gtpu_recovery_ie))); + recovery->type = GTPU_RECOVERY_IE_TYPE; + recovery->restart_counter = 0; + } + else if (gtpu_type == GTPU_TYPE_ERROR_INDICATION) + { + if (ie_size) + { + u8 *ie_ptr; + + ie_ptr = (u8 *) ((u8 *) hdr0 + (hdrlen - ie_size)); + clib_memcpy_fast (ie_ptr, ie_buf, ie_size); + } + } } offset = ls_param->v4src_position / 8; @@ -659,6 +767,9 @@ VLIB_NODE_FN (srv6_t_m_gtp4_d) (vlib_main_t * vm, u32 offset, shift, index; ip6srv_combo_header_t *ip6srv; gtpu_pdu_session_t *sess = NULL; + u16 ie_size = 0; + u16 tlv_siz = 0; + u8 ie_buf[GTPU_IE_MAX_SIZ]; // Decap from GTP-U. hdr = (ip4_gtpu_header_t *) ip4; @@ -783,6 +894,24 @@ VLIB_NODE_FN (srv6_t_m_gtp4_d) (vlib_main_t * vm, } } + if (PREDICT_FALSE (gtpu_type == GTPU_TYPE_ERROR_INDICATION)) + { + u16 payload_len; + + payload_len = clib_net_to_host_u16 (hdr->gtpu.length); + if (payload_len != 0 + && payload_len > hdr_len - sizeof (ip4_gtpu_header_t)) + { + u8 *ies; + + ies = (u8 *) ((u8 *) hdr + hdr_len); + ie_size = + payload_len - (hdr_len - sizeof (ip4_gtpu_header_t)); + clib_memcpy_fast (ie_buf, ies, ie_size); + hdr_len += ie_size; + } + } + src6 = ls_param->v6src_prefix; offset = ls_param->v6src_prefixlen / 8; @@ -847,6 +976,16 @@ VLIB_NODE_FN (srv6_t_m_gtp4_d) (vlib_main_t * vm, } } + if (ie_size) + { + tlv_siz = + sizeof (ip6_sr_tlv_t) + sizeof (user_plane_sub_tlv_t) + + ie_size; + + tlv_siz = (tlv_siz & ~0x07) + (tlv_siz & 0x07 ? 
0x08 : 0x0); + hdr_len += tlv_siz; + } + vlib_buffer_advance (b0, -(word) hdr_len); ip6srv = vlib_buffer_get_current (b0); @@ -1014,6 +1153,25 @@ VLIB_NODE_FN (srv6_t_m_gtp4_d) (vlib_main_t * vm, ip6srv->ip.src_address = src6; + if (PREDICT_FALSE (ie_size)) + { + ip6_sr_tlv_t *tlv; + user_plane_sub_tlv_t *sub_tlv; + + tlv = + (ip6_sr_tlv_t *) ((u8 *) ip6srv + (hdr_len - tlv_siz)); + tlv->type = SRH_TLV_USER_PLANE_CONTAINER; + tlv->length = tlv_siz - sizeof (ip6_sr_tlv_t); + clib_memset (tlv->value, 0, tlv->length); + + sub_tlv = (user_plane_sub_tlv_t *) tlv->value; + sub_tlv->type = USER_PLANE_SUB_TLV_IE; + sub_tlv->length = ie_size; + clib_memcpy_fast (sub_tlv->value, ie_buf, ie_size); + + ip6srv->sr.length += tlv_siz / 8; + } + ip6srv->ip.payload_length = clib_host_to_net_u16 (len0 + hdr_len - sizeof (ip6_header_t)); @@ -1056,8 +1214,8 @@ VLIB_REGISTER_NODE (srv6_end_m_gtp4_e) = srv6_end_error_v4_strings,.n_next_nodes = SRV6_END_M_GTP4_E_N_NEXT,.next_nodes = { - [SRV6_END_M_GTP4_E_NEXT_DROP] = "error-drop", - [SRV6_END_M_GTP4_E_NEXT_LOOKUP] = "ip4-lookup",} + [SRV6_END_M_GTP4_E_NEXT_DROP] = + "error-drop",[SRV6_END_M_GTP4_E_NEXT_LOOKUP] = "ip4-lookup",} ,}; VLIB_REGISTER_NODE (srv6_t_m_gtp4_d) = @@ -1068,8 +1226,8 @@ VLIB_REGISTER_NODE (srv6_t_m_gtp4_d) = srv6_t_error_v4_d_strings,.n_next_nodes = SRV6_T_M_GTP4_D_N_NEXT,.next_nodes = { - [SRV6_T_M_GTP4_D_NEXT_DROP] = "error-drop", - [SRV6_T_M_GTP4_D_NEXT_LOOKUP] = "ip6-lookup",} + [SRV6_T_M_GTP4_D_NEXT_DROP] = + "error-drop",[SRV6_T_M_GTP4_D_NEXT_LOOKUP] = "ip6-lookup",} ,}; // Function for SRv6 GTP6.E function @@ -1149,16 +1307,6 @@ VLIB_NODE_FN (srv6_end_m_gtp6_e) (vlib_main_t * vm, // in the pre_data area for this kind of // logic - // jump over variable length data - // not sure about the length - vlib_buffer_advance (b0, (word) sizeof (ip6srv_combo_header_t) + - ip6srv0->sr.length * 8); - - // get length of encapsulated IPv6 packet (the remaining part) - p = vlib_buffer_get_current (b0); - - len0 = vlib_buffer_length_in_chain (vm, b0); - u32 teid = 0; u8 *teid8p = (u8 *) & teid; u8 qfi = 0; @@ -1167,6 +1315,8 @@ VLIB_NODE_FN (srv6_end_m_gtp6_e) (vlib_main_t * vm, u16 index; u16 offset, shift; u32 hdrlen = 0; + u16 ie_size = 0; + u8 ie_buf[GTPU_IE_MAX_SIZ]; index = ls0->localsid_len; index += 8; @@ -1233,7 +1383,59 @@ VLIB_NODE_FN (srv6_end_m_gtp6_e) (vlib_main_t * vm, hdrlen = sizeof (gtpu_exthdr_t); } + if (gtpu_type == GTPU_TYPE_ECHO_REPLY) + { + hdrlen += sizeof (gtpu_recovery_ie); + } + + if (PREDICT_FALSE (gtpu_type == GTPU_TYPE_ERROR_INDICATION)) + { + ip6_sr_tlv_t *tlv; + u16 ext_len; + + ext_len = ip6srv0->sr.length * 8; + + if (ext_len > + sizeof (ip6_address_t) * (ip6srv0->sr.last_entry + 1)) + { + tlv = + (ip6_sr_tlv_t *) ((u8 *) & ip6srv0->sr + + sizeof (ip6_sr_header_t) + + sizeof (ip6_address_t) * + (ip6srv0->sr.last_entry + 1)); + + if (tlv->type == SRH_TLV_USER_PLANE_CONTAINER) + { + user_plane_sub_tlv_t *sub_tlv; + + sub_tlv = (user_plane_sub_tlv_t *) tlv->value; + + ie_size = sub_tlv->length; + clib_memcpy_fast (ie_buf, sub_tlv->value, ie_size); + + hdrlen += ie_size; + } + } + } + + if (ip6srv0->ip.protocol == IPPROTO_IPV6_ROUTE) + { + vlib_buffer_advance (b0, + (word) sizeof (ip6srv_combo_header_t) + + ip6srv0->sr.length * 8); + } + else + { + vlib_buffer_advance (b0, (word) sizeof (ip6_header_t)); + } + + // get length of encapsulated IPv6 packet (the remaining part) + p = vlib_buffer_get_current (b0); + + len0 = vlib_buffer_length_in_chain (vm, b0); + len0 += hdrlen; + hdrlen += sizeof (ip6_gtpu_header_t); 
vlib_buffer_advance (b0, -(word) hdrlen); @@ -1284,6 +1486,28 @@ VLIB_NODE_FN (srv6_end_m_gtp6_e) (vlib_main_t * vm, hdr0->gtpu.ext->seq = seq; hdr0->gtpu.ext->npdu_num = 0; hdr0->gtpu.ext->nextexthdr = 0; + + if (gtpu_type == GTPU_TYPE_ECHO_REPLY) + { + gtpu_recovery_ie *recovery; + + recovery = + (gtpu_recovery_ie *) ((u8 *) hdr0 + + (hdrlen - + sizeof (gtpu_recovery_ie))); + recovery->type = GTPU_RECOVERY_IE_TYPE; + recovery->restart_counter = 0; + } + else if (gtpu_type == GTPU_TYPE_ERROR_INDICATION) + { + if (ie_size) + { + u8 *ie_ptr; + + ie_ptr = (u8 *) ((u8 *) hdr0 + (hdrlen - ie_size)); + clib_memcpy_fast (ie_ptr, ie_buf, ie_size); + } + } } hdr0->udp.length = clib_host_to_net_u16 (len0 + @@ -1389,6 +1613,9 @@ VLIB_NODE_FN (srv6_end_m_gtp6_d) (vlib_main_t * vm, u32 hdrlen; ip6_header_t *encap = NULL; gtpu_pdu_session_t *sess = NULL; + u16 ie_size = 0; + u16 tlv_siz = 0; + u8 ie_buf[GTPU_IE_MAX_SIZ]; u32 next0 = SRV6_END_M_GTP6_D_NEXT_LOOKUP; @@ -1535,6 +1762,24 @@ VLIB_NODE_FN (srv6_end_m_gtp6_d) (vlib_main_t * vm, } } + if (PREDICT_FALSE (gtpu_type == GTPU_TYPE_ERROR_INDICATION)) + { + u16 payload_len; + + payload_len = clib_net_to_host_u16 (hdr0->gtpu.length); + if (payload_len != 0 + && payload_len > hdrlen - sizeof (ip6_gtpu_header_t)) + { + u8 *ies; + + ies = (u8 *) ((u8 *) hdr0 + hdrlen); + ie_size = + payload_len - (hdrlen - sizeof (ip6_gtpu_header_t)); + clib_memcpy_fast (ie_buf, ies, ie_size); + hdrlen += ie_size; + } + } + // jump over variable length data vlib_buffer_advance (b0, (word) hdrlen); @@ -1587,6 +1832,16 @@ VLIB_NODE_FN (srv6_end_m_gtp6_d) (vlib_main_t * vm, } } + if (ie_size) + { + tlv_siz = + sizeof (ip6_sr_tlv_t) + sizeof (user_plane_sub_tlv_t) + + ie_size; + + tlv_siz = (tlv_siz & ~0x07) + (tlv_siz & 0x07 ? 0x08 : 0x0); + hdr_len += tlv_siz; + } + // jump back to data[0] or pre_data if required vlib_buffer_advance (b0, -(word) hdr_len); @@ -1755,6 +2010,25 @@ VLIB_NODE_FN (srv6_end_m_gtp6_d) (vlib_main_t * vm, } } + if (PREDICT_FALSE (ie_size)) + { + ip6_sr_tlv_t *tlv; + user_plane_sub_tlv_t *sub_tlv; + + tlv = + (ip6_sr_tlv_t *) ((u8 *) ip6srv + (hdr_len - tlv_siz)); + tlv->type = SRH_TLV_USER_PLANE_CONTAINER; + tlv->length = tlv_siz - sizeof (ip6_sr_tlv_t); + clib_memset (tlv->value, 0, tlv->length); + + sub_tlv = (user_plane_sub_tlv_t *) tlv->value; + sub_tlv->type = USER_PLANE_SUB_TLV_IE; + sub_tlv->length = ie_size; + clib_memcpy_fast (sub_tlv->value, ie_buf, ie_size); + + ip6srv->sr.length += tlv_siz / 8; + } + ip6srv->ip.payload_length = clib_host_to_net_u16 (len0 + hdr_len - sizeof (ip6_header_t)); @@ -1845,6 +2119,9 @@ VLIB_NODE_FN (srv6_end_m_gtp6_d_di) (vlib_main_t * vm, u32 hdrlen; ip6_header_t *encap = NULL; gtpu_pdu_session_t *sess; + u16 ie_size = 0; + u16 tlv_siz = 0; + u8 ie_buf[GTPU_IE_MAX_SIZ]; u32 next0 = SRV6_END_M_GTP6_D_DI_NEXT_LOOKUP; @@ -1990,6 +2267,24 @@ VLIB_NODE_FN (srv6_end_m_gtp6_d_di) (vlib_main_t * vm, } } + if (PREDICT_FALSE (gtpu_type == GTPU_TYPE_ERROR_INDICATION)) + { + u16 payload_len; + + payload_len = clib_net_to_host_u16 (hdr0->gtpu.length); + if (payload_len != 0 + && payload_len > hdrlen - sizeof (ip6_gtpu_header_t)) + { + u8 *ies; + + ies = (u8 *) ((u8 *) hdr0 + hdrlen); + ie_size = + payload_len - (hdrlen - sizeof (ip6_gtpu_header_t)); + clib_memcpy_fast (ie_buf, ies, ie_size); + hdrlen += ie_size; + } + } + // jump over variable length data vlib_buffer_advance (b0, (word) hdrlen); @@ -2033,6 +2328,16 @@ VLIB_NODE_FN (srv6_end_m_gtp6_d_di) (vlib_main_t * vm, hdr_len += sizeof (ip6_address_t) * 2; + if (ie_size) 
+ { + tlv_siz = + sizeof (ip6_sr_tlv_t) + sizeof (user_plane_sub_tlv_t) + + ie_size; + + tlv_siz = (tlv_siz & ~0x07) + (tlv_siz & 0x07 ? 0x08 : 0x0); + hdr_len += tlv_siz; + } + // jump back to data[0] or pre_data if required vlib_buffer_advance (b0, -(word) hdr_len); @@ -2114,6 +2419,24 @@ VLIB_NODE_FN (srv6_end_m_gtp6_d_di) (vlib_main_t * vm, ip6srv->sr.segments[0] = dst0; } + if (PREDICT_FALSE (ie_size)) + { + ip6_sr_tlv_t *tlv; + user_plane_sub_tlv_t *sub_tlv; + + tlv = + (ip6_sr_tlv_t *) ((u8 *) ip6srv + (hdr_len - tlv_siz)); + tlv->type = SRH_TLV_USER_PLANE_CONTAINER; + tlv->length = tlv_siz - sizeof (ip6_sr_tlv_t); + clib_memset (tlv->value, 0, tlv->length); + + sub_tlv = (user_plane_sub_tlv_t *) tlv->value; + sub_tlv->length = ie_size; + clib_memcpy_fast (sub_tlv->value, ie_buf, ie_size); + + ip6srv->sr.length += tlv_siz / 8; + } + ip6srv->ip.payload_length = clib_host_to_net_u16 (len0 + hdr_len - sizeof (ip6_header_t)); ip6srv->ip.protocol = IP_PROTOCOL_IPV6_ROUTE; @@ -2207,6 +2530,443 @@ VLIB_NODE_FN (srv6_end_m_gtp6_d_di) (vlib_main_t * vm, return frame->n_vectors; } +// Function for SRv6 GTP6.DT function +VLIB_NODE_FN (srv6_end_m_gtp6_dt) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + srv6_end_main_v6_dt_t *sm = &srv6_end_main_v6_dt; + ip6_sr_main_t *sm2 = &sr_main; + u32 n_left_from, next_index, *from, *to_next; + u32 thread_index = vm->thread_index; + + u32 good_n = 0, bad_n = 0; + + from = vlib_frame_vector_args (frame); + n_left_from = frame->n_vectors; + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t *b0; + srv6_end_gtp6_dt_param_t *ls_param; + ip6_sr_localsid_t *ls0; + + ip6_gtpu_header_t *hdr0 = NULL; + ip4_header_t *ip4 = NULL; + ip6_header_t *ip6 = NULL; + ip6_address_t src, dst; + u32 teid; + u32 hdrlen; + u32 len0; + + u32 next0 = SRV6_END_M_GTP6_DT_NEXT_DROP; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + ls0 = + pool_elt_at_index (sm2->localsids, + vnet_buffer (b0)->ip.adj_index[VLIB_TX]); + + ls_param = (srv6_end_gtp6_dt_param_t *) ls0->plugin_mem; + + hdr0 = vlib_buffer_get_current (b0); + + hdrlen = sizeof (ip6_gtpu_header_t); + + len0 = vlib_buffer_length_in_chain (vm, b0); + + if ((hdr0->ip6.protocol != IP_PROTOCOL_UDP) + || (hdr0->udp.dst_port != + clib_host_to_net_u16 (SRV6_GTP_UDP_DST_PORT)) + || (len0 < sizeof (ip6_gtpu_header_t))) + { + next0 = SRV6_END_M_GTP6_DT_NEXT_DROP; + + bad_n++; + } + else + { + clib_memcpy_fast (src.as_u8, hdr0->ip6.src_address.as_u8, + sizeof (ip6_address_t)); + clib_memcpy_fast (dst.as_u8, hdr0->ip6.dst_address.as_u8, + sizeof (ip6_address_t)); + + teid = hdr0->gtpu.teid; + + if (hdr0->gtpu.ver_flags & GTPU_EXTHDR_FLAG) + { + hdrlen += sizeof (gtpu_exthdr_t); + if (hdr0->gtpu.ext->nextexthdr == GTPU_EXTHDR_PDU_SESSION) + { + gtpu_pdu_session_t *sess; + + sess = + (gtpu_pdu_session_t *) (((char *) hdr0) + + sizeof (ip6_gtpu_header_t) + + sizeof (gtpu_exthdr_t)); + + hdrlen += sizeof (gtpu_pdu_session_t); + if (sess->u.val & GTPU_PDU_SESSION_P_BIT_MASK) + { + hdrlen += sizeof (gtpu_paging_policy_t); + } + } + } + + if (ls_param->type == SRV6_GTP6_DT4) + { + vlib_buffer_advance (b0, (word) hdrlen); + ip4 = vlib_buffer_get_current (b0); + if ((ip4->ip_version_and_header_length & 0xf0) != 0x40) + { + next0 
= SRV6_END_M_GTP6_DT_NEXT_DROP; + bad_n++; + goto DONE; + } + + next0 = SRV6_END_M_GTP6_DT_NEXT_LOOKUP4; + vnet_buffer (b0)->sw_if_index[VLIB_TX] = + ls_param->fib4_index; + } + else if (ls_param->type == SRV6_GTP6_DT6) + { + ip6 = (ip6_header_t *) ((u8 *) hdr0 + hdrlen); + if ((clib_net_to_host_u32 + (ip6->ip_version_traffic_class_and_flow_label) >> 28) + != 6) + { + next0 = SRV6_END_M_GTP6_DT_NEXT_DROP; + bad_n++; + goto DONE; + } + + next0 = SRV6_END_M_GTP6_DT_NEXT_LOOKUP6; + if ((ip6->dst_address.as_u8[0] == 0xfe) + && ((ip6->dst_address.as_u8[1] & 0xc0) == 0x80)) + { + vnet_buffer (b0)->sw_if_index[VLIB_TX] = + ls_param->local_fib_index; + } + else + { + vlib_buffer_advance (b0, (word) hdrlen); + vnet_buffer (b0)->sw_if_index[VLIB_TX] = + ls_param->fib6_index; + } + } + else if (ls_param->type == SRV6_GTP6_DT46) + { + ip6 = (ip6_header_t *) ((u8 *) hdr0 + hdrlen); + if ((clib_net_to_host_u32 + (ip6->ip_version_traffic_class_and_flow_label) >> 28) + == 6) + { + next0 = SRV6_END_M_GTP6_DT_NEXT_LOOKUP6; + if ((ip6->dst_address.as_u8[0] == 0xfe) + && ((ip6->dst_address.as_u8[1] & 0xc0) == 0x80)) + { + vnet_buffer (b0)->sw_if_index[VLIB_TX] = + ls_param->local_fib_index; + } + else + { + vlib_buffer_advance (b0, (word) hdrlen); + vnet_buffer (b0)->sw_if_index[VLIB_TX] = + ls_param->fib6_index; + } + } + else + if ((clib_net_to_host_u32 + (ip6->ip_version_traffic_class_and_flow_label) >> 28) + == 4) + { + vlib_buffer_advance (b0, (word) hdrlen); + next0 = SRV6_END_M_GTP6_DT_NEXT_LOOKUP4; + vnet_buffer (b0)->sw_if_index[VLIB_TX] = + ls_param->fib4_index; + } + else + { + next0 = SRV6_END_M_GTP6_DT_NEXT_DROP; + bad_n++; + goto DONE; + } + } + else + { + next0 = SRV6_END_M_GTP6_DT_NEXT_DROP; + bad_n++; + goto DONE; + } + + good_n++; + + if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) && + PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + srv6_end_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, src.as_u8, + sizeof (ip6_address_t)); + clib_memcpy (tr->dst.as_u8, dst.as_u8, + sizeof (ip6_address_t)); + tr->teid = teid; + } + } + + DONE: + vlib_increment_combined_counter + (((next0 == + SRV6_END_M_GTP6_DT_NEXT_DROP) ? 
&(sm2->sr_ls_invalid_counters) + : &(sm2->sr_ls_valid_counters)), thread_index, + ls0 - sm2->localsids, 1, vlib_buffer_length_in_chain (vm, b0)); + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + vlib_node_increment_counter (vm, sm->end_m_gtp6_dt_node_index, + SRV6_END_ERROR_M_GTP6_DT_BAD_PACKETS, bad_n); + + vlib_node_increment_counter (vm, sm->end_m_gtp6_dt_node_index, + SRV6_END_ERROR_M_GTP6_DT_PACKETS, good_n); + + return frame->n_vectors; +} + +// Function for SRv6 GTP4.DT function +VLIB_NODE_FN (srv6_t_m_gtp4_dt) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + srv6_t_main_v4_dt_t *sm = &srv6_t_main_v4_dt; + ip6_sr_main_t *sm2 = &sr_main; + u32 n_left_from, next_index, *from, *to_next; + + u32 good_n = 0, bad_n = 0; + + from = vlib_frame_vector_args (frame); + n_left_from = frame->n_vectors; + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + u32 n_left_to_next; + + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 bi0; + vlib_buffer_t *b0; + srv6_t_gtp4_dt_param_t *ls_param; + ip6_sr_sl_t *ls0; + + ip4_gtpu_header_t *hdr0 = NULL; + ip4_header_t *ip4 = NULL; + ip6_header_t *ip6 = NULL; + ip6_address_t src, dst; + u32 teid; + u32 hdrlen; + u32 len0; + + u32 next0 = SRV6_T_M_GTP4_DT_NEXT_DROP; + + bi0 = from[0]; + to_next[0] = bi0; + from += 1; + to_next += 1; + n_left_from -= 1; + n_left_to_next -= 1; + + b0 = vlib_get_buffer (vm, bi0); + ls0 = + pool_elt_at_index (sm2->sid_lists, + vnet_buffer (b0)->ip.adj_index[VLIB_TX]); + + ls_param = (srv6_t_gtp4_dt_param_t *) ls0->plugin_mem; + + hdr0 = vlib_buffer_get_current (b0); + + hdrlen = sizeof (ip4_gtpu_header_t); + + len0 = vlib_buffer_length_in_chain (vm, b0); + + if ((hdr0->ip4.protocol != IP_PROTOCOL_UDP) + || (hdr0->udp.dst_port != + clib_host_to_net_u16 (SRV6_GTP_UDP_DST_PORT)) + || (len0 < sizeof (ip4_gtpu_header_t))) + { + next0 = SRV6_T_M_GTP4_DT_NEXT_DROP; + + bad_n++; + } + else + { + clib_memcpy_fast (src.as_u8, hdr0->ip4.src_address.as_u8, + sizeof (ip4_address_t)); + clib_memcpy_fast (dst.as_u8, hdr0->ip4.dst_address.as_u8, + sizeof (ip4_address_t)); + + teid = hdr0->gtpu.teid; + + if (hdr0->gtpu.ver_flags & GTPU_EXTHDR_FLAG) + { + hdrlen += sizeof (gtpu_exthdr_t); + if (hdr0->gtpu.ext->nextexthdr == GTPU_EXTHDR_PDU_SESSION) + { + gtpu_pdu_session_t *sess; + + sess = + (gtpu_pdu_session_t *) (((char *) hdr0) + + sizeof (ip6_gtpu_header_t) + + sizeof (gtpu_exthdr_t)); + + hdrlen += sizeof (gtpu_pdu_session_t); + if (sess->u.val & GTPU_PDU_SESSION_P_BIT_MASK) + { + hdrlen += sizeof (gtpu_paging_policy_t); + } + } + } + + if (ls_param->type == SRV6_GTP4_DT4) + { + vlib_buffer_advance (b0, (word) hdrlen); + ip4 = vlib_buffer_get_current (b0); + if ((ip4->ip_version_and_header_length & 0xf0) != 0x40) + { + next0 = SRV6_T_M_GTP4_DT_NEXT_DROP; + bad_n++; + goto DONE; + } + + next0 = SRV6_T_M_GTP4_DT_NEXT_LOOKUP4; + vnet_buffer (b0)->sw_if_index[VLIB_TX] = + ls_param->fib4_index; + } + else if (ls_param->type == SRV6_GTP4_DT6) + { + ip6 = (ip6_header_t *) ((u8 *) hdr0 + hdrlen); + if ((clib_net_to_host_u32 + (ip6->ip_version_traffic_class_and_flow_label) >> 28) + != 6) + { + next0 = SRV6_T_M_GTP4_DT_NEXT_DROP; + bad_n++; + goto DONE; + } + + next0 = SRV6_T_M_GTP4_DT_NEXT_LOOKUP6; + if ((ip6->dst_address.as_u8[0] == 0xfe) + && ((ip6->dst_address.as_u8[1] & 0xc0) == 0x80)) + { + 
next0 = SRV6_T_M_GTP4_DT_NEXT_LOOKUP4; + vnet_buffer (b0)->sw_if_index[VLIB_TX] = + ls_param->local_fib_index; + } + else + { + vlib_buffer_advance (b0, (word) hdrlen); + vnet_buffer (b0)->sw_if_index[VLIB_TX] = + ls_param->fib6_index; + } + } + else if (ls_param->type == SRV6_GTP4_DT46) + { + ip6 = (ip6_header_t *) ((u8 *) hdr0 + hdrlen); + if ((clib_net_to_host_u32 + (ip6->ip_version_traffic_class_and_flow_label) >> 28) + == 6) + { + next0 = SRV6_T_M_GTP4_DT_NEXT_LOOKUP6; + if ((ip6->dst_address.as_u8[0] == 0xfe) + && ((ip6->dst_address.as_u8[1] & 0xc0) == 0x80)) + { + next0 = SRV6_T_M_GTP4_DT_NEXT_LOOKUP4; + vnet_buffer (b0)->sw_if_index[VLIB_TX] = + ls_param->local_fib_index; + } + else + { + vlib_buffer_advance (b0, (word) hdrlen); + vnet_buffer (b0)->sw_if_index[VLIB_TX] = + ls_param->fib6_index; + } + } + else + if ((clib_net_to_host_u32 + (ip6->ip_version_traffic_class_and_flow_label) >> 28) + == 4) + { + vlib_buffer_advance (b0, (word) hdrlen); + next0 = SRV6_T_M_GTP4_DT_NEXT_LOOKUP4; + vnet_buffer (b0)->sw_if_index[VLIB_TX] = + ls_param->fib4_index; + } + else + { + next0 = SRV6_T_M_GTP4_DT_NEXT_DROP; + bad_n++; + goto DONE; + } + } + else + { + next0 = SRV6_T_M_GTP4_DT_NEXT_DROP; + bad_n++; + goto DONE; + } + + good_n++; + + if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) && + PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) + { + srv6_end_rewrite_trace_t *tr = + vlib_add_trace (vm, node, b0, sizeof (*tr)); + clib_memcpy (tr->src.as_u8, src.as_u8, + sizeof (ip6_address_t)); + clib_memcpy (tr->dst.as_u8, dst.as_u8, + sizeof (ip6_address_t)); + tr->teid = teid; + } + } + + DONE: + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, bi0, next0); + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + vlib_node_increment_counter (vm, sm->t_m_gtp4_dt_node_index, + SRV6_T_ERROR_M_GTP4_DT_BAD_PACKETS, bad_n); + + vlib_node_increment_counter (vm, sm->t_m_gtp4_dt_node_index, + SRV6_T_ERROR_M_GTP4_DT_PACKETS, good_n); + + return frame->n_vectors; +} + VLIB_REGISTER_NODE (srv6_end_m_gtp6_e) = { .name = "srv6-end-m-gtp6-e",.vector_size = sizeof (u32),.format_trace = @@ -2215,8 +2975,8 @@ VLIB_REGISTER_NODE (srv6_end_m_gtp6_e) = srv6_end_error_v6_e_strings,.n_next_nodes = SRV6_END_M_GTP6_E_N_NEXT,.next_nodes = { - [SRV6_END_M_GTP6_E_NEXT_DROP] = "error-drop", - [SRV6_END_M_GTP6_E_NEXT_LOOKUP] = "ip6-lookup",} + [SRV6_END_M_GTP6_E_NEXT_DROP] = + "error-drop",[SRV6_END_M_GTP6_E_NEXT_LOOKUP] = "ip6-lookup",} ,}; VLIB_REGISTER_NODE (srv6_end_m_gtp6_d) = @@ -2227,8 +2987,8 @@ VLIB_REGISTER_NODE (srv6_end_m_gtp6_d) = srv6_end_error_v6_d_strings,.n_next_nodes = SRV6_END_M_GTP6_D_N_NEXT,.next_nodes = { - [SRV6_END_M_GTP6_D_NEXT_DROP] = "error-drop", - [SRV6_END_M_GTP6_D_NEXT_LOOKUP] = "ip6-lookup",} + [SRV6_END_M_GTP6_D_NEXT_DROP] = + "error-drop",[SRV6_END_M_GTP6_D_NEXT_LOOKUP] = "ip6-lookup",} ,}; VLIB_REGISTER_NODE (srv6_end_m_gtp6_d_di) = @@ -2243,6 +3003,34 @@ VLIB_REGISTER_NODE (srv6_end_m_gtp6_d_di) = [SRV6_END_M_GTP6_D_DI_NEXT_LOOKUP] = "ip6-lookup",} ,}; +VLIB_REGISTER_NODE (srv6_end_m_gtp6_dt) = +{ + .name = "srv6-end-m-gtp6-dt",.vector_size = sizeof (u32),.format_trace = + format_srv6_end_rewrite_trace6,.type = VLIB_NODE_TYPE_INTERNAL,.n_errors = + ARRAY_LEN (srv6_end_error_v6_dt_strings),.error_strings = + srv6_end_error_v6_dt_strings,.n_next_nodes = + SRV6_END_M_GTP6_DT_N_NEXT,.next_nodes = + { + [SRV6_END_M_GTP6_DT_NEXT_DROP] = + "error-drop", + [SRV6_END_M_GTP6_DT_NEXT_LOOKUP4] + = 
"ip4-lookup",[SRV6_END_M_GTP6_DT_NEXT_LOOKUP6] = "ip6-lookup",} +,}; + +VLIB_REGISTER_NODE (srv6_t_m_gtp4_dt) = +{ + .name = "srv6-t-m-gtp4-dt",.vector_size = sizeof (u32),.format_trace = + format_srv6_end_rewrite_trace6,.type = VLIB_NODE_TYPE_INTERNAL,.n_errors = + ARRAY_LEN (srv6_t_error_v4_dt_strings),.error_strings = + srv6_t_error_v4_dt_strings,.n_next_nodes = + SRV6_T_M_GTP4_DT_N_NEXT,.next_nodes = + { + [SRV6_T_M_GTP4_DT_NEXT_DROP] = + "error-drop", + [SRV6_T_M_GTP4_DT_NEXT_LOOKUP4] = + "ip4-lookup",[SRV6_T_M_GTP4_DT_NEXT_LOOKUP6] = "ip6-lookup",} +,}; + /* * fd.io coding-style-patch-verification: ON * |