From 7cd468a3d7dee7d6c92f69a0bb7061ae208ec727 Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Mon, 19 Dec 2016 23:05:39 +0100 Subject: Reorganize source tree to use single autotools instance Change-Id: I7b51f88292e057c6443b12224486f2d0c9f8ae23 Signed-off-by: Damjan Marion --- src/vnet/map/examples/gen-rules.py | 186 +++ src/vnet/map/examples/health_check.c | 109 ++ src/vnet/map/examples/test_map.py | 141 +++ src/vnet/map/gen-rules.py | 107 ++ src/vnet/map/ip4_map.c | 813 +++++++++++++ src/vnet/map/ip4_map_t.c | 1363 +++++++++++++++++++++ src/vnet/map/ip6_map.c | 1269 ++++++++++++++++++++ src/vnet/map/ip6_map_t.c | 1517 ++++++++++++++++++++++++ src/vnet/map/map.api | 178 +++ src/vnet/map/map.c | 2166 ++++++++++++++++++++++++++++++++++ src/vnet/map/map.h | 591 ++++++++++ src/vnet/map/map_api.c | 295 +++++ src/vnet/map/map_doc.md | 69 ++ src/vnet/map/map_dpo.c | 191 +++ src/vnet/map/map_dpo.h | 67 ++ src/vnet/map/test.c | 205 ++++ 16 files changed, 9267 insertions(+) create mode 100755 src/vnet/map/examples/gen-rules.py create mode 100644 src/vnet/map/examples/health_check.c create mode 100755 src/vnet/map/examples/test_map.py create mode 100755 src/vnet/map/gen-rules.py create mode 100644 src/vnet/map/ip4_map.c create mode 100644 src/vnet/map/ip4_map_t.c create mode 100644 src/vnet/map/ip6_map.c create mode 100644 src/vnet/map/ip6_map_t.c create mode 100644 src/vnet/map/map.api create mode 100644 src/vnet/map/map.c create mode 100644 src/vnet/map/map.h create mode 100644 src/vnet/map/map_api.c create mode 100644 src/vnet/map/map_doc.md create mode 100644 src/vnet/map/map_dpo.c create mode 100644 src/vnet/map/map_dpo.h create mode 100644 src/vnet/map/test.c (limited to 'src/vnet/map') diff --git a/src/vnet/map/examples/gen-rules.py b/src/vnet/map/examples/gen-rules.py new file mode 100755 index 00000000..7964aa9a --- /dev/null +++ b/src/vnet/map/examples/gen-rules.py @@ -0,0 +1,186 @@ +#!/usr/bin/env python3 + +# Copyright (c) 2015 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
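+# Illustrative sketch (hypothetical helper, not used by this script): the
+# psid-offset / psid-len values generated below follow the RFC 7597 port
+# layout, where a 16-bit port is split into <offset bits><PSID bits><rest>,
+# so the PSID owning a given port can be recovered as:
+def psid_of_port(port, psid_offset=6, psid_len=6):
+    """Return the PSID that owns `port` for an a=psid_offset, k=psid_len layout."""
+    return (port >> (16 - psid_offset - psid_len)) & ((1 << psid_len) - 1)
+# Example: with the default psid-offset 6 / psid-len 6, port 10000 belongs to
+# PSID (10000 >> 4) & 0x3f == 49; ports whose offset field is 0 (i.e. ports
+# below 1024 here) are reserved and excluded from any port set.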
+ +import ipaddress +import argparse +import sys + +# map add domain ip4-pfx ip6-pfx ::/0 ip6-src ea-bits-len 0 psid-offset 6 psid-len 6 +# map add rule index <0> psid ip6-dst + +def_ip4_pfx = '192.0.2.0/24' +def_ip6_pfx = '2001:db8::/32' +def_ip6_src = '2001:db8::1' +def_psid_offset = 6 +def_psid_len = 6 +def_ea_bits_len = 0 + +parser = argparse.ArgumentParser(description='MAP VPP configuration generator') +parser.add_argument('-t', action="store", dest="mapmode") +parser.add_argument('-f', action="store", dest="format", default="vpp") +parser.add_argument('--ip4-prefix', action="store", dest="ip4_pfx", default=def_ip4_pfx) +parser.add_argument('--ip6-prefix', action="store", dest="ip6_pfx", default=def_ip6_pfx) +parser.add_argument('--ip6-src', action="store", dest="ip6_src", default=def_ip6_src) +parser.add_argument('--psid-len', action="store", dest="psid_len", default=def_psid_len) +parser.add_argument('--psid-offset', action="store", dest="psid_offset", default=def_psid_offset) +parser.add_argument('--ea-bits-len', action="store", dest="ea_bits_len", default=def_ea_bits_len) +args = parser.parse_args() + +# +# Print domain +# +def domain_print(i, ip4_pfx, ip6_pfx, ip6_src, eabits_len, psid_offset, psid_len): + if format == 'vpp': + print("map add domain ip4-pfx " + ip4_pfx + " ip6-pfx", ip6_pfx, "ip6-src " + ip6_src + + " ea-bits-len", eabits_len, "psid-offset", psid_offset, "psid-len", psid_len) + if format == 'confd': + print("vpp softwire softwire-instances softwire-instance", i, "br-ipv6 " + ip6_src + + " ipv6-prefix " + ip6_pfx + " ipv4-prefix " + ip4_pfx + + " ea-bits-len", eabits_len, "psid-offset", psid_offset, "psid-len", psid_len) + if format == 'xml': + print("") + print("", i, ""); + print(" " + ip6_src + "") + print(" " + ip6_pfx + "") + print(" " + ip4_pfx + "") + print(" ", eabits_len, "") + print(" ", psid_len, "") + print(" ", psid_offset, "") + +def domain_print_end(): + if format == 'xml': + print("") + +def rule_print(i, psid, dst): + if format == 'vpp': + print("map add rule index", i, "psid", psid, "ip6-dst", dst) + if format == 'confd': + print("binding", psid, "ipv6-addr", dst) + if format == 'xml': + print(" ") + print(" ", psid, "") + print(" ", dst, "") + print(" ") + +# +# Algorithmic mapping Shared IPv4 address +# +def algo(ip4_pfx_str, ip6_pfx_str, ip6_src_str, ea_bits_len, psid_offset, psid_len, ip6_src_ecmp = False): + domain_print(0, ip4_pfx_str, ip6_pfx_str, ip6_src_str, ea_bits_len, psid_offset, psid_len) + domain_print_end() + +# +# 1:1 Full IPv4 address +# +def lw46(ip4_pfx_str, ip6_pfx_str, ip6_src_str, ea_bits_len, psid_offset, psid_len, ip6_src_ecmp = False): + ip4_pfx = ipaddress.ip_network(ip4_pfx_str) + ip6_src = ipaddress.ip_address(ip6_src_str) + ip6_dst = ipaddress.ip_network(ip6_pfx_str) + psid_len = 0 + mod = ip4_pfx.num_addresses / 1024 + + for i in range(ip4_pfx.num_addresses): + domain_print(i, str(ip4_pfx[i]) + "/32", str(ip6_dst[i]) + "/128", str(ip6_src), 0, 0, 0) + domain_print_end() + if ip6_src_ecmp and not i % mod: + ip6_src = ip6_src + 1 + +# +# 1:1 Shared IPv4 address, shared BR (16) VPP CLI +# +def lw46_shared(ip4_pfx_str, ip6_pfx_str, ip6_src_str, ea_bits_len, psid_offset, psid_len, ip6_src_ecmp = False): + ip4_pfx = ipaddress.ip_network(ip4_pfx_str) + ip6_src = ipaddress.ip_address(ip6_src_str) + ip6_dst = ipaddress.ip_network(ip6_pfx_str) + mod = ip4_pfx.num_addresses / 1024 + + for i in range(ip4_pfx.num_addresses): + domain_print(i, str(ip4_pfx[i]) + "/32", "::/0", str(ip6_src), 0, 0, psid_len) + for psid in range(0x1 
<< int(psid_len)): + rule_print(i, psid, str(ip6_dst[(i * (0x1< + + + urn:ietf:params:netconf:base:1.0 + + +]]>]]> + + + + + + + + + + + + + + ''') + +def xml_footer_print(): + print(''' + + + + + + + +]]>]]> + + + + + + +]]>]]> + ''') + + +format = args.format +if format == 'xml': + xml_header_print() +globals()[args.mapmode](args.ip4_pfx, args.ip6_pfx, args.ip6_src, args.ea_bits_len, args.psid_offset, args.psid_len) +if format == 'xml': + xml_footer_print() diff --git a/src/vnet/map/examples/health_check.c b/src/vnet/map/examples/health_check.c new file mode 100644 index 00000000..5f0d85fe --- /dev/null +++ b/src/vnet/map/examples/health_check.c @@ -0,0 +1,109 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static void +usage (void) { + fprintf(stderr, + "Usage: health_check" + " -d debug" + " -I interface" + "\n"); + exit(2); +} + +int +main (int argc, char **argv) +{ + int sd, ch; + uint8_t *opt, *pkt; + struct ifreq ifr; + char *interface = NULL; + bool debug = false; + + while ((ch = getopt(argc, argv, "h?" "I:" "d")) != EOF) { + switch(ch) { + case 'I': + interface = optarg; + break; + case 'd': + debug = true; + break; + default: + usage(); + break; + } + } + + argc -= optind; + argv += optind; + + if (!interface) + usage(); + + /* Request a socket descriptor sd. */ + if ((sd = socket (AF_INET6, SOCK_RAW, IPPROTO_IPIP)) < 0) { + perror ("Failed to get socket descriptor "); + exit (EXIT_FAILURE); + } + + memset(&ifr, 0, sizeof(ifr)); + snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s", interface); + + /* Bind socket to interface of this node. */ + if (setsockopt (sd, SOL_SOCKET, SO_BINDTODEVICE, (void *) &ifr, sizeof (ifr)) < 0) { + perror ("SO_BINDTODEVICE failed"); + exit (EXIT_FAILURE); + } + if (debug) printf("Binding to interface %s\n", interface); + + while (1) { + struct sockaddr_in6 src_addr; + socklen_t addrlen = sizeof(src_addr); + char source[INET6_ADDRSTRLEN+1]; + int len; + uint8_t inpack[IP_MAXPACKET]; + + if ((len = recvfrom(sd, inpack, sizeof(inpack), 0, (struct sockaddr *)&src_addr, &addrlen)) < 0) { + perror("recvfrom failed "); + } + if (inet_ntop(AF_INET6, &src_addr.sin6_addr, source, INET6_ADDRSTRLEN) == NULL) { + perror("inet_ntop() failed."); + exit(EXIT_FAILURE); + } + + /* Reply */ + struct iphdr *ip = (struct iphdr *)inpack; + uint32_t saddr; + struct icmphdr *icmp; + + saddr = ip->saddr; + ip->saddr = ip->daddr; + ip->daddr = saddr; + + switch (ip->protocol) { + case 1: + if (debug) printf ("ICMP Echo request from %s\n", source); + icmp = (struct icmphdr *)&ip[1]; + icmp->type = ICMP_ECHOREPLY; + break; + default: + fprintf(stderr, "Unsupported protocol %d", ip->protocol); + } + if (len = sendto(sd, inpack, len, 0, (struct sockaddr *)&src_addr, addrlen) < 0) { + perror("sendto failed "); + } + } + + close (sd); + + return (EXIT_SUCCESS); +} diff --git a/src/vnet/map/examples/test_map.py b/src/vnet/map/examples/test_map.py new file mode 100755 index 00000000..21388d49 --- /dev/null +++ b/src/vnet/map/examples/test_map.py @@ -0,0 +1,141 @@ +#!/usr/bin/env python + +import time,argparse,sys,cmd, unittest +from ipaddress import * + +parser = argparse.ArgumentParser(description='VPP MAP test') +parser.add_argument('-i', nargs='*', action="store", dest="inputdir") +args = parser.parse_args() + +for dir in args.inputdir: + sys.path.append(dir) +from vpp_papi import * + +# +# 1:1 Shared IPv4 address, shared BR (16) VPP CLI +# +def lw46_shared(ip4_pfx_str, ip6_pfx_str, ip6_src_str, ea_bits_len, 
psid_offset, psid_len, ip6_src_ecmp = False): + ip4_pfx = ip_network(ip4_pfx_str) + ip6_src = ip_address(ip6_src_str) + ip6_dst = ip_network(ip6_pfx_str) + ip6_nul = IPv6Address(u'0::0') + mod = ip4_pfx.num_addresses / 1024 + + for i in range(ip4_pfx.num_addresses): + a = time.clock() + t = map_add_domain(0, ip6_nul.packed, ip4_pfx[i].packed, ip6_src.packed, 0, 32, 128, ea_bits_len, psid_offset, psid_len, 0, 0) + #print "Return from map_add_domain", t + if t == None: + print "map_add_domain failed" + continue + if t.retval != 0: + print "map_add_domain failed", t + continue + for psid in range(0x1 << int(psid_len)): + r = map_add_del_rule(0, t.index, 1, (ip6_dst[(i * (0x1<H', msg[0:2]) + size = unpack('>H', msg[2:4]) + print "Received", id, "of size", size + i += 1 + #del msg + continue + + #time.sleep(0.001) + return + +# Create RX thread +rxthread = RXThread() +rxthread.setDaemon(True) + +print "Connect", connect_to_vpe("client124") +import timeit +rxthread.start() +print "After thread started" + +#pneum_kill_thread() +print "After thread killed" + +#t = show_version(0) +#print "Result from show version", t + +print timeit.timeit('t = show_version(0)', number=1000, setup="from __main__ import show_version") +time.sleep(10) +#print timeit.timeit('control_ping(0)', number=10, setup="from __main__ import control_ping") + + +disconnect_from_vpe() +sys.exit() + + +print t.program, t.version,t.builddate,t.builddirectory + +''' + +t = map_domain_dump(0) +if not t: + print('show map domain failed') + +for d in t: + print("IP6 prefix:",str(IPv6Address(d.ip6prefix))) + print( "IP4 prefix:",str(IPv4Address(d.ip4prefix))) +''' + +suite = unittest.TestLoader().loadTestsFromTestCase(TestMAP) +unittest.TextTestRunner(verbosity=2).run(suite) + +disconnect_from_vpe() + + diff --git a/src/vnet/map/gen-rules.py b/src/vnet/map/gen-rules.py new file mode 100755 index 00000000..533a8e23 --- /dev/null +++ b/src/vnet/map/gen-rules.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python + +# Copyright (c) 2015 Cisco and/or its affiliates. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import ipaddress +import argparse +import sys + +# map add domain ip4-pfx ip6-pfx ::/0 ip6-src ea-bits-len 0 psid-offset 6 psid-len 6 +# map add rule index <0> psid ip6-dst + +parser = argparse.ArgumentParser(description='MAP VPP configuration generator') +parser.add_argument('-t', action="store", dest="mapmode") +args = parser.parse_args() + +# +# 1:1 Shared IPv4 address, shared BR +# +def shared11br(): + ip4_pfx = ipaddress.ip_network('20.0.0.0/16') + ip6_dst = ipaddress.ip_network('bbbb::/32') + psid_len = 6 + for i in range(ip4_pfx.num_addresses): + print("map add domain ip4-pfx " + str(ip4_pfx[i]) + "/32 ip6-pfx ::/0 ip6-shared-src cccc:bbbb::1", + "ea-bits-len 0 psid-offset 6 psid-len", psid_len) + for psid in range(0x1 << psid_len): + print("map add rule index", i, "psid", psid, "ip6-dst", ip6_dst[(i * (0x1<map_domain_index, t->port, + t->cached ? 
"cached" : "forwarded"); +} + +/* + * ip4_map_get_port + */ +u16 +ip4_map_get_port (ip4_header_t * ip, map_dir_e dir) +{ + /* Find port information */ + if (PREDICT_TRUE ((ip->protocol == IP_PROTOCOL_TCP) || + (ip->protocol == IP_PROTOCOL_UDP))) + { + udp_header_t *udp = (void *) (ip + 1); + return (dir == MAP_SENDER ? udp->src_port : udp->dst_port); + } + else if (ip->protocol == IP_PROTOCOL_ICMP) + { + /* + * 1) ICMP Echo request or Echo reply + * 2) ICMP Error with inner packet being UDP or TCP + * 3) ICMP Error with inner packet being ICMP Echo request or Echo reply + */ + icmp46_header_t *icmp = (void *) (ip + 1); + if (icmp->type == ICMP4_echo_request || icmp->type == ICMP4_echo_reply) + { + return *((u16 *) (icmp + 1)); + } + else if (clib_net_to_host_u16 (ip->length) >= 56) + { // IP + ICMP + IP + L4 header + ip4_header_t *icmp_ip = (ip4_header_t *) (icmp + 2); + if (PREDICT_TRUE ((icmp_ip->protocol == IP_PROTOCOL_TCP) || + (icmp_ip->protocol == IP_PROTOCOL_UDP))) + { + udp_header_t *udp = (void *) (icmp_ip + 1); + return (dir == MAP_SENDER ? udp->dst_port : udp->src_port); + } + else if (icmp_ip->protocol == IP_PROTOCOL_ICMP) + { + icmp46_header_t *inner_icmp = (void *) (icmp_ip + 1); + if (inner_icmp->type == ICMP4_echo_request + || inner_icmp->type == ICMP4_echo_reply) + return (*((u16 *) (inner_icmp + 1))); + } + } + } + return (0); +} + +static_always_inline u16 +ip4_map_port_and_security_check (map_domain_t * d, ip4_header_t * ip, + u32 * next, u8 * error) +{ + u16 port = 0; + + if (d->psid_length > 0) + { + if (ip4_get_fragment_offset (ip) == 0) + { + if (PREDICT_FALSE + ((ip->ip_version_and_header_length != 0x45) + || clib_host_to_net_u16 (ip->length) < 28)) + { + return 0; + } + port = ip4_map_get_port (ip, MAP_RECEIVER); + if (port) + { + /* Verify that port is not among the well-known ports */ + if ((d->psid_offset > 0) + && (clib_net_to_host_u16 (port) < + (0x1 << (16 - d->psid_offset)))) + { + *error = MAP_ERROR_ENCAP_SEC_CHECK; + } + else + { + if (ip4_get_fragment_more (ip)) + *next = IP4_MAP_NEXT_REASS; + return (port); + } + } + else + { + *error = MAP_ERROR_BAD_PROTOCOL; + } + } + else + { + *next = IP4_MAP_NEXT_REASS; + } + } + return (0); +} + +/* + * ip4_map_vtcfl + */ +static_always_inline u32 +ip4_map_vtcfl (ip4_header_t * ip4, vlib_buffer_t * p) +{ + map_main_t *mm = &map_main; + u8 tc = mm->tc_copy ? ip4->tos : mm->tc; + u32 vtcfl = 0x6 << 28; + vtcfl |= tc << 20; + vtcfl |= vnet_buffer (p)->ip.flow_hash & 0x000fffff; + + return (clib_host_to_net_u32 (vtcfl)); +} + +static_always_inline bool +ip4_map_ip6_lookup_bypass (vlib_buffer_t * p0, ip4_header_t * ip) +{ +#ifdef MAP_SKIP_IP6_LOOKUP + map_main_t *mm = &map_main; + u32 adj_index0 = mm->adj6_index; + if (adj_index0 > 0) + { + ip_lookup_main_t *lm6 = &ip6_main.lookup_main; + ip_adjacency_t *adj = ip_get_adjacency (lm6, mm->adj6_index); + if (adj->n_adj > 1) + { + u32 hash_c0 = ip4_compute_flow_hash (ip, IP_FLOW_HASH_DEFAULT); + adj_index0 += (hash_c0 & (adj->n_adj - 1)); + } + vnet_buffer (p0)->ip.adj_index[VLIB_TX] = adj_index0; + return (true); + } +#endif + return (false); +} + +/* + * ip4_map_ttl + */ +static inline void +ip4_map_decrement_ttl (ip4_header_t * ip, u8 * error) +{ + i32 ttl = ip->ttl; + + /* Input node should have reject packets with ttl 0. */ + ASSERT (ip->ttl > 0); + + u32 checksum = ip->checksum + clib_host_to_net_u16 (0x0100); + checksum += checksum >= 0xffff; + ip->checksum = checksum; + ttl -= 1; + ip->ttl = ttl; + *error = ttl <= 0 ? 
IP4_ERROR_TIME_EXPIRED : *error; + + /* Verify checksum. */ + ASSERT (ip->checksum == ip4_header_checksum (ip)); +} + +static u32 +ip4_map_fragment (vlib_buffer_t * b, u16 mtu, bool df, u8 * error) +{ + map_main_t *mm = &map_main; + + if (mm->frag_inner) + { + ip_frag_set_vnet_buffer (b, sizeof (ip6_header_t), mtu, + IP4_FRAG_NEXT_IP6_LOOKUP, + IP_FRAG_FLAG_IP6_HEADER); + return (IP4_MAP_NEXT_IP4_FRAGMENT); + } + else + { + if (df && !mm->frag_ignore_df) + { + icmp4_error_set_vnet_buffer (b, ICMP4_destination_unreachable, + ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set, + mtu); + vlib_buffer_advance (b, sizeof (ip6_header_t)); + *error = MAP_ERROR_DF_SET; + return (IP4_MAP_NEXT_ICMP_ERROR); + } + ip_frag_set_vnet_buffer (b, 0, mtu, IP6_FRAG_NEXT_IP6_LOOKUP, + IP_FRAG_FLAG_IP6_HEADER); + return (IP4_MAP_NEXT_IP6_FRAGMENT); + } +} + +/* + * ip4_map + */ +static uword +ip4_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) +{ + u32 n_left_from, *from, next_index, *to_next, n_left_to_next; + vlib_node_runtime_t *error_node = + vlib_node_get_runtime (vm, ip4_map_node.index); + from = vlib_frame_vector_args (frame); + n_left_from = frame->n_vectors; + next_index = node->cached_next_index; + map_main_t *mm = &map_main; + vlib_combined_counter_main_t *cm = mm->domain_counters; + u32 cpu_index = os_get_cpu_number (); + + while (n_left_from > 0) + { + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + /* Dual loop */ + while (n_left_from >= 4 && n_left_to_next >= 2) + { + u32 pi0, pi1; + vlib_buffer_t *p0, *p1; + map_domain_t *d0, *d1; + u8 error0 = MAP_ERROR_NONE, error1 = MAP_ERROR_NONE; + ip4_header_t *ip40, *ip41; + u16 port0 = 0, port1 = 0; + ip6_header_t *ip6h0, *ip6h1; + u32 map_domain_index0 = ~0, map_domain_index1 = ~0; + u32 next0 = IP4_MAP_NEXT_IP6_LOOKUP, next1 = + IP4_MAP_NEXT_IP6_LOOKUP; + + /* Prefetch next iteration. */ + { + vlib_buffer_t *p2, *p3; + + p2 = vlib_get_buffer (vm, from[2]); + p3 = vlib_get_buffer (vm, from[3]); + + vlib_prefetch_buffer_header (p2, STORE); + vlib_prefetch_buffer_header (p3, STORE); + /* IPv4 + 8 = 28. 
possibly plus -40 */ + CLIB_PREFETCH (p2->data - 40, 68, STORE); + CLIB_PREFETCH (p3->data - 40, 68, STORE); + } + + pi0 = to_next[0] = from[0]; + pi1 = to_next[1] = from[1]; + from += 2; + n_left_from -= 2; + to_next += 2; + n_left_to_next -= 2; + + p0 = vlib_get_buffer (vm, pi0); + p1 = vlib_get_buffer (vm, pi1); + ip40 = vlib_buffer_get_current (p0); + ip41 = vlib_buffer_get_current (p1); + d0 = + ip4_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX], + &map_domain_index0); + d1 = + ip4_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX], + &map_domain_index1); + ASSERT (d0); + ASSERT (d1); + + /* + * Shared IPv4 address + */ + port0 = ip4_map_port_and_security_check (d0, ip40, &next0, &error0); + port1 = ip4_map_port_and_security_check (d1, ip41, &next1, &error1); + + /* Decrement IPv4 TTL */ + ip4_map_decrement_ttl (ip40, &error0); + ip4_map_decrement_ttl (ip41, &error1); + bool df0 = + ip40->flags_and_fragment_offset & + clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT); + bool df1 = + ip41->flags_and_fragment_offset & + clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT); + + /* MAP calc */ + u32 da40 = clib_net_to_host_u32 (ip40->dst_address.as_u32); + u32 da41 = clib_net_to_host_u32 (ip41->dst_address.as_u32); + u16 dp40 = clib_net_to_host_u16 (port0); + u16 dp41 = clib_net_to_host_u16 (port1); + u64 dal60 = map_get_pfx (d0, da40, dp40); + u64 dal61 = map_get_pfx (d1, da41, dp41); + u64 dar60 = map_get_sfx (d0, da40, dp40); + u64 dar61 = map_get_sfx (d1, da41, dp41); + if (dal60 == 0 && dar60 == 0 && error0 == MAP_ERROR_NONE + && next0 != IP4_MAP_NEXT_REASS) + error0 = MAP_ERROR_NO_BINDING; + if (dal61 == 0 && dar61 == 0 && error1 == MAP_ERROR_NONE + && next1 != IP4_MAP_NEXT_REASS) + error1 = MAP_ERROR_NO_BINDING; + + /* construct ipv6 header */ + vlib_buffer_advance (p0, -sizeof (ip6_header_t)); + vlib_buffer_advance (p1, -sizeof (ip6_header_t)); + ip6h0 = vlib_buffer_get_current (p0); + ip6h1 = vlib_buffer_get_current (p1); + vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~ 0; + vnet_buffer (p1)->sw_if_index[VLIB_TX] = (u32) ~ 0; + + ip6h0->ip_version_traffic_class_and_flow_label = + ip4_map_vtcfl (ip40, p0); + ip6h1->ip_version_traffic_class_and_flow_label = + ip4_map_vtcfl (ip41, p1); + ip6h0->payload_length = ip40->length; + ip6h1->payload_length = ip41->length; + ip6h0->protocol = IP_PROTOCOL_IP_IN_IP; + ip6h1->protocol = IP_PROTOCOL_IP_IN_IP; + ip6h0->hop_limit = 0x40; + ip6h1->hop_limit = 0x40; + ip6h0->src_address = d0->ip6_src; + ip6h1->src_address = d1->ip6_src; + ip6h0->dst_address.as_u64[0] = clib_host_to_net_u64 (dal60); + ip6h0->dst_address.as_u64[1] = clib_host_to_net_u64 (dar60); + ip6h1->dst_address.as_u64[0] = clib_host_to_net_u64 (dal61); + ip6h1->dst_address.as_u64[1] = clib_host_to_net_u64 (dar61); + + /* + * Determine next node. Can be one of: + * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop + */ + if (PREDICT_TRUE (error0 == MAP_ERROR_NONE)) + { + if (PREDICT_FALSE + (d0->mtu + && (clib_net_to_host_u16 (ip6h0->payload_length) + + sizeof (*ip6h0) > d0->mtu))) + { + next0 = ip4_map_fragment (p0, d0->mtu, df0, &error0); + } + else + { + next0 = + ip4_map_ip6_lookup_bypass (p0, + ip40) ? + IP4_MAP_NEXT_IP6_REWRITE : next0; + vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, + cpu_index, + map_domain_index0, 1, + clib_net_to_host_u16 + (ip6h0->payload_length) + + 40); + } + } + else + { + next0 = IP4_MAP_NEXT_DROP; + } + + /* + * Determine next node. 
Can be one of: + * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop + */ + if (PREDICT_TRUE (error1 == MAP_ERROR_NONE)) + { + if (PREDICT_FALSE + (d1->mtu + && (clib_net_to_host_u16 (ip6h1->payload_length) + + sizeof (*ip6h1) > d1->mtu))) + { + next1 = ip4_map_fragment (p1, d1->mtu, df1, &error1); + } + else + { + next1 = + ip4_map_ip6_lookup_bypass (p1, + ip41) ? + IP4_MAP_NEXT_IP6_REWRITE : next1; + vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, + cpu_index, + map_domain_index1, 1, + clib_net_to_host_u16 + (ip6h1->payload_length) + + 40); + } + } + else + { + next1 = IP4_MAP_NEXT_DROP; + } + + if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED)) + { + map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr)); + tr->map_domain_index = map_domain_index0; + tr->port = port0; + } + if (PREDICT_FALSE (p1->flags & VLIB_BUFFER_IS_TRACED)) + { + map_trace_t *tr = vlib_add_trace (vm, node, p1, sizeof (*tr)); + tr->map_domain_index = map_domain_index1; + tr->port = port1; + } + + p0->error = error_node->errors[error0]; + p1->error = error_node->errors[error1]; + + vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next, + n_left_to_next, pi0, pi1, next0, + next1); + } + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 pi0; + vlib_buffer_t *p0; + map_domain_t *d0; + u8 error0 = MAP_ERROR_NONE; + ip4_header_t *ip40; + u16 port0 = 0; + ip6_header_t *ip6h0; + u32 next0 = IP4_MAP_NEXT_IP6_LOOKUP; + u32 map_domain_index0 = ~0; + + pi0 = to_next[0] = from[0]; + from += 1; + n_left_from -= 1; + to_next += 1; + n_left_to_next -= 1; + + p0 = vlib_get_buffer (vm, pi0); + ip40 = vlib_buffer_get_current (p0); + d0 = + ip4_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX], + &map_domain_index0); + ASSERT (d0); + + /* + * Shared IPv4 address + */ + port0 = ip4_map_port_and_security_check (d0, ip40, &next0, &error0); + + /* Decrement IPv4 TTL */ + ip4_map_decrement_ttl (ip40, &error0); + bool df0 = + ip40->flags_and_fragment_offset & + clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT); + + /* MAP calc */ + u32 da40 = clib_net_to_host_u32 (ip40->dst_address.as_u32); + u16 dp40 = clib_net_to_host_u16 (port0); + u64 dal60 = map_get_pfx (d0, da40, dp40); + u64 dar60 = map_get_sfx (d0, da40, dp40); + if (dal60 == 0 && dar60 == 0 && error0 == MAP_ERROR_NONE + && next0 != IP4_MAP_NEXT_REASS) + error0 = MAP_ERROR_NO_BINDING; + + /* construct ipv6 header */ + vlib_buffer_advance (p0, -(sizeof (ip6_header_t))); + ip6h0 = vlib_buffer_get_current (p0); + vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~ 0; + + ip6h0->ip_version_traffic_class_and_flow_label = + ip4_map_vtcfl (ip40, p0); + ip6h0->payload_length = ip40->length; + ip6h0->protocol = IP_PROTOCOL_IP_IN_IP; + ip6h0->hop_limit = 0x40; + ip6h0->src_address = d0->ip6_src; + ip6h0->dst_address.as_u64[0] = clib_host_to_net_u64 (dal60); + ip6h0->dst_address.as_u64[1] = clib_host_to_net_u64 (dar60); + + /* + * Determine next node. Can be one of: + * ip6-lookup, ip6-rewrite, ip4-fragment, ip4-virtreass, error-drop + */ + if (PREDICT_TRUE (error0 == MAP_ERROR_NONE)) + { + if (PREDICT_FALSE + (d0->mtu + && (clib_net_to_host_u16 (ip6h0->payload_length) + + sizeof (*ip6h0) > d0->mtu))) + { + next0 = ip4_map_fragment (p0, d0->mtu, df0, &error0); + } + else + { + next0 = + ip4_map_ip6_lookup_bypass (p0, + ip40) ? 
+ IP4_MAP_NEXT_IP6_REWRITE : next0; + vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, + cpu_index, + map_domain_index0, 1, + clib_net_to_host_u16 + (ip6h0->payload_length) + + 40); + } + } + else + { + next0 = IP4_MAP_NEXT_DROP; + } + + if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED)) + { + map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr)); + tr->map_domain_index = map_domain_index0; + tr->port = port0; + } + + p0->error = error_node->errors[error0]; + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, pi0, next0); + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + return frame->n_vectors; +} + +/* + * ip4_map_reass + */ +static uword +ip4_map_reass (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * frame) +{ + u32 n_left_from, *from, next_index, *to_next, n_left_to_next; + vlib_node_runtime_t *error_node = + vlib_node_get_runtime (vm, ip4_map_reass_node.index); + from = vlib_frame_vector_args (frame); + n_left_from = frame->n_vectors; + next_index = node->cached_next_index; + map_main_t *mm = &map_main; + vlib_combined_counter_main_t *cm = mm->domain_counters; + u32 cpu_index = os_get_cpu_number (); + u32 *fragments_to_drop = NULL; + u32 *fragments_to_loopback = NULL; + + while (n_left_from > 0) + { + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 pi0; + vlib_buffer_t *p0; + map_domain_t *d0; + u8 error0 = MAP_ERROR_NONE; + ip4_header_t *ip40; + i32 port0 = 0; + ip6_header_t *ip60; + u32 next0 = IP4_MAP_REASS_NEXT_IP6_LOOKUP; + u32 map_domain_index0; + u8 cached = 0; + + pi0 = to_next[0] = from[0]; + from += 1; + n_left_from -= 1; + to_next += 1; + n_left_to_next -= 1; + + p0 = vlib_get_buffer (vm, pi0); + ip60 = vlib_buffer_get_current (p0); + ip40 = (ip4_header_t *) (ip60 + 1); + d0 = + ip4_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX], + &map_domain_index0); + + map_ip4_reass_lock (); + map_ip4_reass_t *r = map_ip4_reass_get (ip40->src_address.as_u32, + ip40->dst_address.as_u32, + ip40->fragment_id, + ip40->protocol, + &fragments_to_drop); + if (PREDICT_FALSE (!r)) + { + // Could not create a caching entry + error0 = MAP_ERROR_FRAGMENT_MEMORY; + } + else if (PREDICT_TRUE (ip4_get_fragment_offset (ip40))) + { + if (r->port >= 0) + { + // We know the port already + port0 = r->port; + } + else if (map_ip4_reass_add_fragment (r, pi0)) + { + // Not enough space for caching + error0 = MAP_ERROR_FRAGMENT_MEMORY; + map_ip4_reass_free (r, &fragments_to_drop); + } + else + { + cached = 1; + } + } + else + if ((port0 = + ip4_get_port (ip40, MAP_RECEIVER, p0->current_length)) < 0) + { + // Could not find port. We'll free the reassembly. 
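+	      // The destination port is what carries the PSID: map_get_pfx_net()
+	      // and map_get_sfx_net() below embed it in the MAP IPv6 destination,
+	      // so a first fragment without a parseable L4 header cannot be mapped.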
+ error0 = MAP_ERROR_BAD_PROTOCOL; + port0 = 0; + map_ip4_reass_free (r, &fragments_to_drop); + } + else + { + r->port = port0; + map_ip4_reass_get_fragments (r, &fragments_to_loopback); + } + +#ifdef MAP_IP4_REASS_COUNT_BYTES + if (!cached && r) + { + r->forwarded += clib_host_to_net_u16 (ip40->length) - 20; + if (!ip4_get_fragment_more (ip40)) + r->expected_total = + ip4_get_fragment_offset (ip40) * 8 + + clib_host_to_net_u16 (ip40->length) - 20; + if (r->forwarded >= r->expected_total) + map_ip4_reass_free (r, &fragments_to_drop); + } +#endif + + map_ip4_reass_unlock (); + + // NOTE: Most operations have already been performed by ip4_map + // All we need is the right destination address + ip60->dst_address.as_u64[0] = + map_get_pfx_net (d0, ip40->dst_address.as_u32, port0); + ip60->dst_address.as_u64[1] = + map_get_sfx_net (d0, ip40->dst_address.as_u32, port0); + + if (PREDICT_FALSE + (d0->mtu + && (clib_net_to_host_u16 (ip60->payload_length) + + sizeof (*ip60) > d0->mtu))) + { + vnet_buffer (p0)->ip_frag.header_offset = sizeof (*ip60); + vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP6_LOOKUP; + vnet_buffer (p0)->ip_frag.mtu = d0->mtu; + vnet_buffer (p0)->ip_frag.flags = IP_FRAG_FLAG_IP6_HEADER; + next0 = IP4_MAP_REASS_NEXT_IP4_FRAGMENT; + } + + if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED)) + { + map_ip4_map_reass_trace_t *tr = + vlib_add_trace (vm, node, p0, sizeof (*tr)); + tr->map_domain_index = map_domain_index0; + tr->port = port0; + tr->cached = cached; + } + + if (cached) + { + //Dequeue the packet + n_left_to_next++; + to_next--; + } + else + { + if (error0 == MAP_ERROR_NONE) + vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, + cpu_index, map_domain_index0, + 1, + clib_net_to_host_u16 + (ip60->payload_length) + 40); + next0 = + (error0 == MAP_ERROR_NONE) ? 
next0 : IP4_MAP_REASS_NEXT_DROP; + p0->error = error_node->errors[error0]; + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, pi0, next0); + } + + //Loopback when we reach the end of the inpu vector + if (n_left_from == 0 && vec_len (fragments_to_loopback)) + { + from = vlib_frame_vector_args (frame); + u32 len = vec_len (fragments_to_loopback); + if (len <= VLIB_FRAME_SIZE) + { + clib_memcpy (from, fragments_to_loopback, + sizeof (u32) * len); + n_left_from = len; + vec_reset_length (fragments_to_loopback); + } + else + { + clib_memcpy (from, + fragments_to_loopback + (len - + VLIB_FRAME_SIZE), + sizeof (u32) * VLIB_FRAME_SIZE); + n_left_from = VLIB_FRAME_SIZE; + _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE; + } + } + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + map_send_all_to_node (vm, fragments_to_drop, node, + &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED], + IP4_MAP_REASS_NEXT_DROP); + + vec_free (fragments_to_drop); + vec_free (fragments_to_loopback); + return frame->n_vectors; +} + +static char *map_error_strings[] = { +#define _(sym,string) string, + foreach_map_error +#undef _ +}; + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE(ip4_map_node) = { + .function = ip4_map, + .name = "ip4-map", + .vector_size = sizeof(u32), + .format_trace = format_map_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = MAP_N_ERROR, + .error_strings = map_error_strings, + + .n_next_nodes = IP4_MAP_N_NEXT, + .next_nodes = { + [IP4_MAP_NEXT_IP6_LOOKUP] = "ip6-lookup", +#ifdef MAP_SKIP_IP6_LOOKUP + [IP4_MAP_NEXT_IP6_REWRITE] = "ip6-rewrite", +#endif + [IP4_MAP_NEXT_IP4_FRAGMENT] = "ip4-frag", + [IP4_MAP_NEXT_IP6_FRAGMENT] = "ip6-frag", + [IP4_MAP_NEXT_REASS] = "ip4-map-reass", + [IP4_MAP_NEXT_ICMP_ERROR] = "ip4-icmp-error", + [IP4_MAP_NEXT_DROP] = "error-drop", + }, +}; +/* *INDENT-ON* */ + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE(ip4_map_reass_node) = { + .function = ip4_map_reass, + .name = "ip4-map-reass", + .vector_size = sizeof(u32), + .format_trace = format_ip4_map_reass_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = MAP_N_ERROR, + .error_strings = map_error_strings, + + .n_next_nodes = IP4_MAP_REASS_N_NEXT, + .next_nodes = { + [IP4_MAP_REASS_NEXT_IP6_LOOKUP] = "ip6-lookup", + [IP4_MAP_REASS_NEXT_IP4_FRAGMENT] = "ip4-frag", + [IP4_MAP_REASS_NEXT_DROP] = "error-drop", + }, +}; +/* *INDENT-ON* */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/map/ip4_map_t.c b/src/vnet/map/ip4_map_t.c new file mode 100644 index 00000000..15974d8a --- /dev/null +++ b/src/vnet/map/ip4_map_t.c @@ -0,0 +1,1363 @@ +/* + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "map.h" + +#include "../ip/ip_frag.h" + +#define IP4_MAP_T_DUAL_LOOP 1 + +typedef enum +{ + IP4_MAPT_NEXT_MAPT_TCP_UDP, + IP4_MAPT_NEXT_MAPT_ICMP, + IP4_MAPT_NEXT_MAPT_FRAGMENTED, + IP4_MAPT_NEXT_DROP, + IP4_MAPT_N_NEXT +} ip4_mapt_next_t; + +typedef enum +{ + IP4_MAPT_ICMP_NEXT_IP6_LOOKUP, + IP4_MAPT_ICMP_NEXT_IP6_FRAG, + IP4_MAPT_ICMP_NEXT_DROP, + IP4_MAPT_ICMP_N_NEXT +} ip4_mapt_icmp_next_t; + +typedef enum +{ + IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP, + IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG, + IP4_MAPT_TCP_UDP_NEXT_DROP, + IP4_MAPT_TCP_UDP_N_NEXT +} ip4_mapt_tcp_udp_next_t; + +typedef enum +{ + IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP, + IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG, + IP4_MAPT_FRAGMENTED_NEXT_DROP, + IP4_MAPT_FRAGMENTED_N_NEXT +} ip4_mapt_fragmented_next_t; + +//This is used to pass information within the buffer data. +//Buffer structure being too small to contain big structures like this. +/* *INDENT-OFF* */ +typedef CLIB_PACKED (struct { + ip6_address_t daddr; + ip6_address_t saddr; + //IPv6 header + Fragmentation header will be here + //sizeof(ip6) + sizeof(ip_frag) - sizeof(ip4) + u8 unused[28]; +}) ip4_mapt_pseudo_header_t; +/* *INDENT-ON* */ + +#define frag_id_4to6(id) (id) + +//TODO: Find the right place in memory for this. +/* *INDENT-OFF* */ +static u8 icmp_to_icmp6_updater_pointer_table[] = + { 0, 1, 4, 4, ~0, + ~0, ~0, ~0, 7, 6, + ~0, ~0, 8, 8, 8, + 8, 24, 24, 24, 24 + }; +/* *INDENT-ON* */ + + +static_always_inline int +ip4_map_fragment_cache (ip4_header_t * ip4, u16 port) +{ + u32 *ignore = NULL; + map_ip4_reass_lock (); + map_ip4_reass_t *r = + map_ip4_reass_get (ip4->src_address.as_u32, ip4->dst_address.as_u32, + ip4->fragment_id, + (ip4->protocol == + IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol, + &ignore); + if (r) + r->port = port; + + map_ip4_reass_unlock (); + return !r; +} + +static_always_inline i32 +ip4_map_fragment_get_port (ip4_header_t * ip4) +{ + u32 *ignore = NULL; + map_ip4_reass_lock (); + map_ip4_reass_t *r = + map_ip4_reass_get (ip4->src_address.as_u32, ip4->dst_address.as_u32, + ip4->fragment_id, + (ip4->protocol == + IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol, + &ignore); + i32 ret = r ? r->port : -1; + map_ip4_reass_unlock (); + return ret; +} + + +/* Statelessly translates an ICMP packet into ICMPv6. + * + * Warning: The checksum will need to be recomputed. 
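+ * (Unlike ICMPv4, the ICMPv6 checksum also covers an IPv6 pseudo-header,
+ * so rewriting type/code in place can never yield a valid checksum by
+ * itself; the caller recomputes it once the outer IPv6 header is built.)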
+ * + */ +static_always_inline int +ip4_icmp_to_icmp6_in_place (icmp46_header_t * icmp, u32 icmp_len, + i32 * receiver_port, ip4_header_t ** inner_ip4) +{ + *inner_ip4 = NULL; + switch (icmp->type) + { + case ICMP4_echo_reply: + *receiver_port = ((u16 *) icmp)[2]; + icmp->type = ICMP6_echo_reply; + break; + case ICMP4_echo_request: + *receiver_port = ((u16 *) icmp)[2]; + icmp->type = ICMP6_echo_request; + break; + case ICMP4_destination_unreachable: + *inner_ip4 = (ip4_header_t *) (((u8 *) icmp) + 8); + *receiver_port = ip4_get_port (*inner_ip4, MAP_SENDER, icmp_len - 8); + + switch (icmp->code) + { + case ICMP4_destination_unreachable_destination_unreachable_net: //0 + case ICMP4_destination_unreachable_destination_unreachable_host: //1 + icmp->type = ICMP6_destination_unreachable; + icmp->code = ICMP6_destination_unreachable_no_route_to_destination; + break; + case ICMP4_destination_unreachable_protocol_unreachable: //2 + icmp->type = ICMP6_parameter_problem; + icmp->code = ICMP6_parameter_problem_unrecognized_next_header; + break; + case ICMP4_destination_unreachable_port_unreachable: //3 + icmp->type = ICMP6_destination_unreachable; + icmp->code = ICMP6_destination_unreachable_port_unreachable; + break; + case ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set: //4 + icmp->type = + ICMP6_packet_too_big; + icmp->code = 0; + { + u32 advertised_mtu = clib_net_to_host_u32 (*((u32 *) (icmp + 1))); + if (advertised_mtu) + advertised_mtu += 20; + else + advertised_mtu = 1000; //FIXME ! (RFC 1191 - plateau value) + + //FIXME: = minimum(advertised MTU+20, MTU_of_IPv6_nexthop, (MTU_of_IPv4_nexthop)+20) + *((u32 *) (icmp + 1)) = clib_host_to_net_u32 (advertised_mtu); + } + break; + + case ICMP4_destination_unreachable_source_route_failed: //5 + case ICMP4_destination_unreachable_destination_network_unknown: //6 + case ICMP4_destination_unreachable_destination_host_unknown: //7 + case ICMP4_destination_unreachable_source_host_isolated: //8 + case ICMP4_destination_unreachable_network_unreachable_for_type_of_service: //11 + case ICMP4_destination_unreachable_host_unreachable_for_type_of_service: //12 + icmp->type = + ICMP6_destination_unreachable; + icmp->code = ICMP6_destination_unreachable_no_route_to_destination; + break; + case ICMP4_destination_unreachable_network_administratively_prohibited: //9 + case ICMP4_destination_unreachable_host_administratively_prohibited: //10 + case ICMP4_destination_unreachable_communication_administratively_prohibited: //13 + case ICMP4_destination_unreachable_precedence_cutoff_in_effect: //15 + icmp->type = ICMP6_destination_unreachable; + icmp->code = + ICMP6_destination_unreachable_destination_administratively_prohibited; + break; + case ICMP4_destination_unreachable_host_precedence_violation: //14 + default: + return -1; + } + break; + + case ICMP4_time_exceeded: //11 + *inner_ip4 = (ip4_header_t *) (((u8 *) icmp) + 8); + *receiver_port = ip4_get_port (*inner_ip4, MAP_SENDER, icmp_len - 8); + icmp->type = ICMP6_time_exceeded; + //icmp->code = icmp->code //unchanged + break; + + case ICMP4_parameter_problem: + *inner_ip4 = (ip4_header_t *) (((u8 *) icmp) + 8); + *receiver_port = ip4_get_port (*inner_ip4, MAP_SENDER, icmp_len - 8); + + switch (icmp->code) + { + case ICMP4_parameter_problem_pointer_indicates_error: + case ICMP4_parameter_problem_bad_length: + icmp->type = ICMP6_parameter_problem; + icmp->code = ICMP6_parameter_problem_erroneous_header_field; + { + u8 ptr = + icmp_to_icmp6_updater_pointer_table[*((u8 *) (icmp + 1))]; + if 
(ptr == 0xff) + return -1; + + *((u32 *) (icmp + 1)) = clib_host_to_net_u32 (ptr); + } + break; + default: + //All other codes cause dropping the packet + return -1; + } + break; + + default: + //All other types cause dropping the packet + return -1; + break; + } + return 0; +} + +static_always_inline void +_ip4_map_t_icmp (map_domain_t * d, vlib_buffer_t * p, u8 * error) +{ + ip4_header_t *ip4, *inner_ip4; + ip6_header_t *ip6, *inner_ip6; + u32 ip_len; + icmp46_header_t *icmp; + i32 recv_port; + ip_csum_t csum; + u16 *inner_L4_checksum = 0; + ip6_frag_hdr_t *inner_frag; + u32 inner_frag_id; + u32 inner_frag_offset; + u8 inner_frag_more; + + ip4 = vlib_buffer_get_current (p); + ip_len = clib_net_to_host_u16 (ip4->length); + ASSERT (ip_len <= p->current_length); + + icmp = (icmp46_header_t *) (ip4 + 1); + if (ip4_icmp_to_icmp6_in_place (icmp, ip_len - sizeof (*ip4), + &recv_port, &inner_ip4)) + { + *error = MAP_ERROR_ICMP; + return; + } + + if (recv_port < 0) + { + // In case of 1:1 mapping, we don't care about the port + if (d->ea_bits_len == 0 && d->rules) + { + recv_port = 0; + } + else + { + *error = MAP_ERROR_ICMP; + return; + } + } + + if (inner_ip4) + { + //We have 2 headers to translate. + //We need to make some room in the middle of the packet + + if (PREDICT_FALSE (ip4_is_fragment (inner_ip4))) + { + //Here it starts getting really tricky + //We will add a fragmentation header in the inner packet + + if (!ip4_is_first_fragment (inner_ip4)) + { + //For now we do not handle unless it is the first fragment + //Ideally we should handle the case as we are in slow path already + *error = MAP_ERROR_FRAGMENTED; + return; + } + + vlib_buffer_advance (p, + -2 * (sizeof (*ip6) - sizeof (*ip4)) - + sizeof (*inner_frag)); + ip6 = vlib_buffer_get_current (p); + clib_memcpy (u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4)), ip4, + 20 + 8); + ip4 = + (ip4_header_t *) u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4)); + icmp = (icmp46_header_t *) (ip4 + 1); + + inner_ip6 = + (ip6_header_t *) u8_ptr_add (inner_ip4, + sizeof (*ip4) - sizeof (*ip6) - + sizeof (*inner_frag)); + inner_frag = + (ip6_frag_hdr_t *) u8_ptr_add (inner_ip6, sizeof (*inner_ip6)); + ip6->payload_length = + u16_net_add (ip4->length, + sizeof (*ip6) - 2 * sizeof (*ip4) + + sizeof (*inner_frag)); + inner_frag_id = frag_id_4to6 (inner_ip4->fragment_id); + inner_frag_offset = ip4_get_fragment_offset (inner_ip4); + inner_frag_more = + ! 
!(inner_ip4->flags_and_fragment_offset & + clib_net_to_host_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS)); + } + else + { + vlib_buffer_advance (p, -2 * (sizeof (*ip6) - sizeof (*ip4))); + ip6 = vlib_buffer_get_current (p); + clib_memcpy (u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4)), ip4, + 20 + 8); + ip4 = + (ip4_header_t *) u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4)); + icmp = (icmp46_header_t *) u8_ptr_add (ip4, sizeof (*ip4)); + inner_ip6 = + (ip6_header_t *) u8_ptr_add (inner_ip4, + sizeof (*ip4) - sizeof (*ip6)); + ip6->payload_length = + u16_net_add (ip4->length, sizeof (*ip6) - 2 * sizeof (*ip4)); + inner_frag = NULL; + } + + if (PREDICT_TRUE (inner_ip4->protocol == IP_PROTOCOL_TCP)) + { + inner_L4_checksum = &((tcp_header_t *) (inner_ip4 + 1))->checksum; + *inner_L4_checksum = + ip_csum_fold (ip_csum_sub_even + (*inner_L4_checksum, + *((u64 *) (&inner_ip4->src_address)))); + } + else if (PREDICT_TRUE (inner_ip4->protocol == IP_PROTOCOL_UDP)) + { + inner_L4_checksum = &((udp_header_t *) (inner_ip4 + 1))->checksum; + if (!*inner_L4_checksum) + { + //The inner packet was first translated, and therefore came from IPv6. + //As the packet was an IPv6 packet, the UDP checksum can't be NULL + *error = MAP_ERROR_ICMP; + return; + } + *inner_L4_checksum = + ip_csum_fold (ip_csum_sub_even + (*inner_L4_checksum, + *((u64 *) (&inner_ip4->src_address)))); + } + else if (inner_ip4->protocol == IP_PROTOCOL_ICMP) + { + //We have an ICMP inside an ICMP + //It needs to be translated, but not for error ICMP messages + icmp46_header_t *inner_icmp = (icmp46_header_t *) (inner_ip4 + 1); + csum = inner_icmp->checksum; + //Only types ICMP4_echo_request and ICMP4_echo_reply are handled by ip4_icmp_to_icmp6_in_place + csum = ip_csum_sub_even (csum, *((u16 *) inner_icmp)); + inner_icmp->type = (inner_icmp->type == ICMP4_echo_request) ? + ICMP6_echo_request : ICMP6_echo_reply; + csum = ip_csum_add_even (csum, *((u16 *) inner_icmp)); + csum = + ip_csum_add_even (csum, clib_host_to_net_u16 (IP_PROTOCOL_ICMP6)); + csum = + ip_csum_add_even (csum, inner_ip4->length - sizeof (*inner_ip4)); + inner_icmp->checksum = ip_csum_fold (csum); + inner_L4_checksum = &inner_icmp->checksum; + inner_ip4->protocol = IP_PROTOCOL_ICMP6; + } + else + { + /* To shut up Coverity */ + os_panic (); + } + + //FIXME: Security check with the port found in the inner packet + + csum = *inner_L4_checksum; //Initial checksum of the inner L4 header + //FIXME: Shouldn't we remove ip addresses from there ? 
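+      // (csum holds the inner L4 checksum as adjusted above - IPv4 addresses
+      // subtracted for TCP/UDP, ICMPv6 pseudo-header terms added for ICMP -
+      // and the new IPv6 addresses chosen below are folded back in, i.e. an
+      // incremental update rather than a full recomputation.)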
+ + inner_ip6->ip_version_traffic_class_and_flow_label = + clib_host_to_net_u32 ((6 << 28) + (inner_ip4->tos << 20)); + inner_ip6->payload_length = + u16_net_add (inner_ip4->length, -sizeof (*inner_ip4)); + inner_ip6->hop_limit = inner_ip4->ttl; + inner_ip6->protocol = inner_ip4->protocol; + + //Note that the source address is within the domain + //while the destination address is the one outside the domain + ip4_map_t_embedded_address (d, &inner_ip6->dst_address, + &inner_ip4->dst_address); + inner_ip6->src_address.as_u64[0] = + map_get_pfx_net (d, inner_ip4->src_address.as_u32, recv_port); + inner_ip6->src_address.as_u64[1] = + map_get_sfx_net (d, inner_ip4->src_address.as_u32, recv_port); + + if (PREDICT_FALSE (inner_frag != NULL)) + { + inner_frag->next_hdr = inner_ip6->protocol; + inner_frag->identification = inner_frag_id; + inner_frag->rsv = 0; + inner_frag->fragment_offset_and_more = + ip6_frag_hdr_offset_and_more (inner_frag_offset, inner_frag_more); + inner_ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION; + inner_ip6->payload_length = + clib_host_to_net_u16 (clib_net_to_host_u16 + (inner_ip6->payload_length) + + sizeof (*inner_frag)); + } + + csum = ip_csum_add_even (csum, inner_ip6->src_address.as_u64[0]); + csum = ip_csum_add_even (csum, inner_ip6->src_address.as_u64[1]); + csum = ip_csum_add_even (csum, inner_ip6->dst_address.as_u64[0]); + csum = ip_csum_add_even (csum, inner_ip6->dst_address.as_u64[1]); + *inner_L4_checksum = ip_csum_fold (csum); + + } + else + { + vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6)); + ip6 = vlib_buffer_get_current (p); + ip6->payload_length = + clib_host_to_net_u16 (clib_net_to_host_u16 (ip4->length) - + sizeof (*ip4)); + } + + //Translate outer IPv6 + ip6->ip_version_traffic_class_and_flow_label = + clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20)); + + ip6->hop_limit = ip4->ttl; + ip6->protocol = IP_PROTOCOL_ICMP6; + + ip4_map_t_embedded_address (d, &ip6->src_address, &ip4->src_address); + ip6->dst_address.as_u64[0] = + map_get_pfx_net (d, ip4->dst_address.as_u32, recv_port); + ip6->dst_address.as_u64[1] = + map_get_sfx_net (d, ip4->dst_address.as_u32, recv_port); + + //Truncate when the packet exceeds the minimal IPv6 MTU + if (p->current_length > 1280) + { + ip6->payload_length = clib_host_to_net_u16 (1280 - sizeof (*ip6)); + p->current_length = 1280; //Looks too simple to be correct... 
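+      // (ICMPv6 error messages must not exceed the IPv6 minimum MTU of 1280
+      // bytes per RFC 4443, hence the clamp of payload_length and
+      // current_length above.)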
+ } + + //TODO: We could do an easy diff-checksum for echo requests/replies + //Recompute ICMP checksum + icmp->checksum = 0; + csum = ip_csum_with_carry (0, ip6->payload_length); + csum = ip_csum_with_carry (csum, clib_host_to_net_u16 (ip6->protocol)); + csum = ip_csum_with_carry (csum, ip6->src_address.as_u64[0]); + csum = ip_csum_with_carry (csum, ip6->src_address.as_u64[1]); + csum = ip_csum_with_carry (csum, ip6->dst_address.as_u64[0]); + csum = ip_csum_with_carry (csum, ip6->dst_address.as_u64[1]); + csum = + ip_incremental_checksum (csum, icmp, + clib_net_to_host_u16 (ip6->payload_length)); + icmp->checksum = ~ip_csum_fold (csum); +} + +static uword +ip4_map_t_icmp (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * frame) +{ + u32 n_left_from, *from, next_index, *to_next, n_left_to_next; + vlib_node_runtime_t *error_node = + vlib_node_get_runtime (vm, ip4_map_t_icmp_node.index); + from = vlib_frame_vector_args (frame); + n_left_from = frame->n_vectors; + next_index = node->cached_next_index; + vlib_combined_counter_main_t *cm = map_main.domain_counters; + u32 cpu_index = os_get_cpu_number (); + + while (n_left_from > 0) + { + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 pi0; + vlib_buffer_t *p0; + ip4_mapt_icmp_next_t next0; + u8 error0; + map_domain_t *d0; + u16 len0; + + next0 = IP4_MAPT_ICMP_NEXT_IP6_LOOKUP; + pi0 = to_next[0] = from[0]; + from += 1; + n_left_from -= 1; + to_next += 1; + n_left_to_next -= 1; + error0 = MAP_ERROR_NONE; + + p0 = vlib_get_buffer (vm, pi0); + vlib_buffer_advance (p0, sizeof (ip4_mapt_pseudo_header_t)); //The pseudo-header is not used + len0 = + clib_net_to_host_u16 (((ip4_header_t *) + vlib_buffer_get_current (p0))->length); + d0 = + pool_elt_at_index (map_main.domains, + vnet_buffer (p0)->map_t.map_domain_index); + _ip4_map_t_icmp (d0, p0, &error0); + + if (vnet_buffer (p0)->map_t.mtu < p0->current_length) + { + vnet_buffer (p0)->ip_frag.header_offset = 0; + vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; + vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP; + next0 = IP4_MAPT_ICMP_NEXT_IP6_FRAG; + } + if (PREDICT_TRUE (error0 == MAP_ERROR_NONE)) + { + vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, + cpu_index, + vnet_buffer (p0)->map_t. 
+ map_domain_index, 1, len0); + } + else + { + next0 = IP4_MAPT_ICMP_NEXT_DROP; + } + p0->error = error_node->errors[error0]; + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, pi0, + next0); + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + return frame->n_vectors; +} + +static uword +ip4_map_t_fragmented (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * frame) +{ + u32 n_left_from, *from, next_index, *to_next, n_left_to_next; + from = vlib_frame_vector_args (frame); + n_left_from = frame->n_vectors; + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 pi0; + vlib_buffer_t *p0; + ip4_header_t *ip40; + ip6_header_t *ip60; + ip6_frag_hdr_t *frag0; + ip4_mapt_pseudo_header_t *pheader0; + ip4_mapt_fragmented_next_t next0; + + next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP; + pi0 = to_next[0] = from[0]; + from += 1; + n_left_from -= 1; + to_next += 1; + n_left_to_next -= 1; + + p0 = vlib_get_buffer (vm, pi0); + + //Accessing pseudo header + pheader0 = vlib_buffer_get_current (p0); + vlib_buffer_advance (p0, sizeof (*pheader0)); + + //Accessing ip4 header + ip40 = vlib_buffer_get_current (p0); + frag0 = + (ip6_frag_hdr_t *) u8_ptr_add (ip40, + sizeof (*ip40) - sizeof (*frag0)); + ip60 = + (ip6_header_t *) u8_ptr_add (ip40, + sizeof (*ip40) - sizeof (*frag0) - + sizeof (*ip60)); + vlib_buffer_advance (p0, + sizeof (*ip40) - sizeof (*ip60) - + sizeof (*frag0)); + + //We know that the protocol was one of ICMP, TCP or UDP + //because the first fragment was found and cached + frag0->next_hdr = + (ip40->protocol == + IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip40->protocol; + frag0->identification = frag_id_4to6 (ip40->fragment_id); + frag0->rsv = 0; + frag0->fragment_offset_and_more = + ip6_frag_hdr_offset_and_more (ip4_get_fragment_offset (ip40), + clib_net_to_host_u16 + (ip40->flags_and_fragment_offset) & + IP4_HEADER_FLAG_MORE_FRAGMENTS); + + ip60->ip_version_traffic_class_and_flow_label = + clib_host_to_net_u32 ((6 << 28) + (ip40->tos << 20)); + ip60->payload_length = + clib_host_to_net_u16 (clib_net_to_host_u16 (ip40->length) - + sizeof (*ip40) + sizeof (*frag0)); + ip60->hop_limit = ip40->ttl; + ip60->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION; + ip60->dst_address.as_u64[0] = pheader0->daddr.as_u64[0]; + ip60->dst_address.as_u64[1] = pheader0->daddr.as_u64[1]; + ip60->src_address.as_u64[0] = pheader0->saddr.as_u64[0]; + ip60->src_address.as_u64[1] = pheader0->saddr.as_u64[1]; + + if (vnet_buffer (p0)->map_t.mtu < p0->current_length) + { + vnet_buffer (p0)->ip_frag.header_offset = 0; + vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; + vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP; + next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG; + } + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, pi0, + next0); + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + return frame->n_vectors; +} + +static uword +ip4_map_t_tcp_udp (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * frame) +{ + u32 n_left_from, *from, next_index, *to_next, n_left_to_next; + from = vlib_frame_vector_args (frame); + n_left_from = frame->n_vectors; + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + +#ifdef IP4_MAP_T_DUAL_LOOP + while 
(n_left_from >= 4 && n_left_to_next >= 2) + { + u32 pi0, pi1; + vlib_buffer_t *p0, *p1; + ip4_header_t *ip40, *ip41; + ip6_header_t *ip60, *ip61; + ip_csum_t csum0, csum1; + u16 *checksum0, *checksum1; + ip6_frag_hdr_t *frag0, *frag1; + u32 frag_id0, frag_id1; + ip4_mapt_pseudo_header_t *pheader0, *pheader1; + ip4_mapt_tcp_udp_next_t next0, next1; + + pi0 = to_next[0] = from[0]; + pi1 = to_next[1] = from[1]; + from += 2; + n_left_from -= 2; + to_next += 2; + n_left_to_next -= 2; + + next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP; + next1 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP; + p0 = vlib_get_buffer (vm, pi0); + p1 = vlib_get_buffer (vm, pi1); + + //Accessing pseudo header + pheader0 = vlib_buffer_get_current (p0); + pheader1 = vlib_buffer_get_current (p1); + vlib_buffer_advance (p0, sizeof (*pheader0)); + vlib_buffer_advance (p1, sizeof (*pheader1)); + + //Accessing ip4 header + ip40 = vlib_buffer_get_current (p0); + ip41 = vlib_buffer_get_current (p1); + checksum0 = + (u16 *) u8_ptr_add (ip40, + vnet_buffer (p0)->map_t.checksum_offset); + checksum1 = + (u16 *) u8_ptr_add (ip41, + vnet_buffer (p1)->map_t.checksum_offset); + + //UDP checksum is optional over IPv4 but mandatory for IPv6 + //We do not check udp->length sanity but use our safe computed value instead + if (PREDICT_FALSE + (!*checksum0 && ip40->protocol == IP_PROTOCOL_UDP)) + { + u16 udp_len = + clib_host_to_net_u16 (ip40->length) - sizeof (*ip40); + udp_header_t *udp = + (udp_header_t *) u8_ptr_add (ip40, sizeof (*ip40)); + ip_csum_t csum; + csum = ip_incremental_checksum (0, udp, udp_len); + csum = + ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len)); + csum = + ip_csum_with_carry (csum, + clib_host_to_net_u16 (IP_PROTOCOL_UDP)); + csum = + ip_csum_with_carry (csum, *((u64 *) (&ip40->src_address))); + *checksum0 = ~ip_csum_fold (csum); + } + if (PREDICT_FALSE + (!*checksum1 && ip41->protocol == IP_PROTOCOL_UDP)) + { + u16 udp_len = + clib_host_to_net_u16 (ip41->length) - sizeof (*ip40); + udp_header_t *udp = + (udp_header_t *) u8_ptr_add (ip41, sizeof (*ip40)); + ip_csum_t csum; + csum = ip_incremental_checksum (0, udp, udp_len); + csum = + ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len)); + csum = + ip_csum_with_carry (csum, + clib_host_to_net_u16 (IP_PROTOCOL_UDP)); + csum = + ip_csum_with_carry (csum, *((u64 *) (&ip41->src_address))); + *checksum1 = ~ip_csum_fold (csum); + } + + csum0 = ip_csum_sub_even (*checksum0, ip40->src_address.as_u32); + csum1 = ip_csum_sub_even (*checksum1, ip41->src_address.as_u32); + csum0 = ip_csum_sub_even (csum0, ip40->dst_address.as_u32); + csum1 = ip_csum_sub_even (csum1, ip41->dst_address.as_u32); + + // Deal with fragmented packets + if (PREDICT_FALSE (ip40->flags_and_fragment_offset & + clib_host_to_net_u16 + (IP4_HEADER_FLAG_MORE_FRAGMENTS))) + { + ip60 = + (ip6_header_t *) u8_ptr_add (ip40, + sizeof (*ip40) - sizeof (*ip60) - + sizeof (*frag0)); + frag0 = + (ip6_frag_hdr_t *) u8_ptr_add (ip40, + sizeof (*ip40) - + sizeof (*frag0)); + frag_id0 = frag_id_4to6 (ip40->fragment_id); + vlib_buffer_advance (p0, + sizeof (*ip40) - sizeof (*ip60) - + sizeof (*frag0)); + } + else + { + ip60 = + (ip6_header_t *) (((u8 *) ip40) + sizeof (*ip40) - + sizeof (*ip60)); + vlib_buffer_advance (p0, sizeof (*ip40) - sizeof (*ip60)); + frag0 = NULL; + } + + if (PREDICT_FALSE (ip41->flags_and_fragment_offset & + clib_host_to_net_u16 + (IP4_HEADER_FLAG_MORE_FRAGMENTS))) + { + ip61 = + (ip6_header_t *) u8_ptr_add (ip41, + sizeof (*ip40) - sizeof (*ip60) - + sizeof (*frag0)); + frag1 = + (ip6_frag_hdr_t 
*) u8_ptr_add (ip41, + sizeof (*ip40) - + sizeof (*frag0)); + frag_id1 = frag_id_4to6 (ip41->fragment_id); + vlib_buffer_advance (p1, + sizeof (*ip40) - sizeof (*ip60) - + sizeof (*frag0)); + } + else + { + ip61 = + (ip6_header_t *) (((u8 *) ip41) + sizeof (*ip40) - + sizeof (*ip60)); + vlib_buffer_advance (p1, sizeof (*ip40) - sizeof (*ip60)); + frag1 = NULL; + } + + ip60->ip_version_traffic_class_and_flow_label = + clib_host_to_net_u32 ((6 << 28) + (ip40->tos << 20)); + ip61->ip_version_traffic_class_and_flow_label = + clib_host_to_net_u32 ((6 << 28) + (ip41->tos << 20)); + ip60->payload_length = u16_net_add (ip40->length, -sizeof (*ip40)); + ip61->payload_length = u16_net_add (ip41->length, -sizeof (*ip40)); + ip60->hop_limit = ip40->ttl; + ip61->hop_limit = ip41->ttl; + ip60->protocol = ip40->protocol; + ip61->protocol = ip41->protocol; + + if (PREDICT_FALSE (frag0 != NULL)) + { + frag0->next_hdr = ip60->protocol; + frag0->identification = frag_id0; + frag0->rsv = 0; + frag0->fragment_offset_and_more = + ip6_frag_hdr_offset_and_more (0, 1); + ip60->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION; + ip60->payload_length = + u16_net_add (ip60->payload_length, sizeof (*frag0)); + } + + if (PREDICT_FALSE (frag1 != NULL)) + { + frag1->next_hdr = ip61->protocol; + frag1->identification = frag_id1; + frag1->rsv = 0; + frag1->fragment_offset_and_more = + ip6_frag_hdr_offset_and_more (0, 1); + ip61->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION; + ip61->payload_length = + u16_net_add (ip61->payload_length, sizeof (*frag0)); + } + + //Finally copying the address + ip60->dst_address.as_u64[0] = pheader0->daddr.as_u64[0]; + ip61->dst_address.as_u64[0] = pheader1->daddr.as_u64[0]; + ip60->dst_address.as_u64[1] = pheader0->daddr.as_u64[1]; + ip61->dst_address.as_u64[1] = pheader1->daddr.as_u64[1]; + ip60->src_address.as_u64[0] = pheader0->saddr.as_u64[0]; + ip61->src_address.as_u64[0] = pheader1->saddr.as_u64[0]; + ip60->src_address.as_u64[1] = pheader0->saddr.as_u64[1]; + ip61->src_address.as_u64[1] = pheader1->saddr.as_u64[1]; + + csum0 = ip_csum_add_even (csum0, ip60->src_address.as_u64[0]); + csum1 = ip_csum_add_even (csum1, ip61->src_address.as_u64[0]); + csum0 = ip_csum_add_even (csum0, ip60->src_address.as_u64[1]); + csum1 = ip_csum_add_even (csum1, ip61->src_address.as_u64[1]); + csum0 = ip_csum_add_even (csum0, ip60->dst_address.as_u64[0]); + csum1 = ip_csum_add_even (csum1, ip61->dst_address.as_u64[0]); + csum0 = ip_csum_add_even (csum0, ip60->dst_address.as_u64[1]); + csum1 = ip_csum_add_even (csum1, ip61->dst_address.as_u64[1]); + *checksum0 = ip_csum_fold (csum0); + *checksum1 = ip_csum_fold (csum1); + + if (vnet_buffer (p0)->map_t.mtu < p0->current_length) + { + vnet_buffer (p0)->ip_frag.header_offset = 0; + vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; + vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP; + next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG; + } + + if (vnet_buffer (p1)->map_t.mtu < p1->current_length) + { + vnet_buffer (p1)->ip_frag.header_offset = 0; + vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu; + vnet_buffer (p1)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP; + next1 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG; + } + + vlib_validate_buffer_enqueue_x2 (vm, node, next_index, + to_next, n_left_to_next, pi0, pi1, + next0, next1); + } +#endif + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 pi0; + vlib_buffer_t *p0; + ip4_header_t *ip40; + ip6_header_t *ip60; + ip_csum_t csum0; + u16 *checksum0; + ip6_frag_hdr_t *frag0; + u32 frag_id0; + 
ip4_mapt_pseudo_header_t *pheader0; + ip4_mapt_tcp_udp_next_t next0; + + pi0 = to_next[0] = from[0]; + from += 1; + n_left_from -= 1; + to_next += 1; + n_left_to_next -= 1; + + next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP; + p0 = vlib_get_buffer (vm, pi0); + + //Accessing pseudo header + pheader0 = vlib_buffer_get_current (p0); + vlib_buffer_advance (p0, sizeof (*pheader0)); + + //Accessing ip4 header + ip40 = vlib_buffer_get_current (p0); + checksum0 = + (u16 *) u8_ptr_add (ip40, + vnet_buffer (p0)->map_t.checksum_offset); + + //UDP checksum is optional over IPv4 but mandatory for IPv6 + //We do not check udp->length sanity but use our safe computed value instead + if (PREDICT_FALSE + (!*checksum0 && ip40->protocol == IP_PROTOCOL_UDP)) + { + u16 udp_len = + clib_host_to_net_u16 (ip40->length) - sizeof (*ip40); + udp_header_t *udp = + (udp_header_t *) u8_ptr_add (ip40, sizeof (*ip40)); + ip_csum_t csum; + csum = ip_incremental_checksum (0, udp, udp_len); + csum = + ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len)); + csum = + ip_csum_with_carry (csum, + clib_host_to_net_u16 (IP_PROTOCOL_UDP)); + csum = + ip_csum_with_carry (csum, *((u64 *) (&ip40->src_address))); + *checksum0 = ~ip_csum_fold (csum); + } + + csum0 = ip_csum_sub_even (*checksum0, ip40->src_address.as_u32); + csum0 = ip_csum_sub_even (csum0, ip40->dst_address.as_u32); + + // Deal with fragmented packets + if (PREDICT_FALSE (ip40->flags_and_fragment_offset & + clib_host_to_net_u16 + (IP4_HEADER_FLAG_MORE_FRAGMENTS))) + { + ip60 = + (ip6_header_t *) u8_ptr_add (ip40, + sizeof (*ip40) - sizeof (*ip60) - + sizeof (*frag0)); + frag0 = + (ip6_frag_hdr_t *) u8_ptr_add (ip40, + sizeof (*ip40) - + sizeof (*frag0)); + frag_id0 = frag_id_4to6 (ip40->fragment_id); + vlib_buffer_advance (p0, + sizeof (*ip40) - sizeof (*ip60) - + sizeof (*frag0)); + } + else + { + ip60 = + (ip6_header_t *) (((u8 *) ip40) + sizeof (*ip40) - + sizeof (*ip60)); + vlib_buffer_advance (p0, sizeof (*ip40) - sizeof (*ip60)); + frag0 = NULL; + } + + ip60->ip_version_traffic_class_and_flow_label = + clib_host_to_net_u32 ((6 << 28) + (ip40->tos << 20)); + ip60->payload_length = u16_net_add (ip40->length, -sizeof (*ip40)); + ip60->hop_limit = ip40->ttl; + ip60->protocol = ip40->protocol; + + if (PREDICT_FALSE (frag0 != NULL)) + { + frag0->next_hdr = ip60->protocol; + frag0->identification = frag_id0; + frag0->rsv = 0; + frag0->fragment_offset_and_more = + ip6_frag_hdr_offset_and_more (0, 1); + ip60->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION; + ip60->payload_length = + u16_net_add (ip60->payload_length, sizeof (*frag0)); + } + + //Finally copying the address + ip60->dst_address.as_u64[0] = pheader0->daddr.as_u64[0]; + ip60->dst_address.as_u64[1] = pheader0->daddr.as_u64[1]; + ip60->src_address.as_u64[0] = pheader0->saddr.as_u64[0]; + ip60->src_address.as_u64[1] = pheader0->saddr.as_u64[1]; + + csum0 = ip_csum_add_even (csum0, ip60->src_address.as_u64[0]); + csum0 = ip_csum_add_even (csum0, ip60->src_address.as_u64[1]); + csum0 = ip_csum_add_even (csum0, ip60->dst_address.as_u64[0]); + csum0 = ip_csum_add_even (csum0, ip60->dst_address.as_u64[1]); + *checksum0 = ip_csum_fold (csum0); + + if (vnet_buffer (p0)->map_t.mtu < p0->current_length) + { + //Send to fragmentation node if necessary + vnet_buffer (p0)->ip_frag.header_offset = 0; + vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; + vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP; + next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG; + } + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, 
+ to_next, n_left_to_next, pi0, + next0); + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + return frame->n_vectors; +} + +static_always_inline void +ip4_map_t_classify (vlib_buffer_t * p0, map_domain_t * d0, + ip4_header_t * ip40, u16 ip4_len0, i32 * dst_port0, + u8 * error0, ip4_mapt_next_t * next0) +{ + if (PREDICT_FALSE (ip4_get_fragment_offset (ip40))) + { + *next0 = IP4_MAPT_NEXT_MAPT_FRAGMENTED; + if (d0->ea_bits_len == 0 && d0->rules) + { + *dst_port0 = 0; + } + else + { + *dst_port0 = ip4_map_fragment_get_port (ip40); + *error0 = (*dst_port0 == -1) ? MAP_ERROR_FRAGMENT_MEMORY : *error0; + } + } + else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_TCP)) + { + vnet_buffer (p0)->map_t.checksum_offset = 36; + *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP; + *error0 = ip4_len0 < 40 ? MAP_ERROR_MALFORMED : *error0; + *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 2)); + } + else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_UDP)) + { + vnet_buffer (p0)->map_t.checksum_offset = 26; + *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP; + *error0 = ip4_len0 < 28 ? MAP_ERROR_MALFORMED : *error0; + *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 2)); + } + else if (ip40->protocol == IP_PROTOCOL_ICMP) + { + *next0 = IP4_MAPT_NEXT_MAPT_ICMP; + if (d0->ea_bits_len == 0 && d0->rules) + *dst_port0 = 0; + else if (((icmp46_header_t *) u8_ptr_add (ip40, sizeof (*ip40)))->code + == ICMP4_echo_reply + || ((icmp46_header_t *) + u8_ptr_add (ip40, + sizeof (*ip40)))->code == ICMP4_echo_request) + *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 6)); + } + else + { + *error0 = MAP_ERROR_BAD_PROTOCOL; + } +} + +static uword +ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) +{ + u32 n_left_from, *from, next_index, *to_next, n_left_to_next; + vlib_node_runtime_t *error_node = + vlib_node_get_runtime (vm, ip4_map_t_node.index); + from = vlib_frame_vector_args (frame); + n_left_from = frame->n_vectors; + next_index = node->cached_next_index; + vlib_combined_counter_main_t *cm = map_main.domain_counters; + u32 cpu_index = os_get_cpu_number (); + + while (n_left_from > 0) + { + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + +#ifdef IP4_MAP_T_DUAL_LOOP + while (n_left_from >= 4 && n_left_to_next >= 2) + { + u32 pi0, pi1; + vlib_buffer_t *p0, *p1; + ip4_header_t *ip40, *ip41; + map_domain_t *d0, *d1; + ip4_mapt_next_t next0 = 0, next1 = 0; + u16 ip4_len0, ip4_len1; + u8 error0, error1; + i32 dst_port0, dst_port1; + ip4_mapt_pseudo_header_t *pheader0, *pheader1; + + pi0 = to_next[0] = from[0]; + pi1 = to_next[1] = from[1]; + from += 2; + n_left_from -= 2; + to_next += 2; + n_left_to_next -= 2; + error0 = MAP_ERROR_NONE; + error1 = MAP_ERROR_NONE; + + p0 = vlib_get_buffer (vm, pi0); + p1 = vlib_get_buffer (vm, pi1); + ip40 = vlib_buffer_get_current (p0); + ip41 = vlib_buffer_get_current (p1); + ip4_len0 = clib_host_to_net_u16 (ip40->length); + ip4_len1 = clib_host_to_net_u16 (ip41->length); + + if (PREDICT_FALSE (p0->current_length < ip4_len0 || + ip40->ip_version_and_header_length != 0x45)) + { + error0 = MAP_ERROR_UNKNOWN; + next0 = IP4_MAPT_NEXT_DROP; + } + + if (PREDICT_FALSE (p1->current_length < ip4_len1 || + ip41->ip_version_and_header_length != 0x45)) + { + error1 = MAP_ERROR_UNKNOWN; + next1 = IP4_MAPT_NEXT_DROP; + } + + d0 = ip4_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX], + &vnet_buffer (p0)->map_t.map_domain_index); + d1 = ip4_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX], + 
&vnet_buffer (p1)->map_t.map_domain_index); + + vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0; + vnet_buffer (p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0; + + dst_port0 = -1; + dst_port1 = -1; + + ip4_map_t_classify (p0, d0, ip40, ip4_len0, &dst_port0, &error0, + &next0); + ip4_map_t_classify (p1, d1, ip41, ip4_len1, &dst_port1, &error1, + &next1); + + //Add MAP-T pseudo header in front of the packet + vlib_buffer_advance (p0, -sizeof (*pheader0)); + vlib_buffer_advance (p1, -sizeof (*pheader1)); + pheader0 = vlib_buffer_get_current (p0); + pheader1 = vlib_buffer_get_current (p1); + + //Save addresses within the packet + ip4_map_t_embedded_address (d0, &pheader0->saddr, + &ip40->src_address); + ip4_map_t_embedded_address (d1, &pheader1->saddr, + &ip41->src_address); + pheader0->daddr.as_u64[0] = + map_get_pfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0); + pheader0->daddr.as_u64[1] = + map_get_sfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0); + pheader1->daddr.as_u64[0] = + map_get_pfx_net (d1, ip41->dst_address.as_u32, (u16) dst_port1); + pheader1->daddr.as_u64[1] = + map_get_sfx_net (d1, ip41->dst_address.as_u32, (u16) dst_port1); + + if (PREDICT_FALSE + (ip4_is_first_fragment (ip40) && (dst_port0 != -1) + && (d0->ea_bits_len != 0 || !d0->rules) + && ip4_map_fragment_cache (ip40, dst_port0))) + { + error0 = MAP_ERROR_FRAGMENT_MEMORY; + } + + if (PREDICT_FALSE + (ip4_is_first_fragment (ip41) && (dst_port1 != -1) + && (d1->ea_bits_len != 0 || !d1->rules) + && ip4_map_fragment_cache (ip41, dst_port1))) + { + error1 = MAP_ERROR_FRAGMENT_MEMORY; + } + + if (PREDICT_TRUE + (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP)) + { + vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, + cpu_index, + vnet_buffer (p0)->map_t. + map_domain_index, 1, + clib_net_to_host_u16 (ip40-> + length)); + } + + if (PREDICT_TRUE + (error1 == MAP_ERROR_NONE && next1 != IP4_MAPT_NEXT_MAPT_ICMP)) + { + vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, + cpu_index, + vnet_buffer (p1)->map_t. + map_domain_index, 1, + clib_net_to_host_u16 (ip41-> + length)); + } + + next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0; + next1 = (error1 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next1; + p0->error = error_node->errors[error0]; + p1->error = error_node->errors[error1]; + vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next, + n_left_to_next, pi0, pi1, next0, + next1); + } +#endif + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 pi0; + vlib_buffer_t *p0; + ip4_header_t *ip40; + map_domain_t *d0; + ip4_mapt_next_t next0; + u16 ip4_len0; + u8 error0; + i32 dst_port0; + ip4_mapt_pseudo_header_t *pheader0; + + pi0 = to_next[0] = from[0]; + from += 1; + n_left_from -= 1; + to_next += 1; + n_left_to_next -= 1; + error0 = MAP_ERROR_NONE; + + p0 = vlib_get_buffer (vm, pi0); + ip40 = vlib_buffer_get_current (p0); + ip4_len0 = clib_host_to_net_u16 (ip40->length); + if (PREDICT_FALSE (p0->current_length < ip4_len0 || + ip40->ip_version_and_header_length != 0x45)) + { + error0 = MAP_ERROR_UNKNOWN; + next0 = IP4_MAPT_NEXT_DROP; + } + + d0 = ip4_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX], + &vnet_buffer (p0)->map_t.map_domain_index); + + vnet_buffer (p0)->map_t.mtu = d0->mtu ? 
d0->mtu : ~0; + + dst_port0 = -1; + ip4_map_t_classify (p0, d0, ip40, ip4_len0, &dst_port0, &error0, + &next0); + + //Add MAP-T pseudo header in front of the packet + vlib_buffer_advance (p0, -sizeof (*pheader0)); + pheader0 = vlib_buffer_get_current (p0); + + //Save addresses within the packet + ip4_map_t_embedded_address (d0, &pheader0->saddr, + &ip40->src_address); + pheader0->daddr.as_u64[0] = + map_get_pfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0); + pheader0->daddr.as_u64[1] = + map_get_sfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0); + + //It is important to cache at this stage because the result might be necessary + //for packets within the same vector. + //Actually, this approach even provides some limited out-of-order fragments support + if (PREDICT_FALSE + (ip4_is_first_fragment (ip40) && (dst_port0 != -1) + && (d0->ea_bits_len != 0 || !d0->rules) + && ip4_map_fragment_cache (ip40, dst_port0))) + { + error0 = MAP_ERROR_UNKNOWN; + } + + if (PREDICT_TRUE + (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP)) + { + vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, + cpu_index, + vnet_buffer (p0)->map_t. + map_domain_index, 1, + clib_net_to_host_u16 (ip40-> + length)); + } + + next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0; + p0->error = error_node->errors[error0]; + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, pi0, + next0); + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + return frame->n_vectors; +} + +static char *map_t_error_strings[] = { +#define _(sym,string) string, + foreach_map_error +#undef _ +}; + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE(ip4_map_t_fragmented_node) = { + .function = ip4_map_t_fragmented, + .name = "ip4-map-t-fragmented", + .vector_size = sizeof(u32), + .format_trace = format_map_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = MAP_N_ERROR, + .error_strings = map_t_error_strings, + + .n_next_nodes = IP4_MAPT_FRAGMENTED_N_NEXT, + .next_nodes = { + [IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP] = "ip6-lookup", + [IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME, + [IP4_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop", + }, +}; +/* *INDENT-ON* */ + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE(ip4_map_t_icmp_node) = { + .function = ip4_map_t_icmp, + .name = "ip4-map-t-icmp", + .vector_size = sizeof(u32), + .format_trace = format_map_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = MAP_N_ERROR, + .error_strings = map_t_error_strings, + + .n_next_nodes = IP4_MAPT_ICMP_N_NEXT, + .next_nodes = { + [IP4_MAPT_ICMP_NEXT_IP6_LOOKUP] = "ip6-lookup", + [IP4_MAPT_ICMP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME, + [IP4_MAPT_ICMP_NEXT_DROP] = "error-drop", + }, +}; +/* *INDENT-ON* */ + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE(ip4_map_t_tcp_udp_node) = { + .function = ip4_map_t_tcp_udp, + .name = "ip4-map-t-tcp-udp", + .vector_size = sizeof(u32), + .format_trace = format_map_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = MAP_N_ERROR, + .error_strings = map_t_error_strings, + + .n_next_nodes = IP4_MAPT_TCP_UDP_N_NEXT, + .next_nodes = { + [IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP] = "ip6-lookup", + [IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME, + [IP4_MAPT_TCP_UDP_NEXT_DROP] = "error-drop", + }, +}; +/* *INDENT-ON* */ + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE(ip4_map_t_node) = { + .function = ip4_map_t, + .name = "ip4-map-t", + .vector_size = sizeof(u32), + .format_trace = format_map_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = MAP_N_ERROR, + 
.error_strings = map_t_error_strings, + + .n_next_nodes = IP4_MAPT_N_NEXT, + .next_nodes = { + [IP4_MAPT_NEXT_MAPT_TCP_UDP] = "ip4-map-t-tcp-udp", + [IP4_MAPT_NEXT_MAPT_ICMP] = "ip4-map-t-icmp", + [IP4_MAPT_NEXT_MAPT_FRAGMENTED] = "ip4-map-t-fragmented", + [IP4_MAPT_NEXT_DROP] = "error-drop", + }, +}; +/* *INDENT-ON* */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/map/ip6_map.c b/src/vnet/map/ip6_map.c new file mode 100644 index 00000000..d2945059 --- /dev/null +++ b/src/vnet/map/ip6_map.c @@ -0,0 +1,1269 @@ +/* + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "map.h" + +#include "../ip/ip_frag.h" + +enum ip6_map_next_e +{ + IP6_MAP_NEXT_IP4_LOOKUP, +#ifdef MAP_SKIP_IP6_LOOKUP + IP6_MAP_NEXT_IP4_REWRITE, +#endif + IP6_MAP_NEXT_IP6_REASS, + IP6_MAP_NEXT_IP4_REASS, + IP6_MAP_NEXT_IP4_FRAGMENT, + IP6_MAP_NEXT_IP6_ICMP_RELAY, + IP6_MAP_NEXT_IP6_LOCAL, + IP6_MAP_NEXT_DROP, + IP6_MAP_NEXT_ICMP, + IP6_MAP_N_NEXT, +}; + +enum ip6_map_ip6_reass_next_e +{ + IP6_MAP_IP6_REASS_NEXT_IP6_MAP, + IP6_MAP_IP6_REASS_NEXT_DROP, + IP6_MAP_IP6_REASS_N_NEXT, +}; + +enum ip6_map_ip4_reass_next_e +{ + IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP, + IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT, + IP6_MAP_IP4_REASS_NEXT_DROP, + IP6_MAP_IP4_REASS_N_NEXT, +}; + +enum ip6_icmp_relay_next_e +{ + IP6_ICMP_RELAY_NEXT_IP4_LOOKUP, + IP6_ICMP_RELAY_NEXT_DROP, + IP6_ICMP_RELAY_N_NEXT, +}; + +vlib_node_registration_t ip6_map_ip4_reass_node; +vlib_node_registration_t ip6_map_ip6_reass_node; +static vlib_node_registration_t ip6_map_icmp_relay_node; + +typedef struct +{ + u32 map_domain_index; + u16 port; + u8 cached; +} map_ip6_map_ip4_reass_trace_t; + +u8 * +format_ip6_map_ip4_reass_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + map_ip6_map_ip4_reass_trace_t *t = + va_arg (*args, map_ip6_map_ip4_reass_trace_t *); + return format (s, "MAP domain index: %d L4 port: %u Status: %s", + t->map_domain_index, t->port, + t->cached ? "cached" : "forwarded"); +} + +typedef struct +{ + u16 offset; + u16 frag_len; + u8 out; +} map_ip6_map_ip6_reass_trace_t; + +u8 * +format_ip6_map_ip6_reass_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + map_ip6_map_ip6_reass_trace_t *t = + va_arg (*args, map_ip6_map_ip6_reass_trace_t *); + return format (s, "Offset: %d Fragment length: %d Status: %s", t->offset, + t->frag_len, t->out ? 
"out" : "in"); +} + +/* + * ip6_map_sec_check + */ +static_always_inline bool +ip6_map_sec_check (map_domain_t * d, u16 port, ip4_header_t * ip4, + ip6_header_t * ip6) +{ + u16 sp4 = clib_net_to_host_u16 (port); + u32 sa4 = clib_net_to_host_u32 (ip4->src_address.as_u32); + u64 sal6 = map_get_pfx (d, sa4, sp4); + u64 sar6 = map_get_sfx (d, sa4, sp4); + + if (PREDICT_FALSE + (sal6 != clib_net_to_host_u64 (ip6->src_address.as_u64[0]) + || sar6 != clib_net_to_host_u64 (ip6->src_address.as_u64[1]))) + return (false); + return (true); +} + +static_always_inline void +ip6_map_security_check (map_domain_t * d, ip4_header_t * ip4, + ip6_header_t * ip6, u32 * next, u8 * error) +{ + map_main_t *mm = &map_main; + if (d->ea_bits_len || d->rules) + { + if (d->psid_length > 0) + { + if (!ip4_is_fragment (ip4)) + { + u16 port = ip4_map_get_port (ip4, MAP_SENDER); + if (port) + { + if (mm->sec_check) + *error = + ip6_map_sec_check (d, port, ip4, + ip6) ? MAP_ERROR_NONE : + MAP_ERROR_DECAP_SEC_CHECK; + } + else + { + *error = MAP_ERROR_BAD_PROTOCOL; + } + } + else + { + *next = mm->sec_check_frag ? IP6_MAP_NEXT_IP4_REASS : *next; + } + } + } +} + +static_always_inline bool +ip6_map_ip4_lookup_bypass (vlib_buffer_t * p0, ip4_header_t * ip) +{ +#ifdef MAP_SKIP_IP6_LOOKUP + map_main_t *mm = &map_main; + u32 adj_index0 = mm->adj4_index; + if (adj_index0 > 0) + { + ip_lookup_main_t *lm4 = &ip4_main.lookup_main; + ip_adjacency_t *adj = ip_get_adjacency (lm4, mm->adj4_index); + if (adj->n_adj > 1) + { + u32 hash_c0 = ip4_compute_flow_hash (ip, IP_FLOW_HASH_DEFAULT); + adj_index0 += (hash_c0 & (adj->n_adj - 1)); + } + vnet_buffer (p0)->ip.adj_index[VLIB_TX] = adj_index0; + return (true); + } +#endif + return (false); +} + +/* + * ip6_map + */ +static uword +ip6_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) +{ + u32 n_left_from, *from, next_index, *to_next, n_left_to_next; + vlib_node_runtime_t *error_node = + vlib_node_get_runtime (vm, ip6_map_node.index); + map_main_t *mm = &map_main; + vlib_combined_counter_main_t *cm = mm->domain_counters; + u32 cpu_index = os_get_cpu_number (); + + from = vlib_frame_vector_args (frame); + n_left_from = frame->n_vectors; + next_index = node->cached_next_index; + while (n_left_from > 0) + { + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + /* Dual loop */ + while (n_left_from >= 4 && n_left_to_next >= 2) + { + u32 pi0, pi1; + vlib_buffer_t *p0, *p1; + u8 error0 = MAP_ERROR_NONE; + u8 error1 = MAP_ERROR_NONE; + map_domain_t *d0 = 0, *d1 = 0; + ip4_header_t *ip40, *ip41; + ip6_header_t *ip60, *ip61; + u16 port0 = 0, port1 = 0; + u32 map_domain_index0 = ~0, map_domain_index1 = ~0; + u32 next0 = IP6_MAP_NEXT_IP4_LOOKUP; + u32 next1 = IP6_MAP_NEXT_IP4_LOOKUP; + + /* Prefetch next iteration. 
*/ + { + vlib_buffer_t *p2, *p3; + + p2 = vlib_get_buffer (vm, from[2]); + p3 = vlib_get_buffer (vm, from[3]); + + vlib_prefetch_buffer_header (p2, LOAD); + vlib_prefetch_buffer_header (p3, LOAD); + + /* IPv6 + IPv4 header + 8 bytes of ULP */ + CLIB_PREFETCH (p2->data, 68, LOAD); + CLIB_PREFETCH (p3->data, 68, LOAD); + } + + pi0 = to_next[0] = from[0]; + pi1 = to_next[1] = from[1]; + from += 2; + n_left_from -= 2; + to_next += 2; + n_left_to_next -= 2; + + p0 = vlib_get_buffer (vm, pi0); + p1 = vlib_get_buffer (vm, pi1); + ip60 = vlib_buffer_get_current (p0); + ip61 = vlib_buffer_get_current (p1); + vlib_buffer_advance (p0, sizeof (ip6_header_t)); + vlib_buffer_advance (p1, sizeof (ip6_header_t)); + ip40 = vlib_buffer_get_current (p0); + ip41 = vlib_buffer_get_current (p1); + + /* + * Encapsulated IPv4 packet + * - IPv4 fragmented -> Pass to virtual reassembly unless security check disabled + * - Lookup/Rewrite or Fragment node in case of packet > MTU + * Fragmented IPv6 packet + * ICMP IPv6 packet + * - Error -> Pass to ICMPv6/ICMPv4 relay + * - Info -> Pass to IPv6 local + * Anything else -> drop + */ + if (PREDICT_TRUE + (ip60->protocol == IP_PROTOCOL_IP_IN_IP + && clib_net_to_host_u16 (ip60->payload_length) > 20)) + { + d0 = + ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX], + (ip4_address_t *) & ip40->src_address. + as_u32, &map_domain_index0, &error0); + } + else if (ip60->protocol == IP_PROTOCOL_ICMP6 && + clib_net_to_host_u16 (ip60->payload_length) > + sizeof (icmp46_header_t)) + { + icmp46_header_t *icmp = (void *) (ip60 + 1); + next0 = (icmp->type == ICMP6_echo_request + || icmp->type == + ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL : + IP6_MAP_NEXT_IP6_ICMP_RELAY; + } + else if (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION) + { + next0 = IP6_MAP_NEXT_IP6_REASS; + } + else + { + error0 = MAP_ERROR_BAD_PROTOCOL; + } + if (PREDICT_TRUE + (ip61->protocol == IP_PROTOCOL_IP_IN_IP + && clib_net_to_host_u16 (ip61->payload_length) > 20)) + { + d1 = + ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX], + (ip4_address_t *) & ip41->src_address. + as_u32, &map_domain_index1, &error1); + } + else if (ip61->protocol == IP_PROTOCOL_ICMP6 && + clib_net_to_host_u16 (ip61->payload_length) > + sizeof (icmp46_header_t)) + { + icmp46_header_t *icmp = (void *) (ip61 + 1); + next1 = (icmp->type == ICMP6_echo_request + || icmp->type == + ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL : + IP6_MAP_NEXT_IP6_ICMP_RELAY; + } + else if (ip61->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION) + { + next1 = IP6_MAP_NEXT_IP6_REASS; + } + else + { + error1 = MAP_ERROR_BAD_PROTOCOL; + } + + if (d0) + { + /* MAP inbound security check */ + ip6_map_security_check (d0, ip40, ip60, &next0, &error0); + + if (PREDICT_TRUE (error0 == MAP_ERROR_NONE && + next0 == IP6_MAP_NEXT_IP4_LOOKUP)) + { + if (PREDICT_FALSE + (d0->mtu + && (clib_host_to_net_u16 (ip40->length) > d0->mtu))) + { + vnet_buffer (p0)->ip_frag.header_offset = 0; + vnet_buffer (p0)->ip_frag.flags = 0; + vnet_buffer (p0)->ip_frag.next_index = + IP4_FRAG_NEXT_IP4_LOOKUP; + vnet_buffer (p0)->ip_frag.mtu = d0->mtu; + next0 = IP6_MAP_NEXT_IP4_FRAGMENT; + } + else + { + next0 = + ip6_map_ip4_lookup_bypass (p0, + ip40) ? 
+ IP6_MAP_NEXT_IP4_REWRITE : next0; + } + vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, + cpu_index, + map_domain_index0, 1, + clib_net_to_host_u16 + (ip40->length)); + } + } + if (d1) + { + /* MAP inbound security check */ + ip6_map_security_check (d1, ip41, ip61, &next1, &error1); + + if (PREDICT_TRUE (error1 == MAP_ERROR_NONE && + next1 == IP6_MAP_NEXT_IP4_LOOKUP)) + { + if (PREDICT_FALSE + (d1->mtu + && (clib_host_to_net_u16 (ip41->length) > d1->mtu))) + { + vnet_buffer (p1)->ip_frag.header_offset = 0; + vnet_buffer (p1)->ip_frag.flags = 0; + vnet_buffer (p1)->ip_frag.next_index = + IP4_FRAG_NEXT_IP4_LOOKUP; + vnet_buffer (p1)->ip_frag.mtu = d1->mtu; + next1 = IP6_MAP_NEXT_IP4_FRAGMENT; + } + else + { + next1 = + ip6_map_ip4_lookup_bypass (p1, + ip41) ? + IP6_MAP_NEXT_IP4_REWRITE : next1; + } + vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, + cpu_index, + map_domain_index1, 1, + clib_net_to_host_u16 + (ip41->length)); + } + } + + if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED)) + { + map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr)); + tr->map_domain_index = map_domain_index0; + tr->port = port0; + } + + if (PREDICT_FALSE (p1->flags & VLIB_BUFFER_IS_TRACED)) + { + map_trace_t *tr = vlib_add_trace (vm, node, p1, sizeof (*tr)); + tr->map_domain_index = map_domain_index1; + tr->port = port1; + } + + if (error0 == MAP_ERROR_DECAP_SEC_CHECK && mm->icmp6_enabled) + { + /* Set ICMP parameters */ + vlib_buffer_advance (p0, -sizeof (ip6_header_t)); + icmp6_error_set_vnet_buffer (p0, ICMP6_destination_unreachable, + ICMP6_destination_unreachable_source_address_failed_policy, + 0); + next0 = IP6_MAP_NEXT_ICMP; + } + else + { + next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_MAP_NEXT_DROP; + } + + if (error1 == MAP_ERROR_DECAP_SEC_CHECK && mm->icmp6_enabled) + { + /* Set ICMP parameters */ + vlib_buffer_advance (p1, -sizeof (ip6_header_t)); + icmp6_error_set_vnet_buffer (p1, ICMP6_destination_unreachable, + ICMP6_destination_unreachable_source_address_failed_policy, + 0); + next1 = IP6_MAP_NEXT_ICMP; + } + else + { + next1 = (error1 == MAP_ERROR_NONE) ? 
next1 : IP6_MAP_NEXT_DROP; + } + + /* Reset packet */ + if (next0 == IP6_MAP_NEXT_IP6_LOCAL) + vlib_buffer_advance (p0, -sizeof (ip6_header_t)); + if (next1 == IP6_MAP_NEXT_IP6_LOCAL) + vlib_buffer_advance (p1, -sizeof (ip6_header_t)); + + p0->error = error_node->errors[error0]; + p1->error = error_node->errors[error1]; + vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next, + n_left_to_next, pi0, pi1, next0, + next1); + } + + /* Single loop */ + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 pi0; + vlib_buffer_t *p0; + u8 error0 = MAP_ERROR_NONE; + map_domain_t *d0 = 0; + ip4_header_t *ip40; + ip6_header_t *ip60; + i32 port0 = 0; + u32 map_domain_index0 = ~0; + u32 next0 = IP6_MAP_NEXT_IP4_LOOKUP; + + pi0 = to_next[0] = from[0]; + from += 1; + n_left_from -= 1; + to_next += 1; + n_left_to_next -= 1; + + p0 = vlib_get_buffer (vm, pi0); + ip60 = vlib_buffer_get_current (p0); + vlib_buffer_advance (p0, sizeof (ip6_header_t)); + ip40 = vlib_buffer_get_current (p0); + + /* + * Encapsulated IPv4 packet + * - IPv4 fragmented -> Pass to virtual reassembly unless security check disabled + * - Lookup/Rewrite or Fragment node in case of packet > MTU + * Fragmented IPv6 packet + * ICMP IPv6 packet + * - Error -> Pass to ICMPv6/ICMPv4 relay + * - Info -> Pass to IPv6 local + * Anything else -> drop + */ + if (PREDICT_TRUE + (ip60->protocol == IP_PROTOCOL_IP_IN_IP + && clib_net_to_host_u16 (ip60->payload_length) > 20)) + { + d0 = + ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX], + (ip4_address_t *) & ip40->src_address. + as_u32, &map_domain_index0, &error0); + } + else if (ip60->protocol == IP_PROTOCOL_ICMP6 && + clib_net_to_host_u16 (ip60->payload_length) > + sizeof (icmp46_header_t)) + { + icmp46_header_t *icmp = (void *) (ip60 + 1); + next0 = (icmp->type == ICMP6_echo_request + || icmp->type == + ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL : + IP6_MAP_NEXT_IP6_ICMP_RELAY; + } + else if (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION && + (((ip6_frag_hdr_t *) (ip60 + 1))->next_hdr == + IP_PROTOCOL_IP_IN_IP)) + { + next0 = IP6_MAP_NEXT_IP6_REASS; + } + else + { + error0 = MAP_ERROR_BAD_PROTOCOL; + } + + if (d0) + { + /* MAP inbound security check */ + ip6_map_security_check (d0, ip40, ip60, &next0, &error0); + + if (PREDICT_TRUE (error0 == MAP_ERROR_NONE && + next0 == IP6_MAP_NEXT_IP4_LOOKUP)) + { + if (PREDICT_FALSE + (d0->mtu + && (clib_host_to_net_u16 (ip40->length) > d0->mtu))) + { + vnet_buffer (p0)->ip_frag.header_offset = 0; + vnet_buffer (p0)->ip_frag.flags = 0; + vnet_buffer (p0)->ip_frag.next_index = + IP4_FRAG_NEXT_IP4_LOOKUP; + vnet_buffer (p0)->ip_frag.mtu = d0->mtu; + next0 = IP6_MAP_NEXT_IP4_FRAGMENT; + } + else + { + next0 = + ip6_map_ip4_lookup_bypass (p0, + ip40) ? + IP6_MAP_NEXT_IP4_REWRITE : next0; + } + vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, + cpu_index, + map_domain_index0, 1, + clib_net_to_host_u16 + (ip40->length)); + } + } + + if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED)) + { + map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr)); + tr->map_domain_index = map_domain_index0; + tr->port = (u16) port0; + } + + if (mm->icmp6_enabled && + (error0 == MAP_ERROR_DECAP_SEC_CHECK + || error0 == MAP_ERROR_NO_DOMAIN)) + { + /* Set ICMP parameters */ + vlib_buffer_advance (p0, -sizeof (ip6_header_t)); + icmp6_error_set_vnet_buffer (p0, ICMP6_destination_unreachable, + ICMP6_destination_unreachable_source_address_failed_policy, + 0); + next0 = IP6_MAP_NEXT_ICMP; + } + else + { + next0 = (error0 == MAP_ERROR_NONE) ? 
next0 : IP6_MAP_NEXT_DROP; + } + + /* Reset packet */ + if (next0 == IP6_MAP_NEXT_IP6_LOCAL) + vlib_buffer_advance (p0, -sizeof (ip6_header_t)); + + p0->error = error_node->errors[error0]; + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, pi0, next0); + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + return frame->n_vectors; +} + + +static_always_inline void +ip6_map_ip6_reass_prepare (vlib_main_t * vm, vlib_node_runtime_t * node, + map_ip6_reass_t * r, u32 ** fragments_ready, + u32 ** fragments_to_drop) +{ + ip4_header_t *ip40; + ip6_header_t *ip60; + ip6_frag_hdr_t *frag0; + vlib_buffer_t *p0; + + if (!r->ip4_header.ip_version_and_header_length) + return; + + //The IP header is here, we need to check for packets + //that can be forwarded + int i; + for (i = 0; i < MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++) + { + if (r->fragments[i].pi == ~0 || + ((!r->fragments[i].next_data_len) + && (r->fragments[i].next_data_offset != (0xffff)))) + continue; + + p0 = vlib_get_buffer (vm, r->fragments[i].pi); + ip60 = vlib_buffer_get_current (p0); + frag0 = (ip6_frag_hdr_t *) (ip60 + 1); + ip40 = (ip4_header_t *) (frag0 + 1); + + if (ip6_frag_hdr_offset (frag0)) + { + //Not first fragment, add the IPv4 header + clib_memcpy (ip40, &r->ip4_header, 20); + } + +#ifdef MAP_IP6_REASS_COUNT_BYTES + r->forwarded += + clib_net_to_host_u16 (ip60->payload_length) - sizeof (*frag0); +#endif + + if (ip6_frag_hdr_more (frag0)) + { + //Not last fragment, we copy end of next + clib_memcpy (u8_ptr_add (ip60, p0->current_length), + r->fragments[i].next_data, 20); + p0->current_length += 20; + ip60->payload_length = u16_net_add (ip60->payload_length, 20); + } + + if (!ip4_is_fragment (ip40)) + { + ip40->fragment_id = frag_id_6to4 (frag0->identification); + ip40->flags_and_fragment_offset = + clib_host_to_net_u16 (ip6_frag_hdr_offset (frag0)); + } + else + { + ip40->flags_and_fragment_offset = + clib_host_to_net_u16 (ip4_get_fragment_offset (ip40) + + ip6_frag_hdr_offset (frag0)); + } + + if (ip6_frag_hdr_more (frag0)) + ip40->flags_and_fragment_offset |= + clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS); + + ip40->length = + clib_host_to_net_u16 (p0->current_length - sizeof (*ip60) - + sizeof (*frag0)); + ip40->checksum = ip4_header_checksum (ip40); + + if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED)) + { + map_ip6_map_ip6_reass_trace_t *tr = + vlib_add_trace (vm, node, p0, sizeof (*tr)); + tr->offset = ip4_get_fragment_offset (ip40); + tr->frag_len = clib_net_to_host_u16 (ip40->length) - sizeof (*ip40); + tr->out = 1; + } + + vec_add1 (*fragments_ready, r->fragments[i].pi); + r->fragments[i].pi = ~0; + r->fragments[i].next_data_len = 0; + r->fragments[i].next_data_offset = 0; + map_main.ip6_reass_buffered_counter--; + + //TODO: Best solution would be that ip6_map handles extension headers + // and ignores atomic fragment. But in the meantime, let's just copy the header. 
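+      /*
+       * What the code below does, roughly: the IPv6 header is slid
+       * forward over the fragment header so that ip6-map later sees a
+       * plain, non-fragmented IP-in-IP packet:
+       *
+       *   before: [ IPv6 (40) ][ frag hdr (8) ][ inner IPv4 ... ]
+       *   after:  [ gap (8) ][ IPv6 (40) ][ inner IPv4 ... ]
+       *
+       * ip6->protocol is patched to the fragment header's next_hdr and
+       * the buffer is advanced past the 8-byte gap.
+       */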
+ + u8 protocol = frag0->next_hdr; + memmove (u8_ptr_add (ip40, -sizeof (*ip60)), ip60, sizeof (*ip60)); + ((ip6_header_t *) u8_ptr_add (ip40, -sizeof (*ip60)))->protocol = + protocol; + vlib_buffer_advance (p0, sizeof (*frag0)); + } +} + +void +map_ip6_drop_pi (u32 pi) +{ + vlib_main_t *vm = vlib_get_main (); + vlib_node_runtime_t *n = + vlib_node_get_runtime (vm, ip6_map_ip6_reass_node.index); + vlib_set_next_frame_buffer (vm, n, IP6_MAP_IP6_REASS_NEXT_DROP, pi); +} + +void +map_ip4_drop_pi (u32 pi) +{ + vlib_main_t *vm = vlib_get_main (); + vlib_node_runtime_t *n = + vlib_node_get_runtime (vm, ip6_map_ip4_reass_node.index); + vlib_set_next_frame_buffer (vm, n, IP6_MAP_IP4_REASS_NEXT_DROP, pi); +} + +/* + * ip6_reass + * TODO: We should count the number of successfully + * transmitted fragment bytes and compare that to the last fragment + * offset such that we can free the reassembly structure when all fragments + * have been forwarded. + */ +static uword +ip6_map_ip6_reass (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * frame) +{ + u32 n_left_from, *from, next_index, *to_next, n_left_to_next; + vlib_node_runtime_t *error_node = + vlib_node_get_runtime (vm, ip6_map_ip6_reass_node.index); + u32 *fragments_to_drop = NULL; + u32 *fragments_ready = NULL; + + from = vlib_frame_vector_args (frame); + n_left_from = frame->n_vectors; + next_index = node->cached_next_index; + while (n_left_from > 0) + { + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + /* Single loop */ + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 pi0; + vlib_buffer_t *p0; + u8 error0 = MAP_ERROR_NONE; + ip6_header_t *ip60; + ip6_frag_hdr_t *frag0; + u16 offset; + u16 next_offset; + u16 frag_len; + + pi0 = to_next[0] = from[0]; + from += 1; + n_left_from -= 1; + to_next += 1; + n_left_to_next -= 1; + + p0 = vlib_get_buffer (vm, pi0); + ip60 = vlib_buffer_get_current (p0); + frag0 = (ip6_frag_hdr_t *) (ip60 + 1); + offset = + clib_host_to_net_u16 (frag0->fragment_offset_and_more) & (~7); + frag_len = + clib_net_to_host_u16 (ip60->payload_length) - sizeof (*frag0); + next_offset = + ip6_frag_hdr_more (frag0) ? 
(offset + frag_len) : (0xffff); + + //FIXME: Support other extension headers, maybe + + if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED)) + { + map_ip6_map_ip6_reass_trace_t *tr = + vlib_add_trace (vm, node, p0, sizeof (*tr)); + tr->offset = offset; + tr->frag_len = frag_len; + tr->out = 0; + } + + map_ip6_reass_lock (); + map_ip6_reass_t *r = + map_ip6_reass_get (&ip60->src_address, &ip60->dst_address, + frag0->identification, frag0->next_hdr, + &fragments_to_drop); + //FIXME: Use better error codes + if (PREDICT_FALSE (!r)) + { + // Could not create a caching entry + error0 = MAP_ERROR_FRAGMENT_MEMORY; + } + else if (PREDICT_FALSE ((frag_len <= 20 && + (ip6_frag_hdr_more (frag0) || (!offset))))) + { + //Very small fragment are restricted to the last one and + //can't be the first one + error0 = MAP_ERROR_FRAGMENT_MALFORMED; + } + else + if (map_ip6_reass_add_fragment + (r, pi0, offset, next_offset, (u8 *) (frag0 + 1), frag_len)) + { + map_ip6_reass_free (r, &fragments_to_drop); + error0 = MAP_ERROR_FRAGMENT_MEMORY; + } + else + { +#ifdef MAP_IP6_REASS_COUNT_BYTES + if (!ip6_frag_hdr_more (frag0)) + r->expected_total = offset + frag_len; +#endif + ip6_map_ip6_reass_prepare (vm, node, r, &fragments_ready, + &fragments_to_drop); +#ifdef MAP_IP6_REASS_COUNT_BYTES + if (r->forwarded >= r->expected_total) + map_ip6_reass_free (r, &fragments_to_drop); +#endif + } + map_ip6_reass_unlock (); + + if (error0 == MAP_ERROR_NONE) + { + if (frag_len > 20) + { + //Dequeue the packet + n_left_to_next++; + to_next--; + } + else + { + //All data from that packet was copied no need to keep it, but this is not an error + p0->error = error_node->errors[MAP_ERROR_NONE]; + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + pi0, + IP6_MAP_IP6_REASS_NEXT_DROP); + } + } + else + { + p0->error = error_node->errors[error0]; + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, pi0, + IP6_MAP_IP6_REASS_NEXT_DROP); + } + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + map_send_all_to_node (vm, fragments_ready, node, + &error_node->errors[MAP_ERROR_NONE], + IP6_MAP_IP6_REASS_NEXT_IP6_MAP); + map_send_all_to_node (vm, fragments_to_drop, node, + &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED], + IP6_MAP_IP6_REASS_NEXT_DROP); + + vec_free (fragments_to_drop); + vec_free (fragments_ready); + return frame->n_vectors; +} + +/* + * ip6_ip4_virt_reass + */ +static uword +ip6_map_ip4_reass (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * frame) +{ + u32 n_left_from, *from, next_index, *to_next, n_left_to_next; + vlib_node_runtime_t *error_node = + vlib_node_get_runtime (vm, ip6_map_ip4_reass_node.index); + map_main_t *mm = &map_main; + vlib_combined_counter_main_t *cm = mm->domain_counters; + u32 cpu_index = os_get_cpu_number (); + u32 *fragments_to_drop = NULL; + u32 *fragments_to_loopback = NULL; + + from = vlib_frame_vector_args (frame); + n_left_from = frame->n_vectors; + next_index = node->cached_next_index; + while (n_left_from > 0) + { + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + /* Single loop */ + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 pi0; + vlib_buffer_t *p0; + u8 error0 = MAP_ERROR_NONE; + map_domain_t *d0; + ip4_header_t *ip40; + ip6_header_t *ip60; + i32 port0 = 0; + u32 map_domain_index0 = ~0; + u32 next0 = IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP; + u8 cached = 0; + + pi0 = to_next[0] = from[0]; + from += 1; + n_left_from -= 1; + to_next += 1; + n_left_to_next -= 1; + 
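+	  /*
+	   * Note: this node does not rebuild packets. It only recovers the
+	   * L4 port needed for the MAP security check: the first fragment
+	   * carries the port; fragments seen before it are cached while
+	   * r->port is still unknown and are looped back through this node
+	   * once the first fragment sets it.
+	   */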
+ p0 = vlib_get_buffer (vm, pi0); + ip40 = vlib_buffer_get_current (p0); + ip60 = ((ip6_header_t *) ip40) - 1; + + d0 = + ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX], + (ip4_address_t *) & ip40->src_address.as_u32, + &map_domain_index0, &error0); + + map_ip4_reass_lock (); + //This node only deals with fragmented ip4 + map_ip4_reass_t *r = map_ip4_reass_get (ip40->src_address.as_u32, + ip40->dst_address.as_u32, + ip40->fragment_id, + ip40->protocol, + &fragments_to_drop); + if (PREDICT_FALSE (!r)) + { + // Could not create a caching entry + error0 = MAP_ERROR_FRAGMENT_MEMORY; + } + else if (PREDICT_TRUE (ip4_get_fragment_offset (ip40))) + { + // This is a fragment + if (r->port >= 0) + { + // We know the port already + port0 = r->port; + } + else if (map_ip4_reass_add_fragment (r, pi0)) + { + // Not enough space for caching + error0 = MAP_ERROR_FRAGMENT_MEMORY; + map_ip4_reass_free (r, &fragments_to_drop); + } + else + { + cached = 1; + } + } + else + if ((port0 = + ip4_get_port (ip40, MAP_SENDER, p0->current_length)) < 0) + { + // Could not find port from first fragment. Stop reassembling. + error0 = MAP_ERROR_BAD_PROTOCOL; + port0 = 0; + map_ip4_reass_free (r, &fragments_to_drop); + } + else + { + // Found port. Remember it and loopback saved fragments + r->port = port0; + map_ip4_reass_get_fragments (r, &fragments_to_loopback); + } + +#ifdef MAP_IP4_REASS_COUNT_BYTES + if (!cached && r) + { + r->forwarded += clib_host_to_net_u16 (ip40->length) - 20; + if (!ip4_get_fragment_more (ip40)) + r->expected_total = + ip4_get_fragment_offset (ip40) * 8 + + clib_host_to_net_u16 (ip40->length) - 20; + if (r->forwarded >= r->expected_total) + map_ip4_reass_free (r, &fragments_to_drop); + } +#endif + + map_ip4_reass_unlock (); + + if (PREDICT_TRUE (error0 == MAP_ERROR_NONE)) + error0 = + ip6_map_sec_check (d0, port0, ip40, + ip60) ? MAP_ERROR_NONE : + MAP_ERROR_DECAP_SEC_CHECK; + + if (PREDICT_FALSE + (d0->mtu && (clib_host_to_net_u16 (ip40->length) > d0->mtu) + && error0 == MAP_ERROR_NONE && !cached)) + { + vnet_buffer (p0)->ip_frag.header_offset = 0; + vnet_buffer (p0)->ip_frag.flags = 0; + vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP; + vnet_buffer (p0)->ip_frag.mtu = d0->mtu; + next0 = IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT; + } + + if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED)) + { + map_ip6_map_ip4_reass_trace_t *tr = + vlib_add_trace (vm, node, p0, sizeof (*tr)); + tr->map_domain_index = map_domain_index0; + tr->port = port0; + tr->cached = cached; + } + + if (cached) + { + //Dequeue the packet + n_left_to_next++; + to_next--; + } + else + { + if (error0 == MAP_ERROR_NONE) + vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, + cpu_index, map_domain_index0, + 1, + clib_net_to_host_u16 + (ip40->length)); + next0 = + (error0 == + MAP_ERROR_NONE) ? 
next0 : IP6_MAP_IP4_REASS_NEXT_DROP; + p0->error = error_node->errors[error0]; + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, pi0, next0); + } + + //Loopback when we reach the end of the inpu vector + if (n_left_from == 0 && vec_len (fragments_to_loopback)) + { + from = vlib_frame_vector_args (frame); + u32 len = vec_len (fragments_to_loopback); + if (len <= VLIB_FRAME_SIZE) + { + clib_memcpy (from, fragments_to_loopback, + sizeof (u32) * len); + n_left_from = len; + vec_reset_length (fragments_to_loopback); + } + else + { + clib_memcpy (from, + fragments_to_loopback + (len - + VLIB_FRAME_SIZE), + sizeof (u32) * VLIB_FRAME_SIZE); + n_left_from = VLIB_FRAME_SIZE; + _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE; + } + } + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + map_send_all_to_node (vm, fragments_to_drop, node, + &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED], + IP6_MAP_IP4_REASS_NEXT_DROP); + + vec_free (fragments_to_drop); + vec_free (fragments_to_loopback); + return frame->n_vectors; +} + +/* + * ip6_icmp_relay + */ +static uword +ip6_map_icmp_relay (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * frame) +{ + u32 n_left_from, *from, next_index, *to_next, n_left_to_next; + vlib_node_runtime_t *error_node = + vlib_node_get_runtime (vm, ip6_map_icmp_relay_node.index); + map_main_t *mm = &map_main; + u32 cpu_index = os_get_cpu_number (); + u16 *fragment_ids, *fid; + + from = vlib_frame_vector_args (frame); + n_left_from = frame->n_vectors; + next_index = node->cached_next_index; + + /* Get random fragment IDs for replies. */ + fid = fragment_ids = + clib_random_buffer_get_data (&vm->random_buffer, + n_left_from * sizeof (fragment_ids[0])); + + while (n_left_from > 0) + { + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + /* Single loop */ + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 pi0; + vlib_buffer_t *p0; + u8 error0 = MAP_ERROR_NONE; + ip6_header_t *ip60; + u32 next0 = IP6_ICMP_RELAY_NEXT_IP4_LOOKUP; + u32 mtu; + + pi0 = to_next[0] = from[0]; + from += 1; + n_left_from -= 1; + to_next += 1; + n_left_to_next -= 1; + + p0 = vlib_get_buffer (vm, pi0); + ip60 = vlib_buffer_get_current (p0); + u16 tlen = clib_net_to_host_u16 (ip60->payload_length); + + /* + * In: + * IPv6 header (40) + * ICMPv6 header (8) + * IPv6 header (40) + * Original IPv4 header / packet + * Out: + * New IPv4 header + * New ICMP header + * Original IPv4 header / packet + */ + + /* Need at least ICMP(8) + IPv6(40) + IPv4(20) + L4 header(8) */ + if (tlen < 76) + { + error0 = MAP_ERROR_ICMP_RELAY; + goto error; + } + + icmp46_header_t *icmp60 = (icmp46_header_t *) (ip60 + 1); + ip6_header_t *inner_ip60 = (ip6_header_t *) (icmp60 + 2); + + if (inner_ip60->protocol != IP_PROTOCOL_IP_IN_IP) + { + error0 = MAP_ERROR_ICMP_RELAY; + goto error; + } + + ip4_header_t *inner_ip40 = (ip4_header_t *) (inner_ip60 + 1); + vlib_buffer_advance (p0, 60); /* sizeof ( IPv6 + ICMP + IPv6 - IPv4 - ICMP ) */ + ip4_header_t *new_ip40 = vlib_buffer_get_current (p0); + icmp46_header_t *new_icmp40 = (icmp46_header_t *) (new_ip40 + 1); + + /* + * Relay according to RFC2473, section 8.3 + */ + switch (icmp60->type) + { + case ICMP6_destination_unreachable: + case ICMP6_time_exceeded: + case ICMP6_parameter_problem: + /* Type 3 - destination unreachable, Code 1 - host unreachable */ + new_icmp40->type = ICMP4_destination_unreachable; + new_icmp40->code = + ICMP4_destination_unreachable_destination_unreachable_host; + break; 
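+	  /*
+	   * Summary of the relay mapping implemented by this switch:
+	   *   - destination unreachable, time exceeded and parameter
+	   *     problem all become ICMPv4 type 3, code 1 (host unreachable)
+	   *   - packet too big becomes ICMPv4 type 3, code 4 (fragmentation
+	   *     needed and DF set), with the advertised MTU clamped to at
+	   *     least 1280, and only if the embedded IPv4 packet had DF set
+	   *   - anything else is counted as MAP_ERROR_ICMP_RELAY and dropped
+	   */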
+ + case ICMP6_packet_too_big: + /* Type 3 - destination unreachable, Code 4 - packet too big */ + /* Potential TODO: Adjust domain tunnel MTU based on the value received here */ + mtu = clib_net_to_host_u32 (*((u32 *) (icmp60 + 1))); + + /* Check DF flag */ + if (! + (inner_ip40->flags_and_fragment_offset & + clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT))) + { + error0 = MAP_ERROR_ICMP_RELAY; + goto error; + } + + new_icmp40->type = ICMP4_destination_unreachable; + new_icmp40->code = + ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set; + *((u32 *) (new_icmp40 + 1)) = + clib_host_to_net_u32 (mtu < 1280 ? 1280 : mtu); + break; + + default: + error0 = MAP_ERROR_ICMP_RELAY; + break; + } + + /* + * Ensure the total ICMP packet is no longer than 576 bytes (RFC1812) + */ + new_ip40->ip_version_and_header_length = 0x45; + new_ip40->tos = 0; + u16 nlen = (tlen - 20) > 576 ? 576 : tlen - 20; + new_ip40->length = clib_host_to_net_u16 (nlen); + new_ip40->fragment_id = fid[0]; + fid++; + new_ip40->ttl = 64; + new_ip40->protocol = IP_PROTOCOL_ICMP; + new_ip40->src_address = mm->icmp4_src_address; + new_ip40->dst_address = inner_ip40->src_address; + new_ip40->checksum = ip4_header_checksum (new_ip40); + + new_icmp40->checksum = 0; + ip_csum_t sum = ip_incremental_checksum (0, new_icmp40, nlen - 20); + new_icmp40->checksum = ~ip_csum_fold (sum); + + vlib_increment_simple_counter (&mm->icmp_relayed, cpu_index, 0, 1); + + error: + if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED)) + { + map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr)); + tr->map_domain_index = 0; + tr->port = 0; + } + + next0 = + (error0 == MAP_ERROR_NONE) ? next0 : IP6_ICMP_RELAY_NEXT_DROP; + p0->error = error_node->errors[error0]; + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, + n_left_to_next, pi0, next0); + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + + return frame->n_vectors; + +} + +static char *map_error_strings[] = { +#define _(sym,string) string, + foreach_map_error +#undef _ +}; + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE(ip6_map_node) = { + .function = ip6_map, + .name = "ip6-map", + .vector_size = sizeof(u32), + .format_trace = format_map_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = MAP_N_ERROR, + .error_strings = map_error_strings, + + .n_next_nodes = IP6_MAP_N_NEXT, + .next_nodes = { + [IP6_MAP_NEXT_IP4_LOOKUP] = "ip4-lookup", +#ifdef MAP_SKIP_IP6_LOOKUP + [IP6_MAP_NEXT_IP4_REWRITE] = "ip4-rewrite", +#endif + [IP6_MAP_NEXT_IP6_REASS] = "ip6-map-ip6-reass", + [IP6_MAP_NEXT_IP4_REASS] = "ip6-map-ip4-reass", + [IP6_MAP_NEXT_IP4_FRAGMENT] = "ip4-frag", + [IP6_MAP_NEXT_IP6_ICMP_RELAY] = "ip6-map-icmp-relay", + [IP6_MAP_NEXT_IP6_LOCAL] = "ip6-local", + [IP6_MAP_NEXT_DROP] = "error-drop", + [IP6_MAP_NEXT_ICMP] = "ip6-icmp-error", + }, +}; +/* *INDENT-ON* */ + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE(ip6_map_ip6_reass_node) = { + .function = ip6_map_ip6_reass, + .name = "ip6-map-ip6-reass", + .vector_size = sizeof(u32), + .format_trace = format_ip6_map_ip6_reass_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + .n_errors = MAP_N_ERROR, + .error_strings = map_error_strings, + .n_next_nodes = IP6_MAP_IP6_REASS_N_NEXT, + .next_nodes = { + [IP6_MAP_IP6_REASS_NEXT_IP6_MAP] = "ip6-map", + [IP6_MAP_IP6_REASS_NEXT_DROP] = "error-drop", + }, +}; +/* *INDENT-ON* */ + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE(ip6_map_ip4_reass_node) = { + .function = ip6_map_ip4_reass, + .name = "ip6-map-ip4-reass", + .vector_size = sizeof(u32), + .format_trace = 
format_ip6_map_ip4_reass_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + .n_errors = MAP_N_ERROR, + .error_strings = map_error_strings, + .n_next_nodes = IP6_MAP_IP4_REASS_N_NEXT, + .next_nodes = { + [IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP] = "ip4-lookup", + [IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT] = "ip4-frag", + [IP6_MAP_IP4_REASS_NEXT_DROP] = "error-drop", + }, +}; +/* *INDENT-ON* */ + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE(ip6_map_icmp_relay_node, static) = { + .function = ip6_map_icmp_relay, + .name = "ip6-map-icmp-relay", + .vector_size = sizeof(u32), + .format_trace = format_map_trace, //FIXME + .type = VLIB_NODE_TYPE_INTERNAL, + .n_errors = MAP_N_ERROR, + .error_strings = map_error_strings, + .n_next_nodes = IP6_ICMP_RELAY_N_NEXT, + .next_nodes = { + [IP6_ICMP_RELAY_NEXT_IP4_LOOKUP] = "ip4-lookup", + [IP6_ICMP_RELAY_NEXT_DROP] = "error-drop", + }, +}; +/* *INDENT-ON* */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/map/ip6_map_t.c b/src/vnet/map/ip6_map_t.c new file mode 100644 index 00000000..eb3996c2 --- /dev/null +++ b/src/vnet/map/ip6_map_t.c @@ -0,0 +1,1517 @@ +/* + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "map.h" + +#include "../ip/ip_frag.h" + +#define IP6_MAP_T_DUAL_LOOP + +typedef enum +{ + IP6_MAPT_NEXT_MAPT_TCP_UDP, + IP6_MAPT_NEXT_MAPT_ICMP, + IP6_MAPT_NEXT_MAPT_FRAGMENTED, + IP6_MAPT_NEXT_DROP, + IP6_MAPT_N_NEXT +} ip6_mapt_next_t; + +typedef enum +{ + IP6_MAPT_ICMP_NEXT_IP4_LOOKUP, + IP6_MAPT_ICMP_NEXT_IP4_FRAG, + IP6_MAPT_ICMP_NEXT_DROP, + IP6_MAPT_ICMP_N_NEXT +} ip6_mapt_icmp_next_t; + +typedef enum +{ + IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP, + IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG, + IP6_MAPT_TCP_UDP_NEXT_DROP, + IP6_MAPT_TCP_UDP_N_NEXT +} ip6_mapt_tcp_udp_next_t; + +typedef enum +{ + IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP, + IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG, + IP6_MAPT_FRAGMENTED_NEXT_DROP, + IP6_MAPT_FRAGMENTED_N_NEXT +} ip6_mapt_fragmented_next_t; + +static_always_inline int +ip6_map_fragment_cache (ip6_header_t * ip6, ip6_frag_hdr_t * frag, + map_domain_t * d, u16 port) +{ + u32 *ignore = NULL; + map_ip4_reass_lock (); + map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address), + ip6_map_t_embedded_address (d, + &ip6-> + dst_address), + frag_id_6to4 (frag->identification), + (ip6->protocol == + IP_PROTOCOL_ICMP6) ? + IP_PROTOCOL_ICMP : ip6->protocol, + &ignore); + if (r) + r->port = port; + + map_ip4_reass_unlock (); + return !r; +} + +/* Returns the associated port or -1 */ +static_always_inline i32 +ip6_map_fragment_get (ip6_header_t * ip6, ip6_frag_hdr_t * frag, + map_domain_t * d) +{ + u32 *ignore = NULL; + map_ip4_reass_lock (); + map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address), + ip6_map_t_embedded_address (d, + &ip6-> + dst_address), + frag_id_6to4 (frag->identification), + (ip6->protocol == + IP_PROTOCOL_ICMP6) ? 
+ IP_PROTOCOL_ICMP : ip6->protocol, + &ignore); + i32 ret = r ? r->port : -1; + map_ip4_reass_unlock (); + return ret; +} + +static_always_inline u8 +ip6_translate_tos (const ip6_header_t * ip6) +{ +#ifdef IP6_MAP_T_OVERRIDE_TOS + return IP6_MAP_T_OVERRIDE_TOS; +#else + return (clib_net_to_host_u32 (ip6->ip_version_traffic_class_and_flow_label) + & 0x0ff00000) >> 20; +#endif +} + +//TODO: Find right place in memory for that +/* *INDENT-OFF* */ +static u8 icmp6_to_icmp_updater_pointer_table[] = + { 0, 1, ~0, ~0, + 2, 2, 9, 8, + 12, 12, 12, 12, + 12, 12, 12, 12, + 12, 12, 12, 12, + 12, 12, 12, 12, + 24, 24, 24, 24, + 24, 24, 24, 24, + 24, 24, 24, 24, + 24, 24, 24, 24 + }; +/* *INDENT-ON* */ + +static_always_inline int +ip6_icmp_to_icmp6_in_place (icmp46_header_t * icmp, u32 icmp_len, + i32 * sender_port, ip6_header_t ** inner_ip6) +{ + *inner_ip6 = NULL; + switch (icmp->type) + { + case ICMP6_echo_request: + *sender_port = ((u16 *) icmp)[2]; + icmp->type = ICMP4_echo_request; + break; + case ICMP6_echo_reply: + *sender_port = ((u16 *) icmp)[2]; + icmp->type = ICMP4_echo_reply; + break; + case ICMP6_destination_unreachable: + *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8); + *sender_port = ip6_get_port (*inner_ip6, MAP_RECEIVER, icmp_len); + + switch (icmp->code) + { + case ICMP6_destination_unreachable_no_route_to_destination: //0 + case ICMP6_destination_unreachable_beyond_scope_of_source_address: //2 + case ICMP6_destination_unreachable_address_unreachable: //3 + icmp->type = ICMP4_destination_unreachable; + icmp->code = + ICMP4_destination_unreachable_destination_unreachable_host; + break; + case ICMP6_destination_unreachable_destination_administratively_prohibited: //1 + icmp->type = + ICMP4_destination_unreachable; + icmp->code = + ICMP4_destination_unreachable_communication_administratively_prohibited; + break; + case ICMP6_destination_unreachable_port_unreachable: + icmp->type = ICMP4_destination_unreachable; + icmp->code = ICMP4_destination_unreachable_port_unreachable; + break; + default: + return -1; + } + break; + case ICMP6_packet_too_big: + *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8); + *sender_port = ip6_get_port (*inner_ip6, MAP_RECEIVER, icmp_len); + + icmp->type = ICMP4_destination_unreachable; + icmp->code = 4; + { + u32 advertised_mtu = clib_net_to_host_u32 (*((u32 *) (icmp + 1))); + advertised_mtu -= 20; + //FIXME: = minimum(advertised MTU-20, MTU_of_IPv4_nexthop, (MTU_of_IPv6_nexthop)-20) + ((u16 *) (icmp))[3] = clib_host_to_net_u16 (advertised_mtu); + } + break; + + case ICMP6_time_exceeded: + *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8); + *sender_port = ip6_get_port (*inner_ip6, MAP_RECEIVER, icmp_len); + + icmp->type = ICMP4_time_exceeded; + break; + + case ICMP6_parameter_problem: + *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8); + *sender_port = ip6_get_port (*inner_ip6, MAP_RECEIVER, icmp_len); + + switch (icmp->code) + { + case ICMP6_parameter_problem_erroneous_header_field: + icmp->type = ICMP4_parameter_problem; + icmp->code = ICMP4_parameter_problem_pointer_indicates_error; + u32 pointer = clib_net_to_host_u32 (*((u32 *) (icmp + 1))); + if (pointer >= 40) + return -1; + + ((u8 *) (icmp + 1))[0] = + icmp6_to_icmp_updater_pointer_table[pointer]; + break; + case ICMP6_parameter_problem_unrecognized_next_header: + icmp->type = ICMP4_destination_unreachable; + icmp->code = ICMP4_destination_unreachable_port_unreachable; + break; + case ICMP6_parameter_problem_unrecognized_option: + default: + return -1; + } + break; + default: + return -1; + 
break; + } + return 0; +} + +static_always_inline void +_ip6_map_t_icmp (map_domain_t * d, vlib_buffer_t * p, u8 * error) +{ + ip6_header_t *ip6, *inner_ip6; + ip4_header_t *ip4, *inner_ip4; + u32 ip6_pay_len; + icmp46_header_t *icmp; + i32 sender_port; + ip_csum_t csum; + u32 ip4_sadr, inner_ip4_dadr; + + ip6 = vlib_buffer_get_current (p); + ip6_pay_len = clib_net_to_host_u16 (ip6->payload_length); + icmp = (icmp46_header_t *) (ip6 + 1); + ASSERT (ip6_pay_len + sizeof (*ip6) <= p->current_length); + + if (ip6->protocol != IP_PROTOCOL_ICMP6) + { + //No extensions headers allowed here + //TODO: SR header + *error = MAP_ERROR_MALFORMED; + return; + } + + //There are no fragmented ICMP messages, so no extension header for now + + if (ip6_icmp_to_icmp6_in_place + (icmp, ip6_pay_len, &sender_port, &inner_ip6)) + { + //TODO: In case of 1:1 mapping it is not necessary to have the sender port + *error = MAP_ERROR_ICMP; + return; + } + + if (sender_port < 0) + { + // In case of 1:1 mapping, we don't care about the port + if (d->ea_bits_len == 0 && d->rules) + { + sender_port = 0; + } + else + { + *error = MAP_ERROR_ICMP; + return; + } + } + + //Security check + //Note that this prevents an intermediate IPv6 router from answering the request + ip4_sadr = map_get_ip4 (&ip6->src_address); + if (ip6->src_address.as_u64[0] != map_get_pfx_net (d, ip4_sadr, sender_port) + || ip6->src_address.as_u64[1] != map_get_sfx_net (d, ip4_sadr, + sender_port)) + { + *error = MAP_ERROR_SEC_CHECK; + return; + } + + if (inner_ip6) + { + u16 *inner_L4_checksum, inner_l4_offset, inner_frag_offset, + inner_frag_id; + u8 *inner_l4, inner_protocol; + + //We have two headers to translate + // FROM + // [ IPv6 ]<- ext ->[IC][ IPv6 ]<- ext ->[L4 header ... + // Handled cases: + // [ IPv6 ][IC][ IPv6 ][L4 header ... + // [ IPv6 ][IC][ IPv6 ][Fr][L4 header ... + // TO + // [ IPv4][IC][ IPv4][L4 header ... + + //TODO: This was already done deep in ip6_icmp_to_icmp6_in_place + //We shouldn't have to do it again + if (ip6_parse (inner_ip6, ip6_pay_len - 8, + &inner_protocol, &inner_l4_offset, &inner_frag_offset)) + { + *error = MAP_ERROR_MALFORMED; + return; + } + + inner_l4 = u8_ptr_add (inner_ip6, inner_l4_offset); + inner_ip4 = + (ip4_header_t *) u8_ptr_add (inner_l4, -sizeof (*inner_ip4)); + if (inner_frag_offset) + { + ip6_frag_hdr_t *inner_frag = + (ip6_frag_hdr_t *) u8_ptr_add (inner_ip6, inner_frag_offset); + inner_frag_id = frag_id_6to4 (inner_frag->identification); + } + else + { + inner_frag_id = 0; + } + + //Do the translation of the inner packet + if (inner_protocol == IP_PROTOCOL_TCP) + { + inner_L4_checksum = (u16 *) u8_ptr_add (inner_l4, 16); + } + else if (inner_protocol == IP_PROTOCOL_UDP) + { + inner_L4_checksum = (u16 *) u8_ptr_add (inner_l4, 6); + } + else if (inner_protocol == IP_PROTOCOL_ICMP6) + { + icmp46_header_t *inner_icmp = (icmp46_header_t *) inner_l4; + csum = inner_icmp->checksum; + csum = ip_csum_sub_even (csum, *((u16 *) inner_icmp)); + //It cannot be of a different type as ip6_icmp_to_icmp6_in_place succeeded + inner_icmp->type = (inner_icmp->type == ICMP6_echo_request) ? 
+ ICMP4_echo_request : ICMP4_echo_reply; + csum = ip_csum_add_even (csum, *((u16 *) inner_icmp)); + inner_icmp->checksum = ip_csum_fold (csum); + inner_protocol = IP_PROTOCOL_ICMP; //Will be copied to ip6 later + inner_L4_checksum = &inner_icmp->checksum; + } + else + { + *error = MAP_ERROR_BAD_PROTOCOL; + return; + } + + csum = *inner_L4_checksum; + csum = ip_csum_sub_even (csum, inner_ip6->src_address.as_u64[0]); + csum = ip_csum_sub_even (csum, inner_ip6->src_address.as_u64[1]); + csum = ip_csum_sub_even (csum, inner_ip6->dst_address.as_u64[0]); + csum = ip_csum_sub_even (csum, inner_ip6->dst_address.as_u64[1]); + + //Sanity check of the outer destination address + if (ip6->dst_address.as_u64[0] != inner_ip6->src_address.as_u64[0] && + ip6->dst_address.as_u64[1] != inner_ip6->src_address.as_u64[1]) + { + *error = MAP_ERROR_SEC_CHECK; + return; + } + + //Security check of inner packet + inner_ip4_dadr = map_get_ip4 (&inner_ip6->dst_address); + if (inner_ip6->dst_address.as_u64[0] != + map_get_pfx_net (d, inner_ip4_dadr, sender_port) + || inner_ip6->dst_address.as_u64[1] != map_get_sfx_net (d, + inner_ip4_dadr, + sender_port)) + { + *error = MAP_ERROR_SEC_CHECK; + return; + } + + inner_ip4->dst_address.as_u32 = inner_ip4_dadr; + inner_ip4->src_address.as_u32 = + ip6_map_t_embedded_address (d, &inner_ip6->src_address); + inner_ip4->ip_version_and_header_length = + IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS; + inner_ip4->tos = ip6_translate_tos (inner_ip6); + inner_ip4->length = + u16_net_add (inner_ip6->payload_length, + sizeof (*ip4) + sizeof (*ip6) - inner_l4_offset); + inner_ip4->fragment_id = inner_frag_id; + inner_ip4->flags_and_fragment_offset = + clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS); + inner_ip4->ttl = inner_ip6->hop_limit; + inner_ip4->protocol = inner_protocol; + inner_ip4->checksum = ip4_header_checksum (inner_ip4); + + if (inner_ip4->protocol == IP_PROTOCOL_ICMP) + { + //Remove remainings of the pseudo-header in the csum + csum = + ip_csum_sub_even (csum, clib_host_to_net_u16 (IP_PROTOCOL_ICMP6)); + csum = + ip_csum_sub_even (csum, inner_ip4->length - sizeof (*inner_ip4)); + } + else + { + //Update to new pseudo-header + csum = ip_csum_add_even (csum, inner_ip4->src_address.as_u32); + csum = ip_csum_add_even (csum, inner_ip4->dst_address.as_u32); + } + *inner_L4_checksum = ip_csum_fold (csum); + + //Move up icmp header + ip4 = (ip4_header_t *) u8_ptr_add (inner_l4, -2 * sizeof (*ip4) - 8); + clib_memcpy (u8_ptr_add (inner_l4, -sizeof (*ip4) - 8), icmp, 8); + icmp = (icmp46_header_t *) u8_ptr_add (inner_l4, -sizeof (*ip4) - 8); + } + else + { + //Only one header to translate + ip4 = (ip4_header_t *) u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4)); + } + vlib_buffer_advance (p, (u32) (((u8 *) ip4) - ((u8 *) ip6))); + + ip4->dst_address.as_u32 = ip6_map_t_embedded_address (d, &ip6->dst_address); + ip4->src_address.as_u32 = ip4_sadr; + ip4->ip_version_and_header_length = + IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS; + ip4->tos = ip6_translate_tos (ip6); + ip4->fragment_id = 0; + ip4->flags_and_fragment_offset = 0; + ip4->ttl = ip6->hop_limit; + ip4->protocol = IP_PROTOCOL_ICMP; + //TODO fix the length depending on offset length + ip4->length = u16_net_add (ip6->payload_length, + (inner_ip6 == + NULL) ? 
sizeof (*ip4) : (2 * sizeof (*ip4) - + sizeof (*ip6))); + ip4->checksum = ip4_header_checksum (ip4); + + //TODO: We could do an easy diff-checksum for echo requests/replies + //Recompute ICMP checksum + icmp->checksum = 0; + csum = + ip_incremental_checksum (0, icmp, + clib_net_to_host_u16 (ip4->length) - + sizeof (*ip4)); + icmp->checksum = ~ip_csum_fold (csum); +} + +static uword +ip6_map_t_icmp (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * frame) +{ + u32 n_left_from, *from, next_index, *to_next, n_left_to_next; + vlib_node_runtime_t *error_node = + vlib_node_get_runtime (vm, ip6_map_t_icmp_node.index); + from = vlib_frame_vector_args (frame); + n_left_from = frame->n_vectors; + next_index = node->cached_next_index; + vlib_combined_counter_main_t *cm = map_main.domain_counters; + u32 cpu_index = os_get_cpu_number (); + + while (n_left_from > 0) + { + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 pi0; + vlib_buffer_t *p0; + u8 error0; + ip6_mapt_icmp_next_t next0; + map_domain_t *d0; + u16 len0; + + pi0 = to_next[0] = from[0]; + from += 1; + n_left_from -= 1; + to_next += 1; + n_left_to_next -= 1; + error0 = MAP_ERROR_NONE; + next0 = IP6_MAPT_ICMP_NEXT_IP4_LOOKUP; + + p0 = vlib_get_buffer (vm, pi0); + len0 = + clib_net_to_host_u16 (((ip6_header_t *) + vlib_buffer_get_current + (p0))->payload_length); + d0 = + pool_elt_at_index (map_main.domains, + vnet_buffer (p0)->map_t.map_domain_index); + _ip6_map_t_icmp (d0, p0, &error0); + + if (vnet_buffer (p0)->map_t.mtu < p0->current_length) + { + //Send to fragmentation node if necessary + vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; + vnet_buffer (p0)->ip_frag.header_offset = 0; + vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP; + next0 = IP6_MAPT_ICMP_NEXT_IP4_FRAG; + } + + if (PREDICT_TRUE (error0 == MAP_ERROR_NONE)) + { + vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, + cpu_index, + vnet_buffer (p0)-> + map_t.map_domain_index, 1, + len0); + } + else + { + next0 = IP6_MAPT_ICMP_NEXT_DROP; + } + + p0->error = error_node->errors[error0]; + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, pi0, + next0); + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + return frame->n_vectors; +} + +static uword +ip6_map_t_fragmented (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * frame) +{ + u32 n_left_from, *from, next_index, *to_next, n_left_to_next; + from = vlib_frame_vector_args (frame); + n_left_from = frame->n_vectors; + next_index = node->cached_next_index; + + while (n_left_from > 0) + { + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + +#ifdef IP6_MAP_T_DUAL_LOOP + while (n_left_from >= 4 && n_left_to_next >= 2) + { + u32 pi0, pi1; + vlib_buffer_t *p0, *p1; + ip6_header_t *ip60, *ip61; + ip6_frag_hdr_t *frag0, *frag1; + ip4_header_t *ip40, *ip41; + u16 frag_id0, frag_offset0, frag_id1, frag_offset1; + u8 frag_more0, frag_more1; + u32 next0, next1; + + pi0 = to_next[0] = from[0]; + pi1 = to_next[1] = from[1]; + from += 2; + n_left_from -= 2; + to_next += 2; + n_left_to_next -= 2; + + next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP; + next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP; + p0 = vlib_get_buffer (vm, pi0); + p1 = vlib_get_buffer (vm, pi1); + ip60 = vlib_buffer_get_current (p0); + ip61 = vlib_buffer_get_current (p1); + frag0 = + (ip6_frag_hdr_t *) u8_ptr_add (ip60, + vnet_buffer (p0)->map_t. 
+ v6.frag_offset); + frag1 = + (ip6_frag_hdr_t *) u8_ptr_add (ip61, + vnet_buffer (p1)->map_t. + v6.frag_offset); + ip40 = + (ip4_header_t *) u8_ptr_add (ip60, + vnet_buffer (p0)->map_t. + v6.l4_offset - sizeof (*ip40)); + ip41 = + (ip4_header_t *) u8_ptr_add (ip61, + vnet_buffer (p1)->map_t. + v6.l4_offset - sizeof (*ip40)); + vlib_buffer_advance (p0, + vnet_buffer (p0)->map_t.v6.l4_offset - + sizeof (*ip40)); + vlib_buffer_advance (p1, + vnet_buffer (p1)->map_t.v6.l4_offset - + sizeof (*ip40)); + + frag_id0 = frag_id_6to4 (frag0->identification); + frag_id1 = frag_id_6to4 (frag1->identification); + frag_more0 = ip6_frag_hdr_more (frag0); + frag_more1 = ip6_frag_hdr_more (frag1); + frag_offset0 = ip6_frag_hdr_offset (frag0); + frag_offset1 = ip6_frag_hdr_offset (frag1); + + ip40->dst_address.as_u32 = vnet_buffer (p0)->map_t.v6.daddr; + ip41->dst_address.as_u32 = vnet_buffer (p1)->map_t.v6.daddr; + ip40->src_address.as_u32 = vnet_buffer (p0)->map_t.v6.saddr; + ip41->src_address.as_u32 = vnet_buffer (p1)->map_t.v6.saddr; + ip40->ip_version_and_header_length = + IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS; + ip41->ip_version_and_header_length = + IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS; + ip40->tos = ip6_translate_tos (ip60); + ip41->tos = ip6_translate_tos (ip61); + ip40->length = u16_net_add (ip60->payload_length, + sizeof (*ip40) - + vnet_buffer (p0)->map_t.v6.l4_offset + + sizeof (*ip60)); + ip41->length = + u16_net_add (ip61->payload_length, + sizeof (*ip40) - + vnet_buffer (p1)->map_t.v6.l4_offset + + sizeof (*ip60)); + ip40->fragment_id = frag_id0; + ip41->fragment_id = frag_id1; + ip40->flags_and_fragment_offset = + clib_host_to_net_u16 (frag_offset0 | + (frag_more0 ? IP4_HEADER_FLAG_MORE_FRAGMENTS + : 0)); + ip41->flags_and_fragment_offset = + clib_host_to_net_u16 (frag_offset1 | + (frag_more1 ? IP4_HEADER_FLAG_MORE_FRAGMENTS + : 0)); + ip40->ttl = ip60->hop_limit; + ip41->ttl = ip61->hop_limit; + ip40->protocol = + (vnet_buffer (p0)->map_t.v6.l4_protocol == + IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : vnet_buffer (p0)-> + map_t.v6.l4_protocol; + ip41->protocol = + (vnet_buffer (p1)->map_t.v6.l4_protocol == + IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : vnet_buffer (p1)-> + map_t.v6.l4_protocol; + ip40->checksum = ip4_header_checksum (ip40); + ip41->checksum = ip4_header_checksum (ip41); + + if (vnet_buffer (p0)->map_t.mtu < p0->current_length) + { + vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; + vnet_buffer (p0)->ip_frag.header_offset = 0; + vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP; + next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG; + } + + if (vnet_buffer (p1)->map_t.mtu < p1->current_length) + { + vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu; + vnet_buffer (p1)->ip_frag.header_offset = 0; + vnet_buffer (p1)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP; + next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG; + } + + vlib_validate_buffer_enqueue_x2 (vm, node, next_index, + to_next, n_left_to_next, pi0, pi1, + next0, next1); + } +#endif + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 pi0; + vlib_buffer_t *p0; + ip6_header_t *ip60; + ip6_frag_hdr_t *frag0; + ip4_header_t *ip40; + u16 frag_id0; + u8 frag_more0; + u16 frag_offset0; + u32 next0; + + pi0 = to_next[0] = from[0]; + from += 1; + n_left_from -= 1; + to_next += 1; + n_left_to_next -= 1; + + next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP; + p0 = vlib_get_buffer (vm, pi0); + ip60 = vlib_buffer_get_current (p0); + frag0 = + (ip6_frag_hdr_t *) u8_ptr_add (ip60, + vnet_buffer (p0)->map_t. 
+ v6.frag_offset); + ip40 = + (ip4_header_t *) u8_ptr_add (ip60, + vnet_buffer (p0)->map_t. + v6.l4_offset - sizeof (*ip40)); + vlib_buffer_advance (p0, + vnet_buffer (p0)->map_t.v6.l4_offset - + sizeof (*ip40)); + + frag_id0 = frag_id_6to4 (frag0->identification); + frag_more0 = ip6_frag_hdr_more (frag0); + frag_offset0 = ip6_frag_hdr_offset (frag0); + + ip40->dst_address.as_u32 = vnet_buffer (p0)->map_t.v6.daddr; + ip40->src_address.as_u32 = vnet_buffer (p0)->map_t.v6.saddr; + ip40->ip_version_and_header_length = + IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS; + ip40->tos = ip6_translate_tos (ip60); + ip40->length = u16_net_add (ip60->payload_length, + sizeof (*ip40) - + vnet_buffer (p0)->map_t.v6.l4_offset + + sizeof (*ip60)); + ip40->fragment_id = frag_id0; + ip40->flags_and_fragment_offset = + clib_host_to_net_u16 (frag_offset0 | + (frag_more0 ? IP4_HEADER_FLAG_MORE_FRAGMENTS + : 0)); + ip40->ttl = ip60->hop_limit; + ip40->protocol = + (vnet_buffer (p0)->map_t.v6.l4_protocol == + IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : vnet_buffer (p0)-> + map_t.v6.l4_protocol; + ip40->checksum = ip4_header_checksum (ip40); + + if (vnet_buffer (p0)->map_t.mtu < p0->current_length) + { + //Send to fragmentation node if necessary + vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; + vnet_buffer (p0)->ip_frag.header_offset = 0; + vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP; + next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG; + } + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, pi0, + next0); + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + return frame->n_vectors; +} + +static uword +ip6_map_t_tcp_udp (vlib_main_t * vm, + vlib_node_runtime_t * node, vlib_frame_t * frame) +{ + u32 n_left_from, *from, next_index, *to_next, n_left_to_next; + from = vlib_frame_vector_args (frame); + n_left_from = frame->n_vectors; + next_index = node->cached_next_index; + while (n_left_from > 0) + { + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + +#ifdef IP6_MAP_T_DUAL_LOOP + while (n_left_from >= 4 && n_left_to_next >= 2) + { + u32 pi0, pi1; + vlib_buffer_t *p0, *p1; + ip6_header_t *ip60, *ip61; + ip_csum_t csum0, csum1; + ip4_header_t *ip40, *ip41; + u16 fragment_id0, flags0, *checksum0, + fragment_id1, flags1, *checksum1; + ip6_mapt_tcp_udp_next_t next0, next1; + + pi0 = to_next[0] = from[0]; + pi1 = to_next[1] = from[1]; + from += 2; + n_left_from -= 2; + to_next += 2; + n_left_to_next -= 2; + next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP; + next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP; + + p0 = vlib_get_buffer (vm, pi0); + p1 = vlib_get_buffer (vm, pi1); + ip60 = vlib_buffer_get_current (p0); + ip61 = vlib_buffer_get_current (p1); + ip40 = + (ip4_header_t *) u8_ptr_add (ip60, + vnet_buffer (p0)->map_t. + v6.l4_offset - sizeof (*ip40)); + ip41 = + (ip4_header_t *) u8_ptr_add (ip61, + vnet_buffer (p1)->map_t. 
+ v6.l4_offset - sizeof (*ip40));
+ vlib_buffer_advance (p0,
+ vnet_buffer (p0)->map_t.v6.l4_offset -
+ sizeof (*ip40));
+ vlib_buffer_advance (p1,
+ vnet_buffer (p1)->map_t.v6.l4_offset -
+ sizeof (*ip40));
+ checksum0 =
+ (u16 *) u8_ptr_add (ip60,
+ vnet_buffer (p0)->map_t.checksum_offset);
+ checksum1 =
+ (u16 *) u8_ptr_add (ip61,
+ vnet_buffer (p1)->map_t.checksum_offset);
+
+ //Replace the IPv6 pseudo-header addresses in the L4 checksums with the translated IPv4 addresses
+ csum0 = ip_csum_sub_even (*checksum0, ip60->src_address.as_u64[0]);
+ csum1 = ip_csum_sub_even (*checksum1, ip61->src_address.as_u64[0]);
+ csum0 = ip_csum_sub_even (csum0, ip60->src_address.as_u64[1]);
+ csum1 = ip_csum_sub_even (csum1, ip61->src_address.as_u64[1]);
+ csum0 = ip_csum_sub_even (csum0, ip60->dst_address.as_u64[0]);
+ csum1 = ip_csum_sub_even (csum1, ip61->dst_address.as_u64[0]);
+ csum0 = ip_csum_sub_even (csum0, ip60->dst_address.as_u64[1]);
+ csum1 = ip_csum_sub_even (csum1, ip61->dst_address.as_u64[1]);
+ csum0 = ip_csum_add_even (csum0, vnet_buffer (p0)->map_t.v6.daddr);
+ csum1 = ip_csum_add_even (csum1, vnet_buffer (p1)->map_t.v6.daddr);
+ csum0 = ip_csum_add_even (csum0, vnet_buffer (p0)->map_t.v6.saddr);
+ csum1 = ip_csum_add_even (csum1, vnet_buffer (p1)->map_t.v6.saddr);
+ *checksum0 = ip_csum_fold (csum0);
+ *checksum1 = ip_csum_fold (csum1);
+
+ if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset))
+ {
+ ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add (ip60,
+ vnet_buffer
+ (p0)->
+ map_t.
+ v6.frag_offset);
+ fragment_id0 = frag_id_6to4 (hdr->identification);
+ flags0 = clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS);
+ }
+ else
+ {
+ fragment_id0 = 0;
+ flags0 = 0;
+ }
+
+ if (PREDICT_FALSE (vnet_buffer (p1)->map_t.v6.frag_offset))
+ {
+ ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add (ip61,
+ vnet_buffer
+ (p1)->
+ map_t.
+ v6.frag_offset); + fragment_id1 = frag_id_6to4 (hdr->identification); + flags1 = clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS); + } + else + { + fragment_id1 = 0; + flags1 = 0; + } + + ip40->dst_address.as_u32 = vnet_buffer (p0)->map_t.v6.daddr; + ip41->dst_address.as_u32 = vnet_buffer (p1)->map_t.v6.daddr; + ip40->src_address.as_u32 = vnet_buffer (p0)->map_t.v6.saddr; + ip41->src_address.as_u32 = vnet_buffer (p1)->map_t.v6.saddr; + ip40->ip_version_and_header_length = + IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS; + ip41->ip_version_and_header_length = + IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS; + ip40->tos = ip6_translate_tos (ip60); + ip41->tos = ip6_translate_tos (ip61); + ip40->length = u16_net_add (ip60->payload_length, + sizeof (*ip40) + sizeof (*ip60) - + vnet_buffer (p0)->map_t.v6.l4_offset); + ip41->length = + u16_net_add (ip61->payload_length, + sizeof (*ip40) + sizeof (*ip60) - + vnet_buffer (p1)->map_t.v6.l4_offset); + ip40->fragment_id = fragment_id0; + ip41->fragment_id = fragment_id1; + ip40->flags_and_fragment_offset = flags0; + ip41->flags_and_fragment_offset = flags1; + ip40->ttl = ip60->hop_limit; + ip41->ttl = ip61->hop_limit; + ip40->protocol = vnet_buffer (p0)->map_t.v6.l4_protocol; + ip41->protocol = vnet_buffer (p1)->map_t.v6.l4_protocol; + ip40->checksum = ip4_header_checksum (ip40); + ip41->checksum = ip4_header_checksum (ip41); + + if (vnet_buffer (p0)->map_t.mtu < p0->current_length) + { + vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; + vnet_buffer (p0)->ip_frag.header_offset = 0; + vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP; + next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG; + } + + if (vnet_buffer (p1)->map_t.mtu < p1->current_length) + { + vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu; + vnet_buffer (p1)->ip_frag.header_offset = 0; + vnet_buffer (p1)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP; + next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG; + } + + vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next, + n_left_to_next, pi0, pi1, next0, + next1); + } +#endif + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 pi0; + vlib_buffer_t *p0; + ip6_header_t *ip60; + u16 *checksum0; + ip_csum_t csum0; + ip4_header_t *ip40; + u16 fragment_id0; + u16 flags0; + ip6_mapt_tcp_udp_next_t next0; + + pi0 = to_next[0] = from[0]; + from += 1; + n_left_from -= 1; + to_next += 1; + n_left_to_next -= 1; + next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP; + + p0 = vlib_get_buffer (vm, pi0); + ip60 = vlib_buffer_get_current (p0); + ip40 = + (ip4_header_t *) u8_ptr_add (ip60, + vnet_buffer (p0)->map_t. + v6.l4_offset - sizeof (*ip40)); + vlib_buffer_advance (p0, + vnet_buffer (p0)->map_t.v6.l4_offset - + sizeof (*ip40)); + checksum0 = + (u16 *) u8_ptr_add (ip60, + vnet_buffer (p0)->map_t.checksum_offset); + + //TODO: This can probably be optimized + csum0 = ip_csum_sub_even (*checksum0, ip60->src_address.as_u64[0]); + csum0 = ip_csum_sub_even (csum0, ip60->src_address.as_u64[1]); + csum0 = ip_csum_sub_even (csum0, ip60->dst_address.as_u64[0]); + csum0 = ip_csum_sub_even (csum0, ip60->dst_address.as_u64[1]); + csum0 = ip_csum_add_even (csum0, vnet_buffer (p0)->map_t.v6.daddr); + csum0 = ip_csum_add_even (csum0, vnet_buffer (p0)->map_t.v6.saddr); + *checksum0 = ip_csum_fold (csum0); + + if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset)) + { + //Only the first fragment + ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add (ip60, + vnet_buffer + (p0)-> + map_t. 
+ v6.frag_offset); + fragment_id0 = frag_id_6to4 (hdr->identification); + flags0 = clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS); + } + else + { + fragment_id0 = 0; + flags0 = 0; + } + + ip40->dst_address.as_u32 = vnet_buffer (p0)->map_t.v6.daddr; + ip40->src_address.as_u32 = vnet_buffer (p0)->map_t.v6.saddr; + ip40->ip_version_and_header_length = + IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS; + ip40->tos = ip6_translate_tos (ip60); + ip40->length = u16_net_add (ip60->payload_length, + sizeof (*ip40) + sizeof (*ip60) - + vnet_buffer (p0)->map_t.v6.l4_offset); + ip40->fragment_id = fragment_id0; + ip40->flags_and_fragment_offset = flags0; + ip40->ttl = ip60->hop_limit; + ip40->protocol = vnet_buffer (p0)->map_t.v6.l4_protocol; + ip40->checksum = ip4_header_checksum (ip40); + + if (vnet_buffer (p0)->map_t.mtu < p0->current_length) + { + //Send to fragmentation node if necessary + vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; + vnet_buffer (p0)->ip_frag.header_offset = 0; + vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP; + next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG; + } + + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, pi0, + next0); + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + return frame->n_vectors; +} + +static_always_inline void +ip6_map_t_classify (vlib_buffer_t * p0, ip6_header_t * ip60, + map_domain_t * d0, i32 * src_port0, + u8 * error0, ip6_mapt_next_t * next0, + u32 l4_len0, ip6_frag_hdr_t * frag0) +{ + if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset && + ip6_frag_hdr_offset (frag0))) + { + *next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED; + if (d0->ea_bits_len == 0 && d0->rules) + { + *src_port0 = 0; + } + else + { + *src_port0 = ip6_map_fragment_get (ip60, frag0, d0); + *error0 = (*src_port0 != -1) ? *error0 : MAP_ERROR_FRAGMENT_DROPPED; + } + } + else + if (PREDICT_TRUE + (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP)) + { + *error0 = + l4_len0 < sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : *error0; + vnet_buffer (p0)->map_t.checksum_offset = + vnet_buffer (p0)->map_t.v6.l4_offset + 16; + *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP; + *src_port0 = + (i32) * + ((u16 *) u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset)); + } + else + if (PREDICT_TRUE + (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP)) + { + *error0 = + l4_len0 < sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : *error0; + vnet_buffer (p0)->map_t.checksum_offset = + vnet_buffer (p0)->map_t.v6.l4_offset + 6; + *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP; + *src_port0 = + (i32) * + ((u16 *) u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset)); + } + else if (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6) + { + *error0 = + l4_len0 < sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : *error0; + *next0 = IP6_MAPT_NEXT_MAPT_ICMP; + if (d0->ea_bits_len == 0 && d0->rules) + { + *src_port0 = 0; + } + else + if (((icmp46_header_t *) + u8_ptr_add (ip60, + vnet_buffer (p0)->map_t.v6.l4_offset))->code == + ICMP6_echo_reply + || ((icmp46_header_t *) + u8_ptr_add (ip60, + vnet_buffer (p0)->map_t.v6.l4_offset))->code == + ICMP6_echo_request) + { + *src_port0 = + (i32) * + ((u16 *) + u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset + 6)); + } + } + else + { + //TODO: In case of 1:1 mapping, it might be possible to do something with those packets. 
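+ //Protocols other than TCP, UDP and ICMPv6 are not handled by the stateless translator (no known port or checksum offset), so the packet is counted as MAP_ERROR_BAD_PROTOCOL and dropped.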
+ *error0 = MAP_ERROR_BAD_PROTOCOL; + } +} + +static uword +ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) +{ + u32 n_left_from, *from, next_index, *to_next, n_left_to_next; + vlib_node_runtime_t *error_node = + vlib_node_get_runtime (vm, ip6_map_t_node.index); + vlib_combined_counter_main_t *cm = map_main.domain_counters; + u32 cpu_index = os_get_cpu_number (); + + from = vlib_frame_vector_args (frame); + n_left_from = frame->n_vectors; + next_index = node->cached_next_index; + while (n_left_from > 0) + { + vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); + +#ifdef IP6_MAP_T_DUAL_LOOP + while (n_left_from >= 4 && n_left_to_next >= 2) + { + u32 pi0, pi1; + vlib_buffer_t *p0, *p1; + ip6_header_t *ip60, *ip61; + u8 error0, error1; + ip6_mapt_next_t next0, next1; + u32 l4_len0, l4_len1; + i32 src_port0, src_port1; + map_domain_t *d0, *d1; + ip6_frag_hdr_t *frag0, *frag1; + u32 saddr0, saddr1; + next0 = next1 = 0; //Because compiler whines + + pi0 = to_next[0] = from[0]; + pi1 = to_next[1] = from[1]; + from += 2; + n_left_from -= 2; + to_next += 2; + n_left_to_next -= 2; + + error0 = MAP_ERROR_NONE; + error1 = MAP_ERROR_NONE; + + p0 = vlib_get_buffer (vm, pi0); + p1 = vlib_get_buffer (vm, pi1); + ip60 = vlib_buffer_get_current (p0); + ip61 = vlib_buffer_get_current (p1); + + saddr0 = map_get_ip4 (&ip60->src_address); + saddr1 = map_get_ip4 (&ip61->src_address); + d0 = ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX], + (ip4_address_t *) & saddr0, + &vnet_buffer (p0)->map_t.map_domain_index, + &error0); + d1 = + ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX], + (ip4_address_t *) & saddr1, + &vnet_buffer (p1)->map_t.map_domain_index, + &error1); + + vnet_buffer (p0)->map_t.v6.saddr = saddr0; + vnet_buffer (p1)->map_t.v6.saddr = saddr1; + vnet_buffer (p0)->map_t.v6.daddr = + ip6_map_t_embedded_address (d0, &ip60->dst_address); + vnet_buffer (p1)->map_t.v6.daddr = + ip6_map_t_embedded_address (d1, &ip61->dst_address); + vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0; + vnet_buffer (p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0; + + if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length, + &(vnet_buffer (p0)->map_t. + v6.l4_protocol), + &(vnet_buffer (p0)->map_t. + v6.l4_offset), + &(vnet_buffer (p0)->map_t. + v6.frag_offset)))) + { + error0 = MAP_ERROR_MALFORMED; + next0 = IP6_MAPT_NEXT_DROP; + } + + if (PREDICT_FALSE (ip6_parse (ip61, p1->current_length, + &(vnet_buffer (p1)->map_t. + v6.l4_protocol), + &(vnet_buffer (p1)->map_t. + v6.l4_offset), + &(vnet_buffer (p1)->map_t. + v6.frag_offset)))) + { + error1 = MAP_ERROR_MALFORMED; + next1 = IP6_MAPT_NEXT_DROP; + } + + src_port0 = src_port1 = -1; + l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) + + sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset; + l4_len1 = (u32) clib_net_to_host_u16 (ip61->payload_length) + + sizeof (*ip60) - vnet_buffer (p1)->map_t.v6.l4_offset; + frag0 = + (ip6_frag_hdr_t *) u8_ptr_add (ip60, + vnet_buffer (p0)->map_t. + v6.frag_offset); + frag1 = + (ip6_frag_hdr_t *) u8_ptr_add (ip61, + vnet_buffer (p1)->map_t. 
+ v6.frag_offset); + + ip6_map_t_classify (p0, ip60, d0, &src_port0, &error0, &next0, + l4_len0, frag0); + ip6_map_t_classify (p1, ip61, d1, &src_port1, &error1, &next1, + l4_len1, frag1); + + if (PREDICT_FALSE + ((src_port0 != -1) + && (ip60->src_address.as_u64[0] != + map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr, + src_port0) + || ip60->src_address.as_u64[1] != map_get_sfx_net (d0, + vnet_buffer + (p0)->map_t.v6.saddr, + src_port0)))) + { + error0 = MAP_ERROR_SEC_CHECK; + } + + if (PREDICT_FALSE + ((src_port1 != -1) + && (ip61->src_address.as_u64[0] != + map_get_pfx_net (d1, vnet_buffer (p1)->map_t.v6.saddr, + src_port1) + || ip61->src_address.as_u64[1] != map_get_sfx_net (d1, + vnet_buffer + (p1)->map_t.v6.saddr, + src_port1)))) + { + error1 = MAP_ERROR_SEC_CHECK; + } + + if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset && + !ip6_frag_hdr_offset ((ip6_frag_hdr_t *) + u8_ptr_add (ip60, + vnet_buffer + (p0)->map_t. + v6.frag_offset))) + && (src_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules) + && (error0 == MAP_ERROR_NONE)) + { + ip6_map_fragment_cache (ip60, + (ip6_frag_hdr_t *) u8_ptr_add (ip60, + vnet_buffer + (p0)->map_t. + v6.frag_offset), + d0, src_port0); + } + + if (PREDICT_FALSE (vnet_buffer (p1)->map_t.v6.frag_offset && + !ip6_frag_hdr_offset ((ip6_frag_hdr_t *) + u8_ptr_add (ip61, + vnet_buffer + (p1)->map_t. + v6.frag_offset))) + && (src_port1 != -1) && (d1->ea_bits_len != 0 || !d1->rules) + && (error1 == MAP_ERROR_NONE)) + { + ip6_map_fragment_cache (ip61, + (ip6_frag_hdr_t *) u8_ptr_add (ip61, + vnet_buffer + (p1)->map_t. + v6.frag_offset), + d1, src_port1); + } + + if (PREDICT_TRUE + (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP)) + { + vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, + cpu_index, + vnet_buffer (p0)-> + map_t.map_domain_index, 1, + clib_net_to_host_u16 + (ip60->payload_length)); + } + + if (PREDICT_TRUE + (error1 == MAP_ERROR_NONE && next1 != IP6_MAPT_NEXT_MAPT_ICMP)) + { + vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, + cpu_index, + vnet_buffer (p1)-> + map_t.map_domain_index, 1, + clib_net_to_host_u16 + (ip61->payload_length)); + } + + next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0; + next1 = (error1 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next1; + p0->error = error_node->errors[error0]; + p1->error = error_node->errors[error1]; + vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next, + n_left_to_next, pi0, pi1, next0, + next1); + } +#endif + + while (n_left_from > 0 && n_left_to_next > 0) + { + u32 pi0; + vlib_buffer_t *p0; + ip6_header_t *ip60; + u8 error0; + u32 l4_len0; + i32 src_port0; + map_domain_t *d0; + ip6_frag_hdr_t *frag0; + ip6_mapt_next_t next0 = 0; + u32 saddr; + + pi0 = to_next[0] = from[0]; + from += 1; + n_left_from -= 1; + to_next += 1; + n_left_to_next -= 1; + error0 = MAP_ERROR_NONE; + + p0 = vlib_get_buffer (vm, pi0); + ip60 = vlib_buffer_get_current (p0); + //Save saddr in a different variable to not overwrite ip.adj_index + saddr = map_get_ip4 (&ip60->src_address); + d0 = ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX], + (ip4_address_t *) & saddr, + &vnet_buffer (p0)->map_t.map_domain_index, + &error0); + + //FIXME: What if d0 is null + vnet_buffer (p0)->map_t.v6.saddr = saddr; + vnet_buffer (p0)->map_t.v6.daddr = + ip6_map_t_embedded_address (d0, &ip60->dst_address); + vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0; + + if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length, + &(vnet_buffer (p0)->map_t. 
+ v6.l4_protocol), + &(vnet_buffer (p0)->map_t. + v6.l4_offset), + &(vnet_buffer (p0)->map_t. + v6.frag_offset)))) + { + error0 = MAP_ERROR_MALFORMED; + next0 = IP6_MAPT_NEXT_DROP; + } + + src_port0 = -1; + l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) + + sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset; + frag0 = + (ip6_frag_hdr_t *) u8_ptr_add (ip60, + vnet_buffer (p0)->map_t. + v6.frag_offset); + + + if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset && + ip6_frag_hdr_offset (frag0))) + { + src_port0 = ip6_map_fragment_get (ip60, frag0, d0); + error0 = (src_port0 != -1) ? error0 : MAP_ERROR_FRAGMENT_MEMORY; + next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED; + } + else + if (PREDICT_TRUE + (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP)) + { + error0 = + l4_len0 < + sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : error0; + vnet_buffer (p0)->map_t.checksum_offset = + vnet_buffer (p0)->map_t.v6.l4_offset + 16; + next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP; + src_port0 = + (i32) * + ((u16 *) + u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset)); + } + else + if (PREDICT_TRUE + (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP)) + { + error0 = + l4_len0 < + sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : error0; + vnet_buffer (p0)->map_t.checksum_offset = + vnet_buffer (p0)->map_t.v6.l4_offset + 6; + next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP; + src_port0 = + (i32) * + ((u16 *) + u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset)); + } + else if (vnet_buffer (p0)->map_t.v6.l4_protocol == + IP_PROTOCOL_ICMP6) + { + error0 = + l4_len0 < + sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : error0; + next0 = IP6_MAPT_NEXT_MAPT_ICMP; + if (((icmp46_header_t *) + u8_ptr_add (ip60, + vnet_buffer (p0)->map_t.v6.l4_offset))->code == + ICMP6_echo_reply + || ((icmp46_header_t *) + u8_ptr_add (ip60, + vnet_buffer (p0)->map_t.v6. + l4_offset))->code == ICMP6_echo_request) + src_port0 = + (i32) * + ((u16 *) + u8_ptr_add (ip60, + vnet_buffer (p0)->map_t.v6.l4_offset + 6)); + } + else + { + //TODO: In case of 1:1 mapping, it might be possible to do something with those packets. + error0 = MAP_ERROR_BAD_PROTOCOL; + } + + //Security check + if (PREDICT_FALSE + ((src_port0 != -1) + && (ip60->src_address.as_u64[0] != + map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr, + src_port0) + || ip60->src_address.as_u64[1] != map_get_sfx_net (d0, + vnet_buffer + (p0)->map_t.v6.saddr, + src_port0)))) + { + //Security check when src_port0 is not zero (non-first fragment, UDP or TCP) + error0 = MAP_ERROR_SEC_CHECK; + } + + //Fragmented first packet needs to be cached for following packets + if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset && + !ip6_frag_hdr_offset ((ip6_frag_hdr_t *) + u8_ptr_add (ip60, + vnet_buffer + (p0)->map_t. + v6.frag_offset))) + && (src_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules) + && (error0 == MAP_ERROR_NONE)) + { + ip6_map_fragment_cache (ip60, + (ip6_frag_hdr_t *) u8_ptr_add (ip60, + vnet_buffer + (p0)->map_t. + v6.frag_offset), + d0, src_port0); + } + + if (PREDICT_TRUE + (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP)) + { + vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, + cpu_index, + vnet_buffer (p0)-> + map_t.map_domain_index, 1, + clib_net_to_host_u16 + (ip60->payload_length)); + } + + next0 = (error0 != MAP_ERROR_NONE) ? 
IP6_MAPT_NEXT_DROP : next0; + p0->error = error_node->errors[error0]; + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, pi0, + next0); + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + } + return frame->n_vectors; +} + +static char *map_t_error_strings[] = { +#define _(sym,string) string, + foreach_map_error +#undef _ +}; + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE(ip6_map_t_fragmented_node) = { + .function = ip6_map_t_fragmented, + .name = "ip6-map-t-fragmented", + .vector_size = sizeof (u32), + .format_trace = format_map_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = MAP_N_ERROR, + .error_strings = map_t_error_strings, + + .n_next_nodes = IP6_MAPT_FRAGMENTED_N_NEXT, + .next_nodes = { + [IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP] = "ip4-lookup", + [IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME, + [IP6_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop", + }, +}; +/* *INDENT-ON* */ + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE(ip6_map_t_icmp_node) = { + .function = ip6_map_t_icmp, + .name = "ip6-map-t-icmp", + .vector_size = sizeof (u32), + .format_trace = format_map_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = MAP_N_ERROR, + .error_strings = map_t_error_strings, + + .n_next_nodes = IP6_MAPT_ICMP_N_NEXT, + .next_nodes = { + [IP6_MAPT_ICMP_NEXT_IP4_LOOKUP] = "ip4-lookup", + [IP6_MAPT_ICMP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME, + [IP6_MAPT_ICMP_NEXT_DROP] = "error-drop", + }, +}; +/* *INDENT-ON* */ + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE(ip6_map_t_tcp_udp_node) = { + .function = ip6_map_t_tcp_udp, + .name = "ip6-map-t-tcp-udp", + .vector_size = sizeof (u32), + .format_trace = format_map_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = MAP_N_ERROR, + .error_strings = map_t_error_strings, + + .n_next_nodes = IP6_MAPT_TCP_UDP_N_NEXT, + .next_nodes = { + [IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP] = "ip4-lookup", + [IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME, + [IP6_MAPT_TCP_UDP_NEXT_DROP] = "error-drop", + }, +}; +/* *INDENT-ON* */ + +/* *INDENT-OFF* */ +VLIB_REGISTER_NODE(ip6_map_t_node) = { + .function = ip6_map_t, + .name = "ip6-map-t", + .vector_size = sizeof(u32), + .format_trace = format_map_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + + .n_errors = MAP_N_ERROR, + .error_strings = map_t_error_strings, + + .n_next_nodes = IP6_MAPT_N_NEXT, + .next_nodes = { + [IP6_MAPT_NEXT_MAPT_TCP_UDP] = "ip6-map-t-tcp-udp", + [IP6_MAPT_NEXT_MAPT_ICMP] = "ip6-map-t-icmp", + [IP6_MAPT_NEXT_MAPT_FRAGMENTED] = "ip6-map-t-fragmented", + [IP6_MAPT_NEXT_DROP] = "error-drop", + }, +}; +/* *INDENT-ON* */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/map/map.api b/src/vnet/map/map.api new file mode 100644 index 00000000..4e4be85e --- /dev/null +++ b/src/vnet/map/map.api @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +/** \brief Add MAP domains + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param ip6_prefix - Rule IPv6 prefix + @param ip4_prefix - Rule IPv4 prefix + @param ip6_src - MAP domain IPv6 BR address / Tunnel source + @param ip6_prefix_len - Rule IPv6 prefix length + @param ip4_prefix_len - Rule IPv4 prefix length + @param ea_bits_len - Embedded Address bits length + @param psid_offset - Port Set Identifider (PSID) offset + @param psid_length - PSID length + @param is_translation - MAP-E / MAP-T + @param mtu - MTU +*/ +define map_add_domain +{ + u32 client_index; + u32 context; + u8 ip6_prefix[16]; + u8 ip4_prefix[4]; + u8 ip6_src[16]; + u8 ip6_prefix_len; + u8 ip4_prefix_len; + u8 ip6_src_prefix_len; + u8 ea_bits_len; + u8 psid_offset; + u8 psid_length; + u8 is_translation; + u16 mtu; +}; + +/** \brief Reply for MAP domain add + @param context - returned sender context, to match reply w/ request + @param index - MAP domain index + @param retval - return code +*/ +define map_add_domain_reply +{ + u32 context; + u32 index; + i32 retval; +}; + +/** \brief Delete MAP domain + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param index - MAP Domain index +*/ +define map_del_domain +{ + u32 client_index; + u32 context; + u32 index; +}; + +/** \brief Reply for MAP domain del + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define map_del_domain_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Add or Delete MAP rule from a domain (Only used for shared IPv4 per subscriber) + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request + @param index - MAP Domain index + @param is_add - If 1 add rule, if 0 delete rule + @param ip6_dst - MAP CE IPv6 address + @param psid - Rule PSID +*/ +define map_add_del_rule +{ + u32 client_index; + u32 context; + u32 index; + u32 is_add; + u8 ip6_dst[16]; + u16 psid; +}; + +/** \brief Reply for MAP rule add/del + @param context - returned sender context, to match reply w/ request + @param retval - return code +*/ +define map_add_del_rule_reply +{ + u32 context; + i32 retval; +}; + +/** \brief Get list of map domains + @param client_index - opaque cookie to identify the sender +*/ +define map_domain_dump +{ + u32 client_index; + u32 context; +}; + +define map_domain_details +{ + u32 context; + u32 domain_index; + u8 ip6_prefix[16]; + u8 ip4_prefix[4]; + u8 ip6_src[16]; + u8 ip6_prefix_len; + u8 ip4_prefix_len; + u8 ip6_src_len; + u8 ea_bits_len; + u8 psid_offset; + u8 psid_length; + u8 flags; + u16 mtu; + u8 is_translation; +}; + +define map_rule_dump +{ + u32 client_index; + u32 context; + u32 domain_index; +}; + +define map_rule_details +{ + u32 context; + u8 ip6_dst[16]; + u16 psid; +}; + +/** \brief Request for a single block of summary stats + @param client_index - opaque cookie to identify the sender + @param context - sender context, to match reply w/ request +*/ +define map_summary_stats +{ + u32 client_index; + u32 context; +}; + +/** \brief Reply for map_summary_stats request + @param context - sender context, to match reply w/ request + @param retval - return code for request + @param total_bindings - + @param total_pkts - + @param total_ip4_fragments - + @param total_security_check - +*/ +define map_summary_stats_reply +{ + u32 context; + i32 retval; + u64 total_bindings; + 
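/* The two-element arrays below are assumed to hold one counter per direction, matching the per-domain RX/TX counters. */ +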
u64 total_pkts[2]; + u64 total_bytes[2]; + u64 total_ip4_fragments; + u64 total_security_check[2]; +}; diff --git a/src/vnet/map/map.c b/src/vnet/map/map.c new file mode 100644 index 00000000..aeec6a94 --- /dev/null +++ b/src/vnet/map/map.c @@ -0,0 +1,2166 @@ +/* + * map.c : MAP support + * + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include + +#include "map.h" + +#ifdef __SSE4_2__ +static inline u32 +crc_u32 (u32 data, u32 value) +{ + __asm__ volatile ("crc32l %[data], %[value];":[value] "+r" (value):[data] + "rm" (data)); + return value; +} +#else +#include + +static inline u32 +crc_u32 (u32 data, u32 value) +{ + u64 tmp = ((u64) data << 32) | (u64) value; + return (u32) clib_xxhash (tmp); +} +#endif + +/* + * This code supports the following MAP modes: + * + * Algorithmic Shared IPv4 address (ea_bits_len > 0): + * ea_bits_len + ip4_prefix > 32 + * psid_length > 0, ip6_prefix < 64, ip4_prefix <= 32 + * Algorithmic Full IPv4 address (ea_bits_len > 0): + * ea_bits_len + ip4_prefix = 32 + * psid_length = 0, ip6_prefix < 64, ip4_prefix <= 32 + * Algorithmic IPv4 prefix (ea_bits_len > 0): + * ea_bits_len + ip4_prefix < 32 + * psid_length = 0, ip6_prefix < 64, ip4_prefix <= 32 + * + * Independent Shared IPv4 address (ea_bits_len = 0): + * ip4_prefix = 32 + * psid_length > 0 + * Rule IPv6 address = 128, Rule PSID Set + * Independent Full IPv4 address (ea_bits_len = 0): + * ip4_prefix = 32 + * psid_length = 0, ip6_prefix = 128 + * Independent IPv4 prefix (ea_bits_len = 0): + * ip4_prefix < 32 + * psid_length = 0, ip6_prefix = 128 + * + */ + +/* + * This code supports MAP-T: + * + * With DMR prefix length equal to 96. + * + */ + + +i32 +ip4_get_port (ip4_header_t * ip, map_dir_e dir, u16 buffer_len) +{ + //TODO: use buffer length + if (ip->ip_version_and_header_length != 0x45 || + ip4_get_fragment_offset (ip)) + return -1; + + if (PREDICT_TRUE ((ip->protocol == IP_PROTOCOL_TCP) || + (ip->protocol == IP_PROTOCOL_UDP))) + { + udp_header_t *udp = (void *) (ip + 1); + return (dir == MAP_SENDER) ? udp->src_port : udp->dst_port; + } + else if (ip->protocol == IP_PROTOCOL_ICMP) + { + icmp46_header_t *icmp = (void *) (ip + 1); + if (icmp->type == ICMP4_echo_request || icmp->type == ICMP4_echo_reply) + { + return *((u16 *) (icmp + 1)); + } + else if (clib_net_to_host_u16 (ip->length) >= 64) + { + ip = (ip4_header_t *) (icmp + 2); + if (PREDICT_TRUE ((ip->protocol == IP_PROTOCOL_TCP) || + (ip->protocol == IP_PROTOCOL_UDP))) + { + udp_header_t *udp = (void *) (ip + 1); + return (dir == MAP_SENDER) ? 
udp->dst_port : udp->src_port; + } + else if (ip->protocol == IP_PROTOCOL_ICMP) + { + icmp46_header_t *icmp = (void *) (ip + 1); + if (icmp->type == ICMP4_echo_request || + icmp->type == ICMP4_echo_reply) + { + return *((u16 *) (icmp + 1)); + } + } + } + } + return -1; +} + +i32 +ip6_get_port (ip6_header_t * ip6, map_dir_e dir, u16 buffer_len) +{ + u8 l4_protocol; + u16 l4_offset; + u16 frag_offset; + u8 *l4; + + if (ip6_parse (ip6, buffer_len, &l4_protocol, &l4_offset, &frag_offset)) + return -1; + + //TODO: Use buffer length + + if (frag_offset && + ip6_frag_hdr_offset (((ip6_frag_hdr_t *) + u8_ptr_add (ip6, frag_offset)))) + return -1; //Can't deal with non-first fragment for now + + l4 = u8_ptr_add (ip6, l4_offset); + if (l4_protocol == IP_PROTOCOL_TCP || l4_protocol == IP_PROTOCOL_UDP) + { + return (dir == + MAP_SENDER) ? ((udp_header_t *) (l4))->src_port : ((udp_header_t + *) + (l4))->dst_port; + } + else if (l4_protocol == IP_PROTOCOL_ICMP6) + { + icmp46_header_t *icmp = (icmp46_header_t *) (l4); + if (icmp->type == ICMP6_echo_request) + { + return (dir == MAP_SENDER) ? ((u16 *) (icmp))[2] : -1; + } + else if (icmp->type == ICMP6_echo_reply) + { + return (dir == MAP_SENDER) ? -1 : ((u16 *) (icmp))[2]; + } + } + return -1; +} + + +int +map_create_domain (ip4_address_t * ip4_prefix, + u8 ip4_prefix_len, + ip6_address_t * ip6_prefix, + u8 ip6_prefix_len, + ip6_address_t * ip6_src, + u8 ip6_src_len, + u8 ea_bits_len, + u8 psid_offset, + u8 psid_length, u32 * map_domain_index, u16 mtu, u8 flags) +{ + u8 suffix_len, suffix_shift; + map_main_t *mm = &map_main; + dpo_id_t dpo_v4 = DPO_INVALID; + dpo_id_t dpo_v6 = DPO_INVALID; + fib_node_index_t fei; + map_domain_t *d; + + /* Sanity check on the src prefix length */ + if (flags & MAP_DOMAIN_TRANSLATION) + { + if (ip6_src_len != 96) + { + clib_warning ("MAP-T only supports ip6_src_len = 96 for now."); + return -1; + } + } + else + { + if (ip6_src_len != 128) + { + clib_warning + ("MAP-E requires a BR address, not a prefix (ip6_src_len should " + "be 128)."); + return -1; + } + } + + /* How many, and which bits to grab from the IPv4 DA */ + if (ip4_prefix_len + ea_bits_len < 32) + { + flags |= MAP_DOMAIN_PREFIX; + suffix_shift = 32 - ip4_prefix_len - ea_bits_len; + suffix_len = ea_bits_len; + } + else + { + suffix_shift = 0; + suffix_len = 32 - ip4_prefix_len; + } + + /* EA bits must be within the first 64 bits */ + if (ea_bits_len > 0 && ((ip6_prefix_len + ea_bits_len) > 64 || + ip6_prefix_len + suffix_len + psid_length > 64)) + { + clib_warning + ("Embedded Address bits must be within the first 64 bits of " + "the IPv6 prefix"); + return -1; + } + + /* Get domain index */ + pool_get_aligned (mm->domains, d, CLIB_CACHE_LINE_BYTES); + memset (d, 0, sizeof (*d)); + *map_domain_index = d - mm->domains; + + /* Init domain struct */ + d->ip4_prefix.as_u32 = ip4_prefix->as_u32; + d->ip4_prefix_len = ip4_prefix_len; + d->ip6_prefix = *ip6_prefix; + d->ip6_prefix_len = ip6_prefix_len; + d->ip6_src = *ip6_src; + d->ip6_src_len = ip6_src_len; + d->ea_bits_len = ea_bits_len; + d->psid_offset = psid_offset; + d->psid_length = psid_length; + d->mtu = mtu; + d->flags = flags; + d->suffix_shift = suffix_shift; + d->suffix_mask = (1 << suffix_len) - 1; + + d->psid_shift = 16 - psid_length - psid_offset; + d->psid_mask = (1 << d->psid_length) - 1; + d->ea_shift = 64 - ip6_prefix_len - suffix_len - d->psid_length; + + /* MAP data-plane object */ + if (d->flags & MAP_DOMAIN_TRANSLATION) + map_t_dpo_create (DPO_PROTO_IP4, *map_domain_index, &dpo_v4); + else + 
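/* MAP-E: encapsulation DPO for IPv4 packets entering the domain */ +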
map_dpo_create (DPO_PROTO_IP4, *map_domain_index, &dpo_v4); + + /* Create ip4 route */ + fib_prefix_t pfx = { + .fp_proto = FIB_PROTOCOL_IP4, + .fp_len = d->ip4_prefix_len, + .fp_addr = { + .ip4 = d->ip4_prefix, + } + , + }; + fib_table_entry_special_dpo_add (0, &pfx, + FIB_SOURCE_MAP, + FIB_ENTRY_FLAG_EXCLUSIVE, &dpo_v4); + dpo_reset (&dpo_v4); + + /* + * Multiple MAP domains may share same source IPv6 TEP. + * In this case the route will exist and be MAP sourced. + * Find the adj (if any) already contributed and modify it + */ + fib_prefix_t pfx6 = { + .fp_proto = FIB_PROTOCOL_IP6, + .fp_len = d->ip6_src_len, + .fp_addr = { + .ip6 = d->ip6_src, + } + , + }; + fei = fib_table_lookup_exact_match (0, &pfx6); + + if (FIB_NODE_INDEX_INVALID != fei) + { + dpo_id_t dpo = DPO_INVALID; + + if (fib_entry_get_dpo_for_source (fei, FIB_SOURCE_MAP, &dpo)) + { + /* + * modify the existing MAP to indicate it's shared + * skip to route add. + */ + const dpo_id_t *md_dpo; + map_dpo_t *md; + + ASSERT (DPO_LOAD_BALANCE == dpo.dpoi_type); + + md_dpo = load_balance_get_bucket (dpo.dpoi_index, 0); + md = map_dpo_get (md_dpo->dpoi_index); + + md->md_domain = ~0; + dpo_copy (&dpo_v6, md_dpo); + dpo_reset (&dpo); + + goto route_add; + } + } + + if (d->flags & MAP_DOMAIN_TRANSLATION) + map_t_dpo_create (DPO_PROTO_IP6, *map_domain_index, &dpo_v6); + else + map_dpo_create (DPO_PROTO_IP6, *map_domain_index, &dpo_v6); + +route_add: + /* + * Create ip6 route. This is a reference counted add. If the prefix + * already exists and is MAP sourced, it is now MAP source n+1 times + * and will need to be removed n+1 times. + */ + fib_table_entry_special_dpo_add (0, &pfx6, + FIB_SOURCE_MAP, + FIB_ENTRY_FLAG_EXCLUSIVE, &dpo_v6); + dpo_reset (&dpo_v6); + + /* Validate packet/byte counters */ + map_domain_counter_lock (mm); + int i; + for (i = 0; i < vec_len (mm->simple_domain_counters); i++) + { + vlib_validate_simple_counter (&mm->simple_domain_counters[i], + *map_domain_index); + vlib_zero_simple_counter (&mm->simple_domain_counters[i], + *map_domain_index); + } + for (i = 0; i < vec_len (mm->domain_counters); i++) + { + vlib_validate_combined_counter (&mm->domain_counters[i], + *map_domain_index); + vlib_zero_combined_counter (&mm->domain_counters[i], *map_domain_index); + } + map_domain_counter_unlock (mm); + + return 0; +} + +/* + * map_delete_domain + */ +int +map_delete_domain (u32 map_domain_index) +{ + map_main_t *mm = &map_main; + map_domain_t *d; + + if (pool_is_free_index (mm->domains, map_domain_index)) + { + clib_warning ("MAP domain delete: domain does not exist: %d", + map_domain_index); + return -1; + } + + d = pool_elt_at_index (mm->domains, map_domain_index); + + fib_prefix_t pfx = { + .fp_proto = FIB_PROTOCOL_IP4, + .fp_len = d->ip4_prefix_len, + .fp_addr = { + .ip4 = d->ip4_prefix, + } + , + }; + fib_table_entry_special_remove (0, &pfx, FIB_SOURCE_MAP); + + fib_prefix_t pfx6 = { + .fp_proto = FIB_PROTOCOL_IP6, + .fp_len = d->ip6_src_len, + .fp_addr = { + .ip6 = d->ip6_src, + } + , + }; + fib_table_entry_special_remove (0, &pfx6, FIB_SOURCE_MAP); + + /* Deleting rules */ + if (d->rules) + clib_mem_free (d->rules); + + pool_put (mm->domains, d); + + return 0; +} + +int +map_add_del_psid (u32 map_domain_index, u16 psid, ip6_address_t * tep, + u8 is_add) +{ + map_domain_t *d; + map_main_t *mm = &map_main; + + if (pool_is_free_index (mm->domains, map_domain_index)) + { + clib_warning ("MAP rule: domain does not exist: %d", map_domain_index); + return -1; + } + d = pool_elt_at_index (mm->domains, 
map_domain_index); + + /* Rules are only used in 1:1 independent case */ + if (d->ea_bits_len > 0) + return (-1); + + if (!d->rules) + { + u32 l = (0x1 << d->psid_length) * sizeof (ip6_address_t); + d->rules = clib_mem_alloc_aligned (l, CLIB_CACHE_LINE_BYTES); + if (!d->rules) + return -1; + memset (d->rules, 0, l); + } + + if (psid >= (0x1 << d->psid_length)) + { + clib_warning ("MAP rule: PSID outside bounds: %d [%d]", psid, + 0x1 << d->psid_length); + return -1; + } + + if (is_add) + { + d->rules[psid] = *tep; + } + else + { + memset (&d->rules[psid], 0, sizeof (ip6_address_t)); + } + return 0; +} + +#ifdef MAP_SKIP_IP6_LOOKUP +static void +map_pre_resolve (ip4_address_t * ip4, ip6_address_t * ip6) +{ + map_main_t *mm = &map_main; + ip6_main_t *im6 = &ip6_main; + + if (ip6->as_u64[0] != 0 || ip6->as_u64[1] != 0) + { + // FIXME NOT an ADJ + mm->adj6_index = ip6_fib_table_fwding_lookup (im6, 0, ip6); + clib_warning ("FIB lookup results in: %u", mm->adj6_index); + } + if (ip4->as_u32 != 0) + { + // FIXME NOT an ADJ + mm->adj4_index = ip4_fib_table_lookup_lb (0, ip4); + clib_warning ("FIB lookup results in: %u", mm->adj4_index); + } +} +#endif + +static clib_error_t * +map_security_check_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + unformat_input_t _line_input, *line_input = &_line_input; + map_main_t *mm = &map_main; + /* Get a line of input. */ + if (!unformat_user (input, unformat_line_input, line_input)) + return 0; + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (line_input, "off")) + mm->sec_check = false; + else if (unformat (line_input, "on")) + mm->sec_check = true; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + unformat_free (line_input); + return 0; +} + +static clib_error_t * +map_security_check_frag_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + unformat_input_t _line_input, *line_input = &_line_input; + map_main_t *mm = &map_main; + /* Get a line of input. */ + if (!unformat_user (input, unformat_line_input, line_input)) + return 0; + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (line_input, "off")) + mm->sec_check_frag = false; + else if (unformat (line_input, "on")) + mm->sec_check_frag = true; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + unformat_free (line_input); + return 0; +} + +static clib_error_t * +map_add_domain_command_fn (vlib_main_t * vm, + unformat_input_t * input, vlib_cli_command_t * cmd) +{ + unformat_input_t _line_input, *line_input = &_line_input; + ip4_address_t ip4_prefix; + ip6_address_t ip6_prefix; + ip6_address_t ip6_src; + u32 ip6_prefix_len = 0, ip4_prefix_len = 0, map_domain_index, ip6_src_len; + u32 num_m_args = 0; + /* Optional arguments */ + u32 ea_bits_len = 0, psid_offset = 0, psid_length = 0; + u32 mtu = 0; + u8 flags = 0; + ip6_src_len = 128; + + /* Get a line of input. 
*/ + if (!unformat_user (input, unformat_line_input, line_input)) + return 0; + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) + { + if (unformat + (line_input, "ip4-pfx %U/%d", unformat_ip4_address, &ip4_prefix, + &ip4_prefix_len)) + num_m_args++; + else + if (unformat + (line_input, "ip6-pfx %U/%d", unformat_ip6_address, &ip6_prefix, + &ip6_prefix_len)) + num_m_args++; + else + if (unformat + (line_input, "ip6-src %U/%d", unformat_ip6_address, &ip6_src, + &ip6_src_len)) + num_m_args++; + else + if (unformat + (line_input, "ip6-src %U", unformat_ip6_address, &ip6_src)) + num_m_args++; + else if (unformat (line_input, "ea-bits-len %d", &ea_bits_len)) + num_m_args++; + else if (unformat (line_input, "psid-offset %d", &psid_offset)) + num_m_args++; + else if (unformat (line_input, "psid-len %d", &psid_length)) + num_m_args++; + else if (unformat (line_input, "mtu %d", &mtu)) + num_m_args++; + else if (unformat (line_input, "map-t")) + flags |= MAP_DOMAIN_TRANSLATION; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + unformat_free (line_input); + + if (num_m_args < 3) + return clib_error_return (0, "mandatory argument(s) missing"); + + map_create_domain (&ip4_prefix, ip4_prefix_len, + &ip6_prefix, ip6_prefix_len, &ip6_src, ip6_src_len, + ea_bits_len, psid_offset, psid_length, &map_domain_index, + mtu, flags); + + return 0; +} + +static clib_error_t * +map_del_domain_command_fn (vlib_main_t * vm, + unformat_input_t * input, vlib_cli_command_t * cmd) +{ + unformat_input_t _line_input, *line_input = &_line_input; + u32 num_m_args = 0; + u32 map_domain_index; + + /* Get a line of input. */ + if (!unformat_user (input, unformat_line_input, line_input)) + return 0; + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (line_input, "index %d", &map_domain_index)) + num_m_args++; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + unformat_free (line_input); + + if (num_m_args != 1) + return clib_error_return (0, "mandatory argument(s) missing"); + + map_delete_domain (map_domain_index); + + return 0; +} + +static clib_error_t * +map_add_rule_command_fn (vlib_main_t * vm, + unformat_input_t * input, vlib_cli_command_t * cmd) +{ + unformat_input_t _line_input, *line_input = &_line_input; + ip6_address_t tep; + u32 num_m_args = 0; + u32 psid = 0, map_domain_index; + + /* Get a line of input. 
*/ + if (!unformat_user (input, unformat_line_input, line_input)) + return 0; + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (line_input, "index %d", &map_domain_index)) + num_m_args++; + else if (unformat (line_input, "psid %d", &psid)) + num_m_args++; + else + if (unformat (line_input, "ip6-dst %U", unformat_ip6_address, &tep)) + num_m_args++; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + unformat_free (line_input); + + if (num_m_args != 3) + return clib_error_return (0, "mandatory argument(s) missing"); + + if (map_add_del_psid (map_domain_index, psid, &tep, 1) != 0) + { + return clib_error_return (0, "Failing to add Mapping Rule"); + } + return 0; +} + +#if MAP_SKIP_IP6_LOOKUP +static clib_error_t * +map_pre_resolve_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + unformat_input_t _line_input, *line_input = &_line_input; + ip4_address_t ip4nh; + ip6_address_t ip6nh; + map_main_t *mm = &map_main; + + memset (&ip4nh, 0, sizeof (ip4nh)); + memset (&ip6nh, 0, sizeof (ip6nh)); + + /* Get a line of input. */ + if (!unformat_user (input, unformat_line_input, line_input)) + return 0; + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (line_input, "ip4-nh %U", unformat_ip4_address, &ip4nh)) + mm->preresolve_ip4 = ip4nh; + else + if (unformat (line_input, "ip6-nh %U", unformat_ip6_address, &ip6nh)) + mm->preresolve_ip6 = ip6nh; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + unformat_free (line_input); + + map_pre_resolve (&ip4nh, &ip6nh); + + return 0; +} +#endif + +static clib_error_t * +map_icmp_relay_source_address_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + unformat_input_t _line_input, *line_input = &_line_input; + ip4_address_t icmp_src_address; + map_main_t *mm = &map_main; + + mm->icmp4_src_address.as_u32 = 0; + + /* Get a line of input. */ + if (!unformat_user (input, unformat_line_input, line_input)) + return 0; + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) + { + if (unformat + (line_input, "%U", unformat_ip4_address, &icmp_src_address)) + mm->icmp4_src_address = icmp_src_address; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + unformat_free (line_input); + + return 0; +} + +static clib_error_t * +map_icmp_unreachables_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + unformat_input_t _line_input, *line_input = &_line_input; + map_main_t *mm = &map_main; + int num_m_args = 0; + + /* Get a line of input. */ + if (!unformat_user (input, unformat_line_input, line_input)) + return 0; + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) + { + num_m_args++; + if (unformat (line_input, "on")) + mm->icmp6_enabled = true; + else if (unformat (line_input, "off")) + mm->icmp6_enabled = false; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + unformat_free (line_input); + + + if (num_m_args != 1) + return clib_error_return (0, "mandatory argument(s) missing"); + + return 0; +} + +static clib_error_t * +map_fragment_command_fn (vlib_main_t * vm, + unformat_input_t * input, vlib_cli_command_t * cmd) +{ + unformat_input_t _line_input, *line_input = &_line_input; + map_main_t *mm = &map_main; + + /* Get a line of input. 
*/ + if (!unformat_user (input, unformat_line_input, line_input)) + return 0; + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (line_input, "inner")) + mm->frag_inner = true; + else if (unformat (line_input, "outer")) + mm->frag_inner = false; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + unformat_free (line_input); + + return 0; +} + +static clib_error_t * +map_fragment_df_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + unformat_input_t _line_input, *line_input = &_line_input; + map_main_t *mm = &map_main; + + /* Get a line of input. */ + if (!unformat_user (input, unformat_line_input, line_input)) + return 0; + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (line_input, "on")) + mm->frag_ignore_df = true; + else if (unformat (line_input, "off")) + mm->frag_ignore_df = false; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + unformat_free (line_input); + + return 0; +} + +static clib_error_t * +map_traffic_class_command_fn (vlib_main_t * vm, + unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + unformat_input_t _line_input, *line_input = &_line_input; + map_main_t *mm = &map_main; + u32 tc = 0; + + mm->tc_copy = false; + + /* Get a line of input. */ + if (!unformat_user (input, unformat_line_input, line_input)) + return 0; + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (line_input, "copy")) + mm->tc_copy = true; + else if (unformat (line_input, "%x", &tc)) + mm->tc = tc & 0xff; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + unformat_free (line_input); + + return 0; +} + +static u8 * +format_map_domain (u8 * s, va_list * args) +{ + map_domain_t *d = va_arg (*args, map_domain_t *); + bool counters = va_arg (*args, int); + map_main_t *mm = &map_main; + ip6_address_t ip6_prefix; + + if (d->rules) + memset (&ip6_prefix, 0, sizeof (ip6_prefix)); + else + ip6_prefix = d->ip6_prefix; + + s = format (s, + "[%d] ip4-pfx %U/%d ip6-pfx %U/%d ip6-src %U/%d ea_bits_len %d psid-offset %d psid-len %d mtu %d %s", + d - mm->domains, + format_ip4_address, &d->ip4_prefix, d->ip4_prefix_len, + format_ip6_address, &ip6_prefix, d->ip6_prefix_len, + format_ip6_address, &d->ip6_src, d->ip6_src_len, + d->ea_bits_len, d->psid_offset, d->psid_length, d->mtu, + (d->flags & MAP_DOMAIN_TRANSLATION) ? 
"map-t" : ""); + + if (counters) + { + map_domain_counter_lock (mm); + vlib_counter_t v; + vlib_get_combined_counter (&mm->domain_counters[MAP_DOMAIN_COUNTER_TX], + d - mm->domains, &v); + s = format (s, " TX: %lld/%lld", v.packets, v.bytes); + vlib_get_combined_counter (&mm->domain_counters[MAP_DOMAIN_COUNTER_RX], + d - mm->domains, &v); + s = format (s, " RX: %lld/%lld", v.packets, v.bytes); + map_domain_counter_unlock (mm); + } + s = format (s, "\n"); + + if (d->rules) + { + int i; + ip6_address_t dst; + for (i = 0; i < (0x1 << d->psid_length); i++) + { + dst = d->rules[i]; + if (dst.as_u64[0] == 0 && dst.as_u64[1] == 0) + continue; + s = format (s, + " rule psid: %d ip6-dst %U\n", i, format_ip6_address, + &dst); + } + } + return s; +} + +static u8 * +format_map_ip4_reass (u8 * s, va_list * args) +{ + map_main_t *mm = &map_main; + map_ip4_reass_t *r = va_arg (*args, map_ip4_reass_t *); + map_ip4_reass_key_t *k = &r->key; + f64 now = vlib_time_now (mm->vlib_main); + f64 lifetime = (((f64) mm->ip4_reass_conf_lifetime_ms) / 1000); + f64 dt = (r->ts + lifetime > now) ? (r->ts + lifetime - now) : -1; + s = format (s, + "ip4-reass src=%U dst=%U protocol=%d identifier=%d port=%d lifetime=%.3lf\n", + format_ip4_address, &k->src.as_u8, format_ip4_address, + &k->dst.as_u8, k->protocol, + clib_net_to_host_u16 (k->fragment_id), + (r->port >= 0) ? clib_net_to_host_u16 (r->port) : -1, dt); + return s; +} + +static u8 * +format_map_ip6_reass (u8 * s, va_list * args) +{ + map_main_t *mm = &map_main; + map_ip6_reass_t *r = va_arg (*args, map_ip6_reass_t *); + map_ip6_reass_key_t *k = &r->key; + f64 now = vlib_time_now (mm->vlib_main); + f64 lifetime = (((f64) mm->ip6_reass_conf_lifetime_ms) / 1000); + f64 dt = (r->ts + lifetime > now) ? (r->ts + lifetime - now) : -1; + s = format (s, + "ip6-reass src=%U dst=%U protocol=%d identifier=%d lifetime=%.3lf\n", + format_ip6_address, &k->src.as_u8, format_ip6_address, + &k->dst.as_u8, k->protocol, + clib_net_to_host_u32 (k->fragment_id), dt); + return s; +} + +static clib_error_t * +show_map_domain_command_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + unformat_input_t _line_input, *line_input = &_line_input; + map_main_t *mm = &map_main; + map_domain_t *d; + bool counters = false; + u32 map_domain_index = ~0; + + /* Get a line of input. 
*/ + if (!unformat_user (input, unformat_line_input, line_input)) + return 0; + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (line_input, "counters")) + counters = true; + else if (unformat (line_input, "index %d", &map_domain_index)) + ; + else + return clib_error_return (0, "unknown input `%U'", + format_unformat_error, input); + } + unformat_free (line_input); + + if (pool_elts (mm->domains) == 0) + vlib_cli_output (vm, "No MAP domains are configured..."); + + if (map_domain_index == ~0) + { + /* *INDENT-OFF* */ + pool_foreach(d, mm->domains, ({vlib_cli_output(vm, "%U", format_map_domain, d, counters);})); + /* *INDENT-ON* */ + } + else + { + if (pool_is_free_index (mm->domains, map_domain_index)) + { + return clib_error_return (0, "MAP domain does not exists %d", + map_domain_index); + } + + d = pool_elt_at_index (mm->domains, map_domain_index); + vlib_cli_output (vm, "%U", format_map_domain, d, counters); + } + + return 0; +} + +static clib_error_t * +show_map_fragments_command_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + map_main_t *mm = &map_main; + map_ip4_reass_t *f4; + map_ip6_reass_t *f6; + + /* *INDENT-OFF* */ + pool_foreach(f4, mm->ip4_reass_pool, ({vlib_cli_output (vm, "%U", format_map_ip4_reass, f4);})); + /* *INDENT-ON* */ + /* *INDENT-OFF* */ + pool_foreach(f6, mm->ip6_reass_pool, ({vlib_cli_output (vm, "%U", format_map_ip6_reass, f6);})); + /* *INDENT-ON* */ + return (0); +} + +u64 +map_error_counter_get (u32 node_index, map_error_t map_error) +{ + vlib_main_t *vm = vlib_get_main (); + vlib_node_runtime_t *error_node = vlib_node_get_runtime (vm, node_index); + vlib_error_main_t *em = &vm->error_main; + vlib_error_t e = error_node->errors[map_error]; + vlib_node_t *n = vlib_get_node (vm, node_index); + u32 ci; + + ci = vlib_error_get_code (e); + ASSERT (ci < n->n_errors); + ci += n->error_heap_index; + + return (em->counters[ci]); +} + +static clib_error_t * +show_map_stats_command_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + map_main_t *mm = &map_main; + map_domain_t *d; + int domains = 0, rules = 0, domaincount = 0, rulecount = 0; + if (pool_elts (mm->domains) == 0) + vlib_cli_output (vm, "No MAP domains are configured..."); + + /* *INDENT-OFF* */ + pool_foreach(d, mm->domains, ({ + if (d->rules) { + rulecount+= 0x1 << d->psid_length; + rules += sizeof(ip6_address_t) * 0x1 << d->psid_length; + } + domains += sizeof(*d); + domaincount++; + })); + /* *INDENT-ON* */ + + vlib_cli_output (vm, "MAP domains structure: %d\n", sizeof (map_domain_t)); + vlib_cli_output (vm, "MAP domains: %d (%d bytes)\n", domaincount, domains); + vlib_cli_output (vm, "MAP rules: %d (%d bytes)\n", rulecount, rules); + vlib_cli_output (vm, "Total: %d bytes)\n", rules + domains); + +#if MAP_SKIP_IP6_LOOKUP + vlib_cli_output (vm, + "MAP pre-resolve: IP6 next-hop: %U (%u), IP4 next-hop: %U (%u)\n", + format_ip6_address, &mm->preresolve_ip6, mm->adj6_index, + format_ip4_address, &mm->preresolve_ip4, mm->adj4_index); +#endif + + if (mm->tc_copy) + vlib_cli_output (vm, "MAP traffic-class: copy"); + else + vlib_cli_output (vm, "MAP traffic-class: %x", mm->tc); + + vlib_cli_output (vm, + "MAP IPv6 inbound security check: %s, fragmented packet security check: %s", + mm->sec_check ? "enabled" : "disabled", + mm->sec_check_frag ? 
"enabled" : "disabled"); + + vlib_cli_output (vm, "ICMP-relay IPv4 source address: %U\n", + format_ip4_address, &mm->icmp4_src_address); + vlib_cli_output (vm, "ICMP6 unreachables sent for unmatched packets: %s\n", + mm->icmp6_enabled ? "enabled" : "disabled"); + vlib_cli_output (vm, "Inner fragmentation: %s\n", + mm->frag_inner ? "enabled" : "disabled"); + vlib_cli_output (vm, "Fragment packets regardless of DF flag: %s\n", + mm->frag_ignore_df ? "enabled" : "disabled"); + + /* + * Counters + */ + vlib_combined_counter_main_t *cm = mm->domain_counters; + u64 total_pkts[MAP_N_DOMAIN_COUNTER]; + u64 total_bytes[MAP_N_DOMAIN_COUNTER]; + int which, i; + vlib_counter_t v; + + memset (total_pkts, 0, sizeof (total_pkts)); + memset (total_bytes, 0, sizeof (total_bytes)); + + map_domain_counter_lock (mm); + vec_foreach (cm, mm->domain_counters) + { + which = cm - mm->domain_counters; + + for (i = 0; i < vec_len (cm->maxi); i++) + { + vlib_get_combined_counter (cm, i, &v); + total_pkts[which] += v.packets; + total_bytes[which] += v.bytes; + } + } + map_domain_counter_unlock (mm); + + vlib_cli_output (vm, "Encapsulated packets: %lld bytes: %lld\n", + total_pkts[MAP_DOMAIN_COUNTER_TX], + total_bytes[MAP_DOMAIN_COUNTER_TX]); + vlib_cli_output (vm, "Decapsulated packets: %lld bytes: %lld\n", + total_pkts[MAP_DOMAIN_COUNTER_RX], + total_bytes[MAP_DOMAIN_COUNTER_RX]); + + vlib_cli_output (vm, "ICMP relayed packets: %d\n", + vlib_get_simple_counter (&mm->icmp_relayed, 0)); + + return 0; +} + +static clib_error_t * +map_params_reass_command_fn (vlib_main_t * vm, unformat_input_t * input, + vlib_cli_command_t * cmd) +{ + unformat_input_t _line_input, *line_input = &_line_input; + u32 lifetime = ~0; + f64 ht_ratio = (MAP_IP4_REASS_CONF_HT_RATIO_MAX + 1); + u32 pool_size = ~0; + u64 buffers = ~(0ull); + u8 ip4 = 0, ip6 = 0; + + if (!unformat_user (input, unformat_line_input, line_input)) + return 0; + + while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) + { + if (unformat (line_input, "lifetime %u", &lifetime)) + ; + else if (unformat (line_input, "ht-ratio %lf", &ht_ratio)) + ; + else if (unformat (line_input, "pool-size %u", &pool_size)) + ; + else if (unformat (line_input, "buffers %llu", &buffers)) + ; + else if (unformat (line_input, "ip4")) + ip4 = 1; + else if (unformat (line_input, "ip6")) + ip6 = 1; + else + { + unformat_free (line_input); + return clib_error_return (0, "invalid input"); + } + } + unformat_free (line_input); + + if (!ip4 && !ip6) + return clib_error_return (0, "must specify ip4 and/or ip6"); + + if (ip4) + { + if (pool_size != ~0 && pool_size > MAP_IP4_REASS_CONF_POOL_SIZE_MAX) + return clib_error_return (0, "invalid ip4-reass pool-size ( > %d)", + MAP_IP4_REASS_CONF_POOL_SIZE_MAX); + if (ht_ratio != (MAP_IP4_REASS_CONF_HT_RATIO_MAX + 1) + && ht_ratio > MAP_IP4_REASS_CONF_HT_RATIO_MAX) + return clib_error_return (0, "invalid ip4-reass ht-ratio ( > %d)", + MAP_IP4_REASS_CONF_HT_RATIO_MAX); + if (lifetime != ~0 && lifetime > MAP_IP4_REASS_CONF_LIFETIME_MAX) + return clib_error_return (0, "invalid ip4-reass lifetime ( > %d)", + MAP_IP4_REASS_CONF_LIFETIME_MAX); + if (buffers != ~(0ull) && buffers > MAP_IP4_REASS_CONF_BUFFERS_MAX) + return clib_error_return (0, "invalid ip4-reass buffers ( > %ld)", + MAP_IP4_REASS_CONF_BUFFERS_MAX); + } + + if (ip6) + { + if (pool_size != ~0 && pool_size > MAP_IP6_REASS_CONF_POOL_SIZE_MAX) + return clib_error_return (0, "invalid ip6-reass pool-size ( > %d)", + MAP_IP6_REASS_CONF_POOL_SIZE_MAX); + if (ht_ratio != 
(MAP_IP4_REASS_CONF_HT_RATIO_MAX + 1) + && ht_ratio > MAP_IP6_REASS_CONF_HT_RATIO_MAX) + return clib_error_return (0, "invalid ip6-reass ht-log2len ( > %d)", + MAP_IP6_REASS_CONF_HT_RATIO_MAX); + if (lifetime != ~0 && lifetime > MAP_IP6_REASS_CONF_LIFETIME_MAX) + return clib_error_return (0, "invalid ip6-reass lifetime ( > %d)", + MAP_IP6_REASS_CONF_LIFETIME_MAX); + if (buffers != ~(0ull) && buffers > MAP_IP6_REASS_CONF_BUFFERS_MAX) + return clib_error_return (0, "invalid ip6-reass buffers ( > %ld)", + MAP_IP6_REASS_CONF_BUFFERS_MAX); + } + + if (ip4) + { + u32 reass = 0, packets = 0; + if (pool_size != ~0) + { + if (map_ip4_reass_conf_pool_size (pool_size, &reass, &packets)) + { + vlib_cli_output (vm, "Could not set ip4-reass pool-size"); + } + else + { + vlib_cli_output (vm, + "Setting ip4-reass pool-size (destroyed-reassembly=%u , dropped-fragments=%u)", + reass, packets); + } + } + if (ht_ratio != (MAP_IP4_REASS_CONF_HT_RATIO_MAX + 1)) + { + if (map_ip4_reass_conf_ht_ratio (ht_ratio, &reass, &packets)) + { + vlib_cli_output (vm, "Could not set ip4-reass ht-log2len"); + } + else + { + vlib_cli_output (vm, + "Setting ip4-reass ht-log2len (destroyed-reassembly=%u , dropped-fragments=%u)", + reass, packets); + } + } + if (lifetime != ~0) + { + if (map_ip4_reass_conf_lifetime (lifetime)) + vlib_cli_output (vm, "Could not set ip4-reass lifetime"); + else + vlib_cli_output (vm, "Setting ip4-reass lifetime"); + } + if (buffers != ~(0ull)) + { + if (map_ip4_reass_conf_buffers (buffers)) + vlib_cli_output (vm, "Could not set ip4-reass buffers"); + else + vlib_cli_output (vm, "Setting ip4-reass buffers"); + } + + if (map_main.ip4_reass_conf_buffers > + map_main.ip4_reass_conf_pool_size * + MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY) + { + vlib_cli_output (vm, + "Note: 'ip4-reass buffers' > pool-size * max-fragments-per-reassembly."); + } + } + + if (ip6) + { + u32 reass = 0, packets = 0; + if (pool_size != ~0) + { + if (map_ip6_reass_conf_pool_size (pool_size, &reass, &packets)) + { + vlib_cli_output (vm, "Could not set ip6-reass pool-size"); + } + else + { + vlib_cli_output (vm, + "Setting ip6-reass pool-size (destroyed-reassembly=%u , dropped-fragments=%u)", + reass, packets); + } + } + if (ht_ratio != (MAP_IP4_REASS_CONF_HT_RATIO_MAX + 1)) + { + if (map_ip6_reass_conf_ht_ratio (ht_ratio, &reass, &packets)) + { + vlib_cli_output (vm, "Could not set ip6-reass ht-log2len"); + } + else + { + vlib_cli_output (vm, + "Setting ip6-reass ht-log2len (destroyed-reassembly=%u , dropped-fragments=%u)", + reass, packets); + } + } + if (lifetime != ~0) + { + if (map_ip6_reass_conf_lifetime (lifetime)) + vlib_cli_output (vm, "Could not set ip6-reass lifetime"); + else + vlib_cli_output (vm, "Setting ip6-reass lifetime"); + } + if (buffers != ~(0ull)) + { + if (map_ip6_reass_conf_buffers (buffers)) + vlib_cli_output (vm, "Could not set ip6-reass buffers"); + else + vlib_cli_output (vm, "Setting ip6-reass buffers"); + } + + if (map_main.ip6_reass_conf_buffers > + map_main.ip6_reass_conf_pool_size * + MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY) + { + vlib_cli_output (vm, + "Note: 'ip6-reass buffers' > pool-size * max-fragments-per-reassembly."); + } + } + + return 0; +} + + +/* + * packet trace format function + */ +u8 * +format_map_trace (u8 * s, va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + map_trace_t *t = va_arg (*args, map_trace_t *); + u32 map_domain_index = t->map_domain_index; + u16 port = 
t->port; + + s = + format (s, "MAP domain index: %d L4 port: %u", map_domain_index, + clib_net_to_host_u16 (port)); + + return s; +} + +static_always_inline map_ip4_reass_t * +map_ip4_reass_lookup (map_ip4_reass_key_t * k, u32 bucket, f64 now) +{ + map_main_t *mm = &map_main; + u32 ri = mm->ip4_reass_hash_table[bucket]; + while (ri != MAP_REASS_INDEX_NONE) + { + map_ip4_reass_t *r = pool_elt_at_index (mm->ip4_reass_pool, ri); + if (r->key.as_u64[0] == k->as_u64[0] && + r->key.as_u64[1] == k->as_u64[1] && + now < r->ts + (((f64) mm->ip4_reass_conf_lifetime_ms) / 1000)) + { + return r; + } + ri = r->bucket_next; + } + return NULL; +} + +#define map_ip4_reass_pool_index(r) (r - map_main.ip4_reass_pool) + +void +map_ip4_reass_free (map_ip4_reass_t * r, u32 ** pi_to_drop) +{ + map_main_t *mm = &map_main; + map_ip4_reass_get_fragments (r, pi_to_drop); + + // Unlink in hash bucket + map_ip4_reass_t *r2 = NULL; + u32 r2i = mm->ip4_reass_hash_table[r->bucket]; + while (r2i != map_ip4_reass_pool_index (r)) + { + ASSERT (r2i != MAP_REASS_INDEX_NONE); + r2 = pool_elt_at_index (mm->ip4_reass_pool, r2i); + r2i = r2->bucket_next; + } + if (r2) + { + r2->bucket_next = r->bucket_next; + } + else + { + mm->ip4_reass_hash_table[r->bucket] = r->bucket_next; + } + + // Unlink in list + if (r->fifo_next == map_ip4_reass_pool_index (r)) + { + mm->ip4_reass_fifo_last = MAP_REASS_INDEX_NONE; + } + else + { + if (mm->ip4_reass_fifo_last == map_ip4_reass_pool_index (r)) + mm->ip4_reass_fifo_last = r->fifo_prev; + pool_elt_at_index (mm->ip4_reass_pool, r->fifo_prev)->fifo_next = + r->fifo_next; + pool_elt_at_index (mm->ip4_reass_pool, r->fifo_next)->fifo_prev = + r->fifo_prev; + } + + pool_put (mm->ip4_reass_pool, r); + mm->ip4_reass_allocated--; +} + +map_ip4_reass_t * +map_ip4_reass_get (u32 src, u32 dst, u16 fragment_id, + u8 protocol, u32 ** pi_to_drop) +{ + map_ip4_reass_t *r; + map_main_t *mm = &map_main; + map_ip4_reass_key_t k = {.src.data_u32 = src, + .dst.data_u32 = dst, + .fragment_id = fragment_id, + .protocol = protocol + }; + + u32 h = 0; + h = crc_u32 (k.as_u32[0], h); + h = crc_u32 (k.as_u32[1], h); + h = crc_u32 (k.as_u32[2], h); + h = crc_u32 (k.as_u32[3], h); + h = h >> (32 - mm->ip4_reass_ht_log2len); + + f64 now = vlib_time_now (mm->vlib_main); + + //Cache garbage collection + while (mm->ip4_reass_fifo_last != MAP_REASS_INDEX_NONE) + { + map_ip4_reass_t *last = + pool_elt_at_index (mm->ip4_reass_pool, mm->ip4_reass_fifo_last); + if (last->ts + (((f64) mm->ip4_reass_conf_lifetime_ms) / 1000) < now) + map_ip4_reass_free (last, pi_to_drop); + else + break; + } + + if ((r = map_ip4_reass_lookup (&k, h, now))) + return r; + + if (mm->ip4_reass_allocated >= mm->ip4_reass_conf_pool_size) + return NULL; + + pool_get (mm->ip4_reass_pool, r); + mm->ip4_reass_allocated++; + int i; + for (i = 0; i < MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++) + r->fragments[i] = ~0; + + u32 ri = map_ip4_reass_pool_index (r); + + //Link in new bucket + r->bucket = h; + r->bucket_next = mm->ip4_reass_hash_table[h]; + mm->ip4_reass_hash_table[h] = ri; + + //Link in fifo + if (mm->ip4_reass_fifo_last != MAP_REASS_INDEX_NONE) + { + r->fifo_next = + pool_elt_at_index (mm->ip4_reass_pool, + mm->ip4_reass_fifo_last)->fifo_next; + r->fifo_prev = mm->ip4_reass_fifo_last; + pool_elt_at_index (mm->ip4_reass_pool, r->fifo_prev)->fifo_next = ri; + pool_elt_at_index (mm->ip4_reass_pool, r->fifo_next)->fifo_prev = ri; + } + else + { + r->fifo_next = r->fifo_prev = ri; + mm->ip4_reass_fifo_last = ri; + } + + //Set other fields + r->ts = 
now; + r->key = k; + r->port = -1; +#ifdef MAP_IP4_REASS_COUNT_BYTES + r->expected_total = 0xffff; + r->forwarded = 0; +#endif + + return r; +} + +int +map_ip4_reass_add_fragment (map_ip4_reass_t * r, u32 pi) +{ + if (map_main.ip4_reass_buffered_counter >= map_main.ip4_reass_conf_buffers) + return -1; + + int i; + for (i = 0; i < MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++) + if (r->fragments[i] == ~0) + { + r->fragments[i] = pi; + map_main.ip4_reass_buffered_counter++; + return 0; + } + return -1; +} + +static_always_inline map_ip6_reass_t * +map_ip6_reass_lookup (map_ip6_reass_key_t * k, u32 bucket, f64 now) +{ + map_main_t *mm = &map_main; + u32 ri = mm->ip6_reass_hash_table[bucket]; + while (ri != MAP_REASS_INDEX_NONE) + { + map_ip6_reass_t *r = pool_elt_at_index (mm->ip6_reass_pool, ri); + if (now < r->ts + (((f64) mm->ip6_reass_conf_lifetime_ms) / 1000) && + r->key.as_u64[0] == k->as_u64[0] && + r->key.as_u64[1] == k->as_u64[1] && + r->key.as_u64[2] == k->as_u64[2] && + r->key.as_u64[3] == k->as_u64[3] && + r->key.as_u64[4] == k->as_u64[4]) + return r; + ri = r->bucket_next; + } + return NULL; +} + +#define map_ip6_reass_pool_index(r) (r - map_main.ip6_reass_pool) + +void +map_ip6_reass_free (map_ip6_reass_t * r, u32 ** pi_to_drop) +{ + map_main_t *mm = &map_main; + int i; + for (i = 0; i < MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++) + if (r->fragments[i].pi != ~0) + { + vec_add1 (*pi_to_drop, r->fragments[i].pi); + r->fragments[i].pi = ~0; + map_main.ip6_reass_buffered_counter--; + } + + // Unlink in hash bucket + map_ip6_reass_t *r2 = NULL; + u32 r2i = mm->ip6_reass_hash_table[r->bucket]; + while (r2i != map_ip6_reass_pool_index (r)) + { + ASSERT (r2i != MAP_REASS_INDEX_NONE); + r2 = pool_elt_at_index (mm->ip6_reass_pool, r2i); + r2i = r2->bucket_next; + } + if (r2) + { + r2->bucket_next = r->bucket_next; + } + else + { + mm->ip6_reass_hash_table[r->bucket] = r->bucket_next; + } + + // Unlink in list + if (r->fifo_next == map_ip6_reass_pool_index (r)) + { + //Single element in the list, list is now empty + mm->ip6_reass_fifo_last = MAP_REASS_INDEX_NONE; + } + else + { + if (mm->ip6_reass_fifo_last == map_ip6_reass_pool_index (r)) //First element + mm->ip6_reass_fifo_last = r->fifo_prev; + pool_elt_at_index (mm->ip6_reass_pool, r->fifo_prev)->fifo_next = + r->fifo_next; + pool_elt_at_index (mm->ip6_reass_pool, r->fifo_next)->fifo_prev = + r->fifo_prev; + } + + // Free from pool if necessary + pool_put (mm->ip6_reass_pool, r); + mm->ip6_reass_allocated--; +} + +map_ip6_reass_t * +map_ip6_reass_get (ip6_address_t * src, ip6_address_t * dst, u32 fragment_id, + u8 protocol, u32 ** pi_to_drop) +{ + map_ip6_reass_t *r; + map_main_t *mm = &map_main; + map_ip6_reass_key_t k = { + .src = *src, + .dst = *dst, + .fragment_id = fragment_id, + .protocol = protocol + }; + + u32 h = 0; + int i; + for (i = 0; i < 10; i++) + h = crc_u32 (k.as_u32[i], h); + h = h >> (32 - mm->ip6_reass_ht_log2len); + + f64 now = vlib_time_now (mm->vlib_main); + + //Cache garbage collection + while (mm->ip6_reass_fifo_last != MAP_REASS_INDEX_NONE) + { + map_ip6_reass_t *last = + pool_elt_at_index (mm->ip6_reass_pool, mm->ip6_reass_fifo_last); + if (last->ts + (((f64) mm->ip6_reass_conf_lifetime_ms) / 1000) < now) + map_ip6_reass_free (last, pi_to_drop); + else + break; + } + + if ((r = map_ip6_reass_lookup (&k, h, now))) + return r; + + if (mm->ip6_reass_allocated >= mm->ip6_reass_conf_pool_size) + return NULL; + + pool_get (mm->ip6_reass_pool, r); + mm->ip6_reass_allocated++; + for (i = 0; i < 
MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++) + { + r->fragments[i].pi = ~0; + r->fragments[i].next_data_len = 0; + r->fragments[i].next_data_offset = 0; + } + + u32 ri = map_ip6_reass_pool_index (r); + + //Link in new bucket + r->bucket = h; + r->bucket_next = mm->ip6_reass_hash_table[h]; + mm->ip6_reass_hash_table[h] = ri; + + //Link in fifo + if (mm->ip6_reass_fifo_last != MAP_REASS_INDEX_NONE) + { + r->fifo_next = + pool_elt_at_index (mm->ip6_reass_pool, + mm->ip6_reass_fifo_last)->fifo_next; + r->fifo_prev = mm->ip6_reass_fifo_last; + pool_elt_at_index (mm->ip6_reass_pool, r->fifo_prev)->fifo_next = ri; + pool_elt_at_index (mm->ip6_reass_pool, r->fifo_next)->fifo_prev = ri; + } + else + { + r->fifo_next = r->fifo_prev = ri; + mm->ip6_reass_fifo_last = ri; + } + + //Set other fields + r->ts = now; + r->key = k; + r->ip4_header.ip_version_and_header_length = 0; +#ifdef MAP_IP6_REASS_COUNT_BYTES + r->expected_total = 0xffff; + r->forwarded = 0; +#endif + return r; +} + +int +map_ip6_reass_add_fragment (map_ip6_reass_t * r, u32 pi, + u16 data_offset, u16 next_data_offset, + u8 * data_start, u16 data_len) +{ + map_ip6_fragment_t *f = NULL, *prev_f = NULL; + u16 copied_len = (data_len > 20) ? 20 : data_len; + + if (map_main.ip6_reass_buffered_counter >= map_main.ip6_reass_conf_buffers) + return -1; + + //Lookup for fragments for the current buffer + //and the one before that + int i; + for (i = 0; i < MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++) + { + if (data_offset && r->fragments[i].next_data_offset == data_offset) + { + prev_f = &r->fragments[i]; // This is buffer for previous packet + } + else if (r->fragments[i].next_data_offset == next_data_offset) + { + f = &r->fragments[i]; // This is a buffer for the current packet + } + else if (r->fragments[i].next_data_offset == 0) + { //Available + if (f == NULL) + f = &r->fragments[i]; + else if (prev_f == NULL) + prev_f = &r->fragments[i]; + } + } + + if (!f || f->pi != ~0) + return -1; + + if (data_offset) + { + if (!prev_f) + return -1; + + clib_memcpy (prev_f->next_data, data_start, copied_len); + prev_f->next_data_len = copied_len; + prev_f->next_data_offset = data_offset; + } + else + { + if (((ip4_header_t *) data_start)->ip_version_and_header_length != 0x45) + return -1; + + if (r->ip4_header.ip_version_and_header_length == 0) + clib_memcpy (&r->ip4_header, data_start, sizeof (ip4_header_t)); + } + + if (data_len > 20) + { + f->next_data_offset = next_data_offset; + f->pi = pi; + map_main.ip6_reass_buffered_counter++; + } + return 0; +} + +void +map_ip4_reass_reinit (u32 * trashed_reass, u32 * dropped_packets) +{ + map_main_t *mm = &map_main; + int i; + + if (dropped_packets) + *dropped_packets = mm->ip4_reass_buffered_counter; + if (trashed_reass) + *trashed_reass = mm->ip4_reass_allocated; + if (mm->ip4_reass_fifo_last != MAP_REASS_INDEX_NONE) + { + u16 ri = mm->ip4_reass_fifo_last; + do + { + map_ip4_reass_t *r = pool_elt_at_index (mm->ip4_reass_pool, ri); + for (i = 0; i < MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++) + if (r->fragments[i] != ~0) + map_ip4_drop_pi (r->fragments[i]); + + ri = r->fifo_next; + pool_put (mm->ip4_reass_pool, r); + } + while (ri != mm->ip4_reass_fifo_last); + } + + vec_free (mm->ip4_reass_hash_table); + vec_resize (mm->ip4_reass_hash_table, 1 << mm->ip4_reass_ht_log2len); + for (i = 0; i < (1 << mm->ip4_reass_ht_log2len); i++) + mm->ip4_reass_hash_table[i] = MAP_REASS_INDEX_NONE; + pool_free (mm->ip4_reass_pool); + pool_alloc (mm->ip4_reass_pool, mm->ip4_reass_conf_pool_size); + + 
mm->ip4_reass_allocated = 0; + mm->ip4_reass_fifo_last = MAP_REASS_INDEX_NONE; + mm->ip4_reass_buffered_counter = 0; +} + +u8 +map_get_ht_log2len (f32 ht_ratio, u16 pool_size) +{ + u32 desired_size = (u32) (pool_size * ht_ratio); + u8 i; + for (i = 1; i < 31; i++) + if ((1 << i) >= desired_size) + return i; + return 4; +} + +int +map_ip4_reass_conf_ht_ratio (f32 ht_ratio, u32 * trashed_reass, + u32 * dropped_packets) +{ + map_main_t *mm = &map_main; + if (ht_ratio > MAP_IP4_REASS_CONF_HT_RATIO_MAX) + return -1; + + map_ip4_reass_lock (); + mm->ip4_reass_conf_ht_ratio = ht_ratio; + mm->ip4_reass_ht_log2len = + map_get_ht_log2len (ht_ratio, mm->ip4_reass_conf_pool_size); + map_ip4_reass_reinit (trashed_reass, dropped_packets); + map_ip4_reass_unlock (); + return 0; +} + +int +map_ip4_reass_conf_pool_size (u16 pool_size, u32 * trashed_reass, + u32 * dropped_packets) +{ + map_main_t *mm = &map_main; + if (pool_size > MAP_IP4_REASS_CONF_POOL_SIZE_MAX) + return -1; + + map_ip4_reass_lock (); + mm->ip4_reass_conf_pool_size = pool_size; + map_ip4_reass_reinit (trashed_reass, dropped_packets); + map_ip4_reass_unlock (); + return 0; +} + +int +map_ip4_reass_conf_lifetime (u16 lifetime_ms) +{ + map_main.ip4_reass_conf_lifetime_ms = lifetime_ms; + return 0; +} + +int +map_ip4_reass_conf_buffers (u32 buffers) +{ + map_main.ip4_reass_conf_buffers = buffers; + return 0; +} + +void +map_ip6_reass_reinit (u32 * trashed_reass, u32 * dropped_packets) +{ + map_main_t *mm = &map_main; + if (dropped_packets) + *dropped_packets = mm->ip6_reass_buffered_counter; + if (trashed_reass) + *trashed_reass = mm->ip6_reass_allocated; + int i; + if (mm->ip6_reass_fifo_last != MAP_REASS_INDEX_NONE) + { + u16 ri = mm->ip6_reass_fifo_last; + do + { + map_ip6_reass_t *r = pool_elt_at_index (mm->ip6_reass_pool, ri); + for (i = 0; i < MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++) + if (r->fragments[i].pi != ~0) + map_ip6_drop_pi (r->fragments[i].pi); + + ri = r->fifo_next; + pool_put (mm->ip6_reass_pool, r); + } + while (ri != mm->ip6_reass_fifo_last); + mm->ip6_reass_fifo_last = MAP_REASS_INDEX_NONE; + } + + vec_free (mm->ip6_reass_hash_table); + vec_resize (mm->ip6_reass_hash_table, 1 << mm->ip6_reass_ht_log2len); + for (i = 0; i < (1 << mm->ip6_reass_ht_log2len); i++) + mm->ip6_reass_hash_table[i] = MAP_REASS_INDEX_NONE; + pool_free (mm->ip6_reass_pool); + pool_alloc (mm->ip6_reass_pool, mm->ip4_reass_conf_pool_size); + + mm->ip6_reass_allocated = 0; + mm->ip6_reass_buffered_counter = 0; +} + +int +map_ip6_reass_conf_ht_ratio (f32 ht_ratio, u32 * trashed_reass, + u32 * dropped_packets) +{ + map_main_t *mm = &map_main; + if (ht_ratio > MAP_IP6_REASS_CONF_HT_RATIO_MAX) + return -1; + + map_ip6_reass_lock (); + mm->ip6_reass_conf_ht_ratio = ht_ratio; + mm->ip6_reass_ht_log2len = + map_get_ht_log2len (ht_ratio, mm->ip6_reass_conf_pool_size); + map_ip6_reass_reinit (trashed_reass, dropped_packets); + map_ip6_reass_unlock (); + return 0; +} + +int +map_ip6_reass_conf_pool_size (u16 pool_size, u32 * trashed_reass, + u32 * dropped_packets) +{ + map_main_t *mm = &map_main; + if (pool_size > MAP_IP6_REASS_CONF_POOL_SIZE_MAX) + return -1; + + map_ip6_reass_lock (); + mm->ip6_reass_conf_pool_size = pool_size; + map_ip6_reass_reinit (trashed_reass, dropped_packets); + map_ip6_reass_unlock (); + return 0; +} + +int +map_ip6_reass_conf_lifetime (u16 lifetime_ms) +{ + map_main.ip6_reass_conf_lifetime_ms = lifetime_ms; + return 0; +} + +int +map_ip6_reass_conf_buffers (u32 buffers) +{ + map_main.ip6_reass_conf_buffers = buffers; + return 
0; +} + +/* *INDENT-OFF* */ + +/*? + * Configure MAP reassembly behaviour + * + * @cliexpar + * @cliexstart{map params reassembly} + * @cliexend + ?*/ +VLIB_CLI_COMMAND(map_ip4_reass_lifetime_command, static) = { + .path = "map params reassembly", + .short_help = "map params reassembly [ip4 | ip6] [lifetime ] " + "[pool-size ] [buffers ] " + "[ht-ratio ]", + .function = map_params_reass_command_fn, +}; + +/*? + * Set or copy the IP TOS/Traffic Class field + * + * @cliexpar + * @cliexstart{map params traffic-class} + * + * This command is used to set the traffic-class field in translated + * or encapsulated packets. If copy is specifed (the default) then the + * traffic-class/TOS field is copied from the original packet to the + * translated / encapsulating header. + * @cliexend + ?*/ +VLIB_CLI_COMMAND(map_traffic_class_command, static) = { + .path = "map params traffic-class", + .short_help = "map params traffic-class {0x0-0xff | copy}", + .function = map_traffic_class_command_fn, +}; + +/*? + * Bypass IP4/IP6 lookup + * + * @cliexpar + * @cliexstart{map params pre-resolve} + * + * Bypass a second FIB lookup of the translated or encapsulated + * packet, and forward the packet directly to the specified + * next-hop. This optimization trades forwarding flexibility for + * performance. + * @cliexend + ?*/ +VLIB_CLI_COMMAND(map_pre_resolve_command, static) = { + .path = "map params pre-resolve", + .short_help = " map params pre-resolve {ip4-nh
} " + "| {ip6-nh
}", + .function = map_pre_resolve_command_fn, +}; + +/*? + * Enable or disable the MAP-E inbound security check + * + * @cliexpar + * @cliexstart{map params security-check} + * + * By default, a decapsulated packet's IPv4 source address will be + * verified against the outer header's IPv6 source address. Disabling + * this feature will allow IPv4 source address spoofing. + * @cliexend + ?*/ +VLIB_CLI_COMMAND(map_security_check_command, static) = { + .path = "map params security-check", + .short_help = "map params security-check on|off", + .function = map_security_check_command_fn, +}; + +/*? + * Specifiy the IPv4 source address used for relayed ICMP error messages + * + * @cliexpar + * @cliexstart{map params icmp source-address} + * + * This command specifies which IPv4 source address (must be local to + * the system), that is used for relayed received IPv6 ICMP error + * messages. + * @cliexend + ?*/ +VLIB_CLI_COMMAND(map_icmp_relay_source_address_command, static) = { + .path = "map params icmp source-address", + .short_help = "map params icmp source-address ", + .function = map_icmp_relay_source_address_command_fn, +}; + +/*? + * Send IPv6 ICMP unreachables + * + * @cliexpar + * @cliexstart{map params icmp6 unreachables} + * + * Send IPv6 ICMP unreachable messages back if security check fails or + * no MAP domain exists. + * @cliexend + ?*/ +VLIB_CLI_COMMAND(map_icmp_unreachables_command, static) = { + .path = "map params icmp6 unreachables", + .short_help = "map params icmp6 unreachables {on|off}", + .function = map_icmp_unreachables_command_fn, +}; + +/*? + * Configure MAP fragmentation behaviour + * + * @cliexpar + * @cliexstart{map params fragment} + * @cliexend + ?*/ +VLIB_CLI_COMMAND(map_fragment_command, static) = { + .path = "map params fragment", + .short_help = "map params fragment inner|outer", + .function = map_fragment_command_fn, +}; + +/*? + * Ignore the IPv4 Don't fragment bit + * + * @cliexpar + * @cliexstart{map params fragment ignore-df} + * + * Allows fragmentation of the IPv4 packet even if the DF bit is + * set. The choice between inner or outer fragmentation of tunnel + * packets is complicated. The benefit of inner fragmentation is that + * the ultimate endpoint must reassemble, instead of the tunnel + * endpoint. + * @cliexend + ?*/ +VLIB_CLI_COMMAND(map_fragment_df_command, static) = { + .path = "map params fragment ignore-df", + .short_help = "map params fragment ignore-df on|off", + .function = map_fragment_df_command_fn, +}; + +/*? + * Specifiy if the inbound security check should be done on fragments + * + * @cliexpar + * @cliexstart{map params security-check fragments} + * + * Typically the inbound on-decapsulation security check is only done + * on the first packet. The packet that contains the L4 + * information. While a security check on every fragment is possible, + * it has a cost. State must be created on the first fragment. + * @cliexend + ?*/ +VLIB_CLI_COMMAND(map_security_check_frag_command, static) = { + .path = "map params security-check fragments", + .short_help = "map params security-check fragments on|off", + .function = map_security_check_frag_command_fn, +}; + +/*? + * Add MAP domain + * + * @cliexpar + * @cliexstart{map add domain} + * @cliexend + ?*/ +VLIB_CLI_COMMAND(map_add_domain_command, static) = { + .path = "map add domain", + .short_help = "map add domain ip4-pfx ip6-pfx " + "ip6-src ea-bits-len psid-offset psid-len " + "[map-t] [mtu ]", + .function = map_add_domain_command_fn, +}; + +/*? 
+ * Add MAP rule to a domain + * + * @cliexpar + * @cliexstart{map add rule} + * @cliexend + ?*/ +VLIB_CLI_COMMAND(map_add_rule_command, static) = { + .path = "map add rule", + .short_help = "map add rule index psid ip6-dst ", + .function = map_add_rule_command_fn, +}; + +/*? + * Delete MAP domain + * + * @cliexpar + * @cliexstart{map del domain} + * @cliexend + ?*/ +VLIB_CLI_COMMAND(map_del_command, static) = { + .path = "map del domain", + .short_help = "map del domain index ", + .function = map_del_domain_command_fn, +}; + +/*? + * Show MAP domains + * + * @cliexpar + * @cliexstart{show map domain} + * @cliexend + ?*/ +VLIB_CLI_COMMAND(show_map_domain_command, static) = { + .path = "show map domain", + .short_help = "show map domain index [counters]", + .function = show_map_domain_command_fn, +}; + +/*? + * Show MAP statistics + * + * @cliexpar + * @cliexstart{show map stats} + * @cliexend + ?*/ +VLIB_CLI_COMMAND(show_map_stats_command, static) = { + .path = "show map stats", + .short_help = "show map stats", + .function = show_map_stats_command_fn, +}; + +/*? + * Show MAP fragmentation information + * + * @cliexpar + * @cliexstart{show map fragments} + * @cliexend + ?*/ +VLIB_CLI_COMMAND(show_map_fragments_command, static) = { + .path = "show map fragments", + .short_help = "show map fragments", + .function = show_map_fragments_command_fn, +}; +/* *INDENT-ON* */ + +/* + * map_init + */ +clib_error_t * +map_init (vlib_main_t * vm) +{ + map_main_t *mm = &map_main; + mm->vnet_main = vnet_get_main (); + mm->vlib_main = vm; + +#ifdef MAP_SKIP_IP6_LOOKUP + memset (&mm->preresolve_ip4, 0, sizeof (mm->preresolve_ip4)); + memset (&mm->preresolve_ip6, 0, sizeof (mm->preresolve_ip6)); + mm->adj4_index = 0; + mm->adj6_index = 0; +#endif + + /* traffic class */ + mm->tc = 0; + mm->tc_copy = true; + + /* Inbound security check */ + mm->sec_check = true; + mm->sec_check_frag = false; + + /* ICMP6 Type 1, Code 5 for security check failure */ + mm->icmp6_enabled = false; + + /* Inner or outer fragmentation */ + mm->frag_inner = false; + mm->frag_ignore_df = false; + + vec_validate (mm->domain_counters, MAP_N_DOMAIN_COUNTER - 1); + mm->domain_counters[MAP_DOMAIN_COUNTER_RX].name = "rx"; + mm->domain_counters[MAP_DOMAIN_COUNTER_TX].name = "tx"; + + vlib_validate_simple_counter (&mm->icmp_relayed, 0); + vlib_zero_simple_counter (&mm->icmp_relayed, 0); + + /* IP4 virtual reassembly */ + mm->ip4_reass_hash_table = 0; + mm->ip4_reass_pool = 0; + mm->ip4_reass_lock = + clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES); + mm->ip4_reass_conf_ht_ratio = MAP_IP4_REASS_HT_RATIO_DEFAULT; + mm->ip4_reass_conf_lifetime_ms = MAP_IP4_REASS_LIFETIME_DEFAULT; + mm->ip4_reass_conf_pool_size = MAP_IP4_REASS_POOL_SIZE_DEFAULT; + mm->ip4_reass_conf_buffers = MAP_IP4_REASS_BUFFERS_DEFAULT; + mm->ip4_reass_ht_log2len = + map_get_ht_log2len (mm->ip4_reass_conf_ht_ratio, + mm->ip4_reass_conf_pool_size); + mm->ip4_reass_fifo_last = MAP_REASS_INDEX_NONE; + map_ip4_reass_reinit (NULL, NULL); + + /* IP6 virtual reassembly */ + mm->ip6_reass_hash_table = 0; + mm->ip6_reass_pool = 0; + mm->ip6_reass_lock = + clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES); + mm->ip6_reass_conf_ht_ratio = MAP_IP6_REASS_HT_RATIO_DEFAULT; + mm->ip6_reass_conf_lifetime_ms = MAP_IP6_REASS_LIFETIME_DEFAULT; + mm->ip6_reass_conf_pool_size = MAP_IP6_REASS_POOL_SIZE_DEFAULT; + mm->ip6_reass_conf_buffers = MAP_IP6_REASS_BUFFERS_DEFAULT; + mm->ip6_reass_ht_log2len = + map_get_ht_log2len (mm->ip6_reass_conf_ht_ratio, + 
mm->ip6_reass_conf_pool_size); + mm->ip6_reass_fifo_last = MAP_REASS_INDEX_NONE; + map_ip6_reass_reinit (NULL, NULL); + + map_dpo_module_init (); + + return 0; +} + +VLIB_INIT_FUNCTION (map_init); + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/map/map.h b/src/vnet/map/map.h new file mode 100644 index 00000000..f446b739 --- /dev/null +++ b/src/vnet/map/map.h @@ -0,0 +1,591 @@ +/* + * Copyright (c) 2015 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAP_SKIP_IP6_LOOKUP 1 + +typedef enum +{ + MAP_SENDER, + MAP_RECEIVER +} map_dir_e; + +int map_create_domain (ip4_address_t * ip4_prefix, u8 ip4_prefix_len, + ip6_address_t * ip6_prefix, u8 ip6_prefix_len, + ip6_address_t * ip6_src, u8 ip6_src_len, + u8 ea_bits_len, u8 psid_offset, u8 psid_length, + u32 * map_domain_index, u16 mtu, u8 flags); +int map_delete_domain (u32 map_domain_index); +int map_add_del_psid (u32 map_domain_index, u16 psid, ip6_address_t * tep, + u8 is_add); +u8 *format_map_trace (u8 * s, va_list * args); +i32 ip4_get_port (ip4_header_t * ip, map_dir_e dir, u16 buffer_len); +i32 ip6_get_port (ip6_header_t * ip6, map_dir_e dir, u16 buffer_len); +u16 ip4_map_get_port (ip4_header_t * ip, map_dir_e dir); + +typedef enum __attribute__ ((__packed__)) +{ + MAP_DOMAIN_PREFIX = 1 << 0, MAP_DOMAIN_TRANSLATION = 1 << 1, // The domain uses MAP-T +} map_domain_flags_e; + +/** + * IP4 reassembly logic: + * One virtually reassembled flow requires a map_ip4_reass_t structure in order + * to keep the first-fragment port number and, optionally, cache out of sequence + * packets. + * There are up to MAP_IP4_REASS_MAX_REASSEMBLY such structures. + * When in use, those structures are stored in a hash table of MAP_IP4_REASS_BUCKETS buckets. + * When a new structure needs to be used, it is allocated from available ones. + * If there is no structure available, the oldest in use is selected and used if and + * only if it was first allocated more than MAP_IP4_REASS_LIFETIME seconds ago. + * In case no structure can be allocated, the fragment is dropped. 
+ */ + +#define MAP_IP4_REASS_LIFETIME_DEFAULT (100) /* ms */ +#define MAP_IP4_REASS_HT_RATIO_DEFAULT (1.0) +#define MAP_IP4_REASS_POOL_SIZE_DEFAULT 1024 // Number of reassembly structures +#define MAP_IP4_REASS_BUFFERS_DEFAULT 2048 + +#define MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY 5 // Number of fragment per reassembly + +#define MAP_IP6_REASS_LIFETIME_DEFAULT (100) /* ms */ +#define MAP_IP6_REASS_HT_RATIO_DEFAULT (1.0) +#define MAP_IP6_REASS_POOL_SIZE_DEFAULT 1024 // Number of reassembly structures +#define MAP_IP6_REASS_BUFFERS_DEFAULT 2048 + +#define MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY 5 + +#define MAP_IP6_REASS_COUNT_BYTES +#define MAP_IP4_REASS_COUNT_BYTES + +//#define IP6_MAP_T_OVERRIDE_TOS 0 + +/* + * This structure _MUST_ be no larger than a single cache line (64 bytes). + * If more space is needed make a union of ip6_prefix and *rules, those are mutually exclusive. + */ +typedef struct +{ + ip6_address_t ip6_src; + ip6_address_t ip6_prefix; + ip6_address_t *rules; + u32 suffix_mask; + ip4_address_t ip4_prefix; + u16 psid_mask; + u16 mtu; + map_domain_flags_e flags; + u8 ip6_prefix_len; + u8 ip6_src_len; + u8 ea_bits_len; + u8 psid_offset; + u8 psid_length; + + /* helpers */ + u8 psid_shift; + u8 suffix_shift; + u8 ea_shift; + + /* not used by forwarding */ + u8 ip4_prefix_len; +} map_domain_t; + +STATIC_ASSERT ((sizeof (map_domain_t) <= CLIB_CACHE_LINE_BYTES), + "MAP domain fits in one cacheline"); + +#define MAP_REASS_INDEX_NONE ((u16)0xffff) + +/* + * Hash key, padded out to 16 bytes for fast compare + */ +/* *INDENT-OFF* */ +typedef union { + CLIB_PACKED (struct { + ip4_address_t src; + ip4_address_t dst; + u16 fragment_id; + u8 protocol; + }); + u64 as_u64[2]; + u32 as_u32[4]; +} map_ip4_reass_key_t; +/* *INDENT-ON* */ + +typedef struct +{ + map_ip4_reass_key_t key; + f64 ts; +#ifdef MAP_IP4_REASS_COUNT_BYTES + u16 expected_total; + u16 forwarded; +#endif + i32 port; + u16 bucket; + u16 bucket_next; + u16 fifo_prev; + u16 fifo_next; + u32 fragments[MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY]; +} map_ip4_reass_t; + +/* + * MAP domain counters + */ +typedef enum +{ + /* Simple counters */ + MAP_DOMAIN_IPV4_FRAGMENT = 0, + /* Combined counters */ + MAP_DOMAIN_COUNTER_RX = 0, + MAP_DOMAIN_COUNTER_TX, + MAP_N_DOMAIN_COUNTER +} map_domain_counter_t; + +/* + * main_main_t + */ +/* *INDENT-OFF* */ +typedef union { + CLIB_PACKED (struct { + ip6_address_t src; + ip6_address_t dst; + u32 fragment_id; + u8 protocol; + }); + u64 as_u64[5]; + u32 as_u32[10]; +} map_ip6_reass_key_t; +/* *INDENT-OFF* */ + +typedef struct { + u32 pi; //Cached packet or ~0 + u16 next_data_offset; //The data offset of the additional 20 bytes or ~0 + u8 next_data_len; //Number of bytes ready to be copied (20 if not last fragment) + u8 next_data[20]; //The 20 additional bytes +} map_ip6_fragment_t; + +typedef struct { + map_ip6_reass_key_t key; + f64 ts; +#ifdef MAP_IP6_REASS_COUNT_BYTES + u16 expected_total; + u16 forwarded; +#endif + u16 bucket; //What hash bucket this element is linked in + u16 bucket_next; + u16 fifo_prev; + u16 fifo_next; + ip4_header_t ip4_header; + map_ip6_fragment_t fragments[MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY]; +} map_ip6_reass_t; + +typedef struct { + /* pool of MAP domains */ + map_domain_t *domains; + + /* MAP Domain packet/byte counters indexed by map domain index */ + vlib_simple_counter_main_t *simple_domain_counters; + vlib_combined_counter_main_t *domain_counters; + volatile u32 *counter_lock; + +#ifdef MAP_SKIP_IP6_LOOKUP + /* pre-presolve */ + u32 adj6_index, 
adj4_index; + ip4_address_t preresolve_ip4; + ip6_address_t preresolve_ip6; +#endif + + /* Traffic class: zero, copy (~0) or fixed value */ + u8 tc; + bool tc_copy; + + bool sec_check; /* Inbound security check */ + bool sec_check_frag; /* Inbound security check for (subsequent) fragments */ + bool icmp6_enabled; /* Send destination unreachable for security check failure */ + + /* ICMPv6 -> ICMPv4 relay parameters */ + ip4_address_t icmp4_src_address; + vlib_simple_counter_main_t icmp_relayed; + + /* convenience */ + vlib_main_t *vlib_main; + vnet_main_t *vnet_main; + + /* + * IPv4 encap and decap reassembly + */ + /* Configuration */ + f32 ip4_reass_conf_ht_ratio; //Size of ht is 2^ceil(log2(ratio*pool_size)) + u16 ip4_reass_conf_pool_size; //Max number of allocated reass structures + u16 ip4_reass_conf_lifetime_ms; //Time a reassembly struct is considered valid in ms + u32 ip4_reass_conf_buffers; //Maximum number of buffers used by ip4 reassembly + + /* Runtime */ + map_ip4_reass_t *ip4_reass_pool; + u8 ip4_reass_ht_log2len; //Hash table size is 2^log2len + u16 ip4_reass_allocated; + u16 *ip4_reass_hash_table; + u16 ip4_reass_fifo_last; + volatile u32 *ip4_reass_lock; + + /* Counters */ + u32 ip4_reass_buffered_counter; + + bool frag_inner; /* Inner or outer fragmentation */ + bool frag_ignore_df; /* Fragment (outer) packet even if DF is set */ + + /* + * IPv6 decap reassembly + */ + /* Configuration */ + f32 ip6_reass_conf_ht_ratio; //Size of ht is 2^ceil(log2(ratio*pool_size)) + u16 ip6_reass_conf_pool_size; //Max number of allocated reass structures + u16 ip6_reass_conf_lifetime_ms; //Time a reassembly struct is considered valid in ms + u32 ip6_reass_conf_buffers; //Maximum number of buffers used by ip6 reassembly + + /* Runtime */ + map_ip6_reass_t *ip6_reass_pool; + u8 ip6_reass_ht_log2len; //Hash table size is 2^log2len + u16 ip6_reass_allocated; + u16 *ip6_reass_hash_table; + u16 ip6_reass_fifo_last; + volatile u32 *ip6_reass_lock; + + /* Counters */ + u32 ip6_reass_buffered_counter; + +} map_main_t; + +/* + * MAP Error counters/messages + */ +#define foreach_map_error \ + /* Must be first. 
*/ \ + _(NONE, "valid MAP packets") \ + _(BAD_PROTOCOL, "bad protocol") \ + _(SEC_CHECK, "security check failed") \ + _(ENCAP_SEC_CHECK, "encap security check failed") \ + _(DECAP_SEC_CHECK, "decap security check failed") \ + _(ICMP, "unable to translate ICMP") \ + _(ICMP_RELAY, "unable to relay ICMP") \ + _(UNKNOWN, "unknown") \ + _(NO_BINDING, "no binding") \ + _(NO_DOMAIN, "no domain") \ + _(FRAGMENTED, "packet is a fragment") \ + _(FRAGMENT_MEMORY, "could not cache fragment") \ + _(FRAGMENT_MALFORMED, "fragment has unexpected format")\ + _(FRAGMENT_DROPPED, "dropped cached fragment") \ + _(MALFORMED, "malformed packet") \ + _(DF_SET, "can't fragment, DF set") + +typedef enum { +#define _(sym,str) MAP_ERROR_##sym, + foreach_map_error +#undef _ + MAP_N_ERROR, + } map_error_t; + +u64 map_error_counter_get(u32 node_index, map_error_t map_error); + +typedef struct { + u32 map_domain_index; + u16 port; +} map_trace_t; + +map_main_t map_main; + +extern vlib_node_registration_t ip4_map_node; +extern vlib_node_registration_t ip6_map_node; + +extern vlib_node_registration_t ip4_map_t_node; +extern vlib_node_registration_t ip4_map_t_fragmented_node; +extern vlib_node_registration_t ip4_map_t_tcp_udp_node; +extern vlib_node_registration_t ip4_map_t_icmp_node; + +extern vlib_node_registration_t ip6_map_t_node; +extern vlib_node_registration_t ip6_map_t_fragmented_node; +extern vlib_node_registration_t ip6_map_t_tcp_udp_node; +extern vlib_node_registration_t ip6_map_t_icmp_node; + +/* + * map_get_pfx + */ +static_always_inline u64 +map_get_pfx (map_domain_t *d, u32 addr, u16 port) +{ + u16 psid = (port >> d->psid_shift) & d->psid_mask; + + if (d->ea_bits_len == 0 && d->rules) + return clib_net_to_host_u64(d->rules[psid].as_u64[0]); + + u32 suffix = (addr >> d->suffix_shift) & d->suffix_mask; + u64 ea = d->ea_bits_len == 0 ? 0 : (((u64) suffix << d->psid_length)) | psid; + + return clib_net_to_host_u64(d->ip6_prefix.as_u64[0]) | ea << d->ea_shift; +} + +static_always_inline u64 +map_get_pfx_net (map_domain_t *d, u32 addr, u16 port) +{ + return clib_host_to_net_u64(map_get_pfx(d, clib_net_to_host_u32(addr), + clib_net_to_host_u16(port))); +} + +/* + * map_get_sfx + */ +static_always_inline u64 +map_get_sfx (map_domain_t *d, u32 addr, u16 port) +{ + u16 psid = (port >> d->psid_shift) & d->psid_mask; + + /* Shared 1:1 mode. */ + if (d->ea_bits_len == 0 && d->rules) + return clib_net_to_host_u64(d->rules[psid].as_u64[1]); + if (d->ip6_prefix_len == 128) + return clib_net_to_host_u64(d->ip6_prefix.as_u64[1]); + + /* IPv4 prefix */ + if (d->flags & MAP_DOMAIN_PREFIX) + return (u64) (addr & (0xFFFFFFFF << d->suffix_shift)) << 16; + + /* Shared or full IPv4 address */ + return ((u64) addr << 16) | psid; +} + +static_always_inline u64 +map_get_sfx_net (map_domain_t *d, u32 addr, u16 port) +{ + return clib_host_to_net_u64(map_get_sfx(d, clib_net_to_host_u32(addr), + clib_net_to_host_u16(port))); +} + +static_always_inline u32 +map_get_ip4 (ip6_address_t *addr) +{ + return clib_host_to_net_u32(clib_net_to_host_u64(addr->as_u64[1]) >> 16); +} + +/* + * Get the MAP domain from an IPv4 lookup adjacency. + */ +static_always_inline map_domain_t * +ip4_map_get_domain (u32 mdi, + u32 *map_domain_index) +{ + map_main_t *mm = &map_main; + map_dpo_t *md; + + md = map_dpo_get(mdi); + + ASSERT(md); + *map_domain_index = md->md_domain; + return pool_elt_at_index(mm->domains, *map_domain_index); +} + +/* + * Get the MAP domain from an IPv6 lookup adjacency. 
+ * If the IPv6 address or prefix is not shared, no lookup is required. + * The IPv4 address is used otherwise. + */ +static_always_inline map_domain_t * +ip6_map_get_domain (u32 mdi, ip4_address_t *addr, + u32 *map_domain_index, u8 *error) +{ + map_main_t *mm = &map_main; + map_dpo_t *md; + + /* + * Disable direct MAP domain lookup on decap, until the security check is updated to verify IPv4 SA. + * (That's done implicitly when MAP domain is looked up in the IPv4 FIB) + */ +#ifdef MAP_NONSHARED_DOMAIN_ENABLED + md = map_dpo_get(mdi); + + ASSERT(md); + *map_domain_index = md->md_domain; + if (*map_domain_index != ~0) + return pool_elt_at_index(mm->domains, *map_domain_index); +#endif + + u32 lbi = ip4_fib_forwarding_lookup(0, addr); + const dpo_id_t *dpo = load_balance_get_bucket(lbi, 0); + if (PREDICT_TRUE(dpo->dpoi_type == map_dpo_type || + dpo->dpoi_type == map_t_dpo_type)) + { + md = map_dpo_get(dpo->dpoi_index); + *map_domain_index = md->md_domain; + return pool_elt_at_index(mm->domains, *map_domain_index); + } + *error = MAP_ERROR_NO_DOMAIN; + return NULL; +} + +map_ip4_reass_t * +map_ip4_reass_get(u32 src, u32 dst, u16 fragment_id, + u8 protocol, u32 **pi_to_drop); +void +map_ip4_reass_free(map_ip4_reass_t *r, u32 **pi_to_drop); + +#define map_ip4_reass_lock() while (__sync_lock_test_and_set(map_main.ip4_reass_lock, 1)) {} +#define map_ip4_reass_unlock() do {CLIB_MEMORY_BARRIER(); *map_main.ip4_reass_lock = 0;} while(0) + +static_always_inline void +map_ip4_reass_get_fragments(map_ip4_reass_t *r, u32 **pi) +{ + int i; + for (i=0; ifragments[i] != ~0) { + vec_add1(*pi, r->fragments[i]); + r->fragments[i] = ~0; + map_main.ip4_reass_buffered_counter--; + } +} + +int map_ip4_reass_add_fragment(map_ip4_reass_t *r, u32 pi); + +map_ip6_reass_t * +map_ip6_reass_get(ip6_address_t *src, ip6_address_t *dst, u32 fragment_id, + u8 protocol, u32 **pi_to_drop); +void +map_ip6_reass_free(map_ip6_reass_t *r, u32 **pi_to_drop); + +#define map_ip6_reass_lock() while (__sync_lock_test_and_set(map_main.ip6_reass_lock, 1)) {} +#define map_ip6_reass_unlock() do {CLIB_MEMORY_BARRIER(); *map_main.ip6_reass_lock = 0;} while(0) + +int +map_ip6_reass_add_fragment(map_ip6_reass_t *r, u32 pi, + u16 data_offset, u16 next_data_offset, + u8 *data_start, u16 data_len); + +void map_ip4_drop_pi(u32 pi); + +int map_ip4_reass_conf_ht_ratio(f32 ht_ratio, u32 *trashed_reass, u32 *dropped_packets); +#define MAP_IP4_REASS_CONF_HT_RATIO_MAX 100 +int map_ip4_reass_conf_pool_size(u16 pool_size, u32 *trashed_reass, u32 *dropped_packets); +#define MAP_IP4_REASS_CONF_POOL_SIZE_MAX (0xfeff) +int map_ip4_reass_conf_lifetime(u16 lifetime_ms); +#define MAP_IP4_REASS_CONF_LIFETIME_MAX 0xffff +int map_ip4_reass_conf_buffers(u32 buffers); +#define MAP_IP4_REASS_CONF_BUFFERS_MAX (0xffffffff) + +void map_ip6_drop_pi(u32 pi); + + +int map_ip6_reass_conf_ht_ratio(f32 ht_ratio, u32 *trashed_reass, u32 *dropped_packets); +#define MAP_IP6_REASS_CONF_HT_RATIO_MAX 100 +int map_ip6_reass_conf_pool_size(u16 pool_size, u32 *trashed_reass, u32 *dropped_packets); +#define MAP_IP6_REASS_CONF_POOL_SIZE_MAX (0xfeff) +int map_ip6_reass_conf_lifetime(u16 lifetime_ms); +#define MAP_IP6_REASS_CONF_LIFETIME_MAX 0xffff +int map_ip6_reass_conf_buffers(u32 buffers); +#define MAP_IP6_REASS_CONF_BUFFERS_MAX (0xffffffff) + +static_always_inline +int ip6_parse(const ip6_header_t *ip6, u32 buff_len, + u8 *l4_protocol, u16 *l4_offset, u16 *frag_hdr_offset) +{ + if (ip6->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION) { + *l4_protocol = ((ip6_frag_hdr_t *)(ip6 + 
1))->next_hdr; + *frag_hdr_offset = sizeof(*ip6); + *l4_offset = sizeof(*ip6) + sizeof(ip6_frag_hdr_t); + } else { + *l4_protocol = ip6->protocol; + *frag_hdr_offset = 0; + *l4_offset = sizeof(*ip6); + } + + return (buff_len < (*l4_offset + 4)) || + (clib_net_to_host_u16(ip6->payload_length) < (*l4_offset + 4 - sizeof(*ip6))); +} + + +#define u8_ptr_add(ptr, index) (((u8 *)ptr) + index) +#define u16_net_add(u, val) clib_host_to_net_u16(clib_net_to_host_u16(u) + (val)) + +#define frag_id_6to4(id) ((id) ^ ((id) >> 16)) + +static_always_inline void +ip4_map_t_embedded_address (map_domain_t *d, + ip6_address_t *ip6, const ip4_address_t *ip4) +{ + ASSERT(d->ip6_src_len == 96); //No support for other lengths for now + ip6->as_u64[0] = d->ip6_src.as_u64[0]; + ip6->as_u32[2] = d->ip6_src.as_u32[2]; + ip6->as_u32[3] = ip4->as_u32; +} + +static_always_inline u32 +ip6_map_t_embedded_address (map_domain_t *d, ip6_address_t *addr) +{ + ASSERT(d->ip6_src_len == 96); //No support for other lengths for now + return addr->as_u32[3]; +} + +static inline void +map_domain_counter_lock (map_main_t *mm) +{ + if (mm->counter_lock) + while (__sync_lock_test_and_set(mm->counter_lock, 1)) + /* zzzz */ ; +} +static inline void +map_domain_counter_unlock (map_main_t *mm) +{ + if (mm->counter_lock) + *mm->counter_lock = 0; +} + + +static_always_inline void +map_send_all_to_node(vlib_main_t *vm, u32 *pi_vector, + vlib_node_runtime_t *node, vlib_error_t *error, + u32 next) +{ + u32 n_left_from, *from, next_index, *to_next, n_left_to_next; + //Deal with fragments that are ready + from = pi_vector; + n_left_from = vec_len(pi_vector); + next_index = node->cached_next_index; + while (n_left_from > 0) { + vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next); + while (n_left_from > 0 && n_left_to_next > 0) { + u32 pi0 = to_next[0] = from[0]; + from += 1; + n_left_from -= 1; + to_next += 1; + n_left_to_next -= 1; + vlib_buffer_t *p0 = vlib_get_buffer(vm, pi0); + p0->error = *error; + vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, next); + } + vlib_put_next_frame(vm, node, next_index, n_left_to_next); + } +} + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/map/map_api.c b/src/vnet/map/map_api.c new file mode 100644 index 00000000..7febeb3d --- /dev/null +++ b/src/vnet/map/map_api.c @@ -0,0 +1,295 @@ +/* + *------------------------------------------------------------------ + * map_api.c - vnet map api + * + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *------------------------------------------------------------------ + */ + +#include +#include + +#include "map.h" +#include +#include +#include +#include + +#define vl_typedefs /* define message structures */ +#include +#undef vl_typedefs + +#define vl_endianfun /* define message structures */ +#include +#undef vl_endianfun + +/* instantiate all the print functions we know about */ +#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__) +#define vl_printfun +#include +#undef vl_printfun + +#include + +#define foreach_vpe_api_msg \ +_(MAP_ADD_DOMAIN, map_add_domain) \ +_(MAP_DEL_DOMAIN, map_del_domain) \ +_(MAP_ADD_DEL_RULE, map_add_del_rule) \ +_(MAP_DOMAIN_DUMP, map_domain_dump) \ +_(MAP_RULE_DUMP, map_rule_dump) \ +_(MAP_SUMMARY_STATS, map_summary_stats) + +static void +vl_api_map_add_domain_t_handler (vl_api_map_add_domain_t * mp) +{ + vl_api_map_add_domain_reply_t *rmp; + int rv = 0; + u32 index; + u8 flags = mp->is_translation ? MAP_DOMAIN_TRANSLATION : 0; + rv = + map_create_domain ((ip4_address_t *) & mp->ip4_prefix, mp->ip4_prefix_len, + (ip6_address_t *) & mp->ip6_prefix, mp->ip6_prefix_len, + (ip6_address_t *) & mp->ip6_src, + mp->ip6_src_prefix_len, mp->ea_bits_len, + mp->psid_offset, mp->psid_length, &index, + ntohs (mp->mtu), flags); + + /* *INDENT-OFF* */ + REPLY_MACRO2(VL_API_MAP_ADD_DOMAIN_REPLY, + ({ + rmp->index = ntohl(index); + })); + /* *INDENT-ON* */ +} + +static void +vl_api_map_del_domain_t_handler (vl_api_map_del_domain_t * mp) +{ + vl_api_map_del_domain_reply_t *rmp; + int rv = 0; + + rv = map_delete_domain (ntohl (mp->index)); + + REPLY_MACRO (VL_API_MAP_DEL_DOMAIN_REPLY); +} + +static void +vl_api_map_add_del_rule_t_handler (vl_api_map_add_del_rule_t * mp) +{ + vl_api_map_del_domain_reply_t *rmp; + int rv = 0; + + rv = + map_add_del_psid (ntohl (mp->index), ntohs (mp->psid), + (ip6_address_t *) mp->ip6_dst, mp->is_add); + + REPLY_MACRO (VL_API_MAP_ADD_DEL_RULE_REPLY); +} + +static void +vl_api_map_domain_dump_t_handler (vl_api_map_domain_dump_t * mp) +{ + vl_api_map_domain_details_t *rmp; + map_main_t *mm = &map_main; + map_domain_t *d; + unix_shared_memory_queue_t *q; + + if (pool_elts (mm->domains) == 0) + return; + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + { + return; + } + + /* *INDENT-OFF* */ + pool_foreach(d, mm->domains, + ({ + /* Make sure every field is initiated (or don't skip the memset()) */ + rmp = vl_msg_api_alloc (sizeof (*rmp)); + rmp->_vl_msg_id = ntohs(VL_API_MAP_DOMAIN_DETAILS); + rmp->domain_index = htonl(d - mm->domains); + rmp->ea_bits_len = d->ea_bits_len; + rmp->psid_offset = d->psid_offset; + rmp->psid_length = d->psid_length; + clib_memcpy(rmp->ip4_prefix, &d->ip4_prefix, sizeof(rmp->ip4_prefix)); + rmp->ip4_prefix_len = d->ip4_prefix_len; + clib_memcpy(rmp->ip6_prefix, &d->ip6_prefix, sizeof(rmp->ip6_prefix)); + rmp->ip6_prefix_len = d->ip6_prefix_len; + clib_memcpy(rmp->ip6_src, &d->ip6_src, sizeof(rmp->ip6_src)); + rmp->ip6_src_len = d->ip6_src_len; + rmp->mtu = htons(d->mtu); + rmp->is_translation = (d->flags & MAP_DOMAIN_TRANSLATION); + rmp->context = mp->context; + + vl_msg_api_send_shmem (q, (u8 *)&rmp); + })); + /* *INDENT-ON* */ +} + +static void +vl_api_map_rule_dump_t_handler (vl_api_map_rule_dump_t * mp) +{ + unix_shared_memory_queue_t *q; + u16 i; + ip6_address_t dst; + vl_api_map_rule_details_t *rmp; + map_main_t *mm = &map_main; + u32 domain_index = ntohl (mp->domain_index); + map_domain_t *d; + + if (pool_elts (mm->domains) == 0) + return; + + d = pool_elt_at_index 
(mm->domains, domain_index); + if (!d || !d->rules) + { + return; + } + + q = vl_api_client_index_to_input_queue (mp->client_index); + if (q == 0) + { + return; + } + + for (i = 0; i < (0x1 << d->psid_length); i++) + { + dst = d->rules[i]; + if (dst.as_u64[0] == 0 && dst.as_u64[1] == 0) + { + continue; + } + rmp = vl_msg_api_alloc (sizeof (*rmp)); + memset (rmp, 0, sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_MAP_RULE_DETAILS); + rmp->psid = htons (i); + clib_memcpy (rmp->ip6_dst, &dst, sizeof (rmp->ip6_dst)); + rmp->context = mp->context; + vl_msg_api_send_shmem (q, (u8 *) & rmp); + } +} + +static void +vl_api_map_summary_stats_t_handler (vl_api_map_summary_stats_t * mp) +{ + vl_api_map_summary_stats_reply_t *rmp; + vlib_combined_counter_main_t *cm; + vlib_counter_t v; + int i, which; + u64 total_pkts[VLIB_N_RX_TX]; + u64 total_bytes[VLIB_N_RX_TX]; + map_main_t *mm = &map_main; + unix_shared_memory_queue_t *q = + vl_api_client_index_to_input_queue (mp->client_index); + + if (!q) + return; + + rmp = vl_msg_api_alloc (sizeof (*rmp)); + rmp->_vl_msg_id = ntohs (VL_API_MAP_SUMMARY_STATS_REPLY); + rmp->context = mp->context; + rmp->retval = 0; + + memset (total_pkts, 0, sizeof (total_pkts)); + memset (total_bytes, 0, sizeof (total_bytes)); + + map_domain_counter_lock (mm); + vec_foreach (cm, mm->domain_counters) + { + which = cm - mm->domain_counters; + + for (i = 0; i < vec_len (cm->maxi); i++) + { + vlib_get_combined_counter (cm, i, &v); + total_pkts[which] += v.packets; + total_bytes[which] += v.bytes; + } + } + + map_domain_counter_unlock (mm); + + /* Note: in network byte order! */ + rmp->total_pkts[MAP_DOMAIN_COUNTER_RX] = + clib_host_to_net_u64 (total_pkts[MAP_DOMAIN_COUNTER_RX]); + rmp->total_bytes[MAP_DOMAIN_COUNTER_RX] = + clib_host_to_net_u64 (total_bytes[MAP_DOMAIN_COUNTER_RX]); + rmp->total_pkts[MAP_DOMAIN_COUNTER_TX] = + clib_host_to_net_u64 (total_pkts[MAP_DOMAIN_COUNTER_TX]); + rmp->total_bytes[MAP_DOMAIN_COUNTER_TX] = + clib_host_to_net_u64 (total_bytes[MAP_DOMAIN_COUNTER_TX]); + rmp->total_bindings = clib_host_to_net_u64 (pool_elts (mm->domains)); + rmp->total_ip4_fragments = 0; // Not yet implemented. Should be a simple counter. + rmp->total_security_check[MAP_DOMAIN_COUNTER_TX] = + clib_host_to_net_u64 (map_error_counter_get + (ip4_map_node.index, MAP_ERROR_ENCAP_SEC_CHECK)); + rmp->total_security_check[MAP_DOMAIN_COUNTER_RX] = + clib_host_to_net_u64 (map_error_counter_get + (ip4_map_node.index, MAP_ERROR_DECAP_SEC_CHECK)); + + vl_msg_api_send_shmem (q, (u8 *) & rmp); +} + +/* + * vpe_api_hookup + * Add vpe's API message handlers to the table. + * vlib has alread mapped shared memory and + * added the client registration handlers. 
+ * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process() + */ +#define vl_msg_name_crc_list +#include +#undef vl_msg_name_crc_list + +static void +setup_message_id_table (api_main_t * am) +{ +#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id); + foreach_vl_msg_name_crc_map; +#undef _ +} + +static clib_error_t * +map_api_hookup (vlib_main_t * vm) +{ + api_main_t *am = &api_main; + +#define _(N,n) \ + vl_msg_api_set_handlers(VL_API_##N, #n, \ + vl_api_##n##_t_handler, \ + vl_noop_handler, \ + vl_api_##n##_t_endian, \ + vl_api_##n##_t_print, \ + sizeof(vl_api_##n##_t), 1); + foreach_vpe_api_msg; +#undef _ + + /* + * Set up the (msg_name, crc, message-id) table + */ + setup_message_id_table (am); + + return 0; +} + +VLIB_API_INIT_FUNCTION (map_api_hookup); + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/map/map_doc.md b/src/vnet/map/map_doc.md new file mode 100644 index 00000000..17f3c511 --- /dev/null +++ b/src/vnet/map/map_doc.md @@ -0,0 +1,69 @@ +# VPP MAP and Lw4o6 implementation {#map_doc} + +This is a memo intended to contain documentation of the VPP MAP and Lw4o6 implementations. +Everything that is not directly obvious should come here. + + + +## MAP-E Virtual Reassembly + +The MAP-E implementation supports handling of IPv4 fragments as well as IPv4-in-IPv6 inner and outer fragments. This is called virtual reassembly because the fragments are not actually reassembled. Instead, some meta-data are kept about the first fragment and reused for subsequent fragments. + +Fragment caching and handling is not always necessary. It is performed when: +* An IPv4 fragment is received and the destination IPv4 address is shared. +* An IPv6 packet is received with an inner IPv4 fragment, the IPv4 source address is shared, and 'security-check fragments' is on. +* An IPv6 fragment is received. + +There are 3 dedicated nodes: +* ip4-map-reass +* ip6-map-ip4-reass +* ip6-map-ip6-reass + +ip4-map sends all fragments to ip4-map-reass. +ip6-map sends all inner-fragments to ip6-map-ip4-reass. +ip6-map sends all outer-fragments to ip6-map-ip6-reass. + +IPv4 (resp. IPv6) virtual reassembly makes use of a hash table in order to store IPv4 (resp. IPv6) reassembly structures. The hash-key is based on the IPv4-src:IPv4-dst:Frag-ID:Protocol tuple (resp. IPv6-src:IPv6-dst:Frag-ID tuple, as the protocol is IPv4-in-IPv6). Therefore, each packet reassembly makes use of exactly one reassembly structure. When such a structure is allocated, it is timestamped with the current time. Finally, those structures are capable of storing a limited number of buffer indexes. + +An IPv4 (resp. IPv6) reassembly structure can cache up to MAP_IP4_REASS_MAX_FRAGMENTS_PER_REASSEMBLY (resp. MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY) buffers. Buffers are cached until the first fragment is received. + +#### Virtual Reassembly configuration + +IPv4 and IPv6 virtual reassembly support the following configuration: + map params reassembly [ip4 | ip6] [lifetime ] [pool-size ] [buffers ] [ht-ratio ] + +lifetime: + The time in milliseconds a reassembly structure is considered valid. The longer the lifetime, the more reliable the reassembly, but the more likely it is to exhaust the pool of reassembly structures. The IPv4 standard suggests a lifetime of 15 seconds. IPv6 specifies a lifetime of 60 seconds. Those values are not realistic for high-throughput cases. + +buffers: + The upper limit of buffers that are allowed to be cached.
It can be used to protect against fragmentation attacks which would aim to exhaust the global buffer pool. + +pool-size: + The number of reassembly structures that can be allocated. As each structure can store a small fixed number of fragments, it also sets an upper-bound of 'pool-size * MAP_IPX_REASS_MAX_FRAGMENTS_PER_REASSEMBLY' buffers that can be cached in total. + +ht-ratio: + The number of buckets in the hash-table is pool-size * ht-ratio. + + +Any time pool-size or ht-ratio is modified, the hash-table is destroyed and re-created, which means all current state is lost. + + +##### Additional considerations + +Reassembly at high rate is expensive in terms of buffers. There is a trade-off between the lifetime and the number of allocated buffers. Reducing the lifetime helps, but at the cost of losing state for fragments that arrive far apart. + +Let: +R be the packet rate at which fragments are received. +F be the number of fragments per packet. + +Assuming the first fragment is always received last, we should have: +buffers > lifetime * R / F * (F - 1) +pool-size > lifetime * R/F + +This is a worst case. Receiving the first fragment earlier helps reduce the number of required buffers. Also, an optimization is implemented (MAP_IP6_REASS_COUNT_BYTES and MAP_IP4_REASS_COUNT_BYTES) which counts the number of transmitted bytes and remembers the total number of bytes which should be transmitted based on the last fragment, and therefore helps reduce 'pool-size'. + +But the formula shows that it is challenging to forward a significant number of fragmented packets at high rates. For instance, with a lifetime of 1 second, a 5 Mpps packet rate would require buffering up to 2.5 million fragments. + +If you want to do that, be prepared to configure a very large number of buffers and reassembly structures. + +
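To make the sizing formulas above concrete, here is a small standalone sketch (illustrative only, not part of this patch; the 1 second lifetime, 5 Mpps rate and 2 fragments per packet are assumed example values) that reproduces the 2.5 million figure quoted above.

```c
/* Worst-case sizing for MAP virtual reassembly, per the formulas above.
 * All inputs are assumed example values. */
#include <stdio.h>

int
main (void)
{
  double lifetime = 1.0; /* seconds a reassembly structure stays valid */
  double R = 5e6;        /* packet rate at which fragments arrive (5 Mpps) */
  double F = 2.0;        /* fragments per packet */

  double buffers = lifetime * R / F * (F - 1.0); /* buffers > lifetime * R/F * (F-1) */
  double pool_size = lifetime * R / F;           /* pool-size > lifetime * R/F */

  printf ("buffers   > %.0f\n", buffers);   /* 2500000 for these inputs */
  printf ("pool-size > %.0f\n", pool_size); /* 2500000 for these inputs */
  return 0;
}
```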
diff --git a/src/vnet/map/map_dpo.c b/src/vnet/map/map_dpo.c new file mode 100644 index 00000000..df2b5fa4 --- /dev/null +++ b/src/vnet/map/map_dpo.c @@ -0,0 +1,191 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#include +#include + +/** + * Pool of all MAP DPOs + */ +map_dpo_t *map_dpo_pool; + +/** + * The registered MAP DPO types + */ +dpo_type_t map_dpo_type; +dpo_type_t map_t_dpo_type; + +static map_dpo_t * +map_dpo_alloc (void) +{ + map_dpo_t *md; + + pool_get_aligned(map_dpo_pool, md, CLIB_CACHE_LINE_BYTES); + memset(md, 0, sizeof(*md)); + + return (md); +} + +static index_t +map_dpo_get_index (map_dpo_t *md) +{ + return (md - map_dpo_pool); +} + +void +map_dpo_create (dpo_proto_t dproto, + u32 domain_index, + dpo_id_t *dpo) +{ + map_dpo_t *md; + + md = map_dpo_alloc(); + md->md_domain = domain_index; + md->md_proto = dproto; + + dpo_set(dpo, + map_dpo_type, + dproto, + map_dpo_get_index(md)); +} + +void +map_t_dpo_create (dpo_proto_t dproto, + u32 domain_index, + dpo_id_t *dpo) +{ + map_dpo_t *md; + + md = map_dpo_alloc(); + md->md_domain = domain_index; + md->md_proto = dproto; + + dpo_set(dpo, + map_t_dpo_type, + dproto, + map_dpo_get_index(md)); +} + + +u8* +format_map_dpo (u8 *s, va_list *args) +{ + index_t index = va_arg (*args, index_t); + CLIB_UNUSED(u32 indent) = va_arg (*args, u32); + map_dpo_t *md; + + md = map_dpo_get(index); + + return (format(s, "map:[%d]:%U domain:%d", + index, + format_dpo_proto, md->md_proto, + md->md_domain)); +} + +u8* +format_map_t_dpo (u8 *s, va_list *args) +{ + index_t index = va_arg (*args, index_t); + CLIB_UNUSED(u32 indent) = va_arg (*args, u32); + map_dpo_t *md; + + md = map_dpo_get(index); + + return (format(s, "map-t:[%d]:%U domain:%d", + index, + format_dpo_proto, md->md_proto, + md->md_domain)); +} + + +static void +map_dpo_lock (dpo_id_t *dpo) +{ + map_dpo_t *md; + + md = map_dpo_get(dpo->dpoi_index); + + md->md_locks++; +} + +static void +map_dpo_unlock (dpo_id_t *dpo) +{ + map_dpo_t *md; + + md = map_dpo_get(dpo->dpoi_index); + + md->md_locks--; + + if (0 == md->md_locks) + { + pool_put(map_dpo_pool, md); + } +} + +const static dpo_vft_t md_vft = { + .dv_lock = map_dpo_lock, + .dv_unlock = map_dpo_unlock, + .dv_format = format_map_dpo, +}; + +const static char* const map_ip4_nodes[] = +{ + "ip4-map", + NULL, +}; +const static char* const map_ip6_nodes[] = +{ + "ip6-map", + NULL, +}; + +const static char* const * const map_nodes[DPO_PROTO_NUM] = +{ + [DPO_PROTO_IP4] = map_ip4_nodes, + [DPO_PROTO_IP6] = map_ip6_nodes, + [DPO_PROTO_MPLS] = NULL, +}; + +const static dpo_vft_t md_t_vft = { + .dv_lock = map_dpo_lock, + .dv_unlock = map_dpo_unlock, + .dv_format = format_map_t_dpo, +}; + +const static char* const map_t_ip4_nodes[] = +{ + "ip4-map-t", + NULL, +}; +const static char* const map_t_ip6_nodes[] = +{ + "ip6-map-t", + NULL, +}; + +const static char* const * const map_t_nodes[DPO_PROTO_NUM] = +{ + [DPO_PROTO_IP4] = map_t_ip4_nodes, + [DPO_PROTO_IP6] = map_t_ip6_nodes, + [DPO_PROTO_MPLS] = NULL, +}; + +void +map_dpo_module_init (void) +{ + map_dpo_type = dpo_register_new_type(&md_vft, map_nodes); + map_t_dpo_type = dpo_register_new_type(&md_t_vft, map_t_nodes); +}
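The dv_lock/dv_unlock callbacks above give the MAP DPO plain reference counting: the pool entry is returned once the last user drops its lock. A minimal caller-side sketch follows (illustrative only, not part of this patch; it assumes the generic dpo_reset() helper and DPO_INVALID initializer from the vnet DPO layer, and an arbitrary domain index 0).

```c
/* Hypothetical usage sketch: create a MAP DPO for domain 0, then release it.
 * dpo_set() (called inside map_dpo_create) takes the first lock; dpo_reset()
 * drops it, which ends up in map_dpo_unlock() and frees the pool entry. */
#include <vnet/dpo/dpo.h>
#include <vnet/map/map_dpo.h>

static void
map_dpo_usage_example (void)
{
  dpo_id_t dpo = DPO_INVALID;

  map_dpo_create (DPO_PROTO_IP4, 0 /* domain_index */, &dpo);

  /* ... stack the DPO on a FIB entry / hand it to the forwarding graph ... */

  dpo_reset (&dpo); /* last unlock -> map_dpo_unlock() -> pool_put() */
}
```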
diff --git a/src/vnet/map/map_dpo.h b/src/vnet/map/map_dpo.h new file mode 100644 index 00000000..be510dba --- /dev/null +++ b/src/vnet/map/map_dpo.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __MAP_DPO_H__ +#define __MAP_DPO_H__ + +#include +#include + +/** + * A representation of a MAP DPO + */ +typedef struct map_dpo_t +{ + /** + * The data-plane protocol + */ + dpo_proto_t md_proto; + + /** + * The MAP domain index + */ + u32 md_domain; + + /** + * Number of locks/users of the DPO + */ + u16 md_locks; +} map_dpo_t; + +extern void map_dpo_create (dpo_proto_t dproto, + u32 domain_index, + dpo_id_t *dpo); +extern void map_t_dpo_create (dpo_proto_t dproto, + u32 domain_index, + dpo_id_t *dpo); + +extern u8* format_map_dpo(u8 *s, va_list *args); + +/* + * Encapsulation violation for fast data-path access + */ +extern map_dpo_t *map_dpo_pool; +extern dpo_type_t map_dpo_type; +extern dpo_type_t map_t_dpo_type; + +static inline map_dpo_t * +map_dpo_get (index_t index) +{ + return (pool_elt_at_index(map_dpo_pool, index)); +} + +extern void map_dpo_module_init(void); + +#endif diff --git a/src/vnet/map/test.c b/src/vnet/map/test.c new file mode 100644 index 00000000..f3c893a7 --- /dev/null +++ b/src/vnet/map/test.c @@ -0,0 +1,205 @@ +/* + * test.c : MAP unit tests + * + * Copyright (c) 2016 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#include +#include "map.h" + +static map_domain_t * +get_domain(ip4_address_t * ip4_prefix, u8 ip4_prefix_len, + ip6_address_t * ip6_prefix, u8 ip6_prefix_len, + ip6_address_t * ip6_src, u8 ip6_src_len, + u8 ea_bits_len, u8 psid_offset, + u8 psid_length, u16 mtu, u8 flags) +{ + map_domain_t * d = malloc(sizeof(*d)); + u8 suffix_len; + + /* EA bits must be within the first 64 bits */ + if (ea_bits_len > 0 && (ip6_prefix_len + ea_bits_len) > 64) + return NULL; + + /* Init domain struct */ + d->ip4_prefix.as_u32 = ip4_prefix->as_u32; + d->ip4_prefix_len = ip4_prefix_len; + d->ip6_prefix = *ip6_prefix; + d->ip6_prefix_len = ip6_prefix_len; + d->ip6_src = *ip6_src; + d->ip6_src_len = ip6_src_len; + d->ea_bits_len = ea_bits_len; + d->psid_offset = psid_offset; + d->psid_length = psid_length; + d->mtu = mtu; + d->flags = flags; + + /* How many, and which bits to grab from the IPv4 DA */ + if (ip4_prefix_len + ea_bits_len < 32) + { + d->flags |= MAP_DOMAIN_PREFIX; + d->suffix_shift = 32 - ip4_prefix_len - ea_bits_len; + suffix_len = ea_bits_len; + } + else + { + d->suffix_shift = 0; + suffix_len = 32 - ip4_prefix_len; + } + d->suffix_mask = (1 << suffix_len) - 1; + + d->psid_shift = 16 - psid_length - psid_offset; + d->psid_mask = (1 << d->psid_length) - 1; + + if (ip6_prefix_len + suffix_len + d->psid_length > 64) + return NULL; + + d->ea_shift = 64 - ip6_prefix_len - suffix_len - d->psid_length; + + return d; +} + + +/* + * VPP-340: + * map_add_domain ip4-pfx 20.0.0.0/8 ip6-pfx 2001:db8::/40 ip6-src 2001:db8:ffff::/96 ea-bits-len 24 psid-offset 0 psid-len 0 map-t + * IPv4 src = 100.0.0.1 + * IPv4 dst = 20.169.201.219 + * UDP dest port = 1232 + * IPv6 src = 2001:db8:ffff::6400:1 + * IPv6 dst = a9c9:dfb8::14a9:c9db:0 + * a9c9:dfb8::14a9:c9db:0 != 2001:db8:a9:c9db:0:14a9:c9db:0 + */ +static void +test_map_t_destaddr (void) +{ + ip4_address_t ip4_prefix; + ip6_address_t ip6_prefix; + ip6_address_t ip6_src; + + ip4_prefix.as_u32 = clib_host_to_net_u32(0x14000000); + ip6_prefix.as_u64[0] = clib_host_to_net_u64(0x20010db800000000); + ip6_prefix.as_u64[1] = 0; + ip6_src.as_u64[0] = clib_host_to_net_u64(0x20010db8ffff0000); + map_domain_t * d = get_domain (&ip4_prefix, 8, &ip6_prefix, 40, &ip6_src, 96, 24, 0, 0, 0, MAP_DOMAIN_TRANSLATION); + + ip6_address_t dst6; + + dst6.as_u64[0] = map_get_pfx(d, 0x14a9c9db, 1232); + dst6.as_u64[1] = map_get_sfx(d, 0x14a9c9db, 1232); + assert(dst6.as_u64[0] == 0x20010db800a9c9db); + assert(dst6.as_u64[1] == 0x000014a9c9db0000); +} + +/* + * VPP-228 + * ip4-pfx 20.0.0.0/8 + * ip6-pfx 2001:db8::/ + * ip6-src 2001:db8:ffff::1 + * ea-bits-len 16 psid-offset 6 psid-len 8 + * 20.169.201.219 port 1232 + */ +static void +test_map_eabits (void) +{ + ip4_address_t ip4_prefix; + ip6_address_t ip6_prefix; + ip6_address_t ip6_src; + ip6_address_t dst6; + + ip4_prefix.as_u32 = clib_host_to_net_u32(0x14000000); + ip6_prefix.as_u64[0] = clib_host_to_net_u64(0x20010db800000000); + ip6_prefix.as_u64[1] = 0; + ip6_src.as_u64[0] = clib_host_to_net_u64(0x20010db8ffff0000); + ip6_src.as_u64[1] = clib_host_to_net_u64(0x0000000000000001); + map_domain_t * d = get_domain (&ip4_prefix, 16, &ip6_prefix, 48, &ip6_src, + 128, 16, 6, 8, 0, 0); + assert(!d); + + //20.0.0.0/8 2001:db8::/32 4 2001:db8:a000::14a0:0:0 + d = get_domain (&ip4_prefix, 8, &ip6_prefix, 32, &ip6_src, + 128, 4, 0, 0, 0, 0); + dst6.as_u64[0] = map_get_pfx(d, 0x14a9c9db, 1232); + dst6.as_u64[1] = map_get_sfx(d, 0x14a9c9db, 1232); + assert(dst6.as_u64[0] == 0x20010db8a0000000); + assert(dst6.as_u64[1] == 
0x000014a000000000); + + //20.0.0.0/8 2001:db8::/32 8 2001:db8:a900::14a9:0:0 + d = get_domain (&ip4_prefix, 8, &ip6_prefix, 32, &ip6_src, + 128, 8, 0, 0, 0, 0); + dst6.as_u64[0] = map_get_pfx(d, 0x14a9c9db, 1232); + dst6.as_u64[1] = map_get_sfx(d, 0x14a9c9db, 1232); + assert(dst6.as_u64[0] == 0x20010db8a9000000); + assert(dst6.as_u64[1] == 0x000014a900000000); + + //20.0.0.0/8 2001:db8::/32 10 2001:db8:a9c0::14a9:c000:0 + d = get_domain (&ip4_prefix, 8, &ip6_prefix, 32, &ip6_src, + 128, 10, 0, 0, 0, 0); + dst6.as_u64[0] = map_get_pfx(d, 0x14a9c9db, 1232); + dst6.as_u64[1] = map_get_sfx(d, 0x14a9c9db, 1232); + assert(dst6.as_u64[0] == 0x20010db8a9c00000); + assert(dst6.as_u64[1] == 0x000014a9c0000000); + + //20.0.0.0/8 2001:db8::/32 16 2001:db8:a9c9::14a9:c900:0 + d = get_domain (&ip4_prefix, 8, &ip6_prefix, 32, &ip6_src, + 128, 16, 0, 0, 0, 0); + dst6.as_u64[0] = map_get_pfx(d, 0x14a9c9db, 1232); + dst6.as_u64[1] = map_get_sfx(d, 0x14a9c9db, 1232); + assert(dst6.as_u64[0] == 0x20010db8a9c90000); + assert(dst6.as_u64[1] == 0x000014a9c9000000); + + //20.0.0.0/8 2001:db8::/32 20 2001:db8:a9c9:d000:0:14a9:c9d0:0 + d = get_domain (&ip4_prefix, 8, &ip6_prefix, 32, &ip6_src, + 128, 20, 0, 0, 0, 0); + dst6.as_u64[0] = map_get_pfx(d, 0x14a9c9db, 1232); + dst6.as_u64[1] = map_get_sfx(d, 0x14a9c9db, 1232); + assert(dst6.as_u64[0] == 0x20010db8a9c9d000); + assert(dst6.as_u64[1] == 0x000014a9c9d00000); + + //20.0.0.0/8 2001:db8::/32 23 2001:db8:a9c9:da00:0:14a9:c9da:0 + d = get_domain (&ip4_prefix, 8, &ip6_prefix, 32, &ip6_src, + 128, 23, 0, 0, 0, 0); + dst6.as_u64[0] = map_get_pfx(d, 0x14a9c9db, 1232); + dst6.as_u64[1] = map_get_sfx(d, 0x14a9c9db, 1232); + assert(dst6.as_u64[0] == 0x20010db8a9c9da00); + assert(dst6.as_u64[1] == 0x000014a9c9da0000); + + //20.169.201.0/24 2001:db8::/32 7 2001:db8:da00::14a9:c9da:0 + d = get_domain (&ip4_prefix, 8, &ip6_prefix, 32, &ip6_src, + 128, 7, 0, 0, 0, 0); + dst6.as_u64[0] = map_get_pfx(d, 0x14a9c9db, 1232); + dst6.as_u64[1] = map_get_sfx(d, 0x14a9c9db, 1232); + assert(dst6.as_u64[0] == 0x20010db8a8000000); + assert(dst6.as_u64[1] == 0x000014a800000000); +} + +#define foreach_test_case \ + _(map_t_destaddr) \ + _(map_eabits) + +static void +run_tests (void) +{ +#define _(_test_name) \ + test_ ## _test_name (); + + foreach_test_case +#undef _ +} + +int main() +{ + run_tests (); + return 0; +} -- cgit 1.2.3-korg From a9a20e7f69f4a91a4d5267ab5ce14125bdc7d6c6 Mon Sep 17 00:00:00 2001 From: Billy McFall Date: Wed, 15 Feb 2017 11:39:12 -0500 Subject: VPP-635: CLI Memory leak with invalid parameter In the CLI parsing, below is a common pattern: /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) return 0; while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) { if (unformat (line_input, "x")) x = 1; : else return clib_error_return (0, "unknown input `%U'", format_unformat_error, line_input); } unformat_free (line_input); The 'else' returns if an unknown string is encountered. There a memory leak because the 'unformat_free(line_input)' is not called. There is a large number of instances of this pattern. Replaced the previous pattern with: /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) return 0; while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) { if (unformat (line_input, "x")) x = 1; : else { error = clib_error_return (0, "unknown input `%U'", format_unformat_error, line_input); goto done: } } /* ...Remaining code... 
*/ done: unformat_free (line_input); return error; } In multiple files, 'unformat_free (line_input);' was never called, so there was a memory leak whether an invalid string was entered or not. Also, there were multiple instance where: error = clib_error_return (0, "unknown input `%U'", format_unformat_error, line_input); used 'input' as the last parameter instead of 'line_input'. The result is that output did not contain the substring in error, instead just an empty string. Fixed all of those as well. There are a lot of file, and very mind numbing work, so tried to keep it to a pattern to avoid mistakes. Change-Id: I8902f0c32a47dd7fb3bb3471a89818571702f1d2 Signed-off-by: Billy McFall Signed-off-by: Dave Barach --- build-root/emacs-lisp/tunnel-c-skel.el | 19 ++- src/plugins/ila/ila.c | 25 ++- src/plugins/lb/cli.c | 99 ++++++----- src/plugins/sixrd/sixrd.c | 42 +++-- src/plugins/snat/snat.c | 139 +++++++++++----- src/vlib/threads_cli.c | 79 ++++++--- src/vlib/trace.c | 13 +- src/vlib/unix/cli.c | 22 ++- src/vnet/devices/af_packet/cli.c | 56 +++++-- src/vnet/devices/dpdk/cli.c | 290 +++++++++++++++++++++++---------- src/vnet/devices/dpdk/ipsec/cli.c | 15 +- src/vnet/devices/netmap/cli.c | 54 ++++-- src/vnet/devices/virtio/vhost-user.c | 62 +++++-- src/vnet/gre/interface.c | 35 ++-- src/vnet/ip/ip4_source_check.c | 6 +- src/vnet/ip/ip4_test.c | 15 +- src/vnet/ip/ip6_neighbor.c | 27 ++- src/vnet/ip/lookup.c | 34 ++-- src/vnet/ipsec-gre/interface.c | 34 ++-- src/vnet/ipsec/ipsec_cli.c | 177 +++++++++++++------- src/vnet/l2/l2_patch.c | 26 ++- src/vnet/l2/l2_xcrw.c | 34 +++- src/vnet/l2tp/l2tp.c | 39 +++-- src/vnet/lisp-cp/lisp_cli.c | 139 ++++++++++++---- src/vnet/lisp-gpe/interface.c | 58 +++++-- src/vnet/lisp-gpe/lisp_gpe.c | 13 +- src/vnet/map/map.c | 186 +++++++++++++++------ src/vnet/mpls/mpls.c | 2 + src/vnet/mpls/mpls_tunnel.c | 19 ++- src/vnet/pg/cli.c | 39 +++-- src/vnet/policer/node_funcs.c | 19 ++- src/vnet/policer/policer.c | 13 +- src/vnet/unix/tapcli.c | 57 +++++-- src/vnet/vxlan-gpe/vxlan_gpe.c | 62 +++++-- src/vnet/vxlan/vxlan.c | 81 ++++++--- src/vpp/app/l2t.c | 9 +- src/vpp/app/vpe_cli.c | 24 ++- 37 files changed, 1487 insertions(+), 576 deletions(-) (limited to 'src/vnet/map') diff --git a/build-root/emacs-lisp/tunnel-c-skel.el b/build-root/emacs-lisp/tunnel-c-skel.el index aa260e53..a1b1757d 100644 --- a/build-root/emacs-lisp/tunnel-c-skel.el +++ b/build-root/emacs-lisp/tunnel-c-skel.el @@ -288,6 +288,7 @@ static clib_error_t * vlib_cli_command_t * cmd) { unformat_input_t _line_input, * line_input = &_line_input; + clib_error_t *error = 0; ip4_address_t src, dst; u8 is_add = 1; u8 src_set = 0; @@ -322,13 +323,19 @@ static clib_error_t * { encap_fib_index = fib_index_from_fib_id (tmp); if (encap_fib_index == ~0) - return clib_error_return (0, \"nonexistent encap fib id %d\", tmp); + { + unformat_free (line_input); + return clib_error_return (0, \"nonexistent encap fib id %d\", tmp); + } } else if (unformat (line_input, \"decap-vrf-id %d\", &tmp)) { decap_fib_index = fib_index_from_fib_id (tmp); if (decap_fib_index == ~0) - return clib_error_return (0, \"nonexistent decap fib id %d\", tmp); + { + unformat_free (line_input); + return clib_error_return (0, \"nonexistent decap fib id %d\", tmp); + } } else if (unformat (line_input, \"decap-next %U\", unformat_decap_next, &decap_next_index)) @@ -346,8 +353,12 @@ static clib_error_t * * in the " ENCAP_STACK " header */ else - return clib_error_return (0, \"parse error: '%U'\", - format_unformat_error, line_input); + { + error = 
clib_error_return (0, \"parse error: '%U'\", + format_unformat_error, line_input); + unformat_free (line_input); + return error; + } } unformat_free (line_input); diff --git a/src/plugins/ila/ila.c b/src/plugins/ila/ila.c index e0f3907f..52c7ea55 100644 --- a/src/plugins/ila/ila.c +++ b/src/plugins/ila/ila.c @@ -949,6 +949,7 @@ ila_entry_command_fn (vlib_main_t * vm, ila_add_del_entry_args_t args = { 0 }; u8 next_hop_set = 0; int ret; + clib_error_t *error = 0; args.type = ILA_TYPE_IID; args.csum_mode = ILA_CSUM_MODE_NO_ACTION; @@ -986,19 +987,29 @@ ila_entry_command_fn (vlib_main_t * vm, else if (unformat (line_input, "del")) args.is_del = 1; else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); - if (!next_hop_set) - return clib_error_return (0, "Specified a next hop"); + { + error = clib_error_return (0, "Specified a next hop"); + goto done; + } if ((ret = ila_add_del_entry (&args))) - return clib_error_return (0, "ila_add_del_entry returned error %d", ret); + { + error = clib_error_return (0, "ila_add_del_entry returned error %d", ret); + goto done; + } - return NULL; +done: + unformat_free (line_input); + + return error; } VLIB_CLI_COMMAND (ila_entry_command, static) = diff --git a/src/plugins/lb/cli.c b/src/plugins/lb/cli.c index b59c6426..6452a875 100644 --- a/src/plugins/lb/cli.c +++ b/src/plugins/lb/cli.c @@ -28,13 +28,16 @@ lb_vip_command_fn (vlib_main_t * vm, int ret; u32 gre4 = 0; lb_vip_type_t type; + clib_error_t *error = 0; if (!unformat_user (input, unformat_line_input, line_input)) return 0; - if (!unformat(line_input, "%U", unformat_ip46_prefix, &prefix, &plen, IP46_TYPE_ANY, &plen)) - return clib_error_return (0, "invalid vip prefix: '%U'", - format_unformat_error, line_input); + if (!unformat(line_input, "%U", unformat_ip46_prefix, &prefix, &plen, IP46_TYPE_ANY, &plen)) { + error = clib_error_return (0, "invalid vip prefix: '%U'", + format_unformat_error, line_input); + goto done; + } while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) { @@ -46,13 +49,13 @@ lb_vip_command_fn (vlib_main_t * vm, gre4 = 1; else if (unformat(line_input, "encap gre6")) gre4 = 0; - else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + else { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); - if (ip46_prefix_is_ip4(&prefix, plen)) { type = (gre4)?LB_VIP_TYPE_IP4_GRE4:LB_VIP_TYPE_IP4_GRE6; @@ -65,17 +68,25 @@ lb_vip_command_fn (vlib_main_t * vm, u32 index; if (!del) { if ((ret = lb_vip_add(&prefix, plen, type, new_len, &index))) { - return clib_error_return (0, "lb_vip_add error %d", ret); + error = clib_error_return (0, "lb_vip_add error %d", ret); + goto done; } else { vlib_cli_output(vm, "lb_vip_add ok %d", index); } } else { - if ((ret = lb_vip_find_index(&prefix, plen, &index))) - return clib_error_return (0, "lb_vip_find_index error %d", ret); - else if ((ret = lb_vip_del(index))) - return clib_error_return (0, "lb_vip_del error %d", ret); + if ((ret = lb_vip_find_index(&prefix, plen, &index))) { + error = clib_error_return (0, "lb_vip_find_index error %d", ret); + goto done; + } else if ((ret = lb_vip_del(index))) { + error = clib_error_return (0, "lb_vip_del error %d", ret); + goto done; + } } - return NULL; + +done: + unformat_free (line_input); + + return 
error; } VLIB_CLI_COMMAND (lb_vip_command, static) = @@ -96,16 +107,21 @@ lb_as_command_fn (vlib_main_t * vm, u32 vip_index; u8 del = 0; int ret; + clib_error_t *error = 0; if (!unformat_user (input, unformat_line_input, line_input)) return 0; - if (!unformat(line_input, "%U", unformat_ip46_prefix, &vip_prefix, &vip_plen, IP46_TYPE_ANY)) - return clib_error_return (0, "invalid as address: '%U'", - format_unformat_error, line_input); + if (!unformat(line_input, "%U", unformat_ip46_prefix, &vip_prefix, &vip_plen, IP46_TYPE_ANY)) { + error = clib_error_return (0, "invalid as address: '%U'", + format_unformat_error, line_input); + goto done; + } - if ((ret = lb_vip_find_index(&vip_prefix, vip_plen, &vip_index))) - return clib_error_return (0, "lb_vip_find_index error %d", ret); + if ((ret = lb_vip_find_index(&vip_prefix, vip_plen, &vip_index))) { + error = clib_error_return (0, "lb_vip_find_index error %d", ret); + goto done; + } while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) { @@ -114,15 +130,15 @@ lb_as_command_fn (vlib_main_t * vm, } else if (unformat(line_input, "del")) { del = 1; } else { - vec_free(as_array); - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; } } if (!vec_len(as_array)) { - vec_free(as_array); - return clib_error_return (0, "No AS address provided"); + error = clib_error_return (0, "No AS address provided"); + goto done; } lb_garbage_collection(); @@ -130,18 +146,21 @@ lb_as_command_fn (vlib_main_t * vm, if (del) { if ((ret = lb_vip_del_ass(vip_index, as_array, vec_len(as_array)))) { - vec_free(as_array); - return clib_error_return (0, "lb_vip_del_ass error %d", ret); + error = clib_error_return (0, "lb_vip_del_ass error %d", ret); + goto done; } } else { if ((ret = lb_vip_add_ass(vip_index, as_array, vec_len(as_array)))) { - vec_free(as_array); - return clib_error_return (0, "lb_vip_add_ass error %d", ret); + error = clib_error_return (0, "lb_vip_add_ass error %d", ret); + goto done; } } +done: + unformat_free (line_input); vec_free(as_array); - return 0; + + return error; } VLIB_CLI_COMMAND (lb_as_command, static) = @@ -163,6 +182,7 @@ lb_conf_command_fn (vlib_main_t * vm, u32 per_cpu_sticky_buckets_log2 = 0; u32 flow_timeout = lbm->flow_timeout; int ret; + clib_error_t *error = 0; if (!unformat_user (input, unformat_line_input, line_input)) return 0; @@ -181,19 +201,24 @@ lb_conf_command_fn (vlib_main_t * vm, per_cpu_sticky_buckets = 1 << per_cpu_sticky_buckets_log2; } else if (unformat(line_input, "timeout %d", &flow_timeout)) ; - else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + else { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); - lb_garbage_collection(); - if ((ret = lb_conf(&ip4, &ip6, per_cpu_sticky_buckets, flow_timeout))) - return clib_error_return (0, "lb_conf error %d", ret); + if ((ret = lb_conf(&ip4, &ip6, per_cpu_sticky_buckets, flow_timeout))) { + error = clib_error_return (0, "lb_conf error %d", ret); + goto done; + } - return NULL; +done: + unformat_free (line_input); + + return error; } VLIB_CLI_COMMAND (lb_conf_command, static) = diff --git a/src/plugins/sixrd/sixrd.c b/src/plugins/sixrd/sixrd.c index 71fc181f..67a9a3ad 100644 --- a/src/plugins/sixrd/sixrd.c +++ b/src/plugins/sixrd/sixrd.c @@ -192,6 +192,7 @@ sixrd_add_domain_command_fn (vlib_main_t 
*vm, u32 num_m_args = 0; /* Optional arguments */ u32 mtu = 0; + clib_error_t *error = 0; /* Get a line of input. */ if (!unformat_user(input, unformat_line_input, line_input)) @@ -205,19 +206,25 @@ sixrd_add_domain_command_fn (vlib_main_t *vm, num_m_args++; else if (unformat(line_input, "mtu %d", &mtu)) num_m_args++; - else - return clib_error_return(0, "unknown input `%U'", - format_unformat_error, input); + else { + error = clib_error_return(0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free(line_input); - if (num_m_args < 3) - return clib_error_return(0, "mandatory argument(s) missing"); + if (num_m_args < 3) { + error = clib_error_return(0, "mandatory argument(s) missing"); + goto done; + } sixrd_create_domain(&ip6_prefix, ip6_prefix_len, &ip4_prefix, ip4_prefix_len, &ip4_src, &sixrd_domain_index, mtu); - return 0; +done: + unformat_free (line_input); + + return error; } static clib_error_t * @@ -228,6 +235,7 @@ sixrd_del_domain_command_fn (vlib_main_t *vm, unformat_input_t _line_input, *line_input = &_line_input; u32 num_m_args = 0; u32 sixrd_domain_index; + clib_error_t *error = 0; /* Get a line of input. */ if (! unformat_user(input, unformat_line_input, line_input)) @@ -236,18 +244,24 @@ sixrd_del_domain_command_fn (vlib_main_t *vm, while (unformat_check_input(line_input) != UNFORMAT_END_OF_INPUT) { if (unformat(line_input, "index %d", &sixrd_domain_index)) num_m_args++; - else - return clib_error_return(0, "unknown input `%U'", - format_unformat_error, input); + else { + error = clib_error_return(0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free(line_input); - if (num_m_args != 1) - return clib_error_return(0, "mandatory argument(s) missing"); + if (num_m_args != 1) { + error = clib_error_return(0, "mandatory argument(s) missing"); + goto done; + } sixrd_delete_domain(sixrd_domain_index); - return 0; +done: + unformat_free (line_input); + + return error; } static u8 * diff --git a/src/plugins/snat/snat.c b/src/plugins/snat/snat.c index 73854a7a..8c2bacdb 100644 --- a/src/plugins/snat/snat.c +++ b/src/plugins/snat/snat.c @@ -1705,6 +1705,7 @@ add_address_command_fn (vlib_main_t * vm, int i, count; int is_add = 1; int rv = 0; + clib_error_t *error = 0; /* Get a line of input. 
*/ if (!unformat_user (input, unformat_line_input, line_input)) @@ -1721,19 +1722,27 @@ add_address_command_fn (vlib_main_t * vm, else if (unformat (line_input, "del")) is_add = 0; else - return clib_error_return (0, "unknown input '%U'", - format_unformat_error, input); + { + error = clib_error_return (0, "unknown input '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); if (sm->static_mapping_only) - return clib_error_return (0, "static mapping only mode"); + { + error = clib_error_return (0, "static mapping only mode"); + goto done; + } start_host_order = clib_host_to_net_u32 (start_addr.as_u32); end_host_order = clib_host_to_net_u32 (end_addr.as_u32); if (end_host_order < start_host_order) - return clib_error_return (0, "end address less than start address"); + { + error = clib_error_return (0, "end address less than start address"); + goto done; + } count = (end_host_order - start_host_order) + 1; @@ -1755,11 +1764,11 @@ add_address_command_fn (vlib_main_t * vm, switch (rv) { case VNET_API_ERROR_NO_SUCH_ENTRY: - return clib_error_return (0, "S-NAT address not exist."); - break; + error = clib_error_return (0, "S-NAT address not exist."); + goto done; case VNET_API_ERROR_UNSPECIFIED: - return clib_error_return (0, "S-NAT address used in static mapping."); - break; + error = clib_error_return (0, "S-NAT address used in static mapping."); + goto done; default: break; } @@ -1767,7 +1776,10 @@ add_address_command_fn (vlib_main_t * vm, increment_v4_address (&this_addr); } - return 0; +done: + unformat_free (line_input); + + return error; } VLIB_CLI_COMMAND (add_address_command, static) = { @@ -1807,10 +1819,12 @@ snat_feature_command_fn (vlib_main_t * vm, else if (unformat (line_input, "del")) is_del = 1; else - return clib_error_return (0, "unknown input '%U'", - format_unformat_error, input); + { + error = clib_error_return (0, "unknown input '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); if (vec_len (inside_sw_if_indices)) { @@ -1830,6 +1844,8 @@ snat_feature_command_fn (vlib_main_t * vm, } } +done: + unformat_free (line_input); vec_free (inside_sw_if_indices); vec_free (outside_sw_if_indices); @@ -1923,13 +1939,18 @@ add_static_mapping_command_fn (vlib_main_t * vm, else if (unformat (line_input, "del")) is_add = 0; else - return clib_error_return (0, "unknown input: '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "unknown input: '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); if (!addr_only && !proto_set) - return clib_error_return (0, "missing protocol"); + { + error = clib_error_return (0, "missing protocol"); + goto done; + } rv = snat_add_static_mapping(l_addr, e_addr, (u16) l_port, (u16) e_port, vrf_id, addr_only, sw_if_index, proto, is_add); @@ -1937,22 +1958,27 @@ add_static_mapping_command_fn (vlib_main_t * vm, switch (rv) { case VNET_API_ERROR_INVALID_VALUE: - return clib_error_return (0, "External port already in use."); - break; + error = clib_error_return (0, "External port already in use."); + goto done; case VNET_API_ERROR_NO_SUCH_ENTRY: if (is_add) - return clib_error_return (0, "External addres must be allocated."); + error = clib_error_return (0, "External addres must be allocated."); else - return clib_error_return (0, "Mapping not exist."); - break; + error = clib_error_return (0, "Mapping not exist."); + goto done; case VNET_API_ERROR_NO_SUCH_FIB: - return clib_error_return (0, "No such VRF id."); + 
error = clib_error_return (0, "No such VRF id."); + goto done; case VNET_API_ERROR_VALUE_EXIST: - return clib_error_return (0, "Mapping already exist."); + error = clib_error_return (0, "Mapping already exist."); + goto done; default: break; } +done: + unformat_free (line_input); + return error; } @@ -1985,6 +2011,7 @@ set_workers_command_fn (vlib_main_t * vm, unformat_input_t _line_input, *line_input = &_line_input; uword *bitmap = 0; int rv = 0; + clib_error_t *error = 0; /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) @@ -1995,13 +2022,18 @@ set_workers_command_fn (vlib_main_t * vm, if (unformat (line_input, "%U", unformat_bitmap_list, &bitmap)) ; else - return clib_error_return (0, "unknown input '%U'", - format_unformat_error, input); + { + error = clib_error_return (0, "unknown input '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); if (bitmap == 0) - return clib_error_return (0, "List of workers must be specified."); + { + error = clib_error_return (0, "List of workers must be specified."); + goto done; + } rv = snat_set_workers(bitmap); @@ -2010,17 +2042,20 @@ set_workers_command_fn (vlib_main_t * vm, switch (rv) { case VNET_API_ERROR_INVALID_WORKER: - return clib_error_return (0, "Invalid worker(s)."); - break; + error = clib_error_return (0, "Invalid worker(s)."); + goto done; case VNET_API_ERROR_FEATURE_DISABLED: - return clib_error_return (0, + error = clib_error_return (0, "Supported only if 2 or more workes available."); - break; + goto done; default: break; } - return 0; +done: + unformat_free (line_input); + + return error; } /*? @@ -2047,6 +2082,7 @@ snat_ipfix_logging_enable_disable_command_fn (vlib_main_t * vm, u32 src_port = 0; u8 enable = 1; int rv = 0; + clib_error_t *error = 0; /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) @@ -2061,17 +2097,25 @@ snat_ipfix_logging_enable_disable_command_fn (vlib_main_t * vm, else if (unformat (line_input, "disable")) enable = 0; else - return clib_error_return (0, "unknown input '%U'", - format_unformat_error, input); + { + error = clib_error_return (0, "unknown input '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); rv = snat_ipfix_logging_enable_disable (enable, domain_id, (u16) src_port); if (rv) - return clib_error_return (0, "ipfix logging enable failed"); + { + error = clib_error_return (0, "ipfix logging enable failed"); + goto done; + } - return 0; +done: + unformat_free (line_input); + + return error; } /*? @@ -2604,6 +2648,7 @@ snat_add_interface_address_command_fn (vlib_main_t * vm, u32 sw_if_index; int rv; int is_del = 0; + clib_error_t *error = 0; /* Get a line of input. 
*/ if (!unformat_user (input, unformat_line_input, line_input)) @@ -2617,8 +2662,11 @@ snat_add_interface_address_command_fn (vlib_main_t * vm, else if (unformat (line_input, "del")) is_del = 1; else - return clib_error_return (0, "unknown input '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "unknown input '%U'", + format_unformat_error, line_input); + goto done; + } } rv = snat_add_interface_address (sm, sw_if_index, is_del); @@ -2629,10 +2677,15 @@ snat_add_interface_address_command_fn (vlib_main_t * vm, break; default: - return clib_error_return (0, "snat_add_interface_address returned %d", - rv); + error = clib_error_return (0, "snat_add_interface_address returned %d", + rv); + goto done; } - return 0; + +done: + unformat_free (line_input); + + return error; } VLIB_CLI_COMMAND (snat_add_interface_address_command, static) = { diff --git a/src/vlib/threads_cli.c b/src/vlib/threads_cli.c index 54cc1aed..36f8109e 100644 --- a/src/vlib/threads_cli.c +++ b/src/vlib/threads_cli.c @@ -163,21 +163,31 @@ trace_frame_queue (vlib_main_t * vm, unformat_input_t * input, else if (unformat (line_input, "index %u", &index)) ; else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); - if (enable > 1) - return clib_error_return (0, "expecting on or off"); + { + error = clib_error_return (0, "expecting on or off"); + goto done; + } if (vec_len (tm->frame_queue_mains) == 0) - return clib_error_return (0, "no worker handoffs exist"); + { + error = clib_error_return (0, "no worker handoffs exist"); + goto done; + } if (index > vec_len (tm->frame_queue_mains) - 1) - return clib_error_return (0, - "expecting valid worker handoff queue index"); + { + error = clib_error_return (0, + "expecting valid worker handoff queue index"); + goto done; + } fqm = vec_elt_at_index (tm->frame_queue_mains, index); @@ -185,7 +195,7 @@ trace_frame_queue (vlib_main_t * vm, unformat_input_t * input, if (num_fq == 0) { vlib_cli_output (vm, "No frame queues exist\n"); - return error; + goto done; } // Allocate storage for trace if necessary @@ -204,6 +214,10 @@ trace_frame_queue (vlib_main_t * vm, unformat_input_t * input, memset (fqh, 0, sizeof (*fqh)); fqm->vlib_frame_queues[fqix]->trace = enable; } + +done: + unformat_free (line_input); + return error; } @@ -432,28 +446,33 @@ test_frame_queue_nelts (vlib_main_t * vm, unformat_input_t * input, else if (unformat (line_input, "index %u", &index)) ; else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); - if (index > vec_len (tm->frame_queue_mains) - 1) - return clib_error_return (0, - "expecting valid worker handoff queue index"); + { + error = clib_error_return (0, + "expecting valid worker handoff queue index"); + goto done; + } fqm = vec_elt_at_index (tm->frame_queue_mains, index); if ((nelts != 4) && (nelts != 8) && (nelts != 16) && (nelts != 32)) { - return clib_error_return (0, "expecting 4,8,16,32"); + error = clib_error_return (0, "expecting 4,8,16,32"); + goto done; } num_fq = vec_len (fqm->vlib_frame_queues); if (num_fq == 0) { vlib_cli_output (vm, "No frame queues exist\n"); - return error; + goto done; } for (fqix = 0; fqix < num_fq; fqix++) @@ -461,6 +480,9 @@ 
test_frame_queue_nelts (vlib_main_t * vm, unformat_input_t * input, fqm->vlib_frame_queues[fqix]->nelts = nelts; } +done: + unformat_free (line_input); + return error; } @@ -499,15 +521,19 @@ test_frame_queue_threshold (vlib_main_t * vm, unformat_input_t * input, else if (unformat (line_input, "index %u", &index)) ; else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); - if (index > vec_len (tm->frame_queue_mains) - 1) - return clib_error_return (0, - "expecting valid worker handoff queue index"); + { + error = clib_error_return (0, + "expecting valid worker handoff queue index"); + goto done; + } fqm = vec_elt_at_index (tm->frame_queue_mains, index); @@ -515,7 +541,7 @@ test_frame_queue_threshold (vlib_main_t * vm, unformat_input_t * input, if (threshold == ~(u32) 0) { vlib_cli_output (vm, "expecting threshold value\n"); - return error; + goto done; } if (threshold == 0) @@ -525,7 +551,7 @@ test_frame_queue_threshold (vlib_main_t * vm, unformat_input_t * input, if (num_fq == 0) { vlib_cli_output (vm, "No frame queues exist\n"); - return error; + goto done; } for (fqix = 0; fqix < num_fq; fqix++) @@ -533,6 +559,9 @@ test_frame_queue_threshold (vlib_main_t * vm, unformat_input_t * input, fqm->vlib_frame_queues[fqix]->vector_threshold = threshold; } +done: + unformat_free (line_input); + return error; } diff --git a/src/vlib/trace.c b/src/vlib/trace.c index dcdb837f..6d487ae1 100644 --- a/src/vlib/trace.c +++ b/src/vlib/trace.c @@ -372,6 +372,7 @@ cli_add_trace_buffer (vlib_main_t * vm, vlib_trace_node_t *tn; u32 node_index, add; u8 verbose = 0; + clib_error_t *error = 0; if (!unformat_user (input, unformat_line_input, line_input)) return 0; @@ -384,8 +385,11 @@ cli_add_trace_buffer (vlib_main_t * vm, else if (unformat (line_input, "verbose")) verbose = 1; else - return clib_error_create ("expected NODE COUNT, got `%U'", - format_unformat_error, line_input); + { + error = clib_error_create ("expected NODE COUNT, got `%U'", + format_unformat_error, line_input); + goto done; + } } /* *INDENT-OFF* */ @@ -403,7 +407,10 @@ cli_add_trace_buffer (vlib_main_t * vm, })); /* *INDENT-ON* */ - return 0; +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ diff --git a/src/vlib/unix/cli.c b/src/vlib/unix/cli.c index 69fca6ec..88e2453c 100644 --- a/src/vlib/unix/cli.c +++ b/src/vlib/unix/cli.c @@ -2835,6 +2835,7 @@ unix_cli_set_terminal_pager (vlib_main_t * vm, unix_cli_main_t *cm = &unix_cli_main; unix_cli_file_t *cf; unformat_input_t _line_input, *line_input = &_line_input; + clib_error_t *error = 0; if (!unformat_user (input, unformat_line_input, line_input)) return 0; @@ -2852,13 +2853,17 @@ unix_cli_set_terminal_pager (vlib_main_t * vm, "Pager limit set to %u lines; note, this is global.\n", um->cli_pager_buffer_limit); else - return clib_error_return (0, "unknown parameter: `%U`", - format_unformat_error, line_input); + { + error = clib_error_return (0, "unknown parameter: `%U`", + format_unformat_error, line_input); + goto done; + } } +done: unformat_free (line_input); - return 0; + return error; } /*? 
@@ -2886,6 +2891,7 @@ unix_cli_set_terminal_history (vlib_main_t * vm, unix_cli_file_t *cf; unformat_input_t _line_input, *line_input = &_line_input; u32 limit; + clib_error_t *error = 0; if (!unformat_user (input, unformat_line_input, line_input)) return 0; @@ -2901,8 +2907,11 @@ unix_cli_set_terminal_history (vlib_main_t * vm, else if (unformat (line_input, "limit %u", &cf->history_limit)) ; else - return clib_error_return (0, "unknown parameter: `%U`", - format_unformat_error, line_input); + { + error = clib_error_return (0, "unknown parameter: `%U`", + format_unformat_error, line_input); + goto done; + } /* If we reduced history size, or turned it off, purge the history */ limit = cf->has_history ? cf->history_limit : 0; @@ -2914,9 +2923,10 @@ unix_cli_set_terminal_history (vlib_main_t * vm, } } +done: unformat_free (line_input); - return 0; + return error; } /*? diff --git a/src/vnet/devices/af_packet/cli.c b/src/vnet/devices/af_packet/cli.c index 6baa26e1..d4aa7016 100644 --- a/src/vnet/devices/af_packet/cli.c +++ b/src/vnet/devices/af_packet/cli.c @@ -49,6 +49,7 @@ af_packet_create_command_fn (vlib_main_t * vm, unformat_input_t * input, u8 *hw_addr_ptr = 0; u32 sw_if_index; int r; + clib_error_t *error = NULL; /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) @@ -63,29 +64,47 @@ af_packet_create_command_fn (vlib_main_t * vm, unformat_input_t * input, (line_input, "hw-addr %U", unformat_ethernet_address, hwaddr)) hw_addr_ptr = hwaddr; else - return clib_error_return (0, "unknown input `%U'", - format_unformat_error, input); + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); if (host_if_name == NULL) - return clib_error_return (0, "missing host interface name"); + { + error = clib_error_return (0, "missing host interface name"); + goto done; + } r = af_packet_create_if (vm, host_if_name, hw_addr_ptr, &sw_if_index); - vec_free (host_if_name); if (r == VNET_API_ERROR_SYSCALL_ERROR_1) - return clib_error_return (0, "%s (errno %d)", strerror (errno), errno); + { + error = clib_error_return (0, "%s (errno %d)", strerror (errno), errno); + goto done; + } if (r == VNET_API_ERROR_INVALID_INTERFACE) - return clib_error_return (0, "Invalid interface name"); + { + error = clib_error_return (0, "Invalid interface name"); + goto done; + } if (r == VNET_API_ERROR_SUBIF_ALREADY_EXISTS) - return clib_error_return (0, "Interface elready exists"); + { + error = clib_error_return (0, "Interface elready exists"); + goto done; + } vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main (), sw_if_index); - return 0; + +done: + vec_free (host_if_name); + unformat_free (line_input); + + return error; } /*? @@ -124,6 +143,7 @@ af_packet_delete_command_fn (vlib_main_t * vm, unformat_input_t * input, { unformat_input_t _line_input, *line_input = &_line_input; u8 *host_if_name = NULL; + clib_error_t *error = NULL; /* Get a line of input. 
*/ if (!unformat_user (input, unformat_line_input, line_input)) @@ -134,18 +154,26 @@ af_packet_delete_command_fn (vlib_main_t * vm, unformat_input_t * input, if (unformat (line_input, "name %s", &host_if_name)) ; else - return clib_error_return (0, "unknown input `%U'", - format_unformat_error, input); + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); if (host_if_name == NULL) - return clib_error_return (0, "missing host interface name"); + { + error = clib_error_return (0, "missing host interface name"); + goto done; + } af_packet_delete_if (vm, host_if_name); + +done: vec_free (host_if_name); + unformat_free (line_input); - return 0; + return error; } /*? diff --git a/src/vnet/devices/dpdk/cli.c b/src/vnet/devices/dpdk/cli.c index d133cfd9..1fc665ac 100644 --- a/src/vnet/devices/dpdk/cli.c +++ b/src/vnet/devices/dpdk/cli.c @@ -398,7 +398,7 @@ set_dpdk_if_desc (vlib_main_t * vm, unformat_input_t * input, u32 hw_if_index = (u32) ~ 0; u32 nb_rx_desc = (u32) ~ 0; u32 nb_tx_desc = (u32) ~ 0; - clib_error_t *rv; + clib_error_t *error = NULL; if (!unformat_user (input, unformat_line_input, line_input)) return 0; @@ -414,25 +414,37 @@ set_dpdk_if_desc (vlib_main_t * vm, unformat_input_t * input, else if (unformat (line_input, "rx %d", &nb_rx_desc)) ; else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); - if (hw_if_index == (u32) ~ 0) - return clib_error_return (0, "please specify valid interface name"); + { + error = clib_error_return (0, "please specify valid interface name"); + goto done; + } hw = vnet_get_hw_interface (dm->vnet_main, hw_if_index); xd = vec_elt_at_index (dm->devices, hw->dev_instance); if ((xd->flags & DPDK_DEVICE_FLAG_PMD) == 0) - return clib_error_return (0, "number of descriptors can be set only for " - "physical devices"); + { + error = + clib_error_return (0, + "number of descriptors can be set only for " + "physical devices"); + goto done; + } if ((nb_rx_desc == (u32) ~ 0 || nb_rx_desc == xd->nb_rx_desc) && (nb_tx_desc == (u32) ~ 0 || nb_tx_desc == xd->nb_tx_desc)) - return clib_error_return (0, "nothing changed"); + { + error = clib_error_return (0, "nothing changed"); + goto done; + } if (nb_rx_desc != (u32) ~ 0) xd->nb_rx_desc = nb_rx_desc; @@ -440,9 +452,12 @@ set_dpdk_if_desc (vlib_main_t * vm, unformat_input_t * input, if (nb_tx_desc != (u32) ~ 0) xd->nb_tx_desc = nb_tx_desc; - rv = dpdk_port_setup (dm, xd); + error = dpdk_port_setup (dm, xd); + +done: + unformat_free (line_input); - return rv; + return error; } /* *INDENT-OFF* */ @@ -523,6 +538,7 @@ set_dpdk_if_placement (vlib_main_t * vm, unformat_input_t * input, u32 queue = (u32) 0; u32 cpu = (u32) ~ 0; int i; + clib_error_t *error = NULL; if (!unformat_user (input, unformat_line_input, line_input)) return 0; @@ -538,18 +554,25 @@ set_dpdk_if_placement (vlib_main_t * vm, unformat_input_t * input, else if (unformat (line_input, "thread %d", &cpu)) ; else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); - if (hw_if_index == (u32) ~ 0) - return clib_error_return (0, "please specify valid interface name"); + { + error = clib_error_return (0, "please specify 
valid interface name"); + goto done; + } if (cpu < dm->input_cpu_first_index || cpu >= (dm->input_cpu_first_index + dm->input_cpu_count)) - return clib_error_return (0, "please specify valid thread id"); + { + error = clib_error_return (0, "please specify valid thread id"); + goto done; + } hw = vnet_get_hw_interface (dm->vnet_main, hw_if_index); xd = vec_elt_at_index (dm->devices, hw->dev_instance); @@ -563,7 +586,7 @@ set_dpdk_if_placement (vlib_main_t * vm, unformat_input_t * input, queue == dq->queue_id) { if (cpu == i) /* nothing to do */ - return 0; + goto done; vec_del1(dm->devices_by_cpu[i], dq - dm->devices_by_cpu[i]); vec_add2(dm->devices_by_cpu[cpu], dq, 1); @@ -586,13 +609,18 @@ set_dpdk_if_placement (vlib_main_t * vm, unformat_input_t * input, vlib_node_set_state (vlib_mains[cpu], dpdk_input_node.index, VLIB_NODE_STATE_POLLING); - return 0; + goto done; } } /* *INDENT-ON* */ } - return clib_error_return (0, "not found"); + error = clib_error_return (0, "not found"); + +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ @@ -653,6 +681,7 @@ set_dpdk_if_hqos_placement (vlib_main_t * vm, unformat_input_t * input, u32 hw_if_index = (u32) ~ 0; u32 cpu = (u32) ~ 0; int i; + clib_error_t *error = NULL; if (!unformat_user (input, unformat_line_input, line_input)) return 0; @@ -666,18 +695,22 @@ set_dpdk_if_hqos_placement (vlib_main_t * vm, unformat_input_t * input, else if (unformat (line_input, "thread %d", &cpu)) ; else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); - if (hw_if_index == (u32) ~ 0) return clib_error_return (0, "please specify valid interface name"); if (cpu < dm->hqos_cpu_first_index || cpu >= (dm->hqos_cpu_first_index + dm->hqos_cpu_count)) - return clib_error_return (0, "please specify valid thread id"); + { + error = clib_error_return (0, "please specify valid thread id"); + goto done; + } hw = vnet_get_hw_interface (dm->vnet_main, hw_if_index); xd = vec_elt_at_index (dm->devices, hw->dev_instance); @@ -689,7 +722,7 @@ set_dpdk_if_hqos_placement (vlib_main_t * vm, unformat_input_t * input, if (hw_if_index == dm->devices[dq->device].vlib_hw_if_index) { if (cpu == i) /* nothing to do */ - return 0; + goto done; vec_del1 (dm->devices_by_hqos_cpu[i], dq - dm->devices_by_hqos_cpu[i]); @@ -703,12 +736,17 @@ set_dpdk_if_hqos_placement (vlib_main_t * vm, unformat_input_t * input, vec_sort_with_function (dm->devices_by_hqos_cpu[cpu], dpdk_device_queue_sort); - return 0; + goto done; } } } - return clib_error_return (0, "not found"); + error = clib_error_return (0, "not found"); + +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ @@ -732,6 +770,7 @@ set_dpdk_if_hqos_pipe (vlib_main_t * vm, unformat_input_t * input, u32 pipe_id = (u32) ~ 0; u32 profile_id = (u32) ~ 0; int rv; + clib_error_t *error = NULL; if (!unformat_user (input, unformat_line_input, line_input)) return 0; @@ -749,14 +788,18 @@ set_dpdk_if_hqos_pipe (vlib_main_t * vm, unformat_input_t * input, else if (unformat (line_input, "profile %d", &profile_id)) ; else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); - if (hw_if_index == (u32) ~ 0) - return clib_error_return (0, "please specify valid 
interface name"); + { + error = clib_error_return (0, "please specify valid interface name"); + goto done; + } hw = vnet_get_hw_interface (dm->vnet_main, hw_if_index); xd = vec_elt_at_index (dm->devices, hw->dev_instance); @@ -765,9 +808,15 @@ set_dpdk_if_hqos_pipe (vlib_main_t * vm, unformat_input_t * input, rte_sched_pipe_config (xd->hqos_ht->hqos, subport_id, pipe_id, profile_id); if (rv) - return clib_error_return (0, "pipe configuration failed"); + { + error = clib_error_return (0, "pipe configuration failed"); + goto done; + } - return 0; +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ @@ -797,6 +846,7 @@ set_dpdk_if_hqos_subport (vlib_main_t * vm, unformat_input_t * input, .tc_period = 10, }; int rv; + clib_error_t *error = NULL; if (!unformat_user (input, unformat_line_input, line_input)) return 0; @@ -829,23 +879,33 @@ set_dpdk_if_hqos_subport (vlib_main_t * vm, unformat_input_t * input, else if (unformat (line_input, "period %d", &p.tc_period)) ; else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); - if (hw_if_index == (u32) ~ 0) - return clib_error_return (0, "please specify valid interface name"); + { + error = clib_error_return (0, "please specify valid interface name"); + goto done; + } hw = vnet_get_hw_interface (dm->vnet_main, hw_if_index); xd = vec_elt_at_index (dm->devices, hw->dev_instance); rv = rte_sched_subport_config (xd->hqos_ht->hqos, subport_id, &p); if (rv) - return clib_error_return (0, "subport configuration failed"); + { + error = clib_error_return (0, "subport configuration failed"); + goto done; + } - return 0; +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ @@ -872,6 +932,7 @@ set_dpdk_if_hqos_tctbl (vlib_main_t * vm, unformat_input_t * input, u32 queue = (u32) ~ 0; u32 entry = (u32) ~ 0; u32 val, i; + clib_error_t *error = NULL; if (!unformat_user (input, unformat_line_input, line_input)) return 0; @@ -889,20 +950,33 @@ set_dpdk_if_hqos_tctbl (vlib_main_t * vm, unformat_input_t * input, else if (unformat (line_input, "queue %d", &queue)) ; else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); - if (hw_if_index == (u32) ~ 0) - return clib_error_return (0, "please specify valid interface name"); + { + error = clib_error_return (0, "please specify valid interface name"); + goto done; + } if (entry >= 64) - return clib_error_return (0, "invalid entry"); + { + error = clib_error_return (0, "invalid entry"); + goto done; + } if (tc >= RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE) - return clib_error_return (0, "invalid traffic class"); + { + error = clib_error_return (0, "invalid traffic class"); + goto done; + } if (queue >= RTE_SCHED_QUEUES_PER_TRAFFIC_CLASS) - return clib_error_return (0, "invalid traffic class"); + { + error = clib_error_return (0, "invalid traffic class"); + goto done; + } hw = vnet_get_hw_interface (dm->vnet_main, hw_if_index); xd = vec_elt_at_index (dm->devices, hw->dev_instance); @@ -911,7 +985,10 @@ set_dpdk_if_hqos_tctbl (vlib_main_t * vm, unformat_input_t * input, uword *p = hash_get_mem (tm->thread_registrations_by_name, "workers"); /* Should never happen, shut up Coverity warning */ if (p == 0) - return clib_error_return 
(0, "no worker registrations?"); + { + error = clib_error_return (0, "no worker registrations?"); + goto done; + } vlib_thread_registration_t *tr = (vlib_thread_registration_t *) p[0]; int worker_thread_first = tr->first_index; @@ -921,7 +998,10 @@ set_dpdk_if_hqos_tctbl (vlib_main_t * vm, unformat_input_t * input, for (i = 0; i < worker_thread_count; i++) xd->hqos_wt[worker_thread_first + i].hqos_tc_table[entry] = val; - return 0; +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ @@ -939,6 +1019,7 @@ set_dpdk_if_hqos_pktfield (vlib_main_t * vm, unformat_input_t * input, unformat_input_t _line_input, *line_input = &_line_input; vlib_thread_main_t *tm = vlib_get_thread_main (); dpdk_main_t *dm = &dpdk_main; + clib_error_t *error = NULL; /* Device specific data */ struct rte_eth_dev_info dev_info; @@ -984,15 +1065,19 @@ set_dpdk_if_hqos_pktfield (vlib_main_t * vm, unformat_input_t * input, else if (unformat (line_input, "mask %llx", &mask)) ; else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); - /* Get interface */ if (hw_if_index == (u32) ~ 0) - return clib_error_return (0, "please specify valid interface name"); + { + error = clib_error_return (0, "please specify valid interface name"); + goto done; + } hw = vnet_get_hw_interface (dm->vnet_main, hw_if_index); xd = vec_elt_at_index (dm->devices, hw->dev_instance); @@ -1019,7 +1104,7 @@ set_dpdk_if_hqos_pktfield (vlib_main_t * vm, unformat_input_t * input, if (devconf->hqos_enabled == 0) { vlib_cli_output (vm, "HQoS disabled for this interface"); - return 0; + goto done; } n_subports_per_port = devconf->hqos.port.n_subports_per_port; @@ -1028,27 +1113,39 @@ set_dpdk_if_hqos_pktfield (vlib_main_t * vm, unformat_input_t * input, /* Validate packet field configuration: id, offset and mask */ if (id >= 3) - return clib_error_return (0, "invalid packet field id"); + { + error = clib_error_return (0, "invalid packet field id"); + goto done; + } switch (id) { case 0: if (dpdk_hqos_validate_mask (mask, n_subports_per_port) != 0) - return clib_error_return (0, "invalid subport ID mask " - "(n_subports_per_port = %u)", - n_subports_per_port); + { + error = clib_error_return (0, "invalid subport ID mask " + "(n_subports_per_port = %u)", + n_subports_per_port); + goto done; + } break; case 1: if (dpdk_hqos_validate_mask (mask, n_pipes_per_subport) != 0) - return clib_error_return (0, "invalid pipe ID mask " - "(n_pipes_per_subport = %u)", - n_pipes_per_subport); + { + error = clib_error_return (0, "invalid pipe ID mask " + "(n_pipes_per_subport = %u)", + n_pipes_per_subport); + goto done; + } break; case 2: default: if (dpdk_hqos_validate_mask (mask, tctbl_size) != 0) - return clib_error_return (0, "invalid TC table index mask " - "(TC table size = %u)", tctbl_size); + { + error = clib_error_return (0, "invalid TC table index mask " + "(TC table size = %u)", tctbl_size); + goto done; + } } /* Propagate packet field configuration to all workers */ @@ -1075,7 +1172,10 @@ set_dpdk_if_hqos_pktfield (vlib_main_t * vm, unformat_input_t * input, __builtin_ctzll (mask); } - return 0; +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ @@ -1106,6 +1206,7 @@ show_dpdk_if_hqos (vlib_main_t * vm, unformat_input_t * input, dpdk_device_config_t *devconf = 0; vlib_thread_registration_t *tr; uword *p = 0; + clib_error_t *error = NULL; 
if (!unformat_user (input, unformat_line_input, line_input)) return 0; @@ -1117,14 +1218,18 @@ show_dpdk_if_hqos (vlib_main_t * vm, unformat_input_t * input, &hw_if_index)) ; else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); - if (hw_if_index == (u32) ~ 0) - return clib_error_return (0, "please specify interface name!!"); + { + error = clib_error_return (0, "please specify interface name!!"); + goto done; + } hw = vnet_get_hw_interface (dm->vnet_main, hw_if_index); xd = vec_elt_at_index (dm->devices, hw->dev_instance); @@ -1151,7 +1256,7 @@ show_dpdk_if_hqos (vlib_main_t * vm, unformat_input_t * input, if (devconf->hqos_enabled == 0) { vlib_cli_output (vm, "HQoS disabled for this interface"); - return 0; + goto done; } /* Detect the set of worker threads */ @@ -1159,7 +1264,10 @@ show_dpdk_if_hqos (vlib_main_t * vm, unformat_input_t * input, /* Should never happen, shut up Coverity warning */ if (p == 0) - return clib_error_return (0, "no worker registrations?"); + { + error = clib_error_return (0, "no worker registrations?"); + goto done; + } tr = (vlib_thread_registration_t *) p[0]; @@ -1284,7 +1392,10 @@ show_dpdk_if_hqos (vlib_main_t * vm, unformat_input_t * input, } #endif - return 0; +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ @@ -1315,6 +1426,7 @@ show_dpdk_hqos_queue_stats (vlib_main_t * vm, unformat_input_t * input, u32 qindex; struct rte_sched_queue_stats stats; u16 qlen; + clib_error_t *error = NULL; if (!unformat_user (input, unformat_line_input, line_input)) return 0; @@ -1339,14 +1451,18 @@ show_dpdk_hqos_queue_stats (vlib_main_t * vm, unformat_input_t * input, ; else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); - if (hw_if_index == (u32) ~ 0) - return clib_error_return (0, "please specify interface name!!"); + { + error = clib_error_return (0, "please specify interface name!!"); + goto done; + } hw = vnet_get_hw_interface (dm->vnet_main, hw_if_index); xd = vec_elt_at_index (dm->devices, hw->dev_instance); @@ -1373,7 +1489,7 @@ show_dpdk_hqos_queue_stats (vlib_main_t * vm, unformat_input_t * input, if (devconf->hqos_enabled == 0) { vlib_cli_output (vm, "HQoS disabled for this interface"); - return 0; + goto done; } /* @@ -1386,7 +1502,10 @@ show_dpdk_hqos_queue_stats (vlib_main_t * vm, unformat_input_t * input, if (rte_sched_queue_read_stats (xd->hqos_ht->hqos, qindex, &stats, &qlen) != 0) - return clib_error_return (0, "failed to read stats"); + { + error = clib_error_return (0, "failed to read stats"); + goto done; + } vlib_cli_output (vm, "%=24s%=16s", "Stats Parameter", "Value"); vlib_cli_output (vm, "%=24s%=16d", "Packets", stats.n_pkts); @@ -1399,7 +1518,10 @@ show_dpdk_hqos_queue_stats (vlib_main_t * vm, unformat_input_t * input, vlib_cli_output (vm, "%=24s%=16d", "Bytes dropped", stats.n_bytes_dropped); - return 0; +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ diff --git a/src/vnet/devices/dpdk/ipsec/cli.c b/src/vnet/devices/dpdk/ipsec/cli.c index 93df4a64..f9d3a5d0 100644 --- a/src/vnet/devices/dpdk/ipsec/cli.c +++ b/src/vnet/devices/dpdk/ipsec/cli.c @@ -111,6 +111,7 @@ lcore_cryptodev_map_fn (vlib_main_t * vm, unformat_input_t * 
input, { unformat_input_t _line_input, *line_input = &_line_input; u16 detail = 0; + clib_error_t *error = NULL; if (!unformat_user (input, unformat_line_input, line_input)) return 0; @@ -120,15 +121,19 @@ lcore_cryptodev_map_fn (vlib_main_t * vm, unformat_input_t * input, if (unformat (line_input, "verbose")) detail = 1; else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); - dpdk_ipsec_show_mapping (vm, detail); - return 0; +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ diff --git a/src/vnet/devices/netmap/cli.c b/src/vnet/devices/netmap/cli.c index 6157f27c..71363294 100644 --- a/src/vnet/devices/netmap/cli.c +++ b/src/vnet/devices/netmap/cli.c @@ -37,6 +37,7 @@ netmap_create_command_fn (vlib_main_t * vm, unformat_input_t * input, u8 is_pipe = 0; u8 is_master = 0; u32 sw_if_index = ~0; + clib_error_t *error = NULL; /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) @@ -57,30 +58,48 @@ netmap_create_command_fn (vlib_main_t * vm, unformat_input_t * input, else if (unformat (line_input, "slave")) is_master = 0; else - return clib_error_return (0, "unknown input `%U'", - format_unformat_error, input); + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); if (host_if_name == NULL) - return clib_error_return (0, "missing host interface name"); + { + error = clib_error_return (0, "missing host interface name"); + goto done; + } r = netmap_create_if (vm, host_if_name, hw_addr_ptr, is_pipe, is_master, &sw_if_index); if (r == VNET_API_ERROR_SYSCALL_ERROR_1) - return clib_error_return (0, "%s (errno %d)", strerror (errno), errno); + { + error = clib_error_return (0, "%s (errno %d)", strerror (errno), errno); + goto done; + } if (r == VNET_API_ERROR_INVALID_INTERFACE) - return clib_error_return (0, "Invalid interface name"); + { + error = clib_error_return (0, "Invalid interface name"); + goto done; + } if (r == VNET_API_ERROR_SUBIF_ALREADY_EXISTS) - return clib_error_return (0, "Interface already exists"); + { + error = clib_error_return (0, "Interface already exists"); + goto done; + } vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main (), sw_if_index); - return 0; + +done: + unformat_free (line_input); + + return error; } /*? @@ -144,6 +163,7 @@ netmap_delete_command_fn (vlib_main_t * vm, unformat_input_t * input, { unformat_input_t _line_input, *line_input = &_line_input; u8 *host_if_name = NULL; + clib_error_t *error = NULL; /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) @@ -154,17 +174,25 @@ netmap_delete_command_fn (vlib_main_t * vm, unformat_input_t * input, if (unformat (line_input, "name %s", &host_if_name)) ; else - return clib_error_return (0, "unknown input `%U'", - format_unformat_error, input); + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); if (host_if_name == NULL) - return clib_error_return (0, "missing host interface name"); + { + error = clib_error_return (0, "missing host interface name"); + goto done; + } netmap_delete_if (vm, host_if_name); - return 0; +done: + unformat_free (line_input); + + return error; } /*? 
diff --git a/src/vnet/devices/virtio/vhost-user.c b/src/vnet/devices/virtio/vhost-user.c index 315daa77..c43f6e67 100644 --- a/src/vnet/devices/virtio/vhost-user.c +++ b/src/vnet/devices/virtio/vhost-user.c @@ -2682,6 +2682,7 @@ vhost_user_connect_command_fn (vlib_main_t * vm, u32 custom_dev_instance = ~0; u8 hwaddr[6]; u8 *hw = NULL; + clib_error_t *error = NULL; /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) @@ -2704,10 +2705,12 @@ vhost_user_connect_command_fn (vlib_main_t * vm, renumber = 1; } else - return clib_error_return (0, "unknown input `%U'", - format_unformat_error, input); + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); vnet_main_t *vnm = vnet_get_main (); @@ -2716,14 +2719,18 @@ vhost_user_connect_command_fn (vlib_main_t * vm, is_server, &sw_if_index, feature_mask, renumber, custom_dev_instance, hw))) { - vec_free (sock_filename); - return clib_error_return (0, "vhost_user_create_if returned %d", rv); + error = clib_error_return (0, "vhost_user_create_if returned %d", rv); + goto done; } - vec_free (sock_filename); vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main (), sw_if_index); - return 0; + +done: + vec_free (sock_filename); + unformat_free (line_input); + + return error; } clib_error_t * @@ -2734,6 +2741,7 @@ vhost_user_delete_command_fn (vlib_main_t * vm, unformat_input_t _line_input, *line_input = &_line_input; u32 sw_if_index = ~0; vnet_main_t *vnm = vnet_get_main (); + clib_error_t *error = NULL; /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) @@ -2751,15 +2759,25 @@ vhost_user_delete_command_fn (vlib_main_t * vm, vnet_get_sup_hw_interface (vnm, sw_if_index); if (hwif == NULL || vhost_user_dev_class.index != hwif->dev_class_index) - return clib_error_return (0, "Not a vhost interface"); + { + error = clib_error_return (0, "Not a vhost interface"); + goto done; + } } else - return clib_error_return (0, "unknown input `%U'", - format_unformat_error, input); + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); + vhost_user_delete_if (vnm, vm, sw_if_index); - return 0; + +done: + unformat_free (line_input); + + return error; } int @@ -3286,6 +3304,7 @@ vhost_thread_command_fn (vlib_main_t * vm, u32 sw_if_index; u8 del = 0; int rv; + clib_error_t *error = NULL; /* Get a line of input. 
*/ if (!unformat_user (input, unformat_line_input, line_input)) @@ -3295,9 +3314,9 @@ vhost_thread_command_fn (vlib_main_t * vm, (line_input, "%U %d", unformat_vnet_sw_interface, vnet_get_main (), &sw_if_index, &worker_thread_index)) { - unformat_free (line_input); - return clib_error_return (0, "unknown input `%U'", - format_unformat_error, input); + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; } if (unformat (line_input, "del")) @@ -3305,9 +3324,16 @@ vhost_thread_command_fn (vlib_main_t * vm, if ((rv = vhost_user_thread_placement (sw_if_index, worker_thread_index, del))) - return clib_error_return (0, "vhost_user_thread_placement returned %d", - rv); - return 0; + { + error = clib_error_return (0, "vhost_user_thread_placement returned %d", + rv); + goto done; + } + +done: + unformat_free (line_input); + + return error; } diff --git a/src/vnet/gre/interface.c b/src/vnet/gre/interface.c index d624587d..d4476ac4 100644 --- a/src/vnet/gre/interface.c +++ b/src/vnet/gre/interface.c @@ -491,6 +491,7 @@ create_gre_tunnel_command_fn (vlib_main_t * vm, u32 num_m_args = 0; u8 is_add = 1; u32 sw_if_index; + clib_error_t *error = NULL; /* Get a line of input. */ if (! unformat_user (input, unformat_line_input, line_input)) @@ -508,16 +509,24 @@ create_gre_tunnel_command_fn (vlib_main_t * vm, else if (unformat (line_input, "teb")) teb = 1; else - return clib_error_return (0, "unknown input `%U'", - format_unformat_error, input); + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); if (num_m_args < 2) - return clib_error_return (0, "mandatory argument(s) missing"); + { + error = clib_error_return (0, "mandatory argument(s) missing"); + goto done; + } if (memcmp (&src, &dst, sizeof(src)) == 0) - return clib_error_return (0, "src and dst are identical"); + { + error = clib_error_return (0, "src and dst are identical"); + goto done; + } memset (a, 0, sizeof (*a)); a->outer_fib_id = outer_fib_id; @@ -536,15 +545,21 @@ create_gre_tunnel_command_fn (vlib_main_t * vm, vlib_cli_output(vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main(), sw_if_index); break; case VNET_API_ERROR_INVALID_VALUE: - return clib_error_return (0, "GRE tunnel already exists..."); + error = clib_error_return (0, "GRE tunnel already exists..."); + goto done; case VNET_API_ERROR_NO_SUCH_FIB: - return clib_error_return (0, "outer fib ID %d doesn't exist\n", - outer_fib_id); + error = clib_error_return (0, "outer fib ID %d doesn't exist\n", + outer_fib_id); + goto done; default: - return clib_error_return (0, "vnet_gre_add_del_tunnel returned %d", rv); + error = clib_error_return (0, "vnet_gre_add_del_tunnel returned %d", rv); + goto done; } - return 0; +done: + unformat_free (line_input); + + return error; } VLIB_CLI_COMMAND (create_gre_tunnel_command, static) = { diff --git a/src/vnet/ip/ip4_source_check.c b/src/vnet/ip/ip4_source_check.c index d461cc88..3af32f2e 100644 --- a/src/vnet/ip/ip4_source_check.c +++ b/src/vnet/ip/ip4_source_check.c @@ -399,6 +399,8 @@ set_ip_source_check (vlib_main_t * vm, vnet_feature_enable_disable ("ip4-unicast", feature_name, sw_if_index, is_del == 0, &config, sizeof (config)); done: + unformat_free (line_input); + return error; } @@ -531,7 +533,9 @@ ip_source_check_accept (vlib_main_t * vm, } done: - return (error); + unformat_free (line_input); + + return error; } /*? 
diff --git a/src/vnet/ip/ip4_test.c b/src/vnet/ip/ip4_test.c index 45d17113..73dabfdc 100644 --- a/src/vnet/ip/ip4_test.c +++ b/src/vnet/ip/ip4_test.c @@ -143,8 +143,11 @@ thrash (vlib_main_t * vm, else if (unformat (line_input, "verbose")) verbose = 1; else - return clib_error_return (0, "unknown input `%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } } @@ -178,7 +181,7 @@ thrash (vlib_main_t * vm, if (p == 0) { vlib_cli_output (vm, "Couldn't map fib id %d to fib index\n", table_id); - return 0; + goto done; } table_index = p[0]; @@ -294,7 +297,11 @@ thrash (vlib_main_t * vm, pool_free (tm->route_pool); } - return 0; + +done: + unformat_free (line_input); + + return error; } /*? diff --git a/src/vnet/ip/ip6_neighbor.c b/src/vnet/ip/ip6_neighbor.c index 7229591e..6b53137f 100644 --- a/src/vnet/ip/ip6_neighbor.c +++ b/src/vnet/ip/ip6_neighbor.c @@ -2923,7 +2923,10 @@ ip6_neighbor_cmd (vlib_main_t * vm, unformat_input_t * main_input, else if (unformat (line_input, "ra-lifetime")) { if (!unformat (line_input, "%d", &ra_lifetime)) - return (error = unformat_parse_error (line_input)); + { + error = unformat_parse_error (line_input); + goto done; + } use_lifetime = 1; break; } @@ -2931,13 +2934,19 @@ ip6_neighbor_cmd (vlib_main_t * vm, unformat_input_t * main_input, { if (!unformat (line_input, "%d %d", &ra_initial_count, &ra_initial_interval)) - return (error = unformat_parse_error (line_input)); + { + error = unformat_parse_error (line_input); + goto done; + } break; } else if (unformat (line_input, "ra-interval")) { if (!unformat (line_input, "%d", &ra_max_interval)) - return (error = unformat_parse_error (line_input)); + { + error = unformat_parse_error (line_input); + goto done; + } if (!unformat (line_input, "%d", &ra_min_interval)) ra_min_interval = 0; @@ -2949,7 +2958,10 @@ ip6_neighbor_cmd (vlib_main_t * vm, unformat_input_t * main_input, break; } else - return (unformat_parse_error (line_input)); + { + error = unformat_parse_error (line_input); + goto done; + } } if (add_radv_info) @@ -3006,7 +3018,10 @@ ip6_neighbor_cmd (vlib_main_t * vm, unformat_input_t * main_input, else if (unformat (line_input, "no-onlink")) no_onlink = 1; else - return (unformat_parse_error (line_input)); + { + error = unformat_parse_error (line_input); + goto done; + } } ip6_neighbor_ra_prefix (vm, sw_if_index, @@ -3018,9 +3033,9 @@ ip6_neighbor_cmd (vlib_main_t * vm, unformat_input_t * main_input, off_link, no_autoconfig, no_onlink, is_no); } +done: unformat_free (line_input); -done: return error; } diff --git a/src/vnet/ip/lookup.c b/src/vnet/ip/lookup.c index 0ef0e7a6..807b87b6 100644 --- a/src/vnet/ip/lookup.c +++ b/src/vnet/ip/lookup.c @@ -568,8 +568,6 @@ vnet_ip_route_cmd (vlib_main_t * vm, } } - unformat_free (line_input); - if (vec_len (prefixs) == 0) { error = @@ -704,6 +702,7 @@ done: vec_free (dpos); vec_free (prefixs); vec_free (rpaths); + unformat_free (line_input); return error; } @@ -872,8 +871,6 @@ vnet_ip_mroute_cmd (vlib_main_t * vm, } } - unformat_free (line_input); - if (~0 == table_id) { /* @@ -970,6 +967,8 @@ vnet_ip_mroute_cmd (vlib_main_t * vm, (scount * gcount) / (timet[1] - timet[0])); done: + unformat_free (line_input); + return error; } @@ -1149,24 +1148,37 @@ probe_neighbor_address (vlib_main_t * vm, is_ip4 = 0; } else - return clib_error_return (0, "unknown input '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "unknown input '%U'", + 
format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); - if (sw_if_index == ~0) - return clib_error_return (0, "Interface required, not set."); + { + error = clib_error_return (0, "Interface required, not set."); + goto done; + } if (address_set == 0) - return clib_error_return (0, "ip address required, not set."); + { + error = clib_error_return (0, "ip address required, not set."); + goto done; + } if (address_set > 1) - return clib_error_return (0, "Multiple ip addresses not supported."); + { + error = clib_error_return (0, "Multiple ip addresses not supported."); + goto done; + } if (is_ip4) error = ip4_probe_neighbor_wait (vm, &a4, sw_if_index, retry_count); else error = ip6_probe_neighbor_wait (vm, &a6, sw_if_index, retry_count); +done: + unformat_free (line_input); + return error; } diff --git a/src/vnet/ipsec-gre/interface.c b/src/vnet/ipsec-gre/interface.c index 3b6e4ac2..0772ce73 100644 --- a/src/vnet/ipsec-gre/interface.c +++ b/src/vnet/ipsec-gre/interface.c @@ -232,6 +232,7 @@ create_ipsec_gre_tunnel_command_fn (vlib_main_t * vm, vnet_ipsec_gre_add_del_tunnel_args_t _a, *a = &_a; int rv; u32 sw_if_index; + clib_error_t *error = NULL; /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) @@ -250,16 +251,24 @@ create_ipsec_gre_tunnel_command_fn (vlib_main_t * vm, else if (unformat (line_input, "remote-sa %d", &rsa)) num_m_args++; else - return clib_error_return (0, "unknown input `%U'", - format_unformat_error, input); + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); if (num_m_args < 4) - return clib_error_return (0, "mandatory argument(s) missing"); + { + error = clib_error_return (0, "mandatory argument(s) missing"); + goto done; + } if (memcmp (&src, &dst, sizeof (src)) == 0) - return clib_error_return (0, "src and dst are identical"); + { + error = clib_error_return (0, "src and dst are identical"); + goto done; + } memset (a, 0, sizeof (*a)); a->is_add = is_add; @@ -277,14 +286,19 @@ create_ipsec_gre_tunnel_command_fn (vlib_main_t * vm, vnet_get_main (), sw_if_index); break; case VNET_API_ERROR_INVALID_VALUE: - return clib_error_return (0, "GRE tunnel already exists..."); + error = clib_error_return (0, "GRE tunnel already exists..."); + goto done; default: - return clib_error_return (0, - "vnet_ipsec_gre_add_del_tunnel returned %d", - rv); + error = clib_error_return (0, + "vnet_ipsec_gre_add_del_tunnel returned %d", + rv); + goto done; } - return 0; +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ diff --git a/src/vnet/ipsec/ipsec_cli.c b/src/vnet/ipsec/ipsec_cli.c index 3c1e26f2..0e034402 100644 --- a/src/vnet/ipsec/ipsec_cli.c +++ b/src/vnet/ipsec/ipsec_cli.c @@ -32,6 +32,7 @@ set_interface_spd_command_fn (vlib_main_t * vm, u32 sw_if_index = (u32) ~ 0; u32 spd_id; int is_add = 1; + clib_error_t *error = NULL; if (!unformat_user (input, unformat_line_input, line_input)) return 0; @@ -43,14 +44,18 @@ set_interface_spd_command_fn (vlib_main_t * vm, else if (unformat (line_input, "del")) is_add = 0; else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); - - unformat_free (line_input); + { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } ipsec_set_interface_spd (vm, sw_if_index, spd_id, is_add); - return 0; +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ @@ -72,7 
+77,7 @@ ipsec_sa_add_del_command_fn (vlib_main_t * vm, ipsec_sa_t sa; int is_add = ~0; u8 *ck = 0, *ik = 0; - clib_error_t *err = 0; + clib_error_t *error = NULL; memset (&sa, 0, sizeof (sa)); @@ -90,8 +95,11 @@ ipsec_sa_add_del_command_fn (vlib_main_t * vm, else if (unformat (line_input, "esp")) sa.protocol = IPSEC_PROTOCOL_ESP; else if (unformat (line_input, "ah")) - //sa.protocol = IPSEC_PROTOCOL_AH; - return clib_error_return (0, "unsupported security protocol 'AH'"); + { + //sa.protocol = IPSEC_PROTOCOL_AH; + error = clib_error_return (0, "unsupported security protocol 'AH'"); + goto done; + } else if (unformat (line_input, "crypto-key %U", unformat_hex_string, &ck)) sa.crypto_key_len = vec_len (ck); @@ -102,8 +110,12 @@ ipsec_sa_add_del_command_fn (vlib_main_t * vm, { if (sa.crypto_alg < IPSEC_CRYPTO_ALG_AES_CBC_128 || sa.crypto_alg >= IPSEC_CRYPTO_N_ALG) - return clib_error_return (0, "unsupported crypto-alg: '%U'", - format_ipsec_crypto_alg, sa.crypto_alg); + { + error = clib_error_return (0, "unsupported crypto-alg: '%U'", + format_ipsec_crypto_alg, + sa.crypto_alg); + goto done; + } } else if (unformat (line_input, "integ-key %U", unformat_hex_string, &ik)) @@ -113,8 +125,12 @@ ipsec_sa_add_del_command_fn (vlib_main_t * vm, { if (sa.integ_alg < IPSEC_INTEG_ALG_SHA1_96 || sa.integ_alg >= IPSEC_INTEG_N_ALG) - return clib_error_return (0, "unsupported integ-alg: '%U'", - format_ipsec_integ_alg, sa.integ_alg); + { + error = clib_error_return (0, "unsupported integ-alg: '%U'", + format_ipsec_integ_alg, + sa.integ_alg); + goto done; + } } else if (unformat (line_input, "tunnel-src %U", unformat_ip4_address, &sa.tunnel_src_addr.ip4)) @@ -135,12 +151,13 @@ ipsec_sa_add_del_command_fn (vlib_main_t * vm, sa.is_tunnel_ip6 = 1; } else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); - if (sa.crypto_key_len > sizeof (sa.crypto_key)) sa.crypto_key_len = sizeof (sa.crypto_key); @@ -156,14 +173,17 @@ ipsec_sa_add_del_command_fn (vlib_main_t * vm, if (is_add) { ASSERT (im->cb.check_support_cb); - err = im->cb.check_support_cb (&sa); - if (err) - return err; + error = im->cb.check_support_cb (&sa); + if (error) + goto done; } ipsec_add_del_sa (vm, &sa, is_add); - return 0; +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ @@ -183,6 +203,7 @@ ipsec_spd_add_del_command_fn (vlib_main_t * vm, unformat_input_t _line_input, *line_input = &_line_input; u32 spd_id = ~0; int is_add = ~0; + clib_error_t *error = NULL; if (!unformat_user (input, unformat_line_input, line_input)) return 0; @@ -196,18 +217,25 @@ ipsec_spd_add_del_command_fn (vlib_main_t * vm, else if (unformat (line_input, "%u", &spd_id)) ; else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); - if (spd_id == ~0) - return clib_error_return (0, "please specify SPD ID"); + { + error = clib_error_return (0, "please specify SPD ID"); + goto done; + } ipsec_add_del_spd (vm, spd_id, is_add); - return 0; +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ @@ -230,6 +258,7 @@ ipsec_policy_add_del_command_fn (vlib_main_t * vm, int is_add = 0; int is_ip_any = 1; u32 tmp, tmp2; + clib_error_t *error = NULL; memset (&p, 0, sizeof (p)); 
p.lport.stop = p.rport.stop = ~0; @@ -262,7 +291,10 @@ ipsec_policy_add_del_command_fn (vlib_main_t * vm, &p.policy)) { if (p.policy == IPSEC_POLICY_ACTION_RESOLVE) - return clib_error_return (0, "unsupported action: 'resolve'"); + { + error = clib_error_return (0, "unsupported action: 'resolve'"); + goto done; + } } else if (unformat (line_input, "sa %u", &p.sa_id)) ; @@ -300,19 +332,24 @@ ipsec_policy_add_del_command_fn (vlib_main_t * vm, p.rport.stop = tmp2; } else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); - ipsec_add_del_policy (vm, &p, is_add); if (is_ip_any) { p.is_ipv6 = 1; ipsec_add_del_policy (vm, &p, is_add); } - return 0; + +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ @@ -332,6 +369,7 @@ set_ipsec_sa_key_command_fn (vlib_main_t * vm, unformat_input_t _line_input, *line_input = &_line_input; ipsec_sa_t sa; u8 *ck = 0, *ik = 0; + clib_error_t *error = NULL; memset (&sa, 0, sizeof (sa)); @@ -349,12 +387,13 @@ set_ipsec_sa_key_command_fn (vlib_main_t * vm, if (unformat (line_input, "integ-key %U", unformat_hex_string, &ik)) sa.integ_key_len = vec_len (ik); else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); - if (sa.crypto_key_len > sizeof (sa.crypto_key)) sa.crypto_key_len = sizeof (sa.crypto_key); @@ -369,7 +408,10 @@ set_ipsec_sa_key_command_fn (vlib_main_t * vm, ipsec_set_sa_key (vm, &sa); - return 0; +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ @@ -649,6 +691,7 @@ create_ipsec_tunnel_command_fn (vlib_main_t * vm, ipsec_add_del_tunnel_args_t a; int rv; u32 num_m_args = 0; + clib_error_t *error = NULL; memset (&a, 0, sizeof (a)); a.is_add = 1; @@ -673,13 +716,18 @@ create_ipsec_tunnel_command_fn (vlib_main_t * vm, else if (unformat (line_input, "del")) a.is_add = 0; else - return clib_error_return (0, "unknown input `%U'", - format_unformat_error, input); + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); if (num_m_args < 4) - return clib_error_return (0, "mandatory argument(s) missing"); + { + error = clib_error_return (0, "mandatory argument(s) missing"); + goto done; + } rv = ipsec_add_del_tunnel_if (&a); @@ -689,16 +737,21 @@ create_ipsec_tunnel_command_fn (vlib_main_t * vm, break; case VNET_API_ERROR_INVALID_VALUE: if (a.is_add) - return clib_error_return (0, - "IPSec tunnel interface already exists..."); + error = clib_error_return (0, + "IPSec tunnel interface already exists..."); else - return clib_error_return (0, "IPSec tunnel interface not exists..."); + error = clib_error_return (0, "IPSec tunnel interface not exists..."); + goto done; default: - return clib_error_return (0, "ipsec_register_interface returned %d", - rv); + error = clib_error_return (0, "ipsec_register_interface returned %d", + rv); + goto done; } - return 0; +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ @@ -720,6 +773,7 @@ set_interface_key_command_fn (vlib_main_t * vm, u32 hw_if_index = (u32) ~ 0; u32 alg; u8 *key = 0; + clib_error_t *error = NULL; if (!unformat_user (input, unformat_line_input, line_input)) return 0; @@ -748,25 +802,38 @@ 
set_interface_key_command_fn (vlib_main_t * vm, else if (unformat (line_input, "%U", unformat_hex_string, &key)) ; else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); - if (type == IPSEC_IF_SET_KEY_TYPE_NONE) - return clib_error_return (0, "unknown key type"); + { + error = clib_error_return (0, "unknown key type"); + goto done; + } if (alg > 0 && vec_len (key) == 0) - return clib_error_return (0, "key is not specified"); + { + error = clib_error_return (0, "key is not specified"); + goto done; + } if (hw_if_index == (u32) ~ 0) - return clib_error_return (0, "interface not specified"); + { + error = clib_error_return (0, "interface not specified"); + goto done; + } ipsec_set_interface_key (im->vnet_main, hw_if_index, type, alg, key); + +done: vec_free (key); + unformat_free (line_input); - return 0; + return error; } /* *INDENT-OFF* */ diff --git a/src/vnet/l2/l2_patch.c b/src/vnet/l2/l2_patch.c index 5e4691f4..ff3d2f3a 100644 --- a/src/vnet/l2/l2_patch.c +++ b/src/vnet/l2/l2_patch.c @@ -315,6 +315,7 @@ test_patch_command_fn (vlib_main_t * vm, int rx_set = 0; int tx_set = 0; int is_add = 1; + clib_error_t *error = NULL; /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) @@ -335,10 +336,16 @@ test_patch_command_fn (vlib_main_t * vm, } if (rx_set == 0) - return clib_error_return (0, "rx interface not set"); + { + error = clib_error_return (0, "rx interface not set"); + goto done; + } if (tx_set == 0) - return clib_error_return (0, "tx interface not set"); + { + error = clib_error_return (0, "tx interface not set"); + goto done; + } rv = vnet_l2_patch_add_del (rx_sw_if_index, tx_sw_if_index, is_add); @@ -348,17 +355,24 @@ test_patch_command_fn (vlib_main_t * vm, break; case VNET_API_ERROR_INVALID_SW_IF_INDEX: - return clib_error_return (0, "rx interface not a physical port"); + error = clib_error_return (0, "rx interface not a physical port"); + goto done; case VNET_API_ERROR_INVALID_SW_IF_INDEX_2: - return clib_error_return (0, "tx interface not a physical port"); + error = clib_error_return (0, "tx interface not a physical port"); + goto done; default: - return clib_error_return + error = clib_error_return (0, "WARNING: vnet_l2_patch_add_del returned %d", rv); + goto done; } - return 0; + +done: + unformat_free (line_input); + + return error; } /*? 
diff --git a/src/vnet/l2/l2_xcrw.c b/src/vnet/l2/l2_xcrw.c index 70610a85..d08a5d8f 100644 --- a/src/vnet/l2/l2_xcrw.c +++ b/src/vnet/l2/l2_xcrw.c @@ -409,6 +409,7 @@ set_l2_xcrw_command_fn (vlib_main_t * vm, u8 *rw = 0; vnet_main_t *vnm = vnet_get_main (); int rv; + clib_error_t *error = NULL; if (!unformat_user (input, unformat_line_input, line_input)) @@ -416,8 +417,11 @@ set_l2_xcrw_command_fn (vlib_main_t * vm, if (!unformat (line_input, "%U", unformat_vnet_sw_interface, vnm, &l2_sw_if_index)) - return clib_error_return (0, "unknown input '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "unknown input '%U'", + format_unformat_error, line_input); + goto done; + } while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) { @@ -436,7 +440,10 @@ set_l2_xcrw_command_fn (vlib_main_t * vm, } if (next_node_index == ~0) - return clib_error_return (0, "next node not specified"); + { + error = clib_error_return (0, "next node not specified"); + goto done; + } if (tx_fib_id != ~0) { @@ -448,7 +455,11 @@ set_l2_xcrw_command_fn (vlib_main_t * vm, p = hash_get (ip4_main.fib_index_by_table_id, tx_fib_id); if (p == 0) - return clib_error_return (0, "nonexistent tx_fib_id %d", tx_fib_id); + { + error = + clib_error_return (0, "nonexistent tx_fib_id %d", tx_fib_id); + goto done; + } tx_fib_index = p[0]; } @@ -463,16 +474,21 @@ set_l2_xcrw_command_fn (vlib_main_t * vm, break; case VNET_API_ERROR_INVALID_SW_IF_INDEX: - return clib_error_return (0, "%U not cross-connected", - format_vnet_sw_if_index_name, - vnm, l2_sw_if_index); + error = clib_error_return (0, "%U not cross-connected", + format_vnet_sw_if_index_name, + vnm, l2_sw_if_index); + goto done; + default: - return clib_error_return (0, "vnet_configure_l2_xcrw returned %d", rv); + error = clib_error_return (0, "vnet_configure_l2_xcrw returned %d", rv); + goto done; } +done: vec_free (rw); + unformat_free (line_input); - return 0; + return error; } /*? diff --git a/src/vnet/l2tp/l2tp.c b/src/vnet/l2tp/l2tp.c index a4531dab..2d323397 100644 --- a/src/vnet/l2tp/l2tp.c +++ b/src/vnet/l2tp/l2tp.c @@ -427,6 +427,7 @@ create_l2tpv3_tunnel_command_fn (vlib_main_t * vm, u32 sw_if_index; u32 encap_fib_id = ~0; u32 encap_fib_index = ~0; + clib_error_t *error = NULL; /* Get a line of input. 
*/ if (!unformat_user (input, unformat_line_input, line_input)) @@ -455,18 +456,22 @@ create_l2tpv3_tunnel_command_fn (vlib_main_t * vm, else if (unformat (line_input, "l2-sublayer-present")) l2_sublayer_present = 1; else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); - if (encap_fib_id != ~0) { uword *p; ip6_main_t *im = &ip6_main; if (!(p = hash_get (im->fib_index_by_table_id, encap_fib_id))) - return clib_error_return (0, "No fib with id %d", encap_fib_id); + { + error = clib_error_return (0, "No fib with id %d", encap_fib_id); + goto done; + } encap_fib_index = p[0]; } else @@ -475,9 +480,15 @@ create_l2tpv3_tunnel_command_fn (vlib_main_t * vm, } if (our_address_set == 0) - return clib_error_return (0, "our address not specified"); + { + error = clib_error_return (0, "our address not specified"); + goto done; + } if (client_address_set == 0) - return clib_error_return (0, "client address not specified"); + { + error = clib_error_return (0, "client address not specified"); + goto done; + } rv = create_l2tpv3_ipv6_tunnel (lm, &client_address, &our_address, local_session_id, remote_session_id, @@ -491,16 +502,22 @@ create_l2tpv3_tunnel_command_fn (vlib_main_t * vm, vnet_get_main (), sw_if_index); break; case VNET_API_ERROR_INVALID_VALUE: - return clib_error_return (0, "session already exists..."); + error = clib_error_return (0, "session already exists..."); + goto done; case VNET_API_ERROR_NO_SUCH_ENTRY: - return clib_error_return (0, "session does not exist..."); + error = clib_error_return (0, "session does not exist..."); + goto done; default: - return clib_error_return (0, "l2tp_session_add_del returned %d", rv); + error = clib_error_return (0, "l2tp_session_add_del returned %d", rv); + goto done; } - return 0; +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ diff --git a/src/vnet/lisp-cp/lisp_cli.c b/src/vnet/lisp-cp/lisp_cli.c index 25d11c61..05df9fb6 100644 --- a/src/vnet/lisp-cp/lisp_cli.c +++ b/src/vnet/lisp-cp/lisp_cli.c @@ -25,6 +25,7 @@ lisp_show_adjacencies_command_fn (vlib_main_t * vm, vlib_cli_output (vm, "%s %40s\n", "leid", "reid"); unformat_input_t _line_input, *line_input = &_line_input; u32 vni = ~0; + clib_error_t *error = NULL; /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) @@ -38,14 +39,14 @@ lisp_show_adjacencies_command_fn (vlib_main_t * vm, { vlib_cli_output (vm, "parse error: '%U'", format_unformat_error, line_input); - return 0; + goto done; } } if (~0 == vni) { vlib_cli_output (vm, "error: no vni specified!"); - return 0; + goto done; } adjs = vnet_lisp_adjacencies_get_by_vni (vni); @@ -57,7 +58,10 @@ lisp_show_adjacencies_command_fn (vlib_main_t * vm, } vec_free (adjs); - return 0; +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ @@ -77,6 +81,7 @@ lisp_add_del_map_server_command_fn (vlib_main_t * vm, u8 is_add = 1, ip_set = 0; ip_address_t ip; unformat_input_t _line_input, *line_input = &_line_input; + clib_error_t *error = NULL; /* Get a line of input. 
*/ if (!unformat_user (input, unformat_line_input, line_input)) @@ -94,14 +99,14 @@ lisp_add_del_map_server_command_fn (vlib_main_t * vm, { vlib_cli_output (vm, "parse error: '%U'", format_unformat_error, line_input); - return 0; + goto done; } } if (!ip_set) { vlib_cli_output (vm, "map-server ip address not set!"); - return 0; + goto done; } rv = vnet_lisp_add_del_map_server (&ip, is_add); @@ -109,7 +114,10 @@ lisp_add_del_map_server_command_fn (vlib_main_t * vm, vlib_cli_output (vm, "failed to %s map-server!", is_add ? "add" : "delete"); - return 0; +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ @@ -191,7 +199,7 @@ lisp_add_del_local_eid_command_fn (vlib_main_t * vm, unformat_input_t * input, if (key && (0 == key_id)) { vlib_cli_output (vm, "invalid key_id!"); - return 0; + goto done;; } gid_address_copy (&a->eid, &eid); @@ -213,6 +221,8 @@ done: vec_free (locator_set_name); gid_address_free (&a->eid); vec_free (a->key); + unformat_free (line_input); + return error; } @@ -233,6 +243,7 @@ lisp_eid_table_map_command_fn (vlib_main_t * vm, u8 is_add = 1, is_l2 = 0; u32 vni = 0, dp_id = 0; unformat_input_t _line_input, *line_input = &_line_input; + clib_error_t *error = NULL; /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) @@ -250,11 +261,16 @@ lisp_eid_table_map_command_fn (vlib_main_t * vm, is_l2 = 1; else { - return unformat_parse_error (line_input); + error = unformat_parse_error (line_input); + goto done; } } vnet_lisp_eid_table_map (vni, dp_id, is_l2, is_add); - return 0; + +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ @@ -479,7 +495,7 @@ lisp_add_del_adjacency_command_fn (vlib_main_t * vm, unformat_input_t * input, != ip_prefix_version (leid_ippref))) { clib_warning ("remote and local EIDs are of different types!"); - return error; + goto done; } memset (a, 0, sizeof (a[0])); @@ -512,6 +528,7 @@ lisp_map_request_mode_command_fn (vlib_main_t * vm, { unformat_input_t _i, *i = &_i; map_request_mode_t mr_mode = _MR_MODE_MAX; + clib_error_t *error = NULL; /* Get a line of input. 
*/ if (!unformat_user (input, unformat_line_input, i)) @@ -533,12 +550,15 @@ lisp_map_request_mode_command_fn (vlib_main_t * vm, if (_MR_MODE_MAX == mr_mode) { clib_warning ("No LISP map request mode entered!"); - return 0; + goto done; } vnet_lisp_set_map_request_mode (mr_mode); + done: - return 0; + unformat_free (i); + + return error; } /* *INDENT-OFF* */ @@ -630,7 +650,10 @@ lisp_pitr_set_locator_set_command_fn (vlib_main_t * vm, else if (unformat (line_input, "disable")) is_add = 0; else - return clib_error_return (0, "parse error"); + { + error = clib_error_return (0, "parse error"); + goto done; + } } if (!locator_name_set) @@ -648,6 +671,8 @@ lisp_pitr_set_locator_set_command_fn (vlib_main_t * vm, done: if (locator_set_name) vec_free (locator_set_name); + unformat_free (line_input); + return error; } @@ -771,6 +796,7 @@ lisp_show_eid_table_command_fn (vlib_main_t * vm, gid_address_t eid; u8 print_all = 1; u8 filter = 0; + clib_error_t *error = NULL; memset (&eid, 0, sizeof (eid)); @@ -787,8 +813,11 @@ lisp_show_eid_table_command_fn (vlib_main_t * vm, else if (unformat (line_input, "remote")) filter = 2; else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } } vlib_cli_output (vm, "%-35s%-20s%-30s%-20s%-s", @@ -818,7 +847,7 @@ lisp_show_eid_table_command_fn (vlib_main_t * vm, { mi = gid_dictionary_lookup (&lcm->mapping_index_by_gid, &eid); if ((u32) ~ 0 == mi) - return 0; + goto done; mapit = pool_elt_at_index (lcm->mapping_pool, mi); locator_set_t *ls = pool_elt_at_index (lcm->locator_set_pool, @@ -827,14 +856,17 @@ lisp_show_eid_table_command_fn (vlib_main_t * vm, if (filter && !((1 == filter && ls->local) || (2 == filter && !ls->local))) { - return 0; + goto done; } vlib_cli_output (vm, "%U,", format_eid_entry, lcm->vnet_main, lcm, mapit, ls); } - return 0; +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ @@ -853,6 +885,7 @@ lisp_enable_disable_command_fn (vlib_main_t * vm, unformat_input_t * input, unformat_input_t _line_input, *line_input = &_line_input; u8 is_enabled = 0; u8 is_set = 0; + clib_error_t *error = NULL; /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) @@ -869,16 +902,24 @@ lisp_enable_disable_command_fn (vlib_main_t * vm, unformat_input_t * input, is_set = 1; else { - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; } } if (!is_set) - return clib_error_return (0, "state not set"); + { + error = clib_error_return (0, "state not set"); + goto done; + } vnet_lisp_enable_disable (is_enabled); - return 0; + +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ @@ -897,6 +938,7 @@ lisp_map_register_enable_disable_command_fn (vlib_main_t * vm, unformat_input_t _line_input, *line_input = &_line_input; u8 is_enabled = 0; u8 is_set = 0; + clib_error_t *error = NULL; /* Get a line of input. 
*/ if (!unformat_user (input, unformat_line_input, line_input)) @@ -915,18 +957,22 @@ lisp_map_register_enable_disable_command_fn (vlib_main_t * vm, { vlib_cli_output (vm, "parse error: '%U'", format_unformat_error, line_input); - return 0; + goto done; } } if (!is_set) { vlib_cli_output (vm, "state not set!"); - return 0; + goto done; } vnet_lisp_map_register_enable_disable (is_enabled); - return 0; + +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ @@ -945,6 +991,7 @@ lisp_rloc_probe_enable_disable_command_fn (vlib_main_t * vm, unformat_input_t _line_input, *line_input = &_line_input; u8 is_enabled = 0; u8 is_set = 0; + clib_error_t *error = NULL; /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) @@ -963,18 +1010,22 @@ lisp_rloc_probe_enable_disable_command_fn (vlib_main_t * vm, { vlib_cli_output (vm, "parse error: '%U'", format_unformat_error, line_input); - return 0; + goto done; } } if (!is_set) { vlib_cli_output (vm, "state not set!"); - return 0; + goto done; } vnet_lisp_rloc_probe_enable_disable (is_enabled); - return 0; + +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ @@ -1022,6 +1073,7 @@ lisp_show_eid_table_map_command_fn (vlib_main_t * vm, lisp_cp_main_t *lcm = vnet_lisp_cp_get_main (); uword *vni_table = 0; u8 is_l2 = 0; + clib_error_t *error = NULL; /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) @@ -1040,14 +1092,17 @@ lisp_show_eid_table_map_command_fn (vlib_main_t * vm, is_l2 = 0; } else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } } if (!vni_table) { vlib_cli_output (vm, "Error: expected l2|l3 param!\n"); - return 0; + goto done; } vlib_cli_output (vm, "%=10s%=10s", "VNI", is_l2 ? "BD" : "VRF"); @@ -1059,7 +1114,10 @@ lisp_show_eid_table_map_command_fn (vlib_main_t * vm, })); /* *INDENT-ON* */ - return 0; +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ @@ -1131,6 +1189,8 @@ done: vec_free (locators); if (locator_set_name) vec_free (locator_set_name); + unformat_free (line_input); + return error; } @@ -1205,6 +1265,8 @@ lisp_add_del_locator_in_set_command_fn (vlib_main_t * vm, done: vec_free (locators); vec_free (locator_set_name); + unformat_free (line_input); + return error; } @@ -1322,6 +1384,8 @@ lisp_add_del_map_resolver_command_fn (vlib_main_t * vm, } done: + unformat_free (line_input); + return error; } @@ -1372,11 +1436,11 @@ lisp_add_del_mreq_itr_rlocs_command_fn (vlib_main_t * vm, is_add ? 
"add" : "delete"); } +done: vec_free (locator_set_name); + unformat_free (line_input); -done: return error; - } /* *INDENT-OFF* */ @@ -1438,7 +1502,10 @@ lisp_use_petr_set_locator_set_command_fn (vlib_main_t * vm, else if (unformat (line_input, "disable")) is_add = 0; else - return clib_error_return (0, "parse error"); + { + error = clib_error_return (0, "parse error"); + goto done; + } } if (!ip_set) @@ -1454,6 +1521,8 @@ lisp_use_petr_set_locator_set_command_fn (vlib_main_t * vm, } done: + unformat_free (line_input); + return error; } diff --git a/src/vnet/lisp-gpe/interface.c b/src/vnet/lisp-gpe/interface.c index 2142e095..19ac22e7 100644 --- a/src/vnet/lisp-gpe/interface.c +++ b/src/vnet/lisp-gpe/interface.c @@ -794,6 +794,7 @@ lisp_gpe_add_del_iface_command_fn (vlib_main_t * vm, unformat_input_t * input, u32 table_id, vni, bd_id; u8 vni_is_set = 0, vrf_is_set = 0, bd_index_is_set = 0; u8 nsh_iface = 0; + clib_error_t *error = NULL; if (vnet_lisp_gpe_enable_disable_status () == 0) { @@ -828,8 +829,9 @@ lisp_gpe_add_del_iface_command_fn (vlib_main_t * vm, unformat_input_t * input, } else { - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; } } @@ -839,7 +841,8 @@ lisp_gpe_add_del_iface_command_fn (vlib_main_t * vm, unformat_input_t * input, { if (~0 == lisp_gpe_add_nsh_iface (&lisp_gpe_main)) { - return clib_error_return (0, "NSH interface not created"); + error = clib_error_return (0, "NSH interface not created"); + goto done; } } else @@ -850,21 +853,34 @@ lisp_gpe_add_del_iface_command_fn (vlib_main_t * vm, unformat_input_t * input, } if (vrf_is_set && bd_index_is_set) - return clib_error_return (0, - "Cannot set both vrf and brdige domain index!"); + { + error = clib_error_return + (0, "Cannot set both vrf and brdige domain index!"); + goto done; + } if (!vni_is_set) - return clib_error_return (0, "vni must be set!"); + { + error = clib_error_return (0, "vni must be set!"); + goto done; + } if (!vrf_is_set && !bd_index_is_set) - return clib_error_return (0, "vrf or bridge domain index must be set!"); + { + error = + clib_error_return (0, "vrf or bridge domain index must be set!"); + goto done; + } if (bd_index_is_set) { if (is_add) { if (~0 == lisp_gpe_tenant_l2_iface_add_or_lock (vni, bd_id)) - return clib_error_return (0, "L2 interface not created"); + { + error = clib_error_return (0, "L2 interface not created"); + goto done; + } } else lisp_gpe_tenant_l2_iface_unlock (vni); @@ -874,13 +890,35 @@ lisp_gpe_add_del_iface_command_fn (vlib_main_t * vm, unformat_input_t * input, if (is_add) { if (~0 == lisp_gpe_tenant_l3_iface_add_or_lock (vni, table_id)) - return clib_error_return (0, "L3 interface not created"); + { + error = clib_error_return (0, "L3 interface not created"); + goto done; + } } else lisp_gpe_tenant_l3_iface_unlock (vni); } - return (NULL); + if (nsh_iface) + { + if (is_add) + { + if (~0 == lisp_gpe_add_nsh_iface (&lisp_gpe_main)) + { + error = clib_error_return (0, "NSH interface not created"); + goto done; + } + else + { + lisp_gpe_del_nsh_iface (&lisp_gpe_main); + } + } + } + +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ diff --git a/src/vnet/lisp-gpe/lisp_gpe.c b/src/vnet/lisp-gpe/lisp_gpe.c index 1f8afdae..f2fbcbd5 100644 --- a/src/vnet/lisp-gpe/lisp_gpe.c +++ b/src/vnet/lisp-gpe/lisp_gpe.c @@ -218,6 +218,7 @@ lisp_gpe_enable_disable_command_fn (vlib_main_t * vm, unformat_input_t _line_input, 
*line_input = &_line_input; u8 is_en = 1; vnet_lisp_gpe_enable_disable_args_t _a, *a = &_a; + clib_error_t *error = NULL; /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) @@ -231,12 +232,18 @@ lisp_gpe_enable_disable_command_fn (vlib_main_t * vm, is_en = 0; else { - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; } } a->is_en = is_en; - return vnet_lisp_gpe_enable_disable (a); + error = vnet_lisp_gpe_enable_disable (a); + +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ diff --git a/src/vnet/map/map.c b/src/vnet/map/map.c index aeec6a94..a2d28118 100644 --- a/src/vnet/map/map.c +++ b/src/vnet/map/map.c @@ -465,6 +465,8 @@ map_security_check_command_fn (vlib_main_t * vm, { unformat_input_t _line_input, *line_input = &_line_input; map_main_t *mm = &map_main; + clib_error_t *error = NULL; + /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) return 0; @@ -476,11 +478,17 @@ map_security_check_command_fn (vlib_main_t * vm, else if (unformat (line_input, "on")) mm->sec_check = true; else - return clib_error_return (0, "unknown input `%U'", - format_unformat_error, input); + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } + +done: unformat_free (line_input); - return 0; + + return error; } static clib_error_t * @@ -490,6 +498,8 @@ map_security_check_frag_command_fn (vlib_main_t * vm, { unformat_input_t _line_input, *line_input = &_line_input; map_main_t *mm = &map_main; + clib_error_t *error = NULL; + /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) return 0; @@ -501,11 +511,17 @@ map_security_check_frag_command_fn (vlib_main_t * vm, else if (unformat (line_input, "on")) mm->sec_check_frag = true; else - return clib_error_return (0, "unknown input `%U'", - format_unformat_error, input); + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } + +done: unformat_free (line_input); - return 0; + + return error; } static clib_error_t * @@ -523,6 +539,7 @@ map_add_domain_command_fn (vlib_main_t * vm, u32 mtu = 0; u8 flags = 0; ip6_src_len = 128; + clib_error_t *error = NULL; /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) @@ -559,20 +576,28 @@ map_add_domain_command_fn (vlib_main_t * vm, else if (unformat (line_input, "map-t")) flags |= MAP_DOMAIN_TRANSLATION; else - return clib_error_return (0, "unknown input `%U'", - format_unformat_error, input); + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); if (num_m_args < 3) - return clib_error_return (0, "mandatory argument(s) missing"); + { + error = clib_error_return (0, "mandatory argument(s) missing"); + goto done; + } map_create_domain (&ip4_prefix, ip4_prefix_len, &ip6_prefix, ip6_prefix_len, &ip6_src, ip6_src_len, ea_bits_len, psid_offset, psid_length, &map_domain_index, mtu, flags); - return 0; +done: + unformat_free (line_input); + + return error; } static clib_error_t * @@ -582,6 +607,7 @@ map_del_domain_command_fn (vlib_main_t * vm, unformat_input_t _line_input, *line_input = &_line_input; u32 num_m_args = 0; u32 map_domain_index; + clib_error_t *error = NULL; /* Get a line of input. 
*/ if (!unformat_user (input, unformat_line_input, line_input)) @@ -592,17 +618,25 @@ map_del_domain_command_fn (vlib_main_t * vm, if (unformat (line_input, "index %d", &map_domain_index)) num_m_args++; else - return clib_error_return (0, "unknown input `%U'", - format_unformat_error, input); + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); if (num_m_args != 1) - return clib_error_return (0, "mandatory argument(s) missing"); + { + error = clib_error_return (0, "mandatory argument(s) missing"); + goto done; + } map_delete_domain (map_domain_index); - return 0; +done: + unformat_free (line_input); + + return error; } static clib_error_t * @@ -613,6 +647,7 @@ map_add_rule_command_fn (vlib_main_t * vm, ip6_address_t tep; u32 num_m_args = 0; u32 psid = 0, map_domain_index; + clib_error_t *error = NULL; /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) @@ -628,19 +663,29 @@ map_add_rule_command_fn (vlib_main_t * vm, if (unformat (line_input, "ip6-dst %U", unformat_ip6_address, &tep)) num_m_args++; else - return clib_error_return (0, "unknown input `%U'", - format_unformat_error, input); + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); if (num_m_args != 3) - return clib_error_return (0, "mandatory argument(s) missing"); + { + error = clib_error_return (0, "mandatory argument(s) missing"); + goto done; + } if (map_add_del_psid (map_domain_index, psid, &tep, 1) != 0) { - return clib_error_return (0, "Failing to add Mapping Rule"); + error = clib_error_return (0, "Failing to add Mapping Rule"); + goto done; } - return 0; + +done: + unformat_free (line_input); + + return error; } #if MAP_SKIP_IP6_LOOKUP @@ -653,6 +698,7 @@ map_pre_resolve_command_fn (vlib_main_t * vm, ip4_address_t ip4nh; ip6_address_t ip6nh; map_main_t *mm = &map_main; + clib_error_t *error = NULL; memset (&ip4nh, 0, sizeof (ip4nh)); memset (&ip6nh, 0, sizeof (ip6nh)); @@ -669,14 +715,19 @@ map_pre_resolve_command_fn (vlib_main_t * vm, if (unformat (line_input, "ip6-nh %U", unformat_ip6_address, &ip6nh)) mm->preresolve_ip6 = ip6nh; else - return clib_error_return (0, "unknown input `%U'", - format_unformat_error, input); + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); map_pre_resolve (&ip4nh, &ip6nh); - return 0; +done: + unformat_free (line_input); + + return error; } #endif @@ -688,6 +739,7 @@ map_icmp_relay_source_address_command_fn (vlib_main_t * vm, unformat_input_t _line_input, *line_input = &_line_input; ip4_address_t icmp_src_address; map_main_t *mm = &map_main; + clib_error_t *error = NULL; mm->icmp4_src_address.as_u32 = 0; @@ -701,12 +753,17 @@ map_icmp_relay_source_address_command_fn (vlib_main_t * vm, (line_input, "%U", unformat_ip4_address, &icmp_src_address)) mm->icmp4_src_address = icmp_src_address; else - return clib_error_return (0, "unknown input `%U'", - format_unformat_error, input); + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } + +done: unformat_free (line_input); - return 0; + return error; } static clib_error_t * @@ -717,6 +774,7 @@ map_icmp_unreachables_command_fn (vlib_main_t * vm, unformat_input_t _line_input, *line_input = &_line_input; map_main_t *mm = &map_main; int num_m_args = 0; + clib_error_t *error = NULL; /* 
Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) @@ -730,16 +788,21 @@ map_icmp_unreachables_command_fn (vlib_main_t * vm, else if (unformat (line_input, "off")) mm->icmp6_enabled = false; else - return clib_error_return (0, "unknown input `%U'", - format_unformat_error, input); + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); if (num_m_args != 1) - return clib_error_return (0, "mandatory argument(s) missing"); + error = clib_error_return (0, "mandatory argument(s) missing"); - return 0; +done: + unformat_free (line_input); + + return error; } static clib_error_t * @@ -748,6 +811,7 @@ map_fragment_command_fn (vlib_main_t * vm, { unformat_input_t _line_input, *line_input = &_line_input; map_main_t *mm = &map_main; + clib_error_t *error = NULL; /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) @@ -760,12 +824,17 @@ map_fragment_command_fn (vlib_main_t * vm, else if (unformat (line_input, "outer")) mm->frag_inner = false; else - return clib_error_return (0, "unknown input `%U'", - format_unformat_error, input); + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } + +done: unformat_free (line_input); - return 0; + return error; } static clib_error_t * @@ -775,6 +844,7 @@ map_fragment_df_command_fn (vlib_main_t * vm, { unformat_input_t _line_input, *line_input = &_line_input; map_main_t *mm = &map_main; + clib_error_t *error = NULL; /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) @@ -787,12 +857,17 @@ map_fragment_df_command_fn (vlib_main_t * vm, else if (unformat (line_input, "off")) mm->frag_ignore_df = false; else - return clib_error_return (0, "unknown input `%U'", - format_unformat_error, input); + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } + +done: unformat_free (line_input); - return 0; + return error; } static clib_error_t * @@ -803,6 +878,7 @@ map_traffic_class_command_fn (vlib_main_t * vm, unformat_input_t _line_input, *line_input = &_line_input; map_main_t *mm = &map_main; u32 tc = 0; + clib_error_t *error = NULL; mm->tc_copy = false; @@ -817,12 +893,17 @@ map_traffic_class_command_fn (vlib_main_t * vm, else if (unformat (line_input, "%x", &tc)) mm->tc = tc & 0xff; else - return clib_error_return (0, "unknown input `%U'", - format_unformat_error, input); + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } + +done: unformat_free (line_input); - return 0; + return error; } static u8 * @@ -922,6 +1003,7 @@ show_map_domain_command_fn (vlib_main_t * vm, unformat_input_t * input, map_domain_t *d; bool counters = false; u32 map_domain_index = ~0; + clib_error_t *error = NULL; /* Get a line of input. 
*/ if (!unformat_user (input, unformat_line_input, line_input)) @@ -934,10 +1016,12 @@ show_map_domain_command_fn (vlib_main_t * vm, unformat_input_t * input, else if (unformat (line_input, "index %d", &map_domain_index)) ; else - return clib_error_return (0, "unknown input `%U'", - format_unformat_error, input); + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); if (pool_elts (mm->domains) == 0) vlib_cli_output (vm, "No MAP domains are configured..."); @@ -952,15 +1036,19 @@ show_map_domain_command_fn (vlib_main_t * vm, unformat_input_t * input, { if (pool_is_free_index (mm->domains, map_domain_index)) { - return clib_error_return (0, "MAP domain does not exists %d", - map_domain_index); + error = clib_error_return (0, "MAP domain does not exists %d", + map_domain_index); + goto done; } d = pool_elt_at_index (mm->domains, map_domain_index); vlib_cli_output (vm, "%U", format_map_domain, d, counters); } - return 0; +done: + unformat_free (line_input); + + return error; } static clib_error_t * diff --git a/src/vnet/mpls/mpls.c b/src/vnet/mpls/mpls.c index 0e610e17..7ae4aa00 100644 --- a/src/vnet/mpls/mpls.c +++ b/src/vnet/mpls/mpls.c @@ -470,6 +470,8 @@ vnet_mpls_local_label (vlib_main_t * vm, } done: + unformat_free (line_input); + return error; } diff --git a/src/vnet/mpls/mpls_tunnel.c b/src/vnet/mpls/mpls_tunnel.c index 8d1e30a3..e488271d 100644 --- a/src/vnet/mpls/mpls_tunnel.c +++ b/src/vnet/mpls/mpls_tunnel.c @@ -535,6 +535,7 @@ vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm, fib_route_path_t rpath, *rpaths = NULL; mpls_label_t out_label = MPLS_LABEL_INVALID, *labels = NULL; u32 sw_if_index; + clib_error_t *error = NULL; memset(&rpath, 0, sizeof(rpath)); @@ -595,8 +596,11 @@ vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm, else if (unformat (line_input, "l2-only")) l2_only = 1; else - return clib_error_return (0, "unknown input '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "unknown input '%U'", + format_unformat_error, line_input); + goto done; + } } if (is_del) @@ -606,17 +610,22 @@ vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm, else { if (0 == vec_len(labels)) - return clib_error_return (0, "No Output Labels '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "No Output Labels '%U'", + format_unformat_error, line_input); + goto done; + } vec_add1(rpaths, rpath); vnet_mpls_tunnel_add(rpaths, labels, l2_only, &sw_if_index); } +done: vec_free(labels); vec_free(rpaths); + unformat_free (line_input); - return (NULL); + return error; } /*? 
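The hunks above all make the same change: CLI handlers that used to return straight out of the unformat loop now record the error and jump to a done: label, so the line input allocated by unformat_user() is always released. The sketch below is illustrative only and not part of any patch in this series; it distills the resulting handler shape, with a hypothetical my_enable_disable_command_fn standing in for the real commands.

static clib_error_t *
my_enable_disable_command_fn (vlib_main_t * vm,
                              unformat_input_t * input,
                              vlib_cli_command_t * cmd)
{
  unformat_input_t _line_input, *line_input = &_line_input;
  u8 is_en = 1;
  clib_error_t *error = NULL;

  /* Get a line of input; nothing was allocated if there is none. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "enable"))
        is_en = 1;
      else if (unformat (line_input, "disable"))
        is_en = 0;
      else
        {
          /* Record the error and fall through to cleanup instead of
             returning with line_input still allocated. */
          error = clib_error_return (0, "unknown input `%U'",
                                     format_unformat_error, line_input);
          goto done;
        }
    }

  /* ... act on is_en here ... */

done:
  unformat_free (line_input);

  return error;
}

Note that the error message formats line_input rather than input, matching the hunks above, so the text reported to the user is the unparsed remainder of the line actually being scanned.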
diff --git a/src/vnet/pg/cli.c b/src/vnet/pg/cli.c index f5896b43..3c249a7b 100644 --- a/src/vnet/pg/cli.c +++ b/src/vnet/pg/cli.c @@ -547,21 +547,30 @@ pg_capture_cmd_fn (vlib_main_t * vm, else { error = clib_error_create ("unknown input `%U'", - format_unformat_error, input); - return error; + format_unformat_error, line_input); + goto done; } } if (!hi) - return clib_error_return (0, "Please specify interface name"); + { + error = clib_error_return (0, "Please specify interface name"); + goto done; + } if (hi->dev_class_index != pg_dev_class.index) - return clib_error_return (0, "Please specify packet-generator interface"); + { + error = + clib_error_return (0, "Please specify packet-generator interface"); + goto done; + } if (!pcap_file_name && is_disable == 0) - return clib_error_return (0, "Please specify pcap file name"); + { + error = clib_error_return (0, "Please specify pcap file name"); + goto done; + } - unformat_free (line_input); pg_capture_args_t _a, *a = &_a; @@ -572,6 +581,10 @@ pg_capture_cmd_fn (vlib_main_t * vm, a->count = count; error = pg_capture (a); + +done: + unformat_free (line_input); + return error; } @@ -590,6 +603,7 @@ create_pg_if_cmd_fn (vlib_main_t * vm, pg_main_t *pg = &pg_main; unformat_input_t _line_input, *line_input = &_line_input; u32 if_id; + clib_error_t *error = NULL; if (!unformat_user (input, unformat_line_input, line_input)) return 0; @@ -600,14 +614,19 @@ create_pg_if_cmd_fn (vlib_main_t * vm, ; else - return clib_error_create ("unknown input `%U'", - format_unformat_error, input); + { + error = clib_error_create ("unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } + pg_interface_add_or_get (pg, if_id); + +done: unformat_free (line_input); - pg_interface_add_or_get (pg, if_id); - return 0; + return error; } /* *INDENT-OFF* */ diff --git a/src/vnet/policer/node_funcs.c b/src/vnet/policer/node_funcs.c index 1f4997ff..457dd09f 100644 --- a/src/vnet/policer/node_funcs.c +++ b/src/vnet/policer/node_funcs.c @@ -447,6 +447,7 @@ test_policer_command_fn (vlib_main_t * vm, int rx_set = 0; int is_add = 1; int is_show = 0; + clib_error_t *error = NULL; /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) @@ -468,7 +469,10 @@ test_policer_command_fn (vlib_main_t * vm, } if (rx_set == 0) - return clib_error_return (0, "interface not set"); + { + error = clib_error_return (0, "interface not set"); + goto done; + } if (is_show) { @@ -477,12 +481,13 @@ test_policer_command_fn (vlib_main_t * vm, policer = pool_elt_at_index (pm->policers, pi); vlib_cli_output (vm, "%U", format_policer_instance, policer); - return 0; + goto done; } if (is_add && config_name == 0) { - return clib_error_return (0, "policer config name required"); + error = clib_error_return (0, "policer config name required"); + goto done; } rv = test_policer_add_del (rx_sw_if_index, config_name, is_add); @@ -493,11 +498,15 @@ test_policer_command_fn (vlib_main_t * vm, break; default: - return clib_error_return + error = clib_error_return (0, "WARNING: vnet_vnet_policer_add_del returned %d", rv); + goto done; } - return 0; +done: + unformat_free (line_input); + + return error; } /* *INDENT-OFF* */ diff --git a/src/vnet/policer/policer.c b/src/vnet/policer/policer.c index 290a6af5..cd754e29 100644 --- a/src/vnet/policer/policer.c +++ b/src/vnet/policer/policer.c @@ -413,6 +413,7 @@ configure_policer_command_fn (vlib_main_t * vm, u8 is_add = 1; u8 *name = 0; u32 pi; + clib_error_t *error = NULL; /* Get a line of input. 
*/ if (!unformat_user (input, unformat_line_input, line_input)) @@ -433,13 +434,19 @@ configure_policer_command_fn (vlib_main_t * vm, foreach_config_param #undef _ else - return clib_error_return (0, "unknown input `%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } + error = policer_add_del (vm, name, &c, &pi, is_add); + +done: unformat_free (line_input); - return policer_add_del (vm, name, &c, &pi, is_add); + return error; } /* *INDENT-OFF* */ diff --git a/src/vnet/unix/tapcli.c b/src/vnet/unix/tapcli.c index 48e81b50..25c930c6 100644 --- a/src/vnet/unix/tapcli.c +++ b/src/vnet/unix/tapcli.c @@ -1308,6 +1308,7 @@ tap_connect_command_fn (vlib_main_t * vm, int ip6_address_set = 0; u32 ip4_mask_width = 0; u32 ip6_mask_width = 0; + clib_error_t *error = NULL; if (tm->is_disabled) return clib_error_return (0, "device disabled..."); @@ -1336,12 +1337,18 @@ tap_connect_command_fn (vlib_main_t * vm, else if (unformat (line_input, "%s", &intfc_name)) ; else - return clib_error_return (0, "unknown input `%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } } if (intfc_name == 0) - return clib_error_return (0, "interface name must be specified"); + { + error = clib_error_return (0, "interface name must be specified"); + goto done; + } memset (ap, 0, sizeof (*ap)); @@ -1367,48 +1374,64 @@ tap_connect_command_fn (vlib_main_t * vm, switch (rv) { case VNET_API_ERROR_SYSCALL_ERROR_1: - return clib_error_return (0, "Couldn't open /dev/net/tun"); + error = clib_error_return (0, "Couldn't open /dev/net/tun"); + goto done; case VNET_API_ERROR_SYSCALL_ERROR_2: - return clib_error_return (0, "Error setting flags on '%s'", intfc_name); - + error = clib_error_return (0, "Error setting flags on '%s'", intfc_name); + goto done; + case VNET_API_ERROR_SYSCALL_ERROR_3: - return clib_error_return (0, "Couldn't open provisioning socket"); + error = clib_error_return (0, "Couldn't open provisioning socket"); + goto done; case VNET_API_ERROR_SYSCALL_ERROR_4: - return clib_error_return (0, "Couldn't get if_index"); + error = clib_error_return (0, "Couldn't get if_index"); + goto done; case VNET_API_ERROR_SYSCALL_ERROR_5: - return clib_error_return (0, "Couldn't bind provisioning socket"); + error = clib_error_return (0, "Couldn't bind provisioning socket"); + goto done; case VNET_API_ERROR_SYSCALL_ERROR_6: - return clib_error_return (0, "Couldn't set device non-blocking flag"); + error = clib_error_return (0, "Couldn't set device non-blocking flag"); + goto done; case VNET_API_ERROR_SYSCALL_ERROR_7: - return clib_error_return (0, "Couldn't set device MTU"); + error = clib_error_return (0, "Couldn't set device MTU"); + goto done; case VNET_API_ERROR_SYSCALL_ERROR_8: - return clib_error_return (0, "Couldn't get interface flags"); + error = clib_error_return (0, "Couldn't get interface flags"); + goto done; case VNET_API_ERROR_SYSCALL_ERROR_9: - return clib_error_return (0, "Couldn't set intfc admin state up"); + error = clib_error_return (0, "Couldn't set intfc admin state up"); + goto done; case VNET_API_ERROR_SYSCALL_ERROR_10: - return clib_error_return (0, "Couldn't set intfc address/mask"); + error = clib_error_return (0, "Couldn't set intfc address/mask"); + goto done; case VNET_API_ERROR_INVALID_REGISTRATION: - return clib_error_return (0, "Invalid registration"); + error = clib_error_return (0, "Invalid 
registration"); + goto done; case 0: break; default: - return clib_error_return (0, "Unknown error: %d", rv); + error = clib_error_return (0, "Unknown error: %d", rv); + goto done; } vlib_cli_output(vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main(), sw_if_index); - return 0; + +done: + unformat_free (line_input); + + return error; } VLIB_CLI_COMMAND (tap_connect_command, static) = { diff --git a/src/vnet/vxlan-gpe/vxlan_gpe.c b/src/vnet/vxlan-gpe/vxlan_gpe.c index b97510c4..2cba596f 100644 --- a/src/vnet/vxlan-gpe/vxlan_gpe.c +++ b/src/vnet/vxlan-gpe/vxlan_gpe.c @@ -454,6 +454,7 @@ vxlan_gpe_add_del_tunnel_command_fn (vlib_main_t * vm, u32 tmp; vnet_vxlan_gpe_add_del_tunnel_args_t _a, * a = &_a; u32 sw_if_index; + clib_error_t *error = NULL; /* Get a line of input. */ if (! unformat_user (input, unformat_line_input, line_input)) @@ -494,7 +495,10 @@ vxlan_gpe_add_del_tunnel_command_fn (vlib_main_t * vm, encap_fib_index = ip4_fib_index_from_table_id (tmp); if (encap_fib_index == ~0) - return clib_error_return (0, "nonexistent encap fib id %d", tmp); + { + error = clib_error_return (0, "nonexistent encap fib id %d", tmp); + goto done; + } } else if (unformat (line_input, "decap-vrf-id %d", &tmp)) { @@ -504,7 +508,10 @@ vxlan_gpe_add_del_tunnel_command_fn (vlib_main_t * vm, decap_fib_index = ip4_fib_index_from_table_id (tmp); if (decap_fib_index == ~0) - return clib_error_return (0, "nonexistent decap fib id %d", tmp); + { + error = clib_error_return (0, "nonexistent decap fib id %d", tmp); + goto done; + } } else if (unformat (line_input, "vni %d", &vni)) vni_set = 1; @@ -517,27 +524,43 @@ vxlan_gpe_add_del_tunnel_command_fn (vlib_main_t * vm, else if (unformat(line_input, "next-nsh")) protocol = VXLAN_GPE_PROTOCOL_NSH; else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); - if (local_set == 0) - return clib_error_return (0, "tunnel local address not specified"); + { + error = clib_error_return (0, "tunnel local address not specified"); + goto done; + } if (remote_set == 0) - return clib_error_return (0, "tunnel remote address not specified"); + { + error = clib_error_return (0, "tunnel remote address not specified"); + goto done; + } if (ipv4_set && ipv6_set) - return clib_error_return (0, "both IPv4 and IPv6 addresses specified"); + { + error = clib_error_return (0, "both IPv4 and IPv6 addresses specified"); + goto done; + } if ((ipv4_set && memcmp(&local.ip4, &remote.ip4, sizeof(local.ip4)) == 0) || (ipv6_set && memcmp(&local.ip6, &remote.ip6, sizeof(local.ip6)) == 0)) - return clib_error_return (0, "src and dst addresses are identical"); + { + error = clib_error_return (0, "src and dst addresses are identical"); + goto done; + } if (vni_set == 0) - return clib_error_return (0, "vni not specified"); + { + error = clib_error_return (0, "vni not specified"); + goto done; + } memset (a, 0, sizeof (*a)); @@ -558,20 +581,27 @@ vxlan_gpe_add_del_tunnel_command_fn (vlib_main_t * vm, vlib_cli_output(vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main(), sw_if_index); break; case VNET_API_ERROR_INVALID_DECAP_NEXT: - return clib_error_return (0, "invalid decap-next..."); + error = clib_error_return (0, "invalid decap-next..."); + goto done; case VNET_API_ERROR_TUNNEL_EXIST: - return clib_error_return (0, "tunnel already exists..."); + error = clib_error_return (0, "tunnel already exists..."); + goto done; 
case VNET_API_ERROR_NO_SUCH_ENTRY: - return clib_error_return (0, "tunnel does not exist..."); + error = clib_error_return (0, "tunnel does not exist..."); + goto done; default: - return clib_error_return + error = clib_error_return (0, "vnet_vxlan_gpe_add_del_tunnel returned %d", rv); + goto done; } - return 0; +done: + unformat_free (line_input); + + return error; } VLIB_CLI_COMMAND (create_vxlan_gpe_tunnel_command, static) = { diff --git a/src/vnet/vxlan/vxlan.c b/src/vnet/vxlan/vxlan.c index 849fc25d..eedc16f8 100644 --- a/src/vnet/vxlan/vxlan.c +++ b/src/vnet/vxlan/vxlan.c @@ -657,6 +657,7 @@ vxlan_add_del_tunnel_command_fn (vlib_main_t * vm, int rv; vnet_vxlan_add_del_tunnel_args_t _a, * a = &_a; u32 tunnel_sw_if_index; + clib_error_t *error = NULL; /* Cant "universally zero init" (={0}) due to GCC bug 53119 */ memset(&src, 0, sizeof src); @@ -715,7 +716,10 @@ vxlan_add_del_tunnel_command_fn (vlib_main_t * vm, { encap_fib_index = fib_table_find (fib_ip_proto (ipv6_set), tmp); if (encap_fib_index == ~0) - return clib_error_return (0, "nonexistent encap-vrf-id %d", tmp); + { + error = clib_error_return (0, "nonexistent encap-vrf-id %d", tmp); + goto done; + } } else if (unformat (line_input, "decap-next %U", unformat_decap_next, &decap_next_index, ipv4_set)) @@ -723,41 +727,72 @@ vxlan_add_del_tunnel_command_fn (vlib_main_t * vm, else if (unformat (line_input, "vni %d", &vni)) { if (vni >> 24) - return clib_error_return (0, "vni %d out of range", vni); + { + error = clib_error_return (0, "vni %d out of range", vni); + goto done; + } } else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + goto done; + } } - unformat_free (line_input); - if (src_set == 0) - return clib_error_return (0, "tunnel src address not specified"); + { + error = clib_error_return (0, "tunnel src address not specified"); + goto done; + } if (dst_set == 0) - return clib_error_return (0, "tunnel dst address not specified"); + { + error = clib_error_return (0, "tunnel dst address not specified"); + goto done; + } if (grp_set && !ip46_address_is_multicast(&dst)) - return clib_error_return (0, "tunnel group address not multicast"); + { + error = clib_error_return (0, "tunnel group address not multicast"); + goto done; + } if (grp_set == 0 && ip46_address_is_multicast(&dst)) - return clib_error_return (0, "dst address must be unicast"); + { + error = clib_error_return (0, "dst address must be unicast"); + goto done; + } if (grp_set && mcast_sw_if_index == ~0) - return clib_error_return (0, "tunnel nonexistent multicast device"); + { + error = clib_error_return (0, "tunnel nonexistent multicast device"); + goto done; + } if (ipv4_set && ipv6_set) - return clib_error_return (0, "both IPv4 and IPv6 addresses specified"); + { + error = clib_error_return (0, "both IPv4 and IPv6 addresses specified"); + goto done; + } if (ip46_address_cmp(&src, &dst) == 0) - return clib_error_return (0, "src and dst addresses are identical"); + { + error = clib_error_return (0, "src and dst addresses are identical"); + goto done; + } if (decap_next_index == ~0) - return clib_error_return (0, "next node not found"); + { + error = clib_error_return (0, "next node not found"); + goto done; + } if (vni == 0) - return clib_error_return (0, "vni not specified"); + { + error = clib_error_return (0, "vni not specified"); + goto done; + } memset (a, 0, sizeof (*a)); @@ -779,17 +814,23 @@ 
vxlan_add_del_tunnel_command_fn (vlib_main_t * vm, break; case VNET_API_ERROR_TUNNEL_EXIST: - return clib_error_return (0, "tunnel already exists..."); + error = clib_error_return (0, "tunnel already exists..."); + goto done; case VNET_API_ERROR_NO_SUCH_ENTRY: - return clib_error_return (0, "tunnel does not exist..."); + error = clib_error_return (0, "tunnel does not exist..."); + goto done; default: - return clib_error_return + error = clib_error_return (0, "vnet_vxlan_add_del_tunnel returned %d", rv); + goto done; } - return 0; +done: + unformat_free (line_input); + + return error; } /*? @@ -912,6 +953,8 @@ set_ip_vxlan_bypass (u32 is_ip6, vnet_int_vxlan_bypass_mode (sw_if_index, is_ip6, is_enable); done: + unformat_free (line_input); + return error; } diff --git a/src/vpp/app/l2t.c b/src/vpp/app/l2t.c index 45dd2807..e1eda155 100644 --- a/src/vpp/app/l2t.c +++ b/src/vpp/app/l2t.c @@ -254,6 +254,7 @@ l2tp_session_add_command_fn (vlib_main_t * vm, u32 local_session_id = 1, remote_session_id = 1; int our_address_set = 0, client_address_set = 0; int l2_sublayer_present = 0; + clib_error_t *error = NULL; /* Get a line of input. */ if (!unformat_user (input, unformat_line_input, line_input)) @@ -290,8 +291,12 @@ l2tp_session_add_command_fn (vlib_main_t * vm, else if (unformat (line_input, "l2-sublayer-present")) l2_sublayer_present = 1; else - return clib_error_return (0, "parse error: '%U'", - format_unformat_error, line_input); + { + error = clib_error_return (0, "parse error: '%U'", + format_unformat_error, line_input); + unformat_free (line_input); + return error; + } } unformat_free (line_input); diff --git a/src/vpp/app/vpe_cli.c b/src/vpp/app/vpe_cli.c index a26bf71f..94bdc84c 100644 --- a/src/vpp/app/vpe_cli.c +++ b/src/vpp/app/vpe_cli.c @@ -36,6 +36,7 @@ virtual_ip_cmd_fn_command_fn (vlib_main_t * vm, mac_addr_t *mac_addrs = 0; u32 sw_if_index; u32 i; + clib_error_t *error = NULL; next_hops = NULL; rpaths = NULL; @@ -49,7 +50,11 @@ virtual_ip_cmd_fn_command_fn (vlib_main_t * vm, if (!unformat (line_input, "%U %U", unformat_ip4_address, &prefix.fp_addr.ip4, unformat_vnet_sw_interface, vnm, &sw_if_index)) - goto barf; + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) { @@ -67,13 +72,18 @@ virtual_ip_cmd_fn_command_fn (vlib_main_t * vm, } else { - barf: - return clib_error_return (0, "unknown input `%U'", - format_unformat_error, input); + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; } } + if (vec_len (mac_addrs) == 0 || vec_len (mac_addrs) != vec_len (next_hops)) - goto barf; + { + error = clib_error_return (0, "unknown input `%U'", + format_unformat_error, line_input); + goto done; + } /* Create / delete special interface route /32's */ @@ -100,10 +110,12 @@ virtual_ip_cmd_fn_command_fn (vlib_main_t * vm, &prefix, FIB_SOURCE_CLI, FIB_ENTRY_FLAG_NONE, rpaths); +done: vec_free (mac_addrs); vec_free (next_hops); + unformat_free (line_input); - return 0; + return error; } /* *INDENT-OFF* */ -- cgit 1.2.3-korg From 8082380922c65702251d5242058f7b5f35011574 Mon Sep 17 00:00:00 2001 From: Neale Ranns Date: Mon, 20 Feb 2017 18:23:41 -0800 Subject: MAP pre-resolve - use FIB to track pre-resolved next-hop Change-Id: I9ea16881caf7aee57f0daf4ac2e8b82c672f87e9 Signed-off-by: Neale Ranns --- src/vnet/fib/fib_node.h | 2 + src/vnet/map/ip4_map.c | 16 ++--- src/vnet/map/ip6_map.c | 16 ++--- src/vnet/map/map.c | 172 
+++++++++++++++++++++++++++++++++++++++------- src/vnet/map/map.h | 41 +++++++++-- test/test_map.py | 171 +++++++++++++++++++++++++++++++++++++++++++++ test/vpp_papi_provider.py | 28 ++++++++ 7 files changed, 391 insertions(+), 55 deletions(-) create mode 100644 test/test_map.py (limited to 'src/vnet/map') diff --git a/src/vnet/fib/fib_node.h b/src/vnet/fib/fib_node.h index 457dfb7a..496929ad 100644 --- a/src/vnet/fib/fib_node.h +++ b/src/vnet/fib/fib_node.h @@ -41,6 +41,7 @@ typedef enum fib_node_type_t_ { FIB_NODE_TYPE_LISP_ADJ, FIB_NODE_TYPE_GRE_TUNNEL, FIB_NODE_TYPE_VXLAN_TUNNEL, + FIB_NODE_TYPE_MAP_E, /** * Marker. New types before this one. leave the test last. */ @@ -63,6 +64,7 @@ typedef enum fib_node_type_t_ { [FIB_NODE_TYPE_LISP_ADJ] = "lisp-adj", \ [FIB_NODE_TYPE_GRE_TUNNEL] = "gre-tunnel", \ [FIB_NODE_TYPE_VXLAN_TUNNEL] = "vxlan-tunnel", \ + [FIB_NODE_TYPE_MAP_E] = "map-e", \ } /** diff --git a/src/vnet/map/ip4_map.c b/src/vnet/map/ip4_map.c index 9fd10f62..2be9ad37 100644 --- a/src/vnet/map/ip4_map.c +++ b/src/vnet/map/ip4_map.c @@ -173,18 +173,10 @@ static_always_inline bool ip4_map_ip6_lookup_bypass (vlib_buffer_t * p0, ip4_header_t * ip) { #ifdef MAP_SKIP_IP6_LOOKUP - map_main_t *mm = &map_main; - u32 adj_index0 = mm->adj6_index; - if (adj_index0 > 0) + if (FIB_NODE_INDEX_INVALID != pre_resolved[FIB_PROTOCOL_IP6].fei) { - ip_lookup_main_t *lm6 = &ip6_main.lookup_main; - ip_adjacency_t *adj = ip_get_adjacency (lm6, mm->adj6_index); - if (adj->n_adj > 1) - { - u32 hash_c0 = ip4_compute_flow_hash (ip, IP_FLOW_HASH_DEFAULT); - adj_index0 += (hash_c0 & (adj->n_adj - 1)); - } - vnet_buffer (p0)->ip.adj_index[VLIB_TX] = adj_index0; + vnet_buffer (p0)->ip.adj_index[VLIB_TX] = + pre_resolved[FIB_PROTOCOL_IP6].dpo.dpoi_index; return (true); } #endif @@ -773,7 +765,7 @@ VLIB_REGISTER_NODE(ip4_map_node) = { .next_nodes = { [IP4_MAP_NEXT_IP6_LOOKUP] = "ip6-lookup", #ifdef MAP_SKIP_IP6_LOOKUP - [IP4_MAP_NEXT_IP6_REWRITE] = "ip6-rewrite", + [IP4_MAP_NEXT_IP6_REWRITE] = "ip6-load-balance", #endif [IP4_MAP_NEXT_IP4_FRAGMENT] = "ip4-frag", [IP4_MAP_NEXT_IP6_FRAGMENT] = "ip6-frag", diff --git a/src/vnet/map/ip6_map.c b/src/vnet/map/ip6_map.c index d2945059..f7eb768f 100644 --- a/src/vnet/map/ip6_map.c +++ b/src/vnet/map/ip6_map.c @@ -151,18 +151,10 @@ static_always_inline bool ip6_map_ip4_lookup_bypass (vlib_buffer_t * p0, ip4_header_t * ip) { #ifdef MAP_SKIP_IP6_LOOKUP - map_main_t *mm = &map_main; - u32 adj_index0 = mm->adj4_index; - if (adj_index0 > 0) + if (FIB_NODE_INDEX_INVALID != pre_resolved[FIB_PROTOCOL_IP4].fei) { - ip_lookup_main_t *lm4 = &ip4_main.lookup_main; - ip_adjacency_t *adj = ip_get_adjacency (lm4, mm->adj4_index); - if (adj->n_adj > 1) - { - u32 hash_c0 = ip4_compute_flow_hash (ip, IP_FLOW_HASH_DEFAULT); - adj_index0 += (hash_c0 & (adj->n_adj - 1)); - } - vnet_buffer (p0)->ip.adj_index[VLIB_TX] = adj_index0; + vnet_buffer (p0)->ip.adj_index[VLIB_TX] = + pre_resolved[FIB_PROTOCOL_IP4].dpo.dpoi_index; return (true); } #endif @@ -1195,7 +1187,7 @@ VLIB_REGISTER_NODE(ip6_map_node) = { .next_nodes = { [IP6_MAP_NEXT_IP4_LOOKUP] = "ip4-lookup", #ifdef MAP_SKIP_IP6_LOOKUP - [IP6_MAP_NEXT_IP4_REWRITE] = "ip4-rewrite", + [IP6_MAP_NEXT_IP4_REWRITE] = "ip4-load-balance", #endif [IP6_MAP_NEXT_IP6_REASS] = "ip6-map-ip6-reass", [IP6_MAP_NEXT_IP4_REASS] = "ip6-map-ip4-reass", diff --git a/src/vnet/map/map.c b/src/vnet/map/map.c index a2d28118..6823a46e 100644 --- a/src/vnet/map/map.c +++ b/src/vnet/map/map.c @@ -41,6 +41,7 @@ crc_u32 (u32 data, u32 value) } #endif + /* * This code 
supports the following MAP modes: * @@ -437,23 +438,141 @@ map_add_del_psid (u32 map_domain_index, u16 psid, ip6_address_t * tep, } #ifdef MAP_SKIP_IP6_LOOKUP +/** + * Pre-resolvd per-protocol global next-hops + */ +map_main_pre_resolved_t pre_resolved[FIB_PROTOCOL_MAX]; + static void -map_pre_resolve (ip4_address_t * ip4, ip6_address_t * ip6) +map_pre_resolve_init (map_main_pre_resolved_t * pr) { - map_main_t *mm = &map_main; - ip6_main_t *im6 = &ip6_main; + pr->fei = FIB_NODE_INDEX_INVALID; + fib_node_init (&pr->node, FIB_NODE_TYPE_MAP_E); +} + +static u8 * +format_map_pre_resolve (u8 * s, va_list ap) +{ + map_main_pre_resolved_t *pr = va_arg (ap, map_main_pre_resolved_t *); + + if (FIB_NODE_INDEX_INVALID != pr->fei) + { + fib_prefix_t pfx; + + fib_entry_get_prefix (pr->fei, &pfx); + + return (format (s, "%U (%u)", + format_ip46_address, &pfx.fp_addr, IP46_TYPE_ANY, + pr->dpo.dpoi_index)); + } + else + { + return (format (s, "un-set")); + } +} + + +/** + * Function definition to inform the FIB node that its last lock has gone. + */ +static void +map_last_lock_gone (fib_node_t * node) +{ + /* + * The MAP is a root of the graph. As such + * it never has children and thus is never locked. + */ + ASSERT (0); +} + +static map_main_pre_resolved_t * +map_from_fib_node (fib_node_t * node) +{ +#if (CLIB_DEBUG > 0) + ASSERT (FIB_NODE_TYPE_MAP_E == node->fn_type); +#endif + return ((map_main_pre_resolved_t *) + (((char *) node) - + STRUCT_OFFSET_OF (map_main_pre_resolved_t, node))); +} + +static void +map_stack (map_main_pre_resolved_t * pr) +{ + const dpo_id_t *dpo; - if (ip6->as_u64[0] != 0 || ip6->as_u64[1] != 0) + dpo = fib_entry_contribute_ip_forwarding (pr->fei); + + dpo_copy (&pr->dpo, dpo); +} + +/** + * Function definition to backwalk a FIB node + */ +static fib_node_back_walk_rc_t +map_back_walk (fib_node_t * node, fib_node_back_walk_ctx_t * ctx) +{ + map_stack (map_from_fib_node (node)); + + return (FIB_NODE_BACK_WALK_CONTINUE); +} + +/** + * Function definition to get a FIB node from its index + */ +static fib_node_t * +map_fib_node_get (fib_node_index_t index) +{ + return (&pre_resolved[index].node); +} + +/* + * Virtual function table registered by MPLS GRE tunnels + * for participation in the FIB object graph. 
+ */ +const static fib_node_vft_t map_vft = { + .fnv_get = map_fib_node_get, + .fnv_last_lock = map_last_lock_gone, + .fnv_back_walk = map_back_walk, +}; + +static void +map_fib_resolve (map_main_pre_resolved_t * pr, + fib_protocol_t proto, u8 len, const ip46_address_t * addr) +{ + fib_prefix_t pfx = { + .fp_proto = proto, + .fp_len = len, + .fp_addr = *addr, + }; + + pr->fei = fib_table_entry_special_add (0, // default fib + &pfx, + FIB_SOURCE_RR, + FIB_ENTRY_FLAG_NONE, + ADJ_INDEX_INVALID); + pr->sibling = fib_entry_child_add (pr->fei, FIB_NODE_TYPE_MAP_E, proto); + map_stack (pr); +} + +static void +map_pre_resolve (ip4_address_t * ip4, ip6_address_t * ip6) +{ + if (ip6 && (ip6->as_u64[0] != 0 || ip6->as_u64[1] != 0)) { - // FIXME NOT an ADJ - mm->adj6_index = ip6_fib_table_fwding_lookup (im6, 0, ip6); - clib_warning ("FIB lookup results in: %u", mm->adj6_index); + ip46_address_t addr = { + .ip6 = *ip6, + }; + map_fib_resolve (&pre_resolved[FIB_PROTOCOL_IP6], + FIB_PROTOCOL_IP6, 128, &addr); } - if (ip4->as_u32 != 0) + if (ip4 && (ip4->as_u32 != 0)) { - // FIXME NOT an ADJ - mm->adj4_index = ip4_fib_table_lookup_lb (0, ip4); - clib_warning ("FIB lookup results in: %u", mm->adj4_index); + ip46_address_t addr = { + .ip4 = *ip4, + }; + map_fib_resolve (&pre_resolved[FIB_PROTOCOL_IP4], + FIB_PROTOCOL_IP4, 32, &addr); } } #endif @@ -695,9 +814,8 @@ map_pre_resolve_command_fn (vlib_main_t * vm, vlib_cli_command_t * cmd) { unformat_input_t _line_input, *line_input = &_line_input; - ip4_address_t ip4nh; - ip6_address_t ip6nh; - map_main_t *mm = &map_main; + ip4_address_t ip4nh, *p_v4 = NULL; + ip6_address_t ip6nh, *p_v6 = NULL; clib_error_t *error = NULL; memset (&ip4nh, 0, sizeof (ip4nh)); @@ -710,10 +828,10 @@ map_pre_resolve_command_fn (vlib_main_t * vm, while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) { if (unformat (line_input, "ip4-nh %U", unformat_ip4_address, &ip4nh)) - mm->preresolve_ip4 = ip4nh; + p_v4 = &ip4nh; else if (unformat (line_input, "ip6-nh %U", unformat_ip6_address, &ip6nh)) - mm->preresolve_ip6 = ip6nh; + p_v6 = &ip6nh; else { error = clib_error_return (0, "unknown input `%U'", @@ -722,7 +840,7 @@ map_pre_resolve_command_fn (vlib_main_t * vm, } } - map_pre_resolve (&ip4nh, &ip6nh); + map_pre_resolve (p_v4, p_v6); done: unformat_free (line_input); @@ -1113,9 +1231,10 @@ show_map_stats_command_fn (vlib_main_t * vm, unformat_input_t * input, #if MAP_SKIP_IP6_LOOKUP vlib_cli_output (vm, - "MAP pre-resolve: IP6 next-hop: %U (%u), IP4 next-hop: %U (%u)\n", - format_ip6_address, &mm->preresolve_ip6, mm->adj6_index, - format_ip4_address, &mm->preresolve_ip4, mm->adj4_index); + "MAP pre-resolve: IP6 next-hop: %U, IP4 next-hop: %U\n", + format_map_pre_resolve, &pre_resolved[FIB_PROTOCOL_IP6], + format_map_pre_resolve, &pre_resolved[FIB_PROTOCOL_IP4]); + #endif if (mm->tc_copy) @@ -2180,10 +2299,12 @@ map_init (vlib_main_t * vm) mm->vlib_main = vm; #ifdef MAP_SKIP_IP6_LOOKUP - memset (&mm->preresolve_ip4, 0, sizeof (mm->preresolve_ip4)); - memset (&mm->preresolve_ip6, 0, sizeof (mm->preresolve_ip6)); - mm->adj4_index = 0; - mm->adj6_index = 0; + fib_protocol_t proto; + + FOR_EACH_FIB_PROTOCOL (proto) + { + map_pre_resolve_init (&pre_resolved[proto]); + } #endif /* traffic class */ @@ -2238,6 +2359,9 @@ map_init (vlib_main_t * vm) mm->ip6_reass_fifo_last = MAP_REASS_INDEX_NONE; map_ip6_reass_reinit (NULL, NULL); +#ifdef MAP_SKIP_IP6_LOOKUP + fib_node_register_type (FIB_NODE_TYPE_MAP_E, &map_vft); +#endif map_dpo_module_init (); return 0; diff --git 
a/src/vnet/map/map.h b/src/vnet/map/map.h index f446b739..616d42c0 100644 --- a/src/vnet/map/map.h +++ b/src/vnet/map/map.h @@ -198,6 +198,40 @@ typedef struct { map_ip6_fragment_t fragments[MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY]; } map_ip6_reass_t; +#ifdef MAP_SKIP_IP6_LOOKUP +/** + * A pre-resolved next-hop + */ +typedef struct map_main_pre_resolved_t_ +{ + /** + * Linkage into the FIB graph + */ + fib_node_t node; + + /** + * The FIB entry index of the next-hop + */ + fib_node_index_t fei; + + /** + * This object sibling index on the FIB entry's child dependency list + */ + u32 sibling; + + /** + * The Load-balance object index to use to forward + */ + dpo_id_t dpo; +} map_main_pre_resolved_t; + +/** + * Pre-resolved next hops for v4 and v6. Why these are global and not + * per-domain is beyond me. + */ +extern map_main_pre_resolved_t pre_resolved[FIB_PROTOCOL_MAX]; +#endif + typedef struct { /* pool of MAP domains */ map_domain_t *domains; @@ -207,13 +241,6 @@ typedef struct { vlib_combined_counter_main_t *domain_counters; volatile u32 *counter_lock; -#ifdef MAP_SKIP_IP6_LOOKUP - /* pre-presolve */ - u32 adj6_index, adj4_index; - ip4_address_t preresolve_ip4; - ip6_address_t preresolve_ip6; -#endif - /* Traffic class: zero, copy (~0) or fixed value */ u8 tc; bool tc_copy; diff --git a/test/test_map.py b/test/test_map.py new file mode 100644 index 00000000..bc6cd818 --- /dev/null +++ b/test/test_map.py @@ -0,0 +1,171 @@ +#!/usr/bin/env python + +import unittest +import socket + +from framework import VppTestCase, VppTestRunner +from vpp_ip_route import VppIpRoute, VppRoutePath + +from scapy.layers.l2 import Ether, Raw +from scapy.layers.inet import IP, UDP, ICMP +from scapy.layers.inet6 import IPv6 + + +class TestMAP(VppTestCase): + """ MAP Test Case """ + + def setUp(self): + super(TestMAP, self).setUp() + + # create 2 pg interfaces + self.create_pg_interfaces(range(4)) + + # pg0 is 'inside' IPv4 + self.pg0.admin_up() + self.pg0.config_ip4() + self.pg0.resolve_arp() + + # pg1 is 'outside' IPv6 + self.pg1.admin_up() + self.pg1.config_ip6() + self.pg1.generate_remote_hosts(4) + self.pg1.configure_ipv6_neighbors() + + def tearDown(self): + super(TestMAP, self).tearDown() + for i in self.pg_interfaces: + i.unconfig_ip4() + i.unconfig_ip6() + i.admin_down() + + def send_and_assert_no_replies(self, intf, pkts, remark): + intf.add_stream(pkts) + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + for i in self.pg_interfaces: + i.assert_nothing_captured(remark=remark) + + def send_and_assert_encapped(self, tx, ip6_src, ip6_dst, dmac=None): + if not dmac: + dmac = self.pg1.remote_mac + + self.pg0.add_stream(tx) + + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx = self.pg1.get_capture(1) + rx = rx[0] + + self.assertEqual(rx[Ether].dst, dmac) + self.assertEqual(rx[IP].src, tx[IP].src) + self.assertEqual(rx[IPv6].src, ip6_src) + self.assertEqual(rx[IPv6].dst, ip6_dst) + + def test_map_e(self): + """ MAP-E """ + + # + # Add a route to the MAP-BR + # + map_br_pfx = "2001::" + map_br_pfx_len = 64 + map_route = VppIpRoute(self, + map_br_pfx, + map_br_pfx_len, + [VppRoutePath(self.pg1.remote_ip6, + self.pg1.sw_if_index, + is_ip6=1)], + is_ip6=1) + map_route.add_vpp_config() + + # + # Add a domain that maps from pg0 to pg1 + # + map_dst = socket.inet_pton(socket.AF_INET6, map_br_pfx) + map_src = "3001::1" + map_src_n = socket.inet_pton(socket.AF_INET6, map_src) + client_pfx = socket.inet_pton(socket.AF_INET, "192.168.0.0") + + self.vapi.map_add_domain(map_dst, + 
map_br_pfx_len, + map_src_n, + 128, + client_pfx, + 16) + + # + # Fire in a v4 packet that will be encapped to the BR + # + v4 = (Ether(dst=self.pg0.local_mac, src=self.pg0.remote_mac) / + IP(src=self.pg0.remote_ip4, dst='192.168.1.1') / + UDP(sport=20000, dport=10000) / + Raw('\xa5' * 100)) + + self.send_and_assert_encapped(v4, map_src, "2001::c0a8:0:0") + + # + # Fire in a V6 encapped packet. + # expect a decapped packet on the inside ip4 link + # + p = (Ether(dst=self.pg1.local_mac, src=self.pg1.remote_mac) / + IPv6(dst=map_src, src="2001::1") / + IP(dst=self.pg0.remote_ip4, src='192.168.1.1') / + UDP(sport=20000, dport=10000) / + Raw('\xa5' * 100)) + + self.pg1.add_stream(p) + + self.pg_enable_capture(self.pg_interfaces) + self.pg_start() + + rx = self.pg0.get_capture(1) + rx = rx[0] + + self.assertFalse(rx.haslayer(IPv6)) + self.assertEqual(rx[IP].src, p[IP].src) + self.assertEqual(rx[IP].dst, p[IP].dst) + + # + # Pre-resolve. No API for this!! + # + self.vapi.ppcli("map params pre-resolve ip6-nh 4001::1") + + self.send_and_assert_no_replies(self.pg0, v4, + "resovled via default route") + + # + # Add a route to 4001::1. Expect the encapped traffic to be + # sent via that routes next-hop + # + pre_res_route = VppIpRoute(self, + "4001::1", + 128, + [VppRoutePath(self.pg1.remote_hosts[2].ip6, + self.pg1.sw_if_index, + is_ip6=1)], + is_ip6=1) + pre_res_route.add_vpp_config() + + self.send_and_assert_encapped(v4, map_src, + "2001::c0a8:0:0", + dmac=self.pg1.remote_hosts[2].mac) + + # + # change the route to the pre-solved next-hop + # + pre_res_route1 = VppIpRoute(self, + "4001::1", + 128, + [VppRoutePath(self.pg1.remote_hosts[3].ip6, + self.pg1.sw_if_index, + is_ip6=1)], + is_ip6=1) + pre_res_route1.add_vpp_config() + + self.send_and_assert_encapped(v4, map_src, + "2001::c0a8:0:0", + dmac=self.pg1.remote_hosts[3].mac) + +if __name__ == '__main__': + unittest.main(testRunner=VppTestRunner) diff --git a/test/vpp_papi_provider.py b/test/vpp_papi_provider.py index 0062b72b..92070424 100644 --- a/test/vpp_papi_provider.py +++ b/test/vpp_papi_provider.py @@ -1624,3 +1624,31 @@ class VppPapiProvider(object): { 'vni': vni }) + + def map_add_domain(self, + ip6_prefix, + ip6_prefix_len, + ip6_src, + ip6_src_prefix_len, + ip4_prefix, + ip4_prefix_len, + ea_bits_len=0, + psid_offset=0, + psid_length=0, + is_translation=0, + mtu=1280): + return self.api( + self.papi.map_add_domain, + { + 'ip6_prefix': ip6_prefix, + 'ip6_prefix_len': ip6_prefix_len, + 'ip4_prefix': ip4_prefix, + 'ip4_prefix_len': ip4_prefix_len, + 'ip6_src': ip6_src, + 'ip6_src_prefix_len': ip6_src_prefix_len, + 'ea_bits_len': ea_bits_len, + 'psid_offset': psid_offset, + 'psid_length': psid_length, + 'is_translation': is_translation, + 'mtu': mtu + }) -- cgit 1.2.3-korg From 69b7aa424abaec4adae0e9007794cf35a7f9849f Mon Sep 17 00:00:00 2001 From: Neale Ranns Date: Fri, 10 Mar 2017 03:04:12 -0800 Subject: Fix MAP-E UT. Add functionality in MAP code to delete the pre-resolved next-hops. 
UT checks for no-leftover-state now pass Change-Id: I9e980ee117c0b6aebc6c7a0fcc153a7c0eaf0c72 Signed-off-by: Neale Ranns --- src/vnet/map/map.c | 43 +++++++++++++++++++++++++++++++++++++------ test/test_map.py | 20 +++++++++++--------- test/vpp_ip_route.py | 10 +++++++++- 3 files changed, 57 insertions(+), 16 deletions(-) (limited to 'src/vnet/map') diff --git a/src/vnet/map/map.c b/src/vnet/map/map.c index 6823a46e..7006b1db 100644 --- a/src/vnet/map/map.c +++ b/src/vnet/map/map.c @@ -556,23 +556,51 @@ map_fib_resolve (map_main_pre_resolved_t * pr, } static void -map_pre_resolve (ip4_address_t * ip4, ip6_address_t * ip6) +map_fib_unresolve (map_main_pre_resolved_t * pr, + fib_protocol_t proto, u8 len, const ip46_address_t * addr) +{ + fib_prefix_t pfx = { + .fp_proto = proto, + .fp_len = len, + .fp_addr = *addr, + }; + + fib_entry_child_remove (pr->fei, pr->sibling); + + fib_table_entry_special_remove (0, // default fib + &pfx, FIB_SOURCE_RR); + dpo_reset (&pr->dpo); + + pr->fei = FIB_NODE_INDEX_INVALID; + pr->sibling = FIB_NODE_INDEX_INVALID; +} + +static void +map_pre_resolve (ip4_address_t * ip4, ip6_address_t * ip6, int is_del) { if (ip6 && (ip6->as_u64[0] != 0 || ip6->as_u64[1] != 0)) { ip46_address_t addr = { .ip6 = *ip6, }; - map_fib_resolve (&pre_resolved[FIB_PROTOCOL_IP6], - FIB_PROTOCOL_IP6, 128, &addr); + if (is_del) + map_fib_unresolve (&pre_resolved[FIB_PROTOCOL_IP6], + FIB_PROTOCOL_IP6, 128, &addr); + else + map_fib_resolve (&pre_resolved[FIB_PROTOCOL_IP6], + FIB_PROTOCOL_IP6, 128, &addr); } if (ip4 && (ip4->as_u32 != 0)) { ip46_address_t addr = { .ip4 = *ip4, }; - map_fib_resolve (&pre_resolved[FIB_PROTOCOL_IP4], - FIB_PROTOCOL_IP4, 32, &addr); + if (is_del) + map_fib_unresolve (&pre_resolved[FIB_PROTOCOL_IP4], + FIB_PROTOCOL_IP4, 32, &addr); + else + map_fib_resolve (&pre_resolved[FIB_PROTOCOL_IP4], + FIB_PROTOCOL_IP4, 32, &addr); } } #endif @@ -817,6 +845,7 @@ map_pre_resolve_command_fn (vlib_main_t * vm, ip4_address_t ip4nh, *p_v4 = NULL; ip6_address_t ip6nh, *p_v6 = NULL; clib_error_t *error = NULL; + int is_del = 0; memset (&ip4nh, 0, sizeof (ip4nh)); memset (&ip6nh, 0, sizeof (ip6nh)); @@ -832,6 +861,8 @@ map_pre_resolve_command_fn (vlib_main_t * vm, else if (unformat (line_input, "ip6-nh %U", unformat_ip6_address, &ip6nh)) p_v6 = &ip6nh; + else if (unformat (line_input, "del")) + is_del = 1; else { error = clib_error_return (0, "unknown input `%U'", @@ -840,7 +871,7 @@ map_pre_resolve_command_fn (vlib_main_t * vm, } } - map_pre_resolve (p_v4, p_v6); + map_pre_resolve (p_v4, p_v6, is_del); done: unformat_free (line_input); diff --git a/test/test_map.py b/test/test_map.py index 6bcd70e9..9ac3948a 100644 --- a/test/test_map.py +++ b/test/test_map.py @@ -62,7 +62,6 @@ class TestMAP(VppTestCase): self.assertEqual(rx[IPv6].src, ip6_src) self.assertEqual(rx[IPv6].dst, ip6_dst) - @unittest.skip("Doesn't Work") def test_map_e(self): """ MAP-E """ @@ -155,18 +154,21 @@ class TestMAP(VppTestCase): # # change the route to the pre-solved next-hop # - pre_res_route1 = VppIpRoute(self, - "4001::1", - 128, - [VppRoutePath(self.pg1.remote_hosts[3].ip6, - self.pg1.sw_if_index, - is_ip6=1)], - is_ip6=1) - pre_res_route1.add_vpp_config() + pre_res_route.modify([VppRoutePath(self.pg1.remote_hosts[3].ip6, + self.pg1.sw_if_index, + is_ip6=1)]) + pre_res_route.add_vpp_config() self.send_and_assert_encapped(v4, map_src, "2001::c0a8:0:0", dmac=self.pg1.remote_hosts[3].mac) + # + # cleanup. 
The test infra's object registry will ensure + # the route is really gone and thus that the unresolve worked. + # + pre_res_route.remove_vpp_config() + self.vapi.ppcli("map params pre-resolve del ip6-nh 4001::1") + if __name__ == '__main__': unittest.main(testRunner=VppTestRunner) diff --git a/test/vpp_ip_route.py b/test/vpp_ip_route.py index 7a62b230..e1c2b4b4 100644 --- a/test/vpp_ip_route.py +++ b/test/vpp_ip_route.py @@ -79,6 +79,13 @@ class VppIpRoute(VppObject): else: self.dest_addr = inet_pton(AF_INET, dest_addr) + def modify(self, paths, is_local=0, + is_unreach=0, is_prohibit=0): + self.paths = paths + self.is_local = is_local + self.is_unreach = is_unreach + self.is_prohibit = is_prohibit + def add_vpp_config(self): if self.is_local or self.is_unreach or self.is_prohibit: self._test.vapi.ip_add_del_route( @@ -126,7 +133,8 @@ class VppIpRoute(VppObject): path.nh_addr, path.nh_itf, table_id=self.table_id, - is_add=0) + is_add=0, + is_ipv6=self.is_ip6) def query_vpp_config(self): return find_route(self._test, -- cgit 1.2.3-korg From 1bd01099a6512b6119bbf337b36222a6f0770d49 Mon Sep 17 00:00:00 2001 From: Neale Ranns Date: Wed, 15 Mar 2017 15:41:17 -0400 Subject: 64 bit per-thread counters after: TenGigabitEthernet5/0/1-output active 107522 17375708 0 7.22e0 161.60 TenGigabitEthernet5/0/1-tx active 107522 17375708 0 6.93e1 161.60 ip4-input-no-checksum active 107522 17375708 0 2.52e1 161.60 ip4-lookup active 107522 17375708 0 3.10e1 161.60 ip4-rewrite active 107522 17375708 0 2.52e1 161.60 before TenGigabitEthernet5/0/1-output active 433575 110995200 0 6.95e0 256.00 TenGigabitEthernet5/0/1-tx active 433575 110995200 0 7.14e1 256.00 ip4-input-no-checksum active 433575 110995200 0 2.66e1 256.00 ip4-lookup active 433575 110995200 0 3.29e1 256.00 ip4-rewrite active 433575 110995200 0 2.59e1 256.00 Change-Id: I46405bd22189f48a39f06e3443bb7e13f410b539 Signed-off-by: Neale Ranns --- src/vlib/counter.c | 66 +++++++-------- src/vlib/counter.h | 207 ++++++++++++++++------------------------------ src/vnet/ip/ip4_forward.c | 36 ++++---- src/vnet/map/map.c | 2 +- src/vnet/map/map_api.c | 2 +- src/vnet/rewrite.c | 2 +- src/vpp/api/api.c | 2 +- src/vpp/stats/stats.c | 16 ++-- 8 files changed, 132 insertions(+), 201 deletions(-) (limited to 'src/vnet/map') diff --git a/src/vlib/counter.c b/src/vlib/counter.c index 9f66e04d..62f4bd66 100644 --- a/src/vlib/counter.c +++ b/src/vlib/counter.c @@ -42,56 +42,36 @@ void vlib_clear_simple_counters (vlib_simple_counter_main_t * cm) { + counter_t *my_counters; uword i, j; - u16 *my_minis; - for (i = 0; i < vec_len (cm->minis); i++) + for (i = 0; i < vec_len (cm->counters); i++) { - my_minis = cm->minis[i]; + my_counters = cm->counters[i]; - for (j = 0; j < vec_len (my_minis); j++) + for (j = 0; j < vec_len (my_counters); j++) { - cm->maxi[j] += my_minis[j]; - my_minis[j] = 0; + my_counters[j] = 0; } } - - j = vec_len (cm->maxi); - if (j > 0) - vec_validate (cm->value_at_last_clear, j - 1); - for (i = 0; i < j; i++) - cm->value_at_last_clear[i] = cm->maxi[i]; } void vlib_clear_combined_counters (vlib_combined_counter_main_t * cm) { + vlib_counter_t *my_counters; uword i, j; - vlib_mini_counter_t *my_minis; - for (i = 0; i < vec_len (cm->minis); i++) + for (i = 0; i < vec_len (cm->counters); i++) { - my_minis = cm->minis[i]; + my_counters = cm->counters[i]; - for (j = 0; j < vec_len (my_minis); j++) + for (j = 0; j < vec_len (my_counters); j++) { - cm->maxi[j].packets += my_minis[j].packets; - cm->maxi[j].bytes += my_minis[j].bytes; - my_minis[j].packets = 0; - 
my_minis[j].bytes = 0; + my_counters[j].packets = 0; + my_counters[j].bytes = 0; } } - - j = vec_len (cm->maxi); - if (j > 0) - vec_validate (cm->value_at_last_clear, j - 1); - - for (i = 0; i < j; i++) - { - vlib_counter_t *c = vec_elt_at_index (cm->value_at_last_clear, i); - - c[0] = cm->maxi[i]; - } } void @@ -100,10 +80,9 @@ vlib_validate_simple_counter (vlib_simple_counter_main_t * cm, u32 index) vlib_thread_main_t *tm = vlib_get_thread_main (); int i; - vec_validate (cm->minis, tm->n_vlib_mains - 1); + vec_validate (cm->counters, tm->n_vlib_mains - 1); for (i = 0; i < tm->n_vlib_mains; i++) - vec_validate_aligned (cm->minis[i], index, CLIB_CACHE_LINE_BYTES); - vec_validate_aligned (cm->maxi, index, CLIB_CACHE_LINE_BYTES); + vec_validate_aligned (cm->counters[i], index, CLIB_CACHE_LINE_BYTES); } void @@ -112,10 +91,23 @@ vlib_validate_combined_counter (vlib_combined_counter_main_t * cm, u32 index) vlib_thread_main_t *tm = vlib_get_thread_main (); int i; - vec_validate (cm->minis, tm->n_vlib_mains - 1); + vec_validate (cm->counters, tm->n_vlib_mains - 1); for (i = 0; i < tm->n_vlib_mains; i++) - vec_validate_aligned (cm->minis[i], index, CLIB_CACHE_LINE_BYTES); - vec_validate_aligned (cm->maxi, index, CLIB_CACHE_LINE_BYTES); + vec_validate_aligned (cm->counters[i], index, CLIB_CACHE_LINE_BYTES); +} + +u32 +vlib_combined_counter_n_counters (const vlib_combined_counter_main_t * cm) +{ + ASSERT (cm->counters); + return (vec_len (cm->counters[0])); +} + +u32 +vlib_simple_counter_n_counters (const vlib_simple_counter_main_t * cm) +{ + ASSERT (cm->counters); + return (vec_len (cm->counters[0])); } void diff --git a/src/vlib/counter.h b/src/vlib/counter.h index abfa89ee..17a85217 100644 --- a/src/vlib/counter.h +++ b/src/vlib/counter.h @@ -44,59 +44,48 @@ Optimized thread-safe counters. - Each vlib_[simple|combined]_counter_main_t consists of a single - vector of thread-safe / atomically-updated u64 counters [the - "maxi" vector], and a (u16 **) per-thread vector [the "minis" - vector] of narrow, per-thread counters. - - The idea is to drastically reduce the number of atomic operations. - In the case of packet counts, we divide the number of atomic ops - by 2**16, etc. + Each vlib_[simple|combined]_counter_main_t consists of a per-thread + vector of per-object counters. + + The idea is to drastically eliminate atomic operations. */ +/** 64bit counters */ +typedef u64 counter_t; + /** A collection of simple counters */ typedef struct { - u16 **minis; /**< Per-thread u16 non-atomic counters */ - u64 *maxi; /**< Shared wide counters */ - u64 *value_at_last_clear; /**< Counter values as of last clear. */ - u64 *value_at_last_serialize; /**< Values as of last serialize. */ + counter_t **counters; /**< Per-thread u64 non-atomic counters */ + counter_t *value_at_last_serialize; /**< Values as of last serialize. */ u32 last_incremental_serialize_index; /**< Last counter index serialized incrementally. */ char *name; /**< The counter collection's name. 
*/ } vlib_simple_counter_main_t; +/** The number of counters (not the number of per-thread counters) */ +u32 vlib_simple_counter_n_counters (const vlib_simple_counter_main_t * cm); + /** Increment a simple counter @param cm - (vlib_simple_counter_main_t *) simple counter main pointer @param cpu_index - (u32) the current cpu index @param index - (u32) index of the counter to increment - @param increment - (u32) quantitiy to add to the counter + @param increment - (u64) quantitiy to add to the counter */ always_inline void vlib_increment_simple_counter (vlib_simple_counter_main_t * cm, - u32 cpu_index, u32 index, u32 increment) + u32 cpu_index, u32 index, u64 increment) { - u16 *my_minis; - u16 *mini; - u32 old, new; - - my_minis = cm->minis[cpu_index]; - mini = vec_elt_at_index (my_minis, index); - old = mini[0]; - new = old + increment; - mini[0] = new; + counter_t *my_counters; - if (PREDICT_FALSE (mini[0] != new)) - { - __sync_fetch_and_add (&cm->maxi[index], new); - my_minis[index] = 0; - } + my_counters = cm->counters[cpu_index]; + my_counters[index] += increment; } /** Get the value of a simple counter - Scrapes the entire set of mini counters. Innacurate unless + Scrapes the entire set of per-thread counters. Innacurate unless worker threads which might increment the counter are barrier-synchronized @@ -104,30 +93,21 @@ vlib_increment_simple_counter (vlib_simple_counter_main_t * cm, @param index - (u32) index of the counter to fetch @returns - (u64) current counter value */ -always_inline u64 +always_inline counter_t vlib_get_simple_counter (vlib_simple_counter_main_t * cm, u32 index) { - u16 *my_minis, *mini; - u64 v; + counter_t *my_counters; + counter_t v; int i; - ASSERT (index < vec_len (cm->maxi)); + ASSERT (index < vlib_simple_counter_n_counters (cm)); v = 0; - for (i = 0; i < vec_len (cm->minis); i++) + for (i = 0; i < vec_len (cm->counters); i++) { - my_minis = cm->minis[i]; - mini = vec_elt_at_index (my_minis, index); - v += mini[0]; - } - - v += cm->maxi[index]; - - if (index < vec_len (cm->value_at_last_clear)) - { - ASSERT (v >= cm->value_at_last_clear[index]); - v -= cm->value_at_last_clear[index]; + my_counters = cm->counters[i]; + v += my_counters[index]; } return v; @@ -142,29 +122,24 @@ vlib_get_simple_counter (vlib_simple_counter_main_t * cm, u32 index) always_inline void vlib_zero_simple_counter (vlib_simple_counter_main_t * cm, u32 index) { - u16 *my_minis; + counter_t *my_counters; int i; - ASSERT (index < vec_len (cm->maxi)); + ASSERT (index < vlib_simple_counter_n_counters (cm)); - for (i = 0; i < vec_len (cm->minis); i++) + for (i = 0; i < vec_len (cm->counters); i++) { - my_minis = cm->minis[i]; - my_minis[index] = 0; + my_counters = cm->counters[i]; + my_counters[index] = 0; } - - cm->maxi[index] = 0; - - if (index < vec_len (cm->value_at_last_clear)) - cm->value_at_last_clear[index] = 0; } /** Combined counter to hold both packets and byte differences. 
*/ typedef struct { - u64 packets; /**< packet counter */ - u64 bytes; /**< byte counter */ + counter_t packets; /**< packet counter */ + counter_t bytes; /**< byte counter */ } vlib_counter_t; /** Add two combined counters, results in the first counter @@ -201,24 +176,19 @@ vlib_counter_zero (vlib_counter_t * a) a->packets = a->bytes = 0; } -/** Mini combined counter */ -typedef struct -{ - u16 packets; /**< Packet count */ - i16 bytes; /**< Byte count */ -} vlib_mini_counter_t; - /** A collection of combined counters */ typedef struct { - vlib_mini_counter_t **minis; /**< Per-thread u16 non-atomic counter pairs */ - vlib_counter_t *maxi; /**< Shared wide counter pairs */ - vlib_counter_t *value_at_last_clear; /**< Counter values as of last clear. */ + vlib_counter_t **counters; /**< Per-thread u64 non-atomic counter pairs */ vlib_counter_t *value_at_last_serialize; /**< Counter values as of last serialize. */ u32 last_incremental_serialize_index; /**< Last counter index serialized incrementally. */ char *name; /**< The counter collection's name. */ } vlib_combined_counter_main_t; +/** The number of counters (not the number of per-thread counters) */ +u32 vlib_combined_counter_n_counters (const vlib_combined_counter_main_t * + cm); + /** Clear a collection of simple counters @param cm - (vlib_simple_counter_main_t *) collection to clear */ @@ -233,62 +203,41 @@ void vlib_clear_combined_counters (vlib_combined_counter_main_t * cm); @param cm - (vlib_combined_counter_main_t *) comined counter main pointer @param cpu_index - (u32) the current cpu index @param index - (u32) index of the counter to increment - @param packet_increment - (u32) number of packets to add to the counter - @param byte_increment - (u32) number of bytes to add to the counter + @param packet_increment - (u64) number of packets to add to the counter + @param byte_increment - (u64) number of bytes to add to the counter */ always_inline void vlib_increment_combined_counter (vlib_combined_counter_main_t * cm, u32 cpu_index, - u32 index, - u32 packet_increment, u32 byte_increment) + u32 index, u64 n_packets, u64 n_bytes) { - vlib_mini_counter_t *my_minis, *mini; - u32 old_packets, new_packets; - i32 old_bytes, new_bytes; - - /* Use this CPU's mini counter array */ - my_minis = cm->minis[cpu_index]; - - mini = vec_elt_at_index (my_minis, index); - old_packets = mini->packets; - old_bytes = mini->bytes; - - new_packets = old_packets + packet_increment; - new_bytes = old_bytes + byte_increment; - - mini->packets = new_packets; - mini->bytes = new_bytes; - - /* Bytes always overflow before packets.. 
*/ - if (PREDICT_FALSE (mini->bytes != new_bytes)) - { - vlib_counter_t *maxi = vec_elt_at_index (cm->maxi, index); + vlib_counter_t *my_counters; - __sync_fetch_and_add (&maxi->packets, new_packets); - __sync_fetch_and_add (&maxi->bytes, new_bytes); + /* Use this CPU's counter array */ + my_counters = cm->counters[cpu_index]; - mini->packets = 0; - mini->bytes = 0; - } + my_counters[index].packets += n_packets; + my_counters[index].bytes += n_bytes; } -#define vlib_prefetch_combined_counter(_cm, _cpu_index, _index) \ -{ \ - vlib_mini_counter_t *_cpu_minis; \ - \ - /* \ - * This CPU's mini index is assumed to already be in cache \ - */ \ - _cpu_minis = (_cm)->minis[(_cpu_index)]; \ - CLIB_PREFETCH(_cpu_minis + (_index), \ - sizeof(*_cpu_minis), \ - STORE); \ +/** Pre-fetch a per-thread combined counter for the given object index */ +always_inline void +vlib_prefetch_combined_counter (const vlib_combined_counter_main_t * cm, + u32 cpu_index, u32 index) +{ + vlib_counter_t *cpu_counters; + + /* + * This CPU's index is assumed to already be in cache + */ + cpu_counters = cm->counters[cpu_index]; + CLIB_PREFETCH (cpu_counters + index, CLIB_CACHE_LINE_BYTES, STORE); } /** Get the value of a combined counter, never called in the speed path - Scrapes the entire set of mini counters. Innacurate unless + Scrapes the entire set of per-thread counters. Innacurate unless worker threads which might increment the counter are barrier-synchronized @@ -298,35 +247,27 @@ vlib_increment_combined_counter (vlib_combined_counter_main_t * cm, */ static inline void -vlib_get_combined_counter (vlib_combined_counter_main_t * cm, +vlib_get_combined_counter (const vlib_combined_counter_main_t * cm, u32 index, vlib_counter_t * result) { - vlib_mini_counter_t *my_minis, *mini; - vlib_counter_t *maxi; + vlib_counter_t *my_counters, *counter; int i; result->packets = 0; result->bytes = 0; - for (i = 0; i < vec_len (cm->minis); i++) + for (i = 0; i < vec_len (cm->counters); i++) { - my_minis = cm->minis[i]; + my_counters = cm->counters[i]; - mini = vec_elt_at_index (my_minis, index); - result->packets += mini->packets; - result->bytes += mini->bytes; + counter = vec_elt_at_index (my_counters, index); + result->packets += counter->packets; + result->bytes += counter->bytes; } - - maxi = vec_elt_at_index (cm->maxi, index); - result->packets += maxi->packets; - result->bytes += maxi->bytes; - - if (index < vec_len (cm->value_at_last_clear)) - vlib_counter_sub (result, &cm->value_at_last_clear[index]); } /** Clear a combined counter - Clears the set of per-thread u16 counters, and the shared vlib_counter_t + Clears the set of per-thread counters. 
@param cm - (vlib_combined_counter_main_t *) combined counter main pointer @param index - (u32) index of the counter to clear @@ -334,21 +275,17 @@ vlib_get_combined_counter (vlib_combined_counter_main_t * cm, always_inline void vlib_zero_combined_counter (vlib_combined_counter_main_t * cm, u32 index) { - vlib_mini_counter_t *mini, *my_minis; + vlib_counter_t *my_counters, *counter; int i; - for (i = 0; i < vec_len (cm->minis); i++) + for (i = 0; i < vec_len (cm->counters); i++) { - my_minis = cm->minis[i]; + my_counters = cm->counters[i]; - mini = vec_elt_at_index (my_minis, index); - mini->packets = 0; - mini->bytes = 0; + counter = vec_elt_at_index (my_counters, index); + counter->packets = 0; + counter->bytes = 0; } - - vlib_counter_zero (&cm->maxi[index]); - if (index < vec_len (cm->value_at_last_clear)) - vlib_counter_zero (&cm->value_at_last_clear[index]); } /** validate a simple counter diff --git a/src/vnet/ip/ip4_forward.c b/src/vnet/ip/ip4_forward.c index 0dad61d4..7352c2e7 100644 --- a/src/vnet/ip/ip4_forward.c +++ b/src/vnet/ip/ip4_forward.c @@ -2375,6 +2375,17 @@ ip4_rewrite_inline (vlib_main_t * vm, adj_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX]; adj_index1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX]; + /* + * pre-fetch the per-adjacency counters + */ + if (do_counters) + { + vlib_prefetch_combined_counter (&adjacency_counters, + cpu_index, adj_index0); + vlib_prefetch_combined_counter (&adjacency_counters, + cpu_index, adj_index1); + } + /* We should never rewrite a pkt using the MISS adjacency */ ASSERT (adj_index0 && adj_index1); @@ -2480,17 +2491,6 @@ ip4_rewrite_inline (vlib_main_t * vm, rewrite_header.max_l3_packet_bytes ? IP4_ERROR_MTU_EXCEEDED : error1); - /* - * pre-fetch the per-adjacency counters - */ - if (do_counters) - { - vlib_prefetch_combined_counter (&adjacency_counters, - cpu_index, adj_index0); - vlib_prefetch_combined_counter (&adjacency_counters, - cpu_index, adj_index1); - } - /* Don't adjust the buffer for ttl issue; icmp-error node wants * to see the IP headerr */ if (PREDICT_TRUE (error0 == IP4_ERROR_NONE)) @@ -2624,8 +2624,9 @@ ip4_rewrite_inline (vlib_main_t * vm, p0->flags &= ~VNET_BUFFER_LOCALLY_ORIGINATED; } - vlib_prefetch_combined_counter (&adjacency_counters, - cpu_index, adj_index0); + if (do_counters) + vlib_prefetch_combined_counter (&adjacency_counters, + cpu_index, adj_index0); /* Guess we are only writing on simple Ethernet header. */ vnet_rewrite_one_header (adj0[0], ip0, sizeof (ethernet_header_t)); @@ -2641,10 +2642,11 @@ ip4_rewrite_inline (vlib_main_t * vm, rw_len0 = adj0[0].rewrite_header.data_bytes; vnet_buffer (p0)->ip.save_rewrite_length = rw_len0; - vlib_increment_combined_counter - (&adjacency_counters, - cpu_index, - adj_index0, 1, vlib_buffer_length_in_chain (vm, p0) + rw_len0); + if (do_counters) + vlib_increment_combined_counter + (&adjacency_counters, + cpu_index, adj_index0, 1, + vlib_buffer_length_in_chain (vm, p0) + rw_len0); /* Check MTU of outgoing interface. 
*/ error0 = (vlib_buffer_length_in_chain (vm, p0) diff --git a/src/vnet/map/map.c b/src/vnet/map/map.c index 7006b1db..99305afa 100644 --- a/src/vnet/map/map.c +++ b/src/vnet/map/map.c @@ -1304,7 +1304,7 @@ show_map_stats_command_fn (vlib_main_t * vm, unformat_input_t * input, { which = cm - mm->domain_counters; - for (i = 0; i < vec_len (cm->maxi); i++) + for (i = 0; i < vlib_combined_counter_n_counters (cm); i++) { vlib_get_combined_counter (cm, i, &v); total_pkts[which] += v.packets; diff --git a/src/vnet/map/map_api.c b/src/vnet/map/map_api.c index 7febeb3d..d618e7a6 100644 --- a/src/vnet/map/map_api.c +++ b/src/vnet/map/map_api.c @@ -211,7 +211,7 @@ vl_api_map_summary_stats_t_handler (vl_api_map_summary_stats_t * mp) { which = cm - mm->domain_counters; - for (i = 0; i < vec_len (cm->maxi); i++) + for (i = 0; i < vlib_combined_counter_n_counters (cm); i++) { vlib_get_combined_counter (cm, i, &v); total_pkts[which] += v.packets; diff --git a/src/vnet/rewrite.c b/src/vnet/rewrite.c index c4a171c1..47fb74df 100644 --- a/src/vnet/rewrite.c +++ b/src/vnet/rewrite.c @@ -79,7 +79,7 @@ format_vnet_rewrite (u8 * s, va_list * args) if (NULL != si) s = format (s, "%U: ", format_vnet_sw_interface_name, vnm, si); else - s = format (s, "DELETED"); + s = format (s, "DELETED:%d", rw->sw_if_index); } /* Format rewrite string. */ diff --git a/src/vpp/api/api.c b/src/vpp/api/api.c index d8301fa6..8df40406 100644 --- a/src/vpp/api/api.c +++ b/src/vpp/api/api.c @@ -849,7 +849,7 @@ vl_api_vnet_get_summary_stats_t_handler (vl_api_vnet_get_summary_stats_t * mp) { which = cm - im->combined_sw_if_counters; - for (i = 0; i < vec_len (cm->maxi); i++) + for (i = 0; i < vlib_combined_counter_n_counters (cm); i++) { vlib_get_combined_counter (cm, i, &v); total_pkts[which] += v.packets; diff --git a/src/vpp/stats/stats.c b/src/vpp/stats/stats.c index c46d441a..1927da0b 100644 --- a/src/vpp/stats/stats.c +++ b/src/vpp/stats/stats.c @@ -134,7 +134,7 @@ do_simple_interface_counters (stats_main_t * sm) vlib_simple_counter_main_t *cm; u32 items_this_message = 0; u64 v, *vp = 0; - int i; + int i, n_counts; /* * Prevent interface registration from expanding / moving the vectors... 
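Since the shared maxi vector no longer exists, callers that used to size their loops with vec_len (cm->maxi), as in the map, api and stats hunks here, now ask the counter module for the count. The accessor bodies are not part of these hunks; a plausible implementation, assuming every per-thread vector is validated to the same length, would be:

u32
vlib_simple_counter_n_counters (const vlib_simple_counter_main_t * cm)
{
  /* all per-thread vectors are assumed to have the same length */
  return vec_len (cm->counters[0]);
}

u32
vlib_combined_counter_n_counters (const vlib_combined_counter_main_t * cm)
{
  return vec_len (cm->counters[0]);
}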
@@ -144,13 +144,13 @@ do_simple_interface_counters (stats_main_t * sm) vec_foreach (cm, im->sw_if_counters) { - - for (i = 0; i < vec_len (cm->maxi); i++) + n_counts = vlib_simple_counter_n_counters (cm); + for (i = 0; i < n_counts; i++) { if (mp == 0) { items_this_message = clib_min (SIMPLE_COUNTER_BATCH_SIZE, - vec_len (cm->maxi) - i); + n_counts - i); mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + items_this_message * sizeof (v)); @@ -189,19 +189,19 @@ do_combined_interface_counters (stats_main_t * sm) vlib_combined_counter_main_t *cm; u32 items_this_message = 0; vlib_counter_t v, *vp = 0; - int i; + int i, n_counts; vnet_interface_counter_lock (im); vec_foreach (cm, im->combined_sw_if_counters) { - - for (i = 0; i < vec_len (cm->maxi); i++) + n_counts = vlib_combined_counter_n_counters (cm); + for (i = 0; i < n_counts; i++) { if (mp == 0) { items_this_message = clib_min (COMBINED_COUNTER_BATCH_SIZE, - vec_len (cm->maxi) - i); + n_counts - i); mp = vl_msg_api_alloc_as_if_client (sizeof (*mp) + items_this_message * sizeof (v)); -- cgit 1.2.3-korg From 9705c3833a7b18609df8ae315a0aa062e1d2e180 Mon Sep 17 00:00:00 2001 From: Neale Ranns Date: Mon, 20 Feb 2017 20:29:41 -0800 Subject: MAP - add the domain struct directly into the dat-path and avoid the indirectiob throught the map-DPO Change-Id: Ifb72a1c1258440fdc4845aca8aecf2abd63526b1 Signed-off-by: Neale Ranns --- src/vnet/map/ip4_map.c | 20 ++++++-------- src/vnet/map/ip4_map_t.c | 15 ++++++----- src/vnet/map/map.c | 52 ++++++++---------------------------- src/vnet/map/map.h | 29 +++++++------------- src/vnet/map/map_dpo.c | 69 +++--------------------------------------------- src/vnet/map/map_dpo.h | 24 ----------------- 6 files changed, 42 insertions(+), 167 deletions(-) (limited to 'src/vnet/map') diff --git a/src/vnet/map/ip4_map.c b/src/vnet/map/ip4_map.c index 2be9ad37..1a20d704 100644 --- a/src/vnet/map/ip4_map.c +++ b/src/vnet/map/ip4_map.c @@ -293,12 +293,10 @@ ip4_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) p1 = vlib_get_buffer (vm, pi1); ip40 = vlib_buffer_get_current (p0); ip41 = vlib_buffer_get_current (p1); - d0 = - ip4_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX], - &map_domain_index0); - d1 = - ip4_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX], - &map_domain_index1); + map_domain_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX]; + d0 = ip4_map_get_domain (map_domain_index0); + map_domain_index1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX]; + d1 = ip4_map_get_domain (map_domain_index1); ASSERT (d0); ASSERT (d1); @@ -464,9 +462,8 @@ ip4_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) p0 = vlib_get_buffer (vm, pi0); ip40 = vlib_buffer_get_current (p0); - d0 = - ip4_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX], - &map_domain_index0); + map_domain_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX]; + d0 = ip4_map_get_domain (map_domain_index0); ASSERT (d0); /* @@ -597,9 +594,8 @@ ip4_map_reass (vlib_main_t * vm, p0 = vlib_get_buffer (vm, pi0); ip60 = vlib_buffer_get_current (p0); ip40 = (ip4_header_t *) (ip60 + 1); - d0 = - ip4_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX], - &map_domain_index0); + map_domain_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX]; + d0 = ip4_map_get_domain (map_domain_index0); map_ip4_reass_lock (); map_ip4_reass_t *r = map_ip4_reass_get (ip40->src_address.as_u32, diff --git a/src/vnet/map/ip4_map_t.c b/src/vnet/map/ip4_map_t.c index 15974d8a..b63d76bf 100644 --- a/src/vnet/map/ip4_map_t.c +++ 
b/src/vnet/map/ip4_map_t.c @@ -1100,10 +1100,12 @@ ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) next1 = IP4_MAPT_NEXT_DROP; } - d0 = ip4_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX], - &vnet_buffer (p0)->map_t.map_domain_index); - d1 = ip4_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX], - &vnet_buffer (p1)->map_t.map_domain_index); + vnet_buffer (p0)->map_t.map_domain_index = + vnet_buffer (p0)->ip.adj_index[VLIB_TX]; + d0 = ip4_map_get_domain (vnet_buffer (p0)->map_t.map_domain_index); + vnet_buffer (p1)->map_t.map_domain_index = + vnet_buffer (p1)->ip.adj_index[VLIB_TX]; + d1 = ip4_map_get_domain (vnet_buffer (p1)->map_t.map_domain_index); vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0; vnet_buffer (p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0; @@ -1213,8 +1215,9 @@ ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) next0 = IP4_MAPT_NEXT_DROP; } - d0 = ip4_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX], - &vnet_buffer (p0)->map_t.map_domain_index); + vnet_buffer (p0)->map_t.map_domain_index = + vnet_buffer (p0)->ip.adj_index[VLIB_TX]; + d0 = ip4_map_get_domain (vnet_buffer (p0)->map_t.map_domain_index); vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0; diff --git a/src/vnet/map/map.c b/src/vnet/map/map.c index 99305afa..811a0abc 100644 --- a/src/vnet/map/map.c +++ b/src/vnet/map/map.c @@ -177,7 +177,6 @@ map_create_domain (ip4_address_t * ip4_prefix, map_main_t *mm = &map_main; dpo_id_t dpo_v4 = DPO_INVALID; dpo_id_t dpo_v6 = DPO_INVALID; - fib_node_index_t fei; map_domain_t *d; /* Sanity check on the src prefix length */ @@ -268,57 +267,28 @@ map_create_domain (ip4_address_t * ip4_prefix, dpo_reset (&dpo_v4); /* - * Multiple MAP domains may share same source IPv6 TEP. - * In this case the route will exist and be MAP sourced. - * Find the adj (if any) already contributed and modify it + * construct a DPO to use the v6 domain */ - fib_prefix_t pfx6 = { - .fp_proto = FIB_PROTOCOL_IP6, - .fp_len = d->ip6_src_len, - .fp_addr = { - .ip6 = d->ip6_src, - } - , - }; - fei = fib_table_lookup_exact_match (0, &pfx6); - - if (FIB_NODE_INDEX_INVALID != fei) - { - dpo_id_t dpo = DPO_INVALID; - - if (fib_entry_get_dpo_for_source (fei, FIB_SOURCE_MAP, &dpo)) - { - /* - * modify the existing MAP to indicate it's shared - * skip to route add. - */ - const dpo_id_t *md_dpo; - map_dpo_t *md; - - ASSERT (DPO_LOAD_BALANCE == dpo.dpoi_type); - - md_dpo = load_balance_get_bucket (dpo.dpoi_index, 0); - md = map_dpo_get (md_dpo->dpoi_index); - - md->md_domain = ~0; - dpo_copy (&dpo_v6, md_dpo); - dpo_reset (&dpo); - - goto route_add; - } - } - if (d->flags & MAP_DOMAIN_TRANSLATION) map_t_dpo_create (DPO_PROTO_IP6, *map_domain_index, &dpo_v6); else map_dpo_create (DPO_PROTO_IP6, *map_domain_index, &dpo_v6); -route_add: /* + * Multiple MAP domains may share same source IPv6 TEP. Which is just dandy. + * We are not tracking the sharing. So a v4 lookup to find the correct + * domain post decap/trnaslate is always done + * * Create ip6 route. This is a reference counted add. If the prefix * already exists and is MAP sourced, it is now MAP source n+1 times * and will need to be removed n+1 times. 
*/ + fib_prefix_t pfx6 = { + .fp_proto = FIB_PROTOCOL_IP6, + .fp_len = d->ip6_src_len, + .fp_addr.ip6 = d->ip6_src, + }; + fib_table_entry_special_dpo_add (0, &pfx6, FIB_SOURCE_MAP, FIB_ENTRY_FLAG_EXCLUSIVE, &dpo_v6); diff --git a/src/vnet/map/map.h b/src/vnet/map/map.h index 616d42c0..644e80f5 100644 --- a/src/vnet/map/map.h +++ b/src/vnet/map/map.h @@ -416,17 +416,11 @@ map_get_ip4 (ip6_address_t *addr) * Get the MAP domain from an IPv4 lookup adjacency. */ static_always_inline map_domain_t * -ip4_map_get_domain (u32 mdi, - u32 *map_domain_index) +ip4_map_get_domain (u32 mdi) { map_main_t *mm = &map_main; - map_dpo_t *md; - md = map_dpo_get(mdi); - - ASSERT(md); - *map_domain_index = md->md_domain; - return pool_elt_at_index(mm->domains, *map_domain_index); + return pool_elt_at_index(mm->domains, mdi); } /* @@ -435,23 +429,21 @@ ip4_map_get_domain (u32 mdi, * The IPv4 address is used otherwise. */ static_always_inline map_domain_t * -ip6_map_get_domain (u32 mdi, ip4_address_t *addr, - u32 *map_domain_index, u8 *error) +ip6_map_get_domain (u32 mdi, + ip4_address_t *addr, + u32 *map_domain_index, + u8 *error) { map_main_t *mm = &map_main; - map_dpo_t *md; /* * Disable direct MAP domain lookup on decap, until the security check is updated to verify IPv4 SA. * (That's done implicitly when MAP domain is looked up in the IPv4 FIB) */ #ifdef MAP_NONSHARED_DOMAIN_ENABLED - md = map_dpo_get(mdi); - - ASSERT(md); - *map_domain_index = md->md_domain; - if (*map_domain_index != ~0) - return pool_elt_at_index(mm->domains, *map_domain_index); +#error "How can you be sure this domain is not shared?" + *map_domain_index = mdi; + return pool_elt_at_index(mm->domains, mdi); #endif u32 lbi = ip4_fib_forwarding_lookup(0, addr); @@ -459,8 +451,7 @@ ip6_map_get_domain (u32 mdi, ip4_address_t *addr, if (PREDICT_TRUE(dpo->dpoi_type == map_dpo_type || dpo->dpoi_type == map_t_dpo_type)) { - md = map_dpo_get(dpo->dpoi_index); - *map_domain_index = md->md_domain; + *map_domain_index = dpo->dpoi_index; return pool_elt_at_index(mm->domains, *map_domain_index); } *error = MAP_ERROR_NO_DOMAIN; diff --git a/src/vnet/map/map_dpo.c b/src/vnet/map/map_dpo.c index df2b5fa4..430c1fbf 100644 --- a/src/vnet/map/map_dpo.c +++ b/src/vnet/map/map_dpo.c @@ -16,49 +16,21 @@ #include #include -/** - * pool of all MPLS Label DPOs - */ -map_dpo_t *map_dpo_pool; - /** * The register MAP DPO type */ dpo_type_t map_dpo_type; dpo_type_t map_t_dpo_type; -static map_dpo_t * -map_dpo_alloc (void) -{ - map_dpo_t *md; - - pool_get_aligned(map_dpo_pool, md, CLIB_CACHE_LINE_BYTES); - memset(md, 0, sizeof(*md)); - - return (md); -} - -static index_t -map_dpo_get_index (map_dpo_t *md) -{ - return (md - map_dpo_pool); -} - void map_dpo_create (dpo_proto_t dproto, u32 domain_index, dpo_id_t *dpo) { - map_dpo_t *md; - - md = map_dpo_alloc(); - md->md_domain = domain_index; - md->md_proto = dproto; - dpo_set(dpo, map_dpo_type, dproto, - map_dpo_get_index(md)); + domain_index); } void @@ -66,16 +38,10 @@ map_t_dpo_create (dpo_proto_t dproto, u32 domain_index, dpo_id_t *dpo) { - map_dpo_t *md; - - md = map_dpo_alloc(); - md->md_domain = domain_index; - md->md_proto = dproto; - dpo_set(dpo, map_t_dpo_type, dproto, - map_dpo_get_index(md)); + domain_index); } @@ -84,14 +50,8 @@ format_map_dpo (u8 *s, va_list *args) { index_t index = va_arg (*args, index_t); CLIB_UNUSED(u32 indent) = va_arg (*args, u32); - map_dpo_t *md; - - md = map_dpo_get(index); - return (format(s, "map:[%d]:%U domain:%d", - index, - format_dpo_proto, md->md_proto, - md->md_domain)); + 
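Taken together, the two halves of this change are small: on the control-plane side the MAP domain index itself becomes the DPO index, and on the data-plane side the value found in the buffer's VLIB_TX adjacency slot is therefore the domain index, resolvable with one pool lookup and no map_dpo_t indirection. The fragments below are lifted from the hunks in this patch rather than forming a complete function:

  /* control plane: dpo_set() records the domain index directly */
  dpo_set (dpo, map_dpo_type, dproto, domain_index);

  /* data plane (p0 is the buffer being processed) */
  map_domain_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
  d0 = ip4_map_get_domain (map_domain_index0);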
return (format(s, "map: domain:%d", index)); } u8* @@ -99,40 +59,19 @@ format_map_t_dpo (u8 *s, va_list *args) { index_t index = va_arg (*args, index_t); CLIB_UNUSED(u32 indent) = va_arg (*args, u32); - map_dpo_t *md; - - md = map_dpo_get(index); - return (format(s, "map-t:[%d]:%U domain:%d", - index, - format_dpo_proto, md->md_proto, - md->md_domain)); + return (format(s, "map-t: domain:%d", index)); } static void map_dpo_lock (dpo_id_t *dpo) { - map_dpo_t *md; - - md = map_dpo_get(dpo->dpoi_index); - - md->md_locks++; } static void map_dpo_unlock (dpo_id_t *dpo) { - map_dpo_t *md; - - md = map_dpo_get(dpo->dpoi_index); - - md->md_locks--; - - if (0 == md->md_locks) - { - pool_put(map_dpo_pool, md); - } } const static dpo_vft_t md_vft = { diff --git a/src/vnet/map/map_dpo.h b/src/vnet/map/map_dpo.h index be510dba..63bf4787 100644 --- a/src/vnet/map/map_dpo.h +++ b/src/vnet/map/map_dpo.h @@ -22,23 +22,6 @@ /** * A representation of a MAP DPO */ -typedef struct map_dpo_t -{ - /** - * The dat-plane protocol - */ - dpo_proto_t md_proto; - - /** - * the MAP domain index - */ - u32 md_domain; - - /** - * Number of locks/users of the label - */ - u16 md_locks; -} map_dpo_t; extern void map_dpo_create (dpo_proto_t dproto, u32 domain_index, @@ -52,16 +35,9 @@ extern u8* format_map_dpo(u8 *s, va_list *args); /* * Encapsulation violation for fast data-path access */ -extern map_dpo_t *map_dpo_pool; extern dpo_type_t map_dpo_type; extern dpo_type_t map_t_dpo_type; -static inline map_dpo_t * -map_dpo_get (index_t index) -{ - return (pool_elt_at_index(map_dpo_pool, index)); -} - extern void map_dpo_module_init(void); #endif -- cgit 1.2.3-korg From 586afd762bfa149f5ca167bd5fd5a0cd59ce94fe Mon Sep 17 00:00:00 2001 From: Damjan Marion Date: Wed, 5 Apr 2017 19:18:20 +0200 Subject: Use thread local storage for thread index This patch deprecates stack-based thread identification, Also removes requirement that thread stacks are adjacent. Finally, possibly annoying for some folks, it renames all occurences of cpu_index and cpu_number with thread index. Using word "cpu" is misleading here as thread can be migrated ti different CPU, and also it is not related to linux cpu index. 
Change-Id: I68cdaf661e701d2336fc953dcb9978d10a70f7c1 Signed-off-by: Damjan Marion --- src/examples/srv6-sample-localsid/node.c | 4 +- src/plugins/dpdk/buffer.c | 2 +- src/plugins/dpdk/device/device.c | 8 +- src/plugins/dpdk/device/dpdk_priv.h | 8 +- src/plugins/dpdk/device/init.c | 2 +- src/plugins/dpdk/device/node.c | 32 +++--- src/plugins/dpdk/hqos/hqos.c | 16 +-- src/plugins/dpdk/ipsec/cli.c | 8 +- src/plugins/dpdk/ipsec/crypto_node.c | 4 +- src/plugins/dpdk/ipsec/esp.h | 4 +- src/plugins/dpdk/ipsec/esp_decrypt.c | 4 +- src/plugins/dpdk/ipsec/esp_encrypt.c | 5 +- src/plugins/dpdk/ipsec/ipsec.c | 2 +- src/plugins/dpdk/ipsec/ipsec.h | 4 +- src/plugins/dpdk/main.c | 2 +- src/plugins/flowperpkt/l2_node.c | 2 +- src/plugins/flowperpkt/node.c | 2 +- src/plugins/ioam/export-common/ioam_export.h | 6 +- .../ioam/ip6/ioam_cache_tunnel_select_node.c | 16 +-- src/plugins/ixge/ixge.c | 2 +- src/plugins/lb/lb.c | 8 +- src/plugins/lb/node.c | 22 ++-- src/plugins/lb/refcount.c | 8 +- src/plugins/lb/refcount.h | 4 +- src/plugins/memif/node.c | 35 +++--- src/plugins/snat/in2out.c | 110 +++++++++--------- src/plugins/snat/out2in.c | 102 ++++++++--------- src/plugins/snat/snat.h | 10 +- src/vlib/buffer.c | 6 +- src/vlib/buffer_funcs.h | 4 +- src/vlib/cli.c | 6 +- src/vlib/counter.h | 16 +-- src/vlib/error.c | 2 +- src/vlib/global_funcs.h | 2 +- src/vlib/main.c | 14 +-- src/vlib/main.h | 2 +- src/vlib/node.c | 2 +- src/vlib/node.h | 6 +- src/vlib/node_funcs.h | 8 +- src/vlib/threads.c | 69 ++++------- src/vlib/threads.h | 21 ++-- src/vlib/unix/cj.c | 7 +- src/vlib/unix/cj.h | 2 +- src/vlib/unix/main.c | 43 +++---- src/vnet/adj/adj_l2.c | 4 +- src/vnet/adj/adj_midchain.c | 8 +- src/vnet/adj/adj_nsh.c | 4 +- src/vnet/classify/vnet_classify.c | 16 +-- src/vnet/cop/ip4_whitelist.c | 8 +- src/vnet/cop/ip6_whitelist.c | 8 +- src/vnet/devices/af_packet/node.c | 20 ++-- src/vnet/devices/devices.c | 61 +++++----- src/vnet/devices/devices.h | 18 +-- src/vnet/devices/netmap/node.c | 24 ++-- src/vnet/devices/ssvm/node.c | 6 +- src/vnet/devices/virtio/vhost-user.c | 127 +++++++++++---------- src/vnet/dpo/lookup_dpo.c | 20 ++-- src/vnet/dpo/replicate_dpo.c | 12 +- src/vnet/ethernet/arp.c | 2 +- src/vnet/ethernet/interface.c | 7 +- src/vnet/ethernet/node.c | 14 +-- src/vnet/gre/node.c | 8 +- src/vnet/interface.h | 2 +- src/vnet/interface_output.c | 53 ++++----- src/vnet/ip/ip4_forward.c | 34 +++--- src/vnet/ip/ip4_input.c | 8 +- src/vnet/ip/ip6_forward.c | 24 ++-- src/vnet/ip/ip6_input.c | 8 +- src/vnet/ip/ip6_neighbor.c | 4 +- src/vnet/ipsec/esp.h | 8 +- src/vnet/ipsec/esp_decrypt.c | 13 ++- src/vnet/ipsec/esp_encrypt.c | 13 ++- src/vnet/ipsec/ikev2.c | 64 ++++++----- src/vnet/ipsec/ipsec.h | 12 +- src/vnet/ipsec/ipsec_if.c | 2 +- src/vnet/l2/l2_bvi.h | 2 +- src/vnet/l2/l2_input.c | 14 +-- src/vnet/l2/l2_output.c | 6 +- src/vnet/l2tp/decap.c | 2 +- src/vnet/l2tp/encap.c | 2 +- src/vnet/l2tp/l2tp.c | 6 +- src/vnet/lisp-gpe/decap.c | 16 +-- src/vnet/lldp/lldp_input.c | 2 +- src/vnet/map/ip4_map.c | 14 +-- src/vnet/map/ip4_map_t.c | 12 +- src/vnet/map/ip6_map.c | 19 +-- src/vnet/map/ip6_map_t.c | 12 +- src/vnet/mpls/mpls_input.c | 8 +- src/vnet/mpls/mpls_lookup.c | 20 ++-- src/vnet/mpls/mpls_output.c | 10 +- src/vnet/pg/input.c | 4 +- src/vnet/replication.c | 20 ++-- src/vnet/replication.h | 2 +- src/vnet/session/node.c | 2 +- src/vnet/sr/sr_localsid.c | 44 +++---- src/vnet/tcp/builtin_client.c | 2 +- src/vnet/tcp/tcp.c | 8 +- src/vnet/tcp/tcp_debug.h | 2 +- src/vnet/tcp/tcp_input.c | 10 +- src/vnet/tcp/tcp_output.c | 20 
++-- src/vnet/udp/udp_input.c | 2 +- src/vnet/unix/tapcli.c | 2 +- src/vnet/unix/tuntap.c | 4 +- src/vnet/vxlan-gpe/decap.c | 10 +- src/vnet/vxlan-gpe/encap.c | 12 +- src/vnet/vxlan/decap.c | 10 +- src/vnet/vxlan/encap.c | 12 +- src/vpp/stats/stats.c | 14 +-- src/vpp/stats/stats.h | 2 +- 109 files changed, 790 insertions(+), 791 deletions(-) (limited to 'src/vnet/map') diff --git a/src/examples/srv6-sample-localsid/node.c b/src/examples/srv6-sample-localsid/node.c index 7bae9cd7..e83e2352 100644 --- a/src/examples/srv6-sample-localsid/node.c +++ b/src/examples/srv6-sample-localsid/node.c @@ -114,7 +114,7 @@ srv6_localsid_sample_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_fram from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; next_index = node->cached_next_index; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); while (n_left_from > 0) { @@ -168,7 +168,7 @@ srv6_localsid_sample_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_fram /* This increments the SRv6 per LocalSID counters.*/ vlib_increment_combined_counter (((next0 == SRV6_SAMPLE_LOCALSID_NEXT_ERROR) ? &(sm->sr_ls_invalid_counters) : &(sm->sr_ls_valid_counters)), - cpu_index, + thread_index, ls0 - sm->localsids, 1, vlib_buffer_length_in_chain (vm, b0)); diff --git a/src/plugins/dpdk/buffer.c b/src/plugins/dpdk/buffer.c index 2765c292..c80b3fa8 100644 --- a/src/plugins/dpdk/buffer.c +++ b/src/plugins/dpdk/buffer.c @@ -132,7 +132,7 @@ dpdk_buffer_delete_free_list (vlib_main_t * vm, u32 free_list_index) u32 merge_index; int i; - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); f = vlib_buffer_get_free_list (vm, free_list_index); diff --git a/src/plugins/dpdk/device/device.c b/src/plugins/dpdk/device/device.c index 50b26689..91661246 100644 --- a/src/plugins/dpdk/device/device.c +++ b/src/plugins/dpdk/device/device.c @@ -243,7 +243,7 @@ static_always_inline ASSERT (ring->tx_tail == 0); n_retry = 16; - queue_id = vm->cpu_index; + queue_id = vm->thread_index; do { @@ -266,7 +266,7 @@ static_always_inline { /* no wrap, transmit in one burst */ dpdk_device_hqos_per_worker_thread_t *hqos = - &xd->hqos_wt[vm->cpu_index]; + &xd->hqos_wt[vm->thread_index]; ASSERT (hqos->swq != NULL); @@ -332,7 +332,7 @@ dpdk_buffer_recycle (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_buffer_t * b, u32 bi, struct rte_mbuf **mbp) { dpdk_main_t *dm = &dpdk_main; - u32 my_cpu = vm->cpu_index; + u32 my_cpu = vm->thread_index; struct rte_mbuf *mb_new; if (PREDICT_FALSE (b->flags & VLIB_BUFFER_RECYCLE) == 0) @@ -376,7 +376,7 @@ dpdk_interface_tx (vlib_main_t * vm, tx_ring_hdr_t *ring; u32 n_on_ring; - my_cpu = vm->cpu_index; + my_cpu = vm->thread_index; queue_id = my_cpu; diff --git a/src/plugins/dpdk/device/dpdk_priv.h b/src/plugins/dpdk/device/dpdk_priv.h index dd40ff48..52b4ca4b 100644 --- a/src/plugins/dpdk/device/dpdk_priv.h +++ b/src/plugins/dpdk/device/dpdk_priv.h @@ -79,7 +79,7 @@ dpdk_update_counters (dpdk_device_t * xd, f64 now) { vlib_simple_counter_main_t *cm; vnet_main_t *vnm = vnet_get_main (); - u32 my_cpu = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); u64 rxerrors, last_rxerrors; /* only update counters for PMD interfaces */ @@ -96,7 +96,7 @@ dpdk_update_counters (dpdk_device_t * xd, f64 now) cm = vec_elt_at_index (vnm->interface_main.sw_if_counters, VNET_INTERFACE_COUNTER_RX_NO_BUF); - vlib_increment_simple_counter (cm, my_cpu, xd->vlib_sw_if_index, + vlib_increment_simple_counter (cm, thread_index, 
xd->vlib_sw_if_index, xd->stats.rx_nombuf - xd->last_stats.rx_nombuf); } @@ -107,7 +107,7 @@ dpdk_update_counters (dpdk_device_t * xd, f64 now) cm = vec_elt_at_index (vnm->interface_main.sw_if_counters, VNET_INTERFACE_COUNTER_RX_MISS); - vlib_increment_simple_counter (cm, my_cpu, xd->vlib_sw_if_index, + vlib_increment_simple_counter (cm, thread_index, xd->vlib_sw_if_index, xd->stats.imissed - xd->last_stats.imissed); } @@ -119,7 +119,7 @@ dpdk_update_counters (dpdk_device_t * xd, f64 now) cm = vec_elt_at_index (vnm->interface_main.sw_if_counters, VNET_INTERFACE_COUNTER_RX_ERROR); - vlib_increment_simple_counter (cm, my_cpu, xd->vlib_sw_if_index, + vlib_increment_simple_counter (cm, thread_index, xd->vlib_sw_if_index, rxerrors - last_rxerrors); } diff --git a/src/plugins/dpdk/device/init.c b/src/plugins/dpdk/device/init.c index 538db6cb..7eaf8da7 100755 --- a/src/plugins/dpdk/device/init.c +++ b/src/plugins/dpdk/device/init.c @@ -324,7 +324,7 @@ dpdk_port_setup (dpdk_main_t * dm, dpdk_device_t * xd) int rv; int j; - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) { diff --git a/src/plugins/dpdk/device/node.c b/src/plugins/dpdk/device/node.c index e740fd18..b10e0fad 100644 --- a/src/plugins/dpdk/device/node.c +++ b/src/plugins/dpdk/device/node.c @@ -283,7 +283,7 @@ dpdk_buffer_init_from_template (void *d0, void *d1, void *d2, void *d3, */ static_always_inline u32 dpdk_device_input (dpdk_main_t * dm, dpdk_device_t * xd, - vlib_node_runtime_t * node, u32 cpu_index, u16 queue_id, + vlib_node_runtime_t * node, u32 thread_index, u16 queue_id, int maybe_multiseg) { u32 n_buffers; @@ -294,7 +294,7 @@ dpdk_device_input (dpdk_main_t * dm, dpdk_device_t * xd, uword n_rx_bytes = 0; u32 n_trace, trace_cnt __attribute__ ((unused)); vlib_buffer_free_list_t *fl; - vlib_buffer_t *bt = vec_elt_at_index (dm->buffer_templates, cpu_index); + vlib_buffer_t *bt = vec_elt_at_index (dm->buffer_templates, thread_index); if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0) return 0; @@ -306,7 +306,7 @@ dpdk_device_input (dpdk_main_t * dm, dpdk_device_t * xd, return 0; } - vec_reset_length (xd->d_trace_buffers[cpu_index]); + vec_reset_length (xd->d_trace_buffers[thread_index]); trace_cnt = n_trace = vlib_get_trace_count (vm, node); if (n_trace > 0) @@ -318,7 +318,7 @@ dpdk_device_input (dpdk_main_t * dm, dpdk_device_t * xd, { struct rte_mbuf *mb = xd->rx_vectors[queue_id][mb_index++]; vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb); - vec_add1 (xd->d_trace_buffers[cpu_index], + vec_add1 (xd->d_trace_buffers[thread_index], vlib_get_buffer_index (vm, b)); } } @@ -546,20 +546,22 @@ dpdk_device_input (dpdk_main_t * dm, dpdk_device_t * xd, vlib_put_next_frame (vm, node, next_index, n_left_to_next); } - if (PREDICT_FALSE (vec_len (xd->d_trace_buffers[cpu_index]) > 0)) + if (PREDICT_FALSE (vec_len (xd->d_trace_buffers[thread_index]) > 0)) { - dpdk_rx_trace (dm, node, xd, queue_id, xd->d_trace_buffers[cpu_index], - vec_len (xd->d_trace_buffers[cpu_index])); - vlib_set_trace_count (vm, node, n_trace - - vec_len (xd->d_trace_buffers[cpu_index])); + dpdk_rx_trace (dm, node, xd, queue_id, + xd->d_trace_buffers[thread_index], + vec_len (xd->d_trace_buffers[thread_index])); + vlib_set_trace_count (vm, node, + n_trace - + vec_len (xd->d_trace_buffers[thread_index])); } vlib_increment_combined_counter (vnet_get_main ()->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, xd->vlib_sw_if_index, mb_index, n_rx_bytes); + thread_index, 
xd->vlib_sw_if_index, mb_index, n_rx_bytes); - vnet_device_increment_rx_packets (cpu_index, mb_index); + vnet_device_increment_rx_packets (thread_index, mb_index); return mb_index; } @@ -630,19 +632,19 @@ dpdk_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * f) dpdk_device_t *xd; uword n_rx_packets = 0; dpdk_device_and_queue_t *dq; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); /* * Poll all devices on this cpu for input/interrupts. */ /* *INDENT-OFF* */ - vec_foreach (dq, dm->devices_by_cpu[cpu_index]) + vec_foreach (dq, dm->devices_by_cpu[thread_index]) { xd = vec_elt_at_index(dm->devices, dq->device); if (xd->flags & DPDK_DEVICE_FLAG_MAYBE_MULTISEG) - n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, dq->queue_id, /* maybe_multiseg */ 1); + n_rx_packets += dpdk_device_input (dm, xd, node, thread_index, dq->queue_id, /* maybe_multiseg */ 1); else - n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, dq->queue_id, /* maybe_multiseg */ 0); + n_rx_packets += dpdk_device_input (dm, xd, node, thread_index, dq->queue_id, /* maybe_multiseg */ 0); } /* *INDENT-ON* */ diff --git a/src/plugins/dpdk/hqos/hqos.c b/src/plugins/dpdk/hqos/hqos.c index a288fca7..8b251beb 100644 --- a/src/plugins/dpdk/hqos/hqos.c +++ b/src/plugins/dpdk/hqos/hqos.c @@ -397,7 +397,7 @@ static_always_inline void dpdk_hqos_thread_internal_hqos_dbg_bypass (vlib_main_t * vm) { dpdk_main_t *dm = &dpdk_main; - u32 cpu_index = vm->cpu_index; + u32 thread_index = vm->thread_index; u32 dev_pos; dev_pos = 0; @@ -405,12 +405,12 @@ dpdk_hqos_thread_internal_hqos_dbg_bypass (vlib_main_t * vm) { vlib_worker_thread_barrier_check (); - u32 n_devs = vec_len (dm->devices_by_hqos_cpu[cpu_index]); + u32 n_devs = vec_len (dm->devices_by_hqos_cpu[thread_index]); if (dev_pos >= n_devs) dev_pos = 0; dpdk_device_and_queue_t *dq = - vec_elt_at_index (dm->devices_by_hqos_cpu[cpu_index], dev_pos); + vec_elt_at_index (dm->devices_by_hqos_cpu[thread_index], dev_pos); dpdk_device_t *xd = vec_elt_at_index (dm->devices, dq->device); dpdk_device_hqos_per_hqos_thread_t *hqos = xd->hqos_ht; @@ -479,7 +479,7 @@ static_always_inline void dpdk_hqos_thread_internal (vlib_main_t * vm) { dpdk_main_t *dm = &dpdk_main; - u32 cpu_index = vm->cpu_index; + u32 thread_index = vm->thread_index; u32 dev_pos; dev_pos = 0; @@ -487,7 +487,7 @@ dpdk_hqos_thread_internal (vlib_main_t * vm) { vlib_worker_thread_barrier_check (); - u32 n_devs = vec_len (dm->devices_by_hqos_cpu[cpu_index]); + u32 n_devs = vec_len (dm->devices_by_hqos_cpu[thread_index]); if (PREDICT_FALSE (n_devs == 0)) { dev_pos = 0; @@ -497,7 +497,7 @@ dpdk_hqos_thread_internal (vlib_main_t * vm) dev_pos = 0; dpdk_device_and_queue_t *dq = - vec_elt_at_index (dm->devices_by_hqos_cpu[cpu_index], dev_pos); + vec_elt_at_index (dm->devices_by_hqos_cpu[thread_index], dev_pos); dpdk_device_t *xd = vec_elt_at_index (dm->devices, dq->device); dpdk_device_hqos_per_hqos_thread_t *hqos = xd->hqos_ht; @@ -586,7 +586,7 @@ dpdk_hqos_thread (vlib_worker_thread_t * w) vm = vlib_get_main (); - ASSERT (vm->cpu_index == os_get_cpu_number ()); + ASSERT (vm->thread_index == vlib_get_thread_index ()); clib_time_init (&vm->clib_time); clib_mem_set_heap (w->thread_mheap); @@ -595,7 +595,7 @@ dpdk_hqos_thread (vlib_worker_thread_t * w) while (tm->worker_thread_release == 0) vlib_worker_thread_barrier_check (); - if (vec_len (dm->devices_by_hqos_cpu[vm->cpu_index]) == 0) + if (vec_len (dm->devices_by_hqos_cpu[vm->thread_index]) == 0) return clib_error ("current 
I/O TX thread does not have any devices assigned to it"); diff --git a/src/plugins/dpdk/ipsec/cli.c b/src/plugins/dpdk/ipsec/cli.c index cd0a6037..3ae8c9b8 100644 --- a/src/plugins/dpdk/ipsec/cli.c +++ b/src/plugins/dpdk/ipsec/cli.c @@ -42,8 +42,8 @@ dpdk_ipsec_show_mapping (vlib_main_t * vm, u16 detail_display) for (i = 0; i < tm->n_vlib_mains; i++) { uword key, data; - u32 cpu_index = vlib_mains[i]->cpu_index; - crypto_worker_main_t *cwm = &dcm->workers_main[cpu_index]; + u32 thread_index = vlib_mains[i]->thread_index; + crypto_worker_main_t *cwm = &dcm->workers_main[thread_index]; u8 *s = 0; if (skip_master) @@ -57,7 +57,7 @@ dpdk_ipsec_show_mapping (vlib_main_t * vm, u16 detail_display) i32 last_cdev = -1; crypto_qp_data_t *qpd; - s = format (s, "%u\t", cpu_index); + s = format (s, "%u\t", thread_index); /* *INDENT-OFF* */ vec_foreach (qpd, cwm->qp_data) @@ -95,7 +95,7 @@ dpdk_ipsec_show_mapping (vlib_main_t * vm, u16 detail_display) cap.sym.auth.algo = p_key->auth_algo; check_algo_is_supported (&cap, auth_str); vlib_cli_output (vm, "%u\t%10s\t%15s\t%3s\t%u\t%u\n", - vlib_mains[i]->cpu_index, cipher_str, auth_str, + vlib_mains[i]->thread_index, cipher_str, auth_str, p_key->is_outbound ? "out" : "in", cwm->qp_data[data].dev_id, cwm->qp_data[data].qp_id); diff --git a/src/plugins/dpdk/ipsec/crypto_node.c b/src/plugins/dpdk/ipsec/crypto_node.c index dc3452b2..a3c45902 100644 --- a/src/plugins/dpdk/ipsec/crypto_node.c +++ b/src/plugins/dpdk/ipsec/crypto_node.c @@ -171,9 +171,9 @@ static uword dpdk_crypto_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_worker_main_t *cwm = &dcm->workers_main[cpu_index]; + crypto_worker_main_t *cwm = &dcm->workers_main[thread_index]; crypto_qp_data_t *qpd; u32 n_deq = 0; diff --git a/src/plugins/dpdk/ipsec/esp.h b/src/plugins/dpdk/ipsec/esp.h index 320295b1..56f0c756 100644 --- a/src/plugins/dpdk/ipsec/esp.h +++ b/src/plugins/dpdk/ipsec/esp.h @@ -170,9 +170,9 @@ static_always_inline int create_sym_sess (ipsec_sa_t * sa, crypto_sa_session_t * sa_sess, u8 is_outbound) { - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_worker_main_t *cwm = &dcm->workers_main[cpu_index]; + crypto_worker_main_t *cwm = &dcm->workers_main[thread_index]; struct rte_crypto_sym_xform cipher_xform = { 0 }; struct rte_crypto_sym_xform auth_xform = { 0 }; struct rte_crypto_sym_xform *xfs; diff --git a/src/plugins/dpdk/ipsec/esp_decrypt.c b/src/plugins/dpdk/ipsec/esp_decrypt.c index 286e03f8..bab76e3b 100644 --- a/src/plugins/dpdk/ipsec/esp_decrypt.c +++ b/src/plugins/dpdk/ipsec/esp_decrypt.c @@ -88,7 +88,7 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm, { u32 n_left_from, *from, *to_next, next_index; ipsec_main_t *im = &ipsec_main; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); dpdk_crypto_main_t * dcm = &dpdk_crypto_main; dpdk_esp_main_t * em = &dpdk_esp_main; u32 i; @@ -104,7 +104,7 @@ dpdk_esp_decrypt_node_fn (vlib_main_t * vm, return n_left_from; } - crypto_worker_main_t *cwm = vec_elt_at_index(dcm->workers_main, cpu_index); + crypto_worker_main_t *cwm = vec_elt_at_index(dcm->workers_main, thread_index); u32 n_qps = vec_len(cwm->qp_data); struct rte_crypto_op ** cops_to_enq[n_qps]; u32 n_cop_qp[n_qps], * bi_to_enq[n_qps]; diff --git a/src/plugins/dpdk/ipsec/esp_encrypt.c 
b/src/plugins/dpdk/ipsec/esp_encrypt.c index 5b03de73..f996d7df 100644 --- a/src/plugins/dpdk/ipsec/esp_encrypt.c +++ b/src/plugins/dpdk/ipsec/esp_encrypt.c @@ -93,7 +93,7 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm, { u32 n_left_from, *from, *to_next, next_index; ipsec_main_t *im = &ipsec_main; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); dpdk_crypto_main_t *dcm = &dpdk_crypto_main; dpdk_esp_main_t *em = &dpdk_esp_main; u32 i; @@ -111,7 +111,8 @@ dpdk_esp_encrypt_node_fn (vlib_main_t * vm, return n_left_from; } - crypto_worker_main_t *cwm = vec_elt_at_index (dcm->workers_main, cpu_index); + crypto_worker_main_t *cwm = + vec_elt_at_index (dcm->workers_main, thread_index); u32 n_qps = vec_len (cwm->qp_data); struct rte_crypto_op **cops_to_enq[n_qps]; u32 n_cop_qp[n_qps], *bi_to_enq[n_qps]; diff --git a/src/plugins/dpdk/ipsec/ipsec.c b/src/plugins/dpdk/ipsec/ipsec.c index b0aaaaec..5d8f4fba 100644 --- a/src/plugins/dpdk/ipsec/ipsec.c +++ b/src/plugins/dpdk/ipsec/ipsec.c @@ -289,7 +289,7 @@ dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt, if (!map) { clib_warning ("unable to create hash table for worker %u", - vlib_mains[i]->cpu_index); + vlib_mains[i]->thread_index); goto error; } cwm->algo_qp_map = map; diff --git a/src/plugins/dpdk/ipsec/ipsec.h b/src/plugins/dpdk/ipsec/ipsec.h index 28bffc80..f0f793c0 100644 --- a/src/plugins/dpdk/ipsec/ipsec.h +++ b/src/plugins/dpdk/ipsec/ipsec.h @@ -95,8 +95,8 @@ static_always_inline void crypto_alloc_cops () { dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - u32 cpu_index = os_get_cpu_number (); - crypto_worker_main_t *cwm = &dcm->workers_main[cpu_index]; + u32 thread_index = vlib_get_thread_index (); + crypto_worker_main_t *cwm = &dcm->workers_main[thread_index]; unsigned socket_id = rte_socket_id (); crypto_qp_data_t *qpd; diff --git a/src/plugins/dpdk/main.c b/src/plugins/dpdk/main.c index 7ee2a785..942b8b2d 100644 --- a/src/plugins/dpdk/main.c +++ b/src/plugins/dpdk/main.c @@ -39,7 +39,7 @@ rte_delay_us_override (unsigned us) * thread then do not intercept. (Must not be called from an * independent pthread). */ - if (os_get_cpu_number () == 0) + if (vlib_get_thread_index () == 0) { /* * We're in the vlib main thread or a vlib process. 
Make sure diff --git a/src/plugins/flowperpkt/l2_node.c b/src/plugins/flowperpkt/l2_node.c index 1c2f681e..fdaf81d1 100644 --- a/src/plugins/flowperpkt/l2_node.c +++ b/src/plugins/flowperpkt/l2_node.c @@ -102,7 +102,7 @@ add_to_flow_record_l2 (vlib_main_t * vm, u8 * src_mac, u8 * dst_mac, u16 ethertype, u64 timestamp, u16 length, int do_flush) { - u32 my_cpu_number = vm->cpu_index; + u32 my_cpu_number = vm->thread_index; flow_report_main_t *frm = &flow_report_main; ip4_header_t *ip; udp_header_t *udp; diff --git a/src/plugins/flowperpkt/node.c b/src/plugins/flowperpkt/node.c index f77f087d..0277682d 100644 --- a/src/plugins/flowperpkt/node.c +++ b/src/plugins/flowperpkt/node.c @@ -101,7 +101,7 @@ add_to_flow_record_ipv4 (vlib_main_t * vm, u32 src_address, u32 dst_address, u8 tos, u64 timestamp, u16 length, int do_flush) { - u32 my_cpu_number = vm->cpu_index; + u32 my_cpu_number = vm->thread_index; flow_report_main_t *frm = &flow_report_main; ip4_header_t *ip; udp_header_t *udp; diff --git a/src/plugins/ioam/export-common/ioam_export.h b/src/plugins/ioam/export-common/ioam_export.h index 2bf3fd54..9de0d13b 100644 --- a/src/plugins/ioam/export-common/ioam_export.h +++ b/src/plugins/ioam/export-common/ioam_export.h @@ -477,8 +477,8 @@ do { \ from = vlib_frame_vector_args (F); \ n_left_from = (F)->n_vectors; \ next_index = (N)->cached_next_index; \ - while (__sync_lock_test_and_set ((EM)->lockp[(VM)->cpu_index], 1)); \ - my_buf = ioam_export_get_my_buffer (EM, (VM)->cpu_index); \ + while (__sync_lock_test_and_set ((EM)->lockp[(VM)->thread_index], 1)); \ + my_buf = ioam_export_get_my_buffer (EM, (VM)->thread_index); \ my_buf->touched_at = vlib_time_now (VM); \ while (n_left_from > 0) \ { \ @@ -620,7 +620,7 @@ do { \ } \ vlib_node_increment_counter (VM, export_node.index, \ EXPORT_ERROR_RECORDED, pkts_recorded); \ - *(EM)->lockp[(VM)->cpu_index] = 0; \ + *(EM)->lockp[(VM)->thread_index] = 0; \ } while(0) #endif /* __included_ioam_export_h__ */ diff --git a/src/plugins/ioam/ip6/ioam_cache_tunnel_select_node.c b/src/plugins/ioam/ip6/ioam_cache_tunnel_select_node.c index a56dc040..0cf742c9 100644 --- a/src/plugins/ioam/ip6/ioam_cache_tunnel_select_node.c +++ b/src/plugins/ioam/ip6/ioam_cache_tunnel_select_node.c @@ -396,7 +396,7 @@ ip6_reset_ts_hbh_node_fn (vlib_main_t * vm, clib_net_to_host_u32 (tcp0->seq_number) + 1, no_of_responses, now, - vm->cpu_index, &pool_index0)) + vm->thread_index, &pool_index0)) { cache_ts_added++; } @@ -419,7 +419,7 @@ ip6_reset_ts_hbh_node_fn (vlib_main_t * vm, e2e = (ioam_e2e_cache_option_t *) ((u8 *) hbh0 + cm->rewrite_pool_index_offset); - e2e->pool_id = (u8) vm->cpu_index; + e2e->pool_id = (u8) vm->thread_index; e2e->pool_index = pool_index0; ioam_e2e_id_rewrite_handler ((ioam_e2e_id_option_t *) ((u8 *) e2e + @@ -455,7 +455,7 @@ ip6_reset_ts_hbh_node_fn (vlib_main_t * vm, clib_net_to_host_u32 (tcp1->seq_number) + 1, no_of_responses, now, - vm->cpu_index, &pool_index1)) + vm->thread_index, &pool_index1)) { cache_ts_added++; } @@ -479,7 +479,7 @@ ip6_reset_ts_hbh_node_fn (vlib_main_t * vm, e2e = (ioam_e2e_cache_option_t *) ((u8 *) hbh1 + cm->rewrite_pool_index_offset); - e2e->pool_id = (u8) vm->cpu_index; + e2e->pool_id = (u8) vm->thread_index; e2e->pool_index = pool_index1; ioam_e2e_id_rewrite_handler ((ioam_e2e_id_option_t *) ((u8 *) e2e + @@ -562,7 +562,7 @@ ip6_reset_ts_hbh_node_fn (vlib_main_t * vm, clib_net_to_host_u32 (tcp0->seq_number) + 1, no_of_responses, now, - vm->cpu_index, &pool_index0)) + vm->thread_index, &pool_index0)) { cache_ts_added++; } @@ -585,7 
+585,7 @@ ip6_reset_ts_hbh_node_fn (vlib_main_t * vm, e2e = (ioam_e2e_cache_option_t *) ((u8 *) hbh0 + cm->rewrite_pool_index_offset); - e2e->pool_id = (u8) vm->cpu_index; + e2e->pool_id = (u8) vm->thread_index; e2e->pool_index = pool_index0; ioam_e2e_id_rewrite_handler ((ioam_e2e_id_option_t *) ((u8 *) e2e + @@ -701,7 +701,7 @@ expired_cache_ts_timer_callback (u32 * expired_timers) ioam_cache_main_t *cm = &ioam_cache_main; int i; u32 pool_index; - u32 thread_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); u32 count = 0; for (i = 0; i < vec_len (expired_timers); i++) @@ -724,7 +724,7 @@ ioam_cache_ts_timer_tick_node_fn (vlib_main_t * vm, vlib_frame_t * f) { ioam_cache_main_t *cm = &ioam_cache_main; - u32 my_thread_index = os_get_cpu_number (); + u32 my_thread_index = vlib_get_thread_index (); struct timespec ts, tsrem; tw_timer_expire_timers_16t_2w_512sl (&cm->timer_wheels[my_thread_index], diff --git a/src/plugins/ixge/ixge.c b/src/plugins/ixge/ixge.c index f3c5cc09..08f5b692 100644 --- a/src/plugins/ixge/ixge.c +++ b/src/plugins/ixge/ixge.c @@ -1887,7 +1887,7 @@ done: vlib_increment_combined_counter (vnet_main. interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - 0 /* cpu_index */ , + 0 /* thread_index */ , xd->vlib_sw_if_index, n_packets, dq->rx.n_bytes); diff --git a/src/plugins/lb/lb.c b/src/plugins/lb/lb.c index add81236..addc2a42 100644 --- a/src/plugins/lb/lb.c +++ b/src/plugins/lb/lb.c @@ -63,11 +63,11 @@ u8 *format_lb_main (u8 * s, va_list * args) s = format(s, " #vips: %u\n", pool_elts(lbm->vips)); s = format(s, " #ass: %u\n", pool_elts(lbm->ass) - 1); - u32 cpu_index; - for(cpu_index = 0; cpu_index < tm->n_vlib_mains; cpu_index++ ) { - lb_hash_t *h = lbm->per_cpu[cpu_index].sticky_ht; + u32 thread_index; + for(thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++ ) { + lb_hash_t *h = lbm->per_cpu[thread_index].sticky_ht; if (h) { - s = format(s, "core %d\n", cpu_index); + s = format(s, "core %d\n", thread_index); s = format(s, " timeout: %ds\n", h->timeout); s = format(s, " usage: %d / %d\n", lb_hash_elts(h, lb_hash_time_now(vlib_get_main())), lb_hash_size(h)); } diff --git a/src/plugins/lb/node.c b/src/plugins/lb/node.c index 8b763c53..3171148b 100644 --- a/src/plugins/lb/node.c +++ b/src/plugins/lb/node.c @@ -60,10 +60,10 @@ format_lb_trace (u8 * s, va_list * args) return s; } -lb_hash_t *lb_get_sticky_table(u32 cpu_index) +lb_hash_t *lb_get_sticky_table(u32 thread_index) { lb_main_t *lbm = &lb_main; - lb_hash_t *sticky_ht = lbm->per_cpu[cpu_index].sticky_ht; + lb_hash_t *sticky_ht = lbm->per_cpu[thread_index].sticky_ht; //Check if size changed if (PREDICT_FALSE(sticky_ht && (lbm->per_cpu_sticky_buckets != lb_hash_nbuckets(sticky_ht)))) { @@ -71,8 +71,8 @@ lb_hash_t *lb_get_sticky_table(u32 cpu_index) lb_hash_bucket_t *b; u32 i; lb_hash_foreach_entry(sticky_ht, b, i) { - vlib_refcount_add(&lbm->as_refcount, cpu_index, b->value[i], -1); - vlib_refcount_add(&lbm->as_refcount, cpu_index, 0, 1); + vlib_refcount_add(&lbm->as_refcount, thread_index, b->value[i], -1); + vlib_refcount_add(&lbm->as_refcount, thread_index, 0, 1); } lb_hash_free(sticky_ht); @@ -81,8 +81,8 @@ lb_hash_t *lb_get_sticky_table(u32 cpu_index) //Create if necessary if (PREDICT_FALSE(sticky_ht == NULL)) { - lbm->per_cpu[cpu_index].sticky_ht = lb_hash_alloc(lbm->per_cpu_sticky_buckets, lbm->flow_timeout); - sticky_ht = lbm->per_cpu[cpu_index].sticky_ht; + lbm->per_cpu[thread_index].sticky_ht = lb_hash_alloc(lbm->per_cpu_sticky_buckets, lbm->flow_timeout); 
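The pattern repeated across these plugin hunks is uniform: state that used to be indexed by cpu_index is now indexed by the value returned from vlib_get_thread_index (), and because each worker only ever touches its own slot, no locking is required. Schematically, with placeholder type and variable names (my_feature_main_t and per_thread_data below are not real VPP symbols):

typedef struct { u64 hits; } my_per_thread_t;
typedef struct { my_per_thread_t *per_thread_data; } my_feature_main_t;
my_feature_main_t my_feature_main;	/* per_thread_data sized to one slot per worker */

static inline void
my_feature_bump (void)
{
  u32 thread_index = vlib_get_thread_index ();
  my_per_thread_t *ptd = &my_feature_main.per_thread_data[thread_index];
  ptd->hits += 1;		/* only this worker writes this slot: no lock */
}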
+ sticky_ht = lbm->per_cpu[thread_index].sticky_ht; clib_warning("Regenerated sticky table %p", sticky_ht); } @@ -153,10 +153,10 @@ lb_node_fn (vlib_main_t * vm, { lb_main_t *lbm = &lb_main; u32 n_left_from, *from, next_index, *to_next, n_left_to_next; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); u32 lb_time = lb_hash_time_now(vm); - lb_hash_t *sticky_ht = lb_get_sticky_table(cpu_index); + lb_hash_t *sticky_ht = lb_get_sticky_table(thread_index); from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; next_index = node->cached_next_index; @@ -240,9 +240,9 @@ lb_node_fn (vlib_main_t * vm, //Configuration may be changed, vectors resized, etc... //Dereference previously used - vlib_refcount_add(&lbm->as_refcount, cpu_index, + vlib_refcount_add(&lbm->as_refcount, thread_index, lb_hash_available_value(sticky_ht, hash0, available_index0), -1); - vlib_refcount_add(&lbm->as_refcount, cpu_index, + vlib_refcount_add(&lbm->as_refcount, thread_index, asindex0, 1); //Add sticky entry @@ -260,7 +260,7 @@ lb_node_fn (vlib_main_t * vm, } vlib_increment_simple_counter(&lbm->vip_counters[counter], - cpu_index, + thread_index, vnet_buffer (p0)->ip.adj_index[VLIB_TX], 1); diff --git a/src/plugins/lb/refcount.c b/src/plugins/lb/refcount.c index 22415c88..6f01ab5a 100644 --- a/src/plugins/lb/refcount.c +++ b/src/plugins/lb/refcount.c @@ -31,10 +31,10 @@ u64 vlib_refcount_get(vlib_refcount_t *r, u32 index) { u64 count = 0; vlib_thread_main_t *tm = vlib_get_thread_main (); - u32 cpu_index; - for (cpu_index = 0; cpu_index < tm->n_vlib_mains; cpu_index++) { - if (r->per_cpu[cpu_index].length > index) - count += r->per_cpu[cpu_index].counters[index]; + u32 thread_index; + for (thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++) { + if (r->per_cpu[thread_index].length > index) + count += r->per_cpu[thread_index].counters[index]; } return count; } diff --git a/src/plugins/lb/refcount.h b/src/plugins/lb/refcount.h index 8c26e7be..dcfcb3fe 100644 --- a/src/plugins/lb/refcount.h +++ b/src/plugins/lb/refcount.h @@ -45,9 +45,9 @@ typedef struct { void __vlib_refcount_resize(vlib_refcount_per_cpu_t *per_cpu, u32 size); static_always_inline -void vlib_refcount_add(vlib_refcount_t *r, u32 cpu_index, u32 counter_index, i32 v) +void vlib_refcount_add(vlib_refcount_t *r, u32 thread_index, u32 counter_index, i32 v) { - vlib_refcount_per_cpu_t *per_cpu = &r->per_cpu[cpu_index]; + vlib_refcount_per_cpu_t *per_cpu = &r->per_cpu[thread_index]; if (PREDICT_FALSE(counter_index >= per_cpu->length)) __vlib_refcount_resize(per_cpu, clib_max(counter_index + 16, per_cpu->length * 2)); diff --git a/src/plugins/memif/node.c b/src/plugins/memif/node.c index 659d5dfb..cee1f3d1 100644 --- a/src/plugins/memif/node.c +++ b/src/plugins/memif/node.c @@ -94,7 +94,7 @@ memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, u32 n_rx_bytes = 0; u32 *to_next = 0; u32 n_free_bufs; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); u32 bi0, bi1; vlib_buffer_t *b0, *b1; u16 ring_size = 1 << mif->log2_ring_size; @@ -105,14 +105,15 @@ memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, if (mif->per_interface_next_index != ~0) next_index = mif->per_interface_next_index; - n_free_bufs = vec_len (nm->rx_buffers[cpu_index]); + n_free_bufs = vec_len (nm->rx_buffers[thread_index]); if (PREDICT_FALSE (n_free_bufs < ring_size)) { - vec_validate (nm->rx_buffers[cpu_index], ring_size + n_free_bufs - 1); + vec_validate 
(nm->rx_buffers[thread_index], + ring_size + n_free_bufs - 1); n_free_bufs += - vlib_buffer_alloc (vm, &nm->rx_buffers[cpu_index][n_free_bufs], + vlib_buffer_alloc (vm, &nm->rx_buffers[thread_index][n_free_bufs], ring_size); - _vec_len (nm->rx_buffers[cpu_index]) = n_free_bufs; + _vec_len (nm->rx_buffers[thread_index]) = n_free_bufs; } head = ring->head; @@ -158,15 +159,15 @@ memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, CLIB_CACHE_LINE_BYTES, LOAD); } /* get empty buffer */ - u32 last_buf = vec_len (nm->rx_buffers[cpu_index]) - 1; - bi0 = nm->rx_buffers[cpu_index][last_buf]; - bi1 = nm->rx_buffers[cpu_index][last_buf - 1]; - _vec_len (nm->rx_buffers[cpu_index]) -= 2; + u32 last_buf = vec_len (nm->rx_buffers[thread_index]) - 1; + bi0 = nm->rx_buffers[thread_index][last_buf]; + bi1 = nm->rx_buffers[thread_index][last_buf - 1]; + _vec_len (nm->rx_buffers[thread_index]) -= 2; if (last_buf > 4) { - memif_prefetch (vm, nm->rx_buffers[cpu_index][last_buf - 2]); - memif_prefetch (vm, nm->rx_buffers[cpu_index][last_buf - 3]); + memif_prefetch (vm, nm->rx_buffers[thread_index][last_buf - 2]); + memif_prefetch (vm, nm->rx_buffers[thread_index][last_buf - 3]); } /* enqueue buffer */ @@ -256,9 +257,9 @@ memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, while (num_slots && n_left_to_next) { /* get empty buffer */ - u32 last_buf = vec_len (nm->rx_buffers[cpu_index]) - 1; - bi0 = nm->rx_buffers[cpu_index][last_buf]; - _vec_len (nm->rx_buffers[cpu_index]) = last_buf; + u32 last_buf = vec_len (nm->rx_buffers[thread_index]) - 1; + bi0 = nm->rx_buffers[thread_index][last_buf]; + _vec_len (nm->rx_buffers[thread_index]) = last_buf; /* enqueue buffer */ to_next[0] = bi0; @@ -315,7 +316,7 @@ memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, ring->tail = head; vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters - + VNET_INTERFACE_COUNTER_RX, cpu_index, + + VNET_INTERFACE_COUNTER_RX, thread_index, mif->hw_if_index, n_rx_packets, n_rx_bytes); @@ -327,7 +328,7 @@ memif_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { u32 n_rx_packets = 0; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); memif_main_t *nm = &memif_main; memif_if_t *mif; @@ -337,7 +338,7 @@ memif_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, if (mif->flags & MEMIF_IF_FLAG_ADMIN_UP && mif->flags & MEMIF_IF_FLAG_CONNECTED && (mif->if_index % nm->input_cpu_count) == - (cpu_index - nm->input_cpu_first_index)) + (thread_index - nm->input_cpu_first_index)) { if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE) n_rx_packets += diff --git a/src/plugins/snat/in2out.c b/src/plugins/snat/in2out.c index b4961365..e5ee965f 100644 --- a/src/plugins/snat/in2out.c +++ b/src/plugins/snat/in2out.c @@ -212,7 +212,7 @@ static u32 slow_path (snat_main_t *sm, vlib_buffer_t *b0, snat_session_t ** sessionp, vlib_node_runtime_t * node, u32 next0, - u32 cpu_index) + u32 thread_index) { snat_user_t *u; snat_user_key_t user_key; @@ -246,27 +246,27 @@ static u32 slow_path (snat_main_t *sm, vlib_buffer_t *b0, if (clib_bihash_search_8_8 (&sm->user_hash, &kv0, &value0)) { /* no, make a new one */ - pool_get (sm->per_thread_data[cpu_index].users, u); + pool_get (sm->per_thread_data[thread_index].users, u); memset (u, 0, sizeof (*u)); u->addr = ip0->src_address; u->fib_index = rx_fib_index0; - pool_get (sm->per_thread_data[cpu_index].list_pool, per_user_list_head_elt); + pool_get 
(sm->per_thread_data[thread_index].list_pool, per_user_list_head_elt); u->sessions_per_user_list_head_index = per_user_list_head_elt - - sm->per_thread_data[cpu_index].list_pool; + sm->per_thread_data[thread_index].list_pool; - clib_dlist_init (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_init (sm->per_thread_data[thread_index].list_pool, u->sessions_per_user_list_head_index); - kv0.value = u - sm->per_thread_data[cpu_index].users; + kv0.value = u - sm->per_thread_data[thread_index].users; /* add user */ clib_bihash_add_del_8_8 (&sm->user_hash, &kv0, 1 /* is_add */); } else { - u = pool_elt_at_index (sm->per_thread_data[cpu_index].users, + u = pool_elt_at_index (sm->per_thread_data[thread_index].users, value0.value); } @@ -276,25 +276,25 @@ static u32 slow_path (snat_main_t *sm, vlib_buffer_t *b0, /* Remove the oldest dynamic translation */ do { oldest_per_user_translation_list_index = - clib_dlist_remove_head (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_remove_head (sm->per_thread_data[thread_index].list_pool, u->sessions_per_user_list_head_index); ASSERT (oldest_per_user_translation_list_index != ~0); /* add it back to the end of the LRU list */ - clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, u->sessions_per_user_list_head_index, oldest_per_user_translation_list_index); /* Get the list element */ oldest_per_user_translation_list_elt = - pool_elt_at_index (sm->per_thread_data[cpu_index].list_pool, + pool_elt_at_index (sm->per_thread_data[thread_index].list_pool, oldest_per_user_translation_list_index); /* Get the session index from the list element */ session_index = oldest_per_user_translation_list_elt->value; /* Get the session */ - s = pool_elt_at_index (sm->per_thread_data[cpu_index].sessions, + s = pool_elt_at_index (sm->per_thread_data[thread_index].sessions, session_index); } while (snat_is_session_static (s)); @@ -346,7 +346,7 @@ static u32 slow_path (snat_main_t *sm, vlib_buffer_t *b0, } /* Create a new session */ - pool_get (sm->per_thread_data[cpu_index].sessions, s); + pool_get (sm->per_thread_data[thread_index].sessions, s); memset (s, 0, sizeof (*s)); s->outside_address_index = address_index; @@ -362,22 +362,22 @@ static u32 slow_path (snat_main_t *sm, vlib_buffer_t *b0, } /* Create list elts */ - pool_get (sm->per_thread_data[cpu_index].list_pool, + pool_get (sm->per_thread_data[thread_index].list_pool, per_user_translation_list_elt); - clib_dlist_init (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_init (sm->per_thread_data[thread_index].list_pool, per_user_translation_list_elt - - sm->per_thread_data[cpu_index].list_pool); + sm->per_thread_data[thread_index].list_pool); per_user_translation_list_elt->value = - s - sm->per_thread_data[cpu_index].sessions; + s - sm->per_thread_data[thread_index].sessions; s->per_user_index = per_user_translation_list_elt - - sm->per_thread_data[cpu_index].list_pool; + sm->per_thread_data[thread_index].list_pool; s->per_user_list_head_index = u->sessions_per_user_list_head_index; - clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, s->per_user_list_head_index, per_user_translation_list_elt - - sm->per_thread_data[cpu_index].list_pool); + sm->per_thread_data[thread_index].list_pool); } s->in2out = *key0; @@ -388,12 +388,12 @@ static u32 slow_path (snat_main_t *sm, vlib_buffer_t *b0, /* Add to translation hashes */ kv0.key = s->in2out.as_u64; - kv0.value = 
s - sm->per_thread_data[cpu_index].sessions; + kv0.value = s - sm->per_thread_data[thread_index].sessions; if (clib_bihash_add_del_8_8 (&sm->in2out, &kv0, 1 /* is_add */)) clib_warning ("in2out key add failed"); kv0.key = s->out2in.as_u64; - kv0.value = s - sm->per_thread_data[cpu_index].sessions; + kv0.value = s - sm->per_thread_data[thread_index].sessions; if (clib_bihash_add_del_8_8 (&sm->out2in, &kv0, 1 /* is_add */)) clib_warning ("out2in key add failed"); @@ -403,7 +403,7 @@ static u32 slow_path (snat_main_t *sm, vlib_buffer_t *b0, worker_by_out_key.port = s->out2in.port; worker_by_out_key.fib_index = s->out2in.fib_index; kv0.key = worker_by_out_key.as_u64; - kv0.value = cpu_index; + kv0.value = thread_index; clib_bihash_add_del_8_8 (&sm->worker_by_out, &kv0, 1); /* log NAT event */ @@ -465,7 +465,7 @@ snat_in2out_error_t icmp_get_key(icmp46_header_t *icmp0, * * @param[in,out] sm SNAT main * @param[in,out] node SNAT node runtime - * @param[in] cpu_index CPU index + * @param[in] thread_index thread index * @param[in,out] b0 buffer containing packet to be translated * @param[out] p_key address and port before NAT translation * @param[out] p_value address and port after NAT translation @@ -473,7 +473,7 @@ snat_in2out_error_t icmp_get_key(icmp46_header_t *icmp0, * @param d optional parameter */ u32 icmp_match_in2out_slow(snat_main_t *sm, vlib_node_runtime_t *node, - u32 cpu_index, vlib_buffer_t *b0, + u32 thread_index, vlib_buffer_t *b0, snat_session_key_t *p_key, snat_session_key_t *p_value, u8 *p_dont_translate, void *d) @@ -524,13 +524,13 @@ u32 icmp_match_in2out_slow(snat_main_t *sm, vlib_node_runtime_t *node, } next0 = slow_path (sm, b0, ip0, rx_fib_index0, &key0, - &s0, node, next0, cpu_index); + &s0, node, next0, thread_index); if (PREDICT_FALSE (next0 == SNAT_IN2OUT_NEXT_DROP)) goto out; } else - s0 = pool_elt_at_index (sm->per_thread_data[cpu_index].sessions, + s0 = pool_elt_at_index (sm->per_thread_data[thread_index].sessions, value0.value); out: @@ -548,7 +548,7 @@ out: * * @param[in] sm SNAT main * @param[in,out] node SNAT node runtime - * @param[in] cpu_index CPU index + * @param[in] thread_index thread index * @param[in,out] b0 buffer containing packet to be translated * @param[out] p_key address and port before NAT translation * @param[out] p_value address and port after NAT translation @@ -556,7 +556,7 @@ out: * @param d optional parameter */ u32 icmp_match_in2out_fast(snat_main_t *sm, vlib_node_runtime_t *node, - u32 cpu_index, vlib_buffer_t *b0, + u32 thread_index, vlib_buffer_t *b0, snat_session_key_t *p_key, snat_session_key_t *p_value, u8 *p_dont_translate, void *d) @@ -624,7 +624,7 @@ static inline u32 icmp_in2out (snat_main_t *sm, u32 rx_fib_index0, vlib_node_runtime_t * node, u32 next0, - u32 cpu_index, + u32 thread_index, void *d) { snat_session_key_t key0, sm0; @@ -641,7 +641,7 @@ static inline u32 icmp_in2out (snat_main_t *sm, echo0 = (icmp_echo_header_t *)(icmp0+1); - next0_tmp = sm->icmp_match_in2out_cb(sm, node, cpu_index, b0, + next0_tmp = sm->icmp_match_in2out_cb(sm, node, thread_index, b0, &key0, &sm0, &dont_translate, d); if (next0_tmp != ~0) next0 = next0_tmp; @@ -847,11 +847,11 @@ static inline u32 icmp_in2out_slow_path (snat_main_t *sm, vlib_node_runtime_t * node, u32 next0, f64 now, - u32 cpu_index, + u32 thread_index, snat_session_t ** p_s0) { next0 = icmp_in2out(sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, node, - next0, cpu_index, p_s0); + next0, thread_index, p_s0); snat_session_t * s0 = *p_s0; if (PREDICT_TRUE(next0 != SNAT_IN2OUT_NEXT_DROP 
&& s0)) { @@ -862,9 +862,9 @@ static inline u32 icmp_in2out_slow_path (snat_main_t *sm, /* Per-user LRU list maintenance for dynamic translations */ if (!snat_is_session_static (s0)) { - clib_dlist_remove (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_remove (sm->per_thread_data[thread_index].list_pool, s0->per_user_index); - clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, s0->per_user_list_head_index, s0->per_user_index); } @@ -884,7 +884,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, snat_runtime_t * rt = (snat_runtime_t *)node->runtime_data; f64 now = vlib_time_now (vm); u32 stats_node_index; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); stats_node_index = is_slow_path ? snat_in2out_slowpath_node.index : snat_in2out_node.index; @@ -977,7 +977,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, { next0 = icmp_in2out_slow_path (sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, - node, next0, now, cpu_index, &s0); + node, next0, now, thread_index, &s0); goto trace00; } } @@ -1006,7 +1006,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, goto trace00; next0 = slow_path (sm, b0, ip0, rx_fib_index0, &key0, - &s0, node, next0, cpu_index); + &s0, node, next0, thread_index); if (PREDICT_FALSE (next0 == SNAT_IN2OUT_NEXT_DROP)) goto trace00; } @@ -1017,7 +1017,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, } } else - s0 = pool_elt_at_index (sm->per_thread_data[cpu_index].sessions, + s0 = pool_elt_at_index (sm->per_thread_data[thread_index].sessions, value0.value); old_addr0 = ip0->src_address.as_u32; @@ -1063,9 +1063,9 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, /* Per-user LRU list maintenance for dynamic translation */ if (!snat_is_session_static (s0)) { - clib_dlist_remove (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_remove (sm->per_thread_data[thread_index].list_pool, s0->per_user_index); - clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, s0->per_user_list_head_index, s0->per_user_index); } @@ -1081,7 +1081,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, t->next_index = next0; t->session_index = ~0; if (s0) - t->session_index = s0 - sm->per_thread_data[cpu_index].sessions; + t->session_index = s0 - sm->per_thread_data[thread_index].sessions; } pkts_processed += next0 != SNAT_IN2OUT_NEXT_DROP; @@ -1117,7 +1117,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, { next1 = icmp_in2out_slow_path (sm, b1, ip1, icmp1, sw_if_index1, rx_fib_index1, node, - next1, now, cpu_index, &s1); + next1, now, thread_index, &s1); goto trace01; } } @@ -1146,7 +1146,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, goto trace01; next1 = slow_path (sm, b1, ip1, rx_fib_index1, &key1, - &s1, node, next1, cpu_index); + &s1, node, next1, thread_index); if (PREDICT_FALSE (next1 == SNAT_IN2OUT_NEXT_DROP)) goto trace01; } @@ -1157,7 +1157,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, } } else - s1 = pool_elt_at_index (sm->per_thread_data[cpu_index].sessions, + s1 = pool_elt_at_index (sm->per_thread_data[thread_index].sessions, value1.value); old_addr1 = ip1->src_address.as_u32; @@ -1203,9 +1203,9 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, /* Per-user LRU list maintenance for dynamic translation */ if (!snat_is_session_static (s1)) { - clib_dlist_remove (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_remove (sm->per_thread_data[thread_index].list_pool, s1->per_user_index); 
- clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, s1->per_user_list_head_index, s1->per_user_index); } @@ -1220,7 +1220,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, t->next_index = next1; t->session_index = ~0; if (s1) - t->session_index = s1 - sm->per_thread_data[cpu_index].sessions; + t->session_index = s1 - sm->per_thread_data[thread_index].sessions; } pkts_processed += next1 != SNAT_IN2OUT_NEXT_DROP; @@ -1292,7 +1292,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, { next0 = icmp_in2out_slow_path (sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, node, - next0, now, cpu_index, &s0); + next0, now, thread_index, &s0); goto trace0; } } @@ -1321,7 +1321,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, goto trace0; next0 = slow_path (sm, b0, ip0, rx_fib_index0, &key0, - &s0, node, next0, cpu_index); + &s0, node, next0, thread_index); if (PREDICT_FALSE (next0 == SNAT_IN2OUT_NEXT_DROP)) goto trace0; @@ -1333,7 +1333,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, } } else - s0 = pool_elt_at_index (sm->per_thread_data[cpu_index].sessions, + s0 = pool_elt_at_index (sm->per_thread_data[thread_index].sessions, value0.value); old_addr0 = ip0->src_address.as_u32; @@ -1379,9 +1379,9 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, /* Per-user LRU list maintenance for dynamic translation */ if (!snat_is_session_static (s0)) { - clib_dlist_remove (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_remove (sm->per_thread_data[thread_index].list_pool, s0->per_user_index); - clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, s0->per_user_list_head_index, s0->per_user_index); } @@ -1397,7 +1397,7 @@ snat_in2out_node_fn_inline (vlib_main_t * vm, t->next_index = next0; t->session_index = ~0; if (s0) - t->session_index = s0 - sm->per_thread_data[cpu_index].sessions; + t->session_index = s0 - sm->per_thread_data[thread_index].sessions; } pkts_processed += next0 != SNAT_IN2OUT_NEXT_DROP; @@ -2010,7 +2010,7 @@ snat_in2out_worker_handoff_fn (vlib_main_t * vm, u32 n_left_to_next_worker = 0, *to_next_worker = 0; u32 next_worker_index = 0; u32 current_worker_index = ~0; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); ASSERT (vec_len (sm->workers)); @@ -2048,7 +2048,7 @@ snat_in2out_worker_handoff_fn (vlib_main_t * vm, next_worker_index = sm->worker_in2out_cb(ip0, rx_fib_index0); - if (PREDICT_FALSE (next_worker_index != cpu_index)) + if (PREDICT_FALSE (next_worker_index != thread_index)) { do_handoff = 1; diff --git a/src/plugins/snat/out2in.c b/src/plugins/snat/out2in.c index 656e42db..5d308d78 100644 --- a/src/plugins/snat/out2in.c +++ b/src/plugins/snat/out2in.c @@ -129,7 +129,7 @@ create_session_for_static_mapping (snat_main_t *sm, snat_session_key_t in2out, snat_session_key_t out2in, vlib_node_runtime_t * node, - u32 cpu_index) + u32 thread_index) { snat_user_t *u; snat_user_key_t user_key; @@ -146,36 +146,36 @@ create_session_for_static_mapping (snat_main_t *sm, if (clib_bihash_search_8_8 (&sm->user_hash, &kv0, &value0)) { /* no, make a new one */ - pool_get (sm->per_thread_data[cpu_index].users, u); + pool_get (sm->per_thread_data[thread_index].users, u); memset (u, 0, sizeof (*u)); u->addr = in2out.addr; u->fib_index = in2out.fib_index; - pool_get (sm->per_thread_data[cpu_index].list_pool, + pool_get (sm->per_thread_data[thread_index].list_pool, per_user_list_head_elt); 
u->sessions_per_user_list_head_index = per_user_list_head_elt - - sm->per_thread_data[cpu_index].list_pool; + sm->per_thread_data[thread_index].list_pool; - clib_dlist_init (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_init (sm->per_thread_data[thread_index].list_pool, u->sessions_per_user_list_head_index); - kv0.value = u - sm->per_thread_data[cpu_index].users; + kv0.value = u - sm->per_thread_data[thread_index].users; /* add user */ clib_bihash_add_del_8_8 (&sm->user_hash, &kv0, 1 /* is_add */); /* add non-traslated packets worker lookup */ - kv0.value = cpu_index; + kv0.value = thread_index; clib_bihash_add_del_8_8 (&sm->worker_by_in, &kv0, 1); } else { - u = pool_elt_at_index (sm->per_thread_data[cpu_index].users, + u = pool_elt_at_index (sm->per_thread_data[thread_index].users, value0.value); } - pool_get (sm->per_thread_data[cpu_index].sessions, s); + pool_get (sm->per_thread_data[thread_index].sessions, s); memset (s, 0, sizeof (*s)); s->outside_address_index = ~0; @@ -183,22 +183,22 @@ create_session_for_static_mapping (snat_main_t *sm, u->nstaticsessions++; /* Create list elts */ - pool_get (sm->per_thread_data[cpu_index].list_pool, + pool_get (sm->per_thread_data[thread_index].list_pool, per_user_translation_list_elt); - clib_dlist_init (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_init (sm->per_thread_data[thread_index].list_pool, per_user_translation_list_elt - - sm->per_thread_data[cpu_index].list_pool); + sm->per_thread_data[thread_index].list_pool); per_user_translation_list_elt->value = - s - sm->per_thread_data[cpu_index].sessions; + s - sm->per_thread_data[thread_index].sessions; s->per_user_index = - per_user_translation_list_elt - sm->per_thread_data[cpu_index].list_pool; + per_user_translation_list_elt - sm->per_thread_data[thread_index].list_pool; s->per_user_list_head_index = u->sessions_per_user_list_head_index; - clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, s->per_user_list_head_index, per_user_translation_list_elt - - sm->per_thread_data[cpu_index].list_pool); + sm->per_thread_data[thread_index].list_pool); s->in2out = in2out; s->out2in = out2in; @@ -206,12 +206,12 @@ create_session_for_static_mapping (snat_main_t *sm, /* Add to translation hashes */ kv0.key = s->in2out.as_u64; - kv0.value = s - sm->per_thread_data[cpu_index].sessions; + kv0.value = s - sm->per_thread_data[thread_index].sessions; if (clib_bihash_add_del_8_8 (&sm->in2out, &kv0, 1 /* is_add */)) clib_warning ("in2out key add failed"); kv0.key = s->out2in.as_u64; - kv0.value = s - sm->per_thread_data[cpu_index].sessions; + kv0.value = s - sm->per_thread_data[thread_index].sessions; if (clib_bihash_add_del_8_8 (&sm->out2in, &kv0, 1 /* is_add */)) clib_warning ("out2in key add failed"); @@ -298,7 +298,7 @@ is_interface_addr(snat_main_t *sm, vlib_node_runtime_t *node, u32 sw_if_index0, * * @param[in,out] sm SNAT main * @param[in,out] node SNAT node runtime - * @param[in] cpu_index CPU index + * @param[in] thread_index thread index * @param[in,out] b0 buffer containing packet to be translated * @param[out] p_key address and port before NAT translation * @param[out] p_value address and port after NAT translation @@ -306,7 +306,7 @@ is_interface_addr(snat_main_t *sm, vlib_node_runtime_t *node, u32 sw_if_index0, * @param d optional parameter */ u32 icmp_match_out2in_slow(snat_main_t *sm, vlib_node_runtime_t *node, - u32 cpu_index, vlib_buffer_t *b0, + u32 thread_index, vlib_buffer_t *b0, 
snat_session_key_t *p_key, snat_session_key_t *p_value, u8 *p_dont_translate, void *d) @@ -366,7 +366,7 @@ u32 icmp_match_out2in_slow(snat_main_t *sm, vlib_node_runtime_t *node, /* Create session initiated by host from external network */ s0 = create_session_for_static_mapping(sm, b0, sm0, key0, - node, cpu_index); + node, thread_index); if (!s0) { @@ -375,7 +375,7 @@ u32 icmp_match_out2in_slow(snat_main_t *sm, vlib_node_runtime_t *node, } } else - s0 = pool_elt_at_index (sm->per_thread_data[cpu_index].sessions, + s0 = pool_elt_at_index (sm->per_thread_data[thread_index].sessions, value0.value); out: @@ -393,7 +393,7 @@ out: * * @param[in] sm SNAT main * @param[in,out] node SNAT node runtime - * @param[in] cpu_index CPU index + * @param[in] thread_index thread index * @param[in,out] b0 buffer containing packet to be translated * @param[out] p_key address and port before NAT translation * @param[out] p_value address and port after NAT translation @@ -401,7 +401,7 @@ out: * @param d optional parameter */ u32 icmp_match_out2in_fast(snat_main_t *sm, vlib_node_runtime_t *node, - u32 cpu_index, vlib_buffer_t *b0, + u32 thread_index, vlib_buffer_t *b0, snat_session_key_t *p_key, snat_session_key_t *p_value, u8 *p_dont_translate, void *d) @@ -460,7 +460,7 @@ static inline u32 icmp_out2in (snat_main_t *sm, u32 rx_fib_index0, vlib_node_runtime_t * node, u32 next0, - u32 cpu_index, + u32 thread_index, void *d) { snat_session_key_t key0, sm0; @@ -477,7 +477,7 @@ static inline u32 icmp_out2in (snat_main_t *sm, echo0 = (icmp_echo_header_t *)(icmp0+1); - next0_tmp = sm->icmp_match_out2in_cb(sm, node, cpu_index, b0, + next0_tmp = sm->icmp_match_out2in_cb(sm, node, thread_index, b0, &key0, &sm0, &dont_translate, d); if (next0_tmp != ~0) next0 = next0_tmp; @@ -589,11 +589,11 @@ static inline u32 icmp_out2in_slow_path (snat_main_t *sm, u32 rx_fib_index0, vlib_node_runtime_t * node, u32 next0, f64 now, - u32 cpu_index, + u32 thread_index, snat_session_t ** p_s0) { next0 = icmp_out2in(sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, node, - next0, cpu_index, p_s0); + next0, thread_index, p_s0); snat_session_t * s0 = *p_s0; if (PREDICT_TRUE(next0 != SNAT_OUT2IN_NEXT_DROP && s0)) { @@ -604,9 +604,9 @@ static inline u32 icmp_out2in_slow_path (snat_main_t *sm, /* Per-user LRU list maintenance for dynamic translation */ if (!snat_is_session_static (s0)) { - clib_dlist_remove (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_remove (sm->per_thread_data[thread_index].list_pool, s0->per_user_index); - clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, s0->per_user_list_head_index, s0->per_user_index); } @@ -624,7 +624,7 @@ snat_out2in_node_fn (vlib_main_t * vm, u32 pkts_processed = 0; snat_main_t * sm = &snat_main; f64 now = vlib_time_now (vm); - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; @@ -712,7 +712,7 @@ snat_out2in_node_fn (vlib_main_t * vm, { next0 = icmp_out2in_slow_path (sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, node, - next0, now, cpu_index, &s0); + next0, now, thread_index, &s0); goto trace0; } @@ -743,7 +743,7 @@ snat_out2in_node_fn (vlib_main_t * vm, /* Create session initiated by host from external network */ s0 = create_session_for_static_mapping(sm, b0, sm0, key0, node, - cpu_index); + thread_index); if (!s0) { b0->error = node->errors[SNAT_OUT2IN_ERROR_NO_TRANSLATION]; @@ -752,7 +752,7 @@ 
snat_out2in_node_fn (vlib_main_t * vm, } } else - s0 = pool_elt_at_index (sm->per_thread_data[cpu_index].sessions, + s0 = pool_elt_at_index (sm->per_thread_data[thread_index].sessions, value0.value); old_addr0 = ip0->dst_address.as_u32; @@ -796,9 +796,9 @@ snat_out2in_node_fn (vlib_main_t * vm, /* Per-user LRU list maintenance for dynamic translation */ if (!snat_is_session_static (s0)) { - clib_dlist_remove (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_remove (sm->per_thread_data[thread_index].list_pool, s0->per_user_index); - clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, s0->per_user_list_head_index, s0->per_user_index); } @@ -813,7 +813,7 @@ snat_out2in_node_fn (vlib_main_t * vm, t->next_index = next0; t->session_index = ~0; if (s0) - t->session_index = s0 - sm->per_thread_data[cpu_index].sessions; + t->session_index = s0 - sm->per_thread_data[thread_index].sessions; } pkts_processed += next0 != SNAT_OUT2IN_NEXT_DROP; @@ -847,7 +847,7 @@ snat_out2in_node_fn (vlib_main_t * vm, { next1 = icmp_out2in_slow_path (sm, b1, ip1, icmp1, sw_if_index1, rx_fib_index1, node, - next1, now, cpu_index, &s1); + next1, now, thread_index, &s1); goto trace1; } @@ -878,7 +878,7 @@ snat_out2in_node_fn (vlib_main_t * vm, /* Create session initiated by host from external network */ s1 = create_session_for_static_mapping(sm, b1, sm1, key1, node, - cpu_index); + thread_index); if (!s1) { b1->error = node->errors[SNAT_OUT2IN_ERROR_NO_TRANSLATION]; @@ -887,7 +887,7 @@ snat_out2in_node_fn (vlib_main_t * vm, } } else - s1 = pool_elt_at_index (sm->per_thread_data[cpu_index].sessions, + s1 = pool_elt_at_index (sm->per_thread_data[thread_index].sessions, value1.value); old_addr1 = ip1->dst_address.as_u32; @@ -931,9 +931,9 @@ snat_out2in_node_fn (vlib_main_t * vm, /* Per-user LRU list maintenance for dynamic translation */ if (!snat_is_session_static (s1)) { - clib_dlist_remove (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_remove (sm->per_thread_data[thread_index].list_pool, s1->per_user_index); - clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, s1->per_user_list_head_index, s1->per_user_index); } @@ -948,7 +948,7 @@ snat_out2in_node_fn (vlib_main_t * vm, t->next_index = next1; t->session_index = ~0; if (s1) - t->session_index = s1 - sm->per_thread_data[cpu_index].sessions; + t->session_index = s1 - sm->per_thread_data[thread_index].sessions; } pkts_processed += next1 != SNAT_OUT2IN_NEXT_DROP; @@ -1016,7 +1016,7 @@ snat_out2in_node_fn (vlib_main_t * vm, { next0 = icmp_out2in_slow_path (sm, b0, ip0, icmp0, sw_if_index0, rx_fib_index0, node, - next0, now, cpu_index, &s0); + next0, now, thread_index, &s0); goto trace00; } @@ -1048,7 +1048,7 @@ snat_out2in_node_fn (vlib_main_t * vm, /* Create session initiated by host from external network */ s0 = create_session_for_static_mapping(sm, b0, sm0, key0, node, - cpu_index); + thread_index); if (!s0) { b0->error = node->errors[SNAT_OUT2IN_ERROR_NO_TRANSLATION]; @@ -1057,7 +1057,7 @@ snat_out2in_node_fn (vlib_main_t * vm, } } else - s0 = pool_elt_at_index (sm->per_thread_data[cpu_index].sessions, + s0 = pool_elt_at_index (sm->per_thread_data[thread_index].sessions, value0.value); old_addr0 = ip0->dst_address.as_u32; @@ -1101,9 +1101,9 @@ snat_out2in_node_fn (vlib_main_t * vm, /* Per-user LRU list maintenance for dynamic translation */ if (!snat_is_session_static (s0)) { - clib_dlist_remove 
(sm->per_thread_data[cpu_index].list_pool, + clib_dlist_remove (sm->per_thread_data[thread_index].list_pool, s0->per_user_index); - clib_dlist_addtail (sm->per_thread_data[cpu_index].list_pool, + clib_dlist_addtail (sm->per_thread_data[thread_index].list_pool, s0->per_user_list_head_index, s0->per_user_index); } @@ -1118,7 +1118,7 @@ snat_out2in_node_fn (vlib_main_t * vm, t->next_index = next0; t->session_index = ~0; if (s0) - t->session_index = s0 - sm->per_thread_data[cpu_index].sessions; + t->session_index = s0 - sm->per_thread_data[thread_index].sessions; } pkts_processed += next0 != SNAT_OUT2IN_NEXT_DROP; @@ -1599,7 +1599,7 @@ snat_out2in_worker_handoff_fn (vlib_main_t * vm, u32 n_left_to_next_worker = 0, *to_next_worker = 0; u32 next_worker_index = 0; u32 current_worker_index = ~0; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); ASSERT (vec_len (sm->workers)); @@ -1637,7 +1637,7 @@ snat_out2in_worker_handoff_fn (vlib_main_t * vm, next_worker_index = sm->worker_out2in_cb(ip0, rx_fib_index0); - if (PREDICT_FALSE (next_worker_index != cpu_index)) + if (PREDICT_FALSE (next_worker_index != thread_index)) { do_handoff = 1; diff --git a/src/plugins/snat/snat.h b/src/plugins/snat/snat.h index 017825c0..f4e1c5c0 100644 --- a/src/plugins/snat/snat.h +++ b/src/plugins/snat/snat.h @@ -221,7 +221,7 @@ struct snat_main_s; typedef u32 snat_icmp_match_function_t (struct snat_main_s *sm, vlib_node_runtime_t *node, - u32 cpu_index, + u32 thread_index, vlib_buffer_t *b0, snat_session_key_t *p_key, snat_session_key_t *p_value, @@ -402,22 +402,22 @@ typedef struct { } tcp_udp_header_t; u32 icmp_match_in2out_fast(snat_main_t *sm, vlib_node_runtime_t *node, - u32 cpu_index, vlib_buffer_t *b0, + u32 thread_index, vlib_buffer_t *b0, snat_session_key_t *p_key, snat_session_key_t *p_value, u8 *p_dont_translate, void *d); u32 icmp_match_in2out_slow(snat_main_t *sm, vlib_node_runtime_t *node, - u32 cpu_index, vlib_buffer_t *b0, + u32 thread_index, vlib_buffer_t *b0, snat_session_key_t *p_key, snat_session_key_t *p_value, u8 *p_dont_translate, void *d); u32 icmp_match_out2in_fast(snat_main_t *sm, vlib_node_runtime_t *node, - u32 cpu_index, vlib_buffer_t *b0, + u32 thread_index, vlib_buffer_t *b0, snat_session_key_t *p_key, snat_session_key_t *p_value, u8 *p_dont_translate, void *d); u32 icmp_match_out2in_slow(snat_main_t *sm, vlib_node_runtime_t *node, - u32 cpu_index, vlib_buffer_t *b0, + u32 thread_index, vlib_buffer_t *b0, snat_session_key_t *p_key, snat_session_key_t *p_value, u8 *p_dont_translate, void *d); diff --git a/src/vlib/buffer.c b/src/vlib/buffer.c index a517a597..be3b41ef 100644 --- a/src/vlib/buffer.c +++ b/src/vlib/buffer.c @@ -299,7 +299,7 @@ vlib_buffer_validate_alloc_free (vlib_main_t * vm, if (CLIB_DEBUG == 0) return; - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); /* smp disaster check */ if (vec_len (vlib_mains) > 1) @@ -355,7 +355,7 @@ vlib_buffer_create_free_list_helper (vlib_main_t * vm, vlib_buffer_free_list_t *f; int i; - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); if (!is_default && pool_elts (bm->buffer_free_list_pool) == 0) { @@ -474,7 +474,7 @@ vlib_buffer_delete_free_list_internal (vlib_main_t * vm, u32 free_list_index) u32 merge_index; int i; - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); f = vlib_buffer_get_free_list (vm, free_list_index); diff --git a/src/vlib/buffer_funcs.h b/src/vlib/buffer_funcs.h index 394c336a..328660a3 100644 --- 
a/src/vlib/buffer_funcs.h +++ b/src/vlib/buffer_funcs.h @@ -209,7 +209,7 @@ always_inline vlib_buffer_known_state_t vlib_buffer_is_known (vlib_main_t * vm, u32 buffer_index) { vlib_buffer_main_t *bm = vm->buffer_main; - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); uword *p = hash_get (bm->buffer_known_hash, buffer_index); return p ? p[0] : VLIB_BUFFER_UNKNOWN; @@ -221,7 +221,7 @@ vlib_buffer_set_known_state (vlib_main_t * vm, vlib_buffer_known_state_t state) { vlib_buffer_main_t *bm = vm->buffer_main; - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); hash_set (bm->buffer_known_hash, buffer_index, state); } diff --git a/src/vlib/cli.c b/src/vlib/cli.c index f853f655..3cc95076 100644 --- a/src/vlib/cli.c +++ b/src/vlib/cli.c @@ -709,7 +709,7 @@ test_heap_validate (vlib_main_t * vm, unformat_input_t * input, { /* *INDENT-OFF* */ foreach_vlib_main({ - heap = clib_per_cpu_mheaps[this_vlib_main->cpu_index]; + heap = clib_per_cpu_mheaps[this_vlib_main->thread_index]; mheap = mheap_header(heap); mheap->flags |= MHEAP_FLAG_VALIDATE; // Turn off small object cache because it delays detection of errors @@ -722,7 +722,7 @@ test_heap_validate (vlib_main_t * vm, unformat_input_t * input, { /* *INDENT-OFF* */ foreach_vlib_main({ - heap = clib_per_cpu_mheaps[this_vlib_main->cpu_index]; + heap = clib_per_cpu_mheaps[this_vlib_main->thread_index]; mheap = mheap_header(heap); mheap->flags &= ~MHEAP_FLAG_VALIDATE; mheap->flags |= MHEAP_FLAG_SMALL_OBJECT_CACHE; @@ -733,7 +733,7 @@ test_heap_validate (vlib_main_t * vm, unformat_input_t * input, { /* *INDENT-OFF* */ foreach_vlib_main({ - heap = clib_per_cpu_mheaps[this_vlib_main->cpu_index]; + heap = clib_per_cpu_mheaps[this_vlib_main->thread_index]; mheap = mheap_header(heap); mheap_validate(heap); }); diff --git a/src/vlib/counter.h b/src/vlib/counter.h index 17a85217..60e2055d 100644 --- a/src/vlib/counter.h +++ b/src/vlib/counter.h @@ -70,17 +70,17 @@ u32 vlib_simple_counter_n_counters (const vlib_simple_counter_main_t * cm); /** Increment a simple counter @param cm - (vlib_simple_counter_main_t *) simple counter main pointer - @param cpu_index - (u32) the current cpu index + @param thread_index - (u32) the current thread index @param index - (u32) index of the counter to increment @param increment - (u64) quantity to add to the counter */ always_inline void vlib_increment_simple_counter (vlib_simple_counter_main_t * cm, - u32 cpu_index, u32 index, u64 increment) + u32 thread_index, u32 index, u64 increment) { counter_t *my_counters; - my_counters = cm->counters[cpu_index]; + my_counters = cm->counters[thread_index]; my_counters[index] += increment; } @@ -201,7 +201,7 @@ void vlib_clear_combined_counters (vlib_combined_counter_main_t * cm); /** Increment a combined counter @param cm - (vlib_combined_counter_main_t *) combined counter main pointer - @param cpu_index - (u32) the current cpu index + @param thread_index - (u32) the current thread index @param index - (u32) index of the counter to increment @param packet_increment - (u64) number of packets to add to the counter @param byte_increment - (u64) number of bytes to add to the counter @@ -209,13 +209,13 @@ void vlib_clear_combined_counters (vlib_combined_counter_main_t * cm); always_inline void vlib_increment_combined_counter (vlib_combined_counter_main_t * cm, - u32 cpu_index, + u32 thread_index, u32 index, u64 n_packets, u64 n_bytes) { vlib_counter_t *my_counters; /* Use this CPU's counter array */ - my_counters = cm->counters[cpu_index]; +
my_counters = cm->counters[thread_index]; my_counters[index].packets += n_packets; my_counters[index].bytes += n_bytes; @@ -224,14 +224,14 @@ vlib_increment_combined_counter (vlib_combined_counter_main_t * cm, /** Pre-fetch a per-thread combined counter for the given object index */ always_inline void vlib_prefetch_combined_counter (const vlib_combined_counter_main_t * cm, - u32 cpu_index, u32 index) + u32 thread_index, u32 index) { vlib_counter_t *cpu_counters; /* * This CPU's index is assumed to already be in cache */ - cpu_counters = cm->counters[cpu_index]; + cpu_counters = cm->counters[thread_index]; CLIB_PREFETCH (cpu_counters + index, CLIB_CACHE_LINE_BYTES, STORE); } diff --git a/src/vlib/error.c b/src/vlib/error.c index a2c23176..e4ed4ee3 100644 --- a/src/vlib/error.c +++ b/src/vlib/error.c @@ -149,7 +149,7 @@ vlib_register_errors (vlib_main_t * vm, vlib_node_t *n = vlib_get_node (vm, node_index); uword l; - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); /* Free up any previous error strings. */ if (n->n_errors > 0) diff --git a/src/vlib/global_funcs.h b/src/vlib/global_funcs.h index f51ec381..9dd01fbf 100644 --- a/src/vlib/global_funcs.h +++ b/src/vlib/global_funcs.h @@ -23,7 +23,7 @@ always_inline vlib_main_t * vlib_get_main (void) { vlib_main_t *vm; - vm = vlib_mains[os_get_cpu_number ()]; + vm = vlib_mains[vlib_get_thread_index ()]; ASSERT (vm); return vm; } diff --git a/src/vlib/main.c b/src/vlib/main.c index b22203f0..422d3e26 100644 --- a/src/vlib/main.c +++ b/src/vlib/main.c @@ -136,18 +136,18 @@ vlib_frame_alloc_to_node (vlib_main_t * vm, u32 to_node_index, else { f = clib_mem_alloc_aligned_no_fail (n, VLIB_FRAME_ALIGN); - f->cpu_index = vm->cpu_index; + f->thread_index = vm->thread_index; fi = vlib_frame_index_no_check (vm, f); } /* Poison frame when debugging. */ if (CLIB_DEBUG > 0) { - u32 save_cpu_index = f->cpu_index; + u32 save_thread_index = f->thread_index; memset (f, 0xfe, n); - f->cpu_index = save_cpu_index; + f->thread_index = save_thread_index; } /* Insert magic number. */ @@ -517,7 +517,7 @@ vlib_put_next_frame (vlib_main_t * vm, * a dangling frame reference. Each thread has its own copy of * the next_frames vector. */ - if (0 && r->cpu_index != next_runtime->cpu_index) + if (0 && r->thread_index != next_runtime->thread_index) { nf->frame_index = ~0; nf->flags &= ~(VLIB_FRAME_PENDING | VLIB_FRAME_IS_ALLOCATED); @@ -866,7 +866,7 @@ vlib_elog_main_loop_event (vlib_main_t * vm, : evm->node_call_elog_event_types, node_index), /* track */ - (vm->cpu_index ? &vlib_worker_threads[vm->cpu_index]. + (vm->thread_index ? &vlib_worker_threads[vm->thread_index]. 
elog_track : &em->default_track), /* data to log */ n_vectors); } @@ -963,7 +963,7 @@ dispatch_node (vlib_main_t * vm, vm->cpu_time_last_node_dispatch = last_time_stamp; - if (1 /* || vm->cpu_index == node->cpu_index */ ) + if (1 /* || vm->thread_index == node->thread_index */ ) { vlib_main_t *stat_vm; @@ -1029,7 +1029,7 @@ dispatch_node (vlib_main_t * vm, { u32 node_name, vector_length, is_polling; } *ed; - vlib_worker_thread_t *w = vlib_worker_threads + vm->cpu_index; + vlib_worker_thread_t *w = vlib_worker_threads + vm->thread_index; #endif if ((dispatch_state == VLIB_NODE_STATE_INTERRUPT diff --git a/src/vlib/main.h b/src/vlib/main.h index 0197b4f3..329bf073 100644 --- a/src/vlib/main.h +++ b/src/vlib/main.h @@ -156,7 +156,7 @@ typedef struct vlib_main_t uword *init_functions_called; /* to compare with node runtime */ - u32 cpu_index; + u32 thread_index; void **mbuf_alloc_list; diff --git a/src/vlib/node.c b/src/vlib/node.c index dc0a4de5..bbd3a42e 100644 --- a/src/vlib/node.c +++ b/src/vlib/node.c @@ -99,7 +99,7 @@ vlib_node_runtime_update (vlib_main_t * vm, u32 node_index, u32 next_index) vlib_pending_frame_t *pf; i32 i, j, n_insert; - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); vlib_worker_thread_barrier_sync (vm); diff --git a/src/vlib/node.h b/src/vlib/node.h index fc7e7da2..1e2f4c38 100644 --- a/src/vlib/node.h +++ b/src/vlib/node.h @@ -344,8 +344,8 @@ typedef struct vlib_frame_t /* Number of vector elements currently in frame. */ u16 n_vectors; - /* Owner cpuid / heap id */ - u16 cpu_index; + /* Owner thread / heap id */ + u16 thread_index; /* Scalar and vector arguments to next node. */ u8 arguments[0]; @@ -459,7 +459,7 @@ typedef struct vlib_node_runtime_t zero before first run of this node. */ - u16 cpu_index; /**< CPU this node runs on */ + u16 thread_index; /**< thread this node runs on */ u8 runtime_data[0]; /**< Function dependent node-runtime data. This data is diff --git a/src/vlib/node_funcs.h b/src/vlib/node_funcs.h index 1f7d94e1..54e36874 100644 --- a/src/vlib/node_funcs.h +++ b/src/vlib/node_funcs.h @@ -201,9 +201,9 @@ always_inline vlib_frame_t * vlib_get_frame_no_check (vlib_main_t * vm, uword frame_index) { vlib_frame_t *f; - u32 cpu_index = frame_index & VLIB_CPU_MASK; + u32 thread_index = frame_index & VLIB_CPU_MASK; u32 offset = frame_index & VLIB_OFFSET_MASK; - vm = vlib_mains[cpu_index]; + vm = vlib_mains[thread_index]; f = vm->heap_base + offset; return f; } @@ -215,10 +215,10 @@ vlib_frame_index_no_check (vlib_main_t * vm, vlib_frame_t * f) ASSERT (((uword) f & VLIB_CPU_MASK) == 0); - vm = vlib_mains[f->cpu_index]; + vm = vlib_mains[f->thread_index]; i = ((u8 *) f - (u8 *) vm->heap_base); - return i | f->cpu_index; + return i | f->thread_index; } always_inline vlib_frame_t * diff --git a/src/vlib/threads.c b/src/vlib/threads.c index ef3a24d3..4a111f8d 100644 --- a/src/vlib/threads.c +++ b/src/vlib/threads.c @@ -35,27 +35,12 @@ vl (void *p) vlib_worker_thread_t *vlib_worker_threads; vlib_thread_main_t vlib_thread_main; +__thread uword vlib_thread_index = 0; + uword os_get_cpu_number (void) { - void *sp; - uword n; - u32 len; - - len = vec_len (vlib_thread_stacks); - if (len == 0) - return 0; - - /* Get any old stack address. */ - sp = &sp; - - n = ((uword) sp - (uword) vlib_thread_stacks[0]) - >> VLIB_LOG2_THREAD_STACK_SIZE; - - /* "processes" have their own stacks, and they always run in thread 0 */ - n = n >= len ? 
0 : n; - - return n; + return vlib_thread_index; } uword @@ -275,21 +260,6 @@ vlib_thread_init (vlib_main_t * vm) return 0; } -vlib_worker_thread_t * -vlib_alloc_thread (vlib_main_t * vm) -{ - vlib_worker_thread_t *w; - - if (vec_len (vlib_worker_threads) >= vec_len (vlib_thread_stacks)) - { - clib_warning ("out of worker threads... Quitting..."); - exit (1); - } - vec_add2 (vlib_worker_threads, w, 1); - w->thread_stack = vlib_thread_stacks[w - vlib_worker_threads]; - return w; -} - vlib_frame_queue_t * vlib_frame_queue_alloc (int nelts) { @@ -427,7 +397,7 @@ vlib_frame_queue_enqueue (vlib_main_t * vm, u32 node_runtime_index, f64 b4 = vlib_time_now_ticks (vm, before); vlib_worker_thread_barrier_check (vm, b4); /* Bad idea. Dequeue -> enqueue -> dequeue -> trouble */ - // vlib_frame_queue_dequeue (vm->cpu_index, vm, nm); + // vlib_frame_queue_dequeue (vm->thread_index, vm, nm); } elt = fq->elts + (new_tail & (fq->nelts - 1)); @@ -497,6 +467,8 @@ vlib_worker_thread_bootstrap_fn (void *arg) w->lwp = syscall (SYS_gettid); w->thread_id = pthread_self (); + vlib_thread_index = w - vlib_worker_threads; + rv = (void *) clib_calljmp ((uword (*)(uword)) w->thread_function, (uword) arg, w->thread_stack + VLIB_THREAD_STACK_SIZE); @@ -610,7 +582,9 @@ start_workers (vlib_main_t * vm) mheap_alloc (0 /* use VM */ , tr->mheap_size); else w->thread_mheap = main_heap; - w->thread_stack = vlib_thread_stacks[w - vlib_worker_threads]; + + w->thread_stack = + vlib_thread_stack_init (w - vlib_worker_threads); w->thread_function = tr->function; w->thread_function_arg = w; w->instance_id = k; @@ -630,7 +604,7 @@ start_workers (vlib_main_t * vm) vm_clone = clib_mem_alloc (sizeof (*vm_clone)); clib_memcpy (vm_clone, vlib_mains[0], sizeof (*vm_clone)); - vm_clone->cpu_index = worker_thread_index; + vm_clone->thread_index = worker_thread_index; vm_clone->heap_base = w->thread_mheap; vm_clone->mbuf_alloc_list = 0; vm_clone->init_functions_called = @@ -679,7 +653,7 @@ start_workers (vlib_main_t * vm) vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT]) { vlib_node_t *n = vlib_get_node (vm, rt->node_index); - rt->cpu_index = vm_clone->cpu_index; + rt->thread_index = vm_clone->thread_index; /* copy initial runtime_data from node */ if (n->runtime_data && n->runtime_data_bytes > 0) clib_memcpy (rt->runtime_data, n->runtime_data, @@ -692,7 +666,7 @@ start_workers (vlib_main_t * vm) vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT]) { vlib_node_t *n = vlib_get_node (vm, rt->node_index); - rt->cpu_index = vm_clone->cpu_index; + rt->thread_index = vm_clone->thread_index; /* copy initial runtime_data from node */ if (n->runtime_data && n->runtime_data_bytes > 0) clib_memcpy (rt->runtime_data, n->runtime_data, @@ -756,7 +730,8 @@ start_workers (vlib_main_t * vm) mheap_alloc (0 /* use VM */ , tr->mheap_size); else w->thread_mheap = main_heap; - w->thread_stack = vlib_thread_stacks[w - vlib_worker_threads]; + w->thread_stack = + vlib_thread_stack_init (w - vlib_worker_threads); w->thread_function = tr->function; w->thread_function_arg = w; w->instance_id = j; @@ -827,7 +802,7 @@ vlib_worker_thread_node_runtime_update (void) uword n_calls, uword n_vectors, uword n_clocks); - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); if (vec_len (vlib_mains) == 1) return; @@ -835,7 +810,7 @@ vlib_worker_thread_node_runtime_update (void) vm = vlib_mains[0]; nm = &vm->node_main; - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); ASSERT 
(*vlib_worker_threads->wait_at_barrier == 1); /* @@ -955,7 +930,7 @@ vlib_worker_thread_node_runtime_update (void) vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL]) { vlib_node_t *n = vlib_get_node (vm, rt->node_index); - rt->cpu_index = vm_clone->cpu_index; + rt->thread_index = vm_clone->thread_index; /* copy runtime_data, will be overwritten later for existing rt */ if (n->runtime_data && n->runtime_data_bytes > 0) clib_memcpy (rt->runtime_data, n->runtime_data, @@ -981,7 +956,7 @@ vlib_worker_thread_node_runtime_update (void) vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT]) { vlib_node_t *n = vlib_get_node (vm, rt->node_index); - rt->cpu_index = vm_clone->cpu_index; + rt->thread_index = vm_clone->thread_index; /* copy runtime_data, will be overwritten later for existing rt */ if (n->runtime_data && n->runtime_data_bytes > 0) clib_memcpy (rt->runtime_data, n->runtime_data, @@ -1180,7 +1155,7 @@ vlib_worker_thread_fork_fixup (vlib_fork_fixup_t which) if (vlib_mains == 0) return; - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); vlib_worker_thread_barrier_sync (vm); switch (which) @@ -1212,7 +1187,7 @@ vlib_worker_thread_barrier_sync (vlib_main_t * vm) vlib_worker_threads[0].barrier_sync_count++; - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); deadline = vlib_time_now (vm) + BARRIER_SYNC_TIMEOUT; @@ -1260,7 +1235,7 @@ vlib_worker_thread_barrier_release (vlib_main_t * vm) int vlib_frame_queue_dequeue (vlib_main_t * vm, vlib_frame_queue_main_t * fqm) { - u32 thread_id = vm->cpu_index; + u32 thread_id = vm->thread_index; vlib_frame_queue_t *fq = fqm->vlib_frame_queues[thread_id]; vlib_frame_queue_elt_t *elt; u32 *from, *to; @@ -1393,7 +1368,7 @@ vlib_worker_thread_fn (void *arg) vlib_main_t *vm = vlib_get_main (); clib_error_t *e; - ASSERT (vm->cpu_index == os_get_cpu_number ()); + ASSERT (vm->thread_index == vlib_get_thread_index ()); vlib_worker_thread_init (w); clib_time_init (&vm->clib_time); diff --git a/src/vlib/threads.h b/src/vlib/threads.h index eca4fc26..101d3d4a 100644 --- a/src/vlib/threads.h +++ b/src/vlib/threads.h @@ -153,8 +153,6 @@ typedef struct /* Called early, in thread 0's context */ clib_error_t *vlib_thread_init (vlib_main_t * vm); -vlib_worker_thread_t *vlib_alloc_thread (vlib_main_t * vm); - int vlib_frame_queue_enqueue (vlib_main_t * vm, u32 node_runtime_index, u32 frame_queue_index, vlib_frame_t * frame, vlib_frame_queue_msg_type_t type); @@ -183,12 +181,19 @@ u32 vlib_frame_queue_main_init (u32 node_index, u32 frame_queue_nelts); void vlib_worker_thread_barrier_sync (vlib_main_t * vm); void vlib_worker_thread_barrier_release (vlib_main_t * vm); +extern __thread uword vlib_thread_index; +static_always_inline uword +vlib_get_thread_index (void) +{ + return vlib_thread_index; +} + always_inline void vlib_smp_unsafe_warning (void) { if (CLIB_DEBUG > 0) { - if (os_get_cpu_number ()) + if (vlib_get_thread_index ()) fformat (stderr, "%s: SMP unsafe warning...\n", __FUNCTION__); } } @@ -331,21 +336,21 @@ vlib_num_workers () } always_inline u32 -vlib_get_worker_cpu_index (u32 worker_index) +vlib_get_worker_thread_index (u32 worker_index) { return worker_index + 1; } always_inline u32 -vlib_get_worker_index (u32 cpu_index) +vlib_get_worker_index (u32 thread_index) { - return cpu_index - 1; + return thread_index - 1; } always_inline u32 vlib_get_current_worker_index () { - return os_get_cpu_number () - 1; + return vlib_get_thread_index () - 1; } static inline void @@ -467,6 +472,8 @@ 
vlib_get_worker_handoff_queue_elt (u32 frame_queue_index, return elt; } +u8 *vlib_thread_stack_init (uword thread_index); + int vlib_thread_cb_register (struct vlib_main_t *vm, vlib_thread_callbacks_t * cb); diff --git a/src/vlib/unix/cj.c b/src/vlib/unix/cj.c index 33ba163a..7c1e9475 100644 --- a/src/vlib/unix/cj.c +++ b/src/vlib/unix/cj.c @@ -48,7 +48,7 @@ cj_log (u32 type, void *data0, void *data1) r = (cj_record_t *) & (cjm->records[new_tail & (cjm->num_records - 1)]); r->time = vlib_time_now (cjm->vlib_main); - r->cpu = os_get_cpu_number (); + r->thread_index = vlib_get_thread_index (); r->type = type; r->data[0] = pointer_to_uword (data0); r->data[1] = pointer_to_uword (data1); @@ -133,7 +133,8 @@ static inline void cj_dump_one_record (cj_record_t * r) { fprintf (stderr, "[%d]: %10.6f T%02d %llx %llx\n", - r->cpu, r->time, r->type, (long long unsigned int) r->data[0], + r->thread_index, r->time, r->type, + (long long unsigned int) r->data[0], (long long unsigned int) r->data[1]); } @@ -161,7 +162,7 @@ cj_dump_internal (u8 filter0_enable, u64 filter0, index = (cjm->tail + 1) & (cjm->num_records - 1); r = &(cjm->records[index]); - if (r->cpu != (u32) ~ 0) + if (r->thread_index != (u32) ~ 0) { /* Yes, dump from tail + 1 to the end */ for (i = index; i < cjm->num_records; i++) diff --git a/src/vlib/unix/cj.h b/src/vlib/unix/cj.h index 67626afe..d0a1d46e 100644 --- a/src/vlib/unix/cj.h +++ b/src/vlib/unix/cj.h @@ -23,7 +23,7 @@ typedef struct { f64 time; - u32 cpu; + u32 thread_index; u32 type; u64 data[2]; } cj_record_t; diff --git a/src/vlib/unix/main.c b/src/vlib/unix/main.c index 6b96cc0d..db5ddd64 100644 --- a/src/vlib/unix/main.c +++ b/src/vlib/unix/main.c @@ -510,13 +510,28 @@ thread0 (uword arg) return i; } +u8 * +vlib_thread_stack_init (uword thread_index) +{ + vec_validate (vlib_thread_stacks, thread_index); + vlib_thread_stacks[thread_index] = clib_mem_alloc_aligned + (VLIB_THREAD_STACK_SIZE, VLIB_THREAD_STACK_SIZE); + + /* + * Disallow writes to the bottom page of the stack, to + * catch stack overflows. + */ + if (mprotect (vlib_thread_stacks[thread_index], + clib_mem_get_page_size (), PROT_READ) < 0) + clib_unix_warning ("thread stack"); + return vlib_thread_stacks[thread_index]; +} + int vlib_unix_main (int argc, char *argv[]) { vlib_main_t *vm = &vlib_global_main; /* one and only time for this! */ - vlib_thread_main_t *tm = &vlib_thread_main; unformat_input_t input; - u8 *thread_stacks; clib_error_t *e; int i; @@ -548,29 +563,9 @@ vlib_unix_main (int argc, char *argv[]) } unformat_free (&input); - /* - * allocate n x VLIB_THREAD_STACK_SIZE stacks, aligned to a - * VLIB_THREAD_STACK_SIZE boundary - * See also: os_get_cpu_number() in vlib/vlib/threads.c - */ - thread_stacks = clib_mem_alloc_aligned - ((uword) tm->n_thread_stacks * VLIB_THREAD_STACK_SIZE, - VLIB_THREAD_STACK_SIZE); - - vec_validate (vlib_thread_stacks, tm->n_thread_stacks - 1); - for (i = 0; i < vec_len (vlib_thread_stacks); i++) - { - vlib_thread_stacks[i] = thread_stacks; - - /* - * Disallow writes to the bottom page of the stack, to - * catch stack overflows. 
- */- if (mprotect (thread_stacks, clib_mem_get_page_size (), PROT_READ) < 0) - clib_unix_warning ("thread stack"); + vlib_thread_stack_init (0); - thread_stacks += VLIB_THREAD_STACK_SIZE; - } + vlib_thread_index = 0; i = clib_calljmp (thread0, (uword) vm, (void *) (vlib_thread_stacks[0] + diff --git a/src/vnet/adj/adj_l2.c b/src/vnet/adj/adj_l2.c index f68e54e0..20d70dd4 100644 --- a/src/vnet/adj/adj_l2.c +++ b/src/vnet/adj/adj_l2.c @@ -52,7 +52,7 @@ adj_l2_rewrite_inline (vlib_main_t * vm, { u32 * from = vlib_frame_vector_args (frame); u32 n_left_from, n_left_to_next, * to_next, next_index; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); ethernet_main_t * em = &ethernet_main; n_left_from = frame->n_vectors; @@ -93,7 +93,7 @@ adj_l2_rewrite_inline (vlib_main_t * vm, vnet_buffer(p0)->sw_if_index[VLIB_TX] = adj0->rewrite_header.sw_if_index; vlib_increment_combined_counter(&adjacency_counters, - cpu_index, + thread_index, adj_index0, /* packet increment */ 0, /* byte increment */ rw_len0); diff --git a/src/vnet/adj/adj_midchain.c b/src/vnet/adj/adj_midchain.c index e8087f08..5756de43 100644 --- a/src/vnet/adj/adj_midchain.c +++ b/src/vnet/adj/adj_midchain.c @@ -49,7 +49,7 @@ adj_midchain_tx_inline (vlib_main_t * vm, u32 next_index; vnet_main_t *vnm = vnet_get_main (); vnet_interface_main_t *im = &vnm->interface_main; - u32 cpu_index = vm->cpu_index; + u32 thread_index = vm->thread_index; /* Vector of buffer / pkt indices we're supposed to process */ from = vlib_frame_vector_args (frame); @@ -124,13 +124,13 @@ adj_midchain_tx_inline (vlib_main_t * vm, { vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, + thread_index, adj0->rewrite_header.sw_if_index, 1, vlib_buffer_length_in_chain (vm, b0)); vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, + thread_index, adj1->rewrite_header.sw_if_index, 1, vlib_buffer_length_in_chain (vm, b1)); @@ -181,7 +181,7 @@ adj_midchain_tx_inline (vlib_main_t * vm, { vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, + thread_index, adj0->rewrite_header.sw_if_index, 1, vlib_buffer_length_in_chain (vm, b0)); diff --git a/src/vnet/adj/adj_nsh.c b/src/vnet/adj/adj_nsh.c index 9a0f9d8b..128570b0 100644 --- a/src/vnet/adj/adj_nsh.c +++ b/src/vnet/adj/adj_nsh.c @@ -53,7 +53,7 @@ adj_nsh_rewrite_inline (vlib_main_t * vm, { u32 * from = vlib_frame_vector_args (frame); u32 n_left_from, n_left_to_next, * to_next, next_index; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); n_left_from = frame->n_vectors; next_index = node->cached_next_index; @@ -94,7 +94,7 @@ adj_nsh_rewrite_inline (vlib_main_t * vm, vnet_buffer(p0)->ip.save_rewrite_length = rw_len0; vlib_increment_combined_counter(&adjacency_counters, - cpu_index, + thread_index, adj_index0, /* packet increment */ 0, /* byte increment */ rw_len0); diff --git a/src/vnet/classify/vnet_classify.c b/src/vnet/classify/vnet_classify.c index 98842a48..70a189b0 100644 --- a/src/vnet/classify/vnet_classify.c +++ b/src/vnet/classify/vnet_classify.c @@ -251,12 +251,12 @@ static inline void make_working_copy vnet_classify_entry_##size##_t * working_copy##size = 0; foreach_size_in_u32x4; #undef _ - u32 cpu_number = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); - if (cpu_number >= vec_len (t->working_copies)) + if (thread_index >= vec_len (t->working_copies)) { oldheap = clib_mem_set_heap
(t->mheap); - vec_validate (t->working_copies, cpu_number); + vec_validate (t->working_copies, thread_index); clib_mem_set_heap (oldheap); } @@ -265,7 +265,7 @@ static inline void make_working_copy * updates from multiple threads will not result in sporadic, spurious * lookup failures. */ - working_copy = t->working_copies[cpu_number]; + working_copy = t->working_copies[thread_index]; t->saved_bucket.as_u64 = b->as_u64; oldheap = clib_mem_set_heap (t->mheap); @@ -290,7 +290,7 @@ static inline void make_working_copy default: abort(); } - t->working_copies[cpu_number] = working_copy; + t->working_copies[thread_index] = working_copy; } _vec_len(working_copy) = (1<<b->log2_pages)*t->entries_per_page; @@ -318,7 +318,7 @@ static inline void make_working_copy working_bucket.offset = vnet_classify_get_offset (t, working_copy); CLIB_MEMORY_BARRIER(); b->as_u64 = working_bucket.as_u64; - t->working_copies[cpu_number] = working_copy; + t->working_copies[thread_index] = working_copy; } static vnet_classify_entry_t * @@ -387,7 +387,7 @@ int vnet_classify_add_del (vnet_classify_table_t * t, int i; u64 hash, new_hash; u32 new_log2_pages; - u32 cpu_number = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); u8 * key_minus_skip; ASSERT ((add_v->flags & VNET_CLASSIFY_ENTRY_FREE) == 0); @@ -498,7 +498,7 @@ int vnet_classify_add_del (vnet_classify_table_t * t, new_log2_pages = t->saved_bucket.log2_pages + 1; expand_again: - working_copy = t->working_copies[cpu_number]; + working_copy = t->working_copies[thread_index]; new_v = split_and_rehash (t, working_copy, new_log2_pages); if (new_v == 0) diff --git a/src/vnet/cop/ip4_whitelist.c b/src/vnet/cop/ip4_whitelist.c index 6ef3d7d7..1b5e336b 100644 --- a/src/vnet/cop/ip4_whitelist.c +++ b/src/vnet/cop/ip4_whitelist.c @@ -60,7 +60,7 @@ ip4_cop_whitelist_node_fn (vlib_main_t * vm, cop_feature_type_t next_index; cop_main_t *cm = &cop_main; vlib_combined_counter_main_t * vcm = &load_balance_main.lbm_via_counters; - u32 cpu_index = vm->cpu_index; + u32 thread_index = vm->thread_index; from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; @@ -177,12 +177,12 @@ ip4_cop_whitelist_node_fn (vlib_main_t * vm, dpo1 = load_balance_get_bucket_i(lb1, 0); vlib_increment_combined_counter - (vcm, cpu_index, lb_index0, 1, + (vcm, thread_index, lb_index0, 1, vlib_buffer_length_in_chain (vm, b0) + sizeof(ethernet_header_t)); vlib_increment_combined_counter - (vcm, cpu_index, lb_index1, 1, + (vcm, thread_index, lb_index1, 1, vlib_buffer_length_in_chain (vm, b1) + sizeof(ethernet_header_t)); @@ -273,7 +273,7 @@ ip4_cop_whitelist_node_fn (vlib_main_t * vm, dpo0 = load_balance_get_bucket_i(lb0, 0); vlib_increment_combined_counter - (vcm, cpu_index, lb_index0, 1, + (vcm, thread_index, lb_index0, 1, vlib_buffer_length_in_chain (vm, b0) + sizeof(ethernet_header_t)); diff --git a/src/vnet/cop/ip6_whitelist.c b/src/vnet/cop/ip6_whitelist.c index c2e16ccf..f3fe62e3 100644 --- a/src/vnet/cop/ip6_whitelist.c +++ b/src/vnet/cop/ip6_whitelist.c @@ -61,7 +61,7 @@ ip6_cop_whitelist_node_fn (vlib_main_t * vm, cop_main_t *cm = &cop_main; ip6_main_t * im6 = &ip6_main; vlib_combined_counter_main_t * vcm = &load_balance_main.lbm_via_counters; - u32 cpu_index = vm->cpu_index; + u32 thread_index = vm->thread_index; from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; @@ -153,12 +153,12 @@ ip6_cop_whitelist_node_fn (vlib_main_t * vm, dpo1 = load_balance_get_bucket_i(lb1, 0); vlib_increment_combined_counter - (vcm, cpu_index, lb_index0, 1, + (vcm, thread_index,
lb_index0, 1, vlib_buffer_length_in_chain (vm, b0) + sizeof(ethernet_header_t)); vlib_increment_combined_counter - (vcm, cpu_index, lb_index1, 1, + (vcm, thread_index, lb_index1, 1, vlib_buffer_length_in_chain (vm, b1) + sizeof(ethernet_header_t)); @@ -233,7 +233,7 @@ ip6_cop_whitelist_node_fn (vlib_main_t * vm, dpo0 = load_balance_get_bucket_i(lb0, 0); vlib_increment_combined_counter - (vcm, cpu_index, lb_index0, 1, + (vcm, thread_index, lb_index0, 1, vlib_buffer_length_in_chain (vm, b0) + sizeof(ethernet_header_t)); diff --git a/src/vnet/devices/af_packet/node.c b/src/vnet/devices/af_packet/node.c index ba337f3f..76980102 100644 --- a/src/vnet/devices/af_packet/node.c +++ b/src/vnet/devices/af_packet/node.c @@ -124,7 +124,7 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, u32 frame_num = apif->rx_req->tp_frame_nr; u8 *block_start = apif->rx_ring + block * block_size; uword n_trace = vlib_get_trace_count (vm, node); - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); u32 n_buffer_bytes = vlib_buffer_free_list_buffer_size (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX); u32 min_bufs = apif->rx_req->tp_frame_size / n_buffer_bytes; @@ -132,15 +132,15 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, if (apif->per_interface_next_index != ~0) next_index = apif->per_interface_next_index; - n_free_bufs = vec_len (apm->rx_buffers[cpu_index]); + n_free_bufs = vec_len (apm->rx_buffers[thread_index]); if (PREDICT_FALSE (n_free_bufs < VLIB_FRAME_SIZE)) { - vec_validate (apm->rx_buffers[cpu_index], + vec_validate (apm->rx_buffers[thread_index], VLIB_FRAME_SIZE + n_free_bufs - 1); n_free_bufs += - vlib_buffer_alloc (vm, &apm->rx_buffers[cpu_index][n_free_bufs], + vlib_buffer_alloc (vm, &apm->rx_buffers[thread_index][n_free_bufs], VLIB_FRAME_SIZE); - _vec_len (apm->rx_buffers[cpu_index]) = n_free_bufs; + _vec_len (apm->rx_buffers[thread_index]) = n_free_bufs; } rx_frame = apif->next_rx_frame; @@ -163,11 +163,11 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, { /* grab free buffer */ u32 last_empty_buffer = - vec_len (apm->rx_buffers[cpu_index]) - 1; + vec_len (apm->rx_buffers[thread_index]) - 1; prev_bi0 = bi0; - bi0 = apm->rx_buffers[cpu_index][last_empty_buffer]; + bi0 = apm->rx_buffers[thread_index][last_empty_buffer]; b0 = vlib_get_buffer (vm, bi0); - _vec_len (apm->rx_buffers[cpu_index]) = last_empty_buffer; + _vec_len (apm->rx_buffers[thread_index]) = last_empty_buffer; n_free_bufs--; /* copy data */ @@ -236,9 +236,9 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_increment_combined_counter (vnet_get_main ()->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - os_get_cpu_number (), apif->hw_if_index, n_rx_packets, n_rx_bytes); + vlib_get_thread_index (), apif->hw_if_index, n_rx_packets, n_rx_bytes); - vnet_device_increment_rx_packets (cpu_index, n_rx_packets); + vnet_device_increment_rx_packets (thread_index, n_rx_packets); return n_rx_packets; } diff --git a/src/vnet/devices/devices.c b/src/vnet/devices/devices.c index 41645220..5e5e812c 100644 --- a/src/vnet/devices/devices.c +++ b/src/vnet/devices/devices.c @@ -104,7 +104,7 @@ vnet_device_queue_sort (void *a1, void *a2) void vnet_device_input_assign_thread (u32 hw_if_index, - u16 queue_id, uword cpu_index) + u16 queue_id, uword thread_index) { vnet_main_t *vnm = vnet_get_main (); vnet_device_main_t *vdm = &vnet_device_main; @@ -115,19 +115,19 @@ vnet_device_input_assign_thread (u32 
hw_if_index, ASSERT (hw->input_node_index > 0); - if (vdm->first_worker_cpu_index == 0) - cpu_index = 0; + if (vdm->first_worker_thread_index == 0) + thread_index = 0; - if (cpu_index != 0 && - (cpu_index < vdm->first_worker_cpu_index || - cpu_index > vdm->last_worker_cpu_index)) + if (thread_index != 0 && + (thread_index < vdm->first_worker_thread_index || + thread_index > vdm->last_worker_thread_index)) { - cpu_index = vdm->next_worker_cpu_index++; - if (vdm->next_worker_cpu_index > vdm->last_worker_cpu_index) - vdm->next_worker_cpu_index = vdm->first_worker_cpu_index; + thread_index = vdm->next_worker_thread_index++; + if (vdm->next_worker_thread_index > vdm->last_worker_thread_index) + vdm->next_worker_thread_index = vdm->first_worker_thread_index; } - vm = vlib_mains[cpu_index]; + vm = vlib_mains[thread_index]; rt = vlib_node_get_runtime_data (vm, hw->input_node_index); vec_add2 (rt->devices_and_queues, dq, 1); @@ -136,33 +136,33 @@ vnet_device_input_assign_thread (u32 hw_if_index, dq->queue_id = queue_id; vec_sort_with_function (rt->devices_and_queues, vnet_device_queue_sort); - vec_validate (hw->input_node_cpu_index_by_queue, queue_id); - hw->input_node_cpu_index_by_queue[queue_id] = cpu_index; + vec_validate (hw->input_node_thread_index_by_queue, queue_id); + hw->input_node_thread_index_by_queue[queue_id] = thread_index; } static int vnet_device_input_unassign_thread (u32 hw_if_index, u16 queue_id, - uword cpu_index) + uword thread_index) { vnet_main_t *vnm = vnet_get_main (); vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index); vnet_device_input_runtime_t *rt; vnet_device_and_queue_t *dq; - uword old_cpu_index; + uword old_thread_index; - if (hw->input_node_cpu_index_by_queue == 0) + if (hw->input_node_thread_index_by_queue == 0) return VNET_API_ERROR_INVALID_INTERFACE; - if (vec_len (hw->input_node_cpu_index_by_queue) < queue_id + 1) + if (vec_len (hw->input_node_thread_index_by_queue) < queue_id + 1) return VNET_API_ERROR_INVALID_INTERFACE; - old_cpu_index = hw->input_node_cpu_index_by_queue[queue_id]; + old_thread_index = hw->input_node_thread_index_by_queue[queue_id]; - if (old_cpu_index == cpu_index) + if (old_thread_index == thread_index) return 0; rt = - vlib_node_get_runtime_data (vlib_mains[old_cpu_index], + vlib_node_get_runtime_data (vlib_mains[old_thread_index], hw->input_node_index); vec_foreach (dq, rt->devices_and_queues) @@ -240,7 +240,7 @@ set_device_placement (vlib_main_t * vm, unformat_input_t * input, vnet_device_main_t *vdm = &vnet_device_main; u32 hw_if_index = (u32) ~ 0; u32 queue_id = (u32) 0; - u32 cpu_index = (u32) ~ 0; + u32 thread_index = (u32) ~ 0; int rv; if (!unformat_user (input, unformat_line_input, line_input)) @@ -253,10 +253,10 @@ set_device_placement (vlib_main_t * vm, unformat_input_t * input, ; else if (unformat (line_input, "queue %d", &queue_id)) ; - else if (unformat (line_input, "main", &cpu_index)) - cpu_index = 0; - else if (unformat (line_input, "worker %d", &cpu_index)) - cpu_index += vdm->first_worker_cpu_index; + else if (unformat (line_input, "main", &thread_index)) + thread_index = 0; + else if (unformat (line_input, "worker %d", &thread_index)) + thread_index += vdm->first_worker_thread_index; else { error = clib_error_return (0, "parse error: '%U'", @@ -271,16 +271,17 @@ set_device_placement (vlib_main_t * vm, unformat_input_t * input, if (hw_if_index == (u32) ~ 0) return clib_error_return (0, "please specify valid interface name"); - if (cpu_index > vdm->last_worker_cpu_index) + if (thread_index > 
vdm->last_worker_thread_index) return clib_error_return (0, "please specify valid worker thread or main"); - rv = vnet_device_input_unassign_thread (hw_if_index, queue_id, cpu_index); + rv = + vnet_device_input_unassign_thread (hw_if_index, queue_id, thread_index); if (rv) return clib_error_return (0, "not found"); - vnet_device_input_assign_thread (hw_if_index, queue_id, cpu_index); + vnet_device_input_assign_thread (hw_if_index, queue_id, thread_index); return 0; } @@ -326,9 +327,9 @@ vnet_device_init (vlib_main_t * vm) tr = p ? (vlib_thread_registration_t *) p[0] : 0; if (tr && tr->count > 0) { - vdm->first_worker_cpu_index = tr->first_index; - vdm->next_worker_cpu_index = tr->first_index; - vdm->last_worker_cpu_index = tr->first_index + tr->count - 1; + vdm->first_worker_thread_index = tr->first_index; + vdm->next_worker_thread_index = tr->first_index; + vdm->last_worker_thread_index = tr->first_index + tr->count - 1; } return 0; } diff --git a/src/vnet/devices/devices.h b/src/vnet/devices/devices.h index bbb29fe3..966f8302 100644 --- a/src/vnet/devices/devices.h +++ b/src/vnet/devices/devices.h @@ -50,9 +50,9 @@ typedef struct typedef struct { vnet_device_per_worker_data_t *workers; - uword first_worker_cpu_index; - uword last_worker_cpu_index; - uword next_worker_cpu_index; + uword first_worker_thread_index; + uword last_worker_thread_index; + uword next_worker_thread_index; } vnet_device_main_t; typedef struct @@ -80,7 +80,7 @@ vnet_set_device_input_node (u32 hw_if_index, u32 node_index) } void vnet_device_input_assign_thread (u32 hw_if_index, u16 queue_id, - uword cpu_index); + uword thread_index); static inline u64 vnet_get_aggregate_rx_packets (void) @@ -95,12 +95,12 @@ vnet_get_aggregate_rx_packets (void) } static inline void -vnet_device_increment_rx_packets (u32 cpu_index, u64 count) +vnet_device_increment_rx_packets (u32 thread_index, u64 count) { vnet_device_main_t *vdm = &vnet_device_main; vnet_device_per_worker_data_t *pwd; - pwd = vec_elt_at_index (vdm->workers, cpu_index); + pwd = vec_elt_at_index (vdm->workers, thread_index); pwd->aggregate_rx_packets += count; } @@ -117,9 +117,9 @@ vnet_device_input_set_interrupt_pending (vnet_main_t * vnm, u32 hw_if_index, { vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index); - ASSERT (queue_id < vec_len (hw->input_node_cpu_index_by_queue)); - u32 cpu_index = hw->input_node_cpu_index_by_queue[queue_id]; - vlib_node_set_interrupt_pending (vlib_mains[cpu_index], + ASSERT (queue_id < vec_len (hw->input_node_thread_index_by_queue)); + u32 thread_index = hw->input_node_thread_index_by_queue[queue_id]; + vlib_node_set_interrupt_pending (vlib_mains[thread_index], hw->input_node_index); } diff --git a/src/vnet/devices/netmap/node.c b/src/vnet/devices/netmap/node.c index 68ea7832..e120eeae 100644 --- a/src/vnet/devices/netmap/node.c +++ b/src/vnet/devices/netmap/node.c @@ -98,22 +98,22 @@ netmap_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, u32 n_free_bufs; struct netmap_ring *ring; int cur_ring; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); u32 n_buffer_bytes = vlib_buffer_free_list_buffer_size (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX); if (nif->per_interface_next_index != ~0) next_index = nif->per_interface_next_index; - n_free_bufs = vec_len (nm->rx_buffers[cpu_index]); + n_free_bufs = vec_len (nm->rx_buffers[thread_index]); if (PREDICT_FALSE (n_free_bufs < VLIB_FRAME_SIZE)) { - vec_validate (nm->rx_buffers[cpu_index], + vec_validate (nm->rx_buffers[thread_index], 
VLIB_FRAME_SIZE + n_free_bufs - 1); n_free_bufs += - vlib_buffer_alloc (vm, &nm->rx_buffers[cpu_index][n_free_bufs], + vlib_buffer_alloc (vm, &nm->rx_buffers[thread_index][n_free_bufs], VLIB_FRAME_SIZE); - _vec_len (nm->rx_buffers[cpu_index]) = n_free_bufs; + _vec_len (nm->rx_buffers[thread_index]) = n_free_bufs; } cur_ring = nif->first_rx_ring; @@ -163,11 +163,11 @@ netmap_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_buffer_t *b0; /* grab free buffer */ u32 last_empty_buffer = - vec_len (nm->rx_buffers[cpu_index]) - 1; + vec_len (nm->rx_buffers[thread_index]) - 1; prev_bi0 = bi0; - bi0 = nm->rx_buffers[cpu_index][last_empty_buffer]; + bi0 = nm->rx_buffers[thread_index][last_empty_buffer]; b0 = vlib_get_buffer (vm, bi0); - _vec_len (nm->rx_buffers[cpu_index]) = last_empty_buffer; + _vec_len (nm->rx_buffers[thread_index]) = last_empty_buffer; n_free_bufs--; /* copy data */ @@ -247,9 +247,9 @@ netmap_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_increment_combined_counter (vnet_get_main ()->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - os_get_cpu_number (), nif->hw_if_index, n_rx_packets, n_rx_bytes); + vlib_get_thread_index (), nif->hw_if_index, n_rx_packets, n_rx_bytes); - vnet_device_increment_rx_packets (cpu_index, n_rx_packets); + vnet_device_increment_rx_packets (thread_index, n_rx_packets); return n_rx_packets; } @@ -260,7 +260,7 @@ netmap_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, { int i; u32 n_rx_packets = 0; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); netmap_main_t *nm = &netmap_main; netmap_if_t *nmi; @@ -269,7 +269,7 @@ netmap_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node, nmi = vec_elt_at_index (nm->interfaces, i); if (nmi->is_admin_up && (i % nm->input_cpu_count) == - (cpu_index - nm->input_cpu_first_index)) + (thread_index - nm->input_cpu_first_index)) n_rx_packets += netmap_device_input_fn (vm, node, frame, nmi); } diff --git a/src/vnet/devices/ssvm/node.c b/src/vnet/devices/ssvm/node.c index a6c9dfd7..539b4161 100644 --- a/src/vnet/devices/ssvm/node.c +++ b/src/vnet/devices/ssvm/node.c @@ -89,7 +89,7 @@ ssvm_eth_device_input (ssvm_eth_main_t * em, ethernet_header_t *eh0; u16 type0; u32 n_rx_bytes = 0, l3_offset0; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); u32 trace_cnt __attribute__ ((unused)) = vlib_get_trace_count (vm, node); volatile u32 *lock; u32 *elt_indices; @@ -284,10 +284,10 @@ out: vlib_increment_combined_counter (vnet_get_main ()->interface_main.combined_sw_if_counters - + VNET_INTERFACE_COUNTER_RX, cpu_index, + + VNET_INTERFACE_COUNTER_RX, thread_index, intfc->vlib_hw_if_index, rx_queue_index, n_rx_bytes); - vnet_device_increment_rx_packets (cpu_index, rx_queue_index); + vnet_device_increment_rx_packets (thread_index, rx_queue_index); return rx_queue_index; } diff --git a/src/vnet/devices/virtio/vhost-user.c b/src/vnet/devices/virtio/vhost-user.c index 00807dc0..5e720f65 100644 --- a/src/vnet/devices/virtio/vhost-user.c +++ b/src/vnet/devices/virtio/vhost-user.c @@ -331,7 +331,7 @@ vhost_user_tx_thread_placement (vhost_user_intf_t * vui) { //Let's try to assign one queue to each thread u32 qid = 0; - u32 cpu_index = 0; + u32 thread_index = 0; vui->use_tx_spinlock = 0; while (1) { @@ -341,20 +341,21 @@ vhost_user_tx_thread_placement (vhost_user_intf_t * vui) if (!rxvq->started || !rxvq->enabled) continue; - vui->per_cpu_tx_qid[cpu_index] = qid; - cpu_index++; - if (cpu_index == 
vlib_get_thread_main ()->n_vlib_mains) + vui->per_cpu_tx_qid[thread_index] = qid; + thread_index++; + if (thread_index == vlib_get_thread_main ()->n_vlib_mains) return; } //We need to loop, meaning the spinlock has to be used vui->use_tx_spinlock = 1; - if (cpu_index == 0) + if (thread_index == 0) { //Could not find a single valid one - for (cpu_index = 0; - cpu_index < vlib_get_thread_main ()->n_vlib_mains; cpu_index++) + for (thread_index = 0; + thread_index < vlib_get_thread_main ()->n_vlib_mains; + thread_index++) { - vui->per_cpu_tx_qid[cpu_index] = 0; + vui->per_cpu_tx_qid[thread_index] = 0; } return; } @@ -368,7 +369,7 @@ vhost_user_rx_thread_placement () vhost_user_intf_t *vui; vhost_cpu_t *vhc; u32 *workers = 0; - u32 cpu_index; + u32 thread_index; vlib_main_t *vm; //Let's list all workers cpu indexes @@ -400,9 +401,9 @@ vhost_user_rx_thread_placement () continue; i %= vec_len (vui_workers); - cpu_index = vui_workers[i]; + thread_index = vui_workers[i]; i++; - vhc = &vum->cpus[cpu_index]; + vhc = &vum->cpus[thread_index]; iaq.qid = qid; iaq.vhost_iface_index = vui - vum->vhost_user_interfaces; @@ -429,14 +430,14 @@ vhost_user_rx_thread_placement () vhc->operation_mode = mode; } - for (cpu_index = vum->input_cpu_first_index; - cpu_index < vum->input_cpu_first_index + vum->input_cpu_count; - cpu_index++) + for (thread_index = vum->input_cpu_first_index; + thread_index < vum->input_cpu_first_index + vum->input_cpu_count; + thread_index++) { vlib_node_state_t state = VLIB_NODE_STATE_POLLING; - vhc = &vum->cpus[cpu_index]; - vm = vlib_mains ? vlib_mains[cpu_index] : &vlib_global_main; + vhc = &vum->cpus[thread_index]; + vm = vlib_mains ? vlib_mains[thread_index] : &vlib_global_main; switch (vhc->operation_mode) { case VHOST_USER_INTERRUPT_MODE: @@ -532,7 +533,7 @@ vhost_user_set_interrupt_pending (vhost_user_intf_t * vui, u32 ifq) { vhost_user_main_t *vum = &vhost_user_main; vhost_cpu_t *vhc; - u32 cpu_index; + u32 thread_index; vhost_iface_and_queue_t *vhiq; vlib_main_t *vm; u32 ifq2; @@ -553,8 +554,8 @@ vhost_user_set_interrupt_pending (vhost_user_intf_t * vui, u32 ifq) if ((vhiq->vhost_iface_index == (ifq >> 8)) && (VHOST_VRING_IDX_TX (vhiq->qid) == (ifq & 0xff))) { - cpu_index = vhc - vum->cpus; - vm = vlib_mains ? vlib_mains[cpu_index] : &vlib_global_main; + thread_index = vhc - vum->cpus; + vm = vlib_mains ? vlib_mains[thread_index] : &vlib_global_main; /* * Convert RX virtqueue number in the lower byte to vring * queue index for the input node process. Top bytes contain @@ -1592,7 +1593,7 @@ vhost_user_if_input (vlib_main_t * vm, u32 n_trace = vlib_get_trace_count (vm, node); u16 qsz_mask; u32 map_hint = 0; - u16 cpu_index = os_get_cpu_number (); + u16 thread_index = vlib_get_thread_index (); u16 copy_len = 0; { @@ -1651,32 +1652,32 @@ vhost_user_if_input (vlib_main_t * vm, * in the loop and come back later. This is not an issue as for big packet, * processing cost really comes from the memory copy. 
*/ - if (PREDICT_FALSE (vum->cpus[cpu_index].rx_buffers_len < n_left + 1)) + if (PREDICT_FALSE (vum->cpus[thread_index].rx_buffers_len < n_left + 1)) { - u32 curr_len = vum->cpus[cpu_index].rx_buffers_len; - vum->cpus[cpu_index].rx_buffers_len += + u32 curr_len = vum->cpus[thread_index].rx_buffers_len; + vum->cpus[thread_index].rx_buffers_len += vlib_buffer_alloc_from_free_list (vm, - vum->cpus[cpu_index].rx_buffers + + vum->cpus[thread_index].rx_buffers + curr_len, VHOST_USER_RX_BUFFERS_N - curr_len, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX); if (PREDICT_FALSE - (vum->cpus[cpu_index].rx_buffers_len < + (vum->cpus[thread_index].rx_buffers_len < VHOST_USER_RX_BUFFER_STARVATION)) { /* In case of buffer starvation, discard some packets from the queue * and log the event. * We keep doing best effort for the remaining packets. */ - u32 flush = (n_left + 1 > vum->cpus[cpu_index].rx_buffers_len) ? - n_left + 1 - vum->cpus[cpu_index].rx_buffers_len : 1; + u32 flush = (n_left + 1 > vum->cpus[thread_index].rx_buffers_len) ? + n_left + 1 - vum->cpus[thread_index].rx_buffers_len : 1; flush = vhost_user_rx_discard_packet (vm, vui, txvq, flush); n_left -= flush; vlib_increment_simple_counter (vnet_main. interface_main.sw_if_counters + VNET_INTERFACE_COUNTER_DROP, - os_get_cpu_number (), + vlib_get_thread_index (), vui->sw_if_index, flush); vlib_error_count (vm, vhost_user_input_node.index, @@ -1696,7 +1697,7 @@ vhost_user_if_input (vlib_main_t * vm, u32 desc_data_offset; vring_desc_t *desc_table = txvq->desc; - if (PREDICT_FALSE (vum->cpus[cpu_index].rx_buffers_len <= 1)) + if (PREDICT_FALSE (vum->cpus[thread_index].rx_buffers_len <= 1)) { /* Not enough rx_buffers * Note: We yeld on 1 so we don't need to do an additional @@ -1707,17 +1708,18 @@ vhost_user_if_input (vlib_main_t * vm, } desc_current = txvq->avail->ring[txvq->last_avail_idx & qsz_mask]; - vum->cpus[cpu_index].rx_buffers_len--; - bi_current = (vum->cpus[cpu_index].rx_buffers) - [vum->cpus[cpu_index].rx_buffers_len]; + vum->cpus[thread_index].rx_buffers_len--; + bi_current = (vum->cpus[thread_index].rx_buffers) + [vum->cpus[thread_index].rx_buffers_len]; b_head = b_current = vlib_get_buffer (vm, bi_current); to_next[0] = bi_current; //We do that now so we can forget about bi_current to_next++; n_left_to_next--; vlib_prefetch_buffer_with_index (vm, - (vum->cpus[cpu_index].rx_buffers) - [vum->cpus[cpu_index]. + (vum-> + cpus[thread_index].rx_buffers) + [vum->cpus[thread_index]. rx_buffers_len - 1], LOAD); /* Just preset the used descriptor id and length for later */ @@ -1791,7 +1793,7 @@ vhost_user_if_input (vlib_main_t * vm, (b_current->current_length == VLIB_BUFFER_DATA_SIZE)) { if (PREDICT_FALSE - (vum->cpus[cpu_index].rx_buffers_len == 0)) + (vum->cpus[thread_index].rx_buffers_len == 0)) { /* Cancel speculation */ to_next--; @@ -1805,17 +1807,18 @@ vhost_user_if_input (vlib_main_t * vm, * but valid. 
*/ vhost_user_input_rewind_buffers (vm, - &vum->cpus[cpu_index], + &vum->cpus + [thread_index], b_head); n_left = 0; goto stop; } /* Get next output */ - vum->cpus[cpu_index].rx_buffers_len--; + vum->cpus[thread_index].rx_buffers_len--; u32 bi_next = - (vum->cpus[cpu_index].rx_buffers)[vum->cpus - [cpu_index].rx_buffers_len]; + (vum->cpus[thread_index].rx_buffers)[vum->cpus + [thread_index].rx_buffers_len]; b_current->next_buffer = bi_next; b_current->flags |= VLIB_BUFFER_NEXT_PRESENT; bi_current = bi_next; @@ -1823,7 +1826,7 @@ vhost_user_if_input (vlib_main_t * vm, } /* Prepare a copy order executed later for the data */ - vhost_copy_t *cpy = &vum->cpus[cpu_index].copy[copy_len]; + vhost_copy_t *cpy = &vum->cpus[thread_index].copy[copy_len]; copy_len++; u32 desc_data_l = desc_table[desc_current].len - desc_data_offset; @@ -1880,7 +1883,7 @@ vhost_user_if_input (vlib_main_t * vm, if (PREDICT_FALSE (copy_len >= VHOST_USER_RX_COPY_THRESHOLD)) { if (PREDICT_FALSE - (vhost_user_input_copy (vui, vum->cpus[cpu_index].copy, + (vhost_user_input_copy (vui, vum->cpus[thread_index].copy, copy_len, &map_hint))) { clib_warning @@ -1905,7 +1908,7 @@ vhost_user_if_input (vlib_main_t * vm, /* Do the memory copies */ if (PREDICT_FALSE - (vhost_user_input_copy (vui, vum->cpus[cpu_index].copy, + (vhost_user_input_copy (vui, vum->cpus[thread_index].copy, copy_len, &map_hint))) { clib_warning ("Memory mapping error on interface hw_if_index=%d " @@ -1933,9 +1936,9 @@ vhost_user_if_input (vlib_main_t * vm, vlib_increment_combined_counter (vnet_main.interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - os_get_cpu_number (), vui->sw_if_index, n_rx_packets, n_rx_bytes); + vlib_get_thread_index (), vui->sw_if_index, n_rx_packets, n_rx_bytes); - vnet_device_increment_rx_packets (cpu_index, n_rx_packets); + vnet_device_increment_rx_packets (thread_index, n_rx_packets); return n_rx_packets; } @@ -1946,15 +1949,15 @@ vhost_user_input (vlib_main_t * vm, { vhost_user_main_t *vum = &vhost_user_main; uword n_rx_packets = 0; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); vhost_iface_and_queue_t *vhiq; vhost_user_intf_t *vui; vhost_cpu_t *vhc; - vhc = &vum->cpus[cpu_index]; + vhc = &vum->cpus[thread_index]; if (PREDICT_TRUE (vhc->operation_mode == VHOST_USER_POLLING_MODE)) { - vec_foreach (vhiq, vum->cpus[cpu_index].rx_queues) + vec_foreach (vhiq, vum->cpus[thread_index].rx_queues) { vui = &vum->vhost_user_interfaces[vhiq->vhost_iface_index]; n_rx_packets += vhost_user_if_input (vm, vum, vui, vhiq->qid, node); @@ -2096,7 +2099,7 @@ vhost_user_tx (vlib_main_t * vm, vhost_user_vring_t *rxvq; u16 qsz_mask; u8 error; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); u32 map_hint = 0; u8 retry = 8; u16 copy_len; @@ -2116,7 +2119,7 @@ vhost_user_tx (vlib_main_t * vm, qid = VHOST_VRING_IDX_RX (*vec_elt_at_index - (vui->per_cpu_tx_qid, os_get_cpu_number ())); + (vui->per_cpu_tx_qid, vlib_get_thread_index ())); rxvq = &vui->vrings[qid]; if (PREDICT_FALSE (vui->use_tx_spinlock)) vhost_user_vring_lock (vui, qid); @@ -2143,10 +2146,10 @@ retry: if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) { - vum->cpus[cpu_index].current_trace = + vum->cpus[thread_index].current_trace = vlib_add_trace (vm, node, b0, - sizeof (*vum->cpus[cpu_index].current_trace)); - vhost_user_tx_trace (vum->cpus[cpu_index].current_trace, + sizeof (*vum->cpus[thread_index].current_trace)); + vhost_user_tx_trace (vum->cpus[thread_index].current_trace, vui, qid / 2, b0, 
rxvq); } @@ -2188,14 +2191,14 @@ retry: { // Get a header from the header array virtio_net_hdr_mrg_rxbuf_t *hdr = - &vum->cpus[cpu_index].tx_headers[tx_headers_len]; + &vum->cpus[thread_index].tx_headers[tx_headers_len]; tx_headers_len++; hdr->hdr.flags = 0; hdr->hdr.gso_type = 0; hdr->num_buffers = 1; //This is local, no need to check // Prepare a copy order executed later for the header - vhost_copy_t *cpy = &vum->cpus[cpu_index].copy[copy_len]; + vhost_copy_t *cpy = &vum->cpus[thread_index].copy[copy_len]; copy_len++; cpy->len = vui->virtio_net_hdr_sz; cpy->dst = buffer_map_addr; @@ -2220,7 +2223,7 @@ retry: else if (vui->virtio_net_hdr_sz == 12) //MRG is available { virtio_net_hdr_mrg_rxbuf_t *hdr = - &vum->cpus[cpu_index].tx_headers[tx_headers_len - 1]; + &vum->cpus[thread_index].tx_headers[tx_headers_len - 1]; //Move from available to used buffer rxvq->used->ring[rxvq->last_used_idx & qsz_mask].id = @@ -2282,7 +2285,7 @@ retry: } { - vhost_copy_t *cpy = &vum->cpus[cpu_index].copy[copy_len]; + vhost_copy_t *cpy = &vum->cpus[thread_index].copy[copy_len]; copy_len++; cpy->len = bytes_left; cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len; @@ -2325,8 +2328,8 @@ retry: if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED)) { - vum->cpus[cpu_index].current_trace->hdr = - vum->cpus[cpu_index].tx_headers[tx_headers_len - 1]; + vum->cpus[thread_index].current_trace->hdr = + vum->cpus[thread_index].tx_headers[tx_headers_len - 1]; } n_left--; //At the end for error counting when 'goto done' is invoked @@ -2336,7 +2339,7 @@ retry: done: //Do the memory copies if (PREDICT_FALSE - (vhost_user_tx_copy (vui, vum->cpus[cpu_index].copy, + (vhost_user_tx_copy (vui, vum->cpus[thread_index].copy, copy_len, &map_hint))) { clib_warning ("Memory mapping error on interface hw_if_index=%d " @@ -2386,7 +2389,7 @@ done3: vlib_increment_simple_counter (vnet_main.interface_main.sw_if_counters + VNET_INTERFACE_COUNTER_DROP, - os_get_cpu_number (), vui->sw_if_index, n_left); + vlib_get_thread_index (), vui->sw_if_index, n_left); } vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors); @@ -2773,11 +2776,11 @@ vhost_user_send_interrupt_process (vlib_main_t * vm, case ~0: vec_foreach (vhc, vum->cpus) { - u32 cpu_index = vhc - vum->cpus; + u32 thread_index = vhc - vum->cpus; f64 next_timeout; next_timeout = timeout; - vec_foreach (vhiq, vum->cpus[cpu_index].rx_queues) + vec_foreach (vhiq, vum->cpus[thread_index].rx_queues) { vui = &vum->vhost_user_interfaces[vhiq->vhost_iface_index]; vhost_user_vring_t *rxvq = diff --git a/src/vnet/dpo/lookup_dpo.c b/src/vnet/dpo/lookup_dpo.c index e94e871c..97ad0a44 100644 --- a/src/vnet/dpo/lookup_dpo.c +++ b/src/vnet/dpo/lookup_dpo.c @@ -266,7 +266,7 @@ lookup_dpo_ip4_inline (vlib_main_t * vm, int table_from_interface) { u32 n_left_from, next_index, * from, * to_next; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters; from = vlib_frame_vector_args (from_frame); @@ -407,10 +407,10 @@ lookup_dpo_ip4_inline (vlib_main_t * vm, vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, b0)); vlib_increment_combined_counter - (cm, cpu_index, lbi1, 1, + (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, b1)); if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) @@ -511,7 +511,7 @@ lookup_dpo_ip4_inline (vlib_main_t * vm, 
vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, b0)); if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) @@ -606,7 +606,7 @@ lookup_dpo_ip6_inline (vlib_main_t * vm, { vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters; u32 n_left_from, next_index, * from, * to_next; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; @@ -749,10 +749,10 @@ lookup_dpo_ip6_inline (vlib_main_t * vm, vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, b0)); vlib_increment_combined_counter - (cm, cpu_index, lbi1, 1, + (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, b1)); if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) @@ -853,7 +853,7 @@ lookup_dpo_ip6_inline (vlib_main_t * vm, vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, b0)); if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) @@ -930,7 +930,7 @@ lookup_dpo_mpls_inline (vlib_main_t * vm, int table_from_interface) { u32 n_left_from, next_index, * from, * to_next; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters; from = vlib_frame_vector_args (from_frame); @@ -994,7 +994,7 @@ lookup_dpo_mpls_inline (vlib_main_t * vm, vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, b0)); if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) diff --git a/src/vnet/dpo/replicate_dpo.c b/src/vnet/dpo/replicate_dpo.c index a9f334be..e25ceae9 100644 --- a/src/vnet/dpo/replicate_dpo.c +++ b/src/vnet/dpo/replicate_dpo.c @@ -627,7 +627,7 @@ replicate_inline (vlib_main_t * vm, vlib_combined_counter_main_t * cm = &replicate_main.repm_counters; replicate_main_t * rm = &replicate_main; u32 n_left_from, * from, * to_next, next_index; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; @@ -657,12 +657,12 @@ replicate_inline (vlib_main_t * vm, rep0 = replicate_get(repi0); vlib_increment_combined_counter( - cm, cpu_index, repi0, 1, + cm, thread_index, repi0, 1, vlib_buffer_length_in_chain(vm, b0)); - vec_validate (rm->clones[cpu_index], rep0->rep_n_buckets - 1); + vec_validate (rm->clones[thread_index], rep0->rep_n_buckets - 1); - num_cloned = vlib_buffer_clone (vm, bi0, rm->clones[cpu_index], rep0->rep_n_buckets, 128); + num_cloned = vlib_buffer_clone (vm, bi0, rm->clones[thread_index], rep0->rep_n_buckets, 128); if (num_cloned != rep0->rep_n_buckets) { @@ -673,7 +673,7 @@ replicate_inline (vlib_main_t * vm, for (bucket = 0; bucket < num_cloned; bucket++) { - ci0 = rm->clones[cpu_index][bucket]; + ci0 = rm->clones[thread_index][bucket]; c0 = vlib_get_buffer(vm, ci0); to_next[0] = ci0; @@ -700,7 +700,7 @@ replicate_inline (vlib_main_t * vm, vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); } } - vec_reset_length (rm->clones[cpu_index]); + vec_reset_length (rm->clones[thread_index]); } 
vlib_put_next_frame (vm, node, next_index, n_left_to_next); diff --git a/src/vnet/ethernet/arp.c b/src/vnet/ethernet/arp.c index ee757505..c74a097e 100644 --- a/src/vnet/ethernet/arp.c +++ b/src/vnet/ethernet/arp.c @@ -1771,7 +1771,7 @@ set_ip4_over_ethernet_rpc_callback (vnet_arp_set_ip4_over_ethernet_rpc_args_t * a) { vnet_main_t *vm = vnet_get_main (); - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); if (a->flags & ETHERNET_ARP_ARGS_REMOVE) vnet_arp_unset_ip4_over_ethernet_internal (vm, a); diff --git a/src/vnet/ethernet/interface.c b/src/vnet/ethernet/interface.c index 9894e3c8..335e3f9f 100644 --- a/src/vnet/ethernet/interface.c +++ b/src/vnet/ethernet/interface.c @@ -362,7 +362,7 @@ simulated_ethernet_interface_tx (vlib_main_t * vm, u32 next_index = VNET_SIMULATED_ETHERNET_TX_NEXT_ETHERNET_INPUT; u32 i, next_node_index, bvi_flag, sw_if_index; u32 n_pkts = 0, n_bytes = 0; - u32 cpu_index = vm->cpu_index; + u32 thread_index = vm->thread_index; vnet_main_t *vnm = vnet_get_main (); vnet_interface_main_t *im = &vnm->interface_main; vlib_node_main_t *nm = &vm->node_main; @@ -420,8 +420,9 @@ simulated_ethernet_interface_tx (vlib_main_t * vm, /* increment TX interface stat */ vlib_increment_combined_counter (im->combined_sw_if_counters + - VNET_INTERFACE_COUNTER_TX, cpu_index, - sw_if_index, n_pkts, n_bytes); + VNET_INTERFACE_COUNTER_TX, + thread_index, sw_if_index, n_pkts, + n_bytes); } return n_left_from; diff --git a/src/vnet/ethernet/node.c b/src/vnet/ethernet/node.c index b699e381..f7787ed2 100755 --- a/src/vnet/ethernet/node.c +++ b/src/vnet/ethernet/node.c @@ -291,7 +291,7 @@ ethernet_input_inline (vlib_main_t * vm, vlib_node_runtime_t *error_node; u32 n_left_from, next_index, *from, *to_next; u32 stats_sw_if_index, stats_n_packets, stats_n_bytes; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); u32 cached_sw_if_index = ~0; u32 cached_is_l2 = 0; /* shut up gcc */ vnet_hw_interface_t *hi = NULL; /* used for main interface only */ @@ -510,7 +510,7 @@ ethernet_input_inline (vlib_main_t * vm, interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, + thread_index, new_sw_if_index0, 1, len0); if (new_sw_if_index1 != old_sw_if_index1 @@ -519,7 +519,7 @@ ethernet_input_inline (vlib_main_t * vm, interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, + thread_index, new_sw_if_index1, 1, len1); @@ -530,7 +530,7 @@ ethernet_input_inline (vlib_main_t * vm, vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); stats_n_packets = stats_n_bytes = 0; @@ -696,13 +696,13 @@ ethernet_input_inline (vlib_main_t * vm, vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, new_sw_if_index0, 1, len0); + thread_index, new_sw_if_index0, 1, len0); if (stats_n_packets > 0) { vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); stats_n_packets = stats_n_bytes = 0; } @@ -734,7 +734,7 @@ ethernet_input_inline (vlib_main_t * vm, vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); 
node->runtime_data[0] = stats_sw_if_index; } diff --git a/src/vnet/gre/node.c b/src/vnet/gre/node.c index 2683586e..acf15f24 100644 --- a/src/vnet/gre/node.c +++ b/src/vnet/gre/node.c @@ -75,7 +75,7 @@ gre_input (vlib_main_t * vm, u64 cached_tunnel_key6[4]; u32 cached_tunnel_sw_if_index = 0, tunnel_sw_if_index = 0; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); u32 len; vnet_interface_main_t *im = &gm->vnet_main->interface_main; @@ -257,7 +257,7 @@ gre_input (vlib_main_t * vm, len = vlib_buffer_length_in_chain (vm, b0); vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, + thread_index, tunnel_sw_if_index, 1 /* packets */, len /* bytes */); @@ -324,7 +324,7 @@ drop0: len = vlib_buffer_length_in_chain (vm, b1); vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, + thread_index, tunnel_sw_if_index, 1 /* packets */, len /* bytes */); @@ -502,7 +502,7 @@ drop1: len = vlib_buffer_length_in_chain (vm, b0); vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, + thread_index, tunnel_sw_if_index, 1 /* packets */, len /* bytes */); diff --git a/src/vnet/interface.h b/src/vnet/interface.h index a1ea2d61..08f08b10 100644 --- a/src/vnet/interface.h +++ b/src/vnet/interface.h @@ -468,7 +468,7 @@ typedef struct vnet_hw_interface_t u32 input_node_index; /* input node cpu index by queue */ - u32 *input_node_cpu_index_by_queue; + u32 *input_node_thread_index_by_queue; } vnet_hw_interface_t; diff --git a/src/vnet/interface_output.c b/src/vnet/interface_output.c index 03f2cdca..663dc309 100644 --- a/src/vnet/interface_output.c +++ b/src/vnet/interface_output.c @@ -196,7 +196,7 @@ slow_path (vlib_main_t * vm, */ static_always_inline void incr_output_stats (vnet_main_t * vnm, - u32 cpu_index, + u32 thread_index, u32 length, u32 sw_if_index, u32 * last_sw_if_index, u32 * n_packets, u32 * n_bytes) @@ -216,7 +216,7 @@ incr_output_stats (vnet_main_t * vnm, vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, + thread_index, *last_sw_if_index, *n_packets, *n_bytes); } @@ -240,7 +240,7 @@ vnet_interface_output_node_flatten (vlib_main_t * vm, u32 n_left_to_tx, *from, *from_end, *to_tx; u32 n_bytes, n_buffers, n_packets; u32 last_sw_if_index; - u32 cpu_index = vm->cpu_index; + u32 thread_index = vm->thread_index; n_buffers = frame->n_vectors; @@ -266,7 +266,7 @@ vnet_interface_output_node_flatten (vlib_main_t * vm, cm = vec_elt_at_index (vnm->interface_main.sw_if_counters, VNET_INTERFACE_COUNTER_TX_ERROR); - vlib_increment_simple_counter (cm, cpu_index, + vlib_increment_simple_counter (cm, thread_index, rt->sw_if_index, n_buffers); return vlib_error_drop_buffers (vm, node, from, /* buffer stride */ 1, @@ -341,18 +341,18 @@ vnet_interface_output_node_flatten (vlib_main_t * vm, from += 1; to_tx += n_buffers; n_left_to_tx -= n_buffers; - incr_output_stats (vnm, cpu_index, n_slow_bytes, + incr_output_stats (vnm, thread_index, n_slow_bytes, vnet_buffer (b)->sw_if_index[VLIB_TX], &last_sw_if_index, &n_packets, &n_bytes); } } else { - incr_output_stats (vnm, cpu_index, + incr_output_stats (vnm, thread_index, vlib_buffer_length_in_chain (vm, b0), vnet_buffer (b0)->sw_if_index[VLIB_TX], &last_sw_if_index, &n_packets, &n_bytes); - incr_output_stats (vnm, cpu_index, + incr_output_stats (vnm, thread_index, vlib_buffer_length_in_chain (vm, b0), vnet_buffer (b1)->sw_if_index[VLIB_TX], 
&last_sw_if_index, &n_packets, &n_bytes); @@ -396,7 +396,7 @@ vnet_interface_output_node_flatten (vlib_main_t * vm, to_tx += n_buffers; n_left_to_tx -= n_buffers; } - incr_output_stats (vnm, cpu_index, + incr_output_stats (vnm, thread_index, vlib_buffer_length_in_chain (vm, b0), vnet_buffer (b0)->sw_if_index[VLIB_TX], &last_sw_if_index, &n_packets, &n_bytes); @@ -408,7 +408,7 @@ vnet_interface_output_node_flatten (vlib_main_t * vm, } /* Final update of interface stats. */ - incr_output_stats (vnm, cpu_index, 0, ~0, /* ~0 will flush stats */ + incr_output_stats (vnm, thread_index, 0, ~0, /* ~0 will flush stats */ &last_sw_if_index, &n_packets, &n_bytes); return n_buffers; @@ -428,7 +428,7 @@ vnet_interface_output_node (vlib_main_t * vm, u32 n_left_to_tx, *from, *from_end, *to_tx; u32 n_bytes, n_buffers, n_packets; u32 n_bytes_b0, n_bytes_b1, n_bytes_b2, n_bytes_b3; - u32 cpu_index = vm->cpu_index; + u32 thread_index = vm->thread_index; vnet_interface_main_t *im = &vnm->interface_main; u32 next_index = VNET_INTERFACE_OUTPUT_NEXT_TX; u32 current_config_index = ~0; @@ -458,7 +458,7 @@ vnet_interface_output_node (vlib_main_t * vm, cm = vec_elt_at_index (vnm->interface_main.sw_if_counters, VNET_INTERFACE_COUNTER_TX_ERROR); - vlib_increment_simple_counter (cm, cpu_index, + vlib_increment_simple_counter (cm, thread_index, rt->sw_if_index, n_buffers); return vlib_error_drop_buffers (vm, node, from, @@ -558,7 +558,7 @@ vnet_interface_output_node (vlib_main_t * vm, { vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, tx_swif0, 1, + thread_index, tx_swif0, 1, n_bytes_b0); } @@ -567,7 +567,7 @@ vnet_interface_output_node (vlib_main_t * vm, vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, tx_swif1, 1, + thread_index, tx_swif1, 1, n_bytes_b1); } @@ -576,7 +576,7 @@ vnet_interface_output_node (vlib_main_t * vm, vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, tx_swif2, 1, + thread_index, tx_swif2, 1, n_bytes_b2); } if (PREDICT_FALSE (tx_swif3 != rt->sw_if_index)) @@ -584,7 +584,7 @@ vnet_interface_output_node (vlib_main_t * vm, vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, tx_swif3, 1, + thread_index, tx_swif3, 1, n_bytes_b3); } } @@ -623,7 +623,7 @@ vnet_interface_output_node (vlib_main_t * vm, vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, tx_swif0, 1, + thread_index, tx_swif0, 1, n_bytes_b0); } } @@ -634,7 +634,7 @@ vnet_interface_output_node (vlib_main_t * vm, /* Update main interface stats. 
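/*
 * Illustrative sketch, not part of this patch: every counter hunk in this
 * change passes thread_index as the per-thread row of a simple or combined
 * (packets + bytes) counter, so each worker touches only its own row and a
 * reader sums the rows. The toy below shows that layout with invented names
 * and a fixed two-interface table; the real VPP counter API is not used.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_THREADS 4
#define N_INTERFACES 2

typedef struct
{
  uint64_t packets;
  uint64_t bytes;
} combined_counter_t;

/* one row per thread, one column per interface */
static combined_counter_t counters[MAX_THREADS][N_INTERFACES];

static void
increment_combined (uint32_t thread_index, uint32_t sw_if_index,
		    uint64_t n_packets, uint64_t n_bytes)
{
  counters[thread_index][sw_if_index].packets += n_packets;
  counters[thread_index][sw_if_index].bytes += n_bytes;
}

int
main (void)
{
  increment_combined (0, 1, 1, 64);	/* main thread */
  increment_combined (1, 1, 2, 128);	/* worker thread 1 */

  uint64_t pkts = 0, bytes = 0;
  for (int t = 0; t < MAX_THREADS; t++)
    {
      pkts += counters[t][1].packets;
      bytes += counters[t][1].bytes;
    }
  printf ("sw_if_index 1: %llu packets, %llu bytes\n",
	  (unsigned long long) pkts, (unsigned long long) bytes);
  return 0;
}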
*/ vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, + thread_index, rt->sw_if_index, n_packets, n_bytes); return n_buffers; } @@ -893,7 +893,7 @@ process_drop_punt (vlib_main_t * vm, u32 current_sw_if_index, n_errors_current_sw_if_index; u64 current_counter; vlib_simple_counter_main_t *cm; - u32 cpu_index = vm->cpu_index; + u32 thread_index = vm->thread_index; static vlib_error_t memory[VNET_ERROR_N_DISPOSITION]; static char memory_init[VNET_ERROR_N_DISPOSITION]; @@ -965,19 +965,19 @@ process_drop_punt (vlib_main_t * vm, current_counter -= 2; n_errors_current_sw_if_index -= 2; - vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1); - vlib_increment_simple_counter (cm, cpu_index, sw_if_index1, 1); + vlib_increment_simple_counter (cm, thread_index, sw_if_index0, 1); + vlib_increment_simple_counter (cm, thread_index, sw_if_index1, 1); /* Increment super-interface drop/punt counters for sub-interfaces. */ sw_if0 = vnet_get_sw_interface (vnm, sw_if_index0); vlib_increment_simple_counter - (cm, cpu_index, sw_if0->sup_sw_if_index, + (cm, thread_index, sw_if0->sup_sw_if_index, sw_if0->sup_sw_if_index != sw_if_index0); sw_if1 = vnet_get_sw_interface (vnm, sw_if_index1); vlib_increment_simple_counter - (cm, cpu_index, sw_if1->sup_sw_if_index, + (cm, thread_index, sw_if1->sup_sw_if_index, sw_if1->sup_sw_if_index != sw_if_index1); em->counters[current_counter_index] = current_counter; @@ -1013,11 +1013,12 @@ process_drop_punt (vlib_main_t * vm, sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX]; /* Increment drop/punt counters. */ - vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1); + vlib_increment_simple_counter (cm, thread_index, sw_if_index0, 1); /* Increment super-interface drop/punt counters for sub-interfaces. 
*/ sw_if0 = vnet_get_sw_interface (vnm, sw_if_index0); - vlib_increment_simple_counter (cm, cpu_index, sw_if0->sup_sw_if_index, + vlib_increment_simple_counter (cm, thread_index, + sw_if0->sup_sw_if_index, sw_if0->sup_sw_if_index != sw_if_index0); if (PREDICT_FALSE (e0 != current_error)) @@ -1041,12 +1042,12 @@ process_drop_punt (vlib_main_t * vm, { vnet_sw_interface_t *si; - vlib_increment_simple_counter (cm, cpu_index, current_sw_if_index, + vlib_increment_simple_counter (cm, thread_index, current_sw_if_index, n_errors_current_sw_if_index); si = vnet_get_sw_interface (vnm, current_sw_if_index); if (si->sup_sw_if_index != current_sw_if_index) - vlib_increment_simple_counter (cm, cpu_index, si->sup_sw_if_index, + vlib_increment_simple_counter (cm, thread_index, si->sup_sw_if_index, n_errors_current_sw_if_index); } diff --git a/src/vnet/ip/ip4_forward.c b/src/vnet/ip/ip4_forward.c index ee1703e7..fdfe7f63 100644 --- a/src/vnet/ip/ip4_forward.c +++ b/src/vnet/ip/ip4_forward.c @@ -75,7 +75,7 @@ ip4_lookup_inline (vlib_main_t * vm, vlib_combined_counter_main_t *cm = &load_balance_main.lbm_to_counters; u32 n_left_from, n_left_to_next, *from, *to_next; ip_lookup_next_t next; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; @@ -292,19 +292,19 @@ ip4_lookup_inline (vlib_main_t * vm, vnet_buffer (p3)->ip.adj_index[VLIB_TX] = dpo3->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lb_index0, 1, + (cm, thread_index, lb_index0, 1, vlib_buffer_length_in_chain (vm, p0) + sizeof (ethernet_header_t)); vlib_increment_combined_counter - (cm, cpu_index, lb_index1, 1, + (cm, thread_index, lb_index1, 1, vlib_buffer_length_in_chain (vm, p1) + sizeof (ethernet_header_t)); vlib_increment_combined_counter - (cm, cpu_index, lb_index2, 1, + (cm, thread_index, lb_index2, 1, vlib_buffer_length_in_chain (vm, p2) + sizeof (ethernet_header_t)); vlib_increment_combined_counter - (cm, cpu_index, lb_index3, 1, + (cm, thread_index, lb_index3, 1, vlib_buffer_length_in_chain (vm, p3) + sizeof (ethernet_header_t)); @@ -392,7 +392,7 @@ ip4_lookup_inline (vlib_main_t * vm, vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); from += 1; to_next += 1; @@ -479,7 +479,7 @@ ip4_load_balance (vlib_main_t * vm, vlib_combined_counter_main_t *cm = &load_balance_main.lbm_via_counters; u32 n_left_from, n_left_to_next, *from, *to_next; ip_lookup_next_t next; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; @@ -584,9 +584,9 @@ ip4_load_balance (vlib_main_t * vm, vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); vlib_increment_combined_counter - (cm, cpu_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1)); + (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1)); vlib_validate_buffer_enqueue_x2 (vm, node, next, to_next, n_left_to_next, @@ -639,7 +639,7 @@ ip4_load_balance (vlib_main_t * vm, vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); + (cm, 
thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); vlib_validate_buffer_enqueue_x1 (vm, node, next, to_next, n_left_to_next, @@ -2330,7 +2330,7 @@ ip4_rewrite_inline (vlib_main_t * vm, n_left_from = frame->n_vectors; next_index = node->cached_next_index; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); while (n_left_from > 0) { @@ -2379,9 +2379,9 @@ ip4_rewrite_inline (vlib_main_t * vm, if (do_counters) { vlib_prefetch_combined_counter (&adjacency_counters, - cpu_index, adj_index0); + thread_index, adj_index0); vlib_prefetch_combined_counter (&adjacency_counters, - cpu_index, adj_index1); + thread_index, adj_index1); } ip0 = vlib_buffer_get_current (p0); @@ -2527,13 +2527,13 @@ ip4_rewrite_inline (vlib_main_t * vm, { vlib_increment_combined_counter (&adjacency_counters, - cpu_index, + thread_index, adj_index0, 1, vlib_buffer_length_in_chain (vm, p0) + rw_len0); vlib_increment_combined_counter (&adjacency_counters, - cpu_index, + thread_index, adj_index1, 1, vlib_buffer_length_in_chain (vm, p1) + rw_len1); } @@ -2618,7 +2618,7 @@ ip4_rewrite_inline (vlib_main_t * vm, if (do_counters) vlib_prefetch_combined_counter (&adjacency_counters, - cpu_index, adj_index0); + thread_index, adj_index0); /* Guess we are only writing on simple Ethernet header. */ vnet_rewrite_one_header (adj0[0], ip0, sizeof (ethernet_header_t)); @@ -2637,7 +2637,7 @@ ip4_rewrite_inline (vlib_main_t * vm, if (do_counters) vlib_increment_combined_counter (&adjacency_counters, - cpu_index, adj_index0, 1, + thread_index, adj_index0, 1, vlib_buffer_length_in_chain (vm, p0) + rw_len0); /* Check MTU of outgoing interface. */ diff --git a/src/vnet/ip/ip4_input.c b/src/vnet/ip/ip4_input.c index ba200a9f..3b08f4b0 100644 --- a/src/vnet/ip/ip4_input.c +++ b/src/vnet/ip/ip4_input.c @@ -85,7 +85,7 @@ ip4_input_inline (vlib_main_t * vm, vlib_node_runtime_t *error_node = vlib_node_get_runtime (vm, ip4_input_node.index); vlib_simple_counter_main_t *cm; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; @@ -178,8 +178,8 @@ ip4_input_inline (vlib_main_t * vm, vnet_feature_arc_start (arc0, sw_if_index0, &next0, p0); vnet_feature_arc_start (arc1, sw_if_index1, &next1, p1); - vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1); - vlib_increment_simple_counter (cm, cpu_index, sw_if_index1, 1); + vlib_increment_simple_counter (cm, thread_index, sw_if_index0, 1); + vlib_increment_simple_counter (cm, thread_index, sw_if_index1, 1); /* Punt packets with options or wrong version. */ if (PREDICT_FALSE (ip0->ip_version_and_header_length != 0x45)) @@ -299,7 +299,7 @@ ip4_input_inline (vlib_main_t * vm, vnet_buffer (p0)->ip.adj_index[VLIB_RX] = ~0; vnet_feature_arc_start (arc0, sw_if_index0, &next0, p0); - vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1); + vlib_increment_simple_counter (cm, thread_index, sw_if_index0, 1); /* Punt packets with options or wrong version. 
*/ if (PREDICT_FALSE (ip0->ip_version_and_header_length != 0x45)) diff --git a/src/vnet/ip/ip6_forward.c b/src/vnet/ip/ip6_forward.c index c120f12c..c2fc4f87 100644 --- a/src/vnet/ip/ip6_forward.c +++ b/src/vnet/ip/ip6_forward.c @@ -74,7 +74,7 @@ ip6_lookup_inline (vlib_main_t * vm, vlib_combined_counter_main_t *cm = &load_balance_main.lbm_to_counters; u32 n_left_from, n_left_to_next, *from, *to_next; ip_lookup_next_t next; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; @@ -185,9 +185,9 @@ ip6_lookup_inline (vlib_main_t * vm, vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); vlib_increment_combined_counter - (cm, cpu_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1)); + (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1)); from += 2; to_next += 2; @@ -291,7 +291,7 @@ ip6_lookup_inline (vlib_main_t * vm, vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); from += 1; to_next += 1; @@ -703,7 +703,7 @@ ip6_load_balance (vlib_main_t * vm, vlib_combined_counter_main_t *cm = &load_balance_main.lbm_via_counters; u32 n_left_from, n_left_to_next, *from, *to_next; ip_lookup_next_t next; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); ip6_main_t *im = &ip6_main; from = vlib_frame_vector_args (frame); @@ -824,9 +824,9 @@ ip6_load_balance (vlib_main_t * vm, vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); vlib_increment_combined_counter - (cm, cpu_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1)); + (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1)); vlib_validate_buffer_enqueue_x2 (vm, node, next, to_next, n_left_to_next, @@ -886,7 +886,7 @@ ip6_load_balance (vlib_main_t * vm, } vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); vlib_validate_buffer_enqueue_x1 (vm, node, next, to_next, n_left_to_next, @@ -1897,7 +1897,7 @@ ip6_rewrite_inline (vlib_main_t * vm, n_left_from = frame->n_vectors; next_index = node->cached_next_index; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); while (n_left_from > 0) { @@ -2019,11 +2019,11 @@ ip6_rewrite_inline (vlib_main_t * vm, { vlib_increment_combined_counter (&adjacency_counters, - cpu_index, adj_index0, 1, + thread_index, adj_index0, 1, vlib_buffer_length_in_chain (vm, p0) + rw_len0); vlib_increment_combined_counter (&adjacency_counters, - cpu_index, adj_index1, 1, + thread_index, adj_index1, 1, vlib_buffer_length_in_chain (vm, p1) + rw_len1); } @@ -2156,7 +2156,7 @@ ip6_rewrite_inline (vlib_main_t * vm, { vlib_increment_combined_counter (&adjacency_counters, - cpu_index, adj_index0, 1, + thread_index, adj_index0, 1, vlib_buffer_length_in_chain (vm, p0) + rw_len0); } diff --git a/src/vnet/ip/ip6_input.c b/src/vnet/ip/ip6_input.c index 20306088..ffdc4727 100644 --- a/src/vnet/ip/ip6_input.c +++ 
b/src/vnet/ip/ip6_input.c @@ -82,7 +82,7 @@ ip6_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) vlib_node_runtime_t *error_node = vlib_node_get_runtime (vm, ip6_input_node.index); vlib_simple_counter_main_t *cm; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; @@ -171,8 +171,8 @@ ip6_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) vnet_feature_arc_start (arc0, sw_if_index0, &next0, p0); vnet_feature_arc_start (arc1, sw_if_index1, &next1, p1); - vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1); - vlib_increment_simple_counter (cm, cpu_index, sw_if_index1, 1); + vlib_increment_simple_counter (cm, thread_index, sw_if_index0, 1); + vlib_increment_simple_counter (cm, thread_index, sw_if_index1, 1); error0 = error1 = IP6_ERROR_NONE; @@ -270,7 +270,7 @@ ip6_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) vnet_buffer (p0)->ip.adj_index[VLIB_RX] = ~0; vnet_feature_arc_start (arc0, sw_if_index0, &next0, p0); - vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1); + vlib_increment_simple_counter (cm, thread_index, sw_if_index0, 1); error0 = IP6_ERROR_NONE; /* Version != 6? Drop it. */ diff --git a/src/vnet/ip/ip6_neighbor.c b/src/vnet/ip/ip6_neighbor.c index 5d1fb6f8..2af546df 100644 --- a/src/vnet/ip/ip6_neighbor.c +++ b/src/vnet/ip/ip6_neighbor.c @@ -581,7 +581,7 @@ vnet_set_ip6_ethernet_neighbor (vlib_main_t * vm, u32 next_index; pending_resolution_t *pr, *mc; - if (os_get_cpu_number ()) + if (vlib_get_thread_index ()) { set_unset_ip6_neighbor_rpc (vm, sw_if_index, a, link_layer_address, 1 /* set new neighbor */ , is_static, @@ -722,7 +722,7 @@ vnet_unset_ip6_ethernet_neighbor (vlib_main_t * vm, uword *p; int rv = 0; - if (os_get_cpu_number ()) + if (vlib_get_thread_index ()) { set_unset_ip6_neighbor_rpc (vm, sw_if_index, a, link_layer_address, 0 /* unset */ , 0, 0); diff --git a/src/vnet/ipsec/esp.h b/src/vnet/ipsec/esp.h index 50cac806..799003b9 100644 --- a/src/vnet/ipsec/esp.h +++ b/src/vnet/ipsec/esp.h @@ -282,8 +282,8 @@ hmac_calc (ipsec_integ_alg_t alg, u8 * data, int data_len, u8 * signature, u8 use_esn, u32 seq_hi) { esp_main_t *em = &esp_main; - u32 cpu_index = os_get_cpu_number (); - HMAC_CTX *ctx = &(em->per_thread_data[cpu_index].hmac_ctx); + u32 thread_index = vlib_get_thread_index (); + HMAC_CTX *ctx = &(em->per_thread_data[thread_index].hmac_ctx); const EVP_MD *md = NULL; unsigned int len; @@ -292,10 +292,10 @@ hmac_calc (ipsec_integ_alg_t alg, if (PREDICT_FALSE (em->esp_integ_algs[alg].md == 0)) return 0; - if (PREDICT_FALSE (alg != em->per_thread_data[cpu_index].last_integ_alg)) + if (PREDICT_FALSE (alg != em->per_thread_data[thread_index].last_integ_alg)) { md = em->esp_integ_algs[alg].md; - em->per_thread_data[cpu_index].last_integ_alg = alg; + em->per_thread_data[thread_index].last_integ_alg = alg; } HMAC_Init (ctx, key, key_len, md); diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c index 7289b260..925d2b45 100644 --- a/src/vnet/ipsec/esp_decrypt.c +++ b/src/vnet/ipsec/esp_decrypt.c @@ -85,8 +85,8 @@ esp_decrypt_aes_cbc (ipsec_crypto_alg_t alg, u8 * in, u8 * out, size_t in_len, u8 * key, u8 * iv) { esp_main_t *em = &esp_main; - u32 cpu_index = os_get_cpu_number (); - EVP_CIPHER_CTX *ctx = &(em->per_thread_data[cpu_index].decrypt_ctx); + u32 thread_index = vlib_get_thread_index (); + EVP_CIPHER_CTX *ctx = 
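/*
 * Illustrative sketch, not part of this patch: the ip6_neighbor hunks above
 * keep the existing rule that only thread index 0 (the main thread) mutates
 * the neighbor tables; a worker that reaches this path hands the request off
 * instead. The toy below shows that "main thread or defer" guard; the
 * defer_to_main() helper is invented and stands in for the RPC used in VPP.
 */
#include <stdint.h>
#include <stdio.h>

/* stand-in for vlib_get_thread_index(): 0 means the main thread */
static uint32_t current_thread_index;

static void
defer_to_main (const char *addr)
{
  printf ("worker %u: deferring neighbor update for %s to main thread\n",
	  (unsigned) current_thread_index, addr);
}

static void
set_neighbor (const char *addr)
{
  if (current_thread_index)	/* not the main thread */
    {
      defer_to_main (addr);
      return;
    }
  printf ("main thread: installing neighbor %s\n", addr);
}

int
main (void)
{
  current_thread_index = 0;
  set_neighbor ("2001:db8::1");
  current_thread_index = 2;
  set_neighbor ("2001:db8::2");
  return 0;
}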
&(em->per_thread_data[thread_index].decrypt_ctx); const EVP_CIPHER *cipher = NULL; int out_len; @@ -95,10 +95,11 @@ esp_decrypt_aes_cbc (ipsec_crypto_alg_t alg, if (PREDICT_FALSE (em->esp_crypto_algs[alg].type == 0)) return; - if (PREDICT_FALSE (alg != em->per_thread_data[cpu_index].last_decrypt_alg)) + if (PREDICT_FALSE + (alg != em->per_thread_data[thread_index].last_decrypt_alg)) { cipher = em->esp_crypto_algs[alg].type; - em->per_thread_data[cpu_index].last_decrypt_alg = alg; + em->per_thread_data[thread_index].last_decrypt_alg = alg; } EVP_DecryptInit_ex (ctx, cipher, NULL, key, iv); @@ -117,11 +118,11 @@ esp_decrypt_node_fn (vlib_main_t * vm, u32 *recycle = 0; from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); ipsec_alloc_empty_buffers (vm, im); - u32 *empty_buffers = im->empty_buffers[cpu_index]; + u32 *empty_buffers = im->empty_buffers[thread_index]; if (PREDICT_FALSE (vec_len (empty_buffers) < n_left_from)) { diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c index 44ae2297..b2bc4e0b 100644 --- a/src/vnet/ipsec/esp_encrypt.c +++ b/src/vnet/ipsec/esp_encrypt.c @@ -88,8 +88,8 @@ esp_encrypt_aes_cbc (ipsec_crypto_alg_t alg, u8 * in, u8 * out, size_t in_len, u8 * key, u8 * iv) { esp_main_t *em = &esp_main; - u32 cpu_index = os_get_cpu_number (); - EVP_CIPHER_CTX *ctx = &(em->per_thread_data[cpu_index].encrypt_ctx); + u32 thread_index = vlib_get_thread_index (); + EVP_CIPHER_CTX *ctx = &(em->per_thread_data[thread_index].encrypt_ctx); const EVP_CIPHER *cipher = NULL; int out_len; @@ -98,10 +98,11 @@ esp_encrypt_aes_cbc (ipsec_crypto_alg_t alg, if (PREDICT_FALSE (em->esp_crypto_algs[alg].type == IPSEC_CRYPTO_ALG_NONE)) return; - if (PREDICT_FALSE (alg != em->per_thread_data[cpu_index].last_encrypt_alg)) + if (PREDICT_FALSE + (alg != em->per_thread_data[thread_index].last_encrypt_alg)) { cipher = em->esp_crypto_algs[alg].type; - em->per_thread_data[cpu_index].last_encrypt_alg = alg; + em->per_thread_data[thread_index].last_encrypt_alg = alg; } EVP_EncryptInit_ex (ctx, cipher, NULL, key, iv); @@ -119,11 +120,11 @@ esp_encrypt_node_fn (vlib_main_t * vm, n_left_from = from_frame->n_vectors; ipsec_main_t *im = &ipsec_main; u32 *recycle = 0; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); ipsec_alloc_empty_buffers (vm, im); - u32 *empty_buffers = im->empty_buffers[cpu_index]; + u32 *empty_buffers = im->empty_buffers[thread_index]; if (PREDICT_FALSE (vec_len (empty_buffers) < n_left_from)) { diff --git a/src/vnet/ipsec/ikev2.c b/src/vnet/ipsec/ikev2.c index 2c1074d8..3f9978a7 100644 --- a/src/vnet/ipsec/ikev2.c +++ b/src/vnet/ipsec/ikev2.c @@ -303,16 +303,16 @@ static void ikev2_delete_sa (ikev2_sa_t * sa) { ikev2_main_t *km = &ikev2_main; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); uword *p; ikev2_sa_free_all_vec (sa); - p = hash_get (km->per_thread_data[cpu_index].sa_by_rspi, sa->rspi); + p = hash_get (km->per_thread_data[thread_index].sa_by_rspi, sa->rspi); if (p) { - hash_unset (km->per_thread_data[cpu_index].sa_by_rspi, sa->rspi); - pool_put (km->per_thread_data[cpu_index].sas, sa); + hash_unset (km->per_thread_data[thread_index].sa_by_rspi, sa->rspi); + pool_put (km->per_thread_data[thread_index].sas, sa); } } @@ -776,29 +776,31 @@ ikev2_initial_contact_cleanup (ikev2_sa_t * sa) ikev2_sa_t *tmp; u32 i, *delete = 0; ikev2_child_sa_t *c; - u32 cpu_index = os_get_cpu_number 
(); + u32 thread_index = vlib_get_thread_index (); if (!sa->initial_contact) return; /* find old IKE SAs with the same authenticated identity */ /* *INDENT-OFF* */ - pool_foreach (tmp, km->per_thread_data[cpu_index].sas, ({ + pool_foreach (tmp, km->per_thread_data[thread_index].sas, ({ if (tmp->i_id.type != sa->i_id.type || vec_len(tmp->i_id.data) != vec_len(sa->i_id.data) || memcmp(sa->i_id.data, tmp->i_id.data, vec_len(sa->i_id.data))) continue; if (sa->rspi != tmp->rspi) - vec_add1(delete, tmp - km->per_thread_data[cpu_index].sas); + vec_add1(delete, tmp - km->per_thread_data[thread_index].sas); })); /* *INDENT-ON* */ for (i = 0; i < vec_len (delete); i++) { - tmp = pool_elt_at_index (km->per_thread_data[cpu_index].sas, delete[i]); - vec_foreach (c, tmp->childs) - ikev2_delete_tunnel_interface (km->vnet_main, tmp, c); + tmp = + pool_elt_at_index (km->per_thread_data[thread_index].sas, delete[i]); + vec_foreach (c, + tmp->childs) ikev2_delete_tunnel_interface (km->vnet_main, + tmp, c); ikev2_delete_sa (tmp); } @@ -1922,10 +1924,10 @@ ikev2_retransmit_sa_init (ike_header_t * ike, { ikev2_main_t *km = &ikev2_main; ikev2_sa_t *sa; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); /* *INDENT-OFF* */ - pool_foreach (sa, km->per_thread_data[cpu_index].sas, ({ + pool_foreach (sa, km->per_thread_data[thread_index].sas, ({ if (sa->ispi == clib_net_to_host_u64(ike->ispi) && sa->iaddr.as_u32 == iaddr.as_u32 && sa->raddr.as_u32 == raddr.as_u32) @@ -2036,7 +2038,7 @@ ikev2_node_fn (vlib_main_t * vm, u32 n_left_from, *from, *to_next; ikev2_next_t next_index; ikev2_main_t *km = &ikev2_main; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; @@ -2134,11 +2136,14 @@ ikev2_node_fn (vlib_main_t * vm, if (sa0->state == IKEV2_STATE_SA_INIT) { /* add SA to the pool */ - pool_get (km->per_thread_data[cpu_index].sas, sa0); + pool_get (km->per_thread_data[thread_index].sas, + sa0); clib_memcpy (sa0, &sa, sizeof (*sa0)); - hash_set (km->per_thread_data[cpu_index].sa_by_rspi, + hash_set (km-> + per_thread_data[thread_index].sa_by_rspi, sa0->rspi, - sa0 - km->per_thread_data[cpu_index].sas); + sa0 - + km->per_thread_data[thread_index].sas); } else { @@ -2169,11 +2174,11 @@ ikev2_node_fn (vlib_main_t * vm, if (sa0->state == IKEV2_STATE_SA_INIT) { /* add SA to the pool */ - pool_get (km->per_thread_data[cpu_index].sas, sa0); + pool_get (km->per_thread_data[thread_index].sas, sa0); clib_memcpy (sa0, &sa, sizeof (*sa0)); - hash_set (km->per_thread_data[cpu_index].sa_by_rspi, + hash_set (km->per_thread_data[thread_index].sa_by_rspi, sa0->rspi, - sa0 - km->per_thread_data[cpu_index].sas); + sa0 - km->per_thread_data[thread_index].sas); } else { @@ -2184,12 +2189,13 @@ ikev2_node_fn (vlib_main_t * vm, else if (ike0->exchange == IKEV2_EXCHANGE_IKE_AUTH) { uword *p; - p = hash_get (km->per_thread_data[cpu_index].sa_by_rspi, + p = hash_get (km->per_thread_data[thread_index].sa_by_rspi, clib_net_to_host_u64 (ike0->rspi)); if (p) { - sa0 = pool_elt_at_index (km->per_thread_data[cpu_index].sas, - p[0]); + sa0 = + pool_elt_at_index (km->per_thread_data[thread_index].sas, + p[0]); r = ikev2_retransmit_resp (sa0, ike0); if (r == 1) @@ -2240,12 +2246,13 @@ ikev2_node_fn (vlib_main_t * vm, else if (ike0->exchange == IKEV2_EXCHANGE_INFORMATIONAL) { uword *p; - p = hash_get (km->per_thread_data[cpu_index].sa_by_rspi, + p = hash_get (km->per_thread_data[thread_index].sa_by_rspi, 
clib_net_to_host_u64 (ike0->rspi)); if (p) { - sa0 = pool_elt_at_index (km->per_thread_data[cpu_index].sas, - p[0]); + sa0 = + pool_elt_at_index (km->per_thread_data[thread_index].sas, + p[0]); r = ikev2_retransmit_resp (sa0, ike0); if (r == 1) @@ -2305,12 +2312,13 @@ ikev2_node_fn (vlib_main_t * vm, else if (ike0->exchange == IKEV2_EXCHANGE_CREATE_CHILD_SA) { uword *p; - p = hash_get (km->per_thread_data[cpu_index].sa_by_rspi, + p = hash_get (km->per_thread_data[thread_index].sa_by_rspi, clib_net_to_host_u64 (ike0->rspi)); if (p) { - sa0 = pool_elt_at_index (km->per_thread_data[cpu_index].sas, - p[0]); + sa0 = + pool_elt_at_index (km->per_thread_data[thread_index].sas, + p[0]); r = ikev2_retransmit_resp (sa0, ike0); if (r == 1) diff --git a/src/vnet/ipsec/ipsec.h b/src/vnet/ipsec/ipsec.h index 58f0f145..c884e360 100644 --- a/src/vnet/ipsec/ipsec.h +++ b/src/vnet/ipsec/ipsec.h @@ -324,21 +324,21 @@ int ipsec_set_interface_key (vnet_main_t * vnm, u32 hw_if_index, always_inline void ipsec_alloc_empty_buffers (vlib_main_t * vm, ipsec_main_t * im) { - u32 cpu_index = os_get_cpu_number (); - uword l = vec_len (im->empty_buffers[cpu_index]); + u32 thread_index = vlib_get_thread_index (); + uword l = vec_len (im->empty_buffers[thread_index]); uword n_alloc = 0; if (PREDICT_FALSE (l < VLIB_FRAME_SIZE)) { - if (!im->empty_buffers[cpu_index]) + if (!im->empty_buffers[thread_index]) { - vec_alloc (im->empty_buffers[cpu_index], 2 * VLIB_FRAME_SIZE); + vec_alloc (im->empty_buffers[thread_index], 2 * VLIB_FRAME_SIZE); } - n_alloc = vlib_buffer_alloc (vm, im->empty_buffers[cpu_index] + l, + n_alloc = vlib_buffer_alloc (vm, im->empty_buffers[thread_index] + l, 2 * VLIB_FRAME_SIZE - l); - _vec_len (im->empty_buffers[cpu_index]) = l + n_alloc; + _vec_len (im->empty_buffers[thread_index]) = l + n_alloc; } } diff --git a/src/vnet/ipsec/ipsec_if.c b/src/vnet/ipsec/ipsec_if.c index dc882004..ed124894 100644 --- a/src/vnet/ipsec/ipsec_if.c +++ b/src/vnet/ipsec/ipsec_if.c @@ -99,7 +99,7 @@ static int ipsec_add_del_tunnel_if_rpc_callback (ipsec_add_del_tunnel_args_t * a) { vnet_main_t *vnm = vnet_get_main (); - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); return ipsec_add_del_tunnel_if_internal (vnm, a); } diff --git a/src/vnet/l2/l2_bvi.h b/src/vnet/l2/l2_bvi.h index dd1130a6..e21a1616 100644 --- a/src/vnet/l2/l2_bvi.h +++ b/src/vnet/l2/l2_bvi.h @@ -97,7 +97,7 @@ l2_to_bvi (vlib_main_t * vlib_main, vlib_increment_combined_counter (vnet_main->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - vlib_main->cpu_index, + vlib_main->thread_index, vnet_buffer (b0)->sw_if_index[VLIB_RX], 1, vlib_buffer_length_in_chain (vlib_main, b0)); return TO_BVI_ERR_OK; diff --git a/src/vnet/l2/l2_input.c b/src/vnet/l2/l2_input.c index 041ff38d..e5d6878a 100644 --- a/src/vnet/l2/l2_input.c +++ b/src/vnet/l2/l2_input.c @@ -117,7 +117,7 @@ typedef enum static_always_inline void classify_and_dispatch (vlib_main_t * vm, vlib_node_runtime_t * node, - u32 cpu_index, + u32 thread_index, l2input_main_t * msm, vlib_buffer_t * b0, u32 * next0) { /* @@ -237,7 +237,7 @@ l2input_node_inline (vlib_main_t * vm, u32 n_left_from, *from, *to_next; l2input_next_t next_index; l2input_main_t *msm = &l2input_main; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; /* number of packets to process */ @@ -350,10 +350,10 @@ l2input_node_inline (vlib_main_t * vm, vlib_node_increment_counter (vm, 
l2input_node.index, L2INPUT_ERROR_L2INPUT, 4); - classify_and_dispatch (vm, node, cpu_index, msm, b0, &next0); - classify_and_dispatch (vm, node, cpu_index, msm, b1, &next1); - classify_and_dispatch (vm, node, cpu_index, msm, b2, &next2); - classify_and_dispatch (vm, node, cpu_index, msm, b3, &next3); + classify_and_dispatch (vm, node, thread_index, msm, b0, &next0); + classify_and_dispatch (vm, node, thread_index, msm, b1, &next1); + classify_and_dispatch (vm, node, thread_index, msm, b2, &next2); + classify_and_dispatch (vm, node, thread_index, msm, b3, &next3); /* verify speculative enqueues, maybe switch current next frame */ /* if next0==next1==next_index then nothing special needs to be done */ @@ -393,7 +393,7 @@ l2input_node_inline (vlib_main_t * vm, vlib_node_increment_counter (vm, l2input_node.index, L2INPUT_ERROR_L2INPUT, 1); - classify_and_dispatch (vm, node, cpu_index, msm, b0, &next0); + classify_and_dispatch (vm, node, thread_index, msm, b0, &next0); /* verify speculative enqueue, maybe switch current next frame */ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, diff --git a/src/vnet/l2/l2_output.c b/src/vnet/l2/l2_output.c index 00f22571..e17b2a16 100644 --- a/src/vnet/l2/l2_output.c +++ b/src/vnet/l2/l2_output.c @@ -643,11 +643,11 @@ l2output_create_output_node_mapping (vlib_main_t * vlib_main, vnet_main_t * vnet hw0 = vnet_get_sup_hw_interface (vnet_main, sw_if_index); - uword cpu_number; + uword thread_index; - cpu_number = os_get_cpu_number (); + thread_index = vlib_get_thread_index (); - if (cpu_number) + if (thread_index) { u32 oldflags; diff --git a/src/vnet/l2tp/decap.c b/src/vnet/l2tp/decap.c index e8986935..46104129 100644 --- a/src/vnet/l2tp/decap.c +++ b/src/vnet/l2tp/decap.c @@ -149,7 +149,7 @@ last_stage (vlib_main_t * vm, vlib_node_runtime_t * node, u32 bi) /* per-mapping byte stats include the ethernet header */ vlib_increment_combined_counter (&lm->counter_main, - os_get_cpu_number (), + vlib_get_thread_index (), counter_index, 1 /* packet_increment */ , vlib_buffer_length_in_chain (vm, b) + sizeof (ethernet_header_t)); diff --git a/src/vnet/l2tp/encap.c b/src/vnet/l2tp/encap.c index ed7a9580..dcdfde4b 100644 --- a/src/vnet/l2tp/encap.c +++ b/src/vnet/l2tp/encap.c @@ -124,7 +124,7 @@ last_stage (vlib_main_t * vm, vlib_node_runtime_t * node, u32 bi) /* per-mapping byte stats include the ethernet header */ vlib_increment_combined_counter (&lm->counter_main, - os_get_cpu_number (), + vlib_get_thread_index (), counter_index, 1 /* packet_increment */ , vlib_buffer_length_in_chain (vm, b)); diff --git a/src/vnet/l2tp/l2tp.c b/src/vnet/l2tp/l2tp.c index cb94d7e7..3dedc447 100644 --- a/src/vnet/l2tp/l2tp.c +++ b/src/vnet/l2tp/l2tp.c @@ -157,7 +157,7 @@ test_counters_command_fn (vlib_main_t * vm, u32 session_index; u32 counter_index; u32 nincr = 0; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); /* *INDENT-OFF* */ pool_foreach (session, lm->sessions, @@ -167,11 +167,11 @@ test_counters_command_fn (vlib_main_t * vm, session_index_to_counter_index (session_index, SESSION_COUNTER_USER_TO_NETWORK); vlib_increment_combined_counter (&lm->counter_main, - cpu_index, + thread_index, counter_index, 1/*pkt*/, 1111 /*bytes*/); vlib_increment_combined_counter (&lm->counter_main, - cpu_index, + thread_index, counter_index+1, 1/*pkt*/, 2222 /*bytes*/); nincr++; diff --git a/src/vnet/lisp-gpe/decap.c b/src/vnet/lisp-gpe/decap.c index d887a95f..68769710 100644 --- a/src/vnet/lisp-gpe/decap.c +++ b/src/vnet/lisp-gpe/decap.c @@ -103,7 
+103,7 @@ next_index_to_iface (lisp_gpe_main_t * lgm, u32 next_index) } static_always_inline void -incr_decap_stats (vnet_main_t * vnm, u32 cpu_index, u32 length, +incr_decap_stats (vnet_main_t * vnm, u32 thread_index, u32 length, u32 sw_if_index, u32 * last_sw_if_index, u32 * n_packets, u32 * n_bytes) { @@ -122,7 +122,7 @@ incr_decap_stats (vnet_main_t * vnm, u32 cpu_index, u32 length, vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, *last_sw_if_index, + thread_index, *last_sw_if_index, *n_packets, *n_bytes); } *last_sw_if_index = sw_if_index; @@ -150,11 +150,11 @@ static uword lisp_gpe_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame, u8 is_v4) { - u32 n_left_from, next_index, *from, *to_next, cpu_index; + u32 n_left_from, next_index, *from, *to_next, thread_index; u32 n_bytes = 0, n_packets = 0, last_sw_if_index = ~0, drops = 0; lisp_gpe_main_t *lgm = vnet_lisp_gpe_get_main (); - cpu_index = os_get_cpu_number (); + thread_index = vlib_get_thread_index (); from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; @@ -267,7 +267,7 @@ lisp_gpe_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, if (si0) { - incr_decap_stats (lgm->vnet_main, cpu_index, + incr_decap_stats (lgm->vnet_main, thread_index, vlib_buffer_length_in_chain (vm, b0), si0[0], &last_sw_if_index, &n_packets, &n_bytes); vnet_buffer (b0)->sw_if_index[VLIB_RX] = si0[0]; @@ -282,7 +282,7 @@ lisp_gpe_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, if (si1) { - incr_decap_stats (lgm->vnet_main, cpu_index, + incr_decap_stats (lgm->vnet_main, thread_index, vlib_buffer_length_in_chain (vm, b1), si1[0], &last_sw_if_index, &n_packets, &n_bytes); vnet_buffer (b1)->sw_if_index[VLIB_RX] = si1[0]; @@ -397,7 +397,7 @@ lisp_gpe_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, if (si0) { - incr_decap_stats (lgm->vnet_main, cpu_index, + incr_decap_stats (lgm->vnet_main, thread_index, vlib_buffer_length_in_chain (vm, b0), si0[0], &last_sw_if_index, &n_packets, &n_bytes); vnet_buffer (b0)->sw_if_index[VLIB_RX] = si0[0]; @@ -430,7 +430,7 @@ lisp_gpe_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, } /* flush iface stats */ - incr_decap_stats (lgm->vnet_main, cpu_index, 0, ~0, &last_sw_if_index, + incr_decap_stats (lgm->vnet_main, thread_index, 0, ~0, &last_sw_if_index, &n_packets, &n_bytes); vlib_node_increment_counter (vm, lisp_gpe_ip4_input_node.index, LISP_GPE_ERROR_NO_TUNNEL, drops); diff --git a/src/vnet/lldp/lldp_input.c b/src/vnet/lldp/lldp_input.c index 762743d0..e88f6fdb 100644 --- a/src/vnet/lldp/lldp_input.c +++ b/src/vnet/lldp/lldp_input.c @@ -35,7 +35,7 @@ typedef struct static void lldp_rpc_update_peer_cb (const lldp_intf_update_t * a) { - ASSERT (os_get_cpu_number () == 0); + ASSERT (vlib_get_thread_index () == 0); lldp_intf_t *n = lldp_get_intf (&lldp_main, a->hw_if_index); if (!n) diff --git a/src/vnet/map/ip4_map.c b/src/vnet/map/ip4_map.c index 1a20d704..e39b6f14 100644 --- a/src/vnet/map/ip4_map.c +++ b/src/vnet/map/ip4_map.c @@ -248,7 +248,7 @@ ip4_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) next_index = node->cached_next_index; map_main_t *mm = &map_main; vlib_combined_counter_main_t *cm = mm->domain_counters; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); while (n_left_from > 0) { @@ -377,7 +377,7 @@ ip4_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) ip40) ? 
IP4_MAP_NEXT_IP6_REWRITE : next0; vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, - cpu_index, + thread_index, map_domain_index0, 1, clib_net_to_host_u16 (ip6h0->payload_length) + @@ -409,7 +409,7 @@ ip4_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) ip41) ? IP4_MAP_NEXT_IP6_REWRITE : next1; vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, - cpu_index, + thread_index, map_domain_index1, 1, clib_net_to_host_u16 (ip6h1->payload_length) + @@ -520,7 +520,7 @@ ip4_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) ip40) ? IP4_MAP_NEXT_IP6_REWRITE : next0; vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, - cpu_index, + thread_index, map_domain_index0, 1, clib_net_to_host_u16 (ip6h0->payload_length) + @@ -564,7 +564,7 @@ ip4_map_reass (vlib_main_t * vm, next_index = node->cached_next_index; map_main_t *mm = &map_main; vlib_combined_counter_main_t *cm = mm->domain_counters; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); u32 *fragments_to_drop = NULL; u32 *fragments_to_loopback = NULL; @@ -694,8 +694,8 @@ ip4_map_reass (vlib_main_t * vm, { if (error0 == MAP_ERROR_NONE) vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, - cpu_index, map_domain_index0, - 1, + thread_index, + map_domain_index0, 1, clib_net_to_host_u16 (ip60->payload_length) + 40); next0 = diff --git a/src/vnet/map/ip4_map_t.c b/src/vnet/map/ip4_map_t.c index b63d76bf..5f2bcbf9 100644 --- a/src/vnet/map/ip4_map_t.c +++ b/src/vnet/map/ip4_map_t.c @@ -477,7 +477,7 @@ ip4_map_t_icmp (vlib_main_t * vm, n_left_from = frame->n_vectors; next_index = node->cached_next_index; vlib_combined_counter_main_t *cm = map_main.domain_counters; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); while (n_left_from > 0) { @@ -520,7 +520,7 @@ ip4_map_t_icmp (vlib_main_t * vm, if (PREDICT_TRUE (error0 == MAP_ERROR_NONE)) { vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, - cpu_index, + thread_index, vnet_buffer (p0)->map_t. map_domain_index, 1, len0); } @@ -1051,7 +1051,7 @@ ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) n_left_from = frame->n_vectors; next_index = node->cached_next_index; vlib_combined_counter_main_t *cm = map_main.domain_counters; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); while (n_left_from > 0) { @@ -1158,7 +1158,7 @@ ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP)) { vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, - cpu_index, + thread_index, vnet_buffer (p0)->map_t. map_domain_index, 1, clib_net_to_host_u16 (ip40-> @@ -1169,7 +1169,7 @@ ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) (error1 == MAP_ERROR_NONE && next1 != IP4_MAPT_NEXT_MAPT_ICMP)) { vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, - cpu_index, + thread_index, vnet_buffer (p1)->map_t. map_domain_index, 1, clib_net_to_host_u16 (ip41-> @@ -1252,7 +1252,7 @@ ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP)) { vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, - cpu_index, + thread_index, vnet_buffer (p0)->map_t. 
map_domain_index, 1, clib_net_to_host_u16 (ip40-> diff --git a/src/vnet/map/ip6_map.c b/src/vnet/map/ip6_map.c index f7eb768f..63ada962 100644 --- a/src/vnet/map/ip6_map.c +++ b/src/vnet/map/ip6_map.c @@ -172,7 +172,7 @@ ip6_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) vlib_node_get_runtime (vm, ip6_map_node.index); map_main_t *mm = &map_main; vlib_combined_counter_main_t *cm = mm->domain_counters; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; @@ -319,7 +319,7 @@ ip6_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) IP6_MAP_NEXT_IP4_REWRITE : next0; } vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, - cpu_index, + thread_index, map_domain_index0, 1, clib_net_to_host_u16 (ip40->length)); @@ -352,7 +352,7 @@ ip6_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) IP6_MAP_NEXT_IP4_REWRITE : next1; } vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, - cpu_index, + thread_index, map_domain_index1, 1, clib_net_to_host_u16 (ip41->length)); @@ -505,7 +505,7 @@ ip6_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) IP6_MAP_NEXT_IP4_REWRITE : next0; } vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, - cpu_index, + thread_index, map_domain_index0, 1, clib_net_to_host_u16 (ip40->length)); @@ -820,7 +820,7 @@ ip6_map_ip4_reass (vlib_main_t * vm, vlib_node_get_runtime (vm, ip6_map_ip4_reass_node.index); map_main_t *mm = &map_main; vlib_combined_counter_main_t *cm = mm->domain_counters; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); u32 *fragments_to_drop = NULL; u32 *fragments_to_loopback = NULL; @@ -958,8 +958,8 @@ ip6_map_ip4_reass (vlib_main_t * vm, { if (error0 == MAP_ERROR_NONE) vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, - cpu_index, map_domain_index0, - 1, + thread_index, + map_domain_index0, 1, clib_net_to_host_u16 (ip40->length)); next0 = @@ -1015,7 +1015,7 @@ ip6_map_icmp_relay (vlib_main_t * vm, vlib_node_runtime_t *error_node = vlib_node_get_runtime (vm, ip6_map_icmp_relay_node.index); map_main_t *mm = &map_main; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); u16 *fragment_ids, *fid; from = vlib_frame_vector_args (frame); @@ -1143,7 +1143,8 @@ ip6_map_icmp_relay (vlib_main_t * vm, ip_csum_t sum = ip_incremental_checksum (0, new_icmp40, nlen - 20); new_icmp40->checksum = ~ip_csum_fold (sum); - vlib_increment_simple_counter (&mm->icmp_relayed, cpu_index, 0, 1); + vlib_increment_simple_counter (&mm->icmp_relayed, thread_index, 0, + 1); error: if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED)) diff --git a/src/vnet/map/ip6_map_t.c b/src/vnet/map/ip6_map_t.c index eb3996c2..99151678 100644 --- a/src/vnet/map/ip6_map_t.c +++ b/src/vnet/map/ip6_map_t.c @@ -448,7 +448,7 @@ ip6_map_t_icmp (vlib_main_t * vm, n_left_from = frame->n_vectors; next_index = node->cached_next_index; vlib_combined_counter_main_t *cm = map_main.domain_counters; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); while (n_left_from > 0) { @@ -493,7 +493,7 @@ ip6_map_t_icmp (vlib_main_t * vm, if (PREDICT_TRUE (error0 == MAP_ERROR_NONE)) { vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, - cpu_index, + thread_index, vnet_buffer (p0)-> map_t.map_domain_index, 1, len0); @@ -1051,7 +1051,7 @@ ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, 
vlib_frame_t * frame) vlib_node_runtime_t *error_node = vlib_node_get_runtime (vm, ip6_map_t_node.index); vlib_combined_counter_main_t *cm = map_main.domain_counters; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; @@ -1218,7 +1218,7 @@ ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP)) { vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, - cpu_index, + thread_index, vnet_buffer (p0)-> map_t.map_domain_index, 1, clib_net_to_host_u16 @@ -1229,7 +1229,7 @@ ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) (error1 == MAP_ERROR_NONE && next1 != IP6_MAPT_NEXT_MAPT_ICMP)) { vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, - cpu_index, + thread_index, vnet_buffer (p1)-> map_t.map_domain_index, 1, clib_net_to_host_u16 @@ -1403,7 +1403,7 @@ ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP)) { vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, - cpu_index, + thread_index, vnet_buffer (p0)-> map_t.map_domain_index, 1, clib_net_to_host_u16 diff --git a/src/vnet/mpls/mpls_input.c b/src/vnet/mpls/mpls_input.c index 893c4511..1b9bdd05 100644 --- a/src/vnet/mpls/mpls_input.c +++ b/src/vnet/mpls/mpls_input.c @@ -76,7 +76,7 @@ mpls_input_inline (vlib_main_t * vm, u32 n_left_from, next_index, * from, * to_next; mpls_input_runtime_t * rt; mpls_main_t * mm; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); vlib_simple_counter_main_t * cm; vnet_main_t * vnm = vnet_get_main(); @@ -151,7 +151,7 @@ mpls_input_inline (vlib_main_t * vm, next0 = MPLS_INPUT_NEXT_LOOKUP; vnet_feature_arc_start(mm->input_feature_arc_index, sw_if_index0, &next0, b0); - vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1); + vlib_increment_simple_counter (cm, thread_index, sw_if_index0, 1); } if (PREDICT_FALSE(h1[3] == 0)) @@ -164,7 +164,7 @@ mpls_input_inline (vlib_main_t * vm, next1 = MPLS_INPUT_NEXT_LOOKUP; vnet_feature_arc_start(mm->input_feature_arc_index, sw_if_index1, &next1, b1); - vlib_increment_simple_counter (cm, cpu_index, sw_if_index1, 1); + vlib_increment_simple_counter (cm, thread_index, sw_if_index1, 1); } if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) @@ -215,7 +215,7 @@ mpls_input_inline (vlib_main_t * vm, { next0 = MPLS_INPUT_NEXT_LOOKUP; vnet_feature_arc_start(mm->input_feature_arc_index, sw_if_index0, &next0, b0); - vlib_increment_simple_counter (cm, cpu_index, sw_if_index0, 1); + vlib_increment_simple_counter (cm, thread_index, sw_if_index0, 1); } if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) diff --git a/src/vnet/mpls/mpls_lookup.c b/src/vnet/mpls/mpls_lookup.c index 475bb204..ace6a70f 100644 --- a/src/vnet/mpls/mpls_lookup.c +++ b/src/vnet/mpls/mpls_lookup.c @@ -67,7 +67,7 @@ mpls_lookup (vlib_main_t * vm, vlib_combined_counter_main_t * cm = &load_balance_main.lbm_to_counters; u32 n_left_from, next_index, * from, * to_next; mpls_main_t * mm = &mpls_main; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; @@ -220,16 +220,16 @@ mpls_lookup (vlib_main_t * vm, vnet_buffer (b3)->ip.adj_index[VLIB_TX] = dpo3->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, + (cm, thread_index, 
lbi0, 1, vlib_buffer_length_in_chain (vm, b0)); vlib_increment_combined_counter - (cm, cpu_index, lbi1, 1, + (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, b1)); vlib_increment_combined_counter - (cm, cpu_index, lbi2, 1, + (cm, thread_index, lbi2, 1, vlib_buffer_length_in_chain (vm, b2)); vlib_increment_combined_counter - (cm, cpu_index, lbi3, 1, + (cm, thread_index, lbi3, 1, vlib_buffer_length_in_chain (vm, b3)); /* @@ -351,7 +351,7 @@ mpls_lookup (vlib_main_t * vm, vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, b0)); /* @@ -440,7 +440,7 @@ mpls_load_balance (vlib_main_t * vm, { vlib_combined_counter_main_t * cm = &load_balance_main.lbm_via_counters; u32 n_left_from, n_left_to_next, * from, * to_next; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); u32 next; from = vlib_frame_vector_args (frame); @@ -536,10 +536,10 @@ mpls_load_balance (vlib_main_t * vm, vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); vlib_increment_combined_counter - (cm, cpu_index, lbi1, 1, + (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1)); if (PREDICT_FALSE(p0->flags & VLIB_BUFFER_IS_TRACED)) @@ -597,7 +597,7 @@ mpls_load_balance (vlib_main_t * vm, vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index; vlib_increment_combined_counter - (cm, cpu_index, lbi0, 1, + (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0)); vlib_validate_buffer_enqueue_x1 (vm, node, next, diff --git a/src/vnet/mpls/mpls_output.c b/src/vnet/mpls/mpls_output.c index 08018fd1..d90dec21 100644 --- a/src/vnet/mpls/mpls_output.c +++ b/src/vnet/mpls/mpls_output.c @@ -64,12 +64,12 @@ mpls_output_inline (vlib_main_t * vm, vlib_frame_t * from_frame, int is_midchain) { - u32 n_left_from, next_index, * from, * to_next, cpu_index; + u32 n_left_from, next_index, * from, * to_next, thread_index; vlib_node_runtime_t * error_node; u32 n_left_to_next; mpls_main_t *mm; - cpu_index = os_get_cpu_number(); + thread_index = vlib_get_thread_index(); error_node = vlib_node_get_runtime (vm, mpls_output_node.index); from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; @@ -137,13 +137,13 @@ mpls_output_inline (vlib_main_t * vm, /* Bump the adj counters for packet and bytes */ vlib_increment_combined_counter (&adjacency_counters, - cpu_index, + thread_index, adj_index0, 1, vlib_buffer_length_in_chain (vm, p0) + rw_len0); vlib_increment_combined_counter (&adjacency_counters, - cpu_index, + thread_index, adj_index1, 1, vlib_buffer_length_in_chain (vm, p1) + rw_len1); @@ -245,7 +245,7 @@ mpls_output_inline (vlib_main_t * vm, vlib_increment_combined_counter (&adjacency_counters, - cpu_index, + thread_index, adj_index0, 1, vlib_buffer_length_in_chain (vm, p0) + rw_len0); diff --git a/src/vnet/pg/input.c b/src/vnet/pg/input.c index 2649798b..597ae060 100644 --- a/src/vnet/pg/input.c +++ b/src/vnet/pg/input.c @@ -893,7 +893,7 @@ pg_generate_set_lengths (pg_main_t * pg, vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - os_get_cpu_number (), + vlib_get_thread_index (), si->sw_if_index, n_buffers, length_sum); } @@ -1266,7 +1266,7 @@ pg_stream_fill_helper (pg_main_t * pg, l += vlib_buffer_index_length_in_chain (vm, buffers[i]); vlib_increment_combined_counter 
(im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - os_get_cpu_number (), + vlib_get_thread_index (), si->sw_if_index, n_alloc, l); s->current_replay_packet_index += n_alloc; s->current_replay_packet_index %= diff --git a/src/vnet/replication.c b/src/vnet/replication.c index 86d922b5..233a8c2f 100644 --- a/src/vnet/replication.c +++ b/src/vnet/replication.c @@ -31,16 +31,16 @@ replication_prep (vlib_main_t * vm, { replication_main_t *rm = &replication_main; replication_context_t *ctx; - uword cpu_number = vm->cpu_index; + uword thread_index = vm->thread_index; ip4_header_t *ip; u32 ctx_id; /* Allocate a context, reserve context 0 */ - if (PREDICT_FALSE (rm->contexts[cpu_number] == 0)) - pool_get_aligned (rm->contexts[cpu_number], ctx, CLIB_CACHE_LINE_BYTES); + if (PREDICT_FALSE (rm->contexts[thread_index] == 0)) + pool_get_aligned (rm->contexts[thread_index], ctx, CLIB_CACHE_LINE_BYTES); - pool_get_aligned (rm->contexts[cpu_number], ctx, CLIB_CACHE_LINE_BYTES); - ctx_id = ctx - rm->contexts[cpu_number]; + pool_get_aligned (rm->contexts[thread_index], ctx, CLIB_CACHE_LINE_BYTES); + ctx_id = ctx - rm->contexts[thread_index]; /* Save state from vlib buffer */ ctx->saved_free_list_index = b0->free_list_index; @@ -94,11 +94,11 @@ replication_recycle (vlib_main_t * vm, vlib_buffer_t * b0, u32 is_last) { replication_main_t *rm = &replication_main; replication_context_t *ctx; - uword cpu_number = vm->cpu_index; + uword thread_index = vm->thread_index; ip4_header_t *ip; /* Get access to the replication context */ - ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->recycle_count); + ctx = pool_elt_at_index (rm->contexts[thread_index], b0->recycle_count); /* Restore vnet buffer state */ clib_memcpy (vnet_buffer (b0), ctx->vnet_buffer, @@ -133,7 +133,7 @@ replication_recycle (vlib_main_t * vm, vlib_buffer_t * b0, u32 is_last) b0->flags &= ~VLIB_BUFFER_RECYCLE; /* Free context back to its pool */ - pool_put (rm->contexts[cpu_number], ctx); + pool_put (rm->contexts[thread_index], ctx); } return ctx; @@ -160,7 +160,7 @@ replication_recycle_callback (vlib_main_t * vm, vlib_buffer_free_list_t * fl) replication_main_t *rm = &replication_main; replication_context_t *ctx; u32 feature_node_index = 0; - uword cpu_number = vm->cpu_index; + uword thread_index = vm->thread_index; /* * All buffers in the list are destined to the same recycle node. @@ -172,7 +172,7 @@ replication_recycle_callback (vlib_main_t * vm, vlib_buffer_free_list_t * fl) { bi0 = fl->buffers[0]; b0 = vlib_get_buffer (vm, bi0); - ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->recycle_count); + ctx = pool_elt_at_index (rm->contexts[thread_index], b0->recycle_count); feature_node_index = ctx->recycle_node_index; } diff --git a/src/vnet/replication.h b/src/vnet/replication.h index 5dc554c9..ce4b3ff1 100644 --- a/src/vnet/replication.h +++ b/src/vnet/replication.h @@ -100,7 +100,7 @@ replication_get_ctx (vlib_buffer_t * b0) replication_main_t *rm = &replication_main; return replication_is_recycled (b0) ? 
- pool_elt_at_index (rm->contexts[os_get_cpu_number ()], + pool_elt_at_index (rm->contexts[vlib_get_thread_index ()], b0->recycle_count) : 0; } diff --git a/src/vnet/session/node.c b/src/vnet/session/node.c index b86e87d9..dd211c51 100644 --- a/src/vnet/session/node.c +++ b/src/vnet/session/node.c @@ -311,7 +311,7 @@ session_queue_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, unix_shared_memory_queue_t *q; application_t *app; int n_tx_packets = 0; - u32 my_thread_index = vm->cpu_index; + u32 my_thread_index = vm->thread_index; int i, rv; f64 now = vlib_time_now (vm); diff --git a/src/vnet/sr/sr_localsid.c b/src/vnet/sr/sr_localsid.c index 2e3d56de..6d72a506 100755 --- a/src/vnet/sr/sr_localsid.c +++ b/src/vnet/sr/sr_localsid.c @@ -887,7 +887,7 @@ sr_localsid_d_fn (vlib_main_t * vm, vlib_node_runtime_t * node, from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; next_index = node->cached_next_index; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); while (n_left_from > 0) { @@ -974,26 +974,26 @@ sr_localsid_d_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_increment_combined_counter (((next0 == SR_LOCALSID_NEXT_ERROR) ? &(sm->sr_ls_invalid_counters) : - &(sm->sr_ls_valid_counters)), cpu_index, ls0 - sm->localsids, 1, - vlib_buffer_length_in_chain (vm, b0)); + &(sm->sr_ls_valid_counters)), thread_index, ls0 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b0)); vlib_increment_combined_counter (((next1 == SR_LOCALSID_NEXT_ERROR) ? &(sm->sr_ls_invalid_counters) : - &(sm->sr_ls_valid_counters)), cpu_index, ls1 - sm->localsids, 1, - vlib_buffer_length_in_chain (vm, b1)); + &(sm->sr_ls_valid_counters)), thread_index, ls1 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b1)); vlib_increment_combined_counter (((next2 == SR_LOCALSID_NEXT_ERROR) ? &(sm->sr_ls_invalid_counters) : - &(sm->sr_ls_valid_counters)), cpu_index, ls2 - sm->localsids, 1, - vlib_buffer_length_in_chain (vm, b2)); + &(sm->sr_ls_valid_counters)), thread_index, ls2 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b2)); vlib_increment_combined_counter (((next3 == SR_LOCALSID_NEXT_ERROR) ? &(sm->sr_ls_invalid_counters) : - &(sm->sr_ls_valid_counters)), cpu_index, ls3 - sm->localsids, 1, - vlib_buffer_length_in_chain (vm, b3)); + &(sm->sr_ls_valid_counters)), thread_index, ls3 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b3)); vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next, n_left_to_next, bi0, bi1, bi2, bi3, @@ -1062,8 +1062,8 @@ sr_localsid_d_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_increment_combined_counter (((next0 == SR_LOCALSID_NEXT_ERROR) ? &(sm->sr_ls_invalid_counters) : - &(sm->sr_ls_valid_counters)), cpu_index, ls0 - sm->localsids, 1, - vlib_buffer_length_in_chain (vm, b0)); + &(sm->sr_ls_valid_counters)), thread_index, ls0 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b0)); vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, bi0, next0); @@ -1103,7 +1103,7 @@ sr_localsid_fn (vlib_main_t * vm, vlib_node_runtime_t * node, from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; next_index = node->cached_next_index; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); while (n_left_from > 0) { @@ -1205,26 +1205,26 @@ sr_localsid_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_increment_combined_counter (((next0 == SR_LOCALSID_NEXT_ERROR) ? 
&(sm->sr_ls_invalid_counters) : - &(sm->sr_ls_valid_counters)), cpu_index, ls0 - sm->localsids, 1, - vlib_buffer_length_in_chain (vm, b0)); + &(sm->sr_ls_valid_counters)), thread_index, ls0 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b0)); vlib_increment_combined_counter (((next1 == SR_LOCALSID_NEXT_ERROR) ? &(sm->sr_ls_invalid_counters) : - &(sm->sr_ls_valid_counters)), cpu_index, ls1 - sm->localsids, 1, - vlib_buffer_length_in_chain (vm, b1)); + &(sm->sr_ls_valid_counters)), thread_index, ls1 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b1)); vlib_increment_combined_counter (((next2 == SR_LOCALSID_NEXT_ERROR) ? &(sm->sr_ls_invalid_counters) : - &(sm->sr_ls_valid_counters)), cpu_index, ls2 - sm->localsids, 1, - vlib_buffer_length_in_chain (vm, b2)); + &(sm->sr_ls_valid_counters)), thread_index, ls2 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b2)); vlib_increment_combined_counter (((next3 == SR_LOCALSID_NEXT_ERROR) ? &(sm->sr_ls_invalid_counters) : - &(sm->sr_ls_valid_counters)), cpu_index, ls3 - sm->localsids, 1, - vlib_buffer_length_in_chain (vm, b3)); + &(sm->sr_ls_valid_counters)), thread_index, ls3 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b3)); vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next, n_left_to_next, bi0, bi1, bi2, bi3, @@ -1295,8 +1295,8 @@ sr_localsid_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_increment_combined_counter (((next0 == SR_LOCALSID_NEXT_ERROR) ? &(sm->sr_ls_invalid_counters) : - &(sm->sr_ls_valid_counters)), cpu_index, ls0 - sm->localsids, 1, - vlib_buffer_length_in_chain (vm, b0)); + &(sm->sr_ls_valid_counters)), thread_index, ls0 - sm->localsids, + 1, vlib_buffer_length_in_chain (vm, b0)); vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, bi0, next0); diff --git a/src/vnet/tcp/builtin_client.c b/src/vnet/tcp/builtin_client.c index e3705060..c1567aa0 100644 --- a/src/vnet/tcp/builtin_client.c +++ b/src/vnet/tcp/builtin_client.c @@ -174,7 +174,7 @@ tclient_thread_fn (void *arg) pthread_sigmask (SIG_SETMASK, &s, 0); } - clib_per_cpu_mheaps[os_get_cpu_number ()] = clib_per_cpu_mheaps[0]; + clib_per_cpu_mheaps[vlib_get_thread_index ()] = clib_per_cpu_mheaps[0]; while (1) { diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c index b2a371e2..b6c34828 100644 --- a/src/vnet/tcp/tcp.c +++ b/src/vnet/tcp/tcp.c @@ -646,10 +646,10 @@ const static transport_proto_vft_t tcp6_proto = { void tcp_timer_keep_handler (u32 conn_index) { - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); tcp_connection_t *tc; - tc = tcp_connection_get (conn_index, cpu_index); + tc = tcp_connection_get (conn_index, thread_index); tc->timers[TCP_TIMER_KEEP] = TCP_TIMER_HANDLE_INVALID; tcp_connection_close (tc); @@ -675,10 +675,10 @@ tcp_timer_establish_handler (u32 conn_index) void tcp_timer_waitclose_handler (u32 conn_index) { - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); tcp_connection_t *tc; - tc = tcp_connection_get (conn_index, cpu_index); + tc = tcp_connection_get (conn_index, thread_index); tc->timers[TCP_TIMER_WAITCLOSE] = TCP_TIMER_HANDLE_INVALID; /* Session didn't come back with a close(). 
Send FIN either way diff --git a/src/vnet/tcp/tcp_debug.h b/src/vnet/tcp/tcp_debug.h index 0090e15e..eaca672c 100644 --- a/src/vnet/tcp/tcp_debug.h +++ b/src/vnet/tcp/tcp_debug.h @@ -343,7 +343,7 @@ typedef enum _tcp_dbg_evt } \ else \ { \ - u32 _thread_index = os_get_cpu_number (); \ + u32 _thread_index = vlib_get_thread_index (); \ _tc = tcp_connection_get (_tc_index, _thread_index); \ } \ ELOG_TYPE_DECLARE (_e) = \ diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c index a8224dc2..7e9fa47b 100644 --- a/src/vnet/tcp/tcp_input.c +++ b/src/vnet/tcp/tcp_input.c @@ -1142,7 +1142,7 @@ tcp46_established_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame, int is_ip4) { u32 n_left_from, next_index, *from, *to_next; - u32 my_thread_index = vm->cpu_index, errors = 0; + u32 my_thread_index = vm->thread_index, errors = 0; tcp_main_t *tm = vnet_get_tcp_main (); from = vlib_frame_vector_args (from_frame); @@ -1332,7 +1332,7 @@ tcp46_syn_sent_inline (vlib_main_t * vm, vlib_node_runtime_t * node, { tcp_main_t *tm = vnet_get_tcp_main (); u32 n_left_from, next_index, *from, *to_next; - u32 my_thread_index = vm->cpu_index, errors = 0; + u32 my_thread_index = vm->thread_index, errors = 0; u8 sst = is_ip4 ? SESSION_TYPE_IP4_TCP : SESSION_TYPE_IP6_TCP; from = vlib_frame_vector_args (from_frame); @@ -1634,7 +1634,7 @@ tcp46_rcv_process_inline (vlib_main_t * vm, vlib_node_runtime_t * node, { tcp_main_t *tm = vnet_get_tcp_main (); u32 n_left_from, next_index, *from, *to_next; - u32 my_thread_index = vm->cpu_index, errors = 0; + u32 my_thread_index = vm->thread_index, errors = 0; from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; @@ -1989,7 +1989,7 @@ tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame, int is_ip4) { u32 n_left_from, next_index, *from, *to_next; - u32 my_thread_index = vm->cpu_index; + u32 my_thread_index = vm->thread_index; tcp_main_t *tm = vnet_get_tcp_main (); u8 sst = is_ip4 ? 
SESSION_TYPE_IP4_TCP : SESSION_TYPE_IP6_TCP; @@ -2243,7 +2243,7 @@ tcp46_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame, int is_ip4) { u32 n_left_from, next_index, *from, *to_next; - u32 my_thread_index = vm->cpu_index; + u32 my_thread_index = vm->thread_index; tcp_main_t *tm = vnet_get_tcp_main (); from = vlib_frame_vector_args (from_frame); diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c index ea157bd7..e18bfad7 100644 --- a/src/vnet/tcp/tcp_output.c +++ b/src/vnet/tcp/tcp_output.c @@ -387,8 +387,8 @@ tcp_make_options (tcp_connection_t * tc, tcp_options_t * opts, #define tcp_get_free_buffer_index(tm, bidx) \ do { \ u32 *my_tx_buffers, n_free_buffers; \ - u32 cpu_index = os_get_cpu_number(); \ - my_tx_buffers = tm->tx_buffers[cpu_index]; \ + u32 thread_index = vlib_get_thread_index(); \ + my_tx_buffers = tm->tx_buffers[thread_index]; \ if (PREDICT_FALSE(vec_len (my_tx_buffers) == 0)) \ { \ n_free_buffers = 32; /* TODO config or macro */ \ @@ -396,7 +396,7 @@ do { \ _vec_len(my_tx_buffers) = vlib_buffer_alloc_from_free_list ( \ tm->vlib_main, my_tx_buffers, n_free_buffers, \ VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX); \ - tm->tx_buffers[cpu_index] = my_tx_buffers; \ + tm->tx_buffers[thread_index] = my_tx_buffers; \ } \ /* buffer shortage */ \ if (PREDICT_FALSE (vec_len (my_tx_buffers) == 0)) \ @@ -408,8 +408,8 @@ do { \ #define tcp_return_buffer(tm) \ do { \ u32 *my_tx_buffers; \ - u32 cpu_index = os_get_cpu_number(); \ - my_tx_buffers = tm->tx_buffers[cpu_index]; \ + u32 thread_index = vlib_get_thread_index(); \ + my_tx_buffers = tm->tx_buffers[thread_index]; \ _vec_len (my_tx_buffers) +=1; \ } while (0) @@ -942,7 +942,7 @@ tcp_send_ack (tcp_connection_t * tc) void tcp_timer_delack_handler (u32 index) { - u32 thread_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); tcp_connection_t *tc; tc = tcp_connection_get (index, thread_index); @@ -1022,7 +1022,7 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn) { tcp_main_t *tm = vnet_get_tcp_main (); vlib_main_t *vm = vlib_get_main (); - u32 thread_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); tcp_connection_t *tc; vlib_buffer_t *b; u32 bi, snd_space, n_bytes; @@ -1152,7 +1152,7 @@ tcp_timer_persist_handler (u32 index) { tcp_main_t *tm = vnet_get_tcp_main (); vlib_main_t *vm = vlib_get_main (); - u32 thread_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); tcp_connection_t *tc; vlib_buffer_t *b; u32 bi, n_bytes; @@ -1313,7 +1313,7 @@ tcp46_output_inline (vlib_main_t * vm, vlib_frame_t * from_frame, int is_ip4) { u32 n_left_from, next_index, *from, *to_next; - u32 my_thread_index = vm->cpu_index; + u32 my_thread_index = vm->thread_index; from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; @@ -1524,7 +1524,7 @@ tcp46_send_reset_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame, u8 is_ip4) { u32 n_left_from, next_index, *from, *to_next; - u32 my_thread_index = vm->cpu_index; + u32 my_thread_index = vm->thread_index; from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; diff --git a/src/vnet/udp/udp_input.c b/src/vnet/udp/udp_input.c index 4b22109b..810278e6 100644 --- a/src/vnet/udp/udp_input.c +++ b/src/vnet/udp/udp_input.c @@ -70,7 +70,7 @@ udp4_uri_input_node_fn (vlib_main_t * vm, udp4_uri_input_next_t next_index; udp_uri_main_t *um = vnet_get_udp_main (); session_manager_main_t *smm = vnet_get_session_manager_main (); - 
u32 my_thread_index = vm->cpu_index; + u32 my_thread_index = vm->thread_index; u8 my_enqueue_epoch; u32 *session_indices_to_enqueue; static u32 serial_number; diff --git a/src/vnet/unix/tapcli.c b/src/vnet/unix/tapcli.c index fb1a8bac..0fc62f6c 100644 --- a/src/vnet/unix/tapcli.c +++ b/src/vnet/unix/tapcli.c @@ -366,7 +366,7 @@ static uword tapcli_rx_iface(vlib_main_t * vm, vlib_increment_combined_counter ( vnet_main.interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - os_get_cpu_number(), ti->sw_if_index, + vlib_get_thread_index(), ti->sw_if_index, 1, n_bytes_in_packet); if (PREDICT_FALSE(n_trace > 0)) { diff --git a/src/vnet/unix/tuntap.c b/src/vnet/unix/tuntap.c index 2cfcc92f..ac674653 100644 --- a/src/vnet/unix/tuntap.c +++ b/src/vnet/unix/tuntap.c @@ -189,7 +189,7 @@ tuntap_tx (vlib_main_t * vm, /* Update tuntap interface output stats. */ vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - vm->cpu_index, + vm->thread_index, tm->sw_if_index, n_packets, n_bytes); @@ -297,7 +297,7 @@ tuntap_rx (vlib_main_t * vm, vlib_increment_combined_counter (vnet_main.interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - os_get_cpu_number(), + vlib_get_thread_index(), tm->sw_if_index, 1, n_bytes_in_packet); diff --git a/src/vnet/vxlan-gpe/decap.c b/src/vnet/vxlan-gpe/decap.c index 22ab4b62..d4fe4231 100644 --- a/src/vnet/vxlan-gpe/decap.c +++ b/src/vnet/vxlan-gpe/decap.c @@ -115,7 +115,7 @@ vxlan_gpe_input (vlib_main_t * vm, vxlan4_gpe_tunnel_key_t last_key4; vxlan6_gpe_tunnel_key_t last_key6; u32 pkts_decapsulated = 0; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); u32 stats_sw_if_index, stats_n_packets, stats_n_bytes; if (is_ip4) @@ -342,7 +342,7 @@ vxlan_gpe_input (vlib_main_t * vm, if (stats_n_packets) vlib_increment_combined_counter ( im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); stats_n_packets = 1; stats_n_bytes = len0; stats_sw_if_index = sw_if_index0; @@ -427,7 +427,7 @@ vxlan_gpe_input (vlib_main_t * vm, if (stats_n_packets) vlib_increment_combined_counter ( im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); stats_n_packets = 1; stats_n_bytes = len1; stats_sw_if_index = sw_if_index1; @@ -588,7 +588,7 @@ vxlan_gpe_input (vlib_main_t * vm, if (stats_n_packets) vlib_increment_combined_counter ( im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); stats_n_packets = 1; stats_n_bytes = len0; stats_sw_if_index = sw_if_index0; @@ -615,7 +615,7 @@ vxlan_gpe_input (vlib_main_t * vm, if (stats_n_packets) { vlib_increment_combined_counter ( - im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, cpu_index, + im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); node->runtime_data[0] = stats_sw_if_index; } diff --git a/src/vnet/vxlan-gpe/encap.c b/src/vnet/vxlan-gpe/encap.c index 3a486e56..67ed94b4 100644 --- a/src/vnet/vxlan-gpe/encap.c +++ b/src/vnet/vxlan-gpe/encap.c @@ -151,7 +151,7 @@ vxlan_gpe_encap (vlib_main_t * vm, vnet_main_t * vnm = ngm->vnet_main; vnet_interface_main_t * im = &vnm->interface_main; 
u32 pkts_encapsulated = 0; - u32 cpu_index = os_get_cpu_number (); + u32 thread_index = vlib_get_thread_index (); u32 stats_sw_if_index, stats_n_packets, stats_n_bytes; from = vlib_frame_vector_args (from_frame); @@ -253,7 +253,7 @@ vxlan_gpe_encap (vlib_main_t * vm, if (stats_n_packets) vlib_increment_combined_counter ( im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); stats_sw_if_index = sw_if_index0; stats_n_packets = 2; stats_n_bytes = len0 + len1; @@ -262,10 +262,10 @@ vxlan_gpe_encap (vlib_main_t * vm, { vlib_increment_combined_counter ( im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, sw_if_index0, 1, len0); + thread_index, sw_if_index0, 1, len0); vlib_increment_combined_counter ( im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, sw_if_index1, 1, len1); + thread_index, sw_if_index1, 1, len1); } } @@ -335,7 +335,7 @@ vxlan_gpe_encap (vlib_main_t * vm, if (stats_n_packets) vlib_increment_combined_counter ( im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); stats_n_packets = 1; stats_n_bytes = len0; stats_sw_if_index = sw_if_index0; @@ -359,7 +359,7 @@ vxlan_gpe_encap (vlib_main_t * vm, if (stats_n_packets) { vlib_increment_combined_counter ( - im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, cpu_index, + im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); node->runtime_data[0] = stats_sw_if_index; } diff --git a/src/vnet/vxlan/decap.c b/src/vnet/vxlan/decap.c index 514b2c99..2acb1f6f 100644 --- a/src/vnet/vxlan/decap.c +++ b/src/vnet/vxlan/decap.c @@ -81,7 +81,7 @@ vxlan_input (vlib_main_t * vm, vxlan4_tunnel_key_t last_key4; vxlan6_tunnel_key_t last_key6; u32 pkts_decapsulated = 0; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); u32 stats_sw_if_index, stats_n_packets, stats_n_bytes; if (is_ip4) @@ -314,7 +314,7 @@ vxlan_input (vlib_main_t * vm, if (stats_n_packets) vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, stats_sw_if_index, + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); stats_n_packets = 1; stats_n_bytes = len0; @@ -468,7 +468,7 @@ vxlan_input (vlib_main_t * vm, if (stats_n_packets) vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, stats_sw_if_index, + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); stats_n_packets = 1; stats_n_bytes = len1; @@ -674,7 +674,7 @@ vxlan_input (vlib_main_t * vm, if (stats_n_packets) vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, stats_sw_if_index, + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); stats_n_packets = 1; stats_n_bytes = len0; @@ -711,7 +711,7 @@ vxlan_input (vlib_main_t * vm, { vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, - cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); node->runtime_data[0] = stats_sw_if_index; } diff --git a/src/vnet/vxlan/encap.c b/src/vnet/vxlan/encap.c index 5b63064a..4cfbbc23 100644 --- a/src/vnet/vxlan/encap.c +++ b/src/vnet/vxlan/encap.c 
@@ -77,7 +77,7 @@ vxlan_encap_inline (vlib_main_t * vm, vnet_interface_main_t * im = &vnm->interface_main; u32 pkts_encapsulated = 0; u16 old_l0 = 0, old_l1 = 0; - u32 cpu_index = os_get_cpu_number(); + u32 thread_index = vlib_get_thread_index(); u32 stats_sw_if_index, stats_n_packets, stats_n_bytes; u32 sw_if_index0 = 0, sw_if_index1 = 0; u32 next0 = 0, next1 = 0; @@ -301,7 +301,7 @@ vxlan_encap_inline (vlib_main_t * vm, if (stats_n_packets) vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, stats_sw_if_index, + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); stats_sw_if_index = sw_if_index0; stats_n_packets = 2; @@ -311,10 +311,10 @@ vxlan_encap_inline (vlib_main_t * vm, { vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, sw_if_index0, 1, len0); + thread_index, sw_if_index0, 1, len0); vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, sw_if_index1, 1, len1); + thread_index, sw_if_index1, 1, len1); } } @@ -464,7 +464,7 @@ vxlan_encap_inline (vlib_main_t * vm, if (stats_n_packets) vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, stats_sw_if_index, + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); stats_n_packets = 1; stats_n_bytes = len0; @@ -496,7 +496,7 @@ vxlan_encap_inline (vlib_main_t * vm, { vlib_increment_combined_counter (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX, - cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); + thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes); node->runtime_data[0] = stats_sw_if_index; } diff --git a/src/vpp/stats/stats.c b/src/vpp/stats/stats.c index 042d02e2..4309cd51 100644 --- a/src/vpp/stats/stats.c +++ b/src/vpp/stats/stats.c @@ -66,14 +66,14 @@ _(VNET_IP6_NBR_COUNTERS, vnet_ip6_nbr_counters) void dslock (stats_main_t * sm, int release_hint, int tag) { - u32 thread_id; + u32 thread_index; data_structure_lock_t *l = sm->data_structure_lock; if (PREDICT_FALSE (l == 0)) return; - thread_id = os_get_cpu_number (); - if (l->lock && l->thread_id == thread_id) + thread_index = vlib_get_thread_index (); + if (l->lock && l->thread_index == thread_index) { l->count++; return; @@ -85,7 +85,7 @@ dslock (stats_main_t * sm, int release_hint, int tag) while (__sync_lock_test_and_set (&l->lock, 1)) /* zzzz */ ; l->tag = tag; - l->thread_id = thread_id; + l->thread_index = thread_index; l->count = 1; } @@ -99,14 +99,14 @@ stats_dslock_with_hint (int hint, int tag) void dsunlock (stats_main_t * sm) { - u32 thread_id; + u32 thread_index; data_structure_lock_t *l = sm->data_structure_lock; if (PREDICT_FALSE (l == 0)) return; - thread_id = os_get_cpu_number (); - ASSERT (l->lock && l->thread_id == thread_id); + thread_index = vlib_get_thread_index (); + ASSERT (l->lock && l->thread_index == thread_index); l->count--; if (l->count == 0) { diff --git a/src/vpp/stats/stats.h b/src/vpp/stats/stats.h index 118115be..024dc78e 100644 --- a/src/vpp/stats/stats.h +++ b/src/vpp/stats/stats.h @@ -30,7 +30,7 @@ typedef struct { volatile u32 lock; volatile u32 release_hint; - u32 thread_id; + u32 thread_index; u32 count; int tag; } data_structure_lock_t; -- cgit 1.2.3-korg From a0558307187ef2317f31e3e876a1a5e1faa2541c Mon Sep 17 00:00:00 2001 From: Neale Ranns Date: Thu, 13 Apr 2017 00:44:52 -0700 Subject: Remove unsed parameter from fib_table_entry_special_add() (only used in FIB tests). 
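The subject line above describes a pure call-site simplification. A minimal sketch of a typical caller before and after this change -- the fib_index and pfx values are placeholders, not taken from any particular call site:

    u32 fib_index = 0;                                /* placeholder table */
    fib_prefix_t pfx = { .fp_proto = FIB_PROTOCOL_IP4, .fp_len = 32 };
    fib_node_index_t fei;

    /* before: callers had to pass an adjacency index, in practice always
     * ADJ_INDEX_INVALID outside of the FIB unit tests */
    fei = fib_table_entry_special_add (fib_index, &pfx,
                                       FIB_SOURCE_RR,
                                       FIB_ENTRY_FLAG_NONE,
                                       ADJ_INDEX_INVALID);

    /* after: the unused argument is gone; the entry is seeded with the
     * drop DPO until the source and flags say otherwise */
    fei = fib_table_entry_special_add (fib_index, &pfx,
                                       FIB_SOURCE_RR,
                                       FIB_ENTRY_FLAG_NONE);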
The DPO was incorrectly initialised with FIB_PROTO_MAX Change-Id: I962df9e162e4dfb6837a5ce79ea795d5ff2d7315 Signed-off-by: Neale Ranns --- src/plugins/ila/ila.c | 3 +-- src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.c | 4 +--- src/plugins/lb/lb.c | 3 +-- src/vnet/dhcp/client.c | 3 +-- src/vnet/dhcp/dhcp4_proxy_node.c | 3 +-- src/vnet/fib/fib_bfd.c | 3 +-- src/vnet/fib/fib_path.c | 3 +-- src/vnet/fib/fib_table.c | 15 ++------------- src/vnet/fib/fib_table.h | 14 ++++++-------- src/vnet/fib/fib_test.c | 16 ++++++++-------- src/vnet/fib/ip4_fib.c | 3 +-- src/vnet/fib/ip6_fib.c | 6 ++---- src/vnet/gre/interface.c | 3 +-- src/vnet/ip/ip4_forward.c | 6 ++---- src/vnet/ip/ip4_source_check.c | 2 +- src/vnet/lisp-gpe/lisp_gpe_tunnel.c | 3 +-- src/vnet/map/map.c | 4 +--- src/vnet/vxlan/vxlan.c | 2 +- 18 files changed, 33 insertions(+), 63 deletions(-) (limited to 'src/vnet/map') diff --git a/src/plugins/ila/ila.c b/src/plugins/ila/ila.c index edbf3017..fd56043e 100644 --- a/src/plugins/ila/ila.c +++ b/src/plugins/ila/ila.c @@ -736,8 +736,7 @@ ila_add_del_entry (ila_add_del_entry_args_t * args) fib_table_entry_special_add(0, &next_hop, FIB_SOURCE_RR, - FIB_ENTRY_FLAG_NONE, - ADJ_INDEX_INVALID); + FIB_ENTRY_FLAG_NONE); e->next_hop_child_index = fib_entry_child_add(e->next_hop_fib_entry_index, ila_fib_node_type, diff --git a/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.c b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.c index 88d7d205..cfc550cd 100644 --- a/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.c +++ b/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam.c @@ -336,9 +336,7 @@ vxlan_gpe_enable_disable_ioam_for_dest (vlib_main_t * vm, t1->fib_entry_index = fib_table_entry_special_add (outer_fib_index, &tun_dst_pfx, - FIB_SOURCE_RR, - FIB_ENTRY_FLAG_NONE, - ADJ_INDEX_INVALID); + FIB_SOURCE_RR, FIB_ENTRY_FLAG_NONE); t1->sibling_index = fib_entry_child_add (t1->fib_entry_index, hm->fib_entry_type, t1 - hm->dst_tunnels); diff --git a/src/plugins/lb/lb.c b/src/plugins/lb/lb.c index addc2a42..cc3f8532 100644 --- a/src/plugins/lb/lb.c +++ b/src/plugins/lb/lb.c @@ -510,8 +510,7 @@ next: fib_table_entry_special_add(0, &nh, FIB_SOURCE_RR, - FIB_ENTRY_FLAG_NONE, - ADJ_INDEX_INVALID); + FIB_ENTRY_FLAG_NONE); as->next_hop_child_index = fib_entry_child_add(as->next_hop_fib_entry_index, lbm->fib_node_type, diff --git a/src/vnet/dhcp/client.c b/src/vnet/dhcp/client.c index 29749a33..7c3f7f6a 100644 --- a/src/vnet/dhcp/client.c +++ b/src/vnet/dhcp/client.c @@ -781,8 +781,7 @@ int dhcp_client_add_del (dhcp_client_add_del_args_t * a) c->sw_if_index), &all_1s, FIB_SOURCE_DHCP, - FIB_ENTRY_FLAG_LOCAL, - ADJ_INDEX_INVALID); + FIB_ENTRY_FLAG_LOCAL); /* * enable the interface to RX IPv4 packets diff --git a/src/vnet/dhcp/dhcp4_proxy_node.c b/src/vnet/dhcp/dhcp4_proxy_node.c index 1c84881a..26e1e65c 100644 --- a/src/vnet/dhcp/dhcp4_proxy_node.c +++ b/src/vnet/dhcp/dhcp4_proxy_node.c @@ -807,8 +807,7 @@ dhcp4_proxy_set_server (ip46_address_t *addr, fib_table_entry_special_add(rx_fib_index, &all_1s, FIB_SOURCE_DHCP, - FIB_ENTRY_FLAG_LOCAL, - ADJ_INDEX_INVALID); + FIB_ENTRY_FLAG_LOCAL); fib_table_lock (rx_fib_index, FIB_PROTOCOL_IP4); } } diff --git a/src/vnet/fib/fib_bfd.c b/src/vnet/fib/fib_bfd.c index e5affb8d..734ee8cc 100644 --- a/src/vnet/fib/fib_bfd.c +++ b/src/vnet/fib/fib_bfd.c @@ -109,8 +109,7 @@ fib_bfd_notify (bfd_listen_event_e event, fei = fib_table_entry_special_add(key->fib_index, &pfx, FIB_SOURCE_RR, - FIB_ENTRY_FLAG_NONE, - ADJ_INDEX_INVALID); + FIB_ENTRY_FLAG_NONE); fib_entry_lock(fei); fed = 
fib_entry_delegate_find_or_add(fib_entry_get(fei), diff --git a/src/vnet/fib/fib_path.c b/src/vnet/fib/fib_path.c index cd7d9278..70c87905 100644 --- a/src/vnet/fib/fib_path.c +++ b/src/vnet/fib/fib_path.c @@ -1621,8 +1621,7 @@ fib_path_resolve (fib_node_index_t path_index) fei = fib_table_entry_special_add(path->recursive.fp_tbl_id, &pfx, FIB_SOURCE_RR, - FIB_ENTRY_FLAG_NONE, - ADJ_INDEX_INVALID); + FIB_ENTRY_FLAG_NONE); path = fib_path_get(path_index); path->fp_via_fib = fei; diff --git a/src/vnet/fib/fib_table.c b/src/vnet/fib/fib_table.c index b31f35e3..0938ce9b 100644 --- a/src/vnet/fib/fib_table.c +++ b/src/vnet/fib/fib_table.c @@ -371,23 +371,12 @@ fib_node_index_t fib_table_entry_special_add (u32 fib_index, const fib_prefix_t *prefix, fib_source_t source, - fib_entry_flag_t flags, - adj_index_t adj_index) + fib_entry_flag_t flags) { fib_node_index_t fib_entry_index; dpo_id_t tmp_dpo = DPO_INVALID; - if (ADJ_INDEX_INVALID != adj_index) - { - dpo_set(&tmp_dpo, - DPO_ADJACENCY, - FIB_PROTOCOL_MAX, - adj_index); - } - else - { - dpo_copy(&tmp_dpo, drop_dpo_get(fib_proto_to_dpo(prefix->fp_proto))); - } + dpo_copy(&tmp_dpo, drop_dpo_get(fib_proto_to_dpo(prefix->fp_proto))); fib_entry_index = fib_table_entry_special_dpo_add(fib_index, prefix, source, flags, &tmp_dpo); diff --git a/src/vnet/fib/fib_table.h b/src/vnet/fib/fib_table.h index b310aea6..f24d28b7 100644 --- a/src/vnet/fib/fib_table.h +++ b/src/vnet/fib/fib_table.h @@ -126,14 +126,16 @@ extern fib_node_index_t fib_table_get_less_specific(u32 fib_index, /** * @brief - * Add a 'special' entry to the FIB that links to the adj passed + * Add a 'special' entry to the FIB. * A special entry is an entry that the FIB is not expect to resolve * via the usual mechanisms (i.e. recurisve or neighbour adj DB lookup). - * Instead the client/source provides the adj to link to. + * Instead the will link to a DPO valid for the source and/or the flags. * This add is reference counting per-source. So n 'removes' are required * for n 'adds', if the entry is no longer required. + * If the source needs to provide non-default forwarding use: + * fib_table_entry_special_dpo_add() * - * @param fib_index + * @param fib_index * The index of the FIB * * @param prefix @@ -145,17 +147,13 @@ extern fib_node_index_t fib_table_get_less_specific(u32 fib_index, * @param flags * Flags for the entry. * - * @param adj_index - * The adjacency to link to. - * * @return * the index of the fib_entry_t that is created (or exists already). 
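A rough usage sketch of the distinction drawn in this comment; the prefix, source, flags and the caller-supplied dpo below are assumptions for illustration, not code from the tree:

    /* default case: the FIB links the entry to a DPO valid for the
     * source and/or the flags */
    fei = fib_table_entry_special_add (fib_index, &pfx,
                                       FIB_SOURCE_SPECIAL,
                                       FIB_ENTRY_FLAG_LOCAL);

    /* when the source must provide its own forwarding, hand in a DPO */
    fei = fib_table_entry_special_dpo_add (fib_index, &pfx,
                                           FIB_SOURCE_SPECIAL,
                                           FIB_ENTRY_FLAG_NONE,
                                           &dpo);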
*/ extern fib_node_index_t fib_table_entry_special_add(u32 fib_index, const fib_prefix_t *prefix, fib_source_t source, - fib_entry_flag_t flags, - adj_index_t adj_index); + fib_entry_flag_t flags); /** * @brief diff --git a/src/vnet/fib/fib_test.c b/src/vnet/fib/fib_test.c index e4a8a70e..c58dc5a1 100644 --- a/src/vnet/fib/fib_test.c +++ b/src/vnet/fib/fib_test.c @@ -1378,8 +1378,8 @@ fib_test_v4 (void) fib_entry_pool_size()); /* - * An EXCLUSIVE route; one where the user (me) provides the exclusive - * adjacency through which the route will resovle + * An special route; one where the user (me) provides the + * adjacency through which the route will resovle by setting the flags */ fib_prefix_t ex_pfx = { .fp_len = 32, @@ -1393,11 +1393,12 @@ fib_test_v4 (void) fib_table_entry_special_add(fib_index, &ex_pfx, FIB_SOURCE_SPECIAL, - FIB_ENTRY_FLAG_EXCLUSIVE, - locked_ai); + FIB_ENTRY_FLAG_LOCAL); fei = fib_table_lookup_exact_match(fib_index, &ex_pfx); - FIB_TEST((ai == fib_entry_get_adj(fei)), - "Exclusive route links to user adj"); + dpo = fib_entry_contribute_ip_forwarding(fei); + dpo = load_balance_get_bucket(dpo->dpoi_index, 0); + FIB_TEST((DPO_RECEIVE == dpo->dpoi_type), + "local interface adj is local"); fib_table_entry_special_remove(fib_index, &ex_pfx, @@ -3675,8 +3676,7 @@ fib_test_v4 (void) fei = fib_table_entry_special_add(fib_index, &pfx_4_1_1_1_s_32, FIB_SOURCE_URPF_EXEMPT, - FIB_ENTRY_FLAG_DROP, - ADJ_INDEX_INVALID); + FIB_ENTRY_FLAG_DROP); dpo = fib_entry_contribute_ip_forwarding(fei); FIB_TEST(load_balance_is_drop(dpo), "uRPF exempt 4.1.1.1/32 DROP"); diff --git a/src/vnet/fib/ip4_fib.c b/src/vnet/fib/ip4_fib.c index b03186e8..8e92d851 100644 --- a/src/vnet/fib/ip4_fib.c +++ b/src/vnet/fib/ip4_fib.c @@ -149,8 +149,7 @@ ip4_create_fib_with_table_id (u32 table_id) fib_table_entry_special_add(fib_table->ft_index, &prefix, ip4_specials[ii].ift_source, - ip4_specials[ii].ift_flag, - ADJ_INDEX_INVALID); + ip4_specials[ii].ift_flag); } return (fib_table->ft_index); diff --git a/src/vnet/fib/ip6_fib.c b/src/vnet/fib/ip6_fib.c index 00297140..d00f4c55 100644 --- a/src/vnet/fib/ip6_fib.c +++ b/src/vnet/fib/ip6_fib.c @@ -35,8 +35,7 @@ vnet_ip6_fib_init (u32 fib_index) fib_table_entry_special_add(fib_index, &pfx, FIB_SOURCE_DEFAULT_ROUTE, - FIB_ENTRY_FLAG_DROP, - ADJ_INDEX_INVALID); + FIB_ENTRY_FLAG_DROP); /* * all link local for us @@ -47,8 +46,7 @@ vnet_ip6_fib_init (u32 fib_index) fib_table_entry_special_add(fib_index, &pfx, FIB_SOURCE_SPECIAL, - FIB_ENTRY_FLAG_LOCAL, - ADJ_INDEX_INVALID); + FIB_ENTRY_FLAG_LOCAL); } static u32 diff --git a/src/vnet/gre/interface.c b/src/vnet/gre/interface.c index 91a3899f..d574e596 100644 --- a/src/vnet/gre/interface.c +++ b/src/vnet/gre/interface.c @@ -427,8 +427,7 @@ vnet_gre_tunnel_add (vnet_gre_add_del_tunnel_args_t *a, fib_table_entry_special_add(outer_fib_index, &t->tunnel_dst, FIB_SOURCE_RR, - FIB_ENTRY_FLAG_NONE, - ADJ_INDEX_INVALID); + FIB_ENTRY_FLAG_NONE); t->sibling_index = fib_entry_child_add(t->fib_entry_index, FIB_NODE_TYPE_GRE_TUNNEL, diff --git a/src/vnet/ip/ip4_forward.c b/src/vnet/ip/ip4_forward.c index e42b3637..0f562037 100644 --- a/src/vnet/ip/ip4_forward.c +++ b/src/vnet/ip/ip4_forward.c @@ -732,16 +732,14 @@ ip4_add_interface_routes (u32 sw_if_index, &net_pfx, FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_DROP | - FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT), - ADJ_INDEX_INVALID); + FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT)); net_pfx.fp_addr.ip4.as_u32 |= ~im->fib_masks[pfx.fp_len]; if (net_pfx.fp_addr.ip4.as_u32 != pfx.fp_addr.ip4.as_u32) 
fib_table_entry_special_add(fib_index, &net_pfx, FIB_SOURCE_INTERFACE, (FIB_ENTRY_FLAG_DROP | - FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT), - ADJ_INDEX_INVALID); + FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT)); } else if (pfx.fp_len == 31) { diff --git a/src/vnet/ip/ip4_source_check.c b/src/vnet/ip/ip4_source_check.c index 63b7594d..17a1cb1b 100644 --- a/src/vnet/ip/ip4_source_check.c +++ b/src/vnet/ip/ip4_source_check.c @@ -509,7 +509,7 @@ ip_source_check_accept (vlib_main_t * vm, fib_table_entry_special_add (fib_index, &pfx, FIB_SOURCE_URPF_EXEMPT, - FIB_ENTRY_FLAG_DROP, ADJ_INDEX_INVALID); + FIB_ENTRY_FLAG_DROP); } else { diff --git a/src/vnet/lisp-gpe/lisp_gpe_tunnel.c b/src/vnet/lisp-gpe/lisp_gpe_tunnel.c index 444bfe14..dd6c6fdd 100644 --- a/src/vnet/lisp-gpe/lisp_gpe_tunnel.c +++ b/src/vnet/lisp-gpe/lisp_gpe_tunnel.c @@ -179,8 +179,7 @@ lisp_gpe_tunnel_find_or_create_and_lock (const locator_pair_t * pair, lgt->fib_entry_index = fib_table_entry_special_add (rloc_fib_index, &pfx, FIB_SOURCE_RR, - FIB_ENTRY_FLAG_NONE, - ADJ_INDEX_INVALID); + FIB_ENTRY_FLAG_NONE); hash_set_mem (lisp_gpe_tunnel_db, &lgt->key, (lgt - lisp_gpe_tunnel_pool)); diff --git a/src/vnet/map/map.c b/src/vnet/map/map.c index 811a0abc..6a707df1 100644 --- a/src/vnet/map/map.c +++ b/src/vnet/map/map.c @@ -518,9 +518,7 @@ map_fib_resolve (map_main_pre_resolved_t * pr, pr->fei = fib_table_entry_special_add (0, // default fib &pfx, - FIB_SOURCE_RR, - FIB_ENTRY_FLAG_NONE, - ADJ_INDEX_INVALID); + FIB_SOURCE_RR, FIB_ENTRY_FLAG_NONE); pr->sibling = fib_entry_child_add (pr->fei, FIB_NODE_TYPE_MAP_E, proto); map_stack (pr); } diff --git a/src/vnet/vxlan/vxlan.c b/src/vnet/vxlan/vxlan.c index 61cb13c9..1b3df2a8 100644 --- a/src/vnet/vxlan/vxlan.c +++ b/src/vnet/vxlan/vxlan.c @@ -486,7 +486,7 @@ int vnet_vxlan_add_del_tunnel vtep_addr_ref(&t->src); t->fib_entry_index = fib_table_entry_special_add (t->encap_fib_index, &tun_dst_pfx, FIB_SOURCE_RR, - FIB_ENTRY_FLAG_NONE, ADJ_INDEX_INVALID); + FIB_ENTRY_FLAG_NONE); t->sibling_index = fib_entry_child_add (t->fib_entry_index, FIB_NODE_TYPE_VXLAN_TUNNEL, t - vxm->tunnels); vxlan_tunnel_restack_dpo(t); -- cgit 1.2.3-korg From 11b8dbf78af49d270a0e72abe7dea73eec30d85f Mon Sep 17 00:00:00 2001 From: Dave Barach Date: Mon, 24 Apr 2017 10:46:54 -0400 Subject: "autoreply" flag: autogenerate standard xxx_reply_t messages Change-Id: I72298aaae7d172082ece3a8edea4217c11b28d79 Signed-off-by: Dave Barach --- src/examples/sample-plugin/sample/sample.api | 10 +- src/plugins/acl/acl.api | 60 +--- src/plugins/dpdk/api/dpdk.api | 35 +- src/plugins/flowperpkt/flowperpkt.api | 23 +- .../export-vxlan-gpe/vxlan_gpe_ioam_export.api | 10 +- src/plugins/ioam/export/ioam_export.api | 10 +- src/plugins/ioam/ip6/ioam_cache.api | 10 +- src/plugins/ioam/lib-pot/pot.api | 34 +- src/plugins/ioam/lib-trace/trace.api | 26 +- src/plugins/ioam/lib-vxlan-gpe/ioam_vxlan_gpe.api | 82 +---- src/plugins/lb/lb.api | 21 +- src/plugins/memif/memif.api | 12 +- src/plugins/snat/snat.api | 88 +---- src/tools/vppapigen/gram.y | 3 +- src/tools/vppapigen/lex.c | 57 +++- src/tools/vppapigen/lex.h | 1 + src/tools/vppapigen/node.c | 5 + src/tools/vppapigen/node.h | 2 + src/vlibmemory/memclnt.api | 7 +- src/vlibmemory/memory_vlib.c | 8 +- src/vnet/bfd/bfd.api | 132 +------- src/vnet/classify/classify.api | 37 +-- src/vnet/cop/cop.api | 28 +- src/vnet/devices/af_packet/af_packet.api | 12 +- src/vnet/devices/netmap/netmap.api | 24 +- src/vnet/devices/virtio/vhost_user.api | 24 +- src/vnet/dhcp/dhcp.api | 38 +-- src/vnet/flow/flow.api | 32 +- 
src/vnet/interface.api | 108 +----- src/vnet/ip/ip.api | 108 +----- src/vnet/ipsec/ipsec.api | 224 ++----------- src/vnet/l2/l2.api | 96 +----- src/vnet/l2tp/l2tp.api | 28 +- src/vnet/lisp-cp/lisp.api | 164 +-------- src/vnet/lisp-cp/one.api | 185 +---------- src/vnet/lisp-gpe/lisp_gpe.api | 48 +-- src/vnet/map/map.api | 22 +- src/vnet/mpls/mpls.api | 26 +- src/vnet/session/session.api | 68 +--- src/vnet/span/span.api | 10 +- src/vnet/sr/sr.api | 60 +--- src/vnet/unix/tap.api | 12 +- src/vnet/vxlan/vxlan.api | 12 +- src/vpp/api/vpe.api | 367 ++------------------- 44 files changed, 271 insertions(+), 2098 deletions(-) (limited to 'src/vnet/map') diff --git a/src/examples/sample-plugin/sample/sample.api b/src/examples/sample-plugin/sample/sample.api index f99cdb38..d565c0b1 100644 --- a/src/examples/sample-plugin/sample/sample.api +++ b/src/examples/sample-plugin/sample/sample.api @@ -16,7 +16,7 @@ /* Define a simple binary API to control the feature */ -define sample_macswap_enable_disable { +autoreply define sample_macswap_enable_disable { /* Client identifier, set from api_main.my_client_index */ u32 client_index; @@ -29,11 +29,3 @@ define sample_macswap_enable_disable { /* Interface handle */ u32 sw_if_index; }; - -define sample_macswap_enable_disable_reply { - /* From the request */ - u32 context; - - /* Return value, zero means all OK */ - i32 retval; -}; diff --git a/src/plugins/acl/acl.api b/src/plugins/acl/acl.api index d981338d..3b334113 100644 --- a/src/plugins/acl/acl.api +++ b/src/plugins/acl/acl.api @@ -161,24 +161,13 @@ define acl_add_replace_reply @param acl_index - ACL index to delete */ -manual_print define acl_del +autoreply manual_print define acl_del { u32 client_index; u32 context; u32 acl_index; }; -/** \brief Reply to delete the ACL - @param context - returned sender context, to match reply w/ request - @param retval 0 - no error -*/ - -define acl_del_reply -{ - u32 context; - i32 retval; -}; - /* acl_interface_add_del(_reply) to be deprecated in lieu of acl_interface_set_acl_list */ /** \brief Use acl_interface_set_acl_list instead Append/remove an ACL index to/from the list of ACLs checked for an interface @@ -190,7 +179,7 @@ define acl_del_reply @param acl_index - index of ACL for the operation */ -manual_print define acl_interface_add_del +autoreply manual_print define acl_interface_add_del { u32 client_index; u32 context; @@ -204,17 +193,6 @@ manual_print define acl_interface_add_del u32 acl_index; }; -/** \brief Reply to alter the ACL list - @param context - returned sender context, to match reply w/ request - @param retval 0 - no error -*/ - -define acl_interface_add_del_reply -{ - u32 context; - i32 retval; -}; - /** \brief Set the vector of input/output ACLs checked for an interface @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -224,7 +202,7 @@ define acl_interface_add_del_reply @param acls - vector of ACL indices */ -manual_print define acl_interface_set_acl_list +autoreply manual_print define acl_interface_set_acl_list { u32 client_index; u32 context; @@ -239,12 +217,6 @@ manual_print define acl_interface_set_acl_list @param retval 0 - no error */ -define acl_interface_set_acl_list_reply -{ - u32 context; - i32 retval; -}; - /** \brief Dump the specific ACL contents or all of the ACLs' contents @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -341,24 +313,13 @@ define macip_acl_add_reply @param acl_index - MACIP 
ACL index to delete */ -manual_print define macip_acl_del +autoreply manual_print define macip_acl_del { u32 client_index; u32 context; u32 acl_index; }; -/** \brief Reply to delete the MACIP ACL - @param context - returned sender context, to match reply w/ request - @param retval 0 - no error -*/ - -define macip_acl_del_reply -{ - u32 context; - i32 retval; -}; - /** \brief Add or delete a MACIP ACL to/from interface @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -367,7 +328,7 @@ define macip_acl_del_reply @param acl_index - MACIP ACL index */ -manual_print define macip_acl_interface_add_del +autoreply manual_print define macip_acl_interface_add_del { u32 client_index; u32 context; @@ -377,17 +338,6 @@ manual_print define macip_acl_interface_add_del u32 acl_index; }; -/** \brief Reply to apply/unapply the MACIP ACL - @param context - returned sender context, to match reply w/ request - @param retval 0 - no error -*/ - -define macip_acl_interface_add_del_reply -{ - u32 context; - i32 retval; -}; - /** \brief Dump one or all defined MACIP ACLs @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request diff --git a/src/plugins/dpdk/api/dpdk.api b/src/plugins/dpdk/api/dpdk.api index 21215d45..d43f8a36 100644 --- a/src/plugins/dpdk/api/dpdk.api +++ b/src/plugins/dpdk/api/dpdk.api @@ -21,7 +21,7 @@ @param pipe - pipe ID within its subport @param profile - pipe profile ID */ -define sw_interface_set_dpdk_hqos_pipe { +autoreply define sw_interface_set_dpdk_hqos_pipe { u32 client_index; u32 context; u32 sw_if_index; @@ -30,15 +30,6 @@ define sw_interface_set_dpdk_hqos_pipe { u32 profile; }; -/** \brief DPDK interface HQoS pipe profile set reply - @param context - sender context, to match reply w/ request - @param retval - request return code -*/ -define sw_interface_set_dpdk_hqos_pipe_reply { - u32 context; - i32 retval; -}; - /** \brief DPDK interface HQoS subport parameters set request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -49,7 +40,7 @@ define sw_interface_set_dpdk_hqos_pipe_reply { @param tc_rate - subport traffic class 0 .. 3 rates (measured in bytes/second) @param tc_period - enforcement period for rates (measured in milliseconds) */ -define sw_interface_set_dpdk_hqos_subport { +autoreply define sw_interface_set_dpdk_hqos_subport { u32 client_index; u32 context; u32 sw_if_index; @@ -60,15 +51,6 @@ define sw_interface_set_dpdk_hqos_subport { u32 tc_period; }; -/** \brief DPDK interface HQoS subport parameters set reply - @param context - sender context, to match reply w/ request - @param retval - request return code -*/ -define sw_interface_set_dpdk_hqos_subport_reply { - u32 context; - i32 retval; -}; - /** \brief DPDK interface HQoS tctbl entry set request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -77,7 +59,7 @@ define sw_interface_set_dpdk_hqos_subport_reply { @param tc - traffic class (0 .. 3) @param queue - traffic class queue (0 .. 
3) */ -define sw_interface_set_dpdk_hqos_tctbl { +autoreply define sw_interface_set_dpdk_hqos_tctbl { u32 client_index; u32 context; u32 sw_if_index; @@ -86,18 +68,9 @@ define sw_interface_set_dpdk_hqos_tctbl { u32 queue; }; -/** \brief DPDK interface HQoS tctbl entry set reply - @param context - sender context, to match reply w/ request - @param retval - request return code -*/ -define sw_interface_set_dpdk_hqos_tctbl_reply { - u32 context; - i32 retval; -}; - /* * Local Variables: * eval: (c-set-style "gnu") * End: */ - \ No newline at end of file + diff --git a/src/plugins/flowperpkt/flowperpkt.api b/src/plugins/flowperpkt/flowperpkt.api index 1cf62c54..3ff92dca 100644 --- a/src/plugins/flowperpkt/flowperpkt.api +++ b/src/plugins/flowperpkt/flowperpkt.api @@ -12,7 +12,7 @@ @param is_ipv6 - if non-zero the address is ipv6, else ipv4 @param sw_if_index - index of the interface */ -manual_print define flowperpkt_tx_interface_add_del +autoreply manual_print define flowperpkt_tx_interface_add_del { /* Client identifier, set from api_main.my_client_index */ u32 client_index; @@ -28,20 +28,7 @@ manual_print define flowperpkt_tx_interface_add_del u32 sw_if_index; }; -/** \brief Reply to enable/disable per-packet IPFIX recording messages - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define flowperpkt_tx_interface_add_del_reply -{ - /* From the request */ - u32 context; - - /* Return value, zero means all OK */ - i32 retval; -}; - -define flowperpkt_params +autoreply define flowperpkt_params { u32 client_index; u32 context; @@ -51,9 +38,3 @@ define flowperpkt_params u32 active_timer; /* ~0 is off, 0 is default */ u32 passive_timer; /* ~0 is off, 0 is default */ }; - -define flowperpkt_params_reply -{ - u32 context; - i32 retval; -}; diff --git a/src/plugins/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export.api b/src/plugins/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export.api index 7b17c3f7..caa97e6e 100644 --- a/src/plugins/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export.api +++ b/src/plugins/ioam/export-vxlan-gpe/vxlan_gpe_ioam_export.api @@ -16,7 +16,7 @@ /* Define a simple binary API to control the feature */ -define vxlan_gpe_ioam_export_enable_disable { +autoreply define vxlan_gpe_ioam_export_enable_disable { /* Client identifier, set from api_main.my_client_index */ u32 client_index; @@ -32,11 +32,3 @@ define vxlan_gpe_ioam_export_enable_disable { /* Src ip address */ }; - -define vxlan_gpe_ioam_export_enable_disable_reply { - /* From the request */ - u32 context; - - /* Return value, zero means all OK */ - i32 retval; -}; \ No newline at end of file diff --git a/src/plugins/ioam/export/ioam_export.api b/src/plugins/ioam/export/ioam_export.api index f22d9fc8..bb830561 100644 --- a/src/plugins/ioam/export/ioam_export.api +++ b/src/plugins/ioam/export/ioam_export.api @@ -16,7 +16,7 @@ /* Define a simple binary API to control the feature */ -define ioam_export_ip6_enable_disable { +autoreply define ioam_export_ip6_enable_disable { /* Client identifier, set from api_main.my_client_index */ u32 client_index; @@ -32,11 +32,3 @@ define ioam_export_ip6_enable_disable { /* Src ip address */ }; - -define ioam_export_ip6_enable_disable_reply { - /* From the request */ - u32 context; - - /* Return value, zero means all OK */ - i32 retval; -}; diff --git a/src/plugins/ioam/ip6/ioam_cache.api b/src/plugins/ioam/ip6/ioam_cache.api index de50d57d..dd9c0186 100644 --- a/src/plugins/ioam/ip6/ioam_cache.api +++ b/src/plugins/ioam/ip6/ioam_cache.api @@ -16,7 +16,7 
@@ /* API to control ioam caching */ -define ioam_cache_ip6_enable_disable { +autoreply define ioam_cache_ip6_enable_disable { /* Client identifier, set from api_main.my_client_index */ u32 client_index; @@ -27,11 +27,3 @@ define ioam_cache_ip6_enable_disable { u8 is_disable; }; - -define ioam_cache_ip6_enable_disable_reply { - /* From the request */ - u32 context; - - /* Return value, zero means all OK */ - i32 retval; -}; diff --git a/src/plugins/ioam/lib-pot/pot.api b/src/plugins/ioam/lib-pot/pot.api index fa2fc126..c377cde0 100644 --- a/src/plugins/ioam/lib-pot/pot.api +++ b/src/plugins/ioam/lib-pot/pot.api @@ -27,7 +27,7 @@ @param list_name_len - length of the name of this profile list @param list_name - name of this profile list */ -define pot_profile_add { +autoreply define pot_profile_add { u32 client_index; u32 context; u8 id; @@ -42,22 +42,12 @@ define pot_profile_add { u8 list_name[0]; }; -/** \brief Proof of Transit profile add / del response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define pot_profile_add_reply { - u32 context; - i32 retval; -}; - - /** \brief Proof of Transit(POT): Activate POT profile in the list @param id - id of the profile @param list_name_len - length of the name of this profile list @param list_name - name of this profile list */ -define pot_profile_activate { +autoreply define pot_profile_activate { u32 client_index; u32 context; u8 id; @@ -65,37 +55,19 @@ define pot_profile_activate { u8 list_name[0]; }; -/** \brief Proof of Transit profile activate response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define pot_profile_activate_reply { - u32 context; - i32 retval; -}; - /** \brief Delete POT Profile @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param list_name_len - length of the name of the profile list @param list_name - name of profile list to delete */ -define pot_profile_del { +autoreply define pot_profile_del { u32 client_index; u32 context; u8 list_name_len; u8 list_name[0]; }; -/** \brief Proof of Transit profile add / del response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define pot_profile_del_reply { - u32 context; - i32 retval; -}; - /** \brief Show POT Profiles @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request diff --git a/src/plugins/ioam/lib-trace/trace.api b/src/plugins/ioam/lib-trace/trace.api index cb958325..2f45c6e2 100644 --- a/src/plugins/ioam/lib-trace/trace.api +++ b/src/plugins/ioam/lib-trace/trace.api @@ -22,7 +22,7 @@ @param trace_tsp- Timestamp resolution @param app_data - Application specific opaque */ -define trace_profile_add { +autoreply define trace_profile_add { u32 client_index; u32 context; u8 trace_type; @@ -32,37 +32,15 @@ define trace_profile_add { u32 app_data; }; -/** \brief Trace profile add / del response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define trace_profile_add_reply { - u32 context; - i32 retval; -}; - - - /** \brief Delete trace Profile @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request */ -define trace_profile_del { +autoreply define trace_profile_del { u32 client_index; u32 context; }; -/** \brief Trace profile add / del response 
- @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define trace_profile_del_reply { - u32 context; - i32 retval; -}; - - - /** \brief Show trace Profile @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request diff --git a/src/plugins/ioam/lib-vxlan-gpe/ioam_vxlan_gpe.api b/src/plugins/ioam/lib-vxlan-gpe/ioam_vxlan_gpe.api index 056529a4..a6761f07 100644 --- a/src/plugins/ioam/lib-vxlan-gpe/ioam_vxlan_gpe.api +++ b/src/plugins/ioam/lib-vxlan-gpe/ioam_vxlan_gpe.api @@ -24,7 +24,7 @@ @param trace_enable - iOAM Trace enabled or not flag */ -define vxlan_gpe_ioam_enable { +autoreply define vxlan_gpe_ioam_enable { u32 client_index; u32 context; u16 id; @@ -33,38 +33,18 @@ define vxlan_gpe_ioam_enable { u8 trace_enable; }; -/** \brief iOAM Over VxLAN-GPE - Set iOAM transport for VXLAN-GPE reply - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define vxlan_gpe_ioam_enable_reply { - u32 context; - i32 retval; -}; - - /** \brief iOAM for VxLAN-GPE disable @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param id - profile id */ -define vxlan_gpe_ioam_disable +autoreply define vxlan_gpe_ioam_disable { u32 client_index; u32 context; u16 id; }; -/** \brief vxlan_gpe_ioam disable response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define vxlan_gpe_ioam_disable_reply -{ - u32 context; - i32 retval; -}; - /** \brief Enable iOAM for a VNI (VXLAN-GPE) @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -73,7 +53,7 @@ define vxlan_gpe_ioam_disable_reply @param remote - IPv4/6 Address of the remote VTEP */ -define vxlan_gpe_ioam_vni_enable { +autoreply define vxlan_gpe_ioam_vni_enable { u32 client_index; u32 context; u32 vni; @@ -82,18 +62,6 @@ define vxlan_gpe_ioam_vni_enable { u8 is_ipv6; }; -/** \brief Reply to enable iOAM for a VNI (VXLAN-GPE) - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param retval - return value for request - -*/ -define vxlan_gpe_ioam_vni_enable_reply { - u32 client_index; - u32 context; - i32 retval; -}; - /** \brief Disable iOAM for a VNI (VXLAN-GPE) @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -102,7 +70,7 @@ define vxlan_gpe_ioam_vni_enable_reply { @param remote - IPv4/6 Address of the remote VTEP */ -define vxlan_gpe_ioam_vni_disable { +autoreply define vxlan_gpe_ioam_vni_disable { u32 client_index; u32 context; u32 vni; @@ -111,19 +79,6 @@ define vxlan_gpe_ioam_vni_disable { u8 is_ipv6; }; -/** \brief Reply to disable iOAM for a VNI (VXLAN-GPE) - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param retval - return value for request - -*/ -define vxlan_gpe_ioam_vni_disable_reply { - u32 client_index; - u32 context; - i32 retval; -}; - - /** \brief Enable iOAM for a VXLAN-GPE transit @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -131,7 +86,7 @@ define vxlan_gpe_ioam_vni_disable_reply { @param outer_fib_index- FIB index */ -define vxlan_gpe_ioam_transit_enable { +autoreply define 
vxlan_gpe_ioam_transit_enable { u32 client_index; u32 context; u32 outer_fib_index; @@ -139,18 +94,6 @@ define vxlan_gpe_ioam_transit_enable { u8 is_ipv6; }; -/** \brief Reply to enable iOAM for VXLAN-GPE transit - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param retval - return value for request - -*/ -define vxlan_gpe_ioam_transit_enable_reply { - u32 client_index; - u32 context; - i32 retval; -}; - /** \brief Disable iOAM for VXLAN-GPE transit @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -158,7 +101,7 @@ define vxlan_gpe_ioam_transit_enable_reply { @param outer_fib_index- FIB index */ -define vxlan_gpe_ioam_transit_disable { +autoreply define vxlan_gpe_ioam_transit_disable { u32 client_index; u32 context; u32 outer_fib_index; @@ -166,16 +109,3 @@ define vxlan_gpe_ioam_transit_disable { u8 is_ipv6; }; -/** \brief Reply to disable iOAM for VXLAN-GPE transit - @param client_index - opaque cookie to identify the sender - @param context - sender context, to match reply w/ request - @param retval - return value for request - -*/ -define vxlan_gpe_ioam_transit_disable_reply { - u32 client_index; - u32 context; - i32 retval; -}; - - diff --git a/src/plugins/lb/lb.api b/src/plugins/lb/lb.api index 39ee3c8f..32cc669b 100644 --- a/src/plugins/lb/lb.api +++ b/src/plugins/lb/lb.api @@ -8,7 +8,7 @@ @param flow_timeout - Time in seconds after which, if no packet is received for a given flow, the flow is removed from the established flow table. */ -define lb_conf +autoreply define lb_conf { u32 client_index; u32 context; @@ -18,11 +18,6 @@ define lb_conf u32 flow_timeout; }; -define lb_conf_reply { - u32 context; - i32 retval; -}; - /** \brief Add a virtual address (or prefix) @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -33,7 +28,7 @@ define lb_conf_reply { for this VIP (must be power of 2). @param is_del - The VIP should be removed. */ -define lb_add_del_vip { +autoreply define lb_add_del_vip { u32 client_index; u32 context; u8 ip_prefix[16]; @@ -43,11 +38,6 @@ define lb_add_del_vip { u8 is_del; }; -define lb_add_del_vip_reply { - u32 context; - i32 retval; -}; - /** \brief Add an application server for a given VIP @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -56,7 +46,7 @@ define lb_add_del_vip_reply { @param as_address - The application server address (IPv4 in lower order 32 bits). @param is_del - The AS should be removed. 
*/ -define lb_add_del_as { +autoreply define lb_add_del_as { u32 client_index; u32 context; u8 vip_ip_prefix[16]; @@ -64,8 +54,3 @@ define lb_add_del_as { u8 as_address[16]; u8 is_del; }; - -define lb_add_del_as_reply { - u32 context; - i32 retval; -}; diff --git a/src/plugins/memif/memif.api b/src/plugins/memif/memif.api index 6f946421..95e016c3 100644 --- a/src/plugins/memif/memif.api +++ b/src/plugins/memif/memif.api @@ -57,7 +57,7 @@ define memif_create_reply @param context - sender context, to match reply w/ request @param sw_if_index - software index of the interface to delete */ -define memif_delete +autoreply define memif_delete { u32 client_index; u32 context; @@ -65,16 +65,6 @@ define memif_delete u32 sw_if_index; }; -/** \brief Delete host-interface response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define memif_delete_reply -{ - u32 context; - i32 retval; -}; - /** \brief Memory interface details structure @param context - sender context, to match reply w/ request (memif_dump) @param sw_if_index - index of the interface diff --git a/src/plugins/snat/snat.api b/src/plugins/snat/snat.api index 9689f5f9..573b6753 100644 --- a/src/plugins/snat/snat.api +++ b/src/plugins/snat/snat.api @@ -29,7 +29,7 @@ @param vrf_id - VRF id of tenant, ~0 means independent of VRF @param is_add - 1 if add, 0 if delete */ -define snat_add_address_range { +autoreply define snat_add_address_range { u32 client_index; u32 context; u8 is_ip4; @@ -39,15 +39,6 @@ define snat_add_address_range { u8 is_add; }; -/** \brief Add S-NAT address range reply - @param context - sender context, to match reply w/ request - @param retval - return code -*/ -define snat_add_address_range_reply { - u32 context; - i32 retval; -}; - /** \brief Dump S-NAT addresses @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -77,7 +68,7 @@ define snat_address_details { @param is_inside - 1 if inside, 0 if outside @param sw_if_index - software index of the interface */ -define snat_interface_add_del_feature { +autoreply define snat_interface_add_del_feature { u32 client_index; u32 context; u8 is_add; @@ -85,15 +76,6 @@ define snat_interface_add_del_feature { u32 sw_if_index; }; -/** \brief Enable/disable S-NAT feature on the interface reply - @param context - sender context, to match reply w/ request - @param retval - return code -*/ -define snat_interface_add_del_feature_reply { - u32 context; - i32 retval; -}; - /** \brief Dump interfaces with S-NAT feature @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -130,7 +112,7 @@ define snat_interface_details { used) @param vfr_id - VRF ID */ -define snat_add_static_mapping { +autoreply define snat_add_static_mapping { u32 client_index; u32 context; u8 is_add; @@ -145,15 +127,6 @@ define snat_add_static_mapping { u32 vrf_id; }; -/** \brief Add/delete S-NAT static mapping reply - @param context - sender context, to match reply w/ request - @param retval - return code -*/ -define snat_add_static_mapping_reply { - u32 context; - i32 retval; -}; - /** \brief Dump S-NAT static mappings @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -257,21 +230,12 @@ define snat_show_config_reply @param context - sender context, to match reply w/ request @param worker_mask - S-NAT workers mask */ -define snat_set_workers { +autoreply 
define snat_set_workers { u32 client_index; u32 context; u64 worker_mask; }; -/** \brief Set S-NAT workers reply - @param context - sender context, to match reply w/ request - @param retval - return code -*/ -define snat_set_workers_reply { - u32 context; - i32 retval; -}; - /** \brief Dump S-NAT workers @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -300,7 +264,7 @@ define snat_worker_details { @param is_add - 1 if add, 0 if delete @param sw_if_index - software index of the interface */ -define snat_add_del_interface_addr { +autoreply define snat_add_del_interface_addr { u32 client_index; u32 context; u8 is_add; @@ -308,15 +272,6 @@ define snat_add_del_interface_addr { u32 sw_if_index; }; -/** \brief Add/delete S-NAT pool address from specific interfce reply - @param context - sender context, to match reply w/ request - @param retval - return code -*/ -define snat_add_del_interface_addr_reply { - u32 context; - i32 retval; -}; - /** \brief Dump S-NAT pool addresses interfaces @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -342,7 +297,7 @@ define snat_interface_addr_details { @param src_port - source port number @param enable - 1 if enable, 0 if disable */ -define snat_ipfix_enable_disable { +autoreply define snat_ipfix_enable_disable { u32 client_index; u32 context; u32 domain_id; @@ -350,15 +305,6 @@ define snat_ipfix_enable_disable { u8 enable; }; -/** \brief Enable/disable S-NAT IPFIX logging reply - @param context - sender context, to match reply w/ request - @param retval - return code -*/ -define snat_ipfix_enable_disable_reply { - u32 context; - i32 retval; -}; - /** \brief Dump S-NAT users @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -437,7 +383,7 @@ define snat_user_session_details { @param out_addr - outside IP address @param out_addr - outside IP address prefix length */ -define snat_add_det_map { +autoreply define snat_add_det_map { u32 client_index; u32 context; u8 is_add; @@ -449,15 +395,6 @@ define snat_add_det_map { u8 out_plen; }; -/** \brief Add/delete S-NAT deterministic mapping reply - @param context - sender context, to match reply w/ request - @param retval - return code -*/ -define snat_add_det_map_reply { - u32 context; - i32 retval; -}; - /** \brief Get outside address and port range from inside address @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -556,7 +493,7 @@ define snat_det_map_details { @param tcp_transitory - TCP transitory timeout (default 240sec) @param icmp - ICMP timeout (default 60sec) */ -define snat_det_set_timeouts { +autoreply define snat_det_set_timeouts { u32 client_index; u32 context; u32 udp; @@ -565,15 +502,6 @@ define snat_det_set_timeouts { u32 icmp; }; -/** \brief Set values of timeouts for deterministic NAT reply - @param context - sender context, to match reply w/ request - @param retval - return code -*/ -define snat_det_set_timeouts_reply { - u32 context; - i32 retval; -}; - /** \brief Get values of timeouts for deterministic NAT (seconds) @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request diff --git a/src/tools/vppapigen/gram.y b/src/tools/vppapigen/gram.y index de26af8d..9cea6023 100644 --- a/src/tools/vppapigen/gram.y +++ b/src/tools/vppapigen/gram.y @@ -38,7 +38,7 @@ void 
generate (YYSTYPE); %token NAME RPAR LPAR SEMI LBRACK RBRACK NUMBER PRIMTYPE BARF %token TPACKED DEFINE LCURLY RCURLY STRING UNION %token HELPER_STRING COMMA -%token NOVERSION MANUAL_PRINT MANUAL_ENDIAN TYPEONLY DONT_TRACE +%token NOVERSION MANUAL_PRINT MANUAL_ENDIAN TYPEONLY DONT_TRACE AUTOREPLY %% @@ -64,6 +64,7 @@ flag: | MANUAL_ENDIAN {$$ = $1;} | DONT_TRACE {$$ = $1;} | TYPEONLY {$$ = $1;} + | AUTOREPLY {$$ = $1;} ; defn: DEFINE NAME LCURLY defbody RCURLY SEMI diff --git a/src/tools/vppapigen/lex.c b/src/tools/vppapigen/lex.c index 733942ad..e6358143 100644 --- a/src/tools/vppapigen/lex.c +++ b/src/tools/vppapigen/lex.c @@ -27,6 +27,9 @@ #include "lex.h" #include "node.h" #include "tools/vppapigen/gram.h" +#include +#include +#include FILE *ifp, *ofp, *pythonfp, *jsonfp; char *vlib_app_name = "vpp"; @@ -38,6 +41,9 @@ int current_filename_allocated; unsigned long input_crc; unsigned long message_crc; int yydebug; +char *push_input_fifo; +char saved_ungetc_char; +char have_ungetc_char; /* * lexer variable definitions @@ -469,9 +475,50 @@ static char namebuf [MAXNAME]; static inline char getc_char (FILE *ifp) { + char rv; + + if (have_ungetc_char) { + have_ungetc_char = 0; + return saved_ungetc_char; + } + + if (clib_fifo_elts (push_input_fifo)) { + clib_fifo_sub1(push_input_fifo, rv); + return (rv & 0x7f); + } return ((char)(getc(ifp) & 0x7f)); } +u32 fe (char *fifo) +{ + return clib_fifo_elts (fifo); +} + +static inline void +ungetc_char (char c, FILE *ifp) +{ + saved_ungetc_char = c; + have_ungetc_char = 1; +} + +void autoreply (void *np_arg) +{ + static u8 *s; + node_t *np = (node_t *)np_arg; + int i; + + vec_reset_length (s); + + s = format (0, " define %s_reply\n", (char *)(np->data[0])); + s = format (s, "{\n"); + s = format (s, " u32 context;\n"); + s = format (s, " i32 retval;\n"); + s = format (s, "};\n"); + + for (i = 0; i < vec_len (s); i++) + clib_fifo_add1 (push_input_fifo, s[i]); +} + /* * yylex (well, yylex_1: The real yylex below does crc-hackery) */ @@ -595,7 +642,7 @@ static int yylex_1 (void) return (EOF); if (!isalnum (c) && c != '_') { - ungetc (c, ifp); + ungetc_char (c, ifp); namebuf [nameidx] = 0; the_lexer_state = START_STATE; return (name_check (namebuf, &yylval)); @@ -616,7 +663,7 @@ static int yylex_1 (void) return (EOF); if (!isdigit (c)) { - ungetc (c, ifp); + ungetc_char (c, ifp); namebuf [nameidx] = 0; the_lexer_state = START_STATE; yylval = (void *) atol(namebuf); @@ -889,6 +936,7 @@ int yylex (void) case MANUAL_ENDIAN: code = 276; break; case TYPEONLY: code = 278; break; case DONT_TRACE: code = 279; break; + case AUTOREPLY: code = 280; break; case EOF: code = ~0; break; /* hysterical compatibility */ @@ -929,6 +977,7 @@ static struct keytab { } keytab [] = /* Keep the table sorted, binary search used below! 
*/ { + {"autoreply", NODE_AUTOREPLY}, {"define", NODE_DEFINE}, {"dont_trace", NODE_DONT_TRACE}, {"f64", NODE_F64}, @@ -1005,6 +1054,10 @@ static int name_check (const char *s, YYSTYPE *token_value) *token_value = (YYSTYPE) NODE_FLAG_DONT_TRACE; return(DONT_TRACE); + case NODE_AUTOREPLY: + *token_value = (YYSTYPE) NODE_FLAG_AUTOREPLY; + return(AUTOREPLY); + case NODE_NOVERSION: return(NOVERSION); diff --git a/src/tools/vppapigen/lex.h b/src/tools/vppapigen/lex.h index a0fdc735..275cf685 100644 --- a/src/tools/vppapigen/lex.h +++ b/src/tools/vppapigen/lex.h @@ -24,6 +24,7 @@ extern int yylex (void); extern void yyerror (char *); extern int yyparse (void); +extern void autoreply (void *); #ifndef YYSTYPE #define YYSTYPE void * diff --git a/src/tools/vppapigen/node.c b/src/tools/vppapigen/node.c index 359ac9c9..9f234037 100644 --- a/src/tools/vppapigen/node.c +++ b/src/tools/vppapigen/node.c @@ -1050,6 +1050,11 @@ YYSTYPE set_flags(YYSTYPE a1, YYSTYPE a2) flags = (int)(uword) a1; np->flags |= flags; + + /* Generate a foo_reply_t right here */ + if (flags & NODE_FLAG_AUTOREPLY) + autoreply(np); + return (a2); } /* diff --git a/src/tools/vppapigen/node.h b/src/tools/vppapigen/node.h index 297d6036..65bd5d10 100644 --- a/src/tools/vppapigen/node.h +++ b/src/tools/vppapigen/node.h @@ -53,6 +53,7 @@ enum node_subclass { /* WARNING: indices must match the vft... */ NODE_MANUAL_PRINT, NODE_MANUAL_ENDIAN, NODE_DONT_TRACE, + NODE_AUTOREPLY, }; enum passid { @@ -84,6 +85,7 @@ typedef struct node_ { #define NODE_FLAG_MANUAL_ENDIAN (1<<1) #define NODE_FLAG_TYPEONLY (1<<3) #define NODE_FLAG_DONT_TRACE (1<<4) +#define NODE_FLAG_AUTOREPLY (1<<5) typedef struct node_vft_ { void (*print)(struct node_ *); diff --git a/src/vlibmemory/memclnt.api b/src/vlibmemory/memclnt.api index c38b483c..32e51407 100644 --- a/src/vlibmemory/memclnt.api +++ b/src/vlibmemory/memclnt.api @@ -72,7 +72,7 @@ define memclnt_read_timeout { /* * RPC */ -define rpc_call { +autoreply define rpc_call { u32 client_index; u32 context; u64 function; @@ -82,11 +82,6 @@ define rpc_call { u8 data[0]; }; -define rpc_reply { - i32 retval; - u32 context; -}; - /* * Lookup message-ID base by name */ diff --git a/src/vlibmemory/memory_vlib.c b/src/vlibmemory/memory_vlib.c index 7a536ee8..43574dea 100644 --- a/src/vlibmemory/memory_vlib.c +++ b/src/vlibmemory/memory_vlib.c @@ -1275,7 +1275,7 @@ VLIB_CLI_COMMAND (cli_show_api_plugin_command, static) = { static void vl_api_rpc_call_t_handler (vl_api_rpc_call_t * mp) { - vl_api_rpc_reply_t *rmp; + vl_api_rpc_call_reply_t *rmp; int (*fp) (void *); i32 rv = 0; vlib_main_t *vm = vlib_get_main (); @@ -1305,7 +1305,7 @@ vl_api_rpc_call_t_handler (vl_api_rpc_call_t * mp) if (q) { rmp = vl_msg_api_alloc_as_if_client (sizeof (*rmp)); - rmp->_vl_msg_id = ntohs (VL_API_RPC_REPLY); + rmp->_vl_msg_id = ntohs (VL_API_RPC_CALL_REPLY); rmp->context = mp->context; rmp->retval = rv; vl_msg_api_send_shmem (q, (u8 *) & rmp); @@ -1318,7 +1318,7 @@ vl_api_rpc_call_t_handler (vl_api_rpc_call_t * mp) } static void -vl_api_rpc_reply_t_handler (vl_api_rpc_reply_t * mp) +vl_api_rpc_call_reply_t_handler (vl_api_rpc_call_reply_t * mp) { clib_warning ("unimplemented"); } @@ -1415,7 +1415,7 @@ vl_api_trace_plugin_msg_ids_t_handler (vl_api_trace_plugin_msg_ids_t * mp) #define foreach_rpc_api_msg \ _(RPC_CALL,rpc_call) \ -_(RPC_REPLY,rpc_reply) +_(RPC_CALL_REPLY,rpc_call_reply) #define foreach_plugin_trace_msg \ _(TRACE_PLUGIN_MSG_IDS,trace_plugin_msg_ids) diff --git a/src/vnet/bfd/bfd.api b/src/vnet/bfd/bfd.api index 
2cdcfad3..7bcaa4c3 100644 --- a/src/vnet/bfd/bfd.api +++ b/src/vnet/bfd/bfd.api @@ -18,43 +18,23 @@ @param context - sender context, to match reply w/ request @param sw_if_index - interface to use as echo source */ -define bfd_udp_set_echo_source +autoreply define bfd_udp_set_echo_source { u32 client_index; u32 context; u32 sw_if_index; }; -/** \brief Set BFD feature response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define bfd_udp_set_echo_source_reply -{ - u32 context; - i32 retval; -}; - /** \brief Delete BFD echo source @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request */ -define bfd_udp_del_echo_source +autoreply define bfd_udp_del_echo_source { u32 client_index; u32 context; }; -/** \brief Delete BFD echo source response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define bfd_udp_del_echo_source_reply -{ - u32 context; - i32 retval; -}; - /** \brief Add UDP BFD session on interface @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -69,7 +49,7 @@ define bfd_udp_del_echo_source_reply @param bfd_key_id - key id sent out in BFD packets (if is_authenticated) @param conf_key_id - id of already configured key (if is_authenticated) */ -define bfd_udp_add +autoreply define bfd_udp_add { u32 client_index; u32 context; @@ -85,16 +65,6 @@ define bfd_udp_add u32 conf_key_id; }; -/** \brief Add UDP BFD session response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define bfd_udp_add_reply -{ - u32 context; - i32 retval; -}; - /** \brief Modify UDP BFD session on interface @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -106,7 +76,7 @@ define bfd_udp_add_reply @param is_ipv6 - local_addr, peer_addr are IPv6 if non-zero, otherwise IPv4 @param detect_mult - detect multiplier (# of packets missed before connection goes down) */ -define bfd_udp_mod +autoreply define bfd_udp_mod { u32 client_index; u32 context; @@ -119,16 +89,6 @@ define bfd_udp_mod u8 detect_mult; }; -/** \brief Modify UDP BFD session response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define bfd_udp_mod_reply -{ - u32 context; - i32 retval; -}; - /** \brief Delete UDP BFD session on interface @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -137,7 +97,7 @@ define bfd_udp_mod_reply @param peer_addr - peer address @param is_ipv6 - local_addr, peer_addr are IPv6 if non-zero, otherwise IPv4 */ -define bfd_udp_del +autoreply define bfd_udp_del { u32 client_index; u32 context; @@ -147,16 +107,6 @@ define bfd_udp_del u8 is_ipv6; }; -/** \brief Delete UDP BFD session response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define bfd_udp_del_reply -{ - u32 context; - i32 retval; -}; - /** \brief Get all BFD sessions @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -206,7 +156,7 @@ define bfd_udp_session_details @param is_ipv6 - local_addr, peer_addr are IPv6 if non-zero, otherwise IPv4 @param admin_up_down - set the admin state, 1 = up, 0 = down */ -define 
bfd_udp_session_set_flags +autoreply define bfd_udp_session_set_flags { u32 client_index; u32 context; @@ -217,23 +167,13 @@ define bfd_udp_session_set_flags u8 admin_up_down; }; -/** \brief Reply to bfd_udp_session_set_flags - @param context - sender context which was passed in the request - @param retval - return code of the set flags request -*/ -define bfd_udp_session_set_flags_reply -{ - u32 context; - i32 retval; -}; - /** \brief Register for BFD events @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param enable_disable - 1 => register for events, 0 => cancel registration @param pid - sender's pid */ -define want_bfd_events +autoreply define want_bfd_events { u32 client_index; u32 context; @@ -241,16 +181,6 @@ define want_bfd_events u32 pid; }; -/** \brief Reply for BFD events registration - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define want_bfd_events_reply -{ - u32 context; - i32 retval; -}; - /** \brief BFD UDP - add/replace key to configuration @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -259,7 +189,7 @@ define want_bfd_events_reply @param auth_type - authentication type (RFC 5880/4.1/Auth Type) @param key - key data */ -define bfd_auth_set_key +autoreply define bfd_auth_set_key { u32 client_index; u32 context; @@ -269,16 +199,6 @@ define bfd_auth_set_key u8 key[20]; }; -/** \brief BFD UDP - add/replace key reply - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define bfd_auth_set_key_reply -{ - u32 context; - i32 retval; -}; - /** \brief BFD UDP - delete key from configuration @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -286,23 +206,13 @@ define bfd_auth_set_key_reply @param key_len - length of key (must be non-zero) @param key - key data */ -define bfd_auth_del_key +autoreply define bfd_auth_del_key { u32 client_index; u32 context; u32 conf_key_id; }; -/** \brief BFD UDP - delete key reply - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define bfd_auth_del_key_reply -{ - u32 context; - i32 retval; -}; - /** \brief Get a list of configured authentication keys @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -338,7 +248,7 @@ define bfd_auth_keys_details @param bfd_key_id - key id sent out in BFD packets @param conf_key_id - id of already configured key */ -define bfd_udp_auth_activate +autoreply define bfd_udp_auth_activate { u32 client_index; u32 context; @@ -351,16 +261,6 @@ define bfd_udp_auth_activate u32 conf_key_id; }; -/** \brief BFD UDP - activate/change authentication reply - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define bfd_udp_auth_activate_reply -{ - u32 context; - i32 retval; -}; - /** \brief BFD UDP - deactivate authentication @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -370,7 +270,7 @@ define bfd_udp_auth_activate_reply @param is_ipv6 - local_addr, peer_addr are IPv6 if non-zero, otherwise IPv4 @param is_delayed - change is applied once peer applies the change (on first received non-authenticated packet) */ -define bfd_udp_auth_deactivate +autoreply define 
bfd_udp_auth_deactivate { u32 client_index; u32 context; @@ -381,16 +281,6 @@ define bfd_udp_auth_deactivate u8 is_delayed; }; -/** \brief BFD UDP - deactivate authentication reply - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define bfd_udp_auth_deactivate_reply -{ - u32 context; - i32 retval; -}; - /* * Local Variables: * eval: (c-set-style "gnu") diff --git a/src/vnet/classify/classify.api b/src/vnet/classify/classify.api index 51ebd6c8..cacb9bed 100644 --- a/src/vnet/classify/classify.api +++ b/src/vnet/classify/classify.api @@ -92,7 +92,7 @@ define classify_add_del_table_reply VRF id if action is 1 or 2. @param match[] - for add, match value for session, required */ -define classify_add_del_session +autoreply define classify_add_del_session { u32 client_index; u32 context; @@ -106,16 +106,6 @@ define classify_add_del_session u8 match[0]; }; -/** \brief Classify add / del session response - @param context - sender context, to match reply w/ request - @param retval - return code for the add/del session request -*/ -define classify_add_del_session_reply -{ - u32 context; - i32 retval; -}; - /** \brief Set/unset policer classify interface @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -127,7 +117,7 @@ define classify_add_del_session_reply Note: User is recommeneded to use just one valid table_index per call. (ip4_table_index, ip6_table_index, or l2_table_index) */ -define policer_classify_set_interface +autoreply define policer_classify_set_interface { u32 client_index; u32 context; @@ -138,16 +128,6 @@ define policer_classify_set_interface u8 is_add; }; -/** \brief Set/unset policer classify interface response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define policer_classify_set_interface_reply -{ - u32 context; - i32 retval; -}; - /** \brief Get list of policer classify interfaces and tables @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -308,7 +288,7 @@ define classify_session_details Note: User is recommeneded to use just one valid table_index per call. 
(ip4_table_index, ip6_table_index, or l2_table_index) */ -define flow_classify_set_interface { +autoreply define flow_classify_set_interface { u32 client_index; u32 context; u32 sw_if_index; @@ -317,15 +297,6 @@ define flow_classify_set_interface { u8 is_add; }; -/** \brief Set/unset flow classify interface response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define flow_classify_set_interface_reply { - u32 context; - i32 retval; -}; - /** \brief Get list of flow classify interfaces and tables @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -353,4 +324,4 @@ define flow_classify_details { * eval: (c-set-style "gnu") * End: */ - \ No newline at end of file + diff --git a/src/vnet/cop/cop.api b/src/vnet/cop/cop.api index b34dae80..69316001 100644 --- a/src/vnet/cop/cop.api +++ b/src/vnet/cop/cop.api @@ -20,7 +20,7 @@ @param enable_disable - 1 => enable, 0 => disable */ -define cop_interface_enable_disable +autoreply define cop_interface_enable_disable { u32 client_index; u32 context; @@ -28,17 +28,6 @@ define cop_interface_enable_disable u8 enable_disable; }; -/** \brief cop: interface enable/disable junk filtration reply - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ - -define cop_interface_enable_disable_reply -{ - u32 context; - i32 retval; -}; - /** \brief cop: enable/disable whitelist filtration features on an interface Note: the supplied fib_id must match in order to remove the feature! @@ -51,7 +40,7 @@ define cop_interface_enable_disable_reply @param default_cop - 1 => enable non-ip4, non-ip6 filtration 0=> disable it */ -define cop_whitelist_enable_disable +autoreply define cop_whitelist_enable_disable { u32 client_index; u32 context; @@ -62,17 +51,6 @@ define cop_whitelist_enable_disable u8 default_cop; }; -/** \brief cop: interface enable/disable junk filtration reply - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ - -define cop_whitelist_enable_disable_reply -{ - u32 context; - i32 retval; -}; - /** \brief get_node_graph - get a copy of the vpp node graph including the current set of graph arcs. 
@@ -85,4 +63,4 @@ define cop_whitelist_enable_disable_reply * eval: (c-set-style "gnu") * End: */ - \ No newline at end of file + diff --git a/src/vnet/devices/af_packet/af_packet.api b/src/vnet/devices/af_packet/af_packet.api index 9fb2a207..8d40ad60 100644 --- a/src/vnet/devices/af_packet/af_packet.api +++ b/src/vnet/devices/af_packet/af_packet.api @@ -46,7 +46,7 @@ define af_packet_create_reply @param context - sender context, to match reply w/ request @param host_if_name - interface name */ -define af_packet_delete +autoreply define af_packet_delete { u32 client_index; u32 context; @@ -54,16 +54,6 @@ define af_packet_delete u8 host_if_name[64]; }; -/** \brief Delete host-interface response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define af_packet_delete_reply -{ - u32 context; - i32 retval; -}; - /* * Local Variables: * eval: (c-set-style "gnu") diff --git a/src/vnet/devices/netmap/netmap.api b/src/vnet/devices/netmap/netmap.api index 377ccffd..8dc698b9 100644 --- a/src/vnet/devices/netmap/netmap.api +++ b/src/vnet/devices/netmap/netmap.api @@ -22,7 +22,7 @@ @param is_pipe - is pipe @param is_master - 0=slave, 1=master */ -define netmap_create +autoreply define netmap_create { u32 client_index; u32 context; @@ -34,22 +34,12 @@ define netmap_create u8 is_master; }; -/** \brief Create netmap response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define netmap_create_reply -{ - u32 context; - i32 retval; -}; - /** \brief Delete netmap @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param netmap_if_name - interface name */ -define netmap_delete +autoreply define netmap_delete { u32 client_index; u32 context; @@ -57,16 +47,6 @@ define netmap_delete u8 netmap_if_name[64]; }; -/** \brief Delete netmap response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define netmap_delete_reply -{ - u32 context; - i32 retval; -}; - /* * Local Variables: * eval: (c-set-style "gnu") diff --git a/src/vnet/devices/virtio/vhost_user.api b/src/vnet/devices/virtio/vhost_user.api index 4f604e45..df7ce7ab 100644 --- a/src/vnet/devices/virtio/vhost_user.api +++ b/src/vnet/devices/virtio/vhost_user.api @@ -53,7 +53,7 @@ define create_vhost_user_if_reply @param sock_filename - unix socket filename, used to speak with frontend @param operation_mode - polling=0, interrupt=1, or adaptive=2 */ -define modify_vhost_user_if +autoreply define modify_vhost_user_if { u32 client_index; u32 context; @@ -65,36 +65,16 @@ define modify_vhost_user_if u8 operation_mode; }; -/** \brief vhost-user interface modify response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define modify_vhost_user_if_reply -{ - u32 context; - i32 retval; -}; - /** \brief vhost-user interface delete request @param client_index - opaque cookie to identify the sender */ -define delete_vhost_user_if +autoreply define delete_vhost_user_if { u32 client_index; u32 context; u32 sw_if_index; }; -/** \brief vhost-user interface delete response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define delete_vhost_user_if_reply -{ - u32 context; - i32 retval; -}; - /** \brief Vhost-user interface details structure (fix this) @param sw_if_index - index of the interface @param 
interface_name - name of interface diff --git a/src/vnet/dhcp/dhcp.api b/src/vnet/dhcp/dhcp.api index 2db85a79..eb0b070d 100644 --- a/src/vnet/dhcp/dhcp.api +++ b/src/vnet/dhcp/dhcp.api @@ -24,7 +24,7 @@ @param dhcp_server[] - server address @param dhcp_src_address[] - */ -define dhcp_proxy_config +autoreply define dhcp_proxy_config { u32 client_index; u32 context; @@ -36,16 +36,6 @@ define dhcp_proxy_config u8 dhcp_src_address[16]; }; -/** \brief DHCP Proxy config response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define dhcp_proxy_config_reply -{ - u32 context; - i32 retval; -}; - /** \brief DHCP Proxy set / unset vss request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -55,7 +45,7 @@ define dhcp_proxy_config_reply @param is_ipv6 - ip6 if non-zero, else ip4 @param is_add - set vss if non-zero, else delete */ -define dhcp_proxy_set_vss +autoreply define dhcp_proxy_set_vss { u32 client_index; u32 context; @@ -66,16 +56,6 @@ define dhcp_proxy_set_vss u8 is_add; }; -/** \brief DHCP proxy set / unset vss response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define dhcp_proxy_set_vss_reply -{ - u32 context; - i32 retval; -}; - /** \brief DHCP Client config add / del request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -86,7 +66,7 @@ define dhcp_proxy_set_vss_reply via dhcp_compl_event API message if non-zero @param pid - sender's pid */ -define dhcp_client_config +autoreply define dhcp_client_config { u32 client_index; u32 context; @@ -97,16 +77,6 @@ define dhcp_client_config u32 pid; }; -/** \brief DHCP Client config response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define dhcp_client_config_reply -{ - u32 context; - i32 retval; -}; - /** \brief Tell client about a DHCP completion event @param client_index - opaque cookie to identify the sender @param pid - client pid registered to receive notification @@ -162,4 +132,4 @@ manual_endian manual_print define dhcp_proxy_details * Local Variables: * eval: (c-set-style "gnu") * End: - */ \ No newline at end of file + */ diff --git a/src/vnet/flow/flow.api b/src/vnet/flow/flow.api index 0e0f99bf..1c5e8c5c 100644 --- a/src/vnet/flow/flow.api +++ b/src/vnet/flow/flow.api @@ -24,7 +24,7 @@ @param template_interval - number of seconds after which to resend template @param udp_checksum - UDP checksum calculation enable flag */ -define set_ipfix_exporter +autoreply define set_ipfix_exporter { u32 client_index; u32 context; @@ -37,15 +37,6 @@ define set_ipfix_exporter u8 udp_checksum; }; -/** \brief Reply to IPFIX exporter configure request - @param context - sender context which was passed in the request -*/ -define set_ipfix_exporter_reply -{ - u32 context; - i32 retval; -}; - /** \brief IPFIX exporter dump request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -84,22 +75,13 @@ define ipfix_exporter_details @param domain_id - domain ID reported in IPFIX messages for classify stream @param src_port - source port of UDP session for classify stream */ -define set_ipfix_classify_stream { +autoreply define set_ipfix_classify_stream { u32 client_index; u32 context; u32 domain_id; u16 src_port; }; -/** \brief IPFIX classify stream 
configure response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define set_ipfix_classify_stream_reply { - u32 context; - i32 retval; -}; - /** \brief IPFIX classify stream dump request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -127,7 +109,7 @@ define ipfix_classify_stream_details { @param ip_version - version of IP used in the classifier table @param transport_protocol - transport protocol used in the classifier table or 255 for unspecified */ -define ipfix_classify_table_add_del { +autoreply define ipfix_classify_table_add_del { u32 client_index; u32 context; u32 table_id; @@ -136,14 +118,6 @@ define ipfix_classify_table_add_del { u8 is_add; }; -/** \brief IPFIX add classifier table response - @param context - sender context which was passed in the request -*/ -define ipfix_classify_table_add_del_reply { - u32 context; - i32 retval; -}; - /** \brief IPFIX classify tables dump request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request diff --git a/src/vnet/interface.api b/src/vnet/interface.api index 85fd73fb..9df63f18 100644 --- a/src/vnet/interface.api +++ b/src/vnet/interface.api @@ -6,7 +6,7 @@ @param link_up_down - Oper state sent on change event, not used in config. @param deleted - interface was deleted */ -define sw_interface_set_flags +autoreply define sw_interface_set_flags { u32 client_index; u32 context; @@ -17,23 +17,13 @@ define sw_interface_set_flags u8 deleted; }; -/** \brief Reply to sw_interface_set_flags - @param context - sender context which was passed in the request - @param retval - return code of the set flags request -*/ -define sw_interface_set_flags_reply -{ - u32 context; - i32 retval; -}; - /** \brief Set interface MTU @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param sw_if_index - index of the interface to set MTU on @param mtu - MTU */ -define sw_interface_set_mtu +autoreply define sw_interface_set_mtu { u32 client_index; u32 context; @@ -41,23 +31,13 @@ define sw_interface_set_mtu u16 mtu; }; -/** \brief Reply to sw_interface_set_mtu - @param context - sender context which was passed in the request - @param retval - return code of the set flags request -*/ -define sw_interface_set_mtu_reply -{ - u32 context; - i32 retval; -}; - /** \brief Register for interface events @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param enable_disable - 1 => register for events, 0 => cancel registration @param pid - sender's pid */ -define want_interface_events +autoreply define want_interface_events { u32 client_index; u32 context; @@ -65,16 +45,6 @@ define want_interface_events u32 pid; }; -/** \brief Reply for interface events registration - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define want_interface_events_reply -{ - u32 context; - i32 retval; -}; - /** \brief Interface details structure (fix this) @param sw_if_index - index of the interface @param sup_sw_if_index - index of parent interface if any, else same as sw_if_index @@ -184,7 +154,7 @@ define sw_interface_dump @param address_length - address length in bytes, 4 for ip4, 16 for ip6 @param address - array of address bytes */ -define sw_interface_add_del_address +autoreply define 
sw_interface_add_del_address { u32 client_index; u32 context; @@ -196,16 +166,6 @@ define sw_interface_add_del_address u8 address[16]; }; -/** \brief Reply to sw_interface_add_del_address - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define sw_interface_add_del_address_reply -{ - u32 context; - i32 retval; -}; - /** \brief Associate the specified interface with a fib table @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -213,7 +173,7 @@ define sw_interface_add_del_address_reply @param is_ipv6 - if non-zero ipv6, else ipv4 @param vrf_id - fib table/vrd id to associate the interface with */ -define sw_interface_set_table +autoreply define sw_interface_set_table { u32 client_index; u32 context; @@ -222,16 +182,6 @@ define sw_interface_set_table u32 vrf_id; }; -/** \brief Reply to sw_interface_set_table - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define sw_interface_set_table_reply -{ - u32 context; - i32 retval; -}; - /** \brief Get VRF id assigned to interface @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -280,7 +230,7 @@ define vnet_interface_counters @param unnumbered_sw_if_index - interface which will use the address @param is_add - if non-zero set the association, else unset it */ -define sw_interface_set_unnumbered +autoreply define sw_interface_set_unnumbered { u32 client_index; u32 context; @@ -289,38 +239,18 @@ define sw_interface_set_unnumbered u8 is_add; }; -/** \brief Set unnumbered interface add / del response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define sw_interface_set_unnumbered_reply -{ - u32 context; - i32 retval; -}; - /** \brief Clear interface statistics @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param sw_if_index - index of the interface to clear statistics */ -define sw_interface_clear_stats +autoreply define sw_interface_clear_stats { u32 client_index; u32 context; u32 sw_if_index; }; -/** \brief Reply to sw_interface_clear_stats - @param context - sender context which was passed in the request - @param retval - return code of the set flags request -*/ -define sw_interface_clear_stats_reply -{ - u32 context; - i32 retval; -}; - /** \brief Set / clear software interface tag @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -328,7 +258,7 @@ define sw_interface_clear_stats_reply @param add_del - 1 = add, 0 = delete @param tag - an ascii tag */ -define sw_interface_tag_add_del +autoreply define sw_interface_tag_add_del { u32 client_index; u32 context; @@ -337,23 +267,13 @@ define sw_interface_tag_add_del u8 tag[64]; }; -/** \brief Reply to set / clear software interface tag - @param context - sender context which was passed in the request - @param retval - return code for the request -*/ -define sw_interface_tag_add_del_reply -{ - u32 context; - i32 retval; -}; - /** \brief Set an interface's MAC address @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param sw_if_index - the interface whose MAC will be set @param mac_addr - the new MAC address */ -define sw_interface_set_mac_address +autoreply define 
sw_interface_set_mac_address { u32 client_index; u32 context; @@ -361,16 +281,6 @@ define sw_interface_set_mac_address u8 mac_address[6]; }; -/** \brief Reply to setting an interface MAC address request - @param context - sender context which was passed in the request - @param retval - return code for the request -*/ -define sw_interface_set_mac_address_reply -{ - u32 context; - i32 retval; -}; - /* * Local Variables: * eval: (c-set-style "gnu") diff --git a/src/vnet/ip/ip.api b/src/vnet/ip/ip.api index 6af1714f..7097a130 100644 --- a/src/vnet/ip/ip.api +++ b/src/vnet/ip/ip.api @@ -136,7 +136,7 @@ define ip_neighbor_details { @param mac_address - l2 address of the neighbor @param dst_address - ip4 or ip6 address of the neighbor */ -define ip_neighbor_add_del +autoreply define ip_neighbor_add_del { u32 client_index; u32 context; @@ -150,16 +150,6 @@ define ip_neighbor_add_del u8 dst_address[16]; }; -/** \brief Reply for IP Neighbor add / delete request - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define ip_neighbor_add_del_reply -{ - u32 context; - i32 retval; -}; - /** \brief Set the ip flow hash config for a fib request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -172,7 +162,7 @@ define ip_neighbor_add_del_reply @param proto -if non-zero include proto in flow hash @param reverse - if non-zero include reverse in flow hash */ -define set_ip_flow_hash +autoreply define set_ip_flow_hash { u32 client_index; u32 context; @@ -186,16 +176,6 @@ define set_ip_flow_hash u8 reverse; }; -/** \brief Set the ip flow hash config for a fib response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define set_ip_flow_hash_reply -{ - u32 context; - i32 retval; -}; - /** \brief IPv6 router advertisement config request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -213,7 +193,7 @@ define set_ip_flow_hash_reply @param initial_count - @param initial_interval - */ -define sw_interface_ip6nd_ra_config +autoreply define sw_interface_ip6nd_ra_config { u32 client_index; u32 context; @@ -233,16 +213,6 @@ define sw_interface_ip6nd_ra_config u32 initial_interval; }; -/** \brief IPv6 router advertisement config response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define sw_interface_ip6nd_ra_config_reply -{ - u32 context; - i32 retval; -}; - /** \brief IPv6 router advertisement prefix config request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -272,7 +242,7 @@ define sw_interface_ip6nd_ra_config_reply preferred [ADDRCONF]. A value of all one bits (0xffffffff) represents infinity. 
*/ -define sw_interface_ip6nd_ra_prefix +autoreply define sw_interface_ip6nd_ra_prefix { u32 client_index; u32 context; @@ -289,16 +259,6 @@ define sw_interface_ip6nd_ra_prefix u32 pref_lifetime; }; -/** \brief IPv6 router advertisement prefix config response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define sw_interface_ip6nd_ra_prefix_reply -{ - u32 context; - i32 retval; -}; - /** \brief IPv6 ND proxy config @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -306,7 +266,7 @@ define sw_interface_ip6nd_ra_prefix_reply @param address - The address of the host for which to proxy for @param is_add - Adding or deleting */ -define ip6nd_proxy_add_del +autoreply define ip6nd_proxy_add_del { u32 client_index; u32 context; @@ -315,16 +275,6 @@ define ip6nd_proxy_add_del u8 address[16]; }; -/** \brief IPv6 ND proxy response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define ip6nd_proxy_add_del_reply -{ - u32 context; - i32 retval; -}; - /** \brief IPv6 ND proxy details returned after request @param context - sender context, to match reply w/ request @param retval - return code for the request @@ -355,7 +305,7 @@ define ip6nd_proxy_dump @param sw_if_index - interface used to reach neighbor @param enable - if non-zero enable ip6 on interface, else disable */ -define sw_interface_ip6_enable_disable +autoreply define sw_interface_ip6_enable_disable { u32 client_index; u32 context; @@ -363,23 +313,13 @@ define sw_interface_ip6_enable_disable u8 enable; /* set to true if enable */ }; -/** \brief IPv6 interface enable / disable response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define sw_interface_ip6_enable_disable_reply -{ - u32 context; - i32 retval; -}; - /** \brief IPv6 set link local address on interface request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param sw_if_index - interface to set link local on @param address[] - the new link local address */ -define sw_interface_ip6_set_link_local_address +autoreply define sw_interface_ip6_set_link_local_address { u32 client_index; u32 context; @@ -387,16 +327,6 @@ define sw_interface_ip6_set_link_local_address u8 address[16]; }; -/** \brief IPv6 set link local address on interface response - @param context - sender context, to match reply w/ request - @param retval - error code for the request -*/ -define sw_interface_ip6_set_link_local_address_reply -{ - u32 context; - i32 retval; -}; - /** \brief Add / del route request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -422,7 +352,7 @@ define sw_interface_ip6_set_link_local_address_reply @param next_hop_out_label_stack - the next-hop output label stack, outer most first @param next_hop_via_label - The next-hop is a resolved via a local label */ -define ip_add_del_route +autoreply define ip_add_del_route { u32 client_index; u32 context; @@ -452,16 +382,6 @@ define ip_add_del_route u32 next_hop_out_label_stack[next_hop_n_out_labels]; }; -/** \brief Reply for add / del route request - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define ip_add_del_route_reply -{ - u32 context; - i32 retval; -}; - /** \brief Add / del route request 
@param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -470,7 +390,7 @@ define ip_add_del_route_reply FIXME */ -define ip_mroute_add_del +autoreply define ip_mroute_add_del { u32 client_index; u32 context; @@ -488,16 +408,6 @@ define ip_mroute_add_del u8 src_address[16]; }; -/** \brief Reply for add / del mroute request - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define ip_mroute_add_del_reply -{ - u32 context; - i32 retval; -}; - /** \brief Dump IP multicast fib table @param client_index - opaque cookie to identify the sender */ diff --git a/src/vnet/ipsec/ipsec.api b/src/vnet/ipsec/ipsec.api index ef090f84..203c5272 100644 --- a/src/vnet/ipsec/ipsec.api +++ b/src/vnet/ipsec/ipsec.api @@ -20,7 +20,7 @@ @param spd_id - SPD instance id (control plane allocated) */ -define ipsec_spd_add_del +autoreply define ipsec_spd_add_del { u32 client_index; u32 context; @@ -28,17 +28,6 @@ define ipsec_spd_add_del u32 spd_id; }; -/** \brief Reply for IPsec: Add/delete Security Policy Database entry - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ - -define ipsec_spd_add_del_reply -{ - u32 context; - i32 retval; -}; - /** \brief IPsec: Add/delete SPD from interface @param client_index - opaque cookie to identify the sender @@ -49,7 +38,7 @@ define ipsec_spd_add_del_reply */ -define ipsec_interface_add_del_spd +autoreply define ipsec_interface_add_del_spd { u32 client_index; u32 context; @@ -59,17 +48,6 @@ define ipsec_interface_add_del_spd u32 spd_id; }; -/** \brief Reply for IPsec: Add/delete SPD from interface - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ - -define ipsec_interface_add_del_spd_reply -{ - u32 context; - i32 retval; -}; - /** \brief IPsec: Add/delete Security Policy Database entry See RFC 4301, 4.4.1.1 on how to match packet to selectors @@ -95,7 +73,7 @@ define ipsec_interface_add_del_spd_reply */ -define ipsec_spd_add_del_entry +autoreply define ipsec_spd_add_del_entry { u32 client_index; u32 context; @@ -125,17 +103,6 @@ define ipsec_spd_add_del_entry u32 sa_id; }; -/** \brief Reply for IPsec: Add/delete Security Policy Database entry - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ - -define ipsec_spd_add_del_entry_reply -{ - u32 context; - i32 retval; -}; - /** \brief IPsec: Add/delete Security Association Database entry @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -167,7 +134,7 @@ define ipsec_spd_add_del_entry_reply IPsec tunnel address copy mode (to support GDOI) */ -define ipsec_sad_add_del_entry +autoreply define ipsec_sad_add_del_entry { u32 client_index; u32 context; @@ -195,17 +162,6 @@ define ipsec_sad_add_del_entry u8 tunnel_dst_address[16]; }; -/** \brief Reply for IPsec: Add/delete Security Association Database entry - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ - -define ipsec_sad_add_del_entry_reply -{ - u32 context; - i32 retval; -}; - /** \brief IPsec: Update Security Association keys @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -219,7 +175,7 @@ define ipsec_sad_add_del_entry_reply @param integrity_key - integrity keying material */ -define ipsec_sa_set_key +autoreply 
define ipsec_sa_set_key { u32 client_index; u32 context; @@ -233,17 +189,6 @@ define ipsec_sa_set_key u8 integrity_key[128]; }; -/** \brief Reply for IPsec: Update Security Association keys - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ - -define ipsec_sa_set_key_reply -{ - u32 context; - i32 retval; -}; - /** \brief IKEv2: Add/delete profile @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -251,7 +196,7 @@ define ipsec_sa_set_key_reply @param name - IKEv2 profile name @param is_add - Add IKEv2 profile if non-zero, else delete */ -define ikev2_profile_add_del +autoreply define ikev2_profile_add_del { u32 client_index; u32 context; @@ -260,16 +205,6 @@ define ikev2_profile_add_del u8 is_add; }; -/** \brief Reply for IKEv2: Add/delete profile - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define ikev2_profile_add_del_reply -{ - u32 context; - i32 retval; -}; - /** \brief IKEv2: Set IKEv2 profile authentication method @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -280,7 +215,7 @@ define ikev2_profile_add_del_reply @param data_len - Authentication data length @param data - Authentication data (for rsa-sig cert file path) */ -define ikev2_profile_set_auth +autoreply define ikev2_profile_set_auth { u32 client_index; u32 context; @@ -292,16 +227,6 @@ define ikev2_profile_set_auth u8 data[0]; }; -/** \brief Reply for IKEv2: Set IKEv2 profile authentication method - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define ikev2_profile_set_auth_reply -{ - u32 context; - i32 retval; -}; - /** \brief IKEv2: Set IKEv2 profile local/remote identification @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -312,7 +237,7 @@ define ikev2_profile_set_auth_reply @param data_len - Identification data length @param data - Identification data */ -define ikev2_profile_set_id +autoreply define ikev2_profile_set_id { u32 client_index; u32 context; @@ -324,16 +249,6 @@ define ikev2_profile_set_id u8 data[0]; }; -/** \brief Reply for IKEv2: - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define ikev2_profile_set_id_reply -{ - u32 context; - i32 retval; -}; - /** \brief IKEv2: Set IKEv2 profile traffic selector parameters @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -346,7 +261,7 @@ define ikev2_profile_set_id_reply @param start_addr - The smallest address included in traffic selector @param end_addr - The largest address included in traffic selector */ -define ikev2_profile_set_ts +autoreply define ikev2_profile_set_ts { u32 client_index; u32 context; @@ -360,23 +275,13 @@ define ikev2_profile_set_ts u32 end_addr; }; -/** \brief Reply for IKEv2: Set IKEv2 profile traffic selector parameters - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define ikev2_profile_set_ts_reply -{ - u32 context; - i32 retval; -}; - /** \brief IKEv2: Set IKEv2 local RSA private key @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param key_file - Key file absolute path */ -define ikev2_set_local_key 
+autoreply define ikev2_set_local_key { u32 client_index; u32 context; @@ -384,16 +289,6 @@ define ikev2_set_local_key u8 key_file[256]; }; -/** \brief Reply for IKEv2: Set IKEv2 local key - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define ikev2_set_local_key_reply -{ - u32 context; - i32 retval; -}; - /** \brief IKEv2: Set IKEv2 responder interface and IP address @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -402,7 +297,7 @@ define ikev2_set_local_key_reply @param sw_if_index - interface index @param address - interface address */ -define ikev2_set_responder +autoreply define ikev2_set_responder { u32 client_index; u32 context; @@ -412,17 +307,6 @@ define ikev2_set_responder u8 address[4]; }; -/** \brief Reply for IKEv2: Set IKEv2 responder interface and IP address - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define ikev2_set_responder_reply -{ - u32 context; - i32 retval; -}; - - /** \brief IKEv2: Set IKEv2 IKE transforms in SA_INIT proposal (RFC 7296) @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -434,7 +318,7 @@ define ikev2_set_responder_reply @param dh_group - Diffie-Hellman group */ -define ikev2_set_ike_transforms +autoreply define ikev2_set_ike_transforms { u32 client_index; u32 context; @@ -446,16 +330,6 @@ define ikev2_set_ike_transforms u32 dh_group; }; -/** \brief Reply for IKEv2: Set IKEv2 IKE transforms - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define ikev2_set_ike_transforms_reply -{ - u32 context; - i32 retval; -}; - /** \brief IKEv2: Set IKEv2 ESP transforms in SA_INIT proposal (RFC 7296) @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -467,7 +341,7 @@ define ikev2_set_ike_transforms_reply @param dh_group - Diffie-Hellman group */ -define ikev2_set_esp_transforms +autoreply define ikev2_set_esp_transforms { u32 client_index; u32 context; @@ -479,16 +353,6 @@ define ikev2_set_esp_transforms u32 dh_group; }; -/** \brief Reply for IKEv2: Set IKEv2 ESP transforms - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define ikev2_set_esp_transforms_reply -{ - u32 context; - i32 retval; -}; - /** \brief IKEv2: Set Child SA lifetime, limited by time and/or data @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -500,7 +364,7 @@ define ikev2_set_esp_transforms_reply @param lifetime_maxdata - SA maximum life time in bytes (0 to disable) */ -define ikev2_set_sa_lifetime +autoreply define ikev2_set_sa_lifetime { u32 client_index; u32 context; @@ -512,16 +376,6 @@ define ikev2_set_sa_lifetime u64 lifetime_maxdata; }; -/** \brief Reply for IKEv2: Set Child SA lifetime - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define ikev2_set_sa_lifetime_reply -{ - u32 context; - i32 retval; -}; - /** \brief IKEv2: Initiate the SA_INIT exchange @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -529,7 +383,7 @@ define ikev2_set_sa_lifetime_reply @param name - IKEv2 profile name */ -define ikev2_initiate_sa_init +autoreply define ikev2_initiate_sa_init { 
u32 client_index; u32 context; @@ -537,16 +391,6 @@ define ikev2_initiate_sa_init u8 name[64]; }; -/** \brief Reply for IKEv2: Initiate the SA_INIT exchange - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define ikev2_initiate_sa_init_reply -{ - u32 context; - i32 retval; -}; - /** \brief IKEv2: Initiate the delete IKE SA exchange @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -554,7 +398,7 @@ define ikev2_initiate_sa_init_reply @param ispi - IKE SA initiator SPI */ -define ikev2_initiate_del_ike_sa +autoreply define ikev2_initiate_del_ike_sa { u32 client_index; u32 context; @@ -562,16 +406,6 @@ define ikev2_initiate_del_ike_sa u64 ispi; }; -/** \brief Reply for IKEv2: Initiate the delete IKE SA exchange - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define ikev2_initiate_del_ike_sa_reply -{ - u32 context; - i32 retval; -}; - /** \brief IKEv2: Initiate the delete Child SA exchange @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -579,7 +413,7 @@ define ikev2_initiate_del_ike_sa_reply @param ispi - Child SA initiator SPI */ -define ikev2_initiate_del_child_sa +autoreply define ikev2_initiate_del_child_sa { u32 client_index; u32 context; @@ -587,16 +421,6 @@ define ikev2_initiate_del_child_sa u32 ispi; }; -/** \brief Reply for IKEv2: Initiate the delete Child SA exchange - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define ikev2_initiate_del_child_sa_reply -{ - u32 context; - i32 retval; -}; - /** \brief IKEv2: Initiate the rekey Child SA exchange @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -604,7 +428,7 @@ define ikev2_initiate_del_child_sa_reply @param ispi - Child SA initiator SPI */ -define ikev2_initiate_rekey_child_sa +autoreply define ikev2_initiate_rekey_child_sa { u32 client_index; u32 context; @@ -612,16 +436,6 @@ define ikev2_initiate_rekey_child_sa u32 ispi; }; -/** \brief Reply for IKEv2: Initiate the rekey Child SA exchange - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define ikev2_initiate_rekey_child_sa_reply -{ - u32 context; - i32 retval; -}; - /** \brief Dump ipsec policy database data @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -682,4 +496,4 @@ define ipsec_spd_details { * eval: (c-set-style "gnu") * End: */ - \ No newline at end of file + diff --git a/src/vnet/l2/l2.api b/src/vnet/l2/l2.api index c23eebec..db42d635 100644 --- a/src/vnet/l2/l2.api +++ b/src/vnet/l2/l2.api @@ -70,66 +70,36 @@ define l2_fib_table_dump @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request */ -define l2_fib_clear_table +autoreply define l2_fib_clear_table { u32 client_index; u32 context; }; -/** \brief L2 fib clear table response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define l2_fib_clear_table_reply -{ - u32 context; - i32 retval; -}; - /** \brief L2 FIB flush bridge domain entries @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param bd_id - the entry's bridge 
domain id */ -define l2fib_flush_bd +autoreply define l2fib_flush_bd { u32 client_index; u32 context; u32 bd_id; }; -/** \brief L2 FIB flush bridge domain entries response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define l2fib_flush_bd_reply -{ - u32 context; - i32 retval; -}; - /** \brief L2 FIB flush interface entries @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param bd_id - the entry's bridge domain id */ -define l2fib_flush_int +autoreply define l2fib_flush_int { u32 client_index; u32 context; u32 sw_if_index; }; -/** \brief L2 FIB flush interface entries response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define l2fib_flush_int_reply -{ - u32 context; - i32 retval; -}; - /** \brief L2 FIB add entry request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -140,7 +110,7 @@ define l2fib_flush_int_reply @param static_mac - @param filter_mac - */ -define l2fib_add_del +autoreply define l2fib_add_del { u32 client_index; u32 context; @@ -153,16 +123,6 @@ define l2fib_add_del u8 bvi_mac; }; -/** \brief L2 FIB add entry response - @param context - sender context, to match reply w/ request - @param retval - return code for the add l2fib entry request -*/ -define l2fib_add_del_reply -{ - u32 context; - i32 retval; -}; - /** \brief Set L2 flags request !!! TODO - need more info, feature bits in l2_input.h @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -196,7 +156,7 @@ define l2_flags_reply @param bd_id - the bridge domain to create @param mac_age - mac aging time in min, 0 for disabled */ -define bridge_domain_set_mac_age +autoreply define bridge_domain_set_mac_age { u32 client_index; u32 context; @@ -204,16 +164,6 @@ define bridge_domain_set_mac_age u8 mac_age; }; -/** \brief Set bridge domain response - @param context - sender context, to match reply w/ request - @param retval - return code for the set l2 bits request -*/ -define bridge_domain_set_mac_age_reply -{ - u32 context; - i32 retval; -}; - /** \brief L2 bridge domain add or delete request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -226,7 +176,7 @@ define bridge_domain_set_mac_age_reply @param mac_age - mac aging time in min, 0 for disabled @param is_add - add or delete flag */ -define bridge_domain_add_del +autoreply define bridge_domain_add_del { u32 client_index; u32 context; @@ -240,16 +190,6 @@ define bridge_domain_add_del u8 is_add; }; -/** \brief L2 bridge domain add or delete response - @param context - sender context, to match reply w/ request - @param retval - return code for the set bridge flags request -*/ -define bridge_domain_add_del_reply -{ - u32 context; - i32 retval; -}; - /** \brief L2 bridge domain request operational state details @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -337,7 +277,7 @@ define bridge_flags_reply @param tag1 - Needed for any push or translate vtr op @param tag2 - Needed for any push 2 or translate x-2 vtr ops */ -define l2_interface_vlan_tag_rewrite +autoreply define l2_interface_vlan_tag_rewrite { u32 client_index; u32 context; @@ -348,16 +288,6 @@ define l2_interface_vlan_tag_rewrite u32 
tag2; // second pushed tag }; -/** \brief L2 interface vlan tag rewrite response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define l2_interface_vlan_tag_rewrite_reply -{ - u32 context; - i32 retval; -}; - /** \brief L2 interface pbb tag rewrite configure request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -370,7 +300,7 @@ define l2_interface_vlan_tag_rewrite_reply @param b_vlanid - B-tag vlanid, needed for any push or translate qinq vtr op @param i_sid - I-tag service id, needed for any push or translate qinq vtr op */ -define l2_interface_pbb_tag_rewrite +autoreply define l2_interface_pbb_tag_rewrite { u32 client_index; u32 context; @@ -383,16 +313,6 @@ define l2_interface_pbb_tag_rewrite u32 i_sid; }; -/** \brief L2 interface pbb tag rewrite response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define l2_interface_pbb_tag_rewrite_reply -{ - u32 context; - i32 retval; -}; - /* * Local Variables: * eval: (c-set-style "gnu") diff --git a/src/vnet/l2tp/l2tp.api b/src/vnet/l2tp/l2tp.api index 5a5a5a48..4587a807 100644 --- a/src/vnet/l2tp/l2tp.api +++ b/src/vnet/l2tp/l2tp.api @@ -52,7 +52,7 @@ define l2tpv3_create_tunnel_reply u32 sw_if_index; }; -define l2tpv3_set_tunnel_cookies +autoreply define l2tpv3_set_tunnel_cookies { u32 client_index; u32 context; @@ -61,16 +61,6 @@ define l2tpv3_set_tunnel_cookies u64 new_remote_cookie; }; -/** \brief L2TP tunnel set cookies response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define l2tpv3_set_tunnel_cookies_reply -{ - u32 context; - i32 retval; -}; - define sw_if_l2tpv3_tunnel_details { u32 context; @@ -91,7 +81,7 @@ define sw_if_l2tpv3_tunnel_dump u32 context; }; -define l2tpv3_interface_enable_disable +autoreply define l2tpv3_interface_enable_disable { u32 client_index; u32 context; @@ -99,13 +89,7 @@ define l2tpv3_interface_enable_disable u32 sw_if_index; }; -define l2tpv3_interface_enable_disable_reply -{ - u32 context; - i32 retval; -}; - -define l2tpv3_set_lookup_key +autoreply define l2tpv3_set_lookup_key { u32 client_index; u32 context; @@ -113,12 +97,6 @@ define l2tpv3_set_lookup_key u8 key; }; -define l2tpv3_set_lookup_key_reply -{ - u32 context; - i32 retval; -}; - /* * Local Variables: * eval: (c-set-style "gnu") diff --git a/src/vnet/lisp-cp/lisp.api b/src/vnet/lisp-cp/lisp.api index a50a5ccb..8bed71b3 100644 --- a/src/vnet/lisp-cp/lisp.api +++ b/src/vnet/lisp-cp/lisp.api @@ -59,7 +59,7 @@ define lisp_add_del_locator_set_reply @param priority - priority of the lisp locator @param weight - weight of the lisp locator */ -define lisp_add_del_locator +autoreply define lisp_add_del_locator { u32 client_index; u32 context; @@ -70,16 +70,6 @@ define lisp_add_del_locator u8 weight; }; -/** \brief Reply for locator add/del - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define lisp_add_del_locator_reply -{ - u32 context; - i32 retval; -}; - /** \brief add or delete lisp eid-table @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -98,7 +88,7 @@ define lisp_add_del_locator_reply HMAC_SHA_256_128 2 @param key - secret key */ -define lisp_add_del_local_eid +autoreply define lisp_add_del_local_eid { u32 client_index; u32 context; @@ -112,16 +102,6 
@@ define lisp_add_del_local_eid u8 key[64]; }; -/** \brief Reply for local_eid add/del - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define lisp_add_del_local_eid_reply -{ - u32 context; - i32 retval; -}; - /** \brief Add/delete map server @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -129,7 +109,7 @@ define lisp_add_del_local_eid_reply @param is_ipv6 - if non-zero the address is ipv6, else ipv4 @param ip_address - map server IP address */ -define lisp_add_del_map_server +autoreply define lisp_add_del_map_server { u32 client_index; u32 context; @@ -138,16 +118,6 @@ define lisp_add_del_map_server u8 ip_address[16]; }; -/** \brief Reply for lisp_add_del_map_server - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define lisp_add_del_map_server_reply -{ - u32 context; - i32 retval; -}; - /** \brief add or delete map-resolver @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -155,7 +125,7 @@ define lisp_add_del_map_server_reply @param is_ipv6 - if non-zero the address is ipv6, else ipv4 @param ip_address - array of address bytes */ -define lisp_add_del_map_resolver +autoreply define lisp_add_del_map_resolver { u32 client_index; u32 context; @@ -164,45 +134,25 @@ define lisp_add_del_map_resolver u8 ip_address[16]; }; -/** \brief Reply for map_resolver add/del - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define lisp_add_del_map_resolver_reply -{ - u32 context; - i32 retval; -}; - /** \brief enable or disable LISP feature @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param is_en - enable protocol if non-zero, else disable */ -define lisp_enable_disable +autoreply define lisp_enable_disable { u32 client_index; u32 context; u8 is_en; }; -/** \brief Reply for gpe enable/disable - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define lisp_enable_disable_reply -{ - u32 context; - i32 retval; -}; - /** \brief configure or disable LISP PITR node @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param ls_name - locator set name @param is_add - add locator set if non-zero, else disable pitr */ -define lisp_pitr_set_locator_set +autoreply define lisp_pitr_set_locator_set { u32 client_index; u32 context; @@ -210,16 +160,6 @@ define lisp_pitr_set_locator_set u8 ls_name[64]; }; -/** \brief Reply for lisp_pitr_set_locator_set - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define lisp_pitr_set_locator_set_reply -{ - u32 context; - i32 retval; -}; - /** \brief configure or disable use of PETR @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -227,7 +167,7 @@ define lisp_pitr_set_locator_set_reply @param address - PETR IP address @param is_add - add locator set if non-zero, else disable pitr */ -define lisp_use_petr +autoreply define lisp_use_petr { u32 client_index; u32 context; @@ -236,16 +176,6 @@ define lisp_use_petr u8 is_add; }; -/** \brief Reply for lisp_pitr_set_locator_set - @param context - returned sender context, to match reply w/ request - @param retval - return 
code -*/ -define lisp_use_petr_reply -{ - u32 context; - i32 retval; -}; - /** \brief Request for LISP PETR status @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -298,45 +228,25 @@ define show_lisp_rloc_probe_state_reply @param context - sender context, to match reply w/ request @param is_enable - enable if non-zero; disable otherwise */ -define lisp_rloc_probe_enable_disable +autoreply define lisp_rloc_probe_enable_disable { u32 client_index; u32 context; u8 is_enabled; }; -/** \brief Reply for lisp_rloc_probe_enable_disable - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define lisp_rloc_probe_enable_disable_reply -{ - u32 context; - i32 retval; -}; - /** \brief enable/disable LISP map-register @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param is_enable - enable if non-zero; disable otherwise */ -define lisp_map_register_enable_disable +autoreply define lisp_map_register_enable_disable { u32 client_index; u32 context; u8 is_enabled; }; -/** \brief Reply for lisp_map_register_enable_disable - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define lisp_map_register_enable_disable_reply -{ - u32 context; - i32 retval; -}; - /** \brief Get state of LISP map-register @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -366,23 +276,13 @@ define show_lisp_map_register_state_reply 0 - destination only 1 - source/destaination */ -define lisp_map_request_mode +autoreply define lisp_map_request_mode { u32 client_index; u32 context; u8 mode; }; -/** \brief Reply for lisp_map_request_mode - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define lisp_map_request_mode_reply -{ - u32 context; - i32 retval; -}; - /** \brief Request for LISP map-request mode @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -430,7 +330,7 @@ typeonly manual_endian manual_print define remote_locator @param rloc_num - number of remote locators @param rlocs - remote locator records */ -manual_print manual_endian define lisp_add_del_remote_mapping +autoreply manual_print manual_endian define lisp_add_del_remote_mapping { u32 client_index; u32 context; @@ -448,16 +348,6 @@ manual_print manual_endian define lisp_add_del_remote_mapping vl_api_remote_locator_t rlocs[rloc_num]; }; -/** \brief Reply for lisp_add_del_remote_mapping - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define lisp_add_del_remote_mapping_reply -{ - u32 context; - i32 retval; -}; - /** \brief add or delete LISP adjacency adjacency @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -470,7 +360,7 @@ define lisp_add_del_remote_mapping_reply @param reid - remote EID @param leid - local EID */ -define lisp_add_del_adjacency +autoreply define lisp_add_del_adjacency { u32 client_index; u32 context; @@ -483,23 +373,13 @@ define lisp_add_del_adjacency u8 leid_len; }; -/** \brief Reply for lisp_add_del_adjacency - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define lisp_add_del_adjacency_reply -{ - u32 context; - i32 retval; -}; - /** 
\brief add or delete map request itr rlocs @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param is_add - add address if non-zero, else delete @param locator_set_name - locator set name */ -define lisp_add_del_map_request_itr_rlocs +autoreply define lisp_add_del_map_request_itr_rlocs { u32 client_index; u32 context; @@ -512,12 +392,6 @@ define lisp_add_del_map_request_itr_rlocs @param retval - return code */ -define lisp_add_del_map_request_itr_rlocs_reply -{ - u32 context; - i32 retval; -}; - /** \brief map/unmap vni/bd_index to vrf @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -525,7 +399,7 @@ define lisp_add_del_map_request_itr_rlocs_reply @param dp_table - virtual network id/bridge domain index @param vrf - vrf */ -define lisp_eid_table_add_del_map +autoreply define lisp_eid_table_add_del_map { u32 client_index; u32 context; @@ -535,16 +409,6 @@ define lisp_eid_table_add_del_map u8 is_l2; }; -/** \brief Reply for lisp_eid_table_add_del_map - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define lisp_eid_table_add_del_map_reply -{ - u32 context; - i32 retval; -}; - /** \brief Request for map lisp locator status @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request diff --git a/src/vnet/lisp-cp/one.api b/src/vnet/lisp-cp/one.api index ca82f694..2fa1edf6 100644 --- a/src/vnet/lisp-cp/one.api +++ b/src/vnet/lisp-cp/one.api @@ -59,7 +59,7 @@ define one_add_del_locator_set_reply @param priority - priority of the locator @param weight - weight of the locator */ -define one_add_del_locator +autoreply define one_add_del_locator { u32 client_index; u32 context; @@ -70,16 +70,6 @@ define one_add_del_locator u8 weight; }; -/** \brief Reply for locator add/del - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define one_add_del_locator_reply -{ - u32 context; - i32 retval; -}; - /** \brief add or delete ONE eid-table @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -98,7 +88,7 @@ define one_add_del_locator_reply HMAC_SHA_256_128 2 @param key - secret key */ -define one_add_del_local_eid +autoreply define one_add_del_local_eid { u32 client_index; u32 context; @@ -112,16 +102,6 @@ define one_add_del_local_eid u8 key[64]; }; -/** \brief Reply for local_eid add/del - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define one_add_del_local_eid_reply -{ - u32 context; - i32 retval; -}; - /** \brief Add/delete map server @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -129,7 +109,7 @@ define one_add_del_local_eid_reply @param is_ipv6 - if non-zero the address is ipv6, else ipv4 @param ip_address - map server IP address */ -define one_add_del_map_server +autoreply define one_add_del_map_server { u32 client_index; u32 context; @@ -138,16 +118,6 @@ define one_add_del_map_server u8 ip_address[16]; }; -/** \brief Reply for one_add_del_map_server - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define one_add_del_map_server_reply -{ - u32 context; - i32 retval; -}; - /** \brief add or delete map-resolver @param client_index - opaque cookie to 
identify the sender @param context - sender context, to match reply w/ request @@ -155,7 +125,7 @@ define one_add_del_map_server_reply @param is_ipv6 - if non-zero the address is ipv6, else ipv4 @param ip_address - array of address bytes */ -define one_add_del_map_resolver +autoreply define one_add_del_map_resolver { u32 client_index; u32 context; @@ -164,45 +134,25 @@ define one_add_del_map_resolver u8 ip_address[16]; }; -/** \brief Reply for map_resolver add/del - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define one_add_del_map_resolver_reply -{ - u32 context; - i32 retval; -}; - /** \brief enable or disable ONE feature @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param is_en - enable protocol if non-zero, else disable */ -define one_enable_disable +autoreply define one_enable_disable { u32 client_index; u32 context; u8 is_en; }; -/** \brief Reply for gpe enable/disable - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define one_enable_disable_reply -{ - u32 context; - i32 retval; -}; - /** \brief configure or disable ONE PITR node @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param ls_name - locator set name @param is_add - add locator set if non-zero, else disable pitr */ -define one_pitr_set_locator_set +autoreply define one_pitr_set_locator_set { u32 client_index; u32 context; @@ -210,16 +160,6 @@ define one_pitr_set_locator_set u8 ls_name[64]; }; -/** \brief Reply for one_pitr_set_locator_set - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define one_pitr_set_locator_set_reply -{ - u32 context; - i32 retval; -}; - /** \brief configure or disable use of PETR @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -227,7 +167,7 @@ define one_pitr_set_locator_set_reply @param address - PETR IP address @param is_add - add locator set if non-zero, else disable PETR */ -define one_use_petr +autoreply define one_use_petr { u32 client_index; u32 context; @@ -236,16 +176,6 @@ define one_use_petr u8 is_add; }; -/** \brief Reply for one_use_petr - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define one_use_petr_reply -{ - u32 context; - i32 retval; -}; - /** \brief Request for ONE PETR status @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -298,45 +228,25 @@ define show_one_rloc_probe_state_reply @param context - sender context, to match reply w/ request @param is_enable - enable if non-zero; disable otherwise */ -define one_rloc_probe_enable_disable +autoreply define one_rloc_probe_enable_disable { u32 client_index; u32 context; u8 is_enabled; }; -/** \brief Reply for one_rloc_probe_enable_disable - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define one_rloc_probe_enable_disable_reply -{ - u32 context; - i32 retval; -}; - /** \brief enable/disable ONE map-register @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param is_enable - enable if non-zero; disable otherwise */ -define one_map_register_enable_disable +autoreply define one_map_register_enable_disable 
{ u32 client_index; u32 context; u8 is_enabled; }; -/** \brief Reply for one_map_register_enable_disable - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define one_map_register_enable_disable_reply -{ - u32 context; - i32 retval; -}; - /** \brief Get state of ONE map-register @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -366,23 +276,13 @@ define show_one_map_register_state_reply 0 - destination only 1 - source/destaination */ -define one_map_request_mode +autoreply define one_map_request_mode { u32 client_index; u32 context; u8 mode; }; -/** \brief Reply for one_map_request_mode - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define one_map_request_mode_reply -{ - u32 context; - i32 retval; -}; - /** \brief Request for ONE map-request mode @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -430,7 +330,7 @@ typeonly manual_endian manual_print define one_remote_locator @param rloc_num - number of remote locators @param rlocs - remote locator records */ -manual_print manual_endian define one_add_del_remote_mapping +autoreply manual_print manual_endian define one_add_del_remote_mapping { u32 client_index; u32 context; @@ -448,16 +348,6 @@ manual_print manual_endian define one_add_del_remote_mapping vl_api_one_remote_locator_t rlocs[rloc_num]; }; -/** \brief Reply for one_add_del_remote_mapping - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define one_add_del_remote_mapping_reply -{ - u32 context; - i32 retval; -}; - /** \brief add or delete ONE adjacency adjacency @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -470,7 +360,7 @@ define one_add_del_remote_mapping_reply @param reid - remote EID @param leid - local EID */ -define one_add_del_adjacency +autoreply define one_add_del_adjacency { u32 client_index; u32 context; @@ -483,23 +373,13 @@ define one_add_del_adjacency u8 leid_len; }; -/** \brief Reply for one_add_del_adjacency - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define one_add_del_adjacency_reply -{ - u32 context; - i32 retval; -}; - /** \brief add or delete map request itr rlocs @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param is_add - add address if non-zero, else delete @param locator_set_name - locator set name */ -define one_add_del_map_request_itr_rlocs +autoreply define one_add_del_map_request_itr_rlocs { u32 client_index; u32 context; @@ -507,17 +387,6 @@ define one_add_del_map_request_itr_rlocs u8 locator_set_name[64]; }; -/** \brief Reply for one_add_del_map_request_itr_rlocs - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ - -define one_add_del_map_request_itr_rlocs_reply -{ - u32 context; - i32 retval; -}; - /** \brief map/unmap vni/bd_index to vrf @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -525,7 +394,7 @@ define one_add_del_map_request_itr_rlocs_reply @param dp_table - virtual network id/bridge domain index @param vrf - vrf */ -define one_eid_table_add_del_map +autoreply define one_eid_table_add_del_map { u32 client_index; 
u32 context; @@ -535,16 +404,6 @@ define one_eid_table_add_del_map u8 is_l2; }; -/** \brief Reply for one_eid_table_add_del_map - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define one_eid_table_add_del_map_reply -{ - u32 context; - i32 retval; -}; - /** \brief Request for map one locator status @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -901,31 +760,19 @@ define one_stats_details u32 bytes; }; -define one_stats_flush +autoreply define one_stats_flush { u32 client_index; u32 context; }; -define one_stats_flush_reply -{ - u32 context; - i32 retval; -}; - -define one_stats_enable_disable +autoreply define one_stats_enable_disable { u32 client_index; u32 context; u8 is_en; }; -define one_stats_enable_disable_reply -{ - u32 context; - i32 retval; -}; - define show_one_stats_enable_disable { u32 client_index; diff --git a/src/vnet/lisp-gpe/lisp_gpe.api b/src/vnet/lisp-gpe/lisp_gpe.api index 43a6a6cd..f79d18c1 100644 --- a/src/vnet/lisp-gpe/lisp_gpe.api +++ b/src/vnet/lisp-gpe/lisp_gpe.api @@ -43,7 +43,7 @@ typeonly manual_print manual_endian define gpe_locator @param loc_num - number of locators @param locs - array of remote locators */ -manual_print manual_endian define gpe_add_del_fwd_entry +autoreply manual_print manual_endian define gpe_add_del_fwd_entry { u32 client_index; u32 context; @@ -60,44 +60,24 @@ manual_print manual_endian define gpe_add_del_fwd_entry vl_api_gpe_locator_t locs[loc_num]; }; -/** \brief Reply for gpe_fwd_entry add/del - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define gpe_add_del_fwd_entry_reply -{ - u32 context; - i32 retval; -}; - /** \brief enable or disable gpe protocol @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param is_en - enable protocol if non-zero, else disable */ -define gpe_enable_disable +autoreply define gpe_enable_disable { u32 client_index; u32 context; u8 is_en; }; -/** \brief Reply for gpe enable/disable - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define gpe_enable_disable_reply -{ - u32 context; - i32 retval; -}; - /** \brief add or delete gpe_iface @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param is_add - add address if non-zero, else delete */ -define gpe_add_del_iface +autoreply define gpe_add_del_iface { u32 client_index; u32 context; @@ -107,16 +87,6 @@ define gpe_add_del_iface u32 vni; }; -/** \brief Reply for gpe_iface add/del - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define gpe_add_del_iface_reply -{ - u32 context; - i32 retval; -}; - define gpe_fwd_entries_get { u32 client_index; @@ -163,23 +133,13 @@ manual_endian manual_print define gpe_fwd_entry_path_details @param context - sender context, to match reply w/ request @param mode - LISP (value 0) or VXLAN (value 1) */ -define gpe_set_encap_mode +autoreply define gpe_set_encap_mode { u32 client_index; u32 context; u8 mode; }; -/** \brief Reply for set_encap_mode - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define gpe_set_encap_mode_reply -{ - u32 context; - i32 retval; -}; - /** \brief get GPE encapsulation mode @param client_index - opaque cookie to identify the 
sender
     @param context - sender context, to match reply w/ request
diff --git a/src/vnet/map/map.api b/src/vnet/map/map.api
index 4e4be85e..d68f13f0 100644
--- a/src/vnet/map/map.api
+++ b/src/vnet/map/map.api
@@ -62,22 +62,13 @@ define map_add_domain_reply
     @param context - sender context, to match reply w/ request
     @param index - MAP Domain index
 */
-define map_del_domain
+autoreply define map_del_domain
 {
   u32 client_index;
   u32 context;
   u32 index;
 };
 
-/** \brief Reply for MAP domain del
-    @param context - returned sender context, to match reply w/ request
-    @param retval - return code
-*/
-define map_del_domain_reply
-{
-  u32 context;
-  i32 retval;
-};
 
 /** \brief Add or Delete MAP rule from a domain (Only used for shared IPv4 per subscriber)
     @param client_index - opaque cookie to identify the sender
@@ -87,7 +78,7 @@ define map_del_domain_reply
     @param ip6_dst - MAP CE IPv6 address
     @param psid - Rule PSID
 */
-define map_add_del_rule
+autoreply define map_add_del_rule
 {
   u32 client_index;
   u32 context;
@@ -97,15 +88,6 @@ define map_add_del_rule
   u16 psid;
 };
 
-/** \brief Reply for MAP rule add/del
-    @param context - returned sender context, to match reply w/ request
-    @param retval - return code
-*/
-define map_add_del_rule_reply
-{
-  u32 context;
-  i32 retval;
-};
 
 /** \brief Get list of map domains
     @param client_index - opaque cookie to identify the sender
diff --git a/src/vnet/mpls/mpls.api b/src/vnet/mpls/mpls.api
index a1e1270a..c8a3ffb7 100644
--- a/src/vnet/mpls/mpls.api
+++ b/src/vnet/mpls/mpls.api
@@ -26,7 +26,7 @@
     @param mb_address_length - Length of IP prefix
     @param mb_address[16] - IP prefix/
 */
-define mpls_ip_bind_unbind
+autoreply define mpls_ip_bind_unbind
 {
   u32 client_index;
   u32 context;
@@ -40,16 +40,6 @@ define mpls_ip_bind_unbind
   u8 mb_address[16];
 };
 
-/** \brief Reply for MPLS IP bind/unbind request
-    @param context - returned sender context, to match reply w/ request
-    @param retval - return code
-*/
-define mpls_ip_bind_unbind_reply
-{
-  u32 context;
-  i32 retval;
-};
-
 /** \brief MPLS tunnel Add / del route
     @param client_index - opaque cookie to identify the sender
     @param context - sender context, to match reply w/ request
@@ -172,7 +162,7 @@ manual_endian manual_print define mpls_tunnel_details
     @param mr_next_hop_out_label_stack - the next-hop output label stack, outer most first
     @param next_hop_via_label - The next-hop is a resolved via a local label
 */
-define mpls_route_add_del
+autoreply define mpls_route_add_del
 {
   u32 client_index;
   u32 context;
@@ -199,16 +189,6 @@ define mpls_route_add_del
   u32 mr_next_hop_out_label_stack[mr_next_hop_n_out_labels];
 };
 
-/** \brief Reply for MPLS route add / del request
-    @param context - returned sender context, to match reply w/ request
-    @param retval - return code
-*/
-define mpls_route_add_del_reply
-{
-  u32 context;
-  i32 retval;
-};
-
 /** \brief Dump MPLS fib table
     @param client_index - opaque cookie to identify the sender
 */
@@ -240,4 +220,4 @@ manual_endian manual_print define mpls_fib_details
  * eval: (c-set-style "gnu")
  * End:
  */
- 
\ No newline at end of file
+ 
diff --git a/src/vnet/session/session.api b/src/vnet/session/session.api
index e207e46f..4aef09da 100644
--- a/src/vnet/session/session.api
+++ b/src/vnet/session/session.api
@@ -49,26 +49,17 @@ define application_attach_reply {
     @param client_index - opaque cookie to identify the sender
     @param context - sender context, to match reply w/ request
 */
- define application_detach {
+autoreply define application_detach {
   u32 client_index;
   u32 context;
 };
 
- /** \brief detach
reply - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define application_detach_reply { - u32 context; - i32 retval; -}; - /** \brief vpp->client, please map an additional shared memory segment @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param segment_name - */ -define map_another_segment { +autoreply define map_another_segment { u32 client_index; u32 context; u32 segment_size; @@ -83,7 +74,7 @@ define map_another_segment { "tcp://::/0/80" [ipv6] etc. @param options - socket options, fifo sizes, etc. */ -define bind_uri { +autoreply define bind_uri { u32 client_index; u32 context; u32 accept_cookie; @@ -97,7 +88,7 @@ define bind_uri { "tcp://::/0/80" [ipv6], etc. @param options - socket options, fifo sizes, etc. */ -define unbind_uri { +autoreply define unbind_uri { u32 client_index; u32 context; u8 uri[128]; @@ -122,24 +113,6 @@ define connect_uri { u64 options[16]; }; -/** \brief Bind reply - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define bind_uri_reply { - u32 context; - i32 retval; -}; - -/** \brief unbind reply - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define unbind_uri_reply { - u32 context; - i32 retval; -}; - /** \brief vpp->client, connect reply @param context - sender context, to match reply w/ request @param retval - return code for the request @@ -165,15 +138,6 @@ define connect_uri_reply { u8 segment_name[128]; }; -/** \brief client->vpp - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define map_another_segment_reply { - u32 context; - i32 retval; -}; - /** \brief vpp->client, accept this session @param context - sender context, to match reply w/ request @param listener_handle - tells client which listener this pertains to @@ -290,7 +254,7 @@ define bind_sock { @param context - sender context, to match reply w/ request @param handle - bind handle obtained from bind reply */ -define unbind_sock { +autoreply define unbind_sock { u32 client_index; u32 context; u64 handle; @@ -339,15 +303,6 @@ define bind_sock_reply { u8 segment_name[128]; }; -/** \brief unbind reply - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define unbind_sock_reply { - u32 context; - i32 retval; -}; - /** \brief vpp/server->client, connect reply @param context - sender context, to match reply w/ request @param retval - return code for the request @@ -378,23 +333,14 @@ define connect_sock_reply { @param context - sender context, to match reply w/ request @param is_enable - disable session layer if 0, enable otherwise */ -define session_enable_disable { +autoreply define session_enable_disable { u32 client_index; u32 context; u8 is_enable; }; -/** \brief Reply for session enable/disable - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define session_enable_disable_reply { - u32 context; - i32 retval; -}; - /* * Local Variables: * eval: (c-set-style "gnu") * End: - */ \ No newline at end of file + */ diff --git a/src/vnet/span/span.api b/src/vnet/span/span.api index 4babdd83..914fd8d0 100644 --- a/src/vnet/span/span.api +++ b/src/vnet/span/span.api @@ -21,7 +21,7 @@ @param sw_if_index_to - interface where the traffic is mirrored @param state - 0 = 
disabled, 1 = rx enabled, 2 = tx enabled, 3 tx & rx enabled */ -define sw_interface_span_enable_disable { +autoreply define sw_interface_span_enable_disable { u32 client_index; u32 context; u32 sw_if_index_from; @@ -29,14 +29,6 @@ define sw_interface_span_enable_disable { u8 state; }; -/** \brief Reply to SPAN enable/disable request - @param context - sender context which was passed in the request -*/ -define sw_interface_span_enable_disable_reply { - u32 context; - i32 retval; -}; - /** \brief SPAN dump request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request diff --git a/src/vnet/sr/sr.api b/src/vnet/sr/sr.api index 5feadcb0..9e900741 100644 --- a/src/vnet/sr/sr.api +++ b/src/vnet/sr/sr.api @@ -25,7 +25,7 @@ @param fib_table FIB table in which we should install the localsid entry @param nh_addr Next Hop IPv4/IPv6 address. Only for L2/L3 xconnect. */ -define sr_localsid_add_del +autoreply define sr_localsid_add_del { u32 client_index; u32 context; @@ -39,16 +39,6 @@ define sr_localsid_add_del u8 nh_addr[16]; }; -/** \brief IPv6 SR LocalSID add/del request response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define sr_localsid_add_del_reply -{ - u32 context; - i32 retval; -}; - /** \brief IPv6 SR policy add @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -59,7 +49,7 @@ define sr_localsid_add_del_reply @param fib_table is the VRF where to install the FIB entry for the BSID @param segments is a vector of IPv6 address composing the segment list */ -define sr_policy_add +autoreply define sr_policy_add { u32 client_index; u32 context; @@ -72,16 +62,6 @@ define sr_policy_add u8 segments[0]; }; -/** \brief IPv6 SR Policy add request response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define sr_policy_add_reply -{ - u32 context; - i32 retval; -}; - /** \brief IPv6 SR policy modification @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -94,7 +74,7 @@ define sr_policy_add_reply @param weight is the weight of the sid list. optional. @param is_encap Mode. Encapsulation or SRH insertion. 
*/ -define sr_policy_mod +autoreply define sr_policy_mod { u32 client_index; u32 context; @@ -108,23 +88,13 @@ define sr_policy_mod u8 segments[0]; }; -/** \brief IPv6 SR Policy modification request response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define sr_policy_mod_reply -{ - u32 context; - i32 retval; -}; - /** \brief IPv6 SR policy deletion @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param bsid is the bindingSID of the SR Policy @param index is the index of the SR policy */ -define sr_policy_del +autoreply define sr_policy_del { u32 client_index; u32 context; @@ -132,16 +102,6 @@ define sr_policy_del u32 sr_policy_index; }; -/** \brief IPv6 SR Policy deletion request response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define sr_policy_del_reply -{ - u32 context; - i32 retval; -}; - /** \brief IPv6 SR steering add/del @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -154,7 +114,7 @@ define sr_policy_del_reply @param sw_if_index is the incoming interface for L2 traffic @param traffic_type describes the type of traffic */ -define sr_steering_add_del +autoreply define sr_steering_add_del { u32 client_index; u32 context; @@ -168,16 +128,6 @@ define sr_steering_add_del u8 traffic_type; }; -/** \brief IPv6 SR steering add/del request response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define sr_steering_add_del_reply -{ - u32 context; - i32 retval; -}; - /** \brief Dump the list of SR LocalSIDs @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request diff --git a/src/vnet/unix/tap.api b/src/vnet/unix/tap.api index 1fd0bb09..d9fba371 100644 --- a/src/vnet/unix/tap.api +++ b/src/vnet/unix/tap.api @@ -93,23 +93,13 @@ define tap_modify_reply @param context - sender context, to match reply w/ request @param sw_if_index - interface index of existing tap interface */ -define tap_delete +autoreply define tap_delete { u32 client_index; u32 context; u32 sw_if_index; }; -/** \brief Reply for tap delete request - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define tap_delete_reply -{ - u32 context; - i32 retval; -}; - /** \brief Dump tap interfaces request */ define sw_interface_tap_dump { diff --git a/src/vnet/vxlan/vxlan.api b/src/vnet/vxlan/vxlan.api index 048220fb..6c331a58 100644 --- a/src/vnet/vxlan/vxlan.api +++ b/src/vnet/vxlan/vxlan.api @@ -61,7 +61,7 @@ define vxlan_tunnel_details @param is_ipv6 - if non-zero, enable ipv6-vxlan-bypass, else ipv4-vxlan-bypass @param enable - if non-zero enable, else disable */ -define sw_interface_set_vxlan_bypass +autoreply define sw_interface_set_vxlan_bypass { u32 client_index; u32 context; @@ -69,13 +69,3 @@ define sw_interface_set_vxlan_bypass u8 is_ipv6; u8 enable; }; - -/** \brief Interface set vxlan-bypass response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define sw_interface_set_vxlan_bypass_reply -{ - u32 context; - i32 retval; -}; \ No newline at end of file diff --git a/src/vpp/api/vpe.api b/src/vpp/api/vpe.api index a4ba180d..7c07c822 100644 --- a/src/vpp/api/vpe.api +++ b/src/vpp/api/vpe.api @@ -80,7 +80,7 @@ define 
create_vlan_subif_reply @param sw_if_index - index of the interface @param enable - if non-zero enable, else disable */ -define sw_interface_set_mpls_enable +autoreply define sw_interface_set_mpls_enable { u32 client_index; u32 context; @@ -88,16 +88,6 @@ define sw_interface_set_mpls_enable u8 enable; }; -/** \brief Reply for MPLS state on an interface - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define sw_interface_set_mpls_enable_reply -{ - u32 context; - i32 retval; -}; - /** \brief Proxy ARP add / del request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -106,7 +96,7 @@ define sw_interface_set_mpls_enable_reply @param low_address[4] - Low address of the Proxy ARP range @param hi_address[4] - High address of the Proxy ARP range */ -define proxy_arp_add_del +autoreply define proxy_arp_add_del { u32 client_index; u32 context; @@ -116,23 +106,13 @@ define proxy_arp_add_del u8 hi_address[4]; }; -/** \brief Reply for proxy arp add / del request - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define proxy_arp_add_del_reply -{ - u32 context; - i32 retval; -}; - /** \brief Proxy ARP add / del request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param sw_if_index - Which interface to enable / disable Proxy Arp on @param enable_disable - 1 to enable Proxy ARP on interface, 0 to disable */ -define proxy_arp_intfc_enable_disable +autoreply define proxy_arp_intfc_enable_disable { u32 client_index; u32 context; @@ -141,23 +121,13 @@ define proxy_arp_intfc_enable_disable u8 enable_disable; }; -/** \brief Reply for Proxy ARP interface enable / disable request - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define proxy_arp_intfc_enable_disable_reply -{ - u32 context; - i32 retval; -}; - /** \brief Reset VRF (remove all routes etc) request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param is_ipv6 - 1 for IPv6 neighbor, 0 for IPv4 @param vrf_id - ID of th FIB table / VRF to reset */ -define reset_vrf +autoreply define reset_vrf { u32 client_index; u32 context; @@ -165,16 +135,6 @@ define reset_vrf u32 vrf_id; }; -/** \brief Reply for Reset VRF request - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define reset_vrf_reply -{ - u32 context; - i32 retval; -}; - /** \brief Is Address Reachable request - DISABLED @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -200,7 +160,7 @@ define is_address_reachable @param enable_disable - 1 = enable stats, 0 = disable @param pid - pid of process requesting stats updates */ -define want_stats +autoreply define want_stats { u32 client_index; u32 context; @@ -208,16 +168,6 @@ define want_stats u32 pid; }; -/** \brief Reply for Want Stats request - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define want_stats_reply -{ - u32 context; - i32 retval; -}; - typeonly manual_print manual_endian define ip4_fib_counter { u32 address; @@ -331,7 +281,7 @@ define oam_event @param enable_disable- enable if non-zero, else disable @param pid - pid of the requesting process */ -define want_oam_events +autoreply 
define want_oam_events { u32 client_index; u32 context; @@ -339,16 +289,6 @@ define want_oam_events u32 pid; }; -/** \brief Want OAM events response - @param context - sender context, to match reply w/ request - @param retval - return code for the want oam stats request -*/ -define want_oam_events_reply -{ - u32 context; - i32 retval; -}; - /** \brief OAM add / del target request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -357,7 +297,7 @@ define want_oam_events_reply @param dst_address[] - destination address of the target @param is_add - add target if non-zero, else delete */ -define oam_add_del +autoreply define oam_add_del { u32 client_index; u32 context; @@ -367,23 +307,13 @@ define oam_add_del u8 is_add; }; -/** \brief OAM add / del target response - @param context - sender context, to match reply w/ request - @param retval - return code of the request -*/ -define oam_add_del_reply -{ - u32 context; - i32 retval; -}; - /** \brief Reset fib table request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param vrf_id - vrf/table id of the fib table to reset @param is_ipv6 - an ipv6 fib to reset if non-zero, else ipv4 */ -define reset_fib +autoreply define reset_fib { u32 client_index; u32 context; @@ -391,16 +321,6 @@ define reset_fib u8 is_ipv6; }; -/** \brief Reset fib response - @param context - sender context, to match reply w/ request - @param retval - return code for the reset bfib request -*/ -define reset_fib_reply -{ - u32 context; - i32 retval; -}; - /** \brief Create loopback interface request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -458,23 +378,13 @@ define create_loopback_instance_reply @param context - sender context, to match reply w/ request @param sw_if_index - sw index of the interface that was created */ -define delete_loopback +autoreply define delete_loopback { u32 client_index; u32 context; u32 sw_if_index; }; -/** \brief Delete loopback interface response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define delete_loopback_reply -{ - u32 context; - i32 retval; -}; - /** \brief Control ping from client to api server request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -543,7 +453,7 @@ define cli_inband_reply @param is_ipv6 - neighbor limit if non-zero, else ARP limit @param arp_neighbor_limit - the new limit, defaults are ~ 50k */ -define set_arp_neighbor_limit +autoreply define set_arp_neighbor_limit { u32 client_index; u32 context; @@ -551,16 +461,6 @@ define set_arp_neighbor_limit u32 arp_neighbor_limit; }; -/** \brief Set max allowed ARP or ip6 neighbor entries response - @param context - sender context, to match reply w/ request - @param retval - return code for request -*/ -define set_arp_neighbor_limit_reply -{ - u32 context; - i32 retval; -}; - /** \brief L2 interface patch add / del request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -568,7 +468,7 @@ define set_arp_neighbor_limit_reply @param tx_sw_if_index - transmit side interface @param is_add - if non-zero set up the interface patch, else remove it */ -define l2_patch_add_del +autoreply define l2_patch_add_del { u32 client_index; u32 context; @@ -577,23 +477,13 @@ 
define l2_patch_add_del u8 is_add; }; -/** \brief L2 interface patch add / del response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define l2_patch_add_del_reply -{ - u32 context; - i32 retval; -}; - /** \brief Interface set vpath request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param sw_if_index - interface used to reach neighbor @param enable - if non-zero enable, else disable */ -define sw_interface_set_vpath +autoreply define sw_interface_set_vpath { u32 client_index; u32 context; @@ -601,16 +491,6 @@ define sw_interface_set_vpath u8 enable; }; -/** \brief Interface set vpath response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define sw_interface_set_vpath_reply -{ - u32 context; - i32 retval; -}; - /** \brief Set L2 XConnect between two interfaces request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -618,7 +498,7 @@ define sw_interface_set_vpath_reply @param tx_sw_if_index - Transmit interface index @param enable - enable xconnect if not 0, else set to L3 mode */ -define sw_interface_set_l2_xconnect +autoreply define sw_interface_set_l2_xconnect { u32 client_index; u32 context; @@ -627,16 +507,6 @@ define sw_interface_set_l2_xconnect u8 enable; }; -/** \brief Set L2 XConnect response - @param context - sender context, to match reply w/ request - @param retval - L2 XConnect request return code -*/ -define sw_interface_set_l2_xconnect_reply -{ - u32 context; - i32 retval; -}; - /** \brief Interface bridge mode request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -646,7 +516,7 @@ define sw_interface_set_l2_xconnect_reply @param shg - Shared horizon group, for bridge mode only @param enable - Enable beige mode if not 0, else set to L3 mode */ -define sw_interface_set_l2_bridge +autoreply define sw_interface_set_l2_bridge { u32 client_index; u32 context; @@ -657,16 +527,6 @@ define sw_interface_set_l2_bridge u8 enable; }; -/** \brief Interface bridge mode response - @param context - sender context, to match reply w/ request - @param retval - Bridge mode request return code -*/ -define sw_interface_set_l2_bridge_reply -{ - u32 context; - i32 retval; -}; - /** \brief Set bridge domain ip to mac entry request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -676,7 +536,7 @@ define sw_interface_set_l2_bridge_reply @param mac_address - MAC address @param */ -define bd_ip_mac_add_del +autoreply define bd_ip_mac_add_del { u32 client_index; u32 context; @@ -687,16 +547,6 @@ define bd_ip_mac_add_del u8 mac_address[6]; }; -/** \brief Set bridge domain ip to mac entry response - @param context - sender context, to match reply w/ request - @param retval - return code for the set bridge flags request -*/ -define bd_ip_mac_add_del_reply -{ - u32 context; - i32 retval; -}; - /** \brief Set/unset the classification table for an interface request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -704,7 +554,7 @@ define bd_ip_mac_add_del_reply @param sw_if_index - interface to associate with the table @param table_index - index of the table, if ~0 unset the table */ -define classify_set_interface_ip_table 
+autoreply define classify_set_interface_ip_table { u32 client_index; u32 context; @@ -713,16 +563,6 @@ define classify_set_interface_ip_table u32 table_index; /* ~0 => off */ }; -/** \brief Set/unset interface classification table response - @param context - sender context, to match reply w/ request - @param retval - return code -*/ -define classify_set_interface_ip_table_reply -{ - u32 context; - i32 retval; -}; - /** \brief Set/unset l2 classification tables for an interface request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -731,7 +571,7 @@ define classify_set_interface_ip_table_reply @param ip6_table_index - ip6 index @param other_table_index - other index */ -define classify_set_interface_l2_tables +autoreply define classify_set_interface_l2_tables { u32 client_index; u32 context; @@ -743,16 +583,6 @@ define classify_set_interface_l2_tables u8 is_input; }; -/** \brief Set/unset l2 classification tables for an interface response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define classify_set_interface_l2_tables_reply -{ - u32 context; - i32 retval; -}; - /** \brief Get node index using name request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -809,7 +639,7 @@ define add_node_next_reply @param sw_if_index - interface to enable/disable filtering on @param enable_disable - if non-zero enable filtering, else disable */ -define l2_interface_efp_filter +autoreply define l2_interface_efp_filter { u32 client_index; u32 context; @@ -817,16 +647,6 @@ define l2_interface_efp_filter u32 enable_disable; }; -/** \brief L2 interface ethernet flow point filtering response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define l2_interface_efp_filter_reply -{ - u32 context; - i32 retval; -}; - define create_subif { u32 client_index; @@ -882,7 +702,7 @@ define show_version_reply }; /* Gross kludge, DGMS */ -define interface_name_renumber +autoreply define interface_name_renumber { u32 client_index; u32 context; @@ -890,12 +710,6 @@ define interface_name_renumber u32 new_show_dev_instance; }; -define interface_name_renumber_reply -{ - u32 context; - i32 retval; -}; - /** \brief Register for ip4 arp resolution events @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -903,7 +717,7 @@ define interface_name_renumber_reply @param pid - sender's pid @param address - the exact ip4 address of interest */ -define want_ip4_arp_events +autoreply define want_ip4_arp_events { u32 client_index; u32 context; @@ -912,16 +726,6 @@ define want_ip4_arp_events u32 address; }; -/** \brief Reply for interface events registration - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define want_ip4_arp_events_reply -{ - u32 context; - i32 retval; -}; - /** \brief Tell client about an ip4 arp resolution event @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -949,7 +753,7 @@ define ip4_arp_event @param pid - sender's pid @param address - the exact ip6 address of interest */ -define want_ip6_nd_events +autoreply define want_ip6_nd_events { u32 client_index; u32 context; @@ -958,16 +762,6 @@ define want_ip6_nd_events u8 address[16]; }; -/** \brief Reply for 
ip6 nd resolution events registration - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define want_ip6_nd_events_reply -{ - u32 context; - i32 retval; -}; - /** \brief Tell client about an ip6 nd resolution or mac/ip event @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -999,7 +793,7 @@ define ip6_nd_event Note: User is recommeneded to use just one valid table_index per call. (ip4_table_index, ip6_table_index, or l2_table_index) */ -define input_acl_set_interface +autoreply define input_acl_set_interface { u32 client_index; u32 context; @@ -1010,16 +804,6 @@ define input_acl_set_interface u8 is_add; }; -/** \brief Set/unset input ACL interface response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define input_acl_set_interface_reply -{ - u32 context; - i32 retval; -}; - define get_node_graph { u32 client_index; @@ -1048,7 +832,7 @@ define get_node_graph_reply @param pow_enable - Proof of Work enabled or not flag @param trace_enable - iOAM Trace enabled or not flag */ -define ioam_enable +autoreply define ioam_enable { u32 client_index; u32 context; @@ -1060,38 +844,18 @@ define ioam_enable u32 node_id; }; -/** \brief iOAM Trace profile add / del response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define ioam_enable_reply -{ - u32 context; - i32 retval; -}; - /** \brief iOAM disable @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param index - MAP Domain index */ -define ioam_disable +autoreply define ioam_disable { u32 client_index; u32 context; u16 id; }; -/** \brief iOAM disable response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define ioam_disable_reply -{ - u32 context; - i32 retval; -}; - /** \brief Query relative index via node names @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -1149,7 +913,7 @@ define pg_create_interface_reply @param count - number of packets to be captured @param pcap_file - pacp file name to store captured packets */ -define pg_capture +autoreply define pg_capture { u32 client_index; u32 context; @@ -1160,23 +924,13 @@ define pg_capture u8 pcap_file_name[pcap_name_length]; }; -/** \brief PacketGenerator capture packets response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define pg_capture_reply -{ - u32 context; - i32 retval; -}; - /** \brief Enable / disable packet generator request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param is_enabled - 1 if enabling streams, 0 if disabling @param stream - stream name to be enable/disabled, if not specified handle all streams */ -define pg_enable_disable +autoreply define pg_enable_disable { u32 client_index; u32 context; @@ -1185,16 +939,6 @@ define pg_enable_disable u8 stream_name[stream_name_length]; }; -/** \brief Reply for enable / disable packet generator - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define pg_enable_disable_reply -{ - u32 context; - i32 retval; -}; - /** \brief Configure IP source and L4 port-range check @param client_index - opaque 
cookie to identify the sender @param context - sender context, to match reply w/ request @@ -1208,7 +952,7 @@ define pg_enable_disable_reply @param vrf_id - fib table/vrf id to associate the source and port-range check with @note To specify a single port set low_port and high_port entry the same */ -define ip_source_and_port_range_check_add_del +autoreply define ip_source_and_port_range_check_add_del { u32 client_index; u32 context; @@ -1222,16 +966,6 @@ define ip_source_and_port_range_check_add_del u32 vrf_id; }; -/** \brief Configure IP source and L4 port-range check reply - @param context - returned sender context, to match reply w/ request - @param retval - return code -*/ -define ip_source_and_port_range_check_add_del_reply -{ - u32 context; - i32 retval; -}; - /** \brief Set interface source and L4 port-range request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -1239,7 +973,7 @@ define ip_source_and_port_range_check_add_del_reply @param tcp_vrf_id - VRF associated with source and TCP port-range check @param udp_vrf_id - VRF associated with source and TCP port-range check */ -define ip_source_and_port_range_check_interface_add_del +autoreply define ip_source_and_port_range_check_interface_add_del { u32 client_index; u32 context; @@ -1251,36 +985,17 @@ define ip_source_and_port_range_check_interface_add_del u32 udp_out_vrf_id; }; -/** \brief Set interface source and L4 port-range response - @param context - sender context, to match reply w/ request - @param retval - return value for request -*/ -define ip_source_and_port_range_check_interface_add_del_reply -{ - u32 context; - i32 retval; -}; - /** \brief Delete sub interface request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param sw_if_index - sw index of the interface that was created by create_subif */ -define delete_subif { +autoreply define delete_subif { u32 client_index; u32 context; u32 sw_if_index; }; -/** \brief Delete sub interface response - @param context - sender context, to match reply w/ request - @param retval - return code for the request -*/ -define delete_subif_reply { - u32 context; - i32 retval; -}; - /** \brief Punt traffic to the host @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @@ -1289,7 +1004,7 @@ define delete_subif_reply { @param l4_protocol - L4 protocol to be punted, only UDP (0x11) is supported @param l4_port - TCP/UDP port to be punted */ -define punt { +autoreply define punt { u32 client_index; u32 context; u8 is_add; @@ -1298,23 +1013,13 @@ define punt { u16 l4_port; }; -/** \brief Reply to the punt request - @param context - sender context which was passed in the request - @param retval - return code of punt request -*/ -define punt_reply -{ - u32 context; - i32 retval; -}; - /** \brief Feature path enable/disable request @param client_index - opaque cookie to identify the sender @param context - sender context, to match reply w/ request @param sw_if_index - the interface @param enable - 1 = on, 0 = off */ -define feature_enable_disable { +autoreply define feature_enable_disable { u32 client_index; u32 context; u32 sw_if_index; @@ -1323,16 +1028,6 @@ define feature_enable_disable { u8 feature_name[64]; }; -/** \brief Reply to the eature path enable/disable request - @param context - sender context which was passed in the request - @param retval - return code for the request -*/ 
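/* Editorial note, not part of the patch: every hunk in this change replaces a
 * hand-written "<name>_reply" message with the "autoreply" keyword on the request
 * definition.  As far as this series shows, the API compiler then generates the
 * reply implicitly, and the generated message carries exactly the two fields the
 * removed definitions contained, i.e. (sketch only):
 *
 *     autoreply define feature_enable_disable { ... };
 *
 *     =>  define feature_enable_disable_reply
 *         {
 *           u32 context;
 *           i32 retval;
 *         };
 *
 * Replies that carry more than context/retval (for example connect_uri_reply or
 * bind_sock_reply with their segment_name fields) are deliberately left as
 * explicit definitions by this patch.
 */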
-define feature_enable_disable_reply -{ - u32 context; - i32 retval; -}; - /* * Local Variables: * eval: (c-set-style "gnu") -- cgit 1.2.3-korg From a774b53623f60b5e8ea8ed634d6a41e847743715 Mon Sep 17 00:00:00 2001 From: Matus Fabian Date: Tue, 2 May 2017 03:15:22 -0700 Subject: NAT64: Move IPv6-IPv4 virtual reassembly code from MAP-T to common library (VPP-708) Change-Id: I9ad636f80bf109ffac9ca1b6d80d5f2c31f2076a Signed-off-by: Matus Fabian --- src/vnet.am | 2 + src/vnet/ip/ip4_to_ip6.h | 577 +++++++++++++++++++++++++++++++++ src/vnet/ip/ip6_to_ip4.h | 571 +++++++++++++++++++++++++++++++++ src/vnet/map/ip4_map.c | 53 +-- src/vnet/map/ip4_map_t.c | 820 ++++++++--------------------------------------- src/vnet/map/ip6_map.c | 23 +- src/vnet/map/ip6_map_t.c | 806 ++++++++++------------------------------------ src/vnet/map/map.c | 85 ----- src/vnet/map/map.h | 29 -- 9 files changed, 1463 insertions(+), 1503 deletions(-) create mode 100644 src/vnet/ip/ip4_to_ip6.h create mode 100644 src/vnet/ip/ip6_to_ip4.h (limited to 'src/vnet/map') diff --git a/src/vnet.am b/src/vnet.am index 9f3aedba..6e35df87 100644 --- a/src/vnet.am +++ b/src/vnet.am @@ -52,6 +52,8 @@ nobase_include_HEADERS += \ vnet/interface.h \ vnet/interface.api.h \ vnet/interface_funcs.h \ + vnet/ip/ip4_to_ip6.h \ + vnet/ip/ip6_to_ip4.h \ vnet/l3_types.h \ vnet/pipeline.h \ vnet/replication.h \ diff --git a/src/vnet/ip/ip4_to_ip6.h b/src/vnet/ip/ip4_to_ip6.h new file mode 100644 index 00000000..96b8bf1e --- /dev/null +++ b/src/vnet/ip/ip4_to_ip6.h @@ -0,0 +1,577 @@ +/* + * Copyright (c) 2017 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * @file + * @brief IPv4 to IPv6 translation + */ +#ifndef __included_ip4_to_ip6_h__ +#define __included_ip4_to_ip6_h__ + +#include + + +/** + * IPv4 to IPv6 set call back function type + */ +typedef int (*ip4_to_ip6_set_fn_t) (ip4_header_t * ip4, ip6_header_t * ip6, + void *ctx); + +/* *INDENT-OFF* */ +static u8 icmp_to_icmp6_updater_pointer_table[] = + { 0, 1, 4, 4, ~0, + ~0, ~0, ~0, 7, 6, + ~0, ~0, 8, 8, 8, + 8, 24, 24, 24, 24 + }; +/* *INDENT-ON* */ + +#define frag_id_4to6(id) (id) + +/** + * @brief Get TCP/UDP port number or ICMP id from IPv4 packet. + * + * @param ip4 IPv4 header. + * @param sender 1 get sender port, 0 get receiver port. + * + * @returns Port number on success, 0 otherwise. + */ +always_inline u16 +ip4_get_port (ip4_header_t * ip, u8 sender) +{ + if (ip->ip_version_and_header_length != 0x45 || + ip4_get_fragment_offset (ip)) + return 0; + + if (PREDICT_TRUE ((ip->protocol == IP_PROTOCOL_TCP) || + (ip->protocol == IP_PROTOCOL_UDP))) + { + udp_header_t *udp = (void *) (ip + 1); + return (sender) ? 
udp->src_port : udp->dst_port; + } + else if (ip->protocol == IP_PROTOCOL_ICMP) + { + icmp46_header_t *icmp = (void *) (ip + 1); + if (icmp->type == ICMP4_echo_request || icmp->type == ICMP4_echo_reply) + { + return *((u16 *) (icmp + 1)); + } + else if (clib_net_to_host_u16 (ip->length) >= 64) + { + ip = (ip4_header_t *) (icmp + 2); + if (PREDICT_TRUE ((ip->protocol == IP_PROTOCOL_TCP) || + (ip->protocol == IP_PROTOCOL_UDP))) + { + udp_header_t *udp = (void *) (ip + 1); + return (sender) ? udp->dst_port : udp->src_port; + } + else if (ip->protocol == IP_PROTOCOL_ICMP) + { + icmp46_header_t *icmp = (void *) (ip + 1); + if (icmp->type == ICMP4_echo_request || + icmp->type == ICMP4_echo_reply) + { + return *((u16 *) (icmp + 1)); + } + } + } + } + return 0; +} + +/** + * @brief Convert type and code value from ICMP4 to ICMP6. + * + * @param icmp ICMP header. + * @param inner_ip4 Inner IPv4 header if present, 0 otherwise. + * + * @returns 0 on success, non-zero value otherwise. + */ +always_inline int +icmp_to_icmp6_header (icmp46_header_t * icmp, ip4_header_t ** inner_ip4) +{ + *inner_ip4 = NULL; + switch (icmp->type) + { + case ICMP4_echo_reply: + icmp->type = ICMP6_echo_reply; + break; + case ICMP4_echo_request: + icmp->type = ICMP6_echo_request; + break; + case ICMP4_destination_unreachable: + *inner_ip4 = (ip4_header_t *) (((u8 *) icmp) + 8); + + switch (icmp->code) + { + case ICMP4_destination_unreachable_destination_unreachable_net: //0 + case ICMP4_destination_unreachable_destination_unreachable_host: //1 + icmp->type = ICMP6_destination_unreachable; + icmp->code = ICMP6_destination_unreachable_no_route_to_destination; + break; + case ICMP4_destination_unreachable_protocol_unreachable: //2 + icmp->type = ICMP6_parameter_problem; + icmp->code = ICMP6_parameter_problem_unrecognized_next_header; + break; + case ICMP4_destination_unreachable_port_unreachable: //3 + icmp->type = ICMP6_destination_unreachable; + icmp->code = ICMP6_destination_unreachable_port_unreachable; + break; + case ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set: //4 + icmp->type = + ICMP6_packet_too_big; + icmp->code = 0; + { + u32 advertised_mtu = clib_net_to_host_u32 (*((u32 *) (icmp + 1))); + if (advertised_mtu) + advertised_mtu += 20; + else + advertised_mtu = 1000; //FIXME ! 
(RFC 1191 - plateau value) + + //FIXME: = minimum(advertised MTU+20, MTU_of_IPv6_nexthop, (MTU_of_IPv4_nexthop)+20) + *((u32 *) (icmp + 1)) = clib_host_to_net_u32 (advertised_mtu); + } + break; + + case ICMP4_destination_unreachable_source_route_failed: //5 + case ICMP4_destination_unreachable_destination_network_unknown: //6 + case ICMP4_destination_unreachable_destination_host_unknown: //7 + case ICMP4_destination_unreachable_source_host_isolated: //8 + case ICMP4_destination_unreachable_network_unreachable_for_type_of_service: //11 + case ICMP4_destination_unreachable_host_unreachable_for_type_of_service: //12 + icmp->type = + ICMP6_destination_unreachable; + icmp->code = ICMP6_destination_unreachable_no_route_to_destination; + break; + case ICMP4_destination_unreachable_network_administratively_prohibited: //9 + case ICMP4_destination_unreachable_host_administratively_prohibited: //10 + case ICMP4_destination_unreachable_communication_administratively_prohibited: //13 + case ICMP4_destination_unreachable_precedence_cutoff_in_effect: //15 + icmp->type = ICMP6_destination_unreachable; + icmp->code = + ICMP6_destination_unreachable_destination_administratively_prohibited; + break; + case ICMP4_destination_unreachable_host_precedence_violation: //14 + default: + return -1; + } + break; + + case ICMP4_time_exceeded: //11 + *inner_ip4 = (ip4_header_t *) (((u8 *) icmp) + 8); + icmp->type = ICMP6_time_exceeded; + break; + + case ICMP4_parameter_problem: + *inner_ip4 = (ip4_header_t *) (((u8 *) icmp) + 8); + + switch (icmp->code) + { + case ICMP4_parameter_problem_pointer_indicates_error: + case ICMP4_parameter_problem_bad_length: + icmp->type = ICMP6_parameter_problem; + icmp->code = ICMP6_parameter_problem_erroneous_header_field; + { + u8 ptr = + icmp_to_icmp6_updater_pointer_table[*((u8 *) (icmp + 1))]; + if (ptr == 0xff) + return -1; + + *((u32 *) (icmp + 1)) = clib_host_to_net_u32 (ptr); + } + break; + default: + //All other codes cause error + return -1; + } + break; + + default: + //All other types cause error + return -1; + break; + } + return 0; +} + +/** + * @brief Translate ICMP4 packet to ICMP6. + * + * @param p Buffer to translate. + * @param fn The function to translate outer header. + * @param ctx A context passed in the outer header translate function. + * @param inner_fn The function to translate inner header. + * @param inner_ctx A context passed in the inner header translate function. + * + * @returns 0 on success, non-zero value otherwise. + */ +always_inline int +icmp_to_icmp6 (vlib_buffer_t * p, ip4_to_ip6_set_fn_t fn, void *ctx, + ip4_to_ip6_set_fn_t inner_fn, void *inner_ctx) +{ + ip4_header_t *ip4, *inner_ip4; + ip6_header_t *ip6, *inner_ip6; + u32 ip_len; + icmp46_header_t *icmp; + ip_csum_t csum; + ip6_frag_hdr_t *inner_frag; + u32 inner_frag_id; + u32 inner_frag_offset; + u8 inner_frag_more; + u16 *inner_L4_checksum = 0; + int rv; + + ip4 = vlib_buffer_get_current (p); + ip_len = clib_net_to_host_u16 (ip4->length); + ASSERT (ip_len <= p->current_length); + + icmp = (icmp46_header_t *) (ip4 + 1); + if (icmp_to_icmp6_header (icmp, &inner_ip4)) + return -1; + + if (inner_ip4) + { + //We have 2 headers to translate. 
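/* Editorial illustration, not part of the patch: the buffer adjustments in this
 * function follow from plain header-size arithmetic.  An IPv6 header (40 bytes)
 * is 20 bytes larger than an IPv4 header (20 bytes), and the inner fragmented
 * case additionally inserts an 8-byte IPv6 fragment header.  A standalone sketch
 * of the offsets (assumed constants only; the real code uses sizeof on the vnet
 * header types):
 */
#include <stdio.h>
int
main (void)
{
  const int ip4_sz = 20, ip6_sz = 40, frag_sz = 8;
  /* outer and inner headers both grow by (ip6_sz - ip4_sz) bytes */
  printf ("advance, no inner fragment header: %d\n", -2 * (ip6_sz - ip4_sz));             /* -40 */
  /* an extra fragment header is inserted in front of the inner L4 payload */
  printf ("advance, with inner fragment header: %d\n", -2 * (ip6_sz - ip4_sz) - frag_sz); /* -48 */
  return 0;
}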
+ //We need to make some room in the middle of the packet + if (PREDICT_FALSE (ip4_is_fragment (inner_ip4))) + { + //Here it starts getting really tricky + //We will add a fragmentation header in the inner packet + + if (!ip4_is_first_fragment (inner_ip4)) + { + //For now we do not handle unless it is the first fragment + //Ideally we should handle the case as we are in slow path already + return -1; + } + + vlib_buffer_advance (p, + -2 * (sizeof (*ip6) - sizeof (*ip4)) - + sizeof (*inner_frag)); + ip6 = vlib_buffer_get_current (p); + clib_memcpy (u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4)), ip4, + 20 + 8); + ip4 = + (ip4_header_t *) u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4)); + icmp = (icmp46_header_t *) (ip4 + 1); + + inner_ip6 = + (ip6_header_t *) u8_ptr_add (inner_ip4, + sizeof (*ip4) - sizeof (*ip6) - + sizeof (*inner_frag)); + inner_frag = + (ip6_frag_hdr_t *) u8_ptr_add (inner_ip6, sizeof (*inner_ip6)); + ip6->payload_length = + u16_net_add (ip4->length, + sizeof (*ip6) - 2 * sizeof (*ip4) + + sizeof (*inner_frag)); + inner_frag_id = frag_id_4to6 (inner_ip4->fragment_id); + inner_frag_offset = ip4_get_fragment_offset (inner_ip4); + inner_frag_more = + ! !(inner_ip4->flags_and_fragment_offset & + clib_net_to_host_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS)); + } + else + { + vlib_buffer_advance (p, -2 * (sizeof (*ip6) - sizeof (*ip4))); + ip6 = vlib_buffer_get_current (p); + clib_memcpy (u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4)), ip4, + 20 + 8); + ip4 = + (ip4_header_t *) u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4)); + icmp = (icmp46_header_t *) u8_ptr_add (ip4, sizeof (*ip4)); + inner_ip6 = + (ip6_header_t *) u8_ptr_add (inner_ip4, + sizeof (*ip4) - sizeof (*ip6)); + ip6->payload_length = + u16_net_add (ip4->length, sizeof (*ip6) - 2 * sizeof (*ip4)); + inner_frag = NULL; + } + + if (PREDICT_TRUE (inner_ip4->protocol == IP_PROTOCOL_TCP)) + { + inner_L4_checksum = &((tcp_header_t *) (inner_ip4 + 1))->checksum; + *inner_L4_checksum = + ip_csum_fold (ip_csum_sub_even + (*inner_L4_checksum, + *((u64 *) (&inner_ip4->src_address)))); + } + else if (PREDICT_TRUE (inner_ip4->protocol == IP_PROTOCOL_UDP)) + { + inner_L4_checksum = &((udp_header_t *) (inner_ip4 + 1))->checksum; + if (!*inner_L4_checksum) + { + return -1; + } + *inner_L4_checksum = + ip_csum_fold (ip_csum_sub_even + (*inner_L4_checksum, + *((u64 *) (&inner_ip4->src_address)))); + } + else if (inner_ip4->protocol == IP_PROTOCOL_ICMP) + { + //We have an ICMP inside an ICMP + //It needs to be translated, but not for error ICMP messages + icmp46_header_t *inner_icmp = (icmp46_header_t *) (inner_ip4 + 1); + csum = inner_icmp->checksum; + //Only types ICMP4_echo_request and ICMP4_echo_reply are handled by icmp_to_icmp6_header + csum = ip_csum_sub_even (csum, *((u16 *) inner_icmp)); + inner_icmp->type = (inner_icmp->type == ICMP4_echo_request) ? 
+ ICMP6_echo_request : ICMP6_echo_reply; + csum = ip_csum_add_even (csum, *((u16 *) inner_icmp)); + csum = + ip_csum_add_even (csum, clib_host_to_net_u16 (IP_PROTOCOL_ICMP6)); + csum = + ip_csum_add_even (csum, inner_ip4->length - sizeof (*inner_ip4)); + inner_icmp->checksum = ip_csum_fold (csum); + inner_L4_checksum = &inner_icmp->checksum; + inner_ip4->protocol = IP_PROTOCOL_ICMP6; + } + else + { + /* To shut up Coverity */ + os_panic (); + } + + csum = *inner_L4_checksum; //Initial checksum of the inner L4 header + + inner_ip6->ip_version_traffic_class_and_flow_label = + clib_host_to_net_u32 ((6 << 28) + (inner_ip4->tos << 20)); + inner_ip6->payload_length = + u16_net_add (inner_ip4->length, -sizeof (*inner_ip4)); + inner_ip6->hop_limit = inner_ip4->ttl; + inner_ip6->protocol = inner_ip4->protocol; + + if ((rv = inner_fn (inner_ip4, inner_ip6, inner_ctx)) != 0) + return rv; + + if (PREDICT_FALSE (inner_frag != NULL)) + { + inner_frag->next_hdr = inner_ip6->protocol; + inner_frag->identification = inner_frag_id; + inner_frag->rsv = 0; + inner_frag->fragment_offset_and_more = + ip6_frag_hdr_offset_and_more (inner_frag_offset, inner_frag_more); + inner_ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION; + inner_ip6->payload_length = + clib_host_to_net_u16 (clib_net_to_host_u16 + (inner_ip6->payload_length) + + sizeof (*inner_frag)); + } + + csum = ip_csum_add_even (csum, inner_ip6->src_address.as_u64[0]); + csum = ip_csum_add_even (csum, inner_ip6->src_address.as_u64[1]); + csum = ip_csum_add_even (csum, inner_ip6->dst_address.as_u64[0]); + csum = ip_csum_add_even (csum, inner_ip6->dst_address.as_u64[1]); + *inner_L4_checksum = ip_csum_fold (csum); + } + else + { + vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6)); + ip6 = vlib_buffer_get_current (p); + ip6->payload_length = + clib_host_to_net_u16 (clib_net_to_host_u16 (ip4->length) - + sizeof (*ip4)); + } + + //Translate outer IPv6 + ip6->ip_version_traffic_class_and_flow_label = + clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20)); + + ip6->hop_limit = ip4->ttl; + ip6->protocol = IP_PROTOCOL_ICMP6; + + if ((rv = fn (ip4, ip6, ctx)) != 0) + return rv; + + //Truncate when the packet exceeds the minimal IPv6 MTU + if (p->current_length > 1280) + { + ip6->payload_length = clib_host_to_net_u16 (1280 - sizeof (*ip6)); + p->current_length = 1280; //Looks too simple to be correct... + } + + //Recompute ICMP checksum + icmp->checksum = 0; + csum = ip_csum_with_carry (0, ip6->payload_length); + csum = ip_csum_with_carry (csum, clib_host_to_net_u16 (ip6->protocol)); + csum = ip_csum_with_carry (csum, ip6->src_address.as_u64[0]); + csum = ip_csum_with_carry (csum, ip6->src_address.as_u64[1]); + csum = ip_csum_with_carry (csum, ip6->dst_address.as_u64[0]); + csum = ip_csum_with_carry (csum, ip6->dst_address.as_u64[1]); + csum = + ip_incremental_checksum (csum, icmp, + clib_net_to_host_u16 (ip6->payload_length)); + icmp->checksum = ~ip_csum_fold (csum); + + return 0; +} + +/** + * @brief Translate IPv4 fragmented packet to IPv6. + * + * @param p Buffer to translate. + * @param fn The function to translate header. + * @param ctx A context passed in the header translate function. + * + * @returns 0 on success, non-zero value otherwise. 
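/* Editorial note, not part of the patch: the checksum recomputation at the end of
 * icmp_to_icmp6() above reflects a protocol difference rather than an
 * implementation detail.  An ICMPv4 checksum covers only the ICMP message, while
 * the ICMPv6 checksum (RFC 4443) also covers an IPv6 pseudo-header; that is why
 * the payload length, the next-header value and both IPv6 addresses are folded
 * into the sum before ip_incremental_checksum() runs over the message body.
 * A rough sketch of that pseudo-header layout (illustration only, not code from
 * the patch):
 *
 *   source address      (16 bytes)
 *   destination address (16 bytes)
 *   upper-layer length   (4 bytes)
 *   zero                 (3 bytes)
 *   next header          (1 byte, IP_PROTOCOL_ICMP6 here)
 */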
+ */ +always_inline int +ip4_to_ip6_fragmented (vlib_buffer_t * p, ip4_to_ip6_set_fn_t fn, void *ctx) +{ + ip4_header_t *ip4; + ip6_header_t *ip6; + ip6_frag_hdr_t *frag; + int rv; + + ip4 = vlib_buffer_get_current (p); + frag = (ip6_frag_hdr_t *) u8_ptr_add (ip4, sizeof (*ip4) - sizeof (*frag)); + ip6 = + (ip6_header_t *) u8_ptr_add (ip4, + sizeof (*ip4) - sizeof (*frag) - + sizeof (*ip6)); + vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6) - sizeof (*frag)); + + //We know that the protocol was one of ICMP, TCP or UDP + //because the first fragment was found and cached + frag->next_hdr = + (ip4->protocol == IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol; + frag->identification = frag_id_4to6 (ip4->fragment_id); + frag->rsv = 0; + frag->fragment_offset_and_more = + ip6_frag_hdr_offset_and_more (ip4_get_fragment_offset (ip4), + clib_net_to_host_u16 + (ip4->flags_and_fragment_offset) & + IP4_HEADER_FLAG_MORE_FRAGMENTS); + + ip6->ip_version_traffic_class_and_flow_label = + clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20)); + ip6->payload_length = + clib_host_to_net_u16 (clib_net_to_host_u16 (ip4->length) - + sizeof (*ip4) + sizeof (*frag)); + ip6->hop_limit = ip4->ttl; + ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION; + + if ((rv = fn (ip4, ip6, ctx)) != 0) + return rv; + + return 0; +} + +/** + * @brief Translate IPv4 UDP/TCP packet to IPv6. + * + * @param p Buffer to translate. + * @param fn The function to translate header. + * @param ctx A context passed in the header translate function. + * + * @returns 0 on success, non-zero value otherwise. + */ +always_inline int +ip4_to_ip6_tcp_udp (vlib_buffer_t * p, ip4_to_ip6_set_fn_t fn, void *ctx) +{ + ip4_header_t *ip4; + ip6_header_t *ip6; + ip_csum_t csum; + u16 *checksum; + ip6_frag_hdr_t *frag; + u32 frag_id; + int rv; + + ip4 = vlib_buffer_get_current (p); + + if (ip4->protocol == IP_PROTOCOL_UDP) + { + udp_header_t *udp = ip4_next_header (ip4); + checksum = &udp->checksum; + + //UDP checksum is optional over IPv4 but mandatory for IPv6 + //We do not check udp->length sanity but use our safe computed value instead + if (PREDICT_FALSE (!checksum)) + { + u16 udp_len = clib_host_to_net_u16 (ip4->length) - sizeof (*ip4); + csum = ip_incremental_checksum (0, udp, udp_len); + csum = ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len)); + csum = + ip_csum_with_carry (csum, clib_host_to_net_u16 (IP_PROTOCOL_UDP)); + csum = ip_csum_with_carry (csum, *((u64 *) (&ip4->src_address))); + *checksum = ~ip_csum_fold (csum); + } + } + else + { + tcp_header_t *tcp = ip4_next_header (ip4); + checksum = &tcp->checksum; + } + + csum = ip_csum_sub_even (*checksum, ip4->src_address.as_u32); + csum = ip_csum_sub_even (csum, ip4->dst_address.as_u32); + + // Deal with fragmented packets + if (PREDICT_FALSE (ip4->flags_and_fragment_offset & + clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS))) + { + ip6 = + (ip6_header_t *) u8_ptr_add (ip4, + sizeof (*ip4) - sizeof (*ip6) - + sizeof (*frag)); + frag = + (ip6_frag_hdr_t *) u8_ptr_add (ip4, sizeof (*ip4) - sizeof (*frag)); + frag_id = frag_id_4to6 (ip4->fragment_id); + vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6) - sizeof (*frag)); + } + else + { + ip6 = (ip6_header_t *) (((u8 *) ip4) + sizeof (*ip4) - sizeof (*ip6)); + vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6)); + frag = NULL; + } + + ip6->ip_version_traffic_class_and_flow_label = + clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20)); + ip6->payload_length = u16_net_add (ip4->length, -sizeof (*ip4)); + ip6->hop_limit = 
ip4->ttl; + ip6->protocol = ip4->protocol; + + if (PREDICT_FALSE (frag != NULL)) + { + frag->next_hdr = ip6->protocol; + frag->identification = frag_id; + frag->rsv = 0; + frag->fragment_offset_and_more = ip6_frag_hdr_offset_and_more (0, 1); + ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION; + ip6->payload_length = u16_net_add (ip6->payload_length, sizeof (*frag)); + } + + if ((rv = fn (ip4, ip6, ctx)) != 0) + return rv; + + csum = ip_csum_add_even (csum, ip6->src_address.as_u64[0]); + csum = ip_csum_add_even (csum, ip6->src_address.as_u64[1]); + csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[0]); + csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[1]); + *checksum = ip_csum_fold (csum); + + return 0; +} + +#endif /* __included_ip4_to_ip6_h__ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/ip/ip6_to_ip4.h b/src/vnet/ip/ip6_to_ip4.h new file mode 100644 index 00000000..f5d56883 --- /dev/null +++ b/src/vnet/ip/ip6_to_ip4.h @@ -0,0 +1,571 @@ +/* + * Copyright (c) 2017 Cisco and/or its affiliates. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * @file + * @brief IPv6 to IPv4 translation + */ +#ifndef __included_ip6_to_ip4_h__ +#define __included_ip6_to_ip4_h__ + +#include + +/** + * IPv6 to IPv4 set call back function type + */ +typedef int (*ip6_to_ip4_set_fn_t) (ip6_header_t * ip6, ip4_header_t * ip4, + void *ctx); + +/* *INDENT-OFF* */ +static u8 icmp6_to_icmp_updater_pointer_table[] = + { 0, 1, ~0, ~0, + 2, 2, 9, 8, + 12, 12, 12, 12, + 12, 12, 12, 12, + 12, 12, 12, 12, + 12, 12, 12, 12, + 24, 24, 24, 24, + 24, 24, 24, 24, + 24, 24, 24, 24, + 24, 24, 24, 24 + }; +/* *INDENT-ON* */ + +#define frag_id_6to4(id) ((id) ^ ((id) >> 16)) + +/** + * @brief Parse some useful information from IPv6 header. + * + * @param ip6 IPv6 header. + * @param buff_len Buffer length. + * @param l4_protocol L4 protocol number. + * @param l4_offset L4 header offset. + * @param frag_hdr_offset Fragment header offset if present, 0 otherwise. + * + * @returns 0 on success, non-zero value otherwise. + */ +static_always_inline int +ip6_parse (const ip6_header_t * ip6, u32 buff_len, + u8 * l4_protocol, u16 * l4_offset, u16 * frag_hdr_offset) +{ + if (ip6->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION) + { + *l4_protocol = ((ip6_frag_hdr_t *) (ip6 + 1))->next_hdr; + *frag_hdr_offset = sizeof (*ip6); + *l4_offset = sizeof (*ip6) + sizeof (ip6_frag_hdr_t); + } + else + { + *l4_protocol = ip6->protocol; + *frag_hdr_offset = 0; + *l4_offset = sizeof (*ip6); + } + + return (buff_len < (*l4_offset + 4)) || + (clib_net_to_host_u16 (ip6->payload_length) < + (*l4_offset + 4 - sizeof (*ip6))); +} + +/** + * @brief Get TCP/UDP port number or ICMP id from IPv6 packet. + * + * @param ip6 IPv6 header. + * @param sender 1 get sender port, 0 get receiver port. + * @param buffer_len Buffer length. + * + * @returns Port number on success, 0 otherwise. 
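/* Editorial illustration, not part of the patch: IPv6 fragment identifiers are
 * 32 bits wide while the IPv4 fragment_id field is only 16, so the
 * frag_id_6to4() macro defined above folds the two halves together with an XOR
 * before the value is stored in the IPv4 header.  Standalone example: */
#include <stdio.h>
#include <stdint.h>
#define frag_id_6to4(id) ((id) ^ ((id) >> 16))
int
main (void)
{
  uint32_t ip6_id = 0x12345678;
  uint16_t ip4_id = (uint16_t) frag_id_6to4 (ip6_id);
  printf ("0x%04x\n", ip4_id);   /* prints 0x444c, i.e. 0x5678 ^ 0x1234 */
  return 0;
}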
+ */ +always_inline u16 +ip6_get_port (ip6_header_t * ip6, u8 sender, u16 buffer_len) +{ + u8 l4_protocol; + u16 l4_offset; + u16 frag_offset; + u8 *l4; + + if (ip6_parse (ip6, buffer_len, &l4_protocol, &l4_offset, &frag_offset)) + return 0; + + if (frag_offset && + ip6_frag_hdr_offset (((ip6_frag_hdr_t *) + u8_ptr_add (ip6, frag_offset)))) + return 0; //Can't deal with non-first fragment for now + + l4 = u8_ptr_add (ip6, l4_offset); + if (l4_protocol == IP_PROTOCOL_TCP || l4_protocol == IP_PROTOCOL_UDP) + { + return (sender) ? ((udp_header_t *) (l4))->src_port : ((udp_header_t + *) + (l4))->dst_port; + } + else if (l4_protocol == IP_PROTOCOL_ICMP6) + { + icmp46_header_t *icmp = (icmp46_header_t *) (l4); + if (icmp->type == ICMP6_echo_request) + { + return (sender) ? ((u16 *) (icmp))[2] : -1; + } + else if (icmp->type == ICMP6_echo_reply) + { + return (sender) ? -1 : ((u16 *) (icmp))[2]; + } + } + return 0; +} + +/** + * @brief Convert type and code value from ICMP6 to ICMP4. + * + * @param icmp ICMP header. + * @param inner_ip6 Inner IPv6 header if present, 0 otherwise. + * + * @returns 0 on success, non-zero value otherwise. + */ +static_always_inline int +icmp6_to_icmp_header (icmp46_header_t * icmp, ip6_header_t ** inner_ip6) +{ + *inner_ip6 = NULL; + switch (icmp->type) + { + case ICMP6_echo_request: + icmp->type = ICMP4_echo_request; + break; + case ICMP6_echo_reply: + icmp->type = ICMP4_echo_reply; + break; + case ICMP6_destination_unreachable: + *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8); + + switch (icmp->code) + { + case ICMP6_destination_unreachable_no_route_to_destination: //0 + case ICMP6_destination_unreachable_beyond_scope_of_source_address: //2 + case ICMP6_destination_unreachable_address_unreachable: //3 + icmp->type = ICMP4_destination_unreachable; + icmp->code = + ICMP4_destination_unreachable_destination_unreachable_host; + break; + case ICMP6_destination_unreachable_destination_administratively_prohibited: //1 + icmp->type = + ICMP4_destination_unreachable; + icmp->code = + ICMP4_destination_unreachable_communication_administratively_prohibited; + break; + case ICMP6_destination_unreachable_port_unreachable: + icmp->type = ICMP4_destination_unreachable; + icmp->code = ICMP4_destination_unreachable_port_unreachable; + break; + default: + return -1; + } + break; + case ICMP6_packet_too_big: + *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8); + + icmp->type = ICMP4_destination_unreachable; + icmp->code = 4; + { + u32 advertised_mtu = clib_net_to_host_u32 (*((u32 *) (icmp + 1))); + advertised_mtu -= 20; + //FIXME: = minimum(advertised MTU-20, MTU_of_IPv4_nexthop, (MTU_of_IPv6_nexthop)-20) + ((u16 *) (icmp))[3] = clib_host_to_net_u16 (advertised_mtu); + } + break; + + case ICMP6_time_exceeded: + *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8); + + icmp->type = ICMP4_time_exceeded; + break; + + case ICMP6_parameter_problem: + *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8); + + switch (icmp->code) + { + case ICMP6_parameter_problem_erroneous_header_field: + icmp->type = ICMP4_parameter_problem; + icmp->code = ICMP4_parameter_problem_pointer_indicates_error; + u32 pointer = clib_net_to_host_u32 (*((u32 *) (icmp + 1))); + if (pointer >= 40) + return -1; + + ((u8 *) (icmp + 1))[0] = + icmp6_to_icmp_updater_pointer_table[pointer]; + break; + case ICMP6_parameter_problem_unrecognized_next_header: + icmp->type = ICMP4_destination_unreachable; + icmp->code = ICMP4_destination_unreachable_port_unreachable; + break; + case ICMP6_parameter_problem_unrecognized_option: 
+ default: + return -1; + } + break; + default: + return -1; + break; + } + return 0; +} + +/** + * @brief Translate TOS value from IPv6 to IPv4. + * + * @param ip6 IPv6 header. + * + * @returns IPv4 TOS value. + */ +static_always_inline u8 +ip6_translate_tos (const ip6_header_t * ip6) +{ + return (clib_net_to_host_u32 (ip6->ip_version_traffic_class_and_flow_label) + & 0x0ff00000) >> 20; +} + +/** + * @brief Translate ICMP6 packet to ICMP4. + * + * @param p Buffer to translate. + * @param fn The function to translate outer header. + * @param ctx A context passed in the outer header translate function. + * @param inner_fn The function to translate inner header. + * @param inner_ctx A context passed in the inner header translate function. + * + * @returns 0 on success, non-zero value otherwise. + */ +always_inline int +icmp6_to_icmp (vlib_buffer_t * p, ip6_to_ip4_set_fn_t fn, void *ctx, + ip6_to_ip4_set_fn_t inner_fn, void *inner_ctx) +{ + ip6_header_t *ip6, *inner_ip6; + ip4_header_t *ip4, *inner_ip4; + u32 ip6_pay_len; + icmp46_header_t *icmp; + ip_csum_t csum; + int rv; + + ip6 = vlib_buffer_get_current (p); + ip6_pay_len = clib_net_to_host_u16 (ip6->payload_length); + icmp = (icmp46_header_t *) (ip6 + 1); + ASSERT (ip6_pay_len + sizeof (*ip6) <= p->current_length); + + //No extensions headers allowed here + if (ip6->protocol != IP_PROTOCOL_ICMP6) + return -1; + + //There are no fragmented ICMP messages, so no extension header for now + if (icmp6_to_icmp_header (icmp, &inner_ip6)) + return -1; + + if (inner_ip6) + { + u16 *inner_L4_checksum, inner_l4_offset, inner_frag_offset, + inner_frag_id; + u8 *inner_l4, inner_protocol; + + //We have two headers to translate + // FROM + // [ IPv6 ]<- ext ->[IC][ IPv6 ]<- ext ->[L4 header ... + // Handled cases: + // [ IPv6 ][IC][ IPv6 ][L4 header ... + // [ IPv6 ][IC][ IPv6 ][Fr][L4 header ... + // TO + // [ IPv4][IC][ IPv4][L4 header ... + + if (ip6_parse (inner_ip6, ip6_pay_len - 8, + &inner_protocol, &inner_l4_offset, &inner_frag_offset)) + return -1; + + inner_l4 = u8_ptr_add (inner_ip6, inner_l4_offset); + inner_ip4 = + (ip4_header_t *) u8_ptr_add (inner_l4, -sizeof (*inner_ip4)); + if (inner_frag_offset) + { + ip6_frag_hdr_t *inner_frag = + (ip6_frag_hdr_t *) u8_ptr_add (inner_ip6, inner_frag_offset); + inner_frag_id = frag_id_6to4 (inner_frag->identification); + } + else + { + inner_frag_id = 0; + } + + //Do the translation of the inner packet + if (inner_protocol == IP_PROTOCOL_TCP) + { + inner_L4_checksum = (u16 *) u8_ptr_add (inner_l4, 16); + } + else if (inner_protocol == IP_PROTOCOL_UDP) + { + inner_L4_checksum = (u16 *) u8_ptr_add (inner_l4, 6); + } + else if (inner_protocol == IP_PROTOCOL_ICMP6) + { + icmp46_header_t *inner_icmp = (icmp46_header_t *) inner_l4; + csum = inner_icmp->checksum; + csum = ip_csum_sub_even (csum, *((u16 *) inner_icmp)); + //It cannot be of a different type as ip6_icmp_to_icmp6_in_place succeeded + inner_icmp->type = (inner_icmp->type == ICMP6_echo_request) ? 
+ ICMP4_echo_request : ICMP4_echo_reply; + csum = ip_csum_add_even (csum, *((u16 *) inner_icmp)); + inner_icmp->checksum = ip_csum_fold (csum); + inner_protocol = IP_PROTOCOL_ICMP; //Will be copied to ip6 later + inner_L4_checksum = &inner_icmp->checksum; + } + else + { + return -1; + } + + csum = *inner_L4_checksum; + csum = ip_csum_sub_even (csum, inner_ip6->src_address.as_u64[0]); + csum = ip_csum_sub_even (csum, inner_ip6->src_address.as_u64[1]); + csum = ip_csum_sub_even (csum, inner_ip6->dst_address.as_u64[0]); + csum = ip_csum_sub_even (csum, inner_ip6->dst_address.as_u64[1]); + + if ((rv = inner_fn (inner_ip6, inner_ip4, inner_ctx)) != 0) + return rv; + + inner_ip4->ip_version_and_header_length = + IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS; + inner_ip4->tos = ip6_translate_tos (inner_ip6); + inner_ip4->length = + u16_net_add (inner_ip6->payload_length, + sizeof (*ip4) + sizeof (*ip6) - inner_l4_offset); + inner_ip4->fragment_id = inner_frag_id; + inner_ip4->flags_and_fragment_offset = + clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS); + inner_ip4->ttl = inner_ip6->hop_limit; + inner_ip4->protocol = inner_protocol; + inner_ip4->checksum = ip4_header_checksum (inner_ip4); + + if (inner_ip4->protocol == IP_PROTOCOL_ICMP) + { + //Remove remainings of the pseudo-header in the csum + csum = + ip_csum_sub_even (csum, clib_host_to_net_u16 (IP_PROTOCOL_ICMP6)); + csum = + ip_csum_sub_even (csum, inner_ip4->length - sizeof (*inner_ip4)); + } + else + { + //Update to new pseudo-header + csum = ip_csum_add_even (csum, inner_ip4->src_address.as_u32); + csum = ip_csum_add_even (csum, inner_ip4->dst_address.as_u32); + } + *inner_L4_checksum = ip_csum_fold (csum); + + //Move up icmp header + ip4 = (ip4_header_t *) u8_ptr_add (inner_l4, -2 * sizeof (*ip4) - 8); + clib_memcpy (u8_ptr_add (inner_l4, -sizeof (*ip4) - 8), icmp, 8); + icmp = (icmp46_header_t *) u8_ptr_add (inner_l4, -sizeof (*ip4) - 8); + } + else + { + //Only one header to translate + ip4 = (ip4_header_t *) u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4)); + } + + vlib_buffer_advance (p, (u32) (((u8 *) ip4) - ((u8 *) ip6))); + + if ((rv = fn (ip6, ip4, ctx)) != 0) + return rv; + + ip4->ip_version_and_header_length = + IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS; + ip4->tos = ip6_translate_tos (ip6); + ip4->fragment_id = 0; + ip4->flags_and_fragment_offset = 0; + ip4->ttl = ip6->hop_limit; + ip4->protocol = IP_PROTOCOL_ICMP; + //TODO fix the length depending on offset length + ip4->length = u16_net_add (ip6->payload_length, + (inner_ip6 == + NULL) ? sizeof (*ip4) : (2 * sizeof (*ip4) - + sizeof (*ip6))); + ip4->checksum = ip4_header_checksum (ip4); + + //Recompute ICMP checksum + icmp->checksum = 0; + csum = + ip_incremental_checksum (0, icmp, + clib_net_to_host_u16 (ip4->length) - + sizeof (*ip4)); + icmp->checksum = ~ip_csum_fold (csum); + + return 0; +} + +/** + * @brief Translate IPv6 fragmented packet to IPv4. + * + * @param p Buffer to translate. + * @param fn The function to translate header. + * @param ctx A context passed in the header translate function. + * + * @returns 0 on success, non-zero value otherwise. 
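/* Editorial illustration, not part of the patch: ip6_translate_tos() above reads
 * the traffic-class octet out of the combined IPv6 version/traffic-class/flow-label
 * word, whose layout is version (4 bits) | traffic class (8 bits) | flow label
 * (20 bits); hence the 0x0ff00000 mask and the 20-bit shift.  Standalone example: */
#include <stdio.h>
#include <stdint.h>
int
main (void)
{
  uint8_t tc = 0xab;
  uint32_t vtf = (6u << 28) | ((uint32_t) tc << 20) | 0x12345; /* version 6, flow label 0x12345 */
  uint8_t tos = (vtf & 0x0ff00000) >> 20;
  printf ("0x%02x\n", tos);   /* prints 0xab */
  return 0;
}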
+ */ +always_inline int +ip6_to_ip4_fragmented (vlib_buffer_t * p, ip6_to_ip4_set_fn_t fn, void *ctx) +{ + ip6_header_t *ip6; + ip6_frag_hdr_t *frag; + ip4_header_t *ip4; + u16 frag_id; + u8 frag_more; + u16 frag_offset; + u8 l4_protocol; + u16 l4_offset; + int rv; + + ip6 = vlib_buffer_get_current (p); + + if (ip6_parse + (ip6, p->current_length, &l4_protocol, &l4_offset, &frag_offset)) + return -1; + + frag = (ip6_frag_hdr_t *) u8_ptr_add (ip6, frag_offset); + ip4 = (ip4_header_t *) u8_ptr_add (ip6, l4_offset - sizeof (*ip4)); + vlib_buffer_advance (p, l4_offset - sizeof (*ip4)); + + frag_id = frag_id_6to4 (frag->identification); + frag_more = ip6_frag_hdr_more (frag); + frag_offset = ip6_frag_hdr_offset (frag); + + if ((rv = fn (ip6, ip4, ctx)) != 0) + return rv; + + ip4->ip_version_and_header_length = + IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS; + ip4->tos = ip6_translate_tos (ip6); + ip4->length = u16_net_add (ip6->payload_length, + sizeof (*ip4) - l4_offset + sizeof (*ip6)); + ip4->fragment_id = frag_id; + ip4->flags_and_fragment_offset = + clib_host_to_net_u16 (frag_offset | + (frag_more ? IP4_HEADER_FLAG_MORE_FRAGMENTS : 0)); + ip4->ttl = ip6->hop_limit; + ip4->protocol = + (l4_protocol == IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : l4_protocol; + ip4->checksum = ip4_header_checksum (ip4); + + return 0; +} + +/** + * @brief Translate IPv6 UDP/TCP packet to IPv4. + * + * @param p Buffer to translate. + * @param fn The function to translate header. + * @param ctx A context passed in the header translate function. + * + * @returns 0 on success, non-zero value otherwise. + */ +always_inline int +ip6_to_ip4_tcp_udp (vlib_buffer_t * p, ip6_to_ip4_set_fn_t fn, void *ctx, + u8 udp_checksum) +{ + ip6_header_t *ip6; + u16 *checksum; + ip_csum_t csum; + ip4_header_t *ip4; + u16 fragment_id; + u16 flags; + u16 frag_offset; + u8 l4_protocol; + u16 l4_offset; + int rv; + + ip6 = vlib_buffer_get_current (p); + + if (ip6_parse + (ip6, p->current_length, &l4_protocol, &l4_offset, &frag_offset)) + return -1; + + if (l4_protocol == IP_PROTOCOL_TCP) + { + tcp_header_t *tcp = ip6_next_header (ip6); + checksum = &tcp->checksum; + } + else + { + udp_header_t *udp = ip6_next_header (ip6); + checksum = &udp->checksum; + //UDP checksum is optional over IPv4 + if (!udp_checksum) + goto no_csum; + } + + csum = ip_csum_sub_even (*checksum, ip6->src_address.as_u64[0]); + csum = ip_csum_sub_even (csum, ip6->src_address.as_u64[1]); + csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[0]); + csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[1]); + +no_csum: + ip4 = (ip4_header_t *) u8_ptr_add (ip6, l4_offset - sizeof (*ip4)); + + vlib_buffer_advance (p, l4_offset - sizeof (*ip4)); + + if (PREDICT_FALSE (frag_offset)) + { + //Only the first fragment + ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add (ip6, frag_offset); + fragment_id = frag_id_6to4 (hdr->identification); + flags = clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS); + } + else + { + fragment_id = 0; + flags = 0; + } + + if ((rv = fn (ip6, ip4, ctx)) != 0) + return rv; + + ip4->ip_version_and_header_length = + IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS; + ip4->tos = ip6_translate_tos (ip6); + ip4->length = u16_net_add (ip6->payload_length, + sizeof (*ip4) + sizeof (*ip6) - l4_offset); + ip4->fragment_id = fragment_id; + ip4->flags_and_fragment_offset = flags; + ip4->ttl = ip6->hop_limit; + ip4->protocol = l4_protocol; + ip4->checksum = ip4_header_checksum (ip4); + + //UDP checksum is optional over IPv4 + if (!udp_checksum && l4_protocol == 
IP_PROTOCOL_UDP) + { + *checksum = 0; + } + else + { + csum = ip_csum_add_even (csum, ip4->dst_address.as_u32); + csum = ip_csum_add_even (csum, ip4->src_address.as_u32); + *checksum = ip_csum_fold (csum); + } + + return 0; +} + +#endif /* __included_ip6_to_ip4_h__ */ + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ diff --git a/src/vnet/map/ip4_map.c b/src/vnet/map/ip4_map.c index e39b6f14..6a3bdd51 100644 --- a/src/vnet/map/ip4_map.c +++ b/src/vnet/map/ip4_map.c @@ -19,6 +19,7 @@ #include "map.h" #include "../ip/ip_frag.h" +#include vlib_node_registration_t ip4_map_reass_node; @@ -62,52 +63,6 @@ format_ip4_map_reass_trace (u8 * s, va_list * args) t->cached ? "cached" : "forwarded"); } -/* - * ip4_map_get_port - */ -u16 -ip4_map_get_port (ip4_header_t * ip, map_dir_e dir) -{ - /* Find port information */ - if (PREDICT_TRUE ((ip->protocol == IP_PROTOCOL_TCP) || - (ip->protocol == IP_PROTOCOL_UDP))) - { - udp_header_t *udp = (void *) (ip + 1); - return (dir == MAP_SENDER ? udp->src_port : udp->dst_port); - } - else if (ip->protocol == IP_PROTOCOL_ICMP) - { - /* - * 1) ICMP Echo request or Echo reply - * 2) ICMP Error with inner packet being UDP or TCP - * 3) ICMP Error with inner packet being ICMP Echo request or Echo reply - */ - icmp46_header_t *icmp = (void *) (ip + 1); - if (icmp->type == ICMP4_echo_request || icmp->type == ICMP4_echo_reply) - { - return *((u16 *) (icmp + 1)); - } - else if (clib_net_to_host_u16 (ip->length) >= 56) - { // IP + ICMP + IP + L4 header - ip4_header_t *icmp_ip = (ip4_header_t *) (icmp + 2); - if (PREDICT_TRUE ((icmp_ip->protocol == IP_PROTOCOL_TCP) || - (icmp_ip->protocol == IP_PROTOCOL_UDP))) - { - udp_header_t *udp = (void *) (icmp_ip + 1); - return (dir == MAP_SENDER ? udp->dst_port : udp->src_port); - } - else if (icmp_ip->protocol == IP_PROTOCOL_ICMP) - { - icmp46_header_t *inner_icmp = (void *) (icmp_ip + 1); - if (inner_icmp->type == ICMP4_echo_request - || inner_icmp->type == ICMP4_echo_reply) - return (*((u16 *) (inner_icmp + 1))); - } - } - } - return (0); -} - static_always_inline u16 ip4_map_port_and_security_check (map_domain_t * d, ip4_header_t * ip, u32 * next, u8 * error) @@ -124,7 +79,7 @@ ip4_map_port_and_security_check (map_domain_t * d, ip4_header_t * ip, { return 0; } - port = ip4_map_get_port (ip, MAP_RECEIVER); + port = ip4_get_port (ip, 0); if (port) { /* Verify that port is not among the well-known ports */ @@ -626,9 +581,7 @@ ip4_map_reass (vlib_main_t * vm, cached = 1; } } - else - if ((port0 = - ip4_get_port (ip40, MAP_RECEIVER, p0->current_length)) < 0) + else if ((port0 = ip4_get_port (ip40, 0)) == 0) { // Could not find port. We'll free the reassembly. error0 = MAP_ERROR_BAD_PROTOCOL; diff --git a/src/vnet/map/ip4_map_t.c b/src/vnet/map/ip4_map_t.c index 5f2bcbf9..b89840cc 100644 --- a/src/vnet/map/ip4_map_t.c +++ b/src/vnet/map/ip4_map_t.c @@ -15,6 +15,7 @@ #include "map.h" #include "../ip/ip_frag.h" +#include #define IP4_MAP_T_DUAL_LOOP 1 @@ -63,18 +64,6 @@ typedef CLIB_PACKED (struct { }) ip4_mapt_pseudo_header_t; /* *INDENT-ON* */ -#define frag_id_4to6(id) (id) - -//TODO: Find the right place in memory for this. 
-/* *INDENT-OFF* */ -static u8 icmp_to_icmp6_updater_pointer_table[] = - { 0, 1, 4, 4, ~0, - ~0, ~0, ~0, 7, 6, - ~0, ~0, 8, 8, 8, - 8, 24, 24, 24, 24 - }; -/* *INDENT-ON* */ - static_always_inline int ip4_map_fragment_cache (ip4_header_t * ip4, u16 port) @@ -110,360 +99,41 @@ ip4_map_fragment_get_port (ip4_header_t * ip4) return ret; } - -/* Statelessly translates an ICMP packet into ICMPv6. - * - * Warning: The checksum will need to be recomputed. - * - */ -static_always_inline int -ip4_icmp_to_icmp6_in_place (icmp46_header_t * icmp, u32 icmp_len, - i32 * receiver_port, ip4_header_t ** inner_ip4) +typedef struct { - *inner_ip4 = NULL; - switch (icmp->type) - { - case ICMP4_echo_reply: - *receiver_port = ((u16 *) icmp)[2]; - icmp->type = ICMP6_echo_reply; - break; - case ICMP4_echo_request: - *receiver_port = ((u16 *) icmp)[2]; - icmp->type = ICMP6_echo_request; - break; - case ICMP4_destination_unreachable: - *inner_ip4 = (ip4_header_t *) (((u8 *) icmp) + 8); - *receiver_port = ip4_get_port (*inner_ip4, MAP_SENDER, icmp_len - 8); - - switch (icmp->code) - { - case ICMP4_destination_unreachable_destination_unreachable_net: //0 - case ICMP4_destination_unreachable_destination_unreachable_host: //1 - icmp->type = ICMP6_destination_unreachable; - icmp->code = ICMP6_destination_unreachable_no_route_to_destination; - break; - case ICMP4_destination_unreachable_protocol_unreachable: //2 - icmp->type = ICMP6_parameter_problem; - icmp->code = ICMP6_parameter_problem_unrecognized_next_header; - break; - case ICMP4_destination_unreachable_port_unreachable: //3 - icmp->type = ICMP6_destination_unreachable; - icmp->code = ICMP6_destination_unreachable_port_unreachable; - break; - case ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set: //4 - icmp->type = - ICMP6_packet_too_big; - icmp->code = 0; - { - u32 advertised_mtu = clib_net_to_host_u32 (*((u32 *) (icmp + 1))); - if (advertised_mtu) - advertised_mtu += 20; - else - advertised_mtu = 1000; //FIXME ! 
(RFC 1191 - plateau value) - - //FIXME: = minimum(advertised MTU+20, MTU_of_IPv6_nexthop, (MTU_of_IPv4_nexthop)+20) - *((u32 *) (icmp + 1)) = clib_host_to_net_u32 (advertised_mtu); - } - break; - - case ICMP4_destination_unreachable_source_route_failed: //5 - case ICMP4_destination_unreachable_destination_network_unknown: //6 - case ICMP4_destination_unreachable_destination_host_unknown: //7 - case ICMP4_destination_unreachable_source_host_isolated: //8 - case ICMP4_destination_unreachable_network_unreachable_for_type_of_service: //11 - case ICMP4_destination_unreachable_host_unreachable_for_type_of_service: //12 - icmp->type = - ICMP6_destination_unreachable; - icmp->code = ICMP6_destination_unreachable_no_route_to_destination; - break; - case ICMP4_destination_unreachable_network_administratively_prohibited: //9 - case ICMP4_destination_unreachable_host_administratively_prohibited: //10 - case ICMP4_destination_unreachable_communication_administratively_prohibited: //13 - case ICMP4_destination_unreachable_precedence_cutoff_in_effect: //15 - icmp->type = ICMP6_destination_unreachable; - icmp->code = - ICMP6_destination_unreachable_destination_administratively_prohibited; - break; - case ICMP4_destination_unreachable_host_precedence_violation: //14 - default: - return -1; - } - break; - - case ICMP4_time_exceeded: //11 - *inner_ip4 = (ip4_header_t *) (((u8 *) icmp) + 8); - *receiver_port = ip4_get_port (*inner_ip4, MAP_SENDER, icmp_len - 8); - icmp->type = ICMP6_time_exceeded; - //icmp->code = icmp->code //unchanged - break; + map_domain_t *d; + u16 recv_port; +} icmp_to_icmp6_ctx_t; - case ICMP4_parameter_problem: - *inner_ip4 = (ip4_header_t *) (((u8 *) icmp) + 8); - *receiver_port = ip4_get_port (*inner_ip4, MAP_SENDER, icmp_len - 8); +static int +ip4_to_ip6_set_icmp_cb (ip4_header_t * ip4, ip6_header_t * ip6, void *arg) +{ + icmp_to_icmp6_ctx_t *ctx = arg; - switch (icmp->code) - { - case ICMP4_parameter_problem_pointer_indicates_error: - case ICMP4_parameter_problem_bad_length: - icmp->type = ICMP6_parameter_problem; - icmp->code = ICMP6_parameter_problem_erroneous_header_field; - { - u8 ptr = - icmp_to_icmp6_updater_pointer_table[*((u8 *) (icmp + 1))]; - if (ptr == 0xff) - return -1; - - *((u32 *) (icmp + 1)) = clib_host_to_net_u32 (ptr); - } - break; - default: - //All other codes cause dropping the packet - return -1; - } - break; + ip4_map_t_embedded_address (ctx->d, &ip6->src_address, &ip4->src_address); + ip6->dst_address.as_u64[0] = + map_get_pfx_net (ctx->d, ip4->dst_address.as_u32, ctx->recv_port); + ip6->dst_address.as_u64[1] = + map_get_sfx_net (ctx->d, ip4->dst_address.as_u32, ctx->recv_port); - default: - //All other types cause dropping the packet - return -1; - break; - } return 0; } -static_always_inline void -_ip4_map_t_icmp (map_domain_t * d, vlib_buffer_t * p, u8 * error) +static int +ip4_to_ip6_set_inner_icmp_cb (ip4_header_t * ip4, ip6_header_t * ip6, + void *arg) { - ip4_header_t *ip4, *inner_ip4; - ip6_header_t *ip6, *inner_ip6; - u32 ip_len; - icmp46_header_t *icmp; - i32 recv_port; - ip_csum_t csum; - u16 *inner_L4_checksum = 0; - ip6_frag_hdr_t *inner_frag; - u32 inner_frag_id; - u32 inner_frag_offset; - u8 inner_frag_more; - - ip4 = vlib_buffer_get_current (p); - ip_len = clib_net_to_host_u16 (ip4->length); - ASSERT (ip_len <= p->current_length); - - icmp = (icmp46_header_t *) (ip4 + 1); - if (ip4_icmp_to_icmp6_in_place (icmp, ip_len - sizeof (*ip4), - &recv_port, &inner_ip4)) - { - *error = MAP_ERROR_ICMP; - return; - } - - if (recv_port < 0) - { - // In 
case of 1:1 mapping, we don't care about the port - if (d->ea_bits_len == 0 && d->rules) - { - recv_port = 0; - } - else - { - *error = MAP_ERROR_ICMP; - return; - } - } - - if (inner_ip4) - { - //We have 2 headers to translate. - //We need to make some room in the middle of the packet - - if (PREDICT_FALSE (ip4_is_fragment (inner_ip4))) - { - //Here it starts getting really tricky - //We will add a fragmentation header in the inner packet - - if (!ip4_is_first_fragment (inner_ip4)) - { - //For now we do not handle unless it is the first fragment - //Ideally we should handle the case as we are in slow path already - *error = MAP_ERROR_FRAGMENTED; - return; - } - - vlib_buffer_advance (p, - -2 * (sizeof (*ip6) - sizeof (*ip4)) - - sizeof (*inner_frag)); - ip6 = vlib_buffer_get_current (p); - clib_memcpy (u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4)), ip4, - 20 + 8); - ip4 = - (ip4_header_t *) u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4)); - icmp = (icmp46_header_t *) (ip4 + 1); - - inner_ip6 = - (ip6_header_t *) u8_ptr_add (inner_ip4, - sizeof (*ip4) - sizeof (*ip6) - - sizeof (*inner_frag)); - inner_frag = - (ip6_frag_hdr_t *) u8_ptr_add (inner_ip6, sizeof (*inner_ip6)); - ip6->payload_length = - u16_net_add (ip4->length, - sizeof (*ip6) - 2 * sizeof (*ip4) + - sizeof (*inner_frag)); - inner_frag_id = frag_id_4to6 (inner_ip4->fragment_id); - inner_frag_offset = ip4_get_fragment_offset (inner_ip4); - inner_frag_more = - ! !(inner_ip4->flags_and_fragment_offset & - clib_net_to_host_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS)); - } - else - { - vlib_buffer_advance (p, -2 * (sizeof (*ip6) - sizeof (*ip4))); - ip6 = vlib_buffer_get_current (p); - clib_memcpy (u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4)), ip4, - 20 + 8); - ip4 = - (ip4_header_t *) u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4)); - icmp = (icmp46_header_t *) u8_ptr_add (ip4, sizeof (*ip4)); - inner_ip6 = - (ip6_header_t *) u8_ptr_add (inner_ip4, - sizeof (*ip4) - sizeof (*ip6)); - ip6->payload_length = - u16_net_add (ip4->length, sizeof (*ip6) - 2 * sizeof (*ip4)); - inner_frag = NULL; - } - - if (PREDICT_TRUE (inner_ip4->protocol == IP_PROTOCOL_TCP)) - { - inner_L4_checksum = &((tcp_header_t *) (inner_ip4 + 1))->checksum; - *inner_L4_checksum = - ip_csum_fold (ip_csum_sub_even - (*inner_L4_checksum, - *((u64 *) (&inner_ip4->src_address)))); - } - else if (PREDICT_TRUE (inner_ip4->protocol == IP_PROTOCOL_UDP)) - { - inner_L4_checksum = &((udp_header_t *) (inner_ip4 + 1))->checksum; - if (!*inner_L4_checksum) - { - //The inner packet was first translated, and therefore came from IPv6. - //As the packet was an IPv6 packet, the UDP checksum can't be NULL - *error = MAP_ERROR_ICMP; - return; - } - *inner_L4_checksum = - ip_csum_fold (ip_csum_sub_even - (*inner_L4_checksum, - *((u64 *) (&inner_ip4->src_address)))); - } - else if (inner_ip4->protocol == IP_PROTOCOL_ICMP) - { - //We have an ICMP inside an ICMP - //It needs to be translated, but not for error ICMP messages - icmp46_header_t *inner_icmp = (icmp46_header_t *) (inner_ip4 + 1); - csum = inner_icmp->checksum; - //Only types ICMP4_echo_request and ICMP4_echo_reply are handled by ip4_icmp_to_icmp6_in_place - csum = ip_csum_sub_even (csum, *((u16 *) inner_icmp)); - inner_icmp->type = (inner_icmp->type == ICMP4_echo_request) ? 
- ICMP6_echo_request : ICMP6_echo_reply; - csum = ip_csum_add_even (csum, *((u16 *) inner_icmp)); - csum = - ip_csum_add_even (csum, clib_host_to_net_u16 (IP_PROTOCOL_ICMP6)); - csum = - ip_csum_add_even (csum, inner_ip4->length - sizeof (*inner_ip4)); - inner_icmp->checksum = ip_csum_fold (csum); - inner_L4_checksum = &inner_icmp->checksum; - inner_ip4->protocol = IP_PROTOCOL_ICMP6; - } - else - { - /* To shut up Coverity */ - os_panic (); - } - - //FIXME: Security check with the port found in the inner packet - - csum = *inner_L4_checksum; //Initial checksum of the inner L4 header - //FIXME: Shouldn't we remove ip addresses from there ? - - inner_ip6->ip_version_traffic_class_and_flow_label = - clib_host_to_net_u32 ((6 << 28) + (inner_ip4->tos << 20)); - inner_ip6->payload_length = - u16_net_add (inner_ip4->length, -sizeof (*inner_ip4)); - inner_ip6->hop_limit = inner_ip4->ttl; - inner_ip6->protocol = inner_ip4->protocol; + icmp_to_icmp6_ctx_t *ctx = arg; - //Note that the source address is within the domain - //while the destination address is the one outside the domain - ip4_map_t_embedded_address (d, &inner_ip6->dst_address, - &inner_ip4->dst_address); - inner_ip6->src_address.as_u64[0] = - map_get_pfx_net (d, inner_ip4->src_address.as_u32, recv_port); - inner_ip6->src_address.as_u64[1] = - map_get_sfx_net (d, inner_ip4->src_address.as_u32, recv_port); + //Note that the source address is within the domain + //while the destination address is the one outside the domain + ip4_map_t_embedded_address (ctx->d, &ip6->dst_address, &ip4->dst_address); + ip6->src_address.as_u64[0] = + map_get_pfx_net (ctx->d, ip4->src_address.as_u32, ctx->recv_port); + ip6->src_address.as_u64[1] = + map_get_sfx_net (ctx->d, ip4->src_address.as_u32, ctx->recv_port); - if (PREDICT_FALSE (inner_frag != NULL)) - { - inner_frag->next_hdr = inner_ip6->protocol; - inner_frag->identification = inner_frag_id; - inner_frag->rsv = 0; - inner_frag->fragment_offset_and_more = - ip6_frag_hdr_offset_and_more (inner_frag_offset, inner_frag_more); - inner_ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION; - inner_ip6->payload_length = - clib_host_to_net_u16 (clib_net_to_host_u16 - (inner_ip6->payload_length) + - sizeof (*inner_frag)); - } - - csum = ip_csum_add_even (csum, inner_ip6->src_address.as_u64[0]); - csum = ip_csum_add_even (csum, inner_ip6->src_address.as_u64[1]); - csum = ip_csum_add_even (csum, inner_ip6->dst_address.as_u64[0]); - csum = ip_csum_add_even (csum, inner_ip6->dst_address.as_u64[1]); - *inner_L4_checksum = ip_csum_fold (csum); - - } - else - { - vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6)); - ip6 = vlib_buffer_get_current (p); - ip6->payload_length = - clib_host_to_net_u16 (clib_net_to_host_u16 (ip4->length) - - sizeof (*ip4)); - } - - //Translate outer IPv6 - ip6->ip_version_traffic_class_and_flow_label = - clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20)); - - ip6->hop_limit = ip4->ttl; - ip6->protocol = IP_PROTOCOL_ICMP6; - - ip4_map_t_embedded_address (d, &ip6->src_address, &ip4->src_address); - ip6->dst_address.as_u64[0] = - map_get_pfx_net (d, ip4->dst_address.as_u32, recv_port); - ip6->dst_address.as_u64[1] = - map_get_sfx_net (d, ip4->dst_address.as_u32, recv_port); - - //Truncate when the packet exceeds the minimal IPv6 MTU - if (p->current_length > 1280) - { - ip6->payload_length = clib_host_to_net_u16 (1280 - sizeof (*ip6)); - p->current_length = 1280; //Looks too simple to be correct... 
- } - - //TODO: We could do an easy diff-checksum for echo requests/replies - //Recompute ICMP checksum - icmp->checksum = 0; - csum = ip_csum_with_carry (0, ip6->payload_length); - csum = ip_csum_with_carry (csum, clib_host_to_net_u16 (ip6->protocol)); - csum = ip_csum_with_carry (csum, ip6->src_address.as_u64[0]); - csum = ip_csum_with_carry (csum, ip6->src_address.as_u64[1]); - csum = ip_csum_with_carry (csum, ip6->dst_address.as_u64[0]); - csum = ip_csum_with_carry (csum, ip6->dst_address.as_u64[1]); - csum = - ip_incremental_checksum (csum, icmp, - clib_net_to_host_u16 (ip6->payload_length)); - icmp->checksum = ~ip_csum_fold (csum); + return 0; } static uword @@ -491,6 +161,8 @@ ip4_map_t_icmp (vlib_main_t * vm, u8 error0; map_domain_t *d0; u16 len0; + icmp_to_icmp6_ctx_t ctx0; + ip4_header_t *ip40; next0 = IP4_MAPT_ICMP_NEXT_IP6_LOOKUP; pi0 = to_next[0] = from[0]; @@ -508,7 +180,27 @@ ip4_map_t_icmp (vlib_main_t * vm, d0 = pool_elt_at_index (map_main.domains, vnet_buffer (p0)->map_t.map_domain_index); - _ip4_map_t_icmp (d0, p0, &error0); + + ip40 = vlib_buffer_get_current (p0); + ctx0.recv_port = ip4_get_port (ip40, 1); + ctx0.d = d0; + if (ctx0.recv_port == 0) + { + // In case of 1:1 mapping, we don't care about the port + if (!(d0->ea_bits_len == 0 && d0->rules)) + { + error0 = MAP_ERROR_ICMP; + goto err0; + } + } + + if (icmp_to_icmp6 + (p0, ip4_to_ip6_set_icmp_cb, &ctx0, + ip4_to_ip6_set_inner_icmp_cb, &ctx0)) + { + error0 = MAP_ERROR_ICMP; + goto err0; + } if (vnet_buffer (p0)->map_t.mtu < p0->current_length) { @@ -517,12 +209,14 @@ ip4_map_t_icmp (vlib_main_t * vm, vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP; next0 = IP4_MAPT_ICMP_NEXT_IP6_FRAG; } + err0: if (PREDICT_TRUE (error0 == MAP_ERROR_NONE)) { vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, thread_index, - vnet_buffer (p0)->map_t. 
- map_domain_index, 1, len0); + vnet_buffer (p0)-> + map_t.map_domain_index, 1, + len0); } else { @@ -538,6 +232,19 @@ ip4_map_t_icmp (vlib_main_t * vm, return frame->n_vectors; } +static int +ip4_to_ip6_set_cb (ip4_header_t * ip4, ip6_header_t * ip6, void *ctx) +{ + ip4_mapt_pseudo_header_t *pheader = ctx; + + ip6->dst_address.as_u64[0] = pheader->daddr.as_u64[0]; + ip6->dst_address.as_u64[1] = pheader->daddr.as_u64[1]; + ip6->src_address.as_u64[0] = pheader->saddr.as_u64[0]; + ip6->src_address.as_u64[1] = pheader->saddr.as_u64[1]; + + return 0; +} + static uword ip4_map_t_fragmented (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) @@ -546,6 +253,8 @@ ip4_map_t_fragmented (vlib_main_t * vm, from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; next_index = node->cached_next_index; + vlib_node_runtime_t *error_node = + vlib_node_get_runtime (vm, ip4_map_t_fragmented_node.index); while (n_left_from > 0) { @@ -555,9 +264,6 @@ ip4_map_t_fragmented (vlib_main_t * vm, { u32 pi0; vlib_buffer_t *p0; - ip4_header_t *ip40; - ip6_header_t *ip60; - ip6_frag_hdr_t *frag0; ip4_mapt_pseudo_header_t *pheader0; ip4_mapt_fragmented_next_t next0; @@ -574,50 +280,21 @@ ip4_map_t_fragmented (vlib_main_t * vm, pheader0 = vlib_buffer_get_current (p0); vlib_buffer_advance (p0, sizeof (*pheader0)); - //Accessing ip4 header - ip40 = vlib_buffer_get_current (p0); - frag0 = - (ip6_frag_hdr_t *) u8_ptr_add (ip40, - sizeof (*ip40) - sizeof (*frag0)); - ip60 = - (ip6_header_t *) u8_ptr_add (ip40, - sizeof (*ip40) - sizeof (*frag0) - - sizeof (*ip60)); - vlib_buffer_advance (p0, - sizeof (*ip40) - sizeof (*ip60) - - sizeof (*frag0)); - - //We know that the protocol was one of ICMP, TCP or UDP - //because the first fragment was found and cached - frag0->next_hdr = - (ip40->protocol == - IP_PROTOCOL_ICMP) ? 
IP_PROTOCOL_ICMP6 : ip40->protocol; - frag0->identification = frag_id_4to6 (ip40->fragment_id); - frag0->rsv = 0; - frag0->fragment_offset_and_more = - ip6_frag_hdr_offset_and_more (ip4_get_fragment_offset (ip40), - clib_net_to_host_u16 - (ip40->flags_and_fragment_offset) & - IP4_HEADER_FLAG_MORE_FRAGMENTS); - - ip60->ip_version_traffic_class_and_flow_label = - clib_host_to_net_u32 ((6 << 28) + (ip40->tos << 20)); - ip60->payload_length = - clib_host_to_net_u16 (clib_net_to_host_u16 (ip40->length) - - sizeof (*ip40) + sizeof (*frag0)); - ip60->hop_limit = ip40->ttl; - ip60->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION; - ip60->dst_address.as_u64[0] = pheader0->daddr.as_u64[0]; - ip60->dst_address.as_u64[1] = pheader0->daddr.as_u64[1]; - ip60->src_address.as_u64[0] = pheader0->saddr.as_u64[0]; - ip60->src_address.as_u64[1] = pheader0->saddr.as_u64[1]; - - if (vnet_buffer (p0)->map_t.mtu < p0->current_length) + if (ip4_to_ip6_fragmented (p0, ip4_to_ip6_set_cb, pheader0)) { - vnet_buffer (p0)->ip_frag.header_offset = 0; - vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; - vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP; - next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG; + p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED]; + next0 = IP4_MAPT_FRAGMENTED_NEXT_DROP; + } + else + { + if (vnet_buffer (p0)->map_t.mtu < p0->current_length) + { + vnet_buffer (p0)->ip_frag.header_offset = 0; + vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; + vnet_buffer (p0)->ip_frag.next_index = + IP6_FRAG_NEXT_IP6_LOOKUP; + next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG; + } } vlib_validate_buffer_enqueue_x1 (vm, node, next_index, @@ -637,6 +314,9 @@ ip4_map_t_tcp_udp (vlib_main_t * vm, from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; next_index = node->cached_next_index; + vlib_node_runtime_t *error_node = + vlib_node_get_runtime (vm, ip4_map_t_tcp_udp_node.index); + while (n_left_from > 0) { @@ -647,12 +327,6 @@ ip4_map_t_tcp_udp (vlib_main_t * vm, { u32 pi0, pi1; vlib_buffer_t *p0, *p1; - ip4_header_t *ip40, *ip41; - ip6_header_t *ip60, *ip61; - ip_csum_t csum0, csum1; - u16 *checksum0, *checksum1; - ip6_frag_hdr_t *frag0, *frag1; - u32 frag_id0, frag_id1; ip4_mapt_pseudo_header_t *pheader0, *pheader1; ip4_mapt_tcp_udp_next_t next0, next1; @@ -674,183 +348,40 @@ ip4_map_t_tcp_udp (vlib_main_t * vm, vlib_buffer_advance (p0, sizeof (*pheader0)); vlib_buffer_advance (p1, sizeof (*pheader1)); - //Accessing ip4 header - ip40 = vlib_buffer_get_current (p0); - ip41 = vlib_buffer_get_current (p1); - checksum0 = - (u16 *) u8_ptr_add (ip40, - vnet_buffer (p0)->map_t.checksum_offset); - checksum1 = - (u16 *) u8_ptr_add (ip41, - vnet_buffer (p1)->map_t.checksum_offset); - - //UDP checksum is optional over IPv4 but mandatory for IPv6 - //We do not check udp->length sanity but use our safe computed value instead - if (PREDICT_FALSE - (!*checksum0 && ip40->protocol == IP_PROTOCOL_UDP)) + if (ip4_to_ip6_tcp_udp (p0, ip4_to_ip6_set_cb, pheader0)) { - u16 udp_len = - clib_host_to_net_u16 (ip40->length) - sizeof (*ip40); - udp_header_t *udp = - (udp_header_t *) u8_ptr_add (ip40, sizeof (*ip40)); - ip_csum_t csum; - csum = ip_incremental_checksum (0, udp, udp_len); - csum = - ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len)); - csum = - ip_csum_with_carry (csum, - clib_host_to_net_u16 (IP_PROTOCOL_UDP)); - csum = - ip_csum_with_carry (csum, *((u64 *) (&ip40->src_address))); - *checksum0 = ~ip_csum_fold (csum); - } - if (PREDICT_FALSE - (!*checksum1 && ip41->protocol 
== IP_PROTOCOL_UDP)) - { - u16 udp_len = - clib_host_to_net_u16 (ip41->length) - sizeof (*ip40); - udp_header_t *udp = - (udp_header_t *) u8_ptr_add (ip41, sizeof (*ip40)); - ip_csum_t csum; - csum = ip_incremental_checksum (0, udp, udp_len); - csum = - ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len)); - csum = - ip_csum_with_carry (csum, - clib_host_to_net_u16 (IP_PROTOCOL_UDP)); - csum = - ip_csum_with_carry (csum, *((u64 *) (&ip41->src_address))); - *checksum1 = ~ip_csum_fold (csum); - } - - csum0 = ip_csum_sub_even (*checksum0, ip40->src_address.as_u32); - csum1 = ip_csum_sub_even (*checksum1, ip41->src_address.as_u32); - csum0 = ip_csum_sub_even (csum0, ip40->dst_address.as_u32); - csum1 = ip_csum_sub_even (csum1, ip41->dst_address.as_u32); - - // Deal with fragmented packets - if (PREDICT_FALSE (ip40->flags_and_fragment_offset & - clib_host_to_net_u16 - (IP4_HEADER_FLAG_MORE_FRAGMENTS))) - { - ip60 = - (ip6_header_t *) u8_ptr_add (ip40, - sizeof (*ip40) - sizeof (*ip60) - - sizeof (*frag0)); - frag0 = - (ip6_frag_hdr_t *) u8_ptr_add (ip40, - sizeof (*ip40) - - sizeof (*frag0)); - frag_id0 = frag_id_4to6 (ip40->fragment_id); - vlib_buffer_advance (p0, - sizeof (*ip40) - sizeof (*ip60) - - sizeof (*frag0)); + p0->error = error_node->errors[MAP_ERROR_UNKNOWN]; + next0 = IP4_MAPT_TCP_UDP_NEXT_DROP; } else { - ip60 = - (ip6_header_t *) (((u8 *) ip40) + sizeof (*ip40) - - sizeof (*ip60)); - vlib_buffer_advance (p0, sizeof (*ip40) - sizeof (*ip60)); - frag0 = NULL; + if (vnet_buffer (p0)->map_t.mtu < p0->current_length) + { + //Send to fragmentation node if necessary + vnet_buffer (p0)->ip_frag.header_offset = 0; + vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; + vnet_buffer (p0)->ip_frag.next_index = + IP6_FRAG_NEXT_IP6_LOOKUP; + next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG; + } } - if (PREDICT_FALSE (ip41->flags_and_fragment_offset & - clib_host_to_net_u16 - (IP4_HEADER_FLAG_MORE_FRAGMENTS))) + if (ip4_to_ip6_tcp_udp (p1, ip4_to_ip6_set_cb, pheader1)) { - ip61 = - (ip6_header_t *) u8_ptr_add (ip41, - sizeof (*ip40) - sizeof (*ip60) - - sizeof (*frag0)); - frag1 = - (ip6_frag_hdr_t *) u8_ptr_add (ip41, - sizeof (*ip40) - - sizeof (*frag0)); - frag_id1 = frag_id_4to6 (ip41->fragment_id); - vlib_buffer_advance (p1, - sizeof (*ip40) - sizeof (*ip60) - - sizeof (*frag0)); + p1->error = error_node->errors[MAP_ERROR_UNKNOWN]; + next1 = IP4_MAPT_TCP_UDP_NEXT_DROP; } else { - ip61 = - (ip6_header_t *) (((u8 *) ip41) + sizeof (*ip40) - - sizeof (*ip60)); - vlib_buffer_advance (p1, sizeof (*ip40) - sizeof (*ip60)); - frag1 = NULL; - } - - ip60->ip_version_traffic_class_and_flow_label = - clib_host_to_net_u32 ((6 << 28) + (ip40->tos << 20)); - ip61->ip_version_traffic_class_and_flow_label = - clib_host_to_net_u32 ((6 << 28) + (ip41->tos << 20)); - ip60->payload_length = u16_net_add (ip40->length, -sizeof (*ip40)); - ip61->payload_length = u16_net_add (ip41->length, -sizeof (*ip40)); - ip60->hop_limit = ip40->ttl; - ip61->hop_limit = ip41->ttl; - ip60->protocol = ip40->protocol; - ip61->protocol = ip41->protocol; - - if (PREDICT_FALSE (frag0 != NULL)) - { - frag0->next_hdr = ip60->protocol; - frag0->identification = frag_id0; - frag0->rsv = 0; - frag0->fragment_offset_and_more = - ip6_frag_hdr_offset_and_more (0, 1); - ip60->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION; - ip60->payload_length = - u16_net_add (ip60->payload_length, sizeof (*frag0)); - } - - if (PREDICT_FALSE (frag1 != NULL)) - { - frag1->next_hdr = ip61->protocol; - frag1->identification = frag_id1; - frag1->rsv = 0; - 
frag1->fragment_offset_and_more = - ip6_frag_hdr_offset_and_more (0, 1); - ip61->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION; - ip61->payload_length = - u16_net_add (ip61->payload_length, sizeof (*frag0)); - } - - //Finally copying the address - ip60->dst_address.as_u64[0] = pheader0->daddr.as_u64[0]; - ip61->dst_address.as_u64[0] = pheader1->daddr.as_u64[0]; - ip60->dst_address.as_u64[1] = pheader0->daddr.as_u64[1]; - ip61->dst_address.as_u64[1] = pheader1->daddr.as_u64[1]; - ip60->src_address.as_u64[0] = pheader0->saddr.as_u64[0]; - ip61->src_address.as_u64[0] = pheader1->saddr.as_u64[0]; - ip60->src_address.as_u64[1] = pheader0->saddr.as_u64[1]; - ip61->src_address.as_u64[1] = pheader1->saddr.as_u64[1]; - - csum0 = ip_csum_add_even (csum0, ip60->src_address.as_u64[0]); - csum1 = ip_csum_add_even (csum1, ip61->src_address.as_u64[0]); - csum0 = ip_csum_add_even (csum0, ip60->src_address.as_u64[1]); - csum1 = ip_csum_add_even (csum1, ip61->src_address.as_u64[1]); - csum0 = ip_csum_add_even (csum0, ip60->dst_address.as_u64[0]); - csum1 = ip_csum_add_even (csum1, ip61->dst_address.as_u64[0]); - csum0 = ip_csum_add_even (csum0, ip60->dst_address.as_u64[1]); - csum1 = ip_csum_add_even (csum1, ip61->dst_address.as_u64[1]); - *checksum0 = ip_csum_fold (csum0); - *checksum1 = ip_csum_fold (csum1); - - if (vnet_buffer (p0)->map_t.mtu < p0->current_length) - { - vnet_buffer (p0)->ip_frag.header_offset = 0; - vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; - vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP; - next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG; - } - - if (vnet_buffer (p1)->map_t.mtu < p1->current_length) - { - vnet_buffer (p1)->ip_frag.header_offset = 0; - vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu; - vnet_buffer (p1)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP; - next1 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG; + if (vnet_buffer (p1)->map_t.mtu < p1->current_length) + { + //Send to fragmentation node if necessary + vnet_buffer (p1)->ip_frag.header_offset = 0; + vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu; + vnet_buffer (p1)->ip_frag.next_index = + IP6_FRAG_NEXT_IP6_LOOKUP; + next1 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG; + } } vlib_validate_buffer_enqueue_x2 (vm, node, next_index, @@ -863,12 +394,6 @@ ip4_map_t_tcp_udp (vlib_main_t * vm, { u32 pi0; vlib_buffer_t *p0; - ip4_header_t *ip40; - ip6_header_t *ip60; - ip_csum_t csum0; - u16 *checksum0; - ip6_frag_hdr_t *frag0; - u32 frag_id0; ip4_mapt_pseudo_header_t *pheader0; ip4_mapt_tcp_udp_next_t next0; @@ -885,102 +410,23 @@ ip4_map_t_tcp_udp (vlib_main_t * vm, pheader0 = vlib_buffer_get_current (p0); vlib_buffer_advance (p0, sizeof (*pheader0)); - //Accessing ip4 header - ip40 = vlib_buffer_get_current (p0); - checksum0 = - (u16 *) u8_ptr_add (ip40, - vnet_buffer (p0)->map_t.checksum_offset); - - //UDP checksum is optional over IPv4 but mandatory for IPv6 - //We do not check udp->length sanity but use our safe computed value instead - if (PREDICT_FALSE - (!*checksum0 && ip40->protocol == IP_PROTOCOL_UDP)) - { - u16 udp_len = - clib_host_to_net_u16 (ip40->length) - sizeof (*ip40); - udp_header_t *udp = - (udp_header_t *) u8_ptr_add (ip40, sizeof (*ip40)); - ip_csum_t csum; - csum = ip_incremental_checksum (0, udp, udp_len); - csum = - ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len)); - csum = - ip_csum_with_carry (csum, - clib_host_to_net_u16 (IP_PROTOCOL_UDP)); - csum = - ip_csum_with_carry (csum, *((u64 *) (&ip40->src_address))); - *checksum0 = ~ip_csum_fold (csum); - } - - csum0 = 
ip_csum_sub_even (*checksum0, ip40->src_address.as_u32); - csum0 = ip_csum_sub_even (csum0, ip40->dst_address.as_u32); - - // Deal with fragmented packets - if (PREDICT_FALSE (ip40->flags_and_fragment_offset & - clib_host_to_net_u16 - (IP4_HEADER_FLAG_MORE_FRAGMENTS))) + if (ip4_to_ip6_tcp_udp (p0, ip4_to_ip6_set_cb, pheader0)) { - ip60 = - (ip6_header_t *) u8_ptr_add (ip40, - sizeof (*ip40) - sizeof (*ip60) - - sizeof (*frag0)); - frag0 = - (ip6_frag_hdr_t *) u8_ptr_add (ip40, - sizeof (*ip40) - - sizeof (*frag0)); - frag_id0 = frag_id_4to6 (ip40->fragment_id); - vlib_buffer_advance (p0, - sizeof (*ip40) - sizeof (*ip60) - - sizeof (*frag0)); + p0->error = error_node->errors[MAP_ERROR_UNKNOWN]; + next0 = IP4_MAPT_TCP_UDP_NEXT_DROP; } else { - ip60 = - (ip6_header_t *) (((u8 *) ip40) + sizeof (*ip40) - - sizeof (*ip60)); - vlib_buffer_advance (p0, sizeof (*ip40) - sizeof (*ip60)); - frag0 = NULL; + if (vnet_buffer (p0)->map_t.mtu < p0->current_length) + { + //Send to fragmentation node if necessary + vnet_buffer (p0)->ip_frag.header_offset = 0; + vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; + vnet_buffer (p0)->ip_frag.next_index = + IP6_FRAG_NEXT_IP6_LOOKUP; + next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG; + } } - - ip60->ip_version_traffic_class_and_flow_label = - clib_host_to_net_u32 ((6 << 28) + (ip40->tos << 20)); - ip60->payload_length = u16_net_add (ip40->length, -sizeof (*ip40)); - ip60->hop_limit = ip40->ttl; - ip60->protocol = ip40->protocol; - - if (PREDICT_FALSE (frag0 != NULL)) - { - frag0->next_hdr = ip60->protocol; - frag0->identification = frag_id0; - frag0->rsv = 0; - frag0->fragment_offset_and_more = - ip6_frag_hdr_offset_and_more (0, 1); - ip60->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION; - ip60->payload_length = - u16_net_add (ip60->payload_length, sizeof (*frag0)); - } - - //Finally copying the address - ip60->dst_address.as_u64[0] = pheader0->daddr.as_u64[0]; - ip60->dst_address.as_u64[1] = pheader0->daddr.as_u64[1]; - ip60->src_address.as_u64[0] = pheader0->saddr.as_u64[0]; - ip60->src_address.as_u64[1] = pheader0->saddr.as_u64[1]; - - csum0 = ip_csum_add_even (csum0, ip60->src_address.as_u64[0]); - csum0 = ip_csum_add_even (csum0, ip60->src_address.as_u64[1]); - csum0 = ip_csum_add_even (csum0, ip60->dst_address.as_u64[0]); - csum0 = ip_csum_add_even (csum0, ip60->dst_address.as_u64[1]); - *checksum0 = ip_csum_fold (csum0); - - if (vnet_buffer (p0)->map_t.mtu < p0->current_length) - { - //Send to fragmentation node if necessary - vnet_buffer (p0)->ip_frag.header_offset = 0; - vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; - vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP; - next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG; - } - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, pi0, next0); @@ -1159,10 +605,10 @@ ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, thread_index, - vnet_buffer (p0)->map_t. - map_domain_index, 1, - clib_net_to_host_u16 (ip40-> - length)); + vnet_buffer (p0)-> + map_t.map_domain_index, 1, + clib_net_to_host_u16 + (ip40->length)); } if (PREDICT_TRUE @@ -1170,10 +616,10 @@ ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, thread_index, - vnet_buffer (p1)->map_t. 
- map_domain_index, 1, - clib_net_to_host_u16 (ip41-> - length)); + vnet_buffer (p1)-> + map_t.map_domain_index, 1, + clib_net_to_host_u16 + (ip41->length)); } next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0; @@ -1253,10 +699,10 @@ ip4_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_TX, thread_index, - vnet_buffer (p0)->map_t. - map_domain_index, 1, - clib_net_to_host_u16 (ip40-> - length)); + vnet_buffer (p0)-> + map_t.map_domain_index, 1, + clib_net_to_host_u16 + (ip40->length)); } next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0; diff --git a/src/vnet/map/ip6_map.c b/src/vnet/map/ip6_map.c index 63ada962..720d13c2 100644 --- a/src/vnet/map/ip6_map.c +++ b/src/vnet/map/ip6_map.c @@ -15,6 +15,8 @@ #include "map.h" #include "../ip/ip_frag.h" +#include +#include enum ip6_map_next_e { @@ -125,7 +127,7 @@ ip6_map_security_check (map_domain_t * d, ip4_header_t * ip4, { if (!ip4_is_fragment (ip4)) { - u16 port = ip4_map_get_port (ip4, MAP_SENDER); + u16 port = ip4_get_port (ip4, 1); if (port) { if (mm->sec_check) @@ -243,8 +245,9 @@ ip6_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { d0 = ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX], - (ip4_address_t *) & ip40->src_address. - as_u32, &map_domain_index0, &error0); + (ip4_address_t *) & ip40-> + src_address.as_u32, &map_domain_index0, + &error0); } else if (ip60->protocol == IP_PROTOCOL_ICMP6 && clib_net_to_host_u16 (ip60->payload_length) > @@ -270,8 +273,9 @@ ip6_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { d1 = ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX], - (ip4_address_t *) & ip41->src_address. - as_u32, &map_domain_index1, &error1); + (ip4_address_t *) & ip41-> + src_address.as_u32, &map_domain_index1, + &error1); } else if (ip61->protocol == IP_PROTOCOL_ICMP6 && clib_net_to_host_u16 (ip61->payload_length) > @@ -454,8 +458,9 @@ ip6_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { d0 = ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX], - (ip4_address_t *) & ip40->src_address. - as_u32, &map_domain_index0, &error0); + (ip4_address_t *) & ip40-> + src_address.as_u32, &map_domain_index0, + &error0); } else if (ip60->protocol == IP_PROTOCOL_ICMP6 && clib_net_to_host_u16 (ip60->payload_length) > @@ -891,9 +896,7 @@ ip6_map_ip4_reass (vlib_main_t * vm, cached = 1; } } - else - if ((port0 = - ip4_get_port (ip40, MAP_SENDER, p0->current_length)) < 0) + else if ((port0 = ip4_get_port (ip40, 1)) == 0) { // Could not find port from first fragment. Stop reassembling. 
error0 = MAP_ERROR_BAD_PROTOCOL; diff --git a/src/vnet/map/ip6_map_t.c b/src/vnet/map/ip6_map_t.c index 99151678..b173bb2a 100644 --- a/src/vnet/map/ip6_map_t.c +++ b/src/vnet/map/ip6_map_t.c @@ -15,6 +15,8 @@ #include "map.h" #include "../ip/ip_frag.h" +#include +#include #define IP6_MAP_T_DUAL_LOOP @@ -94,347 +96,55 @@ ip6_map_fragment_get (ip6_header_t * ip6, ip6_frag_hdr_t * frag, return ret; } -static_always_inline u8 -ip6_translate_tos (const ip6_header_t * ip6) +typedef struct { -#ifdef IP6_MAP_T_OVERRIDE_TOS - return IP6_MAP_T_OVERRIDE_TOS; -#else - return (clib_net_to_host_u32 (ip6->ip_version_traffic_class_and_flow_label) - & 0x0ff00000) >> 20; -#endif -} - -//TODO: Find right place in memory for that -/* *INDENT-OFF* */ -static u8 icmp6_to_icmp_updater_pointer_table[] = - { 0, 1, ~0, ~0, - 2, 2, 9, 8, - 12, 12, 12, 12, - 12, 12, 12, 12, - 12, 12, 12, 12, - 12, 12, 12, 12, - 24, 24, 24, 24, - 24, 24, 24, 24, - 24, 24, 24, 24, - 24, 24, 24, 24 - }; -/* *INDENT-ON* */ - -static_always_inline int -ip6_icmp_to_icmp6_in_place (icmp46_header_t * icmp, u32 icmp_len, - i32 * sender_port, ip6_header_t ** inner_ip6) -{ - *inner_ip6 = NULL; - switch (icmp->type) - { - case ICMP6_echo_request: - *sender_port = ((u16 *) icmp)[2]; - icmp->type = ICMP4_echo_request; - break; - case ICMP6_echo_reply: - *sender_port = ((u16 *) icmp)[2]; - icmp->type = ICMP4_echo_reply; - break; - case ICMP6_destination_unreachable: - *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8); - *sender_port = ip6_get_port (*inner_ip6, MAP_RECEIVER, icmp_len); - - switch (icmp->code) - { - case ICMP6_destination_unreachable_no_route_to_destination: //0 - case ICMP6_destination_unreachable_beyond_scope_of_source_address: //2 - case ICMP6_destination_unreachable_address_unreachable: //3 - icmp->type = ICMP4_destination_unreachable; - icmp->code = - ICMP4_destination_unreachable_destination_unreachable_host; - break; - case ICMP6_destination_unreachable_destination_administratively_prohibited: //1 - icmp->type = - ICMP4_destination_unreachable; - icmp->code = - ICMP4_destination_unreachable_communication_administratively_prohibited; - break; - case ICMP6_destination_unreachable_port_unreachable: - icmp->type = ICMP4_destination_unreachable; - icmp->code = ICMP4_destination_unreachable_port_unreachable; - break; - default: - return -1; - } - break; - case ICMP6_packet_too_big: - *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8); - *sender_port = ip6_get_port (*inner_ip6, MAP_RECEIVER, icmp_len); - - icmp->type = ICMP4_destination_unreachable; - icmp->code = 4; - { - u32 advertised_mtu = clib_net_to_host_u32 (*((u32 *) (icmp + 1))); - advertised_mtu -= 20; - //FIXME: = minimum(advertised MTU-20, MTU_of_IPv4_nexthop, (MTU_of_IPv6_nexthop)-20) - ((u16 *) (icmp))[3] = clib_host_to_net_u16 (advertised_mtu); - } - break; - - case ICMP6_time_exceeded: - *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8); - *sender_port = ip6_get_port (*inner_ip6, MAP_RECEIVER, icmp_len); - - icmp->type = ICMP4_time_exceeded; - break; - - case ICMP6_parameter_problem: - *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8); - *sender_port = ip6_get_port (*inner_ip6, MAP_RECEIVER, icmp_len); - - switch (icmp->code) - { - case ICMP6_parameter_problem_erroneous_header_field: - icmp->type = ICMP4_parameter_problem; - icmp->code = ICMP4_parameter_problem_pointer_indicates_error; - u32 pointer = clib_net_to_host_u32 (*((u32 *) (icmp + 1))); - if (pointer >= 40) - return -1; - - ((u8 *) (icmp + 1))[0] = - icmp6_to_icmp_updater_pointer_table[pointer]; - break; 
- case ICMP6_parameter_problem_unrecognized_next_header: - icmp->type = ICMP4_destination_unreachable; - icmp->code = ICMP4_destination_unreachable_port_unreachable; - break; - case ICMP6_parameter_problem_unrecognized_option: - default: - return -1; - } - break; - default: - return -1; - break; - } - return 0; -} + map_domain_t *d; + u16 sender_port; +} icmp6_to_icmp_ctx_t; -static_always_inline void -_ip6_map_t_icmp (map_domain_t * d, vlib_buffer_t * p, u8 * error) +static int +ip6_to_ip4_set_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *arg) { - ip6_header_t *ip6, *inner_ip6; - ip4_header_t *ip4, *inner_ip4; - u32 ip6_pay_len; - icmp46_header_t *icmp; - i32 sender_port; - ip_csum_t csum; - u32 ip4_sadr, inner_ip4_dadr; - - ip6 = vlib_buffer_get_current (p); - ip6_pay_len = clib_net_to_host_u16 (ip6->payload_length); - icmp = (icmp46_header_t *) (ip6 + 1); - ASSERT (ip6_pay_len + sizeof (*ip6) <= p->current_length); - - if (ip6->protocol != IP_PROTOCOL_ICMP6) - { - //No extensions headers allowed here - //TODO: SR header - *error = MAP_ERROR_MALFORMED; - return; - } - - //There are no fragmented ICMP messages, so no extension header for now - - if (ip6_icmp_to_icmp6_in_place - (icmp, ip6_pay_len, &sender_port, &inner_ip6)) - { - //TODO: In case of 1:1 mapping it is not necessary to have the sender port - *error = MAP_ERROR_ICMP; - return; - } - - if (sender_port < 0) - { - // In case of 1:1 mapping, we don't care about the port - if (d->ea_bits_len == 0 && d->rules) - { - sender_port = 0; - } - else - { - *error = MAP_ERROR_ICMP; - return; - } - } + icmp6_to_icmp_ctx_t *ctx = arg; + u32 ip4_sadr; //Security check //Note that this prevents an intermediate IPv6 router from answering the request ip4_sadr = map_get_ip4 (&ip6->src_address); - if (ip6->src_address.as_u64[0] != map_get_pfx_net (d, ip4_sadr, sender_port) - || ip6->src_address.as_u64[1] != map_get_sfx_net (d, ip4_sadr, - sender_port)) - { - *error = MAP_ERROR_SEC_CHECK; - return; - } - - if (inner_ip6) - { - u16 *inner_L4_checksum, inner_l4_offset, inner_frag_offset, - inner_frag_id; - u8 *inner_l4, inner_protocol; - - //We have two headers to translate - // FROM - // [ IPv6 ]<- ext ->[IC][ IPv6 ]<- ext ->[L4 header ... - // Handled cases: - // [ IPv6 ][IC][ IPv6 ][L4 header ... - // [ IPv6 ][IC][ IPv6 ][Fr][L4 header ... - // TO - // [ IPv4][IC][ IPv4][L4 header ... 
- - //TODO: This was already done deep in ip6_icmp_to_icmp6_in_place - //We shouldn't have to do it again - if (ip6_parse (inner_ip6, ip6_pay_len - 8, - &inner_protocol, &inner_l4_offset, &inner_frag_offset)) - { - *error = MAP_ERROR_MALFORMED; - return; - } - - inner_l4 = u8_ptr_add (inner_ip6, inner_l4_offset); - inner_ip4 = - (ip4_header_t *) u8_ptr_add (inner_l4, -sizeof (*inner_ip4)); - if (inner_frag_offset) - { - ip6_frag_hdr_t *inner_frag = - (ip6_frag_hdr_t *) u8_ptr_add (inner_ip6, inner_frag_offset); - inner_frag_id = frag_id_6to4 (inner_frag->identification); - } - else - { - inner_frag_id = 0; - } - - //Do the translation of the inner packet - if (inner_protocol == IP_PROTOCOL_TCP) - { - inner_L4_checksum = (u16 *) u8_ptr_add (inner_l4, 16); - } - else if (inner_protocol == IP_PROTOCOL_UDP) - { - inner_L4_checksum = (u16 *) u8_ptr_add (inner_l4, 6); - } - else if (inner_protocol == IP_PROTOCOL_ICMP6) - { - icmp46_header_t *inner_icmp = (icmp46_header_t *) inner_l4; - csum = inner_icmp->checksum; - csum = ip_csum_sub_even (csum, *((u16 *) inner_icmp)); - //It cannot be of a different type as ip6_icmp_to_icmp6_in_place succeeded - inner_icmp->type = (inner_icmp->type == ICMP6_echo_request) ? - ICMP4_echo_request : ICMP4_echo_reply; - csum = ip_csum_add_even (csum, *((u16 *) inner_icmp)); - inner_icmp->checksum = ip_csum_fold (csum); - inner_protocol = IP_PROTOCOL_ICMP; //Will be copied to ip6 later - inner_L4_checksum = &inner_icmp->checksum; - } - else - { - *error = MAP_ERROR_BAD_PROTOCOL; - return; - } - - csum = *inner_L4_checksum; - csum = ip_csum_sub_even (csum, inner_ip6->src_address.as_u64[0]); - csum = ip_csum_sub_even (csum, inner_ip6->src_address.as_u64[1]); - csum = ip_csum_sub_even (csum, inner_ip6->dst_address.as_u64[0]); - csum = ip_csum_sub_even (csum, inner_ip6->dst_address.as_u64[1]); - - //Sanity check of the outer destination address - if (ip6->dst_address.as_u64[0] != inner_ip6->src_address.as_u64[0] && - ip6->dst_address.as_u64[1] != inner_ip6->src_address.as_u64[1]) - { - *error = MAP_ERROR_SEC_CHECK; - return; - } - - //Security check of inner packet - inner_ip4_dadr = map_get_ip4 (&inner_ip6->dst_address); - if (inner_ip6->dst_address.as_u64[0] != - map_get_pfx_net (d, inner_ip4_dadr, sender_port) - || inner_ip6->dst_address.as_u64[1] != map_get_sfx_net (d, - inner_ip4_dadr, - sender_port)) - { - *error = MAP_ERROR_SEC_CHECK; - return; - } + if (ip6->src_address.as_u64[0] != + map_get_pfx_net (ctx->d, ip4_sadr, ctx->sender_port) + || ip6->src_address.as_u64[1] != map_get_sfx_net (ctx->d, ip4_sadr, + ctx->sender_port)) + return -1; + + ip4->dst_address.as_u32 = + ip6_map_t_embedded_address (ctx->d, &ip6->dst_address); + ip4->src_address.as_u32 = ip4_sadr; - inner_ip4->dst_address.as_u32 = inner_ip4_dadr; - inner_ip4->src_address.as_u32 = - ip6_map_t_embedded_address (d, &inner_ip6->src_address); - inner_ip4->ip_version_and_header_length = - IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS; - inner_ip4->tos = ip6_translate_tos (inner_ip6); - inner_ip4->length = - u16_net_add (inner_ip6->payload_length, - sizeof (*ip4) + sizeof (*ip6) - inner_l4_offset); - inner_ip4->fragment_id = inner_frag_id; - inner_ip4->flags_and_fragment_offset = - clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS); - inner_ip4->ttl = inner_ip6->hop_limit; - inner_ip4->protocol = inner_protocol; - inner_ip4->checksum = ip4_header_checksum (inner_ip4); - - if (inner_ip4->protocol == IP_PROTOCOL_ICMP) - { - //Remove remainings of the pseudo-header in the csum - csum = - ip_csum_sub_even 
(csum, clib_host_to_net_u16 (IP_PROTOCOL_ICMP6)); - csum = - ip_csum_sub_even (csum, inner_ip4->length - sizeof (*inner_ip4)); - } - else - { - //Update to new pseudo-header - csum = ip_csum_add_even (csum, inner_ip4->src_address.as_u32); - csum = ip_csum_add_even (csum, inner_ip4->dst_address.as_u32); - } - *inner_L4_checksum = ip_csum_fold (csum); + return 0; +} - //Move up icmp header - ip4 = (ip4_header_t *) u8_ptr_add (inner_l4, -2 * sizeof (*ip4) - 8); - clib_memcpy (u8_ptr_add (inner_l4, -sizeof (*ip4) - 8), icmp, 8); - icmp = (icmp46_header_t *) u8_ptr_add (inner_l4, -sizeof (*ip4) - 8); - } - else - { - //Only one header to translate - ip4 = (ip4_header_t *) u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4)); - } - vlib_buffer_advance (p, (u32) (((u8 *) ip4) - ((u8 *) ip6))); +static int +ip6_to_ip4_set_inner_icmp_cb (ip6_header_t * ip6, ip4_header_t * ip4, + void *arg) +{ + icmp6_to_icmp_ctx_t *ctx = arg; + u32 inner_ip4_dadr; + + //Security check of inner packet + inner_ip4_dadr = map_get_ip4 (&ip6->dst_address); + if (ip6->dst_address.as_u64[0] != + map_get_pfx_net (ctx->d, inner_ip4_dadr, ctx->sender_port) + || ip6->dst_address.as_u64[1] != map_get_sfx_net (ctx->d, + inner_ip4_dadr, + ctx->sender_port)) + return -1; + + ip4->dst_address.as_u32 = inner_ip4_dadr; + ip4->src_address.as_u32 = + ip6_map_t_embedded_address (ctx->d, &ip6->src_address); - ip4->dst_address.as_u32 = ip6_map_t_embedded_address (d, &ip6->dst_address); - ip4->src_address.as_u32 = ip4_sadr; - ip4->ip_version_and_header_length = - IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS; - ip4->tos = ip6_translate_tos (ip6); - ip4->fragment_id = 0; - ip4->flags_and_fragment_offset = 0; - ip4->ttl = ip6->hop_limit; - ip4->protocol = IP_PROTOCOL_ICMP; - //TODO fix the length depending on offset length - ip4->length = u16_net_add (ip6->payload_length, - (inner_ip6 == - NULL) ? 
sizeof (*ip4) : (2 * sizeof (*ip4) - - sizeof (*ip6))); - ip4->checksum = ip4_header_checksum (ip4); - - //TODO: We could do an easy diff-checksum for echo requests/replies - //Recompute ICMP checksum - icmp->checksum = 0; - csum = - ip_incremental_checksum (0, icmp, - clib_net_to_host_u16 (ip4->length) - - sizeof (*ip4)); - icmp->checksum = ~ip_csum_fold (csum); + return 0; } static uword @@ -462,6 +172,8 @@ ip6_map_t_icmp (vlib_main_t * vm, ip6_mapt_icmp_next_t next0; map_domain_t *d0; u16 len0; + icmp6_to_icmp_ctx_t ctx0; + ip6_header_t *ip60; pi0 = to_next[0] = from[0]; from += 1; @@ -472,14 +184,30 @@ ip6_map_t_icmp (vlib_main_t * vm, next0 = IP6_MAPT_ICMP_NEXT_IP4_LOOKUP; p0 = vlib_get_buffer (vm, pi0); - len0 = - clib_net_to_host_u16 (((ip6_header_t *) - vlib_buffer_get_current - (p0))->payload_length); + ip60 = vlib_buffer_get_current (p0); + len0 = clib_net_to_host_u16 (ip60->payload_length); d0 = pool_elt_at_index (map_main.domains, vnet_buffer (p0)->map_t.map_domain_index); - _ip6_map_t_icmp (d0, p0, &error0); + ctx0.sender_port = ip6_get_port (ip60, 0, len0); + ctx0.d = d0; + if (ctx0.sender_port == 0) + { + // In case of 1:1 mapping, we don't care about the port + if (!(d0->ea_bits_len == 0 && d0->rules)) + { + error0 = MAP_ERROR_ICMP; + goto err0; + } + } + + if (icmp6_to_icmp + (p0, ip6_to_ip4_set_icmp_cb, d0, ip6_to_ip4_set_inner_icmp_cb, + d0)) + { + error0 = MAP_ERROR_ICMP; + goto err0; + } if (vnet_buffer (p0)->map_t.mtu < p0->current_length) { @@ -489,7 +217,7 @@ ip6_map_t_icmp (vlib_main_t * vm, vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP; next0 = IP6_MAPT_ICMP_NEXT_IP4_FRAG; } - + err0: if (PREDICT_TRUE (error0 == MAP_ERROR_NONE)) { vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX, @@ -513,6 +241,17 @@ ip6_map_t_icmp (vlib_main_t * vm, return frame->n_vectors; } +static int +ip6_to_ip4_set_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *ctx) +{ + vlib_buffer_t *p = ctx; + + ip4->dst_address.as_u32 = vnet_buffer (p)->map_t.v6.daddr; + ip4->src_address.as_u32 = vnet_buffer (p)->map_t.v6.saddr; + + return 0; +} + static uword ip6_map_t_fragmented (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) @@ -521,6 +260,8 @@ ip6_map_t_fragmented (vlib_main_t * vm, from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; next_index = node->cached_next_index; + vlib_node_runtime_t *error_node = + vlib_node_get_runtime (vm, ip6_map_t_fragmented_node.index); while (n_left_from > 0) { @@ -531,11 +272,6 @@ ip6_map_t_fragmented (vlib_main_t * vm, { u32 pi0, pi1; vlib_buffer_t *p0, *p1; - ip6_header_t *ip60, *ip61; - ip6_frag_hdr_t *frag0, *frag1; - ip4_header_t *ip40, *ip41; - u16 frag_id0, frag_offset0, frag_id1, frag_offset1; - u8 frag_more0, frag_more1; u32 next0, next1; pi0 = to_next[0] = from[0]; @@ -549,94 +285,41 @@ ip6_map_t_fragmented (vlib_main_t * vm, next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP; p0 = vlib_get_buffer (vm, pi0); p1 = vlib_get_buffer (vm, pi1); - ip60 = vlib_buffer_get_current (p0); - ip61 = vlib_buffer_get_current (p1); - frag0 = - (ip6_frag_hdr_t *) u8_ptr_add (ip60, - vnet_buffer (p0)->map_t. - v6.frag_offset); - frag1 = - (ip6_frag_hdr_t *) u8_ptr_add (ip61, - vnet_buffer (p1)->map_t. - v6.frag_offset); - ip40 = - (ip4_header_t *) u8_ptr_add (ip60, - vnet_buffer (p0)->map_t. - v6.l4_offset - sizeof (*ip40)); - ip41 = - (ip4_header_t *) u8_ptr_add (ip61, - vnet_buffer (p1)->map_t. 
- v6.l4_offset - sizeof (*ip40)); - vlib_buffer_advance (p0, - vnet_buffer (p0)->map_t.v6.l4_offset - - sizeof (*ip40)); - vlib_buffer_advance (p1, - vnet_buffer (p1)->map_t.v6.l4_offset - - sizeof (*ip40)); - - frag_id0 = frag_id_6to4 (frag0->identification); - frag_id1 = frag_id_6to4 (frag1->identification); - frag_more0 = ip6_frag_hdr_more (frag0); - frag_more1 = ip6_frag_hdr_more (frag1); - frag_offset0 = ip6_frag_hdr_offset (frag0); - frag_offset1 = ip6_frag_hdr_offset (frag1); - - ip40->dst_address.as_u32 = vnet_buffer (p0)->map_t.v6.daddr; - ip41->dst_address.as_u32 = vnet_buffer (p1)->map_t.v6.daddr; - ip40->src_address.as_u32 = vnet_buffer (p0)->map_t.v6.saddr; - ip41->src_address.as_u32 = vnet_buffer (p1)->map_t.v6.saddr; - ip40->ip_version_and_header_length = - IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS; - ip41->ip_version_and_header_length = - IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS; - ip40->tos = ip6_translate_tos (ip60); - ip41->tos = ip6_translate_tos (ip61); - ip40->length = u16_net_add (ip60->payload_length, - sizeof (*ip40) - - vnet_buffer (p0)->map_t.v6.l4_offset + - sizeof (*ip60)); - ip41->length = - u16_net_add (ip61->payload_length, - sizeof (*ip40) - - vnet_buffer (p1)->map_t.v6.l4_offset + - sizeof (*ip60)); - ip40->fragment_id = frag_id0; - ip41->fragment_id = frag_id1; - ip40->flags_and_fragment_offset = - clib_host_to_net_u16 (frag_offset0 | - (frag_more0 ? IP4_HEADER_FLAG_MORE_FRAGMENTS - : 0)); - ip41->flags_and_fragment_offset = - clib_host_to_net_u16 (frag_offset1 | - (frag_more1 ? IP4_HEADER_FLAG_MORE_FRAGMENTS - : 0)); - ip40->ttl = ip60->hop_limit; - ip41->ttl = ip61->hop_limit; - ip40->protocol = - (vnet_buffer (p0)->map_t.v6.l4_protocol == - IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : vnet_buffer (p0)-> - map_t.v6.l4_protocol; - ip41->protocol = - (vnet_buffer (p1)->map_t.v6.l4_protocol == - IP_PROTOCOL_ICMP6) ? 
IP_PROTOCOL_ICMP : vnet_buffer (p1)-> - map_t.v6.l4_protocol; - ip40->checksum = ip4_header_checksum (ip40); - ip41->checksum = ip4_header_checksum (ip41); - if (vnet_buffer (p0)->map_t.mtu < p0->current_length) + if (ip6_to_ip4_fragmented (p0, ip6_to_ip4_set_cb, p0)) { - vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; - vnet_buffer (p0)->ip_frag.header_offset = 0; - vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP; - next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG; + p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED]; + next0 = IP6_MAPT_FRAGMENTED_NEXT_DROP; + } + else + { + if (vnet_buffer (p0)->map_t.mtu < p0->current_length) + { + //Send to fragmentation node if necessary + vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; + vnet_buffer (p0)->ip_frag.header_offset = 0; + vnet_buffer (p0)->ip_frag.next_index = + IP4_FRAG_NEXT_IP4_LOOKUP; + next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG; + } } - if (vnet_buffer (p1)->map_t.mtu < p1->current_length) + if (ip6_to_ip4_fragmented (p1, ip6_to_ip4_set_cb, p1)) { - vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu; - vnet_buffer (p1)->ip_frag.header_offset = 0; - vnet_buffer (p1)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP; - next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG; + p1->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED]; + next1 = IP6_MAPT_FRAGMENTED_NEXT_DROP; + } + else + { + if (vnet_buffer (p1)->map_t.mtu < p1->current_length) + { + //Send to fragmentation node if necessary + vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu; + vnet_buffer (p1)->ip_frag.header_offset = 0; + vnet_buffer (p1)->ip_frag.next_index = + IP4_FRAG_NEXT_IP4_LOOKUP; + next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG; + } } vlib_validate_buffer_enqueue_x2 (vm, node, next_index, @@ -649,12 +332,6 @@ ip6_map_t_fragmented (vlib_main_t * vm, { u32 pi0; vlib_buffer_t *p0; - ip6_header_t *ip60; - ip6_frag_hdr_t *frag0; - ip4_header_t *ip40; - u16 frag_id0; - u8 frag_more0; - u16 frag_offset0; u32 next0; pi0 = to_next[0] = from[0]; @@ -665,51 +342,23 @@ ip6_map_t_fragmented (vlib_main_t * vm, next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP; p0 = vlib_get_buffer (vm, pi0); - ip60 = vlib_buffer_get_current (p0); - frag0 = - (ip6_frag_hdr_t *) u8_ptr_add (ip60, - vnet_buffer (p0)->map_t. - v6.frag_offset); - ip40 = - (ip4_header_t *) u8_ptr_add (ip60, - vnet_buffer (p0)->map_t. - v6.l4_offset - sizeof (*ip40)); - vlib_buffer_advance (p0, - vnet_buffer (p0)->map_t.v6.l4_offset - - sizeof (*ip40)); - - frag_id0 = frag_id_6to4 (frag0->identification); - frag_more0 = ip6_frag_hdr_more (frag0); - frag_offset0 = ip6_frag_hdr_offset (frag0); - - ip40->dst_address.as_u32 = vnet_buffer (p0)->map_t.v6.daddr; - ip40->src_address.as_u32 = vnet_buffer (p0)->map_t.v6.saddr; - ip40->ip_version_and_header_length = - IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS; - ip40->tos = ip6_translate_tos (ip60); - ip40->length = u16_net_add (ip60->payload_length, - sizeof (*ip40) - - vnet_buffer (p0)->map_t.v6.l4_offset + - sizeof (*ip60)); - ip40->fragment_id = frag_id0; - ip40->flags_and_fragment_offset = - clib_host_to_net_u16 (frag_offset0 | - (frag_more0 ? IP4_HEADER_FLAG_MORE_FRAGMENTS - : 0)); - ip40->ttl = ip60->hop_limit; - ip40->protocol = - (vnet_buffer (p0)->map_t.v6.l4_protocol == - IP_PROTOCOL_ICMP6) ? 
IP_PROTOCOL_ICMP : vnet_buffer (p0)-> - map_t.v6.l4_protocol; - ip40->checksum = ip4_header_checksum (ip40); - if (vnet_buffer (p0)->map_t.mtu < p0->current_length) + if (ip6_to_ip4_fragmented (p0, ip6_to_ip4_set_cb, p0)) { - //Send to fragmentation node if necessary - vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; - vnet_buffer (p0)->ip_frag.header_offset = 0; - vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP; - next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG; + p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED]; + next0 = IP6_MAPT_FRAGMENTED_NEXT_DROP; + } + else + { + if (vnet_buffer (p0)->map_t.mtu < p0->current_length) + { + //Send to fragmentation node if necessary + vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; + vnet_buffer (p0)->ip_frag.header_offset = 0; + vnet_buffer (p0)->ip_frag.next_index = + IP4_FRAG_NEXT_IP4_LOOKUP; + next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG; + } } vlib_validate_buffer_enqueue_x1 (vm, node, next_index, @@ -726,6 +375,9 @@ ip6_map_t_tcp_udp (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { u32 n_left_from, *from, next_index, *to_next, n_left_to_next; + vlib_node_runtime_t *error_node = + vlib_node_get_runtime (vm, ip6_map_t_tcp_udp_node.index); + from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; next_index = node->cached_next_index; @@ -738,11 +390,6 @@ ip6_map_t_tcp_udp (vlib_main_t * vm, { u32 pi0, pi1; vlib_buffer_t *p0, *p1; - ip6_header_t *ip60, *ip61; - ip_csum_t csum0, csum1; - ip4_header_t *ip40, *ip41; - u16 fragment_id0, flags0, *checksum0, - fragment_id1, flags1, *checksum1; ip6_mapt_tcp_udp_next_t next0, next1; pi0 = to_next[0] = from[0]; @@ -756,118 +403,41 @@ ip6_map_t_tcp_udp (vlib_main_t * vm, p0 = vlib_get_buffer (vm, pi0); p1 = vlib_get_buffer (vm, pi1); - ip60 = vlib_buffer_get_current (p0); - ip61 = vlib_buffer_get_current (p1); - ip40 = - (ip4_header_t *) u8_ptr_add (ip60, - vnet_buffer (p0)->map_t. - v6.l4_offset - sizeof (*ip40)); - ip41 = - (ip4_header_t *) u8_ptr_add (ip61, - vnet_buffer (p1)->map_t. - v6.l4_offset - sizeof (*ip40)); - vlib_buffer_advance (p0, - vnet_buffer (p0)->map_t.v6.l4_offset - - sizeof (*ip40)); - vlib_buffer_advance (p1, - vnet_buffer (p1)->map_t.v6.l4_offset - - sizeof (*ip40)); - checksum0 = - (u16 *) u8_ptr_add (ip60, - vnet_buffer (p0)->map_t.checksum_offset); - checksum1 = - (u16 *) u8_ptr_add (ip61, - vnet_buffer (p1)->map_t.checksum_offset); - - csum0 = ip_csum_sub_even (*checksum0, ip60->src_address.as_u64[0]); - csum1 = ip_csum_sub_even (*checksum1, ip61->src_address.as_u64[0]); - csum0 = ip_csum_sub_even (csum0, ip60->src_address.as_u64[1]); - csum1 = ip_csum_sub_even (csum1, ip61->src_address.as_u64[1]); - csum0 = ip_csum_sub_even (csum0, ip60->dst_address.as_u64[0]); - csum1 = ip_csum_sub_even (csum0, ip61->dst_address.as_u64[0]); - csum0 = ip_csum_sub_even (csum0, ip60->dst_address.as_u64[1]); - csum1 = ip_csum_sub_even (csum1, ip61->dst_address.as_u64[1]); - csum0 = ip_csum_add_even (csum0, vnet_buffer (p0)->map_t.v6.daddr); - csum1 = ip_csum_add_even (csum1, vnet_buffer (p1)->map_t.v6.daddr); - csum0 = ip_csum_add_even (csum0, vnet_buffer (p0)->map_t.v6.saddr); - csum1 = ip_csum_add_even (csum1, vnet_buffer (p1)->map_t.v6.saddr); - *checksum0 = ip_csum_fold (csum0); - *checksum1 = ip_csum_fold (csum1); - - if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset)) - { - ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add (ip60, - vnet_buffer - (p0)-> - map_t. 
- v6.frag_offset); - fragment_id0 = frag_id_6to4 (hdr->identification); - flags0 = clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS); - } - else - { - fragment_id0 = 0; - flags0 = 0; - } - if (PREDICT_FALSE (vnet_buffer (p1)->map_t.v6.frag_offset)) + if (ip6_to_ip4_tcp_udp (p0, ip6_to_ip4_set_cb, p0, 1)) { - ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add (ip61, - vnet_buffer - (p1)-> - map_t. - v6.frag_offset); - fragment_id1 = frag_id_6to4 (hdr->identification); - flags1 = clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS); + p0->error = error_node->errors[MAP_ERROR_UNKNOWN]; + next0 = IP6_MAPT_TCP_UDP_NEXT_DROP; } else { - fragment_id1 = 0; - flags1 = 0; + if (vnet_buffer (p0)->map_t.mtu < p0->current_length) + { + //Send to fragmentation node if necessary + vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; + vnet_buffer (p0)->ip_frag.header_offset = 0; + vnet_buffer (p0)->ip_frag.next_index = + IP4_FRAG_NEXT_IP4_LOOKUP; + next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG; + } } - ip40->dst_address.as_u32 = vnet_buffer (p0)->map_t.v6.daddr; - ip41->dst_address.as_u32 = vnet_buffer (p1)->map_t.v6.daddr; - ip40->src_address.as_u32 = vnet_buffer (p0)->map_t.v6.saddr; - ip41->src_address.as_u32 = vnet_buffer (p1)->map_t.v6.saddr; - ip40->ip_version_and_header_length = - IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS; - ip41->ip_version_and_header_length = - IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS; - ip40->tos = ip6_translate_tos (ip60); - ip41->tos = ip6_translate_tos (ip61); - ip40->length = u16_net_add (ip60->payload_length, - sizeof (*ip40) + sizeof (*ip60) - - vnet_buffer (p0)->map_t.v6.l4_offset); - ip41->length = - u16_net_add (ip61->payload_length, - sizeof (*ip40) + sizeof (*ip60) - - vnet_buffer (p1)->map_t.v6.l4_offset); - ip40->fragment_id = fragment_id0; - ip41->fragment_id = fragment_id1; - ip40->flags_and_fragment_offset = flags0; - ip41->flags_and_fragment_offset = flags1; - ip40->ttl = ip60->hop_limit; - ip41->ttl = ip61->hop_limit; - ip40->protocol = vnet_buffer (p0)->map_t.v6.l4_protocol; - ip41->protocol = vnet_buffer (p1)->map_t.v6.l4_protocol; - ip40->checksum = ip4_header_checksum (ip40); - ip41->checksum = ip4_header_checksum (ip41); - - if (vnet_buffer (p0)->map_t.mtu < p0->current_length) + if (ip6_to_ip4_tcp_udp (p1, ip6_to_ip4_set_cb, p1, 1)) { - vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; - vnet_buffer (p0)->ip_frag.header_offset = 0; - vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP; - next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG; + p1->error = error_node->errors[MAP_ERROR_UNKNOWN]; + next1 = IP6_MAPT_TCP_UDP_NEXT_DROP; } - - if (vnet_buffer (p1)->map_t.mtu < p1->current_length) + else { - vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu; - vnet_buffer (p1)->ip_frag.header_offset = 0; - vnet_buffer (p1)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP; - next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG; + if (vnet_buffer (p1)->map_t.mtu < p1->current_length) + { + //Send to fragmentation node if necessary + vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu; + vnet_buffer (p1)->ip_frag.header_offset = 0; + vnet_buffer (p1)->ip_frag.next_index = + IP4_FRAG_NEXT_IP4_LOOKUP; + next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG; + } } vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next, @@ -880,12 +450,6 @@ ip6_map_t_tcp_udp (vlib_main_t * vm, { u32 pi0; vlib_buffer_t *p0; - ip6_header_t *ip60; - u16 *checksum0; - ip_csum_t csum0; - ip4_header_t *ip40; - u16 fragment_id0; - u16 flags0; ip6_mapt_tcp_udp_next_t next0; pi0 = 
to_next[0] = from[0]; @@ -896,65 +460,23 @@ ip6_map_t_tcp_udp (vlib_main_t * vm, next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP; p0 = vlib_get_buffer (vm, pi0); - ip60 = vlib_buffer_get_current (p0); - ip40 = - (ip4_header_t *) u8_ptr_add (ip60, - vnet_buffer (p0)->map_t. - v6.l4_offset - sizeof (*ip40)); - vlib_buffer_advance (p0, - vnet_buffer (p0)->map_t.v6.l4_offset - - sizeof (*ip40)); - checksum0 = - (u16 *) u8_ptr_add (ip60, - vnet_buffer (p0)->map_t.checksum_offset); - - //TODO: This can probably be optimized - csum0 = ip_csum_sub_even (*checksum0, ip60->src_address.as_u64[0]); - csum0 = ip_csum_sub_even (csum0, ip60->src_address.as_u64[1]); - csum0 = ip_csum_sub_even (csum0, ip60->dst_address.as_u64[0]); - csum0 = ip_csum_sub_even (csum0, ip60->dst_address.as_u64[1]); - csum0 = ip_csum_add_even (csum0, vnet_buffer (p0)->map_t.v6.daddr); - csum0 = ip_csum_add_even (csum0, vnet_buffer (p0)->map_t.v6.saddr); - *checksum0 = ip_csum_fold (csum0); - - if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset)) + + if (ip6_to_ip4_tcp_udp (p0, ip6_to_ip4_set_cb, p0, 1)) { - //Only the first fragment - ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add (ip60, - vnet_buffer - (p0)-> - map_t. - v6.frag_offset); - fragment_id0 = frag_id_6to4 (hdr->identification); - flags0 = clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS); + p0->error = error_node->errors[MAP_ERROR_UNKNOWN]; + next0 = IP6_MAPT_TCP_UDP_NEXT_DROP; } else { - fragment_id0 = 0; - flags0 = 0; - } - - ip40->dst_address.as_u32 = vnet_buffer (p0)->map_t.v6.daddr; - ip40->src_address.as_u32 = vnet_buffer (p0)->map_t.v6.saddr; - ip40->ip_version_and_header_length = - IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS; - ip40->tos = ip6_translate_tos (ip60); - ip40->length = u16_net_add (ip60->payload_length, - sizeof (*ip40) + sizeof (*ip60) - - vnet_buffer (p0)->map_t.v6.l4_offset); - ip40->fragment_id = fragment_id0; - ip40->flags_and_fragment_offset = flags0; - ip40->ttl = ip60->hop_limit; - ip40->protocol = vnet_buffer (p0)->map_t.v6.l4_protocol; - ip40->checksum = ip4_header_checksum (ip40); - - if (vnet_buffer (p0)->map_t.mtu < p0->current_length) - { - //Send to fragmentation node if necessary - vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; - vnet_buffer (p0)->ip_frag.header_offset = 0; - vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP; - next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG; + if (vnet_buffer (p0)->map_t.mtu < p0->current_length) + { + //Send to fragmentation node if necessary + vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu; + vnet_buffer (p0)->ip_frag.header_offset = 0; + vnet_buffer (p0)->ip_frag.next_index = + IP4_FRAG_NEXT_IP4_LOOKUP; + next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG; + } } vlib_validate_buffer_enqueue_x1 (vm, node, next_index, diff --git a/src/vnet/map/map.c b/src/vnet/map/map.c index 6a707df1..8579cdf6 100644 --- a/src/vnet/map/map.c +++ b/src/vnet/map/map.c @@ -76,91 +76,6 @@ crc_u32 (u32 data, u32 value) */ -i32 -ip4_get_port (ip4_header_t * ip, map_dir_e dir, u16 buffer_len) -{ - //TODO: use buffer length - if (ip->ip_version_and_header_length != 0x45 || - ip4_get_fragment_offset (ip)) - return -1; - - if (PREDICT_TRUE ((ip->protocol == IP_PROTOCOL_TCP) || - (ip->protocol == IP_PROTOCOL_UDP))) - { - udp_header_t *udp = (void *) (ip + 1); - return (dir == MAP_SENDER) ? 
udp->src_port : udp->dst_port; - } - else if (ip->protocol == IP_PROTOCOL_ICMP) - { - icmp46_header_t *icmp = (void *) (ip + 1); - if (icmp->type == ICMP4_echo_request || icmp->type == ICMP4_echo_reply) - { - return *((u16 *) (icmp + 1)); - } - else if (clib_net_to_host_u16 (ip->length) >= 64) - { - ip = (ip4_header_t *) (icmp + 2); - if (PREDICT_TRUE ((ip->protocol == IP_PROTOCOL_TCP) || - (ip->protocol == IP_PROTOCOL_UDP))) - { - udp_header_t *udp = (void *) (ip + 1); - return (dir == MAP_SENDER) ? udp->dst_port : udp->src_port; - } - else if (ip->protocol == IP_PROTOCOL_ICMP) - { - icmp46_header_t *icmp = (void *) (ip + 1); - if (icmp->type == ICMP4_echo_request || - icmp->type == ICMP4_echo_reply) - { - return *((u16 *) (icmp + 1)); - } - } - } - } - return -1; -} - -i32 -ip6_get_port (ip6_header_t * ip6, map_dir_e dir, u16 buffer_len) -{ - u8 l4_protocol; - u16 l4_offset; - u16 frag_offset; - u8 *l4; - - if (ip6_parse (ip6, buffer_len, &l4_protocol, &l4_offset, &frag_offset)) - return -1; - - //TODO: Use buffer length - - if (frag_offset && - ip6_frag_hdr_offset (((ip6_frag_hdr_t *) - u8_ptr_add (ip6, frag_offset)))) - return -1; //Can't deal with non-first fragment for now - - l4 = u8_ptr_add (ip6, l4_offset); - if (l4_protocol == IP_PROTOCOL_TCP || l4_protocol == IP_PROTOCOL_UDP) - { - return (dir == - MAP_SENDER) ? ((udp_header_t *) (l4))->src_port : ((udp_header_t - *) - (l4))->dst_port; - } - else if (l4_protocol == IP_PROTOCOL_ICMP6) - { - icmp46_header_t *icmp = (icmp46_header_t *) (l4); - if (icmp->type == ICMP6_echo_request) - { - return (dir == MAP_SENDER) ? ((u16 *) (icmp))[2] : -1; - } - else if (icmp->type == ICMP6_echo_reply) - { - return (dir == MAP_SENDER) ? -1 : ((u16 *) (icmp))[2]; - } - } - return -1; -} - int map_create_domain (ip4_address_t * ip4_prefix, diff --git a/src/vnet/map/map.h b/src/vnet/map/map.h index 644e80f5..208a58ef 100644 --- a/src/vnet/map/map.h +++ b/src/vnet/map/map.h @@ -25,12 +25,6 @@ #define MAP_SKIP_IP6_LOOKUP 1 -typedef enum -{ - MAP_SENDER, - MAP_RECEIVER -} map_dir_e; - int map_create_domain (ip4_address_t * ip4_prefix, u8 ip4_prefix_len, ip6_address_t * ip6_prefix, u8 ip6_prefix_len, ip6_address_t * ip6_src, u8 ip6_src_len, @@ -40,9 +34,6 @@ int map_delete_domain (u32 map_domain_index); int map_add_del_psid (u32 map_domain_index, u16 psid, ip6_address_t * tep, u8 is_add); u8 *format_map_trace (u8 * s, va_list * args); -i32 ip4_get_port (ip4_header_t * ip, map_dir_e dir, u16 buffer_len); -i32 ip6_get_port (ip6_header_t * ip6, map_dir_e dir, u16 buffer_len); -u16 ip4_map_get_port (ip4_header_t * ip, map_dir_e dir); typedef enum __attribute__ ((__packed__)) { @@ -518,30 +509,10 @@ int map_ip6_reass_conf_lifetime(u16 lifetime_ms); int map_ip6_reass_conf_buffers(u32 buffers); #define MAP_IP6_REASS_CONF_BUFFERS_MAX (0xffffffff) -static_always_inline -int ip6_parse(const ip6_header_t *ip6, u32 buff_len, - u8 *l4_protocol, u16 *l4_offset, u16 *frag_hdr_offset) -{ - if (ip6->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION) { - *l4_protocol = ((ip6_frag_hdr_t *)(ip6 + 1))->next_hdr; - *frag_hdr_offset = sizeof(*ip6); - *l4_offset = sizeof(*ip6) + sizeof(ip6_frag_hdr_t); - } else { - *l4_protocol = ip6->protocol; - *frag_hdr_offset = 0; - *l4_offset = sizeof(*ip6); - } - - return (buff_len < (*l4_offset + 4)) || - (clib_net_to_host_u16(ip6->payload_length) < (*l4_offset + 4 - sizeof(*ip6))); -} - #define u8_ptr_add(ptr, index) (((u8 *)ptr) + index) #define u16_net_add(u, val) clib_host_to_net_u16(clib_net_to_host_u16(u) + (val)) -#define 
frag_id_6to4(id) ((id) ^ ((id) >> 16)) - static_always_inline void ip4_map_t_embedded_address (map_domain_t *d, ip6_address_t *ip6, const ip4_address_t *ip4) -- cgit 1.2.3-korg From b4bd28a490012ecbce292561da404d4e9b02d24b Mon Sep 17 00:00:00 2001 From: Christophe Fontaine Date: Wed, 31 May 2017 11:27:19 +0200 Subject: Remove calls to crc_u32 and add clib_crc32c for armv8+crc crc_u32 was not defined for non x86_64 with SSE4.2 processors. Calls to "crc_u32" are removed and replaced by either a call to clib_crc32c or a call to clib_xxhash, as the result is not used as a check value but as a hash. Change-Id: I3af4d68e2e5ebd0c9b0a6090f848d043cb0f20a2 Signed-off-by: Christophe Fontaine --- src/plugins/flowprobe/node.c | 13 ++++++++++--- src/vnet/map/map.c | 42 ++++++++++++++++-------------------------- src/vppinfra/bihash_16_8.h | 30 ++++-------------------------- src/vppinfra/bihash_24_8.h | 4 ++-- src/vppinfra/bihash_48_8.h | 38 ++++---------------------------------- src/vppinfra/crc32.h | 26 +++++++++++++++++++++++++- 6 files changed, 61 insertions(+), 92 deletions(-) (limited to 'src/vnet/map') diff --git a/src/plugins/flowprobe/node.c b/src/plugins/flowprobe/node.c index 6a539db9..80bfa9b5 100644 --- a/src/plugins/flowprobe/node.c +++ b/src/plugins/flowprobe/node.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -258,10 +259,16 @@ static inline u32 flowprobe_hash (flowprobe_key_t * k) { flowprobe_main_t *fm = &flowprobe_main; - int i; u32 h = 0; - for (i = 0; i < sizeof (k->as_u32) / sizeof (u32); i++) - h = crc_u32 (k->as_u32[i], h); + +#ifdef clib_crc32c_uses_intrinsics + h = clib_crc32c ((u8 *) k->as_u32, FLOWPROBE_KEY_IN_U32); +#else + u64 tmp = + k->as_u32[0] ^ k->as_u32[1] ^ k->as_u32[2] ^ k->as_u32[3] ^ k->as_u32[4]; + h = clib_xxhash (tmp); +#endif + return h >> (32 - fm->ht_log2len); } diff --git a/src/vnet/map/map.c b/src/vnet/map/map.c index 8579cdf6..d246f6c8 100644 --- a/src/vnet/map/map.c +++ b/src/vnet/map/map.c @@ -19,29 +19,10 @@ #include #include #include +#include #include "map.h" -#ifdef __SSE4_2__ -static inline u32 -crc_u32 (u32 data, u32 value) -{ - __asm__ volatile ("crc32l %[data], %[value];":[value] "+r" (value):[data] - "rm" (data)); - return value; -} -#else -#include - -static inline u32 -crc_u32 (u32 data, u32 value) -{ - u64 tmp = ((u64) data << 32) | (u64) value; - return (u32) clib_xxhash (tmp); -} -#endif - - /* * This code supports the following MAP modes: * @@ -1488,10 +1469,12 @@ map_ip4_reass_get (u32 src, u32 dst, u16 fragment_id, }; u32 h = 0; - h = crc_u32 (k.as_u32[0], h); - h = crc_u32 (k.as_u32[1], h); - h = crc_u32 (k.as_u32[2], h); - h = crc_u32 (k.as_u32[3], h); +#ifdef clib_crc32c_uses_intrinsics + h = clib_crc32c ((u8 *) k.as_u32, 16); +#else + u64 tmp = k.as_u32[0] ^ k.as_u32[1] ^ k.as_u32[2] ^ k.as_u32[3]; + h = clib_xxhash (tmp); +#endif h = h >> (32 - mm->ip4_reass_ht_log2len); f64 now = vlib_time_now (mm->vlib_main); @@ -1660,8 +1643,15 @@ map_ip6_reass_get (ip6_address_t * src, ip6_address_t * dst, u32 fragment_id, u32 h = 0; int i; - for (i = 0; i < 10; i++) - h = crc_u32 (k.as_u32[i], h); + +#ifdef clib_crc32c_uses_intrinsics + h = clib_crc32c ((u8 *) k.as_u32, 40); +#else + u64 tmp = + k.as_u64[0] ^ k.as_u64[1] ^ k.as_u64[2] ^ k.as_u64[3] ^ k.as_u64[4]; + h = clib_xxhash (tmp); +#endif + h = h >> (32 - mm->ip6_reass_ht_log2len); f64 now = vlib_time_now (mm->vlib_main); diff --git a/src/vppinfra/bihash_16_8.h b/src/vppinfra/bihash_16_8.h index ce80f70e..6b1b563e 100644 --- 
a/src/vppinfra/bihash_16_8.h +++ b/src/vppinfra/bihash_16_8.h @@ -24,6 +24,7 @@ #include #include #include +#include typedef struct { @@ -40,39 +41,16 @@ clib_bihash_is_free_16_8 (clib_bihash_kv_16_8_t * v) return 0; } -#if __SSE4_2__ -#ifndef __defined_crc_u32__ -#define __defined_crc_u32__ -static inline u32 -crc_u32 (u32 data, u32 value) -{ - __asm__ volatile ("crc32l %[data], %[value];":[value] "+r" (value):[data] - "rm" (data)); - return value; -} -#endif /* __defined_crc_u32__ */ - static inline u64 clib_bihash_hash_16_8 (clib_bihash_kv_16_8_t * v) { - u32 *dp = (u32 *) & v->key[0]; - u32 value = 0; - - value = crc_u32 (dp[0], value); - value = crc_u32 (dp[1], value); - value = crc_u32 (dp[2], value); - value = crc_u32 (dp[3], value); - - return value; -} +#ifdef clib_crc32c_uses_intrinsics + return clib_crc32c ((u8 *) v->key, 16); #else -static inline u64 -clib_bihash_hash_16_8 (clib_bihash_kv_16_8_t * v) -{ u64 tmp = v->key[0] ^ v->key[1]; return clib_xxhash (tmp); -} #endif +} static inline u8 * format_bihash_kvp_16_8 (u8 * s, va_list * args) diff --git a/src/vppinfra/bihash_24_8.h b/src/vppinfra/bihash_24_8.h index 655dab80..db77daa4 100644 --- a/src/vppinfra/bihash_24_8.h +++ b/src/vppinfra/bihash_24_8.h @@ -20,11 +20,11 @@ #ifndef __included_bihash_24_8_h__ #define __included_bihash_24_8_h__ +#include #include #include #include #include -#include typedef struct { @@ -44,7 +44,7 @@ clib_bihash_is_free_24_8 (const clib_bihash_kv_24_8_t * v) static inline u64 clib_bihash_hash_24_8 (const clib_bihash_kv_24_8_t * v) { -#if __SSE4_2__ +#ifdef clib_crc32c_uses_intrinsics return clib_crc32c ((u8 *) v->key, 24); #else u64 tmp = v->key[0] ^ v->key[1] ^ v->key[2]; diff --git a/src/vppinfra/bihash_48_8.h b/src/vppinfra/bihash_48_8.h index 1a6e7691..48079e0a 100644 --- a/src/vppinfra/bihash_48_8.h +++ b/src/vppinfra/bihash_48_8.h @@ -21,6 +21,7 @@ #ifndef __included_bihash_48_8_h__ #define __included_bihash_48_8_h__ +#include #include #include #include @@ -41,48 +42,17 @@ clib_bihash_is_free_48_8 (const clib_bihash_kv_48_8_t * v) return 0; } -#if __SSE4_2__ -#ifndef __defined_crc_u32__ -#define __defined_crc_u32__ -static inline u32 -crc_u32 (u32 data, u32 value) -{ - __asm__ volatile ("crc32l %[data], %[value];":[value] "+r" (value):[data] - "rm" (data)); - return value; -} -#endif /* __defined_crc_u32__ */ - static inline u64 clib_bihash_hash_48_8 (const clib_bihash_kv_48_8_t * v) { - const u32 *dp = (const u32 *) &v->key[0]; - u32 value = 0; - - value = crc_u32 (dp[0], value); - value = crc_u32 (dp[1], value); - value = crc_u32 (dp[2], value); - value = crc_u32 (dp[3], value); - value = crc_u32 (dp[4], value); - value = crc_u32 (dp[5], value); - value = crc_u32 (dp[6], value); - value = crc_u32 (dp[7], value); - value = crc_u32 (dp[8], value); - value = crc_u32 (dp[9], value); - value = crc_u32 (dp[10], value); - value = crc_u32 (dp[11], value); - - return value; -} +#ifdef clib_crc32c_uses_intrinsics + return clib_crc32c ((u8 *) v->key, 48); #else -static inline u64 -clib_bihash_hash_48_8 (const clib_bihash_kv_48_8_t * v) -{ u64 tmp = v->key[0] ^ v->key[1] ^ v->key[2] ^ v->key[3] ^ v->key[4] ^ v->key[5]; return clib_xxhash (tmp); -} #endif +} static inline u8 * format_bihash_kvp_48_8 (u8 * s, va_list * args) diff --git a/src/vppinfra/crc32.h b/src/vppinfra/crc32.h index abb2953f..5a47236a 100644 --- a/src/vppinfra/crc32.h +++ b/src/vppinfra/crc32.h @@ -17,6 +17,7 @@ #define __included_crc32_h__ #if __SSE4_2__ +#define clib_crc32c_uses_intrinsics #include static_always_inline u32 @@ 
-45,8 +46,31 @@ clib_crc32c (u8 * s, int len)
   return v;
 }
 
-#endif
+#elif __ARM_FEATURE_CRC32
+#define clib_crc32c_with_intrinsics
+#include 
+
+static_always_inline u32
+clib_crc32c (u8 * s, int len)
+{
+  u32 v = 0;
+
+  for (; len >= 8; len -= 8, s += 8)
+    v = __crc32cd (v, *((u64 *) s));
+
+  for (; len >= 4; len -= 4, s += 4)
+    v = __crc32cw (v, *((u32 *) s));
+
+  for (; len >= 2; len -= 2, s += 2)
+    v = __crc32ch (v, *((u16 *) s));
+
+  for (; len >= 1; len -= 1, s += 1)
+    v = __crc32cb (v, *((u8 *) s));
+  return v;
+}
+
+#endif
 
 #endif /* __included_crc32_h__ */
 
 /*
-- cgit 1.2.3-korg


From 026c036dd4158f9f517a8dc7c79e3ad10d126b50 Mon Sep 17 00:00:00 2001
From: Ole Troan
Date: Wed, 14 Jun 2017 13:12:33 +0200
Subject: VPP-879 MAP: s/u32 is_add/u8 is_add in map.api

Change-Id: If35171005e409f77bed4cc16eccb66a85aae5dfb
Signed-off-by: Ole Troan
---
 src/vnet/map/map.api | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'src/vnet/map')

diff --git a/src/vnet/map/map.api b/src/vnet/map/map.api
index d68f13f0..4b142c8f 100644
--- a/src/vnet/map/map.api
+++ b/src/vnet/map/map.api
@@ -83,7 +83,7 @@ autoreply define map_add_del_rule
   u32 client_index;
   u32 context;
   u32 index;
-  u32 is_add;
+  u8 is_add;
   u8 ip6_dst[16];
   u16 psid;
 };
-- cgit 1.2.3-korg


From c08b0965d73da57ca4cd6c9dcfca1abaca870578 Mon Sep 17 00:00:00 2001
From: Ole Troan
Date: Mon, 26 Jun 2017 18:12:37 +0200
Subject: VPP-889: MAP Stats API/CLI crashes when no domains.

Change-Id: Ib7824bfc08cb3c8f20258379e1a1f2c159c4f687
Signed-off-by: Ole Troan
---
 src/vnet/map/map.c     | 5 ++++-
 src/vnet/map/map_api.c | 7 +++++++
 2 files changed, 11 insertions(+), 1 deletion(-)

(limited to 'src/vnet/map')

diff --git a/src/vnet/map/map.c b/src/vnet/map/map.c
index d246f6c8..fa13588f 100644
--- a/src/vnet/map/map.c
+++ b/src/vnet/map/map.c
@@ -1106,7 +1106,10 @@ show_map_stats_command_fn (vlib_main_t * vm, unformat_input_t * input,
   map_domain_t *d;
   int domains = 0, rules = 0, domaincount = 0, rulecount = 0;
   if (pool_elts (mm->domains) == 0)
-    vlib_cli_output (vm, "No MAP domains are configured...");
+    {
+      vlib_cli_output (vm, "No MAP domains are configured...");
+      return 0;
+    }
 
   /* *INDENT-OFF* */
   pool_foreach(d, mm->domains, ({
diff --git a/src/vnet/map/map_api.c b/src/vnet/map/map_api.c
index d618e7a6..994a64de 100644
--- a/src/vnet/map/map_api.c
+++ b/src/vnet/map/map_api.c
@@ -203,6 +203,12 @@ vl_api_map_summary_stats_t_handler (vl_api_map_summary_stats_t * mp)
   rmp->context = mp->context;
   rmp->retval = 0;
 
+  if (pool_elts (mm->domains) == 0)
+    {
+      rmp->retval = -1;
+      goto out;
+    }
+
   memset (total_pkts, 0, sizeof (total_pkts));
   memset (total_bytes, 0, sizeof (total_bytes));
 
@@ -239,6 +245,7 @@ vl_api_map_summary_stats_t_handler (vl_api_map_summary_stats_t * mp)
     clib_host_to_net_u64 (map_error_counter_get (ip4_map_node.index,
                                                  MAP_ERROR_DECAP_SEC_CHECK));
 
+out:
   vl_msg_api_send_shmem (q, (u8 *) & rmp);
 }
 
-- cgit 1.2.3-korg


From 01e078fff983e89f1b402988473960d322d2b4a0 Mon Sep 17 00:00:00 2001
From: dongjuan
Date: Thu, 7 Sep 2017 14:58:56 +0800
Subject: Initialize ip4_reass_lock of map_main to zero (VPP-975)

Change-Id: I0f58100a944e36f5b530101178303d4595b3207f
Signed-off-by: dongjuan
---
 src/vnet/map/map.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'src/vnet/map')

diff --git a/src/vnet/map/map.c b/src/vnet/map/map.c
index fa13588f..0c8cd6cd 100644
--- a/src/vnet/map/map.c
+++ b/src/vnet/map/map.c
@@ -2241,6 +2241,7 @@ map_init (vlib_main_t * vm)
   mm->ip4_reass_pool = 0;
   mm->ip4_reass_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
                                                CLIB_CACHE_LINE_BYTES);
+  *mm->ip4_reass_lock = 0;
   mm->ip4_reass_conf_ht_ratio = MAP_IP4_REASS_HT_RATIO_DEFAULT;
   mm->ip4_reass_conf_lifetime_ms = MAP_IP4_REASS_LIFETIME_DEFAULT;
   mm->ip4_reass_conf_pool_size = MAP_IP4_REASS_POOL_SIZE_DEFAULT;
@@ -2256,6 +2257,7 @@ map_init (vlib_main_t * vm)
   mm->ip6_reass_pool = 0;
   mm->ip6_reass_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
                                                CLIB_CACHE_LINE_BYTES);
+  *mm->ip6_reass_lock = 0;
   mm->ip6_reass_conf_ht_ratio = MAP_IP6_REASS_HT_RATIO_DEFAULT;
   mm->ip6_reass_conf_lifetime_ms = MAP_IP6_REASS_LIFETIME_DEFAULT;
   mm->ip6_reass_conf_pool_size = MAP_IP6_REASS_POOL_SIZE_DEFAULT;
-- cgit 1.2.3-korg
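
Two stand-alone sketches follow to illustrate why the fixes above matter. Nothing below is VPP or clib code; every helper name is hypothetical and chosen only for the example.

The VPP-975 change works because clib_mem_alloc_aligned(), like plain malloc(), returns uninitialized memory: without the added "*mm->ip4_reass_lock = 0;" the freshly allocated lock word can hold a non-zero value, and the very first attempt to take the reassembly lock would spin forever. A minimal sketch of that failure mode and the one-line fix, assuming GCC-style __sync builtins; spin_lock()/spin_unlock() mirror the test-and-set pattern but are not the clib API:

/* Minimal sketch (not VPP code): a lock word taken from a non-zeroing
 * allocator must be cleared before first use. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef volatile uint32_t spinlock_t;

static void
spin_lock (spinlock_t * l)
{
  /* Busy-wait for the 0 -> 1 transition; if *l started out as garbage,
   * this loop never exits. */
  while (__sync_lock_test_and_set (l, 1))
    ;
}

static void
spin_unlock (spinlock_t * l)
{
  __sync_lock_release (l);	/* stores 0 */
}

int
main (void)
{
  /* Like clib_mem_alloc_aligned(), malloc() does not zero its result. */
  spinlock_t *lock = malloc (sizeof (*lock));
  if (!lock)
    return 1;

  *lock = 0;			/* the analogue of "*mm->ip4_reass_lock = 0;" */

  spin_lock (lock);
  puts ("holding the reassembly lock");
  spin_unlock (lock);

  free (lock);
  return 0;
}

The earlier armv8+crc change in this series rests on the observation stated in its commit message: the value is used only as a hash, never as a checksum, so any well-mixed function is acceptable when a hardware CRC32-C instruction is unavailable. The sketch below shows the same dispatch pattern outside of VPP, assuming the ARMv8 CRC intrinsics from <arm_acle.h>; hw_crc32c(), mix_hash() and key_hash() are illustrative names, and the fallback mixer is an arbitrary stand-in rather than clib_xxhash():

/* Sketch: hardware CRC32-C when the target has it, generic mixer otherwise. */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

#if defined(__ARM_FEATURE_CRC32)
#include <arm_acle.h>
#define HAVE_HW_CRC32C 1
static inline uint32_t
hw_crc32c (const uint8_t * s, size_t len)
{
  uint32_t v = 0, w;
  for (; len >= 4; len -= 4, s += 4)
    {
      memcpy (&w, s, 4);	/* avoid unaligned word loads */
      v = __crc32cw (v, w);
    }
  for (; len; len--, s++)
    v = __crc32cb (v, *s);
  return v;
}
#endif

/* Portable fallback: any decent mixer is fine, because the result is only a
 * hash table index and is never compared against a real CRC. */
static inline uint32_t
mix_hash (const uint8_t * s, size_t len)
{
  uint64_t h = 0x9e3779b97f4a7c15ULL;
  size_t i;
  for (i = 0; i < len; i++)
    h = (h ^ s[i]) * 0xff51afd7ed558ccdULL;
  return (uint32_t) (h ^ (h >> 32));
}

static inline uint32_t
key_hash (const uint8_t * key, size_t len)
{
#ifdef HAVE_HW_CRC32C
  return hw_crc32c (key, len);
#else
  return mix_hash (key, len);
#endif
}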