author    Florin Coras <fcoras@cisco.com>    2018-03-27 17:29:32 -0700
committer Dave Barach <openvpp@barachs.net>  2018-03-29 11:23:58 +0000
commit    56b39f6a9e1e467b3e7413092e20882df3d59ced
tree      fdeab6ec42d7c3c1fbedce3d21fb48997795f902
parent    cc5677b49695526c005b81bf2f0bfd320c9017f1
tcp: fix fib index buffer tagging
Change-Id: I373cc252df3621d44879b8eca70aed17d7752a2a
Signed-off-by: Florin Coras <fcoras@cisco.com>
-rw-r--r--  src/vnet/session/application_namespace.c |  2
-rw-r--r--  src/vnet/tcp/tcp_input.c                  |  1
-rw-r--r--  src/vnet/tcp/tcp_output.c                 | 30
-rw-r--r--  test/test_vcl.py                          | 44
4 files changed, 44 insertions(+), 33 deletions(-)
diff --git a/src/vnet/session/application_namespace.c b/src/vnet/session/application_namespace.c
index 8bd5b65f7fc..ce44547a86d 100644
--- a/src/vnet/session/application_namespace.c
+++ b/src/vnet/session/application_namespace.c
@@ -81,7 +81,7 @@ vnet_app_namespace_add_del (vnet_app_namespace_add_del_args_t * a)
fib_table_get_table_id_for_sw_if_index (FIB_PROTOCOL_IP4,
a->sw_if_index);
a->ip6_fib_id =
- fib_table_get_table_id_for_sw_if_index (FIB_PROTOCOL_IP4,
+ fib_table_get_table_id_for_sw_if_index (FIB_PROTOCOL_IP6,
a->sw_if_index);
}
if (a->sw_if_index == APP_NAMESPACE_INVALID_INDEX
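
The hunk above fixes a copy-paste error: the namespace's IPv6 FIB id was being resolved with FIB_PROTOCOL_IP4. A minimal sketch of the intended behaviour, using only names visible in the hunk (the ip4 assignment target is inferred from the surrounding context):

    /* resolve each address family against its own FIB protocol */
    a->ip4_fib_id =
      fib_table_get_table_id_for_sw_if_index (FIB_PROTOCOL_IP4, a->sw_if_index);
    a->ip6_fib_id =
      fib_table_get_table_id_for_sw_if_index (FIB_PROTOCOL_IP6, a->sw_if_index);
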
diff --git a/src/vnet/tcp/tcp_input.c b/src/vnet/tcp/tcp_input.c
index 07f183891ea..3b37bcd52cd 100644
--- a/src/vnet/tcp/tcp_input.c
+++ b/src/vnet/tcp/tcp_input.c
@@ -2757,6 +2757,7 @@ tcp46_listen_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
child0->c_rmt_port = th0->src_port;
child0->c_is_ip4 = is_ip4;
child0->state = TCP_STATE_SYN_RCVD;
+ child0->c_fib_index = lc0->c_fib_index;
if (is_ip4)
{
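
The one-line addition above makes an accepted child connection inherit the listener's FIB index. That is what lets the output-path changes later in this patch tag every buffer with the right table; a condensed sketch of the flow, assembled from lines elsewhere in this commit:

    /* tcp46_listen_inline: child inherits the listener's table */
    child0->c_fib_index = lc0->c_fib_index;
    /* tcp46_output_inline: the connection's table then tags the buffer for ip-lookup */
    vnet_buffer (b0)->sw_if_index[VLIB_TX] = tc0->c_fib_index;
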
diff --git a/src/vnet/tcp/tcp_output.c b/src/vnet/tcp/tcp_output.c
index 2b5a5cd73fe..53d8ab90dff 100644
--- a/src/vnet/tcp/tcp_output.c
+++ b/src/vnet/tcp/tcp_output.c
@@ -630,7 +630,7 @@ tcp_make_synack (tcp_connection_t * tc, vlib_buffer_t * b)
always_inline void
tcp_enqueue_to_ip_lookup_i (vlib_main_t * vm, vlib_buffer_t * b, u32 bi,
- u8 is_ip4, u8 flush)
+ u8 is_ip4, u32 fib_index, u8 flush)
{
tcp_main_t *tm = vnet_get_tcp_main ();
u32 thread_index = vlib_get_thread_index ();
@@ -640,8 +640,8 @@ tcp_enqueue_to_ip_lookup_i (vlib_main_t * vm, vlib_buffer_t * b, u32 bi,
b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
b->error = 0;
- /* Default FIB for now */
- vnet_buffer (b)->sw_if_index[VLIB_TX] = ~0;
+ vnet_buffer (b)->sw_if_index[VLIB_TX] = fib_index;
+ vnet_buffer (b)->sw_if_index[VLIB_RX] = 0;
/* Send to IP lookup */
next_index = is_ip4 ? ip4_lookup_node.index : ip6_lookup_node.index;
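
The convention relied on here is that a buffer enqueued straight to ip4-lookup/ip6-lookup carries its FIB selection in sw_if_index[VLIB_TX]; the old code left it at ~0 (fall back to the RX interface's table), while the patched helper writes the caller's fib_index. A hedged sketch of a patched call site, with arguments as they appear later in this diff:

    /* look the segment up in the connection's VRF instead of the default table */
    tcp_enqueue_to_ip_lookup (vm, b, bi, tc->c_is_ip4, tc->c_fib_index);
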
@@ -667,16 +667,16 @@ tcp_enqueue_to_ip_lookup_i (vlib_main_t * vm, vlib_buffer_t * b, u32 bi,
always_inline void
tcp_enqueue_to_ip_lookup_now (vlib_main_t * vm, vlib_buffer_t * b, u32 bi,
- u8 is_ip4)
+ u8 is_ip4, u32 fib_index)
{
- tcp_enqueue_to_ip_lookup_i (vm, b, bi, is_ip4, 1);
+ tcp_enqueue_to_ip_lookup_i (vm, b, bi, is_ip4, fib_index, 1);
}
always_inline void
tcp_enqueue_to_ip_lookup (vlib_main_t * vm, vlib_buffer_t * b, u32 bi,
- u8 is_ip4)
+ u8 is_ip4, u32 fib_index)
{
- tcp_enqueue_to_ip_lookup_i (vm, b, bi, is_ip4, 0);
+ tcp_enqueue_to_ip_lookup_i (vm, b, bi, is_ip4, fib_index, 0);
}
always_inline void
@@ -814,7 +814,7 @@ void
tcp_send_reset_w_pkt (tcp_connection_t * tc, vlib_buffer_t * pkt, u8 is_ip4)
{
vlib_buffer_t *b;
- u32 bi;
+ u32 bi, sw_if_index, fib_index;
tcp_main_t *tm = vnet_get_tcp_main ();
vlib_main_t *vm = vlib_get_main ();
u8 tcp_hdr_len, flags = 0;
@@ -822,11 +822,15 @@ tcp_send_reset_w_pkt (tcp_connection_t * tc, vlib_buffer_t * pkt, u8 is_ip4)
u32 seq, ack;
ip4_header_t *ih4, *pkt_ih4;
ip6_header_t *ih6, *pkt_ih6;
+ fib_protocol_t fib_proto;
if (PREDICT_FALSE (tcp_get_free_buffer_index (tm, &bi)))
return;
b = vlib_get_buffer (vm, bi);
+ sw_if_index = vnet_buffer (pkt)->sw_if_index[VLIB_RX];
+ fib_proto = is_ip4 ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
+ fib_index = fib_table_get_index_for_sw_if_index (fib_proto, sw_if_index);
tcp_init_buffer (vm, b);
/* Make and write options */
@@ -878,7 +882,7 @@ tcp_send_reset_w_pkt (tcp_connection_t * tc, vlib_buffer_t * pkt, u8 is_ip4)
ASSERT (!bogus);
}
- tcp_enqueue_to_ip_lookup_now (vm, b, bi, is_ip4);
+ tcp_enqueue_to_ip_lookup_now (vm, b, bi, is_ip4, fib_index);
TCP_EVT_DBG (TCP_EVT_RST_SENT, tc);
}
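
tcp_send_reset_w_pkt () has no fully established connection to take a FIB index from, so the hunks above derive it from the packet that triggered the reset. A hedged sketch of the derivation, using only names visible in the hunks:

    /* map the offending packet's RX interface to its table,
     * then send the RST through that table */
    sw_if_index = vnet_buffer (pkt)->sw_if_index[VLIB_RX];
    fib_proto = is_ip4 ? FIB_PROTOCOL_IP4 : FIB_PROTOCOL_IP6;
    fib_index = fib_table_get_index_for_sw_if_index (fib_proto, sw_if_index);
    tcp_enqueue_to_ip_lookup_now (vm, b, bi, is_ip4, fib_index);
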
@@ -927,7 +931,7 @@ tcp_send_reset (tcp_connection_t * tc)
th->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b, ih6, &bogus);
ASSERT (!bogus);
}
- tcp_enqueue_to_ip_lookup_now (vm, b, bi, tc->c_is_ip4);
+ tcp_enqueue_to_ip_lookup_now (vm, b, bi, tc->c_is_ip4, tc->c_fib_index);
TCP_EVT_DBG (TCP_EVT_RST_SENT, tc);
}
@@ -991,7 +995,7 @@ tcp_send_syn (tcp_connection_t * tc)
tc->rto_boff = 0;
tcp_push_ip_hdr (tm, tc, b);
- tcp_enqueue_to_ip_lookup (vm, b, bi, tc->c_is_ip4);
+ tcp_enqueue_to_ip_lookup (vm, b, bi, tc->c_is_ip4, tc->c_fib_index);
TCP_EVT_DBG (TCP_EVT_SYN_SENT, tc);
}
@@ -1469,7 +1473,7 @@ tcp_timer_retransmit_handler_i (u32 index, u8 is_syn)
/* This goes straight to ipx_lookup. Retransmit timer set already */
tcp_push_ip_hdr (tm, tc, b);
- tcp_enqueue_to_ip_lookup (vm, b, bi, tc->c_is_ip4);
+ tcp_enqueue_to_ip_lookup (vm, b, bi, tc->c_is_ip4, tc->c_fib_index);
}
/* Retransmit SYN-ACK */
else if (tc->state == TCP_STATE_SYN_RCVD)
@@ -1880,7 +1884,7 @@ tcp46_output_inline (vlib_main_t * vm,
#endif
vnet_buffer (b0)->sw_if_index[VLIB_RX] = 0;
- vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = tc0->c_fib_index;
b0->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
done:
diff --git a/test/test_vcl.py b/test/test_vcl.py
index b427c945383..c02f13f6990 100644
--- a/test/test_vcl.py
+++ b/test/test_vcl.py
@@ -7,7 +7,7 @@ import subprocess
import signal
from framework import VppTestCase, VppTestRunner, running_extended_tests, \
Worker
-from vpp_ip_route import VppIpTable, VppIpRoute, VppRoutePath
+from vpp_ip_route import VppIpTable, VppIpRoute, VppRoutePath, DpoProto
class VCLAppWorker(Worker):
@@ -75,7 +75,7 @@ class VCLTestCase(VppTestCase):
self.vapi.session_enable_disable(is_enabled=1)
self.create_loopback_interfaces(range(2))
- table_id = 0
+ table_id = 1
for i in self.lo_interfaces:
i.admin_up()
@@ -89,22 +89,23 @@ class VCLTestCase(VppTestCase):
table_id += 1
# Configure namespaces
- self.vapi.app_namespace_add(namespace_id="0", secret=1234,
+ self.vapi.app_namespace_add(namespace_id="1", secret=1234,
sw_if_index=self.loop0.sw_if_index)
- self.vapi.app_namespace_add(namespace_id="1", secret=5678,
+ self.vapi.app_namespace_add(namespace_id="2", secret=5678,
sw_if_index=self.loop1.sw_if_index)
# Add inter-table routes
ip_t01 = VppIpRoute(self, self.loop1.local_ip4, 32,
[VppRoutePath("0.0.0.0",
0xffffffff,
- nh_table_id=1)])
+ nh_table_id=2)], table_id=1)
ip_t10 = VppIpRoute(self, self.loop0.local_ip4, 32,
[VppRoutePath("0.0.0.0",
0xffffffff,
- nh_table_id=0)], table_id=1)
+ nh_table_id=1)], table_id=2)
ip_t01.add_vpp_config()
ip_t10.add_vpp_config()
+ self.logger.debug(self.vapi.cli("show ip fib"))
def thru_host_stack_tear_down(self):
for i in self.lo_interfaces:
@@ -131,19 +132,21 @@ class VCLTestCase(VppTestCase):
table_id += 1
# Configure namespaces
- self.vapi.app_namespace_add(namespace_id="0", secret=1234,
+ self.vapi.app_namespace_add(namespace_id="1", secret=1234,
sw_if_index=self.loop0.sw_if_index)
- self.vapi.app_namespace_add(namespace_id="1", secret=5678,
+ self.vapi.app_namespace_add(namespace_id="2", secret=5678,
sw_if_index=self.loop1.sw_if_index)
# Add inter-table routes
ip_t01 = VppIpRoute(self, self.loop1.local_ip6, 128,
- [VppRoutePath("0.0.0.0", 0xffffffff,
- nh_table_id=2)],
+ [VppRoutePath("::0", 0xffffffff,
+ nh_table_id=2,
+ proto=DpoProto.DPO_PROTO_IP6)],
table_id=1, is_ip6=1)
ip_t10 = VppIpRoute(self, self.loop0.local_ip6, 128,
- [VppRoutePath("0.0.0.0", 0xffffffff,
- nh_table_id=1)],
+ [VppRoutePath("::0", 0xffffffff,
+ nh_table_id=1,
+ proto=DpoProto.DPO_PROTO_IP6)],
table_id=2, is_ip6=1)
ip_t01.add_vpp_config()
ip_t10.add_vpp_config()
@@ -162,7 +165,7 @@ class VCLTestCase(VppTestCase):
client_app, client_args):
self.env = {'VCL_API_PREFIX': self.shm_prefix,
'VCL_APP_SCOPE_GLOBAL': "true",
- 'VCL_APP_NAMESPACE_ID': "0",
+ 'VCL_APP_NAMESPACE_ID': "1",
'VCL_APP_NAMESPACE_SECRET': "1234"}
worker_server = VCLAppWorker(self.build_dir, server_app, server_args,
@@ -170,7 +173,7 @@ class VCLTestCase(VppTestCase):
worker_server.start()
self.sleep(0.2)
- self.env.update({'VCL_APP_NAMESPACE_ID': "1",
+ self.env.update({'VCL_APP_NAMESPACE_ID': "2",
'VCL_APP_NAMESPACE_SECRET': "5678"})
worker_client = VCLAppWorker(self.build_dir, client_app, client_args,
self.logger, self.env)
@@ -314,6 +317,7 @@ class VCLThruHostStackTestCase(VCLTestCase):
super(VCLThruHostStackTestCase, self).tearDown()
+ @unittest.skipUnless(running_extended_tests(), "part of extended tests")
def test_ldp_thru_host_stack_echo(self):
""" run LDP thru host stack echo test """
@@ -329,9 +333,9 @@ class VCLThruHostStackTestCase(VCLTestCase):
""" run VCL thru host stack echo test """
# TBD: Enable this when VPP thru host teardown config bug is fixed.
- # self.thru_host_stack_test("vcl_test_server", self.server_args,
- # "vcl_test_client",
- # self.client_echo_test_args)
+ self.thru_host_stack_test("vcl_test_server", self.server_args,
+ "vcl_test_client",
+ self.client_echo_test_args)
# TBD: Remove VCLThruHostStackExtended*TestCase classes and move
# tests here when VPP thru host teardown/setup config bug
@@ -617,6 +621,7 @@ class VCLIpv6ThruHostStackTestCase(VCLTestCase):
super(VCLIpv6ThruHostStackTestCase, self).tearDown()
+ @unittest.skipUnless(running_extended_tests(), "part of extended tests")
def test_ldp_ipv6_thru_host_stack_echo(self):
""" run LDP IPv6 thru host stack echo test """
@@ -634,8 +639,8 @@ class VCLIpv6ThruHostStackTestCase(VCLTestCase):
# TBD: Enable this when VPP IPv6 thru host teardown
# config bug is fixed.
# self.thru_host_stack_test("vcl_test_server", self.server_ipv6_args,
- # "vcl_test_client",
- # self.client_ipv6_echo_test_args)
+ # "vcl_test_client",
+ # self.client_ipv6_echo_test_args)
# TBD: Remove VCLIpv6ThruHostStackExtended*TestCase classes and move
# tests here when VPP thru host teardown/setup config bug
@@ -796,6 +801,7 @@ class VCLIpv6ThruHostStackIperfTestCase(VCLTestCase):
super(VCLIpv6ThruHostStackIperfTestCase, self).tearDown()
+ @unittest.skipUnless(running_extended_tests(), "part of extended tests")
def test_ldp_thru_host_stack_iperf3(self):
""" run LDP thru host stack iperf3 test """