diff options
-rw-r--r-- | src/plugins/kubeproxy/kp.c | 8 | ||||
-rw-r--r-- | src/plugins/kubeproxy/kp_cli.c | 35 | ||||
-rw-r--r-- | test/test_kubeproxy.py | 8 |
3 files changed, 47 insertions, 4 deletions
diff --git a/src/plugins/kubeproxy/kp.c b/src/plugins/kubeproxy/kp.c index 1a087e82073..b31b3171d90 100644 --- a/src/plugins/kubeproxy/kp.c +++ b/src/plugins/kubeproxy/kp.c @@ -747,8 +747,10 @@ int kp_vip_add(ip46_address_t *prefix, u8 plen, kp_vip_type_t type, if (ip46_prefix_is_ip4(prefix, plen) && (type != KP_VIP_TYPE_IP4_NAT44) && - (type != KP_VIP_TYPE_IP4_NAT46)) + (type != KP_VIP_TYPE_IP4_NAT46)) { + kp_put_writer_lock(); return VNET_API_ERROR_INVALID_ADDRESS_FAMILY; + } //Allocate @@ -785,8 +787,10 @@ int kp_vip_add(ip46_address_t *prefix, u8 plen, kp_vip_type_t type, //Create maping from nodeport to vip_index key = clib_host_to_net_u16(node_port); entry = hash_get_mem (kpm->nodeport_by_key, &key); - if (entry) + if (entry) { + kp_put_writer_lock(); return VNET_API_ERROR_VALUE_EXIST; + } key_copy = clib_mem_alloc (sizeof (*key_copy)); clib_memcpy (key_copy, &key, sizeof (*key_copy)); diff --git a/src/plugins/kubeproxy/kp_cli.c b/src/plugins/kubeproxy/kp_cli.c index 6a18834274e..43c5c51ae53 100644 --- a/src/plugins/kubeproxy/kp_cli.c +++ b/src/plugins/kubeproxy/kp_cli.c @@ -345,3 +345,38 @@ VLIB_CLI_COMMAND (kp_set_interface_nat4_command, static) = { .short_help = "kube-proxy set interface nat4 in <intfc> [del]", }; +static clib_error_t * +kp_flowtable_flush_command_fn(vlib_main_t * vm, + unformat_input_t * input, vlib_cli_command_t * cmd) +{ + u32 thread_index; + vlib_thread_main_t *tm = vlib_get_thread_main(); + kp_main_t *kpm = &kp_main; + + for(thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++ ) { + kp_hash_t *h = kpm->per_cpu[thread_index].sticky_ht; + if (h != NULL) { + kp_hash_bucket_t *b; + u32 i; + kp_hash_foreach_entry(h, b, i) { + vlib_refcount_add(&kpm->pod_refcount, thread_index, b->value[i], -1); + vlib_refcount_add(&kpm->pod_refcount, thread_index, 0, 1); + } + + kp_hash_free(h); + kpm->per_cpu[thread_index].sticky_ht = NULL; + } + } + + return NULL; +} + +/* + * flush all kube-proxy flowtables + * This is intended for debug 
and unit-tests purposes only + */ +VLIB_CLI_COMMAND (kp_flowtable_flush_command, static) = { + .path = "test kube-proxy flowtable flush", + .short_help = "test kube-proxy flowtable flush", + .function = kp_flowtable_flush_command_fn, +}; diff --git a/test/test_kubeproxy.py b/test/test_kubeproxy.py index 418e03adb52..2398802b362 100644 --- a/test/test_kubeproxy.py +++ b/test/test_kubeproxy.py @@ -146,6 +146,7 @@ class TestKP(VppTestCase): for podid in self.pods: self.vapi.cli("ku pod 90.0.0.0/8 10.0.0.%u del" % (podid)) self.vapi.cli("ku vip 90.0.0.0/8 nat4 del") + self.vapi.cli("test kube-proxy flowtable flush") @unittest.skipUnless(running_extended_tests(), "part of extended tests") def test_kp_ip6_nat4(self): @@ -165,6 +166,7 @@ class TestKP(VppTestCase): for podid in self.pods: self.vapi.cli("ku pod 2001::/16 10.0.0.%u del" % (podid)) self.vapi.cli("ku vip 2001::/16 nat4 del") + self.vapi.cli("test kube-proxy flowtable flush") @unittest.skipUnless(running_extended_tests(), "part of extended tests") def test_kp_ip4_nat6(self): @@ -181,14 +183,15 @@ class TestKP(VppTestCase): self.checkCapture(nat4=False, isv4=True) finally: for podid in self.pods: - self.vapi.cli("ku pod 90.0.0.0/8 2002::%u" % (podid)) + self.vapi.cli("ku pod 90.0.0.0/8 2002::%u del" % (podid)) self.vapi.cli("ku vip 90.0.0.0/8 nat6 del") + self.vapi.cli("test kube-proxy flowtable flush") @unittest.skipUnless(running_extended_tests(), "part of extended tests") def test_kp_ip6_nat6(self): """ Kube-proxy NAT66 """ try: - self.vapi.cli("ku vip 90.0.0.0/8 port 3306 target_port 3307 nat6") + self.vapi.cli("ku vip 2001::/16 port 3306 target_port 3307 nat6") for podid in self.pods: self.vapi.cli("ku pod 2001::/16 2002::%u" % (podid)) @@ -201,3 +204,4 @@ class TestKP(VppTestCase): for podid in self.pods: self.vapi.cli("ku pod 2001::/16 2002::%u del" % (podid)) self.vapi.cli("ku vip 2001::/16 nat6 del") + self.vapi.cli("test kube-proxy flowtable flush") |