diff options
-rw-r--r-- | src/vnet/ipsec/ipsec.c | 35 | ||||
-rw-r--r-- | src/vnet/ipsec/ipsec.h | 23 | ||||
-rw-r--r-- | src/vnet/ipsec/ipsec_cli.c | 8 | ||||
-rw-r--r-- | src/vnet/ipsec/ipsec_format.c | 15 | ||||
-rw-r--r-- | src/vnet/ipsec/ipsec_input.c | 252 | ||||
-rw-r--r-- | src/vnet/ipsec/ipsec_output.c | 2 | ||||
-rw-r--r-- | src/vnet/ipsec/ipsec_spd.h | 3 | ||||
-rw-r--r-- | src/vnet/ipsec/ipsec_spd_policy.c | 27 | ||||
-rw-r--r-- | test/template_ipsec.py | 23 | ||||
-rw-r--r-- | test/test_ipsec_spd_flow_cache_input.py | 683 | ||||
-rw-r--r-- | test/test_ipsec_spd_flow_cache_output.py (renamed from test/test_ipsec_spd_flow_cache.py) | 0 |
11 files changed, 1011 insertions, 60 deletions
diff --git a/src/vnet/ipsec/ipsec.c b/src/vnet/ipsec/ipsec.c index 5cc8044e3d4..2749b04587b 100644 --- a/src/vnet/ipsec/ipsec.c +++ b/src/vnet/ipsec/ipsec.c @@ -31,6 +31,10 @@ */ #define IPSEC4_OUT_SPD_DEFAULT_HASH_NUM_BUCKETS (1 << 22) +/* Flow cache is sized for 1 million flows with a load factor of .25. + */ +#define IPSEC4_SPD_DEFAULT_HASH_NUM_BUCKETS (1 << 22) + ipsec_main_t ipsec_main; esp_async_post_next_t esp_encrypt_async_next; esp_async_post_next_t esp_decrypt_async_next; @@ -554,12 +558,18 @@ ipsec_init (vlib_main_t * vm) crypto_engine_backend_register_post_node (vm); im->ipsec4_out_spd_hash_tbl = NULL; - im->flow_cache_flag = 0; + im->output_flow_cache_flag = 0; im->ipsec4_out_spd_flow_cache_entries = 0; im->epoch_count = 0; im->ipsec4_out_spd_hash_num_buckets = IPSEC4_OUT_SPD_DEFAULT_HASH_NUM_BUCKETS; + im->ipsec4_in_spd_hash_tbl = NULL; + im->input_flow_cache_flag = 0; + im->ipsec4_in_spd_flow_cache_entries = 0; + im->input_epoch_count = 0; + im->ipsec4_in_spd_hash_num_buckets = IPSEC4_SPD_DEFAULT_HASH_NUM_BUCKETS; + return 0; } @@ -570,14 +580,16 @@ ipsec_config (vlib_main_t *vm, unformat_input_t *input) { ipsec_main_t *im = &ipsec_main; unformat_input_t sub_input; + u32 ipsec4_out_spd_hash_num_buckets; + u32 ipsec4_in_spd_hash_num_buckets; while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) { if (unformat (input, "ipv4-outbound-spd-flow-cache on")) - im->flow_cache_flag = 1; + im->output_flow_cache_flag = 1; else if (unformat (input, "ipv4-outbound-spd-flow-cache off")) - im->flow_cache_flag = 0; + im->output_flow_cache_flag = 0; else if (unformat (input, "ipv4-outbound-spd-hash-buckets %d", &ipsec4_out_spd_hash_num_buckets)) { @@ -585,6 +597,16 @@ ipsec_config (vlib_main_t *vm, unformat_input_t *input) im->ipsec4_out_spd_hash_num_buckets = 1ULL << max_log2 (ipsec4_out_spd_hash_num_buckets); } + else if (unformat (input, "ipv4-inbound-spd-flow-cache on")) + im->input_flow_cache_flag = 1; + else if (unformat (input, 
"ipv4-inbound-spd-flow-cache off")) + im->input_flow_cache_flag = 0; + else if (unformat (input, "ipv4-inbound-spd-hash-buckets %d", + &ipsec4_in_spd_hash_num_buckets)) + { + im->ipsec4_in_spd_hash_num_buckets = + 1ULL << max_log2 (ipsec4_in_spd_hash_num_buckets); + } else if (unformat (input, "ip4 %U", unformat_vlib_cli_sub_input, &sub_input)) { @@ -623,11 +645,16 @@ ipsec_config (vlib_main_t *vm, unformat_input_t *input) return clib_error_return (0, "unknown input `%U'", format_unformat_error, input); } - if (im->flow_cache_flag) + if (im->output_flow_cache_flag) { vec_add2 (im->ipsec4_out_spd_hash_tbl, im->ipsec4_out_spd_hash_tbl, im->ipsec4_out_spd_hash_num_buckets); } + if (im->input_flow_cache_flag) + { + vec_add2 (im->ipsec4_in_spd_hash_tbl, im->ipsec4_in_spd_hash_tbl, + im->ipsec4_in_spd_hash_num_buckets); + } return 0; } diff --git a/src/vnet/ipsec/ipsec.h b/src/vnet/ipsec/ipsec.h index 38feaed6f77..05d8484a28c 100644 --- a/src/vnet/ipsec/ipsec.h +++ b/src/vnet/ipsec/ipsec.h @@ -36,7 +36,7 @@ typedef clib_error_t *(*enable_disable_cb_t) (int is_enable); typedef struct { - u64 key[2]; + u64 key[2]; // 16 bytes u64 value; i32 bucket_lock; u32 un_used; @@ -54,6 +54,18 @@ typedef union ipsec4_hash_kv_16_8_t kv_16_8; } ipsec4_spd_5tuple_t; +typedef union +{ + struct + { + ip4_address_t ip4_src_addr; + ip4_address_t ip4_dest_addr; + ipsec_spd_policy_type_t policy_type; + u8 pad[4]; + }; // 16 bytes total + ipsec4_hash_kv_16_8_t kv_16_8; +} ipsec4_inbound_spd_tuple_t; + typedef struct { u8 *name; @@ -151,6 +163,7 @@ typedef struct uword *ipsec_if_by_sw_if_index; ipsec4_hash_kv_16_8_t *ipsec4_out_spd_hash_tbl; + ipsec4_hash_kv_16_8_t *ipsec4_in_spd_hash_tbl; clib_bihash_8_16_t tun4_protect_by_key; clib_bihash_24_16_t tun6_protect_by_key; @@ -223,9 +236,15 @@ typedef struct u32 ipsec4_out_spd_hash_num_buckets; u32 ipsec4_out_spd_flow_cache_entries; u32 epoch_count; + u8 output_flow_cache_flag; + + u32 ipsec4_in_spd_hash_num_buckets; + u32 
ipsec4_in_spd_flow_cache_entries; + u32 input_epoch_count; + u8 input_flow_cache_flag; + u8 async_mode; u16 msg_id_base; - u8 flow_cache_flag; } ipsec_main_t; typedef enum ipsec_format_flags_t_ diff --git a/src/vnet/ipsec/ipsec_cli.c b/src/vnet/ipsec/ipsec_cli.c index c48d6855169..e1263037c6c 100644 --- a/src/vnet/ipsec/ipsec_cli.c +++ b/src/vnet/ipsec/ipsec_cli.c @@ -428,9 +428,13 @@ ipsec_spd_show_all (vlib_main_t * vm, ipsec_main_t * im) vlib_cli_output(vm, "%U", format_ipsec_spd, spdi); } - if (im->flow_cache_flag) + if (im->output_flow_cache_flag) { - vlib_cli_output (vm, "%U", format_ipsec_spd_flow_cache); + vlib_cli_output (vm, "%U", format_ipsec_out_spd_flow_cache); + } + if (im->input_flow_cache_flag) + { + vlib_cli_output (vm, "%U", format_ipsec_in_spd_flow_cache); } /* *INDENT-ON* */ } diff --git a/src/vnet/ipsec/ipsec_format.c b/src/vnet/ipsec/ipsec_format.c index 751d098bcdd..c8c0170efe7 100644 --- a/src/vnet/ipsec/ipsec_format.c +++ b/src/vnet/ipsec/ipsec_format.c @@ -232,17 +232,28 @@ done: } u8 * -format_ipsec_spd_flow_cache (u8 *s, va_list *args) +format_ipsec_out_spd_flow_cache (u8 *s, va_list *args) { ipsec_main_t *im = &ipsec_main; - s = format (s, "\nip4-outbound-spd-flow-cache-entries: %u", + s = format (s, "\nipv4-outbound-spd-flow-cache-entries: %u", im->ipsec4_out_spd_flow_cache_entries); return (s); } u8 * +format_ipsec_in_spd_flow_cache (u8 *s, va_list *args) +{ + ipsec_main_t *im = &ipsec_main; + + s = format (s, "\nipv4-inbound-spd-flow-cache-entries: %u", + im->ipsec4_in_spd_flow_cache_entries); + + return (s); +} + +u8 * format_ipsec_key (u8 * s, va_list * args) { ipsec_key_t *key = va_arg (*args, ipsec_key_t *); diff --git a/src/vnet/ipsec/ipsec_input.c b/src/vnet/ipsec/ipsec_input.c index c47ea34f288..09166bccf5b 100644 --- a/src/vnet/ipsec/ipsec_input.c +++ b/src/vnet/ipsec/ipsec_input.c @@ -71,8 +71,86 @@ format_ipsec_input_trace (u8 * s, va_list * args) return s; } +always_inline void +ipsec4_input_spd_add_flow_cache_entry 
(ipsec_main_t *im, u32 sa, u32 da, + ipsec_spd_policy_type_t policy_type, + u32 pol_id) +{ + u64 hash; + u8 is_overwrite = 0, is_stale_overwrite = 0; + /* Store in network byte order to avoid conversion on lookup */ + ipsec4_inbound_spd_tuple_t ip4_tuple = { + .ip4_src_addr = (ip4_address_t) clib_host_to_net_u32 (sa), + .ip4_dest_addr = (ip4_address_t) clib_host_to_net_u32 (da), + .policy_type = policy_type + }; + + ip4_tuple.kv_16_8.value = + (((u64) pol_id) << 32) | ((u64) im->input_epoch_count); + + hash = ipsec4_hash_16_8 (&ip4_tuple.kv_16_8); + hash &= (im->ipsec4_in_spd_hash_num_buckets - 1); + + ipsec_spinlock_lock (&im->ipsec4_in_spd_hash_tbl[hash].bucket_lock); + /* Check if we are overwriting an existing entry so we know + whether to increment the flow cache counter. Since flow + cache counter is reset on any policy add/remove, but + hash table values are not, we need to check if the entry + we are overwriting is stale or not. If it's a stale entry + overwrite, we still want to increment flow cache counter */ + is_overwrite = (im->ipsec4_in_spd_hash_tbl[hash].value != 0); + /* Check if we are overwriting a stale entry by comparing + with current epoch count */ + if (PREDICT_FALSE (is_overwrite)) + is_stale_overwrite = + (im->input_epoch_count != + ((u32) (im->ipsec4_in_spd_hash_tbl[hash].value & 0xFFFFFFFF))); + clib_memcpy_fast (&im->ipsec4_in_spd_hash_tbl[hash], &ip4_tuple.kv_16_8, + sizeof (ip4_tuple.kv_16_8)); + ipsec_spinlock_unlock (&im->ipsec4_in_spd_hash_tbl[hash].bucket_lock); + + /* Increment the counter to track active flow cache entries + when entering a fresh entry or overwriting a stale one */ + if (!is_overwrite || is_stale_overwrite) + clib_atomic_fetch_add_relax (&im->ipsec4_in_spd_flow_cache_entries, 1); + + return; +} + always_inline ipsec_policy_t * -ipsec_input_policy_match (ipsec_spd_t * spd, u32 sa, u32 da, +ipsec4_input_spd_find_flow_cache_entry (ipsec_main_t *im, u32 sa, u32 da, + ipsec_spd_policy_type_t policy_type) +{ + 
ipsec_policy_t *p = NULL; + ipsec4_hash_kv_16_8_t kv_result; + u64 hash; + ipsec4_inbound_spd_tuple_t ip4_tuple = { .ip4_src_addr = (ip4_address_t) sa, + .ip4_dest_addr = (ip4_address_t) da, + .policy_type = policy_type }; + + hash = ipsec4_hash_16_8 (&ip4_tuple.kv_16_8); + hash &= (im->ipsec4_in_spd_hash_num_buckets - 1); + + ipsec_spinlock_lock (&im->ipsec4_in_spd_hash_tbl[hash].bucket_lock); + kv_result = im->ipsec4_in_spd_hash_tbl[hash]; + ipsec_spinlock_unlock (&im->ipsec4_in_spd_hash_tbl[hash].bucket_lock); + + if (ipsec4_hash_key_compare_16_8 ((u64 *) &ip4_tuple.kv_16_8, + (u64 *) &kv_result)) + { + if (im->input_epoch_count == ((u32) (kv_result.value & 0xFFFFFFFF))) + { + /* Get the policy based on the index */ + p = + pool_elt_at_index (im->policies, ((u32) (kv_result.value >> 32))); + } + } + + return p; +} + +always_inline ipsec_policy_t * +ipsec_input_policy_match (ipsec_spd_t *spd, u32 sa, u32 da, ipsec_spd_policy_type_t policy_type) { ipsec_main_t *im = &ipsec_main; @@ -95,13 +173,18 @@ ipsec_input_policy_match (ipsec_spd_t * spd, u32 sa, u32 da, if (sa > clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32)) continue; + if (im->input_flow_cache_flag) + { + /* Add an Entry in Flow cache */ + ipsec4_input_spd_add_flow_cache_entry (im, sa, da, policy_type, *i); + } return p; } return 0; } always_inline ipsec_policy_t * -ipsec_input_protect_policy_match (ipsec_spd_t * spd, u32 sa, u32 da, u32 spi) +ipsec_input_protect_policy_match (ipsec_spd_t *spd, u32 sa, u32 da, u32 spi) { ipsec_main_t *im = &ipsec_main; ipsec_policy_t *p; @@ -124,7 +207,7 @@ ipsec_input_protect_policy_match (ipsec_spd_t * spd, u32 sa, u32 da, u32 spi) if (sa != clib_net_to_host_u32 (s->tunnel.t_src.ip.ip4.as_u32)) continue; - return p; + goto return_policy; } if (da < clib_net_to_host_u32 (p->laddr.start.ip4.as_u32)) @@ -139,6 +222,14 @@ ipsec_input_protect_policy_match (ipsec_spd_t * spd, u32 sa, u32 da, u32 spi) if (sa > clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32)) continue; + 
return_policy: + if (im->input_flow_cache_flag) + { + /* Add an Entry in Flow cache */ + ipsec4_input_spd_add_flow_cache_entry ( + im, sa, da, IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT, *i); + } + return p; } return 0; @@ -225,6 +316,7 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm, ipsec_spd_t *spd0; ipsec_policy_t *p0 = NULL; u8 has_space0; + bool search_flow_cache = false; if (n_left_from > 2) { @@ -252,13 +344,28 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm, esp0 = (esp_header_t *) ((u8 *) esp0 + sizeof (udp_header_t)); } - p0 = ipsec_input_protect_policy_match (spd0, - clib_net_to_host_u32 - (ip0->src_address.as_u32), - clib_net_to_host_u32 - (ip0->dst_address.as_u32), - clib_net_to_host_u32 - (esp0->spi)); + // if flow cache is enabled, first search through flow cache for a + // policy match for either protect, bypass or discard rules, in that + // order. if no match is found search_flow_cache is set to false (1) + // and we revert back to linear search + search_flow_cache = im->input_flow_cache_flag; + + esp_or_udp: + if (search_flow_cache) // attempt to match policy in flow cache + { + p0 = ipsec4_input_spd_find_flow_cache_entry ( + im, ip0->src_address.as_u32, ip0->dst_address.as_u32, + IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT); + } + + else // linear search if flow cache is not enabled, + // or flow cache search just failed + { + p0 = ipsec_input_protect_policy_match ( + spd0, clib_net_to_host_u32 (ip0->src_address.as_u32), + clib_net_to_host_u32 (ip0->dst_address.as_u32), + clib_net_to_host_u32 (esp0->spi)); + } has_space0 = vlib_buffer_has_space (b[0], @@ -285,12 +392,21 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm, pi0 = ~0; }; - p0 = ipsec_input_policy_match (spd0, - clib_net_to_host_u32 - (ip0->src_address.as_u32), - clib_net_to_host_u32 - (ip0->dst_address.as_u32), - IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS); + if (search_flow_cache) + { + p0 = ipsec4_input_spd_find_flow_cache_entry ( + im, ip0->src_address.as_u32, 
ip0->dst_address.as_u32, + IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS); + } + + else + { + p0 = ipsec_input_policy_match ( + spd0, clib_net_to_host_u32 (ip0->src_address.as_u32), + clib_net_to_host_u32 (ip0->dst_address.as_u32), + IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS); + } + if (PREDICT_TRUE ((p0 != NULL))) { ipsec_bypassed += 1; @@ -308,12 +424,21 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm, pi0 = ~0; }; - p0 = ipsec_input_policy_match (spd0, - clib_net_to_host_u32 - (ip0->src_address.as_u32), - clib_net_to_host_u32 - (ip0->dst_address.as_u32), - IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD); + if (search_flow_cache) + { + p0 = ipsec4_input_spd_find_flow_cache_entry ( + im, ip0->src_address.as_u32, ip0->dst_address.as_u32, + IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD); + } + + else + { + p0 = ipsec_input_policy_match ( + spd0, clib_net_to_host_u32 (ip0->src_address.as_u32), + clib_net_to_host_u32 (ip0->dst_address.as_u32), + IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD); + } + if (PREDICT_TRUE ((p0 != NULL))) { ipsec_dropped += 1; @@ -332,6 +457,13 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm, pi0 = ~0; }; + // flow cache search failed, try again with linear search + if (search_flow_cache && p0 == NULL) + { + search_flow_cache = false; + goto esp_or_udp; + } + /* Drop by default if no match on PROTECT, BYPASS or DISCARD */ ipsec_unprocessed += 1; next[0] = IPSEC_INPUT_NEXT_DROP; @@ -354,13 +486,26 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm, else if (ip0->protocol == IP_PROTOCOL_IPSEC_AH) { ah0 = (ah_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0)); - p0 = ipsec_input_protect_policy_match (spd0, - clib_net_to_host_u32 - (ip0->src_address.as_u32), - clib_net_to_host_u32 - (ip0->dst_address.as_u32), - clib_net_to_host_u32 - (ah0->spi)); + + // if flow cache is enabled, first search through flow cache for a + // policy match and revert back to linear search on failure + search_flow_cache = im->input_flow_cache_flag; + + ah: + if (search_flow_cache) + { + p0 = 
ipsec4_input_spd_find_flow_cache_entry ( + im, ip0->src_address.as_u32, ip0->dst_address.as_u32, + IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT); + } + + else + { + p0 = ipsec_input_protect_policy_match ( + spd0, clib_net_to_host_u32 (ip0->src_address.as_u32), + clib_net_to_host_u32 (ip0->dst_address.as_u32), + clib_net_to_host_u32 (ah0->spi)); + } has_space0 = vlib_buffer_has_space (b[0], @@ -386,12 +531,21 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm, pi0 = ~0; } - p0 = ipsec_input_policy_match (spd0, - clib_net_to_host_u32 - (ip0->src_address.as_u32), - clib_net_to_host_u32 - (ip0->dst_address.as_u32), - IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS); + if (search_flow_cache) + { + p0 = ipsec4_input_spd_find_flow_cache_entry ( + im, ip0->src_address.as_u32, ip0->dst_address.as_u32, + IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS); + } + + else + { + p0 = ipsec_input_policy_match ( + spd0, clib_net_to_host_u32 (ip0->src_address.as_u32), + clib_net_to_host_u32 (ip0->dst_address.as_u32), + IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS); + } + if (PREDICT_TRUE ((p0 != NULL))) { ipsec_bypassed += 1; @@ -409,12 +563,21 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm, pi0 = ~0; }; - p0 = ipsec_input_policy_match (spd0, - clib_net_to_host_u32 - (ip0->src_address.as_u32), - clib_net_to_host_u32 - (ip0->dst_address.as_u32), - IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD); + if (search_flow_cache) + { + p0 = ipsec4_input_spd_find_flow_cache_entry ( + im, ip0->src_address.as_u32, ip0->dst_address.as_u32, + IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD); + } + + else + { + p0 = ipsec_input_policy_match ( + spd0, clib_net_to_host_u32 (ip0->src_address.as_u32), + clib_net_to_host_u32 (ip0->dst_address.as_u32), + IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD); + } + if (PREDICT_TRUE ((p0 != NULL))) { ipsec_dropped += 1; @@ -433,6 +596,13 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm, pi0 = ~0; }; + // flow cache search failed, retry with linear search + if (search_flow_cache && p0 == NULL) + { + search_flow_cache = 
false; + goto ah; + } + /* Drop by default if no match on PROTECT, BYPASS or DISCARD */ ipsec_unprocessed += 1; next[0] = IPSEC_INPUT_NEXT_DROP; diff --git a/src/vnet/ipsec/ipsec_output.c b/src/vnet/ipsec/ipsec_output.c index 84927debaca..6d6608d7a32 100644 --- a/src/vnet/ipsec/ipsec_output.c +++ b/src/vnet/ipsec/ipsec_output.c @@ -278,7 +278,7 @@ ipsec_output_inline (vlib_main_t * vm, vlib_node_runtime_t * node, ipsec_spd_t *spd0 = 0; int bogus; u64 nc_protect = 0, nc_bypass = 0, nc_discard = 0, nc_nomatch = 0; - u8 flow_cache_enabled = im->flow_cache_flag; + u8 flow_cache_enabled = im->output_flow_cache_flag; from = vlib_frame_vector_args (from_frame); n_left_from = from_frame->n_vectors; diff --git a/src/vnet/ipsec/ipsec_spd.h b/src/vnet/ipsec/ipsec_spd.h index 5bfc6ae56f6..757a1b72d51 100644 --- a/src/vnet/ipsec/ipsec_spd.h +++ b/src/vnet/ipsec/ipsec_spd.h @@ -64,7 +64,8 @@ extern int ipsec_set_interface_spd (vlib_main_t * vm, extern u8 *format_ipsec_spd (u8 * s, va_list * args); -extern u8 *format_ipsec_spd_flow_cache (u8 *s, va_list *args); +extern u8 *format_ipsec_out_spd_flow_cache (u8 *s, va_list *args); +extern u8 *format_ipsec_in_spd_flow_cache (u8 *s, va_list *args); #endif /* __IPSEC_SPD_H__ */ diff --git a/src/vnet/ipsec/ipsec_spd_policy.c b/src/vnet/ipsec/ipsec_spd_policy.c index 36405bd6d9b..72da408c161 100644 --- a/src/vnet/ipsec/ipsec_spd_policy.c +++ b/src/vnet/ipsec/ipsec_spd_policy.c @@ -156,7 +156,7 @@ ipsec_add_del_policy (vlib_main_t * vm, if (!spd) return VNET_API_ERROR_SYSCALL_ERROR_1; - if (im->flow_cache_flag && !policy->is_ipv6 && + if (im->output_flow_cache_flag && !policy->is_ipv6 && policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND) { /* @@ -179,6 +179,31 @@ ipsec_add_del_policy (vlib_main_t * vm, clib_atomic_store_relax_n (&im->ipsec4_out_spd_flow_cache_entries, 0); } + if ((policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT || + policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS || + policy->type == 
IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD) && + im->input_flow_cache_flag && !policy->is_ipv6) + { + /* + * Flow cache entry is valid only when input_epoch_count value in control + * plane and data plane match. Otherwise, flow cache entry is considered + * stale. To avoid the race condition of using old input_epoch_count + * value in data plane after the roll over of input_epoch_count in + * control plane, entire flow cache is reset. + */ + if (im->input_epoch_count == 0xFFFFFFFF) + { + /* Reset all the entries in flow cache */ + clib_memset_u8 (im->ipsec4_in_spd_hash_tbl, 0, + im->ipsec4_in_spd_hash_num_buckets * + (sizeof (*(im->ipsec4_in_spd_hash_tbl)))); + } + /* Increment epoch counter by 1 */ + clib_atomic_fetch_add_relax (&im->input_epoch_count, 1); + /* Reset spd flow cache counter since all old entries are stale */ + im->ipsec4_in_spd_flow_cache_entries = 0; + } + if (is_add) { u32 policy_index; diff --git a/test/template_ipsec.py b/test/template_ipsec.py index c2a14e36097..8105f0ca52d 100644 --- a/test/template_ipsec.py +++ b/test/template_ipsec.py @@ -1785,15 +1785,21 @@ class SpdFlowCacheTemplate(IPSecIPv4Fwd): def tearDown(self): super(SpdFlowCacheTemplate, self).tearDown() - def get_spd_flow_cache_entries(self): + def get_spd_flow_cache_entries(self, outbound): """ 'show ipsec spd' output: - ip4-outbound-spd-flow-cache-entries: 0 + ipv4-inbound-spd-flow-cache-entries: 0 + ipv4-outbound-spd-flow-cache-entries: 0 """ show_ipsec_reply = self.vapi.cli("show ipsec spd") # match the relevant section of 'show ipsec spd' output - regex_match = re.search( - 'ip4-outbound-spd-flow-cache-entries: (.*)', - show_ipsec_reply, re.DOTALL) + if(outbound): + regex_match = re.search( + 'ipv4-outbound-spd-flow-cache-entries: (.*)', + show_ipsec_reply, re.DOTALL) + else: + regex_match = re.search( + 'ipv4-inbound-spd-flow-cache-entries: (.*)', + show_ipsec_reply, re.DOTALL) if regex_match is None: raise Exception("Unable to find spd flow cache entries \ in \'show ipsec spd\' 
CLI output - regex failed to match") @@ -1807,7 +1813,12 @@ class SpdFlowCacheTemplate(IPSecIPv4Fwd): return num_entries def verify_num_outbound_flow_cache_entries(self, expected_elements): - self.assertEqual(self.get_spd_flow_cache_entries(), expected_elements) + self.assertEqual(self.get_spd_flow_cache_entries(outbound=True), + expected_elements) + + def verify_num_inbound_flow_cache_entries(self, expected_elements): + self.assertEqual(self.get_spd_flow_cache_entries(outbound=False), + expected_elements) def crc32_supported(self): # lscpu is part of util-linux package, available on all Linux Distros diff --git a/test/test_ipsec_spd_flow_cache_input.py b/test/test_ipsec_spd_flow_cache_input.py new file mode 100644 index 00000000000..2d70d1540b8 --- /dev/null +++ b/test/test_ipsec_spd_flow_cache_input.py @@ -0,0 +1,683 @@ +from os import remove +import socket +import unittest + +from util import ppp +from framework import VppTestRunner +from template_ipsec import SpdFlowCacheTemplate + + +class SpdFlowCacheInbound(SpdFlowCacheTemplate): + # Override setUpConstants to enable inbound flow cache in config + @classmethod + def setUpConstants(cls): + super(SpdFlowCacheInbound, cls).setUpConstants() + cls.vpp_cmdline.extend(["ipsec", "{", + "ipv4-inbound-spd-flow-cache on", + "}"]) + cls.logger.info("VPP modified cmdline is %s" % " " + .join(cls.vpp_cmdline)) + + +class IPSec4SpdTestCaseBypass(SpdFlowCacheInbound): + """ IPSec/IPv4 inbound: Policy mode test case with flow cache \ + (add bypass)""" + def test_ipsec_spd_inbound_bypass(self): + # In this test case, packets in IPv4 FWD path are configured + # to go through IPSec inbound SPD policy lookup. + # + # 2 inbound SPD rules (1 HIGH and 1 LOW) are added. + # - High priority rule action is set to DISCARD. + # - Low priority rule action is set to BYPASS. 
+ # + # Since BYPASS rules take precedence over DISCARD + # (the order being PROTECT, BYPASS, DISCARD) we expect the + # BYPASS rule to match and traffic to be correctly forwarded. + self.create_interfaces(2) + pkt_count = 5 + + self.spd_create_and_intf_add(1, [self.pg1, self.pg0]) + + # create input rules + # bypass rule should take precedence over discard rule, + # even though it's lower priority + policy_0 = self.spd_add_rem_policy( # inbound, priority 10 + 1, self.pg1, self.pg0, socket.IPPROTO_UDP, + is_out=0, priority=10, policy_type="bypass") + policy_1 = self.spd_add_rem_policy( # inbound, priority 15 + 1, self.pg1, self.pg0, socket.IPPROTO_UDP, + is_out=0, priority=15, policy_type="discard") + + # create output rule so we can capture forwarded packets + policy_2 = self.spd_add_rem_policy( # outbound, priority 10 + 1, self.pg0, self.pg1, socket.IPPROTO_UDP, + is_out=1, priority=10, policy_type="bypass") + + # check flow cache is empty before sending traffic + self.verify_num_inbound_flow_cache_entries(0) + # create the packet stream + packets = self.create_stream(self.pg0, self.pg1, pkt_count) + # add the stream to the source interface + self.pg0.add_stream(packets) + self.pg1.enable_capture() + self.pg_start() + + # check capture on pg1 + capture = self.pg1.get_capture() + for packet in capture: + try: + self.logger.debug(ppp("SPD Add - Got packet:", packet)) + except Exception: + self.logger.error(ppp("Unexpected or invalid packet:", packet)) + raise + self.logger.debug("SPD: Num packets: %s", len(capture.res)) + + # verify captured packets + self.verify_capture(self.pg0, self.pg1, capture) + # verify all policies matched the expected number of times + self.verify_policy_match(pkt_count, policy_0) + self.verify_policy_match(0, policy_1) + self.verify_policy_match(pkt_count, policy_2) + # check input policy has been cached + self.verify_num_inbound_flow_cache_entries(1) + + +class IPSec4SpdTestCaseDiscard(SpdFlowCacheInbound): + """ IPSec/IPv4 inbound: 
Policy mode test case with flow cache \ + (add discard)""" + def test_ipsec_spd_inbound_discard(self): + # In this test case, packets in IPv4 FWD path are configured + # to go through IPSec inbound SPD policy lookup. + # 1 DISCARD rule is added, so all traffic should be dropped. + self.create_interfaces(2) + pkt_count = 5 + + self.spd_create_and_intf_add(1, [self.pg1, self.pg0]) + + # create input rule + policy_0 = self.spd_add_rem_policy( # inbound, priority 10 + 1, self.pg1, self.pg0, socket.IPPROTO_UDP, + is_out=0, priority=10, policy_type="discard") + + # create output rule so we can capture forwarded packets + policy_1 = self.spd_add_rem_policy( # outbound, priority 10 + 1, self.pg0, self.pg1, socket.IPPROTO_UDP, + is_out=1, priority=10, policy_type="bypass") + + # check flow cache is empty before sending traffic + self.verify_num_inbound_flow_cache_entries(0) + # create the packet stream + packets = self.create_stream(self.pg0, self.pg1, pkt_count) + # add the stream to the source interface + self.pg0.add_stream(packets) + self.pg1.enable_capture() + self.pg_start() + # inbound discard rule should have dropped traffic + self.pg1.assert_nothing_captured() + # verify all policies matched the expected number of times + self.verify_policy_match(pkt_count, policy_0) + self.verify_policy_match(0, policy_1) + # only inbound discard rule should have been cached + self.verify_num_inbound_flow_cache_entries(1) + + +class IPSec4SpdTestCaseRemove(SpdFlowCacheInbound): + """ IPSec/IPv4 inbound: Policy mode test case with flow cache \ + (remove bypass)""" + def test_ipsec_spd_inbound_remove(self): + # In this test case, packets in IPv4 FWD path are configured + # to go through IPSec inbound SPD policy lookup. + # + # 2 inbound SPD rules (1 HIGH and 1 LOW) are added. + # - High priority rule action is set to DISCARD. + # - Low priority rule action is set to BYPASS. 
+ # + # Since BYPASS rules take precedence over DISCARD + # (the order being PROTECT, BYPASS, DISCARD) we expect the + # BYPASS rule to match and traffic to be correctly forwarded. + # + # The BYPASS rules is then removed, and we check that all traffic + # is now correctly dropped. + self.create_interfaces(2) + pkt_count = 5 + + self.spd_create_and_intf_add(1, [self.pg1, self.pg0]) + + # create input rules + # bypass rule should take precedence over discard rule, + # even though it's lower priority + policy_0 = self.spd_add_rem_policy( # inbound, priority 10 + 1, self.pg1, self.pg0, socket.IPPROTO_UDP, + is_out=0, priority=10, policy_type="bypass") + policy_1 = self.spd_add_rem_policy( # inbound, priority 15 + 1, self.pg1, self.pg0, socket.IPPROTO_UDP, + is_out=0, priority=15, policy_type="discard") + + # create output rule so we can capture forwarded packets + policy_2 = self.spd_add_rem_policy( # outbound, priority 10 + 1, self.pg0, self.pg1, socket.IPPROTO_UDP, + is_out=1, priority=10, policy_type="bypass") + + # check flow cache is empty before sending traffic + self.verify_num_inbound_flow_cache_entries(0) + # create the packet stream + packets = self.create_stream(self.pg0, self.pg1, pkt_count) + # add the stream to the source interface + self.pg0.add_stream(packets) + self.pg1.enable_capture() + self.pg_start() + + # check capture on pg1 + capture = self.pg1.get_capture() + for packet in capture: + try: + self.logger.debug(ppp("SPD Add - Got packet:", packet)) + except Exception: + self.logger.error(ppp("Unexpected or invalid packet:", packet)) + raise + self.logger.debug("SPD: Num packets: %s", len(capture.res)) + + # verify captured packets + self.verify_capture(self.pg0, self.pg1, capture) + # verify all policies matched the expected number of times + self.verify_policy_match(pkt_count, policy_0) + self.verify_policy_match(0, policy_1) + self.verify_policy_match(pkt_count, policy_2) + # check input policy has been cached + 
self.verify_num_inbound_flow_cache_entries(1) + + # remove the input bypass rule + self.spd_add_rem_policy( # inbound, priority 10 + 1, self.pg1, self.pg0, socket.IPPROTO_UDP, + is_out=0, priority=10, policy_type="bypass", + remove=True) + # verify flow cache counter has been reset by rule removal + self.verify_num_inbound_flow_cache_entries(0) + + # resend the same packets + self.pg0.add_stream(packets) + self.pg1.enable_capture() # flush the old capture + self.pg_start() + + # inbound discard rule should have dropped traffic + self.pg1.assert_nothing_captured() + # verify all policies matched the expected number of times + self.verify_policy_match(pkt_count, policy_0) + self.verify_policy_match(pkt_count, policy_1) + self.verify_policy_match(pkt_count, policy_2) + # by removing the bypass rule, we should have reset the flow cache + # we only expect the discard rule to now be in the flow cache + self.verify_num_inbound_flow_cache_entries(1) + + +class IPSec4SpdTestCaseReadd(SpdFlowCacheInbound): + """ IPSec/IPv4 inbound: Policy mode test case with flow cache \ + (add, remove, re-add bypass)""" + def test_ipsec_spd_inbound_readd(self): + # In this test case, packets in IPv4 FWD path are configured + # to go through IPSec inbound SPD policy lookup. + # + # 2 inbound SPD rules (1 HIGH and 1 LOW) are added. + # - High priority rule action is set to DISCARD. + # - Low priority rule action is set to BYPASS. + # + # Since BYPASS rules take precedence over DISCARD + # (the order being PROTECT, BYPASS, DISCARD) we expect the + # BYPASS rule to match and traffic to be correctly forwarded. + # + # The BYPASS rules is then removed, and we check that all traffic + # is now correctly dropped. 
+        #
+        # The BYPASS rule is then readded, checking traffic is forwarded
+        # correctly again
+        self.create_interfaces(2)
+        pkt_count = 5
+
+        self.spd_create_and_intf_add(1, [self.pg1, self.pg0])
+
+        # create input rules
+        # bypass rule should take precedence over discard rule,
+        # even though it's lower priority
+        policy_0 = self.spd_add_rem_policy(  # inbound, priority 10
+            1, self.pg1, self.pg0, socket.IPPROTO_UDP,
+            is_out=0, priority=10, policy_type="bypass")
+        policy_1 = self.spd_add_rem_policy(  # inbound, priority 15
+            1, self.pg1, self.pg0, socket.IPPROTO_UDP,
+            is_out=0, priority=15, policy_type="discard")
+
+        # create output rule so we can capture forwarded packets
+        policy_2 = self.spd_add_rem_policy(  # outbound, priority 10
+            1, self.pg0, self.pg1, socket.IPPROTO_UDP,
+            is_out=1, priority=10, policy_type="bypass")
+
+        # check flow cache is empty before sending traffic
+        self.verify_num_inbound_flow_cache_entries(0)
+        # create the packet stream
+        packets = self.create_stream(self.pg0, self.pg1, pkt_count)
+        # add the stream to the source interface
+        self.pg0.add_stream(packets)
+        self.pg1.enable_capture()
+        self.pg_start()
+
+        # check capture on pg1
+        capture = self.pg1.get_capture()
+        for packet in capture:
+            try:
+                self.logger.debug(ppp("SPD Add - Got packet:", packet))
+            except Exception:
+                self.logger.error(ppp("Unexpected or invalid packet:", packet))
+                raise
+        self.logger.debug("SPD: Num packets: %s", len(capture.res))
+
+        # verify captured packets
+        self.verify_capture(self.pg0, self.pg1, capture)
+        # verify all policies matched the expected number of times
+        self.verify_policy_match(pkt_count, policy_0)
+        self.verify_policy_match(0, policy_1)
+        self.verify_policy_match(pkt_count, policy_2)
+        # check input policy has been cached
+        self.verify_num_inbound_flow_cache_entries(1)
+
+        # remove the input bypass rule
+        self.spd_add_rem_policy(  # inbound, priority 10
+            1, self.pg1, self.pg0, socket.IPPROTO_UDP,
+            is_out=0, priority=10, policy_type="bypass",
+            remove=True)
+        # verify flow cache counter has been reset by rule removal
+        self.verify_num_inbound_flow_cache_entries(0)
+
+        # resend the same packets
+        self.pg0.add_stream(packets)
+        self.pg1.enable_capture()  # flush the old capture
+        self.pg_start()
+
+        # inbound discard rule should have dropped traffic
+        self.pg1.assert_nothing_captured()
+        # verify all policies matched the expected number of times
+        self.verify_policy_match(pkt_count, policy_0)
+        self.verify_policy_match(pkt_count, policy_1)
+        self.verify_policy_match(pkt_count, policy_2)
+        # by removing the bypass rule, flow cache was reset
+        # we only expect the discard rule to now be in the flow cache
+        self.verify_num_inbound_flow_cache_entries(1)
+
+        # readd the input bypass rule
+        policy_0 = self.spd_add_rem_policy(  # inbound, priority 10
+            1, self.pg1, self.pg0, socket.IPPROTO_UDP,
+            is_out=0, priority=10, policy_type="bypass")
+        # verify flow cache counter has been reset by rule addition
+        self.verify_num_inbound_flow_cache_entries(0)
+
+        # resend the same packets
+        self.pg0.add_stream(packets)
+        self.pg1.enable_capture()  # flush the old capture
+        self.pg_start()
+
+        # check capture on pg1
+        capture = self.pg1.get_capture()
+        for packet in capture:
+            try:
+                self.logger.debug(ppp("SPD Add - Got packet:", packet))
+            except Exception:
+                self.logger.error(ppp("Unexpected or invalid packet:", packet))
+                raise
+
+        # verify captured packets
+        self.verify_capture(self.pg0, self.pg1, capture)
+        # verify all policies matched the expected number of times
+        self.verify_policy_match(pkt_count, policy_0)
+        self.verify_policy_match(pkt_count, policy_1)
+        self.verify_policy_match(pkt_count*2, policy_2)
+        # by readding the bypass rule, we reset the flow cache
+        # we only expect the bypass rule to now be in the flow cache
+        self.verify_num_inbound_flow_cache_entries(1)
+
+
+class IPSec4SpdTestCaseMultiple(SpdFlowCacheInbound):
+    """ IPSec/IPv4 inbound: Policy mode test case with flow cache \
+        (multiple interfaces, multiple rules)"""
+    def test_ipsec_spd_inbound_multiple(self):
+        # In this test case, packets in IPv4 FWD path are configured to go
+        # through IPSec inbound SPD policy lookup.
+        #
+        # Multiple rules on multiple interfaces are tested at the same time.
+        # 3x interfaces are configured, binding the same SPD to each.
+        # Each interface has 1 SPD rule - 2x BYPASS and 1x DISCARD
+        #
+        # Traffic should be forwarded with destinations pg1 & pg2
+        # and dropped to pg0.
+        self.create_interfaces(3)
+        pkt_count = 5
+        # bind SPD to all interfaces
+        self.spd_create_and_intf_add(1, self.pg_interfaces)
+        # add input rules on all interfaces
+        # pg0 -> pg1
+        policy_0 = self.spd_add_rem_policy(  # inbound, priority 10
+            1, self.pg1, self.pg0, socket.IPPROTO_UDP,
+            is_out=0, priority=10, policy_type="bypass")
+        # pg1 -> pg2
+        policy_1 = self.spd_add_rem_policy(  # inbound, priority 10
+            1, self.pg2, self.pg1, socket.IPPROTO_UDP,
+            is_out=0, priority=10, policy_type="bypass")
+        # pg2 -> pg0
+        policy_2 = self.spd_add_rem_policy(  # inbound, priority 10
+            1, self.pg0, self.pg2, socket.IPPROTO_UDP,
+            is_out=0, priority=10, policy_type="discard")
+
+        # create output rules covering the full ip range
+        # 0.0.0.0 -> 255.255.255.255, so we can capture forwarded packets
+        policy_3 = self.spd_add_rem_policy(  # outbound, priority 10
+            1, self.pg0, self.pg0, socket.IPPROTO_UDP,
+            is_out=1, priority=10, policy_type="bypass",
+            all_ips=True)
+
+        # check flow cache is empty (0 active elements) before sending traffic
+        self.verify_num_inbound_flow_cache_entries(0)
+
+        # create the packet streams
+        packets0 = self.create_stream(self.pg0, self.pg1, pkt_count)
+        packets1 = self.create_stream(self.pg1, self.pg2, pkt_count)
+        packets2 = self.create_stream(self.pg2, self.pg0, pkt_count)
+        # add the streams to the source interfaces
+        self.pg0.add_stream(packets0)
+        self.pg1.add_stream(packets1)
+        self.pg2.add_stream(packets2)
+        # enable capture on all interfaces
+        for pg in self.pg_interfaces:
+            pg.enable_capture()
+        # start the packet generator
+        self.pg_start()
+
+        # get captures from ifs
+        if_caps = []
+        for pg in [self.pg1, self.pg2]:  # we are expecting captures on pg1/pg2
+            if_caps.append(pg.get_capture())
+            for packet in if_caps[-1]:
+                try:
+                    self.logger.debug(ppp("SPD Add - Got packet:", packet))
+                except Exception:
+                    self.logger.error(
+                        ppp("Unexpected or invalid packet:", packet))
+                    raise
+
+        # verify captures that matched BYPASS rules
+        self.verify_capture(self.pg0, self.pg1, if_caps[0])
+        self.verify_capture(self.pg1, self.pg2, if_caps[1])
+        # verify that traffic to pg0 matched DISCARD rule and was dropped
+        self.pg0.assert_nothing_captured()
+        # verify all policies matched the expected number of times
+        self.verify_policy_match(pkt_count, policy_0)
+        self.verify_policy_match(pkt_count, policy_1)
+        self.verify_policy_match(pkt_count, policy_2)
+        # check flow/policy match was cached for: 3x input policies
+        self.verify_num_inbound_flow_cache_entries(3)
+
+
+class IPSec4SpdTestCaseOverwriteStale(SpdFlowCacheInbound):
+    """ IPSec/IPv4 inbound: Policy mode test case with flow cache \
+        (overwrite stale entries)"""
+    def test_ipsec_spd_inbound_overwrite(self):
+        # The operation of the flow cache is setup so that the entire cache
+        # is invalidated when adding or removing an SPD policy rule.
+        # For performance, old cache entries are not zero'd, but remain
+        # in the table as "stale" entries. If a flow matches a stale entry,
+        # and the epoch count does NOT match the current count, the entry
+        # is overwritten.
+        # In this test, 3 active rules are created and matched to enter
+        # them into the flow cache.
+        # A single entry is removed to invalidate the entire cache.
+        # We then readd the rule and test that overwriting of the previous
+        # stale entries occurs as expected, and that the flow cache entry
+        # counter is updated correctly.
+        self.create_interfaces(3)
+        pkt_count = 5
+        # bind SPD to all interfaces
+        self.spd_create_and_intf_add(1, self.pg_interfaces)
+        # add input rules on all interfaces
+        # pg0 -> pg1
+        policy_0 = self.spd_add_rem_policy(  # inbound
+            1, self.pg1, self.pg0, socket.IPPROTO_UDP,
+            is_out=0, priority=10, policy_type="bypass")
+        # pg1 -> pg2
+        policy_1 = self.spd_add_rem_policy(  # inbound
+            1, self.pg2, self.pg1, socket.IPPROTO_UDP,
+            is_out=0, priority=10, policy_type="bypass")
+        # pg2 -> pg0
+        policy_2 = self.spd_add_rem_policy(  # inbound
+            1, self.pg0, self.pg2, socket.IPPROTO_UDP,
+            is_out=0, priority=10, policy_type="discard")
+
+        # create output rules covering the full ip range
+        # 0.0.0.0 -> 255.255.255.255, so we can capture forwarded packets
+        policy_3 = self.spd_add_rem_policy(  # outbound
+            1, self.pg0, self.pg0, socket.IPPROTO_UDP,
+            is_out=1, priority=10, policy_type="bypass",
+            all_ips=True)
+
+        # check flow cache is empty (0 active elements) before sending traffic
+        self.verify_num_inbound_flow_cache_entries(0)
+
+        # create the packet streams
+        packets0 = self.create_stream(self.pg0, self.pg1, pkt_count)
+        packets1 = self.create_stream(self.pg1, self.pg2, pkt_count)
+        packets2 = self.create_stream(self.pg2, self.pg0, pkt_count)
+        # add the streams to the source interfaces
+        self.pg0.add_stream(packets0)
+        self.pg1.add_stream(packets1)
+        self.pg2.add_stream(packets2)
+        # enable capture on all interfaces
+        for pg in self.pg_interfaces:
+            pg.enable_capture()
+        # start the packet generator
+        self.pg_start()
+
+        # get captures from ifs
+        if_caps = []
+        for pg in [self.pg1, self.pg2]:  # we are expecting captures on pg1/pg2
+            if_caps.append(pg.get_capture())
+            for packet in if_caps[-1]:
+                try:
+                    self.logger.debug(ppp("SPD Add - Got packet:", packet))
+                except Exception:
+                    self.logger.error(
+                        ppp("Unexpected or invalid packet:", packet))
+                    raise
+
+        # verify captures that matched BYPASS rules
+        self.verify_capture(self.pg0, self.pg1, if_caps[0])
+        self.verify_capture(self.pg1, self.pg2, if_caps[1])
+        # verify that traffic to pg0 matched DISCARD rule and was dropped
+        self.pg0.assert_nothing_captured()
+        # verify all policies matched the expected number of times
+        self.verify_policy_match(pkt_count, policy_0)
+        self.verify_policy_match(pkt_count, policy_1)
+        self.verify_policy_match(pkt_count, policy_2)
+        # check flow/policy match was cached for: 3x input policies
+        self.verify_num_inbound_flow_cache_entries(3)
+
+        # adding an outbound policy should not invalidate inbound flow cache
+        self.spd_add_rem_policy(  # outbound
+            1, self.pg0, self.pg0, socket.IPPROTO_UDP,
+            is_out=1, priority=1, policy_type="bypass",
+            all_ips=True)
+        # check inbound flow cache counter has not been reset
+        self.verify_num_inbound_flow_cache_entries(3)
+
+        # remove + readd bypass policy - flow cache counter will be reset,
+        # and there will be 3x stale entries in flow cache
+        self.spd_add_rem_policy(  # inbound, priority 10
+            1, self.pg1, self.pg0, socket.IPPROTO_UDP,
+            is_out=0, priority=10, policy_type="bypass",
+            remove=True)
+        # readd policy
+        policy_0 = self.spd_add_rem_policy(  # inbound, priority 10
+            1, self.pg1, self.pg0, socket.IPPROTO_UDP,
+            is_out=0, priority=10, policy_type="bypass")
+        # check counter was reset
+        self.verify_num_inbound_flow_cache_entries(0)
+
+        # resend the same packets
+        self.pg0.add_stream(packets0)
+        self.pg1.add_stream(packets1)
+        self.pg2.add_stream(packets2)
+        for pg in self.pg_interfaces:
+            pg.enable_capture()  # flush previous captures
+        self.pg_start()
+
+        # get captures from ifs
+        if_caps = []
+        for pg in [self.pg1, self.pg2]:  # we are expecting captures on pg1/pg2
+            if_caps.append(pg.get_capture())
+            for packet in if_caps[-1]:
+                try:
+                    self.logger.debug(ppp("SPD Add - Got packet:", packet))
+                except Exception:
+                    self.logger.error(
+                        ppp("Unexpected or invalid packet:", packet))
+                    raise
+
+        # verify captures that matched BYPASS rules
+        self.verify_capture(self.pg0, self.pg1, if_caps[0])
+        self.verify_capture(self.pg1, self.pg2, if_caps[1])
+        # verify that traffic to pg0 matched DISCARD rule and was dropped
+        self.pg0.assert_nothing_captured()
+        # verify all policies matched the expected number of times
+        self.verify_policy_match(pkt_count, policy_0)
+        self.verify_policy_match(pkt_count*2, policy_1)
+        self.verify_policy_match(pkt_count*2, policy_2)
+        # we are overwriting 3x stale entries - check flow cache counter
+        # is correct
+        self.verify_num_inbound_flow_cache_entries(3)
+
+
+class IPSec4SpdTestCaseCollision(SpdFlowCacheInbound):
+    """ IPSec/IPv4 inbound: Policy mode test case with flow cache \
+        (hash collision)"""
+    # Override class setup to restrict hash table size to 16 buckets.
+    # This forces using only the lower 4 bits of the hash as a key,
+    # making hash collisions easy to find.
+    @classmethod
+    def setUpConstants(cls):
+        super(SpdFlowCacheInbound, cls).setUpConstants()
+        cls.vpp_cmdline.extend(["ipsec", "{",
+                                "ipv4-inbound-spd-flow-cache on",
+                                "ipv4-inbound-spd-hash-buckets 16",
+                                "}"])
+        cls.logger.info("VPP modified cmdline is %s" % " "
+                        .join(cls.vpp_cmdline))
+
+    def test_ipsec_spd_inbound_collision(self):
+        # The flow cache operation is setup to overwrite an entry
+        # if a hash collision occurs.
+        # In this test, 2 packets are configured that result in a
+        # hash with the same lower 4 bits.
+        # After the first packet is received, there should be one
+        # active entry in the flow cache.
+        # After the second packet with the same lower 4 bit hash
+        # is received, this should overwrite the same entry.
+        # Therefore there will still be a total of one (1) entry,
+        # in the flow cache with two matching policies.
+        # crc32_supported() method is used to check cpu for crc32
+        # intrinsic support for hashing.
+        # If crc32 is not supported, we fall back to clib_xxhash()
+        self.create_interfaces(4)
+        pkt_count = 5
+        # bind SPD to all interfaces
+        self.spd_create_and_intf_add(1, self.pg_interfaces)
+
+        # create output rules covering the full ip range
+        # 0.0.0.0 -> 255.255.255.255, so we can capture forwarded packets
+        policy_0 = self.spd_add_rem_policy(  # outbound
+            1, self.pg0, self.pg0, socket.IPPROTO_UDP,
+            is_out=1, priority=10, policy_type="bypass",
+            all_ips=True)
+
+        capture_intfs = []
+        if self.crc32_supported():  # create crc32 collision on last 4 bits
+            hashed_with_crc32 = True
+            # add matching rules
+            policy_1 = self.spd_add_rem_policy(  # inbound, priority 10
+                1, self.pg1, self.pg2, socket.IPPROTO_UDP,
+                is_out=0, priority=10, policy_type="bypass")
+            policy_2 = self.spd_add_rem_policy(  # inbound, priority 10
+                1, self.pg3, self.pg0, socket.IPPROTO_UDP,
+                is_out=0, priority=10, policy_type="bypass")
+
+            # we expect to get captures on pg1 + pg3
+            capture_intfs.append(self.pg1)
+            capture_intfs.append(self.pg3)
+
+            # check flow cache is empty before sending traffic
+            self.verify_num_inbound_flow_cache_entries(0)
+
+            # create the packet streams
+            # packet hashes to:
+            # ad727628
+            packets1 = self.create_stream(self.pg2, self.pg1, pkt_count, 1, 1)
+            # b5512898
+            packets2 = self.create_stream(self.pg0, self.pg3, pkt_count, 1, 1)
+            # add the streams to the source interfaces
+            self.pg2.add_stream(packets1)
+            self.pg0.add_stream(packets2)
+        else:  # create xxhash collision on last 4 bits
+            hashed_with_crc32 = False
+            # add matching rules
+            policy_1 = self.spd_add_rem_policy(  # inbound, priority 10
+                1, self.pg1, self.pg2, socket.IPPROTO_UDP,
+                is_out=0, priority=10, policy_type="bypass")
+            policy_2 = self.spd_add_rem_policy(  # inbound, priority 10
+                1, self.pg2, self.pg3, socket.IPPROTO_UDP,
+                is_out=0, priority=10, policy_type="bypass")
+
+            capture_intfs.append(self.pg1)
+            capture_intfs.append(self.pg2)
+
+            # check flow cache is empty before sending traffic
+            self.verify_num_inbound_flow_cache_entries(0)
+
+            # create the packet streams
+            # 2f8f90f557eef12c
+            packets1 = self.create_stream(self.pg2, self.pg1, pkt_count, 1, 1)
+            # 6b7f9987719ffc1c
+            packets2 = self.create_stream(self.pg3, self.pg2, pkt_count, 1, 1)
+            # add the streams to the source interfaces
+            self.pg2.add_stream(packets1)
+            self.pg3.add_stream(packets2)
+
+        # enable capture on interfaces we expect capture on & send pkts
+        for pg in capture_intfs:
+            pg.enable_capture()
+        self.pg_start()
+
+        # get captures
+        if_caps = []
+        for pg in capture_intfs:
+            if_caps.append(pg.get_capture())
+            for packet in if_caps[-1]:
+                try:
+                    self.logger.debug(ppp(
+                        "SPD Add - Got packet:", packet))
+                except Exception:
+                    self.logger.error(ppp(
+                        "Unexpected or invalid packet:", packet))
+                    raise
+
+        # verify captures that matched BYPASS rule
+        if hashed_with_crc32:
+            self.verify_capture(self.pg2, self.pg1, if_caps[0])
+            self.verify_capture(self.pg0, self.pg3, if_caps[1])
+        else:  # hashed with xxhash
+            self.verify_capture(self.pg2, self.pg1, if_caps[0])
+            self.verify_capture(self.pg3, self.pg2, if_caps[1])
+
+        # verify all policies matched the expected number of times
+        self.verify_policy_match(pkt_count, policy_1)
+        self.verify_policy_match(pkt_count, policy_2)
+        self.verify_policy_match(pkt_count*2, policy_0)  # output policy
+        # we have matched 2 policies, but due to the hash collision
+        # one active entry is expected
+        self.verify_num_inbound_flow_cache_entries(1)
+
+
+if __name__ == '__main__':
+    unittest.main(testRunner=VppTestRunner)
diff --git a/test/test_ipsec_spd_flow_cache.py b/test/test_ipsec_spd_flow_cache_output.py
index 54571c6741a..54571c6741a 100644
--- a/test/test_ipsec_spd_flow_cache.py
+++ b/test/test_ipsec_spd_flow_cache_output.py