author     Zachary Leaf <zachary.leaf@arm.com>      2021-06-25 08:11:15 -0500
committer  Fan Zhang <roy.fan.zhang@intel.com>      2022-04-14 12:46:51 +0000
commit     7cd35f5d688d9e3bddf66602655274dae944b086
tree       a379d214f3036cecf5d13fe94f65dd4ba85c73f5 /src/vnet/ipsec
parent     e1fd3903efe38880a45687299a414b1516994955
ipsec: perf improvement of ipsec4_input_node using flow cache
Add flow cache support to improve inbound IPv4/IPsec Security Policy
Database (SPD) lookup performance. When the flow cache is enabled in
startup conf, it replaces the linear O(N) SPD search with an O(1) hash
table search.
This patch is the ipsec4_input_node counterpart to
https://gerrit.fd.io/r/c/vpp/+/31694, and shares much of the same code,
theory and mechanism of action.
Details about the flow cache:
Mechanism:
1. The first packet of a flow undergoes a linear search in the SPD
   table. Once a policy match is found, a new entry is added to the
   flow cache. From the second packet onwards, the policy lookup
   happens in the flow cache.
2. The flow cache is implemented using a hash table without collision
   handling. This avoids any logic to age out or recycle old flows:
   whenever a collision occurs, the old entry is simply overwritten
   by the new one. The worst case is all 256 packets in a batch
   colliding and falling back to linear search; the average and best
   case remain O(1).
3. The size of the flow cache is fixed and chosen based on the number
   of flows to be supported. The default supports ~1 million flows
   and is configurable via a startup.conf option.
4. Whenever an SPD rule is added or deleted by the control plane, all
   current flow cache entries are invalidated. As the SPD API is
   not mp-safe, the data plane waits for the control plane
   operation to complete.
Cache invalidation is via an epoch counter that is incremented on
policy add/del and stored with each entry in the flow cache. If the
epoch counter in the flow cache does not match the current count,
the entry is considered stale, and we fall back to linear search.
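In outline, a successful lookup reduces to one hash, one mask and one
epoch comparison. The following condensed sketch mirrors the logic of
ipsec4_input_spd_find_flow_cache_entry () in the diff below (the full
version also takes the per-bucket spinlock while copying the entry out):

    hash = ipsec4_hash_16_8 (&tuple.kv_16_8);
    hash &= (im->ipsec4_in_spd_hash_num_buckets - 1); /* 2^n buckets */
    kv = im->ipsec4_in_spd_hash_tbl[hash];
    if (ipsec4_hash_key_compare_16_8 ((u64 *) &tuple.kv_16_8, (u64 *) &kv)
        && im->input_epoch_count == (u32) (kv.value & 0xFFFFFFFF))
      /* policy index lives in the upper 32 bits of the cached value */
      p = pool_elt_at_index (im->policies, (u32) (kv.value >> 32));
    else
      p = NULL; /* miss or stale epoch: fall back to linear SPD walk */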
The following configurable options are available through startup
conf under the ipsec{} entry:
1. ipv4-inbound-spd-flow-cache on/off - enable SPD flow cache
(default off)
2. ipv4-inbound-spd-hash-buckets %d - set number of hash buckets
(default 4,194,304: ~1 million flows with 25% load factor)
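For example, a startup.conf enabling the inbound cache with an explicit
table size might contain the following (bucket counts are rounded up to
the next power of two internally via max_log2, and 2^22 = 4,194,304
buckets at a 25% load factor gives ~1,048,576 flows):

    ipsec {
      ipv4-inbound-spd-flow-cache on
      ipv4-inbound-spd-hash-buckets 4194304
    }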
Performance with 1 core, 1 ESP tunnel, null-decrypt then bypass,
94B (null-encrypted) packets, for different SPD policy matching indices.
Throughput is reported in Mbps as Baseline/Optimized:

SPD Policy index :       2      10     100    1000
ARM TX2          : 300/290 230/290  70/290 8.5/290
Type: improvement
Signed-off-by: Zachary Leaf <zachary.leaf@arm.com>
Signed-off-by: mgovind <govindarajan.Mohandoss@arm.com>
Tested-by: Jieqiang Wang <jieqiang.wang@arm.com>
Change-Id: I8be2ad4715accbb335c38cd933904119db75827b
Diffstat (limited to 'src/vnet/ipsec')
 src/vnet/ipsec/ipsec.c            |  35
 src/vnet/ipsec/ipsec.h            |  23
 src/vnet/ipsec/ipsec_cli.c        |   8
 src/vnet/ipsec/ipsec_format.c     |  15
 src/vnet/ipsec/ipsec_input.c      | 252
 src/vnet/ipsec/ipsec_output.c     |   2
 src/vnet/ipsec/ipsec_spd.h        |   3
 src/vnet/ipsec/ipsec_spd_policy.c |  27
 8 files changed, 311 insertions(+), 54 deletions(-)
diff --git a/src/vnet/ipsec/ipsec.c b/src/vnet/ipsec/ipsec.c
index 5cc8044e3d4..2749b04587b 100644
--- a/src/vnet/ipsec/ipsec.c
+++ b/src/vnet/ipsec/ipsec.c
@@ -31,6 +31,10 @@
  */
 #define IPSEC4_OUT_SPD_DEFAULT_HASH_NUM_BUCKETS (1 << 22)
 
+/* Flow cache is sized for 1 million flows with a load factor of .25.
+ */
+#define IPSEC4_SPD_DEFAULT_HASH_NUM_BUCKETS (1 << 22)
+
 ipsec_main_t ipsec_main;
 esp_async_post_next_t esp_encrypt_async_next;
 esp_async_post_next_t esp_decrypt_async_next;
@@ -554,12 +558,18 @@ ipsec_init (vlib_main_t * vm)
   crypto_engine_backend_register_post_node (vm);
 
   im->ipsec4_out_spd_hash_tbl = NULL;
-  im->flow_cache_flag = 0;
+  im->output_flow_cache_flag = 0;
   im->ipsec4_out_spd_flow_cache_entries = 0;
   im->epoch_count = 0;
   im->ipsec4_out_spd_hash_num_buckets =
     IPSEC4_OUT_SPD_DEFAULT_HASH_NUM_BUCKETS;
 
+  im->ipsec4_in_spd_hash_tbl = NULL;
+  im->input_flow_cache_flag = 0;
+  im->ipsec4_in_spd_flow_cache_entries = 0;
+  im->input_epoch_count = 0;
+  im->ipsec4_in_spd_hash_num_buckets = IPSEC4_SPD_DEFAULT_HASH_NUM_BUCKETS;
+
   return 0;
 }
 
@@ -570,14 +580,16 @@ ipsec_config (vlib_main_t *vm, unformat_input_t *input)
 {
   ipsec_main_t *im = &ipsec_main;
   unformat_input_t sub_input;
+  u32 ipsec4_out_spd_hash_num_buckets;
+  u32 ipsec4_in_spd_hash_num_buckets;
 
   while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
     {
       if (unformat (input, "ipv4-outbound-spd-flow-cache on"))
-        im->flow_cache_flag = 1;
+        im->output_flow_cache_flag = 1;
       else if (unformat (input, "ipv4-outbound-spd-flow-cache off"))
-        im->flow_cache_flag = 0;
+        im->output_flow_cache_flag = 0;
       else if (unformat (input, "ipv4-outbound-spd-hash-buckets %d",
                          &ipsec4_out_spd_hash_num_buckets))
         {
@@ -585,6 +597,16 @@ ipsec_config (vlib_main_t *vm, unformat_input_t *input)
           im->ipsec4_out_spd_hash_num_buckets =
             1ULL << max_log2 (ipsec4_out_spd_hash_num_buckets);
         }
+      else if (unformat (input, "ipv4-inbound-spd-flow-cache on"))
+        im->input_flow_cache_flag = 1;
+      else if (unformat (input, "ipv4-inbound-spd-flow-cache off"))
+        im->input_flow_cache_flag = 0;
+      else if (unformat (input, "ipv4-inbound-spd-hash-buckets %d",
+                         &ipsec4_in_spd_hash_num_buckets))
+        {
+          im->ipsec4_in_spd_hash_num_buckets =
+            1ULL << max_log2 (ipsec4_in_spd_hash_num_buckets);
+        }
       else if (unformat (input, "ip4 %U", unformat_vlib_cli_sub_input,
                          &sub_input))
         {
@@ -623,11 +645,16 @@ ipsec_config (vlib_main_t *vm, unformat_input_t *input)
         return clib_error_return (0, "unknown input `%U'",
                                   format_unformat_error, input);
     }
 
-  if (im->flow_cache_flag)
+  if (im->output_flow_cache_flag)
     {
       vec_add2 (im->ipsec4_out_spd_hash_tbl, im->ipsec4_out_spd_hash_tbl,
                 im->ipsec4_out_spd_hash_num_buckets);
     }
+  if (im->input_flow_cache_flag)
+    {
+      vec_add2 (im->ipsec4_in_spd_hash_tbl, im->ipsec4_in_spd_hash_tbl,
+                im->ipsec4_in_spd_hash_num_buckets);
+    }
 
   return 0;
 }
diff --git a/src/vnet/ipsec/ipsec.h b/src/vnet/ipsec/ipsec.h
index 38feaed6f77..05d8484a28c 100644
--- a/src/vnet/ipsec/ipsec.h
+++ b/src/vnet/ipsec/ipsec.h
@@ -36,7 +36,7 @@ typedef clib_error_t *(*enable_disable_cb_t) (int is_enable);
 
 typedef struct
 {
-  u64 key[2];
+  u64 key[2]; // 16 bytes
   u64 value;
   i32 bucket_lock;
   u32 un_used;
@@ -54,6 +54,18 @@ typedef union
   ipsec4_hash_kv_16_8_t kv_16_8;
 } ipsec4_spd_5tuple_t;
 
+typedef union
+{
+  struct
+  {
+    ip4_address_t ip4_src_addr;
+    ip4_address_t ip4_dest_addr;
+    ipsec_spd_policy_type_t policy_type;
+    u8 pad[4];
+  }; // 16 bytes total
+  ipsec4_hash_kv_16_8_t kv_16_8;
+} ipsec4_inbound_spd_tuple_t;
+
 typedef struct
 {
   u8 *name;
@@ -151,6 +163,7 @@ typedef struct
   uword *ipsec_if_by_sw_if_index;
 
   ipsec4_hash_kv_16_8_t *ipsec4_out_spd_hash_tbl;
+  ipsec4_hash_kv_16_8_t *ipsec4_in_spd_hash_tbl;
 
   clib_bihash_8_16_t tun4_protect_by_key;
   clib_bihash_24_16_t tun6_protect_by_key;
@@ -223,9 +236,15 @@ typedef struct
   u32 ipsec4_out_spd_hash_num_buckets;
   u32 ipsec4_out_spd_flow_cache_entries;
   u32 epoch_count;
+  u8 output_flow_cache_flag;
+
+  u32 ipsec4_in_spd_hash_num_buckets;
+  u32 ipsec4_in_spd_flow_cache_entries;
+  u32 input_epoch_count;
+  u8 input_flow_cache_flag;
+
   u8 async_mode;
   u16 msg_id_base;
-  u8 flow_cache_flag;
 } ipsec_main_t;
 
 typedef enum ipsec_format_flags_t_
diff --git a/src/vnet/ipsec/ipsec_cli.c b/src/vnet/ipsec/ipsec_cli.c
index c48d6855169..e1263037c6c 100644
--- a/src/vnet/ipsec/ipsec_cli.c
+++ b/src/vnet/ipsec/ipsec_cli.c
@@ -428,9 +428,13 @@ ipsec_spd_show_all (vlib_main_t * vm, ipsec_main_t * im)
     vlib_cli_output(vm, "%U", format_ipsec_spd, spdi);
   }
 
-  if (im->flow_cache_flag)
+  if (im->output_flow_cache_flag)
     {
-      vlib_cli_output (vm, "%U", format_ipsec_spd_flow_cache);
+      vlib_cli_output (vm, "%U", format_ipsec_out_spd_flow_cache);
+    }
+  if (im->input_flow_cache_flag)
+    {
+      vlib_cli_output (vm, "%U", format_ipsec_in_spd_flow_cache);
     }
   /* *INDENT-ON* */
 }
diff --git a/src/vnet/ipsec/ipsec_format.c b/src/vnet/ipsec/ipsec_format.c
index 751d098bcdd..c8c0170efe7 100644
--- a/src/vnet/ipsec/ipsec_format.c
+++ b/src/vnet/ipsec/ipsec_format.c
@@ -232,17 +232,28 @@ done:
 }
 
 u8 *
-format_ipsec_spd_flow_cache (u8 *s, va_list *args)
+format_ipsec_out_spd_flow_cache (u8 *s, va_list *args)
 {
   ipsec_main_t *im = &ipsec_main;
 
-  s = format (s, "\nip4-outbound-spd-flow-cache-entries: %u",
+  s = format (s, "\nipv4-outbound-spd-flow-cache-entries: %u",
               im->ipsec4_out_spd_flow_cache_entries);
 
   return (s);
 }
 
 u8 *
+format_ipsec_in_spd_flow_cache (u8 *s, va_list *args)
+{
+  ipsec_main_t *im = &ipsec_main;
+
+  s = format (s, "\nipv4-inbound-spd-flow-cache-entries: %u",
+              im->ipsec4_in_spd_flow_cache_entries);
+
+  return (s);
+}
+
+u8 *
 format_ipsec_key (u8 * s, va_list * args)
 {
   ipsec_key_t *key = va_arg (*args, ipsec_key_t *);
diff --git a/src/vnet/ipsec/ipsec_input.c b/src/vnet/ipsec/ipsec_input.c
index c47ea34f288..09166bccf5b 100644
--- a/src/vnet/ipsec/ipsec_input.c
+++ b/src/vnet/ipsec/ipsec_input.c
@@ -71,8 +71,86 @@ format_ipsec_input_trace (u8 * s, va_list * args)
   return s;
 }
 
+always_inline void
+ipsec4_input_spd_add_flow_cache_entry (ipsec_main_t *im, u32 sa, u32 da,
+                                       ipsec_spd_policy_type_t policy_type,
+                                       u32 pol_id)
+{
+  u64 hash;
+  u8 is_overwrite = 0, is_stale_overwrite = 0;
+  /* Store in network byte order to avoid conversion on lookup */
+  ipsec4_inbound_spd_tuple_t ip4_tuple = {
+    .ip4_src_addr = (ip4_address_t) clib_host_to_net_u32 (sa),
+    .ip4_dest_addr = (ip4_address_t) clib_host_to_net_u32 (da),
+    .policy_type = policy_type
+  };
+
+  ip4_tuple.kv_16_8.value =
+    (((u64) pol_id) << 32) | ((u64) im->input_epoch_count);
+
+  hash = ipsec4_hash_16_8 (&ip4_tuple.kv_16_8);
+  hash &= (im->ipsec4_in_spd_hash_num_buckets - 1);
+
+  ipsec_spinlock_lock (&im->ipsec4_in_spd_hash_tbl[hash].bucket_lock);
+  /* Check if we are overwriting an existing entry so we know
+     whether to increment the flow cache counter. Since flow
+     cache counter is reset on any policy add/remove, but
+     hash table values are not, we need to check if the entry
+     we are overwriting is stale or not. If it's a stale entry
+     overwrite, we still want to increment flow cache counter */
+  is_overwrite = (im->ipsec4_in_spd_hash_tbl[hash].value != 0);
+  /* Check if we are overwriting a stale entry by comparing
+     with current epoch count */
+  if (PREDICT_FALSE (is_overwrite))
+    is_stale_overwrite =
+      (im->input_epoch_count !=
+       ((u32) (im->ipsec4_in_spd_hash_tbl[hash].value & 0xFFFFFFFF)));
+  clib_memcpy_fast (&im->ipsec4_in_spd_hash_tbl[hash], &ip4_tuple.kv_16_8,
+                    sizeof (ip4_tuple.kv_16_8));
+  ipsec_spinlock_unlock (&im->ipsec4_in_spd_hash_tbl[hash].bucket_lock);
+
+  /* Increment the counter to track active flow cache entries
+     when entering a fresh entry or overwriting a stale one */
+  if (!is_overwrite || is_stale_overwrite)
+    clib_atomic_fetch_add_relax (&im->ipsec4_in_spd_flow_cache_entries, 1);
+
+  return;
+}
+
+always_inline ipsec_policy_t *
+ipsec4_input_spd_find_flow_cache_entry (ipsec_main_t *im, u32 sa, u32 da,
+                                        ipsec_spd_policy_type_t policy_type)
+{
+  ipsec_policy_t *p = NULL;
+  ipsec4_hash_kv_16_8_t kv_result;
+  u64 hash;
+  ipsec4_inbound_spd_tuple_t ip4_tuple = { .ip4_src_addr = (ip4_address_t) sa,
+                                           .ip4_dest_addr = (ip4_address_t) da,
+                                           .policy_type = policy_type };
+
+  hash = ipsec4_hash_16_8 (&ip4_tuple.kv_16_8);
+  hash &= (im->ipsec4_in_spd_hash_num_buckets - 1);
+
+  ipsec_spinlock_lock (&im->ipsec4_in_spd_hash_tbl[hash].bucket_lock);
+  kv_result = im->ipsec4_in_spd_hash_tbl[hash];
+  ipsec_spinlock_unlock (&im->ipsec4_in_spd_hash_tbl[hash].bucket_lock);
+
+  if (ipsec4_hash_key_compare_16_8 ((u64 *) &ip4_tuple.kv_16_8,
+                                    (u64 *) &kv_result))
+    {
+      if (im->input_epoch_count == ((u32) (kv_result.value & 0xFFFFFFFF)))
+        {
+          /* Get the policy based on the index */
+          p =
+            pool_elt_at_index (im->policies, ((u32) (kv_result.value >> 32)));
+        }
+    }
+
+  return p;
+}
+
 always_inline ipsec_policy_t *
-ipsec_input_policy_match (ipsec_spd_t * spd, u32 sa, u32 da,
+ipsec_input_policy_match (ipsec_spd_t *spd, u32 sa, u32 da,
                           ipsec_spd_policy_type_t policy_type)
 {
   ipsec_main_t *im = &ipsec_main;
@@ -95,13 +173,18 @@ ipsec_input_policy_match (ipsec_spd_t * spd, u32 sa, u32 da,
     if (sa > clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
       continue;
 
+    if (im->input_flow_cache_flag)
+      {
+        /* Add an Entry in Flow cache */
+        ipsec4_input_spd_add_flow_cache_entry (im, sa, da, policy_type, *i);
+      }
+
     return p;
   }
   return 0;
 }
 
 always_inline ipsec_policy_t *
-ipsec_input_protect_policy_match (ipsec_spd_t * spd, u32 sa, u32 da, u32 spi)
+ipsec_input_protect_policy_match (ipsec_spd_t *spd, u32 sa, u32 da, u32 spi)
 {
   ipsec_main_t *im = &ipsec_main;
   ipsec_policy_t *p;
@@ -124,7 +207,7 @@ ipsec_input_protect_policy_match (ipsec_spd_t * spd, u32 sa, u32 da, u32 spi)
         if (sa != clib_net_to_host_u32 (s->tunnel.t_src.ip.ip4.as_u32))
           continue;
 
-        return p;
+        goto return_policy;
       }
 
     if (da < clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
@@ -139,6 +222,14 @@ ipsec_input_protect_policy_match (ipsec_spd_t * spd, u32 sa, u32 da, u32 spi)
     if (sa > clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
       continue;
 
+  return_policy:
+    if (im->input_flow_cache_flag)
+      {
+        /* Add an Entry in Flow cache */
+        ipsec4_input_spd_add_flow_cache_entry (
+          im, sa, da, IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT, *i);
+      }
+
     return p;
   }
   return 0;
@@ -225,6 +316,7 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm,
       ipsec_spd_t *spd0;
       ipsec_policy_t *p0 = NULL;
       u8 has_space0;
+      bool search_flow_cache = false;
 
       if (n_left_from > 2)
        {
@@ -252,13 +344,28 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm,
              esp0 = (esp_header_t *) ((u8 *) esp0 + sizeof (udp_header_t));
            }
 
-         p0 = ipsec_input_protect_policy_match (spd0,
-                                                clib_net_to_host_u32
-                                                (ip0->src_address.as_u32),
-                                                clib_net_to_host_u32
-                                                (ip0->dst_address.as_u32),
-                                                clib_net_to_host_u32
-                                                (esp0->spi));
+         // if flow cache is enabled, first search through flow cache for a
+         // policy match for either protect, bypass or discard rules, in
+         // that order. if no match is found search_flow_cache is set to
+         // false (0) and we revert back to linear search
+         search_flow_cache = im->input_flow_cache_flag;
+
+       esp_or_udp:
+         if (search_flow_cache) // attempt to match policy in flow cache
+           {
+             p0 = ipsec4_input_spd_find_flow_cache_entry (
+               im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
+               IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
+           }
+
+         else // linear search if flow cache is not enabled,
+              // or flow cache search just failed
+           {
+             p0 = ipsec_input_protect_policy_match (
+               spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
+               clib_net_to_host_u32 (ip0->dst_address.as_u32),
+               clib_net_to_host_u32 (esp0->spi));
+           }
 
          has_space0 =
            vlib_buffer_has_space (b[0],
@@ -285,12 +392,21 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm,
                  pi0 = ~0;
                };
 
-             p0 = ipsec_input_policy_match (spd0,
-                                            clib_net_to_host_u32
-                                            (ip0->src_address.as_u32),
-                                            clib_net_to_host_u32
-                                            (ip0->dst_address.as_u32),
-                                            IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
+             if (search_flow_cache)
+               {
+                 p0 = ipsec4_input_spd_find_flow_cache_entry (
+                   im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
+                   IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
+               }
+
+             else
+               {
+                 p0 = ipsec_input_policy_match (
+                   spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
+                   clib_net_to_host_u32 (ip0->dst_address.as_u32),
+                   IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
+               }
+
              if (PREDICT_TRUE ((p0 != NULL)))
                {
                  ipsec_bypassed += 1;
@@ -308,12 +424,21 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm,
                  pi0 = ~0;
                };
 
-             p0 = ipsec_input_policy_match (spd0,
-                                            clib_net_to_host_u32
-                                            (ip0->src_address.as_u32),
-                                            clib_net_to_host_u32
-                                            (ip0->dst_address.as_u32),
-                                            IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
+             if (search_flow_cache)
+               {
+                 p0 = ipsec4_input_spd_find_flow_cache_entry (
+                   im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
+                   IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
+               }
+
+             else
+               {
+                 p0 = ipsec_input_policy_match (
+                   spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
+                   clib_net_to_host_u32 (ip0->dst_address.as_u32),
+                   IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
+               }
+
              if (PREDICT_TRUE ((p0 != NULL)))
                {
                  ipsec_dropped += 1;
@@ -332,6 +457,13 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm,
                  pi0 = ~0;
                };
 
+             // flow cache search failed, try again with linear search
+             if (search_flow_cache && p0 == NULL)
+               {
+                 search_flow_cache = false;
+                 goto esp_or_udp;
+               }
+
              /* Drop by default if no match on PROTECT, BYPASS or DISCARD */
              ipsec_unprocessed += 1;
              next[0] = IPSEC_INPUT_NEXT_DROP;
@@ -354,13 +486,26 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm,
       else if (ip0->protocol == IP_PROTOCOL_IPSEC_AH)
        {
          ah0 = (ah_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));
-         p0 = ipsec_input_protect_policy_match (spd0,
-                                                clib_net_to_host_u32
-                                                (ip0->src_address.as_u32),
-                                                clib_net_to_host_u32
-                                                (ip0->dst_address.as_u32),
-                                                clib_net_to_host_u32
-                                                (ah0->spi));
+
+         // if flow cache is enabled, first search through flow cache for a
+         // policy match and revert back to linear search on failure
+         search_flow_cache = im->input_flow_cache_flag;
+
+       ah:
+         if (search_flow_cache)
+           {
+             p0 = ipsec4_input_spd_find_flow_cache_entry (
+               im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
+               IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
+           }
+
+         else
+           {
+             p0 = ipsec_input_protect_policy_match (
+               spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
+               clib_net_to_host_u32 (ip0->dst_address.as_u32),
+               clib_net_to_host_u32 (ah0->spi));
+           }
 
          has_space0 =
            vlib_buffer_has_space (b[0],
@@ -386,12 +531,21 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm,
                  pi0 = ~0;
                }
 
-             p0 = ipsec_input_policy_match (spd0,
-                                            clib_net_to_host_u32
-                                            (ip0->src_address.as_u32),
-                                            clib_net_to_host_u32
-                                            (ip0->dst_address.as_u32),
-                                            IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
+             if (search_flow_cache)
+               {
+                 p0 = ipsec4_input_spd_find_flow_cache_entry (
+                   im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
+                   IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
+               }
+
+             else
+               {
+                 p0 = ipsec_input_policy_match (
+                   spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
+                   clib_net_to_host_u32 (ip0->dst_address.as_u32),
+                   IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
+               }
+
              if (PREDICT_TRUE ((p0 != NULL)))
                {
                  ipsec_bypassed += 1;
@@ -409,12 +563,21 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm,
                  pi0 = ~0;
                };
 
-             p0 = ipsec_input_policy_match (spd0,
-                                            clib_net_to_host_u32
-                                            (ip0->src_address.as_u32),
-                                            clib_net_to_host_u32
-                                            (ip0->dst_address.as_u32),
-                                            IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
+             if (search_flow_cache)
+               {
+                 p0 = ipsec4_input_spd_find_flow_cache_entry (
+                   im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
+                   IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
+               }
+
+             else
+               {
+                 p0 = ipsec_input_policy_match (
+                   spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
+                   clib_net_to_host_u32 (ip0->dst_address.as_u32),
+                   IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
+               }
+
             if (PREDICT_TRUE ((p0 != NULL)))
                {
                  ipsec_dropped += 1;
@@ -433,6 +596,13 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm,
                  pi0 = ~0;
                };
 
+             // flow cache search failed, retry with linear search
+             if (search_flow_cache && p0 == NULL)
+               {
+                 search_flow_cache = false;
+                 goto ah;
+               }
+
              /* Drop by default if no match on PROTECT, BYPASS or DISCARD */
              ipsec_unprocessed += 1;
              next[0] = IPSEC_INPUT_NEXT_DROP;
diff --git a/src/vnet/ipsec/ipsec_output.c b/src/vnet/ipsec/ipsec_output.c
index 84927debaca..6d6608d7a32 100644
--- a/src/vnet/ipsec/ipsec_output.c
+++ b/src/vnet/ipsec/ipsec_output.c
@@ -278,7 +278,7 @@ ipsec_output_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
   ipsec_spd_t *spd0 = 0;
   int bogus;
   u64 nc_protect = 0, nc_bypass = 0, nc_discard = 0, nc_nomatch = 0;
-  u8 flow_cache_enabled = im->flow_cache_flag;
+  u8 flow_cache_enabled = im->output_flow_cache_flag;
 
   from = vlib_frame_vector_args (from_frame);
   n_left_from = from_frame->n_vectors;
diff --git a/src/vnet/ipsec/ipsec_spd.h b/src/vnet/ipsec/ipsec_spd.h
index 5bfc6ae56f6..757a1b72d51 100644
--- a/src/vnet/ipsec/ipsec_spd.h
+++ b/src/vnet/ipsec/ipsec_spd.h
@@ -64,7 +64,8 @@ extern int ipsec_set_interface_spd (vlib_main_t * vm,
 
 extern u8 *format_ipsec_spd (u8 * s, va_list * args);
 
-extern u8 *format_ipsec_spd_flow_cache (u8 *s, va_list *args);
+extern u8 *format_ipsec_out_spd_flow_cache (u8 *s, va_list *args);
+extern u8 *format_ipsec_in_spd_flow_cache (u8 *s, va_list *args);
 
 #endif /* __IPSEC_SPD_H__ */
diff --git a/src/vnet/ipsec/ipsec_spd_policy.c b/src/vnet/ipsec/ipsec_spd_policy.c
index 36405bd6d9b..72da408c161 100644
--- a/src/vnet/ipsec/ipsec_spd_policy.c
+++ b/src/vnet/ipsec/ipsec_spd_policy.c
@@ -156,7 +156,7 @@ ipsec_add_del_policy (vlib_main_t * vm,
   if (!spd)
     return VNET_API_ERROR_SYSCALL_ERROR_1;
 
-  if (im->flow_cache_flag && !policy->is_ipv6 &&
+  if (im->output_flow_cache_flag && !policy->is_ipv6 &&
       policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND)
     {
       /*
@@ -179,6 +179,31 @@ ipsec_add_del_policy (vlib_main_t * vm,
       clib_atomic_store_relax_n (&im->ipsec4_out_spd_flow_cache_entries, 0);
     }
 
+  if ((policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
+       policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
+       policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD) &&
+      im->input_flow_cache_flag && !policy->is_ipv6)
+    {
+      /*
+       * Flow cache entry is valid only when input_epoch_count value in
+       * control plane and data plane match. Otherwise, flow cache entry
+       * is considered stale. To avoid the race condition of using old
+       * input_epoch_count value in data plane after the roll over of
+       * input_epoch_count in control plane, entire flow cache is reset.
+       */
+      if (im->input_epoch_count == 0xFFFFFFFF)
+        {
+          /* Reset all the entries in flow cache */
+          clib_memset_u8 (im->ipsec4_in_spd_hash_tbl, 0,
+                          im->ipsec4_in_spd_hash_num_buckets *
+                            (sizeof (*(im->ipsec4_in_spd_hash_tbl))));
+        }
+      /* Increment epoch counter by 1 */
+      clib_atomic_fetch_add_relax (&im->input_epoch_count, 1);
+      /* Reset spd flow cache counter since all old entries are stale */
+      im->ipsec4_in_spd_flow_cache_entries = 0;
+    }
+
   if (is_add)
     {
       u32 policy_index;
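Once enabled, the number of active flow cache entries is reported next to
the SPD output by ipsec_spd_show_all () above. Assuming the usual
"show ipsec all"-style CLI path that invokes it, the output would include
a line such as (counter value illustrative only):

    ipv4-inbound-spd-flow-cache-entries: 1024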