author    Zachary Leaf <zachary.leaf@arm.com>      2021-06-25 08:11:15 -0500
committer Fan Zhang <roy.fan.zhang@intel.com>      2022-04-14 12:46:51 +0000
commit    7cd35f5d688d9e3bddf66602655274dae944b086 (patch)
tree      a379d214f3036cecf5d13fe94f65dd4ba85c73f5 /src/vnet/ipsec/ipsec.c
parent    e1fd3903efe38880a45687299a414b1516994955 (diff)
ipsec: perf improvement of ipsec4_input_node using flow cache
Adding flow cache support to improve inbound IPv4/IPSec Security Policy
Database (SPD) lookup performance. By enabling the flow cache in startup
conf, this replaces a linear O(N) SPD search with an O(1) hash table
search.

This patch is the ipsec4_input_node counterpart to
https://gerrit.fd.io/r/c/vpp/+/31694, and shares much of the same code,
theory and mechanism of action.

Details about the flow cache:

Mechanism:
1. The first packet of a flow undergoes a linear search in the SPD
   table. Once a policy match is found, a new entry is added to the
   flow cache. From the 2nd packet onwards, the policy lookup happens
   in the flow cache.
2. The flow cache is implemented using a hash table without collision
   handling. This avoids the logic to age out or recycle old flows in
   the flow cache. Whenever a collision occurs, the old entry is
   overwritten by the new entry. The worst case is when all 256 packets
   in a batch result in collisions, falling back to linear search; the
   average and best case are O(1).
3. The size of the flow cache is fixed and decided based on the number
   of flows to be supported. The default is set to 1 million flows, but
   is configurable by a startup.conf option.
4. Whenever an SPD rule is added/deleted by the control plane, all
   current flow cache entries are invalidated. As the SPD API is not
   mp-safe, the data plane waits for the control plane operation to
   complete. Cache invalidation is via an epoch counter that is
   incremented on policy add/del and stored with each entry in the flow
   cache. If the epoch counter in the flow cache entry does not match
   the current count, the entry is considered stale, and we fall back
   to linear search.

The following configurable options are available through startup conf
under the ipsec{} entry (a sample stanza is shown after this commit
message):
1. ipv4-inbound-spd-flow-cache on/off - enable the SPD flow cache
   (default off)
2. ipv4-inbound-spd-hash-buckets %d - set the number of hash buckets
   (default 4,194,304: ~1 million flows with a 25% load factor)

Performance with 1 core, 1 ESP tunnel, null-decrypt then bypass, 94B
(null encrypted packet), for different SPD policy matching indices
(throughput shown as Baseline/Optimized, in Mbps):

SPD policy index :     2        10       100      1000
ARM TX2          :  300/290   230/290   70/290   8.5/290

Type: improvement

Signed-off-by: Zachary Leaf <zachary.leaf@arm.com>
Signed-off-by: mgovind <govindarajan.Mohandoss@arm.com>
Tested-by: Jieqiang Wang <jieqiang.wang@arm.com>
Change-Id: I8be2ad4715accbb335c38cd933904119db75827b
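For reference, the new options above are parsed from the ipsec{} stanza of
startup.conf by ipsec_config() in the diff below. A minimal illustrative
stanza (values are examples, not recommendations) might look like:

ipsec {
  ipv4-inbound-spd-flow-cache on
  ipv4-inbound-spd-hash-buckets 4194304
}

Both options mirror the existing ipv4-outbound-spd-flow-cache and
ipv4-outbound-spd-hash-buckets options handled by the same parser.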
Diffstat (limited to 'src/vnet/ipsec/ipsec.c')
-rw-r--r--  src/vnet/ipsec/ipsec.c | 35
1 file changed, 31 insertions(+), 4 deletions(-)
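One detail worth calling out before the diff: ipsec_config() stores the
requested bucket count as 1ULL << max_log2 (ipsec4_in_spd_hash_num_buckets),
i.e. rounded up to a power of two (assuming vppinfra's max_log2() is the
ceiling-log2 helper). A small standalone sketch of that rounding, using a
local helper in place of max_log2(), purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Round a requested bucket count up to a power of two, mirroring the
 * 1ULL << max_log2 (n) idiom in the config hunk below.  Local stand-in
 * for vppinfra's max_log2 (). */
static uint64_t
round_up_pow2 (uint64_t n)
{
  uint64_t p = 1;
  while (p < n)
    p <<= 1;
  return p;
}

int
main (void)
{
  /* e.g. "ipv4-inbound-spd-hash-buckets 1000000" ends up as 1048576 */
  printf ("%llu\n", (unsigned long long) round_up_pow2 (1000000));
  return 0;
}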
diff --git a/src/vnet/ipsec/ipsec.c b/src/vnet/ipsec/ipsec.c
index 5cc8044e3d4..2749b04587b 100644
--- a/src/vnet/ipsec/ipsec.c
+++ b/src/vnet/ipsec/ipsec.c
@@ -31,6 +31,10 @@
*/
#define IPSEC4_OUT_SPD_DEFAULT_HASH_NUM_BUCKETS (1 << 22)
+/* Flow cache is sized for 1 million flows with a load factor of .25.
+ */
+#define IPSEC4_SPD_DEFAULT_HASH_NUM_BUCKETS (1 << 22)
+
ipsec_main_t ipsec_main;
esp_async_post_next_t esp_encrypt_async_next;
esp_async_post_next_t esp_decrypt_async_next;
@@ -554,12 +558,18 @@ ipsec_init (vlib_main_t * vm)
crypto_engine_backend_register_post_node (vm);
im->ipsec4_out_spd_hash_tbl = NULL;
- im->flow_cache_flag = 0;
+ im->output_flow_cache_flag = 0;
im->ipsec4_out_spd_flow_cache_entries = 0;
im->epoch_count = 0;
im->ipsec4_out_spd_hash_num_buckets =
IPSEC4_OUT_SPD_DEFAULT_HASH_NUM_BUCKETS;
+ im->ipsec4_in_spd_hash_tbl = NULL;
+ im->input_flow_cache_flag = 0;
+ im->ipsec4_in_spd_flow_cache_entries = 0;
+ im->input_epoch_count = 0;
+ im->ipsec4_in_spd_hash_num_buckets = IPSEC4_SPD_DEFAULT_HASH_NUM_BUCKETS;
+
return 0;
}
@@ -570,14 +580,16 @@ ipsec_config (vlib_main_t *vm, unformat_input_t *input)
{
ipsec_main_t *im = &ipsec_main;
unformat_input_t sub_input;
+
u32 ipsec4_out_spd_hash_num_buckets;
+ u32 ipsec4_in_spd_hash_num_buckets;
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (input, "ipv4-outbound-spd-flow-cache on"))
- im->flow_cache_flag = 1;
+ im->output_flow_cache_flag = 1;
else if (unformat (input, "ipv4-outbound-spd-flow-cache off"))
- im->flow_cache_flag = 0;
+ im->output_flow_cache_flag = 0;
else if (unformat (input, "ipv4-outbound-spd-hash-buckets %d",
&ipsec4_out_spd_hash_num_buckets))
{
@@ -585,6 +597,16 @@ ipsec_config (vlib_main_t *vm, unformat_input_t *input)
im->ipsec4_out_spd_hash_num_buckets =
1ULL << max_log2 (ipsec4_out_spd_hash_num_buckets);
}
+ else if (unformat (input, "ipv4-inbound-spd-flow-cache on"))
+ im->input_flow_cache_flag = 1;
+ else if (unformat (input, "ipv4-inbound-spd-flow-cache off"))
+ im->input_flow_cache_flag = 0;
+ else if (unformat (input, "ipv4-inbound-spd-hash-buckets %d",
+ &ipsec4_in_spd_hash_num_buckets))
+ {
+ im->ipsec4_in_spd_hash_num_buckets =
+ 1ULL << max_log2 (ipsec4_in_spd_hash_num_buckets);
+ }
else if (unformat (input, "ip4 %U", unformat_vlib_cli_sub_input,
&sub_input))
{
@@ -623,11 +645,16 @@ ipsec_config (vlib_main_t *vm, unformat_input_t *input)
return clib_error_return (0, "unknown input `%U'",
format_unformat_error, input);
}
- if (im->flow_cache_flag)
+ if (im->output_flow_cache_flag)
{
vec_add2 (im->ipsec4_out_spd_hash_tbl, im->ipsec4_out_spd_hash_tbl,
im->ipsec4_out_spd_hash_num_buckets);
}
+ if (im->input_flow_cache_flag)
+ {
+ vec_add2 (im->ipsec4_in_spd_hash_tbl, im->ipsec4_in_spd_hash_tbl,
+ im->ipsec4_in_spd_hash_num_buckets);
+ }
return 0;
}
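To make the mechanism described in the commit message concrete, below is a
standalone, simplified sketch of a direct-mapped flow cache with epoch-based
invalidation. It is not the code added by this patch and none of the names
are taken from it; the real state initialized above lives in fields such as
ipsec4_in_spd_hash_tbl and input_epoch_count, and the real flow key covers
more than the two addresses used here.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct
{
  uint32_t src_addr;	 /* simplified flow key */
  uint32_t dst_addr;
  uint32_t policy_index; /* cached result of the linear SPD search */
  uint32_t epoch_count;	 /* global epoch at the time the entry was filled */
} flow_cache_entry_t;

typedef struct
{
  flow_cache_entry_t *entries; /* num_buckets slots, num_buckets power of 2 */
  uint32_t num_buckets;
  uint32_t epoch_count; /* incremented on every SPD policy add/delete */
} flow_cache_t;

static uint32_t
flow_hash (uint32_t src, uint32_t dst, uint32_t num_buckets)
{
  /* placeholder bit mixer; only needs to spread flows across buckets */
  uint32_t h = src * 0x9e3779b1u ^ dst * 0x85ebca6bu;
  return h & (num_buckets - 1); /* valid because num_buckets is a power of 2 */
}

/* Return the cached policy index, or ~0u to signal "fall back to the
 * linear SPD walk" (first packet of a flow, collision overwrite, or a
 * stale epoch). */
static uint32_t
flow_cache_lookup (flow_cache_t *fc, uint32_t src, uint32_t dst)
{
  flow_cache_entry_t *e = &fc->entries[flow_hash (src, dst, fc->num_buckets)];

  if (e->epoch_count != fc->epoch_count)
    return ~0u; /* a policy was added/deleted since this entry was cached */
  if (e->src_addr != src || e->dst_addr != dst)
    return ~0u; /* empty slot, or another flow owns this bucket */
  return e->policy_index;
}

/* After the linear search found a match, (over)write the bucket. */
static void
flow_cache_update (flow_cache_t *fc, uint32_t src, uint32_t dst,
		   uint32_t policy_index)
{
  flow_cache_entry_t *e = &fc->entries[flow_hash (src, dst, fc->num_buckets)];

  e->src_addr = src;
  e->dst_addr = dst;
  e->policy_index = policy_index;
  e->epoch_count = fc->epoch_count; /* tie the entry to the current epoch */
}

int
main (void)
{
  flow_cache_t fc = { calloc (1 << 4, sizeof (flow_cache_entry_t)), 1 << 4, 0 };

  printf ("%d\n", (int) flow_cache_lookup (&fc, 1, 2)); /* -1: first packet */
  flow_cache_update (&fc, 1, 2, 42);			/* cache the match */
  printf ("%d\n", (int) flow_cache_lookup (&fc, 1, 2)); /* 42: cache hit */
  fc.epoch_count++;		    /* a policy add/del invalidates everything */
  printf ("%d\n", (int) flow_cache_lookup (&fc, 1, 2)); /* -1: stale entry */
  free (fc.entries);
  return 0;
}

Overwriting on collision (rather than chaining or aging entries) keeps the
per-packet cost bounded: a collision only costs one extra linear SPD walk for
the flow that lost the bucket, which matches the worst/average case analysis
in point 2 of the commit message.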