author	Piotr Bronowski <piotrx.bronowski@intel.com>	2022-08-31 13:48:14 +0000
committer	Piotr Bronowski <piotrx.bronowski@intel.com>	2022-09-12 11:55:14 +0200
commit	993b6bee63d4f455db0a6021c9659aad4545acf2 (patch)
tree	9c098bf4cadb56fbb3170264b0801c1565cac872 /src/vnet/ipsec/ipsec_input.c
parent	a27aa6b413512415a592ecd1f14714fd1634d29c (diff)
ipsec: introduce fast path ipv4 inbound matching
This patch introduces fast path matching for inbound IPv4 traffic. The fast path uses bihash tables to find the matching policy. Adding and removing policies in the fast path is much faster than in the current implementation. This is still a new feature, and further work can and needs to be done to improve performance.

Type: feature

Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
Change-Id: Ifbd5bfecc21b76ddf8363f5dc089d77595196675
Diffstat (limited to 'src/vnet/ipsec/ipsec_input.c')
-rw-r--r--	src/vnet/ipsec/ipsec_input.c	54
1 file changed, 51 insertions, 3 deletions
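In essence, the patch replaces the linear inbound SPD walk with a bihash-backed lookup keyed on a 5-tuple. The following is a condensed sketch of the per-packet sequence, not code lifted verbatim from the patch: the names and signatures are taken from the diff below, while ip0, esp0, spd0, im and p0 stand in for the node's existing per-buffer locals, and the 0 passed for the IPv6 flag mirrors the !ip_v6 in the patch.

    ipsec_fp_5tuple_t tuples[1];
    ipsec_policy_t *policies[1];

    if (im->fp_spd_ipv4_in_is_enabled &&
        PREDICT_TRUE (INDEX_INVALID != spd0->fp_spd.ip4_in_lookup_hash_idx))
      {
        /* build the zero-padded 5-tuple key for the bihash lookup */
        ipsec_fp_in_5tuple_from_ip4_range (
          &tuples[0], ip0->src_address.as_u32, ip0->dst_address.as_u32,
          clib_net_to_host_u32 (esp0->spi),
          IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
        /* batched matcher: n tuples in, n policy pointers out; here n = 1 */
        ipsec_fp_in_policy_match_n (&spd0->fp_spd, 0 /* !ip_v6 */, tuples,
                                    policies, 1);
        p0 = policies[0];
      }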
diff --git a/src/vnet/ipsec/ipsec_input.c b/src/vnet/ipsec/ipsec_input.c
index 09166bccf5b..0c572c83e96 100644
--- a/src/vnet/ipsec/ipsec_input.c
+++ b/src/vnet/ipsec/ipsec_input.c
@@ -19,6 +19,7 @@
 #include <vnet/api_errno.h>
 #include <vnet/ip/ip.h>
 #include <vnet/feature/feature.h>
+#include <vnet/ipsec/ipsec_spd_fp_lookup.h>
 #include <vnet/ipsec/ipsec.h>
 #include <vnet/ipsec/esp.h>
@@ -149,6 +150,18 @@ ipsec4_input_spd_find_flow_cache_entry (ipsec_main_t *im, u32 sa, u32 da,
   return p;
 }
 
+always_inline void
+ipsec_fp_in_5tuple_from_ip4_range (ipsec_fp_5tuple_t *tuple, u32 la, u32 ra,
+                                   u32 spi, u8 action)
+{
+  clib_memset (tuple->l3_zero_pad, 0, sizeof (tuple->l3_zero_pad));
+  tuple->laddr.as_u32 = la;
+  tuple->raddr.as_u32 = ra;
+  tuple->spi = spi;
+  tuple->action = action;
+  tuple->is_ipv6 = 0;
+}
+
 always_inline ipsec_policy_t *
 ipsec_input_policy_match (ipsec_spd_t *spd, u32 sa, u32 da,
                           ipsec_spd_policy_type_t policy_type)
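The clib_memset of l3_zero_pad before the address fields matters: the 5-tuple serves as a bihash key, and bihash keys are hashed and compared as raw memory, so uninitialized padding bytes would make otherwise identical IPv4 tuples miss. (That rationale is inferred from how bihash keys behave; the patch itself does not spell it out.)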
@@ -317,6 +330,9 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm,
           ipsec_policy_t *p0 = NULL;
           u8 has_space0;
           bool search_flow_cache = false;
+          ipsec_policy_t *policies[1];
+          ipsec_fp_5tuple_t tuples[1];
+          bool ip_v6 = true;
 
           if (n_left_from > 2)
             {
@@ -351,7 +367,19 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm,
               search_flow_cache = im->input_flow_cache_flag;
 
             esp_or_udp:
-              if (search_flow_cache) // attempt to match policy in flow cache
+              if (im->fp_spd_ipv4_in_is_enabled &&
+                  PREDICT_TRUE (INDEX_INVALID !=
+                                spd0->fp_spd.ip4_in_lookup_hash_idx))
+                {
+                  ipsec_fp_in_5tuple_from_ip4_range (
+                    &tuples[0], ip0->src_address.as_u32,
+                    ip0->dst_address.as_u32,
+                    clib_net_to_host_u32 (esp0->spi),
+                    IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
+                  ipsec_fp_in_policy_match_n (&spd0->fp_spd, !ip_v6, tuples,
+                                              policies, 1);
+                  p0 = policies[0];
+                }
+              else if (search_flow_cache) // attempt to match policy in flow cache
                 {
                   p0 = ipsec4_input_spd_find_flow_cache_entry (
                     im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
@@ -392,7 +420,16 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm,
               pi0 = ~0;
             };
 
-          if (search_flow_cache)
+          if (im->fp_spd_ipv4_in_is_enabled &&
+              PREDICT_TRUE (INDEX_INVALID !=
+                            spd0->fp_spd.ip4_in_lookup_hash_idx))
+            {
+              tuples->action = IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS;
+              ipsec_fp_in_policy_match_n (&spd0->fp_spd, !ip_v6, tuples,
+                                          policies, 1);
+              p0 = policies[0];
+            }
+          else if (search_flow_cache)
             {
               p0 = ipsec4_input_spd_find_flow_cache_entry (
                 im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
@@ -424,7 +461,18 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm,
               pi0 = ~0;
             };
 
-          if (search_flow_cache)
+          if (im->fp_spd_ipv4_in_is_enabled &&
+              PREDICT_TRUE (INDEX_INVALID !=
+                            spd0->fp_spd.ip4_in_lookup_hash_idx))
+            {
+              tuples->action = IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD;
+              ipsec_fp_in_policy_match_n (&spd0->fp_spd, !ip_v6, tuples,
+                                          policies, 1);
+              p0 = policies[0];
+            }
+          else if (search_flow_cache)
             {
               p0 = ipsec4_input_spd_find_flow_cache_entry (
                 im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
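Taken together, the three fast path hunks preserve the node's existing precedence: a PROTECT lookup is tried first (keyed on the ESP SPI), then BYPASS, then DISCARD, reusing the same tuple and rewriting only its action field between stages. As a rough sketch of that fall-through, assuming tuples[0] has already been filled by ipsec_fp_in_5tuple_from_ip4_range and ignoring the counter updates and early exits the real node interleaves between stages:

    /* editorial condensation, not code from the patch */
    static const u8 stages[3] = { IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT,
                                  IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS,
                                  IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD };
    for (int i = 0; !p0 && i < 3; i++)
      {
        tuples->action = stages[i];
        ipsec_fp_in_policy_match_n (&spd0->fp_spd, !ip_v6, tuples,
                                    policies, 1);
        p0 = policies[0];
      }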