author    Piotr Bronowski <piotrx.bronowski@intel.com>  2022-08-31 13:48:14 +0000
committer Piotr Bronowski <piotrx.bronowski@intel.com>  2022-09-12 11:55:14 +0200
commit    993b6bee63d4f455db0a6021c9659aad4545acf2 (patch)
tree      9c098bf4cadb56fbb3170264b0801c1565cac872
parent    a27aa6b413512415a592ecd1f14714fd1634d29c (diff)
ipsec: introduce fast path ipv4 inbound matching
This patch introduces fast path matching for inbound IPv4 traffic. The fast path uses bihash tables to find the matching policy. Adding and removing policies in the fast path is much faster than in the current implementation. This is still a new feature, and further work can and needs to be done to improve its performance.

Type: feature
Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
Change-Id: Ifbd5bfecc21b76ddf8363f5dc089d77595196675
-rw-r--r--  src/vnet/ipsec/ipsec.c               |  51
-rw-r--r--  src/vnet/ipsec/ipsec.h               |  17
-rw-r--r--  src/vnet/ipsec/ipsec_input.c         |  54
-rw-r--r--  src/vnet/ipsec/ipsec_output.h        |   9
-rw-r--r--  src/vnet/ipsec/ipsec_spd.c           | 151
-rw-r--r--  src/vnet/ipsec/ipsec_spd.h           |  28
-rw-r--r--  src/vnet/ipsec/ipsec_spd_fp_lookup.h | 198
-rw-r--r--  src/vnet/ipsec/ipsec_spd_policy.c    | 269
-rw-r--r--  src/vnet/ipsec/ipsec_spd_policy.h    |  15
-rw-r--r--  test/test_ipsec_spd_fp_input.py      | 844
10 files changed, 1472 insertions, 164 deletions
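
The matching scheme added by the patch works as follows: every policy of a given type contributes a mask type, and a packet is matched by AND-ing its 5-tuple with each registered mask and probing the SPD's bihash table with the result; among all hits, the highest-priority policy whose exact range/SPI check passes wins. A condensed sketch of that loop (simplified from ipsec_fp_in_ip4_policy_match_n in the ipsec_spd_fp_lookup.h hunk below; declarations and the burst loop are omitted, so this is illustrative rather than compilable as-is):

  /* condensed ipv4 inbound lookup: mask the packet tuple with each known
     mask type, probe the SPD's bihash, keep the highest-priority hit */
  ipsec_policy_t *best = 0;
  u32 best_priority = 0;

  vec_foreach (mti, mask_type_ids)        /* masks used by this policy type */
    {
      mte = im->fp_mask_types + mti->mask_type_idx;
      if (mte->mask.action == 0)          /* skip masks added by outbound policies */
        continue;

      pmatch = (u64 *) match->kv_16_8.key; /* packet laddr/raddr/spi/action */
      pmask = (u64 *) mte->mask.kv_16_8.key;
      pkey = (u64 *) kv.key;
      *pkey++ = *pmatch++ & *pmask++;      /* reduce the tuple to this mask type */
      *pkey = *pmatch & *pmask;

      if (clib_bihash_search_inline_2_16_8 (bihash_table, &kv, &result) == 0)
        {
          result_val = (ipsec_fp_lookup_value_t *) &result.value;
          vec_foreach (policy_id, result_val->fp_policies_ids)
            {
              policy = im->policies + *policy_id;
              if (best_priority < policy->priority &&
                  single_rule_in_match_5tuple (policy, match))
                {
                  best_priority = policy->priority;
                  best = policy;           /* exact range/SPI check passed */
                }
            }
        }
    }
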
diff --git a/src/vnet/ipsec/ipsec.c b/src/vnet/ipsec/ipsec.c
index 2dd077a74a1..e95bd163049 100644
--- a/src/vnet/ipsec/ipsec.c
+++ b/src/vnet/ipsec/ipsec.c
@@ -495,11 +495,6 @@ ipsec_init (vlib_main_t * vm)
if ((error = vlib_call_init_function (vm, ipsec_cli_init)))
return error;
- im->ipv4_fp_spd_is_enabled = 0;
- im->ipv6_fp_spd_is_enabled = 0;
-
- im->fp_lookup_hash_buckets = IPSEC_FP_HASH_LOOKUP_HASH_BUCKETS;
-
vec_validate (im->crypto_algs, IPSEC_CRYPTO_N_ALG - 1);
a = im->crypto_algs + IPSEC_CRYPTO_ALG_NONE;
@@ -638,6 +633,13 @@ ipsec_init (vlib_main_t * vm)
vec_validate_init_empty_aligned (im->next_header_registrations, 255, ~0,
CLIB_CACHE_LINE_BYTES);
+ im->fp_spd_ipv4_out_is_enabled = 0;
+ im->fp_spd_ipv6_out_is_enabled = 0;
+ im->fp_spd_ipv4_in_is_enabled = 0;
+ im->fp_spd_ipv6_in_is_enabled = 0;
+
+ im->fp_lookup_hash_buckets = IPSEC_FP_HASH_LOOKUP_HASH_BUCKETS;
+
return 0;
}
@@ -652,22 +654,41 @@ ipsec_config (vlib_main_t *vm, unformat_input_t *input)
u32 ipsec4_out_spd_hash_num_buckets;
u32 ipsec4_in_spd_hash_num_buckets;
u32 ipsec_spd_fp_num_buckets;
+ bool fp_spd_ip4_enabled = false;
+ bool fp_spd_ip6_enabled = false;
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (input, "ipv6-outbound-spd-fast-path on"))
{
- im->ipv6_fp_spd_is_enabled = 1;
+ im->fp_spd_ipv6_out_is_enabled = 1;
+ fp_spd_ip6_enabled = true;
}
else if (unformat (input, "ipv6-outbound-spd-fast-path off"))
- im->ipv6_fp_spd_is_enabled = 0;
+ im->fp_spd_ipv6_out_is_enabled = 0;
else if (unformat (input, "ipv4-outbound-spd-fast-path on"))
{
- im->ipv4_fp_spd_is_enabled = 1;
+ im->fp_spd_ipv4_out_is_enabled = 1;
im->output_flow_cache_flag = 0;
+ fp_spd_ip4_enabled = true;
}
else if (unformat (input, "ipv4-outbound-spd-fast-path off"))
- im->ipv4_fp_spd_is_enabled = 0;
+ im->fp_spd_ipv4_out_is_enabled = 0;
+ else if (unformat (input, "ipv6-inbound-spd-fast-path on"))
+ {
+ im->fp_spd_ipv6_in_is_enabled = 1;
+ fp_spd_ip6_enabled = true;
+ }
+ else if (unformat (input, "ipv6-inbound-spd-fast-path off"))
+ im->fp_spd_ipv6_in_is_enabled = 0;
+ else if (unformat (input, "ipv4-inbound-spd-fast-path on"))
+ {
+ im->fp_spd_ipv4_in_is_enabled = 1;
+ im->input_flow_cache_flag = 0;
+ fp_spd_ip4_enabled = true;
+ }
+ else if (unformat (input, "ipv4-inbound-spd-fast-path off"))
+ im->fp_spd_ipv4_in_is_enabled = 0;
else if (unformat (input, "spd-fast-path-num-buckets %d",
&ipsec_spd_fp_num_buckets))
{
@@ -676,7 +697,7 @@ ipsec_config (vlib_main_t *vm, unformat_input_t *input)
<< max_log2 (ipsec_spd_fp_num_buckets);
}
else if (unformat (input, "ipv4-outbound-spd-flow-cache on"))
- im->output_flow_cache_flag = im->ipv4_fp_spd_is_enabled ? 0 : 1;
+ im->output_flow_cache_flag = im->fp_spd_ipv4_out_is_enabled ? 0 : 1;
else if (unformat (input, "ipv4-outbound-spd-flow-cache off"))
im->output_flow_cache_flag = 0;
else if (unformat (input, "ipv4-outbound-spd-hash-buckets %d",
@@ -687,7 +708,7 @@ ipsec_config (vlib_main_t *vm, unformat_input_t *input)
1ULL << max_log2 (ipsec4_out_spd_hash_num_buckets);
}
else if (unformat (input, "ipv4-inbound-spd-flow-cache on"))
- im->input_flow_cache_flag = 1;
+ im->input_flow_cache_flag = im->fp_spd_ipv4_in_is_enabled ? 0 : 1;
else if (unformat (input, "ipv4-inbound-spd-flow-cache off"))
im->input_flow_cache_flag = 0;
else if (unformat (input, "ipv4-inbound-spd-hash-buckets %d",
@@ -745,6 +766,14 @@ ipsec_config (vlib_main_t *vm, unformat_input_t *input)
im->ipsec4_in_spd_hash_num_buckets);
}
+ if (fp_spd_ip4_enabled)
+ pool_alloc_aligned (im->fp_ip4_lookup_hashes_pool,
+ IPSEC_FP_IP4_HASHES_POOL_SIZE, CLIB_CACHE_LINE_BYTES);
+
+ if (fp_spd_ip6_enabled)
+ pool_alloc_aligned (im->fp_ip6_lookup_hashes_pool,
+ IPSEC_FP_IP6_HASHES_POOL_SIZE, CLIB_CACHE_LINE_BYTES);
+
return 0;
}
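
For reference, a startup.conf stanza exercising the knobs parsed above could look like this (values are illustrative; the bucket count is rounded to a power of two, and enabling an ipv4 fast path forces the corresponding ipv4 flow cache off):

  ipsec {
    ipv4-inbound-spd-fast-path on
    ipv4-outbound-spd-fast-path on
    spd-fast-path-num-buckets 131072
  }
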
diff --git a/src/vnet/ipsec/ipsec.h b/src/vnet/ipsec/ipsec.h
index 06bb299988b..69aa661683a 100644
--- a/src/vnet/ipsec/ipsec.h
+++ b/src/vnet/ipsec/ipsec.h
@@ -30,6 +30,9 @@
#include <vppinfra/bihash_24_16.h>
+#define IPSEC_FP_IP4_HASHES_POOL_SIZE 128
+#define IPSEC_FP_IP6_HASHES_POOL_SIZE 128
+
typedef clib_error_t *(*add_del_sa_sess_cb_t) (u32 sa_index, u8 is_add);
typedef clib_error_t *(*check_support_cb_t) (ipsec_sa_t * sa);
typedef clib_error_t *(*enable_disable_cb_t) (int is_enable);
@@ -143,10 +146,16 @@ typedef struct
ipsec_spd_t *spds;
/* pool of policies */
ipsec_policy_t *policies;
-
- u32 ipv4_fp_spd_is_enabled;
- u32 ipv6_fp_spd_is_enabled;
-
+ /* pool of bihash tables for ipv4 ipsec rules */
+ clib_bihash_16_8_t *fp_ip4_lookup_hashes_pool;
+ /* pool of bihash tables for ipv6 ipsec rules */
+ clib_bihash_40_8_t *fp_ip6_lookup_hashes_pool;
+
+ u32 fp_spd_ipv4_out_is_enabled;
+ u32 fp_spd_ipv4_in_is_enabled;
+ u32 fp_spd_ipv6_out_is_enabled;
+ u32 fp_spd_ipv6_in_is_enabled;
+ /* pool of fast path mask types */
ipsec_fp_mask_type_entry_t *fp_mask_types;
u32 fp_lookup_hash_buckets; /* number of buckets should be power of two */
diff --git a/src/vnet/ipsec/ipsec_input.c b/src/vnet/ipsec/ipsec_input.c
index 09166bccf5b..0c572c83e96 100644
--- a/src/vnet/ipsec/ipsec_input.c
+++ b/src/vnet/ipsec/ipsec_input.c
@@ -19,6 +19,7 @@
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>
#include <vnet/feature/feature.h>
+#include <vnet/ipsec/ipsec_spd_fp_lookup.h>
#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
@@ -149,6 +150,18 @@ ipsec4_input_spd_find_flow_cache_entry (ipsec_main_t *im, u32 sa, u32 da,
return p;
}
+always_inline void
+ipsec_fp_in_5tuple_from_ip4_range (ipsec_fp_5tuple_t *tuple, u32 la, u32 ra,
+ u32 spi, u8 action)
+{
+ clib_memset (tuple->l3_zero_pad, 0, sizeof (tuple->l3_zero_pad));
+ tuple->laddr.as_u32 = la;
+ tuple->raddr.as_u32 = ra;
+ tuple->spi = spi;
+ tuple->action = action;
+ tuple->is_ipv6 = 0;
+}
+
always_inline ipsec_policy_t *
ipsec_input_policy_match (ipsec_spd_t *spd, u32 sa, u32 da,
ipsec_spd_policy_type_t policy_type)
@@ -317,6 +330,9 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm,
ipsec_policy_t *p0 = NULL;
u8 has_space0;
bool search_flow_cache = false;
+ ipsec_policy_t *policies[1];
+ ipsec_fp_5tuple_t tuples[1];
+ bool ip_v6 = true;
if (n_left_from > 2)
{
@@ -351,7 +367,19 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm,
search_flow_cache = im->input_flow_cache_flag;
esp_or_udp:
- if (search_flow_cache) // attempt to match policy in flow cache
+ if (im->fp_spd_ipv4_in_is_enabled &&
+ PREDICT_TRUE (INDEX_INVALID !=
+ spd0->fp_spd.ip4_in_lookup_hash_idx))
+ {
+ ipsec_fp_in_5tuple_from_ip4_range (
+ &tuples[0], ip0->src_address.as_u32, ip0->dst_address.as_u32,
+ clib_net_to_host_u32 (esp0->spi),
+ IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
+ ipsec_fp_in_policy_match_n (&spd0->fp_spd, !ip_v6, tuples,
+ policies, 1);
+ p0 = policies[0];
+ }
+ else if (search_flow_cache) // attempt to match policy in flow cache
{
p0 = ipsec4_input_spd_find_flow_cache_entry (
im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
@@ -392,7 +420,16 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm,
pi0 = ~0;
};
- if (search_flow_cache)
+ if (im->fp_spd_ipv4_in_is_enabled &&
+ PREDICT_TRUE (INDEX_INVALID !=
+ spd0->fp_spd.ip4_in_lookup_hash_idx))
+ {
+ tuples->action = IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS;
+ ipsec_fp_in_policy_match_n (&spd0->fp_spd, !ip_v6, tuples,
+ policies, 1);
+ p0 = policies[0];
+ }
+ else if (search_flow_cache)
{
p0 = ipsec4_input_spd_find_flow_cache_entry (
im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
@@ -424,7 +461,18 @@ VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm,
pi0 = ~0;
};
- if (search_flow_cache)
+ if (im->fp_spd_ipv4_in_is_enabled &&
+ PREDICT_TRUE (INDEX_INVALID !=
+ spd0->fp_spd.ip4_in_lookup_hash_idx))
+ {
+ tuples->action = IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD;
+ ipsec_fp_in_policy_match_n (&spd0->fp_spd, !ip_v6, tuples,
+ policies, 1);
+ p0 = policies[0];
+ }
+ else
+
+ if (search_flow_cache)
{
p0 = ipsec4_input_spd_find_flow_cache_entry (
im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
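
The hunks above consult the fast path up to three times per packet, once per inbound policy type and always in the order PROTECT, BYPASS, DISCARD; the same tuple is reused with only its action field rewritten between lookups. In condensed form (the real node interleaves these lookups with SA checks and the flow cache fallback, so this is only a sketch of the ordering):

  /* per-packet lookup order in ipsec4_input_node (condensed) */
  ipsec_fp_in_5tuple_from_ip4_range (&tuples[0], ip0->src_address.as_u32,
                                     ip0->dst_address.as_u32,
                                     clib_net_to_host_u32 (esp0->spi),
                                     IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
  ipsec_fp_in_policy_match_n (&spd0->fp_spd, 0 /* is_ipv6 */, tuples, policies, 1);

  if (policies[0] == NULL)           /* no PROTECT match: retry as BYPASS */
    {
      tuples->action = IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS;
      ipsec_fp_in_policy_match_n (&spd0->fp_spd, 0, tuples, policies, 1);
    }
  if (policies[0] == NULL)           /* still no match: retry as DISCARD */
    {
      tuples->action = IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD;
      ipsec_fp_in_policy_match_n (&spd0->fp_spd, 0, tuples, policies, 1);
    }
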
diff --git a/src/vnet/ipsec/ipsec_output.h b/src/vnet/ipsec/ipsec_output.h
index 1239ed1f192..30f4ebedeb7 100644
--- a/src/vnet/ipsec/ipsec_output.h
+++ b/src/vnet/ipsec/ipsec_output.h
@@ -179,7 +179,8 @@ ipsec_output_policy_match_n (ipsec_spd_t *spd,
clib_memset (policies, 0, n * sizeof (ipsec_policy_t *));
- if (im->ipv4_fp_spd_is_enabled)
+ if (im->fp_spd_ipv4_out_is_enabled &&
+ PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip4_out_lookup_hash_idx))
{
ipsec_fp_5tuple_from_ip4_range_n (tuples, ip4_5tuples, n);
counter += ipsec_fp_out_policy_match_n (&spd->fp_spd, 0, tuples,
@@ -330,7 +331,8 @@ ipsec_output_policy_match (ipsec_spd_t *spd, u8 pr, u32 la, u32 ra, u16 lp,
if (!spd)
return 0;
- if (im->ipv4_fp_spd_is_enabled)
+ if (im->fp_spd_ipv4_out_is_enabled &&
+ PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip4_out_lookup_hash_idx))
{
ipsec_fp_5tuple_from_ip4_range (&tuples[0], la, ra, lp, rp, pr);
ipsec_fp_out_policy_match_n (&spd->fp_spd, 0, tuples, policies,
@@ -437,7 +439,8 @@ ipsec6_output_policy_match (ipsec_spd_t *spd, ip6_address_t *la,
if (!spd)
return 0;
- if (im->ipv6_fp_spd_is_enabled)
+ if (im->fp_spd_ipv6_out_is_enabled &&
+ PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip6_out_lookup_hash_idx))
{
ipsec_fp_5tuple_from_ip6_range (&tuples[0], la, ra, lp, rp, pr);
diff --git a/src/vnet/ipsec/ipsec_spd.c b/src/vnet/ipsec/ipsec_spd.c
index 22dddfd3fa8..aa42f99bee2 100644
--- a/src/vnet/ipsec/ipsec_spd.c
+++ b/src/vnet/ipsec/ipsec_spd.c
@@ -21,6 +21,7 @@ ipsec_add_del_spd (vlib_main_t * vm, u32 spd_id, int is_add)
{
ipsec_main_t *im = &ipsec_main;
ipsec_spd_t *spd = 0;
+ ipsec_spd_fp_t *fp_spd = 0;
uword *p;
u32 spd_index, k, v;
@@ -36,6 +37,7 @@ ipsec_add_del_spd (vlib_main_t * vm, u32 spd_id, int is_add)
spd = pool_elt_at_index (im->spds, spd_index);
if (!spd)
return VNET_API_ERROR_INVALID_VALUE;
+
/* *INDENT-OFF* */
hash_foreach (k, v, im->spd_index_by_sw_if_index, ({
if (v == spd_index)
@@ -46,18 +48,64 @@ ipsec_add_del_spd (vlib_main_t * vm, u32 spd_id, int is_add)
#define _(s,v) vec_free(spd->policies[IPSEC_SPD_POLICY_##s]);
foreach_ipsec_spd_policy_type
#undef _
- if (im->ipv4_fp_spd_is_enabled)
- {
- ipsec_spd_fp_t *fp_spd = &spd->fp_spd;
- clib_bihash_free_16_8 (&fp_spd->fp_ip4_lookup_hash);
- }
+ fp_spd = &spd->fp_spd;
+
+ if (im->fp_spd_ipv4_out_is_enabled)
+ {
+ if (fp_spd->ip4_out_lookup_hash_idx != INDEX_INVALID)
+ {
+ clib_bihash_16_8_t *bihash_table =
+ pool_elt_at_index (im->fp_ip4_lookup_hashes_pool,
+ fp_spd->ip4_out_lookup_hash_idx);
+
+ clib_bihash_free_16_8 (bihash_table);
+ vec_free (fp_spd->name4_out);
+ pool_put_index (im->fp_ip4_lookup_hashes_pool,
+ fp_spd->ip4_out_lookup_hash_idx);
+ }
+ }
+
+ if (im->fp_spd_ipv4_in_is_enabled)
+ {
+ if (fp_spd->ip4_in_lookup_hash_idx != INDEX_INVALID)
+ {
+ clib_bihash_16_8_t *bihash_table = pool_elt_at_index (
+ im->fp_ip4_lookup_hashes_pool, fp_spd->ip4_in_lookup_hash_idx);
+
+ clib_bihash_free_16_8 (bihash_table);
+ vec_free (fp_spd->name4_in);
+ pool_put_index (im->fp_ip4_lookup_hashes_pool,
+ fp_spd->ip4_in_lookup_hash_idx);
+ }
+ }
+
+ if (im->fp_spd_ipv6_out_is_enabled)
+ {
+ if (fp_spd->ip6_out_lookup_hash_idx != INDEX_INVALID)
+ {
+ clib_bihash_40_8_t *bihash_table =
+ pool_elt_at_index (im->fp_ip6_lookup_hashes_pool,
+ fp_spd->ip6_out_lookup_hash_idx);
- if (im->ipv6_fp_spd_is_enabled)
+ clib_bihash_free_40_8 (bihash_table);
+ vec_free (fp_spd->name6_out);
+ pool_put_index (im->fp_ip6_lookup_hashes_pool,
+ fp_spd->ip6_out_lookup_hash_idx);
+ }
+ }
+ if (im->fp_spd_ipv6_in_is_enabled)
{
- ipsec_spd_fp_t *fp_spd = &spd->fp_spd;
+ if (fp_spd->ip6_in_lookup_hash_idx != INDEX_INVALID)
+ {
+ clib_bihash_40_8_t *bihash_table = pool_elt_at_index (
+ im->fp_ip6_lookup_hashes_pool, fp_spd->ip6_in_lookup_hash_idx);
- clib_bihash_free_40_8 (&fp_spd->fp_ip6_lookup_hash);
+ clib_bihash_free_40_8 (bihash_table);
+ vec_free (fp_spd->name6_in);
+ pool_put_index (im->fp_ip6_lookup_hashes_pool,
+ fp_spd->ip6_in_lookup_hash_idx);
+ }
}
pool_put (im->spds, spd);
@@ -69,24 +117,85 @@ ipsec_add_del_spd (vlib_main_t * vm, u32 spd_id, int is_add)
spd_index = spd - im->spds;
spd->id = spd_id;
hash_set (im->spd_index_by_spd_id, spd_id, spd_index);
- if (im->ipv4_fp_spd_is_enabled)
+
+ fp_spd = &spd->fp_spd;
+ fp_spd->ip4_out_lookup_hash_idx = INDEX_INVALID;
+ fp_spd->ip4_in_lookup_hash_idx = INDEX_INVALID;
+ fp_spd->ip6_out_lookup_hash_idx = INDEX_INVALID;
+ fp_spd->ip6_in_lookup_hash_idx = INDEX_INVALID;
+
+ if (im->fp_spd_ipv4_out_is_enabled)
{
- ipsec_spd_fp_t *fp_spd = &spd->fp_spd;
+ if (pool_elts (im->fp_ip4_lookup_hashes_pool) <
+ pool_max_len (im->fp_ip4_lookup_hashes_pool))
+ {
+ clib_bihash_16_8_t *bihash_table;
+ fp_spd->name4_out = format (0, "spd_%u_fp_ip4_out", spd_id);
+
+ pool_get (im->fp_ip4_lookup_hashes_pool, bihash_table);
+ fp_spd->ip4_out_lookup_hash_idx =
+ bihash_table - im->fp_ip4_lookup_hashes_pool;
+ clib_bihash_init_16_8 (bihash_table, (char *) fp_spd->name4_out,
+ im->fp_lookup_hash_buckets,
+ im->fp_lookup_hash_buckets *
+ IPSEC_FP_IP4_HASH_MEM_PER_BUCKET);
+ }
+ }
+
+ if (im->fp_spd_ipv4_in_is_enabled)
+ {
+ if (pool_elts (im->fp_ip4_lookup_hashes_pool) <
+ pool_max_len (im->fp_ip4_lookup_hashes_pool))
+ {
+ clib_bihash_16_8_t *bihash_table;
+ fp_spd->name4_in = format (0, "spd_%u_fp_ip4_in", spd_id);
+
+ pool_get (im->fp_ip4_lookup_hashes_pool, bihash_table);
+ fp_spd->ip4_in_lookup_hash_idx =
+ bihash_table - im->fp_ip4_lookup_hashes_pool;
+ clib_bihash_init_16_8 (bihash_table, (char *) fp_spd->name4_in,
+ im->fp_lookup_hash_buckets,
+ im->fp_lookup_hash_buckets *
+ IPSEC_FP_IP4_HASH_MEM_PER_BUCKET);
+ }
+ }
+ if (im->fp_spd_ipv6_out_is_enabled)
+ {
+ if (pool_elts (im->fp_ip6_lookup_hashes_pool) <
+ pool_max_len (im->fp_ip6_lookup_hashes_pool))
+ {
+ clib_bihash_40_8_t *bihash_table;
+ ipsec_spd_fp_t *fp_spd = &spd->fp_spd;
+
+ fp_spd->name6_out = format (0, "spd_%u_fp_ip6_out", spd_id);
- clib_bihash_init_16_8 (
- &fp_spd->fp_ip4_lookup_hash, "SPD_FP ip4 rules lookup bihash",
- im->fp_lookup_hash_buckets,
- im->fp_lookup_hash_buckets * IPSEC_FP_IP4_HASH_MEM_PER_BUCKET);
+ pool_get (im->fp_ip6_lookup_hashes_pool, bihash_table);
+ fp_spd->ip6_out_lookup_hash_idx =
+ bihash_table - im->fp_ip6_lookup_hashes_pool;
+ clib_bihash_init_40_8 (bihash_table, (char *) fp_spd->name6_out,
+ im->fp_lookup_hash_buckets,
+ im->fp_lookup_hash_buckets *
+ IPSEC_FP_IP6_HASH_MEM_PER_BUCKET);
+ }
}
- if (im->ipv6_fp_spd_is_enabled)
+ if (im->fp_spd_ipv6_in_is_enabled)
{
- ipsec_spd_fp_t *fp_spd = &spd->fp_spd;
+ if (pool_elts (im->fp_ip6_lookup_hashes_pool) <
+ pool_max_len (im->fp_ip6_lookup_hashes_pool))
+ {
+ clib_bihash_40_8_t *bihash_table;
+ ipsec_spd_fp_t *fp_spd = &spd->fp_spd;
- clib_bihash_init_40_8 (
- &fp_spd->fp_ip6_lookup_hash, "SPD_FP ip6 rules lookup bihash",
- im->fp_lookup_hash_buckets,
- im->fp_lookup_hash_buckets * IPSEC_FP_IP6_HASH_MEM_PER_BUCKET);
- fp_spd->fp_ip6_lookup_hash_initialized = 1;
+ fp_spd->name6_in = format (0, "spd_%u_fp_ip6_in", spd_id);
+ pool_get (im->fp_ip6_lookup_hashes_pool, bihash_table);
+ fp_spd->ip6_in_lookup_hash_idx =
+ bihash_table - im->fp_ip6_lookup_hashes_pool;
+ clib_bihash_init_40_8 (bihash_table, (char *) fp_spd->name6_in,
+ im->fp_lookup_hash_buckets,
+ im->fp_lookup_hash_buckets *
+ IPSEC_FP_IP6_HASH_MEM_PER_BUCKET);
+ }
}
}
return 0;
diff --git a/src/vnet/ipsec/ipsec_spd.h b/src/vnet/ipsec/ipsec_spd.h
index 887ae99c101..3a4fd0ec91c 100644
--- a/src/vnet/ipsec/ipsec_spd.h
+++ b/src/vnet/ipsec/ipsec_spd.h
@@ -42,20 +42,31 @@ typedef enum ipsec_spd_policy_t_
extern u8 *format_ipsec_policy_type (u8 * s, va_list * args);
+typedef struct
+{
+ /* index in the mask types pool */
+ u32 mask_type_idx;
+ /* counts references correspond to given mask type index */
+ u32 refcount;
+} ipsec_fp_mask_id_t;
+
/**
* @brief A fast path Security Policy Database
*/
typedef struct
{
- /** vectors for each of the policy types */
+ /** vectors for each of the fast path policy types */
u32 *fp_policies[IPSEC_SPD_POLICY_N_TYPES];
- u32 *fp_mask_types[IPSEC_SPD_POLICY_N_TYPES];
-
- clib_bihash_40_8_t fp_ip6_lookup_hash; /* spd fp ip6 lookup hash table. */
- clib_bihash_16_8_t fp_ip4_lookup_hash; /* spd fp ip4 lookup hash table. */
-
- u8 fp_ip6_lookup_hash_initialized;
-
+ ipsec_fp_mask_id_t *fp_mask_ids[IPSEC_SPD_POLICY_N_TYPES];
+ /* names of bihash tables */
+ u8 *name4_out;
+ u8 *name4_in;
+ u8 *name6_out;
+ u8 *name6_in;
+ u32 ip6_out_lookup_hash_idx; /* fp ip6 lookup hash out index in the pool */
+ u32 ip4_out_lookup_hash_idx; /* fp ip4 lookup hash out index in the pool */
+ u32 ip6_in_lookup_hash_idx; /* fp ip6 lookup hash in index in the pool */
+ u32 ip4_in_lookup_hash_idx; /* fp ip4 lookup hash in index in the pool */
} ipsec_spd_fp_t;
/**
@@ -67,7 +78,6 @@ typedef struct
u32 id;
/** vectors for each of the policy types */
u32 *policies[IPSEC_SPD_POLICY_N_TYPES];
- /* TODO remove fp_spd. Use directly ipsec_spd_t for fast path */
ipsec_spd_fp_t fp_spd;
} ipsec_spd_t;
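
With this change an SPD no longer embeds its bihash tables; it only stores per-direction indices into the shared pools held in ipsec_main. A valid index is resolved with pool_elt_at_index, e.g. for the ipv4 inbound table (assuming the index has already been checked against INDEX_INVALID):

  clib_bihash_16_8_t *t = pool_elt_at_index (
    im->fp_ip4_lookup_hashes_pool, spd->fp_spd.ip4_in_lookup_hash_idx);
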
diff --git a/src/vnet/ipsec/ipsec_spd_fp_lookup.h b/src/vnet/ipsec/ipsec_spd_fp_lookup.h
index 3aea86f70a0..e4ef194d68d 100644
--- a/src/vnet/ipsec/ipsec_spd_fp_lookup.h
+++ b/src/vnet/ipsec/ipsec_spd_fp_lookup.h
@@ -20,18 +20,6 @@
#include <vnet/ipsec/ipsec.h>
-/**
- * @brief function handler to perform lookup in fastpath SPD
- * for inbound traffic burst of n packets
- **/
-
-inline u32
-ipsec_fp_in_policy_match_n (void *spd_fp, u8 is_ipv6,
- ipsec_fp_5tuple_t *tuples,
- ipsec_policy_t **policies, u32 *policy_ids, u32 n)
-{
- return 0;
-}
static_always_inline int
single_rule_match_5tuple (ipsec_policy_t *policy, ipsec_fp_5tuple_t *match)
@@ -106,6 +94,164 @@ single_rule_match_5tuple (ipsec_policy_t *policy, ipsec_fp_5tuple_t *match)
return (1);
}
+static_always_inline int
+single_rule_in_match_5tuple (ipsec_policy_t *policy, ipsec_fp_5tuple_t *match)
+{
+
+ u32 sa = clib_net_to_host_u32 (match->laddr.as_u32);
+ u32 da = clib_net_to_host_u32 (match->raddr.as_u32);
+
+ if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
+ {
+ ipsec_sa_t *s = ipsec_sa_get (policy->sa_index);
+
+ if (match->spi != s->spi)
+ return (0);
+
+ if (ipsec_sa_is_set_IS_TUNNEL (s))
+ {
+ if (da != clib_net_to_host_u32 (s->tunnel.t_dst.ip.ip4.as_u32))
+ return (0);
+
+ if (sa != clib_net_to_host_u32 (s->tunnel.t_src.ip.ip4.as_u32))
+ return (0);
+ }
+ }
+ else
+ {
+ if (da < clib_net_to_host_u32 (policy->raddr.start.ip4.as_u32))
+ return (0);
+
+ if (da > clib_net_to_host_u32 (policy->raddr.stop.ip4.as_u32))
+ return (0);
+
+ if (sa < clib_net_to_host_u32 (policy->laddr.start.ip4.as_u32))
+ return (0);
+
+ if (sa > clib_net_to_host_u32 (policy->laddr.stop.ip4.as_u32))
+ return (0);
+ }
+ return (1);
+}
+
+static_always_inline u32
+ipsec_fp_in_ip6_policy_match_n (void *spd_fp, ipsec_fp_5tuple_t *tuples,
+ ipsec_policy_t **policies, u32 n)
+{
+ return 0;
+}
+
+static_always_inline u32
+ipsec_fp_in_ip4_policy_match_n (void *spd_fp, ipsec_fp_5tuple_t *tuples,
+ ipsec_policy_t **policies, u32 n)
+
+{
+ u32 last_priority[n];
+ u32 i = 0;
+ u32 counter = 0;
+ ipsec_fp_mask_type_entry_t *mte;
+ ipsec_fp_mask_id_t *mti;
+ ipsec_fp_5tuple_t *match = tuples;
+ ipsec_policy_t *policy;
+ u32 n_left = n;
+ clib_bihash_kv_16_8_t kv;
+ /* result of the lookup */
+ clib_bihash_kv_16_8_t result;
+ ipsec_fp_lookup_value_t *result_val =
+ (ipsec_fp_lookup_value_t *) &result.value;
+ u64 *pkey, *pmatch, *pmask;
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_spd_fp_t *pspd_fp = (ipsec_spd_fp_t *) spd_fp;
+ ipsec_fp_mask_id_t *mask_type_ids = pspd_fp->fp_mask_ids[match->action];
+ clib_bihash_16_8_t *bihash_table = pool_elt_at_index (
+ im->fp_ip4_lookup_hashes_pool, pspd_fp->ip4_in_lookup_hash_idx);
+
+ /* clear the list of matched policies pointers */
+ clib_memset (policies, 0, n * sizeof (*policies));
+ clib_memset (last_priority, 0, n * sizeof (u32));
+ n_left = n;
+ while (n_left)
+ {
+ vec_foreach (mti, mask_type_ids)
+ {
+ mte = im->fp_mask_types + mti->mask_type_idx;
+ if (mte->mask.action == 0)
+ continue;
+ pmatch = (u64 *) match->kv_16_8.key;
+ pmask = (u64 *) mte->mask.kv_16_8.key;
+ pkey = (u64 *) kv.key;
+
+ *pkey++ = *pmatch++ & *pmask++;
+ *pkey = *pmatch & *pmask;
+
+ int res =
+ clib_bihash_search_inline_2_16_8 (bihash_table, &kv, &result);
+ /* lookup the hash by each packet in the burst for this mask. */
+
+ if (res == 0)
+ {
+ /* There is a hit in the hash table. */
+ /* Find the policy with highest priority. */
+ /* Store the lookup results in a dedicated array. */
+
+ if (vec_len (result_val->fp_policies_ids) > 1)
+ {
+ u32 *policy_id;
+ vec_foreach (policy_id, result_val->fp_policies_ids)
+ {
+ policy = im->policies + *policy_id;
+
+ if ((last_priority[i] < policy->priority) &&
+ (single_rule_in_match_5tuple (policy, match)))
+ {
+ last_priority[i] = policy->priority;
+ if (policies[i] == 0)
+ counter++;
+ policies[i] = policy;
+ }
+ }
+ }
+ else
+ {
+ u32 *policy_id;
+ ASSERT (vec_len (result_val->fp_policies_ids) == 1);
+ policy_id = result_val->fp_policies_ids;
+ policy = im->policies + *policy_id;
+ if ((last_priority[i] < policy->priority) &&
+ (single_rule_in_match_5tuple (policy, match)))
+ {
+ last_priority[i] = policy->priority;
+ if (policies[i] == 0)
+ counter++;
+ policies[i] = policy;
+ }
+ }
+ }
+ }
+
+ i++;
+ n_left--;
+ match++;
+ }
+ return counter;
+}
+
+/**
+ * @brief function handler to perform lookup in fastpath SPD
+ * for inbound traffic burst of n packets
+ **/
+
+static_always_inline u32
+ipsec_fp_in_policy_match_n (void *spd_fp, u8 is_ipv6,
+ ipsec_fp_5tuple_t *tuples,
+ ipsec_policy_t **policies, u32 n)
+{
+ if (is_ipv6)
+ return ipsec_fp_in_ip6_policy_match_n (spd_fp, tuples, policies, n);
+ else
+ return ipsec_fp_in_ip4_policy_match_n (spd_fp, tuples, policies, n);
+}
+
static_always_inline u32
ipsec_fp_ip6_out_policy_match_n (void *spd_fp, ipsec_fp_5tuple_t *tuples,
ipsec_policy_t **policies, u32 *ids, u32 n)
@@ -115,7 +261,7 @@ ipsec_fp_ip6_out_policy_match_n (void *spd_fp, ipsec_fp_5tuple_t *tuples,
u32 i = 0;
u32 counter = 0;
ipsec_fp_mask_type_entry_t *mte;
- u32 *mti;
+ ipsec_fp_mask_id_t *mti;
ipsec_fp_5tuple_t *match = tuples;
ipsec_policy_t *policy;
@@ -128,7 +274,10 @@ ipsec_fp_ip6_out_policy_match_n (void *spd_fp, ipsec_fp_5tuple_t *tuples,
u64 *pkey, *pmatch, *pmask;
ipsec_main_t *im = &ipsec_main;
ipsec_spd_fp_t *pspd_fp = (ipsec_spd_fp_t *) spd_fp;
- u32 *mask_type_ids = pspd_fp->fp_mask_types[IPSEC_SPD_POLICY_IP6_OUTBOUND];
+ ipsec_fp_mask_id_t *mask_type_ids =
+ pspd_fp->fp_mask_ids[IPSEC_SPD_POLICY_IP6_OUTBOUND];
+ clib_bihash_40_8_t *bihash_table = pool_elt_at_index (
+ im->fp_ip6_lookup_hashes_pool, pspd_fp->ip6_out_lookup_hash_idx);
/*clear the list of matched policies pointers */
clib_memset (policies, 0, n * sizeof (*policies));
@@ -138,7 +287,7 @@ ipsec_fp_ip6_out_policy_match_n (void *spd_fp, ipsec_fp_5tuple_t *tuples,
{
vec_foreach (mti, mask_type_ids)
{
- mte = im->fp_mask_types + *mti;
+ mte = im->fp_mask_types + mti->mask_type_idx;
pmatch = (u64 *) match->kv_40_8.key;
pmask = (u64 *) mte->mask.kv_40_8.key;
@@ -150,8 +299,8 @@ ipsec_fp_ip6_out_policy_match_n (void *spd_fp, ipsec_fp_5tuple_t *tuples,
*pkey++ = *pmatch++ & *pmask++;
*pkey = *pmatch & *pmask;
- int res = clib_bihash_search_inline_2_40_8 (
- &pspd_fp->fp_ip6_lookup_hash, &kv, &result);
+ int res =
+ clib_bihash_search_inline_2_40_8 (bihash_table, &kv, &result);
/* lookup the hash by each packet in the burst for this mask. */
if (res == 0)
@@ -216,7 +365,7 @@ ipsec_fp_ip4_out_policy_match_n (void *spd_fp, ipsec_fp_5tuple_t *tuples,
u32 i = 0;
u32 counter = 0;
ipsec_fp_mask_type_entry_t *mte;
- u32 *mti;
+ ipsec_fp_mask_id_t *mti;
ipsec_fp_5tuple_t *match = tuples;
ipsec_policy_t *policy;
@@ -229,7 +378,10 @@ ipsec_fp_ip4_out_policy_match_n (void *spd_fp, ipsec_fp_5tuple_t *tuples,
u64 *pkey, *pmatch, *pmask;
ipsec_main_t *im = &ipsec_main;
ipsec_spd_fp_t *pspd_fp = (ipsec_spd_fp_t *) spd_fp;
- u32 *mask_type_ids = pspd_fp->fp_mask_types[IPSEC_SPD_POLICY_IP4_OUTBOUND];
+ ipsec_fp_mask_id_t *mask_type_ids =
+ pspd_fp->fp_mask_ids[IPSEC_SPD_POLICY_IP4_OUTBOUND];
+ clib_bihash_16_8_t *bihash_table = pool_elt_at_index (
+ im->fp_ip4_lookup_hashes_pool, pspd_fp->ip4_out_lookup_hash_idx);
/* clear the list of matched policies pointers */
clib_memset (policies, 0, n * sizeof (*policies));
@@ -239,7 +391,9 @@ ipsec_fp_ip4_out_policy_match_n (void *spd_fp, ipsec_fp_5tuple_t *tuples,
{
vec_foreach (mti, mask_type_ids)
{
- mte = im->fp_mask_types + *mti;
+ mte = im->fp_mask_types + mti->mask_type_idx;
+ if (mte->mask.action != 0)
+ continue;
pmatch = (u64 *) match->kv_16_8.key;
pmask = (u64 *) mte->mask.kv_16_8.key;
@@ -248,8 +402,8 @@ ipsec_fp_ip4_out_policy_match_n (void *spd_fp, ipsec_fp_5tuple_t *tuples,
*pkey++ = *pmatch++ & *pmask++;
*pkey = *pmatch & *pmask;
- int res = clib_bihash_search_inline_2_16_8 (
- &pspd_fp->fp_ip4_lookup_hash, &kv, &result);
+ int res =
+ clib_bihash_search_inline_2_16_8 (bihash_table, &kv, &result);
/* lookup the hash by each packet in the burst for this mask. */
if (res == 0)
diff --git a/src/vnet/ipsec/ipsec_spd_policy.c b/src/vnet/ipsec/ipsec_spd_policy.c
index 1334491b228..1d698d53e07 100644
--- a/src/vnet/ipsec/ipsec_spd_policy.c
+++ b/src/vnet/ipsec/ipsec_spd_policy.c
@@ -80,6 +80,17 @@ ipsec_policy_mk_type (bool is_outbound,
return (-1);
}
+static_always_inline int
+ipsec_is_policy_inbound (ipsec_policy_t *policy)
+{
+ if (policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
+ policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
+ policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD)
+ return 1;
+
+ return 0;
+}
+
int
ipsec_add_del_policy (vlib_main_t * vm,
ipsec_policy_t * policy, int is_add, u32 * stat_index)
@@ -167,9 +178,19 @@ ipsec_add_del_policy (vlib_main_t * vm,
* Try adding the policy into fast path SPD first. Only adding to
* traditional SPD when failed.
**/
- if ((im->ipv4_fp_spd_is_enabled &&
+ if ((im->fp_spd_ipv4_out_is_enabled &&
+ PREDICT_TRUE (INDEX_INVALID !=
+ spd->fp_spd.ip4_out_lookup_hash_idx) &&
policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND) ||
- (im->ipv6_fp_spd_is_enabled &&
+ (im->fp_spd_ipv4_in_is_enabled &&
+ PREDICT_TRUE (INDEX_INVALID !=
+ spd->fp_spd.ip4_in_lookup_hash_idx) &&
+ (policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
+ policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
+ policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD)) ||
+ (im->fp_spd_ipv6_out_is_enabled &&
+ PREDICT_TRUE (INDEX_INVALID !=
+ spd->fp_spd.ip6_out_lookup_hash_idx) &&
policy->type == IPSEC_SPD_POLICY_IP6_OUTBOUND))
return ipsec_fp_add_del_policy ((void *) &spd->fp_spd, policy, 1,
stat_index);
@@ -195,12 +216,36 @@ ipsec_add_del_policy (vlib_main_t * vm,
* traditional SPD when fp delete fails.
**/
- if ((im->ipv4_fp_spd_is_enabled &&
+ if ((im->fp_spd_ipv4_out_is_enabled &&
+ PREDICT_TRUE (INDEX_INVALID !=
+ spd->fp_spd.ip4_out_lookup_hash_idx) &&
policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND) ||
- (im->ipv6_fp_spd_is_enabled &&
+ (im->fp_spd_ipv4_in_is_enabled &&
+ PREDICT_TRUE (INDEX_INVALID !=
+ spd->fp_spd.ip4_in_lookup_hash_idx) &&
+ (policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
+ policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
+ policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD)) ||
+ (im->fp_spd_ipv6_out_is_enabled &&
+ PREDICT_TRUE (INDEX_INVALID !=
+ spd->fp_spd.ip6_out_lookup_hash_idx) &&
policy->type == IPSEC_SPD_POLICY_IP6_OUTBOUND))
- return ipsec_fp_add_del_policy ((void *) &spd->fp_spd, policy, 0,
- stat_index);
+ {
+ if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
+ {
+ index_t sa_index = ipsec_sa_find_and_lock (policy->sa_id);
+
+ if (INDEX_INVALID == sa_index)
+ return VNET_API_ERROR_SYSCALL_ERROR_1;
+ policy->sa_index = sa_index;
+ ipsec_sa_unlock_id (policy->sa_id);
+ }
+ else
+ policy->sa_index = INDEX_INVALID;
+
+ return ipsec_fp_add_del_policy ((void *) &spd->fp_spd, policy, 0,
+ stat_index);
+ }
vec_foreach_index (ii, (spd->policies[policy->type]))
{
@@ -220,7 +265,7 @@ ipsec_add_del_policy (vlib_main_t * vm,
}
static_always_inline void
-release_mask_type_index (ipsec_main_t *im, u32 mask_type_index)
+ipsec_fp_release_mask_type (ipsec_main_t *im, u32 mask_type_index)
{
ipsec_fp_mask_type_entry_t *mte =
pool_elt_at_index (im->fp_mask_types, mask_type_index);
@@ -281,24 +326,24 @@ fill_ip4_hash_policy_kv (ipsec_fp_5tuple_t *match, ipsec_fp_5tuple_t *mask,
}
static_always_inline u16
-get_highest_set_bit_u16 (u16 x)
+mask_out_highest_set_bit_u16 (u16 x)
{
x |= x >> 8;
x |= x >> 4;
x |= x >> 2;
x |= x >> 1;
- return x ^= x >> 1;
+ return ~x;
}
static_always_inline u32
-get_highest_set_bit_u32 (u32 x)
+mask_out_highest_set_bit_u32 (u32 x)
{
x |= x >> 16;
x |= x >> 8;
x |= x >> 4;
x |= x >> 2;
x |= x >> 1;
- return x ^= x >> 1;
+ return ~x;
}
static_always_inline u64
@@ -324,11 +369,9 @@ ipsec_fp_get_policy_ports_mask (ipsec_policy_t *policy,
mask->lport = policy->lport.start ^ policy->lport.stop;
mask->rport = policy->rport.start ^ policy->rport.stop;
- mask->lport = get_highest_set_bit_u16 (mask->lport);
- mask->lport = ~(mask->lport - 1) & (~mask->lport);
+ mask->lport = mask_out_highest_set_bit_u16 (mask->lport);
- mask->rport = get_highest_set_bit_u16 (mask->rport);
- mask->rport = ~(mask->rport - 1) & (~mask->rport);
+ mask->rport = mask_out_highest_set_bit_u16 (mask->rport);
}
else
{
@@ -337,10 +380,12 @@ ipsec_fp_get_policy_ports_mask (ipsec_policy_t *policy,
}
mask->protocol = (policy->protocol == IPSEC_POLICY_PROTOCOL_ANY) ? 0 : ~0;
+ mask->action = 0;
}
static_always_inline void
-ipsec_fp_ip4_get_policy_mask (ipsec_policy_t *policy, ipsec_fp_5tuple_t *mask)
+ipsec_fp_ip4_get_policy_mask (ipsec_policy_t *policy, ipsec_fp_5tuple_t *mask,
+ bool inbound)
{
u32 *pladdr_start = (u32 *) &policy->laddr.start.ip4;
u32 *pladdr_stop = (u32 *) &policy->laddr.stop.ip4;
@@ -360,32 +405,24 @@ ipsec_fp_ip4_get_policy_mask (ipsec_policy_t *policy, ipsec_fp_5tuple_t *mask)
* the bit itself. Remember that policy stores start and stop in the net
* order.
*/
- *plmask = get_highest_set_bit_u32 (clib_net_to_host_u32 (*plmask));
- *plmask = clib_host_to_net_u32 (~(*plmask - 1) & (~*plmask));
+ *plmask = clib_host_to_net_u32 (
+ mask_out_highest_set_bit_u32 (clib_net_to_host_u32 (*plmask)));
- *prmask = get_highest_set_bit_u32 (clib_net_to_host_u32 (*prmask));
- *prmask = clib_host_to_net_u32 (~(*prmask - 1) & (~*prmask));
+ *prmask = clib_host_to_net_u32 (
+ mask_out_highest_set_bit_u32 (clib_net_to_host_u32 (*prmask)));
- if (PREDICT_TRUE ((policy->protocol == IP_PROTOCOL_TCP) ||
- (policy->protocol == IP_PROTOCOL_UDP) ||
- (policy->protocol == IP_PROTOCOL_SCTP)))
+ if (inbound)
{
- mask->lport = policy->lport.start ^ policy->lport.stop;
- mask->rport = policy->rport.start ^ policy->rport.stop;
+ if (policy->type != IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT)
+ mask->spi = 0;
- mask->lport = get_highest_set_bit_u16 (mask->lport);
- mask->lport = ~(mask->lport - 1) & (~mask->lport);
-
- mask->rport = get_highest_set_bit_u16 (mask->rport);
- mask->rport = ~(mask->rport - 1) & (~mask->rport);
+ mask->protocol = 0;
}
else
{
- mask->lport = 0;
- mask->rport = 0;
+ mask->action = 0;
+ ipsec_fp_get_policy_ports_mask (policy, mask);
}
-
- mask->protocol = (policy->protocol == IPSEC_POLICY_PROTOCOL_ANY) ? 0 : ~0;
}
static_always_inline void
@@ -437,7 +474,8 @@ ipsec_fp_ip6_get_policy_mask (ipsec_policy_t *policy, ipsec_fp_5tuple_t *mask)
}
static_always_inline void
-ipsec_fp_get_policy_5tuple (ipsec_policy_t *policy, ipsec_fp_5tuple_t *tuple)
+ipsec_fp_get_policy_5tuple (ipsec_policy_t *policy, ipsec_fp_5tuple_t *tuple,
+ bool inbound)
{
memset (tuple, 0, sizeof (*tuple));
tuple->is_ipv6 = policy->is_ipv6;
@@ -452,17 +490,39 @@ ipsec_fp_get_policy_5tuple (ipsec_policy_t *policy, ipsec_fp_5tuple_t *tuple)
tuple->raddr = policy->raddr.start.ip4;
}
+ if (inbound)
+ {
+
+ if ((policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
+ policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT) &&
+ policy->sa_index != INDEX_INVALID)
+ {
+ ipsec_sa_t *s = ipsec_sa_get (policy->sa_index);
+ tuple->spi = s->spi;
+ }
+ else
+ tuple->spi = INDEX_INVALID;
+ tuple->action = policy->type;
+ return;
+ }
+
tuple->protocol = policy->protocol;
tuple->lport = policy->lport.start;
tuple->rport = policy->rport.start;
}
+static_always_inline int
+ipsec_fp_mask_type_idx_cmp (ipsec_fp_mask_id_t *mask_id, u32 *idx)
+{
+ return mask_id->mask_type_idx == *idx;
+}
+
int
ipsec_fp_ip4_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
ipsec_policy_t *policy, u32 *stat_index)
{
- u32 mask_index;
+ u32 mask_index, searched_idx;
ipsec_policy_t *vp;
ipsec_fp_mask_type_entry_t *mte;
u32 policy_index;
@@ -474,8 +534,14 @@ ipsec_fp_ip4_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
ipsec_fp_5tuple_t mask, policy_5tuple;
int res;
-
- ipsec_fp_ip4_get_policy_mask (policy, &mask);
+ bool inbound = ipsec_is_policy_inbound (policy);
+ clib_bihash_16_8_t *bihash_table =
+ inbound ? pool_elt_at_index (im->fp_ip4_lookup_hashes_pool,
+ fp_spd->ip4_in_lookup_hash_idx) :
+ pool_elt_at_index (im->fp_ip4_lookup_hashes_pool,
+ fp_spd->ip4_out_lookup_hash_idx);
+
+ ipsec_fp_ip4_get_policy_mask (policy, &mask, inbound);
pool_get (im->policies, vp);
policy_index = vp - im->policies;
vlib_validate_combined_counter (&ipsec_spd_policy_counters, policy_index);
@@ -494,17 +560,17 @@ ipsec_fp_ip4_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
mte = im->fp_mask_types + mask_index;
policy->fp_mask_type_id = mask_index;
- ipsec_fp_get_policy_5tuple (policy, &policy_5tuple);
+ ipsec_fp_get_policy_5tuple (policy, &policy_5tuple, inbound);
fill_ip4_hash_policy_kv (&policy_5tuple, &mask, &kv);
- res = clib_bihash_search_inline_2_16_8 (&fp_spd->fp_ip4_lookup_hash, &kv,
- &result);
+ res = clib_bihash_search_inline_2_16_8 (bihash_table, &kv, &result);
if (res != 0)
{
/* key was not found, create a new entry */
vec_add1 (key_val->fp_policies_ids, policy_index);
- res = clib_bihash_add_del_16_8 (&fp_spd->fp_ip4_lookup_hash, &kv, 1);
+ res = clib_bihash_add_del_16_8 (bihash_table, &kv, 1);
+
if (res != 0)
goto error;
}
@@ -521,8 +587,7 @@ ipsec_fp_ip4_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
{
vec_add1 (result_val->fp_policies_ids, policy_index);
- res =
- clib_bihash_add_del_16_8 (&fp_spd->fp_ip4_lookup_hash, &result, 1);
+ res = clib_bihash_add_del_16_8 (bihash_table, &result, 1);
if (res != 0)
goto error;
@@ -533,9 +598,19 @@ ipsec_fp_ip4_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
{
clib_memcpy (&mte->mask, &mask, sizeof (mask));
mte->refcount = 0;
- vec_add1 (fp_spd->fp_mask_types[policy->type], mask_index);
}
+ searched_idx =
+ vec_search_with_function (fp_spd->fp_mask_ids[policy->type], &mask_index,
+ ipsec_fp_mask_type_idx_cmp);
+ if (~0 == searched_idx)
+ {
+ ipsec_fp_mask_id_t mask_id = { mask_index, 1 };
+ vec_add1 (fp_spd->fp_mask_ids[policy->type], mask_id);
+ }
+ else
+ (fp_spd->fp_mask_ids[policy->type] + searched_idx)->refcount++;
+
mte->refcount++;
vec_add1 (fp_spd->fp_policies[policy->type], policy_index);
clib_memcpy (vp, policy, sizeof (*vp));
@@ -544,7 +619,7 @@ ipsec_fp_ip4_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
error:
pool_put (im->policies, vp);
- release_mask_type_index (im, mask_index);
+ ipsec_fp_release_mask_type (im, mask_index);
return -1;
}
@@ -553,7 +628,7 @@ ipsec_fp_ip6_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
ipsec_policy_t *policy, u32 *stat_index)
{
- u32 mask_index;
+ u32 mask_index, searched_idx;
ipsec_policy_t *vp;
ipsec_fp_mask_type_entry_t *mte;
u32 policy_index;
@@ -565,14 +640,20 @@ ipsec_fp_ip6_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
ipsec_fp_5tuple_t mask, policy_5tuple;
int res;
- ipsec_fp_ip6_get_policy_mask (policy, &mask);
+ bool inbound = ipsec_is_policy_inbound (policy);
+ ipsec_fp_ip6_get_policy_mask (policy, &mask);
pool_get (im->policies, vp);
policy_index = vp - im->policies;
vlib_validate_combined_counter (&ipsec_spd_policy_counters, policy_index);
vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);
*stat_index = policy_index;
mask_index = find_mask_type_index (im, &mask);
+ clib_bihash_40_8_t *bihash_table =
+ inbound ? pool_elt_at_index (im->fp_ip6_lookup_hashes_pool,
+ fp_spd->ip6_in_lookup_hash_idx) :
+ pool_elt_at_index (im->fp_ip6_lookup_hashes_pool,
+ fp_spd->ip6_out_lookup_hash_idx);
if (mask_index == ~0)
{
@@ -585,17 +666,16 @@ ipsec_fp_ip6_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
mte = im->fp_mask_types + mask_index;
policy->fp_mask_type_id = mask_index;
- ipsec_fp_get_policy_5tuple (policy, &policy_5tuple);
+ ipsec_fp_get_policy_5tuple (policy, &policy_5tuple, inbound);
fill_ip6_hash_policy_kv (&policy_5tuple, &mask, &kv);
- res = clib_bihash_search_inline_2_40_8 (&fp_spd->fp_ip6_lookup_hash, &kv,
- &result);
+ res = clib_bihash_search_inline_2_40_8 (bihash_table, &kv, &result);
if (res != 0)
{
/* key was not found, create a new entry */
vec_add1 (key_val->fp_policies_ids, policy_index);
- res = clib_bihash_add_del_40_8 (&fp_spd->fp_ip6_lookup_hash, &kv, 1);
+ res = clib_bihash_add_del_40_8 (bihash_table, &kv, 1);
if (res != 0)
goto error;
}
@@ -612,8 +692,7 @@ ipsec_fp_ip6_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
{
vec_add1 (result_val->fp_policies_ids, policy_index);
- res =
- clib_bihash_add_del_40_8 (&fp_spd->fp_ip6_lookup_hash, &result, 1);
+ res = clib_bihash_add_del_40_8 (bihash_table, &result, 1);
if (res != 0)
goto error;
@@ -624,9 +703,19 @@ ipsec_fp_ip6_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
{
clib_memcpy (&mte->mask, &mask, sizeof (mask));
mte->refcount = 0;
- vec_add1 (fp_spd->fp_mask_types[policy->type], mask_index);
}
+ searched_idx =
+ vec_search_with_function (fp_spd->fp_mask_ids[policy->type], &mask_index,
+ ipsec_fp_mask_type_idx_cmp);
+ if (~0 == searched_idx)
+ {
+ ipsec_fp_mask_id_t mask_id = { mask_index, 1 };
+ vec_add1 (fp_spd->fp_mask_ids[policy->type], mask_id);
+ }
+ else
+ (fp_spd->fp_mask_ids[policy->type] + searched_idx)->refcount++;
+
mte->refcount++;
vec_add1 (fp_spd->fp_policies[policy->type], policy_index);
clib_memcpy (vp, policy, sizeof (*vp));
@@ -635,7 +724,7 @@ ipsec_fp_ip6_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
error:
pool_put (im->policies, vp);
- release_mask_type_index (im, mask_index);
+ ipsec_fp_release_mask_type (im, mask_index);
return -1;
}
@@ -649,15 +738,20 @@ ipsec_fp_ip6_del_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
clib_bihash_kv_40_8_t result;
ipsec_fp_lookup_value_t *result_val =
(ipsec_fp_lookup_value_t *) &result.value;
+ bool inbound = ipsec_is_policy_inbound (policy);
+ clib_bihash_40_8_t *bihash_table =
+ inbound ? pool_elt_at_index (im->fp_ip6_lookup_hashes_pool,
+ fp_spd->ip6_in_lookup_hash_idx) :
+ pool_elt_at_index (im->fp_ip6_lookup_hashes_pool,
+ fp_spd->ip6_out_lookup_hash_idx);
ipsec_policy_t *vp;
u32 ii, iii, imt;
ipsec_fp_ip6_get_policy_mask (policy, &mask);
- ipsec_fp_get_policy_5tuple (policy, &policy_5tuple);
+ ipsec_fp_get_policy_5tuple (policy, &policy_5tuple, inbound);
fill_ip6_hash_policy_kv (&policy_5tuple, &mask, &kv);
- res = clib_bihash_search_inline_2_40_8 (&fp_spd->fp_ip6_lookup_hash, &kv,
- &result);
+ res = clib_bihash_search_inline_2_40_8 (bihash_table, &kv, &result);
if (res != 0)
return -1;
@@ -676,8 +770,7 @@ ipsec_fp_ip6_del_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
if (vec_len (result_val->fp_policies_ids) == 1)
{
vec_free (result_val->fp_policies_ids);
- clib_bihash_add_del_40_8 (&fp_spd->fp_ip6_lookup_hash,
- &result, 0);
+ clib_bihash_add_del_40_8 (bihash_table, &result, 0);
}
else
{
@@ -685,17 +778,16 @@ ipsec_fp_ip6_del_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
}
vec_del1 (fp_spd->fp_policies[policy->type], iii);
- vec_foreach_index (imt, fp_spd->fp_mask_types[policy->type])
+ vec_foreach_index (imt, fp_spd->fp_mask_ids[policy->type])
{
- if (*(fp_spd->fp_mask_types[policy->type] + imt) ==
- vp->fp_mask_type_id)
+ if ((fp_spd->fp_mask_ids[policy->type] + imt)
+ ->mask_type_idx == vp->fp_mask_type_id)
{
- ipsec_fp_mask_type_entry_t *mte = pool_elt_at_index (
- im->fp_mask_types, vp->fp_mask_type_id);
- if (mte->refcount == 1)
- vec_del1 (fp_spd->fp_mask_types[policy->type],
- imt);
+ if ((fp_spd->fp_mask_ids[policy->type] + imt)
+ ->refcount-- == 1)
+ vec_del1 (fp_spd->fp_mask_ids[policy->type], imt);
+
break;
}
}
@@ -709,7 +801,7 @@ ipsec_fp_ip6_del_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
continue;
else
{
- release_mask_type_index (im, vp->fp_mask_type_id);
+ ipsec_fp_release_mask_type (im, vp->fp_mask_type_id);
ipsec_sa_unlock (vp->sa_index);
pool_put (im->policies, vp);
return 0;
@@ -729,15 +821,20 @@ ipsec_fp_ip4_del_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
clib_bihash_kv_16_8_t result;
ipsec_fp_lookup_value_t *result_val =
(ipsec_fp_lookup_value_t *) &result.value;
-
+ bool inbound = ipsec_is_policy_inbound (policy);
ipsec_policy_t *vp;
u32 ii, iii, imt;
-
- ipsec_fp_ip4_get_policy_mask (policy, &mask);
- ipsec_fp_get_policy_5tuple (policy, &policy_5tuple);
+ clib_bihash_16_8_t *bihash_table =
+ inbound ? pool_elt_at_index (im->fp_ip4_lookup_hashes_pool,
+ fp_spd->ip4_in_lookup_hash_idx) :
+ pool_elt_at_index (im->fp_ip4_lookup_hashes_pool,
+ fp_spd->ip4_out_lookup_hash_idx);
+
+ ipsec_fp_ip4_get_policy_mask (policy, &mask, inbound);
+ ipsec_fp_get_policy_5tuple (policy, &policy_5tuple, inbound);
fill_ip4_hash_policy_kv (&policy_5tuple, &mask, &kv);
- res = clib_bihash_search_inline_2_16_8 (&fp_spd->fp_ip4_lookup_hash, &kv,
- &result);
+ res = clib_bihash_search_inline_2_16_8 (bihash_table, &kv, &result);
+
if (res != 0)
return -1;
@@ -756,8 +853,7 @@ ipsec_fp_ip4_del_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
if (vec_len (result_val->fp_policies_ids) == 1)
{
vec_free (result_val->fp_policies_ids);
- clib_bihash_add_del_16_8 (&fp_spd->fp_ip4_lookup_hash,
- &result, 0);
+ clib_bihash_add_del_16_8 (bihash_table, &result, 0);
}
else
{
@@ -765,17 +861,16 @@ ipsec_fp_ip4_del_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
}
vec_del1 (fp_spd->fp_policies[policy->type], iii);
- vec_foreach_index (imt, fp_spd->fp_mask_types[policy->type])
+ vec_foreach_index (imt, fp_spd->fp_mask_ids[policy->type])
{
- if (*(fp_spd->fp_mask_types[policy->type] + imt) ==
- vp->fp_mask_type_id)
+ if ((fp_spd->fp_mask_ids[policy->type] + imt)
+ ->mask_type_idx == vp->fp_mask_type_id)
{
- ipsec_fp_mask_type_entry_t *mte = pool_elt_at_index (
- im->fp_mask_types, vp->fp_mask_type_id);
- if (mte->refcount == 1)
- vec_del1 (fp_spd->fp_mask_types[policy->type],
- imt);
+ if ((fp_spd->fp_mask_ids[policy->type] + imt)
+ ->refcount-- == 1)
+ vec_del1 (fp_spd->fp_mask_ids[policy->type], imt);
+
break;
}
}
@@ -789,7 +884,7 @@ ipsec_fp_ip4_del_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
continue;
else
{
- release_mask_type_index (im, vp->fp_mask_type_id);
+ ipsec_fp_release_mask_type (im, vp->fp_mask_type_id);
ipsec_sa_unlock (vp->sa_index);
pool_put (im->policies, vp);
return 0;
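
The renamed mask helpers above derive, from a policy's start/stop range, a mask that clears the highest bit in which start and stop differ and every bit below it, so all values in the range collapse onto a single bihash key; the exact range (or SPI/tunnel) check is still performed per candidate in single_rule_match_5tuple / single_rule_in_match_5tuple. A small worked example with illustrative port values:

  /* policy with lport.start = 1000, lport.stop = 1023 (illustrative values) */
  start ^ stop                        = 0x0017   /* bits that differ across the range */
  mask_out_highest_set_bit_u16 (0x17) = 0xffe0   /* clear differing bits and below    */
  1000 & 0xffe0 == 1023 & 0xffe0 == 992          /* whole range hashes to one key     */
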
diff --git a/src/vnet/ipsec/ipsec_spd_policy.h b/src/vnet/ipsec/ipsec_spd_policy.h
index 57985116c94..34f444efb9c 100644
--- a/src/vnet/ipsec/ipsec_spd_policy.h
+++ b/src/vnet/ipsec/ipsec_spd_policy.h
@@ -134,10 +134,17 @@ typedef union
ip6_address_t ip6_raddr;
};
};
-
- u16 lport;
- u16 rport;
- u16 protocol;
+ union
+ {
+ struct
+ {
+ u16 lport;
+ u16 rport;
+ };
+ u32 spi;
+ };
+ u8 protocol;
+ u8 action;
u16 is_ipv6;
};
/* for ipv6 */
diff --git a/test/test_ipsec_spd_fp_input.py b/test/test_ipsec_spd_fp_input.py
new file mode 100644
index 00000000000..199fbdf7c5d
--- /dev/null
+++ b/test/test_ipsec_spd_fp_input.py
@@ -0,0 +1,844 @@
+import socket
+import unittest
+import ipaddress
+
+from scapy.packet import Raw
+from scapy.layers.l2 import Ether
+from scapy.layers.inet import IP, UDP
+
+from util import ppp
+from framework import VppTestRunner
+from template_ipsec import IPSecIPv4Fwd
+from template_ipsec import IPSecIPv6Fwd
+from test_ipsec_esp import TemplateIpsecEsp
+import pdb
+
+
+def debug_signal_handler(signal, frame):
+ import pdb
+
+ pdb.set_trace()
+
+
+import signal
+
+signal.signal(signal.SIGINT, debug_signal_handler)
+
+
+class SpdFastPathInbound(IPSecIPv4Fwd):
+ # In test cases derived from this class, packets in IPv4 FWD path
+ # are configured to go through IPSec inbound SPD policy lookup.
+ # Note that order in which the rules are applied is
+ # PROTECT, BYPASS, DISCARD. Therefore BYPASS rules take
+ # precedence over DISCARD.
+ #
+ # Override setUpConstants to enable inbound fast path in config
+ @classmethod
+ def setUpConstants(cls):
+ super(SpdFastPathInbound, cls).setUpConstants()
+ cls.vpp_cmdline.extend(["ipsec", "{", "ipv4-inbound-spd-fast-path on", "}"])
+ cls.logger.info("VPP modified cmdline is %s" % " ".join(cls.vpp_cmdline))
+
+ @classmethod
+ def create_enc_stream(self, src_if, dst_if, pkt_count, src_prt=1234, dst_prt=5678):
+ packets = []
+ params = self.params[socket.AF_INET]
+ for i in range(pkt_count):
+ # create packet info stored in the test case instance
+ info = self.create_packet_info(src_if, dst_if)
+ # convert the info into packet payload
+ payload = self.info_to_payload(info)
+ # create the packet itself
+ p = Ether(
+ src=self.tra_if.remote_mac, dst=self.tra_if.local_mac
+ ) / params.scapy_tra_sa.encrypt(
+ IP(src=self.tra_if.remote_ip4, dst=self.tra_if.local_ip4)
+ / UDP(sport=src_prt, dport=dst_prt)
+ / Raw(payload)
+ )
+ # store a copy of the packet in the packet info
+ info.data = p.copy()
+ # append the packet to the list
+ packets.append(p)
+ # return the created packet list
+ return packets
+
+
+class SpdFastPathInboundProtect(TemplateIpsecEsp):
+ @classmethod
+ def setUpConstants(cls):
+ super(SpdFastPathInboundProtect, cls).setUpConstants()
+ cls.vpp_cmdline.extend(["ipsec", "{", "ipv4-inbound-spd-fast-path on", "}"])
+ cls.logger.info("VPP modified cmdline is %s" % " ".join(cls.vpp_cmdline))
+
+ @classmethod
+ def setUpClass(cls):
+ super(SpdFastPathInboundProtect, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(SpdFastPathInboundProtect, cls).tearDownClass()
+
+ def setUp(self):
+ super(SpdFastPathInboundProtect, self).setUp()
+
+ def tearDown(self):
+ self.unconfig_network()
+ super(SpdFastPathInboundProtect, self).tearDown()
+
+
+class SpdFastPathIPv6Inbound(IPSecIPv6Fwd):
+ # In test cases derived from this class, packets in IPv6 FWD path
+ # are configured to go through IPSec inbound SPD policy lookup.
+ # Note that order in which the rules are applied is
+ # PROTECT, BYPASS, DISCARD. Therefore BYPASS rules take
+ # precedence over DISCARD.
+
+ # Override setUpConstants to enable inbound fast path in config
+ @classmethod
+ def setUpConstants(cls):
+ super(SpdFastPathIPv6Inbound, cls).setUpConstants()
+ cls.vpp_cmdline.extend(["ipsec", "{", "ipv6-inbound-spd-fast-path on", "}"])
+ cls.logger.info("VPP modified cmdline is %s" % " ".join(cls.vpp_cmdline))
+
+
+class IPSec4SpdTestCaseBypass(SpdFastPathInbound):
+ """ IPSec/IPv4 inbound: Policy mode test case with fast path \
+ (add bypass)"""
+
+ def test_ipsec_spd_inbound_bypass(self):
+ # In this test case, packets in IPv4 FWD path are configured
+ # to go through IPSec inbound SPD policy lookup.
+ #
+ # 2 inbound SPD rules (1 HIGH and 1 LOW) are added.
+ # - High priority rule action is set to DISCARD.
+ # - Low priority rule action is set to BYPASS.
+ #
+ # Since BYPASS rules take precedence over DISCARD
+ # (the order being PROTECT, BYPASS, DISCARD) we expect the
+ # BYPASS rule to match and traffic to be correctly forwarded.
+ self.create_interfaces(2)
+ pkt_count = 5
+
+ self.spd_create_and_intf_add(1, [self.pg1, self.pg0])
+
+ # create input rules
+ # bypass rule should take precedence over discard rule,
+ # even though it's lower priority
+ policy_0 = self.spd_add_rem_policy( # inbound, priority 10
+ 1,
+ self.pg1,
+ self.pg0,
+ socket.IPPROTO_UDP,
+ is_out=0,
+ priority=10,
+ policy_type="bypass",
+ ip_range=True,
+ local_ip_start=self.pg0.remote_ip4,
+ local_ip_stop=self.pg0.remote_ip4,
+ remote_ip_start=self.pg1.remote_ip4,
+ remote_ip_stop=self.pg1.remote_ip4,
+ )
+ policy_1 = self.spd_add_rem_policy( # inbound, priority 15
+ 1,
+ self.pg1,
+ self.pg0,
+ socket.IPPROTO_UDP,
+ is_out=0,
+ priority=15,
+ policy_type="discard",
+ ip_range=True,
+ local_ip_start=self.pg0.remote_ip4,
+ local_ip_stop=self.pg0.remote_ip4,
+ remote_ip_start=self.pg1.remote_ip4,
+ remote_ip_stop=self.pg1.remote_ip4,
+ )
+
+ # create output rule so we can capture forwarded packets
+ policy_2 = self.spd_add_rem_policy( # outbound, priority 10
+ 1,
+ self.pg0,
+ self.pg1,
+ socket.IPPROTO_UDP,
+ is_out=1,
+ priority=10,
+ policy_type="bypass",
+ )
+
+ # create the packet stream
+ packets = self.create_stream(self.pg0, self.pg1, pkt_count)
+ # add the stream to the source interface
+ self.pg0.add_stream(packets)
+ self.pg1.enable_capture()
+ self.pg_start()
+
+ # check capture on pg1
+ capture = self.pg1.get_capture()
+ for packet in capture:
+ try:
+ self.logger.debug(ppp("SPD Add - Got packet:", packet))
+ except Exception:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+ self.logger.debug("SPD: Num packets: %s", len(capture.res))
+
+ # verify captured packets
+ self.verify_capture(self.pg0, self.pg1, capture)
+ # verify all policies matched the expected number of times
+ self.verify_policy_match(pkt_count, policy_0)
+ self.verify_policy_match(0, policy_1)
+ self.verify_policy_match(pkt_count, policy_2)
+
+
+class IPSec4SpdTestCaseDiscard(SpdFastPathInbound):
+ """ IPSec/IPv4 inbound: Policy mode test case with fast path \
+ (add discard)"""
+
+ def test_ipsec_spd_inbound_discard(self):
+ # In this test case, packets in IPv4 FWD path are configured
+ # to go through IPSec inbound SPD policy lookup.
+ #
+ # Rule action is set to DISCARD.
+
+ self.create_interfaces(2)
+ pkt_count = 5
+
+ self.spd_create_and_intf_add(1, [self.pg1, self.pg0])
+
+ # create input rule: a single DISCARD rule; all matching inbound
+ # traffic is expected to be dropped
+ policy_0 = self.spd_add_rem_policy( # inbound, priority 10
+ 1,
+ self.pg1,
+ self.pg0,
+ socket.IPPROTO_UDP,
+ is_out=0,
+ priority=10,
+ policy_type="discard",
+ ip_range=True,
+ local_ip_start=self.pg0.remote_ip4,
+ local_ip_stop=self.pg0.remote_ip4,
+ remote_ip_start=self.pg1.remote_ip4,
+ remote_ip_stop=self.pg1.remote_ip4,
+ )
+
+ # create output rule so we can capture forwarded packets
+ policy_1 = self.spd_add_rem_policy( # outbound, priority 10
+ 1,
+ self.pg0,
+ self.pg1,
+ socket.IPPROTO_UDP,
+ is_out=1,
+ priority=10,
+ policy_type="bypass",
+ )
+
+ # create the packet stream
+ packets = self.create_stream(self.pg0, self.pg1, pkt_count)
+ # add the stream to the source interface
+ self.pg0.add_stream(packets)
+ self.pg1.enable_capture()
+ self.pg_start()
+
+ # check capture on pg1
+ capture = self.pg1.assert_nothing_captured()
+
+ # verify all policies matched the expected number of times
+ self.verify_policy_match(pkt_count, policy_0)
+ self.verify_policy_match(0, policy_1)
+
+
+class IPSec4SpdTestCaseProtect(SpdFastPathInboundProtect):
+ """ IPSec/IPv4 inbound: Policy mode test case with fast path \
+ (add protect)"""
+
+ @classmethod
+ def setUpClass(cls):
+ super(IPSec4SpdTestCaseProtect, cls).setUpClass()
+
+ @classmethod
+ def tearDownClass(cls):
+ super(IPSec4SpdTestCaseProtect, cls).tearDownClass()
+
+ def setUp(self):
+ super(IPSec4SpdTestCaseProtect, self).setUp()
+
+ def tearDown(self):
+ super(IPSec4SpdTestCaseProtect, self).tearDown()
+
+ def test_ipsec_spd_inbound_protect(self):
+ # In this test case, packets in IPv4 FWD path are configured
+ # to go through IPSec inbound SPD policy lookup.
+ #
+ # An ESP-encrypted stream is sent on the transport interface and is
+ # expected to be matched by the inbound PROTECT policy installed by
+ # the TemplateIpsecEsp base class. The SA packet counters are then
+ # checked to confirm the traffic was handled by the SAs.
+
+ pkt_count = 5
+ payload_size = 64
+ p = self.params[socket.AF_INET]
+ send_pkts = self.gen_encrypt_pkts(
+ p,
+ p.scapy_tra_sa,
+ self.tra_if,
+ src=self.tra_if.local_ip4,
+ dst=self.tra_if.remote_ip4,
+ count=pkt_count,
+ payload_size=payload_size,
+ )
+ recv_pkts = self.send_and_expect(self.tra_if, send_pkts, self.tra_if)
+
+ self.logger.info(self.vapi.ppcli("show error"))
+ self.logger.info(self.vapi.ppcli("show ipsec all"))
+
+ pkts = p.tra_sa_in.get_stats()["packets"]
+ self.assertEqual(
+ pkts,
+ pkt_count,
+ "incorrect SA in counts: expected %d != %d" % (pkt_count, pkts),
+ )
+ pkts = p.tra_sa_out.get_stats()["packets"]
+ self.assertEqual(
+ pkts,
+ pkt_count,
+ "incorrect SA out counts: expected %d != %d" % (pkt_count, pkts),
+ )
+ self.assertEqual(p.tra_sa_out.get_lost(), 0)
+ self.assertEqual(p.tra_sa_in.get_lost(), 0)
+
+
+class IPSec4SpdTestCaseAddIPRange(SpdFastPathInbound):
+ """ IPSec/IPv4 inbound: Policy mode test case with fast path \
+ (add ips range with any port rule)"""
+
+ def test_ipsec_spd_inbound_add(self):
+ # In this test case, packets in IPv4 FWD path are configured
+ # to go through IPSec inbound SPD policy lookup.
+ # 2 SPD bypass rules (1 for inbound and 1 for outbound) are added.
+ # Traffic sent on pg0 interface should match fast path priority
+ # rule and should be sent out on pg1 interface.
+ self.create_interfaces(2)
+ pkt_count = 5
+ s_ip_s1 = ipaddress.ip_address(self.pg0.remote_ip4)
+ s_ip_e1 = ipaddress.ip_address(int(s_ip_s1) + 5)
+ d_ip_s1 = ipaddress.ip_address(self.pg1.remote_ip4)
+ d_ip_e1 = ipaddress.ip_address(int(d_ip_s1) + 0)
+
+ s_ip_s0 = ipaddress.ip_address(self.pg0.remote_ip4)
+ s_ip_e0 = ipaddress.ip_address(int(s_ip_s0) + 6)
+ d_ip_s0 = ipaddress.ip_address(self.pg1.remote_ip4)
+ d_ip_e0 = ipaddress.ip_address(int(d_ip_s0) + 0)
+ self.spd_create_and_intf_add(1, [self.pg1, self.pg0])
+
+ policy_0 = self.spd_add_rem_policy( # inbound fast path, priority 10
+ 1,
+ self.pg0,
+ self.pg1,
+ socket.IPPROTO_UDP,
+ is_out=0,
+ priority=10,
+ policy_type="bypass",
+ ip_range=True,
+ local_ip_start=s_ip_s0,
+ local_ip_stop=s_ip_e0,
+ remote_ip_start=d_ip_s0,
+ remote_ip_stop=d_ip_e0,
+ )
+ policy_1 = self.spd_add_rem_policy( # outbound, priority 5
+ 1,
+ self.pg0,
+ self.pg1,
+ socket.IPPROTO_UDP,
+ is_out=1,
+ priority=5,
+ policy_type="bypass",
+ ip_range=True,
+ local_ip_start=s_ip_s1,
+ local_ip_stop=s_ip_e1,
+ remote_ip_start=d_ip_s1,
+ remote_ip_stop=d_ip_e1,
+ )
+
+ # create the packet stream
+ packets = self.create_stream(self.pg0, self.pg1, pkt_count)
+ # add the stream to the source interface + enable capture
+ self.pg0.add_stream(packets)
+ self.pg0.enable_capture()
+ self.pg1.enable_capture()
+ # start the packet generator
+ self.pg_start()
+ # get capture
+ capture = self.pg1.get_capture()
+ for packet in capture:
+ try:
+ self.logger.debug(ppp("SPD - Got packet:", packet))
+ except Exception:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+ self.logger.debug("SPD: Num packets: %s", len(capture.res))
+
+ # assert nothing captured on pg0
+ self.pg0.assert_nothing_captured()
+ # verify captured packets
+ self.verify_capture(self.pg0, self.pg1, capture)
+ # verify all policies matched the expected number of times
+ self.verify_policy_match(pkt_count, policy_0)
+ self.verify_policy_match(pkt_count, policy_1)
+
+
+class IPSec4SpdTestCaseAddAll(SpdFastPathInbound):
+ """ IPSec/IPv4 inbound: Policy mode test case with fast path \
+ (add all ips ports rule)"""
+
+ def test_ipsec_spd_inbound_add(self):
+ # In this test case, packets in IPv4 FWD path are configured
+ # to go through IPSec inbound SPD policy lookup.
+ # An inbound HIGH priority DISCARD rule matching all IPs is added,
+ # together with an outbound BYPASS rule.
+ # Traffic sent on pg0 interface is expected to be dropped while
+ # only the DISCARD rule is present.
+ # An inbound LOW priority BYPASS rule matching all IPs is then
+ # added and the same traffic is resent; this time the traffic is
+ # expected to pass, as bypass takes precedence over discard.
+ self.create_interfaces(2)
+ pkt_count = 5
+ self.spd_create_and_intf_add(1, [self.pg0, self.pg1])
+
+ policy_0 = self.spd_add_rem_policy( # inbound, priority 20
+ 1,
+ self.pg0,
+ self.pg1,
+ socket.IPPROTO_UDP,
+ is_out=0,
+ priority=20,
+ policy_type="discard",
+ all_ips=True,
+ )
+
+ policy_1 = self.spd_add_rem_policy( # outbound, priority 5
+ 1,
+ self.pg0,
+ self.pg1,
+ socket.IPPROTO_UDP,
+ is_out=1,
+ priority=5,
+ policy_type="bypass",
+ all_ips=True,
+ )
+
+ # create the packet stream
+ packets = self.create_stream(self.pg0, self.pg1, pkt_count)
+ # add the stream to the source interface + enable capture
+ self.pg0.add_stream(packets)
+ self.pg0.enable_capture()
+ self.pg1.enable_capture()
+ # start the packet generator
+ self.pg_start()
+ # assert nothing captured on pg0 and pg1
+ self.pg0.assert_nothing_captured()
+ self.pg1.assert_nothing_captured()
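+ # with only the inbound DISCARD rule (and the outbound bypass)
+ # in place, all traffic sent on pg0 is dropped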
+
+ policy_2 = self.spd_add_rem_policy( # inbound, priority 10
+ 1,
+ self.pg0,
+ self.pg1,
+ socket.IPPROTO_UDP,
+ is_out=0,
+ priority=10,
+ policy_type="bypass",
+ all_ips=True,
+ )
+
+ # create the packet stream
+ packets = self.create_stream(self.pg0, self.pg1, pkt_count)
+ # add the stream to the source interface + enable capture
+ self.pg0.add_stream(packets)
+ self.pg0.enable_capture()
+ self.pg1.enable_capture()
+ # start the packet generator
+ self.pg_start()
+ # get capture
+ capture = self.pg1.get_capture(expected_count=pkt_count)
+ for packet in capture:
+ try:
+ self.logger.debug(ppp("SPD - Got packet:", packet))
+ except Exception:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+ self.logger.debug("SPD: Num packets: %s", len(capture.res))
+
+ # assert nothing captured on pg0
+ self.pg0.assert_nothing_captured()
+ # verify all policies matched the expected number of times
+ self.verify_policy_match(pkt_count, policy_2)
+
+
+class IPSec4SpdTestCaseRemove(SpdFastPathInbound):
+ """ IPSec/IPv4 inbound: Policy mode test case with fast path \
+ (remove rule)"""
+
+ def test_ipsec_spd_inbound_remove(self):
+ # In this test case, packets in IPv4 FWD path are configured
+ # to go through IPSec inbound SPD policy lookup.
+ # 2 inbound SPD rules (1 HIGH and 1 LOW) plus an outbound BYPASS rule are added.
+ # High priority rule action is set to BYPASS.
+ # Low priority rule action is set to DISCARD.
+ # Traffic sent on pg0 interface should first match the high
+ # priority rule and be forwarded out on pg1 interface.
+ # The high priority rule is then removed; resent traffic should
+ # match the low priority rule and be discarded after SPD lookup.
+ self.create_interfaces(2)
+ pkt_count = 5
+ self.spd_create_and_intf_add(1, [self.pg0, self.pg1])
+ policy_0 = self.spd_add_rem_policy( # inbound, priority 10
+ 1,
+ self.pg0,
+ self.pg1,
+ socket.IPPROTO_UDP,
+ is_out=0,
+ priority=10,
+ policy_type="bypass",
+ )
+ policy_1 = self.spd_add_rem_policy( # inbound, priority 5
+ 1,
+ self.pg0,
+ self.pg1,
+ socket.IPPROTO_UDP,
+ is_out=0,
+ priority=5,
+ policy_type="discard",
+ )
+
+ policy_out = self.spd_add_rem_policy( # outbound, priority 10
+ 1,
+ self.pg0,
+ self.pg1,
+ socket.IPPROTO_UDP,
+ is_out=1,
+ priority=10,
+ policy_type="bypass",
+ )
+
+ # create the packet stream
+ packets = self.create_stream(self.pg0, self.pg1, pkt_count)
+ # add the stream to the source interface + enable capture
+ self.pg0.add_stream(packets)
+ self.pg0.enable_capture()
+ self.pg1.enable_capture()
+ # start the packet generator
+ self.pg_start()
+ # get capture
+ capture = self.pg1.get_capture()
+ for packet in capture:
+ try:
+ self.logger.debug(ppp("SPD - Got packet:", packet))
+ except Exception:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+
+ # assert nothing captured on pg0
+ self.pg0.assert_nothing_captured()
+ # verify capture on pg1
+ self.logger.debug("SPD: Num packets: %s", len(capture.res))
+ self.verify_capture(self.pg0, self.pg1, capture)
+ # verify all policies matched the expected number of times
+ self.verify_policy_match(pkt_count, policy_0)
+ self.verify_policy_match(0, policy_1)
+ # now remove the bypass rule
+ self.spd_add_rem_policy( # inbound, priority 10
+ 1,
+ self.pg0,
+ self.pg1,
+ socket.IPPROTO_UDP,
+ is_out=0,
+ priority=10,
+ policy_type="bypass",
+ remove=True,
+ )
+
+ # resend the same packets
+ self.pg0.add_stream(packets)
+ self.pg0.enable_capture() # flush the old captures
+ self.pg1.enable_capture()
+ self.pg_start()
+ # assert nothing captured on pg0
+ self.pg0.assert_nothing_captured()
+ # all packets will be dropped by SPD rule
+ self.pg1.assert_nothing_captured()
+ # verify all policies matched the expected number of times
+ self.verify_policy_match(pkt_count, policy_0)
+ self.verify_policy_match(pkt_count, policy_1)
+
+
+class IPSec4SpdTestCaseReadd(SpdFastPathInbound):
+ """ IPSec/IPv4 inbound: Policy mode test case with fast path \
+ (add, remove, re-add)"""
+
+ def test_ipsec_spd_inbound_readd(self):
+ # In this test case, packets in IPv4 FWD path are configured
+ # to go through IPSec inbound SPD policy lookup.
+ # 2 inbound SPD rules (1 HIGH and 1 LOW) plus an outbound BYPASS rule are added.
+ # High priority rule action is set to BYPASS.
+ # Low priority rule action is set to DISCARD.
+ # Traffic sent on pg0 interface should match high priority
+ # rule and should be sent out on pg1 interface.
+ # High priority rule is then removed.
+ # Traffic sent on pg0 interface should match low priority
+ # rule and should be discarded after SPD lookup.
+ # The high priority rule is then re-added.
+ # Traffic sent on pg0 interface should match high priority
+ # rule and should be sent out on pg1 interface.
+ self.create_interfaces(2)
+ pkt_count = 5
+ self.spd_create_and_intf_add(1, [self.pg0, self.pg1])
+ policy_0 = self.spd_add_rem_policy( # inbound, priority 10
+ 1,
+ self.pg0,
+ self.pg1,
+ socket.IPPROTO_UDP,
+ is_out=0,
+ priority=10,
+ policy_type="bypass",
+ )
+ policy_1 = self.spd_add_rem_policy( # inbound, priority 5
+ 1,
+ self.pg0,
+ self.pg1,
+ socket.IPPROTO_UDP,
+ is_out=0,
+ priority=5,
+ policy_type="discard",
+ )
+ policy_2 = self.spd_add_rem_policy( # outbound, priority 10
+ 1,
+ self.pg0,
+ self.pg1,
+ socket.IPPROTO_UDP,
+ is_out=1,
+ priority=10,
+ policy_type="bypass",
+ )
+
+ # create the packet stream
+ packets = self.create_stream(self.pg0, self.pg1, pkt_count)
+ # add the stream to the source interface + enable capture
+ self.pg0.add_stream(packets)
+ self.pg0.enable_capture()
+ self.pg1.enable_capture()
+ # start the packet generator
+ self.pg_start()
+ # get capture
+ capture = self.pg1.get_capture()
+ for packet in capture:
+ try:
+ self.logger.debug(ppp("SPD - Got packet:", packet))
+ except Exception:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+ self.logger.debug("SPD: Num packets: %s", len(capture.res))
+
+ # assert nothing captured on pg0
+ self.pg0.assert_nothing_captured()
+ # verify capture on pg1
+ self.verify_capture(self.pg0, self.pg1, capture)
+ # verify all policies matched the expected number of times
+ self.verify_policy_match(pkt_count, policy_0)
+ self.verify_policy_match(0, policy_1)
+ # remove the inbound bypass rule, leaving only the inbound discard rule
+ self.spd_add_rem_policy( # inbound, priority 10
+ 1,
+ self.pg0,
+ self.pg1,
+ socket.IPPROTO_UDP,
+ is_out=0,
+ priority=10,
+ policy_type="bypass",
+ remove=True,
+ )
+
+ # resend the same packets
+ self.pg0.add_stream(packets)
+ self.pg0.enable_capture() # flush the old captures
+ self.pg1.enable_capture()
+ self.pg_start()
+
+ # assert nothing captured on pg0
+ self.pg0.assert_nothing_captured()
+ # all packets will be dropped by SPD rule
+ self.pg1.assert_nothing_captured()
+ # verify all policies matched the expected number of times
+ self.verify_policy_match(pkt_count, policy_0)
+ self.verify_policy_match(pkt_count, policy_1)
+
+ # now re-add the bypass rule
+ policy_0 = self.spd_add_rem_policy( # inbound, priority 10
+ 1,
+ self.pg0,
+ self.pg1,
+ socket.IPPROTO_UDP,
+ is_out=0,
+ priority=10,
+ policy_type="bypass",
+ )
+
+ # resend the same packets
+ self.pg0.add_stream(packets)
+ self.pg0.enable_capture() # flush the old captures
+ self.pg1.enable_capture()
+ self.pg_start()
+
+ # get capture
+ capture = self.pg1.get_capture(pkt_count)
+ for packet in capture:
+ try:
+ self.logger.debug(ppp("SPD - Got packet:", packet))
+ except Exception:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+ self.logger.debug("SPD: Num packets: %s", len(capture.res))
+
+ # assert nothing captured on pg0
+ self.pg0.assert_nothing_captured()
+ # verify captured packets
+ self.verify_capture(self.pg0, self.pg1, capture)
+ # verify all policies matched the expected number of times
+ self.verify_policy_match(pkt_count, policy_0)
+ self.verify_policy_match(pkt_count, policy_1)
+
+
+class IPSec4SpdTestCaseMultiple(SpdFastPathInbound):
+ """ IPSec/IPv4 inbound: Policy mode test case with fast path \
+ (multiple interfaces, multiple rules)"""
+
+ def test_ipsec_spd_inbound_multiple(self):
+ # In this test case, packets in IPv4 FWD path are configured to go
+ # through IPSec inbound SPD policy lookup.
+ # Multiple rules on multiple interfaces are tested at the same time.
+ # 3x interfaces are configured, binding the same SPD to each.
+ # Each interface has 2 SPD rules (1 BYPASS and 1 DISCARD).
+ # On pg0 & pg1, the BYPASS rule is HIGH priority
+ # On pg2, the DISCARD rule is HIGH priority
+ # Traffic arriving on pg0 & pg1 should be forwarded; traffic
+ # arriving on pg2 still matches the BYPASS rule, as bypass takes
+ # precedence over discard in the fast path.
+ self.create_interfaces(3)
+ pkt_count = 5
+ # bind SPD to all interfaces
+ self.spd_create_and_intf_add(1, self.pg_interfaces)
+ # add rules on all interfaces
+ policy_01 = self.spd_add_rem_policy( # inbound, priority 10
+ 1,
+ self.pg0,
+ self.pg1,
+ socket.IPPROTO_UDP,
+ is_out=0,
+ priority=10,
+ policy_type="bypass",
+ )
+ policy_02 = self.spd_add_rem_policy( # inbound, priority 5
+ 1,
+ self.pg0,
+ self.pg1,
+ socket.IPPROTO_UDP,
+ is_out=0,
+ priority=5,
+ policy_type="discard",
+ )
+
+ policy_11 = self.spd_add_rem_policy( # inbound, priority 10
+ 1,
+ self.pg1,
+ self.pg2,
+ socket.IPPROTO_UDP,
+ is_out=0,
+ priority=10,
+ policy_type="bypass",
+ )
+ policy_12 = self.spd_add_rem_policy( # inbound, priority 5
+ 1,
+ self.pg1,
+ self.pg2,
+ socket.IPPROTO_UDP,
+ is_out=0,
+ priority=5,
+ policy_type="discard",
+ )
+
+ policy_21 = self.spd_add_rem_policy( # inbound, priority 5
+ 1,
+ self.pg2,
+ self.pg0,
+ socket.IPPROTO_UDP,
+ is_out=0,
+ priority=5,
+ policy_type="bypass",
+ )
+ policy_22 = self.spd_add_rem_policy( # inbound, priority 10
+ 1,
+ self.pg2,
+ self.pg0,
+ socket.IPPROTO_UDP,
+ is_out=0,
+ priority=10,
+ policy_type="discard",
+ )
+
+ # interfaces bound to an SPD will, by default, drop outbound
+ # traffic that matches no policy, so add a catch-all outbound
+ # bypass rule to the SPD:
+ self.spd_add_rem_policy( # outbound, all interfaces
+ 1,
+ None,
+ None,
+ socket.IPPROTO_UDP,
+ is_out=1,
+ priority=10,
+ policy_type="bypass",
+ all_ips=True,
+ )
+
+ # create the packet streams
+ packets0 = self.create_stream(self.pg0, self.pg1, pkt_count)
+ packets1 = self.create_stream(self.pg1, self.pg2, pkt_count)
+ packets2 = self.create_stream(self.pg2, self.pg0, pkt_count)
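+ # three unidirectional flows: pg0 -> pg1, pg1 -> pg2, pg2 -> pg0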
+ # add the streams to the source interfaces
+ self.pg0.add_stream(packets0)
+ self.pg1.add_stream(packets1)
+ self.pg2.add_stream(packets2)
+ # enable capture on all interfaces
+ for pg in self.pg_interfaces:
+ pg.enable_capture()
+ # start the packet generator
+ self.pg_start()
+
+ # get captures
+ if_caps = []
+ for pg in [self.pg1, self.pg2]: # we are expecting captures on pg1/pg2
+ if_caps.append(pg.get_capture())
+ for packet in if_caps[-1]:
+ try:
+ self.logger.debug(ppp("SPD - Got packet:", packet))
+ except Exception:
+ self.logger.error(ppp("Unexpected or invalid packet:", packet))
+ raise
+ self.logger.debug("SPD: Num packets: %s", len(if_caps[0].res))
+ self.logger.debug("SPD: Num packets: %s", len(if_caps[1].res))
+
+ # verify captures that matched BYPASS rule
+ self.verify_capture(self.pg0, self.pg1, if_caps[0])
+ self.verify_capture(self.pg1, self.pg2, if_caps[1])
+ # verify that traffic sent towards pg0 matched the BYPASS rule and
+ # was not dropped, even though the DISCARD rule had higher priority
+ self.verify_policy_match(pkt_count, policy_21)
+
+ # verify all packets that were expected to match rules, matched
+ # pg0 -> pg1
+ self.verify_policy_match(pkt_count, policy_01)
+ self.verify_policy_match(0, policy_02)
+ # pg1 -> pg2
+ self.verify_policy_match(pkt_count, policy_11)
+ self.verify_policy_match(0, policy_12)
+ # pg2 -> pg0
+ self.verify_policy_match(0, policy_22)
+
+
+if __name__ == "__main__":
+ unittest.main(testRunner=VppTestRunner)