author    | Piotr Bronowski <piotrx.bronowski@intel.com> | 2022-05-10 13:18:22 +0000
committer | Piotr Bronowski <piotrx.bronowski@intel.com> | 2022-06-29 09:05:51 +0000
commit    | 0464310fd3d4234e5b0aaf730360a1db2b5f7384 (patch)
tree      | 312e4a3df3bc57408cc10c52e565d8c46a2e77c8 /src
parent    | fc20c8e50f2784ad62b97bdb0094605d2b86f596 (diff)
ipsec: add/delete ipsec fast path policy
This patch introduces functions to add and delete fast path
policies.
Type: feature
Signed-off-by: Piotr Bronowski <piotrx.bronowski@intel.com>
Change-Id: I3f1f1323148080c9dac531fbe9fa33bad4efe814
Diffstat (limited to 'src')
-rw-r--r-- | src/vnet/ipsec/ipsec.c            |   3
-rw-r--r-- | src/vnet/ipsec/ipsec.h            |   4
-rw-r--r-- | src/vnet/ipsec/ipsec_spd.c        |  17
-rw-r--r-- | src/vnet/ipsec/ipsec_spd.h        |  22
-rw-r--r-- | src/vnet/ipsec/ipsec_spd_policy.c | 652
-rw-r--r-- | src/vnet/ipsec/ipsec_spd_policy.h |  78
6 files changed, 717 insertions(+), 59 deletions(-)
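
Editor's note: the fast-path lookup added in this patch does not match address ranges directly; ipsec_fp_ip4_get_policy_mask() converts each start/stop range into a mask by XOR-ing start and stop, isolating the highest differing bit, and clearing that bit and everything below it. A minimal standalone sketch of that range-to-mask step, assuming plain C with ntohl()/htonl() standing in for VPP's clib byte-order helpers (the helper names below are hypothetical, not part of the patch):

#include <arpa/inet.h> /* ntohl, htonl */
#include <stdint.h>
#include <stdio.h>

/* Propagate the highest set bit downwards, then keep only that bit
 * (the same trick as get_highest_set_bit_u32 in the patch). */
static uint32_t
highest_set_bit_u32 (uint32_t x)
{
  x |= x >> 16;
  x |= x >> 8;
  x |= x >> 4;
  x |= x >> 2;
  x |= x >> 1;
  return x ^ (x >> 1);
}

/* Derive a prefix-style mask for the range [start, stop], both given in
 * network byte order, mirroring ipsec_fp_ip4_get_policy_mask(). */
static uint32_t
range_to_mask (uint32_t start_net, uint32_t stop_net)
{
  uint32_t diff = ntohl (start_net ^ stop_net); /* bits where start != stop */
  uint32_t top = highest_set_bit_u32 (diff);
  return htonl (~(top - 1) & ~top); /* clear the differing bit and below */
}

int
main (void)
{
  /* 192.168.1.10 .. 192.168.1.200: start and stop first differ in bit 7,
   * so the resulting mask keeps the upper 24 bits: 0xffffff00. */
  uint32_t start = htonl (0xc0a8010a), stop = htonl (0xc0a801c8);
  printf ("mask = 0x%08x\n", ntohl (range_to_mask (start, stop)));
  return 0;
}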
diff --git a/src/vnet/ipsec/ipsec.c b/src/vnet/ipsec/ipsec.c
index 62ab23976a8..e181e0f3229 100644
--- a/src/vnet/ipsec/ipsec.c
+++ b/src/vnet/ipsec/ipsec.c
@@ -481,6 +481,9 @@ ipsec_init (vlib_main_t * vm)
   if ((error = vlib_call_init_function (vm, ipsec_cli_init)))
     return error;

+  im->fp_spd_is_enabled = 0;
+  im->fp_lookup_hash_buckets = IPSEC_FP_HASH_LOOKUP_HASH_BUCKETS;
+
   vec_validate (im->crypto_algs, IPSEC_CRYPTO_N_ALG - 1);

   a = im->crypto_algs + IPSEC_CRYPTO_ALG_NONE;
diff --git a/src/vnet/ipsec/ipsec.h b/src/vnet/ipsec/ipsec.h
index 58b0ffc93f9..c4f5326c962 100644
--- a/src/vnet/ipsec/ipsec.h
+++ b/src/vnet/ipsec/ipsec.h
@@ -144,6 +144,10 @@ typedef struct
   /* pool of policies */
   ipsec_policy_t *policies;

+  u32 fp_spd_is_enabled;
+  ipsec_fp_mask_type_entry_t *fp_mask_types;
+  u32 fp_lookup_hash_buckets; /* number of buckets should be power of two */
+
   /* hash tables of UDP port registrations */
   uword *udp_port_registrations;
diff --git a/src/vnet/ipsec/ipsec_spd.c b/src/vnet/ipsec/ipsec_spd.c
index 4e8017c35ff..aecb50dd346 100644
--- a/src/vnet/ipsec/ipsec_spd.c
+++ b/src/vnet/ipsec/ipsec_spd.c
@@ -46,7 +46,13 @@ ipsec_add_del_spd (vlib_main_t * vm, u32 spd_id, int is_add)
 #define _(s,v) vec_free(spd->policies[IPSEC_SPD_POLICY_##s]);
       foreach_ipsec_spd_policy_type
 #undef _
-      pool_put (im->spds, spd);
+      if (im->fp_spd_is_enabled)
+	{
+	  ipsec_spd_fp_t *fp_spd = &spd->fp_spd;
+
+	  clib_bihash_free_16_8 (&fp_spd->fp_ip4_lookup_hash);
+	}
+      pool_put (im->spds, spd);
     }
   else /* create new SPD */
     {
@@ -55,6 +61,15 @@ ipsec_add_del_spd (vlib_main_t * vm, u32 spd_id, int is_add)
       spd_index = spd - im->spds;
       spd->id = spd_id;
       hash_set (im->spd_index_by_spd_id, spd_id, spd_index);
+      if (im->fp_spd_is_enabled)
+	{
+	  ipsec_spd_fp_t *fp_spd = &spd->fp_spd;
+
+	  clib_bihash_init_16_8 (
+	    &fp_spd->fp_ip4_lookup_hash, "SPD_FP ip4 rules lookup bihash",
+	    im->fp_lookup_hash_buckets,
+	    im->fp_lookup_hash_buckets * IPSEC_FP_IP4_HASH_MEM_PER_BUCKET);
+	}
     }
   return 0;
 }
diff --git a/src/vnet/ipsec/ipsec_spd.h b/src/vnet/ipsec/ipsec_spd.h
index 757a1b72d51..887ae99c101 100644
--- a/src/vnet/ipsec/ipsec_spd.h
+++ b/src/vnet/ipsec/ipsec_spd.h
@@ -15,6 +15,8 @@
 #ifndef __IPSEC_SPD_H__
 #define __IPSEC_SPD_H__

+#include <vppinfra/bihash_40_8.h>
+#include <vppinfra/bihash_16_8.h>
 #include <vlib/vlib.h>

 #define foreach_ipsec_spd_policy_type \
@@ -41,7 +43,23 @@ typedef enum ipsec_spd_policy_t_
 extern u8 *format_ipsec_policy_type (u8 * s, va_list * args);

 /**
- * @brief A Secruity Policy Database
+ * @brief A fast path Security Policy Database
+ */
+typedef struct
+{
+  /** vectors for each of the policy types */
+  u32 *fp_policies[IPSEC_SPD_POLICY_N_TYPES];
+  u32 *fp_mask_types[IPSEC_SPD_POLICY_N_TYPES];
+
+  clib_bihash_40_8_t fp_ip6_lookup_hash; /* spd fp ip6 lookup hash table. */
+  clib_bihash_16_8_t fp_ip4_lookup_hash; /* spd fp ip4 lookup hash table. */
+
+  u8 fp_ip6_lookup_hash_initialized;
+
+} ipsec_spd_fp_t;
+
+/**
+ * @brief A Security Policy Database
  */
 typedef struct
 {
@@ -49,6 +67,8 @@ typedef struct
   u32 id;
   /** vectors for each of the policy types */
   u32 *policies[IPSEC_SPD_POLICY_N_TYPES];
+  /* TODO remove fp_spd. Use directly ipsec_spd_t for fast path */
+  ipsec_spd_fp_t fp_spd;
 } ipsec_spd_t;

 /**
diff --git a/src/vnet/ipsec/ipsec_spd_policy.c b/src/vnet/ipsec/ipsec_spd_policy.c
index 72da408c161..8cdbe3257d7 100644
--- a/src/vnet/ipsec/ipsec_spd_policy.c
+++ b/src/vnet/ipsec/ipsec_spd_policy.c
@@ -25,62 +25,6 @@ vlib_combined_counter_main_t ipsec_spd_policy_counters = {
 };

 static int
-ipsec_policy_is_equal (ipsec_policy_t * p1, ipsec_policy_t * p2)
-{
-  if (p1->priority != p2->priority)
-    return 0;
-  if (p1->type != p2->type)
-    return (0);
-  if (p1->policy != p2->policy)
-    return (0);
-  if (p1->sa_id != p2->sa_id)
-    return (0);
-  if (p1->protocol != p2->protocol)
-    return (0);
-  if (p1->lport.start != p2->lport.start)
-    return (0);
-  if (p1->lport.stop != p2->lport.stop)
-    return (0);
-  if (p1->rport.start != p2->rport.start)
-    return (0);
-  if (p1->rport.stop != p2->rport.stop)
-    return (0);
-  if (p1->is_ipv6 != p2->is_ipv6)
-    return (0);
-  if (p2->is_ipv6)
-    {
-      if (p1->laddr.start.ip6.as_u64[0] != p2->laddr.start.ip6.as_u64[0])
-	return (0);
-      if (p1->laddr.start.ip6.as_u64[1] != p2->laddr.start.ip6.as_u64[1])
-	return (0);
-      if (p1->laddr.stop.ip6.as_u64[0] != p2->laddr.stop.ip6.as_u64[0])
-	return (0);
-      if (p1->laddr.stop.ip6.as_u64[1] != p2->laddr.stop.ip6.as_u64[1])
-	return (0);
-      if (p1->raddr.start.ip6.as_u64[0] != p2->raddr.start.ip6.as_u64[0])
-	return (0);
-      if (p1->raddr.start.ip6.as_u64[1] != p2->raddr.start.ip6.as_u64[1])
-	return (0);
-      if (p1->raddr.stop.ip6.as_u64[0] != p2->raddr.stop.ip6.as_u64[0])
-	return (0);
-      if (p1->laddr.stop.ip6.as_u64[1] != p2->laddr.stop.ip6.as_u64[1])
-	return (0);
-    }
-  else
-    {
-      if (p1->laddr.start.ip4.as_u32 != p2->laddr.start.ip4.as_u32)
-	return (0);
-      if (p1->laddr.stop.ip4.as_u32 != p2->laddr.stop.ip4.as_u32)
-	return (0);
-      if (p1->raddr.start.ip4.as_u32 != p2->raddr.start.ip4.as_u32)
-	return (0);
-      if (p1->raddr.stop.ip4.as_u32 != p2->raddr.stop.ip4.as_u32)
-	return (0);
-    }
-  return (1);
-}
-
-static int
 ipsec_spd_entry_sort (void *a1, void *a2)
 {
   ipsec_main_t *im = &ipsec_main;
@@ -219,6 +163,15 @@ ipsec_add_del_policy (vlib_main_t * vm,
       else
 	policy->sa_index = INDEX_INVALID;

+      /**
+       * Try adding the policy into fast path SPD first. Only adding to
+       * traditional SPD when failed.
+       **/
+      if (im->fp_spd_is_enabled &&
+	  (policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND))
+	return ipsec_fp_add_del_policy ((void *) &spd->fp_spd, policy, 1,
+					stat_index);
+
       pool_get (im->policies, vp);
       clib_memcpy (vp, policy, sizeof (*vp));
       policy_index = vp - im->policies;
@@ -226,7 +179,6 @@
       vlib_validate_combined_counter (&ipsec_spd_policy_counters,
				       policy_index);
       vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);
-
       vec_add1 (spd->policies[policy->type], policy_index);
       vec_sort_with_function (spd->policies[policy->type],
			       ipsec_spd_entry_sort);
@@ -236,6 +188,19 @@
     {
       u32 ii;

+      /**
+       * Try to delete the policy from the fast path SPD first. Delete from
+       * traditional SPD when fp delete fails.
+       **/
+      /**
+       * TODO: add ipv6 fast path support for outbound and
+       * ipv4/v6 inbound support for fast path
+       */
+      if (im->fp_spd_is_enabled &&
+	  (policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND))
+	return ipsec_fp_add_del_policy ((void *) &spd->fp_spd, policy, 0,
+					stat_index);
+
       vec_foreach_index (ii, (spd->policies[policy->type]))
	{
	  vp = pool_elt_at_index (im->policies,
@@ -253,6 +218,579 @@ ipsec_add_del_policy (vlib_main_t * vm,
   return 0;
 }

+static_always_inline void
+release_mask_type_index (ipsec_main_t *im, u32 mask_type_index)
+{
+  ipsec_fp_mask_type_entry_t *mte =
+    pool_elt_at_index (im->fp_mask_types, mask_type_index);
+  mte->refcount--;
+  if (mte->refcount == 0)
+    {
+      /* this entry is not in use anymore */
+      ASSERT (clib_memset (mte, 0xae, sizeof (*mte)) == EOK);
+      pool_put (im->fp_mask_types, mte);
+    }
+}
+
+static_always_inline u32
+find_mask_type_index (ipsec_main_t *im, ipsec_fp_5tuple_t *mask)
+{
+  ipsec_fp_mask_type_entry_t *mte;
+
+  pool_foreach (mte, im->fp_mask_types)
+    {
+      if (memcmp (&mte->mask, mask, sizeof (*mask)) == 0)
+	return (mte - im->fp_mask_types);
+    }
+
+  return ~0;
+}
+
+static_always_inline void
+fill_ip6_hash_policy_kv (ipsec_main_t *im, ipsec_fp_5tuple_t *match,
+			 ipsec_fp_5tuple_t *mask, clib_bihash_kv_40_8_t *kv)
+{
+  ipsec_fp_lookup_value_t *kv_val = (ipsec_fp_lookup_value_t *) &kv->value;
+  u64 *pmatch = (u64 *) &match;
+  u64 *pmask = (u64 *) &mask;
+  u64 *pkey = (u64 *) &kv->key;
+
+  *pkey++ = *pmatch++ & *pmask++;
+  *pkey++ = *pmatch++ & *pmask++;
+  *pkey++ = *pmatch++ & *pmask++;
+  *pkey++ = *pmatch++ & *pmask++;
+  *pkey++ = *pmatch++ & *pmask++;
+  *pkey++ = *pmatch++ & *pmask++;
+
+  kv_val->as_u64 = 0;
+}
+
+static_always_inline void
+fill_ip4_hash_policy_kv (ipsec_main_t *im, ipsec_fp_5tuple_t *match,
+			 ipsec_fp_5tuple_t *mask, clib_bihash_kv_16_8_t *kv)
+{
+  ipsec_fp_lookup_value_t *kv_val = (ipsec_fp_lookup_value_t *) &kv->value;
+  u64 *pmatch = (u64 *) &match->laddr;
+  u64 *pmask = (u64 *) &mask->laddr;
+  u64 *pkey = (u64 *) kv->key;
+
+  *pkey++ = *pmatch++ & *pmask++;
+  *pkey++ = *pmatch++ & *pmask++;
+
+  kv_val->as_u64 = 0;
+}
+
+static_always_inline u16
+get_highest_set_bit_u16 (u16 x)
+{
+  x |= x >> 8;
+  x |= x >> 4;
+  x |= x >> 2;
+  x |= x >> 1;
+  return x ^= x >> 1;
+}
+
+static_always_inline u32
+get_highest_set_bit_u32 (u32 x)
+{
+  x |= x >> 16;
+  x |= x >> 8;
+  x |= x >> 4;
+  x |= x >> 2;
+  x |= x >> 1;
+  return x ^= x >> 1;
+}
+
+static_always_inline void
+ipsec_fp_ip4_get_policy_mask (ipsec_policy_t *policy, ipsec_fp_5tuple_t *mask)
+{
+  u32 *pladdr_start = (u32 *) &policy->laddr.start.ip4;
+  u32 *pladdr_stop = (u32 *) &policy->laddr.stop.ip4;
+  u32 *plmask = (u32 *) &mask->laddr;
+  u32 *praddr_start = (u32 *) &policy->raddr.start.ip4;
+  u32 *praddr_stop = (u32 *) &policy->raddr.stop.ip4;
+  u32 *prmask = (u32 *) &mask->raddr;
+
+  memset (mask, 0, sizeof (mask->l3_zero_pad));
+  memset (plmask, 1, sizeof (*mask) - sizeof (mask->l3_zero_pad));
+  /* find bits where start != stop */
+  *plmask = *pladdr_start ^ *pladdr_stop;
+  *prmask = *praddr_start ^ *praddr_stop;
+  /* Find most significant bit set (that is the first position
+   * start differs from stop). Mask out everything after that bit and
+   * the bit itself. Remember that policy stores start and stop in the net
+   * order.
+   */
+  *plmask = get_highest_set_bit_u32 (clib_net_to_host_u32 (*plmask));
+  *plmask = clib_host_to_net_u32 (~(*plmask - 1) & (~*plmask));
+
+  *prmask = get_highest_set_bit_u32 (clib_net_to_host_u32 (*prmask));
+  *prmask = clib_host_to_net_u32 (~(*prmask - 1) & (~*prmask));
+
+  if (PREDICT_TRUE ((policy->protocol == IP_PROTOCOL_TCP) ||
+		    (policy->protocol == IP_PROTOCOL_UDP) ||
+		    (policy->protocol == IP_PROTOCOL_SCTP)))
+    {
+      mask->lport = policy->lport.start ^ policy->lport.stop;
+      mask->rport = policy->rport.start ^ policy->rport.stop;
+
+      mask->lport = get_highest_set_bit_u16 (mask->lport);
+      mask->lport = ~(mask->lport - 1) & (~mask->lport);
+
+      mask->rport = get_highest_set_bit_u16 (mask->rport);
+      mask->rport = ~(mask->rport - 1) & (~mask->rport);
+    }
+  else
+    {
+      mask->lport = 0;
+      mask->rport = 0;
+    }
+
+  mask->protocol = (policy->protocol == IPSEC_POLICY_PROTOCOL_ANY) ? 0 : ~0;
+}
+
+static_always_inline int
+ipsec_fp_ip6_get_policy_mask (ipsec_policy_t *policy, ipsec_fp_5tuple_t *mask)
+{
+  u64 *pladdr_start = (u64 *) &policy->laddr.start;
+  u64 *pladdr_stop = (u64 *) &policy->laddr.stop;
+  u64 *plmask = (u64 *) &mask->laddr;
+  u64 *praddr_start = (u64 *) &policy->raddr.start;
+  u64 *praddr_stop = (u64 *) &policy->raddr.stop;
+  u64 *prmask = (u64 *) &mask->ip6_raddr;
+  u16 *plport_start = (u16 *) &policy->lport.start;
+  u16 *plport_stop = (u16 *) &policy->lport.stop;
+  u16 *prport_start = (u16 *) &policy->rport.start;
+  u16 *prport_stop = (u16 *) &policy->rport.stop;
+
+  /* test if x is not power of 2. The test form is !((x & (x - 1)) == 0) */
+  if (((*pladdr_stop - *pladdr_start + 1) & (*pladdr_stop - *pladdr_start)) &&
+      (((*(pladdr_stop + 1) - *(pladdr_start + 1)) + 1) &
+       (*(pladdr_stop + 1) - *(pladdr_start + 1))))
+    return -1;
+
+  if (((*praddr_stop - *praddr_start + 1) & (*praddr_stop - *praddr_start)) &&
+      (((*(praddr_stop + 1) - *(praddr_start + 1)) + 1) &
+       (*(praddr_stop + 1) - *(praddr_start + 1))))
+    return -1;
+
+  if (((*plport_stop - *plport_start + 1) & (*plport_stop - *plport_start)))
+    return -1;
+
+  if (((*prport_stop - *prport_start + 1) & (*prport_stop - *prport_start)))
+    return -1;
+
+  memset (mask, 1, sizeof (ipsec_fp_5tuple_t));
+
+  *plmask++ = ~(*pladdr_start++ ^ *pladdr_stop++);
+  *plmask++ = ~(*pladdr_start++ ^ *pladdr_stop++);
+
+  *prmask++ = ~(*praddr_start++ ^ *praddr_stop++);
+  *prmask++ = ~(*praddr_start++ ^ *praddr_stop++);
+
+  mask->lport = ~(policy->lport.start ^ policy->lport.stop);
+  mask->rport = ~(policy->rport.start ^ policy->rport.stop);
+  mask->protocol = 0;
+  return 0;
+}
+
+static_always_inline void
+ipsec_fp_get_policy_5tuple (ipsec_policy_t *policy, ipsec_fp_5tuple_t *tuple)
+{
+  memset (tuple, 0, sizeof (*tuple));
+  tuple->is_ipv6 = policy->is_ipv6;
+  if (tuple->is_ipv6)
+    {
+      tuple->ip6_laddr = policy->laddr.start.ip6;
+      tuple->ip6_raddr = policy->raddr.start.ip6;
+    }
+  else
+    {
+      tuple->laddr = policy->laddr.start.ip4;
+      tuple->raddr = policy->raddr.start.ip4;
+    }
+
+  tuple->protocol = policy->protocol;
+
+  tuple->lport = policy->lport.start;
+  tuple->rport = policy->rport.start;
+}
+
+int
+ipsec_fp_ip4_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
+			 ipsec_policy_t *policy, u32 *stat_index)
+{
+  u32 mask_index;
+  ipsec_policy_t *vp;
+  ipsec_fp_mask_type_entry_t *mte;
+  u32 policy_index;
+  clib_bihash_kv_16_8_t kv;
+  clib_bihash_kv_16_8_t result;
+  ipsec_fp_lookup_value_t *result_val =
+    (ipsec_fp_lookup_value_t *) &result.value;
+  ipsec_fp_lookup_value_t *key_val = (ipsec_fp_lookup_value_t *) &kv.value;
+
+  ipsec_fp_5tuple_t mask, policy_5tuple;
+  int res;
+
+  ipsec_fp_ip4_get_policy_mask (policy, &mask);
+  pool_get (im->policies, vp);
+  policy_index = vp - im->policies;
+  vlib_validate_combined_counter (&ipsec_spd_policy_counters, policy_index);
+  vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);
+  *stat_index = policy_index;
+  mask_index = find_mask_type_index (im, &mask);
+
+  if (mask_index == ~0)
+    {
+      /* mask type not found, we need to create a new entry */
+      pool_get (im->fp_mask_types, mte);
+      mask_index = mte - im->fp_mask_types;
+      mte->refcount = 0;
+    }
+  else
+    mte = im->fp_mask_types + mask_index;
+
+  policy->fp_mask_type_id = mask_index;
+  ipsec_fp_get_policy_5tuple (policy, &policy_5tuple);
+
+  fill_ip4_hash_policy_kv (im, &policy_5tuple, &mask, &kv);
+
+  res = clib_bihash_search_inline_2_16_8 (&fp_spd->fp_ip4_lookup_hash, &kv,
+					  &result);
+  if (res != 0)
+    {
+      /* key was not found crate a new entry */
+      vec_add1 (key_val->fp_policies_ids, policy_index);
+      res = clib_bihash_add_del_16_8 (&fp_spd->fp_ip4_lookup_hash, &kv, 1);
+      if (res != 0)
+	goto error;
+    }
+  else
+    {
+
+      if (vec_max_len (result_val->fp_policies_ids) !=
+	  vec_len (result_val->fp_policies_ids))
+	{
+	  /* no need to resize */
+	  vec_add1 (result_val->fp_policies_ids, policy_index);
+	}
+      else
+	{
+	  vec_add1 (result_val->fp_policies_ids, policy_index);
+
+	  res =
+	    clib_bihash_add_del_16_8 (&fp_spd->fp_ip4_lookup_hash, &result, 1);
+
+	  if (res != 0)
+	    goto error;
+	}
+    }
+
+  if (mte->refcount == 0)
+    {
+      clib_memcpy (&mte->mask, &mask, sizeof (mask));
+      mte->refcount = 0;
+      vec_add1 (fp_spd->fp_mask_types[policy->type], mask_index);
+    }
+
+  mte->refcount++;
+  vec_add1 (fp_spd->fp_policies[policy->type], policy_index);
+  clib_memcpy (vp, policy, sizeof (*vp));
+
+  return 0;
+
+error:
+  pool_put (im->policies, vp);
+  release_mask_type_index (im, mask_index);
+  return -1;
+}
+
+int
+ipsec_fp_ip6_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
+			 ipsec_policy_t *policy, u32 *stat_index)
+{
+
+  u32 mask_index;
+  ipsec_policy_t *vp;
+  ipsec_fp_mask_type_entry_t *mte;
+  u32 policy_index;
+  clib_bihash_kv_40_8_t kv;
+  clib_bihash_kv_40_8_t result;
+  ipsec_fp_lookup_value_t *result_val =
+    (ipsec_fp_lookup_value_t *) &result.value;
+  ipsec_fp_lookup_value_t *key_val = (ipsec_fp_lookup_value_t *) &kv.value;
+
+  ipsec_fp_5tuple_t mask, policy_5tuple;
+  int res;
+  /* u64 hash; */
+
+  if (PREDICT_FALSE (!fp_spd->fp_ip6_lookup_hash_initialized))
+    {
+      clib_bihash_init_40_8 (
+	&fp_spd->fp_ip6_lookup_hash, "SPD_FP ip6 rules lookup bihash",
+	im->fp_lookup_hash_buckets,
+	im->fp_lookup_hash_buckets * IPSEC_FP_IP6_HASH_MEM_PER_BUCKET);
+      fp_spd->fp_ip6_lookup_hash_initialized = 1;
+    }
+
+  if (ipsec_fp_ip6_get_policy_mask (policy, &mask) != 0)
+    return -1;
+
+  pool_get (im->policies, vp);
+  policy_index = vp - im->policies;
+  vlib_validate_combined_counter (&ipsec_spd_policy_counters, policy_index);
+  vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);
+  *stat_index = policy_index;
+  mask_index = find_mask_type_index (im, &mask);
+
+  if (mask_index == ~0)
+    {
+      /* mask type not found, we need to create a new entry */
+      pool_get (im->fp_mask_types, mte);
+      mask_index = mte - im->fp_mask_types;
+      mte->refcount = 0;
+    }
+  else
+    mte = im->fp_mask_types + mask_index;
+
+  policy->fp_mask_type_id = mask_index;
+  ipsec_fp_ip6_get_policy_mask (policy, &mask);
+  ipsec_fp_get_policy_5tuple (policy, &policy_5tuple);
+
+  fill_ip6_hash_policy_kv (im, &policy_5tuple, &mask, &kv);
+
+  res = clib_bihash_search_inline_2_40_8 (&fp_spd->fp_ip6_lookup_hash, &kv,
+					  &result);
+  if (res != 0)
+    {
+      /* key was not found crate a new entry */
+      vec_add1 (key_val->fp_policies_ids, policy_index);
+      res = clib_bihash_add_del_40_8 (&fp_spd->fp_ip6_lookup_hash, &kv, 1);
+      if (res != 0)
+	goto error;
+    }
+  else
+    {
+
+      if (vec_max_len (result_val->fp_policies_ids) !=
+	  vec_len (result_val->fp_policies_ids))
+	{
+	  /* no need to resize */
+	  vec_add1 (result_val->fp_policies_ids, policy_index);
+	}
+      else
+	{
+	  vec_add1 (result_val->fp_policies_ids, policy_index);
+
+	  res =
+	    clib_bihash_add_del_40_8 (&fp_spd->fp_ip6_lookup_hash, &result, 1);
+
+	  if (res != 0)
+	    goto error;
+	}
+    }
+
+  if (mte->refcount == 0)
+    {
+      clib_memcpy (&mte->mask, &mask, sizeof (mask));
+      mte->refcount = 0;
+      vec_add1 (fp_spd->fp_mask_types[policy->type], mask_index);
+    }
+
+  mte->refcount++;
+  vec_add1 (fp_spd->fp_policies[policy->type], policy_index);
+  clib_memcpy (vp, policy, sizeof (*vp));
+
+  return 0;
+
+error:
+  pool_put (im->policies, vp);
+  release_mask_type_index (im, mask_index);
+  return -1;
+}
+
+int
+ipsec_fp_ip6_del_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
+			 ipsec_policy_t *policy)
+{
+  int res;
+  ipsec_fp_5tuple_t mask = { 0 }, policy_5tuple;
+  clib_bihash_kv_40_8_t kv;
+  clib_bihash_kv_40_8_t result;
+  ipsec_fp_lookup_value_t *result_val =
+    (ipsec_fp_lookup_value_t *) &result.value;
+
+  ipsec_policy_t *vp;
+  u32 ii, iii, imt;
+
+  ipsec_fp_ip6_get_policy_mask (policy, &mask);
+  ipsec_fp_get_policy_5tuple (policy, &policy_5tuple);
+  fill_ip6_hash_policy_kv (im, &policy_5tuple, &mask, &kv);
+  res = clib_bihash_search_inline_2_40_8 (&fp_spd->fp_ip6_lookup_hash, &kv,
+					  &result);
+  if (res != 0)
+    return -1;
+
+  res = -1;
+  vec_foreach_index (ii, result_val->fp_policies_ids)
+    {
+      vp =
+	pool_elt_at_index (im->policies, *(result_val->fp_policies_ids + ii));
+      if (ipsec_policy_is_equal (vp, policy))
+	{
+	  vec_foreach_index (iii, fp_spd->fp_policies[policy->type])
+	    {
+	      if (*(fp_spd->fp_policies[policy->type] + iii) ==
+		  *(result_val->fp_policies_ids + ii))
+		{
+		  if (vec_len (result_val->fp_policies_ids) == 1)
+		    {
+		      vec_free (result_val->fp_policies_ids);
+		      clib_bihash_add_del_40_8 (&fp_spd->fp_ip6_lookup_hash,
+						&result, 0);
+		    }
+		  else
+		    {
+		      vec_del1 (result_val->fp_policies_ids, ii);
+		    }
+		  vec_del1 (fp_spd->fp_policies[policy->type], iii);
+
+		  vec_foreach_index (imt, fp_spd->fp_mask_types[policy->type])
+		    {
+		      if (*(fp_spd->fp_mask_types[policy->type] + imt) ==
+			  vp->fp_mask_type_id)
+			{
+			  ipsec_fp_mask_type_entry_t *mte = pool_elt_at_index (
+			    im->fp_mask_types, vp->fp_mask_type_id);
+
+			  if (mte->refcount == 1)
+			    vec_del1 (fp_spd->fp_mask_types[policy->type],
+				      imt);
+			  break;
+			}
+		    }
+
+		  res = 0;
+		  break;
+		}
+	    }
+
+	  if (res != 0)
+	    continue;
+	  else
+	    {
+	      release_mask_type_index (im, vp->fp_mask_type_id);
+	      ipsec_sa_unlock (vp->sa_index);
+	      pool_put (im->policies, vp);
+	      return 0;
+	    }
+	}
+    }
+  return -1;
+}
+
+int
+ipsec_fp_ip4_del_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
+			 ipsec_policy_t *policy)
+{
+  int res;
+  ipsec_fp_5tuple_t mask = { 0 }, policy_5tuple;
+  clib_bihash_kv_16_8_t kv;
+  clib_bihash_kv_16_8_t result;
+  ipsec_fp_lookup_value_t *result_val =
+    (ipsec_fp_lookup_value_t *) &result.value;
+
+  ipsec_policy_t *vp;
+  u32 ii, iii, imt;
+
+  ipsec_fp_ip4_get_policy_mask (policy, &mask);
+  ipsec_fp_get_policy_5tuple (policy, &policy_5tuple);
+  fill_ip4_hash_policy_kv (im, &policy_5tuple, &mask, &kv);
+  res = clib_bihash_search_inline_2_16_8 (&fp_spd->fp_ip4_lookup_hash, &kv,
+					  &result);
+  if (res != 0)
+    return -1;
+
+  res = -1;
+  vec_foreach_index (ii, result_val->fp_policies_ids)
+    {
+      vp =
+	pool_elt_at_index (im->policies, *(result_val->fp_policies_ids + ii));
+      if (ipsec_policy_is_equal (vp, policy))
+	{
+	  vec_foreach_index (iii, fp_spd->fp_policies[policy->type])
+	    {
+	      if (*(fp_spd->fp_policies[policy->type] + iii) ==
+		  *(result_val->fp_policies_ids + ii))
+		{
+		  if (vec_len (result_val->fp_policies_ids) == 1)
+		    {
+		      vec_free (result_val->fp_policies_ids);
+		      clib_bihash_add_del_16_8 (&fp_spd->fp_ip4_lookup_hash,
+						&result, 0);
+		    }
+		  else
+		    {
+		      vec_del1 (result_val->fp_policies_ids, ii);
+		    }
+		  vec_del1 (fp_spd->fp_policies[policy->type], iii);
+
+		  vec_foreach_index (imt, fp_spd->fp_mask_types[policy->type])
+		    {
+		      if (*(fp_spd->fp_mask_types[policy->type] + imt) ==
+			  vp->fp_mask_type_id)
+			{
+			  ipsec_fp_mask_type_entry_t *mte = pool_elt_at_index (
+			    im->fp_mask_types, vp->fp_mask_type_id);
+
+			  if (mte->refcount == 1)
+			    vec_del1 (fp_spd->fp_mask_types[policy->type],
+				      imt);
+			  break;
+			}
+		    }
+
+		  res = 0;
+		  break;
+		}
+	    }
+
+	  if (res != 0)
+	    continue;
+	  else
+	    {
+	      release_mask_type_index (im, vp->fp_mask_type_id);
+	      ipsec_sa_unlock (vp->sa_index);
+	      pool_put (im->policies, vp);
+	      return 0;
+	    }
+	}
+    }
+  return -1;
+}
+
+int
+ipsec_fp_add_del_policy (void *fp_spd, ipsec_policy_t *policy, int is_add,
+			 u32 *stat_index)
+{
+  ipsec_main_t *im = &ipsec_main;
+
+  if (is_add)
+    if (policy->is_ipv6)
+      return ipsec_fp_ip6_add_policy (im, (ipsec_spd_fp_t *) fp_spd, policy,
+				      stat_index);
+    else
+      return ipsec_fp_ip4_add_policy (im, (ipsec_spd_fp_t *) fp_spd, policy,
+				      stat_index);
+
+  else if (policy->is_ipv6)
+
+    return ipsec_fp_ip6_del_policy (im, (ipsec_spd_fp_t *) fp_spd, policy);
+  else
+    return ipsec_fp_ip4_del_policy (im, (ipsec_spd_fp_t *) fp_spd, policy);
+}
+
 /*
  * fd.io coding-style-patch-verification: ON
  *
diff --git a/src/vnet/ipsec/ipsec_spd_policy.h b/src/vnet/ipsec/ipsec_spd_policy.h
index 8b78939cafa..24c0e4cefb1 100644
--- a/src/vnet/ipsec/ipsec_spd_policy.h
+++ b/src/vnet/ipsec/ipsec_spd_policy.h
@@ -25,6 +25,21 @@

 #define IPSEC_POLICY_PROTOCOL_ANY IP_PROTOCOL_RESERVED

+/**
+ * This number is calculated as ceil power of 2 for the number
+ * sizeof(clib_bihash_kv_16_8_t)=24 * BIHASH_KVP_PER_PAGE=4 * COLLISIONS_NO=8
+ *
+ */
+
+#define IPSEC_FP_IP4_HASH_MEM_PER_BUCKET 1024
+
+/**
+ * This number is calculated as ceil power of 2 for the number
+ * sizeof(clib_bihash_kv_40_8_t)=48 * BIHASH_KVP_PER_PAGE=4 * COLLISIONS_NO=8
+ *
+ */
+#define IPSEC_FP_IP6_HASH_MEM_PER_BUCKET 2048
+
 #define foreach_ipsec_policy_action \
   _ (0, BYPASS, "bypass")           \
   _ (1, DISCARD, "discard")         \
@@ -79,6 +94,7 @@ typedef struct ipsec_policy_t_
   ipsec_policy_action_t policy;
   u32 sa_id;
   u32 sa_index;
+  u32 fp_mask_type_id;
 } ipsec_policy_t;

 /**
@@ -156,6 +172,68 @@ typedef union
   };
 } ipsec_fp_lookup_value_t;

+/**
+ * @brief add or delete a fast path policy
+ */
+int ipsec_fp_add_del_policy (void *fp_spd, ipsec_policy_t *policy, int is_add,
+			     u32 *stat_index);
+
+static_always_inline int
+ipsec_policy_is_equal (ipsec_policy_t *p1, ipsec_policy_t *p2)
+{
+  if (p1->priority != p2->priority)
+    return 0;
+  if (p1->type != p2->type)
+    return (0);
+  if (p1->policy != p2->policy)
+    return (0);
+  if (p1->sa_id != p2->sa_id)
+    return (0);
+  if (p1->protocol != p2->protocol)
+    return (0);
+  if (p1->lport.start != p2->lport.start)
+    return (0);
+  if (p1->lport.stop != p2->lport.stop)
+    return (0);
+  if (p1->rport.start != p2->rport.start)
+    return (0);
+  if (p1->rport.stop != p2->rport.stop)
+    return (0);
+  if (p1->is_ipv6 != p2->is_ipv6)
+    return (0);
+  if (p2->is_ipv6)
+    {
+      if (p1->laddr.start.ip6.as_u64[0] != p2->laddr.start.ip6.as_u64[0])
+	return (0);
+      if (p1->laddr.start.ip6.as_u64[1] != p2->laddr.start.ip6.as_u64[1])
+	return (0);
+      if (p1->laddr.stop.ip6.as_u64[0] != p2->laddr.stop.ip6.as_u64[0])
+	return (0);
+      if (p1->laddr.stop.ip6.as_u64[1] != p2->laddr.stop.ip6.as_u64[1])
+	return (0);
+      if (p1->raddr.start.ip6.as_u64[0] != p2->raddr.start.ip6.as_u64[0])
+	return (0);
+      if (p1->raddr.start.ip6.as_u64[1] != p2->raddr.start.ip6.as_u64[1])
+	return (0);
+      if (p1->raddr.stop.ip6.as_u64[0] != p2->raddr.stop.ip6.as_u64[0])
+	return (0);
+      if (p1->laddr.stop.ip6.as_u64[1] != p2->laddr.stop.ip6.as_u64[1])
+	return (0);
+    }
+  else
+    {
+      if (p1->laddr.start.ip4.as_u32 != p2->laddr.start.ip4.as_u32)
+	return (0);
+      if (p1->laddr.stop.ip4.as_u32 != p2->laddr.stop.ip4.as_u32)
+	return (0);
+      if (p1->raddr.start.ip4.as_u32 != p2->raddr.start.ip4.as_u32)
+	return (0);
+      if (p1->raddr.stop.ip4.as_u32 != p2->raddr.stop.ip4.as_u32)
+	return (0);
+    }
+  return (1);
+}
+
 #endif /* __IPSEC_SPD_POLICY_H__ */

 /*
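
Editor's note: for context, a hedged sketch of how the entry point introduced above might be driven once im->fp_spd_is_enabled is set. Only the ipsec_fp_add_del_policy() signature, the types, and the IP4-outbound restriction come from this patch; the wrapper function itself is hypothetical.

#include <vnet/ipsec/ipsec.h>

/* Hypothetical helper mirroring the fast-path branch this patch adds to
 * ipsec_add_del_policy(): IPv4 outbound policies go to the fast-path SPD,
 * everything else is left to the caller's traditional SPD handling. */
static int
spd_policy_add_del_fp (ipsec_spd_t *spd, ipsec_policy_t *policy, int is_add)
{
  ipsec_main_t *im = &ipsec_main;
  u32 stat_index = ~0;

  if (!im->fp_spd_is_enabled || policy->type != IPSEC_SPD_POLICY_IP4_OUTBOUND)
    return -1; /* not handled here; fall back to the classic SPD path */

  /* is_add = 1 inserts the policy, is_add = 0 removes a matching one */
  return ipsec_fp_add_del_policy ((void *) &spd->fp_spd, policy, is_add,
				  &stat_index);
}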