author    Benoît Ganne <bganne@cisco.com>    2022-06-08 10:49:17 +0200
committer Damjan Marion <dmarion@0xa5.net>   2022-06-29 21:23:38 +0000
commit    b03eec969f3db186fc354c3e885e51c0b24803f0 (patch)
tree      54871a6dc00bad0644f75755d616f10ad67de6c2 /src/vnet/ip/ip_in_out_acl.c
parent    b0057282d64a4f9ac5966fceb427057b0665b772 (diff)
classify: use 32 bits hash
classify hash used to be stored as u64 in buffer metadata; use 32 bits instead:

- on almost all of our supported architectures (x86 and arm64) we use crc32c intrinsics to compute the final hash, so we really get a 32-bit hash
- the hash itself is used to compute a 32-bit bucket index by masking the upper bits, so the higher 32 bits are always discarded
- this allows the l2 classify buffer metadata padding to grow so that it no longer overlaps the ip fib_index metadata; that overlap is an issue when using the 'set metadata' action in the ip ACL node, which updates both fields

Type: fix
Change-Id: I5d35bdae97b96c3cae534e859b63950fb500ff50
Signed-off-by: Benoît Ganne <bganne@cisco.com>
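To illustrate the first two points, here is a minimal sketch (helper names are hypothetical and the CRC body is a placeholder; the real logic lives in vnet/classify/vnet_classify.h): the crc32c chain only ever produces 32 significant bits, and the bucket index masks those down further, so the upper half of a u64-stored hash is always discarded.

#include <stdint.h>

/* Hypothetical stand-in for the crc32c step used on x86 (SSE4.2
 * _mm_crc32_u64) and arm64 (__crc32cd): each step folds 64 bits of key
 * material into a running 32-bit checksum.  The body below is only a
 * placeholder so the sketch is self-contained, not the real CRC. */
static uint32_t
crc32c_u64_sketch (uint32_t crc, uint64_t data)
{
  crc ^= (uint32_t) data ^ (uint32_t) (data >> 32);
  return (crc >> 1) | (crc << 31);
}

/* However the result is stored, only 32 significant bits ever come out
 * of the crc32c chain. */
static uint32_t
classify_hash_sketch (const uint64_t *key, int n_u64)
{
  uint32_t h = 0;
  for (int i = 0; i < n_u64; i++)
    h = crc32c_u64_sketch (h, key[i]);
  return h;
}

/* The bucket index keeps only the low bits (nbuckets is a power of
 * two), so the upper 32 bits of a u64-stored hash were always dead. */
static uint32_t
bucket_index_sketch (uint32_t hash, uint32_t nbuckets)
{
  return hash & (nbuckets - 1);
}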
Diffstat (limited to 'src/vnet/ip/ip_in_out_acl.c')
-rw-r--r--  src/vnet/ip/ip_in_out_acl.c | 32
1 file changed, 16 insertions(+), 16 deletions(-)
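The overlap fixed here can be pictured with a condensed, hypothetical version of the buffer-metadata union (the real layout, with many more members and different offsets, is in vnet/buffer.h): the ip and l2_classify views alias the same bytes, so with too little padding the live l2_classify fields sit under ip.fib_index, and the 'set metadata' action corrupts them.

#include <stdint.h>

/* Condensed illustration only; field offsets are invented to show the
 * aliasing hazard, not to reproduce vnet/buffer.h.  Shrinking hash
 * from u64 to u32 frees 4 bytes, letting the padding grow so the live
 * l2_classify fields no longer share bytes with ip.fib_index. */
typedef union
{
  struct
  {
    uint32_t fib_index;		/* written by the 'set metadata' action */
  } ip;
  struct
  {
    uint32_t pad;		/* grows by 4 bytes after the patch...  */
    uint32_t hash;		/* ...because hash shrank from u64      */
  } l2_classify;
} opaque_sketch_t;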
diff --git a/src/vnet/ip/ip_in_out_acl.c b/src/vnet/ip/ip_in_out_acl.c
index 9fc07176fa8..d8d6d768e93 100644
--- a/src/vnet/ip/ip_in_out_acl.c
+++ b/src/vnet/ip/ip_in_out_acl.c
@@ -130,7 +130,7 @@ ip_in_out_acl_inline_trace (
u32 sw_if_index[4];
u32 table_index[4];
vnet_classify_table_t *t[4] = { 0, 0 };
- u64 hash[4];
+ u32 hash[4];
/* calculate hashes for b[0] & b[1] */
if (n_left >= 2)
@@ -162,16 +162,16 @@ ip_in_out_acl_inline_trace (
if (is_output)
{
/* Save the rewrite length, since we are using the l2_classify struct */
- vnet_buffer (b[0])->l2_classify.pad.l2_len =
+ vnet_buffer (b[0])->l2.l2_len =
vnet_buffer (b[0])->ip.save_rewrite_length;
/* advance the match pointer so the matching happens on IP header */
- h[2] += vnet_buffer (b[0])->l2_classify.pad.l2_len;
+ h[2] += vnet_buffer (b[0])->l2.l2_len;
/* Save the rewrite length, since we are using the l2_classify struct */
- vnet_buffer (b[1])->l2_classify.pad.l2_len =
+ vnet_buffer (b[1])->l2.l2_len =
vnet_buffer (b[1])->ip.save_rewrite_length;
/* advance the match pointer so the matching happens on IP header */
- h[3] += vnet_buffer (b[1])->l2_classify.pad.l2_len;
+ h[3] += vnet_buffer (b[1])->l2.l2_len;
}
hash[2] = vnet_classify_hash_packet_inline (t[2], (u8 *) h[2]);
@@ -252,16 +252,16 @@ ip_in_out_acl_inline_trace (
if (is_output)
{
/* Save the rewrite length, since we are using the l2_classify struct */
- vnet_buffer (b[2])->l2_classify.pad.l2_len =
+ vnet_buffer (b[2])->l2.l2_len =
vnet_buffer (b[2])->ip.save_rewrite_length;
/* advance the match pointer so the matching happens on IP header */
- h[2] += vnet_buffer (b[2])->l2_classify.pad.l2_len;
+ h[2] += vnet_buffer (b[2])->l2.l2_len;
/* Save the rewrite length, since we are using the l2_classify struct */
- vnet_buffer (b[3])->l2_classify.pad.l2_len =
+ vnet_buffer (b[3])->l2.l2_len =
vnet_buffer (b[3])->ip.save_rewrite_length;
/* advance the match pointer so the matching happens on IP header */
- h[3] += vnet_buffer (b[3])->l2_classify.pad.l2_len;
+ h[3] += vnet_buffer (b[3])->l2.l2_len;
}
hash[2] = vnet_classify_hash_packet_inline (t[2], (u8 *) h[2]);
@@ -351,7 +351,7 @@ ip_in_out_acl_inline_trace (
/* advance the match pointer so the matching happens on IP header */
if (is_output)
- h[0] += vnet_buffer (b[0])->l2_classify.pad.l2_len;
+ h[0] += vnet_buffer (b[0])->l2.l2_len;
hash[0] =
vnet_classify_hash_packet_inline (t[0], (u8 *) h[0]);
@@ -458,7 +458,7 @@ ip_in_out_acl_inline_trace (
/* advance the match pointer so the matching happens on IP header */
if (is_output)
- h[1] += vnet_buffer (b[1])->l2_classify.pad.l2_len;
+ h[1] += vnet_buffer (b[1])->l2.l2_len;
hash[1] =
vnet_classify_hash_packet_inline (t[1], (u8 *) h[1]);
@@ -557,7 +557,7 @@ ip_in_out_acl_inline_trace (
vnet_classify_table_t *t0 = 0;
vnet_classify_entry_t *e0 = 0;
u32 next0 = ACL_NEXT_INDEX_DENY;
- u64 hash0;
+ u32 hash0;
sw_if_index0 = ~0 == way ? 0 : vnet_buffer (b[0])->sw_if_index[way];
table_index0 = table_index_by_sw_if_index[sw_if_index0];
@@ -573,10 +573,10 @@ ip_in_out_acl_inline_trace (
if (is_output)
{
/* Save the rewrite length, since we are using the l2_classify struct */
- vnet_buffer (b[0])->l2_classify.pad.l2_len =
+ vnet_buffer (b[0])->l2.l2_len =
vnet_buffer (b[0])->ip.save_rewrite_length;
/* advance the match pointer so the matching happens on IP header */
- h0 += vnet_buffer (b[0])->l2_classify.pad.l2_len;
+ h0 += vnet_buffer (b[0])->l2.l2_len;
}
vnet_buffer (b[0])->l2_classify.hash =
@@ -602,7 +602,7 @@ ip_in_out_acl_inline_trace (
/* advance the match pointer so the matching happens on IP header */
if (is_output)
- h0 += vnet_buffer (b[0])->l2_classify.pad.l2_len;
+ h0 += vnet_buffer (b[0])->l2.l2_len;
e0 = vnet_classify_find_entry_inline (t0, (u8 *) h0, hash0, now);
if (e0)
@@ -660,7 +660,7 @@ ip_in_out_acl_inline_trace (
/* advance the match pointer so the matching happens on IP header */
if (is_output)
- h0 += vnet_buffer (b[0])->l2_classify.pad.l2_len;
+ h0 += vnet_buffer (b[0])->l2.l2_len;
hash0 = vnet_classify_hash_packet_inline (t0, (u8 *) h0);
e0 = vnet_classify_find_entry_inline
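The recurring save-then-advance pattern in the hunks above reduces to one idea: on the output path the match pointer starts at the (already applied) L2 rewrite, so it must be advanced by the saved rewrite length before matching can land on the IP header. A minimal sketch, with illustrative names that are not VPP API:

#include <stdint.h>

/* On the input path the match pointer already sits at the IP header;
 * on the output path it must skip the prepended L2 rewrite first. */
static const uint8_t *
classify_match_start (const uint8_t *h, uint16_t l2_len, int is_output)
{
  return is_output ? h + l2_len : h;
}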