author    Damjan Marion <damarion@cisco.com>    2021-09-20 17:51:31 +0200
committer Andrew Yourtchenko <ayourtch@gmail.com>    2021-09-22 08:30:01 +0000
commit    c3b62d1d132453390644171673ffbcd775d19850 (patch)
tree      f6a85ed4ee073dbdc7559edc887624c91db53ab0 /src/vnet/classify
parent    c31a234d5cadd160fc0ce027774d1437968c2c9d (diff)
classify: improve vnet_classify_hash
Type: improvement

Change-Id: I8b129b71f91b12ab2d8b05fe3891a7ab8926f072
Signed-off-by: Damjan Marion <damarion@cisco.com>
Diffstat (limited to 'src/vnet/classify')
-rw-r--r--  src/vnet/classify/vnet_classify.h | 50
1 file changed, 23 insertions(+), 27 deletions(-)
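
For context, here is a minimal standalone sketch (not the VPP implementation itself) of the masked XOR-fold technique the patch applies in vnet_classify_hash_packet_inline: advance the packet pointer past the skipped vectors once up front, AND each 16-byte vector of packet data with the table mask, XOR the results together, and fold the two 64-bit lanes into a single value. The names toy_classify_hash, n_skip and n_match are illustrative only and do not appear in the VPP source.

/*
 * Illustrative sketch only: same masked XOR-fold idea as the patched
 * scalar (#else) path, written portably with memcpy for unaligned loads.
 */
#include <stdint.h>
#include <string.h>

static inline uint64_t
toy_classify_hash (const uint8_t *h, const uint8_t *mask,
                   uint32_t n_skip, uint32_t n_match)
{
  uint64_t xor_sum[2] = { 0, 0 };
  uint64_t d[2], m[2];

  /* skip leading vectors once, instead of adding the offset per access */
  h += n_skip * 16;

  for (uint32_t i = 0; i < n_match; i++)
    {
      memcpy (d, h + i * 16, 16);    /* 16 bytes of packet data */
      memcpy (m, mask + i * 16, 16); /* corresponding 16 bytes of mask */
      xor_sum[0] ^= d[0] & m[0];
      xor_sum[1] ^= d[1] & m[1];
    }

  /* fold the two 64-bit lanes into the final hash input */
  return xor_sum[0] ^ xor_sum[1];
}

The committed code unrolls this loop as a switch on match_n_vectors with fallthrough cases, and uses u64x2 vector types when CLIB_HAVE_VEC128 is available, as shown in the diff below.
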
diff --git a/src/vnet/classify/vnet_classify.h b/src/vnet/classify/vnet_classify.h
index f635ecc49dc..baeaeaf18e9 100644
--- a/src/vnet/classify/vnet_classify.h
+++ b/src/vnet/classify/vnet_classify.h
@@ -250,60 +250,56 @@ vnet_classify_table_get (u32 table_index)
static inline u64
vnet_classify_hash_packet_inline (vnet_classify_table_t *t, const u8 *h)
{
- u32x4 *mask;
+ u64 xor_sum;
+ ASSERT (t);
+ h += t->skip_n_vectors * 16;
- union
- {
- u32x4 as_u32x4;
- u64 as_u64[2];
- } xor_sum __attribute__ ((aligned (sizeof (u32x4))));
+#if defined(CLIB_HAVE_VEC128)
+ u64x2 *mask = (u64x2 *) t->mask;
+ u64x2u *data = (u64x2u *) h;
+ u64x2 xor_sum_x2;
+
+ xor_sum_x2 = data[0] & mask[0];
- ASSERT (t);
- mask = t->mask;
-#ifdef CLIB_HAVE_VEC128
- u32x4u *data = (u32x4u *) h;
- xor_sum.as_u32x4 = data[0 + t->skip_n_vectors] & mask[0];
switch (t->match_n_vectors)
{
case 5:
- xor_sum.as_u32x4 ^= data[4 + t->skip_n_vectors] & mask[4];
+ xor_sum_x2 ^= data[4] & mask[4];
/* FALLTHROUGH */
case 4:
- xor_sum.as_u32x4 ^= data[3 + t->skip_n_vectors] & mask[3];
+ xor_sum_x2 ^= data[3] & mask[3];
/* FALLTHROUGH */
case 3:
- xor_sum.as_u32x4 ^= data[2 + t->skip_n_vectors] & mask[2];
+ xor_sum_x2 ^= data[2] & mask[2];
/* FALLTHROUGH */
case 2:
- xor_sum.as_u32x4 ^= data[1 + t->skip_n_vectors] & mask[1];
+ xor_sum_x2 ^= data[1] & mask[1];
/* FALLTHROUGH */
case 1:
break;
default:
abort ();
}
+ xor_sum = xor_sum_x2[0] ^ xor_sum_x2[1];
#else
- u32 skip_u64 = t->skip_n_vectors * 2;
- u64 *data64 = (u64 *) h;
- xor_sum.as_u64[0] = data64[0 + skip_u64] & ((u64 *) mask)[0];
- xor_sum.as_u64[1] = data64[1 + skip_u64] & ((u64 *) mask)[1];
+ u64 *data = (u64 *) h;
+ u64 *mask = (u64 *) t->mask;
+
+ xor_sum = (data[0] & mask[0]) ^ (data[1] & mask[1]);
+
switch (t->match_n_vectors)
{
case 5:
- xor_sum.as_u64[0] ^= data64[8 + skip_u64] & ((u64 *) mask)[8];
- xor_sum.as_u64[1] ^= data64[9 + skip_u64] & ((u64 *) mask)[9];
+ xor_sum ^= (data[8] & mask[8]) ^ (data[9] & mask[9]);
/* FALLTHROUGH */
case 4:
- xor_sum.as_u64[0] ^= data64[6 + skip_u64] & ((u64 *) mask)[6];
- xor_sum.as_u64[1] ^= data64[7 + skip_u64] & ((u64 *) mask)[7];
+ xor_sum ^= (data[6] & mask[6]) ^ (data[7] & mask[7]);
/* FALLTHROUGH */
case 3:
- xor_sum.as_u64[0] ^= data64[4 + skip_u64] & ((u64 *) mask)[4];
- xor_sum.as_u64[1] ^= data64[5 + skip_u64] & ((u64 *) mask)[5];
+ xor_sum ^= (data[4] & mask[4]) ^ (data[5] & mask[5]);
/* FALLTHROUGH */
case 2:
- xor_sum.as_u64[0] ^= data64[2 + skip_u64] & ((u64 *) mask)[2];
- xor_sum.as_u64[1] ^= data64[3 + skip_u64] & ((u64 *) mask)[3];
+ xor_sum ^= (data[2] & mask[2]) ^ (data[3] & mask[3]);
/* FALLTHROUGH */
case 1:
break;