author     Adrian Oanca <adrian.oanca@enea.com>      2018-02-23 16:27:41 +0100
committer  Damjan Marion <dmarion.lists@gmail.com>   2018-02-26 22:28:50 +0000
commit     22ac59b59157af057d5e1cade2f888341b92d6c0 (patch)
tree       ffd5cc6ab195833b166c8578f4d0bee341f025fa /src/vnet/classify
parent     200fa32213fce6824bcb75c907989ef8daba4a29 (diff)
Added u8x16, u32x4, u64x2 variants of _zero_byte_mask(x) for the ARM/NEON platform. VPP-1129

Change-Id: I954acb56d901e42976e71534317f38d7c4359bcf
Signed-off-by: Adrian Oanca <adrian.oanca@enea.com>
Diffstat (limited to 'src/vnet/classify')
-rw-r--r--   src/vnet/classify/vnet_classify.h   12
1 file changed, 4 insertions(+), 8 deletions(-)
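
For context on the commit message: the _zero_byte_mask(x) helpers produce a 16-bit mask with bit i set when byte i of a 128-bit vector is zero; on x86 the SSE variant can use compare-equal-to-zero plus movemask. The following is a minimal, self-contained sketch of a NEON equivalent for the u8x16 case, written against the public arm_neon.h intrinsics. It is an illustration only, not the vppinfra implementation, and the function name neon_u8x16_zero_byte_mask is hypothetical.

#include <arm_neon.h>   /* NEON intrinsics (vaddv_u8 requires AArch64) */
#include <stdint.h>

/* Return a 16-bit mask with bit i set when byte i of x is zero,
   i.e. the NEON analogue of pcmpeqb + pmovmskb on SSE. */
static inline uint16_t
neon_u8x16_zero_byte_mask (uint8x16_t x)
{
  /* 0xFF in every lane whose byte equals zero */
  uint8x16_t is_zero = vceqq_u8 (x, vdupq_n_u8 (0));

  /* per-lane bit weights 1,2,4,...,128, repeated for the two halves */
  static const uint8_t weights[16] =
    { 1, 2, 4, 8, 16, 32, 64, 128, 1, 2, 4, 8, 16, 32, 64, 128 };
  uint8x16_t bits = vandq_u8 (is_zero, vld1q_u8 (weights));

  /* horizontally add each half: the weights are distinct powers of two,
     so the sums are exactly the low and high bytes of the mask */
  uint8_t lo = vaddv_u8 (vget_low_u8 (bits));
  uint8_t hi = vaddv_u8 (vget_high_u8 (bits));
  return (uint16_t) (lo | (hi << 8));
}
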
diff --git a/src/vnet/classify/vnet_classify.h b/src/vnet/classify/vnet_classify.h
index 6cbbf10aa2e..791b8fd18b7 100644
--- a/src/vnet/classify/vnet_classify.h
+++ b/src/vnet/classify/vnet_classify.h
@@ -41,10 +41,6 @@ extern vlib_node_registration_t ip6_classify_node;
#define CLASSIFY_TRACE 0
-#ifdef CLIB_HAVE_VEC128
-#define CLASSIFY_USE_SSE //Allow usage of SSE operations
-#endif
-
#define U32X4_ALIGNED(p) PREDICT_TRUE((((intptr_t)p) & 0xf) == 0)
/*
@@ -233,7 +229,7 @@ vnet_classify_hash_packet_inline (vnet_classify_table_t * t, u8 * h)
ASSERT (t);
mask = t->mask;
-#ifdef CLASSIFY_USE_SSE
+#ifdef CLIB_HAVE_VEC128
if (U32X4_ALIGNED (h))
{ //SSE can't handle unaligned data
u32x4 *data = (u32x4 *) h;
@@ -259,7 +255,7 @@ vnet_classify_hash_packet_inline (vnet_classify_table_t * t, u8 * h)
}
}
else
-#endif /* CLASSIFY_USE_SSE */
+#endif /* CLIB_HAVE_VEC128 */
{
u32 skip_u64 = t->skip_n_vectors * 2;
u64 *data64 = (u64 *) h;
@@ -407,7 +403,7 @@ vnet_classify_find_entry_inline (vnet_classify_table_t * t,
v = vnet_classify_entry_at_index (t, v, value_index);
-#ifdef CLASSIFY_USE_SSE
+#ifdef CLIB_HAVE_VEC128
if (U32X4_ALIGNED (h))
{
u32x4 *data = (u32x4 *) h;
@@ -452,7 +448,7 @@ vnet_classify_find_entry_inline (vnet_classify_table_t * t,
}
}
else
-#endif /* CLASSIFY_USE_SSE */
+#endif /* CLIB_HAVE_VEC128 */
{
u32 skip_u64 = t->skip_n_vectors * 2;
u64 *data64 = (u64 *) h;
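
The substance of the diff above is that the vector fast path is now guarded by the generic CLIB_HAVE_VEC128 feature macro instead of the SSE-specific CLASSIFY_USE_SSE, so NEON platforms take the 128-bit path as well (provided the data is 16-byte aligned). A minimal sketch of that guard pattern follows, with hypothetical names (mask_block, dst, src, mask) that are not part of vnet_classify; the real vnet_classify_hash_packet_inline additionally folds the masked words into a hash and handles the skip/match vector counts.

#include <stdint.h>
#include <string.h>

/* Stand-in vector type: in VPP this comes from vppinfra when the
   platform defines CLIB_HAVE_VEC128 (SSE2 on x86, NEON on ARM). */
#ifdef CLIB_HAVE_VEC128
typedef uint32_t u32x4 __attribute__ ((vector_size (16)));
#endif

#define U32X4_ALIGNED(p) ((((intptr_t) (p)) & 0xf) == 0)

/* AND one 16-byte block of packet data with a classifier mask.
   The 128-bit path is used only when the platform advertises
   CLIB_HAVE_VEC128 and the pointers are 16-byte aligned; otherwise
   the scalar path does the same work as two u64 operations, mirroring
   the if/else structure in the hunks above. */
static inline void
mask_block (uint8_t * dst, const uint8_t * src, const uint8_t * mask)
{
#ifdef CLIB_HAVE_VEC128
  if (U32X4_ALIGNED (dst) && U32X4_ALIGNED (src) && U32X4_ALIGNED (mask))
    {
      *(u32x4 *) dst = *(const u32x4 *) src & *(const u32x4 *) mask;
      return;
    }
#endif /* CLIB_HAVE_VEC128 */
  uint64_t s[2], m[2];
  memcpy (s, src, 16);
  memcpy (m, mask, 16);
  s[0] &= m[0];
  s[1] &= m[1];
  memcpy (dst, s, 16);
}
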