Diffstat (limited to 'src'):
 src/vnet/classify/vnet_classify.h | 12 ++++--------
 src/vppinfra/vector_neon.h        | 20 ++++++++++++++++++++
 2 files changed, 24 insertions(+), 8 deletions(-)
diff --git a/src/vnet/classify/vnet_classify.h b/src/vnet/classify/vnet_classify.h
index 6cbbf10aa2e..791b8fd18b7 100644
--- a/src/vnet/classify/vnet_classify.h
+++ b/src/vnet/classify/vnet_classify.h
@@ -41,10 +41,6 @@ extern vlib_node_registration_t ip6_classify_node;
 
 #define CLASSIFY_TRACE 0
 
-#ifdef CLIB_HAVE_VEC128
-#define CLASSIFY_USE_SSE //Allow usage of SSE operations
-#endif
-
 #define U32X4_ALIGNED(p) PREDICT_TRUE((((intptr_t)p) & 0xf) == 0)
 
 /*
@@ -233,7 +229,7 @@ vnet_classify_hash_packet_inline (vnet_classify_table_t * t, u8 * h)
   ASSERT (t);
   mask = t->mask;
 
-#ifdef CLASSIFY_USE_SSE
+#ifdef CLIB_HAVE_VEC128
   if (U32X4_ALIGNED (h))
     { //SSE can't handle unaligned data
       u32x4 *data = (u32x4 *) h;
@@ -259,7 +255,7 @@ vnet_classify_hash_packet_inline (vnet_classify_table_t * t, u8 * h)
 	}
     }
   else
-#endif /* CLASSIFY_USE_SSE */
+#endif /* CLIB_HAVE_VEC128 */
     {
       u32 skip_u64 = t->skip_n_vectors * 2;
       u64 *data64 = (u64 *) h;
@@ -407,7 +403,7 @@ vnet_classify_find_entry_inline (vnet_classify_table_t * t,
 
   v = vnet_classify_entry_at_index (t, v, value_index);
 
-#ifdef CLASSIFY_USE_SSE
+#ifdef CLIB_HAVE_VEC128
   if (U32X4_ALIGNED (h))
     {
      u32x4 *data = (u32x4 *) h;
@@ -452,7 +448,7 @@ vnet_classify_find_entry_inline (vnet_classify_table_t * t,
 	}
     }
   else
-#endif /* CLASSIFY_USE_SSE */
+#endif /* CLIB_HAVE_VEC128 */
     {
      u32 skip_u64 = t->skip_n_vectors * 2;
      u64 *data64 = (u64 *) h;
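
Note: the change above swaps the x86-only CLASSIFY_USE_SSE gate for the generic
CLIB_HAVE_VEC128 feature macro, so the same 128-bit vector fast path now compiles
wherever such a unit exists (SSE on x86, NEON on ARM), with the scalar u64 loop
kept as the fallback. A minimal sketch of that guard pattern follows;
example_xor_fold is a hypothetical name, not part of this patch, while
CLIB_HAVE_VEC128, U32X4_ALIGNED and the vector types come from the headers above:

    always_inline u64
    example_xor_fold (u8 * h, u32x4 mask)
    {
    #ifdef CLIB_HAVE_VEC128
      if (U32X4_ALIGNED (h))	/* the vector path needs 16-byte alignment */
        {
          u32x4 masked = *(u32x4 *) h & mask;
          return ((u64 *) & masked)[0] ^ ((u64 *) & masked)[1];
        }
      else
    #endif /* CLIB_HAVE_VEC128 */
        {
          /* scalar fallback: the same mask-and-fold done with two u64 loads */
          u64 *data64 = (u64 *) h;
          u64 *mask64 = (u64 *) & mask;
          return (data64[0] & mask64[0]) ^ (data64[1] & mask64[1]);
        }
    }

When the macro is undefined, only the braced fallback block remains, so the
function stays well-formed on platforms without a 128-bit vector unit.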
diff --git a/src/vppinfra/vector_neon.h b/src/vppinfra/vector_neon.h
index 331b8ed6f5d..3ed78360296 100644
--- a/src/vppinfra/vector_neon.h
+++ b/src/vppinfra/vector_neon.h
@@ -86,6 +86,26 @@ u16x8_zero_byte_mask (u16x8 input)
   return (u32) (vgetq_lane_u64 (merge3, 1) << 8) + vgetq_lane_u64 (merge3, 0);
 }
 
+always_inline u32
+u8x16_zero_byte_mask (u8x16 input)
+{
+  return u16x8_zero_byte_mask ((u16x8) input);
+}
+
+always_inline u32
+u32x4_zero_byte_mask (u32x4 input)
+{
+  return u16x8_zero_byte_mask ((u16x8) input);
+}
+
+always_inline u32
+u64x2_zero_byte_mask (u64x2 input)
+{
+  return u16x8_zero_byte_mask ((u16x8) input);
+}
+
+
+
 #endif /* included_vector_neon_h */
 
 /*
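
Note: the three new NEON wrappers reuse the existing u16x8_zero_byte_mask by
reinterpreting the same 128-bit register, giving ARM builds the
u8x16/u32x4/u64x2 variants that the CLIB_HAVE_VEC128 path in vnet_classify
expects. A hedged usage sketch follows; example_vector_is_zero is a
hypothetical helper, and it assumes the mask convention of the corresponding
x86 helpers, i.e. all 16 mask bits set when every byte of the input is zero:

    always_inline int
    example_vector_is_zero (u32x4 v)
    {
      /* an all-zero vector sets every bit of the 16-bit zero-byte mask */
      return u32x4_zero_byte_mask (v) == 0xffff;
    }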