-rw-r--r--   src/plugins/geneve/decap.c        16
-rw-r--r--   src/plugins/geneve/geneve.h        5
-rw-r--r--   src/plugins/gtpu/gtpu.h            5
-rw-r--r--   src/plugins/gtpu/gtpu_decap.c     16
-rw-r--r--   src/vnet/ip/vtep.h                10
-rw-r--r--   src/vnet/vxlan-gpe/decap.c        16
-rw-r--r--   src/vnet/vxlan-gpe/vxlan_gpe.h     5
-rw-r--r--   src/vnet/vxlan/decap.c            17
-rw-r--r--   src/vnet/vxlan/vxlan.h             6
9 files changed, 49 insertions, 47 deletions
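
The change is the same in all four tunnel plugins: the AVX-512 VTEP cache (vtep4_cache_t vtep4_u512) moves out of the bypass nodes, where it was a stack variable cleared with clib_memset at the start of every frame, and into the geneve/gtpu/vxlan-gpe/vxlan main structs, so its contents persist from one frame to the next; vtep4_cache_t and vtep4_check_vector are also now compiled only under CLIB_HAVE_VEC512. The standalone sketch below uses hypothetical names (tiny_cache_t, tiny_main_t), not the real VPP types, and only illustrates why the placement matters: a per-frame stack cache starts cold on every frame, while a cache kept in a long-lived struct can hit on a key it saw in an earlier frame.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for vtep4_cache_t: the last 8 keys seen. */
typedef struct
{
  uint64_t keys[8];
  uint8_t idx;
} tiny_cache_t;

/* Return 1 on a cache hit; on a miss, remember the key and return 0. */
static int
cache_lookup_or_add (tiny_cache_t * c, uint64_t key)
{
  for (int i = 0; i < 8; i++)
    if (c->keys[i] == key)
      return 1;
  c->keys[c->idx] = key;
  c->idx = (c->idx + 1) & 0x7;
  return 0;
}

/* Old shape: the cache is a stack local, so every frame starts cold. */
static int
frame_with_stack_cache (uint64_t key)
{
  tiny_cache_t c;
  memset (&c, 0, sizeof (c));
  return cache_lookup_or_add (&c, key);
}

/* New shape: the cache is a member of a long-lived "main" struct. */
typedef struct
{
  tiny_cache_t vtep4_u512;
} tiny_main_t;

static int
frame_with_persistent_cache (tiny_main_t * tm, uint64_t key)
{
  return cache_lookup_or_add (&tm->vtep4_u512, key);
}

int
main (void)
{
  uint64_t key = 0x0a0a0a0a00000001ULL; /* made-up address/fib-index key */
  tiny_main_t tm;
  memset (&tm, 0, sizeof (tm));

  /* Two consecutive "frames" carrying the same VTEP key. */
  printf ("stack cache:      %d %d\n",
          frame_with_stack_cache (key), frame_with_stack_cache (key));
  printf ("persistent cache: %d %d\n",
          frame_with_persistent_cache (&tm, key),
          frame_with_persistent_cache (&tm, key));
  return 0;
}

Built with any C compiler, the first line prints two misses and the second a miss followed by a hit, which is the behavior the patch buys for repeated VTEP addresses across frames.
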
diff --git a/src/plugins/geneve/decap.c b/src/plugins/geneve/decap.c
index f42c0a88a5a..bd189913f71 100644
--- a/src/plugins/geneve/decap.c
+++ b/src/plugins/geneve/decap.c
@@ -870,10 +870,6 @@ ip_geneve_bypass_inline (vlib_main_t * vm,
vtep6_key_t last_vtep6; /* last IPv6 address / fib index
matching a local VTEP address */
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
-#ifdef CLIB_HAVE_VEC512
- vtep4_cache_t vtep4_u512;
- clib_memset (&vtep4_u512, 0, sizeof (vtep4_u512));
-#endif
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
@@ -968,8 +964,8 @@ ip_geneve_bypass_inline (vlib_main_t * vm,
if (is_ip4)
{
#ifdef CLIB_HAVE_VEC512
- if (!vtep4_check_vector
- (&vxm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
+ if (!vtep4_check_vector (&vxm->vtep_table, b0, ip40, &last_vtep4,
+ &vxm->vtep4_u512))
#else
if (!vtep4_check (&vxm->vtep_table, b0, ip40, &last_vtep4))
#endif
@@ -1051,8 +1047,8 @@ ip_geneve_bypass_inline (vlib_main_t * vm,
if (is_ip4)
{
#ifdef CLIB_HAVE_VEC512
- if (!vtep4_check_vector
- (&vxm->vtep_table, b1, ip41, &last_vtep4, &vtep4_u512))
+ if (!vtep4_check_vector (&vxm->vtep_table, b1, ip41, &last_vtep4,
+ &vxm->vtep4_u512))
#else
if (!vtep4_check (&vxm->vtep_table, b1, ip41, &last_vtep4))
#endif
@@ -1171,8 +1167,8 @@ ip_geneve_bypass_inline (vlib_main_t * vm,
if (is_ip4)
{
#ifdef CLIB_HAVE_VEC512
- if (!vtep4_check_vector
- (&vxm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
+ if (!vtep4_check_vector (&vxm->vtep_table, b0, ip40, &last_vtep4,
+ &vxm->vtep4_u512))
#else
if (!vtep4_check (&vxm->vtep_table, b0, ip40, &last_vtep4))
#endif
diff --git a/src/plugins/geneve/geneve.h b/src/plugins/geneve/geneve.h
index d41a49a7ff6..0cc14214b9b 100644
--- a/src/plugins/geneve/geneve.h
+++ b/src/plugins/geneve/geneve.h
@@ -186,6 +186,11 @@ typedef struct
vnet_main_t *vnet_main;
u16 msg_id_base;
+  /* cache for the last 8 geneve tunnel VTEP keys */
+#ifdef CLIB_HAVE_VEC512
+ vtep4_cache_t vtep4_u512;
+#endif
+
} geneve_main_t;
extern geneve_main_t geneve_main;
diff --git a/src/plugins/gtpu/gtpu.h b/src/plugins/gtpu/gtpu.h
index 8f3b654bcc8..59e340148fb 100644
--- a/src/plugins/gtpu/gtpu.h
+++ b/src/plugins/gtpu/gtpu.h
@@ -236,6 +236,11 @@ typedef struct
vlib_main_t *vlib_main;
vnet_main_t *vnet_main;
u32 flow_id_start;
+  /* cache for the last 8 gtpu tunnel VTEP keys */
+#ifdef CLIB_HAVE_VEC512
+ vtep4_cache_t vtep4_u512;
+#endif
+
} gtpu_main_t;
extern gtpu_main_t gtpu_main;
diff --git a/src/plugins/gtpu/gtpu_decap.c b/src/plugins/gtpu/gtpu_decap.c
index 974ae8a8190..5657c08031b 100644
--- a/src/plugins/gtpu/gtpu_decap.c
+++ b/src/plugins/gtpu/gtpu_decap.c
@@ -804,10 +804,6 @@ ip_gtpu_bypass_inline (vlib_main_t * vm,
vtep6_key_t last_vtep6; /* last IPv6 address / fib index
matching a local VTEP address */
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
-#ifdef CLIB_HAVE_VEC512
- vtep4_cache_t vtep4_u512;
- clib_memset (&vtep4_u512, 0, sizeof (vtep4_u512));
-#endif
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
@@ -901,8 +897,8 @@ ip_gtpu_bypass_inline (vlib_main_t * vm,
if (is_ip4)
{
#ifdef CLIB_HAVE_VEC512
- if (!vtep4_check_vector
- (&gtm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
+ if (!vtep4_check_vector (&gtm->vtep_table, b0, ip40, &last_vtep4,
+ &gtm->vtep4_u512))
#else
if (!vtep4_check (&gtm->vtep_table, b0, ip40, &last_vtep4))
#endif
@@ -980,8 +976,8 @@ ip_gtpu_bypass_inline (vlib_main_t * vm,
if (is_ip4)
{
#ifdef CLIB_HAVE_VEC512
- if (!vtep4_check_vector
- (&gtm->vtep_table, b1, ip41, &last_vtep4, &vtep4_u512))
+ if (!vtep4_check_vector (&gtm->vtep_table, b1, ip41, &last_vtep4,
+ &gtm->vtep4_u512))
#else
if (!vtep4_check (&gtm->vtep_table, b1, ip41, &last_vtep4))
#endif
@@ -1096,8 +1092,8 @@ ip_gtpu_bypass_inline (vlib_main_t * vm,
if (is_ip4)
{
#ifdef CLIB_HAVE_VEC512
- if (!vtep4_check_vector
- (&gtm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
+ if (!vtep4_check_vector (&gtm->vtep_table, b0, ip40, &last_vtep4,
+ &gtm->vtep4_u512))
#else
if (!vtep4_check (&gtm->vtep_table, b0, ip40, &last_vtep4))
#endif
diff --git a/src/vnet/ip/vtep.h b/src/vnet/ip/vtep.h
index 4cb06122f46..418d8439744 100644
--- a/src/vnet/ip/vtep.h
+++ b/src/vnet/ip/vtep.h
@@ -111,6 +111,7 @@ vtep4_check (vtep_table_t * t, vlib_buffer_t * b0, ip4_header_t * ip40,
return VTEP_CHECK_PASS;
}
+#ifdef CLIB_HAVE_VEC512
typedef struct
{
vtep4_key_t vtep4_cache[8];
@@ -128,7 +129,6 @@ vtep4_check_vector (vtep_table_t * t, vlib_buffer_t * b0, ip4_header_t * ip40,
if (PREDICT_TRUE (k4.as_u64 == last_k4->as_u64))
return VTEP_CHECK_PASS_UNCHANGED;
-#ifdef CLIB_HAVE_VEC512
u64x8 k4_u64x8 = u64x8_splat (k4.as_u64);
u64x8 cache = u64x8_load_unaligned (vtep4_u512->vtep4_cache);
u8 result = u64x8_mask_is_equal (cache, k4_u64x8);
@@ -138,20 +138,18 @@ vtep4_check_vector (vtep_table_t * t, vlib_buffer_t * b0, ip4_header_t * ip40,
vtep4_u512->vtep4_cache[count_trailing_zeros (result)].as_u64;
return VTEP_CHECK_PASS_UNCHANGED;
}
-#endif
if (PREDICT_FALSE (!hash_get (t->vtep4, k4.as_u64)))
return VTEP_CHECK_FAIL;
- last_k4->as_u64 = k4.as_u64;
-
-#ifdef CLIB_HAVE_VEC512
vtep4_u512->vtep4_cache[vtep4_u512->idx].as_u64 = k4.as_u64;
vtep4_u512->idx = (vtep4_u512->idx + 1) & 0x7;
-#endif
+
+ last_k4->as_u64 = k4.as_u64;
return VTEP_CHECK_PASS;
}
+#endif
always_inline u8
vtep6_check (vtep_table_t * t, vlib_buffer_t * b0, ip6_header_t * ip60,
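
With the reordering above, the whole body of vtep4_check_vector now sits under CLIB_HAVE_VEC512: check the last-seen key, compare the new key against all eight cached keys at once with u64x8_mask_is_equal, fall back to the hash table, and on a confirmed miss record the key in the ring before updating last_k4. The scalar model below (hypothetical names model_check, cache8_t; a plain loop in place of the vppinfra u64x8 intrinsics) sketches that control flow for illustration only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Return codes for this model only; not the real VTEP_CHECK_* values. */
enum { CHECK_FAIL = 0, CHECK_PASS = 1, CHECK_PASS_UNCHANGED = 2 };

typedef struct
{
  uint64_t cache[8];
  uint8_t idx;
} cache8_t;

/* Stand-in for the hash_get () lookup against t->vtep4. */
static bool
table_has_key (const uint64_t * table, int n, uint64_t key)
{
  for (int i = 0; i < n; i++)
    if (table[i] == key)
      return true;
  return false;
}

/* Scalar model of the vtep4_check_vector control flow; the real code does
   the 8-entry comparison with a single u64x8 compare and picks the matching
   slot with count_trailing_zeros on the resulting mask. */
static int
model_check (const uint64_t * table, int n, cache8_t * c,
             uint64_t * last_key, uint64_t key)
{
  if (key == *last_key)
    return CHECK_PASS_UNCHANGED;     /* same VTEP as the previous packet */

  for (int i = 0; i < 8; i++)
    if (c->cache[i] == key)
      {
        *last_key = key;             /* cache hit: refresh the shortcut */
        return CHECK_PASS_UNCHANGED;
      }

  if (!table_has_key (table, n, key))
    return CHECK_FAIL;               /* not a local VTEP address */

  c->cache[c->idx] = key;            /* confirmed miss: add to the ring */
  c->idx = (c->idx + 1) & 0x7;
  *last_key = key;
  return CHECK_PASS;
}

int
main (void)
{
  uint64_t table[] = { 0x0a00000200000001ULL }; /* made-up local VTEP key */
  cache8_t c;
  uint64_t last = ~0ULL;
  memset (&c, 0, sizeof (c));

  printf ("%d\n", model_check (table, 1, &c, &last, table[0])); /* pass */
  printf ("%d\n", model_check (table, 1, &c, &last, table[0])); /* unchanged */
  printf ("%d\n", model_check (table, 1, &c, &last, 42));       /* fail */
  return 0;
}

Running it prints 1, 2, 0 in this model's own numbering; the numeric values of the real VTEP_CHECK_* enumerators may differ.
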
diff --git a/src/vnet/vxlan-gpe/decap.c b/src/vnet/vxlan-gpe/decap.c
index aea793b82b6..035e8a3fd6a 100644
--- a/src/vnet/vxlan-gpe/decap.c
+++ b/src/vnet/vxlan-gpe/decap.c
@@ -793,10 +793,6 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
vtep6_key_t last_vtep6; /* last IPv6 address / fib index
matching a local VTEP address */
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
-#ifdef CLIB_HAVE_VEC512
- vtep4_cache_t vtep4_u512;
- clib_memset (&vtep4_u512, 0, sizeof (vtep4_u512));
-#endif
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
@@ -889,8 +885,8 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
if (is_ip4)
{
#ifdef CLIB_HAVE_VEC512
- if (!vtep4_check_vector
- (&ngm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
+ if (!vtep4_check_vector (&ngm->vtep_table, b0, ip40, &last_vtep4,
+ &ngm->vtep4_u512))
#else
if (!vtep4_check (&ngm->vtep_table, b0, ip40, &last_vtep4))
#endif
@@ -972,8 +968,8 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
if (is_ip4)
{
#ifdef CLIB_HAVE_VEC512
- if (!vtep4_check_vector
- (&ngm->vtep_table, b1, ip41, &last_vtep4, &vtep4_u512))
+ if (!vtep4_check_vector (&ngm->vtep_table, b1, ip41, &last_vtep4,
+ &ngm->vtep4_u512))
#else
if (!vtep4_check (&ngm->vtep_table, b1, ip41, &last_vtep4))
#endif
@@ -1091,8 +1087,8 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
if (is_ip4)
{
#ifdef CLIB_HAVE_VEC512
- if (!vtep4_check_vector
- (&ngm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
+ if (!vtep4_check_vector (&ngm->vtep_table, b0, ip40, &last_vtep4,
+ &ngm->vtep4_u512))
#else
if (!vtep4_check (&ngm->vtep_table, b0, ip40, &last_vtep4))
#endif
diff --git a/src/vnet/vxlan-gpe/vxlan_gpe.h b/src/vnet/vxlan-gpe/vxlan_gpe.h
index e246827c156..0f8250a1788 100644
--- a/src/vnet/vxlan-gpe/vxlan_gpe.h
+++ b/src/vnet/vxlan-gpe/vxlan_gpe.h
@@ -220,6 +220,11 @@ typedef struct
/** State convenience vnet_main_t */
vnet_main_t *vnet_main;
+  /* cache for the last 8 vxlan_gpe tunnel VTEP keys */
+#ifdef CLIB_HAVE_VEC512
+ vtep4_cache_t vtep4_u512;
+#endif
+
/** List of next nodes for the decap indexed on protocol */
uword decap_next_node_list[VXLAN_GPE_PROTOCOL_MAX];
} vxlan_gpe_main_t;
diff --git a/src/vnet/vxlan/decap.c b/src/vnet/vxlan/decap.c
index 4678aa31219..2ba24d881af 100644
--- a/src/vnet/vxlan/decap.c
+++ b/src/vnet/vxlan/decap.c
@@ -469,11 +469,6 @@ ip_vxlan_bypass_inline (vlib_main_t * vm,
last_tunnel_cache4 last4;
last_tunnel_cache6 last6;
-#ifdef CLIB_HAVE_VEC512
- vtep4_cache_t vtep4_u512;
- clib_memset (&vtep4_u512, 0, sizeof (vtep4_u512));
-#endif
-
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
@@ -584,8 +579,8 @@ ip_vxlan_bypass_inline (vlib_main_t * vm,
if (is_ip4)
{
#ifdef CLIB_HAVE_VEC512
- if (!vtep4_check_vector
- (&vxm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
+ if (!vtep4_check_vector (&vxm->vtep_table, b0, ip40, &last_vtep4,
+ &vxm->vtep4_u512))
#else
if (!vtep4_check (&vxm->vtep_table, b0, ip40, &last_vtep4))
#endif
@@ -672,8 +667,8 @@ ip_vxlan_bypass_inline (vlib_main_t * vm,
if (is_ip4)
{
#ifdef CLIB_HAVE_VEC512
- if (!vtep4_check_vector
- (&vxm->vtep_table, b1, ip41, &last_vtep4, &vtep4_u512))
+ if (!vtep4_check_vector (&vxm->vtep_table, b1, ip41, &last_vtep4,
+ &vxm->vtep4_u512))
#else
if (!vtep4_check (&vxm->vtep_table, b1, ip41, &last_vtep4))
#endif
@@ -799,8 +794,8 @@ ip_vxlan_bypass_inline (vlib_main_t * vm,
if (is_ip4)
{
#ifdef CLIB_HAVE_VEC512
- if (!vtep4_check_vector
- (&vxm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
+ if (!vtep4_check_vector (&vxm->vtep_table, b0, ip40, &last_vtep4,
+ &vxm->vtep4_u512))
#else
if (!vtep4_check (&vxm->vtep_table, b0, ip40, &last_vtep4))
#endif
diff --git a/src/vnet/vxlan/vxlan.h b/src/vnet/vxlan/vxlan.h
index 48c61461e7c..129bb43291b 100644
--- a/src/vnet/vxlan/vxlan.h
+++ b/src/vnet/vxlan/vxlan.h
@@ -189,6 +189,12 @@ typedef struct
/* Record used instances */
uword *instance_used;
u32 flow_id_start;
+
+  /* cache for the last 8 vxlan tunnel VTEP keys */
+#ifdef CLIB_HAVE_VEC512
+ vtep4_cache_t vtep4_u512;
+#endif
+
} vxlan_main_t;
extern vxlan_main_t vxlan_main;