Author:    Junfeng Wang <Drenfong.Wang@intel.com>   2021-03-09 16:44:57 +0800
Committer: John Lo <lojultra2020@outlook.com>       2021-03-19 21:34:16 +0000
Commit:    290526e3c72888ac05928ed0a6dddee02f7df650
Tree:      5117191488c7107d6404abaaca97d76022e7e759
Parent:    162b70d50aaf5daa744417818c01cae573580f6f
vxlan: add tunnel cache to graph node
Type: improvement

Signed-off-by: Drenfong Wong <drenfong.wang@intel.com>
Change-Id: Ia81aaa86fe071cbbed028cc85c5f3fa0f1940a0f
Diffstat (limited to 'src/vnet')
-rw-r--r--  src/vnet/ip/vtep.h             | 10
-rw-r--r--  src/vnet/vxlan-gpe/decap.c     | 16
-rw-r--r--  src/vnet/vxlan-gpe/vxlan_gpe.h |  5
-rw-r--r--  src/vnet/vxlan/decap.c         | 17
-rw-r--r--  src/vnet/vxlan/vxlan.h         |  6
5 files changed, 27 insertions(+), 27 deletions(-)
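
The hunks below move the 8-slot VTEP key cache used by the AVX-512 fast path out of the bypass nodes' stack frames, where it was re-zeroed on every call of ip_vxlan_bypass_inline / ip_vxlan_gpe_bypass_inline, and into vxlan_main_t and vxlan_gpe_main_t, so its contents persist across frames; the #ifdef CLIB_HAVE_VEC512 guard in vtep.h now wraps vtep4_cache_t and vtep4_check_vector as a whole. As a rough illustration of the check itself, here is a scalar, self-contained sketch of the logic in vtep4_check_vector: the real code splats the key into a u64x8 register and compares all eight cache slots in one vector operation, while this sketch simply loops; the sketch's type and function names, the demo lookup callback, and main() are invented for the example and are not part of VPP.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define VTEP_CHECK_FAIL           0
#define VTEP_CHECK_PASS           1
#define VTEP_CHECK_PASS_UNCHANGED 2

/* Mirrors vtep4_cache_t: the 8 most recently matched keys, each a packed
   (IPv4 address, fib index) pair stored as one u64, plus a round-robin
   insertion index. */
typedef struct
{
  uint64_t keys[8];
  uint8_t idx;
} vtep4_cache_sketch_t;

/* Scalar stand-in for vtep4_check_vector(); `lookup` plays the role of
   the hash_get() probe into the full VTEP table. */
static uint8_t
vtep4_check_sketch (vtep4_cache_sketch_t *c, uint64_t key,
                    uint64_t *last_key, int (*lookup) (uint64_t))
{
  /* Same key as the previous packet: nothing to do. */
  if (key == *last_key)
    return VTEP_CHECK_PASS_UNCHANGED;

  /* The vector code does this comparison for all 8 slots in a single
     u64x8 compare and picks the hit with count_trailing_zeros(). */
  for (int i = 0; i < 8; i++)
    if (c->keys[i] == key)
      {
        *last_key = key;
        return VTEP_CHECK_PASS_UNCHANGED;
      }

  /* Cache miss: consult the full table. */
  if (!lookup (key))
    return VTEP_CHECK_FAIL;

  /* Known VTEP that was not cached yet: insert round-robin. */
  c->keys[c->idx] = key;
  c->idx = (c->idx + 1) & 0x7;
  *last_key = key;
  return VTEP_CHECK_PASS;
}

/* Illustrative table probe: pretend every even key is a local VTEP. */
static int
demo_lookup (uint64_t key)
{
  return (key & 1) == 0;
}

int
main (void)
{
  vtep4_cache_sketch_t cache;
  uint64_t last = ~0ULL;

  memset (&cache, 0, sizeof (cache));
  printf ("%d\n", vtep4_check_sketch (&cache, 42, &last, demo_lookup)); /* 1: pass, cached  */
  printf ("%d\n", vtep4_check_sketch (&cache, 42, &last, demo_lookup)); /* 2: unchanged     */
  printf ("%d\n", vtep4_check_sketch (&cache, 7,  &last, demo_lookup)); /* 0: not a VTEP    */
  return 0;
}

With the cache living in the main structs, the caller-side change in the decap nodes below is just passing &vxm->vtep4_u512 (or &ngm->vtep4_u512) instead of a zero-initialized local.
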
diff --git a/src/vnet/ip/vtep.h b/src/vnet/ip/vtep.h
index 4cb06122f46..418d8439744 100644
--- a/src/vnet/ip/vtep.h
+++ b/src/vnet/ip/vtep.h
@@ -111,6 +111,7 @@ vtep4_check (vtep_table_t * t, vlib_buffer_t * b0, ip4_header_t * ip40,
return VTEP_CHECK_PASS;
}
+#ifdef CLIB_HAVE_VEC512
typedef struct
{
vtep4_key_t vtep4_cache[8];
@@ -128,7 +129,6 @@ vtep4_check_vector (vtep_table_t * t, vlib_buffer_t * b0, ip4_header_t * ip40,
if (PREDICT_TRUE (k4.as_u64 == last_k4->as_u64))
return VTEP_CHECK_PASS_UNCHANGED;
-#ifdef CLIB_HAVE_VEC512
u64x8 k4_u64x8 = u64x8_splat (k4.as_u64);
u64x8 cache = u64x8_load_unaligned (vtep4_u512->vtep4_cache);
u8 result = u64x8_mask_is_equal (cache, k4_u64x8);
@@ -138,20 +138,18 @@ vtep4_check_vector (vtep_table_t * t, vlib_buffer_t * b0, ip4_header_t * ip40,
vtep4_u512->vtep4_cache[count_trailing_zeros (result)].as_u64;
return VTEP_CHECK_PASS_UNCHANGED;
}
-#endif
if (PREDICT_FALSE (!hash_get (t->vtep4, k4.as_u64)))
return VTEP_CHECK_FAIL;
- last_k4->as_u64 = k4.as_u64;
-
-#ifdef CLIB_HAVE_VEC512
vtep4_u512->vtep4_cache[vtep4_u512->idx].as_u64 = k4.as_u64;
vtep4_u512->idx = (vtep4_u512->idx + 1) & 0x7;
-#endif
+
+ last_k4->as_u64 = k4.as_u64;
return VTEP_CHECK_PASS;
}
+#endif
always_inline u8
vtep6_check (vtep_table_t * t, vlib_buffer_t * b0, ip6_header_t * ip60,
diff --git a/src/vnet/vxlan-gpe/decap.c b/src/vnet/vxlan-gpe/decap.c
index aea793b82b6..035e8a3fd6a 100644
--- a/src/vnet/vxlan-gpe/decap.c
+++ b/src/vnet/vxlan-gpe/decap.c
@@ -793,10 +793,6 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
vtep6_key_t last_vtep6; /* last IPv6 address / fib index
matching a local VTEP address */
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
-#ifdef CLIB_HAVE_VEC512
- vtep4_cache_t vtep4_u512;
- clib_memset (&vtep4_u512, 0, sizeof (vtep4_u512));
-#endif
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
@@ -889,8 +885,8 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
if (is_ip4)
{
#ifdef CLIB_HAVE_VEC512
- if (!vtep4_check_vector
- (&ngm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
+ if (!vtep4_check_vector (&ngm->vtep_table, b0, ip40, &last_vtep4,
+ &ngm->vtep4_u512))
#else
if (!vtep4_check (&ngm->vtep_table, b0, ip40, &last_vtep4))
#endif
@@ -972,8 +968,8 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
if (is_ip4)
{
#ifdef CLIB_HAVE_VEC512
- if (!vtep4_check_vector
- (&ngm->vtep_table, b1, ip41, &last_vtep4, &vtep4_u512))
+ if (!vtep4_check_vector (&ngm->vtep_table, b1, ip41, &last_vtep4,
+ &ngm->vtep4_u512))
#else
if (!vtep4_check (&ngm->vtep_table, b1, ip41, &last_vtep4))
#endif
@@ -1091,8 +1087,8 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
if (is_ip4)
{
#ifdef CLIB_HAVE_VEC512
- if (!vtep4_check_vector
- (&ngm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
+ if (!vtep4_check_vector (&ngm->vtep_table, b0, ip40, &last_vtep4,
+ &ngm->vtep4_u512))
#else
if (!vtep4_check (&ngm->vtep_table, b0, ip40, &last_vtep4))
#endif
diff --git a/src/vnet/vxlan-gpe/vxlan_gpe.h b/src/vnet/vxlan-gpe/vxlan_gpe.h
index e246827c156..0f8250a1788 100644
--- a/src/vnet/vxlan-gpe/vxlan_gpe.h
+++ b/src/vnet/vxlan-gpe/vxlan_gpe.h
@@ -220,6 +220,11 @@ typedef struct
/** State convenience vnet_main_t */
vnet_main_t *vnet_main;
+ /* cache for last 8 vxlan_gpe tunnels */
+#ifdef CLIB_HAVE_VEC512
+ vtep4_cache_t vtep4_u512;
+#endif
+
/** List of next nodes for the decap indexed on protocol */
uword decap_next_node_list[VXLAN_GPE_PROTOCOL_MAX];
} vxlan_gpe_main_t;
diff --git a/src/vnet/vxlan/decap.c b/src/vnet/vxlan/decap.c
index 4678aa31219..2ba24d881af 100644
--- a/src/vnet/vxlan/decap.c
+++ b/src/vnet/vxlan/decap.c
@@ -469,11 +469,6 @@ ip_vxlan_bypass_inline (vlib_main_t * vm,
last_tunnel_cache4 last4;
last_tunnel_cache6 last6;
-#ifdef CLIB_HAVE_VEC512
- vtep4_cache_t vtep4_u512;
- clib_memset (&vtep4_u512, 0, sizeof (vtep4_u512));
-#endif
-
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
@@ -584,8 +579,8 @@ ip_vxlan_bypass_inline (vlib_main_t * vm,
if (is_ip4)
{
#ifdef CLIB_HAVE_VEC512
- if (!vtep4_check_vector
- (&vxm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
+ if (!vtep4_check_vector (&vxm->vtep_table, b0, ip40, &last_vtep4,
+ &vxm->vtep4_u512))
#else
if (!vtep4_check (&vxm->vtep_table, b0, ip40, &last_vtep4))
#endif
@@ -672,8 +667,8 @@ ip_vxlan_bypass_inline (vlib_main_t * vm,
if (is_ip4)
{
#ifdef CLIB_HAVE_VEC512
- if (!vtep4_check_vector
- (&vxm->vtep_table, b1, ip41, &last_vtep4, &vtep4_u512))
+ if (!vtep4_check_vector (&vxm->vtep_table, b1, ip41, &last_vtep4,
+ &vxm->vtep4_u512))
#else
if (!vtep4_check (&vxm->vtep_table, b1, ip41, &last_vtep4))
#endif
@@ -799,8 +794,8 @@ ip_vxlan_bypass_inline (vlib_main_t * vm,
if (is_ip4)
{
#ifdef CLIB_HAVE_VEC512
- if (!vtep4_check_vector
- (&vxm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
+ if (!vtep4_check_vector (&vxm->vtep_table, b0, ip40, &last_vtep4,
+ &vxm->vtep4_u512))
#else
if (!vtep4_check (&vxm->vtep_table, b0, ip40, &last_vtep4))
#endif
diff --git a/src/vnet/vxlan/vxlan.h b/src/vnet/vxlan/vxlan.h
index 48c61461e7c..129bb43291b 100644
--- a/src/vnet/vxlan/vxlan.h
+++ b/src/vnet/vxlan/vxlan.h
@@ -189,6 +189,12 @@ typedef struct
/* Record used instances */
uword *instance_used;
u32 flow_id_start;
+
+ /* cache for last 8 vxlan tunnels */
+#ifdef CLIB_HAVE_VEC512
+ vtep4_cache_t vtep4_u512;
+#endif
+
} vxlan_main_t;
extern vxlan_main_t vxlan_main;