author     Dave Barach <dave@barachs.net>      2019-03-07 10:06:52 -0500
committer  Damjan Marion <dmarion@me.com>      2019-03-07 19:39:28 +0000
commit     974931f162820bd4ba41a43cae9578171cf0e3d1 (patch)
tree       8025ada44b91bb3c2f863bc98e80894794e1fd3b /src/vnet
parent     f7c1a70ba502c83d6e5813344c01edbdfbff6738 (diff)
Honor vnet_rewrite_data_t data_bytes == 0
Avoid the cache-miss consequences of spraying [functionally harmless] junk into un-prefetched rewrite space. As things stand, several tunnel encap rewrites set rewrite data_bytes = 0 and take a performance hit due to unwanted speculative copying. The change should be performance-neutral in speed-path cases, which won't execute the added check.

Change-Id: Id83c0325e58c0f31631b4bae5a06457dfc7ed567
Signed-off-by: Dave Barach <dave@barachs.net>
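For orientation, here is a minimal, self-contained C sketch of the speculative-copy pattern the patch guards. It is not the actual VPP code: the type rewrite_hdr_t and the function apply_rewrite are hypothetical stand-ins for vnet_rewrite_header_t and _vnet_rewrite_one_header, and only mirror their shape. Without the early return, the fixed-size copy still touches the destination cache lines even when data_bytes == 0.

#include <stdint.h>
#include <string.h>

/* Simplified stand-in for VPP's PREDICT_FALSE branch hint. */
#define PREDICT_FALSE(x) __builtin_expect ((x) != 0, 0)

typedef struct
{
  uint16_t data_bytes;   /* number of valid rewrite bytes */
  uint8_t data[64];      /* rewrite string, stored right-justified here */
} rewrite_hdr_t;         /* hypothetical, cut-down header */

/* Prepend the rewrite string immediately before 'end_of_rewrite_space'.
 * Mirrors the shape of _vnet_rewrite_one_header, not its exact code:
 * a speculative fixed-size copy for the common case, memcpy otherwise. */
static inline void
apply_rewrite (const rewrite_hdr_t * h, uint8_t * end_of_rewrite_space,
               int most_likely_size)
{
  /* The check added by this commit: bail out before touching (and
   * thereby dirtying) un-prefetched destination cache lines when there
   * is nothing to copy, e.g. tunnel encaps that set data_bytes = 0. */
  if (PREDICT_FALSE (h->data_bytes == 0))
    return;

  if (h->data_bytes <= most_likely_size)
    {
      /* Speculative copy: always moves 'most_likely_size' bytes, which
       * is cheap when the rewrite is populated, but would spray
       * [functionally harmless] junk if the rewrite were empty and the
       * guard above were missing. */
      memcpy (end_of_rewrite_space - most_likely_size,
              h->data + sizeof (h->data) - most_likely_size,
              (size_t) most_likely_size);
      return;
    }

  /* Slow path: rewrite larger than the speculated size. */
  memcpy (end_of_rewrite_space - h->data_bytes,
          h->data + sizeof (h->data) - h->data_bytes, h->data_bytes);
}

The speed-path claim in the commit message follows from this shape: a rewrite with non-zero data_bytes skips the early return, and PREDICT_FALSE marks that branch as unlikely, so hot cases pay only for a predictable, not-taken branch.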
Diffstat (limited to 'src/vnet')
-rw-r--r--   src/vnet/adj/rewrite.h   16
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/src/vnet/adj/rewrite.h b/src/vnet/adj/rewrite.h
index 58d47e0c737..32781134ecc 100644
--- a/src/vnet/adj/rewrite.h
+++ b/src/vnet/adj/rewrite.h
@@ -189,7 +189,13 @@ _vnet_rewrite_one_header (vnet_rewrite_header_t * h0,
clib_memcpy (d, s, sizeof (eh_copy_t));
return;
}
-
+ /*
+ * Stop now if the data_bytes field is zero, to avoid the cache
+ * miss consequences of spraying [functionally harmless] junk into
+ * un-prefetched rewrite space.
+ */
+ if (PREDICT_FALSE (h0->data_bytes == 0))
+ return;
#define _(i) \
do { \
  if (most_likely_size > ((i)-1)*sizeof (vnet_rewrite_data_t)) \
@@ -244,6 +250,14 @@ _vnet_rewrite_two_headers (vnet_rewrite_header_t * h0,
return;
}
+ /*
+ * Stop now if both rewrite data_bytes fields are zero, to avoid the cache
+ * miss consequences of spraying [functionally harmless] junk into
+ * un-prefetched rewrite space.
+ */
+ if (PREDICT_FALSE (h0->data_bytes + h1->data_bytes == 0))
+ return;
+
#define _(i) \
do { \
if (most_likely_size > ((i)-1)*sizeof (vnet_rewrite_data_t)) \
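One design note visible in the second hunk: the two-header variant returns early only when both rewrites are empty (h0->data_bytes + h1->data_bytes == 0). A mixed pair, with one empty and one populated rewrite, still takes the speculative copy path; only the all-empty case gets the early return.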