path: root/src/vppinfra/bihash_template.h
author    Dave Barach <dave@barachs.net>  2017-07-14 12:42:21 -0400
committer John Lo <loj@cisco.com>         2017-07-19 23:25:43 +0000
commit    908a5ea6e247b4a15f0ec7e8ee8ebff799abdc4f (patch)
tree      8fa18f75cce531e266535dd4bbddd24f0cc15c9a /src/vppinfra/bihash_template.h
parent    c4423229e2ed51084fb277570bfde097abaf7c97 (diff)
Add a bihash prefetchable bucket-level cache
According to Maciek, the easiest way to leverage the csit "performance trend" job is to actually merge the patch once verified. Manual testing indicates that the patch improves l2 path performance. Other use-cases are TBD. It's possible that we'll need to back out the patch depending on what happens.

Change-Id: Ic0a0363de35ef9be953ad7709c57c3936b73fd5a
Signed-off-by: Dave Barach <dave@barachs.net>
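For orientation before the diff: the new per-bucket cache holds up to BIHASH_KVP_CACHE_SIZE key/value pairs and tracks recency in the bucket's 16-bit cache_lru field, using 3 bits per slot (most-recently-used slot in bits 0..2, least-recently-used slot in the highest 3-bit group) and bit 15 as a cache-enable flag; five slots therefore exhaust the field, which is what the new #error guards. Below is a minimal standalone sketch of that layout, with hypothetical helper names (lru_victim, cache_enabled) that mirror clib_bihash_get_lru and the enable-bit handling but are not part of the patch:

/* Illustrative sketch of the 16-bit cache_lru layout introduced by this
 * patch; assumes BIHASH_KVP_CACHE_SIZE == 5.  Not part of the header. */
#include <assert.h>
#include <stdint.h>

#define KVP_CACHE_SIZE 5	/* 5 slots x 3 bits = 15 bits, bit 15 = enable */

/* Slot to refill next: the least-recently-used slot lives in the
 * highest-order 3-bit group, exactly as clib_bihash_get_lru reads it. */
static inline uint8_t
lru_victim (uint16_t cache_lru)
{
  return (cache_lru >> (3 * (KVP_CACHE_SIZE - 1))) & 7;
}

/* Bit 15 gates the cache, as clib_bihash_cache_enable_disable sets it. */
static inline int
cache_enabled (uint16_t cache_lru)
{
  return (cache_lru >> 15) & 1;
}

int
main (void)
{
  /* Initial order after clib_bihash_reset_cache: slot 0 is least recently
   * used, so the cache fills from slot 0 upward. */
  uint16_t lru = (0 << 12) | (1 << 9) | (2 << 6) | (3 << 3) | (4 << 0);
  assert (lru_victim (lru) == 0);
  assert (!cache_enabled (lru));
  lru |= 1 << 15;		/* enable the cache */
  assert (cache_enabled (lru));
  return 0;
}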
Diffstat (limited to 'src/vppinfra/bihash_template.h')
-rw-r--r--  src/vppinfra/bihash_template.h | 206
1 file changed, 183 insertions(+), 23 deletions(-)
diff --git a/src/vppinfra/bihash_template.h b/src/vppinfra/bihash_template.h
index 4ea14ff0267..feb6fb68797 100644
--- a/src/vppinfra/bihash_template.h
+++ b/src/vppinfra/bihash_template.h
@@ -48,12 +48,10 @@ typedef struct BV (clib_bihash_value)
};
} BVT (clib_bihash_value);
-/*
- * This is shared across all uses of the template, so it needs
- * a "personal" #include recursion block
- */
-#ifndef __defined_clib_bihash_bucket_t__
-#define __defined_clib_bihash_bucket_t__
+#if BIHASH_KVP_CACHE_SIZE > 5
+#error Requested KVP cache LRU data exceeds 16 bits
+#endif
+
typedef struct
{
union
@@ -62,36 +60,139 @@ typedef struct
{
u32 offset;
u8 linear_search;
- u8 pad[2];
u8 log2_pages;
+ u16 cache_lru;
};
u64 as_u64;
};
-} clib_bihash_bucket_t;
-#endif /* __defined_clib_bihash_bucket_t__ */
+ BVT (clib_bihash_kv) cache[BIHASH_KVP_CACHE_SIZE];
+} BVT (clib_bihash_bucket);
typedef struct
{
BVT (clib_bihash_value) * values;
- clib_bihash_bucket_t *buckets;
+ BVT (clib_bihash_bucket) * buckets;
volatile u32 *writer_lock;
BVT (clib_bihash_value) ** working_copies;
int *working_copy_lengths;
- clib_bihash_bucket_t saved_bucket;
+ BVT (clib_bihash_bucket) saved_bucket;
u32 nbuckets;
u32 log2_nbuckets;
u32 linear_buckets;
u8 *name;
+ u64 cache_hits;
+ u64 cache_misses;
+
BVT (clib_bihash_value) ** freelists;
void *mheap;
} BVT (clib_bihash);
-static inline void *BV (clib_bihash_get_value) (const BVT (clib_bihash) * h,
+static inline void
+BV (clib_bihash_update_lru) (BVT (clib_bihash_bucket) * b, u8 slot)
+{
+ u16 value, tmp, mask;
+ u8 found_lru_pos;
+ u16 save_hi;
+
+ if (BIHASH_KVP_CACHE_SIZE < 2)
+ return;
+
+ ASSERT (slot < BIHASH_KVP_CACHE_SIZE);
+
+ /* First, find the slot in cache_lru */
+ mask = slot;
+ if (BIHASH_KVP_CACHE_SIZE > 1)
+ mask |= slot << 3;
+ if (BIHASH_KVP_CACHE_SIZE > 2)
+ mask |= slot << 6;
+ if (BIHASH_KVP_CACHE_SIZE > 3)
+ mask |= slot << 9;
+ if (BIHASH_KVP_CACHE_SIZE > 4)
+ mask |= slot << 12;
+
+ value = b->cache_lru;
+ tmp = value ^ mask;
+
+ /* Already the most-recently used? */
+ if ((tmp & 7) == 0)
+ return;
+
+ found_lru_pos = ((tmp & (7 << 3)) == 0) ? 1 : 0;
+ if (BIHASH_KVP_CACHE_SIZE > 2)
+ found_lru_pos = ((tmp & (7 << 6)) == 0) ? 2 : found_lru_pos;
+ if (BIHASH_KVP_CACHE_SIZE > 3)
+ found_lru_pos = ((tmp & (7 << 9)) == 0) ? 3 : found_lru_pos;
+ if (BIHASH_KVP_CACHE_SIZE > 4)
+ found_lru_pos = ((tmp & (7 << 12)) == 0) ? 4 : found_lru_pos;
+
+ ASSERT (found_lru_pos);
+
+ /* create a mask to kill bits in or above slot */
+ mask = 0xFFFF << found_lru_pos;
+ mask <<= found_lru_pos;
+ mask <<= found_lru_pos;
+ mask ^= 0xFFFF;
+ tmp = value & mask;
+
+ /* Save bits above slot */
+ mask ^= 0xFFFF;
+ mask <<= 3;
+ save_hi = value & mask;
+
+ value = save_hi | (tmp << 3) | slot;
+
+ b->cache_lru = value;
+}
+
+void
+BV (clib_bihash_update_lru_not_inline) (BVT (clib_bihash_bucket) * b,
+ u8 slot);
+
+static inline u8 BV (clib_bihash_get_lru) (BVT (clib_bihash_bucket) * b)
+{
+ return (b->cache_lru >> (3 * (BIHASH_KVP_CACHE_SIZE - 1))) & 7;
+}
+
+static inline void BV (clib_bihash_reset_cache) (BVT (clib_bihash_bucket) * b)
+{
+ u16 initial_lru_value;
+
+ memset (b->cache, 0xff, sizeof (b->cache));
+
+ /*
+ * We'll want the cache to be loaded from slot 0 -> slot N, so
+ * the initial LRU order is reverse index order.
+ */
+ if (BIHASH_KVP_CACHE_SIZE == 1)
+ initial_lru_value = 0;
+ else if (BIHASH_KVP_CACHE_SIZE == 2)
+ initial_lru_value = (0 << 3) | (1 << 0);
+ else if (BIHASH_KVP_CACHE_SIZE == 3)
+ initial_lru_value = (0 << 6) | (1 << 3) | (2 << 0);
+ else if (BIHASH_KVP_CACHE_SIZE == 4)
+ initial_lru_value = (0 << 9) | (1 << 6) | (2 << 3) | (3 << 0);
+ else if (BIHASH_KVP_CACHE_SIZE == 5)
+ initial_lru_value = (0 << 12) | (1 << 9) | (2 << 6) | (3 << 3) | (4 << 0);
+
+ b->cache_lru = initial_lru_value;
+}
+
+static inline void BV (clib_bihash_cache_enable_disable)
+ (BVT (clib_bihash_bucket) * b, u8 enable)
+{
+ BVT (clib_bihash_bucket) tmp_b;
+ tmp_b.as_u64 = b->as_u64;
+ tmp_b.cache_lru &= 0x7FFF;
+ tmp_b.cache_lru |= enable << 15;
+ b->as_u64 = tmp_b.as_u64;
+}
+
+static inline void *BV (clib_bihash_get_value) (BVT (clib_bihash) * h,
uword offset)
{
u8 *hp = h->mheap;
@@ -100,7 +201,7 @@ static inline void *BV (clib_bihash_get_value) (const BVT (clib_bihash) * h,
return (void *) vp;
}
-static inline uword BV (clib_bihash_get_offset) (const BVT (clib_bihash) * h,
+static inline uword BV (clib_bihash_get_offset) (BVT (clib_bihash) * h,
void *v)
{
u8 *hp, *vp;
@@ -119,7 +220,7 @@ void BV (clib_bihash_free) (BVT (clib_bihash) * h);
int BV (clib_bihash_add_del) (BVT (clib_bihash) * h,
BVT (clib_bihash_kv) * add_v, int is_add);
-int BV (clib_bihash_search) (const BVT (clib_bihash) * h,
+int BV (clib_bihash_search) (BVT (clib_bihash) * h,
BVT (clib_bihash_kv) * search_v,
BVT (clib_bihash_kv) * return_v);
@@ -128,18 +229,19 @@ void BV (clib_bihash_foreach_key_value_pair) (BVT (clib_bihash) * h,
format_function_t BV (format_bihash);
format_function_t BV (format_bihash_kvp);
-
+format_function_t BV (format_bihash_lru);
static inline int BV (clib_bihash_search_inline)
- (const BVT (clib_bihash) * h, BVT (clib_bihash_kv) * kvp)
+ (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * key_result)
{
u64 hash;
u32 bucket_index;
BVT (clib_bihash_value) * v;
- clib_bihash_bucket_t *b;
+ BVT (clib_bihash_bucket) * b;
+ BVT (clib_bihash_kv) * kvp;
int i, limit;
- hash = BV (clib_bihash_hash) (kvp);
+ hash = BV (clib_bihash_hash) (key_result);
bucket_index = hash & (h->nbuckets - 1);
b = &h->buckets[bucket_index];
@@ -147,6 +249,22 @@ static inline int BV (clib_bihash_search_inline)
if (b->offset == 0)
return -1;
+ /* Check the cache, if currently enabled */
+ if (PREDICT_TRUE (b->cache_lru & (1 << 15)))
+ {
+ limit = BIHASH_KVP_CACHE_SIZE;
+ kvp = b->cache;
+ for (i = 0; i < limit; i++)
+ {
+ if (BV (clib_bihash_key_compare) (kvp[i].key, key_result->key))
+ {
+ *key_result = kvp[i];
+ h->cache_hits++;
+ return 0;
+ }
+ }
+ }
+
hash >>= h->log2_nbuckets;
v = BV (clib_bihash_get_value) (h, b->offset);
@@ -159,9 +277,22 @@ static inline int BV (clib_bihash_search_inline)
for (i = 0; i < limit; i++)
{
- if (BV (clib_bihash_key_compare) (v->kvp[i].key, kvp->key))
+ if (BV (clib_bihash_key_compare) (v->kvp[i].key, key_result->key))
{
- *kvp = v->kvp[i];
+ u8 cache_slot;
+ *key_result = v->kvp[i];
+
+ /* Shut off the cache */
+ BV (clib_bihash_cache_enable_disable) (b, 0);
+ CLIB_MEMORY_BARRIER ();
+
+ cache_slot = BV (clib_bihash_get_lru) (b);
+ b->cache[cache_slot] = v->kvp[i];
+ BV (clib_bihash_update_lru) (b, cache_slot);
+
+ /* Reenable the cache */
+ BV (clib_bihash_cache_enable_disable) (b, 1);
+ h->cache_misses++;
return 0;
}
}
@@ -169,13 +300,14 @@ static inline int BV (clib_bihash_search_inline)
}
static inline int BV (clib_bihash_search_inline_2)
- (const BVT (clib_bihash) * h,
+ (BVT (clib_bihash) * h,
BVT (clib_bihash_kv) * search_key, BVT (clib_bihash_kv) * valuep)
{
u64 hash;
u32 bucket_index;
BVT (clib_bihash_value) * v;
- clib_bihash_bucket_t *b;
+ BVT (clib_bihash_bucket) * b;
+ BVT (clib_bihash_kv) * kvp;
int i, limit;
ASSERT (valuep);
@@ -188,6 +320,22 @@ static inline int BV (clib_bihash_search_inline_2)
if (b->offset == 0)
return -1;
+ /* Check the cache, if currently enabled */
+ if (PREDICT_TRUE (b->cache_lru & (1 << 15)))
+ {
+ limit = BIHASH_KVP_CACHE_SIZE;
+ kvp = b->cache;
+ for (i = 0; i < limit; i++)
+ {
+ if (BV (clib_bihash_key_compare) (kvp[i].key, search_key->key))
+ {
+ *valuep = kvp[i];
+ h->cache_hits++;
+ return 0;
+ }
+ }
+ }
+
hash >>= h->log2_nbuckets;
v = BV (clib_bihash_get_value) (h, b->offset);
@@ -201,14 +349,26 @@ static inline int BV (clib_bihash_search_inline_2)
{
if (BV (clib_bihash_key_compare) (v->kvp[i].key, search_key->key))
{
+ u8 cache_slot;
*valuep = v->kvp[i];
+
+ /* Shut off the cache */
+ BV (clib_bihash_cache_enable_disable) (b, 0);
+ CLIB_MEMORY_BARRIER ();
+
+ cache_slot = BV (clib_bihash_get_lru) (b);
+ b->cache[cache_slot] = v->kvp[i];
+ BV (clib_bihash_update_lru) (b, cache_slot);
+
+ /* Reenable the cache */
+ BV (clib_bihash_cache_enable_disable) (b, 1);
+ h->cache_misses++;
return 0;
}
}
return -1;
}
-
#endif /* __included_bihash_template_h__ */
/** @endcond */
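
For completeness, a hedged usage sketch of the new lookup path: once the template is instantiated (for example as the 8_8 variant via vppinfra/bihash_8_8.h, which is outside this diff), clib_bihash_search_inline_8_8 first probes the bucket's cache when bit 15 of cache_lru is set and bumps cache_hits; otherwise it walks the bucket pages, refills the LRU victim slot, and bumps cache_misses. The table size and key values below are illustrative only:

/* Sketch only: assumes the 8_8 instantiation from <vppinfra/bihash_8_8.h>. */
#include <vppinfra/bihash_8_8.h>

static void
bihash_cache_demo (void)
{
  clib_bihash_8_8_t h;
  clib_bihash_kv_8_8_t kv;

  clib_bihash_init_8_8 (&h, "demo", 64 /* nbuckets */, 1 << 20 /* bytes */);

  kv.key = 42;
  kv.value = 0xfeedface;
  clib_bihash_add_del_8_8 (&h, &kv, 1 /* is_add */);

  /* First search misses the bucket cache, copies the result into the
   * LRU victim slot and increments h.cache_misses. */
  kv.key = 42;
  if (clib_bihash_search_inline_8_8 (&h, &kv) == 0)
    ASSERT (kv.value == 0xfeedface);

  /* A repeat search for the same key can then be served from b->cache
   * (h.cache_hits++) once the bucket's enable bit, bit 15 of cache_lru,
   * is set. */
  kv.key = 42;
  (void) clib_bihash_search_inline_8_8 (&h, &kv);

  clib_bihash_free_8_8 (&h);
}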