author    Dave Barach <dave@barachs.net>  2018-07-19 12:11:16 -0400
committer Damjan Marion <dmarion@me.com>  2018-07-20 17:38:35 +0000
commit    508498f74d2df98e70a961d030cf0128a63a926d (patch)
tree      c2b9b8ddf109cc9086ac39adac812d677a58e578 /src/vppinfra/bihash_template.h
parent    13637632b87938a055618f17ed21b2a54b02459d (diff)
Fine-grained add / delete locking

Add a bucket-level lock bit. Use a spinlock only when actually
allocating, freeing, or splitting a bucket. Should improve
multi-thread add/del performance.

Change-Id: I3e40e2a8371685457f340d6584dea14e3207f2b0
Signed-off-by: Dave Barach <dave@barachs.net>
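The mechanism is easy to see in the diff below: the per-bucket lock is a single bit carved out of the bucket's 64-bit header word, and a writer takes it by compare-and-swapping the whole word from "lock clear" to "lock set", so the offset, log2_pages and refcnt fields ride along unchanged. As a rough standalone sketch of the same technique, written against a hypothetical demo_bihash_bucket_t layout and GCC/Clang atomic builtins rather than the real BV()/BVT() template macros, the lock/unlock pair might look like this:

#include <stdint.h>

/* Simplified stand-in for the templated bucket: the lock bit lives in
 * the same 64-bit word as the offset and bookkeeping fields. */
typedef struct
{
  union
  {
    struct
    {
      uint64_t offset:36;
      uint64_t lock:1;
      uint64_t linear_search:1;
      uint64_t log2_pages:8;
      int64_t refcnt:16;
    };
    uint64_t as_u64;
  };
} demo_bihash_bucket_t;

/* Spin until we swap the bucket word from "lock clear" to "lock set".
 * Re-reading b->as_u64 each iteration preserves the other fields. */
static inline void
demo_bucket_lock (demo_bihash_bucket_t * b)
{
  demo_bihash_bucket_t unlocked, locked;
  do
    {
      locked.as_u64 = unlocked.as_u64 = b->as_u64;
      unlocked.lock = 0;
      locked.lock = 1;
    }
  while (!__atomic_compare_exchange_n (&b->as_u64, &unlocked.as_u64,
                                       locked.as_u64, 1 /* weak */,
                                       __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE));
}

static inline void
demo_bucket_unlock (demo_bihash_bucket_t * b)
{
  /* Make the bucket updates visible before dropping the lock bit. */
  __atomic_thread_fence (__ATOMIC_RELEASE);
  b->lock = 0;
}

/* Typical writer-side usage:
 *   demo_bucket_lock (b);
 *   ... add or delete the key/value pair in this bucket ...
 *   demo_bucket_unlock (b);
 */

With the bucket bit handling per-bucket mutual exclusion, the table-wide alloc_lock introduced by the patch only needs to be held while actually allocating, freeing, or splitting a bucket, so adds and deletes that land on different buckets no longer serialize on a single writer lock.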
Diffstat (limited to 'src/vppinfra/bihash_template.h')
-rw-r--r--  src/vppinfra/bihash_template.h | 220
1 file changed, 33 insertions(+), 187 deletions(-)
diff --git a/src/vppinfra/bihash_template.h b/src/vppinfra/bihash_template.h
index eb50d32d521..6303992bc57 100644
--- a/src/vppinfra/bihash_template.h
+++ b/src/vppinfra/bihash_template.h
@@ -49,9 +49,7 @@ typedef struct BV (clib_bihash_value)
};
} BVT (clib_bihash_value);
-#if BIHASH_KVP_CACHE_SIZE > 5
-#error Requested KVP cache LRU data exceeds 16 bits
-#endif
+#define BIHASH_BUCKET_OFFSET_BITS 36
typedef struct
{
@@ -59,28 +57,23 @@ typedef struct
{
struct
{
- u64 offset:37;
+ u64 offset:BIHASH_BUCKET_OFFSET_BITS;
+ u64 lock:1;
u64 linear_search:1;
u64 log2_pages:8;
i64 refcnt:16;
};
u64 as_u64;
};
-#if BIHASH_KVP_CACHE_SIZE > 0
- u16 cache_lru;
- BVT (clib_bihash_kv) cache[BIHASH_KVP_CACHE_SIZE];
-#endif
} BVT (clib_bihash_bucket);
-#if BIHASH_KVP_CACHE_SIZE == 0
STATIC_ASSERT_SIZEOF (BVT (clib_bihash_bucket), sizeof (u64));
-#endif
typedef struct
{
BVT (clib_bihash_value) * values;
BVT (clib_bihash_bucket) * buckets;
- volatile u32 *writer_lock;
+ volatile u32 *alloc_lock;
BVT (clib_bihash_value) ** working_copies;
int *working_copy_lengths;
@@ -110,127 +103,38 @@ typedef struct
} BVT (clib_bihash);
-
-static inline void
-BV (clib_bihash_update_lru) (BVT (clib_bihash_bucket) * b, u8 slot)
+static inline void BV (clib_bihash_alloc_lock) (BVT (clib_bihash) * h)
{
-#if BIHASH_KVP_CACHE_SIZE > 1
- u16 value, tmp, mask;
- u8 found_lru_pos;
- u16 save_hi;
-
- ASSERT (slot < BIHASH_KVP_CACHE_SIZE);
-
- /* First, find the slot in cache_lru */
- mask = slot;
- if (BIHASH_KVP_CACHE_SIZE > 1)
- mask |= slot << 3;
- if (BIHASH_KVP_CACHE_SIZE > 2)
- mask |= slot << 6;
- if (BIHASH_KVP_CACHE_SIZE > 3)
- mask |= slot << 9;
- if (BIHASH_KVP_CACHE_SIZE > 4)
- mask |= slot << 12;
-
- value = b->cache_lru;
- tmp = value ^ mask;
-
- /* Already the most-recently used? */
- if ((tmp & 7) == 0)
- return;
-
- found_lru_pos = ((tmp & (7 << 3)) == 0) ? 1 : 0;
- if (BIHASH_KVP_CACHE_SIZE > 2)
- found_lru_pos = ((tmp & (7 << 6)) == 0) ? 2 : found_lru_pos;
- if (BIHASH_KVP_CACHE_SIZE > 3)
- found_lru_pos = ((tmp & (7 << 9)) == 0) ? 3 : found_lru_pos;
- if (BIHASH_KVP_CACHE_SIZE > 4)
- found_lru_pos = ((tmp & (7 << 12)) == 0) ? 4 : found_lru_pos;
-
- ASSERT (found_lru_pos);
-
- /* create a mask to kill bits in or above slot */
- mask = 0xFFFF << found_lru_pos;
- mask <<= found_lru_pos;
- mask <<= found_lru_pos;
- mask ^= 0xFFFF;
- tmp = value & mask;
-
- /* Save bits above slot */
- mask ^= 0xFFFF;
- mask <<= 3;
- save_hi = value & mask;
-
- value = save_hi | (tmp << 3) | slot;
-
- b->cache_lru = value;
-#endif
-}
-
-void
-BV (clib_bihash_update_lru_not_inline) (BVT (clib_bihash_bucket) * b,
- u8 slot);
-
-static inline u8 BV (clib_bihash_get_lru) (BVT (clib_bihash_bucket) * b)
-{
-#if BIHASH_KVP_CACHE_SIZE > 0
- return (b->cache_lru >> (3 * (BIHASH_KVP_CACHE_SIZE - 1))) & 7;
-#else
- return 0;
-#endif
+ while (__atomic_test_and_set (h->alloc_lock, __ATOMIC_ACQUIRE))
+ ;
}
-static inline void BV (clib_bihash_reset_cache) (BVT (clib_bihash_bucket) * b)
+static inline void BV (clib_bihash_alloc_unlock) (BVT (clib_bihash) * h)
{
-#if BIHASH_KVP_CACHE_SIZE > 0
- u16 initial_lru_value;
-
- memset (b->cache, 0xff, sizeof (b->cache));
-
- /*
- * We'll want the cache to be loaded from slot 0 -> slot N, so
- * the initial LRU order is reverse index order.
- */
- if (BIHASH_KVP_CACHE_SIZE == 1)
- initial_lru_value = 0;
- else if (BIHASH_KVP_CACHE_SIZE == 2)
- initial_lru_value = (0 << 3) | (1 << 0);
- else if (BIHASH_KVP_CACHE_SIZE == 3)
- initial_lru_value = (0 << 6) | (1 << 3) | (2 << 0);
- else if (BIHASH_KVP_CACHE_SIZE == 4)
- initial_lru_value = (0 << 9) | (1 << 6) | (2 << 3) | (3 << 0);
- else if (BIHASH_KVP_CACHE_SIZE == 5)
- initial_lru_value = (0 << 12) | (1 << 9) | (2 << 6) | (3 << 3) | (4 << 0);
-
- b->cache_lru = initial_lru_value;
-#endif
+ __atomic_clear (h->alloc_lock, __ATOMIC_RELEASE);
}
-static inline int BV (clib_bihash_lock_bucket) (BVT (clib_bihash_bucket) * b)
+static inline void BV (clib_bihash_lock_bucket) (BVT (clib_bihash_bucket) * b)
{
-#if BIHASH_KVP_CACHE_SIZE > 0
- u16 cache_lru_bit;
- u16 rv;
-
- cache_lru_bit = 1 << 15;
+ BVT (clib_bihash_bucket) unlocked_bucket, locked_bucket;
- rv = __sync_fetch_and_or (&b->cache_lru, cache_lru_bit);
- /* Was already locked? */
- if (rv & (1 << 15))
- return 0;
-#endif
- return 1;
+ do
+ {
+ locked_bucket.as_u64 = unlocked_bucket.as_u64 = b->as_u64;
+ unlocked_bucket.lock = 0;
+ locked_bucket.lock = 1;
+ }
+ while (__atomic_compare_exchange_n (&b->as_u64, &unlocked_bucket.as_u64,
+ locked_bucket.as_u64, 1 /* weak */ ,
+ __ATOMIC_ACQUIRE,
+ __ATOMIC_ACQUIRE) == 0);
}
static inline void BV (clib_bihash_unlock_bucket)
(BVT (clib_bihash_bucket) * b)
{
-#if BIHASH_KVP_CACHE_SIZE > 0
- u16 cache_lru;
-
- cache_lru = b->cache_lru & ~(1 << 15);
- b->cache_lru = cache_lru;
-#endif
+ CLIB_MEMORY_BARRIER ();
+ b->lock = 0;
}
static inline void *BV (clib_bihash_get_value) (BVT (clib_bihash) * h,
@@ -245,7 +149,8 @@ static inline void *BV (clib_bihash_get_value) (BVT (clib_bihash) * h,
static inline int BV (clib_bihash_bucket_is_empty)
(BVT (clib_bihash_bucket) * b)
{
- return b->as_u64 == 0;
+ /* Note: applied to locked buckets, test offset */
+ return b->offset == 0;
}
static inline uword BV (clib_bihash_get_offset) (BVT (clib_bihash) * h,
@@ -286,9 +191,6 @@ static inline int BV (clib_bihash_search_inline_with_hash)
u32 bucket_index;
BVT (clib_bihash_value) * v;
BVT (clib_bihash_bucket) * b;
-#if BIHASH_KVP_CACHE_SIZE > 0
- BVT (clib_bihash_kv) * kvp;
-#endif
int i, limit;
bucket_index = hash & (h->nbuckets - 1);
@@ -297,23 +199,12 @@ static inline int BV (clib_bihash_search_inline_with_hash)
if (PREDICT_FALSE (BV (clib_bihash_bucket_is_empty) (b)))
return -1;
-#if BIHASH_KVP_CACHE_SIZE > 0
- /* Check the cache, if not currently locked */
- if (PREDICT_TRUE ((b->cache_lru & (1 << 15)) == 0))
+ if (PREDICT_FALSE (b->lock))
{
- limit = BIHASH_KVP_CACHE_SIZE;
- kvp = b->cache;
- for (i = 0; i < limit; i++)
- {
- if (BV (clib_bihash_key_compare) (kvp[i].key, key_result->key))
- {
- *key_result = kvp[i];
- h->cache_hits++;
- return 0;
- }
- }
+ volatile BVT (clib_bihash_bucket) * bv = b;
+ while (bv->lock)
+ ;
}
-#endif
hash >>= h->log2_nbuckets;
@@ -330,21 +221,6 @@ static inline int BV (clib_bihash_search_inline_with_hash)
if (BV (clib_bihash_key_compare) (v->kvp[i].key, key_result->key))
{
*key_result = v->kvp[i];
-
-#if BIHASH_KVP_CACHE_SIZE > 0
- u8 cache_slot;
- /* Try to lock the bucket */
- if (BV (clib_bihash_lock_bucket) (b))
- {
- cache_slot = BV (clib_bihash_get_lru) (b);
- b->cache[cache_slot] = v->kvp[i];
- BV (clib_bihash_update_lru) (b, cache_slot);
-
- /* Unlock the bucket */
- BV (clib_bihash_unlock_bucket) (b);
- h->cache_misses++;
- }
-#endif
return 0;
}
}
@@ -401,9 +277,6 @@ static inline int BV (clib_bihash_search_inline_2_with_hash)
u32 bucket_index;
BVT (clib_bihash_value) * v;
BVT (clib_bihash_bucket) * b;
-#if BIHASH_KVP_CACHE_SIZE > 0
- BVT (clib_bihash_kv) * kvp;
-#endif
int i, limit;
ASSERT (valuep);
@@ -414,23 +287,12 @@ static inline int BV (clib_bihash_search_inline_2_with_hash)
if (PREDICT_FALSE (BV (clib_bihash_bucket_is_empty) (b)))
return -1;
- /* Check the cache, if currently unlocked */
-#if BIHASH_KVP_CACHE_SIZE > 0
- if (PREDICT_TRUE ((b->cache_lru & (1 << 15)) == 0))
+ if (PREDICT_FALSE (b->lock))
{
- limit = BIHASH_KVP_CACHE_SIZE;
- kvp = b->cache;
- for (i = 0; i < limit; i++)
- {
- if (BV (clib_bihash_key_compare) (kvp[i].key, search_key->key))
- {
- *valuep = kvp[i];
- h->cache_hits++;
- return 0;
- }
- }
+ volatile BVT (clib_bihash_bucket) * bv = b;
+ while (bv->lock)
+ ;
}
-#endif
hash >>= h->log2_nbuckets;
v = BV (clib_bihash_get_value) (h, b->offset);
@@ -446,22 +308,6 @@ static inline int BV (clib_bihash_search_inline_2_with_hash)
if (BV (clib_bihash_key_compare) (v->kvp[i].key, search_key->key))
{
*valuep = v->kvp[i];
-
-#if BIHASH_KVP_CACHE_SIZE > 0
- u8 cache_slot;
-
- /* Try to lock the bucket */
- if (BV (clib_bihash_lock_bucket) (b))
- {
- cache_slot = BV (clib_bihash_get_lru) (b);
- b->cache[cache_slot] = v->kvp[i];
- BV (clib_bihash_update_lru) (b, cache_slot);
-
- /* Reenable the cache */
- BV (clib_bihash_unlock_bucket) (b);
- h->cache_misses++;
- }
-#endif
return 0;
}
}