author | Luca Boccassi <luca.boccassi@gmail.com> | 2018-08-14 18:52:30 +0100
committer | Luca Boccassi <luca.boccassi@gmail.com> | 2018-08-14 18:53:17 +0100
commit | b63264c8342e6a1b6971c79550d2af2024b6a4de (patch)
tree | 83114aac64286fe616506c0b3dfaec2ab86ef835 /lib/librte_hash/rte_cuckoo_hash.c
parent | ca33590b6af032bff57d9cc70455660466a654b2 (diff)
New upstream version 18.08 (upstream/18.08)
Change-Id: I32fdf5e5016556d9c0a6d88ddaf1fc468961790a
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'lib/librte_hash/rte_cuckoo_hash.c')
-rw-r--r-- | lib/librte_hash/rte_cuckoo_hash.c | 700 |
1 file changed, 442 insertions, 258 deletions
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c index 9b1387b5..f7b86c8c 100644 --- a/lib/librte_hash/rte_cuckoo_hash.c +++ b/lib/librte_hash/rte_cuckoo_hash.c @@ -31,9 +31,6 @@ #include "rte_hash.h" #include "rte_cuckoo_hash.h" -#if defined(RTE_ARCH_X86) -#include "rte_cuckoo_hash_x86.h" -#endif TAILQ_HEAD(rte_hash_list, rte_tailq_entry); @@ -93,8 +90,10 @@ rte_hash_create(const struct rte_hash_parameters *params) void *buckets = NULL; char ring_name[RTE_RING_NAMESIZE]; unsigned num_key_slots; - unsigned hw_trans_mem_support = 0; unsigned i; + unsigned int hw_trans_mem_support = 0, multi_writer_support = 0; + unsigned int readwrite_concur_support = 0; + rte_hash_function default_hash_func = (rte_hash_function)rte_jhash; hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list); @@ -107,7 +106,6 @@ rte_hash_create(const struct rte_hash_parameters *params) /* Check for valid parameters */ if ((params->entries > RTE_HASH_ENTRIES_MAX) || (params->entries < RTE_HASH_BUCKET_ENTRIES) || - !rte_is_power_of_2(RTE_HASH_BUCKET_ENTRIES) || (params->key_len == 0)) { rte_errno = EINVAL; RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n"); @@ -118,21 +116,29 @@ rte_hash_create(const struct rte_hash_parameters *params) if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT) hw_trans_mem_support = 1; + if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD) + multi_writer_support = 1; + + if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) { + readwrite_concur_support = 1; + multi_writer_support = 1; + } + /* Store all keys and leave the first entry as a dummy entry for lookup_bulk */ - if (hw_trans_mem_support) + if (multi_writer_support) /* * Increase number of slots by total number of indices * that can be stored in the lcore caches * except for the first cache */ num_key_slots = params->entries + (RTE_MAX_LCORE - 1) * - LCORE_CACHE_SIZE + 1; + (LCORE_CACHE_SIZE - 1) + 1; else num_key_slots = params->entries + 1; snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name); /* Create ring (Dummy slot index is not enqueued) */ - r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots - 1), + r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots), params->socket_id, 0); if (r == NULL) { RTE_LOG(ERR, HASH, "memory allocation failed\n"); @@ -233,7 +239,7 @@ rte_hash_create(const struct rte_hash_parameters *params) h->cmp_jump_table_idx = KEY_OTHER_BYTES; #endif - if (hw_trans_mem_support) { + if (multi_writer_support) { h->local_free_slots = rte_zmalloc_socket(NULL, sizeof(struct lcore_cache) * RTE_MAX_LCORE, RTE_CACHE_LINE_SIZE, params->socket_id); @@ -261,6 +267,8 @@ rte_hash_create(const struct rte_hash_parameters *params) h->key_store = k; h->free_slots = r; h->hw_trans_mem_support = hw_trans_mem_support; + h->multi_writer_support = multi_writer_support; + h->readwrite_concur_support = readwrite_concur_support; #if defined(RTE_ARCH_X86) if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2)) @@ -271,24 +279,20 @@ rte_hash_create(const struct rte_hash_parameters *params) #endif h->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR; - /* Turn on multi-writer only with explicit flat from user and TM + /* Turn on multi-writer only with explicit flag from user and TM * support. 
*/ - if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD) { - if (h->hw_trans_mem_support) { - h->add_key = ADD_KEY_MULTIWRITER_TM; - } else { - h->add_key = ADD_KEY_MULTIWRITER; - h->multiwriter_lock = rte_malloc(NULL, - sizeof(rte_spinlock_t), - LCORE_CACHE_SIZE); - rte_spinlock_init(h->multiwriter_lock); - } - } else - h->add_key = ADD_KEY_SINGLEWRITER; + if (h->multi_writer_support) { + h->readwrite_lock = rte_malloc(NULL, sizeof(rte_rwlock_t), + RTE_CACHE_LINE_SIZE); + if (h->readwrite_lock == NULL) + goto err_unlock; + + rte_rwlock_init(h->readwrite_lock); + } /* Populate free slots ring. Entry zero is reserved for key misses. */ - for (i = 1; i < params->entries + 1; i++) + for (i = 1; i < num_key_slots; i++) rte_ring_sp_enqueue(r, (void *)((uintptr_t) i)); te->data = (void *) h; @@ -335,11 +339,10 @@ rte_hash_free(struct rte_hash *h) rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK); - if (h->hw_trans_mem_support) + if (h->multi_writer_support) { rte_free(h->local_free_slots); - - if (h->add_key == ADD_KEY_MULTIWRITER) - rte_free(h->multiwriter_lock); + rte_free(h->readwrite_lock); + } rte_ring_free(h->free_slots); rte_free(h->key_store); rte_free(h->buckets); @@ -366,15 +369,78 @@ rte_hash_secondary_hash(const hash_sig_t primary_hash) return primary_hash ^ ((tag + 1) * alt_bits_xor); } +int32_t +rte_hash_count(const struct rte_hash *h) +{ + uint32_t tot_ring_cnt, cached_cnt = 0; + uint32_t i, ret; + + if (h == NULL) + return -EINVAL; + + if (h->multi_writer_support) { + tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) * + (LCORE_CACHE_SIZE - 1); + for (i = 0; i < RTE_MAX_LCORE; i++) + cached_cnt += h->local_free_slots[i].len; + + ret = tot_ring_cnt - rte_ring_count(h->free_slots) - + cached_cnt; + } else { + tot_ring_cnt = h->entries; + ret = tot_ring_cnt - rte_ring_count(h->free_slots); + } + return ret; +} + +/* Read write locks implemented using rte_rwlock */ +static inline void +__hash_rw_writer_lock(const struct rte_hash *h) +{ + if (h->multi_writer_support && h->hw_trans_mem_support) + rte_rwlock_write_lock_tm(h->readwrite_lock); + else if (h->multi_writer_support) + rte_rwlock_write_lock(h->readwrite_lock); +} + + +static inline void +__hash_rw_reader_lock(const struct rte_hash *h) +{ + if (h->readwrite_concur_support && h->hw_trans_mem_support) + rte_rwlock_read_lock_tm(h->readwrite_lock); + else if (h->readwrite_concur_support) + rte_rwlock_read_lock(h->readwrite_lock); +} + +static inline void +__hash_rw_writer_unlock(const struct rte_hash *h) +{ + if (h->multi_writer_support && h->hw_trans_mem_support) + rte_rwlock_write_unlock_tm(h->readwrite_lock); + else if (h->multi_writer_support) + rte_rwlock_write_unlock(h->readwrite_lock); +} + +static inline void +__hash_rw_reader_unlock(const struct rte_hash *h) +{ + if (h->readwrite_concur_support && h->hw_trans_mem_support) + rte_rwlock_read_unlock_tm(h->readwrite_lock); + else if (h->readwrite_concur_support) + rte_rwlock_read_unlock(h->readwrite_lock); +} + void rte_hash_reset(struct rte_hash *h) { void *ptr; - unsigned i; + uint32_t tot_ring_cnt, i; if (h == NULL) return; + __hash_rw_writer_lock(h); memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket)); memset(h->key_store, 0, h->key_entry_size * (h->entries + 1)); @@ -383,97 +449,260 @@ rte_hash_reset(struct rte_hash *h) rte_pause(); /* Repopulate the free slots ring. 
Entry zero is reserved for key misses */ - for (i = 1; i < h->entries + 1; i++) + if (h->multi_writer_support) + tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) * + (LCORE_CACHE_SIZE - 1); + else + tot_ring_cnt = h->entries; + + for (i = 1; i < tot_ring_cnt + 1; i++) rte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i)); - if (h->hw_trans_mem_support) { + if (h->multi_writer_support) { /* Reset local caches per lcore */ for (i = 0; i < RTE_MAX_LCORE; i++) h->local_free_slots[i].len = 0; } + __hash_rw_writer_unlock(h); } -/* Search for an entry that can be pushed to its alternative location */ -static inline int -make_space_bucket(const struct rte_hash *h, struct rte_hash_bucket *bkt, - unsigned int *nr_pushes) +/* + * Function called to enqueue back an index in the cache/ring, + * as slot has not being used and it can be used in the + * next addition attempt. + */ +static inline void +enqueue_slot_back(const struct rte_hash *h, + struct lcore_cache *cached_free_slots, + void *slot_id) { - unsigned i, j; - int ret; - uint32_t next_bucket_idx; - struct rte_hash_bucket *next_bkt[RTE_HASH_BUCKET_ENTRIES]; + if (h->multi_writer_support) { + cached_free_slots->objs[cached_free_slots->len] = slot_id; + cached_free_slots->len++; + } else + rte_ring_sp_enqueue(h->free_slots, slot_id); +} + +/* Search a key from bucket and update its data */ +static inline int32_t +search_and_update(const struct rte_hash *h, void *data, const void *key, + struct rte_hash_bucket *bkt, hash_sig_t sig, hash_sig_t alt_hash) +{ + int i; + struct rte_hash_key *k, *keys = h->key_store; - /* - * Push existing item (search for bucket with space in - * alternative locations) to its alternative location - */ for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) { - /* Search for space in alternative locations */ - next_bucket_idx = bkt->sig_alt[i] & h->bucket_bitmask; - next_bkt[i] = &h->buckets[next_bucket_idx]; - for (j = 0; j < RTE_HASH_BUCKET_ENTRIES; j++) { - if (next_bkt[i]->key_idx[j] == EMPTY_SLOT) - break; + if (bkt->sig_current[i] == sig && + bkt->sig_alt[i] == alt_hash) { + k = (struct rte_hash_key *) ((char *)keys + + bkt->key_idx[i] * h->key_entry_size); + if (rte_hash_cmp_eq(key, k->key, h) == 0) { + /* Update data */ + k->pdata = data; + /* + * Return index where key is stored, + * subtracting the first dummy index + */ + return bkt->key_idx[i] - 1; + } } - - if (j != RTE_HASH_BUCKET_ENTRIES) - break; } + return -1; +} - /* Alternative location has spare room (end of recursive function) */ - if (i != RTE_HASH_BUCKET_ENTRIES) { - next_bkt[i]->sig_alt[j] = bkt->sig_current[i]; - next_bkt[i]->sig_current[j] = bkt->sig_alt[i]; - next_bkt[i]->key_idx[j] = bkt->key_idx[i]; - return i; +/* Only tries to insert at one bucket (@prim_bkt) without trying to push + * buckets around. + * return 1 if matching existing key, return 0 if succeeds, return -1 for no + * empty entry. + */ +static inline int32_t +rte_hash_cuckoo_insert_mw(const struct rte_hash *h, + struct rte_hash_bucket *prim_bkt, + struct rte_hash_bucket *sec_bkt, + const struct rte_hash_key *key, void *data, + hash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx, + int32_t *ret_val) +{ + unsigned int i; + struct rte_hash_bucket *cur_bkt = prim_bkt; + int32_t ret; + + __hash_rw_writer_lock(h); + /* Check if key was inserted after last check but before this + * protected region in case of inserting duplicated keys. 
+ */ + ret = search_and_update(h, data, key, cur_bkt, sig, alt_hash); + if (ret != -1) { + __hash_rw_writer_unlock(h); + *ret_val = ret; + return 1; + } + ret = search_and_update(h, data, key, sec_bkt, alt_hash, sig); + if (ret != -1) { + __hash_rw_writer_unlock(h); + *ret_val = ret; + return 1; } - /* Pick entry that has not been pushed yet */ - for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) - if (bkt->flag[i] == 0) + /* Insert new entry if there is room in the primary + * bucket. + */ + for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) { + /* Check if slot is available */ + if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) { + prim_bkt->sig_current[i] = sig; + prim_bkt->sig_alt[i] = alt_hash; + prim_bkt->key_idx[i] = new_idx; break; + } + } + __hash_rw_writer_unlock(h); - /* All entries have been pushed, so entry cannot be added */ - if (i == RTE_HASH_BUCKET_ENTRIES || ++(*nr_pushes) > RTE_HASH_MAX_PUSHES) - return -ENOSPC; + if (i != RTE_HASH_BUCKET_ENTRIES) + return 0; - /* Set flag to indicate that this entry is going to be pushed */ - bkt->flag[i] = 1; + /* no empty entry */ + return -1; +} - /* Need room in alternative bucket to insert the pushed entry */ - ret = make_space_bucket(h, next_bkt[i], nr_pushes); - /* - * After recursive function. - * Clear flags and insert the pushed entry - * in its alternative location if successful, - * or return error +/* Shift buckets along provided cuckoo_path (@leaf and @leaf_slot) and fill + * the path head with new entry (sig, alt_hash, new_idx) + * return 1 if matched key found, return -1 if cuckoo path invalided and fail, + * return 0 if succeeds. + */ +static inline int +rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h, + struct rte_hash_bucket *bkt, + struct rte_hash_bucket *alt_bkt, + const struct rte_hash_key *key, void *data, + struct queue_node *leaf, uint32_t leaf_slot, + hash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx, + int32_t *ret_val) +{ + uint32_t prev_alt_bkt_idx; + struct rte_hash_bucket *cur_bkt = bkt; + struct queue_node *prev_node, *curr_node = leaf; + struct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt; + uint32_t prev_slot, curr_slot = leaf_slot; + int32_t ret; + + __hash_rw_writer_lock(h); + + /* In case empty slot was gone before entering protected region */ + if (curr_bkt->key_idx[curr_slot] != EMPTY_SLOT) { + __hash_rw_writer_unlock(h); + return -1; + } + + /* Check if key was inserted after last check but before this + * protected region. 
*/ - bkt->flag[i] = 0; - if (ret >= 0) { - next_bkt[i]->sig_alt[ret] = bkt->sig_current[i]; - next_bkt[i]->sig_current[ret] = bkt->sig_alt[i]; - next_bkt[i]->key_idx[ret] = bkt->key_idx[i]; - return i; - } else - return ret; + ret = search_and_update(h, data, key, cur_bkt, sig, alt_hash); + if (ret != -1) { + __hash_rw_writer_unlock(h); + *ret_val = ret; + return 1; + } + + ret = search_and_update(h, data, key, alt_bkt, alt_hash, sig); + if (ret != -1) { + __hash_rw_writer_unlock(h); + *ret_val = ret; + return 1; + } + + while (likely(curr_node->prev != NULL)) { + prev_node = curr_node->prev; + prev_bkt = prev_node->bkt; + prev_slot = curr_node->prev_slot; + + prev_alt_bkt_idx = + prev_bkt->sig_alt[prev_slot] & h->bucket_bitmask; + + if (unlikely(&h->buckets[prev_alt_bkt_idx] + != curr_bkt)) { + /* revert it to empty, otherwise duplicated keys */ + curr_bkt->key_idx[curr_slot] = EMPTY_SLOT; + __hash_rw_writer_unlock(h); + return -1; + } + + /* Need to swap current/alt sig to allow later + * Cuckoo insert to move elements back to its + * primary bucket if available + */ + curr_bkt->sig_alt[curr_slot] = + prev_bkt->sig_current[prev_slot]; + curr_bkt->sig_current[curr_slot] = + prev_bkt->sig_alt[prev_slot]; + curr_bkt->key_idx[curr_slot] = + prev_bkt->key_idx[prev_slot]; + + curr_slot = prev_slot; + curr_node = prev_node; + curr_bkt = curr_node->bkt; + } + + curr_bkt->sig_current[curr_slot] = sig; + curr_bkt->sig_alt[curr_slot] = alt_hash; + curr_bkt->key_idx[curr_slot] = new_idx; + + __hash_rw_writer_unlock(h); + + return 0; } /* - * Function called to enqueue back an index in the cache/ring, - * as slot has not being used and it can be used in the - * next addition attempt. + * Make space for new key, using bfs Cuckoo Search and Multi-Writer safe + * Cuckoo */ -static inline void -enqueue_slot_back(const struct rte_hash *h, - struct lcore_cache *cached_free_slots, - void *slot_id) +static inline int +rte_hash_cuckoo_make_space_mw(const struct rte_hash *h, + struct rte_hash_bucket *bkt, + struct rte_hash_bucket *sec_bkt, + const struct rte_hash_key *key, void *data, + hash_sig_t sig, hash_sig_t alt_hash, + uint32_t new_idx, int32_t *ret_val) { - if (h->hw_trans_mem_support) { - cached_free_slots->objs[cached_free_slots->len] = slot_id; - cached_free_slots->len++; - } else - rte_ring_sp_enqueue(h->free_slots, slot_id); + unsigned int i; + struct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN]; + struct queue_node *tail, *head; + struct rte_hash_bucket *curr_bkt, *alt_bkt; + + tail = queue; + head = queue + 1; + tail->bkt = bkt; + tail->prev = NULL; + tail->prev_slot = -1; + + /* Cuckoo bfs Search */ + while (likely(tail != head && head < + queue + RTE_HASH_BFS_QUEUE_MAX_LEN - + RTE_HASH_BUCKET_ENTRIES)) { + curr_bkt = tail->bkt; + for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) { + if (curr_bkt->key_idx[i] == EMPTY_SLOT) { + int32_t ret = rte_hash_cuckoo_move_insert_mw(h, + bkt, sec_bkt, key, data, + tail, i, sig, alt_hash, + new_idx, ret_val); + if (likely(ret != -1)) + return ret; + } + + /* Enqueue new node and keep prev node info */ + alt_bkt = &(h->buckets[curr_bkt->sig_alt[i] + & h->bucket_bitmask]); + head->bkt = alt_bkt; + head->prev = tail; + head->prev_slot = i; + head++; + } + tail++; + } + + return -ENOSPC; } static inline int32_t @@ -482,19 +711,15 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key, { hash_sig_t alt_hash; uint32_t prim_bucket_idx, sec_bucket_idx; - unsigned i; struct rte_hash_bucket *prim_bkt, *sec_bkt; - struct rte_hash_key *new_k, *k, *keys = 
h->key_store; + struct rte_hash_key *new_k, *keys = h->key_store; void *slot_id = NULL; uint32_t new_idx; int ret; unsigned n_slots; unsigned lcore_id; struct lcore_cache *cached_free_slots = NULL; - unsigned int nr_pushes = 0; - - if (h->add_key == ADD_KEY_MULTIWRITER) - rte_spinlock_lock(h->multiwriter_lock); + int32_t ret_val; prim_bucket_idx = sig & h->bucket_bitmask; prim_bkt = &h->buckets[prim_bucket_idx]; @@ -505,8 +730,24 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key, sec_bkt = &h->buckets[sec_bucket_idx]; rte_prefetch0(sec_bkt); - /* Get a new slot for storing the new key */ - if (h->hw_trans_mem_support) { + /* Check if key is already inserted in primary location */ + __hash_rw_writer_lock(h); + ret = search_and_update(h, data, key, prim_bkt, sig, alt_hash); + if (ret != -1) { + __hash_rw_writer_unlock(h); + return ret; + } + + /* Check if key is already inserted in secondary location */ + ret = search_and_update(h, data, key, sec_bkt, alt_hash, sig); + if (ret != -1) { + __hash_rw_writer_unlock(h); + return ret; + } + __hash_rw_writer_unlock(h); + + /* Did not find a match, so get a new slot for storing the new key */ + if (h->multi_writer_support) { lcore_id = rte_lcore_id(); cached_free_slots = &h->local_free_slots[lcore_id]; /* Try to get a free slot from the local cache */ @@ -516,8 +757,7 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key, cached_free_slots->objs, LCORE_CACHE_SIZE, NULL); if (n_slots == 0) { - ret = -ENOSPC; - goto failure; + return -ENOSPC; } cached_free_slots->len += n_slots; @@ -528,122 +768,50 @@ __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key, slot_id = cached_free_slots->objs[cached_free_slots->len]; } else { if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0) { - ret = -ENOSPC; - goto failure; + return -ENOSPC; } } new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size); - rte_prefetch0(new_k); new_idx = (uint32_t)((uintptr_t) slot_id); - - /* Check if key is already inserted in primary location */ - for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) { - if (prim_bkt->sig_current[i] == sig && - prim_bkt->sig_alt[i] == alt_hash) { - k = (struct rte_hash_key *) ((char *)keys + - prim_bkt->key_idx[i] * h->key_entry_size); - if (rte_hash_cmp_eq(key, k->key, h) == 0) { - /* Enqueue index of free slot back in the ring. */ - enqueue_slot_back(h, cached_free_slots, slot_id); - /* Update data */ - k->pdata = data; - /* - * Return index where key is stored, - * subtracting the first dummy index - */ - return prim_bkt->key_idx[i] - 1; - } - } - } - - /* Check if key is already inserted in secondary location */ - for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) { - if (sec_bkt->sig_alt[i] == sig && - sec_bkt->sig_current[i] == alt_hash) { - k = (struct rte_hash_key *) ((char *)keys + - sec_bkt->key_idx[i] * h->key_entry_size); - if (rte_hash_cmp_eq(key, k->key, h) == 0) { - /* Enqueue index of free slot back in the ring. 
*/ - enqueue_slot_back(h, cached_free_slots, slot_id); - /* Update data */ - k->pdata = data; - /* - * Return index where key is stored, - * subtracting the first dummy index - */ - return sec_bkt->key_idx[i] - 1; - } - } - } - /* Copy key */ rte_memcpy(new_k->key, key, h->key_len); new_k->pdata = data; -#if defined(RTE_ARCH_X86) /* currently only x86 support HTM */ - if (h->add_key == ADD_KEY_MULTIWRITER_TM) { - ret = rte_hash_cuckoo_insert_mw_tm(prim_bkt, - sig, alt_hash, new_idx); - if (ret >= 0) - return new_idx - 1; - /* Primary bucket full, need to make space for new entry */ - ret = rte_hash_cuckoo_make_space_mw_tm(h, prim_bkt, sig, - alt_hash, new_idx); + /* Find an empty slot and insert */ + ret = rte_hash_cuckoo_insert_mw(h, prim_bkt, sec_bkt, key, data, + sig, alt_hash, new_idx, &ret_val); + if (ret == 0) + return new_idx - 1; + else if (ret == 1) { + enqueue_slot_back(h, cached_free_slots, slot_id); + return ret_val; + } - if (ret >= 0) - return new_idx - 1; + /* Primary bucket full, need to make space for new entry */ + ret = rte_hash_cuckoo_make_space_mw(h, prim_bkt, sec_bkt, key, data, + sig, alt_hash, new_idx, &ret_val); + if (ret == 0) + return new_idx - 1; + else if (ret == 1) { + enqueue_slot_back(h, cached_free_slots, slot_id); + return ret_val; + } - /* Also search secondary bucket to get better occupancy */ - ret = rte_hash_cuckoo_make_space_mw_tm(h, sec_bkt, sig, - alt_hash, new_idx); + /* Also search secondary bucket to get better occupancy */ + ret = rte_hash_cuckoo_make_space_mw(h, sec_bkt, prim_bkt, key, data, + alt_hash, sig, new_idx, &ret_val); - if (ret >= 0) - return new_idx - 1; + if (ret == 0) + return new_idx - 1; + else if (ret == 1) { + enqueue_slot_back(h, cached_free_slots, slot_id); + return ret_val; } else { -#endif - for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) { - /* Check if slot is available */ - if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) { - prim_bkt->sig_current[i] = sig; - prim_bkt->sig_alt[i] = alt_hash; - prim_bkt->key_idx[i] = new_idx; - break; - } - } - - if (i != RTE_HASH_BUCKET_ENTRIES) { - if (h->add_key == ADD_KEY_MULTIWRITER) - rte_spinlock_unlock(h->multiwriter_lock); - return new_idx - 1; - } - - /* Primary bucket full, need to make space for new entry - * After recursive function. 
- * Insert the new entry in the position of the pushed entry - * if successful or return error and - * store the new slot back in the ring - */ - ret = make_space_bucket(h, prim_bkt, &nr_pushes); - if (ret >= 0) { - prim_bkt->sig_current[ret] = sig; - prim_bkt->sig_alt[ret] = alt_hash; - prim_bkt->key_idx[ret] = new_idx; - if (h->add_key == ADD_KEY_MULTIWRITER) - rte_spinlock_unlock(h->multiwriter_lock); - return new_idx - 1; - } -#if defined(RTE_ARCH_X86) + enqueue_slot_back(h, cached_free_slots, slot_id); + return ret; } -#endif - /* Error in addition, store new slot back in the ring and return error */ - enqueue_slot_back(h, cached_free_slots, (void *)((uintptr_t) new_idx)); - -failure: - if (h->add_key == ADD_KEY_MULTIWRITER) - rte_spinlock_unlock(h->multiwriter_lock); - return ret; } int32_t @@ -688,20 +856,15 @@ rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data) else return ret; } + +/* Search one bucket to find the match key */ static inline int32_t -__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key, - hash_sig_t sig, void **data) +search_one_bucket(const struct rte_hash *h, const void *key, hash_sig_t sig, + void **data, const struct rte_hash_bucket *bkt) { - uint32_t bucket_idx; - hash_sig_t alt_hash; - unsigned i; - struct rte_hash_bucket *bkt; + int i; struct rte_hash_key *k, *keys = h->key_store; - bucket_idx = sig & h->bucket_bitmask; - bkt = &h->buckets[bucket_idx]; - - /* Check if key is in primary location */ for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) { if (bkt->sig_current[i] == sig && bkt->key_idx[i] != EMPTY_SLOT) { @@ -718,30 +881,41 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key, } } } + return -1; +} + +static inline int32_t +__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key, + hash_sig_t sig, void **data) +{ + uint32_t bucket_idx; + hash_sig_t alt_hash; + struct rte_hash_bucket *bkt; + int ret; + + bucket_idx = sig & h->bucket_bitmask; + bkt = &h->buckets[bucket_idx]; + __hash_rw_reader_lock(h); + + /* Check if key is in primary location */ + ret = search_one_bucket(h, key, sig, data, bkt); + if (ret != -1) { + __hash_rw_reader_unlock(h); + return ret; + } /* Calculate secondary hash */ alt_hash = rte_hash_secondary_hash(sig); bucket_idx = alt_hash & h->bucket_bitmask; bkt = &h->buckets[bucket_idx]; /* Check if key is in secondary location */ - for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) { - if (bkt->sig_current[i] == alt_hash && - bkt->sig_alt[i] == sig) { - k = (struct rte_hash_key *) ((char *)keys + - bkt->key_idx[i] * h->key_entry_size); - if (rte_hash_cmp_eq(key, k->key, h) == 0) { - if (data != NULL) - *data = k->pdata; - /* - * Return index where key is stored, - * subtracting the first dummy index - */ - return bkt->key_idx[i] - 1; - } - } + ret = search_one_bucket(h, key, alt_hash, data, bkt); + if (ret != -1) { + __hash_rw_reader_unlock(h); + return ret; } - + __hash_rw_reader_unlock(h); return -ENOENT; } @@ -783,7 +957,7 @@ remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i) bkt->sig_current[i] = NULL_SIGNATURE; bkt->sig_alt[i] = NULL_SIGNATURE; - if (h->hw_trans_mem_support) { + if (h->multi_writer_support) { lcore_id = rte_lcore_id(); cached_free_slots = &h->local_free_slots[lcore_id]; /* Cache full, need to free it. 
*/ @@ -804,20 +978,15 @@ remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i) } } +/* Search one bucket and remove the matched key */ static inline int32_t -__rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key, - hash_sig_t sig) +search_and_remove(const struct rte_hash *h, const void *key, + struct rte_hash_bucket *bkt, hash_sig_t sig) { - uint32_t bucket_idx; - hash_sig_t alt_hash; - unsigned i; - struct rte_hash_bucket *bkt; struct rte_hash_key *k, *keys = h->key_store; + unsigned int i; int32_t ret; - bucket_idx = sig & h->bucket_bitmask; - bkt = &h->buckets[bucket_idx]; - /* Check if key is in primary location */ for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) { if (bkt->sig_current[i] == sig && @@ -837,32 +1006,42 @@ __rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key, } } } + return -1; +} + +static inline int32_t +__rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key, + hash_sig_t sig) +{ + uint32_t bucket_idx; + hash_sig_t alt_hash; + struct rte_hash_bucket *bkt; + int32_t ret; + + bucket_idx = sig & h->bucket_bitmask; + bkt = &h->buckets[bucket_idx]; + + __hash_rw_writer_lock(h); + /* look for key in primary bucket */ + ret = search_and_remove(h, key, bkt, sig); + if (ret != -1) { + __hash_rw_writer_unlock(h); + return ret; + } /* Calculate secondary hash */ alt_hash = rte_hash_secondary_hash(sig); bucket_idx = alt_hash & h->bucket_bitmask; bkt = &h->buckets[bucket_idx]; - /* Check if key is in secondary location */ - for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) { - if (bkt->sig_current[i] == alt_hash && - bkt->key_idx[i] != EMPTY_SLOT) { - k = (struct rte_hash_key *) ((char *)keys + - bkt->key_idx[i] * h->key_entry_size); - if (rte_hash_cmp_eq(key, k->key, h) == 0) { - remove_entry(h, bkt, i); - - /* - * Return index where key is stored, - * subtracting the first dummy index - */ - ret = bkt->key_idx[i] - 1; - bkt->key_idx[i] = EMPTY_SLOT; - return ret; - } - } + /* look for key in secondary bucket */ + ret = search_and_remove(h, key, bkt, alt_hash); + if (ret != -1) { + __hash_rw_writer_unlock(h); + return ret; } + __hash_rw_writer_unlock(h); return -ENOENT; } @@ -1004,6 +1183,7 @@ __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys, rte_prefetch0(secondary_bkt[i]); } + __hash_rw_reader_lock(h); /* Compare signatures and prefetch key slot of first hit */ for (i = 0; i < num_keys; i++) { compare_signatures(&prim_hitmask[i], &sec_hitmask[i], @@ -1086,6 +1266,8 @@ next_key: continue; } + __hash_rw_reader_unlock(h); + if (hit_mask != NULL) *hit_mask = hits; } @@ -1144,7 +1326,7 @@ rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32 bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES; idx = *next % RTE_HASH_BUCKET_ENTRIES; } - + __hash_rw_reader_lock(h); /* Get position of entry in key table */ position = h->buckets[bucket_idx].key_idx[idx]; next_key = (struct rte_hash_key *) ((char *)h->key_store + @@ -1153,6 +1335,8 @@ rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32 *key = next_key->key; *data = next_key->pdata; + __hash_rw_reader_unlock(h); + /* Increment iterator */ (*next)++; |
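The new extra flags and the rte_hash_count() API introduced by this version can be exercised from an application roughly as follows. This is a minimal sketch assuming a DPDK 18.08 environment with the EAL already initialized; the table name, entry count, key size, and socket id are illustrative, not taken from the commit.

```c
#include <stdint.h>
#include <rte_hash.h>
#include <rte_jhash.h>

/* Minimal sketch: create a table with the new read/write concurrency
 * flag, add an entry, and query the new entry counter.  Assumes the EAL
 * is already initialized; name and sizes are illustrative. */
static struct rte_hash *
create_concurrent_table(void)
{
	struct rte_hash_parameters params = {
		.name = "APP_HT",                      /* illustrative */
		.entries = 1024,
		.key_len = sizeof(uint32_t),
		.hash_func = rte_jhash,
		.hash_func_init_val = 0,
		.socket_id = 0,
		/* RW_CONCURRENCY also turns on multi-writer support,
		 * as rte_hash_create() does in this diff. */
		.extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY,
	};

	return rte_hash_create(&params);
}

static int32_t
add_and_count(struct rte_hash *h, uint32_t key, void *value)
{
	int ret = rte_hash_add_key_data(h, &key, value);

	if (ret < 0)
		return ret;                  /* e.g. -ENOSPC */

	return rte_hash_count(h);            /* entries currently stored */
}
```

As the flag handling at the top of rte_hash_create() shows, RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY implies multi-writer support, so it protects both lookups and add/delete; RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD alone serializes writers against each other but leaves lookups unlocked.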
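The key-slot sizing in rte_hash_create() and the bookkeeping in rte_hash_count() are easier to follow in isolation. The sketch below models only that arithmetic with plain integers; LCORE_CACHE_SIZE comes from rte_cuckoo_hash.h and RTE_MAX_LCORE is a build-time constant, so the values used here are assumptions for illustration.

```c
#include <stdint.h>

#define LCORE_CACHE_SIZE 64   /* per-lcore free-slot cache; assumed 64 here */
#define RTE_MAX_LCORE    128  /* build-time constant; 128 is just an example */

/* Key slots allocated by rte_hash_create(): the requested entries, one
 * dummy slot at index 0, and -- with multi-writer support -- room for the
 * indices that may sit idle in the per-lcore caches. */
static uint32_t
total_key_slots(uint32_t entries, int multi_writer)
{
	if (multi_writer)
		return entries +
			(RTE_MAX_LCORE - 1) * (LCORE_CACHE_SIZE - 1) + 1;
	return entries + 1;
}

/* rte_hash_count() logic: every index that is neither in the free-slots
 * ring nor parked in a local cache is an entry in use. */
static uint32_t
entries_in_use(uint32_t entries, int multi_writer,
	       uint32_t free_in_ring, uint32_t free_in_caches)
{
	uint32_t tot = entries;

	if (multi_writer) {
		tot += (RTE_MAX_LCORE - 1) * (LCORE_CACHE_SIZE - 1);
		return tot - free_in_ring - free_in_caches;
	}
	return tot - free_in_ring;
}
```

This is also why rte_hash_reset() repopulates the ring up to the same multi-writer total and why the ring itself is now sized with rte_align32pow2(num_key_slots) rather than num_key_slots - 1.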
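In the reworked add path, the duplicate-key check and the actual insert are separate critical sections: the write lock is dropped while a free slot index is taken from the per-lcore cache or the ring, so rte_hash_cuckoo_insert_mw() and rte_hash_cuckoo_move_insert_mw() re-check for the key once the lock is re-acquired (the "inserted after last check but before this protected region" comments). The sketch below shows that shape generically; the toy table, the POSIX rwlock, and the alloc/free helpers are stand-ins for the DPDK internals, not the real implementation, and keys are assumed nonzero.

```c
#include <pthread.h>

#define TBL_SIZE 16

/* Toy key store, only to carry the locking pattern of the new add path:
 * slot allocation has its own synchronization (the ring / per-lcore cache
 * in DPDK), while the buckets are protected by a read-write lock. */
static int   keys[TBL_SIZE + 1];             /* slot 0 unused (dummy) */
static void *vals[TBL_SIZE + 1];

static pthread_rwlock_t bucket_lock = PTHREAD_RWLOCK_INITIALIZER;

static pthread_mutex_t  slot_lock = PTHREAD_MUTEX_INITIALIZER;
static int free_stack[TBL_SIZE];
static int n_free;                           /* filled by init_slots() */

static void
init_slots(void)
{
	for (int i = 0; i < TBL_SIZE; i++)
		free_stack[i] = i + 1;        /* slots 1..TBL_SIZE */
	n_free = TBL_SIZE;
}

static int
alloc_slot(void)                             /* ring/cache dequeue stand-in */
{
	int s = -1;

	pthread_mutex_lock(&slot_lock);
	if (n_free > 0)
		s = free_stack[--n_free];
	pthread_mutex_unlock(&slot_lock);
	return s;
}

static void
free_slot(int slot)                          /* enqueue_slot_back() stand-in */
{
	pthread_mutex_lock(&slot_lock);
	free_stack[n_free++] = slot;
	pthread_mutex_unlock(&slot_lock);
}

static int
find_entry(int key)                          /* bucket scan stand-in */
{
	for (int i = 1; i <= TBL_SIZE; i++)
		if (keys[i] == key)
			return i;
	return -1;
}

/* Check under the lock, drop it to allocate a slot, then re-check under
 * the lock before the slot is used -- the same shape as the new
 * __rte_hash_add_key_with_hash() / rte_hash_cuckoo_insert_mw() pair. */
int
add_key(int key, void *val)
{
	int idx, slot;

	pthread_rwlock_wrlock(&bucket_lock);
	idx = find_entry(key);
	pthread_rwlock_unlock(&bucket_lock);
	if (idx >= 0)
		return idx;                   /* already present */

	slot = alloc_slot();                  /* no bucket lock held here */
	if (slot < 0)
		return -1;                    /* -ENOSPC in the real code */

	pthread_rwlock_wrlock(&bucket_lock);
	idx = find_entry(key);                /* another writer may have
					       * inserted the key meanwhile */
	if (idx >= 0) {
		pthread_rwlock_unlock(&bucket_lock);
		free_slot(slot);              /* return the unused slot */
		return idx;
	}
	keys[slot] = key;
	vals[slot] = val;
	pthread_rwlock_unlock(&bucket_lock);
	return slot;
}
```

When the second check does find the key, the freshly allocated index goes back to the cache or ring, which is what enqueue_slot_back() does in the diff.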
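The recursive make_space_bucket() is replaced by a breadth-first search (rte_hash_cuckoo_make_space_mw()) that records a path of alternative buckets and then, under the write lock, shifts entries back along that path until the primary bucket has room. The following is a heavily simplified, self-contained illustration of that BFS-and-shift idea with two-choice buckets; it is not the DPDK implementation and leaves out signatures, locking, and the consistency re-checks the real code performs after taking the lock.

```c
#include <stdint.h>

#define NB_SLOTS  4            /* entries per bucket, as in rte_cuckoo_hash */
#define QUEUE_LEN 64           /* plays the role of RTE_HASH_BFS_QUEUE_MAX_LEN */

struct slot   { uint32_t key; uint32_t alt_bkt; };  /* key == 0: empty */
struct bucket { struct slot s[NB_SLOTS]; };
struct node   { int bkt; int prev; int prev_slot; };

/* Breadth-first search for a reachable empty slot, then shift entries
 * back along the recorded path so the starting bucket gains room for
 * (key, alt_bkt).  Returns 0 on success, -1 if the queue is exhausted. */
static int
bfs_make_space(struct bucket *b, int start_bkt, uint32_t key, uint32_t alt_bkt)
{
	struct node q[QUEUE_LEN];
	int head = 0, tail = 1;

	q[0] = (struct node){ .bkt = start_bkt, .prev = -1, .prev_slot = -1 };

	while (head < tail && tail < QUEUE_LEN - NB_SLOTS) {
		struct node *cur = &q[head];

		for (int i = 0; i < NB_SLOTS; i++) {
			struct slot *sl = &b[cur->bkt].s[i];

			if (sl->key == 0) {
				/* Found a hole: walk the path backwards,
				 * moving each entry one step towards it. */
				int hole_bkt = cur->bkt, hole_slot = i;
				const struct node *n = cur;

				while (n->prev != -1) {
					const struct node *p = &q[n->prev];
					struct slot moved =
						b[p->bkt].s[n->prev_slot];

					/* Its alternative is now the bucket
					 * it just left (the sig swap in the
					 * real code). */
					moved.alt_bkt = (uint32_t)p->bkt;
					b[hole_bkt].s[hole_slot] = moved;
					hole_bkt = p->bkt;
					hole_slot = n->prev_slot;
					n = p;
				}
				/* The hole is now in the start bucket. */
				b[hole_bkt].s[hole_slot] =
					(struct slot){ key, alt_bkt };
				return 0;
			}
			/* Slot occupied: enqueue its alternative bucket,
			 * remembering how we got there. */
			q[tail++] = (struct node){
				.bkt = (int)sl->alt_bkt,
				.prev = head,
				.prev_slot = i,
			};
		}
		head++;
	}
	return -1;
}
```

Because the search runs without the lock, the real move-insert code re-validates while walking back: it checks that the found slot is still empty and that each entry's alternative bucket still matches the recorded path (the prev_alt_bkt_idx check), aborting with -1 if the table changed underneath it.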