path: root/test/test_ip4.py
Age | Commit message | Author | Files | Lines
2018-12-17 | Fix TestIPv4FibCrud interdependencies. | Paul Vinciguerra | 1 | -0/+14
The tests fail if run independently or out of order. This change breaks the dependency between the tests.

ERROR: Add 1k routes
------------------------------------------------------------------------------
Traceback (most recent call last):
  File "/vpp/test/test_ip4.py", line 514, in test_3_add_new_routes
    self.deleted_routes.remove(x)
ValueError: list.remove(x): x not in list

Change-Id: I344ceba6bd8b86556f92e50080be6c43092b9faf
Signed-off-by: Paul Vinciguerra <pvinci@vinciconsulting.com>

2018-12-12 | PAPI: Allow ipaddress object as argument and return values from API calls | Ole Troan | 1 | -6/+6
The API calls that use any of vl_api_address_t, vl_api_ip4_address, vl_api_ip6_address_t, vl_api_prefix_t, vl_api_ip4_prefix_t or vl_api_ip6_prefix_t now accept either the old-style dictionary, a text string ("2001:db8::/32") or an ipaddress object. Unless it is called with '_no_type_conversion': True, it will also return an appropriate ipaddress object.

Change-Id: I84e4a1577bd57f6b5ae725f316a523988b6a955b
Signed-off-by: Ole Troan <ot@cisco.com>

2018-12-10 | IP-local: any IP can appear as the source (VPP-1522) | Neale Ranns | 1 | -0/+19
Change-Id: Ib0d9b533d72c899b77c9a7bd1daa9b4a55b7221c
Signed-off-by: Neale Ranns <nranns@cisco.com>

2018-12-06 | API: Change ip4_address and ip6_address to use type alias. | Ole Troan | 1 | -1/+1
Change-Id: Id8669bbadd1d6b2054865a310a654e9b38d1667d
Signed-off-by: Ole Troan <ot@cisco.com>

2018-12-05 | VPP-1508 python3 tests: xrange | Paul Vinciguerra | 1 | -11/+13
xrange is not supported. Use six.range.

py27 runtests: commands[5] | stestr --test-path ./test run --slowest test_ip4 test_ip6

==============================================================================
IPv4 disabled
==============================================================================
==============================================================================
ICMP Echo Test Case
==============================================================================
{0} test.test_ip4.TestIPDisabled.test_ip_disabled [5.256819s] ... ok
07:24:41,902 Couldn't stat : /tmp/vpp-unittest-TestICMPEcho-hU4IsB/stats.sock
{1} test.test_ip4.TestICMPEcho.test_icmp_echo [0.367035s] ... ok
==============================================================================
IPv4 Deaggregate Routes
==============================================================================
==============================================================================
IPv4 Input Exceptions
==============================================================================
07:24:47,314 Couldn't stat : /tmp/vpp-unittest-TestIPDeag-eE1VgC/stats.sock
{1} test.test_ip4.TestIPDeag.test_ip_deag [5.895646s] ... ok
{0} test.test_ip4.TestIPInput.test_ip_input [5.819001s] ... ok
==============================================================================
IPv4 longest Prefix Match
... output truncated ...

==============================
Failed 4 tests - output below:
==============================

test.test_ip4.TestIPv4FibCrud.test_3_add_new_routes
---------------------------------------------------
Captured traceback:
~~~~~~~~~~~~~~~~~~~
    Traceback (most recent call last):
      File "/vpp/test/test_ip4.py", line 509, in test_3_add_new_routes
        self.deleted_routes.remove(x)
    ValueError: list.remove(x): x not in list

test.test_ip4.TestIPv4FibCrud.test_2_del_routes
-----------------------------------------------
Captured traceback:
~~~~~~~~~~~~~~~~~~~
    Traceback (most recent call last):
      File "/vpp/test/test_ip4.py", line 478, in test_2_del_routes
        self.configured_routes.remove(x)
    ValueError: list.remove(x): x not in list

test.test_ip4_vrf_multi_instance.TestIp4VrfMultiInst.test_ip4_vrf_03
--------------------------------------------------------------------
Captured traceback:
~~~~~~~~~~~~~~~~~~~
    Traceback (most recent call last):
      File "/vpp/test/test_ip4_vrf_multi_instance.py", line 465, in test_ip4_vrf_03
        self.create_vrf_and_assign_interfaces(1)
      File "/vpp/test/test_ip4_vrf_multi_instance.py", line 189, in create_vrf_and_assign_interfaces
        pg_if.set_table_ip4(vrf_id)
      File "/vpp/test/vpp_interface.py", line 322, in set_table_ip4
        self.sw_if_index, 0, self.ip4_table_id)
      File "/vpp/test/vpp_papi_provider.py", line 264, in sw_interface_set_table
        'vrf_id': table_id})
      File "/vpp/test/vpp_papi_provider.py", line 196, in api
        raise UnexpectedApiReturnValueError(msg)
    test.vpp_papi_provider.UnexpectedApiReturnValueError: API call failed, expected 0 return value instead of -114 in sw_interface_set_table_reply(_0=91, context=1007, retval=-114)

test.test_ip4_vrf_multi_instance.TestIp4VrfMultiInst.test_ip4_vrf_02
--------------------------------------------------------------------
Captured traceback:
~~~~~~~~~~~~~~~~~~~
    Traceback (most recent call last):
      File "/vpp/test/test_ip4_vrf_multi_instance.py", line 445, in test_ip4_vrf_02
        self.reset_vrf_and_remove_from_vrf_list(1)
      File "/vpp/test/test_ip4_vrf_multi_instance.py", line 208, in reset_vrf_and_remove_from_vrf_list
        self.vapi.reset_fib(vrf_id, is_ipv6=0)
      File "/vpp/test/vpp_papi_provider.py", line 1137, in reset_fib
        'is_ipv6': is_ipv6,
      File "/vpp/test/vpp_papi_provider.py", line 196, in api
        raise UnexpectedApiReturnValueError(msg)
    test.vpp_papi_provider.UnexpectedApiReturnValueError: API call failed, expected 0 return value instead of -3 in reset_fib_reply(_0=259, context=1198, retval=-3)

======
Totals
======
Ran: 57 tests in 266.0000 sec.
 - Passed: 53
 - Skipped: 0
 - Expected Fail: 0
 - Unexpected Success: 0
 - Failed: 4
Sum of execute time for each test: 157.3925 sec.

==============
Worker Balance
==============
 - Worker 0 (29 tests) => 0:03:52.608995
 - Worker 1 (28 tests) => 0:04:08.615473

Test id                                        Runtime (s)
---------------------------------------------  -----------
test.test_ip_mcast.TestIPMcast.test_ip6_mcast  8.535
test.test_ip4.TestIPPunt.test_ip_punt          8.082
test.test_ip6.TestIP6Punt.test_ip_punt         6.582
test.test_ip6.TestIPDeag.test_ip_deag          6.175
test.test_ip6.TestIPv6.test_ns                 6.171
test.test_ip4.TestIPDeag.test_ip_deag          5.896
test.test_ip6.TestIPv6.test_fib                5.846
test.test_ip4.TestIPInput.test_ip_input        5.819
test.test_ip6.TestIPv6.test_rs                 5.737
test.test_ip4.TestIPv4.test_fib                5.267

ERROR: InvocationError for command '/vpp/.tox/py27/bin/stestr --test-path ./test run --slowest test_ip4 test_ip6' (exited with code 1)
______________________________________________________ summary ______________________________________________________
ERROR: py27: commands failed

Change-Id: Id9f6ecb4897386f790d82ab908963e4971a3aac8
Signed-off-by: Paul Vinciguerra <pvinci@vinciconsulting.com>

2018-11-29 | VPP-1507: Added binary api to dump configured ip_punt_redirect | Pavel Kotucek | 1 | -4/+37
Change-Id: I790f7785e183cc9aaffd5b593617c4e12a32e20d
Signed-off-by: Pavel Kotucek <pavel.kotucek@pantheon.tech>

2018-11-27 | VPP-1508 python3 tests: .encode('hex') | Paul Vinciguerra | 1 | -6/+5
Change to binascii.hexlify() for consistent behavior.

Change-Id: Ie430cdd1ffeb6510db4aa037546e42d85992093b
Signed-off-by: Paul Vinciguerra <pvinci@vinciconsulting.com>

2018-11-26 | Python3 tests: Fix asserts. | Paul Vinciguerra | 1 | -2/+2
Use assert(Not)Equal().
Use assert{Greater,Less}[Equal].

Change-Id: I7c14570b8dce463ee13a67e9c1f10beb1a0308a8
Signed-off-by: Paul Vinciguerra <pvinci@vinciconsulting.com>

2018-10-22 | Fix buffer overflow when fragmenting packets (VPP-1383) | Juraj Sloboda | 1 | -0/+50
Change-Id: Idcda9ae55fa2efb0b2e928bac3e8e86ff8d19eba
Signed-off-by: Juraj Sloboda <jsloboda@cisco.com>

2018-10-15 | VPP-1448: Fix error when recursing down the trie. | mu.duojiao | 1 | -0/+49
Change-Id: Idfed8243643780d3f52dfe6e6ec621c440daa6ae
Signed-off-by: mu.duojiao <mu.duojiao@zte.com.cn>

2018-09-07 | IP route update fix when multipath and drop set | Neale Ranns | 1 | -1/+27
Change-Id: I9cec7486cb6e3c5261d74d2b15a4d19469285a30
Signed-off-by: Neale Ranns <nranns@cisco.com>

2018-08-03 | Loop counter to prevent an infinite number of lookups per packet | Neale Ranns | 1 | -0/+18
Change-Id: I59235d11baac18785a4c90cdaf14e8f3ddf06dab
Signed-off-by: Neale Ranns <neale.ranns@cisco.com>

2018-07-20 | IP directed broadcast | Neale Ranns | 1 | -0/+69
With IP directed broadcast enabled, a packet addressed to the interface's subnet broadcast address will be sent as an L2 broadcast on the interface. When disabled, it will be dropped. It is disabled by default, which preserves the current behaviour.

Change-Id: If154cb92e64834e97a541b32624354348a0eafb3
Signed-off-by: Neale Ranns <nranns@cisco.com>

2018-07-02 | make_test: add icmp packet size sweep and icmp echo tests | Jan Gelety | 1 | -24/+100
Jira: CSIT-1141

Change-Id: I162bb4e718bff188abefc7b2f33501de9c55bb03
Signed-off-by: Jan Gelety <jgelety@cisco.com>

2018-06-11 | MTU: Software interface / Per-protocol MTU support | Ole Troan | 1 | -2/+4
This patch separates setting of the hardware interface and software interface MTU. Software MTU is the L2 payload MTU (i.e. not including the L2 header). Per-protocol MTU for IPv4, IPv6 and MPLS can also be set. Currently only IP4 and IP6 are enabled in the adjacency / rewrite code. Documentation in src/vnet/MTU.md.

Change-Id: Iee2fd6f0bbc8210748dd8e073ab9fab87d323690
Signed-off-by: Ole Troan <ot@cisco.com>

2018-05-23 | VPP-1283: IPv4 PMTU missing MTU value in ICMP4 message. | Ole Troan | 1 | -1/+1
Change-Id: I7a4133c59ff45b0744b48e246a049d9f015026fc
Signed-off-by: Ole Troan <ot@cisco.com>

2018-04-13 | Revert "MTU: Setting of MTU on software interface (instead of hardware interface)" | Damjan Marion | 1 | -1/+1
This reverts commit 70083ee74c3141bbefb185525315f1b34497dcaa.

Reverting as this patch is causing the following crash:
0: /home/damarion/cisco/vpp3/build-data/../src/vnet/devices/devices.h:131 (vnet_get_device_input_thread_index) assertion `queue_id < vec_len (hw->input_node_thread_index_by_queue)' fails
Aborted

Change-Id: Ie2a365032110b1f67be7a9d832885b9899813d39
Signed-off-by: Damjan Marion <damarion@cisco.com>

2018-04-13 | MTU: Setting of MTU on software interface (instead of hardware interface) | Ole Troan | 1 | -1/+1
Change-Id: I98bd454a761a1032738a21edeb0fe847e801f901
Signed-off-by: Ole Troan <ot@cisco.com>

2018-02-12 | Improve MTU handling | Neale Ranns | 1 | -0/+26
- Setting the MTU on an interface updates the L3 max bytes too.
- The value cached in the adjacency is also updated.
- MTU exceeded generates ICMP to the sender.

Change-Id: I343ec71d8e903b529594c4bd0543f04bc7f370b3
Signed-off-by: Neale Ranns <neale.ranns@cisco.com>

2018-01-09 | test: consolidate the multiple versions of send_and_* | Neale Ranns | 1 | -73/+0
Change-Id: I7fa7d0ebf73dab8264a2e5ddbd412600d78ead05
Signed-off-by: Neale Ranns <nranns@cisco.com>

2017-11-11 | MPLS disposition actions at the tail of unicast LSPs | Neale Ranns | 1 | -1/+0
Change-Id: I8c42e26152f2ed1246f91b789887bfc923418bdf
Signed-off-by: Neale Ranns <nranns@cisco.com>

2017-10-31 | Refactor IP input checks for re-use at MPLS disposition | Neale Ranns | 1 | -0/+135
Change-Id: I7aafdecd6f370411138e6ab67b2ff72cda6e0666
Signed-off-by: Neale Ranns <nranns@cisco.com>

2017-10-14 | Source Lookup programmable via API | Neale Ranns | 1 | -1/+109
Change-Id: I5d5d4f22b6369d504455a644f73076d772fbcfb4
Signed-off-by: Neale Ranns <nranns@cisco.com>

2017-10-10 | punt and drop features: | Neale Ranns | 1 | -1/+111
- New IPv4 and IPv6 feature arcs on the punt and drop nodes.
- New features:
  - redirect punted traffic to an interface and nexthop
  - police punted traffic

Change-Id: I53be8bf4e06545add8a3619e462de5ffedd0a95c
Signed-off-by: Neale Ranns <nranns@cisco.com>

2017-09-11 | FIB table add/delete API | Neale Ranns | 1 | -3/+8
Part 2:
- This adds the code to create an IP and MPLS table via the API.
- The enforcement that the table must be created before it is used is still missing; this is so that CSIT can pass.

Change-Id: Id124d884ade6cb7da947225200e3bb193454c555
Signed-off-by: Neale Ranns <nranns@cisco.com>

2017-08-01 | Tests for recursive load-balancing with no choices. | Neale Ranns | 1 | -5/+35
Change-Id: I90bb3369576741d03628a818ffa63cc99d6e4c98
Signed-off-by: Neale Ranns <nranns@cisco.com>

2017-06-06 | Packets received on VLAN-0 map to the main interface | Neale Ranns | 1 | -0/+46
Change-Id: I21b1ad39275495d4d006023b58f630a213445854
Signed-off-by: Neale Ranns <nranns@cisco.com>

2017-05-25 | MPLS hash function improvements | Neale Ranns | 1 | -18/+43
Change-Id: I28e98f445c01493562b6196a4f5b532a51f178af
Signed-off-by: Neale Ranns <nranns@cisco.com>

2017-04-26 | IP Flow Hash Config fixes | Neale Ranns | 1 | -0/+137
- The flow hash config is (and was) cached on the load-balance object, so the fib_table_t struct is not used at switch time. Therefore changes to the table's flow hash config need to be propagated to all load-balances, and hence to all FIB entries in the table.
- Enable the API for setting the IPv6 table flow hash config.
- Use only the hash config in the fib_table_t object and not the one on the ipX_fib_t.
- Add tests.

Change-Id: Ib804c11162c6d4972c764957562c372f663e05d4
Signed-off-by: Neale Ranns <nranns@cisco.com>

2017-03-29 | Sub-net broadcast addresses for IPv4 | Neale Ranns | 1 | -1/+124
Change-Id: Ib2189d01e8bc61de57404159690fb70f89c47277
Signed-off-by: Neale Ranns <nranns@cisco.com>

2017-03-17 | Fix IP feature ordering. | Neale Ranns | 1 | -1/+98
Drop comes before lookup when enabled. is_first_or_last is not required when setting a feature; the anchor is added in find_config_with_features(). Don't make the PG interfaces automatically L3-enabled; this way we can have tests that check the behaviour when an L3 protocol is disabled.

Change-Id: Icef22a920b27ff9cec6ab2da6b05f05c532cb60f
Signed-off-by: Neale Ranns <nranns@cisco.com>

2017-03-09 | Tests to target holes in adjacency and DPO test coverage | Neale Ranns | 1 | -1/+82
Change-Id: Ic6ac7e441a7b75baa02f03c1585d1ae00903a399
Signed-off-by: Neale Ranns <nranns@cisco.com>

2017-01-11 | make test: improve documentation and PEP8 compliance | Klement Sekera | 1 | -7/+10
Change-Id: Ib4f0353aab6112fcc3c3d8f0bcbed5bc4b567b9b
Signed-off-by: Klement Sekera <ksekera@cisco.com>

2016-12-23 | make test: improve handling of packet captures | Klement Sekera | 1 | -11/+8
Perform accounting of expected packets based on created packet infos. Use this accounting info to automatically expect (and verify) the correct number of packets to be captured. Automatically retry the read of the capture file if scapy raises an exception while doing so, to handle rare cases when the capture file is read while only partially written during busy wait. Don't fail assert_nothing_captured if only junk packets arrived.

Change-Id: I16ec2e9410ef510d313ec16b7e13c57d0b2a63f5
Signed-off-by: Klement Sekera <ksekera@cisco.com>

2016-12-09 | make test: FIB add/update/delete - ip4 routes | Matej Klotton | 1 | -2/+266
- JIRA: CSIT-483

Change-Id: Idb4c5bd7a234bc975f3380ece58c0e8d4bfdafd9
Signed-off-by: Matej Klotton <mklotton@cisco.com>

2016-12-05 | make test: fix missing log/packet messages | Klement Sekera | 1 | -7/+5
Change-Id: Idb3119792943664748c4abc3829ad723f4156dfe
Signed-off-by: Klement Sekera <ksekera@cisco.com>

2016-11-15 | Update test documentation. | Matej Klotton | 1 | -18/+60
- Update IRB, IPv4, IPv6 doc.
- Revert 778c2765c8ea5c6628f6d668847f0b9ae06dbf3d.

Change-Id: I9af5ed9329ce5fe01392cf28d5bf321cfc647e48
Signed-off-by: Matej Klotton <mklotton@cisco.com>

2016-11-09 | Disable non-working checks in load-balancer test and rename ip->ip4 | Klement Sekera | 1 | -0/+165
Change-Id: If62011e29e912bf0c47625b0d3b3624ef6375013
Signed-off-by: Klement Sekera <ksekera@cisco.com>

/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdio.h>
#include <stdarg.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_memory.h>         /* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_log.h>
#include <rte_memcpy.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_cpuflags.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_ring.h>
#include <rte_compat.h>

#include "rte_hash.h"
#include "rte_cuckoo_hash.h"

#if defined(RTE_ARCH_X86)
#include "rte_cuckoo_hash_x86.h"
#endif

TAILQ_HEAD(rte_hash_list, rte_tailq_entry);

static struct rte_tailq_elem rte_hash_tailq = {
	.name = "RTE_HASH",
};
EAL_REGISTER_TAILQ(rte_hash_tailq)

struct rte_hash *
rte_hash_find_existing(const char *name)
{
	struct rte_hash *h = NULL;
	struct rte_tailq_entry *te;
	struct rte_hash_list *hash_list;

	hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

	rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
	TAILQ_FOREACH(te, hash_list, next) {
		h = (struct rte_hash *) te->data;
		if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
			break;
	}
	rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}
	return h;
}

void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func)
{
	h->rte_hash_custom_cmp_eq = func;
}

static inline int
rte_hash_cmp_eq(const void *key1, const void *key2, const struct rte_hash *h)
{
	if (h->cmp_jump_table_idx == KEY_CUSTOM)
		return h->rte_hash_custom_cmp_eq(key1, key2, h->key_len);
	else
		return cmp_jump_table[h->cmp_jump_table_idx](key1, key2, h->key_len);
}

struct rte_hash *
rte_hash_create(const struct rte_hash_parameters *params)
{
	struct rte_hash *h = NULL;
	struct rte_tailq_entry *te = NULL;
	struct rte_hash_list *hash_list;
	struct rte_ring *r = NULL;
	char hash_name[RTE_HASH_NAMESIZE];
	void *k = NULL;
	void *buckets = NULL;
	char ring_name[RTE_RING_NAMESIZE];
	unsigned num_key_slots;
	unsigned hw_trans_mem_support = 0;
	unsigned i;

	hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

	if (params == NULL) {
		RTE_LOG(ERR, HASH, "rte_hash_create has no parameters\n");
		return NULL;
	}

	/* Check for valid parameters */
	if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
			(params->entries < RTE_HASH_BUCKET_ENTRIES) ||
			!rte_is_power_of_2(RTE_HASH_BUCKET_ENTRIES) ||
			(params->key_len == 0)) {
		rte_errno = EINVAL;
		RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
		return NULL;
	}

	/* Check extra flags field to check extra options. */
	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
		hw_trans_mem_support = 1;

	/* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
	if (hw_trans_mem_support)
		/*
		 * Increase number of slots by total number of indices
		 * that can be stored in the lcore caches
		 * except for the first cache
		 */
		num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
					LCORE_CACHE_SIZE + 1;
	else
		num_key_slots = params->entries + 1;

	snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
	r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
			params->socket_id, 0);
	if (r == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err;
	}

	snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* guarantee there's no existing entry with the same name: this is
	 * normally already checked by the ring creation above */
	TAILQ_FOREACH(te, hash_list, next) {
		h = (struct rte_hash *) te->data;
		if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
			break;
	}
	h = NULL;
	if (te != NULL) {
		rte_errno = EEXIST;
		te = NULL;
		goto err_unlock;
	}

	te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
		goto err_unlock;
	}

	h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
					RTE_CACHE_LINE_SIZE, params->socket_id);

	if (h == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err_unlock;
	}

	const uint32_t num_buckets = rte_align32pow2(params->entries)
					/ RTE_HASH_BUCKET_ENTRIES;

	buckets = rte_zmalloc_socket(NULL,
				num_buckets * sizeof(struct rte_hash_bucket),
				RTE_CACHE_LINE_SIZE, params->socket_id);

	if (buckets == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err_unlock;
	}

	const uint32_t key_entry_size = sizeof(struct rte_hash_key) + params->key_len;
	const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;

	k = rte_zmalloc_socket(NULL, key_tbl_size,
			RTE_CACHE_LINE_SIZE, params->socket_id);

	if (k == NULL) {
		RTE_LOG(ERR, HASH, "memory allocation failed\n");
		goto err_unlock;
	}

/*
 * If x86 or ARM64 architecture is used, select an appropriate key-compare
 * function, which may use architecture-specific intrinsics; otherwise use
 * generic memcmp
 */
#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
	/* Select function to compare keys */
	switch (params->key_len) {
	case 16:
		h->cmp_jump_table_idx = KEY_16_BYTES;
		break;
	case 32:
		h->cmp_jump_table_idx = KEY_32_BYTES;
		break;
	case 48:
		h->cmp_jump_table_idx = KEY_48_BYTES;
		break;
	case 64:
		h->cmp_jump_table_idx = KEY_64_BYTES;
		break;
	case 80:
		h->cmp_jump_table_idx = KEY_80_BYTES;
		break;
	case 96:
		h->cmp_jump_table_idx = KEY_96_BYTES;
		break;
	case 112:
		h->cmp_jump_table_idx = KEY_112_BYTES;
		break;
	case 128:
		h->cmp_jump_table_idx = KEY_128_BYTES;
		break;
	default:
		/* If key is not multiple of 16, use generic memcmp */
		h->cmp_jump_table_idx = KEY_OTHER_BYTES;
	}
#else
	h->cmp_jump_table_idx = KEY_OTHER_BYTES;
#endif

	if (hw_trans_mem_support) {
		h->local_free_slots = rte_zmalloc_socket(NULL,
				sizeof(struct lcore_cache) * RTE_MAX_LCORE,
				RTE_CACHE_LINE_SIZE, params->socket_id);
	}

	/* Setup hash context */
	snprintf(h->name, sizeof(h->name), "%s", params->name);
	h->entries = params->entries;
	h->key_len = params->key_len;
	h->key_entry_size = key_entry_size;
	h->hash_func_init_val = params->hash_func_init_val;

	h->num_buckets = num_buckets;
	h->bucket_bitmask = h->num_buckets - 1;
	h->buckets = buckets;
	h->hash_func = (params->hash_func == NULL) ?
		DEFAULT_HASH_FUNC : params->hash_func;
	h->key_store = k;
	h->free_slots = r;
	h->hw_trans_mem_support = hw_trans_mem_support;

	/* Turn on multi-writer only with an explicit flag from the user and
	 * TM support.
	 */
	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD) {
		if (h->hw_trans_mem_support) {
			h->add_key = ADD_KEY_MULTIWRITER_TM;
		} else {
			h->add_key = ADD_KEY_MULTIWRITER;
			h->multiwriter_lock = rte_malloc(NULL,
							sizeof(rte_spinlock_t),
							LCORE_CACHE_SIZE);
			rte_spinlock_init(h->multiwriter_lock);
		}
	} else
		h->add_key = ADD_KEY_SINGLEWRITER;

	/* Populate free slots ring. Entry zero is reserved for key misses. */
	for (i = 1; i < params->entries + 1; i++)
		rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));

	te->data = (void *) h;
	TAILQ_INSERT_TAIL(hash_list, te, next);
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	return h;
err_unlock:
	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
err:
	rte_ring_free(r);
	rte_free(te);
	rte_free(h);
	rte_free(buckets);
	rte_free(k);
	return NULL;
}
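
/*
 * Illustrative usage sketch (an assumption-laden example, not part of the
 * library): create a table sized for 1M entries with 16-byte keys on the
 * caller's NUMA socket. A NULL hash_func falls back to DEFAULT_HASH_FUNC.
 *
 *	struct rte_hash_parameters params = {
 *		.name = "example_table",
 *		.entries = 1 << 20,
 *		.key_len = 16,
 *		.hash_func = NULL,
 *		.hash_func_init_val = 0,
 *		.socket_id = rte_socket_id(),
 *	};
 *	struct rte_hash *tbl = rte_hash_create(&params);
 *	if (tbl == NULL)
 *		printf("create failed: %d\n", rte_errno);
 */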

void
rte_hash_free(struct rte_hash *h)
{
	struct rte_tailq_entry *te;
	struct rte_hash_list *hash_list;

	if (h == NULL)
		return;

	hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);

	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);

	/* find out tailq entry */
	TAILQ_FOREACH(te, hash_list, next) {
		if (te->data == (void *) h)
			break;
	}

	if (te == NULL) {
		rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
		return;
	}

	TAILQ_REMOVE(hash_list, te, next);

	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);

	if (h->hw_trans_mem_support)
		rte_free(h->local_free_slots);

	if (h->add_key == ADD_KEY_MULTIWRITER)
		rte_free(h->multiwriter_lock);
	rte_ring_free(h->free_slots);
	rte_free(h->key_store);
	rte_free(h->buckets);
	rte_free(h);
	rte_free(te);
}

hash_sig_t
rte_hash_hash(const struct rte_hash *h, const void *key)
{
	/* calc hash result by key */
	return h->hash_func(key, h->key_len, h->hash_func_init_val);
}

/* Calc the secondary hash value from the primary hash value of a given key */
static inline hash_sig_t
rte_hash_secondary_hash(const hash_sig_t primary_hash)
{
	static const unsigned all_bits_shift = 12;
	static const unsigned alt_bits_xor = 0x5bd1e995;

	uint32_t tag = primary_hash >> all_bits_shift;

	return primary_hash ^ ((tag + 1) * alt_bits_xor);
}
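
/*
 * Note: each bucket entry stores both its current and alternative
 * signatures, so the alternative bucket index can be recomputed as
 * (signatures[i].alt & h->bucket_bitmask) without re-reading or
 * re-hashing the key; make_space_bucket() below relies on this to
 * displace entries cheaply.
 */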

void
rte_hash_reset(struct rte_hash *h)
{
	void *ptr;
	unsigned i;

	if (h == NULL)
		return;

	memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
	memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));

	/* clear the free ring */
	while (rte_ring_dequeue(h->free_slots, &ptr) == 0)
		rte_pause();

	/* Repopulate the free slots ring. Entry zero is reserved for key misses */
	for (i = 1; i < h->entries + 1; i++)
		rte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i));

	if (h->hw_trans_mem_support) {
		/* Reset local caches per lcore */
		for (i = 0; i < RTE_MAX_LCORE; i++)
			h->local_free_slots[i].len = 0;
	}
}

/* Search for an entry that can be pushed to its alternative location */
static inline int
make_space_bucket(const struct rte_hash *h, struct rte_hash_bucket *bkt)
{
	unsigned i, j;
	int ret;
	uint32_t next_bucket_idx;
	struct rte_hash_bucket *next_bkt[RTE_HASH_BUCKET_ENTRIES];

	/*
	 * Push existing item (search for bucket with space in
	 * alternative locations) to its alternative location
	 */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		/* Search for space in alternative locations */
		next_bucket_idx = bkt->signatures[i].alt & h->bucket_bitmask;
		next_bkt[i] = &h->buckets[next_bucket_idx];
		for (j = 0; j < RTE_HASH_BUCKET_ENTRIES; j++) {
			if (next_bkt[i]->signatures[j].sig == NULL_SIGNATURE)
				break;
		}

		if (j != RTE_HASH_BUCKET_ENTRIES)
			break;
	}

	/* Alternative location has spare room (end of recursive function) */
	if (i != RTE_HASH_BUCKET_ENTRIES) {
		next_bkt[i]->signatures[j].alt = bkt->signatures[i].current;
		next_bkt[i]->signatures[j].current = bkt->signatures[i].alt;
		next_bkt[i]->key_idx[j] = bkt->key_idx[i];
		return i;
	}

	/* Pick entry that has not been pushed yet */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++)
		if (bkt->flag[i] == 0)
			break;

	/* All entries have been pushed, so entry cannot be added */
	if (i == RTE_HASH_BUCKET_ENTRIES)
		return -ENOSPC;

	/* Set flag to indicate that this entry is going to be pushed */
	bkt->flag[i] = 1;
	/* Need room in alternative bucket to insert the pushed entry */
	ret = make_space_bucket(h, next_bkt[i]);
	/*
	 * After the recursive call returns, clear the flag and insert the
	 * pushed entry in its alternative location if the displacement
	 * succeeded, or propagate the error.
	 */
	bkt->flag[i] = 0;
	if (ret >= 0) {
		next_bkt[i]->signatures[ret].alt = bkt->signatures[i].current;
		next_bkt[i]->signatures[ret].current = bkt->signatures[i].alt;
		next_bkt[i]->key_idx[ret] = bkt->key_idx[i];
		return i;
	} else
		return ret;

}

/*
 * Function called to enqueue an index back in the cache/ring,
 * as the slot has not been used and can be reused by the
 * next addition attempt.
 */
static inline void
enqueue_slot_back(const struct rte_hash *h,
		struct lcore_cache *cached_free_slots,
		void *slot_id)
{
	if (h->hw_trans_mem_support) {
		cached_free_slots->objs[cached_free_slots->len] = slot_id;
		cached_free_slots->len++;
	} else
		rte_ring_sp_enqueue(h->free_slots, slot_id);
}

static inline int32_t
__rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
						hash_sig_t sig, void *data)
{
	hash_sig_t alt_hash;
	uint32_t prim_bucket_idx, sec_bucket_idx;
	unsigned i;
	struct rte_hash_bucket *prim_bkt, *sec_bkt;
	struct rte_hash_key *new_k, *k, *keys = h->key_store;
	void *slot_id = NULL;
	uint32_t new_idx;
	int ret;
	unsigned n_slots;
	unsigned lcore_id;
	struct lcore_cache *cached_free_slots = NULL;

	if (h->add_key == ADD_KEY_MULTIWRITER)
		rte_spinlock_lock(h->multiwriter_lock);

	prim_bucket_idx = sig & h->bucket_bitmask;
	prim_bkt = &h->buckets[prim_bucket_idx];
	rte_prefetch0(prim_bkt);

	alt_hash = rte_hash_secondary_hash(sig);
	sec_bucket_idx = alt_hash & h->bucket_bitmask;
	sec_bkt = &h->buckets[sec_bucket_idx];
	rte_prefetch0(sec_bkt);

	/* Get a new slot for storing the new key */
	if (h->hw_trans_mem_support) {
		lcore_id = rte_lcore_id();
		cached_free_slots = &h->local_free_slots[lcore_id];
		/* Try to get a free slot from the local cache */
		if (cached_free_slots->len == 0) {
			/* Need to get another burst of free slots from global ring */
			n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
					cached_free_slots->objs, LCORE_CACHE_SIZE);
			if (n_slots == 0)
				return -ENOSPC;

			cached_free_slots->len += n_slots;
		}

		/* Get a free slot from the local cache */
		cached_free_slots->len--;
		slot_id = cached_free_slots->objs[cached_free_slots->len];
	} else {
		if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0)
			return -ENOSPC;
	}

	new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
	rte_prefetch0(new_k);
	new_idx = (uint32_t)((uintptr_t) slot_id);

	/* Check if key is already inserted in primary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (prim_bkt->signatures[i].current == sig &&
				prim_bkt->signatures[i].alt == alt_hash) {
			k = (struct rte_hash_key *) ((char *)keys +
					prim_bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				/* Enqueue index of free slot back in the ring. */
				enqueue_slot_back(h, cached_free_slots, slot_id);
				/* Update data */
				k->pdata = data;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return prim_bkt->key_idx[i] - 1;
			}
		}
	}

	/* Check if key is already inserted in secondary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (sec_bkt->signatures[i].alt == sig &&
				sec_bkt->signatures[i].current == alt_hash) {
			k = (struct rte_hash_key *) ((char *)keys +
					sec_bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				/* Enqueue index of free slot back in the ring. */
				enqueue_slot_back(h, cached_free_slots, slot_id);
				/* Update data */
				k->pdata = data;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return sec_bkt->key_idx[i] - 1;
			}
		}
	}

	/* Copy key */
	rte_memcpy(new_k->key, key, h->key_len);
	new_k->pdata = data;

#if defined(RTE_ARCH_X86) /* currently only x86 support HTM */
	if (h->add_key == ADD_KEY_MULTIWRITER_TM) {
		ret = rte_hash_cuckoo_insert_mw_tm(prim_bkt,
				sig, alt_hash, new_idx);
		if (ret >= 0)
			return new_idx - 1;

		/* Primary bucket full, need to make space for new entry */
		ret = rte_hash_cuckoo_make_space_mw_tm(h, prim_bkt, sig,
							alt_hash, new_idx);

		if (ret >= 0)
			return new_idx - 1;

		/* Also search secondary bucket to get better occupancy */
		ret = rte_hash_cuckoo_make_space_mw_tm(h, sec_bkt, sig,
							alt_hash, new_idx);

		if (ret >= 0)
			return new_idx - 1;
	} else {
#endif
		for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
			/* Check if slot is available */
			if (likely(prim_bkt->signatures[i].sig == NULL_SIGNATURE)) {
				prim_bkt->signatures[i].current = sig;
				prim_bkt->signatures[i].alt = alt_hash;
				prim_bkt->key_idx[i] = new_idx;
				break;
			}
		}

		if (i != RTE_HASH_BUCKET_ENTRIES) {
			if (h->add_key == ADD_KEY_MULTIWRITER)
				rte_spinlock_unlock(h->multiwriter_lock);
			return new_idx - 1;
		}

		/*
		 * Primary bucket is full, so make space by pushing an
		 * existing entry to its alternative bucket (recursive
		 * displacement). If successful, insert the new entry in the
		 * freed position; otherwise return the error and put the new
		 * slot back in the ring.
		 */
		ret = make_space_bucket(h, prim_bkt);
		if (ret >= 0) {
			prim_bkt->signatures[ret].current = sig;
			prim_bkt->signatures[ret].alt = alt_hash;
			prim_bkt->key_idx[ret] = new_idx;
			if (h->add_key == ADD_KEY_MULTIWRITER)
				rte_spinlock_unlock(h->multiwriter_lock);
			return new_idx - 1;
		}
#if defined(RTE_ARCH_X86)
	}
#endif
	/* Error in addition, store new slot back in the ring and return error */
	enqueue_slot_back(h, cached_free_slots, (void *)((uintptr_t) new_idx));

	if (h->add_key == ADD_KEY_MULTIWRITER)
		rte_spinlock_unlock(h->multiwriter_lock);
	return ret;
}

int32_t
rte_hash_add_key_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_add_key_with_hash(h, key, sig, 0);
}

int32_t
rte_hash_add_key(const struct rte_hash *h, const void *key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), 0);
}

int
rte_hash_add_key_with_hash_data(const struct rte_hash *h,
			const void *key, hash_sig_t sig, void *data)
{
	int ret;

	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	ret = __rte_hash_add_key_with_hash(h, key, sig, data);
	if (ret >= 0)
		return 0;
	else
		return ret;
}

int
rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
{
	int ret;

	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);

	ret = __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), data);
	if (ret >= 0)
		return 0;
	else
		return ret;
}
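
/*
 * Illustrative add/update sketch (an assumption-laden example, not part of
 * the library: 'tbl' is a table created as in the sketch after
 * rte_hash_create(); 'flow_key' and 'flow_ctx' are hypothetical):
 *
 *	int ret = rte_hash_add_key_data(tbl, &flow_key, flow_ctx);
 *	if (ret < 0)
 *		handle_add_failure(ret);	// e.g. -ENOSPC: table full
 */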
static inline int32_t
__rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
					hash_sig_t sig, void **data)
{
	uint32_t bucket_idx;
	hash_sig_t alt_hash;
	unsigned i;
	struct rte_hash_bucket *bkt;
	struct rte_hash_key *k, *keys = h->key_store;

	bucket_idx = sig & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	/* Check if key is in primary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->signatures[i].current == sig &&
				bkt->signatures[i].sig != NULL_SIGNATURE) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				if (data != NULL)
					*data = k->pdata;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return bkt->key_idx[i] - 1;
			}
		}
	}

	/* Calculate secondary hash */
	alt_hash = rte_hash_secondary_hash(sig);
	bucket_idx = alt_hash & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	/* Check if key is in secondary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->signatures[i].current == alt_hash &&
				bkt->signatures[i].alt == sig) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				if (data != NULL)
					*data = k->pdata;
				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return bkt->key_idx[i] - 1;
			}
		}
	}

	return -ENOENT;
}

int32_t
rte_hash_lookup_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, sig, NULL);
}

int32_t
rte_hash_lookup(const struct rte_hash *h, const void *key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), NULL);
}

int
rte_hash_lookup_with_hash_data(const struct rte_hash *h,
			const void *key, hash_sig_t sig, void **data)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, sig, data);
}

int
rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), data);
}
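
/*
 * Illustrative lookup sketch ('tbl' and 'flow_key' as in the earlier
 * hypothetical examples). The positional variants return the stored-key
 * index (>= 0) or -ENOENT; the *_data variants also hand back the user
 * pointer stored with the key:
 *
 *	void *ctx;
 *	if (rte_hash_lookup_data(tbl, &flow_key, &ctx) >= 0)
 *		process_flow(ctx);	// hypothetical consumer
 */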

static inline void
remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
{
	unsigned lcore_id, n_slots;
	struct lcore_cache *cached_free_slots;

	bkt->signatures[i].sig = NULL_SIGNATURE;
	if (h->hw_trans_mem_support) {
		lcore_id = rte_lcore_id();
		cached_free_slots = &h->local_free_slots[lcore_id];
		/* Cache full; flush cached free slots back to the global ring. */
		if (cached_free_slots->len == LCORE_CACHE_SIZE) {
			/* Need to enqueue the free slots in global ring. */
			n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
						cached_free_slots->objs,
						LCORE_CACHE_SIZE);
			cached_free_slots->len -= n_slots;
		}
		/* Put index of new free slot in cache. */
		cached_free_slots->objs[cached_free_slots->len] =
				(void *)((uintptr_t)bkt->key_idx[i]);
		cached_free_slots->len++;
	} else {
		rte_ring_sp_enqueue(h->free_slots,
				(void *)((uintptr_t)bkt->key_idx[i]));
	}
}

static inline int32_t
__rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
						hash_sig_t sig)
{
	uint32_t bucket_idx;
	hash_sig_t alt_hash;
	unsigned i;
	struct rte_hash_bucket *bkt;
	struct rte_hash_key *k, *keys = h->key_store;

	bucket_idx = sig & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	/* Check if key is in primary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->signatures[i].current == sig &&
				bkt->signatures[i].sig != NULL_SIGNATURE) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				remove_entry(h, bkt, i);

				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return bkt->key_idx[i] - 1;
			}
		}
	}

	/* Calculate secondary hash */
	alt_hash = rte_hash_secondary_hash(sig);
	bucket_idx = alt_hash & h->bucket_bitmask;
	bkt = &h->buckets[bucket_idx];

	/* Check if key is in secondary location */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		if (bkt->signatures[i].current == alt_hash &&
				bkt->signatures[i].sig != NULL_SIGNATURE) {
			k = (struct rte_hash_key *) ((char *)keys +
					bkt->key_idx[i] * h->key_entry_size);
			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
				remove_entry(h, bkt, i);

				/*
				 * Return index where key is stored,
				 * subtracting the first dummy index
				 */
				return bkt->key_idx[i] - 1;
			}
		}
	}

	return -ENOENT;
}

int32_t
rte_hash_del_key_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_del_key_with_hash(h, key, sig);
}

int32_t
rte_hash_del_key(const struct rte_hash *h, const void *key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
}
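
/*
 * Illustrative delete sketch ('tbl' and 'flow_key' as above). On success
 * the call returns the same position that add/lookup reported for the key,
 * which callers can use to release per-entry state kept in parallel arrays:
 *
 *	int32_t pos = rte_hash_del_key(tbl, &flow_key);
 *	if (pos >= 0)
 *		release_entry_state(pos);	// hypothetical bookkeeping
 */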

int
rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
			       void **key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);

	struct rte_hash_key *k, *keys = h->key_store;
	k = (struct rte_hash_key *) ((char *) keys + (position + 1) *
				     h->key_entry_size);
	*key = k->key;

	if (position !=
	    __rte_hash_lookup_with_hash(h, *key, rte_hash_hash(h, *key),
					NULL)) {
		return -ENOENT;
	}

	return 0;
}

/* Lookup bulk stage 0: Prefetch input key */
static inline void
lookup_stage0(unsigned *idx, uint64_t *lookup_mask,
		const void * const *keys)
{
	*idx = __builtin_ctzl(*lookup_mask);
	if (*lookup_mask == 0)
		*idx = 0;

	rte_prefetch0(keys[*idx]);
	*lookup_mask &= ~(1llu << *idx);
}

/*
 * Lookup bulk stage 1: Calculate primary/secondary hashes
 * and prefetch primary/secondary buckets
 */
static inline void
lookup_stage1(unsigned idx, hash_sig_t *prim_hash, hash_sig_t *sec_hash,
		const struct rte_hash_bucket **primary_bkt,
		const struct rte_hash_bucket **secondary_bkt,
		hash_sig_t *hash_vals, const void * const *keys,
		const struct rte_hash *h)
{
	*prim_hash = rte_hash_hash(h, keys[idx]);
	hash_vals[idx] = *prim_hash;
	*sec_hash = rte_hash_secondary_hash(*prim_hash);

	*primary_bkt = &h->buckets[*prim_hash & h->bucket_bitmask];
	*secondary_bkt = &h->buckets[*sec_hash & h->bucket_bitmask];

	rte_prefetch0(*primary_bkt);
	rte_prefetch0(*secondary_bkt);
}

/*
 * Lookup bulk stage 2:  Search for match hashes in primary/secondary locations
 * and prefetch first key slot
 */
static inline void
lookup_stage2(unsigned idx, hash_sig_t prim_hash, hash_sig_t sec_hash,
		const struct rte_hash_bucket *prim_bkt,
		const struct rte_hash_bucket *sec_bkt,
		const struct rte_hash_key **key_slot, int32_t *positions,
		uint64_t *extra_hits_mask, const void *keys,
		const struct rte_hash *h)
{
	unsigned prim_hash_matches, sec_hash_matches, key_idx, i;
	unsigned total_hash_matches;

	prim_hash_matches = 1 << RTE_HASH_BUCKET_ENTRIES;
	sec_hash_matches = 1 << RTE_HASH_BUCKET_ENTRIES;
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		prim_hash_matches |= ((prim_hash == prim_bkt->signatures[i].current) << i);
		sec_hash_matches |= ((sec_hash == sec_bkt->signatures[i].current) << i);
	}

	key_idx = prim_bkt->key_idx[__builtin_ctzl(prim_hash_matches)];
	if (key_idx == 0)
		key_idx = sec_bkt->key_idx[__builtin_ctzl(sec_hash_matches)];

	total_hash_matches = (prim_hash_matches |
				(sec_hash_matches << (RTE_HASH_BUCKET_ENTRIES + 1)));
	*key_slot = (const struct rte_hash_key *) ((const char *)keys +
					key_idx * h->key_entry_size);

	rte_prefetch0(*key_slot);
	/*
	 * Return index where key is stored,
	 * subtracting the first dummy index
	 */
	positions[idx] = (key_idx - 1);

	*extra_hits_mask |= (uint64_t)(__builtin_popcount(total_hash_matches) > 3) << idx;

}


/* Lookup bulk stage 3: Check if key matches, update hit mask and return data */
static inline void
lookup_stage3(unsigned idx, const struct rte_hash_key *key_slot, const void * const *keys,
		const int32_t *positions, void *data[], uint64_t *hits,
		const struct rte_hash *h)
{
	unsigned hit;
	unsigned key_idx;

	hit = !rte_hash_cmp_eq(key_slot->key, keys[idx], h);
	if (data != NULL)
		data[idx] = key_slot->pdata;

	key_idx = positions[idx] + 1;
	/*
	 * If key index is 0, force hit to be 0, in case key to be looked up
	 * is all zero (as in the dummy slot), which would result in a wrong hit
	 */
	*hits |= (uint64_t)(hit && !!key_idx)  << idx;
}

static inline void
__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
			uint32_t num_keys, int32_t *positions,
			uint64_t *hit_mask, void *data[])
{
	uint64_t hits = 0;
	uint64_t extra_hits_mask = 0;
	uint64_t lookup_mask, miss_mask;
	unsigned idx;
	const void *key_store = h->key_store;
	int ret;
	hash_sig_t hash_vals[RTE_HASH_LOOKUP_BULK_MAX];

	unsigned idx00, idx01, idx10, idx11, idx20, idx21, idx30, idx31;
	const struct rte_hash_bucket *primary_bkt10, *primary_bkt11;
	const struct rte_hash_bucket *secondary_bkt10, *secondary_bkt11;
	const struct rte_hash_bucket *primary_bkt20, *primary_bkt21;
	const struct rte_hash_bucket *secondary_bkt20, *secondary_bkt21;
	const struct rte_hash_key *k_slot20, *k_slot21, *k_slot30, *k_slot31;
	hash_sig_t primary_hash10, primary_hash11;
	hash_sig_t secondary_hash10, secondary_hash11;
	hash_sig_t primary_hash20, primary_hash21;
	hash_sig_t secondary_hash20, secondary_hash21;
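
	/*
	 * The code below runs a four-stage software pipeline, two keys per
	 * stage: stage 0 prefetches a key, stage 1 computes its hashes and
	 * prefetches both candidate buckets, stage 2 matches signatures and
	 * prefetches the key slot, stage 3 compares the full key. Variables
	 * are named <name><stage><lane> (e.g. idx21 is the key index in
	 * stage 2, lane 1); the blocks before and after the while loop fill
	 * and drain the pipeline.
	 */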

	lookup_mask = (uint64_t) -1 >> (64 - num_keys);
	miss_mask = lookup_mask;

	lookup_stage0(&idx00, &lookup_mask, keys);
	lookup_stage0(&idx01, &lookup_mask, keys);

	idx10 = idx00, idx11 = idx01;

	lookup_stage0(&idx00, &lookup_mask, keys);
	lookup_stage0(&idx01, &lookup_mask, keys);
	lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
			&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
	lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
			&primary_bkt11,	&secondary_bkt11, hash_vals, keys, h);

	primary_bkt20 = primary_bkt10;
	primary_bkt21 = primary_bkt11;
	secondary_bkt20 = secondary_bkt10;
	secondary_bkt21 = secondary_bkt11;
	primary_hash20 = primary_hash10;
	primary_hash21 = primary_hash11;
	secondary_hash20 = secondary_hash10;
	secondary_hash21 = secondary_hash11;
	idx20 = idx10, idx21 = idx11;
	idx10 = idx00, idx11 = idx01;

	lookup_stage0(&idx00, &lookup_mask, keys);
	lookup_stage0(&idx01, &lookup_mask, keys);
	lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
			&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
	lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
			&primary_bkt11,	&secondary_bkt11, hash_vals, keys, h);
	lookup_stage2(idx20, primary_hash20, secondary_hash20, primary_bkt20,
			secondary_bkt20, &k_slot20, positions, &extra_hits_mask,
			key_store, h);
	lookup_stage2(idx21, primary_hash21, secondary_hash21, primary_bkt21,
			secondary_bkt21, &k_slot21, positions, &extra_hits_mask,
			key_store, h);

	while (lookup_mask) {
		k_slot30 = k_slot20, k_slot31 = k_slot21;
		idx30 = idx20, idx31 = idx21;
		primary_bkt20 = primary_bkt10;
		primary_bkt21 = primary_bkt11;
		secondary_bkt20 = secondary_bkt10;
		secondary_bkt21 = secondary_bkt11;
		primary_hash20 = primary_hash10;
		primary_hash21 = primary_hash11;
		secondary_hash20 = secondary_hash10;
		secondary_hash21 = secondary_hash11;
		idx20 = idx10, idx21 = idx11;
		idx10 = idx00, idx11 = idx01;

		lookup_stage0(&idx00, &lookup_mask, keys);
		lookup_stage0(&idx01, &lookup_mask, keys);
		lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
			&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
		lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
			&primary_bkt11,	&secondary_bkt11, hash_vals, keys, h);
		lookup_stage2(idx20, primary_hash20, secondary_hash20,
			primary_bkt20, secondary_bkt20, &k_slot20, positions,
			&extra_hits_mask, key_store, h);
		lookup_stage2(idx21, primary_hash21, secondary_hash21,
			primary_bkt21, secondary_bkt21,	&k_slot21, positions,
			&extra_hits_mask, key_store, h);
		lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
		lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);
	}

	k_slot30 = k_slot20, k_slot31 = k_slot21;
	idx30 = idx20, idx31 = idx21;
	primary_bkt20 = primary_bkt10;
	primary_bkt21 = primary_bkt11;
	secondary_bkt20 = secondary_bkt10;
	secondary_bkt21 = secondary_bkt11;
	primary_hash20 = primary_hash10;
	primary_hash21 = primary_hash11;
	secondary_hash20 = secondary_hash10;
	secondary_hash21 = secondary_hash11;
	idx20 = idx10, idx21 = idx11;
	idx10 = idx00, idx11 = idx01;

	lookup_stage1(idx10, &primary_hash10, &secondary_hash10,
		&primary_bkt10, &secondary_bkt10, hash_vals, keys, h);
	lookup_stage1(idx11, &primary_hash11, &secondary_hash11,
		&primary_bkt11,	&secondary_bkt11, hash_vals, keys, h);
	lookup_stage2(idx20, primary_hash20, secondary_hash20, primary_bkt20,
		secondary_bkt20, &k_slot20, positions, &extra_hits_mask,
		key_store, h);
	lookup_stage2(idx21, primary_hash21, secondary_hash21, primary_bkt21,
		secondary_bkt21, &k_slot21, positions, &extra_hits_mask,
		key_store, h);
	lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
	lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);

	k_slot30 = k_slot20, k_slot31 = k_slot21;
	idx30 = idx20, idx31 = idx21;
	primary_bkt20 = primary_bkt10;
	primary_bkt21 = primary_bkt11;
	secondary_bkt20 = secondary_bkt10;
	secondary_bkt21 = secondary_bkt11;
	primary_hash20 = primary_hash10;
	primary_hash21 = primary_hash11;
	secondary_hash20 = secondary_hash10;
	secondary_hash21 = secondary_hash11;
	idx20 = idx10, idx21 = idx11;

	lookup_stage2(idx20, primary_hash20, secondary_hash20, primary_bkt20,
		secondary_bkt20, &k_slot20, positions, &extra_hits_mask,
		key_store, h);
	lookup_stage2(idx21, primary_hash21, secondary_hash21, primary_bkt21,
		secondary_bkt21, &k_slot21, positions, &extra_hits_mask,
		key_store, h);
	lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
	lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);

	k_slot30 = k_slot20, k_slot31 = k_slot21;
	idx30 = idx20, idx31 = idx21;

	lookup_stage3(idx30, k_slot30, keys, positions, data, &hits, h);
	lookup_stage3(idx31, k_slot31, keys, positions, data, &hits, h);

	/* ignore any items we have already found */
	extra_hits_mask &= ~hits;

	if (unlikely(extra_hits_mask)) {
		/* run a single search for each remaining item */
		do {
			idx = __builtin_ctzl(extra_hits_mask);
			if (data != NULL) {
				ret = rte_hash_lookup_with_hash_data(h,
						keys[idx], hash_vals[idx], &data[idx]);
				if (ret >= 0)
					hits |= 1ULL << idx;
			} else {
				positions[idx] = rte_hash_lookup_with_hash(h,
							keys[idx], hash_vals[idx]);
				if (positions[idx] >= 0)
					hits |= 1llu << idx;
			}
			extra_hits_mask &= ~(1llu << idx);
		} while (extra_hits_mask);
	}

	miss_mask &= ~hits;
	if (unlikely(miss_mask)) {
		do {
			idx = __builtin_ctzl(miss_mask);
			positions[idx] = -ENOENT;
			miss_mask &= ~(1llu << idx);
		} while (miss_mask);
	}

	if (hit_mask != NULL)
		*hit_mask = hits;
}

int
rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
		      uint32_t num_keys, int32_t *positions)
{
	RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
			(num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
			(positions == NULL)), -EINVAL);

	__rte_hash_lookup_bulk(h, keys, num_keys, positions, NULL, NULL);
	return 0;
}

int
rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
		      uint32_t num_keys, uint64_t *hit_mask, void *data[])
{
	RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
			(num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
			(hit_mask == NULL)), -EINVAL);

	int32_t positions[num_keys];

	__rte_hash_lookup_bulk(h, keys, num_keys, positions, hit_mask, data);

	/* Return number of hits */
	return __builtin_popcountl(*hit_mask);
}
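
/*
 * Illustrative bulk-lookup sketch ('tbl' as above; 'keys' and BURST are
 * hypothetical). Hits are reported both through the bitmask and, when a
 * data array is supplied, through the per-slot user pointers:
 *
 *	void *data[BURST];	// BURST <= RTE_HASH_LOOKUP_BULK_MAX
 *	uint64_t hit_mask;
 *	rte_hash_lookup_bulk_data(tbl, keys, BURST, &hit_mask, data);
 *	for (unsigned j = 0; j < BURST; j++)
 *		if (hit_mask & (1ULL << j))
 *			process_flow(data[j]);
 */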

int32_t
rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
{
	uint32_t bucket_idx, idx, position;
	struct rte_hash_key *next_key;

	RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);

	const uint32_t total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;
	/* Out of bounds */
	if (*next >= total_entries)
		return -ENOENT;

	/* Calculate bucket and index of current iterator */
	bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
	idx = *next % RTE_HASH_BUCKET_ENTRIES;

	/* If current position is empty, go to the next one */
	while (h->buckets[bucket_idx].signatures[idx].sig == NULL_SIGNATURE) {
		(*next)++;
		/* End of table */
		if (*next == total_entries)
			return -ENOENT;
		bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
		idx = *next % RTE_HASH_BUCKET_ENTRIES;
	}

	/* Get position of entry in key table */
	position = h->buckets[bucket_idx].key_idx[idx];
	next_key = (struct rte_hash_key *) ((char *)h->key_store +
				position * h->key_entry_size);
	/* Return key and data */
	*key = next_key->key;
	*data = next_key->pdata;

	/* Increment iterator */
	(*next)++;

	return position - 1;
}
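
/*
 * Illustrative iteration sketch ('tbl' as above): walk every occupied
 * entry until rte_hash_iterate() returns -ENOENT at the end of the table:
 *
 *	const void *key;
 *	void *data;
 *	uint32_t iter = 0;
 *	while (rte_hash_iterate(tbl, &key, &data, &iter) >= 0)
 *		visit_entry(key, data);	// hypothetical consumer
 */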