/* * Copyright (c) 2015 Cisco and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * node_funcs.h: processing nodes global functions/inlines * * Copyright (c) 2008 Eliot Dresselhaus * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /** \file vlib node functions */ #ifndef included_vlib_node_funcs_h #define included_vlib_node_funcs_h #include #include /** \brief Get vlib node by index. @warning This function will ASSERT if @c i is out of range. 
@param vm vlib_main_t pointer, varies by thread @param i node index. @return pointer to the requested vlib_node_t. */ always_inline vlib_node_t * vlib_get_node (vlib_main_t * vm, u32 i) { return vec_elt (vm->node_main.nodes, i); } /** \brief Get vlib node by graph arc (next) index. @param vm vlib_main_t pointer, varies by thread @param node_index index of original node @param next_index graph arc index @return pointer to the vlib_node_t at the end of the indicated arc */ always_inline vlib_node_t * vlib_get_next_node (vlib_main_t * vm, u32 node_index, u32 next_index) { vlib_node_main_t *nm = &vm->node_main; vlib_node_t *n; n = vec_elt (nm->nodes, node_index); ASSERT (next_index < vec_len (n->next_nodes)); return vlib_get_node (vm, n->next_nodes[next_index]); } /** \brief Get node runtime by node index. @param vm vlib_main_t pointer, varies by thread @param node_index index of node @return pointer to the indicated vlib_node_runtime_t */ always_inline vlib_node_runtime_t * vlib_node_get_runtime (vlib_main_t * vm, u32 node_index) { vlib_node_main_t *nm = &vm->node_main; vlib_node_t *n = vec_elt (nm->nodes, node_index); vlib_process_t *p; if (n->type != VLIB_NODE_TYPE_PROCESS) return vec_elt_at_index (nm->nodes_by_type[n->type], n->runtime_index); else { p = vec_elt (nm->processes, n->runtime_index); return &p->node_runtime; } } /** \brief Get node runtime private data by node index. @param vm vlib_main_t pointer, varies by thread @param node_index index of the node @return pointer to the indicated vlib_node_runtime_t private data */ always_inline void * vlib_node_get_runtime_data (vlib_main_t * vm, u32 node_index) { vlib_node_runtime_t *r = vlib_node_get_runtime (vm, node_index); return r->runtime_data; } /** \brief Set node runtime private data. 
@param vm vlib_main_t pointer, varies by thread @param node_index index of the node @param runtime_data arbitrary runtime private data @param n_runtime_data_bytes size of runtime private data */ always_inline void vlib_node_set_runtime_data (vlib_main_t * vm, u32 node_index, void *runtime_data, u32 n_runtime_data_bytes) { vlib_node_t *n = vlib_get_node (vm, node_index); vlib_node_runtime_t *r = vlib_node_get_runtime (vm, node_index); n->runtime_data_bytes = n_runtime_data_bytes; vec_free (n->runtime_data); vec_add (n->runtime_data, runtime_data, n_runtime_data_bytes); ASSERT (vec_len (n->runtime_data) <= sizeof (vlib_node_runtime_t) - STRUCT_OFFSET_OF (vlib_node_runtime_t, runtime_data)); if (vec_len (n->runtime_data) > 0) clib_memcpy_fast (r->runtime_data, n->runtime_data, vec_len (n->runtime_data)); } /** \brief Set node dispatch state. @param vm vlib_main_t pointer, varies by thread @param node_index index of the node @param new_state new state for node, see vlib_node_state_t */ always_inline void vlib_node_set_state (vlib_main_t * vm, u32 node_index, vlib_node_state_t new_state) { vlib_node_main_t *nm = &vm->node_main; vlib_node_t *n; vlib_node_runtime_t *r; n = vec_elt (nm->nodes, node_index); if (n->type == VLIB_NODE_TYPE_PROCESS) { vlib_process_t *p = vec_elt (nm->processes, n->runtime_index); r = &p->node_runtime; /* When disabling make sure flags are cleared. */ p->flags &= ~(VLIB_PROCESS_RESUME_PENDING | VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK | VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_EVENT); } else r = vec_elt_at_index (nm->nodes_by_type[n->type], n->runtime_index); ASSERT (new_state < VLIB_N_NODE_STATE); if (n->type == VLIB_NODE_TYPE_INPUT) { ASSERT (nm->input_node_counts_by_state[n->state] > 0); nm->input_node_counts_by_state[n->state] -= 1; nm->input_node_counts_by_state[new_state] += 1; } n->state = new_state; r->state = new_state; } /** \brief Get node dispatch state. 
@param vm vlib_main_t pointer, varies by thread @param node_index index of the node @return state for node, see vlib_node_state_t */ always_inline vlib_node_state_t vlib_node_get_state (vlib_main_t * vm, u32 node_index) { vlib_node_main_t *nm = &vm->node_main; vlib_node_t *n; n = vec_elt (nm->nodes, node_index); return n->state; } always_inline void vlib_node_set_interrupt_pending (vlib_main_t * vm, u32 node_index) { vlib_node_main_t *nm = &vm->node_main; vlib_node_t *n = vec_elt (nm->nodes, node_index); ASSERT (n->type == VLIB_NODE_TYPE_INPUT); clib_spinlock_lock_if_init (&nm->pending_interrupt_lock); vec_add1 (nm->pending_interrupt_node_runtime_indices, n->runtime_index); clib_spinlock_unlock_if_init (&nm->pending_interrupt_lock); } always_inline vlib_process_t * vlib_get_process_from_node (vlib_main_t * vm, vlib_node_t * node) { vlib_node_main_t *nm = &vm->node_main; ASSERT (node->type == VLIB_NODE_TYPE_PROCESS); return vec_elt (nm->processes, node->runtime_index); } /* Fetches frame with given handle. */ always_inline vlib_frame_t * vlib_get_frame_no_check (vlib_main_t * vm, uword frame_index) { vlib_frame_t *f; f = vm->heap_aligned_base + (frame_index * VLIB_FRAME_ALIGN); return f; } always_inline u32 vlib_frame_index_no_check (vlib_main_t * vm, vlib_frame_t * f) { uword i; ASSERT (((uword) f & (VLIB_FRAME_ALIGN - 1)) == 0); i = ((u8 *) f - (u8 *) vm->heap_aligned_base); ASSERT ((i / VLIB_FRAME_ALIGN) <= 0xFFFFFFFFULL); return i / VLIB_FRAME_ALIGN; } always_inline vlib_frame_t * vlib_get_frame (vlib_main_t * vm, uword frame_index) { vlib_frame_t *f = vlib_get_frame_no_check (vm, frame_index); ASSERT (f->frame_flags & VLIB_FRAME_IS_ALLOCATED); return f; } always_inline u32 vlib_frame_index (vlib_main_t * vm, vlib_frame_t * f) { uword i = vlib_frame_index_no_check (vm, f); ASSERT (vlib_get_frame (vm, i) == f); return i; } /* Byte alignment for vector arguments. 
*/ #define VLIB_FRAME_VECTOR_ALIGN (1 << 4) always_inline u32 vlib_frame_vector_byte_offset (u32 scalar_size) { return round_pow2 (sizeof (vlib_frame_t) + scalar_size, VLIB_FRAME_VECTOR_ALIGN); } /** \brief Get pointer to frame vector data. @param f vlib_frame_t pointer @return pointer to first vector element in frame */ always_inline void * vlib_frame_vector_args (vlib_frame_t * f) { return (void *) f + vlib_frame_vector_byte_offset (f->scalar_size); } /** \brief Get pointer to frame scalar data. @param f vlib_frame_t pointer @return arbitrary node scalar data @sa vlib_frame_vector_args */ always_inline void * vlib_frame_scalar_args (vlib_frame_t * f) { return vlib_frame_vector_args (f) - f->scalar_size; } always_inline vlib_next_frame_t * vlib_node_runtime_get_next_frame (vlib_main_t * vm, vlib_node_runtime_t * n, u32 next_index) { vlib_node_main_t *nm = &vm->node_main; vlib_next_frame_t *nf; ASSERT (next_index < n->n_next_nodes); nf = vec_elt_at_index (nm->next_frames, n->next_frame_index + next_index); if (CLIB_DEBUG > 0) { vlib_node_t *node, *next; node = vec_elt (nm->nodes, n->node_index); next = vec_elt (nm->nodes, node->next_nodes[next_index]); ASSERT (nf->node_runtime_index == next->runtime_index); } return nf; } /** \brief Get pointer to frame by (@c node_index, @c next_index). @warning This is not a function that you should call directly. See @ref vlib_get_next_frame instead. 
@param vm vlib_main_t pointer, varies by thread @param node_index index of the node @param next_index graph arc index @return pointer to the requested vlib_next_frame_t @sa vlib_get_next_frame */ always_inline vlib_next_frame_t * vlib_node_get_next_frame (vlib_main_t * vm, u32 node_index, u32 next_index) { vlib_node_main_t *nm = &vm->node_main; vlib_node_t *n; vlib_node_runtime_t *r; n = vec_elt (nm->nodes, node_index); r = vec_elt_at_index (nm->nodes_by_type[n->type], n->runtime_index); return vlib_node_runtime_get_next_frame (vm, r, next_index); } vlib_frame_t *vlib_get_next_frame_internal (vlib_main_t * vm, vlib_node_runtime_t * node, u32 next_index, u32 alloc_new_frame); #define vlib_get_next_frame_macro(vm,node,next_index,vectors,n_vectors_left,alloc_new_frame) \ do { \ vlib_frame_t * _f \ = vlib_get_next_frame_internal ((vm), (node), (next_index), \ (alloc_new_frame)); \ u32 _n = _f->n_vectors; \ (vectors) = vlib_frame_vector_args (_f) + _n * sizeof ((vectors)[0]); \ (n_vectors_left) = VLIB_FRAME_SIZE - _n; \ } while (0) /** \brief Get pointer to next frame vector data by (@c vlib_node_runtime_t, @c next_index). Standard single/dual loop boilerplate element. @attention This is a MACRO, with SIDE EFFECTS. @param vm vlib_main_t pointer, varies by thread @param node current node vlib_node_runtime_t pointer @param next_index requested graph arc index @return @c vectors -- pointer to next available vector slot @return @c n_vectors_left -- number of vector slots available */ #define vlib_get_next_frame(vm,node,next_index,vectors,n_vectors_left) \ vlib_get_next_frame_macro (vm, node, next_index, \ vectors, n_vectors_left, \ /* alloc new frame */ 0) #define vlib_get_new_next_frame(vm,node,next_index,vectors,n_vectors_left) \ vlib_get_next_frame_macro (vm, node, next_index, \ vectors, n_vectors_left, \ /* alloc new frame */ 1) /** \brief Release pointer to next frame vector data. Standard single/dual loop boilerplate element. 
@param vm vlib_main_t pointer, varies by thread @param r current node vlib_node_runtime_t pointer @param next_index graph arc index @param n_packets_left number of slots still available in vector */ void vlib_put_next_frame (vlib_main_t * vm, vlib_node_runtime_t * r, u32 next_index, u32 n_packets_left); /* Combination get plus put. Returns vector argument just added. */ #define vlib_set_next_frame(vm,node,next_index,v) \ ({ \ uword _n_left; \ vlib_get_next_frame ((vm), (node), (next_index), (v), _n_left); \ ASSERT (_n_left > 0); \ vlib_put_next_frame ((vm), (node), (next_index), _n_left - 1); \ (v); \ }) always_inline void vlib_set_next_frame_buffer (vlib_main_t * vm, vlib_node_runtime_t * node, u32 next_index, u32 buffer_index) { u32 *p; p = vlib_set_next_frame (vm, node, next_index, p); p[0] = buffer_index; } vlib_frame_t *vlib_get_frame_to_node (vlib_main_t * vm, u32 to_node_index); void vlib_put_frame_to_node (vlib_main_t * vm, u32 to_node_index, vlib_frame_t * f); always_inline uword vlib_in_process_context (vlib_main_t * vm) { return vm->node_main.current_process_index != ~0; } always_inline vlib_process_t * vlib_get_current_process (vlib_main_t * vm) { vlib_node_main_t *nm = &vm->node_main; if (vlib_in_process_context (vm)) return vec_elt (nm->processes, nm->current_process_index); return 0; } always_inline uword vlib_current_process (vlib_main_t * vm) { return vlib_get_current_process (vm)->node_runtime.node_index; } /** Returns TRUE if a process suspend time is less than 10us @param dt - remaining poll time in seconds @returns 1 if dt < 10e-6, 0 otherwise */ always_inline uword vlib_process_suspend_time_is_zero (f64 dt) { return dt < 10e-6; } /** Suspend a vlib cooperative multi-tasking thread for a period of time @param vm - vlib_main_t * @param dt - suspend interval in seconds @returns VLIB_PROCESS_RESUME_LONGJMP_RESUME, routinely ignored */ always_inline uword vlib_process_suspend (vlib_main_t * vm, f64 dt) { uword r; vlib_node_main_t *nm = 
&vm->node_main; vlib_process_t *p = vec_elt (nm->processes, nm->current_process_index); if (vlib_process_suspend_time_is_zero (dt)) return VLIB_PROCESS_RESUME_LONGJMP_RESUME; p->flags |= VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK; r = clib_setjmp (&p->resume_longjmp, VLIB_PROCESS_RESUME_LONGJMP_SUSPEND); if (r == VLIB_PROCESS_RESUME_LONGJMP_SUSPEND) { /* expiration time in 10us ticks */ p->resume_clock_interval = dt * 1e5; clib_longjmp (&p->return_longjmp, VLIB_PROCESS_RETURN_LONGJMP_SUSPEND); } return r; } always_inline void vlib_process_free_event_type (vlib_process_t * p, uword t, uword is_one_time_event) { ASSERT (!pool_is_free_index (p->event_type_pool, t)); pool_put_index (p->event_type_pool, t); if (is_one_time_event) p->one_time_event_type_bitmap = clib_bitmap_andnoti (p->one_time_event_type_bitmap, t); } always_inline void vlib_process_maybe_free_event_type (vlib_process_t * p, uword t) { ASSERT (!pool_is_free_index (p->event_type_pool, t)); if (clib_bitmap_get (p->one_time_event_type_bitmap, t)) vlib_process_free_event_type (p, t, /* is_one_time_event */ 1); } always_inline void * vlib_process_get_event_data (vlib_main_t * vm, uword * return_event_type_opaque) { vlib_node_main_t *nm = &vm->node_main; vlib_process_t *p; vlib_process_event_type_t *et; uword t; void *event_data_vector; p = vec_elt (nm->processes, nm->current_process_index); /* Find first type with events ready. Return invalid type when there's nothing there. */ t = clib_bitmap_first_set (p->non_empty_event_type_bitmap); if (t == ~0) return 0; p->non_empty_event_type_bitmap = clib_bitmap_andnoti (p->non_empty_event_type_bitmap, t); ASSERT (_vec_len (p->pending_event_data_by_type_index[t]) > 0); event_data_vector = p->pending_event_data_by_type_index[t]; p->pending_event_data_by_type_index[t] = 0; et = pool_elt_at_index (p->event_type_pool, t); /* Return user's opaque value and possibly index. 
*/ *return_event_type_opaque = et->opaque; vlib_process_maybe_free_event_type (p, t); return event_data_vector; } /* Return event data vector for later reuse. We reuse event data to avoid repeatedly allocating event vectors in cases where we care about speed. */ always_inline void vlib_process_put_event_data (vlib_main_t * vm, void *event_data) { vlib_node_main_t *nm = &vm->node_main; vec_add1 (nm->recycled_event_data_vectors, event_data); } /** Return the first event type which has occurred and a vector of per-event data of that type, or a timeout indication @param vm - vlib_main_t pointer @param data_vector - pointer to a (uword *) vector to receive event data @returns either an event type and a vector of per-event instance data, or ~0 to indicate a timeout. */ always_inline uword vlib_process_get_events (vlib_main_t * vm, uword ** data_vector) { vlib_node_main_t *nm = &vm->node_main; vlib_process_t *p; vlib_process_event_type_t *et; uword r, t, l; p = vec_elt (nm->processes, nm->current_process_index); /* Find first type with events ready. Return invalid type when there's nothing there. */ t = clib_bitmap_first_set (p->non_empty_event_type_bitmap); if (t == ~0) return t; p->non_empty_event_type_bitmap = clib_bitmap_andnoti (p->non_empty_event_type_bitmap, t); l = _vec_len (p->pending_event_data_by_type_index[t]); if (data_vector) vec_add (*data_vector, p->pending_event_data_by_type_index[t], l); _vec_len (p->pending_event_data_by_type_index[t]) = 0; et = pool_elt_at_index (p->event_type_pool, t); /* Return user's opaque value. 
*/ r = et->opaque; vlib_process_maybe_free_event_type (p, t); return r; } always_inline uword vlib_process_get_events_helper (vlib_process_t * p, uword t, uword ** data_vector) { uword l; p->non_empty_event_type_bitmap = clib_bitmap_andnoti (p->non_empty_event_type_bitmap, t); l = _vec_len (p->pending_event_data_by_type_index[t]); if (data_vector) vec_add (*data_vector, p->pending_event_data_by_type_index[t], l); _vec_len (p->pending_event_data_by_type_index[t]) = 0; vlib_process_maybe_free_event_type (p, t); return l; } /* As above but query as specified type of event. Returns number of events found. */ always_inline uword vlib_process_get_events_with_type (vlib_main_t * vm, uword ** data_vector, uword with_type_opaque) { vlib_node_main_t *nm = &vm->node_main; vlib_process_t *p; uword t, *h; p = vec_elt (nm->processes, nm->current_process_index); h = hash_get (p->event_type_index_by_type_opaque, with_type_opaque); if (!h) /* This can happen when an event has not yet been signaled with given opaque type. 
*/ return 0; t = h[0]; if (!clib_bitmap_get (p->non_empty_event_type_bitmap, t)) return 0; return vlib_process_get_events_helper (p, t, data_vector); } always_inline uword * vlib_process_wait_for_event (vlib_main_t * vm) { vlib_node_main_t *nm = &vm->node_main; vlib_process_t *p; uword r; p = vec_elt (nm->processes, nm->current_process_index); if (clib_bitmap_is_zero (p->non_empty_event_type_bitmap)) { p->flags |= VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_EVENT; r = clib_setjmp (&p->resume_longjmp, VLIB_PROCESS_RESUME_LONGJMP_SUSPEND); if (r == VLIB_PROCESS_RESUME_LONGJMP_SUSPEND) clib_longjmp (&p->return_longjmp, VLIB_PROCESS_RETURN_LONGJMP_SUSPEND); } return p->non_empty_event_type_bitmap; } always_inline uword vlib_process_wait_for_one_time_event (vlib_main_t * vm, uword ** data_vector, uword with_type_index) { vlib_node_main_t *nm = &vm->node_main; vlib_process_t *p; uword r; p = vec_elt (nm->processes, nm->current_process_index); ASSERT (!pool_is_free_index (p->event_type_pool, with_type_index)); while (!clib_bitmap_get (p->non_empty_event_type_bitmap, with_type_index)) { p->flags |= VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_EVENT; r = clib_setjmp (&p->resume_longjmp, VLIB_PROCESS_RESUME_LONGJMP_SUSPEND); if (r == VLIB_PROCESS_RESUME_LONGJMP_SUSPEND) clib_longjmp (&p->return_longjmp, VLIB_PROCESS_RETURN_LONGJMP_SUSPEND); } return vlib_process_get_events_helper (p, with_type_index, data_vector); } always_i
/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * cuckoo hash implementation based on paper
 * 'Algorithmic Improvements for Fast Concurrent Cuckoo Hashing'
 * by Xiaozhou Li, David G. Andersen, Michael Kaminsky, Michael J. Freedman
 * and their libcuckoo implementation (https://github.com/efficient/libcuckoo)
 */

/*
 * Note: to instantiate the template multiple times in a single file,
 * #undef __included_cuckoo_template_h__...
 */
#ifndef __included_cuckoo_template_h__
#define __included_cuckoo_template_h__

#include <vppinfra/heap.h>
#include <vppinfra/format.h>
#include <vppinfra/pool.h>
#include <vppinfra/lock.h>
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vppinfra/cache.h>
#include <vppinfra/cuckoo_8_8.h>

/* Template contract: the includer must #define all five parameters below
   before including this header; a missing parameter is a hard error rather
   than a silent mis-instantiation. */
#ifndef CLIB_CUCKOO_TYPE
#error CLIB_CUCKOO_TYPE not defined
#endif

#ifndef CLIB_CUCKOO_BFS_MAX_STEPS
#error CLIB_CUCKOO_BFS_MAX_STEPS not defined
#endif

#ifndef CLIB_CUCKOO_KVP_PER_BUCKET
#error CLIB_CUCKOO_KVP_PER_BUCKET not defined
#endif

#ifndef CLIB_CUCKOO_LOG2_KVP_PER_BUCKET
#error CLIB_CUCKOO_LOG2_KVP_PER_BUCKET not defined
#endif

#ifndef CLIB_CUCKOO_BFS_MAX_PATH_LENGTH
#error CLIB_CUCKOO_BFS_MAX_PATH_LENGTH not defined
#endif

/* KVP count and its log2 must agree; caught at compile time.
   (Fixed: the diagnostic string was missing its closing parenthesis.) */
STATIC_ASSERT (CLIB_CUCKOO_KVP_PER_BUCKET ==
	       (1 << CLIB_CUCKOO_LOG2_KVP_PER_BUCKET),
	       "CLIB_CUCKOO_KVP_PER_BUCKET != (1 << CLIB_CUCKOO_LOG2_KVP_PER_BUCKET)");

/* Token-pasting helpers: CV (name) expands to name##CLIB_CUCKOO_TYPE and
   CVT (name) to name##CLIB_CUCKOO_TYPE##_t, so each template instantiation
   gets its own unique function and type names.  The two-level expansion
   (_cv/__cv) is required so that CLIB_CUCKOO_TYPE is macro-expanded before
   pasting. */
#define _cv(a, b) a##b
#define __cv(a, b) _cv (a, b)
#define CV(a) __cv (a, CLIB_CUCKOO_TYPE)

#define _cvt(a, b) a##b##_t
#define __cvt(a, b) _cvt (a, b)
#define CVT(a) __cvt (a, CLIB_CUCKOO_TYPE)

/* Per-bucket auxiliary word, packed into a single u64 (see the
   accessors below, least significant bit first):
     bit 0                                  -- writer flag
     next CLIB_CUCKOO_USE_COUNT_BIT_WIDTH   -- use count
     remaining high bits                    -- version */
typedef u64 clib_cuckoo_bucket_aux_t;

/* One extra bit so the count can represent CLIB_CUCKOO_KVP_PER_BUCKET
   itself, not just 0..KVP_PER_BUCKET-1. */
#define CLIB_CUCKOO_USE_COUNT_BIT_WIDTH (1 + CLIB_CUCKOO_LOG2_KVP_PER_BUCKET)

/** @brief Extract the version field from a bucket auxiliary word.
    The version occupies all bits above the writer flag and use count. */
always_inline u64
clib_cuckoo_bucket_aux_get_version (clib_cuckoo_bucket_aux_t aux)
{
  const int n_low_bits = 1 + CLIB_CUCKOO_USE_COUNT_BIT_WIDTH;
  return aux >> n_low_bits;
}

/** @brief Extract the use count (number of occupied KVP slots) from a
    bucket auxiliary word. */
always_inline int
clib_cuckoo_bucket_aux_get_use_count (clib_cuckoo_bucket_aux_t aux)
{
  u64 above_writer_flag = aux >> 1;
  u64 field_mask = (1 << CLIB_CUCKOO_USE_COUNT_BIT_WIDTH) - 1;
  return above_writer_flag & field_mask;
}

/** @brief Extract the writer flag (bit 0) from a bucket auxiliary word.
    @return 1 if a writer holds the bucket, 0 otherwise. */
always_inline int
clib_cuckoo_bucket_aux_get_writer_flag (clib_cuckoo_bucket_aux_t aux)
{
  return (int) (aux & 1ULL);
}

/** @brief Pack version, use count and writer flag into one auxiliary word.
    Inverse of the three clib_cuckoo_bucket_aux_get_* accessors above;
    assumes use_count fits in CLIB_CUCKOO_USE_COUNT_BIT_WIDTH bits and
    writer_flag is 0 or 1. */
always_inline clib_cuckoo_bucket_aux_t
clib_cuckoo_bucket_aux_pack (u64 version, int use_count, int writer_flag)
{
  u64 version_bits = version << (1 + CLIB_CUCKOO_USE_COUNT_BIT_WIDTH);
  u64 use_count_bits = (u64) use_count << 1;
  return version_bits + use_count_bits + (u64) writer_flag;
}