path: root/src/vnet/fib/ip4_fib_hash.h
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @brief The IPv4 FIB Hash table
 */

#ifndef __IP4_FIB_HASH_H__
#define __IP4_FIB_HASH_H__

#include <vlib/vlib.h>
#include <vnet/ip/ip.h>

typedef struct ip4_fib_hash_t_
{
  /* One hash table per prefix length, mapping destination address to FIB entry index. */
  uword *fib_entry_by_dst_address[33];

  /* Table ID (hash key) for this FIB. */
  u32 table_id;
} ip4_fib_hash_t;
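
/**
 * Illustrative sketch (not part of this header): lookups key each of the 33
 * hash tables above with the destination address masked to that prefix
 * length, so a longest-prefix match can probe the tables from /32 down to
 * /0. The exact key construction lives in the .c implementation; the masking
 * shown here is an assumption for illustration only.
 *
 * @code
 *   static u32
 *   example_hash_key (const ip4_address_t * addr, u32 len)
 *   {
 *     // network-order mask for a prefix of length len (0..32)
 *     u32 mask = len ? clib_host_to_net_u32 (~0u << (32 - len)) : 0;
 *     return (addr->as_u32 & mask);
 *   }
 * @endcode
 */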

extern fib_node_index_t ip4_fib_hash_table_lookup(const ip4_fib_hash_t *fib,
                                                  const ip4_address_t *addr,
                                                  u32 len);
extern index_t ip4_fib_hash_table_lookup_lb(const ip4_fib_hash_t *fib,
                                            const ip4_address_t *addr);
extern fib_node_index_t ip4_fib_hash_table_lookup_exact_match(const ip4_fib_hash_t *fib,
                                                              const ip4_address_t *addr,
                                                              u32 len);
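
/**
 * Usage sketch (illustrative; fib is assumed to have been initialised and
 * populated elsewhere): the three flavours differ in what they match and
 * what they return, assuming the conventional semantics of a hash-based FIB.
 *
 * @code
 *   fib_node_index_t fei;
 *   index_t lbi;
 *
 *   // best (longest) match for addr among prefixes no longer than /24
 *   fei = ip4_fib_hash_table_lookup (&fib, &addr, 24);
 *
 *   // only an entry whose prefix is exactly addr/24
 *   fei = ip4_fib_hash_table_lookup_exact_match (&fib, &addr, 24);
 *
 *   // index of the load-balance object for the best match to addr
 *   lbi = ip4_fib_hash_table_lookup_lb (&fib, &addr);
 * @endcode
 */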

extern void ip4_fib_hash_table_entry_remove(ip4_fib_hash_t *fib,
                                            const ip4_address_t *addr,
                                            u32 len);

extern void ip4_fib_hash_table_entry_insert(ip4_fib_hash_t *fib,
                                            const ip4_address_t *addr,
                                            u32 len,
                                            fib_node_index_t fib_entry_index);
extern void ip4_fib_hash_table_init(ip4_fib_hash_t *fib);
extern void ip4_fib_hash_table_destroy(ip4_fib_hash_t *fib);
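
/**
 * Lifecycle sketch (illustrative): a table is initialised before first use
 * and destroyed when its owner goes away; insert/remove take the prefix
 * (address plus length) and, for insert, the FIB entry index to associate
 * with it. The 10.0.0.0/24 prefix and fib_entry_index below are made-up
 * example values.
 *
 * @code
 *   ip4_fib_hash_t fib;
 *   ip4_address_t pfx = { .as_u8 = { 10, 0, 0, 0 } };
 *
 *   ip4_fib_hash_table_init (&fib);
 *   ip4_fib_hash_table_entry_insert (&fib, &pfx, 24, fib_entry_index);
 *   // ... use the table ...
 *   ip4_fib_hash_table_entry_remove (&fib, &pfx, 24);
 *   ip4_fib_hash_table_destroy (&fib);
 * @endcode
 */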

/**
 * @brief Walk all entries in a FIB table.
 * N.B.: the walk is NOT safe with respect to deletes. If you need to delete
 * entries, first walk the whole table collecting them into a vector, then
 * delete the collected elements (see the sketch below).
 */
extern void ip4_fib_hash_table_walk(ip4_fib_hash_t *fib,
                                    fib_table_walk_fn_t fn,
                                    void *ctx);

/**
 * @brief Walk all entries in a sub-tree of the FIB table.
 * N.B.: the walk is NOT safe with respect to deletes. If you need to delete
 * entries, first walk the whole table collecting them into a vector, then
 * delete the collected elements (see the sketch below).
 */
extern void ip4_fib_hash_table_sub_tree_walk(ip4_fib_hash_t *fib,
                                             const fib_prefix_t *root,
                                             fib_table_walk_fn_t fn,
                                             void *ctx);
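
/**
 * Sketch of the collect-then-delete pattern referred to by the two walk
 * functions above (illustrative; assumes the fib_table_walk_fn_t callback
 * takes the entry index plus a context pointer and returns
 * FIB_TABLE_WALK_CONTINUE, as declared in fib_table.h):
 *
 * @code
 *   static fib_table_walk_rc_t
 *   collect_cb (fib_node_index_t fei, void *ctx)
 *   {
 *     fib_node_index_t **entries = ctx;
 *     vec_add1 (*entries, fei);
 *     return (FIB_TABLE_WALK_CONTINUE);
 *   }
 *
 *   fib_node_index_t *entries = NULL, *fei;
 *
 *   ip4_fib_hash_table_walk (&fib, collect_cb, &entries);
 *   vec_foreach (fei, entries)
 *     {
 *       // remove/unlink *fei via the owning FIB table's API
 *     }
 *   vec_free (entries);
 * @endcode
 */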

#endif