author    | Klement Sekera <ksekera@cisco.com>              | 2019-10-01 13:08:43 +0000
committer | Ole Trøan <otroan@employees.org>                | 2019-10-03 09:24:01 +0000
commit    | cfca8451f461f65532c75a498bf1bf1056c3af2a (patch)
tree      | ba3d8c383ada1b381acdbd28cb1edc2b80b5f5e5
parent    | a8df85ce1fe957efa8301bd5b5ac3c03737d31f1 (diff)
map: fix indent-off
Replace an erroneous INDENT-OFF with INDENT-ON and reformat the code that is
non-conforming with respect to the coding style.
Type: style
Change-Id: Ia3310371e96b248e5da143fad8fdca3b2fb6650b
Signed-off-by: Klement Sekera <ksekera@cisco.com>
-rw-r--r-- | src/plugins/map/map.h | 254
1 file changed, 133 insertions, 121 deletions
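For context on the markers being swapped: VPP sources bracket hand-formatted regions with /* *INDENT-OFF* */ and /* *INDENT-ON* */ comments so that the GNU indent pass driven by the tree's checkstyle script leaves those regions alone, while everything outside them must conform to the project style. An unmatched OFF marker, as fixed by this commit, exempts the rest of the file from checking, which is how the non-conforming code below accumulated. A minimal sketch of correct pairing follows; it is illustrative only, the example_error_strings table is invented and merely reuses the foreach_map_error macro that appears in the diff below.

    /* Illustrative example, not part of map.h.  Code between the markers
     * keeps its hand formatting; the style checker's GNU indent pass
     * reformats everything outside them. */
    /* *INDENT-OFF* */
    static const char *example_error_strings[] = {
    #define _(sym, str) str,
      foreach_map_error
    #undef _
    };
    /* *INDENT-ON* */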
diff --git a/src/plugins/map/map.h b/src/plugins/map/map.h
index 57ca1363c1b..8098a798aee 100644
--- a/src/plugins/map/map.h
+++ b/src/plugins/map/map.h
@@ -150,23 +150,25 @@ typedef union {
   u64 as_u64[5];
   u32 as_u32[10];
 } map_ip6_reass_key_t;
-/* *INDENT-OFF* */
+/* *INDENT-ON* */

-typedef struct {
-  u32 pi; //Cached packet or ~0
-  u16 next_data_offset; //The data offset of the additional 20 bytes or ~0
-  u8 next_data_len; //Number of bytes ready to be copied (20 if not last fragment)
-  u8 next_data[20]; //The 20 additional bytes
+typedef struct
+{
+  u32 pi;                       //Cached packet or ~0
+  u16 next_data_offset;         //The data offset of the additional 20 bytes or ~0
+  u8 next_data_len;             //Number of bytes ready to be copied (20 if not last fragment)
+  u8 next_data[20];             //The 20 additional bytes
 } map_ip6_fragment_t;

-typedef struct {
+typedef struct
+{
   map_ip6_reass_key_t key;
   f64 ts;
 #ifdef MAP_IP6_REASS_COUNT_BYTES
   u16 expected_total;
   u16 forwarded;
 #endif
-  u16 bucket; //What hash bucket this element is linked in
+  u16 bucket;                   //What hash bucket this element is linked in
   u16 bucket_next;
   u16 fifo_prev;
   u16 fifo_next;
@@ -208,7 +210,8 @@ typedef struct map_main_pre_resolved_t_
 extern map_main_pre_resolved_t pre_resolved[FIB_PROTOCOL_MAX];
 #endif

-typedef struct {
+typedef struct
+{
   /* pool of MAP domains */
   map_domain_t *domains;
   map_domain_extra_t *domain_extras;
@@ -246,14 +249,14 @@ typedef struct {
    * IPv6 decap reassembly
    */
   /* Configuration */
-  f32 ip6_reass_conf_ht_ratio; //Size of ht is 2^ceil(log2(ratio*pool_size))
-  u16 ip6_reass_conf_pool_size; //Max number of allocated reass structures
-  u16 ip6_reass_conf_lifetime_ms; //Time a reassembly struct is considered valid in ms
-  u32 ip6_reass_conf_buffers; //Maximum number of buffers used by ip6 reassembly
+  f32 ip6_reass_conf_ht_ratio;  //Size of ht is 2^ceil(log2(ratio*pool_size))
+  u16 ip6_reass_conf_pool_size; //Max number of allocated reass structures
+  u16 ip6_reass_conf_lifetime_ms;       //Time a reassembly struct is considered valid in ms
+  u32 ip6_reass_conf_buffers;   //Maximum number of buffers used by ip6 reassembly

   /* Runtime */
   map_ip6_reass_t *ip6_reass_pool;
-  u8 ip6_reass_ht_log2len; //Hash table size is 2^log2len
+  u8 ip6_reass_ht_log2len;      //Hash table size is 2^log2len
   u16 ip6_reass_allocated;
   u16 *ip6_reass_hash_table;
   u16 ip6_reass_fifo_last;
@@ -296,23 +299,25 @@ typedef struct {
 _(MALFORMED, "malformed packet") \
 _(DF_SET, "can't fragment, DF set")

-typedef enum {
+typedef enum
+{
 #define _(sym,str) MAP_ERROR_##sym,
-  foreach_map_error
+  foreach_map_error
 #undef _
-  MAP_N_ERROR,
- } map_error_t;
+    MAP_N_ERROR,
+} map_error_t;

-u64 map_error_counter_get(u32 node_index, map_error_t map_error);
+u64 map_error_counter_get (u32 node_index, map_error_t map_error);

-typedef struct {
+typedef struct
+{
   u32 map_domain_index;
   u16 port;
 } map_trace_t;

 always_inline void
-map_add_trace (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_buffer_t *b,
-               u32 map_domain_index, u16 port)
+map_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
+               vlib_buffer_t * b, u32 map_domain_index, u16 port)
 {
   map_trace_t *tr = vlib_add_trace (vm, node, b, sizeof (*tr));
   tr->map_domain_index = map_domain_index;
@@ -338,42 +343,43 @@ extern vlib_node_registration_t ip6_map_t_icmp_node;
  * map_get_pfx
  */
 static_always_inline u64
-map_get_pfx (map_domain_t *d, u32 addr, u16 port)
+map_get_pfx (map_domain_t * d, u32 addr, u16 port)
 {
   u16 psid = (port >> d->psid_shift) & d->psid_mask;

   if (d->ea_bits_len == 0 && d->rules)
-    return clib_net_to_host_u64(d->rules[psid].as_u64[0]);
+    return clib_net_to_host_u64 (d->rules[psid].as_u64[0]);

   u32 suffix = (addr >> d->suffix_shift) & d->suffix_mask;
-  u64 ea = d->ea_bits_len == 0 ? 0 : (((u64) suffix << d->psid_length)) | psid;
+  u64 ea =
+    d->ea_bits_len == 0 ? 0 : (((u64) suffix << d->psid_length)) | psid;

-  return clib_net_to_host_u64(d->ip6_prefix.as_u64[0]) | ea << d->ea_shift;
+  return clib_net_to_host_u64 (d->ip6_prefix.as_u64[0]) | ea << d->ea_shift;
 }

 static_always_inline u64
-map_get_pfx_net (map_domain_t *d, u32 addr, u16 port)
+map_get_pfx_net (map_domain_t * d, u32 addr, u16 port)
 {
-  return clib_host_to_net_u64(map_get_pfx(d, clib_net_to_host_u32(addr),
-                                          clib_net_to_host_u16(port)));
+  return clib_host_to_net_u64 (map_get_pfx (d, clib_net_to_host_u32 (addr),
+                                            clib_net_to_host_u16 (port)));
 }

 /*
  * map_get_sfx
  */
 static_always_inline u64
-map_get_sfx (map_domain_t *d, u32 addr, u16 port)
+map_get_sfx (map_domain_t * d, u32 addr, u16 port)
 {
   u16 psid = (port >> d->psid_shift) & d->psid_mask;

   /* Shared 1:1 mode. */
   if (d->ea_bits_len == 0 && d->rules)
-    return clib_net_to_host_u64(d->rules[psid].as_u64[1]);
+    return clib_net_to_host_u64 (d->rules[psid].as_u64[1]);
   if (d->ip6_prefix_len == 128)
-    return clib_net_to_host_u64(d->ip6_prefix.as_u64[1]);
+    return clib_net_to_host_u64 (d->ip6_prefix.as_u64[1]);

   if (d->ip6_src_len == 96)
-    return (clib_net_to_host_u64(d->ip6_prefix.as_u64[1]) | addr);
+    return (clib_net_to_host_u64 (d->ip6_prefix.as_u64[1]) | addr);

   /* IPv4 prefix */
   if (d->flags & MAP_DOMAIN_PREFIX)
@@ -384,34 +390,36 @@ map_get_sfx (map_domain_t *d, u32 addr, u16 port)
 }

 static_always_inline u64
-map_get_sfx_net (map_domain_t *d, u32 addr, u16 port)
+map_get_sfx_net (map_domain_t * d, u32 addr, u16 port)
 {
-  return clib_host_to_net_u64(map_get_sfx(d, clib_net_to_host_u32(addr),
-                                          clib_net_to_host_u16(port)));
+  return clib_host_to_net_u64 (map_get_sfx (d, clib_net_to_host_u32 (addr),
+                                            clib_net_to_host_u16 (port)));
 }

 static_always_inline u32
-map_get_ip4 (ip6_address_t *addr, u16 prefix_len)
+map_get_ip4 (ip6_address_t * addr, u16 prefix_len)
 {
-  ASSERT(prefix_len == 64 || prefix_len == 96);
+  ASSERT (prefix_len == 64 || prefix_len == 96);
   if (prefix_len == 96)
-    return clib_host_to_net_u32(clib_net_to_host_u64(addr->as_u64[1]));
+    return clib_host_to_net_u32 (clib_net_to_host_u64 (addr->as_u64[1]));
   else
-    return clib_host_to_net_u32(clib_net_to_host_u64(addr->as_u64[1]) >> 16);
+    return clib_host_to_net_u32 (clib_net_to_host_u64 (addr->as_u64[1]) >>
                                  16);
 }

 static_always_inline map_domain_t *
-ip4_map_get_domain (ip4_address_t *addr, u32 *map_domain_index, u8 *error)
+ip4_map_get_domain (ip4_address_t * addr, u32 * map_domain_index, u8 * error)
 {
   map_main_t *mm = &map_main;
-  u32 mdi = mm->ip4_prefix_tbl->lookup(mm->ip4_prefix_tbl, addr, 32);
-  if (mdi == ~0) {
-    *error = MAP_ERROR_NO_DOMAIN;
-    return 0;
-  }
+  u32 mdi = mm->ip4_prefix_tbl->lookup (mm->ip4_prefix_tbl, addr, 32);
+  if (mdi == ~0)
+    {
+      *error = MAP_ERROR_NO_DOMAIN;
+      return 0;
+    }

   *map_domain_index = mdi;
-  return pool_elt_at_index(mm->domains, mdi);
+  return pool_elt_at_index (mm->domains, mdi);
 }

 /*
@@ -420,77 +428,78 @@ ip4_map_get_domain (ip4_address_t *addr, u32 *map_domain_index, u8 *error)
  * prefix is shared the IPv4 address must be used.
  */
 static_always_inline map_domain_t *
-ip6_map_get_domain (ip6_address_t *addr,
-                    u32 *map_domain_index,
-                    u8 *error)
+ip6_map_get_domain (ip6_address_t * addr, u32 * map_domain_index, u8 * error)
 {
   map_main_t *mm = &map_main;
-  u32 mdi = mm->ip6_src_prefix_tbl->lookup(mm->ip6_src_prefix_tbl, addr, 128);
-  if (mdi == ~0) {
-    *error = MAP_ERROR_NO_DOMAIN;
-    return 0;
-  }
+  u32 mdi =
+    mm->ip6_src_prefix_tbl->lookup (mm->ip6_src_prefix_tbl, addr, 128);
+  if (mdi == ~0)
+    {
+      *error = MAP_ERROR_NO_DOMAIN;
+      return 0;
+    }

   *map_domain_index = mdi;
-  return pool_elt_at_index(mm->domains, mdi);
+  return pool_elt_at_index (mm->domains, mdi);
 }

-clib_error_t * map_plugin_api_hookup (vlib_main_t * vm);
+clib_error_t *map_plugin_api_hookup (vlib_main_t * vm);

-map_ip6_reass_t *
-map_ip6_reass_get(ip6_address_t *src, ip6_address_t *dst, u32 fragment_id,
-                  u8 protocol, u32 **pi_to_drop);
-void
-map_ip6_reass_free(map_ip6_reass_t *r, u32 **pi_to_drop);
+map_ip6_reass_t *map_ip6_reass_get (ip6_address_t * src, ip6_address_t * dst,
+                                    u32 fragment_id, u8 protocol,
+                                    u32 ** pi_to_drop);
+void map_ip6_reass_free (map_ip6_reass_t * r, u32 ** pi_to_drop);
 #define map_ip6_reass_lock() clib_spinlock_lock (&map_main.ip6_reass_lock)
 #define map_ip6_reass_unlock() clib_spinlock_unlock (&map_main.ip6_reass_lock)

 int
-map_ip6_reass_add_fragment(map_ip6_reass_t *r, u32 pi,
-                           u16 data_offset, u16 next_data_offset,
-                           u8 *data_start, u16 data_len);
+map_ip6_reass_add_fragment (map_ip6_reass_t * r, u32 pi,
+                            u16 data_offset, u16 next_data_offset,
+                            u8 * data_start, u16 data_len);

-void map_ip4_drop_pi(u32 pi);
+void map_ip4_drop_pi (u32 pi);

-void map_ip6_drop_pi(u32 pi);
+void map_ip6_drop_pi (u32 pi);

-int map_ip6_reass_conf_ht_ratio(f32 ht_ratio, u32 *trashed_reass, u32 *dropped_packets);
+int map_ip6_reass_conf_ht_ratio (f32 ht_ratio, u32 * trashed_reass,
+                                 u32 * dropped_packets);
 #define MAP_IP6_REASS_CONF_HT_RATIO_MAX 100

-int map_ip6_reass_conf_pool_size(u16 pool_size, u32 *trashed_reass, u32 *dropped_packets);
+int map_ip6_reass_conf_pool_size (u16 pool_size, u32 * trashed_reass,
+                                  u32 * dropped_packets);
 #define MAP_IP6_REASS_CONF_POOL_SIZE_MAX (0xfeff)

-int map_ip6_reass_conf_lifetime(u16 lifetime_ms);
+int map_ip6_reass_conf_lifetime (u16 lifetime_ms);
 #define MAP_IP6_REASS_CONF_LIFETIME_MAX 0xffff

-int map_ip6_reass_conf_buffers(u32 buffers);
+int map_ip6_reass_conf_buffers (u32 buffers);
 #define MAP_IP6_REASS_CONF_BUFFERS_MAX (0xffffffff)
 /*
  * Supports prefix of 96 or 64 (with u-octet)
 */
 static_always_inline void
-ip4_map_t_embedded_address (map_domain_t *d,
-                            ip6_address_t *ip6, const ip4_address_t *ip4)
+ip4_map_t_embedded_address (map_domain_t * d,
+                            ip6_address_t * ip6, const ip4_address_t * ip4)
 {
-  ASSERT(d->ip6_src_len == 96 || d->ip6_src_len == 64); //No support for other lengths for now
+  ASSERT (d->ip6_src_len == 96 || d->ip6_src_len == 64);        //No support for other lengths for now
   u8 offset = d->ip6_src_len == 64 ? 9 : 12;
   ip6->as_u64[0] = d->ip6_src.as_u64[0];
   ip6->as_u64[1] = d->ip6_src.as_u64[1];
-  clib_memcpy_fast(&ip6->as_u8[offset], ip4, 4);
+  clib_memcpy_fast (&ip6->as_u8[offset], ip4, 4);
 }

 static_always_inline u32
-ip6_map_t_embedded_address (map_domain_t *d, ip6_address_t *addr)
+ip6_map_t_embedded_address (map_domain_t * d, ip6_address_t * addr)
 {
-  ASSERT(d->ip6_src_len == 64 || d->ip6_src_len == 96);
+  ASSERT (d->ip6_src_len == 64 || d->ip6_src_len == 96);
   u32 x;
   u8 offset = d->ip6_src_len == 64 ? 9 : 12;
-  clib_memcpy(&x, &addr->as_u8[offset], 4);
+  clib_memcpy (&x, &addr->as_u8[offset], 4);
   return x;
 }

 static inline void
-map_domain_counter_lock (map_main_t *mm)
+map_domain_counter_lock (map_main_t * mm)
 {
   if (mm->counter_lock)
     while (clib_atomic_test_and_set (mm->counter_lock))
@@ -498,7 +507,7 @@ map_domain_counter_lock (map_main_t *mm)
 }

 static inline void
-map_domain_counter_unlock (map_main_t *mm)
+map_domain_counter_unlock (map_main_t * mm)
 {
   if (mm->counter_lock)
     clib_atomic_release (mm->counter_lock);
@@ -506,29 +515,32 @@ map_domain_counter_unlock (map_main_t *mm)


 static_always_inline void
-map_send_all_to_node(vlib_main_t *vm, u32 *pi_vector,
-                     vlib_node_runtime_t *node, vlib_error_t *error,
-                     u32 next)
+map_send_all_to_node (vlib_main_t * vm, u32 * pi_vector,
+                      vlib_node_runtime_t * node, vlib_error_t * error,
+                      u32 next)
 {
   u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
   //Deal with fragments that are ready
   from = pi_vector;
-  n_left_from = vec_len(pi_vector);
+  n_left_from = vec_len (pi_vector);
   next_index = node->cached_next_index;
-  while (n_left_from > 0) {
-    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
-    while (n_left_from > 0 && n_left_to_next > 0) {
-      u32 pi0 = to_next[0] = from[0];
-      from += 1;
-      n_left_from -= 1;
-      to_next += 1;
-      n_left_to_next -= 1;
-      vlib_buffer_t *p0 = vlib_get_buffer(vm, pi0);
-      p0->error = *error;
-      vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, pi0, next);
+  while (n_left_from > 0)
+    {
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+      while (n_left_from > 0 && n_left_to_next > 0)
+        {
+          u32 pi0 = to_next[0] = from[0];
+          from += 1;
+          n_left_from -= 1;
+          to_next += 1;
+          n_left_to_next -= 1;
+          vlib_buffer_t *p0 = vlib_get_buffer (vm, pi0);
+          p0->error = *error;
+          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, pi0, next);
+        }
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
-    vlib_put_next_frame(vm, node, next_index, n_left_to_next);
-  }
 }

 static_always_inline void
@@ -537,7 +549,7 @@ map_mss_clamping (tcp_header_t * tcp, ip_csum_t * sum, u16 mss_clamping)
   u8 *data;
   u8 opt_len, opts_len, kind;
   u16 mss;
-  u16 mss_value_net = clib_host_to_net_u16(mss_clamping);
+  u16 mss_value_net = clib_host_to_net_u16 (mss_clamping);

   if (!tcp_syn (tcp))
     return;
@@ -549,34 +561,34 @@ map_mss_clamping (tcp_header_t * tcp, ip_csum_t * sum, u16 mss_clamping)
       kind = data[0];

       if (kind == TCP_OPTION_EOL)
-        break;
+        break;
       else if (kind == TCP_OPTION_NOOP)
-        {
-          opt_len = 1;
-          continue;
-        }
+        {
+          opt_len = 1;
+          continue;
+        }
       else
-        {
-          if (opts_len < 2)
-            return;
-          opt_len = data[1];
+        {
+          if (opts_len < 2)
+            return;
+          opt_len = data[1];

-          if (opt_len < 2 || opt_len > opts_len)
-            return;
-        }
+          if (opt_len < 2 || opt_len > opts_len)
+            return;
+        }

       if (kind == TCP_OPTION_MSS)
-        {
-          mss = *(u16 *) (data + 2);
-          if (clib_net_to_host_u16 (mss) > mss_clamping)
-            {
-              *sum =
-                ip_csum_update (*sum, mss, mss_value_net, ip4_header_t,
-                                length);
-              clib_memcpy (data + 2, &mss_value_net, 2);
-            }
-          return;
-        }
+        {
+          mss = *(u16 *) (data + 2);
+          if (clib_net_to_host_u16 (mss) > mss_clamping)
+            {
+              *sum =
+                ip_csum_update (*sum, mss, mss_value_net, ip4_header_t,
+                                length);
+              clib_memcpy (data + 2, &mss_value_net, 2);
+            }
+          return;
+        }
     }
 }