path: root/src/vppinfra/byte_order.h
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
  Copyright (c) 2004 Eliot Dresselhaus

  Permission is hereby granted, free of charge, to any person obtaining
  a copy of this software and associated documentation files (the
  "Software"), to deal in the Software without restriction, including
  without limitation the rights to use, copy, modify, merge, publish,
  distribute, sublicense, and/or sell copies of the Software, and to
  permit persons to whom the Software is furnished to do so, subject to
  the following conditions:

  The above copyright notice and this permission notice shall be
  included in all copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#ifndef included_clib_byte_order_h
#define included_clib_byte_order_h

#include <vppinfra/clib.h>

#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define CLIB_ARCH_IS_BIG_ENDIAN (0)
#define CLIB_ARCH_IS_LITTLE_ENDIAN (1)
#else
/* Default is big endian. */
#define CLIB_ARCH_IS_BIG_ENDIAN (1)
#define CLIB_ARCH_IS_LITTLE_ENDIAN (0)
#endif

/* Big/little endian. */
#define clib_arch_is_big_endian    CLIB_ARCH_IS_BIG_ENDIAN
#define clib_arch_is_little_endian CLIB_ARCH_IS_LITTLE_ENDIAN
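
/*
 * Illustrative sketch (not part of the original header): the macros above
 * are compile-time constants, so endian-specific code can be selected
 * either with the preprocessor or with a plain `if' that the compiler
 * folds away. The function name below is hypothetical.
 */
#if 0
static inline int
example_is_little_endian_host (void)
{
  if (clib_arch_is_little_endian)
    return 1;			/* e.g. x86_64 and most ARM configurations */
  return 0;			/* big-endian target */
}
#endif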

always_inline u16
clib_byte_swap_u16 (u16 x)
{
  return __builtin_bswap16 (x);
}

always_inline i16
clib_byte_swap_i16 (i16 x)
{
  return clib_byte_swap_u16 (x);
}

always_inline u32
clib_byte_swap_u32 (u32 x)
{
  return __builtin_bswap32 (x);
}

always_inline i32
clib_byte_swap_i32 (i32 x)
{
  return clib_byte_swap_u32 (x);
}

always_inline u64
clib_byte_swap_u64 (u64 x)
{
  return __builtin_bswap64 (x);
}

always_inline i64
clib_byte_swap_i64 (i64 x)
{
  return clib_byte_swap_u64 (x);
}
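
/*
 * Illustrative sketch (not part of the original header): each swap
 * reverses the byte order of the whole word; the signed variants reuse
 * the unsigned swaps bit-for-bit. The function name is hypothetical.
 */
#if 0
static inline void
example_byte_swap_checks (void)
{
  ASSERT (clib_byte_swap_u16 (0x1234) == 0x3412);
  ASSERT (clib_byte_swap_u32 (0x11223344) == 0x44332211);
  ASSERT (clib_byte_swap_u64 (0x1122334455667788ULL) ==
	  0x8877665544332211ULL);
}
#endif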

#define _(sex,type)						\
/* HOST -> SEX */						\
always_inline type						\
clib_host_to_##sex##_##type (type x)				\
{								\
  if (! clib_arch_is_##sex##_endian)				\
    x = clib_byte_swap_##type (x);				\
  return x;							\
}								\
								\
always_inline type						\
clib_host_to_##sex##_mem_##type (type * x)			\
{								\
  type v = x[0];						\
  return clib_host_to_##sex##_##type (v);			\
}								\
								\
always_inline type						\
clib_host_to_##sex##_unaligned_mem_##type (type * x)		\
{								\
  type v = clib_mem_unaligned (x, type);			\
  return clib_host_to_##sex##_##type (v);			\
}								\
								\
/* SEX -> HOST */						\
always_inline type						\
clib_##sex##_to_host_##type (type x)				\
{ return clib_host_to_##sex##_##type (x); }			\
								\
always_inline type						\
clib_##sex##_to_host_mem_##type (type * x)			\
{ return clib_host_to_##sex##_mem_##type (x); }			\
								\
always_inline type						\
clib_##sex##_to_host_unaligned_mem_##type (type * x)		\
{ return clib_host_to_##sex##_unaligned_mem_##type (x); }

#ifndef __cplusplus
_(little, u16)
_(little, u32)
_(little, u64)
_(little, i16)
_(little, i32)
_(little, i64)
_(big, u16)
_(big, u32)
_(big, u64)
_(big, i16)
_(big, i32)
_(big, i64)
#endif
#undef _
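
/*
 * Illustrative sketch (not part of the original header): the template
 * above expands `_(big, u16)' to roughly the following (the two mem
 * variants are omitted here). On a little-endian host the condition is
 * true and the bytes are swapped; on a big-endian host the value passes
 * through unchanged.
 */
#if 0
always_inline u16
clib_host_to_big_u16 (u16 x)
{
  if (!clib_arch_is_big_endian)
    x = clib_byte_swap_u16 (x);
  return x;
}
#endif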
/* Network "net" alias for "big". */
#define _(type)						\
always_inline type					\
clib_net_to_host_##type (type x)			\
{ return clib_big_to_host_##type (x); }			\
							\
always_inline type					\
clib_net_to_host_mem_##type (type * x)			\
{ return clib_big_to_host_mem_##type (x); }		\
							\
always_inline type					\
clib_net_to_host_unaligned_mem_##type (type * x)	\
{ return clib_big_to_host_unaligned_mem_##type (x); }	\
							\
always_inline type					\
clib_host_to_net_##type (type x)			\
{ return clib_host_to_big_##type (x); }			\
							\
always_inline type					\
clib_host_to_net_mem_##type (type * x)			\
{ return clib_host_to_big_mem_##type (x); }		\
							\
always_inline type					\
clib_host_to_net_unaligned_mem_##type (type * x)	\
{ return clib_host_to_big_unaligned_mem_##type (x); }
#ifndef __cplusplus
_(u16);
_(i16);
_(u32);
_(i32);
_(u64);
_(i64);
#endif

#undef _

/* Dummy endian swap functions for IEEE floating-point numbers */
/* *INDENT-OFF* */
always_inline f64 clib_net_to_host_f64 (f64 x) { return x; }
always_inline f64 clib_host_to_net_f64 (f64 x) { return x; }
always_inline f32 clib_net_to_host_f32 (f32 x) { return x; }
always_inline f32 clib_host_to_net_f32 (f32 x) { return x; }
/* *INDENT-ON* */
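
/*
 * Illustrative sketch (not part of the original header): typical use of
 * the "net" aliases when writing protocol fields. The struct and
 * function names are hypothetical.
 */
#if 0
typedef struct
{
  u16 src_port, dst_port;
} example_ports_t;

static inline void
example_set_ports (example_ports_t *p, u16 src, u16 dst)
{
  p->src_port = clib_host_to_net_u16 (src);	/* host -> network order */
  p->dst_port = clib_host_to_net_u16 (dst);
}
#endif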

#endif /* included_clib_byte_order_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */

el);
  return user_id;
}

/*
 * Allocate a new lookup context index.
 * Supply the id assigned to your module during registration,
 * and two values of your choice identifying instances
 * of use within your module. They are useful for debugging.
 * If >= 0 - context id. If < 0 - error code.
 */
static int
acl_plugin_get_lookup_context_index (u32 acl_user_id, u32 val1, u32 val2)
{
  acl_main_t *am = &acl_main;
  acl_lookup_context_t *acontext;

  if (!acl_user_id_valid (am, acl_user_id))
    return VNET_API_ERROR_INVALID_REGISTRATION;

  /*
   * The lookup context index allocation is
   * an operation done within the global heap,
   * so no heap switching necessary.
   */
  pool_get (am->acl_lookup_contexts, acontext);
  acontext->acl_indices = 0;
  acontext->context_user_id = acl_user_id;
  acontext->user_val1 = val1;
  acontext->user_val2 = val2;

  u32 new_context_id = acontext - am->acl_lookup_contexts;
  vec_add1 (am->acl_users[acl_user_id].lookup_contexts, new_context_id);
  return new_context_id;
}

static void
lock_acl (acl_main_t *am, u32 acl, u32 lc_index)
{
  vec_validate (am->lc_index_vec_by_acl, acl);
  elog_acl_cond_trace_X2 (am, (am->trace_acl),
			  "lock acl %d in lc_index %d", "i4i4", acl,
			  lc_index);
  vec_add1 (am->lc_index_vec_by_acl[acl], lc_index);
}

static void
lock_acl_vec (u32 lc_index, u32 *acls)
{
  int i;
  acl_main_t *am = &acl_main;
  for (i = 0; i < vec_len (acls); i++)
    {
      lock_acl (am, acls[i], lc_index);
    }
}

static void
unlock_acl (acl_main_t *am, u32 acl, u32 lc_index)
{
  vec_validate (am->lc_index_vec_by_acl, acl);
  elog_acl_cond_trace_X2 (am, (am->trace_acl),
			  "unlock acl %d in lc_index %d", "i4i4", acl,
			  lc_index);
  u32 index = vec_search (am->lc_index_vec_by_acl[acl], lc_index);
  if (index != ~0)
    vec_del1 (am->lc_index_vec_by_acl[acl], index);
  else
    clib_warning ("BUG: can not unlock acl %d lc_index %d", acl, lc_index);
}

static void
unlock_acl_vec (u32 lc_index, u32 *acls)
{
  int i;
  acl_main_t *am = &acl_main;
  for (i = 0; i < vec_len (acls); i++)
    unlock_acl (am, acls[i], lc_index);
}

static void
apply_acl_vec (u32 lc_index, u32 *acls)
{
  int i;
  acl_main_t *am = &acl_main;
  for (i = 0; i < vec_len (acls); i++)
    hash_acl_apply (am, lc_index, acls[i], i);
}

static void
unapply_acl_vec (u32 lc_index, u32 *acls)
{
  int i;
  acl_main_t *am = &acl_main;
  if (vec_len (acls) == 0)
    return;
  for (i = vec_len (acls); i > 0; i--)
    hash_acl_unapply (am, lc_index, acls[i - 1]);
}

/*
 * Release the lookup context index and destroy
 * any associated data structures.
 */
static void
acl_plugin_put_lookup_context_index (u32 lc_index)
{
  acl_main_t *am = &acl_main;
  elog_acl_cond_trace_X1 (am, (am->trace_acl),
			  "LOOKUP-CONTEXT: put-context lc_index %d", "i4",
			  lc_index);
  if (!acl_lc_index_valid (am, lc_index))
    {
      clib_warning ("BUG: lc_index %d is not valid", lc_index);
      return;
    }
  acl_lookup_context_t *acontext =
    pool_elt_at_index (am->acl_lookup_contexts, lc_index);

  u32 index =
    vec_search (am->acl_users[acontext->context_user_id].lookup_contexts,
		lc_index);
  ASSERT (index != ~0);
  vec_del1 (am->acl_users[acontext->context_user_id].lookup_contexts, index);
  unapply_acl_vec (lc_index, acontext->acl_indices);
  unlock_acl_vec (lc_index, acontext->acl_indices);
  vec_free (acontext->acl_indices);
  pool_put (am->acl_lookup_contexts, acontext);
}
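
/*
 * Illustrative sketch (not part of the original source): the expected
 * lifecycle of a lookup context from a client module's point of view,
 * assuming `my_user_id' came from this module's earlier registration
 * call. All other names here are hypothetical.
 */
#if 0
static void
example_lookup_context_lifecycle (u32 my_user_id, u32 sw_if_index,
				  u32 is_input)
{
  /* val1/val2 are free-form debug values; an interface and a direction */
  int lc_index = acl_plugin_get_lookup_context_index (my_user_id,
						      sw_if_index, is_input);
  if (lc_index < 0)
    return;			/* invalid registration id */
  /* ... perform lookups against lc_index ... */
  acl_plugin_put_lookup_context_index (lc_index);
}
#endif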

/*
 * Prepare the sequential vector of ACL#s to lookup within a given context.
 * Any existing list will be overwritten. acl_list is a vector.
 */
static int
acl_plugin_set_acl_vec_for_context (u32 lc_index, u32 *acl_list)
{
  int rv = 0;
  uword *seen_acl_bitmap = 0;
  u32 *pacln = 0;
  acl_main_t *am = &acl_main;
  acl_lookup_context_t *acontext;
  if (am->trace_acl)
    {
      u32 i;
      elog_acl_cond_trace_X1 (am, (1),
			      "LOOKUP-CONTEXT: set-acl-list lc_index %d",
			      "i4", lc_index);
      for (i = 0; i < vec_len (acl_list); i++)
	{
	  elog_acl_cond_trace_X2 (am, (1), "   acl-list[%d]: %d", "i4i4", i,
				  acl_list[i]);
	}
    }
  if (!acl_lc_index_valid (am, lc_index))
    {
      clib_warning ("BUG: lc_index %d is not valid", lc_index);
      return -1;
    }
  vec_foreach (pacln, acl_list)
  {
    if (pool_is_free_index (am->acls, *pacln))
      {
	/* ACL is not defined. Can not apply */
	clib_warning ("ERROR: ACL %d not defined", *pacln);
	rv = VNET_API_ERROR_NO_SUCH_ENTRY;
	goto done;
      }
    if (clib_bitmap_get (seen_acl_bitmap, *pacln))
      {
	/* ACL being applied twice within the list. error. */
	clib_warning ("ERROR: ACL %d being applied twice", *pacln);
	rv = VNET_API_ERROR_ENTRY_ALREADY_EXISTS;
	goto done;
      }
    seen_acl_bitmap = clib_bitmap_set (seen_acl_bitmap, *pacln, 1);
  }

  acontext = pool_elt_at_index (am->acl_lookup_contexts, lc_index);
  u32 *old_acl_vector = acontext->acl_indices;
  acontext->acl_indices = vec_dup (acl_list);

  unapply_acl_vec (lc_index, old_acl_vector);
  unlock_acl_vec (lc_index, old_acl_vector);
  lock_acl_vec (lc_index, acontext->acl_indices);
  apply_acl_vec (lc_index, acontext->acl_indices);
  vec_free (old_acl_vector);

done:
  clib_bitmap_free (seen_acl_bitmap);
  return rv;
}

void
acl_plugin_lookup_context_notify_acl_change (u32 acl_num)
{
  acl_main_t *am = &acl_main;
  if (acl_plugin_acl_exists (acl_num))
    {
      if (hash_acl_exists (am, acl_num))
	{
	  /* this is a modification, clean up the older entries */
	  hash_acl_delete (am, acl_num);
	}
      hash_acl_add (am, acl_num);
    }
  else
    {
      /* this is a deletion notification */
      hash_acl_delete (am, acl_num);
    }
}

/* Fill the 5-tuple from the packet */
static void
acl_plugin_fill_5tuple (u32 lc_index, vlib_buffer_t *b0, int is_ip6,
			int is_input, int is_l2_path,
			fa_5tuple_opaque_t *p5tuple_pkt)
{
  acl_plugin_fill_5tuple_inline (&acl_main, lc_index, b0, is_ip6, is_input,
				 is_l2_path, p5tuple_pkt);
}

static int
acl_plugin_match_5tuple (u32 lc_index, fa_5tuple_opaque_t *pkt_5tuple,
			 int is_ip6, u8 *r_action, u32 *r_acl_pos_p,
			 u32 *r_acl_match_p, u32 *r_rule_match_p,
			 u32 *trace_bitmap)
{
  return acl_plugin_match_5tuple_inline (&acl_main, lc_index, pkt_5tuple,
					 is_ip6, r_action, r_acl_pos_p,
					 r_acl_match_p, r_rule_match_p,
					 trace_bitmap);
}
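
/*
 * Illustrative sketch (not part of the original source): how a datapath
 * node would combine the two calls above, first extracting the 5-tuple
 * from a buffer and then matching it within a lookup context. Names
 * other than the two plugin calls are hypothetical.
 */
#if 0
static int
example_classify_packet (u32 lc_index, vlib_buffer_t *b0, int is_ip6)
{
  fa_5tuple_opaque_t pkt_5tuple;
  u8 action = 0;
  u32 acl_pos, acl_match, rule_match, trace_bitmap = 0;

  acl_plugin_fill_5tuple (lc_index, b0, is_ip6, 1 /* is_input */,
			  0 /* is_l2_path */, &pkt_5tuple);
  if (acl_plugin_match_5tuple (lc_index, &pkt_5tuple, is_ip6, &action,
			       &acl_pos, &acl_match, &rule_match,
			       &trace_bitmap))
    return action;		/* a rule matched; act on its action */
  return -1;			/* no match in this context */
}
#endif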

void
acl_plugin_show_lookup_user (u32 user_index)
{
  acl_main_t *am = &acl_main;
  vlib_main_t *vm = am->vlib_main;
  acl_lookup_context_user_t *auser;

  pool_foreach (auser, am->acl_users)
    {
      u32 curr_user_index = (auser - am->acl_users);
      if (user_index == ~0 || (curr_user_index == user_index))
	{
	  vlib_cli_output (vm, "index %d:%s:%s:%s", curr_user_index,
			   auser->user_module_name, auser->val1_label,
			   auser->val2_label);
	}
    }
}

void
acl_plugin_show_lookup_context (u32 lc_index)
{
  acl_main_t *am = &acl_main;
  vlib_main_t *vm = am->vlib_main;
  acl_lookup_context_t *acontext;
  // clib_warning("LOOKUP-CONTEXT: lc_index %d acl_list [ %U ]", lc_index, format_vec32, acl_list, "%d");
  if (!am->acl_lookup_contexts)
    {
      vlib_cli_output (vm, "ACL lookup contexts are not initialized");
      return;
    }
  pool_foreach (acontext, am->acl_lookup_contexts)
    {
      u32 curr_lc_index = (acontext - am->acl_lookup_contexts);
      if ((lc_index == ~0) || (curr_lc_index == lc_index))
	{
	  if (acl_user_id_valid (am, acontext->context_user_id))
	    {
	      acl_lookup_context_user_t *auser =
		pool_elt_at_index (am->acl_users,
				   acontext->context_user_id);
	      vlib_cli_output (vm,
			       "index %d:%s %s: %d %s: %d, acl_indices: %U",
			       curr_lc_index, auser->user_module_name,
			       auser->val1_label, acontext->user_val1,
			       auser->val2_label, acontext->user_val2,
			       format_vec32, acontext->acl_indices, "%d");
	    }
	  else
	    {
	      vlib_cli_output (vm,
			       "index %d: user_id: %d user_val1: %d user_val2: %d, acl_indices: %U",
			       curr_lc_index, acontext->context_user_id,
			       acontext->user_val1, acontext->user_val2,
			       format_vec32, acontext->acl_indices, "%d");
	    }
	}
    }
}

void *
acl_plugin_get_p_acl_main (void)
{
  return &acl_main;
}

__clib_export clib_error_t *
acl_plugin_methods_vtable_init (acl_plugin_methods_t *m)
{
  m->p_acl_main = &acl_main;
#define _(name) m->name = acl_plugin_ ## name;
  foreach_acl_plugin_exported_method_name
#undef _
  return 0;
}
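
/*
 * Illustrative note (not part of the original source): the `_' macro in
 * acl_plugin_methods_vtable_init expands every name listed in
 * foreach_acl_plugin_exported_method_name into a vtable assignment, e.g.
 * (assuming fill_5tuple is in that list):
 *
 *   m->fill_5tuple = acl_plugin_fill_5tuple;
 *
 * This lets other plugins call into the ACL plugin through the
 * acl_plugin_methods_t vtable instead of linking to it directly.
 */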