path: root/src/vnet/vxlan-gbp/vxlan_gbp_packet.h
/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __included_vxlan_gbp_packet_h__
#define __included_vxlan_gbp_packet_h__ 1

#include <vlib/vlib.h>

/*
 * From draft-smith-vxlan-group-policy-04.txt
 *
 *  0                   1                   2                   3
 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |G|R|R|R|I|R|R|R|R|D|E|S|A|R|R|R|        Group Policy ID        |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                VXLAN Network Identifier (VNI) |   Reserved    |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 *
 * G bit: Bit 0 of the initial word is defined as the G (Group Based
 *   Policy Extension) bit.
 *
 * I bit: the I flag MUST be set to 1 for a valid VXLAN Network ID
 *   (VNI).
 *
 * D bit: Bit 9 of the initial word is defined as the Don't Learn bit.
 *   When set, this bit indicates that the egress VTEP MUST NOT learn the
 *   source address of the encapsulated frame.
 *
 * E bit: Bit 10 of the initial word is defined as the E (Bounce) bit.
 *   When set, this bit indicates that the packet has been bounced and
 *   must be dropped.
 *
 * S bit: Bit 11 of the initial word is defined as the source policy
 *   applied bit.
 *
 * A bit: Bit 12 of the initial word is defined as the A (Policy
 *   Applied) bit.  This bit is only defined as the A bit when the G bit
 *   is set to 1.
 *
 *    A = 1 indicates that the group policy has already been applied to
 *    this packet.  Policies MUST NOT be applied by devices when the A
 *    bit is set.
 *
 *    A = 0 indicates that the group policy has not been applied to this
 *    packet.  Group policies MUST be applied by devices when the A bit
 *    is set to 0 and the destination Group has been determined.
 *    Devices that apply the Group policy MUST set the A bit to 1 after
 *    the policy has been applied.
 *
 * Group Policy ID: a 16-bit identifier that indicates the source TSI
 *   group membership of the frame being encapsulated by VXLAN.  Its
 *   value is the source class (sclass) ID.
 *
 */

typedef struct
{
  union
  {
    struct
    {
      union
      {
	struct
	{
	  u8 flag_g_i;
	  u8 gpflags;
	};
	u16 flags;
      };
      u16 sclass;
    };
    u32 flags_sclass_as_u32;
  };
  u32 vni_reserved;
} vxlan_gbp_header_t;
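
/*
 * Worked example (illustrative only, not part of the original header):
 * for a header with G and I set, sclass 64 and VNI 0x1234, the struct
 * fields hold
 *
 *   flag_g_i     = 0x88         (G = 0x80 | I = 0x08)
 *   gpflags      = 0x00
 *   sclass       = 0x0040 in network byte order
 *   vni_reserved = 0x00123400 in network byte order (VNI << 8)
 */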

#define foreach_vxlan_gbp_flags    \
  _ (0x80, G)                      \
  _ (0x08, I)

typedef enum
{
  VXLAN_GBP_FLAGS_NONE = 0,
#define _(n,f) VXLAN_GBP_FLAGS_##f = n,
  foreach_vxlan_gbp_flags
#undef _
} __attribute__ ((packed)) vxlan_gbp_flags_t;
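
/*
 * For reference (illustrative expansion, not in the original source),
 * the X-macro above produces:
 *
 *   VXLAN_GBP_FLAGS_NONE = 0,
 *   VXLAN_GBP_FLAGS_G    = 0x80,
 *   VXLAN_GBP_FLAGS_I    = 0x08,
 */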

#define foreach_vxlan_gbp_gpflags \
_ (0x40, D)                       \
_ (0x20, E)                       \
_ (0x10, S)                       \
_ (0x08, A)

typedef enum
{
  VXLAN_GBP_GPFLAGS_NONE = 0,
#define _(n,f) VXLAN_GBP_GPFLAGS_##f = n,
  foreach_vxlan_gbp_gpflags
#undef _
} __attribute__ ((packed)) vxlan_gbp_gpflags_t;

static inline u32
vxlan_gbp_get_vni (vxlan_gbp_header_t * h)
{
  u32 vni_reserved_host_byte_order;

  vni_reserved_host_byte_order = clib_net_to_host_u32 (h->vni_reserved);
  return vni_reserved_host_byte_order >> 8;
}

static inline u16
vxlan_gbp_get_sclass (vxlan_gbp_header_t * h)
{
  u16 sclass_host_byte_order;

  sclass_host_byte_order = clib_net_to_host_u16 (h->sclass);
  return sclass_host_byte_order;
}

static inline vxlan_gbp_gpflags_t
vxlan_gbp_get_gpflags (vxlan_gbp_header_t * h)
{
  return h->gpflags;
}

static inline vxlan_gbp_flags_t
vxlan_gbp_get_flags (vxlan_gbp_header_t * h)
{
  return h->flag_g_i;
}

static inline void
vxlan_gbp_set_header (vxlan_gbp_header_t * h, u32 vni)
{
  h->vni_reserved = clib_host_to_net_u32 (vni << 8);
  h->flags_sclass_as_u32 = 0;
  h->flag_g_i = VXLAN_GBP_FLAGS_I | VXLAN_GBP_FLAGS_G;
}
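
/*
 * Usage sketch (illustrative only; this helper is hypothetical and not
 * part of the original header): build a GBP header carrying VNI 1234
 * and source class 42 with the policy-applied bit set, then read the
 * fields back.
 */
static inline void
vxlan_gbp_header_example (void)
{
  vxlan_gbp_header_t h;

  vxlan_gbp_set_header (&h, 1234);	 /* sets G|I, zeroes gpflags/sclass */
  h.sclass = clib_host_to_net_u16 (42);	 /* sclass is network byte order */
  h.gpflags |= VXLAN_GBP_GPFLAGS_A;	 /* mark policy as already applied */

  ASSERT (vxlan_gbp_get_vni (&h) == 1234);
  ASSERT (vxlan_gbp_get_sclass (&h) == 42);
  ASSERT (vxlan_gbp_get_gpflags (&h) & VXLAN_GBP_GPFLAGS_A);
}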

extern u8 *format_vxlan_gbp_header_flags (u8 * s, va_list * args);
extern u8 *format_vxlan_gbp_header_gpflags (u8 * s, va_list * args);

#endif /* __included_vxlan_gbp_packet_h__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
cm"> * Copyright (c) 2013,2018 Cisco and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <vlib/vlib.h> #include <vnet/vnet.h> #include <vnet/ethernet/ethernet.h> #include <vnet/ethernet/packet.h> #include <vnet/ip/ip_packet.h> #include <vnet/ip/ip4_packet.h> #include <vnet/ip/ip6_packet.h> #include <vlib/cli.h> #include <vnet/l2/l2_input.h> #include <vnet/l2/l2_output.h> #include <vnet/l2/feat_bitmap.h> #include <vppinfra/error.h> #include <vppinfra/hash.h> #include <vppinfra/cache.h> #include <vnet/classify/vnet_classify.h> #include <vnet/classify/in_out_acl.h> typedef struct { /* Next nodes for each feature */ u32 feat_next_node_index[IN_OUT_ACL_N_TABLE_GROUPS][32]; /* convenience variables */ vlib_main_t *vlib_main; vnet_main_t *vnet_main; } l2_in_out_acl_main_t; typedef struct { u32 sw_if_index; u32 next_index; u32 table_index; u32 offset; } l2_in_out_acl_trace_t; /* packet trace format function */ static u8 * format_l2_in_out_acl_trace (u8 * s, u32 is_output, va_list * args) { CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); l2_in_out_acl_trace_t *t = va_arg (*args, l2_in_out_acl_trace_t *); s = format (s, "%s: sw_if_index %d, next_index %d, table %d, offset %d", is_output ? 
"OUTACL" : "INACL", t->sw_if_index, t->next_index, t->table_index, t->offset); return s; } static u8 * format_l2_inacl_trace (u8 * s, va_list * args) { return format_l2_in_out_acl_trace (s, IN_OUT_ACL_INPUT_TABLE_GROUP, args); } static u8 * format_l2_outacl_trace (u8 * s, va_list * args) { return format_l2_in_out_acl_trace (s, IN_OUT_ACL_OUTPUT_TABLE_GROUP, args); } extern l2_in_out_acl_main_t l2_in_out_acl_main; #ifndef CLIB_MARCH_VARIANT l2_in_out_acl_main_t l2_in_out_acl_main; #endif /* CLIB_MARCH_VARIANT */ extern vlib_node_registration_t l2_inacl_node; extern vlib_node_registration_t l2_outacl_node; #define foreach_l2_inacl_error \ _(NONE, "valid input ACL packets") \ _(MISS, "input ACL misses") \ _(HIT, "input ACL hits") \ _(CHAIN_HIT, "input ACL hits after chain walk") \ _(TABLE_MISS, "input ACL table-miss drops") \ _(SESSION_DENY, "input ACL session deny drops") #define foreach_l2_outacl_error \ _(NONE, "valid output ACL packets") \ _(MISS, "output ACL misses") \ _(HIT, "output ACL hits") \ _(CHAIN_HIT, "output ACL hits after chain walk") \ _(TABLE_MISS, "output ACL table-miss drops") \ _(SESSION_DENY, "output ACL session deny drops") typedef enum { #define _(sym,str) L2_INACL_ERROR_##sym, foreach_l2_inacl_error #undef _ L2_INACL_N_ERROR, } l2_inacl_error_t; static char *l2_inacl_error_strings[] = { #define _(sym,string) string, foreach_l2_inacl_error #undef _ }; typedef enum { #define _(sym,str) L2_OUTACL_ERROR_##sym, foreach_l2_outacl_error #undef _ L2_OUTACL_N_ERROR, } l2_outacl_error_t; static char *l2_outacl_error_strings[] = { #define _(sym,string) string, foreach_l2_outacl_error #undef _ }; static inline uword l2_in_out_acl_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame, int is_output) { u32 n_left_from, *from, *to_next; acl_next_index_t next_index; l2_in_out_acl_main_t *msm = &l2_in_out_acl_main; in_out_acl_main_t *am = &in_out_acl_main; vnet_classify_main_t *vcm = am->vnet_classify_main; in_out_acl_table_id_t tid = IN_OUT_ACL_TABLE_L2; f64 now = vlib_time_now (vm); u32 hits = 0; u32 misses = 0; u32 chain_hits = 0; from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; /* number of packets to process */ next_index = node->cached_next_index; /* First pass: compute hashes */ while (n_left_from > 2) { vlib_buffer_t *b0, *b1; u32 bi0, bi1; u8 *h0, *h1; u32 sw_if_index0, sw_if_index1; u32 table_index0, table_index1; vnet_classify_table_t *t0, *t1; /* prefetch next iteration */ { vlib_buffer_t *p1, *p2; p1 = vlib_get_buffer (vm, from[1]); p2 = vlib_get_buffer (vm, from[2]); vlib_prefetch_buffer_header (p1, STORE); clib_prefetch_store (p1->data); vlib_prefetch_buffer_header (p2, STORE); clib_prefetch_store (p2->data); } bi0 = from[0]; b0 = vlib_get_buffer (vm, bi0); bi1 = from[1]; b1 = vlib_get_buffer (vm, bi1); sw_if_index0 = vnet_buffer (b0)->sw_if_index[is_output ? VLIB_TX : VLIB_RX]; table_index0 = am->classify_table_index_by_sw_if_index[is_output][tid][sw_if_index0]; sw_if_index1 = vnet_buffer (b1)->sw_if_index[is_output ? 
VLIB_TX : VLIB_RX]; table_index1 = am->classify_table_index_by_sw_if_index[is_output][tid][sw_if_index1]; t0 = pool_elt_at_index (vcm->tables, table_index0); t1 = pool_elt_at_index (vcm->tables, table_index1); if (t0->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA) h0 = (void *) vlib_buffer_get_current (b0) + t0->current_data_offset; else h0 = (void *) vlib_buffer_get_current (b0); vnet_buffer (b0)->l2_classify.hash = vnet_classify_hash_packet (t0, (u8 *) h0); vnet_classify_prefetch_bucket (t0, vnet_buffer (b0)->l2_classify.hash); if (t1->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA) h1 = (void *) vlib_buffer_get_current (b1) + t1->current_data_offset; else h1 = (void *) vlib_buffer_get_current (b1); vnet_buffer (b1)->l2_classify.hash = vnet_classify_hash_packet (t1, (u8 *) h1); vnet_classify_prefetch_bucket (t1, vnet_buffer (b1)->l2_classify.hash); vnet_buffer (b0)->l2_classify.table_index = table_index0; vnet_buffer (b1)->l2_classify.table_index = table_index1; from += 2; n_left_from -= 2; } while (n_left_from > 0) { vlib_buffer_t *b0; u32 bi0; u8 *h0; u32 sw_if_index0; u32 table_index0; vnet_classify_table_t *t0; bi0 = from[0]; b0 = vlib_get_buffer (vm, bi0); sw_if_index0 = vnet_buffer (b0)->sw_if_index[is_output ? VLIB_TX : VLIB_RX]; table_index0 = am->classify_table_index_by_sw_if_index[is_output][tid][sw_if_index0]; t0 = pool_elt_at_index (vcm->tables, table_index0); if (t0->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA) h0 = (void *) vlib_buffer_get_current (b0) + t0->current_data_offset; else h0 = (void *) vlib_buffer_get_current (b0); vnet_buffer (b0)->l2_classify.hash = vnet_classify_hash_packet (t0, (u8 *) h0); vnet_buffer (b0)->l2_classify.table_index = table_index0; vnet_classify_prefetch_bucket (t0, vnet_buffer (b0)->l2_classify.hash); from++; n_left_from--; } next_index = node->cached_next_index; from = vlib_frame_vector_args (frame); n_left_from = frame->n_vectors; while (n_left_from > 0) { u32 n_left_to_next; vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); /* Not enough load/store slots to dual loop... */ while (n_left_from > 0 && n_left_to_next > 0) { u32 bi0; vlib_buffer_t *b0; u32 next0 = ACL_NEXT_INDEX_DENY; u32 table_index0; vnet_classify_table_t *t0; vnet_classify_entry_t *e0; u64 hash0; u8 *h0; u8 error0; /* Stride 3 seems to work best */ if (PREDICT_TRUE (n_left_from > 3)) { vlib_buffer_t *p1 = vlib_get_buffer (vm, from[3]); vnet_classify_table_t *tp1; u32 table_index1; u64 phash1; table_index1 = vnet_buffer (p1)->l2_classify.table_index; if (PREDICT_TRUE (table_index1 != ~0)) { tp1 = pool_elt_at_index (vcm->tables, table_index1); phash1 = vnet_buffer (p1)->l2_classify.hash; vnet_classify_prefetch_entry (tp1, phash1); } } /* speculatively enqueue b0 to the current next frame */ bi0 = from[0]; to_next[0] = bi0; from += 1; to_next += 1; n_left_from -= 1; n_left_to_next -= 1; b0 = vlib_get_buffer (vm, bi0); table_index0 = vnet_buffer (b0)->l2_classify.table_index; e0 = 0; t0 = 0; vnet_buffer (b0)->l2_classify.opaque_index = ~0; /* Determine the next node */ next0 = vnet_l2_feature_next (b0, msm->feat_next_node_index[is_output], is_output ? 
L2OUTPUT_FEAT_ACL : L2INPUT_FEAT_ACL); if (PREDICT_TRUE (table_index0 != ~0)) { hash0 = vnet_buffer (b0)->l2_classify.hash; t0 = pool_elt_at_index (vcm->tables, table_index0); if (t0->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA) h0 = (void *) vlib_buffer_get_current (b0) + t0->current_data_offset; else h0 = (void *) vlib_buffer_get_current (b0); e0 = vnet_classify_find_entry (t0, (u8 *) h0, hash0, now); if (e0) { vnet_buffer (b0)->l2_classify.opaque_index = e0->opaque_index; vlib_buffer_advance (b0, e0->advance); next0 = (e0->next_index < ACL_NEXT_INDEX_N_NEXT) ? e0->next_index : next0; hits++; if (is_output) error0 = (next0 == ACL_NEXT_INDEX_DENY) ? L2_OUTACL_ERROR_SESSION_DENY : L2_INACL_ERROR_NONE; else error0 = (next0 == ACL_NEXT_INDEX_DENY) ? L2_OUTACL_ERROR_SESSION_DENY : L2_OUTACL_ERROR_NONE; b0->error = node->errors[error0]; } else { while (1) { if (PREDICT_TRUE (t0->next_table_index != ~0)) t0 = pool_elt_at_index (vcm->tables, t0->next_table_index); else { next0 = (t0->miss_next_index < ACL_NEXT_INDEX_N_NEXT) ? t0->miss_next_index : next0; misses++; if (is_output) error0 = (next0 == ACL_NEXT_INDEX_DENY) ? L2_OUTACL_ERROR_TABLE_MISS : L2_OUTACL_ERROR_NONE; else error0 = (next0 == ACL_NEXT_INDEX_DENY) ? L2_INACL_ERROR_TABLE_MISS : L2_INACL_ERROR_NONE; b0->error = node->errors[error0]; break; } if (t0->current_data_flag == CLASSIFY_FLAG_USE_CURR_DATA) h0 = (void *) vlib_buffer_get_current (b0) + t0->current_data_offset; else h0 = (void *) vlib_buffer_get_current (b0); hash0 = vnet_classify_hash_packet (t0, (u8 *) h0); e0 = vnet_classify_find_entry (t0, (u8 *) h0, hash0, now); if (e0) { vlib_buffer_advance (b0, e0->advance); next0 = (e0->next_index < ACL_NEXT_INDEX_N_NEXT) ? e0->next_index : next0; hits++; chain_hits++; if (is_output) error0 = (next0 == ACL_NEXT_INDEX_DENY) ? L2_OUTACL_ERROR_SESSION_DENY : L2_OUTACL_ERROR_NONE; else error0 = (next0 == ACL_NEXT_INDEX_DENY) ? L2_INACL_ERROR_SESSION_DENY : L2_INACL_ERROR_NONE; b0->error = node->errors[error0]; break; } } } } if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE) && (b0->flags & VLIB_BUFFER_IS_TRACED))) { l2_in_out_acl_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t)); t->sw_if_index = vnet_buffer (b0)->sw_if_index[is_output ? VLIB_TX : VLIB_RX]; t->next_index = next0; t->table_index = t0 ? t0 - vcm->tables : ~0; t->offset = (t0 && e0) ? vnet_classify_get_offset (t0, e0) : ~0; } /* verify speculative enqueue, maybe switch current next frame */ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, bi0, next0); } vlib_put_next_frame (vm, node, next_index, n_left_to_next); } vlib_node_increment_counter (vm, node->node_index, is_output ? L2_OUTACL_ERROR_MISS : L2_INACL_ERROR_MISS, misses); vlib_node_increment_counter (vm, node->node_index, is_output ? L2_OUTACL_ERROR_HIT : L2_INACL_ERROR_HIT, hits); vlib_node_increment_counter (vm, node->node_index, is_output ? 
L2_OUTACL_ERROR_CHAIN_HIT : L2_INACL_ERROR_CHAIN_HIT, chain_hits); return frame->n_vectors; } VLIB_NODE_FN (l2_inacl_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { return l2_in_out_acl_node_fn (vm, node, frame, IN_OUT_ACL_INPUT_TABLE_GROUP); } VLIB_NODE_FN (l2_outacl_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { return l2_in_out_acl_node_fn (vm, node, frame, IN_OUT_ACL_OUTPUT_TABLE_GROUP); } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (l2_inacl_node) = { .name = "l2-input-acl", .vector_size = sizeof (u32), .format_trace = format_l2_inacl_trace, .type = VLIB_NODE_TYPE_INTERNAL, .n_errors = ARRAY_LEN(l2_inacl_error_strings), .error_strings = l2_inacl_error_strings, .n_next_nodes = ACL_NEXT_INDEX_N_NEXT, /* edit / add dispositions here */ .next_nodes = { [ACL_NEXT_INDEX_DENY] = "error-drop", }, }; VLIB_REGISTER_NODE (l2_outacl_node) = { .name = "l2-output-acl", .vector_size = sizeof (u32), .format_trace = format_l2_outacl_trace, .type = VLIB_NODE_TYPE_INTERNAL, .n_errors = ARRAY_LEN(l2_outacl_error_strings), .error_strings = l2_outacl_error_strings, .n_next_nodes = ACL_NEXT_INDEX_N_NEXT, /* edit / add dispositions here */ .next_nodes = { [ACL_NEXT_INDEX_DENY] = "error-drop", }, }; /* *INDENT-ON* */ #ifndef CLIB_MARCH_VARIANT clib_error_t * l2_in_out_acl_init (vlib_main_t * vm) { l2_in_out_acl_main_t *mp = &l2_in_out_acl_main; mp->vlib_main = vm; mp->vnet_main = vnet_get_main (); /* Initialize the feature next-node indexes */ feat_bitmap_init_next_nodes (vm, l2_inacl_node.index, L2INPUT_N_FEAT, l2input_get_feat_names (), mp->feat_next_node_index [IN_OUT_ACL_INPUT_TABLE_GROUP]); feat_bitmap_init_next_nodes (vm, l2_outacl_node.index, L2OUTPUT_N_FEAT, l2output_get_feat_names (), mp->feat_next_node_index [IN_OUT_ACL_OUTPUT_TABLE_GROUP]); return 0; } VLIB_INIT_FUNCTION (l2_in_out_acl_init); #endif /* CLIB_MARCH_VARIANT */ /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */