/*
 *------------------------------------------------------------------
 * span_api.c - span mirroring api
 *
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vnet/vnet.h>
#include <vlibmemory/api.h>

#include <vnet/interface.h>
#include <vnet/api_errno.h>
#include <vnet/span/span.h>

#include <vnet/vnet_msg_enum.h>

#define vl_typedefs		/* define message structures */
#include <vnet/vnet_all_api_h.h>
#undef vl_typedefs

#define vl_endianfun		/* define message structures */
#include <vnet/vnet_all_api_h.h>
#undef vl_endianfun

/* instantiate all the print functions we know about */
#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
#define vl_printfun
#include <vnet/vnet_all_api_h.h>
#undef vl_printfun

#include <vlibapi/api_helper_macros.h>

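/* SPAN API messages handled by this module */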
#define foreach_vpe_api_msg                             \
_(SW_INTERFACE_SPAN_ENABLE_DISABLE, sw_interface_span_enable_disable)   \
_(SW_INTERFACE_SPAN_DUMP, sw_interface_span_dump)                       \

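/*
 * Enable or disable mirroring of traffic from sw_if_index_from onto
 * sw_if_index_to.  mp->state selects the direction (bit 0 = rx,
 * bit 1 = tx, 0 = disable), matching the encoding the dump handler
 * below reconstructs; mp->is_l2 selects the L2 feature path rather
 * than the device path.
 *
 * Illustrative client-side sketch (not part of this file): a binary
 * API client mirroring rx+tx traffic from sw_if_index 1 onto
 * sw_if_index 2 on the device path would fill the request roughly as
 *
 *   mp->sw_if_index_from = htonl (1);
 *   mp->sw_if_index_to = htonl (2);
 *   mp->state = 3;
 *   mp->is_l2 = 0;
 */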
static void
  vl_api_sw_interface_span_enable_disable_t_handler
  (vl_api_sw_interface_span_enable_disable_t * mp)
{
  vl_api_sw_interface_span_enable_disable_reply_t *rmp;
  int rv;

  vlib_main_t *vm = vlib_get_main ();

  rv = span_add_delete_entry (vm, ntohl (mp->sw_if_index_from),
			      ntohl (mp->sw_if_index_to), mp->state,
			      mp->is_l2 ? SPAN_FEAT_L2 : SPAN_FEAT_DEVICE);

  REPLY_MACRO (VL_API_SW_INTERFACE_SPAN_ENABLE_DISABLE_REPLY);
}

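/*
 * Walk all SPAN source interfaces for the requested feature path (L2
 * or device) and send one details message per (from, to) mirror pair,
 * reconstructing the rx/tx state from the per-direction mirror-port
 * bitmaps.
 */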
static void
vl_api_sw_interface_span_dump_t_handler (vl_api_sw_interface_span_dump_t * mp)
{

  vl_api_registration_t *reg;
  span_interface_t *si;
  vl_api_sw_interface_span_details_t *rmp;
  span_main_t *sm = &span_main;

  reg = vl_api_client_index_to_registration (mp->client_index);
  if (!reg)
    return;

  span_feat_t sf = mp->is_l2 ? SPAN_FEAT_L2 : SPAN_FEAT_DEVICE;
  /* *INDENT-OFF* */
  vec_foreach (si, sm->interfaces)
  {
    span_mirror_t * rxm = &si->mirror_rxtx[sf][VLIB_RX];
    span_mirror_t * txm = &si->mirror_rxtx[sf][VLIB_TX];
    if (rxm->num_mirror_ports || txm->num_mirror_ports)
    {
      clib_bitmap_t *b;
      u32 i;
      b = clib_bitmap_dup_or (rxm->mirror_ports, txm->mirror_ports);
      clib_bitmap_foreach (i, b, (
        {
          rmp = vl_msg_api_alloc (sizeof (*rmp));
          memset (rmp, 0, sizeof (*rmp));
          rmp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_SPAN_DETAILS);
          rmp->context = mp->context;

          rmp->sw_if_index_from = htonl (si - sm->interfaces);
          rmp->sw_if_index_to = htonl (i);
          rmp->state = (u8) (clib_bitmap_get (rxm->mirror_ports, i) +
                             clib_bitmap_get (txm->mirror_ports, i) * 2);
	  rmp->is_l2 = mp->is_l2;

          vl_api_send_msg (reg, (u8 *) rmp);
        }));
      clib_bitmap_free (b);
    }
  }
  /* *INDENT-ON* */
}

/*
 * span_api_hookup
 * Add the SPAN API message handlers to the table.
 * vlib has already mapped shared memory and
 * added the client registration handlers.
 * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process()
 */
#define vl_msg_name_crc_list
#include <vnet/vnet_all_api_h.h>
#undef vl_msg_name_crc_list

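/* Add the (name, crc) pair of each SPAN message to the message table */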
static void
setup_message_id_table (api_main_t * am)
{
#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
  foreach_vl_msg_name_crc_span;
#undef _
}

static clib_error_t *
span_api_hookup (vlib_main_t * vm)
{
  api_main_t *am = &api_main;

#define _(N,n)                                                  \
    vl_msg_api_set_handlers(VL_API_##N, #n,                     \
                           vl_api_##n##_t_handler,              \
                           vl_noop_handler,                     \
                           vl_api_##n##_t_endian,               \
                           vl_api_##n##_t_print,                \
                           sizeof(vl_api_##n##_t), 1);
  foreach_vpe_api_msg;
#undef _

  /*
   * Set up the (msg_name, crc, message-id) table
   */
  setup_message_id_table (am);

  return 0;
}

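/*
 * Register span_api_hookup to run at API initialization time,
 * installing the handlers above.
 */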
VLIB_API_INIT_FUNCTION (span_api_hookup);

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */