path: root/src/vnet/ipsec-gre/interface.c
blob: 4faf66ddde88c1a2bb595b7902befdb012cc1e2f
/*
 * interface.c: ipsec-gre tunnel interfaces
 *
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @file
 * @brief L2-GRE over IPSec tunnel interface.
 *
 * Creates the ipsec-gre tunnel interface and provides CLI commands to
 * create, delete, and display ipsec-gre tunnels.
 */

#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vnet/ipsec-gre/ipsec_gre.h>
#include <vnet/ip/format.h>
#include <vnet/ipsec/ipsec.h>
#include <vnet/l2/l2_input.h>

#include <vnet/ipsec/esp.h>

u8 *
format_ipsec_gre_tunnel (u8 * s, va_list * args)
{
  ipsec_gre_tunnel_t *t = va_arg (*args, ipsec_gre_tunnel_t *);
  ipsec_gre_main_t *gm = &ipsec_gre_main;

  s = format (s,
	      "[%d] %U (src) %U (dst) local-sa %d remote-sa %d",
	      t - gm->tunnels,
	      format_ip4_address, &t->tunnel_src,
	      format_ip4_address, &t->tunnel_dst,
	      t->local_sa_id, t->remote_sa_id);
  return s;
}
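
/*
 * Illustrative output of format_ipsec_gre_tunnel for a tunnel at pool
 * index 0 (the addresses and SA ids below are assumptions, not values
 * taken from this file):
 *
 *   [0] 192.168.1.1 (src) 192.168.1.2 (dst) local-sa 10 remote-sa 20
 */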

static clib_error_t *
show_ipsec_gre_tunnel_command_fn (vlib_main_t * vm,
				  unformat_input_t * input,
				  vlib_cli_command_t * cmd)
{
  ipsec_gre_main_t *igm = &ipsec_gre_main;
  ipsec_gre_tunnel_t *t;

  if (pool_elts (igm->tunnels) == 0)
    vlib_cli_output (vm, "No IPSec GRE tunnels configured...");

  /* *INDENT-OFF* */
  pool_foreach (t, igm->tunnels,
  ({
    vlib_cli_output (vm, "%U", format_ipsec_gre_tunnel, t);
  }));
  /* *INDENT-ON* */

  return 0;
}

/* *INDENT-OFF* */
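/*?
 * Display all configured ipsec-gre tunnels.
 *
 * @cliexpar
 * Illustrative output (the addresses and SA ids are assumptions):
 * @cliexstart{show ipsec gre tunnel}
 * [0] 192.168.1.1 (src) 192.168.1.2 (dst) local-sa 10 remote-sa 20
 * @cliexend
?*/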
VLIB_CLI_COMMAND (show_ipsec_gre_tunnel_command, static) = {
    .path = "show ipsec gre tunnel",
    .function = show_ipsec_gre_tunnel_command_fn,
};
/* *INDENT-ON* */

/* force inclusion from application's main.c */
clib_error_t *
ipsec_gre_interface_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (ipsec_gre_interface_init);

/**
 * @brief Add or delete an ipsec-gre tunnel interface.
 *
 * @param *a vnet_ipsec_gre_add_del_tunnel_args_t - tunnel interface parameters
 * @param *sw_if_indexp u32 - software interface index of the tunnel
 * @return int - 0 on success, otherwise a <code>VNET_API_ERROR_*</code> code
 */
int
vnet_ipsec_gre_add_del_tunnel (vnet_ipsec_gre_add_del_tunnel_args_t * a,
			       u32 * sw_if_indexp)
{
  ipsec_gre_main_t *igm = &ipsec_gre_main;
  vnet_main_t *vnm = igm->vnet_main;
  ip4_main_t *im = &ip4_main;
  ipsec_gre_tunnel_t *t;
  vnet_hw_interface_t *hi;
  u32 hw_if_index, sw_if_index;
  u32 slot;
  uword *p;
  u64 key;
  ipsec_add_del_ipsec_gre_tunnel_args_t args;

  memset (&args, 0, sizeof (args));
  args.is_add = a->is_add;
  args.local_sa_id = a->lsa;
  args.remote_sa_id = a->rsa;
  args.local_ip.as_u32 = a->src.as_u32;
  args.remote_ip.as_u32 = a->dst.as_u32;

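  /* Tunnels are keyed on a single u64: the src address in the upper 32
     bits and the dst address in the lower 32. */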
  key = (u64) a->src.as_u32 << 32 | (u64) a->dst.as_u32;
  p = hash_get (igm->tunnel_by_key, key);

  if (a->is_add)
    {
      /* check if same src/dst pair exists */
      if (p)
	return VNET_API_ERROR_INVALID_VALUE;

      pool_get_aligned (igm->tunnels, t, CLIB_CACHE_LINE_BYTES);
      memset (t, 0, sizeof (*t));

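      /* Reuse a hw interface left over from a deleted tunnel if one is
         available; otherwise register a new one. */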
      if (vec_len (igm->free_ipsec_gre_tunnel_hw_if_indices) > 0)
	{
	  vnet_interface_main_t *im = &vnm->interface_main;

	  hw_if_index = igm->free_ipsec_gre_tunnel_hw_if_indices
	    [vec_len (igm->free_ipsec_gre_tunnel_hw_if_indices) - 1];
	  _vec_len (igm->free_ipsec_gre_tunnel_hw_if_indices) -= 1;

	  hi = vnet_get_hw_interface (vnm, hw_if_index);
	  hi->dev_instance = t - igm->tunnels;
	  hi->hw_instance = hi->dev_instance;

	  /* clear old stats of freed tunnel before reuse */
	  sw_if_index = hi->sw_if_index;
	  vnet_interface_counter_lock (im);
	  vlib_zero_combined_counter
	    (&im->combined_sw_if_counters[VNET_INTERFACE_COUNTER_TX],
	     sw_if_index);
	  vlib_zero_combined_counter
	    (&im->combined_sw_if_counters[VNET_INTERFACE_COUNTER_RX],
	     sw_if_index);
	  vlib_zero_simple_counter
	    (&im->sw_if_counters[VNET_INTERFACE_COUNTER_DROP], sw_if_index);
	  vnet_interface_counter_unlock (im);
	}
      else
	{
	  hw_if_index = vnet_register_interface
	    (vnm, ipsec_gre_device_class.index, t - igm->tunnels,
	     ipsec_gre_hw_interface_class.index, t - igm->tunnels);
	  hi = vnet_get_hw_interface (vnm, hw_if_index);
	  sw_if_index = hi->sw_if_index;
	}

      t->hw_if_index = hw_if_index;
      t->sw_if_index = sw_if_index;
      t->local_sa_id = a->lsa;
      t->remote_sa_id = a->rsa;
      t->local_sa = ipsec_get_sa_index_by_sa_id (a->lsa);
      t->remote_sa = ipsec_get_sa_index_by_sa_id (a->rsa);

      ip4_sw_interface_enable_disable (sw_if_index, 1);

      vec_validate_init_empty (igm->tunnel_index_by_sw_if_index,
			       sw_if_index, ~0);
      igm->tunnel_index_by_sw_if_index[sw_if_index] = t - igm->tunnels;

      vec_validate (im->fib_index_by_sw_if_index, sw_if_index);

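      /* Smallest frame we can send: the 64-byte Ethernet minimum plus the
         per-packet GRE, outer IPv4, ESP header and ESP footer overhead. */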
      hi->min_packet_bytes = 64 + sizeof (gre_header_t) +
	sizeof (ip4_header_t) + sizeof (esp_header_t) + sizeof (esp_footer_t);

      /* Default tunnel MTU (jumbo frame size). */
      /* TODO: Should take tunnel encapsulation overhead into consideration */
      vnet_sw_interface_set_mtu (vnm, sw_if_index, 9000);

      clib_memcpy (&t->tunnel_src, &a->src, sizeof (t->tunnel_src));
      clib_memcpy (&t->tunnel_dst, &a->dst, sizeof (t->tunnel_dst));

      hash_set (igm->tunnel_by_key, key, t - igm->tunnels);

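      /* Wire this tunnel's TX node to esp-encrypt so output packets are
         handed to IPSec; the returned slot must match the next-index used
         by the ipsec-gre output path. */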
      slot = vlib_node_add_named_next_with_slot
	(vnm->vlib_main, hi->tx_node_index, "esp-encrypt",
	 IPSEC_GRE_OUTPUT_NEXT_ESP_ENCRYPT);

      ASSERT (slot == IPSEC_GRE_OUTPUT_NEXT_ESP_ENCRYPT);

    }
  else
    {				/* !is_add => delete */
      /* tunnel needs to exist */
      if (!p)
	return VNET_API_ERROR_NO_SUCH_ENTRY;

      t = pool_elt_at_index (igm->tunnels, p[0]);

      sw_if_index = t->sw_if_index;
      ip4_sw_interface_enable_disable (sw_if_index, 0);
      vnet_sw_interface_set_flags (vnm, sw_if_index, 0 /* down */ );
      /* make sure tunnel is removed from l2 bd or xconnect */
      set_int_l2_mode (igm->vlib_main, vnm, MODE_L3, sw_if_index, 0,
		       L2_BD_PORT_TYPE_NORMAL, 0, 0);
      vec_add1 (igm->free_ipsec_gre_tunnel_hw_if_indices, t->hw_if_index);
      igm->tunnel_index_by_sw_if_index[sw_if_index] = ~0;

      hash_unset (igm->tunnel_by_key, key);
      pool_put (igm->tunnels, t);
    }

  if (sw_if_indexp)
    *sw_if_indexp = sw_if_index;

  return ipsec_add_del_ipsec_gre_tunnel (vnm, &args);
}
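
/*
 * Illustrative programmatic use, as a sketch only. The addresses
 * (192.168.1.1 / 192.168.1.2, i.e. 0xc0a80101 / 0xc0a80102) and the SA
 * ids are assumptions, and the referenced SAs are expected to exist:
 *
 *   vnet_ipsec_gre_add_del_tunnel_args_t a = { 0 };
 *   u32 sw_if_index;
 *
 *   a.is_add = 1;
 *   a.lsa = 10;
 *   a.rsa = 20;
 *   a.src.as_u32 = clib_host_to_net_u32 (0xc0a80101);
 *   a.dst.as_u32 = clib_host_to_net_u32 (0xc0a80102);
 *
 *   int rv = vnet_ipsec_gre_add_del_tunnel (&a, &sw_if_index);
 */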

static clib_error_t *
create_ipsec_gre_tunnel_command_fn (vlib_main_t * vm,
				    unformat_input_t * input,
				    vlib_cli_command_t * cmd)
{
  unformat_input_t _line_input, *line_input = &_line_input;
  u8 is_add = 1;
  u32 num_m_args = 0;
  ip4_address_t src, dst;
  u32 lsa = 0, rsa = 0;
  vnet_ipsec_gre_add_del_tunnel_args_t _a, *a = &_a;
  int rv;
  u32 sw_if_index;
  clib_error_t *error = NULL;

  /* Get a line of input. */
  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "del"))
	is_add = 0;
      else if (unformat (line_input, "src %U", unformat_ip4_address, &src))
	num_m_args++;
      else if (unformat (line_input, "dst %U", unformat_ip4_address, &dst))
	num_m_args++;
      else if (unformat (line_input, "local-sa %d", &lsa))
	num_m_args++;
      else if (unformat (line_input, "remote-sa %d", &rsa))
	num_m_args++;
      else
	{
	  error = clib_error_return (0, "unknown input `%U'",
				     format_unformat_error, line_input);
	  goto done;
	}
    }

  if (num_m_args < 4)
    {
      error = clib_error_return (0, "mandatory argument(s) missing");
      goto done;
    }

  if (memcmp (&src, &dst, sizeof (src)) == 0)
    {
      error = clib_error_return (0, "src and dst are identical");
      goto done;
    }

  memset (a, 0, sizeof (*a));
  a->is_add = is_add;
  a->lsa = lsa;
  a->rsa = rsa;
  clib_memcpy (&a->src, &src, sizeof (src));
  clib_memcpy (&a->dst, &dst, sizeof (dst));

  rv = vnet_ipsec_gre_add_del_tunnel (a, &sw_if_index);

  switch (rv)
    {
    case 0:
      vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name,
		       vnet_get_main (), sw_if_index);
      break;
    case VNET_API_ERROR_INVALID_VALUE:
      error = clib_error_return (0, "GRE tunnel already exists...");
      goto done;
    default:
      error = clib_error_return (0,
				 "vnet_ipsec_gre_add_del_tunnel returned %d",
				 rv);
      goto done;
    }

done:
  unformat_free (line_input);

  return error;
}

/* *INDENT-OFF* */
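/*?
 * Create or delete an ipsec-gre tunnel. The SAs named by local-sa and
 * remote-sa are expected to exist already.
 *
 * @cliexpar
 * Illustrative example (the addresses and SA ids are assumptions):
 * @cliexcmd{create ipsec gre tunnel src 192.168.1.1 dst 192.168.1.2 local-sa 10 remote-sa 20}
?*/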
VLIB_CLI_COMMAND (create_ipsec_gre_tunnel_command, static) = {
  .path = "create ipsec gre tunnel",
  .short_help = "create ipsec gre tunnel src <addr> dst <addr> "
                "local-sa <id> remote-sa <id> [del]",
  .function = create_ipsec_gre_tunnel_command_fn,
};
/* *INDENT-ON* */

/*
* fd.io coding-style-patch-verification: ON
*
* Local Variables:
* eval: (c-set-style "gnu")
* End:
*/