path: root/src/plugins/ioam/lib-vxlan-gpe/vxlan_gpe_ioam_trace.c
/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vppinfra/error.h>

#include <vnet/vxlan-gpe/vxlan_gpe.h>
#include <vnet/vxlan-gpe/vxlan_gpe_packet.h>

#include <vppinfra/hash.h>
#include <vppinfra/error.h>
#include <vppinfra/elog.h>

#include <ioam/lib-trace/trace_util.h>
#include <ioam/lib-trace/trace_config.h>
#include <ioam/lib-vxlan-gpe/vxlan_gpe_ioam.h>

/* Timestamp precision multipliers for seconds, milliseconds, microseconds
 * and nanoseconds respectively.
 */
static f64 trace_tsp_mul[4] = { 1, 1e3, 1e6, 1e9 };
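
/* Illustrative example (numbers assumed, not taken from a live trace):
 * with profile->trace_tsp == 2 (microseconds), a wall-clock time of
 * 1.25 s is scaled to 1.25 * 1e6 = 1250000; only the low 32 bits of the
 * result are carried in each per-hop record (see BIT_TIMESTAMP below).
 */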

typedef union
{
  u64 as_u64;
  u32 as_u32[2];
} time_u64_t;


/* *INDENT-OFF* */
typedef CLIB_PACKED(struct {
  vxlan_gpe_ioam_option_t hdr;
  u8 ioam_trace_type;
  u8 data_list_elts_left;
  u32 elts[0]; /* Variable-length data; keep the element type generic */
}) vxlan_gpe_ioam_trace_option_t;
/* *INDENT-ON* */
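
/* Wire layout: the common option header (hdr.type, hdr.length) is followed
 * by one byte each for ioam_trace_type (a bitmap selecting which fields
 * every hop records) and data_list_elts_left, then the flexible elts[]
 * array of fixed-size per-hop records, filled from the back towards the
 * front as the packet traverses the path.
 */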


#define foreach_vxlan_gpe_ioam_trace_stats				\
  _(SUCCESS, "Pkts updated with TRACE records")					\
  _(FAILED, "Errors in TRACE due to lack of TRACE records")
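
/* The _() x-macro expands this list twice: once into the string table
 * below and once into the VXLAN_GPE_IOAM_TRACE_* counter enum, keeping
 * counter names and indices in sync.
 */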

static char *vxlan_gpe_ioam_trace_stats_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_gpe_ioam_trace_stats
#undef _
};

typedef enum
{
#define _(sym,str) VXLAN_GPE_IOAM_TRACE_##sym,
  foreach_vxlan_gpe_ioam_trace_stats
#undef _
    VXLAN_GPE_IOAM_TRACE_N_STATS,
} vxlan_gpe_ioam_trace_stats_t;


typedef struct
{
  /* stats */
  u64 counters[ARRAY_LEN (vxlan_gpe_ioam_trace_stats_strings)];

  /* convenience */
  vlib_main_t *vlib_main;
  vnet_main_t *vnet_main;
} vxlan_gpe_ioam_trace_main_t;

vxlan_gpe_ioam_trace_main_t vxlan_gpe_ioam_trace_main;

int
vxlan_gpe_ioam_add_register_option (u8 option,
				    u8 size,
				    int rewrite_options (u8 * rewrite_string,
							 u8 * rewrite_size))
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;

  ASSERT ((u32) option < ARRAY_LEN (hm->add_options));

  /* Already registered */
  if (hm->add_options[option])
    return (-1);

  hm->add_options[option] = rewrite_options;
  hm->options_size[option] = size;

  return (0);
}

int
vxlan_gpe_add_unregister_option (u8 option)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;

  ASSERT ((u32) option < ARRAY_LEN (hm->add_options));

  /* Not registered */
  if (!hm->add_options[option])
    return (-1);

  hm->add_options[option] = NULL;
  hm->options_size[option] = 0;
  return (0);
}


int
vxlan_gpe_ioam_register_option (u8 option,
				int options (vlib_buffer_t * b,
					     vxlan_gpe_ioam_option_t * opt,
					     u8 is_ipv4, u8 use_adj),
				u8 * trace (u8 * s,
					    vxlan_gpe_ioam_option_t * opt))
{
  vxlan_gpe_ioam_main_t *im = &vxlan_gpe_ioam_main;

  ASSERT ((u32) option < ARRAY_LEN (im->options));

  /* Already registered */
  if (im->options[option])
    return (-1);

  im->options[option] = options;
  im->trace[option] = trace;

  return (0);
}

int
vxlan_gpe_ioam_unregister_option (u8 option)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;

  ASSERT ((u32) option < ARRAY_LEN (hm->options));

  /* Not registered */
  if (!hm->options[option])
    return (-1);

  hm->options[option] = NULL;
  hm->trace[option] = NULL;

  return (0);
}


always_inline void
vxlan_gpe_ioam_trace_stats_increment_counter (u32 counter_index,
					      u64 increment)
{
  vxlan_gpe_ioam_trace_main_t *hm = &vxlan_gpe_ioam_trace_main;

  hm->counters[counter_index] += increment;
}


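/* Formats one per-hop record according to the trace-type bitmap.
 * Sample output for a record carrying all four field groups (values
 * illustrative):
 *   ttl 0x3f node id 0x10 ingress 0x1 egress 0x2 ts 0x5cbf0000
 *   app 0xdeadbeef
 */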
static u8 *
format_ioam_data_list_element (u8 * s, va_list * args)
{
  u32 *elt = va_arg (*args, u32 *);
  u8 *trace_type_p = va_arg (*args, u8 *);
  u8 trace_type = *trace_type_p;


  if (trace_type & BIT_TTL_NODEID)
    {
      u32 ttl_node_id_host_byte_order = clib_net_to_host_u32 (*elt);
      s = format (s, "ttl 0x%x node id 0x%x ",
		  ttl_node_id_host_byte_order >> 24,
		  ttl_node_id_host_byte_order & 0x00FFFFFF);

      elt++;
    }

  if (trace_type & BIT_ING_INTERFACE)
    {
      u32 ingress_host_byte_order = clib_net_to_host_u32 (*elt);
      s = format (s, "ingress 0x%x egress 0x%x ",
		  ingress_host_byte_order >> 16,
		  ingress_host_byte_order & 0xFFFF);
      elt++;
    }

  if (trace_type & BIT_TIMESTAMP)
    {
      u32 ts_in_host_byte_order = clib_net_to_host_u32 (*elt);
      s = format (s, "ts 0x%x \n", ts_in_host_byte_order);
      elt++;
    }

  if (trace_type & BIT_APPDATA)
    {
      u32 appdata_in_host_byte_order = clib_net_to_host_u32 (*elt);
      s = format (s, "app 0x%x ", appdata_in_host_byte_order);
      elt++;
    }

  return s;
}



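/* Builds the trace option skeleton into rewrite_string and reports its
 * size. Worked example (profile values assumed): num_elts = 3 with a
 * trace_type whose fetch_trace_data_size () is 8 bytes gives
 * hdr.length = 2 + 3 * 8 = 26 and
 * *rewrite_size = sizeof (vxlan_gpe_ioam_trace_option_t) + 24.
 */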
int
vxlan_gpe_ioam_trace_rewrite_handler (u8 * rewrite_string, u8 * rewrite_size)
{
  vxlan_gpe_ioam_trace_option_t *trace_option = NULL;
  u8 trace_data_size = 0;
  u8 trace_option_elts = 0;
  trace_profile *profile = NULL;


  profile = trace_profile_find ();

  if (PREDICT_FALSE (!profile))
    {
      return (-1);
    }

  if (PREDICT_FALSE (!rewrite_string))
    return -1;

  trace_option_elts = profile->num_elts;
  trace_data_size = fetch_trace_data_size (profile->trace_type);
  trace_option = (vxlan_gpe_ioam_trace_option_t *) rewrite_string;
  trace_option->hdr.type = VXLAN_GPE_OPTION_TYPE_IOAM_TRACE;
  trace_option->hdr.length = 2 /*ioam_trace_type,data_list_elts_left */  +
    trace_option_elts * trace_data_size;
  trace_option->ioam_trace_type = profile->trace_type & TRACE_TYPE_MASK;
  trace_option->data_list_elts_left = trace_option_elts;
  *rewrite_size =
    sizeof (vxlan_gpe_ioam_trace_option_t) +
    (trace_option_elts * trace_data_size);

  return 0;
}


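/* Per-packet handler: claims the next free slot in elts[] (slots are
 * filled from the back, so data_list_elts_left doubles as the slot
 * index) and writes this node's TTL/node-id, interface, timestamp and
 * app-data words, as selected by ioam_trace_type. E.g. with 8-byte
 * records (assumed) and data_list_elts_left = 2 after the decrement,
 * this node's record starts at elts[2 * 8 / 4] = elts[4].
 */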
int
vxlan_gpe_ioam_trace_data_list_handler (vlib_buffer_t * b,
					vxlan_gpe_ioam_option_t * opt,
					u8 is_ipv4, u8 use_adj)
{
  u8 elt_index = 0;
  vxlan_gpe_ioam_trace_option_t *trace =
    (vxlan_gpe_ioam_trace_option_t *) opt;
  time_u64_t time_u64;
  u32 *elt;
  int rv = 0;
  trace_profile *profile = NULL;
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;


  profile = trace_profile_find ();

  if (PREDICT_FALSE (!profile))
    {
      return (-1);
    }


  time_u64.as_u64 = 0;

  if (PREDICT_TRUE (trace->data_list_elts_left))
    {
      trace->data_list_elts_left--;
      /* fetch_trace_data_size returns the record size in bytes. Convert
       * it to 32-bit words to index this node's slot in elts[].
       */
      elt_index =
	trace->data_list_elts_left *
	fetch_trace_data_size (trace->ioam_trace_type) / 4;
      elt = &trace->elts[elt_index];
      if (is_ipv4)
	{
	  if (trace->ioam_trace_type & BIT_TTL_NODEID)
	    {
	      ip4_header_t *ip0 = vlib_buffer_get_current (b);
	      /* The transit case is the only one where the TTL decrement
	       * happens before iOAM processing, so use_adj is overloaded to
	       * compensate here. A dedicated flag would be cleaner than
	       * overloading use_adj.
	       */
	      *elt = clib_host_to_net_u32 (((ip0->ttl - 1 + use_adj) << 24) |
					   profile->node_id);
	      elt++;
	    }

	  if (trace->ioam_trace_type & BIT_ING_INTERFACE)
	    {
	      u16 tx_if = 0;
	      u32 adj_index = vnet_buffer (b)->ip.adj_index[VLIB_TX];

	      if (use_adj)
		{
		  ip_adjacency_t *adj = adj_get (adj_index);
		  tx_if = adj->rewrite_header.sw_if_index & 0xFFFF;
		}

	      *elt =
		(vnet_buffer (b)->sw_if_index[VLIB_RX] & 0xFFFF) << 16 |
		tx_if;
	      *elt = clib_host_to_net_u32 (*elt);
	      elt++;
	    }
	}
      else
	{
	  if (trace->ioam_trace_type & BIT_TTL_NODEID)
	    {
	      ip6_header_t *ip0 = vlib_buffer_get_current (b);
	      *elt = clib_host_to_net_u32 ((ip0->hop_limit << 24) |
					   profile->node_id);
	      elt++;
	    }
	  if (trace->ioam_trace_type & BIT_ING_INTERFACE)
	    {
	      u16 tx_if = 0;
	      u32 adj_index = vnet_buffer (b)->ip.adj_index[VLIB_TX];

	      if (use_adj)
		{
		  ip_adjacency_t *adj = adj_get (adj_index);
		  tx_if = adj->rewrite_header.sw_if_index & 0xFFFF;
		}

	      *elt =
		(vnet_buffer (b)->sw_if_index[VLIB_RX] & 0xFFFF) << 16 |
		tx_if;
	      *elt = clib_host_to_net_u32 (*elt);
	      elt++;
	    }
	}

      if (trace->ioam_trace_type & BIT_TIMESTAMP)
	{
	  /* Send least significant 32 bits */
	  f64 time_f64 =
	    (f64) (((f64) hm->unix_time_0) +
		   (vlib_time_now (hm->vlib_main) - hm->vlib_time_0));

	  time_u64.as_u64 = time_f64 * trace_tsp_mul[profile->trace_tsp];
	  *elt = clib_host_to_net_u32 (time_u64.as_u32[0]);
	  elt++;
	}

      if (trace->ioam_trace_type & BIT_APPDATA)
	{
	  /* $$$ set elt0->app_data */
	  *elt = clib_host_to_net_u32 (profile->app_data);
	  elt++;
	}
      vxlan_gpe_ioam_trace_stats_increment_counter
	(VXLAN_GPE_IOAM_TRACE_SUCCESS, 1);
    }
  else
    {
      vxlan_gpe_ioam_trace_stats_increment_counter
	(VXLAN_GPE_IOAM_TRACE_FAILED, 1);
    }
  return (rv);
}

u8 *
vxlan_gpe_ioam_trace_data_list_trace_handler (u8 * s,
					      vxlan_gpe_ioam_option_t * opt)
{
  vxlan_gpe_ioam_trace_option_t *trace;
  u8 trace_data_size_in_words = 0;
  u32 *elt;
  int elt_index = 0;

  trace = (vxlan_gpe_ioam_trace_option_t *) opt;
  s =
    format (s, "  Trace Type 0x%x, %d elts left\n", trace->ioam_trace_type,
	    trace->data_list_elts_left);
  trace_data_size_in_words =
    fetch_trace_data_size (trace->ioam_trace_type) / 4;
  elt = &trace->elts[0];
  while ((u8 *) elt < ((u8 *) (&trace->elts[0]) + trace->hdr.length - 2
		       /* -2 accounts for ioam_trace_type,elts_left */ ))
    {
      s = format (s, "    [%d] %U\n", elt_index,
		  format_ioam_data_list_element,
		  elt, &trace->ioam_trace_type);
      elt_index++;
      elt += trace_data_size_in_words;
    }
  return (s);
}


static clib_error_t *
vxlan_gpe_show_ioam_trace_cmd_fn (vlib_main_t * vm,
				  unformat_input_t * input,
				  vlib_cli_command_t * cmd)
{
  vxlan_gpe_ioam_trace_main_t *hm = &vxlan_gpe_ioam_trace_main;
  u8 *s = 0;
  int i = 0;

  for (i = 0; i < VXLAN_GPE_IOAM_TRACE_N_STATS; i++)
    {
      s = format (s, " %s - %lu\n", vxlan_gpe_ioam_trace_stats_strings[i],
		  hm->counters[i]);
    }

  vlib_cli_output (vm, "%v", s);
  vec_free (s);
  return 0;
}


/* *INDENT-OFF* */
VLIB_CLI_COMMAND (vxlan_gpe_show_ioam_trace_cmd, static) = {
  .path = "show ioam vxlan-gpe trace",
  .short_help = "iOAM trace statistics",
  .function = vxlan_gpe_show_ioam_trace_cmd_fn,
};
/* *INDENT-ON* */


static clib_error_t *
vxlan_gpe_ioam_trace_init (vlib_main_t * vm)
{
  vxlan_gpe_ioam_trace_main_t *hm = &vxlan_gpe_ioam_trace_main;

  hm->vlib_main = vm;
  hm->vnet_main = vnet_get_main ();
  clib_memset (hm->counters, 0, sizeof (hm->counters));

  if (vxlan_gpe_ioam_register_option
      (VXLAN_GPE_OPTION_TYPE_IOAM_TRACE,
       vxlan_gpe_ioam_trace_data_list_handler,
       vxlan_gpe_ioam_trace_data_list_trace_handler) < 0)
    return (clib_error_create
	    ("registration of VXLAN_GPE_OPTION_TYPE_IOAM_TRACE failed"));


  if (vxlan_gpe_ioam_add_register_option
      (VXLAN_GPE_OPTION_TYPE_IOAM_TRACE,
       sizeof (vxlan_gpe_ioam_trace_option_t),
       vxlan_gpe_ioam_trace_rewrite_handler) < 0)
    return (clib_error_create
	    ("registration of VXLAN_GPE_OPTION_TYPE_IOAM_TRACE for rewrite failed"));


  return (0);
}

/* *INDENT-OFF* */
VLIB_INIT_FUNCTION (vxlan_gpe_ioam_trace_init) =
{
  .runs_after = VLIB_INITS("ip_main_init", "ip6_lookup_init",
                           "vxlan_gpe_init"),
};
/* *INDENT-ON* */


int
vxlan_gpe_trace_profile_cleanup (void)
{
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;

  hm->options_size[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE] = 0;

  return 0;

}

static int
vxlan_gpe_ioam_trace_get_sizeof_handler (u32 * result)
{
  u16 size = 0;
  u8 trace_data_size = 0;
  trace_profile *profile = NULL;

  *result = 0;

  profile = trace_profile_find ();

  if (PREDICT_FALSE (!profile))
    {
      return (-1);
    }

  trace_data_size = fetch_trace_data_size (profile->trace_type);
  if (PREDICT_FALSE (trace_data_size == 0))
    return VNET_API_ERROR_INVALID_VALUE;

  if (PREDICT_FALSE (profile->num_elts * trace_data_size > 254))
    return VNET_API_ERROR_INVALID_VALUE;

  size +=
    sizeof (vxlan_gpe_ioam_trace_option_t) +
    profile->num_elts * trace_data_size;
  *result = size;

  return 0;
}


int
vxlan_gpe_trace_profile_setup (void)
{
  u32 trace_size = 0;
  vxlan_gpe_ioam_main_t *hm = &vxlan_gpe_ioam_main;

  trace_profile *profile = NULL;


  profile = trace_profile_find ();

  if (PREDICT_FALSE (!profile))
    {
      return (-1);
    }


  if (vxlan_gpe_ioam_trace_get_sizeof_handler (&trace_size) < 0)
    return (-1);

  hm->options_size[VXLAN_GPE_OPTION_TYPE_IOAM_TRACE] = trace_size;

  return (0);
}



/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * ethernet_node.c: ethernet packet processing
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 *  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 *  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 *  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 *  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 *  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 *  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <vlib/vlib.h>
#include <vnet/pg/pg.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ethernet/p2p_ethernet.h>
#include <vnet/devices/pipe/pipe.h>
#include <vppinfra/sparse_vec.h>
#include <vnet/l2/l2_bvi.h>
#include <vnet/classify/trace_classify.h>

#define foreach_ethernet_input_next		\
  _ (PUNT, "error-punt")			\
  _ (DROP, "error-drop")			\
  _ (LLC, "llc-input")				\
  _ (IP4_INPUT, "ip4-input")			\
  _ (IP4_INPUT_NCS, "ip4-input-no-checksum")

typedef enum
{
#define _(s,n) ETHERNET_INPUT_NEXT_##s,
  foreach_ethernet_input_next
#undef _
    ETHERNET_INPUT_N_NEXT,
} ethernet_input_next_t;

typedef struct
{
  u8 packet_data[32];
  u16 frame_flags;
  ethernet_input_frame_t frame_data;
} ethernet_input_trace_t;

static u8 *
format_ethernet_input_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
  ethernet_input_trace_t *t = va_arg (*va, ethernet_input_trace_t *);
  u32 indent = format_get_indent (s);

  if (t->frame_flags)
    {
      s = format (s, "frame: flags 0x%x", t->frame_flags);
      if (t->frame_flags & ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX)
	s = format (s, ", hw-if-index %u, sw-if-index %u",
		    t->frame_data.hw_if_index, t->frame_data.sw_if_index);
      s = format (s, "\n%U", format_white_space, indent);
    }
  s = format (s, "%U", format_ethernet_header, t->packet_data);

  return s;
}

extern vlib_node_registration_t ethernet_input_node;

typedef enum
{
  ETHERNET_INPUT_VARIANT_ETHERNET,
  ETHERNET_INPUT_VARIANT_ETHERNET_TYPE,
  ETHERNET_INPUT_VARIANT_NOT_L2,
} ethernet_input_variant_t;


// Parse the ethernet header to extract vlan tags and innermost ethertype
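// Example (illustrative): for a double-tagged frame with ethertypes
// 0x8100 / 0x8100 / 0x0800, orig_type keeps 0x8100 (to distinguish dot1q
// from dot1ad later), outer_id and inner_id hold the two VLAN IDs, type
// ends up 0x0800 (IPv4) and match_flags requests a 2-tag subinterface
// match.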
static_always_inline void
parse_header (ethernet_input_variant_t variant,
	      vlib_buffer_t * b0,
	      u16 * type,
	      u16 * orig_type,
	      u16 * outer_id, u16 * inner_id, u32 * match_flags)
{
  u8 vlan_count;

  if (variant == ETHERNET_INPUT_VARIANT_ETHERNET
      || variant == ETHERNET_INPUT_VARIANT_NOT_L2)
    {
      ethernet_header_t *e0;

      e0 = (void *) (b0->data + b0->current_data);

      vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
      b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;

      vlib_buffer_advance (b0, sizeof (e0[0]));

      *type = clib_net_to_host_u16 (e0->type);
    }
  else if (variant == ETHERNET_INPUT_VARIANT_ETHERNET_TYPE)
    {
      // here when prior node was LLC/SNAP processing
      u16 *e0;

      e0 = (void *) (b0->data + b0->current_data);

      vlib_buffer_advance (b0, sizeof (e0[0]));

      *type = clib_net_to_host_u16 (e0[0]);
    }

  // save for distinguishing between dot1q and dot1ad later
  *orig_type = *type;

  // default the tags to 0 (used if there is no corresponding tag)
  *outer_id = 0;
  *inner_id = 0;

  *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_0_TAG;
  vlan_count = 0;

  // check for vlan encaps
  if (ethernet_frame_is_tagged (*type))
    {
      ethernet_vlan_header_t *h0;
      u16 tag;

      *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_1_TAG;

      h0 = (void *) (b0->data + b0->current_data);

      tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);

      *outer_id = tag & 0xfff;
      if (0 == *outer_id)
	*match_flags &= ~SUBINT_CONFIG_MATCH_1_TAG;

      *type = clib_net_to_host_u16 (h0->type);

      vlib_buffer_advance (b0, sizeof (h0[0]));
      vlan_count = 1;

      if (*type == ETHERNET_TYPE_VLAN)
	{
	  // Double tagged packet
	  *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_2_TAG;

	  h0 = (void *) (b0->data + b0->current_data);

	  tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);

	  *inner_id = tag & 0xfff;

	  *type = clib_net_to_host_u16 (h0->type);

	  vlib_buffer_advance (b0, sizeof (h0[0]));
	  vlan_count = 2;
	  if (*type == ETHERNET_TYPE_VLAN)
	    {
	      // More than double tagged packet
	      *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_3_TAG;

	      vlib_buffer_advance (b0, sizeof (h0[0]));
	      vlan_count = 3;	// "unknown" number, i.e. 3 or more
	    }
	}
    }
  ethernet_buffer_set_vlan_count (b0, vlan_count);
}

static_always_inline void
ethernet_input_inline_dmac_check (vnet_hw_interface_t * hi,
				  u64 * dmacs, u8 * dmacs_bad,
				  u32 n_packets, ethernet_interface_t * ei,
				  u8 have_sec_dmac);

// Determine the subinterface for this packet, given the result of the
// vlan table lookups and vlan header parsing. Check the most specific
// matches first.
static_always_inline void
identify_subint (vnet_hw_interface_t * hi,
		 vlib_buffer_t * b0,
		 u32 match_flags,
		 main_intf_t * main_intf,
		 vlan_intf_t * vlan_intf,
		 qinq_intf_t * qinq_intf,
		 u32 * new_sw_if_index, u8 * error0, u32 * is_l2)
{
  u32 matched;

  matched = eth_identify_subint (hi, match_flags, main_intf, vlan_intf,
				 qinq_intf, new_sw_if_index, error0, is_l2);

  if (matched)
    {
      // Perform L3 my-mac filter
      // A unicast packet arriving on an L3 interface must have a dmac matching the interface mac.
      // This is required for promiscuous mode, else we will forward packets we aren't supposed to.
      if (!(*is_l2))
	{
	  u64 dmacs[2];
	  u8 dmacs_bad[2];
	  ethernet_header_t *e0;
	  ethernet_interface_t *ei0;

	  e0 = (void *) (b0->data + vnet_buffer (b0)->l2_hdr_offset);
	  dmacs[0] = *(u64 *) e0;
	  ei0 = ethernet_get_interface (&ethernet_main, hi->hw_if_index);

	  if (ei0 && vec_len (ei0->secondary_addrs))
	    ethernet_input_inline_dmac_check (hi, dmacs, dmacs_bad,
					      1 /* n_packets */ , ei0,
					      1 /* have_sec_dmac */ );
	  else
	    ethernet_input_inline_dmac_check (hi, dmacs, dmacs_bad,
					      1 /* n_packets */ , ei0,
					      0 /* have_sec_dmac */ );

	  if (dmacs_bad[0])
	    *error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
	}

      // Check for down subinterface
      *error0 = (*new_sw_if_index) != ~0 ? (*error0) : ETHERNET_ERROR_DOWN;
    }
}

static_always_inline void
determine_next_node (ethernet_main_t * em,
		     ethernet_input_variant_t variant,
		     u32 is_l20,
		     u32 type0, vlib_buffer_t * b0, u8 * error0, u8 * next0)
{
  vnet_buffer (b0)->l3_hdr_offset = b0->current_data;
  b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;

  if (PREDICT_FALSE (*error0 != ETHERNET_ERROR_NONE))
    {
      // some error occurred
      *next0 = ETHERNET_INPUT_NEXT_DROP;
    }
  else if (is_l20)
    {
      // record the L2 len and reset the buffer so the L2 header is preserved
      u32 eth_start = vnet_buffer (b0)->l2_hdr_offset;
      vnet_buffer (b0)->l2.l2_len = b0->current_data - eth_start;
      *next0 = em->l2_next;
      ASSERT (vnet_buffer (b0)->l2.l2_len ==
	      ethernet_buffer_header_size (b0));
      vlib_buffer_advance (b0, -(vnet_buffer (b0)->l2.l2_len));

      // check for common IP/MPLS ethertypes
    }
  else if (type0 == ETHERNET_TYPE_IP4)
    {
      *next0 = em->l3_next.input_next_ip4;
    }
  else if (type0 == ETHERNET_TYPE_IP6)
    {
      *next0 = em->l3_next.input_next_ip6;
    }
  else if (type0 == ETHERNET_TYPE_MPLS)
    {
      *next0 = em->l3_next.input_next_mpls;

    }
  else if (em->redirect_l3)
    {
      // L3 Redirect is on, the cached common next nodes will be
      // pointing to the redirect node, catch the uncommon types here
      *next0 = em->redirect_l3_next;
    }
  else
    {
      // uncommon ethertype, check table
      u32 i0;
      i0 = sparse_vec_index (em->l3_next.input_next_by_type, type0);
      *next0 = vec_elt (em->l3_next.input_next_by_type, i0);
      *error0 =
	i0 ==
	SPARSE_VEC_INVALID_INDEX ? ETHERNET_ERROR_UNKNOWN_TYPE : *error0;

      // The table is not populated with LLC values, so check that now.
      // If variant is variant_ethernet then we came from LLC processing. Don't
      // go back there; drop instead by keeping the drop/bad table result.
      if ((type0 < 0x600) && (variant == ETHERNET_INPUT_VARIANT_ETHERNET))
	{
	  *next0 = ETHERNET_INPUT_NEXT_LLC;
	}
    }
}


/* the following vector code relies on these layout assumptions */
STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_data, 0);
STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_length, 2);
STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, flags, 4);
STATIC_ASSERT (STRUCT_OFFSET_OF (vnet_buffer_opaque_t, l2_hdr_offset) ==
	       STRUCT_OFFSET_OF (vnet_buffer_opaque_t, l3_hdr_offset) - 2,
	       "l3_hdr_offset must follow l2_hdr_offset");

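/* With this layout, current_data, current_length and flags of one buffer
 * fit in a single 64-bit load, and l2_hdr_offset/l3_hdr_offset pair into
 * a single 32-bit store - exactly what the 256-bit path below exploits. */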
static_always_inline void
eth_input_adv_and_flags_x4 (vlib_buffer_t ** b, int is_l3)
{
  i16 adv = sizeof (ethernet_header_t);
  u32 flags = VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
    VNET_BUFFER_F_L3_HDR_OFFSET_VALID;

#ifdef CLIB_HAVE_VEC256
  /* To reduce the number of small loads/stores we load the first 64 bits
     of each buffer's metadata into a 256-bit register so we can advance
     current_data and current_length and set flags in one pass.
     Observed saving: ~2 clocks per packet. */
  u64x4 r, radv;

  /* vector of signed 16-bit integers used in a signed vector add to
     advance current_data and current_length */
  u32x8 flags4 = { 0, flags, 0, flags, 0, flags, 0, flags };
  i16x16 adv4 = {
    adv, -adv, 0, 0, adv, -adv, 0, 0,
    adv, -adv, 0, 0, adv, -adv, 0, 0
  };

  /* load 4 x 64 bits */
  r = u64x4_gather (b[0], b[1], b[2], b[3]);

  /* set flags */
  r |= (u64x4) flags4;

  /* advance buffer */
  radv = (u64x4) ((i16x16) r + adv4);

  /* write 4 x 64 bits */
  u64x4_scatter (is_l3 ? radv : r, b[0], b[1], b[2], b[3]);

  /* use old current_data as l2_hdr_offset and new current_data as
     l3_hdr_offset */
  r = (u64x4) u16x16_blend (r, radv << 16, 0xaa);

  /* store both l2_hdr_offset and l3_hdr_offset in single store operation */
  u32x8_scatter_one ((u32x8) r, 0, &vnet_buffer (b[0])->l2_hdr_offset);
  u32x8_scatter_one ((u32x8) r, 2, &vnet_buffer (b[1])->l2_hdr_offset);
  u32x8_scatter_one ((u32x8) r, 4, &vnet_buffer (b[2])->l2_hdr_offset);
  u32x8_scatter_one ((u32x8) r, 6, &vnet_buffer (b[3])->l2_hdr_offset);

  if (is_l3)
    {
      ASSERT (b[0]->current_data == vnet_buffer (b[0])->l3_hdr_offset);
      ASSERT (b[1]->current_data == vnet_buffer (b[1])->l3_hdr_offset);
      ASSERT (b[2]->current_data == vnet_buffer (b[2])->l3_hdr_offset);
      ASSERT (b[3]->current_data == vnet_buffer (b[3])->l3_hdr_offset);

      ASSERT (b[0]->current_data - vnet_buffer (b[0])->l2_hdr_offset == adv);
      ASSERT (b[1]->current_data - vnet_buffer (b[1])->l2_hdr_offset == adv);
      ASSERT (b[2]->current_data - vnet_buffer (b[2])->l2_hdr_offset == adv);
      ASSERT (b[3]->current_data - vnet_buffer (b[3])->l2_hdr_offset == adv);
    }
  else
    {
      ASSERT (b[0]->current_data == vnet_buffer (b[0])->l2_hdr_offset);
      ASSERT (b[1]->current_data == vnet_buffer (b[1])->l2_hdr_offset);
      ASSERT (b[2]->current_data == vnet_buffer (b[2])->l2_hdr_offset);
      ASSERT (b[3]->current_data == vnet_buffer (b[3])->l2_hdr_offset);

      ASSERT (b[0]->current_data - vnet_buffer (b[0])->l3_hdr_offset == -adv);
      ASSERT (b[1]->current_data - vnet_buffer (b[1])->l3_hdr_offset == -adv);
      ASSERT (b[2]->current_data - vnet_buffer (b[2])->l3_hdr_offset == -adv);
      ASSERT (b[3]->current_data - vnet_buffer (b[3])->l3_hdr_offset == -adv);
    }

#else
  vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
  vnet_buffer (b[1])->l2_hdr_offset = b[1]->current_data;
  vnet_buffer (b[2])->l2_hdr_offset = b[2]->current_data;
  vnet_buffer (b[3])->l2_hdr_offset = b[3]->current_data;
  vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;
  vnet_buffer (b[1])->l3_hdr_offset = b[1]->current_data + adv;
  vnet_buffer (b[2])->l3_hdr_offset = b[2]->current_data + adv;
  vnet_buffer (b[3])->l3_hdr_offset = b[3]->current_data + adv;

  if (is_l3)
    {
      vlib_buffer_advance (b[0], adv);
      vlib_buffer_advance (b[1], adv);
      vlib_buffer_advance (b[2], adv);
      vlib_buffer_advance (b[3], adv);
    }

  b[0]->flags |= flags;
  b[1]->flags |= flags;
  b[2]->flags |= flags;
  b[3]->flags |= flags;
#endif

  if (!is_l3)
    {
      vnet_buffer (b[0])->l2.l2_len = adv;
      vnet_buffer (b[1])->l2.l2_len = adv;
      vnet_buffer (b[2])->l2.l2_len = adv;
      vnet_buffer (b[3])->l2.l2_len = adv;
    }
}

static_always_inline void
eth_input_adv_and_flags_x1 (vlib_buffer_t ** b, int is_l3)
{
  i16 adv = sizeof (ethernet_header_t);
  u32 flags = VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
    VNET_BUFFER_F_L3_HDR_OFFSET_VALID;

  vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
  vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;

  if (is_l3)
    vlib_buffer_advance (b[0], adv);
  b[0]->flags |= flags;
  if (!is_l3)
    vnet_buffer (b[0])->l2.l2_len = adv;
}


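/* Load the ethertype, the 8 bytes that follow it (room for two VLAN
 * tags) and optionally the destination MAC with as few loads as
 * possible. The 16-byte vector load below starts 6 bytes before
 * e->type, i.e. at the source MAC, so u16 lane 3 is the ethertype and
 * the upper u64 lane is the tag data.
 */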
static_always_inline void
eth_input_get_etype_and_tags (vlib_buffer_t ** b, u16 * etype, u64 * tags,
			      u64 * dmacs, int offset, int dmac_check)
{
  ethernet_header_t *e;
  e = vlib_buffer_get_current (b[offset]);
#ifdef CLIB_HAVE_VEC128
  u64x2 r = u64x2_load_unaligned (((u8 *) & e->type) - 6);
  etype[offset] = ((u16x8) r)[3];
  tags[offset] = r[1];
#else
  etype[offset] = e->type;
  tags[offset] = *(u64 *) (e + 1);
#endif

  if (dmac_check)
    dmacs[offset] = *(u64 *) e;
}

static_always_inline u16
eth_input_next_by_type (u16 etype)
{
  ethernet_main_t *em = &ethernet_main;

  return (etype < 0x600) ? ETHERNET_INPUT_NEXT_LLC :
    vec_elt (em->l3_next.input_next_by_type,
	     sparse_vec_index (em->l3_next.input_next_by_type, etype));
}

typedef struct
{
  u64 tag, mask;
  u32 sw_if_index;
  u16 type, len, next;
  i16 adv;
  u8 err, n_tags;
  u64 n_packets, n_bytes;
} eth_input_tag_lookup_t;
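
/* Cache of the most recent tag lookup. eth_input_tag_lookup () redoes
 * the vlan/qinq table walk only when (tag ^ l->tag) & l->mask is
 * non-zero, so a burst of packets carrying identical tags reuses
 * sw_if_index, next and adv and just bumps the n_packets/n_bytes
 * counters.
 */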

static_always_inline void
eth_input_update_if_counters (vlib_main_t * vm, vnet_main_t * vnm,
			      eth_input_tag_lookup_t * l)
{
  if (l->n_packets == 0 || l->sw_if_index == ~0)
    return;

  if (l->adv > 0)
    l->n_bytes += l->n_packets * l->len;

  vlib_increment_combined_counter
    (vnm->interface_main.combined_sw_if_counters +
     VNET_INTERFACE_COUNTER_RX, vm->thread_index, l->sw_if_index,
     l->n_packets, l->n_bytes);
}

static_always_inline void
eth_input_tag_lookup (vlib_main_t * vm, vnet_main_t * vnm,
		      vlib_node_runtime_t * node, vnet_hw_interface_t * hi,
		      u64 tag, u16 * next, vlib_buffer_t * b,
		      eth_input_tag_lookup_t * l, u8 dmac_bad, int is_dot1ad,
		      int main_is_l3, int check_dmac)
{
  ethernet_main_t *em = &ethernet_main;

  if ((tag ^ l->tag) & l->mask)
    {
      main_intf_t *mif = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
      vlan_intf_t *vif;
      qinq_intf_t *qif;
      vlan_table_t *vlan_table;
      qinq_table_t *qinq_table;
      u16 *t = (u16 *) & tag;
      u16 vlan1 = clib_net_to_host_u16 (t[0]) & 0xFFF;
      u16 vlan2 = clib_net_to_host_u16 (t[2]) & 0xFFF;
      u32 matched, is_l2, new_sw_if_index;

      vlan_table = vec_elt_at_index (em->vlan_pool, is_dot1ad ?
				     mif->dot1ad_vlans : mif->dot1q_vlans);
      vif = &vlan_table->vlans[vlan1];
      qinq_table = vec_elt_at_index (em->qinq_pool, vif->qinqs);
      qif = &qinq_table->vlans[vlan2];
      l->err = ETHERNET_ERROR_NONE;
      l->type = clib_net_to_host_u16 (t[1]);

      if (l->type == ETHERNET_TYPE_VLAN)
	{
	  l->type = clib_net_to_host_u16 (t[3]);
	  l->n_tags = 2;
	  matched = eth_identify_subint (hi, SUBINT_CONFIG_VALID |
					 SUBINT_CONFIG_MATCH_2_TAG, mif, vif,
					 qif, &new_sw_if_index, &l->err,
					 &is_l2);
	}
      else
	{
	  l->n_tags = 1;
	  if (vlan1 == 0)
	    {
	      new_sw_if_index = hi->sw_if_index;
	      l->err = ETHERNET_ERROR_NONE;
	      matched = 1;
	      is_l2 = main_is_l3 == 0;
	    }
	  else
	    matched = eth_identify_subint (hi, SUBINT_CONFIG_VALID |
					   SUBINT_CONFIG_MATCH_1_TAG, mif,
					   vif, qif, &new_sw_if_index,
					   &l->err, &is_l2);
	}

      if (l->sw_if_index != new_sw_if_index)
	{
	  eth_input_update_if_counters (vm, vnm, l);
	  l->n_packets = 0;
	  l->n_bytes = 0;
	  l->sw_if_index = new_sw_if_index;
	}
      l->tag = tag;
      l->mask = (l->n_tags == 2) ?
	clib_net_to_host_u64 (0xffffffffffffffff) :
	clib_net_to_host_u64 (0xffffffff00000000);

      if (matched && l->sw_if_index == ~0)
	l->err = ETHERNET_ERROR_DOWN;

      l->len = sizeof (ethernet_header_t) +
	l->n_tags * sizeof (ethernet_vlan_header_t);
      if (main_is_l3)
	l->adv = is_l2 ? -(int) sizeof (ethernet_header_t) :
	  l->n_tags * sizeof (ethernet_vlan_header_t);
      else
	l->adv = is_l2 ? 0 : l->len;

      if (PREDICT_FALSE (l->err != ETHERNET_ERROR_NONE))
	l->next = ETHERNET_INPUT_NEXT_DROP;
      else if (is_l2)
	l->next = em->l2_next;
      else if (l->type == ETHERNET_TYPE_IP4)
	l->next = em->l3_next.input_next_ip4;
      else if (l->type == ETHERNET_TYPE_IP6)
	l->next = em->l3_next.input_next_ip6;
      else if (l->type == ETHERNET_TYPE_MPLS)
	l->next = em->l3_next.input_next_mpls;
      else if (em->redirect_l3)
	l->next = em->redirect_l3_next;
      else
	{
	  l->next = eth_input_next_by_type (l->type);
	  if (l->next == ETHERNET_INPUT_NEXT_PUNT)
	    l->err = ETHERNET_ERROR_UNKNOWN_TYPE;
	}
    }

  if (check_dmac && l->adv > 0 && dmac_bad)
    {
      l->err = ETHERNET_ERROR_L3_MAC_MISMATCH;
      next[0] = ETHERNET_INPUT_NEXT_PUNT;
    }
  else
    next[0] = l->next;

  vlib_buffer_advance (b, l->adv);
  vnet_buffer (b)->l2.l2_len = l->len;
  vnet_buffer (b)->l3_hdr_offset = vnet_buffer (b)->l2_hdr_offset + l->len;

  if (l->err == ETHERNET_ERROR_NONE)
    {
      vnet_buffer (b)->sw_if_index[VLIB_RX] = l->sw_if_index;
      ethernet_buffer_set_vlan_count (b, l->n_tags);
    }
  else
    b->error = node->errors[l->err];

  /* update counters */
  l->n_packets += 1;
  l->n_bytes += vlib_buffer_length_in_chain (vm, b);
}

#define DMAC_MASK clib_net_to_host_u64 (0xFFFFFFFFFFFF0000)
#define DMAC_IGBIT clib_net_to_host_u64 (0x0100000000000000)
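
/* The first 8 bytes of the ethernet header are loaded as a u64:
 * DMAC_MASK keeps the 6 destination-MAC bytes and DMAC_IGBIT selects the
 * I/G (group) bit, so multicast frames are never flagged as a unicast
 * DMAC mismatch (see is_dmac_bad below). */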

#ifdef CLIB_HAVE_VEC256
static_always_inline u32
is_dmac_bad_x4 (u64 * dmacs, u64 hwaddr)
{
  u64x4 r0 = u64x4_load_unaligned (dmacs) & u64x4_splat (DMAC_MASK);
  r0 = (r0 != u64x4_splat (hwaddr)) & ((r0 & u64x4_splat (DMAC_IGBIT)) == 0);
  return u8x32_msb_mask ((u8x32) (r0));
}
#endif

static_always_inline u8
is_dmac_bad (u64 dmac, u64 hwaddr)
{
  u64 r0 = dmac & DMAC_MASK;
  return (r0 != hwaddr) && ((r0 & DMAC_IGBIT) == 0);
}

static_always_inline u8
is_sec_dmac_bad (u64 dmac, u64 hwaddr)
{
  return ((dmac & DMAC_MASK) != hwaddr);
}

#ifdef CLIB_HAVE_VEC256
static_always_inline u32
is_sec_dmac_bad_x4 (u64 * dmacs, u64 hwaddr)
{
  u64x4 r0 = u64x4_load_unaligned (dmacs) & u64x4_splat (DMAC_MASK);
  r0 = (r0 != u64x4_splat (hwaddr));
  return u8x32_msb_mask ((u8x32) (r0));
}
#endif

static_always_inline u8
eth_input_sec_dmac_check_x1 (u64 hwaddr, u64 * dmac, u8 * dmac_bad)
{
  dmac_bad[0] &= is_sec_dmac_bad (dmac[0], hwaddr);
  return dmac_bad[0];
}

static_always_inline u32
eth_input_sec_dmac_check_x4 (u64 hwaddr, u64 * dmac, u8 * dmac_bad)
{
#ifdef CLIB_HAVE_VEC256
  *(u32 *) (dmac_bad + 0) &= is_sec_dmac_bad_x4 (dmac + 0, hwaddr);
#else
  dmac_bad[0] &= is_sec_dmac_bad (dmac[0], hwaddr);
  dmac_bad[1] &= is_sec_dmac_bad (dmac[1], hwaddr);
  dmac_bad[2] &= is_sec_dmac_bad (dmac[2], hwaddr);
  dmac_bad[3] &= is_sec_dmac_bad (dmac[3], hwaddr);
#endif
  return *(u32 *) dmac_bad;
}

/*
 * DMAC check for ethernet_input_inline()
 *
 * dmacs and dmacs_bad are arrays that are 2 elements long
 * n_packets should be 1 or 2 for ethernet_input_inline()
 */
static_always_inline void
ethernet_input_inline_dmac_check (vnet_hw_interface_t * hi,
				  u64 * dmacs, u8 * dmacs_bad,
				  u32 n_packets, ethernet_interface_t * ei,
				  u8 have_sec_dmac)
{
  u64 hwaddr = (*(u64 *) hi->hw_address) & DMAC_MASK;
  u8 bad = 0;

  dmacs_bad[0] = is_dmac_bad (dmacs[0], hwaddr);
  dmacs_bad[1] = ((n_packets > 1) & is_dmac_bad (dmacs[1], hwaddr));

  bad = dmacs_bad[0] | dmacs_bad[1];

  if (PREDICT_FALSE (bad && have_sec_dmac))
    {
      mac_address_t *sec_addr;

      vec_foreach (sec_addr, ei->secondary_addrs)
      {
	hwaddr = (*(u64 *) sec_addr) & DMAC_MASK;

	bad = (eth_input_sec_dmac_check_x1 (hwaddr, dmacs, dmacs_bad) |
	       eth_input_sec_dmac_check_x1 (hwaddr, dmacs + 1,
					    dmacs_bad + 1));

	if (!bad)
	  return;
      }
    }
}

static_always_inline void
eth_input_process_frame_dmac_check (vnet_hw_interface_t * hi,
				    u64 * dmacs, u8 * dmacs_bad,
				    u32 n_packets, ethernet_interface_t * ei,
				    u8 have_sec_dmac)
{
  u64 hwaddr = (*(u64 *) hi->hw_address) & DMAC_MASK;
  u64 *dmac = dmacs;
  u8 *dmac_bad = dmacs_bad;
  u32 bad = 0;
  i32 n_left = n_packets;

#ifdef CLIB_HAVE_VEC256
  while (n_left > 0)
    {
      bad |= *(u32 *) (dmac_bad + 0) = is_dmac_bad_x4 (dmac + 0, hwaddr);
      bad |= *(u32 *) (dmac_bad + 4) = is_dmac_bad_x4 (dmac + 4, hwaddr);

      /* next */
      dmac += 8;
      dmac_bad += 8;
      n_left -= 8;
    }
#else
  while (n_left > 0)
    {
      bad |= dmac_bad[0] = is_dmac_bad (dmac[0], hwaddr);
      bad |= dmac_bad[1] = is_dmac_bad (dmac[1], hwaddr);
      bad |= dmac_bad[2] = is_dmac_bad (dmac[2], hwaddr);
      bad |= dmac_bad[3] = is_dmac_bad (dmac[3], hwaddr);

      /* next */
      dmac += 4;
      dmac_bad += 4;
      n_left -= 4;
    }
#endif

  if (have_sec_dmac && bad)
    {
      mac_address_t *addr;

      vec_foreach (addr, ei->secondary_addrs)
      {
	u64 hwaddr = ((u64 *) addr)[0] & DMAC_MASK;
	i32 n_left = n_packets;
	u64 *dmac = dmacs;
	u8 *dmac_bad = dmacs_bad;

	bad = 0;

	while (n_left > 0)
	  {
	    int adv = 0;
	    int n_bad;

	    /* skip any that have already matched */
	    if (!dmac_bad[0])
	      {
		dmac += 1;
		dmac_bad += 1;
		n_left -= 1;
		continue;
	      }

	    n_bad = clib_min (4, n_left);

	    /* If >= 4 left, compare 4 together */
	    if (n_bad == 4)
	      {
		bad |= eth_input_sec_dmac_check_x4 (hwaddr, dmac, dmac_bad);
		adv = 4;
		n_bad = 0;
	      }

	    /* handle individually */
	    while (n_bad > 0)
	      {
		bad |= eth_input_sec_dmac_check_x1 (hwaddr, dmac + adv,
						    dmac_bad + adv);
		adv += 1;
		n_bad -= 1;
	      }

	    dmac += adv;
	    dmac_bad += adv;
	    n_left -= adv;
	  }

	if (!bad)		/* can stop looping if everything matched */
	  break;
      }
    }
}

/* Process a frame of buffers: store each ethertype into an array and update
   buffer metadata fields depending on whether the interface is L2 or L3,
   assuming packets are untagged. For tagged packets those fields are updated
   later. Optionally store destination MAC addresses and tag data into arrays
   for further processing. */

STATIC_ASSERT (VLIB_FRAME_SIZE % 8 == 0,
	       "VLIB_FRAME_SIZE must be a multiple of 8");
static_always_inline void
eth_input_process_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vnet_hw_interface_t * hi,
			 u32 * buffer_indices, u32 n_packets, int main_is_l3,
			 int ip4_cksum_ok, int dmac_check)
{
  ethernet_main_t *em = &ethernet_main;
  u16 nexts[VLIB_FRAME_SIZE], *next;
  u16 etypes[VLIB_FRAME_SIZE], *etype = etypes;
  u64 dmacs[VLIB_FRAME_SIZE], *dmac = dmacs;
  u8 dmacs_bad[VLIB_FRAME_SIZE];
  u64 tags[VLIB_FRAME_SIZE], *tag = tags;
  u16 slowpath_indices[VLIB_FRAME_SIZE];
  u16 n_slowpath, i;
  u16 next_ip4, next_ip6, next_mpls, next_l2;
  u16 et_ip4 = clib_host_to_net_u16 (ETHERNET_TYPE_IP4);
  u16 et_ip6 = clib_host_to_net_u16 (ETHERNET_TYPE_IP6);
  u16 et_mpls = clib_host_to_net_u16 (ETHERNET_TYPE_MPLS);
  u16 et_vlan = clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
  u16 et_dot1ad = clib_host_to_net_u16 (ETHERNET_TYPE_DOT1AD);
  i32 n_left = n_packets;
  vlib_buffer_t *b[20];
  u32 *from;
  ethernet_interface_t *ei = ethernet_get_interface (em, hi->hw_if_index);

  from = buffer_indices;

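  /* The main loop is software-pipelined: b[0..3] are parsed now while
   * the buffers 8 ahead get their packet data prefetched and the buffers
   * 16 ahead their headers, so each quad arrives with warm caches.
   */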
  while (n_left >= 20)
    {
      vlib_buffer_t **ph = b + 16, **pd = b + 8;
      vlib_get_buffers (vm, from, b, 4);
      vlib_get_buffers (vm, from + 8, pd, 4);
      vlib_get_buffers (vm, from + 16, ph, 4);

      vlib_prefetch_buffer_header (ph[0], LOAD);
      vlib_prefetch_buffer_data (pd[0], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);

      vlib_prefetch_buffer_header (ph[1], LOAD);
      vlib_prefetch_buffer_data (pd[1], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 1, dmac_check);

      vlib_prefetch_buffer_header (ph[2], LOAD);
      vlib_prefetch_buffer_data (pd[2], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 2, dmac_check);

      vlib_prefetch_buffer_header (ph[3], LOAD);
      vlib_prefetch_buffer_data (pd[3], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 3, dmac_check);

      eth_input_adv_and_flags_x4 (b, main_is_l3);

      /* next */
      n_left -= 4;
      etype += 4;
      tag += 4;
      dmac += 4;
      from += 4;
    }
  while (n_left >= 4)
    {
      vlib_get_buffers (vm, from, b, 4);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 1, dmac_check);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 2, dmac_check);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 3, dmac_check);
      eth_input_adv_and_flags_x4 (b, main_is_l3);

      /* next */
      n_left -= 4;
      etype += 4;
      tag += 4;
      dmac += 4;
      from += 4;
    }
  while (n_left)
    {
      vlib_get_buffers (vm, from, b, 1);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);
      eth_input_adv_and_flags_x1 (b, main_is_l3);

      /* next */
      n_left -= 1;
      etype += 1;
      tag += 1;
      dmac += 1;
      from += 1;
    }

  if (dmac_check)
    {
      if (ei && vec_len (ei->secondary_addrs))
	eth_input_process_frame_dmac_check (hi, dmacs, dmacs_bad, n_packets,
					    ei, 1 /* have_sec_dmac */ );
      else
	eth_input_process_frame_dmac_check (hi, dmacs, dmacs_bad, n_packets,
					    ei, 0 /* have_sec_dmac */ );
    }

  next_ip4 = em->l3_next.input_next_ip4;
  next_ip6 = em->l3_next.input_next_ip6;
  next_mpls = em->l3_next.input_next_mpls;
  next_l2 = em->l2_next;

  if (next_ip4 == ETHERNET_INPUT_NEXT_IP4_INPUT && ip4_cksum_ok)
    next_ip4 = ETHERNET_INPUT_NEXT_IP4_INPUT_NCS;

#ifdef CLIB_HAVE_VEC256
  u16x16 et16_ip4 = u16x16_splat (et_ip4);
  u16x16 et16_ip6 = u16x16_splat (et_ip6);
  u16x16 et16_mpls = u16x16_splat (et_mpls);
  u16x16 et16_vlan = u16x16_splat (et_vlan);
  u16x16 et16_dot1ad = u16x16_splat (et_dot1ad);
  u16x16 next16_ip4 = u16x16_splat (next_ip4);
  u16x16 next16_ip6 = u16x16_splat (next_ip6);
  u16x16 next16_mpls = u16x16_splat (next_mpls);
  u16x16 next16_l2 = u16x16_splat (next_l2);
  u16x16 zero = { 0 };
  u16x16 stairs = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
#endif

  etype = etypes;
  n_left = n_packets;
  next = nexts;
  n_slowpath = 0;
  i = 0;

  /* fastpath - in l3 mode handles ip4, ip6 and mpls packets; other packets
     are considered slowpath. In l2 mode all untagged packets are
     considered fastpath */
  while (n_left > 0)
    {
#ifdef CLIB_HAVE_VEC256
      if (n_left >= 16)
	{
	  u16x16 r = zero;
	  u16x16 e16 = u16x16_load_unaligned (etype);
	  if (main_is_l3)
	    {
	      r += (e16 == et16_ip4) & next16_ip4;
	      r += (e16 == et16_ip6) & next16_ip6;
	      r += (e16 == et16_mpls) & next16_mpls;
	    }
	  else
	    r = ((e16 != et16_vlan) & (e16 != et16_dot1ad)) & next16_l2;
	  u16x16_store_unaligned (r, next);

	  if (!u16x16_is_all_zero (r == zero))
	    {
	      if (u16x16_is_all_zero (r))
		{
		  u16x16_store_unaligned (u16x16_splat (i) + stairs,
					  slowpath_indices + n_slowpath);
		  n_slowpath += 16;
		}
	      else
		{
		  for (int j = 0; j < 16; j++)
		    if (next[j] == 0)
		      slowpath_indices[n_slowpath++] = i + j;
		}
	    }

	  etype += 16;
	  next += 16;
	  n_left -= 16;
	  i += 16;
	  continue;
	}
#endif
      if (main_is_l3 && etype[0] == et_ip4)
	next[0] = next_ip4;
      else if (main_is_l3 && etype[0] == et_ip6)
	next[0] = next_ip6;
      else if (main_is_l3 && etype[0] == et_mpls)
	next[0] = next_mpls;
      else if (main_is_l3 == 0 &&
	       etype[0] != et_vlan && etype[0] != et_dot1ad)
	next[0] = next_l2;
      else
	{
	  next[0] = 0;
	  slowpath_indices[n_slowpath++] = i;
	}

      etype += 1;
      next += 1;
      n_left -= 1;
      i += 1;
    }

  if (n_slowpath)
    {
      vnet_main_t *vnm = vnet_get_main ();
      n_left = n_slowpath;
      u16 *si = slowpath_indices;
      u32 last_unknown_etype = ~0;
      u32 last_unknown_next = ~0;
      eth_input_tag_lookup_t dot1ad_lookup, dot1q_lookup = {
	.mask = -1LL,
	.tag = tags[si[0]] ^ -1LL,
	.sw_if_index = ~0
      };

      clib_memcpy_fast (&dot1ad_lookup, &dot1q_lookup, sizeof (dot1q_lookup));

      while (n_left)
	{
	  i = si[0];
	  u16 etype = etypes[i];

	  if (etype == et_vlan)
	    {
	      vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
	      eth_input_tag_lookup (vm, vnm, node, hi, tags[i], nexts + i, b,
				    &dot1q_lookup, dmacs_bad[i], 0,
				    main_is_l3, dmac_check);

	    }
	  else if (etype == et_dot1ad)
	    {
	      vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
	      eth_input_tag_lookup (vm, vnm, node, hi, tags[i], nexts + i, b,
				    &dot1ad_lookup, dmacs_bad[i], 1,
				    main_is_l3, dmac_check);
	    }
	  else
	    {
	      /* untagged packet with a not-well-known ethertype */
	      if (last_unknown_etype != etype)
		{
		  last_unknown_etype = etype;
		  etype = clib_host_to_net_u16 (etype);
		  last_unknown_next = eth_input_next_by_type (etype);
		}
	      if (dmac_check && main_is_l3 && dmacs_bad[i])
		{
		  vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
		  b->error = node->errors[ETHERNET_ERROR_L3_MAC_MISMATCH];
		  nexts[i] = ETHERNET_INPUT_NEXT_PUNT;
		}
	      else
		nexts[i] = last_unknown_next;
	    }

	  /* next */
	  n_left--;
	  si++;
	}

      eth_input_update_if_counters (vm, vnm, &dot1q_lookup);
      eth_input_update_if_counters (vm, vnm, &dot1ad_lookup);
    }

  vlib_buffer_enqueue_to_next (vm, node, buffer_indices, nexts, n_packets);
}

static_always_inline void
eth_input_single_int (vlib_main_t * vm, vlib_node_runtime_t * node,
		      vnet_hw_interface_t * hi, u32 * from, u32 n_pkts,
		      int ip4_cksum_ok)
{
  ethernet_main_t *em = &ethernet_main;
  ethernet_interface_t *ei;
  ei = pool_elt_at_index (em->interfaces, hi->hw_instance);
  main_intf_t *intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
  subint_config_t *subint0 = &intf0->untagged_subint;

  int main_is_l3 = (subint0->flags & SUBINT_CONFIG_L2) == 0;
  int promisc = (ei->flags & ETHERNET_INTERFACE_FLAG_ACCEPT_ALL) != 0;

  if (main_is_l3)
    {
      /* main interface is L3; we don't expect tagged packets and the
         interface is not in promisc mode, so we don't need to check DMAC */
      int is_l3 = 1;

      if (promisc == 0)
	eth_input_process_frame (vm, node, hi, from, n_pkts, is_l3,
				 ip4_cksum_ok, 0);
      else
	/* subinterfaces and promisc mode, so the DMAC check is needed */
	eth_input_process_frame (vm, node, hi, from, n_pkts, is_l3,
				 ip4_cksum_ok, 1);
      return;
    }
  else
    {
      /* untagged packets are treated as L2 */
      int is_l3 = 0;
      eth_input_process_frame (vm, node, hi, from, n_pkts, is_l3,
			       ip4_cksum_ok, 1);
      return;
    }
}

static_always_inline void
ethernet_input_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
		      vlib_frame_t * from_frame)
{
  u32 *from, n_left;
  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
    {
      from = vlib_frame_vector_args (from_frame);
      n_left = from_frame->n_vectors;

      while (n_left)
	{
	  ethernet_input_trace_t *t0;
	  vlib_buffer_t *b0 = vlib_get_buffer (vm, from[0]);

	  if (b0->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      t0 = vlib_add_trace (vm, node, b0,
				   sizeof (ethernet_input_trace_t));
	      clib_memcpy_fast (t0->packet_data, b0->data + b0->current_data,
				sizeof (t0->packet_data));
	      t0->frame_flags = from_frame->flags;
	      clib_memcpy_fast (&t0->frame_data,
				vlib_frame_scalar_args (from_frame),
				sizeof (ethernet_input_frame_t));
	    }
	  from += 1;
	  n_left -= 1;
	}
    }

  /* rx pcap capture if enabled */
  if (PREDICT_FALSE (vlib_global_main.pcap.pcap_rx_enable))
    {
      u32 bi0;
      vnet_pcap_t *pp = &vlib_global_main.pcap;

      from = vlib_frame_vector_args (from_frame);
      n_left = from_frame->n_vectors;
      while (n_left > 0)
	{
	  int classify_filter_result;
	  vlib_buffer_t *b0;
	  bi0 = from[0];
	  from++;
	  n_left--;
	  b0 = vlib_get_buffer (vm, bi0);
	  if (pp->filter_classify_table_index != ~0)
	    {
	      classify_filter_result =
		vnet_is_packet_traced_inline
		(b0, pp->filter_classify_table_index, 0 /* full classify */ );
	      if (classify_filter_result)
		pcap_add_buffer (&pp->pcap_main, vm, bi0,
				 pp->max_bytes_per_pkt);
	      continue;
	    }

	  if (pp->pcap_sw_if_index == 0 ||
	      pp->pcap_sw_if_index == vnet_buffer (b0)->sw_if_index[VLIB_RX])
	    {
	      vnet_main_t *vnm = vnet_get_main ();
	      vnet_hw_interface_t *hi =
		vnet_get_sup_hw_interface
		(vnm, vnet_buffer (b0)->sw_if_index[VLIB_RX]);

	      /* Capture pkt if not filtered, or if filter hits */
	      if (hi->trace_classify_table_index == ~0 ||
		  vnet_is_packet_traced_inline
		  (b0, hi->trace_classify_table_index,
		   0 /* full classify */ ))
		pcap_add_buffer (&pp->pcap_main, vm, bi0,
				 pp->max_bytes_per_pkt);
	    }
	}
    }
}

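/* Generic per-packet ethernet input path, used when packets in a frame
 * may come from different interfaces or carry VLAN tags. Untagged
 * packets on a cached interface take a speed-path; tagged packets go
 * through full header parsing, VLAN table lookups, subinterface
 * identification and subinterface stats. */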
static_always_inline void
ethernet_input_inline (vlib_main_t * vm,
		       vlib_node_runtime_t * node,
		       u32 * from, u32 n_packets,
		       ethernet_input_variant_t variant)
{
  vnet_main_t *vnm = vnet_get_main ();
  ethernet_main_t *em = &ethernet_main;
  vlib_node_runtime_t *error_node;
  u32 n_left_from, next_index, *to_next;
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u32 thread_index = vm->thread_index;
  u32 cached_sw_if_index = ~0;
  u32 cached_is_l2 = 0;		/* shut up gcc */
  vnet_hw_interface_t *hi = NULL;	/* used for main interface only */
  ethernet_interface_t *ei = NULL;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
  vlib_buffer_t **b = bufs;

  if (variant != ETHERNET_INPUT_VARIANT_ETHERNET)
    error_node = vlib_node_get_runtime (vm, ethernet_input_node.index);
  else
    error_node = node;

  n_left_from = n_packets;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  u32 bi0, bi1;
	  vlib_buffer_t *b0, *b1;
	  u8 next0, next1, error0, error1;
	  u16 type0, orig_type0, type1, orig_type1;
	  u16 outer_id0, inner_id0, outer_id1, inner_id1;
	  u32 match_flags0, match_flags1;
	  u32 old_sw_if_index0, new_sw_if_index0, len0, old_sw_if_index1,
	    new_sw_if_index1, len1;
	  vnet_hw_interface_t *hi0, *hi1;
	  main_intf_t *main_intf0, *main_intf1;
	  vlan_intf_t *vlan_intf0, *vlan_intf1;
	  qinq_intf_t *qinq_intf0, *qinq_intf1;
	  u32 is_l20, is_l21;
	  ethernet_header_t *e0, *e1;
	  u64 dmacs[2];
	  u8 dmacs_bad[2];

	  /* Prefetch next iteration. */
	  {
	    vlib_prefetch_buffer_header (b[2], STORE);
	    vlib_prefetch_buffer_header (b[3], STORE);

	    CLIB_PREFETCH (b[2]->data, sizeof (ethernet_header_t), LOAD);
	    CLIB_PREFETCH (b[3]->data, sizeof (ethernet_header_t), LOAD);
	  }

	  bi0 = from[0];
	  bi1 = from[1];
	  to_next[0] = bi0;
	  to_next[1] = bi1;
	  from += 2;
	  to_next += 2;
	  n_left_to_next -= 2;
	  n_left_from -= 2;

	  b0 = b[0];
	  b1 = b[1];
	  b += 2;

	  error0 = error1 = ETHERNET_ERROR_NONE;
	  e0 = vlib_buffer_get_current (b0);
	  type0 = clib_net_to_host_u16 (e0->type);
	  e1 = vlib_buffer_get_current (b1);
	  type1 = clib_net_to_host_u16 (e1->type);

	  /* Set the L2 header offset for all packets */
	  vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
	  vnet_buffer (b1)->l2_hdr_offset = b1->current_data;
	  b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
	  b1->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;

	  /* Speed-path for the untagged case */
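	  /* A one-entry cache (cached_sw_if_index / cached_is_l2) lets
	     back-to-back packets from the same interface skip the
	     interface and subinterface config lookups. */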
	  if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
			    && !ethernet_frame_is_any_tagged_x2 (type0,
								 type1)))
	    {
	      main_intf_t *intf0;
	      subint_config_t *subint0;
	      u32 sw_if_index0, sw_if_index1;

	      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
	      sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
	      is_l20 = cached_is_l2;

	      /* This is probably wholly unnecessary */
	      if (PREDICT_FALSE (sw_if_index0 != sw_if_index1))
		goto slowpath;

	      /* Now sw_if_index0 == sw_if_index1  */
	      if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
		{
		  cached_sw_if_index = sw_if_index0;
		  hi = vnet_get_sup_hw_interface (vnm, sw_if_index0);
		  ei = ethernet_get_interface (em, hi->hw_if_index);
		  intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
		  subint0 = &intf0->untagged_subint;
		  cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
		}

	      if (PREDICT_TRUE (is_l20 != 0))
		{
		  vnet_buffer (b0)->l3_hdr_offset =
		    vnet_buffer (b0)->l2_hdr_offset +
		    sizeof (ethernet_header_t);
		  vnet_buffer (b1)->l3_hdr_offset =
		    vnet_buffer (b1)->l2_hdr_offset +
		    sizeof (ethernet_header_t);
		  b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
		  b1->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
		  next0 = em->l2_next;
		  vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
		  next1 = em->l2_next;
		  vnet_buffer (b1)->l2.l2_len = sizeof (ethernet_header_t);
		}
	      else
		{
		  dmacs[0] = *(u64 *) e0;
		  dmacs[1] = *(u64 *) e1;

		  if (ei && vec_len (ei->secondary_addrs))
		    ethernet_input_inline_dmac_check (hi, dmacs,
						      dmacs_bad,
						      2 /* n_packets */ ,
						      ei,
						      1 /* have_sec_dmac */ );
		  else
		    ethernet_input_inline_dmac_check (hi, dmacs,
						      dmacs_bad,
						      2 /* n_packets */ ,
						      ei,
						      0 /* have_sec_dmac */ );

		  if (dmacs_bad[0])
		    error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
		  if (dmacs_bad[1])
		    error1 = ETHERNET_ERROR_L3_MAC_MISMATCH;

		  vlib_buffer_advance (b0, sizeof (ethernet_header_t));
		  determine_next_node (em, variant, 0, type0, b0,
				       &error0, &next0);
		  vlib_buffer_advance (b1, sizeof (ethernet_header_t));
		  determine_next_node (em, variant, 0, type1, b1,
				       &error1, &next1);
		}
	      goto ship_it01;
	    }

	  /* Slow-path for the tagged case */
	slowpath:
	  parse_header (variant,
			b0,
			&type0,
			&orig_type0, &outer_id0, &inner_id0, &match_flags0);

	  parse_header (variant,
			b1,
			&type1,
			&orig_type1, &outer_id1, &inner_id1, &match_flags1);

	  old_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
	  old_sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];

	  eth_vlan_table_lookups (em,
				  vnm,
				  old_sw_if_index0,
				  orig_type0,
				  outer_id0,
				  inner_id0,
				  &hi0,
				  &main_intf0, &vlan_intf0, &qinq_intf0);

	  eth_vlan_table_lookups (em,
				  vnm,
				  old_sw_if_index1,
				  orig_type1,
				  outer_id1,
				  inner_id1,
				  &hi1,
				  &main_intf1, &vlan_intf1, &qinq_intf1);

	  identify_subint (hi0,
			   b0,
			   match_flags0,
			   main_intf0,
			   vlan_intf0,
			   qinq_intf0, &new_sw_if_index0, &error0, &is_l20);

	  identify_subint (hi1,
			   b1,
			   match_flags1,
			   main_intf1,
			   vlan_intf1,
			   qinq_intf1, &new_sw_if_index1, &error1, &is_l21);

	  // Save RX sw_if_index for later nodes
	  vnet_buffer (b0)->sw_if_index[VLIB_RX] =
	    error0 !=
	    ETHERNET_ERROR_NONE ? old_sw_if_index0 : new_sw_if_index0;
	  vnet_buffer (b1)->sw_if_index[VLIB_RX] =
	    error1 !=
	    ETHERNET_ERROR_NONE ? old_sw_if_index1 : new_sw_if_index1;

	  // Check if there is a stat to take (valid and non-main sw_if_index for pkt 0 or pkt 1)
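	  // Counters are accumulated optimistically: add first, then roll
	  // back and flush when the subinterface differs from the one
	  // currently being batched (see the PREDICT_FALSE block below).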
	  if (((new_sw_if_index0 != ~0)
	       && (new_sw_if_index0 != old_sw_if_index0))
	      || ((new_sw_if_index1 != ~0)
		  && (new_sw_if_index1 != old_sw_if_index1)))
	    {

	      len0 = vlib_buffer_length_in_chain (vm, b0) + b0->current_data
		- vnet_buffer (b0)->l2_hdr_offset;
	      len1 = vlib_buffer_length_in_chain (vm, b1) + b1->current_data
		- vnet_buffer (b1)->l2_hdr_offset;

	      stats_n_packets += 2;
	      stats_n_bytes += len0 + len1;

	      if (PREDICT_FALSE
		  (!(new_sw_if_index0 == stats_sw_if_index
		     && new_sw_if_index1 == stats_sw_if_index)))
		{
		  stats_n_packets -= 2;
		  stats_n_bytes -= len0 + len1;

		  if (new_sw_if_index0 != old_sw_if_index0
		      && new_sw_if_index0 != ~0)
		    vlib_increment_combined_counter (vnm->
						     interface_main.combined_sw_if_counters
						     +
						     VNET_INTERFACE_COUNTER_RX,
						     thread_index,
						     new_sw_if_index0, 1,
						     len0);
		  if (new_sw_if_index1 != old_sw_if_index1
		      && new_sw_if_index1 != ~0)
		    vlib_increment_combined_counter (vnm->
						     interface_main.combined_sw_if_counters
						     +
						     VNET_INTERFACE_COUNTER_RX,
						     thread_index,
						     new_sw_if_index1, 1,
						     len1);

		  if (new_sw_if_index0 == new_sw_if_index1)
		    {
		      if (stats_n_packets > 0)
			{
			  vlib_increment_combined_counter
			    (vnm->interface_main.combined_sw_if_counters
			     + VNET_INTERFACE_COUNTER_RX,
			     thread_index,
			     stats_sw_if_index,
			     stats_n_packets, stats_n_bytes);
			  stats_n_packets = stats_n_bytes = 0;
			}
		      stats_sw_if_index = new_sw_if_index0;
		    }
		}
	    }

	  if (variant == ETHERNET_INPUT_VARIANT_NOT_L2)
	    is_l20 = is_l21 = 0;

	  determine_next_node (em, variant, is_l20, type0, b0, &error0,
			       &next0);
	  determine_next_node (em, variant, is_l21, type1, b1, &error1,
			       &next1);

	ship_it01:
	  b0->error = error_node->errors[error0];
	  b1->error = error_node->errors[error1];

	  // verify speculative enqueue
	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, bi1, next0,
					   next1);
	}

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0;
	  vlib_buffer_t *b0;
	  u8 error0, next0;
	  u16 type0, orig_type0;
	  u16 outer_id0, inner_id0;
	  u32 match_flags0;
	  u32 old_sw_if_index0, new_sw_if_index0, len0;
	  vnet_hw_interface_t *hi0;
	  main_intf_t *main_intf0;
	  vlan_intf_t *vlan_intf0;
	  qinq_intf_t *qinq_intf0;
	  ethernet_header_t *e0;
	  u32 is_l20;
	  u64 dmacs[2];
	  u8 dmacs_bad[2];

	  // Prefetch next iteration
	  if (n_left_from > 1)
	    {
	      vlib_prefetch_buffer_header (b[1], STORE);
	      CLIB_PREFETCH (b[1]->data, CLIB_CACHE_LINE_BYTES, LOAD);
	    }

	  bi0 = from[0];
	  to_next[0] = bi0;
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;

	  b0 = b[0];
	  b += 1;

	  error0 = ETHERNET_ERROR_NONE;
	  e0 = vlib_buffer_get_current (b0);
	  type0 = clib_net_to_host_u16 (e0->type);

	  /* Set the L2 header offset for all packets */
	  vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
	  b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;

	  /* Speed-path for the untagged case */
	  if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
			    && !ethernet_frame_is_tagged (type0)))
	    {
	      main_intf_t *intf0;
	      subint_config_t *subint0;
	      u32 sw_if_index0;

	      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
	      is_l20 = cached_is_l2;

	      if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
		{
		  cached_sw_if_index = sw_if_index0;
		  hi = vnet_get_sup_hw_interface (vnm, sw_if_index0);
		  ei = ethernet_get_interface (em, hi->hw_if_index);
		  intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
		  subint0 = &intf0->untagged_subint;
		  cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
		}

	      if (PREDICT_TRUE (is_l20 != 0))
		{
		  vnet_buffer (b0)->l3_hdr_offset =
		    vnet_buffer (b0)->l2_hdr_offset +
		    sizeof (ethernet_header_t);
		  b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
		  next0 = em->l2_next;
		  vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
		}
	      else
		{
		  dmacs[0] = *(u64 *) e0;

		  if (ei && vec_len (ei->secondary_addrs))
		    ethernet_input_inline_dmac_check (hi, dmacs,
						      dmacs_bad,
						      1 /* n_packets */ ,
						      ei,
						      1 /* have_sec_dmac */ );
		  else
		    ethernet_input_inline_dmac_check (hi, dmacs,
						      dmacs_bad,
						      1 /* n_packets */ ,
						      ei,
						      0 /* have_sec_dmac */ );

		  if (dmacs_bad[0])
		    error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;

		  vlib_buffer_advance (b0, sizeof (ethernet_header_t));
		  determine_next_node (em, variant, 0, type0, b0,
				       &error0, &next0);
		}
	      goto ship_it0;
	    }

	  /* Slow-path for the tagged case */
	  parse_header (variant,
			b0,
			&type0,
			&orig_type0, &outer_id0, &inner_id0, &match_flags0);

	  old_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];

	  eth_vlan_table_lookups (em,
				  vnm,
				  old_sw_if_index0,
				  orig_type0,
				  outer_id0,
				  inner_id0,
				  &hi0,
				  &main_intf0, &vlan_intf0, &qinq_intf0);

	  identify_subint (hi0,
			   b0,
			   match_flags0,
			   main_intf0,
			   vlan_intf0,
			   qinq_intf0, &new_sw_if_index0, &error0, &is_l20);

	  // Save RX sw_if_index for later nodes
	  vnet_buffer (b0)->sw_if_index[VLIB_RX] =
	    error0 !=
	    ETHERNET_ERROR_NONE ? old_sw_if_index0 : new_sw_if_index0;

	  // Increment subinterface stats
	  // Note that interface-level counters have already been incremented
	  // prior to calling this function. Thus only subinterface counters
	  // are incremented here.
	  //
	  // Interface-level counters include packets received on the main
	  // interface and all subinterfaces. Subinterface-level counters
	  // include only those packets received on that subinterface.
	  // Increment stats only if the subint is valid and is not the
	  // main interface.
	  if ((new_sw_if_index0 != ~0)
	      && (new_sw_if_index0 != old_sw_if_index0))
	    {

	      len0 = vlib_buffer_length_in_chain (vm, b0) + b0->current_data
		- vnet_buffer (b0)->l2_hdr_offset;

	      stats_n_packets += 1;
	      stats_n_bytes += len0;

	      // Batch stat increments from the same subinterface so counters
	      // don't need to be incremented for every packet.
	      if (PREDICT_FALSE (new_sw_if_index0 != stats_sw_if_index))
		{
		  stats_n_packets -= 1;
		  stats_n_bytes -= len0;

		  if (new_sw_if_index0 != ~0)
		    vlib_increment_combined_counter
		      (vnm->interface_main.combined_sw_if_counters
		       + VNET_INTERFACE_COUNTER_RX,
		       thread_index, new_sw_if_index0, 1, len0);
		  if (stats_n_packets > 0)
		    {
		      vlib_increment_combined_counter
			(vnm->interface_main.combined_sw_if_counters
			 + VNET_INTERFACE_COUNTER_RX,
			 thread_index,
			 stats_sw_if_index, stats_n_packets, stats_n_bytes);
		      stats_n_packets = stats_n_bytes = 0;
		    }
		  stats_sw_if_index = new_sw_if_index0;
		}
	    }

	  if (variant == ETHERNET_INPUT_VARIANT_NOT_L2)
	    is_l20 = 0;

	  determine_next_node (em, variant, is_l20, type0, b0, &error0,
			       &next0);

	ship_it0:
	  b0->error = error_node->errors[error0];

	  // verify speculative enqueue
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, next0);
	}

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  // Increment any remaining batched stats
  if (stats_n_packets > 0)
    {
      vlib_increment_combined_counter
	(vnm->interface_main.combined_sw_if_counters
	 + VNET_INTERFACE_COUNTER_RX,
	 thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }
}

VLIB_NODE_FN (ethernet_input_node) (vlib_main_t * vm,
				    vlib_node_runtime_t * node,
				    vlib_frame_t * frame)
{
  vnet_main_t *vnm = vnet_get_main ();
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_packets = frame->n_vectors;

  ethernet_input_trace (vm, node, frame);

  if (frame->flags & ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX)
    {
      ethernet_input_frame_t *ef = vlib_frame_scalar_args (frame);
      int ip4_cksum_ok = (frame->flags & ETH_INPUT_FRAME_F_IP4_CKSUM_OK) != 0;
      vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, ef->hw_if_index);
      eth_input_single_int (vm, node, hi, from, n_packets, ip4_cksum_ok);
    }
  else
    ethernet_input_inline (vm, node, from, n_packets,
			   ETHERNET_INPUT_VARIANT_ETHERNET);
  return n_packets;
}

VLIB_NODE_FN (ethernet_input_type_node) (vlib_main_t * vm,
					 vlib_node_runtime_t * node,
					 vlib_frame_t * from_frame)
{
  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_packets = from_frame->n_vectors;
  ethernet_input_trace (vm, node, from_frame);
  ethernet_input_inline (vm, node, from, n_packets,
			 ETHERNET_INPUT_VARIANT_ETHERNET_TYPE);
  return n_packets;
}

VLIB_NODE_FN (ethernet_input_not_l2_node) (vlib_main_t * vm,
					   vlib_node_runtime_t * node,
					   vlib_frame_t * from_frame)
{
  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_packets = from_frame->n_vectors;
  ethernet_input_trace (vm, node, from_frame);
  ethernet_input_inline (vm, node, from, n_packets,
			 ETHERNET_INPUT_VARIANT_NOT_L2);
  return n_packets;
}


// Return the subinterface config struct for the given sw_if_index.
// Also return, via parameters, the appropriate match flags for the
// configured number of tags, and whether the config is unsupported.
// On error (unsupported or not ethernet) return 0.
static subint_config_t *
ethernet_sw_interface_get_config (vnet_main_t * vnm,
				  u32 sw_if_index,
				  u32 * flags, u32 * unsupported)
{
  ethernet_main_t *em = &ethernet_main;
  vnet_hw_interface_t *hi;
  vnet_sw_interface_t *si;
  main_intf_t *main_intf;
  vlan_table_t *vlan_table;
  qinq_table_t *qinq_table;
  subint_config_t *subint = 0;

  hi = vnet_get_sup_hw_interface (vnm, sw_if_index);

  if (!hi || (hi->hw_class_index != ethernet_hw_interface_class.index))
    {
      *unsupported = 0;
      goto done;		// non-ethernet interface
    }

  // ensure there's an entry for the main intf (shouldn't really be necessary)
  vec_validate (em->main_intfs, hi->hw_if_index);
  main_intf = vec_elt_at_index (em->main_intfs, hi->hw_if_index);

  // Locate the subint for the given ethernet config
  si = vnet_get_sw_interface (vnm, sw_if_index);

  if (si->type == VNET_SW_INTERFACE_TYPE_P2P)
    {
      p2p_ethernet_main_t *p2pm = &p2p_main;
      u32 p2pe_sw_if_index =
	p2p_ethernet_lookup (hi->hw_if_index, si->p2p.client_mac);
      if (p2pe_sw_if_index == ~0)
	{
	  pool_get (p2pm->p2p_subif_pool, subint);
	  si->p2p.pool_index = subint - p2pm->p2p_subif_pool;
	}
      else
	subint = vec_elt_at_index (p2pm->p2p_subif_pool, si->p2p.pool_index);
      *flags = SUBINT_CONFIG_P2P;
    }
  else if (si->type == VNET_SW_INTERFACE_TYPE_PIPE)
    {
      pipe_t *pipe;

      pipe = pipe_get (sw_if_index);
      subint = &pipe->subint;
      *flags = SUBINT_CONFIG_P2P;
    }
  else if (si->sub.eth.flags.default_sub)
    {
      subint = &main_intf->default_subint;
      *flags = SUBINT_CONFIG_MATCH_1_TAG |
	SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG;
    }
  else if ((si->sub.eth.flags.no_tags) || (si->sub.eth.raw_flags == 0))
    {
      // if no flags are set then this is a main interface
      // so treat as untagged
      subint = &main_intf->untagged_subint;
      *flags = SUBINT_CONFIG_MATCH_0_TAG;
    }
  else
    {
      // one or two tags
      // first get the vlan table
      if (si->sub.eth.flags.dot1ad)
	{
	  if (main_intf->dot1ad_vlans == 0)
	    {
	      // Allocate a vlan table from the pool
	      pool_get (em->vlan_pool, vlan_table);
	      main_intf->dot1ad_vlans = vlan_table - em->vlan_pool;
	    }
	  else
	    {
	      // Get ptr to existing vlan table
	      vlan_table =
		vec_elt_at_index (em->vlan_pool, main_intf->dot1ad_vlans);
	    }
	}
      else
	{			// dot1q
	  if (main_intf->dot1q_vlans == 0)
	    {
	      // Allocate a vlan table from the pool
	      pool_get (em->vlan_pool, vlan_table);
	      main_intf->dot1q_vlans = vlan_table - em->vlan_pool;
	    }
	  else
	    {
	      // Get ptr to existing vlan table
	      vlan_table =
		vec_elt_at_index (em->vlan_pool, main_intf->dot1q_vlans);
	    }
	}

      if (si->sub.eth.flags.one_tag)
	{
	  *flags = si->sub.eth.flags.exact_match ?
	    SUBINT_CONFIG_MATCH_1_TAG :
	    (SUBINT_CONFIG_MATCH_1_TAG |
	     SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG);

	  if (si->sub.eth.flags.outer_vlan_id_any)
	    {
	      // not implemented yet
	      *unsupported = 1;
	      goto done;
	    }
	  else
	    {
	      // a single vlan, a common case
	      subint =
		&vlan_table->vlans[si->sub.eth.
				   outer_vlan_id].single_tag_subint;
	    }

	}
      else
	{
	  // Two tags
	  *flags = si->sub.eth.flags.exact_match ?
	    SUBINT_CONFIG_MATCH_2_TAG :
	    (SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG);

	  if (si->sub.eth.flags.outer_vlan_id_any
	      && si->sub.eth.flags.inner_vlan_id_any)
	    {
	      // not implemented yet
	      *unsupported = 1;
	      goto done;
	    }

	  if (si->sub.eth.flags.inner_vlan_id_any)
	    {
	      // a specific outer and "any" inner
	      // don't need a qinq table for this
	      subint =
		&vlan_table->vlans[si->sub.eth.
				   outer_vlan_id].inner_any_subint;
	      if (si->sub.eth.flags.exact_match)
		{
		  *flags = SUBINT_CONFIG_MATCH_2_TAG;
		}
	      else
		{
		  *flags = SUBINT_CONFIG_MATCH_2_TAG |
		    SUBINT_CONFIG_MATCH_3_TAG;
		}
	    }
	  else
	    {
	      // a specific outer + specific inner vlan id, a common case

	      // get the qinq table
	      if (vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs == 0)
		{
		  // Allocate a qinq table from the pool
		  pool_get (em->qinq_pool, qinq_table);
		  vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs =
		    qinq_table - em->qinq_pool;
		}
	      else
		{
		  // Get ptr to existing qinq table
		  qinq_table =
		    vec_elt_at_index (em->qinq_pool,
				      vlan_table->vlans[si->sub.
							eth.outer_vlan_id].
				      qinqs);
		}
	      subint = &qinq_table->vlans[si->sub.eth.inner_vlan_id].subint;
	    }
	}
    }

done:
  return subint;
}

static clib_error_t *
ethernet_sw_interface_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
{
  subint_config_t *subint;
  u32 dummy_flags;
  u32 dummy_unsup;
  clib_error_t *error = 0;

  // Find the config for this subinterface
  subint =
    ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
				      &dummy_unsup);

  if (subint == 0)
    {
      // not implemented yet or not ethernet
      goto done;
    }

  subint->sw_if_index =
    ((flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ? sw_if_index : ~0);

done:
  return error;
}

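/* Register the callback above so it runs on admin up/down transitions
 * of any software interface. */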
VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (ethernet_sw_interface_up_down);


#ifndef CLIB_MARCH_VARIANT
// Set the L2/L3 mode for the subinterface
void
ethernet_sw_interface_set_l2_mode (vnet_main_t * vnm, u32 sw_if_index, u32 l2)
{
  subint_config_t *subint;
  u32 dummy_flags;
  u32 dummy_unsup;
  int is_port;
  vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, sw_if_index);

  is_port = !(sw->type == VNET_SW_INTERFACE_TYPE_SUB);

  // Find the config for this subinterface
  subint =
    ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
				      &dummy_unsup);

  if (subint == 0)
    {
      // unimplemented or not ethernet
      goto done;
    }

  // Double check that the config we found is for our interface (or the interface is down)
  ASSERT ((subint->sw_if_index == sw_if_index) | (subint->sw_if_index == ~0));

  if (l2)
    {
      subint->flags |= SUBINT_CONFIG_L2;
      if (is_port)
	subint->flags |=
	  SUBINT_CONFIG_MATCH_0_TAG | SUBINT_CONFIG_MATCH_1_TAG
	  | SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG;
    }
  else
    {
      subint->flags &= ~SUBINT_CONFIG_L2;
      if (is_port)
	subint->flags &=
	  ~(SUBINT_CONFIG_MATCH_1_TAG | SUBINT_CONFIG_MATCH_2_TAG
	    | SUBINT_CONFIG_MATCH_3_TAG);
    }

done:
  return;
}

/*
 * Set the L2/L3 mode for the subinterface regardless of port
 */
void
ethernet_sw_interface_set_l2_mode_noport (vnet_main_t * vnm,
					  u32 sw_if_index, u32 l2)
{
  subint_config_t *subint;
  u32 dummy_flags;
  u32 dummy_unsup;

  /* Find the config for this subinterface */
  subint =
    ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
				      &dummy_unsup);

  if (subint == 0)
    {
      /* unimplemented or not ethernet */
      goto done;
    }

  /*
   * Double check that the config we found is for our interface (or the
   * interface is down)
   */
  ASSERT ((subint->sw_if_index == sw_if_index) | (subint->sw_if_index == ~0));

  if (l2)
    {
      subint->flags |= SUBINT_CONFIG_L2;
    }
  else
    {
      subint->flags &= ~SUBINT_CONFIG_L2;
    }

done:
  return;
}
#endif

static clib_error_t *
ethernet_sw_interface_add_del (vnet_main_t * vnm,
			       u32 sw_if_index, u32 is_create)
{
  clib_error_t *error = 0;
  subint_config_t *subint;
  u32 match_flags;
  u32 unsupported = 0;

  // Find the config for this subinterface
  subint =
    ethernet_sw_interface_get_config (vnm, sw_if_index, &match_flags,
				      &unsupported);

  if (subint == 0)
    {
      // not implemented yet or not ethernet
      if (unsupported)
	{
	  // this is the NYI case
	  error = clib_error_return (0, "not implemented yet");
	}
      goto done;
    }

  if (!is_create)
    {
      subint->flags = 0;
      return error;
    }

  // Initialize the subint
  if (subint->flags & SUBINT_CONFIG_VALID)
    {
      // Error vlan already in use
      error = clib_error_return (0, "vlan is already in use");
    }
  else
    {
      // Note that config is L3 by default
      subint->flags = SUBINT_CONFIG_VALID | match_flags;
      subint->sw_if_index = ~0;	// because interfaces are initially down
    }

done:
  return error;
}

VNET_SW_INTERFACE_ADD_DEL_FUNCTION (ethernet_sw_interface_add_del);

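/* Build the error-string table from error.def via an X-macro: each
 * ethernet_error (SYMBOL, disposition, "string") entry contributes only
 * its string here, in the same order as the ETHERNET_ERROR_* enum (a
 * sketch; the NONE entry, for instance, expands to just its
 * human-readable string). */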
static char *ethernet_error_strings[] = {
#define ethernet_error(n,c,s) s,
#include "error.def"
#undef ethernet_error
};

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ethernet_input_node) = {
  .name = "ethernet-input",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .scalar_size = sizeof (ethernet_input_frame_t),
  .n_errors = ETHERNET_N_ERROR,
  .error_strings = ethernet_error_strings,
  .n_next_nodes = ETHERNET_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
    foreach_ethernet_input_next
#undef _
  },
  .format_buffer = format_ethernet_header_with_length,
  .format_trace = format_ethernet_input_trace,
  .unformat_buffer = unformat_ethernet_header,
};

VLIB_REGISTER_NODE (ethernet_input_type_node) = {
  .name = "ethernet-input-type",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_next_nodes = ETHERNET_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
    foreach_ethernet_input_next
#undef _
  },
};

VLIB_REGISTER_NODE (ethernet_input_not_l2_node) = {
  .name = "ethernet-input-not-l2",
  /* Takes a vector of packets. */
  .vector_size = sizeof (u32),
  .n_next_nodes = ETHERNET_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
    foreach_ethernet_input_next
#undef _
  },
};
/* *INDENT-ON* */

#ifndef CLIB_MARCH_VARIANT
void
ethernet_set_rx_redirect (vnet_main_t * vnm,
			  vnet_hw_interface_t * hi, u32 enable)
{
  // Ensure all packets go to ethernet-input (i.e. untagged ipv4 packets
  // don't go directly to ip4-input)
  vnet_hw_interface_rx_redirect_to_node
    (vnm, hi->hw_if_index, enable ? ethernet_input_node.index : ~0);
}


/*
 * Initialization and registration for the next_by_ethernet structure
 */

clib_error_t *
next_by_ethertype_init (next_by_ethertype_t * l3_next)
{
  l3_next->input_next_by_type = sparse_vec_new
    ( /* elt bytes */ sizeof (l3_next->input_next_by_type[0]),
     /* bits in index */ BITS (((ethernet_header_t *) 0)->type));

  vec_validate (l3_next->sparse_index_by_input_next_index,
		ETHERNET_INPUT_NEXT_DROP);
  vec_validate (l3_next->sparse_index_by_input_next_index,
		ETHERNET_INPUT_NEXT_PUNT);
  l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_DROP] =
    SPARSE_VEC_INVALID_INDEX;
  l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_PUNT] =
    SPARSE_VEC_INVALID_INDEX;

  /*
   * Make sure we don't wipe out an ethernet registration by mistake
   * Can happen if init function ordering constraints are missing.
   */
  if (CLIB_DEBUG > 0)
    {
      ethernet_main_t *em = &ethernet_main;
      ASSERT (em->next_by_ethertype_register_called == 0);
    }

  return 0;
}

// Add an ethertype -> next index mapping to the structure
clib_error_t *
next_by_ethertype_register (next_by_ethertype_t * l3_next,
			    u32 ethertype, u32 next_index)
{
  u32 i;
  u16 *n;
  ethernet_main_t *em = &ethernet_main;

  if (CLIB_DEBUG > 0)
    {
      ethernet_main_t *em = &ethernet_main;
      em->next_by_ethertype_register_called = 1;
    }

  /* Setup ethernet type -> next index sparse vector mapping. */
  n = sparse_vec_validate (l3_next->input_next_by_type, ethertype);
  n[0] = next_index;

  /* Rebuild next index -> sparse index inverse mapping when sparse vector
     is updated. */
  vec_validate (l3_next->sparse_index_by_input_next_index, next_index);
  for (i = 1; i < vec_len (l3_next->input_next_by_type); i++)
    l3_next->
      sparse_index_by_input_next_index[l3_next->input_next_by_type[i]] = i;

  // do not allow the cached next indices to be updated if L3
  // redirect is enabled, as it will have overwritten them
  if (!em->redirect_l3)
    {
      // Cache common ethertypes directly
      if (ethertype == ETHERNET_TYPE_IP4)
	{
	  l3_next->input_next_ip4 = next_index;
	}
      else if (ethertype == ETHERNET_TYPE_IP6)
	{
	  l3_next->input_next_ip6 = next_index;
	}
      else if (ethertype == ETHERNET_TYPE_MPLS)
	{
	  l3_next->input_next_mpls = next_index;
	}
    }
  return 0;
}
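
/* Illustrative call (mirroring what ethernet_register_input_type ()
 * below does for each registered type):
 *   next_by_ethertype_register (&em->l3_next, type, ti->next_index);
 * maps one ethertype to a next-node index for untagged L3 dispatch. */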

void
ethernet_input_init (vlib_main_t * vm, ethernet_main_t * em)
{
  __attribute__ ((unused)) vlan_table_t *invalid_vlan_table;
  __attribute__ ((unused)) qinq_table_t *invalid_qinq_table;

  ethernet_setup_node (vm, ethernet_input_node.index);
  ethernet_setup_node (vm, ethernet_input_type_node.index);
  ethernet_setup_node (vm, ethernet_input_not_l2_node.index);

  next_by_ethertype_init (&em->l3_next);

  // Initialize pools and vector for vlan parsing
  vec_validate (em->main_intfs, 10);	// 10 main interfaces
  pool_alloc (em->vlan_pool, 10);
  pool_alloc (em->qinq_pool, 1);

  // The first entry in the vlan pool is always reserved as an invalid table
  pool_get (em->vlan_pool, invalid_vlan_table);	// first id = 0
  // The first entry in the qinq pool is always reserved as an invalid table
  pool_get (em->qinq_pool, invalid_qinq_table);	// first id = 0
}

void
ethernet_register_input_type (vlib_main_t * vm,
			      ethernet_type_t type, u32 node_index)
{
  ethernet_main_t *em = &ethernet_main;
  ethernet_type_info_t *ti;
  u32 i;

  {
    clib_error_t *error = vlib_call_init_function (vm, ethernet_init);
    if (error)
      clib_error_report (error);
  }

  ti = ethernet_get_type_info (em, type);
  if (ti == 0)
    {
      clib_warning ("type_info NULL for type %d", type);
      return;
    }
  ti->node_index = node_index;
  ti->next_index = vlib_node_add_next (vm,
				       ethernet_input_node.index, node_index);
  i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
  ASSERT (i == ti->next_index);

  i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
  ASSERT (i == ti->next_index);

  // Add the L3 node for this ethertype to the next nodes structure
  next_by_ethertype_register (&em->l3_next, type, ti->next_index);

  // Call the registration functions for other nodes that want a mapping
  l2bvi_register_input_type (vm, type, node_index);
}
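
/* Usage sketch (hypothetical caller): an L3 protocol registers its input
 * node once at init time, e.g.
 *   ethernet_register_input_type (vm, ETHERNET_TYPE_IP4,
 *                                 ip4_input_node.index);
 * after which matching untagged frames are dispatched straight to it. */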

void
ethernet_register_l2_input (vlib_main_t * vm, u32 node_index)
{
  ethernet_main_t *em = &ethernet_main;
  u32 i;

  em->l2_next =
    vlib_node_add_next (vm, ethernet_input_node.index, node_index);

  /*
   * Even if we never use these arcs, we have to align the next indices...
   */
  i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);

  ASSERT (i == em->l2_next);

  i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
  ASSERT (i == em->l2_next);
}

// Register a next node for L3 redirect, and enable L3 redirect
void
ethernet_register_l3_redirect (vlib_main_t * vm, u32 node_index)
{
  ethernet_main_t *em = &ethernet_main;
  u32 i;

  em->redirect_l3 = 1;
  em->redirect_l3_next = vlib_node_add_next (vm,
					     ethernet_input_node.index,
					     node_index);
  /*
   * Change the cached next nodes to the redirect node
   */
  em->l3_next.input_next_ip4 = em->redirect_l3_next;
  em->l3_next.input_next_ip6 = em->redirect_l3_next;
  em->l3_next.input_next_mpls = em->redirect_l3_next;

  /*
   * Even if we never use these arcs, we have to align the next indices...
   */
  i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);

  ASSERT (i == em->redirect_l3_next);

  i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);

  ASSERT (i == em->redirect_l3_next);
}
#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */