aboutsummaryrefslogtreecommitdiffstats
path: root/extras/emacs/pipe-skel.el
blob: bfae58dc92260892241b8f6c8cc4af20582183c5 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
;;; Copyright (c) 2016 Cisco and/or its affiliates.
;;; Licensed under the Apache License, Version 2.0 (the "License");
;;; you may not use this file except in compliance with the License.
;;; You may obtain a copy of the License at:
;;;
;;;     http://www.apache.org/licenses/LICENSE-2.0
;;;
;;; Unless required by applicable law or agreed to in writing, software
;;; distributed under the License is distributed on an "AS IS" BASIS,
;;; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
;;; See the License for the specific language governing permissions and
;;; limitations under the License.

;;; pipe-skel.el - pipelined graph node skeleton

(require 'skeleton)

;;; Prompt for a node name and pipeline-stage count, then insert a
;;; complete pipelined graph node implementation.  `node-name' is used
;;; verbatim both as a C identifier prefix and (with "-node" appended)
;;; as the graph node name, so it should be a valid C identifier.
;;; Fixed: the generated code previously hardcoded EXAMPLE_ERROR_ERROR1
;;; and `example_node' instead of substituting the entered node name.
(define-skeleton skel-pipeline-node
"Insert a skeleton pipelined graph node"
nil
'(setq node-name (skeleton-read "Node Name: "))
'(setq uc-node-name (upcase node-name))
'(setq nstages (skeleton-read "Number of pipeline stages: "))
"
#include <vlib/vlib.h>
#include <vppinfra/error.h>

/*
 * Dump these counters via the \"show error\" CLI command 
 * FIXME: Add packet counter / error strings as desired
 */

#define foreach_" node-name "_error \\
_(ERROR1, \"sample counter/ error string\")

static char * " node-name "_error_strings[] = {
#define _(sym,string) string,
  foreach_" node-name "_error
#undef _
};

/*
 * packet error / counter enumeration
 *
 * To count and drop a vlib_buffer_t *b:
 *
 *     Set b->error = node->errors[" uc-node-name "_ERROR_xxx];
 *     last_stage returns a disposition index bound to \"error-drop\"
 * 
 * To manually increment the specific counter " uc-node-name "_ERROR_ERROR1
 *
 *  vlib_node_t *n = vlib_get_node (vm, " node-name "_node.index);
 *  u32 node_counter_base_index = n->error_heap_index;
 *  vlib_error_main_t * em = &vm->error_main;
 *  em->counters[node_counter_base_index + " uc-node-name "_ERROR_ERROR1] += 1;
 * 
 */

typedef enum {
#define _(sym,str) " uc-node-name "_ERROR_##sym,
    foreach_" node-name "_error
#undef _
    " uc-node-name "_N_ERROR,
} " node-name "_error_t;

/*
 * enumeration of per-packet dispositions
 * FIXME: add dispositions as desired
 */

typedef enum { \n"
"    " uc-node-name "_NEXT_NORMAL,\n"
"    " uc-node-name "_N_NEXT,
} " node-name "_next_t;

#define NSTAGES " nstages "

/* 
 * Use the generic buffer metadata + first line of packet data prefetch
 * stage function from <api/pipeline.h>. This is usually a Good Idea.
 */
#define stage0 generic_stage0

/* 
 * FIXME: add stage functions. Here is the function prototype:
 * 
 * static inline void stageN (vlib_main_t * vm,
 *                            vlib_node_runtime_t * node,
 *                            u32 buffer_index)
 */

/*
 * FIXME: the last pipeline stage returns the desired pkt next node index,
 * from the " node-name "_next_t enum above
 */
static inline u32 last_stage (vlib_main_t *vm, vlib_node_runtime_t *node,
                              u32 bi)
{
    vlib_buffer_t *b = vlib_get_buffer (vm, bi);

    b->error = node->errors[" uc-node-name "_ERROR_ERROR1];

    return " uc-node-name "_NEXT_NORMAL;
}

#include <api/pipeline.h>

static uword " node-name "_node_fn (vlib_main_t * vm,
                              vlib_node_runtime_t * node,
                              vlib_frame_t * frame)
{
    return dispatch_pipeline (vm, node, frame);
}

static VLIB_REGISTER_NODE (" node-name "_node) = {
  .function = " node-name "_node_fn,
  .name = \"" node-name "-node\",
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
  
  .n_errors = ARRAY_LEN(" node-name "_error_strings),
  .error_strings = " node-name "_error_strings,

  .n_next_nodes = " uc-node-name "_N_NEXT,

  /* edit / add dispositions here */
  .next_nodes = {
        [" uc-node-name "_NEXT_NORMAL] = \"error-drop\",
  },
};

/* 
 * packet generator definition to push superframes of data into the
 * new graph node. Cut and paste into <file>, then
 * \"exec <file>\", \"pa enable test\" at the QVNET prompt...
 * 
packet-generator new {
  name test
  limit 100
  node " node-name "-node
  size 374-374
  data { hex 0x02b46b96000100096978676265000500bf436973636f20494f5320536f6674776172652c2043333735304520536f66747761726520284333373530452d554e4956455253414c2d4d292c2056657273696f6e2031322e32283335295345352c2052454c4541534520534f4654574152452028666331290a436f707972696768742028632920313938362d3230303720627920436973636f2053797374656d732c20496e632e0a436f6d70696c6564205468752031392d4a756c2d30372031363a3137206279206e616368656e00060018636973636f2057532d4333373530452d3234544400020011000000010101cc0004000000000003001b54656e4769676162697445746865726e6574312f302f3100040008000000280008002400000c011200000000ffffffff010221ff000000000000001e7a50f000ff000000090004000a00060001000b0005010012000500001300050000160011000000010101cc000400000000001a00100000000100000000ffffffff }
}
 */
")
ref='#n602'>602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644
/*
 * l2_in_out_feat_arc.c : layer 2 input/output acl processing
 *
 * Copyright (c) 2013,2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ethernet/packet.h>
#include <vnet/ip/ip_packet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vlib/cli.h>
#include <vnet/l2/l2_input.h>
#include <vnet/l2/l2_output.h>
#include <vnet/l2/feat_bitmap.h>
#include <vnet/l2/l2_in_out_feat_arc.h>

#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vppinfra/cache.h>


/*
 * Module state, indexed by table group (IN_OUT_FEAT_ARC_INPUT_TABLE_GROUP
 * or IN_OUT_FEAT_ARC_OUTPUT_TABLE_GROUP) in the first array dimension.
 */
typedef struct
{

  /* Next nodes for each feature */
  u32 feat_next_node_index[IN_OUT_FEAT_ARC_N_TABLE_GROUPS][32];
  /* Feature arc index for IPv4 packets, per table group */
  u8 ip4_feat_arc_index[IN_OUT_FEAT_ARC_N_TABLE_GROUPS];
  /* Feature arc index for IPv6 packets, per table group */
  u8 ip6_feat_arc_index[IN_OUT_FEAT_ARC_N_TABLE_GROUPS];
  /* Feature arc index for non-IP packets, per table group */
  u8 nonip_feat_arc_index[IN_OUT_FEAT_ARC_N_TABLE_GROUPS];
  /* NOTE(review): next_slot is not referenced anywhere in this file --
   * confirm whether it is still needed */
  u32 next_slot[IN_OUT_FEAT_ARC_N_TABLE_GROUPS];

  /* convenience variables */
  vlib_main_t *vlib_main;
  vnet_main_t *vnet_main;
} l2_in_out_feat_arc_main_t __attribute__ ((aligned (CLIB_CACHE_LINE_BYTES)));

/*
 * Per-packet trace record, filled in by maybe_trace_xN and rendered by
 * format_l2_in_out_feat_arc_trace.
 */
typedef struct
{
  u32 sw_if_index;		/* interface index; ~0 at the arc tail */
  u32 next_index;		/* chosen next node index */
  u32 feature_bitmap;		/* remaining l2 feature bitmap */
  u16 ethertype;		/* packet ethertype; 0 at the arc tail */
  u8 arc_head;			/* 1 = arc head node, 0 = arc end node */
} l2_in_out_feat_arc_trace_t;

/* packet trace format function */
/* packet trace format function shared by the input and output variants;
 * is_output only selects the printed label */
static u8 *
format_l2_in_out_feat_arc_trace (u8 * s, u32 is_output, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  l2_in_out_feat_arc_trace_t *tr =
    va_arg (*args, l2_in_out_feat_arc_trace_t *);
  const char *label = is_output ? "OUT-FEAT-ARC" : "IN-FEAT-ARC";

  return format (s,
		 "%s: head %d feature_bitmap %x ethertype %x sw_if_index %d, next_index %d",
		 label, tr->arc_head, tr->feature_bitmap, tr->ethertype,
		 tr->sw_if_index, tr->next_index);
}

/* Input-side trace formatter: selects the "IN-FEAT-ARC" label. */
static u8 *
format_l2_in_feat_arc_trace (u8 * s, va_list * args)
{
  return format_l2_in_out_feat_arc_trace
    (s, IN_OUT_FEAT_ARC_INPUT_TABLE_GROUP, args);
}

/* Output-side trace formatter: selects the "OUT-FEAT-ARC" label. */
static u8 *
format_l2_out_feat_arc_trace (u8 * s, va_list * args)
{
  return format_l2_in_out_feat_arc_trace
    (s, IN_OUT_FEAT_ARC_OUTPUT_TABLE_GROUP, args);
}


#define foreach_l2_in_feat_arc_error                   \
_(DEFAULT, "in default")                         \


#define foreach_l2_out_feat_arc_error                   \
_(DEFAULT, "out default")                         \


typedef enum
{
#define _(sym,str) L2_IN_FEAT_ARC_ERROR_##sym,
  foreach_l2_in_feat_arc_error
#undef _
    L2_IN_FEAT_ARC_N_ERROR,
} l2_in_feat_arc_error_t;

static char *l2_in_feat_arc_error_strings[] = {
#define _(sym,string) string,
  foreach_l2_in_feat_arc_error
#undef _
};

typedef enum
{
#define _(sym,str) L2_OUT_FEAT_ARC_ERROR_##sym,
  foreach_l2_out_feat_arc_error
#undef _
    L2_OUT_FEAT_ARC_N_ERROR,
} l2_out_feat_arc_error_t;

static char *l2_out_feat_arc_error_strings[] = {
#define _(sym,string) string,
  foreach_l2_out_feat_arc_error
#undef _
};

extern l2_in_out_feat_arc_main_t l2_in_out_feat_arc_main;

/* The single definition lives in the default march variant only */
#ifndef CLIB_MARCH_VARIANT
l2_in_out_feat_arc_main_t l2_in_out_feat_arc_main;
#endif /* CLIB_MARCH_VARIANT */

/* Load a u16 via plain dereference.  NOTE(review): this assumes the
 * platform tolerates potentially unaligned, type-punned u16 loads --
 * confirm against the supported targets. */
#define get_u16(addr) ( *((u16 *)(addr)) )
/* Number of buffers processed per iteration of the main dispatch loop */
#define L2_FEAT_ARC_VEC_SIZE 2

/* Warm the cache with the headers of the next vector_sz buffers. */
static_always_inline void
buffer_prefetch_xN (int vector_sz, vlib_buffer_t ** b)
{
  int i;

  for (i = 0; i < vector_sz; i++)
    {
      CLIB_PREFETCH (b[i], CLIB_CACHE_LINE_BYTES, STORE);
    }
}

/* Collect the per-buffer interface indices: the TX interface for the
 * output arc, the RX interface for the input arc. */
static_always_inline void
get_sw_if_index_xN (int vector_sz, int is_output, vlib_buffer_t ** b,
		    u32 * out_sw_if_index)
{
  const int dir = is_output ? VLIB_TX : VLIB_RX;
  int i;

  for (i = 0; i < vector_sz; i++)
    out_sw_if_index[i] = vnet_buffer (b[i])->sw_if_index[dir];
}

/* Extract the ethertype of each buffer: the two bytes immediately
 * preceding the L3 header (current data + l2_len). */
static_always_inline void
get_ethertype_xN (int vector_sz, int is_output, vlib_buffer_t ** b,
		  u16 * out_ethertype)
{
  int i;

  for (i = 0; i < vector_sz; i++)
    {
      u8 *l3 =
	(u8 *) vlib_buffer_get_current (b[i]) + vnet_buffer (b[i])->l2.l2_len;
      out_ethertype[i] = clib_net_to_host_u16 (get_u16 (l3 - 2));
    }
}


/* Arc-head disposition: classify each packet by ethertype, then either
 * start the matching feature arc (if the interface has features enabled
 * on it) or fall through to the next l2 feature in the bitmap. */
static_always_inline void
set_next_in_arc_head_xN (int vector_sz, int is_output, u32 * next_nodes,
			 vlib_buffer_t ** b, u32 * sw_if_index,
			 u16 * ethertype, u8 ip4_arc, u8 ip6_arc,
			 u8 nonip_arc, u16 * out_next)
{
  int i;

  for (i = 0; i < vector_sz; i++)
    {
      u32 next = 0;
      u8 arc;

      /* pick the feature arc matching the packet's L3 protocol */
      if (ethertype[i] == ETHERNET_TYPE_IP4)
	arc = ip4_arc;
      else if (ethertype[i] == ETHERNET_TYPE_IP6)
	arc = ip6_arc;
      else
	arc = nonip_arc;

      if (PREDICT_TRUE (vnet_have_features (arc, sw_if_index[i])))
	vnet_feature_arc_start (arc, sw_if_index[i], &next, b[i]);
      else
	next = vnet_l2_feature_next (b[i], next_nodes,
				     is_output ? L2OUTPUT_FEAT_OUTPUT_FEAT_ARC
				     : L2INPUT_FEAT_INPUT_FEAT_ARC);

      out_next[i] = next;
    }
}

/* Arc-tail disposition: simply continue along the l2 feature bitmap. */
static_always_inline void
set_next_in_arc_tail_xN (int vector_sz, int is_output, u32 * next_nodes,
			 vlib_buffer_t ** b, u16 * out_next)
{
  const u32 feat = is_output ? L2OUTPUT_FEAT_OUTPUT_FEAT_ARC
    : L2INPUT_FEAT_INPUT_FEAT_ARC;
  int i;

  for (i = 0; i < vector_sz; i++)
    out_next[i] = vnet_l2_feature_next (b[i], next_nodes, feat);
}


/* Add a trace record for every buffer that has tracing enabled.
 * sw_if_index/ethertype are only valid at the arc head, hence the
 * placeholder values at the tail. */
static_always_inline void
maybe_trace_xN (int vector_sz, int arc_head, vlib_main_t * vm,
		vlib_node_runtime_t * node, vlib_buffer_t ** b,
		u32 * sw_if_index, u16 * ethertype, u16 * next)
{
  int i;

  for (i = 0; i < vector_sz; i++)
    {
      l2_in_out_feat_arc_trace_t *tr;

      if (PREDICT_TRUE (!(b[i]->flags & VLIB_BUFFER_IS_TRACED)))
	continue;

      tr = vlib_add_trace (vm, node, b[i], sizeof (*tr));
      tr->arc_head = arc_head;
      tr->sw_if_index = arc_head ? sw_if_index[i] : ~0;
      tr->feature_bitmap = vnet_buffer (b[i])->l2.feature_bitmap;
      tr->ethertype = arc_head ? ethertype[i] : 0;
      tr->next_index = next[i];
    }
}

always_inline uword
l2_in_out_feat_arc_node_fn (vlib_main_t * vm,
			    vlib_node_runtime_t * node, vlib_frame_t * frame,
			    int is_output, vlib_node_registration_t * fa_node,
			    int arc_head, int do_trace)
{
  u32 n_left, *from;
  u16 nexts[VLIB_FRAME_SIZE], *next;
  u16 ethertypes[VLIB_FRAME_SIZE], *ethertype;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
  u32 sw_if_indices[VLIB_FRAME_SIZE], *sw_if_index;
  l2_in_out_feat_arc_main_t *fam = &l2_in_out_feat_arc_main;

  u8 ip4_arc_index = fam->ip4_feat_arc_index[is_output];
  u8 ip6_arc_index = fam->ip6_feat_arc_index[is_output];
  u8 nonip_arc_index = fam->nonip_feat_arc_index[is_output];
  u32 *next_node_indices = fam->feat_next_node_index[is_output];

  from = vlib_frame_vector_args (frame);
  vlib_get_buffers (vm, from, bufs, frame->n_vectors);
  /* set the initial values for the current buffer the next pointers */
  b = bufs;
  next = nexts;
  ethertype = ethertypes;
  sw_if_index = sw_if_indices;
  n_left = frame->n_vectors;

  CLIB_PREFETCH (next_node_indices, 2 * CLIB_CACHE_LINE_BYTES, LOAD);

  while (n_left > 3 * L2_FEAT_ARC_VEC_SIZE)
    {
      const int vec_sz = L2_FEAT_ARC_VEC_SIZE;
      /* prefetch next N buffers */
      buffer_prefetch_xN (vec_sz, b + 2 * vec_sz);

      if (arc_head)
	{
	  get_sw_if_index_xN (vec_sz, is_output, b, sw_if_index);
	  get_ethertype_xN (vec_sz, is_output, b, ethertype);
	  set_next_in_arc_head_xN (vec_sz, is_output, next_node_indices, b,
				   sw_if_index, ethertype, ip4_arc_index,
				   ip6_arc_index, nonip_arc_index, next);
	}
      else
	{
	  set_next_in_arc_tail_xN (vec_sz, is_output, next_node_indices, b,
				   next);
	}
      if (do_trace)
	maybe_trace_xN (vec_sz, arc_head, vm, node, b, sw_if_index, ethertype,
			next);

      next += vec_sz;
      b += vec_sz;
      sw_if_index += vec_sz;
      ethertype += vec_sz;

      n_left -= vec_sz;
    }

  while (n_left > 0)
    {
      const int vec_sz = 1;

      if (arc_head)
	{
	  get_sw_if_index_xN (vec_sz, is_output, b, sw_if_index);
	  get_ethertype_xN (vec_sz, is_output, b, ethertype);
	  set_next_in_arc_head_xN (vec_sz, is_output, next_node_indices, b,
				   sw_if_index, ethertype, ip4_arc_index,
				   ip6_arc_index, nonip_arc_index, next);
	}
      else
	{
	  set_next_in_arc_tail_xN (vec_sz, is_output, next_node_indices, b,
				   next);
	}
      if (do_trace)
	maybe_trace_xN (vec_sz, arc_head, vm, node, b, sw_if_index, ethertype,
			next);

      next += vec_sz;
      b += vec_sz;
      sw_if_index += vec_sz;
      ethertype += vec_sz;

      n_left -= vec_sz;
    }

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);

  return frame->n_vectors;
}

VLIB_NODE_FN (l2_in_feat_arc_node) (vlib_main_t * vm,
				    vlib_node_runtime_t * node,
				    vlib_frame_t * frame)
{
  /* Dispatch to a trace-enabled or trace-free specialization of the
   * shared worker; all worker flags are compile-time constants here. */
  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
    return l2_in_out_feat_arc_node_fn (vm, node, frame,
				       IN_OUT_FEAT_ARC_INPUT_TABLE_GROUP,
				       &l2_in_feat_arc_node,
				       1 /* head */ , 1 /* trace */ );

  return l2_in_out_feat_arc_node_fn (vm, node, frame,
				     IN_OUT_FEAT_ARC_INPUT_TABLE_GROUP,
				     &l2_in_feat_arc_node,
				     1 /* head */ , 0 /* trace */ );
}

VLIB_NODE_FN (l2_out_feat_arc_node) (vlib_main_t * vm,
				     vlib_node_runtime_t * node,
				     vlib_frame_t * frame)
{
  /* Output-side arc head; see l2_in_feat_arc_node for the dispatch
   * pattern. */
  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
    return l2_in_out_feat_arc_node_fn (vm, node, frame,
				       IN_OUT_FEAT_ARC_OUTPUT_TABLE_GROUP,
				       &l2_out_feat_arc_node,
				       1 /* head */ , 1 /* trace */ );

  return l2_in_out_feat_arc_node_fn (vm, node, frame,
				     IN_OUT_FEAT_ARC_OUTPUT_TABLE_GROUP,
				     &l2_out_feat_arc_node,
				     1 /* head */ , 0 /* trace */ );
}

VLIB_NODE_FN (l2_in_feat_arc_end_node) (vlib_main_t * vm,
					vlib_node_runtime_t * node,
					vlib_frame_t * frame)
{
  /* Input-side arc tail: continue along the l2 feature bitmap. */
  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
    return l2_in_out_feat_arc_node_fn (vm, node, frame,
				       IN_OUT_FEAT_ARC_INPUT_TABLE_GROUP,
				       &l2_in_feat_arc_end_node,
				       0 /* head */ , 1 /* trace */ );

  return l2_in_out_feat_arc_node_fn (vm, node, frame,
				     IN_OUT_FEAT_ARC_INPUT_TABLE_GROUP,
				     &l2_in_feat_arc_end_node,
				     0 /* head */ , 0 /* trace */ );
}

VLIB_NODE_FN (l2_out_feat_arc_end_node) (vlib_main_t * vm,
					 vlib_node_runtime_t * node,
					 vlib_frame_t * frame)
{
  /* Output-side arc tail: continue along the l2 feature bitmap. */
  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
    return l2_in_out_feat_arc_node_fn (vm, node, frame,
				       IN_OUT_FEAT_ARC_OUTPUT_TABLE_GROUP,
				       &l2_out_feat_arc_end_node,
				       0 /* head */ , 1 /* trace */ );

  return l2_in_out_feat_arc_node_fn (vm, node, frame,
				     IN_OUT_FEAT_ARC_OUTPUT_TABLE_GROUP,
				     &l2_out_feat_arc_end_node,
				     0 /* head */ , 0 /* trace */ );
}


#ifndef CLIB_MARCH_VARIANT
/* Turn the input or output feat-arc l2 feature on or off for an
 * interface, by setting the corresponding bit in its l2 bitmap. */
void
vnet_l2_in_out_feat_arc_enable_disable (u32 sw_if_index, int is_output,
					int enable_disable)
{
  const u32 on = (u32) enable_disable;

  if (is_output)
    {
      l2output_intf_bitmap_enable (sw_if_index,
				   L2OUTPUT_FEAT_OUTPUT_FEAT_ARC, on);
      return;
    }
  l2input_intf_bitmap_enable (sw_if_index, L2INPUT_FEAT_INPUT_FEAT_ARC, on);
}
#endif /* CLIB_MARCH_VARIANT */

/* *INDENT-OFF* */
/*
 * Feature arc registrations: one arc per table group (input/output) and
 * per L3 protocol class (ip4/ip6/non-ip).  Each arc starts at the
 * corresponding head node and records its assigned arc index into
 * l2_in_out_feat_arc_main for the datapath.
 *
 * Fixed: the ip6 and nonip registration identifiers were swapped
 * relative to their contents (l2_out_ip6_arc registered "l2-input-ip6",
 * etc.); the static names now match the arcs they register.
 */
VNET_FEATURE_ARC_INIT (l2_in_ip4_arc, static) =
{
  .arc_name  = "l2-input-ip4",
  .start_nodes = VNET_FEATURES ("l2-input-feat-arc"),
  .arc_index_ptr = &l2_in_out_feat_arc_main.ip4_feat_arc_index[IN_OUT_FEAT_ARC_INPUT_TABLE_GROUP],
};

VNET_FEATURE_ARC_INIT (l2_out_ip4_arc, static) =
{
  .arc_name  = "l2-output-ip4",
  .start_nodes = VNET_FEATURES ("l2-output-feat-arc"),
  .arc_index_ptr = &l2_in_out_feat_arc_main.ip4_feat_arc_index[IN_OUT_FEAT_ARC_OUTPUT_TABLE_GROUP],
};

VNET_FEATURE_ARC_INIT (l2_in_ip6_arc, static) =
{
  .arc_name  = "l2-input-ip6",
  .start_nodes = VNET_FEATURES ("l2-input-feat-arc"),
  .arc_index_ptr = &l2_in_out_feat_arc_main.ip6_feat_arc_index[IN_OUT_FEAT_ARC_INPUT_TABLE_GROUP],
};
VNET_FEATURE_ARC_INIT (l2_out_ip6_arc, static) =
{
  .arc_name  = "l2-output-ip6",
  .start_nodes = VNET_FEATURES ("l2-output-feat-arc"),
  .arc_index_ptr = &l2_in_out_feat_arc_main.ip6_feat_arc_index[IN_OUT_FEAT_ARC_OUTPUT_TABLE_GROUP],
};

VNET_FEATURE_ARC_INIT (l2_in_nonip_arc, static) =
{
  .arc_name  = "l2-input-nonip",
  .start_nodes = VNET_FEATURES ("l2-input-feat-arc"),
  .arc_index_ptr = &l2_in_out_feat_arc_main.nonip_feat_arc_index[IN_OUT_FEAT_ARC_INPUT_TABLE_GROUP],
};
VNET_FEATURE_ARC_INIT (l2_out_nonip_arc, static) =
{
  .arc_name  = "l2-output-nonip",
  .start_nodes = VNET_FEATURES ("l2-output-feat-arc"),
  .arc_index_ptr = &l2_in_out_feat_arc_main.nonip_feat_arc_index[IN_OUT_FEAT_ARC_OUTPUT_TABLE_GROUP],
};


/* *INDENT-ON* */


/* *INDENT-OFF* */
/* Input-side arc head node: classifies packets and starts feature arcs */
VLIB_REGISTER_NODE (l2_in_feat_arc_node) = {
  .name = "l2-input-feat-arc",
  .vector_size = sizeof (u32),
  .format_trace = format_l2_in_feat_arc_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(l2_in_feat_arc_error_strings),
  .error_strings = l2_in_feat_arc_error_strings,

};

/* Output-side arc head node */
VLIB_REGISTER_NODE (l2_out_feat_arc_node) = {
  .name = "l2-output-feat-arc",
  .vector_size = sizeof (u32),
  .format_trace = format_l2_out_feat_arc_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(l2_out_feat_arc_error_strings),
  .error_strings = l2_out_feat_arc_error_strings,

};

/* Input-side arc end node; sibling of the head so it shares next nodes */
VLIB_REGISTER_NODE (l2_in_feat_arc_end_node) = {
  .name = "l2-input-feat-arc-end",
  .vector_size = sizeof (u32),
  .format_trace = format_l2_in_feat_arc_trace,
  .sibling_of = "l2-input-feat-arc",
};

/* Output-side arc end node; sibling of the head so it shares next nodes */
VLIB_REGISTER_NODE (l2_out_feat_arc_end_node) = {
  .name = "l2-output-feat-arc-end",
  .vector_size = sizeof (u32),
  .format_trace = format_l2_out_feat_arc_trace,
  .sibling_of = "l2-output-feat-arc",
};

/*
 * Register the "-end" node as the last feature on every arc, so each
 * arc terminates by returning packets to the l2 feature bitmap walk.
 */
VNET_FEATURE_INIT (l2_in_ip4_arc_end, static) =
{
  .arc_name = "l2-input-ip4",
  .node_name = "l2-input-feat-arc-end",
  .runs_before = 0,     /* not before any other features */
};

VNET_FEATURE_INIT (l2_out_ip4_arc_end, static) =
{
  .arc_name = "l2-output-ip4",
  .node_name = "l2-output-feat-arc-end",
  .runs_before = 0,     /* not before any other features */
};

VNET_FEATURE_INIT (l2_in_ip6_arc_end, static) =
{
  .arc_name = "l2-input-ip6",
  .node_name = "l2-input-feat-arc-end",
  .runs_before = 0,     /* not before any other features */
};


VNET_FEATURE_INIT (l2_out_ip6_arc_end, static) =
{
  .arc_name = "l2-output-ip6",
  .node_name = "l2-output-feat-arc-end",
  .runs_before = 0,     /* not before any other features */
};

VNET_FEATURE_INIT (l2_in_nonip_arc_end, static) =
{
  .arc_name = "l2-input-nonip",
  .node_name = "l2-input-feat-arc-end",
  .runs_before = 0,     /* not before any other features */
};


VNET_FEATURE_INIT (l2_out_nonip_arc_end, static) =
{
  .arc_name = "l2-output-nonip",
  .node_name = "l2-output-feat-arc-end",
  .runs_before = 0,     /* not before any other features */
};
/* *INDENT-ON* */


#ifndef CLIB_MARCH_VARIANT
/* Module init: cache the main pointers and precompute the next-node
 * index tables for the input and output l2 feature bitmaps. */
clib_error_t *
l2_in_out_feat_arc_init (vlib_main_t * vm)
{
  l2_in_out_feat_arc_main_t *fam = &l2_in_out_feat_arc_main;

  fam->vlib_main = vm;
  fam->vnet_main = vnet_get_main ();

  /* input-side feature next nodes */
  feat_bitmap_init_next_nodes (vm, l2_in_feat_arc_end_node.index,
			       L2INPUT_N_FEAT, l2input_get_feat_names (),
			       fam->feat_next_node_index
			       [IN_OUT_FEAT_ARC_INPUT_TABLE_GROUP]);
  /* output-side feature next nodes */
  feat_bitmap_init_next_nodes (vm, l2_out_feat_arc_end_node.index,
			       L2OUTPUT_N_FEAT, l2output_get_feat_names (),
			       fam->feat_next_node_index
			       [IN_OUT_FEAT_ARC_OUTPUT_TABLE_GROUP]);
  return 0;
}


/* Return nonzero when any of the three arcs (ip4/ip6/nonip) of the
 * given table group has features enabled on the interface. */
static int
l2_has_features (u32 sw_if_index, int is_output)
{
  l2_in_out_feat_arc_main_t *fam = &l2_in_out_feat_arc_main;

  return (vnet_have_features (fam->ip4_feat_arc_index[is_output],
			      sw_if_index)
	  || vnet_have_features (fam->ip6_feat_arc_index[is_output],
				 sw_if_index)
	  || vnet_have_features (fam->nonip_feat_arc_index[is_output],
				 sw_if_index));
}

/* Does arc_index belong to one of the output-side arcs? */
static int
l2_is_output_arc (u8 arc_index)
{
  l2_in_out_feat_arc_main_t *fam = &l2_in_out_feat_arc_main;
  const int g = IN_OUT_FEAT_ARC_OUTPUT_TABLE_GROUP;

  if (fam->ip4_feat_arc_index[g] == arc_index)
    return 1;
  if (fam->ip6_feat_arc_index[g] == arc_index)
    return 1;
  return fam->nonip_feat_arc_index[g] == arc_index;
}

/* Does arc_index belong to one of the input-side arcs? */
static int
l2_is_input_arc (u8 arc_index)
{
  l2_in_out_feat_arc_main_t *fam = &l2_in_out_feat_arc_main;
  const int g = IN_OUT_FEAT_ARC_INPUT_TABLE_GROUP;

  if (fam->ip4_feat_arc_index[g] == arc_index)
    return 1;
  if (fam->ip6_feat_arc_index[g] == arc_index)
    return 1;
  return fam->nonip_feat_arc_index[g] == arc_index;
}

/*
 * Enable/disable a feature on an l2 arc, then (de)activate the feat-arc
 * l2 feature on the interface whenever the arc's feature presence
 * transitions between empty and non-empty.
 * Returns 0 on success or a VNET_API_ERROR_* code.
 */
int
vnet_l2_feature_enable_disable (const char *arc_name, const char *node_name,
				u32 sw_if_index, int enable_disable,
				void *feature_config,
				u32 n_feature_config_bytes)
{
  u8 arc = vnet_get_feature_arc_index (arc_name);

  if (arc == (u8) ~ 0)
    return VNET_API_ERROR_INVALID_VALUE;

  /* check the state before we tried to enable/disable */
  int before = vnet_have_features (arc, sw_if_index);

  int rv = vnet_feature_enable_disable (arc_name, node_name, sw_if_index,
					enable_disable, feature_config,
					n_feature_config_bytes);
  if (rv)
    return rv;

  /* only touch the interface l2 bitmap on an empty<->non-empty flip */
  if (before == vnet_have_features (arc, sw_if_index))
    return 0;

  if (l2_is_output_arc (arc))
    vnet_l2_in_out_feat_arc_enable_disable (sw_if_index, 1,
					    l2_has_features (sw_if_index,
							     1));
  if (l2_is_input_arc (arc))
    vnet_l2_in_out_feat_arc_enable_disable (sw_if_index, 0,
					    l2_has_features (sw_if_index,
							     0));
  return 0;
}


VLIB_INIT_FUNCTION (l2_in_out_feat_arc_init);
#endif /* CLIB_MARCH_VARIANT */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */