/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * physmem.h: virtual <-> physical memory mapping for VLIB buffers
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 *  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 *  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 *  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 *  NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 *  LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 *  OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 *  WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef included_vlib_physmem_funcs_h
#define included_vlib_physmem_funcs_h

#include <vppinfra/clib.h>
#include <vppinfra/clib_error.h>
#include <vlib/physmem.h>
#include <vlib/main.h>

clib_error_t *vlib_physmem_init (vlib_main_t * vm);
clib_error_t *vlib_physmem_shared_map_create (vlib_main_t * vm, char *name,
					      uword size, u32 log2_page_sz,
					      u32 numa_node, u32 * map_index);

vlib_physmem_map_t *vlib_physmem_get_map (vlib_main_t * vm, u32 index);

/** Allocate @a n_bytes of physical memory with the requested alignment.
 *
 * Thin wrapper over the pmalloc allocator hanging off @a vm.
 * @return pointer to the allocation, or 0 on failure.
 */
always_inline void *
vlib_physmem_alloc_aligned (vlib_main_t * vm, uword n_bytes, uword alignment)
{
  return clib_pmalloc_alloc_aligned (vm->physmem_main.pmalloc_main,
				     n_bytes, alignment);
}

/** Allocate @a n_bytes of aligned physical memory on a specific numa node.
 *
 * Same as vlib_physmem_alloc_aligned() but forwards @a numa_node to the
 * pmalloc allocator.  @return pointer to the allocation, or 0 on failure.
 */
always_inline void *
vlib_physmem_alloc_aligned_on_numa (vlib_main_t * vm, uword n_bytes,
				    uword alignment, u32 numa_node)
{
  return clib_pmalloc_alloc_aligned_on_numa (vm->physmem_main.pmalloc_main,
					     n_bytes, alignment, numa_node);
}

/* By default allocate I/O memory with cache line alignment. */
/** Allocate @a n_bytes of physical memory, cache-line aligned. */
always_inline void *
vlib_physmem_alloc (vlib_main_t * vm, uword n_bytes)
{
  uword align = CLIB_CACHE_LINE_BYTES;
  return vlib_physmem_alloc_aligned (vm, n_bytes, align);
}

/** Allocate @a n_bytes from the arena of a specific physmem map.
 *
 * @param vm                 vlib main
 * @param physmem_map_index  index of the map (see vlib_physmem_get_map)
 * @param n_bytes            allocation size in bytes
 * @param alignment          requested alignment in bytes
 * @return pointer to the allocation, or 0 on failure.
 */
always_inline void *
vlib_physmem_alloc_from_map (vlib_main_t * vm, u32 physmem_map_index,
			     uword n_bytes, uword alignment)
{
  clib_pmalloc_main_t *pm = vm->physmem_main.pmalloc_main;
  vlib_physmem_map_t *map = vlib_physmem_get_map (vm, physmem_map_index);
  /* Fix: honor the caller-supplied alignment; the previous code
     silently replaced it with CLIB_CACHE_LINE_BYTES. */
  return clib_pmalloc_alloc_from_arena (pm, map->base, n_bytes, alignment);
}

/** Free physical memory previously returned by a vlib_physmem_alloc_*
 *  call.  Passing a null pointer is a no-op. */
always_inline void
vlib_physmem_free (vlib_main_t * vm, void *p)
{
  if (p == 0)
    return;
  clib_pmalloc_free (vm->physmem_main.pmalloc_main, p);
}

/** Return the pmalloc page index containing @a mem. */
always_inline u64
vlib_physmem_get_page_index (vlib_main_t * vm, void *mem)
{
  return clib_pmalloc_get_page_index (vm->physmem_main.pmalloc_main, mem);
}

/** Return the physical address backing virtual address @a mem. */
always_inline u64
vlib_physmem_get_pa (vlib_main_t * vm, void *mem)
{
  return clib_pmalloc_get_pa (vm->physmem_main.pmalloc_main, mem);
}

/** Return the most recent physmem error.
 *
 * Currently a stub: always manufactures a generic "unknown error".
 * Parameter spelling changed from 'struct vlib_main_t *' to the
 * 'vlib_main_t *' typedef used by every other function in this header
 * (equivalent type, consistent style).
 */
always_inline clib_error_t *
vlib_physmem_last_error (vlib_main_t * vm)
{
  return clib_error_return (0, "unknown error");
}

#endif /* included_vlib_physmem_funcs_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __included_vnet_classify_h__
#define __included_vnet_classify_h__

#include <stdarg.h>

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ethernet/packet.h>
#include <vnet/ip/ip_packet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vlib/cli.h>
#include <vnet/api_errno.h>	/* for API error numbers */

#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vppinfra/cache.h>
#include <vppinfra/xxhash.h>

extern vlib_node_registration_t ip4_classify_node;
extern vlib_node_registration_t ip6_classify_node;

#define CLASSIFY_TRACE 0

#define U32X4_ALIGNED(p) PREDICT_TRUE((((intptr_t)p) & 0xf) == 0)

/*
 * Classify table option to process packets
 *  CLASSIFY_FLAG_USE_CURR_DATA:
 *   - classify packets starting from VPP node’s current data pointer
 */
#define CLASSIFY_FLAG_USE_CURR_DATA              1

/*
 * Classify session action
 *  CLASSIFY_ACTION_SET_IP4_FIB_INDEX:
 *   - Classified IP packets will be looked up
 *     from the specified ipv4 fib table
 *  CLASSIFY_ACTION_SET_IP6_FIB_INDEX:
 *   - Classified IP packets will be looked up
 *     from the specified ipv6 fib table
 */
typedef enum vnet_classify_action_t_
{
  CLASSIFY_ACTION_SET_IP4_FIB_INDEX = 1,	/* ip4 lookup in specified fib */
  CLASSIFY_ACTION_SET_IP6_FIB_INDEX = 2,	/* ip6 lookup in specified fib */
  /* attach the session's 'metadata' value (see vnet_classify_entry_t) --
     exact consumer not visible here */
  CLASSIFY_ACTION_SET_METADATA = 3,
} __attribute__ ((packed)) vnet_classify_action_t;	/* packed: 1 octet */

struct _vnet_classify_main;
typedef struct _vnet_classify_main vnet_classify_main_t;

#define foreach_size_in_u32x4                   \
_(1)                                            \
_(2)                                            \
_(3)                                            \
_(4)                                            \
_(5)

/* *INDENT-OFF* */
/* One classify session.  Variable-size record: t->match_n_vectors u32x4
   key vectors follow the fixed header (see vnet_classify_entry_at_index
   for the stride arithmetic). */
typedef CLIB_PACKED(struct _vnet_classify_entry {
  /* Graph node next index */
  u32 next_index;

  /* put into vnet_buffer(b)->l2_classfy.opaque_index */
  union {
    struct {
      u32 opaque_index;
      /* advance on hit, note it's a signed quantity... */
      i32 advance;
    };
    u64 opaque_count;
  };

  /* Really only need 1 bit */
  u8 flags;
#define VNET_CLASSIFY_ENTRY_FREE	(1<<0)

  /* Session action, plus its argument for the SET_METADATA action */
  vnet_classify_action_t action;
  u16 metadata;

  /* Hit counter, last heard time */
  union {
    u64 hits;
    struct _vnet_classify_entry * next_free;	/* valid only when FREE flag set */
  };

  f64 last_heard;

  /* Must be aligned to a 16-octet boundary */
  u32x4 key[0];
}) vnet_classify_entry_t;
/* *INDENT-ON* */

/* Return non-zero iff entry e is on the free list (FREE flag set). */
static inline int
vnet_classify_entry_is_free (vnet_classify_entry_t * e)
{
  return (e->flags & VNET_CLASSIFY_ENTRY_FREE) != 0;
}

/* Return non-zero iff entry e is in use (FREE flag clear). */
static inline int
vnet_classify_entry_is_busy (vnet_classify_entry_t * e)
{
  return !(e->flags & VNET_CLASSIFY_ENTRY_FREE);
}

/* Need these to con the vector allocator */
/* *INDENT-OFF* */
#define _(size)                                 \
typedef CLIB_PACKED(struct {                    \
  u32 pad0[4];                                  \
  u64 pad1[2];                                  \
  u32x4 key[size];                              \
}) vnet_classify_entry_##size##_t;
foreach_size_in_u32x4;
/* *INDENT-ON* */
#undef _

/* Per-bucket header.  The anonymous union lets a whole bucket be read
   or written as a single u64 (as_u64). */
typedef struct
{
  union
  {
    struct
    {
      u32 offset;		/* entry-page offset in the table mheap; 0 => empty bucket */
      u8 linear_search;		/* if set, readers scan every entry in all pages */
      u8 pad[2];
      u8 log2_pages;		/* log2 number of entry pages in this bucket */
    };
    u64 as_u64;
  };
} vnet_classify_bucket_t;

/* A single classify table: a hash table whose sessions match
   (packet & mask) against stored keys. */
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  /* Mask to apply after skipping N vectors */
  u32x4 *mask;
  /* Buckets and entries */
  vnet_classify_bucket_t *buckets;
  vnet_classify_entry_t *entries;

  /* Config parameters */
  u32 match_n_vectors;		/* # of 16-octet vectors matched per session */
  u32 skip_n_vectors;		/* # of leading 16-octet vectors skipped */
  u32 nbuckets;			/* must be a power of 2 (see prefetch ASSERT) */
  u32 log2_nbuckets;
  u32 linear_buckets;		/* # of buckets in linear-search mode -- not read here */
  int entries_per_page;
  u32 active_elements;
  u32 current_data_flag;	/* presumably CLASSIFY_FLAG_USE_CURR_DATA or 0 -- set elsewhere */
  int current_data_offset;
  u32 data_offset;
  /* Index of next table to try */
  u32 next_table_index;

  /* Miss next index, return if next_table_index = 0 */
  u32 miss_next_index;

  /* Per-bucket working copies, one per thread */
  vnet_classify_entry_t **working_copies;
  int *working_copy_lengths;
  vnet_classify_bucket_t saved_bucket;

  /* Free entry freelists */
  vnet_classify_entry_t **freelists;

  u8 *name;

  /* Private allocation arena, protected by the writer lock */
  void *mheap;

  /* Writer (only) lock for this table */
  volatile u32 *writer_lock;

} vnet_classify_table_t;

/* Global classifier state: the table pool plus vectors of registered
   unformat helpers used when parsing CLI/API match specifications. */
struct _vnet_classify_main
{
  /* Table pool */
  vnet_classify_table_t *tables;

  /* Registered next-index, opaque unformat fcns */
  unformat_function_t **unformat_l2_next_index_fns;
  unformat_function_t **unformat_ip_next_index_fns;
  unformat_function_t **unformat_acl_next_index_fns;
  unformat_function_t **unformat_policer_next_index_fns;
  unformat_function_t **unformat_opaque_index_fns;

  /* convenience variables */
  vlib_main_t *vlib_main;
  vnet_main_t *vnet_main;
};

extern vnet_classify_main_t vnet_classify_main;

u8 *format_classify_table (u8 * s, va_list * args);

u64 vnet_classify_hash_packet (vnet_classify_table_t * t, u8 * h);

/* Compute the 64-bit session hash of packet header h against table t.
 *
 * Skips t->skip_n_vectors 16-octet vectors at the start of h, ANDs the
 * next t->match_n_vectors vectors with t->mask, XOR-folds them into a
 * 128-bit accumulator, then hashes (low u64 ^ high u64) with
 * clib_xxhash.  Uses the vector path only when h is 16-octet aligned;
 * otherwise falls back to u64 scalar loads, where vector index i maps
 * to u64 indices 2i and 2i+1 (hence the 8/9, 6/7, ... pairs below).
 */
static inline u64
vnet_classify_hash_packet_inline (vnet_classify_table_t * t, u8 * h)
{
  u32x4 *mask;

  union
  {
    u32x4 as_u32x4;
    u64 as_u64[2];
  } xor_sum __attribute__ ((aligned (sizeof (u32x4))));

  ASSERT (t);
  mask = t->mask;
#ifdef CLIB_HAVE_VEC128
  if (U32X4_ALIGNED (h))
    {				//SSE can't handle unaligned data
      u32x4 *data = (u32x4 *) h;
      /* Cases fall through deliberately: match_n_vectors == N folds
         vectors N-1 ... 1 on top of vector 0 loaded above. */
      xor_sum.as_u32x4 = data[0 + t->skip_n_vectors] & mask[0];
      switch (t->match_n_vectors)
	{
	case 5:
	  xor_sum.as_u32x4 ^= data[4 + t->skip_n_vectors] & mask[4];
	  /* FALLTHROUGH */
	case 4:
	  xor_sum.as_u32x4 ^= data[3 + t->skip_n_vectors] & mask[3];
	  /* FALLTHROUGH */
	case 3:
	  xor_sum.as_u32x4 ^= data[2 + t->skip_n_vectors] & mask[2];
	  /* FALLTHROUGH */
	case 2:
	  xor_sum.as_u32x4 ^= data[1 + t->skip_n_vectors] & mask[1];
	  /* FALLTHROUGH */
	case 1:
	  break;
	default:
	  abort ();
	}
    }
  else
#endif /* CLIB_HAVE_VEC128 */
    {
      /* Scalar path: process two u64 per 16-octet vector. */
      u32 skip_u64 = t->skip_n_vectors * 2;
      u64 *data64 = (u64 *) h;
      xor_sum.as_u64[0] = data64[0 + skip_u64] & ((u64 *) mask)[0];
      xor_sum.as_u64[1] = data64[1 + skip_u64] & ((u64 *) mask)[1];
      switch (t->match_n_vectors)
	{
	case 5:
	  xor_sum.as_u64[0] ^= data64[8 + skip_u64] & ((u64 *) mask)[8];
	  xor_sum.as_u64[1] ^= data64[9 + skip_u64] & ((u64 *) mask)[9];
	  /* FALLTHROUGH */
	case 4:
	  xor_sum.as_u64[0] ^= data64[6 + skip_u64] & ((u64 *) mask)[6];
	  xor_sum.as_u64[1] ^= data64[7 + skip_u64] & ((u64 *) mask)[7];
	  /* FALLTHROUGH */
	case 3:
	  xor_sum.as_u64[0] ^= data64[4 + skip_u64] & ((u64 *) mask)[4];
	  xor_sum.as_u64[1] ^= data64[5 + skip_u64] & ((u64 *) mask)[5];
	  /* FALLTHROUGH */
	case 2:
	  xor_sum.as_u64[0] ^= data64[2 + skip_u64] & ((u64 *) mask)[2];
	  xor_sum.as_u64[1] ^= data64[3 + skip_u64] & ((u64 *) mask)[3];
	  /* FALLTHROUGH */
	case 1:
	  break;

	default:
	  abort ();
	}
    }

  return clib_xxhash (xor_sum.as_u64[0] ^ xor_sum.as_u64[1]);
}

/* Prefetch the bucket that session hash 'hash' maps to, ahead of the
   actual lookup.  nbuckets must be a power of 2. */
static inline void
vnet_classify_prefetch_bucket (vnet_classify_table_t * t, u64 hash)
{
  u32 bi;

  ASSERT (is_pow2 (t->nbuckets));

  bi = hash & (t->nbuckets - 1);
  CLIB_PREFETCH (&t->buckets[bi], CLIB_CACHE_LINE_BYTES, LOAD);
}

/* Translate a bucket-relative mheap offset into an entry pointer. */
static inline vnet_classify_entry_t *
vnet_classify_get_entry (vnet_classify_table_t * t, uword offset)
{
  return (vnet_classify_entry_t *) ((u8 *) t->mheap + offset);
}

/* Inverse of vnet_classify_get_entry: entry pointer -> mheap offset.
   Offsets are asserted to fit in 32 bits (bucket 'offset' is a u32). */
static inline uword
vnet_classify_get_offset (vnet_classify_table_t * t,
			  vnet_classify_entry_t * v)
{
  uword off = (u8 *) v - (u8 *) t->mheap;

  ASSERT (off < 0x100000000ULL);
  return off;
}

/* Step from entry e to the index'th following entry.  Entries are
   variable-size: fixed header plus match_n_vectors u32x4 key words. */
static inline vnet_classify_entry_t *
vnet_classify_entry_at_index (vnet_classify_table_t * t,
			      vnet_classify_entry_t * e, u32 index)
{
  uword stride = sizeof (vnet_classify_entry_t)
    + t->match_n_vectors * sizeof (u32x4);

  return (vnet_classify_entry_t *) ((u8 *) e + index * stride);
}

/* Prefetch the specific entry that session hash 'hash' selects, so the
   subsequent vnet_classify_find_entry_inline call hits warm cache. */
static inline void
vnet_classify_prefetch_entry (vnet_classify_table_t * t, u64 hash)
{
  vnet_classify_bucket_t *b;
  vnet_classify_entry_t *e;
  u32 slot;

  b = &t->buckets[hash & (t->nbuckets - 1)];

  if (b->offset == 0)
    return;			/* empty bucket: nothing to prefetch */

  /* Upper hash bits select the entry slot within the bucket's pages. */
  hash >>= t->log2_nbuckets;
  slot = hash & ((1 << b->log2_pages) - 1);

  e = vnet_classify_get_entry (t, b->offset);
  e = vnet_classify_entry_at_index (t, e, slot);

  CLIB_PREFETCH (e, CLIB_CACHE_LINE_BYTES, LOAD);
}

vnet_classify_entry_t *vnet_classify_find_entry (vnet_classify_table_t * t,
						 u8 * h, u64 hash, f64 now);

/* Look up packet header h in table t.
 *
 * 'hash' must be the value produced by vnet_classify_hash_packet(_inline)
 * for (t, h).  On a hit, if 'now' is non-zero the entry's hit counter and
 * last_heard timestamp are updated; pass now == 0 for a read-only probe.
 * Returns the matching entry, or 0 if the bucket is empty or no key
 * matches.  The vector and scalar paths implement identical semantics;
 * in the scalar path vector index i maps to u64 indices 2i and 2i+1.
 */
static inline vnet_classify_entry_t *
vnet_classify_find_entry_inline (vnet_classify_table_t * t,
				 u8 * h, u64 hash, f64 now)
{
  vnet_classify_entry_t *v;
  u32x4 *mask, *key;
  union
  {
    u32x4 as_u32x4;
    u64 as_u64[2];
  } result __attribute__ ((aligned (sizeof (u32x4))));
  vnet_classify_bucket_t *b;
  u32 value_index;
  u32 bucket_index;
  u32 limit;
  int i;

  bucket_index = hash & (t->nbuckets - 1);
  b = &t->buckets[bucket_index];
  mask = t->mask;

  if (b->offset == 0)
    return 0;			/* empty bucket */

  /* Upper hash bits select the entry slot within the bucket's pages. */
  hash >>= t->log2_nbuckets;

  v = vnet_classify_get_entry (t, b->offset);
  value_index = hash & ((1 << b->log2_pages) - 1);
  limit = t->entries_per_page;
  /* Buckets in linear-search mode scan every entry in every page
     instead of indexing by hash. */
  if (PREDICT_FALSE (b->linear_search))
    {
      value_index = 0;
      limit *= (1 << b->log2_pages);
    }

  v = vnet_classify_entry_at_index (t, v, value_index);

#ifdef CLIB_HAVE_VEC128
  if (U32X4_ALIGNED (h))
    {
      u32x4 *data = (u32x4 *) h;
      for (i = 0; i < limit; i++)
	{
	  /* OR together (masked data XOR key) per vector; all-zero
	     result means every matched octet is equal. */
	  key = v->key;
	  result.as_u32x4 = (data[0 + t->skip_n_vectors] & mask[0]) ^ key[0];
	  switch (t->match_n_vectors)
	    {
	    case 5:
	      result.as_u32x4 |=
		(data[4 + t->skip_n_vectors] & mask[4]) ^ key[4];
	      /* FALLTHROUGH */
	    case 4:
	      result.as_u32x4 |=
		(data[3 + t->skip_n_vectors] & mask[3]) ^ key[3];
	      /* FALLTHROUGH */
	    case 3:
	      result.as_u32x4 |=
		(data[2 + t->skip_n_vectors] & mask[2]) ^ key[2];
	      /* FALLTHROUGH */
	    case 2:
	      result.as_u32x4 |=
		(data[1 + t->skip_n_vectors] & mask[1]) ^ key[1];
	      /* FALLTHROUGH */
	    case 1:
	      break;
	    default:
	      abort ();
	    }

	  /* 0xffff: all 16 octets of the accumulator are zero => hit */
	  if (u32x4_zero_byte_mask (result.as_u32x4) == 0xffff)
	    {
	      if (PREDICT_TRUE (now))
		{
		  v->hits++;
		  v->last_heard = now;
		}
	      return (v);
	    }
	  v = vnet_classify_entry_at_index (t, v, 1);
	}
    }
  else
#endif /* CLIB_HAVE_VEC128 */
    {
      u32 skip_u64 = t->skip_n_vectors * 2;
      u64 *data64 = (u64 *) h;
      for (i = 0; i < limit; i++)
	{
	  key = v->key;

	  result.as_u64[0] =
	    (data64[0 + skip_u64] & ((u64 *) mask)[0]) ^ ((u64 *) key)[0];
	  result.as_u64[1] =
	    (data64[1 + skip_u64] & ((u64 *) mask)[1]) ^ ((u64 *) key)[1];
	  switch (t->match_n_vectors)
	    {
	    case 5:
	      result.as_u64[0] |=
		(data64[8 + skip_u64] & ((u64 *) mask)[8]) ^ ((u64 *) key)[8];
	      result.as_u64[1] |=
		(data64[9 + skip_u64] & ((u64 *) mask)[9]) ^ ((u64 *) key)[9];
	      /* FALLTHROUGH */
	    case 4:
	      result.as_u64[0] |=
		(data64[6 + skip_u64] & ((u64 *) mask)[6]) ^ ((u64 *) key)[6];
	      result.as_u64[1] |=
		(data64[7 + skip_u64] & ((u64 *) mask)[7]) ^ ((u64 *) key)[7];
	      /* FALLTHROUGH */
	    case 3:
	      result.as_u64[0] |=
		(data64[4 + skip_u64] & ((u64 *) mask)[4]) ^ ((u64 *) key)[4];
	      result.as_u64[1] |=
		(data64[5 + skip_u64] & ((u64 *) mask)[5]) ^ ((u64 *) key)[5];
	      /* FALLTHROUGH */
	    case 2:
	      result.as_u64[0] |=
		(data64[2 + skip_u64] & ((u64 *) mask)[2]) ^ ((u64 *) key)[2];
	      result.as_u64[1] |=
		(data64[3 + skip_u64] & ((u64 *) mask)[3]) ^ ((u64 *) key)[3];
	      /* FALLTHROUGH */
	    case 1:
	      break;
	    default:
	      abort ();
	    }

	  /* Both accumulator halves zero => masked key matches => hit */
	  if (result.as_u64[0] == 0 && result.as_u64[1] == 0)
	    {
	      if (PREDICT_TRUE (now))
		{
		  v->hits++;
		  v->last_heard = now;
		}
	      return (v);
	    }

	  v = vnet_classify_entry_at_index (t, v, 1);
	}
    }
  return 0;
}

vnet_classify_table_t *vnet_classify_new_table (vnet_classify_main_t * cm,
						u8 * mask, u32 nbuckets,
						u32 memory_size,
						u32 skip_n_vectors,
						u32 match_n_vectors);

int vnet_classify_add_del_session (vnet_classify_main_t * cm,
				   u32 table_index,
				   u8 * match,
				   u32 hit_next_index,
				   u32 opaque_index,
				   i32 advance,
				   u8 action, u32 metadata, int is_add);

int vnet_classify_add_del_table (vnet_classify_main_t * cm,
				 u8 * mask,
				 u32 nbuckets,
				 u32 memory_size,
				 u32 skip,
				 u32 match,
				 u32 next_table_index,
				 u32 miss_next_index,
				 u32 * table_index,
				 u8 current_data_flag,
				 i16 current_data_offset,
				 int is_add, int del_chain);

unformat_function_t unformat_ip4_mask;
unformat_function_t unformat_ip6_mask;
unformat_function_t unformat_l3_mask;
unformat_function_t unformat_l2_mask;
unformat_function_t unformat_classify_mask;
unformat_function_t unformat_l2_next_index;
unformat_function_t unformat_ip_next_index;
unformat_function_t unformat_ip4_match;
unformat_function_t unformat_ip6_match;
unformat_function_t unformat_l3_match;
unformat_function_t unformat_l4_match;
unformat_function_t unformat_vlan_tag;
unformat_function_t unformat_l2_match;
unformat_function_t unformat_classify_match;

void vnet_classify_register_unformat_ip_next_index_fn
  (unformat_function_t * fn);

void vnet_classify_register_unformat_l2_next_index_fn
  (unformat_function_t * fn);

void vnet_classify_register_unformat_acl_next_index_fn
  (unformat_function_t * fn);

void vnet_classify_register_unformat_policer_next_index_fn
  (unformat_function_t * fn);

void vnet_classify_register_unformat_opaque_index_fn (unformat_function_t *
						      fn);

#endif /* __included_vnet_classify_h__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */