/* * Copyright (c) 2015 Cisco and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * main.c: main vector processing loop * * Copyright (c) 2008 Eliot Dresselhaus * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ #include #include #include #include #include CJ_GLOBAL_LOG_PROTOTYPE; /* Actually allocate a few extra slots of vector data to support speculative vector enqueues which overflow vector data in next frame. 
*/ #define VLIB_FRAME_SIZE_ALLOC (VLIB_FRAME_SIZE + 4) u32 wraps; always_inline u32 vlib_frame_bytes (u32 n_scalar_bytes, u32 n_vector_bytes) { u32 n_bytes; /* Make room for vlib_frame_t plus scalar arguments. */ n_bytes = vlib_frame_vector_byte_offset (n_scalar_bytes); /* Make room for vector arguments. Allocate a few extra slots of vector data to support speculative vector enqueues which overflow vector data in next frame. */ #define VLIB_FRAME_SIZE_EXTRA 4 n_bytes += (VLIB_FRAME_SIZE + VLIB_FRAME_SIZE_EXTRA) * n_vector_bytes; /* Magic number is first 32bit number after vector data. Used to make sure that vector data is never overrun. */ #define VLIB_FRAME_MAGIC (0xabadc0ed) n_bytes += sizeof (u32); /* Pad to cache line. */ n_bytes = round_pow2 (n_bytes, CLIB_CACHE_LINE_BYTES); return n_bytes; } always_inline u32 * vlib_frame_find_magic (vlib_frame_t * f, vlib_node_t * node) { void *p = f; p += vlib_frame_vector_byte_offset (node->scalar_size); p += (VLIB_FRAME_SIZE + VLIB_FRAME_SIZE_EXTRA) * node->vector_size; return p; } static vlib_frame_size_t * get_frame_size_info (vlib_node_main_t * nm, u32 n_scalar_bytes, u32 n_vector_bytes) { uword key = (n_scalar_bytes << 16) | n_vector_bytes; uword *p, i; p = hash_get (nm->frame_size_hash, key); if (p) i = p[0]; else { i = vec_len (nm->frame_sizes); vec_validate (nm->frame_sizes, i); hash_set (nm->frame_size_hash, key, i); } return vec_elt_at_index (nm->frame_sizes, i); } static u32 vlib_frame_alloc_to_node (vlib_main_t * vm, u32 to_node_index, u32 frame_flags) { vlib_node_main_t *nm = &vm->node_main; vlib_frame_size_t *fs; vlib_node_t *to_node; vlib_frame_t *f; u32 fi, l, n, scalar_size, vector_size; to_node = vlib_get_node (vm, to_node_index); scalar_size = to_node->scalar_size; vector_size = to_node->vector_size; fs = get_frame_size_info (nm, scalar_size, vector_size); n = vlib_frame_bytes (scalar_size, vector_size); if ((l = vec_len (fs->free_frame_indices)) > 0) { /* Allocate from end of free list. 
*/ fi = fs->free_frame_indices[l - 1]; f = vlib_get_frame_no_check (vm, fi); _vec_len (fs->free_frame_indices) = l - 1; } else { f = clib_mem_alloc_aligned_no_fail (n, VLIB_FRAME_ALIGN); f->cpu_index = vm->cpu_index; fi = vlib_frame_index_no_check (vm, f); } /* Poison frame when debugging. */ if (CLIB_DEBUG > 0) { u32 save_cpu_index = f->cpu_index; memset (f, 0xfe, n); f->cpu_index = save_cpu_index; } /* Insert magic number. */ { u32 *magic; magic = vlib_frame_find_magic (f, to_node); *magic = VLIB_FRAME_MAGIC; } f->flags = VLIB_FRAME_IS_ALLOCATED | frame_flags; f->n_vectors = 0; f->scalar_size = scalar_size; f->vector_size = vector_size; fs->n_alloc_frames += 1; return fi; } /* Allocate a frame for from FROM_NODE to TO_NODE via TO_NEXT_INDEX. Returns frame index. */ static u32 vlib_frame_alloc (vlib_main_t * vm, vlib_node_runtime_t * from_node_runtime, u32 to_next_index) { vlib_node_t *from_node; from_node = vlib_get_node (vm, from_node_runtime->node_index); ASSERT (to_next_index < vec_len (from_node->next_nodes)); return vlib_frame_alloc_to_node (vm, from_node->next_nodes[to_next_index], /* frame_flags */ 0); } vlib_frame_t * vlib_get_frame_to_node (vlib_main_t * vm, u32 to_node_index) { u32 fi = vlib_frame_alloc_to_node (vm, to_node_index, /* frame_flags */ VLIB_FRAME_FREE_AFTER_DISPATCH); return vlib_get_frame (vm, fi); } void vlib_put_frame_to_node (vlib_main_t * vm, u32 to_node_index, vlib_frame_t * f) { vlib_pending_frame_t *p; vlib_node_t *to_node; if (f->n_vectors == 0) return; to_node = vlib_get_node (vm, to_node_index); vec_add2 (vm->node_main.pending_frames, p, 1); f->flags |= VLIB_FRAME_PENDING; p->frame_index = vlib_frame_index (vm, f); p->node_runtime_index = to_node->runtime_index; p->next_frame_index = VLIB_PENDING_FRAME_NO_NEXT_FRAME; } /* Free given frame. 
*/
/* Return frame F to its per-size free list.  F must be allocated, and in
   debug builds no next-frame may still reference it. */
void
vlib_frame_free (vlib_main_t * vm, vlib_node_runtime_t * r, vlib_frame_t * f)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_node_t *node;
  vlib_frame_size_t *fs;
  u32 frame_index;

  ASSERT (f->flags & VLIB_FRAME_IS_ALLOCATED);

  node = vlib_get_node (vm, r->node_index);
  fs = get_frame_size_info (nm, node->scalar_size, node->vector_size);

  frame_index = vlib_frame_index (vm, f);

  /* NOTE(review): duplicate of the ASSERT above — harmless, but one of
     the two could be dropped. */
  ASSERT (f->flags & VLIB_FRAME_IS_ALLOCATED);

  /* No next frames may point to freed frame. */
  if (CLIB_DEBUG > 0)
    {
      vlib_next_frame_t *nf;
      vec_foreach (nf, vm->node_main.next_frames)
	ASSERT (nf->frame_index != frame_index);
    }

  f->flags &= ~VLIB_FRAME_IS_ALLOCATED;

  vec_add1 (fs->free_frame_indices, frame_index);
  ASSERT (fs->n_alloc_frames > 0);
  fs->n_alloc_frames -= 1;
}

/* CLI handler: print, per frame-size bucket, the number of allocated
   and free frames. */
static clib_error_t *
show_frame_stats (vlib_main_t * vm,
		  unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_frame_size_t *fs;

  vlib_cli_output (vm, "%=6s%=12s%=12s", "Size", "# Alloc", "# Free");
  vec_foreach (fs, nm->frame_sizes)
  {
    u32 n_alloc = fs->n_alloc_frames;
    u32 n_free = vec_len (fs->free_frame_indices);

    if (n_alloc + n_free > 0)
      vlib_cli_output (vm, "%=6d%=12d%=12d",
		       fs - nm->frame_sizes, n_alloc, n_free);
  }

  return 0;
}

/* *INDENT-OFF* */
VLIB_CLI_COMMAND (show_frame_stats_cli, static) = {
  .path = "show vlib frame-allocation",
  .short_help = "Show node dispatch frame statistics",
  .function = show_frame_stats,
};
/* *INDENT-ON* */

/* Change ownership of enqueue rights to given next node. */
static void
vlib_next_frame_change_ownership (vlib_main_t * vm,
				  vlib_node_runtime_t * node_runtime,
				  u32 next_index)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *next_frame;
  vlib_node_t *node, *next_node;

  node = vec_elt (nm->nodes, node_runtime->node_index);

  /* Only internal & input nodes are allowed to call other nodes.
*/
  ASSERT (node->type == VLIB_NODE_TYPE_INTERNAL
	  || node->type == VLIB_NODE_TYPE_INPUT
	  || node->type == VLIB_NODE_TYPE_PROCESS);

  ASSERT (vec_len (node->next_nodes) == node_runtime->n_next_nodes);

  next_frame =
    vlib_node_runtime_get_next_frame (vm, node_runtime, next_index);
  next_node = vec_elt (nm->nodes, node->next_nodes[next_index]);

  if (next_node->owner_node_index != VLIB_INVALID_NODE_INDEX)
    {
      /* Get frame from previous owner. */
      vlib_next_frame_t *owner_next_frame;
      vlib_next_frame_t tmp;

      owner_next_frame =
	vlib_node_get_next_frame (vm,
				  next_node->owner_node_index,
				  next_node->owner_next_index);

      /* Swap target next frame with owner's. */
      tmp = owner_next_frame[0];
      owner_next_frame[0] = next_frame[0];
      next_frame[0] = tmp;

      /*
       * If next_frame is already pending, we have to track down
       * all pending frames and fix their next_frame_index fields.
       */
      if (next_frame->flags & VLIB_FRAME_PENDING)
	{
	  vlib_pending_frame_t *p;
	  if (next_frame->frame_index != ~0)
	    {
	      vec_foreach (p, nm->pending_frames)
	      {
		if (p->frame_index == next_frame->frame_index)
		  {
		    p->next_frame_index =
		      next_frame - vm->node_main.next_frames;
		  }
	      }
	    }
	}
    }
  else
    {
      /* No previous owner. Take ownership. */
      next_frame->flags |= VLIB_FRAME_OWNER;
    }

  /* Record new owner. */
  next_node->owner_node_index = node->index;
  next_node->owner_next_index = next_index;

  /* Now we should be owner. */
  ASSERT (next_frame->flags & VLIB_FRAME_OWNER);
}

/* Make sure that magic number is still there.
   Otherwise, it is likely that caller has overrun frame arguments.
*/
always_inline void
validate_frame_magic (vlib_main_t * vm,
		      vlib_frame_t * f, vlib_node_t * n, uword next_index)
{
  vlib_node_t *next_node = vlib_get_node (vm, n->next_nodes[next_index]);
  u32 *magic = vlib_frame_find_magic (f, next_node);
  ASSERT (VLIB_FRAME_MAGIC == magic[0]);
}

/* Return the frame NODE should enqueue to for NEXT_INDEX, taking enqueue
   ownership, (re)allocating a frame as needed, and replacing a full frame
   with a fresh one.  The returned frame always has room for at least one
   more vector. */
vlib_frame_t *
vlib_get_next_frame_internal (vlib_main_t * vm,
			      vlib_node_runtime_t * node,
			      u32 next_index, u32 allocate_new_next_frame)
{
  vlib_frame_t *f;
  vlib_next_frame_t *nf;
  u32 n_used;

  nf = vlib_node_runtime_get_next_frame (vm, node, next_index);

  /* Make sure this next frame owns right to enqueue to destination frame. */
  if (PREDICT_FALSE (!(nf->flags & VLIB_FRAME_OWNER)))
    vlib_next_frame_change_ownership (vm, node, next_index);

  /* ??? Don't need valid flag: can use frame_index == ~0 */
  if (PREDICT_FALSE (!(nf->flags & VLIB_FRAME_IS_ALLOCATED)))
    {
      nf->frame_index = vlib_frame_alloc (vm, node, next_index);
      nf->flags |= VLIB_FRAME_IS_ALLOCATED;
    }

  f = vlib_get_frame (vm, nf->frame_index);

  /* Has frame been removed from pending vector (e.g. finished dispatching)?
     If so we can reuse frame. */
  if ((nf->flags & VLIB_FRAME_PENDING) && !(f->flags & VLIB_FRAME_PENDING))
    {
      nf->flags &= ~VLIB_FRAME_PENDING;
      f->n_vectors = 0;
    }

  /* Allocate new frame if current one is already full. */
  n_used = f->n_vectors;
  if (n_used >= VLIB_FRAME_SIZE || (allocate_new_next_frame && n_used > 0))
    {
      /* Old frame may need to be freed after dispatch, since we'll have
         two redundant frames from node -> next node. */
      if (!(nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH))
	{
	  vlib_frame_t *f_old = vlib_get_frame (vm, nf->frame_index);
	  f_old->flags |= VLIB_FRAME_FREE_AFTER_DISPATCH;
	}

      /* Allocate new frame to replace full one. */
      nf->frame_index = vlib_frame_alloc (vm, node, next_index);

      f = vlib_get_frame (vm, nf->frame_index);
      n_used = f->n_vectors;
    }

  /* Should have free vectors in frame now.
*/
  ASSERT (n_used < VLIB_FRAME_SIZE);

  if (CLIB_DEBUG > 0)
    {
      validate_frame_magic (vm, f,
			    vlib_get_node (vm, node->node_index), next_index);
    }

  return f;
}

/* Debug-only sanity checks run before a frame is handed back via
   vlib_put_next_frame: vector count must not shrink, and the next node's
   optional validate_frame callback must accept the frame. */
static void
vlib_put_next_frame_validate (vlib_main_t * vm,
			      vlib_node_runtime_t * rt,
			      u32 next_index, u32 n_vectors_left)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *nf;
  vlib_frame_t *f;
  vlib_node_runtime_t *next_rt;
  vlib_node_t *next_node;
  u32 n_before, n_after;

  nf = vlib_node_runtime_get_next_frame (vm, rt, next_index);
  f = vlib_get_frame (vm, nf->frame_index);

  ASSERT (n_vectors_left <= VLIB_FRAME_SIZE);
  /* Caller reports vectors REMAINING; convert to vectors present. */
  n_after = VLIB_FRAME_SIZE - n_vectors_left;
  n_before = f->n_vectors;

  ASSERT (n_after >= n_before);

  next_rt = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
			      nf->node_runtime_index);
  next_node = vlib_get_node (vm, next_rt->node_index);
  if (n_after > 0 && next_node->validate_frame)
    {
      u8 *msg = next_node->validate_frame (vm, rt, f);
      if (msg)
	{
	  clib_warning ("%v", msg);
	  ASSERT (0);
	}
      vec_free (msg);
    }
}

/* Release a next frame obtained via vlib_get_next_frame: record how many
   vectors were actually enqueued and schedule the frame for dispatch. */
void
vlib_put_next_frame (vlib_main_t * vm,
		     vlib_node_runtime_t * r,
		     u32 next_index, u32 n_vectors_left)
{
  vlib_node_main_t *nm = &vm->node_main;
  vlib_next_frame_t *nf;
  vlib_frame_t *f;
  u32 n_vectors_in_frame;

  if (vm->buffer_main->extern_buffer_mgmt == 0 && CLIB_DEBUG > 0)
    vlib_put_next_frame_validate (vm, r, next_index, n_vectors_left);

  nf = vlib_node_runtime_get_next_frame (vm, r, next_index);
  f = vlib_get_frame (vm, nf->frame_index);

  /* Make sure that magic number is still there.  Otherwise, caller
     has overrun frame meta data. */
  if (CLIB_DEBUG > 0)
    {
      vlib_node_t *node = vlib_get_node (vm, r->node_index);
      validate_frame_magic (vm, f, node, next_index);
    }

  /* Convert # of vectors left -> number of vectors there. */
  ASSERT (n_vectors_left <= VLIB_FRAME_SIZE);
  n_vectors_in_frame = VLIB_FRAME_SIZE - n_vectors_left;

  f->n_vectors = n_vectors_in_frame;

  /* If vectors were added to frame, add to pending vector.
*/
  if (PREDICT_TRUE (n_vectors_in_frame > 0))
    {
      vlib_pending_frame_t *p;
      u32 v0, v1;

      r->cached_next_index = next_index;

      if (!(f->flags & VLIB_FRAME_PENDING))
	{
	  __attribute__ ((unused)) vlib_node_t *node;
	  vlib_node_t *next_node;
	  vlib_node_runtime_t *next_runtime;

	  node = vlib_get_node (vm, r->node_index);
	  next_node = vlib_get_next_node (vm, r->node_index, next_index);
	  next_runtime = vlib_node_get_runtime (vm, next_node->index);

	  /* First put of this frame: queue it for dispatch. */
	  vec_add2 (nm->pending_frames, p, 1);

	  p->frame_index = nf->frame_index;
	  p->node_runtime_index = nf->node_runtime_index;
	  p->next_frame_index = nf - nm->next_frames;
	  nf->flags |= VLIB_FRAME_PENDING;
	  f->flags |= VLIB_FRAME_PENDING;

	  /*
	   * If we're going to dispatch this frame on another thread,
	   * force allocation of a new frame. Otherwise, we create
	   * a dangling frame reference. Each thread has its own copy of
	   * the next_frames vector.
	   */
	  /* NOTE(review): branch deliberately disabled via "0 &&" —
	     confirm whether cross-thread handoff is handled elsewhere. */
	  if (0 && r->cpu_index != next_runtime->cpu_index)
	    {
	      nf->frame_index = ~0;
	      nf->flags &= ~(VLIB_FRAME_PENDING | VLIB_FRAME_IS_ALLOCATED);
	    }
	}

      /* Copy trace flag from next_frame and from runtime. */
      /* NOTE(review): OR-ing nf->flags' own trace bit back into nf->flags
	 is a no-op; only the r->flags term has effect.  Verify intent. */
      nf->flags |= (nf->flags & VLIB_NODE_FLAG_TRACE)
	| (r->flags & VLIB_NODE_FLAG_TRACE);

      /* Bump the 32-bit per-next counter; on wrap, fold the old value
	 into the node's 64-bit counter. */
      v0 = nf->vectors_since_last_overflow;
      v1 = v0 + n_vectors_in_frame;
      nf->vectors_since_last_overflow = v1;

      if (PREDICT_FALSE (v1 < v0))
	{
	  vlib_node_t *node = vlib_get_node (vm, r->node_index);
	  vec_elt (node->n_vectors_by_next_node, next_index) += v0;
	}
    }
}

/* Sync up runtime (32 bit counters) and main node stats (64 bit counters).
*/
never_inline void
vlib_node_runtime_sync_stats (vlib_main_t * vm,
			      vlib_node_runtime_t * r,
			      uword n_calls, uword n_vectors, uword n_clocks)
{
  vlib_node_t *n = vlib_get_node (vm, r->node_index);

  /* Fold the runtime's 32-bit deltas into the node's 64-bit totals,
     then reset the deltas. */
  n->stats_total.calls += n_calls + r->calls_since_last_overflow;
  n->stats_total.vectors += n_vectors + r->vectors_since_last_overflow;
  n->stats_total.clocks += n_clocks + r->clocks_since_last_overflow;
  n->stats_total.max_clock = r->max_clock;
  n->stats_total.max_clock_n = r->max_clock_n;

  r->calls_since_last_overflow = 0;
  r->vectors_since_last_overflow = 0;
  r->clocks_since_last_overflow = 0;
}

/* Same as vlib_node_runtime_sync_stats, plus the process-only suspend
   counter. */
always_inline void __attribute__ ((unused))
vlib_process_sync_stats (vlib_main_t * vm,
			 vlib_process_t * p,
			 uword n_calls, uword n_vectors, uword n_clocks)
{
  vlib_node_runtime_t *rt = &p->node_runtime;
  vlib_node_t *n = vlib_get_node (vm, rt->node_index);
  vlib_node_runtime_sync_stats (vm, rt, n_calls, n_vectors, n_clocks);
  n->stats_total.suspends += p->n_suspends;
  p->n_suspends = 0;
}

/* Sync all of node N's runtime counters (call/vector/clock, suspends for
   process nodes, per-next vector counts) into the main node structure. */
void
vlib_node_sync_stats (vlib_main_t * vm, vlib_node_t * n)
{
  vlib_node_runtime_t *rt;

  if (n->type == VLIB_NODE_TYPE_PROCESS)
    {
      /* Nothing to do for PROCESS nodes except in main thread */
      if (vm != &vlib_global_main)
	return;

      vlib_process_t *p = vlib_get_process_from_node (vm, n);
      n->stats_total.suspends += p->n_suspends;
      p->n_suspends = 0;
      rt = &p->node_runtime;
    }
  else
    rt =
      vec_elt_at_index (vm->node_main.nodes_by_type[n->type],
			n->runtime_index);

  vlib_node_runtime_sync_stats (vm, rt, 0, 0, 0);

  /* Sync up runtime next frame vector counters with main node structure.
*/ { vlib_next_frame_t *nf; uword i; for (i = 0; i < rt->n_next_nodes; i++) { nf = vlib_node_runtime_get_next_frame (vm, rt, i); vec_elt (n->n_vectors_by_next_node, i) += nf->vectors_since_last_overflow; nf->vectors_since_last_overflow = 0; } } } always_inline u32 vlib_node_runtime_update_stats (vlib_main_t * vm, vlib_node_runtime_t * node, uword n_calls, uword n_vectors, uword n_clocks) { u32 ca0, ca1, v0, v1, cl0, cl1, r; cl0 = cl1 = node->clocks_since_last_overflow; ca0 = ca1 = node->calls_since_last_overflow; v0 = v1 = node->vectors_since_last_overflow; ca1 = ca0 + n_calls; v1 = v0 + n_vectors; cl1 = cl0 + n_clocks; node->calls_since_last_overflow = ca1; node->clocks_since_last_overflow = cl1; node->vectors_since_last_overflow = v1; node->max_clock_n = node->max_clock > n_clocks ? node->max_clock_n : n_vectors; node->max_clock = node->max_clock > n_clocks ? node->max_clock : n_clocks; r = vlib_node_runtime_update_main_loop_vector_stats (vm, node, n_vectors); if (PREDICT_FALSE (ca1 < ca0 || v1 < v
/*
 * node.c - skeleton vpp engine plug-in dual-loop node skeleton
 *
 * Copyright (c) <current-year> <your-organization>
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vppinfra/error.h>
#include <mactime/mactime.h>
#include <vnet/ip/ip4.h>

/* Per-packet trace record emitted when packet tracing is enabled. */
typedef struct
{
  u32 next_index;		/* next node chosen (drop vs. pass) */
  u32 device_index;		/* index into the device pool; ~0 if unknown */
  u8 src_mac[6];		/* MAC address recorded for this packet */
  u8 device_name[64];		/* device name snapshot for display */
} mactime_trace_t;

/* RX and TX graph-node registrations for the mactime feature
   (registration bodies live elsewhere in this file/plugin). */
vlib_node_registration_t mactime_node;
vlib_node_registration_t mactime_tx_node;

/* X-macro table of node error counters: _(symbol, display string).
   Expanded below into the mactime_error_t enum and its string table. */
#define foreach_mactime_error                   \
_(OK, "Permitted packets")			\
_(STATIC_DROP, "Static drop packets")           \
_(RANGE_DROP, "Range drop packets")             \
_(QUOTA_DROP, "Data quota drop packets")	\
_(DROP_10001, "Dropped UDP DST-port 10001")

/* Error counter indices, one per foreach_mactime_error entry. */
typedef enum
{
#define _(sym,str) MACTIME_ERROR_##sym,
  foreach_mactime_error
#undef _
    MACTIME_N_ERROR,		/* number of error counters */
} mactime_error_t;

/* Display strings parallel to mactime_error_t, in the same order. */
static char *mactime_error_strings[] = {
#define _(sym,string) string,
  foreach_mactime_error
#undef _
};

/* Next-node indices for the mactime RX node. */
typedef enum
{
  MACTIME_NEXT_DROP,		/* drop the packet */
  MACTIME_NEXT_ETHERNET_INPUT,	/* pass to ethernet-input */
  MACTIME_N_NEXT,		/* number of next nodes */
} mactime_next_t;

/* Packet trace format function: render one mactime_trace_t record as
   "MACTIME: src mac <mac> device <name> result <drop|pass>". */
static u8 *
format_mactime_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  mactime_trace_t *t = va_arg (*args, mactime_trace_t *);
  u8 *device;
  char *verdict;

  /* Unknown devices (index ~0) print a placeholder name. */
  device = (t->device_index != ~0) ? t->device_name : (u8 *) "unknown";
  verdict = (t->next_index == MACTIME_NEXT_DROP) ? "drop" : "pass";

  s = format (s, "MACTIME: src mac %U device %s result %s\n",
	      format_mac_address, t->src_mac, device, verdict);
  return s;
}

static uword
mactime_node_inline (vlib_main_t * vm,
		     vlib_node_runtime_t * node, vlib_frame_t * frame,
		     int is_tx)
{
  u32 n_left_from, *from, *to_next;
  mactime_next_t next_index;
  mactime_main_t *mm = &mactime_main;
  mactime_device_t *dp;
  clib_bihash_kv_8_8_t kv;
  clib_bihash_8_8_t *lut = &mm->lookup_table;
  u32 packets_ok = 0;
  f64 now;
  u32 thread_index = vm->thread_index;
  vnet_main_t *vnm = vnet_get_main ();
  vnet_interface_main_t *im = &vnm->interface_main;
  u8 arc = im->output_feature_arc_index;
  vnet_feature_config_main_t *fcm;

  if (is_tx)
    fcm = vnet_feature_get_config_main (arc);

  now = clib_timebase_now (&mm->timebase);

  if (PREDICT_FALSE ((now - mm->sunday_midnight) > 86400.0 * 7.0))
    mm->sunday_midnight = clib_timebase_find_sunday_midnight (now);

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0;
	  vlib_buffer_t *b0;
	  u32 next0;
	  u32 device_index0;
	  u32 len0;
	  ethernet_header_t *en0;
	  int has_dynamic_range_allow = 0;
	  int i;

	  /* speculatively enqueue b0 to the current next frame */
	  bi0 = from[0];
	  to_next[0] = bi0;
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;

	  b0 = vlib_get_buffer (vm, bi0);

	  /* Set next0 to e.g. interface-tx */
	  if (is_tx)
	    vnet_get_config_data (&fcm->config_main,
				  &b0->current_config_index, &next0,
				  /* # bytes of config data */ 0);
	  else
	    next0 = MACTIME_NEXT_ETHERNET_INPUT;

	  vlib_buffer_advance (b0, -(word) vnet_buffer (b0)->l2_hdr_offset);

	  len0 = vlib_buffer_length_in_chain (vm, b0);
	  en0 = vlib_buffer_get_current (b0);
	  kv.key = 0;
	  if (is_tx)
	    clib_memcpy_fast (&kv.key, en0->dst_address, 6);
	  else
	    clib_memcpy_fast (&kv.key, en0->src_address, 6);

	  /* Lookup the src/dst mac address */
	  if (clib_bihash_search_8_8 (lut, &kv, &kv) < 0)
	    {
	      /* Create a table entry... */
	      mactime_send_create_entry_message
		(is_tx ? en0->dst_address : en0->src_address);

	      /* and let this packet pass */
	      device_index0 = ~0;
	      dp = 0;
	      packets_ok++;
	      goto trace0;
	    }
	  else
	    device_index0 = kv.value;

	  dp = pool_elt_at_index (mm->devices, device_index0);

	  /* Known device, check for an always-on traffic quota */
	  if ((dp->flags & MACTIME_DEVICE_FLAG_DYNAMIC_ALLOW)
	      && PREDICT_FALSE (dp->data_quota))
	    {
	      vlib_counter_t device_current_count;
	      vlib_get_combined_counter (&