path: root/src/tests/vnet/lisp-cp
author    Filip Tehlar <ftehlar@cisco.com>    2017-04-26 16:09:06 +0200
committer Florin Coras <florin.coras@gmail.com>    2017-05-03 15:19:23 +0000
commit    816f437d943688f67d61fb6b9708eff59432b2ee (patch)
tree      565bddb5ad81b00eae739c4438ecb67042dd4218 /src/tests/vnet/lisp-cp
parent    afc47aa36f44d3f865c6e1e48f41eded366a85ac (diff)
Fix vnet unit tests
Change-Id: Ibe55e4399c6b78d83268d7c49ed498cab7bfdb43
Signed-off-by: Filip Tehlar <ftehlar@cisco.com>
Diffstat (limited to 'src/tests/vnet/lisp-cp')
-rw-r--r--  src/tests/vnet/lisp-cp/test_cp_serdes.c   51
-rw-r--r--  src/tests/vnet/lisp-cp/test_lisp_types.c  45
2 files changed, 82 insertions, 14 deletions
diff --git a/src/tests/vnet/lisp-cp/test_cp_serdes.c b/src/tests/vnet/lisp-cp/test_cp_serdes.c
index 0766bee11ad..8e8c8455f7d 100644
--- a/src/tests/vnet/lisp-cp/test_cp_serdes.c
+++ b/src/tests/vnet/lisp-cp/test_cp_serdes.c
@@ -21,9 +21,6 @@
#include <vlibapi/api.h>
#include <vnet/lisp-cp/packets.h>
-/* FIXME */
-#include <vlibapi/api_helper_macros.h>
-
#define _assert(e) \
error = CLIB_ERROR_ASSERT (e); \
if (error) \
@@ -489,6 +486,53 @@ done:
return error;
}
+static vlib_buffer_t *
+create_buffer (u8 * data, u32 data_len)
+{
+ vlib_buffer_t *b;
+
+ u8 *buf_data = clib_mem_alloc(500);
+ memset (buf_data, 0, 500);
+ b = (vlib_buffer_t *)buf_data;
+
+ u8 * p = vlib_buffer_put_uninit (b, data_len);
+ clib_memcpy (p, data, data_len);
+
+ return b;
+}
+
+static clib_error_t *
+test_lisp_parse_map_reply ()
+{
+ clib_error_t * error = 0;
+ u8 map_reply_data[] =
+ {
+ 0x00, 0x00, 0x00, 0x01, /* type; rsvd; mapping count */
+ 0x00, 0x00, 0x00, 0x00,
+ };
+ vlib_buffer_t *b = create_buffer (map_reply_data, sizeof (map_reply_data));
+ map_records_arg_t *mrecs = parse_map_reply (b);
+ _assert (0 == mrecs);
+ clib_mem_free (b);
+
+ u8 map_reply_data2[] =
+ {
+ 0x00, 0x00, 0x00, 0x01, /* type; rsvd */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, /* nonce */
+
+ /* 1. record - incomplete */
+ 0x01, 0x02, 0x03, 0x04, /* record TTL */
+ 0x01, /* locator count */
+ };
+ b = create_buffer (map_reply_data2, sizeof (map_reply_data2));
+ mrecs = parse_map_reply (b);
+ _assert (0 == mrecs);
+done:
+ clib_mem_free (b);
+ return error;
+}
+
static clib_error_t *
test_lisp_parse_lcaf ()
{
@@ -610,6 +654,7 @@ done:
_(lisp_msg_push_ecm) \
_(lisp_msg_parse) \
_(lisp_msg_parse_mapping_record) \
+ _(lisp_parse_map_reply) \
_(lisp_parse_lcaf) \
_(lisp_map_register)
diff --git a/src/tests/vnet/lisp-cp/test_lisp_types.c b/src/tests/vnet/lisp-cp/test_lisp_types.c
index fa34a3c6200..21575015d7f 100644
--- a/src/tests/vnet/lisp-cp/test_lisp_types.c
+++ b/src/tests/vnet/lisp-cp/test_lisp_types.c
@@ -18,9 +18,6 @@
#include <vnet/lisp-cp/lisp_types.h>
#include <vnet/lisp-cp/lisp_cp_messages.h>
-/* FIXME */
-#include <vlibapi/api_helper_macros.h>
-
#define _assert(e) \
error = CLIB_ERROR_ASSERT (e); \
if (error) \
@@ -265,7 +262,6 @@ done:
}
#endif
-#if 0 /* uncomment this once VNI is supported */
static clib_error_t * test_write_mac_in_lcaf (void)
{
clib_error_t * error = 0;
@@ -276,13 +272,12 @@ static clib_error_t * test_write_mac_in_lcaf (void)
gid_address_t g =
{
.mac = {0x1, 0x2, 0x3, 0x4, 0x5, 0x6},
- .vni = 0x30,
+ .vni = 0x01020304,
.vni_mask = 0x10,
.type = GID_ADDR_MAC,
};
u16 len = gid_address_put (b, &g);
- _assert (8 == len);
u8 expected[] =
{
@@ -290,20 +285,20 @@ static clib_error_t * test_write_mac_in_lcaf (void)
0x00, /* reserved1 */
0x00, /* flags */
0x02, /* LCAF type = Instance ID */
- 0x20, /* IID/VNI mask len */
- 0x00, 0x0a, /* length */
+ 0x10, /* IID/IID mask len */
+ 0x00, 0x0c, /* length */
0x01, 0x02, 0x03, 0x04, /* Instance ID / VNI */
- 0x00, 0x06, /* AFI = MAC */
+ 0x40, 0x05, /* AFI = MAC */
0x01, 0x02, 0x03, 0x04,
0x05, 0x06 /* MAC */
- }
+ };
+ _assert (sizeof (expected) == len);
_assert (0 == memcmp (expected, b, len));
done:
clib_mem_free (b);
return error;
}
-#endif
static clib_error_t * test_mac_address_write (void)
{
@@ -418,6 +413,32 @@ done:
}
static clib_error_t *
+test_src_dst_deser_bad_afi (void)
+{
+ clib_error_t * error = 0;
+
+ u8 expected_data[] =
+ {
+ 0x40, 0x03, 0x00, 0x00, /* AFI = LCAF, reserved1, flags */
+ 0x0c, 0x00, 0x00, 0x14, /* LCAF type = source/dest key, rsvd, length */
+ 0x00, 0x00, 0x00, 0x00, /* reserved; source-ML, Dest-ML */
+
+ 0xde, 0xad, /* AFI = bad value */
+ 0x11, 0x22, 0x33, 0x44,
+ 0x55, 0x66, /* source */
+
+ 0x40, 0x05, /* AFI = MAC */
+ 0x10, 0x21, 0x32, 0x43,
+ 0x54, 0x65, /* destination */
+ };
+
+ gid_address_t p;
+ _assert (~0 == gid_address_parse (expected_data, &p));
+done:
+ return error;
+}
+
+static clib_error_t *
test_src_dst_serdes (void)
{
clib_error_t * error = 0;
@@ -537,6 +558,8 @@ done:
_(mac_address_write) \
_(gid_address_write) \
_(src_dst_serdes) \
+ _(write_mac_in_lcaf) \
+ _(src_dst_deser_bad_afi) \
_(src_dst_with_vni_serdes)
int run_tests (void)
/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * @file
 * @brief NAT44 worker handoff
 */

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/handoff.h>
#include <vnet/fib/ip4_fib.h>
#include <vppinfra/error.h>
#include <nat/nat.h>
#include <nat/nat_inlines.h>

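/* Per-packet trace record emitted by the NAT44 worker handoff nodes. */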
typedef struct
{
  u32 next_worker_index;
  u32 trace_index;
  u8 in2out;
  u8 output;
} nat44_handoff_trace_t;

static char *nat44_handoff_error_strings[] = {
#define _(sym,string) string,
  foreach_nat44_handoff_error
#undef _
};

static u8 *
format_nat44_handoff_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  nat44_handoff_trace_t *t = va_arg (*args, nat44_handoff_trace_t *);
  char *tag, *output;

  tag = t->in2out ? "IN2OUT" : "OUT2IN";
  output = t->output ? "OUTPUT-FEATURE" : "";
  s =
    format (s, "NAT44_%s_WORKER_HANDOFF %s: next-worker %d trace index %d",
	    tag, output, t->next_worker_index, t->trace_index);

  return s;
}

static inline uword
nat44_worker_handoff_fn_inline (vlib_main_t * vm,
				vlib_node_runtime_t * node,
				vlib_frame_t * frame, u8 is_output,
				u8 is_in2out)
{
  u32 n_enq, n_left_from, *from, do_handoff = 0, same_worker = 0;

  u16 thread_indices[VLIB_FRAME_SIZE], *ti = thread_indices;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  snat_main_t *sm = &snat_main;

  u32 fq_index, thread_index = vm->thread_index;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;

  vlib_get_buffers (vm, from, b, n_left_from);

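  /* Select the frame queue feeding the target graph: in2out (plain or
   * output-feature variant) or out2in. */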
  if (is_in2out)
    {
      fq_index = is_output ? sm->fq_in2out_output_index : sm->fq_in2out_index;
    }
  else
    {
      fq_index = sm->fq_out2in_index;
    }

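  /* Quad loop: classify four packets per iteration, deciding which worker
   * thread should process each one. */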
  while (n_left_from >= 4)
    {
      u32 arc_next0, arc_next1, arc_next2, arc_next3;
      u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
      u32 rx_fib_index0, rx_fib_index1, rx_fib_index2, rx_fib_index3;
      u32 iph_offset0 = 0, iph_offset1 = 0, iph_offset2 = 0, iph_offset3 = 0;
      ip4_header_t *ip0, *ip1, *ip2, *ip3;

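      /* Prefetch headers and data of the next four buffers. */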
      if (PREDICT_TRUE (n_left_from >= 8))
	{
	  vlib_prefetch_buffer_header (b[4], LOAD);
	  vlib_prefetch_buffer_header (b[5], LOAD);
	  vlib_prefetch_buffer_header (b[6], LOAD);
	  vlib_prefetch_buffer_header (b[7], LOAD);
	  CLIB_PREFETCH (&b[4]->data, CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH (&b[5]->data, CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH (&b[6]->data, CLIB_CACHE_LINE_BYTES, LOAD);
	  CLIB_PREFETCH (&b[7]->data, CLIB_CACHE_LINE_BYTES, LOAD);
	}

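      /* On the output-feature arc the current data pointer sits at the
       * already-written L2 rewrite, so skip over it to reach the IP header. */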
      if (is_output)
	{
	  iph_offset0 = vnet_buffer (b[0])->ip.save_rewrite_length;
	  iph_offset1 = vnet_buffer (b[1])->ip.save_rewrite_length;
	  iph_offset2 = vnet_buffer (b[2])->ip.save_rewrite_length;
	  iph_offset3 = vnet_buffer (b[3])->ip.save_rewrite_length;
	}

      ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[0]) +
			      iph_offset0);
      ip1 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[1]) +
			      iph_offset1);
      ip2 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[2]) +
			      iph_offset2);
      ip3 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[3]) +
			      iph_offset3);

      vnet_feature_next (&arc_next0, b[0]);
      vnet_feature_next (&arc_next1, b[1]);
      vnet_feature_next (&arc_next2, b[2]);
      vnet_feature_next (&arc_next3, b[3]);

      vnet_buffer2 (b[0])->nat.arc_next = arc_next0;
      vnet_buffer2 (b[1])->nat.arc_next = arc_next1;
      vnet_buffer2 (b[2])->nat.arc_next = arc_next2;
      vnet_buffer2 (b[3])->nat.arc_next = arc_next3;

      sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
      sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
      sw_if_index2 = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
      sw_if_index3 = vnet_buffer (b[3])->sw_if_index[VLIB_RX];

      rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);
      rx_fib_index1 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index1);
      rx_fib_index2 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index2);
      rx_fib_index3 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index3);

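      /* The configured worker-selection callback returns the thread index
       * that owns each packet's NAT session. */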
      if (is_in2out)
	{
	  ti[0] = sm->worker_in2out_cb (ip0, rx_fib_index0, is_output);
	  ti[1] = sm->worker_in2out_cb (ip1, rx_fib_index1, is_output);
	  ti[2] = sm->worker_in2out_cb (ip2, rx_fib_index2, is_output);
	  ti[3] = sm->worker_in2out_cb (ip3, rx_fib_index3, is_output);
	}
      else
	{
	  ti[0] = sm->worker_out2in_cb (b[0], ip0, rx_fib_index0, is_output);
	  ti[1] = sm->worker_out2in_cb (b[1], ip1, rx_fib_index1, is_output);
	  ti[2] = sm->worker_out2in_cb (b[2], ip2, rx_fib_index2, is_output);
	  ti[3] = sm->worker_out2in_cb (b[3], ip3, rx_fib_index3, is_output);
	}

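      /* Count packets that stay on this thread vs. packets handed off. */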
      if (ti[0] == thread_index)
	same_worker++;
      else
	do_handoff++;

      if (ti[1] == thread_index)
	same_worker++;
      else
	do_handoff++;

      if (ti[2] == thread_index)
	same_worker++;
      else
	do_handoff++;

      if (ti[3] == thread_index)
	same_worker++;
      else
	do_handoff++;

      b += 4;
      ti += 4;
      n_left_from -= 4;
    }

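  /* Single loop for any remaining packets. */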
  while (n_left_from > 0)
    {
      u32 arc_next0;
      u32 sw_if_index0;
      u32 rx_fib_index0;
      u32 iph_offset0 = 0;
      ip4_header_t *ip0;


      if (is_output)
	iph_offset0 = vnet_buffer (b[0])->ip.save_rewrite_length;

      ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[0]) +
			      iph_offset0);

      vnet_feature_next (&arc_next0, b[0]);
      vnet_buffer2 (b[0])->nat.arc_next = arc_next0;

      sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
      rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0);

      if (is_in2out)
	{
	  ti[0] = sm->worker_in2out_cb (ip0, rx_fib_index0, is_output);
	}
      else
	{
	  ti[0] = sm->worker_out2in_cb (b[0], ip0, rx_fib_index0, is_output);
	}

      if (ti[0] == thread_index)
	same_worker++;
      else
	do_handoff++;

      b += 1;
      ti += 1;
      n_left_from -= 1;
    }

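  /* While tracing is enabled, record the chosen worker for each traced
   * buffer. */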
  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
    {
      u32 i;
      b = bufs;
      ti = thread_indices;

      for (i = 0; i < frame->n_vectors; i++)
	{
	  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      nat44_handoff_trace_t *t =
		vlib_add_trace (vm, node, b[0], sizeof (*t));
	      t->next_worker_index = ti[0];
	      t->trace_index = vlib_buffer_get_trace_index (b[0]);
	      t->in2out = is_in2out;
	      t->output = is_output;

	      b += 1;
	      ti += 1;
	    }
	  else
	    break;
	}
    }

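  /* Enqueue every buffer to its owning thread's frame queue; buffers that
   * cannot be enqueued are dropped and counted as congestion drops. */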
  n_enq = vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
					 frame->n_vectors, 1);

  if (n_enq < frame->n_vectors)
    {
      vlib_node_increment_counter (vm, node->node_index,
				   NAT44_HANDOFF_ERROR_CONGESTION_DROP,
				   frame->n_vectors - n_enq);
    }

  vlib_node_increment_counter (vm, node->node_index,
			       NAT44_HANDOFF_ERROR_SAME_WORKER, same_worker);
  vlib_node_increment_counter (vm, node->node_index,
			       NAT44_HANDOFF_ERROR_DO_HANDOFF, do_handoff);
  return frame->n_vectors;
}

VLIB_NODE_FN (snat_in2out_worker_handoff_node) (vlib_main_t * vm,
						vlib_node_runtime_t * node,
						vlib_frame_t * frame)
{
  return nat44_worker_handoff_fn_inline (vm, node, frame, 0, 1);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (snat_in2out_worker_handoff_node) = {
  .name = "nat44-in2out-worker-handoff",
  .vector_size = sizeof (u32),
  .sibling_of = "nat-default",
  .format_trace = format_nat44_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(nat44_handoff_error_strings),
  .error_strings = nat44_handoff_error_strings,
};
/* *INDENT-ON* */

VLIB_NODE_FN (snat_in2out_output_worker_handoff_node) (vlib_main_t * vm,
						       vlib_node_runtime_t *
						       node,
						       vlib_frame_t * frame)
{
  return nat44_worker_handoff_fn_inline (vm, node, frame, 1, 1);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (snat_in2out_output_worker_handoff_node) = {
  .name = "nat44-in2out-output-worker-handoff",
  .vector_size = sizeof (u32),
  .sibling_of = "nat-default",
  .format_trace = format_nat44_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(nat44_handoff_error_strings),
  .error_strings = nat44_handoff_error_strings,
};
/* *INDENT-ON* */

VLIB_NODE_FN (snat_out2in_worker_handoff_node) (vlib_main_t * vm,
						vlib_node_runtime_t * node,
						vlib_frame_t * frame)
{
  return nat44_worker_handoff_fn_inline (vm, node, frame, 0, 0);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (snat_out2in_worker_handoff_node) = {
  .name = "nat44-out2in-worker-handoff",
  .vector_size = sizeof (u32),
  .sibling_of = "nat-default",
  .format_trace = format_nat44_handoff_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(nat44_handoff_error_strings),
  .error_strings = nat44_handoff_error_strings,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */