/* * decap.c: vxlan tunnel decap packet processing * * Copyright (c) 2013 Cisco and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #ifndef CLIB_MARCH_VARIANT vlib_node_registration_t vxlan4_input_node; vlib_node_registration_t vxlan6_input_node; #endif typedef struct { u32 next_index; u32 tunnel_index; u32 error; u32 vni; } vxlan_rx_trace_t; static u8 * format_vxlan_rx_trace (u8 * s, va_list * args) { CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); vxlan_rx_trace_t *t = va_arg (*args, vxlan_rx_trace_t *); if (t->tunnel_index == ~0) return format (s, "VXLAN decap error - tunnel for vni %d does not exist", t->vni); return format (s, "VXLAN decap from vxlan_tunnel%d vni %d next %d error %d", t->tunnel_index, t->vni, t->next_index, t->error); } always_inline u32 buf_fib_index (vlib_buffer_t * b, u32 is_ip4) { u32 sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_TX]; if (sw_if_index != (u32) ~ 0) return sw_if_index; u32 *fib_index_by_sw_if_index = is_ip4 ? 
ip4_main.fib_index_by_sw_if_index : ip6_main.fib_index_by_sw_if_index; sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX]; return vec_elt (fib_index_by_sw_if_index, sw_if_index); } typedef vxlan4_tunnel_key_t last_tunnel_cache4; always_inline vxlan_tunnel_t * vxlan4_find_tunnel (vxlan_main_t * vxm, last_tunnel_cache4 * cache, u32 fib_index, ip4_header_t * ip4_0, vxlan_header_t * vxlan0, vxlan_tunnel_t ** stats_t0) { /* Make sure VXLAN tunnel exist according to packet SIP and VNI */ vxlan4_tunnel_key_t key4; key4.key[1] = ((u64) fib_index << 32) | vxlan0->vni_reserved; if (PREDICT_FALSE (key4.key[1] != cache->key[1] || ip4_0->src_address.as_u32 != (u32) cache->key[0])) { key4.key[0] = ip4_0->src_address.as_u32; int rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_tunnel_by_key, &key4); if (PREDICT_FALSE (rv != 0)) { *stats_t0 = 0; return 0; } *cache = key4; } vxlan_tunnel_t *t0 = pool_elt_at_index (vxm->tunnels, cache->value); /* Validate VXLAN tunnel SIP against packet DIP */ if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32)) *stats_t0 = t0; else { /* try multicast */ if (PREDICT_TRUE (!ip4_address_is_multicast (&ip4_0->dst_address))) { *stats_t0 = 0; return 0; } key4.key[0] = ip4_0->dst_address.as_u32; /* Make sure mcast VXLAN tunnel exist by packet DIP and VNI */ int rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_tunnel_by_key, &key4); if (PREDICT_FALSE (rv != 0)) { *stats_t0 = 0; return 0; } *stats_t0 = pool_elt_at_index (vxm->tunnels, key4.value); } return t0; } typedef vxlan6_tunnel_key_t last_tunnel_cache6; always_inline vxlan_tunnel_t * vxlan6_find_tunnel (vxlan_main_t * vxm, last_tunnel_cache6 * cache, u32 fib_index, ip6_header_t * ip6_0, vxlan_header_t * vxlan0, vxlan_tunnel_t ** stats_t0) { /* Make sure VXLAN tunnel exist according to packet SIP and VNI */ vxlan6_tunnel_key_t key6 = { .key = { [0] = ip6_0->src_address.as_u64[0], [1] = ip6_0->src_address.as_u64[1], [2] = (((u64) fib_index) << 32) | vxlan0->vni_reserved, } }; if 
(PREDICT_FALSE (clib_bihash_key_compare_24_8 (key6.key, cache->key) == 0)) { int rv = clib_bihash_search_inline_24_8 (&vxm->vxlan6_tunnel_by_key, &key6); if (PREDICT_FALSE (rv != 0)) { *stats_t0 = 0; return 0; } *cache = key6; } vxlan_tunnel_t *t0 = pool_elt_at_index (vxm->tunnels, cache->value); /* Validate VXLAN tunnel SIP against packet DIP */ if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address, &t0->src.ip6))) *stats_t0 = t0; else { /* try multicast */ if (PREDICT_TRUE (!ip6_address_is_multicast (&ip6_0->dst_address))) { *stats_t0 = 0; return 0; } /* Make sure mcast VXLAN tunnel exist by packet DIP and VNI */ key6.key[0] = ip6_0->dst_address.as_u64[0]; key6.key[1] = ip6_0->dst_address.as_u64[1]; int rv = clib_bihash_search_inline_24_8 (&vxm->vxlan6_tunnel_by_key, &key6); if (PREDICT_FALSE (rv != 0)) { *stats_t0 = 0; return 0; } *stats_t0 = pool_elt_at_index (vxm->tunnels, key6.value); } return t0; } always_inline uword vxlan_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame, u32 is_ip4) { vxlan_main_t *vxm = &vxlan_main; vnet_main_t *vnm = vxm->vnet_main; vnet_interface_main_t *im = &vnm->interface_main; vlib_combined_counter_main_t *rx_counter = im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX; vlib_combined_counter_main_t *drop_counter = im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_DROP; last_tunnel_cache4 last4; last_tunnel_cache6 last6; u32 pkts_dropped = 0; u32 thread_index = vlib_get_thread_index (); if (is_ip4) memset (&last4, 0xff, sizeof last4); else memset (&last6, 0xff, sizeof last6); u32 *from = vlib_frame_vector_args (from_frame); u32 n_left_from = from_frame->n_vectors; vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs; vlib_get_buffers (vm, from, bufs, n_left_from); u16 nexts[VLIB_FRAME_SIZE], *next = nexts; while (n_left_from >= 4) { /* Prefetch next iteration. 
*/ vlib_prefetch_buffer_header (b[2], LOAD); vlib_prefetch_buffer_header (b[3], LOAD); /* udp leaves current_data pointing at the vxlan header */ void *cur0 = vlib_buffer_get_current (b[0]); void *cur1 = vlib_buffer_get_current (b[1]); vxlan_header_t *vxlan0 = cur0; vxlan_header_t *vxlan1 = cur1; u8 error0 = vxlan0->flags != VXLAN_FLAGS_I ? VXLAN_ERROR_BAD_FLAGS : 0; u8 error1 = vxlan1->flags != VXLAN_FLAGS_I ? VXLAN_ERROR_BAD_FLAGS : 0; ip4_header_t *ip4_0, *ip4_1; ip6_header_t *ip6_0, *ip6_1; if (is_ip4) { ip4_0 = cur0 - sizeof (udp_header_t) - sizeof (ip4_header_t); ip4_1 = cur1 - sizeof (udp_header_t) - sizeof (ip4_header_t); } else { ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t); ip6_1 = cur1 - sizeof (udp_header_t) - sizeof (ip6_header_t); } /* pop vxlan */ vlib_buffer_advance (b[0], sizeof *vxlan0); vlib_buffer_advance (b[1], sizeof *vxlan1); u32 fi0 = buf_fib_index (b[0], is_ip4); u32 fi1 = buf_fib_index (b[1], is_ip4); vxlan_tunnel_t *t0, *stats_t0; vxlan_tunnel_t *t1, *stats_t1; if (is_ip4) { t0 = vxlan4_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan0, &stats_t0); t1 = vxlan4_find_tunnel (vxm, &last4, fi1, ip4_1, vxlan1, &stats_t1); } else { t0 = vxlan6_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan0, &stats_t0); t1 = vxlan6_find_tunnel (vxm, &last6, fi1, ip6_1, vxlan1, &stats_t1); } error0 = t0 == 0 ? VXLAN_ERROR_NO_SUCH_TUNNEL : error0; error1 = t1 == 0 ? VXLAN_ERROR_NO_SUCH_TUNNEL : error1; /* Prefetch next iteration. 
*/ CLIB_PREFETCH (b[2]->data, CLIB_CACHE_LINE_BYTES, LOAD); CLIB_PREFETCH (b[3]->data, CLIB_CACHE_LINE_BYTES, LOAD); u32 len0 = vlib_buffer_length_in_chain (vm, b[0]); u32 len1 = vlib_buffer_length_in_chain (vm, b[1]); /* Validate VXLAN tunnel encap-fib index against packet */ if (PREDICT_FALSE (error0 != 0)) { next[0] = VXLAN_INPUT_NEXT_DROP; if (error0 == VXLAN_ERROR_BAD_FLAGS) { vlib_increment_combined_counter (drop_counter, thread_index, stats_t0->sw_if_index, 1, len0); } b[0]->error = node->errors[error0]; pkts_dropped++; } else { next[0] = t0->decap_next_index; /* Required to make the l2 tag push / pop code work on l2 subifs */ if (PREDICT_TRUE (next[0] == VXLAN_INPUT_NEXT_L2_INPUT)) vnet_update_l2_len (b[0]); /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */ vnet_buffer (b[0])->sw_if_index[VLIB_RX] = t0->sw_if_index; vlib_increment_combined_counter (rx_counter, thread_index, stats_t0->sw_if_index, 1, len0); } /* Validate VXLAN tunnel encap-fib index against packet */ if (PREDICT_FALSE (error1 != 0)) { next[1] = VXLAN_INPUT_NEXT_DROP; if (error1 == VXLAN_ERROR_BAD_FLAGS) { vlib_increment_combined_counter (drop_counter, thread_index, stats_t1->sw_if_index, 1, len1); } b[1]->error = node->errors[error1]; pkts_dropped++; } else { next[1] = t1->decap_next_index; /* Required to make the l2 tag push / pop code work on l2 subifs */ if (PREDICT_TRUE (next[1] == VXLAN_INPUT_NEXT_L2_INPUT)) vnet_update_l2_len (b[1]); /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */ vnet_buffer (b[1])->sw_if_index[VLIB_RX] = t1->sw_if_index; vlib_increment_combined_counter (rx_counter, thread_index, stats_t1->sw_if_index, 1, len1); } if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED)) { vxlan_rx_trace_t *tr = vlib_add_trace (vm, node, b[0], sizeof (*tr)); tr->next_index = next[0]; tr->error = error0; tr->tunnel_index = t0 == 0 ? 
~0 : t0 - vxm->tunnels; tr->vni = vnet_get_vni (vxlan0); } if (PREDICT_FALSE (b[1]->flags & VLIB_BUFFER_IS_TRACED)) { vxlan_rx_trace_t *tr = vlib_add_trace (vm, node, b[1], sizeof (*tr)); tr->next_index = next[1]; tr->error = error1; tr->tunnel_index = t1 == 0 ? ~0 : t1 - vxm->tunnels; tr->vni = vnet_get_vni (vxlan1); } b += 2; next += 2; n_left_from -= 2; } while (n_left_from > 0) { /* udp leaves current_data pointing at the vxlan header */ void *cur0 = vlib_buffer_get_current (b[0]); vxlan_header_t *vxlan0 = cur0; u8 error0 = vxlan0->flags != VXLAN_FLAGS_I ? VXLAN_ERROR_BAD_FLAGS : 0; ip4_header_t *ip4_0; ip6_header_t *ip6_0; if (is_ip4) ip4_0 = cur0 - sizeof (udp_header_t) - sizeof (ip4_header_t); else ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t); /* pop (ip, udp, vxlan) */ vlib_buffer_advance (b[0], sizeof (*vxlan0)); u32 fi0 = buf_fib_index (b[0], is_ip4); vxlan_tunnel_t *t0, *stats_t0; if (is_ip4) t0 = vxlan4_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan0, &stats_t0); else t0 = vxlan6_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan0, &stats_t0); error0 = t0 == 0 ? VXLAN_ERROR_NO_SUCH_TUNNEL : error0; uword len0 = vlib_buffer_length_in_chain (vm, b[0]); /* Validate VXLAN tunnel encap-fib index against packet */ if (PREDICT_FALSE (error0 != 0)) { next[0] = VXLAN_INPUT_NEXT_DROP; if (error0 == VXLAN_ERROR_BAD_FLAGS) { vlib_increment_combined_counter (drop_counter, thread_index, stats_t0->sw_if_index, 1, len0); } b[0]->error = node->errors[error0]; pkts_dropped++; } else { next[0] = t0->decap_next_index; /* Required to make the l2 tag push / pop code work on l2 subifs */ if (PREDICT_TRUE (next[0] == VXLAN_INPUT_NEXT_L2_INPUT)) vnet_update_l2_len (b[0]); /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */ vnet_buffer (b[0])->sw_if_index[VLIB_RX] = t0->sw_i
/*
 * Copyright (c) 2020 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>
#include <assert.h>
#include <vlibapi/api.h>
#include "vat2/test/vat2_test.api_types.h"
#include "vat2/test/vat2_test.api_tojson.h"
#include "vat2/test/vat2_test.api_fromjson.h"

/* Converts a binary API message to a cJSON object. */
typedef cJSON *(* tojson_fn_t)(void *);
/* Parses a cJSON object into a newly allocated binary API message.
 * NOTE(review): *len presumably receives the encoded message length in
 * bytes — confirm against the generated *_fromjson implementations. */
typedef void *(* fromjson_fn_t)(cJSON *o, int *len);

/*
 * Round-trip one message: JSON object -> binary API message -> JSON object,
 * then verify the result matches the input (or deliberately differs, for
 * negative test vectors). Aborts via assert() on any failure.
 *
 * o is borrowed (caller frees); all intermediate allocations are released
 * here.
 */
static void
test (tojson_fn_t tojson, fromjson_fn_t fromjson, cJSON *o, bool should_fail)
{
  int len = 0;

  /* JSON -> binary API message */
  void *mp = (fromjson) (o, &len);
  assert (mp);

  /* binary API message -> JSON */
  cJSON *o2 = (tojson) (mp);
  assert (o2);

  /* A lossless round-trip must compare equal; negative tests must not. */
  bool equal = cJSON_Compare (o, o2, 1);
  if (should_fail)
    assert (!equal);
  else
    assert (equal);

  char *out = cJSON_Print (o2);
  assert (out);
  char *in = cJSON_Print (o);
  printf ("%s\n%s\n", in, out);

  free (in);
  free (out);
  free (mp);
  cJSON_Delete (o2);
}

/* Maps an API message name to its generated JSON conversion functions. */
struct msgs {
  char *name;              /* message name as found in "_msgname" */
  tojson_fn_t tojson;      /* binary message -> cJSON */
  fromjson_fn_t fromjson;  /* cJSON -> binary message */
};
/* One test vector: JSON text plus its expected round-trip outcome. */
struct tests {
  char *s;                 /* JSON input string */
  bool should_fail;        /* true if the round-trip must NOT compare equal */
};

/* Hash tables mapping message name -> conversion function pointer,
 * populated once by register_functions(). */
uword *function_by_name_tojson;
uword *function_by_name_fromjson;
/*
 * Build the global name -> conversion-function lookup tables from the
 * first n entries of msgs[]. Must be called before runtest().
 */
static void
register_functions (struct msgs msgs[], int n)
{
  function_by_name_tojson = hash_create_string (0, sizeof (uword));
  function_by_name_fromjson = hash_create_string (0, sizeof (uword));

  for (int i = 0; i < n; i++)
    {
      hash_set_mem (function_by_name_tojson, msgs[i].name, msgs[i].tojson);
      hash_set_mem (function_by_name_fromjson, msgs[i].name, msgs[i].fromjson);
    }
}

static void
runtest (char *s, bool should_fail)
{
  cJSON *o = cJSON_Parse(s);
  assert(o);
  char *name = cJSON_GetStringValue(cJSON_GetObjectItem(o, "_msgname"));
  assert(name);

  uword *p = hash_get_mem(function_by_name_tojson, name);
  printf ("Message name: %s\n", name);
  assert(p);
  tojson_fn_t tojson = (tojson_fn_t)p[0];

  p = hash_get_mem(function_by_name_fromjson, name);
  assert(p);
  fromjson_fn_t fromjson = (fromjson_fn_t)p[0];

  test(tojson, fromjson, o, should_fail);
  cJSON_Delete(o);
}

struct msgs msgs[] = {
  {
    .name = "test_prefix",
    .tojson = (tojson_fn_t) vl_api_test_prefix_t_tojson,
    .fromjson = (fromjson_fn_t) vl_api_test_prefix_t_fromjson,
  },
  {
    .name = "test_enum",
    .tojson = (tojson_fn_t) vl_api_test_enum_t_tojson,
    .fromjson = (fromjson_fn_t) vl_api_test_enum_t_fromjson,
  },
  {
    .name = "test_string",
    .tojson = (tojson_fn_t) vl_api_test_string_t_tojson,
    .fromjson = (fromjson_fn_t) vl_api_test_string_t_fromjson,
  },
  {
    .name = "test_string2",
    .tojson = (tojson_fn_t) vl_api_test_string2_t_tojson,
    .fromjson = (fromjson_fn_t) vl_api_test_string2_t_fromjson,
  },
  {
    .name = "test_vla",
    .tojson = (tojson_fn_t) vl_api_test_vla_t_tojson,
    .fromjson = (fromjson_fn_t) vl_api_test_vla_t_fromjson,
  },
  {
    .name = "test_vla2",
    .tojson = (tojson_fn_t) vl_api_test_vla2_t_tojson,
    .fromjson = (fromjson_fn_t) vl_api_test_vla2_t_fromjson,
  },
  {
    .name = "test_vla3",
    .tojson = (tojson_fn_t) vl_api_test_vla3_t_tojson,
    .fromjson = (fromjson_fn_t) vl_api_test_vla3_t_fromjson,
  },
  {
    .name =