/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* NOTE(review): the ten #include directives below lost their header paths
 * (angle-bracketed "<...>" text appears to have been stripped during
 * extraction) -- restore them from the original source before building. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/**
 * @file
 * @brief VXLAN GBP.
 *
 * VXLAN GBP provides the features of vxlan and carry group policy id.
 */

/* Punt handle for this feature; not initialised anywhere in this chunk --
 * presumably set in an init function outside the visible range. */
static vlib_punt_hdl_t punt_hdl;

/* Global state for the VXLAN-GBP feature (tunnel pool, lookup tables, ...). */
vxlan_gbp_main_t vxlan_gbp_main;

/* Render a vxlan_gbp_tunnel_mode_t as "L2" or "L3". */
u8 *
format_vxlan_gbp_tunnel_mode (u8 * s, va_list * args)
{
  vxlan_gbp_tunnel_mode_t mode = va_arg (*args, vxlan_gbp_tunnel_mode_t);

  switch (mode)
    {
    case VXLAN_GBP_TUNNEL_MODE_L2:
      s = format (s, "L2");
      break;
    case VXLAN_GBP_TUNNEL_MODE_L3:
      s = format (s, "L3");
      break;
    }
  return (s);
}

/* Render one tunnel: instance numbers, endpoints, vni, fib index, mode,
 * encap DPO index, and (for multicast destinations) the mcast sw_if_index. */
u8 *
format_vxlan_gbp_tunnel (u8 * s, va_list * args)
{
  vxlan_gbp_tunnel_t *t = va_arg (*args, vxlan_gbp_tunnel_t *);

  s = format (s,
	      "[%d] instance %d src %U dst %U vni %d fib-idx %d"
	      " sw-if-idx %d mode %U ",
	      t->dev_instance, t->user_instance,
	      format_ip46_address, &t->src, IP46_TYPE_ANY,
	      format_ip46_address, &t->dst, IP46_TYPE_ANY,
	      t->vni, t->encap_fib_index, t->sw_if_index,
	      format_vxlan_gbp_tunnel_mode, t->mode);
  s = format (s, "encap-dpo-idx %d ", t->next_dpo.dpoi_index);
  if (PREDICT_FALSE (ip46_address_is_multicast (&t->dst)))
    s = format (s, "mcast-sw-if-idx %d ", t->mcast_sw_if_index);
  return s;
}

/* Device-class name formatter: "vxlan_gbp_tunnel<user_instance>".
 * NOTE(review): both early-return string literals below are empty -- the
 * original placeholder text (angle-bracketed) looks stripped by extraction;
 * verify against the upstream source. (continues on the next chunk) */
static u8 *
format_vxlan_gbp_name (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
  vxlan_gbp_tunnel_t *t;

  if (dev_instance == ~0)
    return format (s, "");
  if (dev_instance >= vec_len
(vxm->tunnels))
    return format (s, "");
  t = pool_elt_at_index (vxm->tunnels, dev_instance);
  return format (s, "vxlan_gbp_tunnel%d", t->user_instance);
}

/* Admin up/down hook: mirror the sw admin flag onto the hw link flag. */
static clib_error_t *
vxlan_gbp_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
				   u32 flags)
{
  u32 hw_flags = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ?
    VNET_HW_INTERFACE_FLAG_LINK_UP : 0;
  vnet_hw_interface_set_flags (vnm, hw_if_index, hw_flags);

  return /* no error */ 0;
}

/* *INDENT-OFF* */
VNET_DEVICE_CLASS (vxlan_gbp_device_class, static) = {
  .name = "VXLAN-GBP",
  .format_device_name = format_vxlan_gbp_name,
  .format_tx_trace = format_vxlan_gbp_encap_trace,
  .admin_up_down_function = vxlan_gbp_interface_admin_up_down,
};
/* *INDENT-ON* */

/* Header formatter for the hw class -- deliberately unimplemented. */
static u8 *
format_vxlan_gbp_header_with_length (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  s = format (s, "unimplemented dev %u", dev_instance);
  return s;
}

/* *INDENT-OFF* */
VNET_HW_INTERFACE_CLASS (vxlan_gbp_hw_class) = {
  .name = "VXLAN-GBP",
  .format_header = format_vxlan_gbp_header_with_length,
  .build_rewrite = default_build_rewrite,
};
/* *INDENT-ON* */

/* Re-stack the tunnel's encap DPO on the current forwarding of its
 * destination FIB entry.  (continues on the next chunk) */
static void
vxlan_gbp_tunnel_restack_dpo (vxlan_gbp_tunnel_t * t)
{
  u8 is_ip4 = ip46_address_is_ip4 (&t->dst);
  dpo_id_t dpo = DPO_INVALID;
  fib_forward_chain_type_t forw_type = is_ip4 ?
    FIB_FORW_CHAIN_TYPE_UNICAST_IP4 : FIB_FORW_CHAIN_TYPE_UNICAST_IP6;

  fib_entry_contribute_forwarding (t->fib_entry_index, forw_type, &dpo);

  /* vxlan_gbp uses the payload hash as the udp source port
   * hence the packet's hash is unknown
   * skip single bucket load balance dpo's */
  while (DPO_LOAD_BALANCE == dpo.dpoi_type)
    {
      load_balance_t *lb = load_balance_get (dpo.dpoi_index);
      if (lb->lb_n_buckets > 1)
	break;

      dpo_copy (&dpo, load_balance_get_bucket_i (lb, 0));
    }

  u32 encap_index = is_ip4 ?
vxlan4_gbp_encap_node.index : vxlan6_gbp_encap_node.index;
  dpo_stack_from_node (encap_index, &t->next_dpo, &dpo);
  dpo_reset (&dpo);
}

/* Recover the tunnel from its embedded fib_node_t member. */
static vxlan_gbp_tunnel_t *
vxlan_gbp_tunnel_from_fib_node (fib_node_t * node)
{
  ASSERT (FIB_NODE_TYPE_VXLAN_GBP_TUNNEL == node->fn_type);
  return ((vxlan_gbp_tunnel_t *) (((char *) node) -
				  STRUCT_OFFSET_OF (vxlan_gbp_tunnel_t,
						    node)));
}

/**
 * Function definition to backwalk a FIB node -
 * Here we will restack the new dpo of VXLAN DIP to encap node.
 */
static fib_node_back_walk_rc_t
vxlan_gbp_tunnel_back_walk (fib_node_t * node, fib_node_back_walk_ctx_t * ctx)
{
  vxlan_gbp_tunnel_restack_dpo (vxlan_gbp_tunnel_from_fib_node (node));
  return (FIB_NODE_BACK_WALK_CONTINUE);
}

/**
 * Function definition to get a FIB node from its index
 */
static fib_node_t *
vxlan_gbp_tunnel_fib_node_get (fib_node_index_t index)
{
  vxlan_gbp_tunnel_t *t;
  vxlan_gbp_main_t *vxm = &vxlan_gbp_main;

  t = pool_elt_at_index (vxm->tunnels, index);
  return (&t->node);
}

/**
 * Function definition to inform the FIB node that its last lock has gone.
 */
static void
vxlan_gbp_tunnel_last_lock_gone (fib_node_t * node)
{
  /*
   * The VXLAN GBP tunnel is a root of the graph. As such
   * it never has children and thus is never locked.
   */
  ASSERT (0);
}

/*
 * Virtual function table registered by VXLAN GBP tunnels
 * for participation in the FIB object graph.
 */
const static fib_node_vft_t vxlan_gbp_vft = {
  .fnv_get = vxlan_gbp_tunnel_fib_node_get,
  .fnv_last_lock = vxlan_gbp_tunnel_last_lock_gone,
  .fnv_back_walk = vxlan_gbp_tunnel_back_walk,
};

/* Fields copied verbatim from the add_del args into a new tunnel. */
#define foreach_copy_field   \
_(vni)                       \
_(mode)                      \
_(mcast_sw_if_index)         \
_(encap_fib_index)           \
_(src)                       \
_(dst)

/* Pre-compute the ip4/ip6+udp+vxlan-gbp encap rewrite for tunnel t.
 * (continues on the next chunk) */
static void
vxlan_gbp_rewrite (vxlan_gbp_tunnel_t * t, bool is_ip6)
{
  union
  {
    ip4_vxlan_gbp_header_t h4;
    ip6_vxlan_gbp_header_t h6;
  } h;
  int len = is_ip6 ?
sizeof h.h6 : sizeof h.h4;
  udp_header_t *udp;
  vxlan_gbp_header_t *vxlan_gbp;
  /* Fixed portion of the (outer) ip header */
  clib_memset (&h, 0, sizeof (h));
  if (!is_ip6)
    {
      ip4_header_t *ip = &h.h4.ip4;
      udp = &h.h4.udp, vxlan_gbp = &h.h4.vxlan_gbp;
      ip->ip_version_and_header_length = 0x45;
      ip->ttl = 254;
      ip->protocol = IP_PROTOCOL_UDP;
      ip->src_address = t->src.ip4;
      ip->dst_address = t->dst.ip4;
      /* we fix up the ip4 header length and checksum after-the-fact */
      ip->checksum = ip4_header_checksum (ip);
    }
  else
    {
      ip6_header_t *ip = &h.h6.ip6;
      udp = &h.h6.udp, vxlan_gbp = &h.h6.vxlan_gbp;
      ip->ip_version_traffic_class_and_flow_label =
	clib_host_to_net_u32 (6 << 28);
      ip->hop_limit = 255;
      ip->protocol = IP_PROTOCOL_UDP;
      ip->src_address = t->src.ip6;
      ip->dst_address = t->dst.ip6;
    }

  /* UDP header, randomize src port on something, maybe? */
  udp->src_port = clib_host_to_net_u16 (47789);
  udp->dst_port = clib_host_to_net_u16 (UDP_DST_PORT_vxlan_gbp);

  /* VXLAN header */
  vxlan_gbp_set_header (vxlan_gbp, t->vni);

  vnet_rewrite_set_data (*t, &h, len);
}

/* Take a reference on a VTEP address: bump the count in the per-AF hash
 * (vtep4 keyed by the raw u32, vtep6 keyed by the ip6 address), creating
 * the entry with count 1 on first use.  Returns the new count. */
static uword
vtep_addr_ref (ip46_address_t * ip)
{
  uword *vtep = ip46_address_is_ip4 (ip) ?
    hash_get (vxlan_gbp_main.vtep4, ip->ip4.as_u32) :
    hash_get_mem (vxlan_gbp_main.vtep6, &ip->ip6);
  if (vtep)
    return ++(*vtep);
  ip46_address_is_ip4 (ip) ?
    hash_set (vxlan_gbp_main.vtep4, ip->ip4.as_u32, 1) :
    hash_set_mem_alloc (&vxlan_gbp_main.vtep6, &ip->ip6, 1);
  return 1;
}

/* Drop a reference on a VTEP address; remove the hash entry when the
 * count reaches zero.  Returns the remaining count.
 * (continues on the next chunk) */
static uword
vtep_addr_unref (ip46_address_t * ip)
{
  uword *vtep = ip46_address_is_ip4 (ip) ?
    hash_get (vxlan_gbp_main.vtep4, ip->ip4.as_u32) :
    hash_get_mem (vxlan_gbp_main.vtep6, &ip->ip6);
  ASSERT (vtep);
  if (--(*vtep) != 0)
    return *vtep;
  ip46_address_is_ip4 (ip) ?
hash_unset (vxlan_gbp_main.vtep4, ip->ip4.as_u32) :
    hash_unset_mem_free (&vxlan_gbp_main.vtep6, &ip->ip6);
  return 0;
}

/* Shared state for multicast tunnels: the (*,G) mfib entry and the mcast
 * adjacency, packed into a single u64 so it fits a hash value. */
/* *INDENT-OFF* */
typedef CLIB_PACKED(union
{
  struct
  {
    fib_node_index_t mfib_entry_index;
    adj_index_t mcast_adj_index;
  };
  u64 as_u64;
}) mcast_shared_t;
/* *INDENT-ON* */

/* Look up the shared mcast state for a (multicast) destination address.
 * The entry must already exist. */
static inline mcast_shared_t
mcast_shared_get (ip46_address_t * ip)
{
  ASSERT (ip46_address_is_multicast (ip));
  uword *p = hash_get_mem (vxlan_gbp_main.mcast_shared, ip);
  ASSERT (p);
  mcast_shared_t ret = {.as_u64 = *p };
  return ret;
}

/* Record the shared mcast state for destination dst. */
static inline void
mcast_shared_add (ip46_address_t * dst, fib_node_index_t mfei, adj_index_t ai)
{
  mcast_shared_t new_ep = {
    .mcast_adj_index = ai,
    .mfib_entry_index = mfei,
  };
  hash_set_mem_alloc (&vxlan_gbp_main.mcast_shared, dst, new_ep.as_u64);
}

/* Tear down the shared mcast state: unlock the adjacency, delete the
 * mfib entry, and remove the hash entry. */
static inline void
mcast_shared_remove (ip46_address_t * dst)
{
  mcast_shared_t ep = mcast_shared_get (dst);
  adj_unlock (ep.mcast_adj_index);
  mfib_table_entry_delete_index (ep.mfib_entry_index, MFIB_SOURCE_VXLAN_GBP);
  hash_unset_mem_free (&vxlan_gbp_main.mcast_shared, dst);
}

/* Refcounted registration of the vxlan-gbp UDP destination ports; the
 * actual udp_register_dst_port calls happen only on the 0 -> 1 edge. */
inline void
vxlan_gbp_register_udp_ports (void)
{
  vxlan_gbp_main_t *vxm = &vxlan_gbp_main;

  if (vxm->udp_ports_registered == 0)
    {
      udp_register_dst_port (vxm->vlib_main, UDP_DST_PORT_vxlan_gbp,
			     vxlan4_gbp_input_node.index, /* is_ip4 */ 1);
      udp_register_dst_port (vxm->vlib_main, UDP_DST_PORT_vxlan6_gbp,
			     vxlan6_gbp_input_node.index, /* is_ip4 */ 0);
    }
  /*
   * Counts the number of vxlan_gbp tunnels
   */
  vxm->udp_ports_registered += 1;
}

/* Counterpart of the above: unregister on the 1 -> 0 edge. */
inline void
vxlan_gbp_unregister_udp_ports (void)
{
  vxlan_gbp_main_t *vxm = &vxlan_gbp_main;

  ASSERT (vxm->udp_ports_registered != 0);

  if (vxm->udp_ports_registered == 1)
    {
      udp_unregister_dst_port (vxm->vlib_main, UDP_DST_PORT_vxlan_gbp,
			       /* is_ip4 */ 1);
      udp_unregister_dst_port (vxm->vlib_main, UDP_DST_PORT_vxlan6_gbp,
			       /* is_ip4 */ 0);
    }

  vxm->udp_ports_registered -= 1;
}

/* Create or delete a vxlan-gbp tunnel.
 * NOTE(review): this function is truncated in the visible chunk -- the
 * remainder of its body continues beyond this review's view. */
int
vnet_vxlan_gbp_tunnel_add_del (vnet_vxlan_gbp_tunnel_add_del_args_t * a,
			       u32 * sw_if_indexp)
{
  vxlan_gbp_main_t *vxm = &vxlan_gbp_main;
vxlan_gbp_tunnel_t *t = 0; vnet_main_t *vnm = vxm->vnet_main; u64 *p; u32 sw_if_index = ~0; vxlan4_gbp_tunnel_key_t key4; vxlan6_gbp_tunnel_key_t key6; u32 is_ip6 = a->is_ip6; int not_found; if (!is_ip6) { key4.key[0] = ip46_address_is_multicast (&a->dst) ? a->dst.ip4.as_u32 : a->dst.ip4.as_u32 | (((u64) a->src.ip4.as_u32) << 32); key4.key[1] = (((u64) a->encap_fib_index) << 32) | clib_host_to_net_u32 (a->vni << 8); not_found = clib_bihash_search_inline_16_8 (&vxm->vxlan4_gbp_tunnel_by_key, &key4); p = &key4.value; } else { key6.key[0] = a->dst.ip6.as_u64[0]; key6.key[1] = a->dst.ip6.as_u64[1]; key6.key[2] = (((u64) a->encap_fib_index) << 32) | clib_host_to_net_u32 (a->vni << 8); not_found = clib_bihash_search_inline_24_8 (&vxm->vxlan6_gbp_tunnel_by_key, &key6); p = &key6.value; } if (not_found) p = 0; if (a->is_add) { l2input_main_t *l2im = &l2input_main; u32 dev_instance; /* real dev instance tunnel index */ u32 user_instance; /* request and actual instance number */ /* adding a tunnel: tunnel must not already exist */ if (p) { t = pool_elt_at_index (vxm->tunnels, *p); *sw_if_indexp = t->sw_if_index; return VNET_API_ERROR_TUNNEL_EXIST; } pool_get_aligned (vxm->tunnels, t, CLIB_CACHE_LINE_BYTES); clib_memset (t, 0, sizeof (*t)); dev_instance = t - vxm->tunnels; /* copy from arg structure */ #define _(x) t->x = a->x; foreach_copy_field; #undef _ vxlan_gbp_rewrite (t, is_ip6); /* * Reconcile the real dev_instance and a possible requested instance. 
*/ user_instance = a->instance; if (user_instance == ~0) user_instance = dev_instance; if (hash_get (vxm->instance_used, user_instance)) { pool_put (vxm->tunnels, t); return VNET_API_ERROR_INSTANCE_IN_USE; } hash_set (vxm->instance_used, user_instance, 1); t->dev_instance = dev_instance; /* actual */ t->user_instance = user_instance; /* name */ /* copy the key */ int add_failed; if (is_ip6) { key6.value = (u64) dev_instance; add_failed = clib_bihash_add_del_24_8 (&vxm->vxlan6_gbp_tunnel_by_key, &key6, 1 /*add */ ); } else { key4.value = (u64) dev_instance; add_failed = clib_bihash_add_del_16_8 (&vxm->vxlan4_gbp_tunnel_by_key, &key4, 1 /*add */ ); } if (add_failed) { pool_put (vxm->tunnels, t); return VNET_API_ERROR_INVALID_REGISTRATION; } vxlan_gbp_register_udp_ports (); t->hw_if_index = vnet_register_interface (vnm, vxlan_gbp_device_class.index, dev_instance, vxlan_gbp_hw_class.index, dev_instance); vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, t->hw_if_index); /* Set vxlan_gbp tunnel output node */ u32 encap_index = !is_ip6 ? 
vxlan4_gbp_encap_node.index : vxlan6_gbp_encap_node.index; vnet_set_interface_output_node (vnm, t->hw_if_index, encap_index); t->sw_if_index = sw_if_index = hi->sw_if_index; if (VXLAN_GBP_TUNNEL_MODE_L3 == t->mode) { ip4_sw_interface_enable_disable (t->sw_if_index, 1); ip6_sw_interface_enable_disable (t->sw_if_index, 1); } vec_validate_init_empty (vxm->tunnel_index_by_sw_if_index, sw_if_index, ~0); vxm->tunnel_index_by_sw_if_index[sw_if_index] = dev_instance; /* setup l2 input config with l2 feature and bd 0 to drop packet */ vec_validate (l2im->configs, sw_if_index); l2im->configs[sw_if_index].feature_bitmap = L2INPUT_FEAT_DROP; l2im->configs[sw_if_index].bd_index = 0; vnet_sw_interface_t *si = vnet_get_sw_interface (vnm, sw_if_index); si->flags &= ~VNET_SW_INTERFACE_FLAG_HIDDEN; vnet_sw_interface_set_flags (vnm, sw_if_index, VNET_SW_INTERFACE_FLAG_ADMIN_UP); fib_node_init (&t->node, FIB_NODE_TYPE_VXLAN_GBP_TUNNEL); fib_prefix_t tun_dst_pfx; vnet_flood_class_t flood_class = VNET_FLOOD_CLASS_TUNNEL_NORMAL; fib_prefix_from_ip46_addr (&t->dst, &tun_dst_pfx); if (!ip46_address_is_multicast (&t->dst)) { /* Unicast tunnel - * source the FIB entry for the tunnel's destination * and become a child thereof. 
The tunnel will then get poked * when the forwarding for the entry updates, and the tunnel can * re-stack accordingly */ vtep_addr_ref (&t->src); t->fib_entry_index = fib_entry_track (t->encap_fib_index, &tun_dst_pfx, FIB_NODE_TYPE_VXLAN_GBP_TUNNEL, dev_instance, &t->sibling_index); vxlan_gbp_tunnel_restack_dpo (t); } else { /* Multicast tunnel - * as the same mcast group can be used for multiple mcast tunnels * with different VNIs, create the output fib adjacency only if * it does not already exist */ fib_protocol_t fp = fib_ip_proto (is_ip6); if (vtep_addr_ref (&t->dst) == 1) { fib_node_index_t mfei; adj_index_t ai; fib_route_path_t path = { .frp_proto = fib_proto_to_dpo (fp), .frp_addr = zero_addr, .frp_sw_if_index = 0xffffffff, .frp_fib_index = ~0, .frp_weight = 0, .frp_flags = FIB_ROUTE_PATH_LOCAL, .frp_mitf_flags = MFIB_ITF_FLAG_FORWARD, }; const mfib_prefix_t mpfx = { .fp_proto = fp, .fp_len = (is_ip6 ? 128 : 32), .fp_grp_addr = tun_dst_pfx.fp_addr, }; /* * Setup the (*,G) to receive traffic on the mcast group * - the forwarding interface is for-us * - the accepting interface is that from the API */ mfib_table_entry_path_update (t->encap_fib_index, &mpfx, MFIB_SOURCE_VXLAN_GBP, &path); path.frp_sw_if_index = a->mcast_sw_if_index; path.frp_flags = FIB_ROU
/*
 * Copyright (c) 2016,2020 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <plugins/adl/adl.h>
#include <vnet/fib/ip4_fib.h>
#include <vnet/dpo/load_balance.h>

/* Per-packet trace record captured by the ip4 adl allowlist node. */
typedef struct {
  u32 next_index;   /* next-node index chosen for the packet */
  u32 sw_if_index;  /* software interface index recorded for the packet */
} ip4_adl_allowlist_trace_t;

/* Trace formatter: render one ip4_adl_allowlist_trace_t record.
 * Consumes the standard (vm, node, trace) va_list triple; the first two
 * arguments are unused but must still be pulled off the list. */
static u8 * format_ip4_adl_allowlist_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ip4_adl_allowlist_trace_t *tr = va_arg (*args, ip4_adl_allowlist_trace_t *);

  return format (s, "IP4_ADL_ALLOWLIST: sw_if_index %d, next index %d",
                 tr->sw_if_index, tr->next_index);
}

/* X-macro list of node error counters: each _(SYM, string) entry expands
 * once into the error enum and once into the string table below. */
#define foreach_ip4_adl_allowlist_error         \
_(ALLOWED, "ip4 allowlist allowed")             \
_(DROPPED, "ip4 allowlist dropped")

/* Error codes for the ip4 adl allowlist node, generated from the list
 * above; IP4_ADL_ALLOWLIST_N_ERROR is the count. */
typedef enum {
#define _(sym,str) IP4_ADL_ALLOWLIST_ERROR_##sym,
  foreach_ip4_adl_allowlist_error
#undef _
  IP4_ADL_ALLOWLIST_N_ERROR,
} ip4_adl_allowlist_error_t;

/* Human-readable counter names, index-aligned with the enum above. */
static char * ip4_adl_allowlist_error_strings[] = {
#define _(sym,string) string,
  foreach_ip4_adl_allowlist_error
#undef _
};

VLIB_NODE_FN (ip4_adl_allowlist_node) (vlib_main_t * vm,
		  vlib_node_runtime_t * node,
		  vlib_frame_t * frame)
{
  u32 n_left_from, * from, * to_next;
  adl_feature_type_t next_index;
  adl_main_t *cm = &adl_main;
  vlib_combined_counter_main_t * vcm = &load_balance_main.lbm_via_counters;
  u32 thread_index = vm->thread_index;
  u32 allowed_packets;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  allowed_packets = n_left_from;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
			   to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
      	{
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 next0, next1;
          u32 sw_if_index0, sw_if_index1;
          ip4_header_t * ip0, * ip1;
          adl_config_main_t * ccm0, * ccm1;</