/* * Copyright (c) 2015 Cisco and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define ETHER_MAX_LEN 1518 /**< Maximum frame len, including CRC. */ dpdk_main_t dpdk_main; dpdk_config_main_t dpdk_config_main; #define LINK_STATE_ELOGS 0 /* Port configuration, mildly modified Intel app values */ static dpdk_port_type_t port_type_from_speed_capa (struct rte_eth_dev_info *dev_info) { if (dev_info->speed_capa & ETH_LINK_SPEED_100G) return VNET_DPDK_PORT_TYPE_ETH_100G; else if (dev_info->speed_capa & ETH_LINK_SPEED_56G) return VNET_DPDK_PORT_TYPE_ETH_56G; else if (dev_info->speed_capa & ETH_LINK_SPEED_50G) return VNET_DPDK_PORT_TYPE_ETH_50G; else if (dev_info->speed_capa & ETH_LINK_SPEED_40G) return VNET_DPDK_PORT_TYPE_ETH_40G; else if (dev_info->speed_capa & ETH_LINK_SPEED_25G) return VNET_DPDK_PORT_TYPE_ETH_25G; else if (dev_info->speed_capa & ETH_LINK_SPEED_20G) return VNET_DPDK_PORT_TYPE_ETH_20G; else if (dev_info->speed_capa & ETH_LINK_SPEED_10G) return VNET_DPDK_PORT_TYPE_ETH_10G; else if (dev_info->speed_capa & ETH_LINK_SPEED_5G) return VNET_DPDK_PORT_TYPE_ETH_5G; else if (dev_info->speed_capa & ETH_LINK_SPEED_2_5G) return VNET_DPDK_PORT_TYPE_ETH_2_5G; else if (dev_info->speed_capa & ETH_LINK_SPEED_1G) return VNET_DPDK_PORT_TYPE_ETH_1G; return 
VNET_DPDK_PORT_TYPE_UNKNOWN; } static dpdk_port_type_t port_type_from_link_speed (u32 link_speed) { switch (link_speed) { case ETH_SPEED_NUM_1G: return VNET_DPDK_PORT_TYPE_ETH_1G; case ETH_SPEED_NUM_2_5G: return VNET_DPDK_PORT_TYPE_ETH_2_5G; case ETH_SPEED_NUM_5G: return VNET_DPDK_PORT_TYPE_ETH_5G; case ETH_SPEED_NUM_10G: return VNET_DPDK_PORT_TYPE_ETH_10G; case ETH_SPEED_NUM_20G: return VNET_DPDK_PORT_TYPE_ETH_20G; case ETH_SPEED_NUM_25G: return VNET_DPDK_PORT_TYPE_ETH_25G; case ETH_SPEED_NUM_40G: return VNET_DPDK_PORT_TYPE_ETH_40G; case ETH_SPEED_NUM_50G: return VNET_DPDK_PORT_TYPE_ETH_50G; case ETH_SPEED_NUM_56G: return VNET_DPDK_PORT_TYPE_ETH_56G; case ETH_SPEED_NUM_100G: return VNET_DPDK_PORT_TYPE_ETH_100G; default: return VNET_DPDK_PORT_TYPE_UNKNOWN; } } static u32 dpdk_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hi, u32 flags) { dpdk_main_t *dm = &dpdk_main; dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance); u32 old = 0; if (ETHERNET_INTERFACE_FLAG_CONFIG_PROMISC (flags)) { old = (xd->flags & DPDK_DEVICE_FLAG_PROMISC) != 0; if (flags & ETHERNET_INTERFACE_FLAG_ACCEPT_ALL) xd->flags |= DPDK_DEVICE_FLAG_PROMISC; else xd->flags &= ~DPDK_DEVICE_FLAG_PROMISC; if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) { if (xd->flags & DPDK_DEVICE_FLAG_PROMISC) rte_eth_promiscuous_enable (xd->port_id); else rte_eth_promiscuous_disable (xd->port_id); } } else if (ETHERNET_INTERFACE_FLAG_CONFIG_MTU (flags)) { xd->port_conf.rxmode.max_rx_pkt_len = hi->max_packet_bytes; dpdk_device_setup (xd); } return old; } static void dpdk_device_lock_init (dpdk_device_t * xd) { int q; vec_validate (xd->lockp, xd->tx_q_used - 1); for (q = 0; q < xd->tx_q_used; q++) { xd->lockp[q] = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES); clib_memset ((void *) xd->lockp[q], 0, CLIB_CACHE_LINE_BYTES); } } static int dpdk_port_crc_strip_enabled (dpdk_device_t * xd) { return !(xd->port_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC); } /* The function 
check_l3cache helps check if Level 3 cache exists or not on current CPUs return value 1: exist. return value 0: not exist. */ static int check_l3cache () { struct dirent *dp; clib_error_t *err; const char *sys_cache_dir = "/sys/devices/system/cpu/cpu0/cache"; DIR *dir_cache = opendir (sys_cache_dir); if (dir_cache == NULL) return -1; while ((dp = readdir (dir_cache)) != NULL) { if (dp->d_type == DT_DIR) { u8 *p = NULL; int level_cache = -1; p = format (p, "%s/%s/%s", sys_cache_dir, dp->d_name, "level"); if ((err = clib_sysfs_read ((char *) p, "%d", &level_cache))) clib_error_free (err); if (level_cache == 3) { closedir (dir_cache); return 1; } } } if (dir_cache != NULL) closedir (dir_cache); return 0; } static clib_error_t * dpdk_lib_init (dpdk_main_t * dm) { u32 nports; u32 mtu, max_rx_frame; int i; clib_error_t *error; vlib_main_t *vm = vlib_get_main (); vnet_main_t *vnm = vnet_get_main (); vlib_thread_main_t *tm = vlib_get_thread_main (); vnet_device_main_t *vdm = &vnet_device_main; vnet_sw_interface_t *sw; vnet_hw_interface_t *hi; dpdk_device_t *xd; vlib_pci_addr_t last_pci_addr; u32 last_pci_addr_port = 0; vlib_thread_registration_t *tr_hqos; uword *p_hqos; u32 next_hqos_cpu = 0; u8 af_packet_instance_num = 0; last_pci_addr.as_u32 = ~0; dm->hqos_cpu_first_index = 0; dm->hqos_cpu_count = 0; /* find out which cpus will be used for I/O TX */ p_hqos = hash_get_mem (tm->thread_registrations_by_name, "hqos-threads"); tr_hqos = p_hqos ? 
(vlib_thread_registration_t *) p_hqos[0] : 0; if (tr_hqos && tr_hqos->count > 0) { dm->hqos_cpu_first_index = tr_hqos->first_index; dm->hqos_cpu_count = tr_hqos->count; } vec_validate_aligned (dm->devices_by_hqos_cpu, tm->n_vlib_mains - 1, CLIB_CACHE_LINE_BYTES); nports = rte_eth_dev_count_avail (); if (nports < 1) { dpdk_log_notice ("DPDK drivers found no Ethernet devices..."); } if (CLIB_DEBUG > 0) dpdk_log_notice ("DPDK drivers found %d ports...", nports); if (dm->conf->enable_tcp_udp_checksum) dm->buffer_flags_template &= ~(VNET_BUFFER_F_L4_CHECKSUM_CORRECT | VNET_BUFFER_F_L4_CHECKSUM_COMPUTED); /* vlib_buffer_t template */ vec_validate_aligned (dm->per_thread_data, tm->n_vlib_mains - 1, CLIB_CACHE_LINE_BYTES); for (i = 0; i < tm->n_vlib_mains; i++) { dpdk_per_thread_data_t *ptd = vec_elt_at_index (dm->per_thread_data, i); clib_memset (&ptd->buffer_template, 0, sizeof (vlib_buffer_t)); ptd->buffer_template.flags = dm->buffer_flags_template; vnet_buffer (&ptd->buffer_template)->sw_if_index[VLIB_TX] = (u32) ~ 0; } /* *INDENT-OFF* */ RTE_ETH_FOREACH_DEV(i) { u8 addr[6]; int vlan_off; struct rte_eth_dev_info dev_info; struct rte_pci_device *pci_dev; struct rte_eth_link l; dpdk_portid_t next_port_id; dpdk_device_config_t *devconf = 0; vlib_pci_addr_t pci_addr; uword *p = 0; if (!rte_eth_dev_is_valid_port(i)) continue; rte_eth_link_get_nowait (i, &l); rte_eth_dev_info_get (i, &dev_info); if (dev_info.device == 0) { dpdk_log_notice ("DPDK bug: missing device info. 
Skipping %s device", dev_info.driver_name); continue; } pci_dev = dpdk_get_pci_device (&dev_info); if (pci_dev) { pci_addr.domain = pci_dev->addr.domain; pci_addr.bus = pci_dev->addr.bus; pci_addr.slot = pci_dev->addr.devid; pci_addr.function = pci_dev->addr.function; p = hash_get (dm->conf->device_config_index_by_pci_addr, pci_addr.as_u32); } /* Create vnet interface */ vec_add2_aligned (dm->devices, xd, 1, CLIB_CACHE_LINE_BYTES); xd->nb_rx_desc = DPDK_NB_RX_DESC_DEFAULT; xd->nb_tx_desc = DPDK_NB_TX_DESC_DEFAULT; xd->cpu_socket = (i8) rte_eth_dev_socket_id (i); if (p) { devconf = pool_elt_at_index (dm->conf->dev_confs, p[0]); xd->name = devconf->name; } else devconf = &dm->conf->default_devconf; /* Handle interface naming for devices with multiple ports sharing same PCI ID */ if (pci_dev && ((next_port_id = rte_eth_find_next (i + 1)) != RTE_MAX_ETHPORTS)) { struct rte_eth_dev_info di = { 0 }; struct rte_pci_device *next_pci_dev; rte_eth_dev_info_get (next_port_id, &di); next_pci_dev = di.device ? 
RTE_DEV_TO_PCI (di.device) : 0; if (next_pci_dev && pci_addr.as_u32 != last_pci_addr.as_u32 && memcmp (&pci_dev->addr, &next_pci_dev->addr, sizeof (struct rte_pci_addr)) == 0) { xd->interface_name_suffix = format (0, "0"); last_pci_addr.as_u32 = pci_addr.as_u32; last_pci_addr_port = i; } else if (pci_addr.as_u32 == last_pci_addr.as_u32) { xd->interface_name_suffix = format (0, "%u", i - last_pci_addr_port); } else { last_pci_addr.as_u32 = ~0; } } else last_pci_addr.as_u32 = ~0; clib_memcpy (&xd->tx_conf, &dev_info.default_txconf, sizeof (struct rte_eth_txconf)); if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM) { xd->port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_IPV4_CKSUM; xd->flags |= DPDK_DEVICE_FLAG_RX_IP4_CKSUM; } if (dm->conf->no_multi_seg) { xd->port_conf.txmode.offloads &= ~DEV_TX_OFFLOAD_MULTI_SEGS; xd->port_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; xd->port_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_SCATTER; } else { xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS; xd->port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; xd->port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER; xd->flags |= DPDK_DEVICE_FLAG_MAYBE_MULTISEG; } xd->tx_q_used = clib_min (dev_info.max_tx_queues, tm->n_vlib_mains); if (devconf->num_tx_queues > 0 && devconf->num_tx_queues < xd->tx_q_used) xd->tx_q_used = clib_min (xd->tx_q_used, devconf->num_tx_queues); if (devconf->num_rx_queues > 1 && dev_info.max_rx_queues >= devconf->num_rx_queues) { xd->rx_q_used = devconf->num_rx_queues; xd->port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS; if (devconf->rss_fn == 0) xd->port_conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP; else { u64 unsupported_bits; xd->port_conf.rx_adv_conf.rss_conf.rss_hf = devconf->rss_fn; unsupported_bits = xd->port_conf.rx_adv_conf.rss_conf.rss_hf; unsupported_bits &= ~dev_info.flow_type_rss_offloads; if (unsupported_bits) dpdk_log_warn ("Unsupported RSS hash functions: %U", format_dpdk_rss_hf_name, unsupported_bits); } 
xd->port_conf.rx_adv_conf.rss_conf.rss_hf &= dev_info.flow_type_rss_offloads; } else xd->rx_q_used = 1; xd->flags |= DPDK_DEVICE_FLAG_PMD; /* workaround for drivers not setting driver_name */ if ((!dev_info.driver_name) && (pci_dev)) dev_info.driver_name = pci_dev->driver->driver.name; ASSERT (dev_info.driver_name); if (!xd->pmd) { #define _(s,f) else if (dev_info.driver_name && \ !strcmp(dev_info.driver_name, s)) \ xd->pmd = VNET_DPDK_PMD_##f; if (0) ; foreach_dpdk_pmd #undef _ else xd->pmd = VNET_DPDK_PMD_UNKNOWN; xd->port_type = VNET_DPDK_PORT_TYPE_UNKNOWN; xd->nb_rx_desc = DPDK_NB_RX_DESC_DEFAULT; xd->nb_tx_desc = DPDK_NB_TX_DESC_DEFAULT; switch (xd->pmd) { /* Drivers with valid speed_capa set */ case VNET_DPDK_PMD_E1000EM: case VNET_DPDK_PMD_IGB: case VNET_DPDK_PMD_IXGBE: case VNET_DPDK_PMD_I40E: case VNET_DPDK_PMD_ICE: xd->port_type = port_type_from_speed_capa (&dev_info); xd->supported_flow_actions = VNET_FLOW_ACTION_MARK | VNET_FLOW_ACTION_REDIRECT_TO_NODE | VNET_FLOW_ACTION_REDIRECT_TO_QUEUE | VNET_FLOW_ACTION_BUFFER_ADVANCE | VNET_FLOW_ACTION_COUNT | VNET_FLOW_ACTION_DROP; if (dm->conf->no_tx_checksum_offload == 0) { xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_CKSUM; xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_UDP_CKSUM; xd->flags |= DPDK_DEVICE_FLAG_TX_OFFLOAD | DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM; } break; case VNET_DPDK_PMD_CXGBE: case VNET_DPDK_PMD_MLX4: case VNET_DPDK_PMD_MLX5: case VNET_DPDK_PMD_QEDE: xd->port_type = port_type_from_speed_capa (&dev_info); break; /* SR-IOV VFs */ case VNET_DPDK_PMD_IGBVF: case VNET_DPDK_PMD_IXGBEVF: case VNET_DPDK_PMD_I40EVF: xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF; if (dm->conf->no_tx_checksum_offload == 0) { xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_CKSUM; xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_UDP_CKSUM; xd->flags |= DPDK_DEVICE_FLAG_TX_OFFLOAD | DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM; } break; case VNET_DPDK_PMD_THUNDERX: xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF; if 
(dm->conf->no_tx_checksum_offload == 0) { xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_CKSUM; xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_UDP_CKSUM; xd->flags |= DPDK_DEVICE_FLAG_TX_OFFLOAD; } break; case VNET_DPDK_PMD_ENA: xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF; xd->port_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_SCATTER; break; case VNET_DPDK_PMD_DPAA2: xd->port_type = VNET_DPDK_PORT_TYPE_ETH_10G; break; /* Cisco VIC */ case VNET_DPDK_PMD_ENIC: xd->port_type = port_type_from_link_speed (l.link_speed); break; /* Intel Red Rock Canyon */ case VNET_DPDK_PMD_FM10K: xd->port_type = VNET_DPDK_PORT_TYPE_ETH_SWITCH; break; /* virtio */ case VNET_DPDK_PMD_VIRTIO: xd->port_type = VNET_DPDK_PORT_TYPE_ETH_1G; xd->nb_rx_desc = DPDK_NB_RX_DESC_VIRTIO; xd->nb_tx_desc = DPDK_NB_TX_DESC_VIRTIO; break; /* vmxnet3 */ case VNET_DPDK_PMD_VMXNET3: xd->port_type = VNET_DPDK_PORT_TYPE_ETH_1G; xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS; break; case VNET_DPDK_PMD_AF_PACKET: xd->port_type = VNET_DPDK_PORT_TYPE_AF_PACKET; xd->af_packet_instance_num = af_packet_instance_num++; break; case VNET_DPDK_PMD_VIRTIO_USER: xd->port_type = VNET_DPDK_PORT_TYPE_VIRTIO_USER; break; case VNET_DPDK_PMD_VHOST_ETHER: xd->port_type = VNET_DPDK_PORT_TYPE_VHOST_ETHER; break; case VNET_DPDK_PMD_LIOVF_ETHER: xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF; break; case VNET_DPDK_PMD_FAILSAFE: xd->port_type = VNET_DPDK_PORT_TYPE_FAILSAFE; xd->port_conf.intr_conf.lsc = 1; break; case VNET_DPDK_PMD_NETVSC: xd->port_type = port_type_from_link_speed (l.link_speed); break; default: xd->port_type = VNET_DPDK_PORT_TYPE_UNKNOWN; } if (devconf->num_rx_desc) xd->nb_rx_desc = devconf->num_rx_desc; else { /* If num_rx_desc is not specified by VPP user, the current CPU is working
/*
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/session/application_namespace.h>
#include <vnet/session/session_table.h>
#include <vnet/session/session.h>
#include <vnet/fib/fib_table.h>

/**
 * Hash table of application namespaces by app ns ids
 */
uword *app_namespace_lookup_table;

/**
 * Pool of application namespaces
 */
static app_namespace_t *app_namespace_pool;

/**
 * Retrieve the application namespace stored at pool @a index.
 * Index must refer to a live pool element.
 */
app_namespace_t *
app_namespace_get (u32 index)
{
  app_namespace_t *app_ns;

  app_ns = pool_elt_at_index (app_namespace_pool, index);
  return app_ns;
}

/**
 * Look up an application namespace by its id string.
 *
 * @param ns_id namespace id (vector)
 * @return pointer to the namespace, or 0 if no namespace has that id
 */
app_namespace_t *
app_namespace_get_from_id (const u8 * ns_id)
{
  u32 ns_index;

  ns_index = app_namespace_index_from_id (ns_id);
  if (ns_index == APP_NAMESPACE_INVALID_INDEX)
    return 0;
  return app_namespace_get (ns_index);
}

/**
 * Pool index of @a app_ns, derived from its position in the pool.
 */
u32
app_namespace_index (app_namespace_t * app_ns)
{
  return (u32) (app_ns - app_namespace_pool);
}

/**
 * Allocate a fresh application namespace and register it in the id
 * lookup table. A private copy of @a ns_id is stored; the caller keeps
 * ownership of the vector it passed in.
 */
app_namespace_t *
app_namespace_alloc (u8 * ns_id)
{
  app_namespace_t *ns;

  pool_get (app_namespace_pool, ns);
  memset (ns, 0, sizeof (*ns));
  ns->ns_id = vec_dup (ns_id);
  hash_set_mem (app_namespace_lookup_table, ns->ns_id,
		app_namespace_index (ns));
  return ns;
}

/**
 * Add or delete an application namespace.
 *
 * On add, the namespace is looked up by id and, if absent, allocated
 * together with its local session table. The namespace's secret,
 * interface and fib bindings are then (re)applied. If @a a->sw_if_index
 * is valid, the ip4/ip6 fib ids are derived from that interface's fib
 * tables; otherwise the caller must supply @a a->ip4_fib_id.
 * Deletion is not implemented.
 *
 * @param a add/del arguments (ns_id, secret, sw_if_index, fib ids, is_add)
 * @return 0 on success, clib error on invalid arguments or unimplemented op
 */
clib_error_t *
vnet_app_namespace_add_del (vnet_app_namespace_add_del_args_t * a)
{
  app_namespace_t *app_ns;
  session_table_t *st;

  if (a->is_add)
    {
      if (a->sw_if_index != APP_NAMESPACE_INVALID_INDEX
	  && !vnet_get_sw_interface_safe (vnet_get_main (), a->sw_if_index))
	return clib_error_return_code (0, VNET_API_ERROR_INVALID_SW_IF_INDEX,
				       0, "sw_if_index %u doesn't exist",
				       a->sw_if_index);

      if (a->sw_if_index != APP_NAMESPACE_INVALID_INDEX)
	{
	  a->ip4_fib_id =
	    fib_table_get_table_id_for_sw_if_index (FIB_PROTOCOL_IP4,
						    a->sw_if_index);
	  /* BUGFIX: the ip6 fib id must come from the IP6 fib table;
	   * it was previously copy-pasted with FIB_PROTOCOL_IP4. */
	  a->ip6_fib_id =
	    fib_table_get_table_id_for_sw_if_index (FIB_PROTOCOL_IP6,
						    a->sw_if_index);
	}
      if (a->sw_if_index == APP_NAMESPACE_INVALID_INDEX
	  && a->ip4_fib_id == APP_NAMESPACE_INVALID_INDEX)
	return clib_error_return_code (0, VNET_API_ERROR_INVALID_VALUE, 0,
				       "sw_if_index or fib_id must be "
				       "configured");
      app_ns = app_namespace_get_from_id (a->ns_id);
      if (!app_ns)
	{
	  /* First time we see this id: allocate the namespace and give
	   * it a local session table scoped to it. */
	  app_ns = app_namespace_alloc (a->ns_id);
	  st = session_table_alloc ();
	  session_table_init (st, FIB_PROTOCOL_MAX);
	  st->is_local = 1;
	  st->appns_index = app_namespace_index (app_ns);
	  app_ns->local_table_index = session_table_index (st);
	}
      app_ns->ns_secret = a->secret;
      app_ns->sw_if_index = a->sw_if_index;
      app_ns->ip4_fib_index =
	fib_table_find (FIB_PROTOCOL_IP4, a->ip4_fib_id);
      app_ns->ip6_fib_index =
	fib_table_find (FIB_PROTOCOL_IP6, a->ip6_fib_id);
      session_lookup_set_tables_appns (app_ns);
    }
  else
    {
      return clib_error_return_code (0, VNET_API_ERROR_UNIMPLEMENTED, 0,
				     "namespace deletion not supported");
    }
  return 0;
}

/**
 * Id (vector) of @a app_ns. Returns the stored vector, not a copy;
 * the caller must not free it.
 */
const u8 *
app_namespace_id (app_namespace_t * app_ns)
{
  const u8 *id = app_ns->ns_id;
  return id;
}

u32
app_namespace_index_from_id (const u8 * ns_id)
{
  uword *indexp;
  indexp = hash_get_mem (app_namespace_lookup_table, ns_id);
  if (!indexp)
    return APP_NAMESPACE_INVALID_INDEX;
  return *indexp;
}

/**
 * Id of the namespace stored at pool @a index.
 * Index must refer to a live pool element.
 */
const u8 *
app_namespace_id_from_index (u32 index)
{
  return app_namespace_id (app_namespace_get (index));
}

/**
 * Fib index used by @a app_ns for the given fib protocol
 * (ip4 or ip6 table index).
 */
u32
app_namespace_get_fib_index (app_namespace_t * app_ns, u8 fib_proto)
{
  if (fib_proto == FIB_PROTOCOL_IP4)
    return app_ns->ip4_fib_index;
  return app_ns->ip6_fib_index;
}

/**
 * Local (namespace-scoped) session table of @a app_ns.
 */
session_table_t *
app_namespace_get_local_table (app_namespace_t * app_ns)
{
  u32 table_index = app_ns->local_table_index;
  return session_table_get (table_index);
}

/**
 * One-time initialization: create the id lookup table (if needed) and
 * allocate the "default" application namespace.
 */
void
app_namespaces_init (void)
{
  u8 *default_id = format (0, "default");
  vnet_app_namespace_add_del_args_t args = {
    .ns_id = default_id,
    .secret = 0,
    .sw_if_index = APP_NAMESPACE_INVALID_INDEX,
    .is_add = 1,
  };

  if (app_namespace_lookup_table == 0)
    app_namespace_lookup_table =
      hash_create_vec (0, sizeof (u8), sizeof (uword));

  /* Allocate the default namespace */
  vnet_app_namespace_add_del (&args);
  vec_free (default_id);
}

static clib_error_t *
app_ns_fn (vlib_main_t * vm, unformat_input_t * input,
	   vlib_cli_command_t * cmd)
{
  unformat_input_t _line_input, *line_input = &_line_input;
  u8 is_add = 0, *ns_id = 0, secret_set = 0, sw_if_index_set = 0;
  u32 sw_if_index, fib_id = APP_NAMESPACE_INVALID_INDEX;
  u64 secret;
  clib_error_t *error = 0;

  session_cli_return_if_not_enabled ();

  if (!unformat_user (input, unformat_line_input, line_input))
    return 0;

  while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (line_input, "add"))
	is_add = 1;
      else if (unformat (line_input, "id %_%v%_", &ns_id))
	;
      else if (unformat (line_input, "secret %lu", &secret))
	secret_set = 1;
      else if (unformat (line_input, "sw_if_index %u", &sw_if_index))
	sw_if_index_set = 1;
      else if (unformat (line_input, "fib_id", &fib_id))
	;
      else
	{
	  error = clib_error_return (0, "unknown input `%U'",
				     format_unformat_error, line_input);
	  unformat_free (line_input);
	  return error;
	}
    }