Diffstat (limited to 'src/vnet/devices')
-rw-r--r--  src/vnet/devices/af_packet/af_packet.api        61
-rw-r--r--  src/vnet/devices/af_packet/af_packet.c         433
-rw-r--r--  src/vnet/devices/af_packet/af_packet.h          73
-rw-r--r--  src/vnet/devices/af_packet/af_packet_api.c     143
-rw-r--r--  src/vnet/devices/af_packet/cli.c               211
-rw-r--r--  src/vnet/devices/af_packet/device.c            354
-rw-r--r--  src/vnet/devices/af_packet/dir.dox              29
-rw-r--r--  src/vnet/devices/af_packet/node.c              310
-rw-r--r--  src/vnet/devices/devices.c                     365
-rw-r--r--  src/vnet/devices/devices.h                     168
-rw-r--r--  src/vnet/devices/netmap/cli.c                  236
-rw-r--r--  src/vnet/devices/netmap/device.c               256
-rw-r--r--  src/vnet/devices/netmap/dir.dox                 27
-rw-r--r--  src/vnet/devices/netmap/net_netmap.h           650
-rw-r--r--  src/vnet/devices/netmap/netmap.api              54
-rw-r--r--  src/vnet/devices/netmap/netmap.c               312
-rw-r--r--  src/vnet/devices/netmap/netmap.h               166
-rw-r--r--  src/vnet/devices/netmap/netmap_api.c           137
-rw-r--r--  src/vnet/devices/netmap/node.c                 302
-rw-r--r--  src/vnet/devices/ssvm/node.c                   345
-rw-r--r--  src/vnet/devices/ssvm/ssvm_eth.c               491
-rw-r--r--  src/vnet/devices/ssvm/ssvm_eth.h               141
-rw-r--r--  src/vnet/devices/virtio/dir.dox                 27
-rw-r--r--  src/vnet/devices/virtio/vhost-user.c          3671
-rw-r--r--  src/vnet/devices/virtio/vhost-user.h           342
-rw-r--r--  src/vnet/devices/virtio/vhost_user.api         105
-rw-r--r--  src/vnet/devices/virtio/vhost_user_api.c       254
27 files changed, 9663 insertions, 0 deletions
diff --git a/src/vnet/devices/af_packet/af_packet.api b/src/vnet/devices/af_packet/af_packet.api
new file mode 100644
index 00000000..8d40ad60
--- /dev/null
+++ b/src/vnet/devices/af_packet/af_packet.api
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2015-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** \brief Create host-interface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param host_if_name - interface name
+ @param hw_addr - interface MAC
+ @param use_random_hw_addr - use random generated MAC
+*/
+define af_packet_create
+{
+ u32 client_index;
+ u32 context;
+
+ u8 host_if_name[64];
+ u8 hw_addr[6];
+ u8 use_random_hw_addr;
+};
+
+/** \brief Create host-interface response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+ @param sw_if_index - software index of the new af_packet interface
+*/
+define af_packet_create_reply
+{
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
+
+/** \brief Delete host-interface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param host_if_name - interface name
+*/
+autoreply define af_packet_delete
+{
+ u32 client_index;
+ u32 context;
+
+ u8 host_if_name[64];
+};
+
+/*
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
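As a usage sketch, a shared-memory binary-API client could fill in the af_packet_create request defined above roughly as follows. The field names come straight from the message definition; the queue handle, message-id constant and send call are the standard client plumbing assumed here, and the generated vl_api_af_packet_create_t type is taken to be in scope, as it is in af_packet_api.c below.

#include <vlibmemory/api.h>
#include <vnet/vnet_msg_enum.h>

/* Hypothetical client-side helper, for illustration only.  Assumes the
 * generated message typedefs (vl_typedefs) are already included, as in
 * af_packet_api.c below. */
static void
send_af_packet_create (unix_shared_memory_queue_t * q, u32 my_client_index,
                       u32 context, const char *ifname)
{
  vl_api_af_packet_create_t *mp = vl_msg_api_alloc (sizeof (*mp));

  memset (mp, 0, sizeof (*mp));
  mp->_vl_msg_id = clib_host_to_net_u16 (VL_API_AF_PACKET_CREATE);
  mp->client_index = my_client_index;
  mp->context = context;        /* echoed back in af_packet_create_reply */
  strncpy ((char *) mp->host_if_name, ifname, sizeof (mp->host_if_name) - 1);
  mp->use_random_hw_addr = 1;   /* VPP picks a 02:fe:xx:xx:xx:xx MAC */

  vl_msg_api_send_shmem (q, (u8 *) & mp);
}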
diff --git a/src/vnet/devices/af_packet/af_packet.c b/src/vnet/devices/af_packet/af_packet.c
new file mode 100644
index 00000000..32696014
--- /dev/null
+++ b/src/vnet/devices/af_packet/af_packet.c
@@ -0,0 +1,433 @@
+/*
+ *------------------------------------------------------------------
+ * af_packet.c - linux kernel packet interface
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <dirent.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <fcntl.h>
+
+#include <vppinfra/linux/sysfs.h>
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+
+#include <vnet/devices/af_packet/af_packet.h>
+
+#define AF_PACKET_DEBUG_SOCKET 0
+
+#define AF_PACKET_TX_FRAMES_PER_BLOCK 1024
+#define AF_PACKET_TX_FRAME_SIZE (2048 * 5)
+#define AF_PACKET_TX_BLOCK_NR 1
+#define AF_PACKET_TX_FRAME_NR (AF_PACKET_TX_BLOCK_NR * \
+ AF_PACKET_TX_FRAMES_PER_BLOCK)
+#define AF_PACKET_TX_BLOCK_SIZE (AF_PACKET_TX_FRAME_SIZE * \
+ AF_PACKET_TX_FRAMES_PER_BLOCK)
+
+#define AF_PACKET_RX_FRAMES_PER_BLOCK 1024
+#define AF_PACKET_RX_FRAME_SIZE (2048 * 5)
+#define AF_PACKET_RX_BLOCK_NR 1
+#define AF_PACKET_RX_FRAME_NR (AF_PACKET_RX_BLOCK_NR * \
+ AF_PACKET_RX_FRAMES_PER_BLOCK)
+#define AF_PACKET_RX_BLOCK_SIZE (AF_PACKET_RX_FRAME_SIZE * \
+ AF_PACKET_RX_FRAMES_PER_BLOCK)
+
+#if AF_PACKET_DEBUG_SOCKET == 1
+#define DBG_SOCK(args...) clib_warning(args);
+#else
+#define DBG_SOCK(args...)
+#endif
+
+/* defined in net/if.h but clashes with dpdk headers */
+unsigned int if_nametoindex (const char *ifname);
+
+typedef struct tpacket_req tpacket_req_t;
+
+static u32
+af_packet_eth_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hi,
+ u32 flags)
+{
+ clib_error_t *error;
+ u8 *s;
+ af_packet_main_t *apm = &af_packet_main;
+ af_packet_if_t *apif =
+ pool_elt_at_index (apm->interfaces, hi->dev_instance);
+
+ if (ETHERNET_INTERFACE_FLAG_MTU == (flags & ETHERNET_INTERFACE_FLAG_MTU))
+ {
+ s = format (0, "/sys/class/net/%s/mtu%c", apif->host_if_name, 0);
+
+ error = clib_sysfs_write ((char *) s, "%d", hi->max_packet_bytes);
+ vec_free (s);
+
+ if (error)
+ {
+ clib_error_report (error);
+ return VNET_API_ERROR_SYSCALL_ERROR_1;
+ }
+ }
+
+ return 0;
+}
+
+static clib_error_t *
+af_packet_fd_read_ready (clib_file_t * uf)
+{
+ af_packet_main_t *apm = &af_packet_main;
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 idx = uf->private_data;
+ af_packet_if_t *apif = pool_elt_at_index (apm->interfaces, idx);
+
+ apm->pending_input_bitmap =
+ clib_bitmap_set (apm->pending_input_bitmap, idx, 1);
+
+ /* Schedule the rx node */
+ vnet_device_input_set_interrupt_pending (vnm, apif->hw_if_index, 0);
+
+ return 0;
+}
+
+static int
+is_bridge (const u8 * host_if_name)
+{
+ u8 *s;
+ DIR *dir = NULL;
+
+ s = format (0, "/sys/class/net/%s/bridge%c", host_if_name, 0);
+ dir = opendir ((char *) s);
+ vec_free (s);
+
+ if (dir)
+ {
+ closedir (dir);
+ return 0;
+ }
+
+ return -1;
+}
+
+static int
+create_packet_v2_sock (int host_if_index, tpacket_req_t * rx_req,
+ tpacket_req_t * tx_req, int *fd, u8 ** ring)
+{
+ int ret, err;
+ struct sockaddr_ll sll;
+ int ver = TPACKET_V2;
+ socklen_t req_sz = sizeof (struct tpacket_req);
+ u32 ring_sz = rx_req->tp_block_size * rx_req->tp_block_nr +
+ tx_req->tp_block_size * tx_req->tp_block_nr;
+
+ if ((*fd = socket (AF_PACKET, SOCK_RAW, htons (ETH_P_ALL))) < 0)
+ {
+ DBG_SOCK ("Failed to create socket");
+ ret = VNET_API_ERROR_SYSCALL_ERROR_1;
+ goto error;
+ }
+
+ if ((err =
+ setsockopt (*fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof (ver))) < 0)
+ {
+ DBG_SOCK ("Failed to set rx packet interface version");
+ ret = VNET_API_ERROR_SYSCALL_ERROR_1;
+ goto error;
+ }
+
+ int opt = 1;
+ if ((err =
+ setsockopt (*fd, SOL_PACKET, PACKET_LOSS, &opt, sizeof (opt))) < 0)
+ {
+ DBG_SOCK ("Failed to set packet tx ring error handling option");
+ ret = VNET_API_ERROR_SYSCALL_ERROR_1;
+ goto error;
+ }
+
+ if ((err =
+ setsockopt (*fd, SOL_PACKET, PACKET_RX_RING, rx_req, req_sz)) < 0)
+ {
+ DBG_SOCK ("Failed to set packet rx ring options");
+ ret = VNET_API_ERROR_SYSCALL_ERROR_1;
+ goto error;
+ }
+
+ if ((err =
+ setsockopt (*fd, SOL_PACKET, PACKET_TX_RING, tx_req, req_sz)) < 0)
+ {
+ DBG_SOCK ("Failed to set packet tx ring options");
+ ret = VNET_API_ERROR_SYSCALL_ERROR_1;
+ goto error;
+ }
+
+ *ring =
+ mmap (NULL, ring_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED, *fd,
+ 0);
+ if (*ring == MAP_FAILED)
+ {
+ DBG_SOCK ("mmap failure");
+ ret = VNET_API_ERROR_SYSCALL_ERROR_1;
+ goto error;
+ }
+
+ memset (&sll, 0, sizeof (sll));
+ sll.sll_family = PF_PACKET;
+ sll.sll_protocol = htons (ETH_P_ALL);
+ sll.sll_ifindex = host_if_index;
+
+ if ((err = bind (*fd, (struct sockaddr *) &sll, sizeof (sll))) < 0)
+ {
+ DBG_SOCK ("Failed to bind rx packet socket (error %d)", err);
+ ret = VNET_API_ERROR_SYSCALL_ERROR_1;
+ goto error;
+ }
+
+ return 0;
+error:
+ if (*fd >= 0)
+ close (*fd);
+ *fd = -1;
+ return ret;
+}
+
+int
+af_packet_create_if (vlib_main_t * vm, u8 * host_if_name, u8 * hw_addr_set,
+ u32 * sw_if_index)
+{
+ af_packet_main_t *apm = &af_packet_main;
+ int ret, fd = -1;
+ struct tpacket_req *rx_req = 0;
+ struct tpacket_req *tx_req = 0;
+ u8 *ring = 0;
+ af_packet_if_t *apif = 0;
+ u8 hw_addr[6];
+ clib_error_t *error;
+ vnet_sw_interface_t *sw;
+ vnet_hw_interface_t *hw;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ vnet_main_t *vnm = vnet_get_main ();
+ uword *p;
+ uword if_index;
+ u8 *host_if_name_dup = vec_dup (host_if_name);
+ int host_if_index = -1;
+
+ p = mhash_get (&apm->if_index_by_host_if_name, host_if_name);
+ if (p)
+ {
+ return VNET_API_ERROR_SUBIF_ALREADY_EXISTS;
+ }
+
+ vec_validate (rx_req, 0);
+ rx_req->tp_block_size = AF_PACKET_RX_BLOCK_SIZE;
+ rx_req->tp_frame_size = AF_PACKET_RX_FRAME_SIZE;
+ rx_req->tp_block_nr = AF_PACKET_RX_BLOCK_NR;
+ rx_req->tp_frame_nr = AF_PACKET_RX_FRAME_NR;
+
+ vec_validate (tx_req, 0);
+ tx_req->tp_block_size = AF_PACKET_TX_BLOCK_SIZE;
+ tx_req->tp_frame_size = AF_PACKET_TX_FRAME_SIZE;
+ tx_req->tp_block_nr = AF_PACKET_TX_BLOCK_NR;
+ tx_req->tp_frame_nr = AF_PACKET_TX_FRAME_NR;
+
+ host_if_index = if_nametoindex ((const char *) host_if_name);
+
+ if (!host_if_index)
+ {
+ DBG_SOCK ("Wrong host interface name");
+ return VNET_API_ERROR_INVALID_INTERFACE;
+ }
+
+ ret = create_packet_v2_sock (host_if_index, rx_req, tx_req, &fd, &ring);
+
+ if (ret != 0)
+ goto error;
+
+ ret = is_bridge (host_if_name);
+
+ if (ret == 0) /* is a bridge, ignore state */
+ host_if_index = -1;
+
+ /* So far everything looks good, let's create interface */
+ pool_get (apm->interfaces, apif);
+ if_index = apif - apm->interfaces;
+
+ apif->host_if_index = host_if_index;
+ apif->fd = fd;
+ apif->rx_ring = ring;
+ apif->tx_ring = ring + rx_req->tp_block_size * rx_req->tp_block_nr;
+ apif->rx_req = rx_req;
+ apif->tx_req = tx_req;
+ apif->host_if_name = host_if_name_dup;
+ apif->per_interface_next_index = ~0;
+ apif->next_tx_frame = 0;
+ apif->next_rx_frame = 0;
+
+ if (tm->n_vlib_mains > 1)
+ clib_spinlock_init (&apif->lockp);
+
+ {
+ clib_file_t template = { 0 };
+ template.read_function = af_packet_fd_read_ready;
+ template.file_descriptor = fd;
+ template.private_data = if_index;
+ template.flags = UNIX_FILE_EVENT_EDGE_TRIGGERED;
+ apif->clib_file_index = clib_file_add (&file_main, &template);
+ }
+
+ /* use configured or generate a random MAC address */
+ if (hw_addr_set)
+ clib_memcpy (hw_addr, hw_addr_set, 6);
+ else
+ {
+ f64 now = vlib_time_now (vm);
+ u32 rnd;
+ rnd = (u32) (now * 1e6);
+ rnd = random_u32 (&rnd);
+
+ clib_memcpy (hw_addr + 2, &rnd, sizeof (rnd));
+ hw_addr[0] = 2;
+ hw_addr[1] = 0xfe;
+ }
+
+ error = ethernet_register_interface (vnm, af_packet_device_class.index,
+ if_index, hw_addr, &apif->hw_if_index,
+ af_packet_eth_flag_change);
+
+ if (error)
+ {
+ memset (apif, 0, sizeof (*apif));
+ pool_put (apm->interfaces, apif);
+ clib_error_report (error);
+ ret = VNET_API_ERROR_SYSCALL_ERROR_1;
+ goto error;
+ }
+
+ sw = vnet_get_hw_sw_interface (vnm, apif->hw_if_index);
+ hw = vnet_get_hw_interface (vnm, apif->hw_if_index);
+ apif->sw_if_index = sw->sw_if_index;
+ vnet_hw_interface_set_input_node (vnm, apif->hw_if_index,
+ af_packet_input_node.index);
+
+ vnet_hw_interface_assign_rx_thread (vnm, apif->hw_if_index, 0, /* queue */
+ ~0 /* any cpu */ );
+
+ hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE;
+ vnet_hw_interface_set_flags (vnm, apif->hw_if_index,
+ VNET_HW_INTERFACE_FLAG_LINK_UP);
+
+ vnet_hw_interface_set_rx_mode (vnm, apif->hw_if_index, 0,
+ VNET_HW_INTERFACE_RX_MODE_INTERRUPT);
+
+ mhash_set_mem (&apm->if_index_by_host_if_name, host_if_name_dup, &if_index,
+ 0);
+ if (sw_if_index)
+ *sw_if_index = apif->sw_if_index;
+
+ return 0;
+
+error:
+ vec_free (host_if_name_dup);
+ vec_free (rx_req);
+ vec_free (tx_req);
+ return ret;
+}
+
+int
+af_packet_delete_if (vlib_main_t * vm, u8 * host_if_name)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ af_packet_main_t *apm = &af_packet_main;
+ af_packet_if_t *apif;
+ uword *p;
+ uword if_index;
+ u32 ring_sz;
+
+ p = mhash_get (&apm->if_index_by_host_if_name, host_if_name);
+ if (p == NULL)
+ {
+ clib_warning ("Host interface %s does not exist", host_if_name);
+ return VNET_API_ERROR_SYSCALL_ERROR_1;
+ }
+ apif = pool_elt_at_index (apm->interfaces, p[0]);
+ if_index = apif - apm->interfaces;
+
+ /* bring down the interface */
+ vnet_hw_interface_set_flags (vnm, apif->hw_if_index, 0);
+ vnet_hw_interface_unassign_rx_thread (vnm, apif->hw_if_index, 0);
+
+ /* clean up */
+ if (apif->clib_file_index != ~0)
+ {
+ clib_file_del (&file_main, file_main.file_pool + apif->clib_file_index);
+ apif->clib_file_index = ~0;
+ }
+ else
+ close (apif->fd);
+
+ ring_sz = apif->rx_req->tp_block_size * apif->rx_req->tp_block_nr +
+ apif->tx_req->tp_block_size * apif->tx_req->tp_block_nr;
+ if (munmap (apif->rx_ring, ring_sz))
+ clib_warning ("Host interface %s could not free rx/tx ring",
+ host_if_name);
+ apif->rx_ring = NULL;
+ apif->tx_ring = NULL;
+ apif->fd = -1;
+
+ vec_free (apif->rx_req);
+ apif->rx_req = NULL;
+ vec_free (apif->tx_req);
+ apif->tx_req = NULL;
+
+ vec_free (apif->host_if_name);
+ apif->host_if_name = NULL;
+ apif->host_if_index = -1;
+
+ mhash_unset (&apm->if_index_by_host_if_name, host_if_name, &if_index);
+
+ ethernet_delete_interface (vnm, apif->hw_if_index);
+
+ pool_put (apm->interfaces, apif);
+
+ return 0;
+}
+
+static clib_error_t *
+af_packet_init (vlib_main_t * vm)
+{
+ af_packet_main_t *apm = &af_packet_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+
+ memset (apm, 0, sizeof (af_packet_main_t));
+
+ mhash_init_vec_string (&apm->if_index_by_host_if_name, sizeof (uword));
+
+ vec_validate_aligned (apm->rx_buffers, tm->n_vlib_mains - 1,
+ CLIB_CACHE_LINE_BYTES);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (af_packet_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
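The macros at the top of this file fix the ring geometry used by create_packet_v2_sock() and af_packet_create_if(); a quick worked calculation of the sizes involved, derived only from those macros:

/* Sketch of the TPACKET_V2 ring geometry configured above. */
static inline u32
af_packet_example_ring_bytes (void)
{
  u32 frame_size = 2048 * 5;            /* 10240 bytes per frame        */
  u32 block_size = frame_size * 1024;   /* 10485760 bytes, 1 block/ring */
  /* One rx block plus one tx block are covered by the single mmap();
   * the tx ring starts block_size bytes into the mapping. */
  return block_size + block_size;       /* 20971520 bytes in total      */
}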
diff --git a/src/vnet/devices/af_packet/af_packet.h b/src/vnet/devices/af_packet/af_packet.h
new file mode 100644
index 00000000..95c7e7cf
--- /dev/null
+++ b/src/vnet/devices/af_packet/af_packet.h
@@ -0,0 +1,73 @@
+/*
+ *------------------------------------------------------------------
+ * af_packet.h - linux kernel packet interface header file
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vppinfra/lock.h>
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ clib_spinlock_t lockp;
+ u8 *host_if_name;
+ int host_if_index;
+ int fd;
+ struct tpacket_req *rx_req;
+ struct tpacket_req *tx_req;
+ u8 *rx_ring;
+ u8 *tx_ring;
+ u32 hw_if_index;
+ u32 sw_if_index;
+ u32 clib_file_index;
+
+ u32 next_rx_frame;
+ u32 next_tx_frame;
+
+ u32 per_interface_next_index;
+ u8 is_admin_up;
+} af_packet_if_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ af_packet_if_t *interfaces;
+
+ /* bitmap of pending rx interfaces */
+ uword *pending_input_bitmap;
+
+ /* rx buffer cache */
+ u32 **rx_buffers;
+
+ /* hash of host interface names */
+ mhash_t if_index_by_host_if_name;
+} af_packet_main_t;
+
+af_packet_main_t af_packet_main;
+extern vnet_device_class_t af_packet_device_class;
+extern vlib_node_registration_t af_packet_input_node;
+
+int af_packet_create_if (vlib_main_t * vm, u8 * host_if_name,
+ u8 * hw_addr_set, u32 * sw_if_index);
+int af_packet_delete_if (vlib_main_t * vm, u8 * host_if_name);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
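The two exported entry points declared above are the whole in-process interface of the driver; a minimal sketch of a caller (mirroring what cli.c and af_packet_api.c later in this change do, with the interface name passed as a NUL-terminated vector) could look like this:

#include <vnet/vnet.h>
#include <vnet/devices/af_packet/af_packet.h>

/* Sketch only: create a host interface bound to the Linux device "vpp1"
 * with a random MAC address, then delete it again.  Return values follow
 * the VNET_API_ERROR_* convention used throughout this driver. */
static int
af_packet_example (vlib_main_t * vm)
{
  u32 sw_if_index = ~0;
  u8 *name = format (0, "vpp1%c", 0);   /* vector, NUL-terminated */
  int rv;

  rv = af_packet_create_if (vm, name, 0 /* random hw addr */, &sw_if_index);
  if (rv == 0)
    rv = af_packet_delete_if (vm, name);

  vec_free (name);
  return rv;
}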
diff --git a/src/vnet/devices/af_packet/af_packet_api.c b/src/vnet/devices/af_packet/af_packet_api.c
new file mode 100644
index 00000000..414c838c
--- /dev/null
+++ b/src/vnet/devices/af_packet/af_packet_api.c
@@ -0,0 +1,143 @@
+/*
+ *------------------------------------------------------------------
+ * af_packet_api.c - af-packet api
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/interface.h>
+#include <vnet/api_errno.h>
+#include <vnet/devices/af_packet/af_packet.h>
+
+#include <vnet/vnet_msg_enum.h>
+
+#define vl_typedefs /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define endian-swap functions */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vnet/vnet_all_api_h.h>
+#undef vl_printfun
+
+#include <vlibapi/api_helper_macros.h>
+
+#define foreach_vpe_api_msg \
+_(AF_PACKET_CREATE, af_packet_create) \
+_(AF_PACKET_DELETE, af_packet_delete)
+
+static void
+vl_api_af_packet_create_t_handler (vl_api_af_packet_create_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vl_api_af_packet_create_reply_t *rmp;
+ int rv = 0;
+ u8 *host_if_name = NULL;
+ u32 sw_if_index;
+
+ host_if_name = format (0, "%s", mp->host_if_name);
+ vec_add1 (host_if_name, 0);
+
+ rv = af_packet_create_if (vm, host_if_name,
+ mp->use_random_hw_addr ? 0 : mp->hw_addr,
+ &sw_if_index);
+
+ vec_free (host_if_name);
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_AF_PACKET_CREATE_REPLY,
+ ({
+ rmp->sw_if_index = clib_host_to_net_u32(sw_if_index);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_af_packet_delete_t_handler (vl_api_af_packet_delete_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vl_api_af_packet_delete_reply_t *rmp;
+ int rv = 0;
+ u8 *host_if_name = NULL;
+
+ host_if_name = format (0, "%s", mp->host_if_name);
+ vec_add1 (host_if_name, 0);
+
+ rv = af_packet_delete_if (vm, host_if_name);
+
+ vec_free (host_if_name);
+
+ REPLY_MACRO (VL_API_AF_PACKET_DELETE_REPLY);
+}
+
+/*
+ * af_packet_api_hookup
+ * Add vpe's API message handlers to the table.
+ * vlib has already mapped shared memory and
+ * added the client registration handlers.
+ * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process()
+ */
+#define vl_msg_name_crc_list
+#include <vnet/vnet_all_api_h.h>
+#undef vl_msg_name_crc_list
+
+static void
+setup_message_id_table (api_main_t * am)
+{
+#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
+ foreach_vl_msg_name_crc_af_packet;
+#undef _
+}
+
+static clib_error_t *
+af_packet_api_hookup (vlib_main_t * vm)
+{
+ api_main_t *am = &api_main;
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_vpe_api_msg;
+#undef _
+
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ setup_message_id_table (am);
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (af_packet_api_hookup);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/af_packet/cli.c b/src/vnet/devices/af_packet/cli.c
new file mode 100644
index 00000000..44dc5179
--- /dev/null
+++ b/src/vnet/devices/af_packet/cli.c
@@ -0,0 +1,211 @@
+/*
+ *------------------------------------------------------------------
+ * af_packet.c - linux kernel packet interface
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <fcntl.h> /* for open */
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/uio.h> /* for iovec */
+#include <netinet/in.h>
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+
+#include <vnet/devices/af_packet/af_packet.h>
+
+/**
+ * @file
+ * @brief CLI for Host Interface Device Driver.
+ *
+ * This file contains the source code for CLI for the host interface.
+ */
+
+static clib_error_t *
+af_packet_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 *host_if_name = NULL;
+ u8 hwaddr[6];
+ u8 *hw_addr_ptr = 0;
+ u32 sw_if_index;
+ int r;
+ clib_error_t *error = NULL;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "name %s", &host_if_name))
+ ;
+ else
+ if (unformat
+ (line_input, "hw-addr %U", unformat_ethernet_address, hwaddr))
+ hw_addr_ptr = hwaddr;
+ else
+ {
+ error = clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, line_input);
+ goto done;
+ }
+ }
+
+ if (host_if_name == NULL)
+ {
+ error = clib_error_return (0, "missing host interface name");
+ goto done;
+ }
+
+ r = af_packet_create_if (vm, host_if_name, hw_addr_ptr, &sw_if_index);
+
+ if (r == VNET_API_ERROR_SYSCALL_ERROR_1)
+ {
+ error = clib_error_return (0, "%s (errno %d)", strerror (errno), errno);
+ goto done;
+ }
+
+ if (r == VNET_API_ERROR_INVALID_INTERFACE)
+ {
+ error = clib_error_return (0, "Invalid interface name");
+ goto done;
+ }
+
+ if (r == VNET_API_ERROR_SUBIF_ALREADY_EXISTS)
+ {
+ error = clib_error_return (0, "Interface already exists");
+ goto done;
+ }
+
+ vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main (),
+ sw_if_index);
+
+done:
+ vec_free (host_if_name);
+ unformat_free (line_input);
+
+ return error;
+}
+
+/*?
+ * Create a host interface that will attach to a linux AF_PACKET
+ * interface, one side of a veth pair. The veth pair must already
+ * exist. Once created, a new host interface will exist in VPP
+ * with the name '<em>host-<ifname></em>', where '<em><ifname></em>'
+ * is the name of the specified veth pair. Use the
+ * '<em>show interface</em>' command to display host interface details.
+ *
+ * This command has the following optional parameters:
+ *
+ * - <b>hw-addr <mac-addr></b> - Optional ethernet address, can be in either
+ * X:X:X:X:X:X unix or X.X.X cisco format.
+ *
+ * @cliexpar
+ * Example of how to create a host interface tied to one side of an
+ * existing linux veth pair named vpp1:
+ * @cliexstart{create host-interface name vpp1}
+ * host-vpp1
+ * @cliexend
+ * Once the host interface is created, enable the interface using:
+ * @cliexcmd{set interface state host-vpp1 up}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (af_packet_create_command, static) = {
+ .path = "create host-interface",
+ .short_help = "create host-interface name <ifname> [hw-addr <mac-addr>]",
+ .function = af_packet_create_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+af_packet_delete_command_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 *host_if_name = NULL;
+ clib_error_t *error = NULL;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "name %s", &host_if_name))
+ ;
+ else
+ {
+ error = clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, line_input);
+ goto done;
+ }
+ }
+
+ if (host_if_name == NULL)
+ {
+ error = clib_error_return (0, "missing host interface name");
+ goto done;
+ }
+
+ af_packet_delete_if (vm, host_if_name);
+
+done:
+ vec_free (host_if_name);
+ unformat_free (line_input);
+
+ return error;
+}
+
+/*?
+ * Delete a host interface. Use the linux interface name to identify
+ * the host interface to be deleted. In VPP, host interfaces are
+ * named as '<em>host-<ifname></em>', where '<em><ifname></em>'
+ * is the name of the linux interface.
+ *
+ * @cliexpar
+ * Example of how to delete a host interface named host-vpp1:
+ * @cliexcmd{delete host-interface name vpp1}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (af_packet_delete_command, static) = {
+ .path = "delete host-interface",
+ .short_help = "delete host-interface name <ifname>",
+ .function = af_packet_delete_command_fn,
+};
+/* *INDENT-ON* */
+
+clib_error_t *
+af_packet_cli_init (vlib_main_t * vm)
+{
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (af_packet_cli_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/af_packet/device.c b/src/vnet/devices/af_packet/device.c
new file mode 100644
index 00000000..e01b1c71
--- /dev/null
+++ b/src/vnet/devices/af_packet/device.c
@@ -0,0 +1,354 @@
+/*
+ *------------------------------------------------------------------
+ * af_packet.c - linux kernel packet interface
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <linux/if_packet.h>
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+
+#include <vnet/devices/af_packet/af_packet.h>
+
+#define foreach_af_packet_tx_func_error \
+_(FRAME_NOT_READY, "tx frame not ready") \
+_(TXRING_EAGAIN, "tx sendto temporary failure") \
+_(TXRING_FATAL, "tx sendto fatal failure") \
+_(TXRING_OVERRUN, "tx ring overrun")
+
+typedef enum
+{
+#define _(f,s) AF_PACKET_TX_ERROR_##f,
+ foreach_af_packet_tx_func_error
+#undef _
+ AF_PACKET_TX_N_ERROR,
+} af_packet_tx_func_error_t;
+
+static char *af_packet_tx_func_error_strings[] = {
+#define _(n,s) s,
+ foreach_af_packet_tx_func_error
+#undef _
+};
+
+
+static u8 *
+format_af_packet_device_name (u8 * s, va_list * args)
+{
+ u32 i = va_arg (*args, u32);
+ af_packet_main_t *apm = &af_packet_main;
+ af_packet_if_t *apif = pool_elt_at_index (apm->interfaces, i);
+
+ s = format (s, "host-%s", apif->host_if_name);
+ return s;
+}
+
+static u8 *
+format_af_packet_device (u8 * s, va_list * args)
+{
+ s = format (s, "Linux PACKET socket interface");
+ return s;
+}
+
+static u8 *
+format_af_packet_tx_trace (u8 * s, va_list * args)
+{
+ s = format (s, "Unimplemented...");
+ return s;
+}
+
+static uword
+af_packet_interface_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ af_packet_main_t *apm = &af_packet_main;
+ u32 *buffers = vlib_frame_args (frame);
+ u32 n_left = frame->n_vectors;
+ u32 n_sent = 0;
+ vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
+ af_packet_if_t *apif =
+ pool_elt_at_index (apm->interfaces, rd->dev_instance);
+ int block = 0;
+ u32 block_size = apif->tx_req->tp_block_size;
+ u32 frame_size = apif->tx_req->tp_frame_size;
+ u32 frame_num = apif->tx_req->tp_frame_nr;
+ u8 *block_start = apif->tx_ring + block * block_size;
+ u32 tx_frame = apif->next_tx_frame;
+ struct tpacket2_hdr *tph;
+ u32 frame_not_ready = 0;
+
+ clib_spinlock_lock_if_init (&apif->lockp);
+
+ while (n_left > 0)
+ {
+ u32 len;
+ u32 offset = 0;
+ vlib_buffer_t *b0;
+ n_left--;
+ u32 bi = buffers[0];
+ buffers++;
+
+ tph = (struct tpacket2_hdr *) (block_start + tx_frame * frame_size);
+
+ if (PREDICT_FALSE
+ (tph->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING)))
+ {
+ frame_not_ready++;
+ goto next;
+ }
+
+ do
+ {
+ b0 = vlib_get_buffer (vm, bi);
+ len = b0->current_length;
+ clib_memcpy ((u8 *) tph +
+ TPACKET_ALIGN (sizeof (struct tpacket2_hdr)) + offset,
+ vlib_buffer_get_current (b0), len);
+ offset += len;
+ }
+ while ((bi =
+ (b0->flags & VLIB_BUFFER_NEXT_PRESENT) ? b0->next_buffer : 0));
+
+ tph->tp_len = tph->tp_snaplen = offset;
+ tph->tp_status = TP_STATUS_SEND_REQUEST;
+ n_sent++;
+ next:
+ /* check if we've exhausted the ring */
+ if (PREDICT_FALSE (frame_not_ready + n_sent == frame_num))
+ break;
+
+ tx_frame = (tx_frame + 1) % frame_num;
+ }
+
+ CLIB_MEMORY_BARRIER ();
+
+ if (PREDICT_TRUE (n_sent))
+ {
+ apif->next_tx_frame = tx_frame;
+
+ if (PREDICT_FALSE (sendto (apif->fd, NULL, 0,
+ MSG_DONTWAIT, NULL, 0) == -1))
+ {
+ /* Uh-oh, drop & move on, but count whether it was fatal or not.
+ * Note that we have no reliable way to properly determine the
+ * disposition of the packets we just enqueued for delivery.
+ */
+ vlib_error_count (vm, node->node_index,
+ unix_error_is_fatal (errno) ?
+ AF_PACKET_TX_ERROR_TXRING_FATAL :
+ AF_PACKET_TX_ERROR_TXRING_EAGAIN, n_sent);
+ }
+ }
+
+ clib_spinlock_unlock_if_init (&apif->lockp);
+
+ if (PREDICT_FALSE (frame_not_ready))
+ vlib_error_count (vm, node->node_index,
+ AF_PACKET_TX_ERROR_FRAME_NOT_READY, frame_not_ready);
+
+ if (PREDICT_FALSE (frame_not_ready + n_sent == frame_num))
+ vlib_error_count (vm, node->node_index, AF_PACKET_TX_ERROR_TXRING_OVERRUN,
+ n_left);
+
+ vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
+ return frame->n_vectors;
+}
+
+static void
+af_packet_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
+ u32 node_index)
+{
+ af_packet_main_t *apm = &af_packet_main;
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+ af_packet_if_t *apif =
+ pool_elt_at_index (apm->interfaces, hw->dev_instance);
+
+ /* Shut off redirection */
+ if (node_index == ~0)
+ {
+ apif->per_interface_next_index = node_index;
+ return;
+ }
+
+ apif->per_interface_next_index =
+ vlib_node_add_next (vlib_get_main (), af_packet_input_node.index,
+ node_index);
+}
+
+static void
+af_packet_clear_hw_interface_counters (u32 instance)
+{
+ /* Nothing for now */
+}
+
+static clib_error_t *
+af_packet_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
+ u32 flags)
+{
+ af_packet_main_t *apm = &af_packet_main;
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+ af_packet_if_t *apif =
+ pool_elt_at_index (apm->interfaces, hw->dev_instance);
+ u32 hw_flags;
+ int rv, fd = socket (AF_UNIX, SOCK_DGRAM, 0);
+ struct ifreq ifr;
+
+ if (0 > fd)
+ {
+ clib_unix_warning ("af_packet_%s could not open socket",
+ apif->host_if_name);
+ return 0;
+ }
+
+ /* if interface is a bridge ignore */
+ if (apif->host_if_index < 0)
+ goto error; /* no error */
+
+ /* use host_if_index in case host name has changed */
+ ifr.ifr_ifindex = apif->host_if_index;
+ if ((rv = ioctl (fd, SIOCGIFNAME, &ifr)) < 0)
+ {
+ clib_unix_warning ("af_packet_%s ioctl could not retrieve eth name",
+ apif->host_if_name);
+ goto error;
+ }
+
+ apif->is_admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
+
+ if ((rv = ioctl (fd, SIOCGIFFLAGS, &ifr)) < 0)
+ {
+ clib_unix_warning ("af_packet_%s error: %d",
+ apif->is_admin_up ? "up" : "down", rv);
+ goto error;
+ }
+
+ if (apif->is_admin_up)
+ {
+ hw_flags = VNET_HW_INTERFACE_FLAG_LINK_UP;
+ ifr.ifr_flags |= IFF_UP;
+ }
+ else
+ {
+ hw_flags = 0;
+ ifr.ifr_flags &= ~IFF_UP;
+ }
+
+ if ((rv = ioctl (fd, SIOCSIFFLAGS, &ifr)) < 0)
+ {
+ clib_unix_warning ("af_packet_%s error: %d",
+ apif->is_admin_up ? "up" : "down", rv);
+ goto error;
+ }
+
+ vnet_hw_interface_set_flags (vnm, hw_if_index, hw_flags);
+
+error:
+ if (0 <= fd)
+ close (fd);
+
+ return 0; /* no error */
+}
+
+static clib_error_t *
+af_packet_subif_add_del_function (vnet_main_t * vnm,
+ u32 hw_if_index,
+ struct vnet_sw_interface_t *st, int is_add)
+{
+ /* Nothing for now */
+ return 0;
+}
+
+static clib_error_t *af_packet_set_mac_address_function
+ (struct vnet_hw_interface_t *hi, char *address)
+{
+ af_packet_main_t *apm = &af_packet_main;
+ af_packet_if_t *apif =
+ pool_elt_at_index (apm->interfaces, hi->dev_instance);
+ int rv, fd = socket (AF_UNIX, SOCK_DGRAM, 0);
+ struct ifreq ifr;
+
+ if (0 > fd)
+ {
+ clib_unix_warning ("af_packet_%s could not open socket",
+ apif->host_if_name);
+ return 0;
+ }
+
+ /* if interface is a bridge ignore */
+ if (apif->host_if_index < 0)
+ goto error; /* no error */
+
+ /* use host_if_index in case host name has changed */
+ ifr.ifr_ifindex = apif->host_if_index;
+ if ((rv = ioctl (fd, SIOCGIFNAME, &ifr)) < 0)
+ {
+ clib_unix_warning
+ ("af_packet_%s ioctl could not retrieve eth name, error: %d",
+ apif->host_if_name, rv);
+ goto error;
+ }
+
+ clib_memcpy (ifr.ifr_hwaddr.sa_data, address, 6);
+ ifr.ifr_hwaddr.sa_family = ARPHRD_ETHER;
+
+ if ((rv = ioctl (fd, SIOCSIFHWADDR, &ifr)) < 0)
+ {
+ clib_unix_warning ("af_packet_%s ioctl could not set mac, error: %d",
+ apif->host_if_name, rv);
+ goto error;
+ }
+
+error:
+
+ if (0 <= fd)
+ close (fd);
+
+ return 0; /* no error */
+}
+
+/* *INDENT-OFF* */
+VNET_DEVICE_CLASS (af_packet_device_class) = {
+ .name = "af-packet",
+ .tx_function = af_packet_interface_tx,
+ .format_device_name = format_af_packet_device_name,
+ .format_device = format_af_packet_device,
+ .format_tx_trace = format_af_packet_tx_trace,
+ .tx_function_n_errors = AF_PACKET_TX_N_ERROR,
+ .tx_function_error_strings = af_packet_tx_func_error_strings,
+ .rx_redirect_to_node = af_packet_set_interface_next_node,
+ .clear_counters = af_packet_clear_hw_interface_counters,
+ .admin_up_down_function = af_packet_interface_admin_up_down,
+ .subif_add_del_function = af_packet_subif_add_del_function,
+ .mac_addr_change_function = af_packet_set_mac_address_function,
+};
+
+VLIB_DEVICE_TX_FUNCTION_MULTIARCH (af_packet_device_class,
+ af_packet_interface_tx)
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/af_packet/dir.dox b/src/vnet/devices/af_packet/dir.dox
new file mode 100644
index 00000000..78991c6d
--- /dev/null
+++ b/src/vnet/devices/af_packet/dir.dox
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Doxygen directory documentation */
+
+/**
+@dir
+@brief Host Interface Implementation.
+
+This directory contains the source code for the Host Interface driver. The
+Host Interface driver uses the Linux AF_PACKET (TPACKET_V2) socket interface
+to exchange packets with a kernel network device.
+
+
+*/
+/*? %%clicmd:group_label Host Interface %% ?*/
+/*? %%syscfg:group_label Host Interface %% ?*/
diff --git a/src/vnet/devices/af_packet/node.c b/src/vnet/devices/af_packet/node.c
new file mode 100644
index 00000000..99c91f38
--- /dev/null
+++ b/src/vnet/devices/af_packet/node.c
@@ -0,0 +1,310 @@
+/*
+ *------------------------------------------------------------------
+ * af_packet.c - linux kernel packet interface
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <linux/if_packet.h>
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/devices/devices.h>
+#include <vnet/feature/feature.h>
+
+#include <vnet/devices/af_packet/af_packet.h>
+
+#define foreach_af_packet_input_error
+
+typedef enum
+{
+#define _(f,s) AF_PACKET_INPUT_ERROR_##f,
+ foreach_af_packet_input_error
+#undef _
+ AF_PACKET_INPUT_N_ERROR,
+} af_packet_input_error_t;
+
+static char *af_packet_input_error_strings[] = {
+#define _(n,s) s,
+ foreach_af_packet_input_error
+#undef _
+};
+
+typedef struct
+{
+ u32 next_index;
+ u32 hw_if_index;
+ int block;
+ struct tpacket2_hdr tph;
+} af_packet_input_trace_t;
+
+static u8 *
+format_af_packet_input_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ af_packet_input_trace_t *t = va_arg (*args, af_packet_input_trace_t *);
+ uword indent = format_get_indent (s);
+
+ s = format (s, "af_packet: hw_if_index %d next-index %d",
+ t->hw_if_index, t->next_index);
+
+ s =
+ format (s,
+ "\n%Utpacket2_hdr:\n%Ustatus 0x%x len %u snaplen %u mac %u net %u"
+ "\n%Usec 0x%x nsec 0x%x vlan %U"
+#ifdef TP_STATUS_VLAN_TPID_VALID
+ " vlan_tpid %u"
+#endif
+ ,
+ format_white_space, indent + 2,
+ format_white_space, indent + 4,
+ t->tph.tp_status,
+ t->tph.tp_len,
+ t->tph.tp_snaplen,
+ t->tph.tp_mac,
+ t->tph.tp_net,
+ format_white_space, indent + 4,
+ t->tph.tp_sec,
+ t->tph.tp_nsec, format_ethernet_vlan_tci, t->tph.tp_vlan_tci
+#ifdef TP_STATUS_VLAN_TPID_VALID
+ , t->tph.tp_vlan_tpid
+#endif
+ );
+ return s;
+}
+
+always_inline void
+buffer_add_to_chain (vlib_main_t * vm, u32 bi, u32 first_bi, u32 prev_bi)
+{
+ vlib_buffer_t *b = vlib_get_buffer (vm, bi);
+ vlib_buffer_t *first_b = vlib_get_buffer (vm, first_bi);
+ vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_bi);
+
+ /* update first buffer */
+ first_b->total_length_not_including_first_buffer += b->current_length;
+
+ /* update previous buffer */
+ prev_b->next_buffer = bi;
+ prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
+
+ /* update current buffer */
+ b->next_buffer = 0;
+}
+
+always_inline uword
+af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame, af_packet_if_t * apif)
+{
+ af_packet_main_t *apm = &af_packet_main;
+ struct tpacket2_hdr *tph;
+ u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+ u32 block = 0;
+ u32 rx_frame;
+ u32 n_free_bufs;
+ u32 n_rx_packets = 0;
+ u32 n_rx_bytes = 0;
+ u32 *to_next = 0;
+ u32 block_size = apif->rx_req->tp_block_size;
+ u32 frame_size = apif->rx_req->tp_frame_size;
+ u32 frame_num = apif->rx_req->tp_frame_nr;
+ u8 *block_start = apif->rx_ring + block * block_size;
+ uword n_trace = vlib_get_trace_count (vm, node);
+ u32 thread_index = vlib_get_thread_index ();
+ u32 n_buffer_bytes = vlib_buffer_free_list_buffer_size (vm,
+ VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+ u32 min_bufs = apif->rx_req->tp_frame_size / n_buffer_bytes;
+
+ if (apif->per_interface_next_index != ~0)
+ next_index = apif->per_interface_next_index;
+
+ n_free_bufs = vec_len (apm->rx_buffers[thread_index]);
+ if (PREDICT_FALSE (n_free_bufs < VLIB_FRAME_SIZE))
+ {
+ vec_validate (apm->rx_buffers[thread_index],
+ VLIB_FRAME_SIZE + n_free_bufs - 1);
+ n_free_bufs +=
+ vlib_buffer_alloc (vm, &apm->rx_buffers[thread_index][n_free_bufs],
+ VLIB_FRAME_SIZE);
+ _vec_len (apm->rx_buffers[thread_index]) = n_free_bufs;
+ }
+
+ rx_frame = apif->next_rx_frame;
+ tph = (struct tpacket2_hdr *) (block_start + rx_frame * frame_size);
+ while ((tph->tp_status & TP_STATUS_USER) && (n_free_bufs > min_bufs))
+ {
+ vlib_buffer_t *b0 = 0, *first_b0 = 0;
+ u32 next0 = next_index;
+
+ u32 n_left_to_next;
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+ while ((tph->tp_status & TP_STATUS_USER) && (n_free_bufs > min_bufs) &&
+ n_left_to_next)
+ {
+ u32 data_len = tph->tp_snaplen;
+ u32 offset = 0;
+ u32 bi0 = 0, first_bi0 = 0, prev_bi0;
+
+ while (data_len)
+ {
+ /* grab free buffer */
+ u32 last_empty_buffer =
+ vec_len (apm->rx_buffers[thread_index]) - 1;
+ prev_bi0 = bi0;
+ bi0 = apm->rx_buffers[thread_index][last_empty_buffer];
+ b0 = vlib_get_buffer (vm, bi0);
+ _vec_len (apm->rx_buffers[thread_index]) = last_empty_buffer;
+ n_free_bufs--;
+
+ /* copy data */
+ u32 bytes_to_copy =
+ data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
+ u32 vlan_len = 0;
+ u32 bytes_copied = 0;
+ b0->current_data = 0;
+ /* Kernel removes VLAN headers, so reconstruct VLAN */
+ if (PREDICT_FALSE (tph->tp_status & TP_STATUS_VLAN_VALID))
+ {
+ if (PREDICT_TRUE (offset == 0))
+ {
+ clib_memcpy (vlib_buffer_get_current (b0),
+ (u8 *) tph + tph->tp_mac,
+ sizeof (ethernet_header_t));
+ ethernet_header_t *eth = vlib_buffer_get_current (b0);
+ ethernet_vlan_header_t *vlan =
+ (ethernet_vlan_header_t *) (eth + 1);
+ vlan->priority_cfi_and_id =
+ clib_host_to_net_u16 (tph->tp_vlan_tci);
+ vlan->type = eth->type;
+ eth->type = clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
+ vlan_len = sizeof (ethernet_vlan_header_t);
+ bytes_copied = sizeof (ethernet_header_t);
+ }
+ }
+ clib_memcpy (((u8 *) vlib_buffer_get_current (b0)) +
+ bytes_copied + vlan_len,
+ (u8 *) tph + tph->tp_mac + offset + bytes_copied,
+ (bytes_to_copy - bytes_copied));
+
+ /* fill buffer header */
+ b0->current_length = bytes_to_copy + vlan_len;
+
+ if (offset == 0)
+ {
+ b0->total_length_not_including_first_buffer = 0;
+ b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = apif->sw_if_index;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ first_bi0 = bi0;
+ first_b0 = vlib_get_buffer (vm, first_bi0);
+ }
+ else
+ buffer_add_to_chain (vm, bi0, first_bi0, prev_bi0);
+
+ offset += bytes_to_copy;
+ data_len -= bytes_to_copy;
+ }
+ n_rx_packets++;
+ n_rx_bytes += tph->tp_snaplen;
+ to_next[0] = first_bi0;
+ to_next += 1;
+ n_left_to_next--;
+
+ /* trace */
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (first_b0);
+ if (PREDICT_FALSE (n_trace > 0))
+ {
+ af_packet_input_trace_t *tr;
+ vlib_trace_buffer (vm, node, next0, first_b0, /* follow_chain */
+ 0);
+ vlib_set_trace_count (vm, node, --n_trace);
+ tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
+ tr->next_index = next0;
+ tr->hw_if_index = apif->hw_if_index;
+ clib_memcpy (&tr->tph, tph, sizeof (struct tpacket2_hdr));
+ }
+
+ /* redirect if feature path enabled */
+ vnet_feature_start_device_input_x1 (apif->sw_if_index, &next0, b0);
+
+ /* enqueue and take next packet */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, first_bi0, next0);
+
+ /* next packet */
+ tph->tp_status = TP_STATUS_KERNEL;
+ rx_frame = (rx_frame + 1) % frame_num;
+ tph = (struct tpacket2_hdr *) (block_start + rx_frame * frame_size);
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ apif->next_rx_frame = rx_frame;
+
+ vlib_increment_combined_counter
+ (vnet_get_main ()->interface_main.combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ vlib_get_thread_index (), apif->hw_if_index, n_rx_packets, n_rx_bytes);
+
+ vnet_device_increment_rx_packets (thread_index, n_rx_packets);
+ return n_rx_packets;
+}
+
+static uword
+af_packet_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ u32 n_rx_packets = 0;
+ af_packet_main_t *apm = &af_packet_main;
+ vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
+ vnet_device_and_queue_t *dq;
+
+ foreach_device_and_queue (dq, rt->devices_and_queues)
+ {
+ af_packet_if_t *apif;
+ apif = vec_elt_at_index (apm->interfaces, dq->dev_instance);
+ if (apif->is_admin_up)
+ n_rx_packets += af_packet_device_input_fn (vm, node, frame, apif);
+ }
+
+ return n_rx_packets;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (af_packet_input_node) = {
+ .function = af_packet_input_fn,
+ .name = "af-packet-input",
+ .sibling_of = "device-input",
+ .format_trace = format_af_packet_input_trace,
+ .type = VLIB_NODE_TYPE_INPUT,
+ .state = VLIB_NODE_STATE_INTERRUPT,
+ .n_errors = AF_PACKET_INPUT_N_ERROR,
+ .error_strings = af_packet_input_error_strings,
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (af_packet_input_node, af_packet_input_fn)
+/* *INDENT-ON* */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/devices.c b/src/vnet/devices/devices.c
new file mode 100644
index 00000000..a38ecd2d
--- /dev/null
+++ b/src/vnet/devices/devices.c
@@ -0,0 +1,365 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/devices/devices.h>
+#include <vnet/feature/feature.h>
+#include <vnet/ip/ip.h>
+#include <vnet/ethernet/ethernet.h>
+
+vnet_device_main_t vnet_device_main;
+
+static uword
+device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (device_input_node) = {
+ .function = device_input_fn,
+ .name = "device-input",
+ .runtime_data_bytes = sizeof (vnet_device_input_runtime_t),
+ .type = VLIB_NODE_TYPE_INPUT,
+ .state = VLIB_NODE_STATE_DISABLED,
+ .n_next_nodes = VNET_DEVICE_INPUT_N_NEXT_NODES,
+ .next_nodes = VNET_DEVICE_INPUT_NEXT_NODES,
+};
+
+/* This table defines how far to advance the current data pointer
+ in the buffer when we shortcut straight to the l3 input nodes */
+
+const u32 __attribute__((aligned (CLIB_CACHE_LINE_BYTES)))
+device_input_next_node_advance[((VNET_DEVICE_INPUT_N_NEXT_NODES /
+ CLIB_CACHE_LINE_BYTES) +1) * CLIB_CACHE_LINE_BYTES] =
+{
+ [VNET_DEVICE_INPUT_NEXT_IP4_INPUT] = sizeof (ethernet_header_t),
+ [VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT] = sizeof (ethernet_header_t),
+ [VNET_DEVICE_INPUT_NEXT_IP6_INPUT] = sizeof (ethernet_header_t),
+ [VNET_DEVICE_INPUT_NEXT_MPLS_INPUT] = sizeof (ethernet_header_t),
+};
+
+VNET_FEATURE_ARC_INIT (device_input, static) =
+{
+ .arc_name = "device-input",
+ .start_nodes = VNET_FEATURES ("device-input"),
+ .arc_index_ptr = &feature_main.device_input_feature_arc_index,
+};
+
+VNET_FEATURE_INIT (l2_patch, static) = {
+ .arc_name = "device-input",
+ .node_name = "l2-patch",
+ .runs_before = VNET_FEATURES ("ethernet-input"),
+};
+
+VNET_FEATURE_INIT (worker_handoff, static) = {
+ .arc_name = "device-input",
+ .node_name = "worker-handoff",
+ .runs_before = VNET_FEATURES ("ethernet-input"),
+};
+
+VNET_FEATURE_INIT (span_input, static) = {
+ .arc_name = "device-input",
+ .node_name = "span-input",
+ .runs_before = VNET_FEATURES ("ethernet-input"),
+};
+
+VNET_FEATURE_INIT (p2p_ethernet_node, static) = {
+ .arc_name = "device-input",
+ .node_name = "p2p-ethernet-input",
+ .runs_before = VNET_FEATURES ("ethernet-input"),
+};
+
+VNET_FEATURE_INIT (ethernet_input, static) = {
+ .arc_name = "device-input",
+ .node_name = "ethernet-input",
+ .runs_before = 0, /* not before any other features */
+};
+/* *INDENT-ON* */
+
+static int
+vnet_device_queue_sort (void *a1, void *a2)
+{
+ vnet_device_and_queue_t *dq1 = a1;
+ vnet_device_and_queue_t *dq2 = a2;
+
+ if (dq1->dev_instance > dq2->dev_instance)
+ return 1;
+ else if (dq1->dev_instance < dq2->dev_instance)
+ return -1;
+ else if (dq1->queue_id > dq2->queue_id)
+ return 1;
+ else if (dq1->queue_id < dq2->queue_id)
+ return -1;
+ else
+ return 0;
+}
+
+static void
+vnet_device_queue_update (vnet_main_t * vnm, vnet_device_input_runtime_t * rt)
+{
+ vnet_device_and_queue_t *dq;
+ vnet_hw_interface_t *hw;
+
+ vec_sort_with_function (rt->devices_and_queues, vnet_device_queue_sort);
+
+ vec_foreach (dq, rt->devices_and_queues)
+ {
+ hw = vnet_get_hw_interface (vnm, dq->hw_if_index);
+ vec_validate (hw->dq_runtime_index_by_queue, dq->queue_id);
+ hw->dq_runtime_index_by_queue[dq->queue_id] = dq - rt->devices_and_queues;
+ }
+}
+
+void
+vnet_hw_interface_assign_rx_thread (vnet_main_t * vnm, u32 hw_if_index,
+ u16 queue_id, uword thread_index)
+{
+ vnet_device_main_t *vdm = &vnet_device_main;
+ vlib_main_t *vm, *vm0;
+ vnet_device_input_runtime_t *rt;
+ vnet_device_and_queue_t *dq;
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+
+ ASSERT (hw->input_node_index > 0);
+
+ if (vdm->first_worker_thread_index == 0)
+ thread_index = 0;
+
+ if (thread_index != 0 &&
+ (thread_index < vdm->first_worker_thread_index ||
+ thread_index > vdm->last_worker_thread_index))
+ {
+ thread_index = vdm->next_worker_thread_index++;
+ if (vdm->next_worker_thread_index > vdm->last_worker_thread_index)
+ vdm->next_worker_thread_index = vdm->first_worker_thread_index;
+ }
+
+ vm = vlib_mains[thread_index];
+ vm0 = vlib_get_main ();
+
+ vlib_worker_thread_barrier_sync (vm0);
+
+ rt = vlib_node_get_runtime_data (vm, hw->input_node_index);
+
+ vec_add2 (rt->devices_and_queues, dq, 1);
+ dq->hw_if_index = hw_if_index;
+ dq->dev_instance = hw->dev_instance;
+ dq->queue_id = queue_id;
+ dq->mode = VNET_HW_INTERFACE_RX_MODE_POLLING;
+ rt->enabled_node_state = VLIB_NODE_STATE_POLLING;
+
+ vnet_device_queue_update (vnm, rt);
+ vec_validate (hw->input_node_thread_index_by_queue, queue_id);
+ vec_validate (hw->rx_mode_by_queue, queue_id);
+ hw->input_node_thread_index_by_queue[queue_id] = thread_index;
+ hw->rx_mode_by_queue[queue_id] = VNET_HW_INTERFACE_RX_MODE_POLLING;
+
+ vlib_worker_thread_barrier_release (vm0);
+
+ vlib_node_set_state (vm, hw->input_node_index, rt->enabled_node_state);
+}
+
+int
+vnet_hw_interface_unassign_rx_thread (vnet_main_t * vnm, u32 hw_if_index,
+ u16 queue_id)
+{
+ vlib_main_t *vm, *vm0;
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+ vnet_device_input_runtime_t *rt;
+ vnet_device_and_queue_t *dq;
+ uword old_thread_index;
+ vnet_hw_interface_rx_mode mode;
+
+ if (hw->input_node_thread_index_by_queue == 0)
+ return VNET_API_ERROR_INVALID_INTERFACE;
+
+ if (vec_len (hw->input_node_thread_index_by_queue) < queue_id + 1)
+ return VNET_API_ERROR_INVALID_INTERFACE;
+
+ old_thread_index = hw->input_node_thread_index_by_queue[queue_id];
+
+ vm = vlib_mains[old_thread_index];
+
+ rt = vlib_node_get_runtime_data (vm, hw->input_node_index);
+
+ vec_foreach (dq, rt->devices_and_queues)
+ if (dq->hw_if_index == hw_if_index && dq->queue_id == queue_id)
+ {
+ mode = dq->mode;
+ goto delete;
+ }
+
+ return VNET_API_ERROR_INVALID_INTERFACE;
+
+delete:
+
+ vm0 = vlib_get_main ();
+ vlib_worker_thread_barrier_sync (vm0);
+ vec_del1 (rt->devices_and_queues, dq - rt->devices_and_queues);
+ vnet_device_queue_update (vnm, rt);
+ hw->rx_mode_by_queue[queue_id] = VNET_HW_INTERFACE_RX_MODE_UNKNOWN;
+ vlib_worker_thread_barrier_release (vm0);
+
+ if (vec_len (rt->devices_and_queues) == 0)
+ vlib_node_set_state (vm, hw->input_node_index, VLIB_NODE_STATE_DISABLED);
+ else if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
+ {
+ /*
+ * If the deleted interface was polling, we may need to set the node state
+ * back to interrupt when no other polling interface remains on this
+ * device's thread. Mixed interfaces (polling and interrupt) assigned to
+ * the same thread force the thread into polling mode, so the deletion
+ * may allow it to drop back to interrupt mode.
+ */
+ vec_foreach (dq, rt->devices_and_queues)
+ {
+ if (dq->mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
+ return 0;
+ }
+ rt->enabled_node_state = VLIB_NODE_STATE_INTERRUPT;
+ vlib_node_set_state (vm, hw->input_node_index, rt->enabled_node_state);
+ }
+
+ return 0;
+}
+
+
+int
+vnet_hw_interface_set_rx_mode (vnet_main_t * vnm, u32 hw_if_index,
+ u16 queue_id, vnet_hw_interface_rx_mode mode)
+{
+ vlib_main_t *vm;
+ uword thread_index;
+ vnet_device_and_queue_t *dq;
+ vlib_node_state_t enabled_node_state;
+ ASSERT (mode < VNET_HW_INTERFACE_NUM_RX_MODES);
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+ vnet_device_input_runtime_t *rt;
+ int is_polling = 0;
+
+ if (mode == VNET_HW_INTERFACE_RX_MODE_DEFAULT)
+ mode = hw->default_rx_mode;
+
+ if (hw->input_node_thread_index_by_queue == 0 || hw->rx_mode_by_queue == 0)
+ return VNET_API_ERROR_INVALID_INTERFACE;
+
+ if (hw->rx_mode_by_queue[queue_id] == mode)
+ return 0;
+
+ if (mode != VNET_HW_INTERFACE_RX_MODE_POLLING &&
+ (hw->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE) == 0)
+ return VNET_API_ERROR_UNSUPPORTED;
+
+ if ((vec_len (hw->input_node_thread_index_by_queue) < queue_id + 1) ||
+ (vec_len (hw->rx_mode_by_queue) < queue_id + 1))
+ return VNET_API_ERROR_INVALID_QUEUE;
+
+ hw->rx_mode_by_queue[queue_id] = mode;
+ thread_index = hw->input_node_thread_index_by_queue[queue_id];
+ vm = vlib_mains[thread_index];
+
+ rt = vlib_node_get_runtime_data (vm, hw->input_node_index);
+
+ vec_foreach (dq, rt->devices_and_queues)
+ {
+ if (dq->hw_if_index == hw_if_index && dq->queue_id == queue_id)
+ dq->mode = mode;
+ if (dq->mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
+ is_polling = 1;
+ }
+
+ if (is_polling)
+ enabled_node_state = VLIB_NODE_STATE_POLLING;
+ else
+ enabled_node_state = VLIB_NODE_STATE_INTERRUPT;
+
+ if (rt->enabled_node_state != enabled_node_state)
+ {
+ rt->enabled_node_state = enabled_node_state;
+ if (vlib_node_get_state (vm, hw->input_node_index) !=
+ VLIB_NODE_STATE_DISABLED)
+ vlib_node_set_state (vm, hw->input_node_index, enabled_node_state);
+ }
+
+ return 0;
+}
+
+int
+vnet_hw_interface_get_rx_mode (vnet_main_t * vnm, u32 hw_if_index,
+ u16 queue_id, vnet_hw_interface_rx_mode * mode)
+{
+ vlib_main_t *vm;
+ uword thread_index;
+ vnet_device_and_queue_t *dq;
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+ vnet_device_input_runtime_t *rt;
+
+ if (hw->input_node_thread_index_by_queue == 0)
+ return VNET_API_ERROR_INVALID_INTERFACE;
+
+ if ((vec_len (hw->input_node_thread_index_by_queue) < queue_id + 1) ||
+ (vec_len (hw->rx_mode_by_queue) < queue_id + 1))
+ return VNET_API_ERROR_INVALID_QUEUE;
+
+ thread_index = hw->input_node_thread_index_by_queue[queue_id];
+ vm = vlib_mains[thread_index];
+
+ rt = vlib_node_get_runtime_data (vm, hw->input_node_index);
+
+ vec_foreach (dq, rt->devices_and_queues)
+ if (dq->hw_if_index == hw_if_index && dq->queue_id == queue_id)
+ {
+ *mode = dq->mode;
+ return 0;
+ }
+
+ return VNET_API_ERROR_INVALID_INTERFACE;
+}
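
A minimal usage sketch (assuming the standard vnet_get_main() accessor and the rx-mode enum values from interface.h): a caller could flip a queue between interrupt and polling mode with the two functions above. example_toggle_rx_mode and its arguments are hypothetical, not part of this patch.

    static int
    example_toggle_rx_mode (u32 hw_if_index, u16 queue_id)
    {
      vnet_main_t *vnm = vnet_get_main ();
      vnet_hw_interface_rx_mode cur;
      int rv;

      /* read the current per-queue mode */
      rv = vnet_hw_interface_get_rx_mode (vnm, hw_if_index, queue_id, &cur);
      if (rv)
        return rv;              /* invalid interface or queue */

      /* flip between polling and interrupt */
      cur = (cur == VNET_HW_INTERFACE_RX_MODE_POLLING) ?
        VNET_HW_INTERFACE_RX_MODE_INTERRUPT : VNET_HW_INTERFACE_RX_MODE_POLLING;

      return vnet_hw_interface_set_rx_mode (vnm, hw_if_index, queue_id, cur);
    }
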
+
+
+
+static clib_error_t *
+vnet_device_init (vlib_main_t * vm)
+{
+ vnet_device_main_t *vdm = &vnet_device_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ vlib_thread_registration_t *tr;
+ uword *p;
+
+ vec_validate_aligned (vdm->workers, tm->n_vlib_mains - 1,
+ CLIB_CACHE_LINE_BYTES);
+
+ p = hash_get_mem (tm->thread_registrations_by_name, "workers");
+ tr = p ? (vlib_thread_registration_t *) p[0] : 0;
+ if (tr && tr->count > 0)
+ {
+ vdm->first_worker_thread_index = tr->first_index;
+ vdm->next_worker_thread_index = tr->first_index;
+ vdm->last_worker_thread_index = tr->first_index + tr->count - 1;
+ }
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (vnet_device_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/devices.h b/src/vnet/devices/devices.h
new file mode 100644
index 00000000..b74e3713
--- /dev/null
+++ b/src/vnet/devices/devices.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_vnet_vnet_device_h
+#define included_vnet_vnet_device_h
+
+#include <vnet/unix/pcap.h>
+#include <vnet/l3_types.h>
+
+typedef enum
+{
+ VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT,
+ VNET_DEVICE_INPUT_NEXT_IP4_INPUT,
+ VNET_DEVICE_INPUT_NEXT_IP6_INPUT,
+ VNET_DEVICE_INPUT_NEXT_MPLS_INPUT,
+ VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT,
+ VNET_DEVICE_INPUT_NEXT_DROP,
+ VNET_DEVICE_INPUT_N_NEXT_NODES,
+} vnet_device_input_next_t;
+
+#define VNET_DEVICE_INPUT_NEXT_NODES { \
+ [VNET_DEVICE_INPUT_NEXT_DROP] = "error-drop", \
+ [VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT] = "ethernet-input", \
+ [VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT] = "ip4-input-no-checksum", \
+ [VNET_DEVICE_INPUT_NEXT_IP4_INPUT] = "ip4-input", \
+ [VNET_DEVICE_INPUT_NEXT_IP6_INPUT] = "ip6-input", \
+ [VNET_DEVICE_INPUT_NEXT_MPLS_INPUT] = "mpls-input", \
+}
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
+ /* total input packet counter */
+ u64 aggregate_rx_packets;
+} vnet_device_per_worker_data_t;
+
+typedef struct
+{
+ vnet_device_per_worker_data_t *workers;
+ uword first_worker_thread_index;
+ uword last_worker_thread_index;
+ uword next_worker_thread_index;
+} vnet_device_main_t;
+
+typedef struct
+{
+ u32 hw_if_index;
+ u32 dev_instance;
+ u16 queue_id;
+ vnet_hw_interface_rx_mode mode;
+ u32 interrupt_pending;
+} vnet_device_and_queue_t;
+
+typedef struct
+{
+ vnet_device_and_queue_t *devices_and_queues;
+ vlib_node_state_t enabled_node_state;
+} vnet_device_input_runtime_t;
+
+extern vnet_device_main_t vnet_device_main;
+extern vlib_node_registration_t device_input_node;
+extern const u32 device_input_next_node_advance[];
+
+static inline void
+vnet_hw_interface_set_input_node (vnet_main_t * vnm, u32 hw_if_index,
+ u32 node_index)
+{
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+ hw->input_node_index = node_index;
+}
+
+void vnet_hw_interface_assign_rx_thread (vnet_main_t * vnm, u32 hw_if_index,
+ u16 queue_id, uword thread_index);
+int vnet_hw_interface_unassign_rx_thread (vnet_main_t * vnm, u32 hw_if_index,
+ u16 queue_id);
+int vnet_hw_interface_set_rx_mode (vnet_main_t * vnm, u32 hw_if_index,
+ u16 queue_id,
+ vnet_hw_interface_rx_mode mode);
+int vnet_hw_interface_get_rx_mode (vnet_main_t * vnm, u32 hw_if_index,
+ u16 queue_id,
+ vnet_hw_interface_rx_mode * mode);
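
A rough sketch of how a driver would typically wire these helpers together when a new interface comes up; my_input_node is a hypothetical node registration, and the queue/thread arguments follow the convention used by af_packet (~0 lets the device layer pick the next worker). This is an illustration under those assumptions, not part of the patch.

    static void
    example_enable_rx (vnet_main_t * vnm, u32 hw_if_index)
    {
      /* tell the generic device layer which graph node services this device */
      vnet_hw_interface_set_input_node (vnm, hw_if_index, my_input_node.index);

      /* place queue 0 on a worker chosen by the device layer */
      vnet_hw_interface_assign_rx_thread (vnm, hw_if_index, 0 /* queue */ ,
                                          ~0 /* any thread */ );

      /* start in the device's default rx mode */
      vnet_hw_interface_set_rx_mode (vnm, hw_if_index, 0 /* queue */ ,
                                     VNET_HW_INTERFACE_RX_MODE_DEFAULT);
    }
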
+
+static inline u64
+vnet_get_aggregate_rx_packets (void)
+{
+ vnet_device_main_t *vdm = &vnet_device_main;
+ u64 sum = 0;
+ vnet_device_per_worker_data_t *pwd;
+
+ vec_foreach (pwd, vdm->workers) sum += pwd->aggregate_rx_packets;
+
+ return sum;
+}
+
+static inline void
+vnet_device_increment_rx_packets (u32 thread_index, u64 count)
+{
+ vnet_device_main_t *vdm = &vnet_device_main;
+ vnet_device_per_worker_data_t *pwd;
+
+ pwd = vec_elt_at_index (vdm->workers, thread_index);
+ pwd->aggregate_rx_packets += count;
+}
+
+static_always_inline vnet_device_and_queue_t *
+vnet_get_device_and_queue (vlib_main_t * vm, vlib_node_runtime_t * node)
+{
+ vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
+ return rt->devices_and_queues;
+}
+
+static_always_inline uword
+vnet_get_device_input_thread_index (vnet_main_t * vnm, u32 hw_if_index,
+ u16 queue_id)
+{
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+ ASSERT (queue_id < vec_len (hw->input_node_thread_index_by_queue));
+ return hw->input_node_thread_index_by_queue[queue_id];
+}
+
+static_always_inline void
+vnet_device_input_set_interrupt_pending (vnet_main_t * vnm, u32 hw_if_index,
+ u16 queue_id)
+{
+ vlib_main_t *vm;
+ vnet_hw_interface_t *hw;
+ vnet_device_input_runtime_t *rt;
+ vnet_device_and_queue_t *dq;
+ uword idx;
+
+ hw = vnet_get_hw_interface (vnm, hw_if_index);
+ idx = vnet_get_device_input_thread_index (vnm, hw_if_index, queue_id);
+ vm = vlib_mains[idx];
+ rt = vlib_node_get_runtime_data (vm, hw->input_node_index);
+ idx = hw->dq_runtime_index_by_queue[queue_id];
+ dq = vec_elt_at_index (rt->devices_and_queues, idx);
+ dq->interrupt_pending = 1;
+
+ vlib_node_set_interrupt_pending (vm, hw->input_node_index);
+}
+
+#define foreach_device_and_queue(var,vec) \
+ for (var = (vec); var < vec_end (vec); var++) \
+ if (clib_smp_swap (&((var)->interrupt_pending), 0) || \
+ var->mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
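
A minimal sketch of the input-node side: foreach_device_and_queue visits queues that are either polling or have an interrupt pending, and the per-worker counter is updated afterwards. my_device_input_inline is a hypothetical per-queue receive routine, and the thread index is assumed to be available as vm->thread_index.

    static uword
    example_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                      vlib_frame_t * frame)
    {
      u32 n_rx = 0;
      vnet_device_and_queue_t *dq;

      foreach_device_and_queue (dq, vnet_get_device_and_queue (vm, node))
        n_rx += my_device_input_inline (vm, node, dq->dev_instance, dq->queue_id);

      /* account received packets against this worker thread */
      vnet_device_increment_rx_packets (vm->thread_index, n_rx);
      return n_rx;
    }
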
+
+#endif /* included_vnet_vnet_device_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/netmap/cli.c b/src/vnet/devices/netmap/cli.c
new file mode 100644
index 00000000..71363294
--- /dev/null
+++ b/src/vnet/devices/netmap/cli.c
@@ -0,0 +1,236 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+#include <stdint.h>
+#include <net/if.h>
+#include <sys/ioctl.h>
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vnet/ethernet/ethernet.h>
+
+#include <vnet/devices/netmap/net_netmap.h>
+#include <vnet/devices/netmap/netmap.h>
+
+static clib_error_t *
+netmap_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 *host_if_name = NULL;
+ u8 hwaddr[6];
+ u8 *hw_addr_ptr = 0;
+ int r;
+ u8 is_pipe = 0;
+ u8 is_master = 0;
+ u32 sw_if_index = ~0;
+ clib_error_t *error = NULL;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "name %s", &host_if_name))
+ ;
+ else
+ if (unformat
+ (line_input, "hw-addr %U", unformat_ethernet_address, hwaddr))
+ hw_addr_ptr = hwaddr;
+ else if (unformat (line_input, "pipe"))
+ is_pipe = 1;
+ else if (unformat (line_input, "master"))
+ is_master = 1;
+ else if (unformat (line_input, "slave"))
+ is_master = 0;
+ else
+ {
+ error = clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, line_input);
+ goto done;
+ }
+ }
+
+ if (host_if_name == NULL)
+ {
+ error = clib_error_return (0, "missing host interface name");
+ goto done;
+ }
+
+ r =
+ netmap_create_if (vm, host_if_name, hw_addr_ptr, is_pipe, is_master,
+ &sw_if_index);
+
+ if (r == VNET_API_ERROR_SYSCALL_ERROR_1)
+ {
+ error = clib_error_return (0, "%s (errno %d)", strerror (errno), errno);
+ goto done;
+ }
+
+ if (r == VNET_API_ERROR_INVALID_INTERFACE)
+ {
+ error = clib_error_return (0, "Invalid interface name");
+ goto done;
+ }
+
+ if (r == VNET_API_ERROR_SUBIF_ALREADY_EXISTS)
+ {
+ error = clib_error_return (0, "Interface already exists");
+ goto done;
+ }
+
+ vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main (),
+ sw_if_index);
+
+done:
+ unformat_free (line_input);
+
+ return error;
+}
+
+/*?
+ * '<em>netmap</em>' is a framework for very fast packet I/O from userspace.
+ * '<em>VALE</em>' is an equally fast in-kernel software switch using the
+ * netmap API. '<em>netmap</em>' includes '<em>netmap pipes</em>', a shared
+ * memory packet transport channel. Together, they provide a high speed
+ * user-space interface that allows VPP to patch into a linux namespace, a
+ * linux container, or a physical NIC without the use of DPDK. Building
+ * Netmap/VALE produces the '<em>netmap.ko</em>' kernel module, which must be
+ * loaded before netmap interfaces can be created.
+ * - https://github.com/luigirizzo/netmap - Netmap/VALE repo.
+ * - https://github.com/vpp-dev/netmap - VPP development package for Netmap/VALE,
+ * which is a snapshot of the Netmap/VALE repo with minor changes to work
+ * with containers and modified kernel drivers to work with NICs.
+ *
+ * Create a netmap interface that will attach to a linux interface.
+ * The interface must already exist. Once created, a new netmap interface
+ * will exist in VPP with the name '<em>netmap-<ifname></em>', where
+ * '<em><ifname></em>' takes one of two forms:
+ * - <b>ifname</b> - Linux interface to bind to.
+ * - <b>valeXXX:YYY</b> -
+ * - Where '<em>valeXXX</em>' is an arbitrary name for a VALE
+ * interface that must start with '<em>vale</em>' and is less
+ * than 16 characters.
+ * - Where '<em>YYY</em>' is an existing linux namespace.
+ *
+ * This command has the following optional parameters:
+ *
+ * - <b>hw-addr <mac-addr></b> - Optional ethernet address, can be in either
+ * X:X:X:X:X:X unix or X.X.X cisco format.
+ *
+ * - <b>pipe</b> - Optional flag to indicate that a '<em>netmap pipe</em>'
+ * instance should be created.
+ *
+ * - <b>master | slave</b> - Optional flag to indicate whether VPP should
+ * be the master or slave of the '<em>netmap pipe</em>'. Only considered
+ * if '<em>pipe</em>' is entered. Defaults to '<em>slave</em>' if not entered.
+ *
+ * @cliexpar
+ * Example of how to create a netmap interface tied to the linux
+ * namespace '<em>vpp1</em>':
+ * @cliexstart{create netmap name vale00:vpp1 hw-addr 02:FE:3F:34:15:9B pipe master}
+ * netmap-vale00:vpp1
+ * @cliexend
+ * Once the netmap interface is created, enable the interface using:
+ * @cliexcmd{set interface state netmap-vale00:vpp1 up}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (netmap_create_command, static) = {
+ .path = "create netmap",
+ .short_help = "create netmap name <ifname>|valeXXX:YYY "
+ "[hw-addr <mac-addr>] [pipe] [master|slave]",
+ .function = netmap_create_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+netmap_delete_command_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 *host_if_name = NULL;
+ clib_error_t *error = NULL;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "name %s", &host_if_name))
+ ;
+ else
+ {
+ error = clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, line_input);
+ goto done;
+ }
+ }
+
+ if (host_if_name == NULL)
+ {
+ error = clib_error_return (0, "missing host interface name");
+ goto done;
+ }
+
+ netmap_delete_if (vm, host_if_name);
+
+done:
+ unformat_free (line_input);
+
+ return error;
+}
+
+/*?
+ * Delete a netmap interface. Use the '<em><ifname></em>' to identify
+ * the netmap interface to be deleted. In VPP, netmap interfaces are
+ * named as '<em>netmap-<ifname></em>', where '<em><ifname></em>'
+ * takes one of two forms:
+ * - <b>ifname</b> - Linux interface to bind to.
+ * - <b>valeXXX:YYY</b> -
+ * - Where '<em>valeXXX</em>' is an arbitrary name for a VALE
+ * interface that must start with '<em>vale</em>' and is less
+ * than 16 characters.
+ * - Where '<em>YYY</em>' is an existing linux namespace.
+ *
+ * @cliexpar
+ * Example of how to delete a netmap interface named '<em>netmap-vale00:vpp1</em>':
+ * @cliexcmd{delete netmap name vale00:vpp1}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (netmap_delete_command, static) = {
+ .path = "delete netmap",
+ .short_help = "delete netmap name <ifname>|valeXXX:YYY",
+ .function = netmap_delete_command_fn,
+};
+/* *INDENT-ON* */
+
+clib_error_t *
+netmap_cli_init (vlib_main_t * vm)
+{
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (netmap_cli_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/netmap/device.c b/src/vnet/devices/netmap/device.c
new file mode 100644
index 00000000..aea9ddf4
--- /dev/null
+++ b/src/vnet/devices/netmap/device.c
@@ -0,0 +1,256 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <stdint.h>
+#include <net/if.h>
+#include <sys/ioctl.h>
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vnet/ethernet/ethernet.h>
+
+#include <vnet/devices/netmap/net_netmap.h>
+#include <vnet/devices/netmap/netmap.h>
+
+#define foreach_netmap_tx_func_error \
+_(NO_FREE_SLOTS, "no free tx slots") \
+_(PENDING_MSGS, "pending msgs in tx ring")
+
+typedef enum
+{
+#define _(f,s) NETMAP_TX_ERROR_##f,
+ foreach_netmap_tx_func_error
+#undef _
+ NETMAP_TX_N_ERROR,
+} netmap_tx_func_error_t;
+
+static char *netmap_tx_func_error_strings[] = {
+#define _(n,s) s,
+ foreach_netmap_tx_func_error
+#undef _
+};
+
+
+static u8 *
+format_netmap_device_name (u8 * s, va_list * args)
+{
+ u32 i = va_arg (*args, u32);
+ netmap_main_t *apm = &netmap_main;
+ netmap_if_t *nif = pool_elt_at_index (apm->interfaces, i);
+
+ s = format (s, "netmap-%s", nif->host_if_name);
+ return s;
+}
+
+static u8 *
+format_netmap_device (u8 * s, va_list * args)
+{
+ u32 dev_instance = va_arg (*args, u32);
+ int verbose = va_arg (*args, int);
+ netmap_main_t *nm = &netmap_main;
+ netmap_if_t *nif = vec_elt_at_index (nm->interfaces, dev_instance);
+ uword indent = format_get_indent (s);
+
+ s = format (s, "NETMAP interface");
+ if (verbose)
+ {
+ s = format (s, "\n%U version %d flags 0x%x"
+ "\n%U region %u memsize 0x%x offset 0x%x"
+ "\n%U tx_slots %u rx_slots %u tx_rings %u rx_rings %u",
+ format_white_space, indent + 2,
+ nif->req->nr_version,
+ nif->req->nr_flags,
+ format_white_space, indent + 2,
+ nif->mem_region,
+ nif->req->nr_memsize,
+ nif->req->nr_offset,
+ format_white_space, indent + 2,
+ nif->req->nr_tx_slots,
+ nif->req->nr_rx_slots,
+ nif->req->nr_tx_rings, nif->req->nr_rx_rings);
+ }
+ return s;
+}
+
+static u8 *
+format_netmap_tx_trace (u8 * s, va_list * args)
+{
+ s = format (s, "Unimplemented...");
+ return s;
+}
+
+static uword
+netmap_interface_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ netmap_main_t *nm = &netmap_main;
+ u32 *buffers = vlib_frame_args (frame);
+ u32 n_left = frame->n_vectors;
+ f64 const time_constant = 1e3;
+ vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
+ netmap_if_t *nif = pool_elt_at_index (nm->interfaces, rd->dev_instance);
+ int cur_ring;
+
+ clib_spinlock_lock_if_init (&nif->lockp);
+
+ cur_ring = nif->first_tx_ring;
+
+ while (n_left && cur_ring <= nif->last_tx_ring)
+ {
+ struct netmap_ring *ring = NETMAP_TXRING (nif->nifp, cur_ring);
+ int n_free_slots = nm_ring_space (ring);
+ uint cur = ring->cur;
+
+ if (nm_tx_pending (ring))
+ {
+ if (ioctl (nif->fd, NIOCTXSYNC, NULL) < 0)
+ clib_unix_warning ("NIOCTXSYNC");
+ clib_cpu_time_wait (time_constant);
+
+ if (nm_tx_pending (ring) && !n_free_slots)
+ {
+ cur_ring++;
+ continue;
+ }
+ }
+
+ while (n_left && n_free_slots)
+ {
+ vlib_buffer_t *b0 = 0;
+ u32 bi = buffers[0];
+ u32 len;
+ u32 offset = 0;
+ buffers++;
+
+ struct netmap_slot *slot = &ring->slot[cur];
+
+ do
+ {
+ b0 = vlib_get_buffer (vm, bi);
+ len = b0->current_length;
+	      /* copy this buffer segment into the netmap tx slot */
+ clib_memcpy ((u8 *) NETMAP_BUF (ring, slot->buf_idx) + offset,
+ vlib_buffer_get_current (b0), len);
+ offset += len;
+ }
+ while ((bi = b0->next_buffer));
+
+ slot->len = offset;
+ cur = (cur + 1) % ring->num_slots;
+ n_free_slots--;
+ n_left--;
+ }
+ CLIB_MEMORY_BARRIER ();
+ ring->head = ring->cur = cur;
+ }
+
+ if (n_left < frame->n_vectors)
+ ioctl (nif->fd, NIOCTXSYNC, NULL);
+
+ clib_spinlock_unlock_if_init (&nif->lockp);
+
+ if (n_left)
+ vlib_error_count (vm, node->node_index,
+ (n_left ==
+ frame->n_vectors ? NETMAP_TX_ERROR_PENDING_MSGS :
+ NETMAP_TX_ERROR_NO_FREE_SLOTS), n_left);
+
+ vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
+ return frame->n_vectors;
+}
+
+static void
+netmap_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
+ u32 node_index)
+{
+ netmap_main_t *apm = &netmap_main;
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+ netmap_if_t *nif = pool_elt_at_index (apm->interfaces, hw->dev_instance);
+
+ /* Shut off redirection */
+ if (node_index == ~0)
+ {
+ nif->per_interface_next_index = node_index;
+ return;
+ }
+
+ nif->per_interface_next_index =
+ vlib_node_add_next (vlib_get_main (), netmap_input_node.index,
+ node_index);
+}
+
+static void
+netmap_clear_hw_interface_counters (u32 instance)
+{
+ /* Nothing for now */
+}
+
+static clib_error_t *
+netmap_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
+{
+ netmap_main_t *apm = &netmap_main;
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+ netmap_if_t *nif = pool_elt_at_index (apm->interfaces, hw->dev_instance);
+ u32 hw_flags;
+
+ nif->is_admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
+
+ if (nif->is_admin_up)
+ hw_flags = VNET_HW_INTERFACE_FLAG_LINK_UP;
+ else
+ hw_flags = 0;
+
+ vnet_hw_interface_set_flags (vnm, hw_if_index, hw_flags);
+
+ return 0;
+}
+
+static clib_error_t *
+netmap_subif_add_del_function (vnet_main_t * vnm,
+ u32 hw_if_index,
+ struct vnet_sw_interface_t *st, int is_add)
+{
+ /* Nothing for now */
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VNET_DEVICE_CLASS (netmap_device_class) = {
+ .name = "netmap",
+ .tx_function = netmap_interface_tx,
+ .format_device_name = format_netmap_device_name,
+ .format_device = format_netmap_device,
+ .format_tx_trace = format_netmap_tx_trace,
+ .tx_function_n_errors = NETMAP_TX_N_ERROR,
+ .tx_function_error_strings = netmap_tx_func_error_strings,
+ .rx_redirect_to_node = netmap_set_interface_next_node,
+ .clear_counters = netmap_clear_hw_interface_counters,
+ .admin_up_down_function = netmap_interface_admin_up_down,
+ .subif_add_del_function = netmap_subif_add_del_function,
+};
+
+VLIB_DEVICE_TX_FUNCTION_MULTIARCH(netmap_device_class,
+ netmap_interface_tx)
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/netmap/dir.dox b/src/vnet/devices/netmap/dir.dox
new file mode 100644
index 00000000..7ddbf947
--- /dev/null
+++ b/src/vnet/devices/netmap/dir.dox
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Doxygen directory documentation */
+
+/**
+@dir
+@brief netmap Interface Implementation.
+
+This directory contains the source code for the netmap driver.
+
+*/
+/*? %%clicmd:group_label netmap %% ?*/
+/*? %%syscfg:group_label netmap %% ?*/
diff --git a/src/vnet/devices/netmap/net_netmap.h b/src/vnet/devices/netmap/net_netmap.h
new file mode 100644
index 00000000..fd4253b7
--- /dev/null
+++ b/src/vnet/devices/netmap/net_netmap.h
@@ -0,0 +1,650 @@
+/*
+ * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * $FreeBSD: head/sys/net/netmap.h 251139 2013-05-30 14:07:14Z luigi $
+ *
+ * Definitions of constants and the structures used by the netmap
+ * framework, for the part visible to both kernel and userspace.
+ * Detailed info on netmap is available with "man netmap" or at
+ *
+ * http://info.iet.unipi.it/~luigi/netmap/
+ *
+ * This API is also used to communicate with the VALE software switch
+ */
+
+#ifndef _NET_NETMAP_H_
+#define _NET_NETMAP_H_
+
+#define NETMAP_API 11 /* current API version */
+
+#define NETMAP_MIN_API 11 /* min and max versions accepted */
+#define NETMAP_MAX_API 15
+/*
+ * Some fields should be cache-aligned to reduce contention.
+ * The alignment is architecture and OS dependent, but rather than
+ * digging into OS headers to find the exact value we use an estimate
+ * that should cover most architectures.
+ */
+#define NM_CACHE_ALIGN 128
+
+/*
+ * --- Netmap data structures ---
+ *
+ * The userspace data structures used by netmap are shown below.
+ * They are allocated by the kernel and mmap()ed by userspace threads.
+ * Pointers are implemented as memory offsets or indexes,
+ * so that they can be easily dereferenced in kernel and userspace.
+
+ KERNEL (opaque, obviously)
+
+ ====================================================================
+ |
+ USERSPACE | struct netmap_ring
+ +---->+---------------+
+ / | head,cur,tail |
+ struct netmap_if (nifp, 1 per fd) / | buf_ofs |
+ +---------------+ / | other fields |
+ | ni_tx_rings | / +===============+
+ | ni_rx_rings | / | buf_idx, len | slot[0]
+ | | / | flags, ptr |
+ | | / +---------------+
+ +===============+ / | buf_idx, len | slot[1]
+ | txring_ofs[0] | (rel.to nifp)--' | flags, ptr |
+ | txring_ofs[1] | +---------------+
+ (tx+1 entries) (num_slots entries)
+ | txring_ofs[t] | | buf_idx, len | slot[n-1]
+ +---------------+ | flags, ptr |
+ | rxring_ofs[0] | +---------------+
+ | rxring_ofs[1] |
+ (rx+1 entries)
+ | rxring_ofs[r] |
+ +---------------+
+
+ * For each "interface" (NIC, host stack, PIPE, VALE switch port) bound to
+ * a file descriptor, the mmap()ed region contains a (logically readonly)
+ * struct netmap_if pointing to struct netmap_ring's.
+ *
+ * There is one netmap_ring per physical NIC ring, plus one tx/rx ring
+ * pair attached to the host stack (this pair is unused for non-NIC ports).
+ *
+ * All physical/host stack ports share the same memory region,
+ * so that zero-copy can be implemented between them.
+ * VALE switch ports instead have separate memory regions.
+ *
+ * The netmap_ring is the userspace-visible replica of the NIC ring.
+ * Each slot has the index of a buffer (MTU-sized and residing in the
+ * mmapped region), its length and some flags. An extra 64-bit pointer
+ * is provided for user-supplied buffers in the tx path.
+ *
+ * In user space, the buffer address is computed as
+ * (char *)ring + buf_ofs + index * NETMAP_BUF_SIZE
+ *
+ * Added in NETMAP_API 11:
+ *
+ * + NIOCREGIF can request the allocation of extra spare buffers from
+ * the same memory pool. The desired number of buffers must be in
+ * nr_arg3. The ioctl may return fewer buffers, depending on memory
+ * availability. nr_arg3 will return the actual value, and, once
+ * mapped, nifp->ni_bufs_head will be the index of the first buffer.
+ *
+ * The buffers are linked to each other using the first uint32_t
+ * as the index. On close, ni_bufs_head must point to the list of
+ * buffers to be released.
+ *
+ * + NIOCREGIF can request space for extra rings (and buffers)
+ * allocated in the same memory space. The number of extra rings
+ * is in nr_arg1, and is advisory. This is a no-op on NICs where
+ * the size of the memory space is fixed.
+ *
+ * + NIOCREGIF can attach to PIPE rings sharing the same memory
+ * space with a parent device. The ifname indicates the parent device,
+ * which must already exist. Flags in nr_flags indicate if we want to
+ * bind the master or slave side, the index (from nr_ringid)
+ * is just a cookie and does not need to be sequential.
+ *
+ * + NIOCREGIF can also attach to 'monitor' rings that replicate
+ * the content of specific rings, also from the same memory space.
+ *
+ * Extra flags in nr_flags support the above functions.
+ * Application libraries may use the following naming scheme:
+ * netmap:foo all NIC ring pairs
+ * netmap:foo^ only host ring pair
+ * netmap:foo+ all NIC ring + host ring pairs
+ * netmap:foo-k the k-th NIC ring pair
+ * netmap:foo{k PIPE ring pair k, master side
+ * netmap:foo}k PIPE ring pair k, slave side
+ */
+
+/*
+ * struct netmap_slot is a buffer descriptor
+ */
+struct netmap_slot {
+ uint32_t buf_idx; /* buffer index */
+ uint16_t len; /* length for this slot */
+ uint16_t flags; /* buf changed, etc. */
+ uint64_t ptr; /* pointer for indirect buffers */
+};
+
+/*
+ * The following flags control how the slot is used
+ */
+
+#define NS_BUF_CHANGED 0x0001 /* buf_idx changed */
+ /*
+ * must be set whenever buf_idx is changed (as it might be
+ * necessary to recompute the physical address and mapping)
+ *
+ * It is also set by the kernel whenever the buf_idx is
+ * changed internally (e.g., by pipes). Applications may
+ * use this information to know when they can reuse the
+ * contents of previously prepared buffers.
+ */
+
+#define NS_REPORT 0x0002 /* ask the hardware to report results */
+ /*
+ * Request notification when slot is used by the hardware.
+ * Normally transmit completions are handled lazily and
+ * may be unreported. This flag lets us know when a slot
+ * has been sent (e.g. to terminate the sender).
+ */
+
+#define NS_FORWARD 0x0004 /* pass packet 'forward' */
+ /*
+ * (Only for physical ports, rx rings with NR_FORWARD set).
+ * Slot released to the kernel (i.e. before ring->head) with
+ * this flag set are passed to the peer ring (host/NIC),
+ * thus restoring the host-NIC connection for these slots.
+ * This supports efficient traffic monitoring or firewalling.
+ */
+
+#define NS_NO_LEARN 0x0008 /* disable bridge learning */
+ /*
+ * On a VALE switch, do not 'learn' the source port for
+ * this buffer.
+ */
+
+#define NS_INDIRECT 0x0010 /* userspace buffer */
+ /*
+ * (VALE tx rings only) data is in a userspace buffer,
+ * whose address is in the 'ptr' field in the slot.
+ */
+
+#define NS_MOREFRAG 0x0020 /* packet has more fragments */
+ /*
+ * (VALE ports only)
+ * Set on all but the last slot of a multi-segment packet.
+ * The 'len' field refers to the individual fragment.
+ */
+
+#define NS_PORT_SHIFT 8
+#define NS_PORT_MASK (0xff << NS_PORT_SHIFT)
+ /*
+ * The high 8 bits of the flag, if not zero, indicate the
+ * destination port for the VALE switch, overriding
+ * the lookup table.
+ */
+
+#define NS_RFRAGS(_slot) ( ((_slot)->flags >> 8) & 0xff)
+ /*
+ * (VALE rx rings only) the high 8 bits
+ * are the number of fragments.
+ */
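
As a usage sketch only (assuming the nm_ring_next() helper from netmap_user.h and the netmap_ring/netmap_slot structures defined further below), a VALE receiver could compute the total length of a multi-fragment packet by following NS_MOREFRAG; example_packet_len is hypothetical.

    static inline uint32_t
    example_packet_len (struct netmap_ring *ring, uint32_t slot_idx)
    {
      uint32_t len = 0;

      for (;;)
        {
          struct netmap_slot *s = &ring->slot[slot_idx];
          len += s->len;                /* 'len' covers this fragment only */
          slot_idx = nm_ring_next (ring, slot_idx);
          if (!(s->flags & NS_MOREFRAG))        /* last fragment of the packet */
            break;
        }
      return len;
    }
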
+
+
+/*
+ * struct netmap_ring
+ *
+ * Netmap representation of a TX or RX ring (also known as "queue").
+ * This is a queue implemented as a fixed-size circular array.
+ * At the software level the important fields are: head, cur, tail.
+ *
+ * In TX rings:
+ *
+ * head first slot available for transmission.
+ * cur wakeup point. select() and poll() will unblock
+ * when 'tail' moves past 'cur'
+ * tail (readonly) first slot reserved to the kernel
+ *
+ * [head .. tail-1] can be used for new packets to send;
+ * 'head' and 'cur' must be incremented as slots are filled
+ * with new packets to be sent;
+ * 'cur' can be moved further ahead if we need more space
+ * for new transmissions. XXX todo (2014-03-12)
+ *
+ * In RX rings:
+ *
+ * head first valid received packet
+ * cur wakeup point. select() and poll() will unblock
+ * when 'tail' moves past 'cur'
+ * tail (readonly) first slot reserved to the kernel
+ *
+ * [head .. tail-1] contain received packets;
+ * 'head' and 'cur' must be incremented as slots are consumed
+ * and can be returned to the kernel;
+ * 'cur' can be moved further ahead if we want to wait for
+ * new packets without returning the previous ones.
+ *
+ * DATA OWNERSHIP/LOCKING:
+ * The netmap_ring, and all slots and buffers in the range
+ * [head .. tail-1] are owned by the user program;
+ * the kernel only accesses them during a netmap system call
+ * and in the user thread context.
+ *
+ * Other slots and buffers are reserved for use by the kernel
+ */
+struct netmap_ring {
+ /*
+ * buf_ofs is meant to be used through macros.
+ * It contains the offset of the buffer region from this
+ * descriptor.
+ */
+ const int64_t buf_ofs;
+ const uint32_t num_slots; /* number of slots in the ring. */
+ const uint32_t nr_buf_size;
+ const uint16_t ringid;
+ const uint16_t dir; /* 0: tx, 1: rx */
+
+ uint32_t head; /* (u) first user slot */
+ uint32_t cur; /* (u) wakeup point */
+ uint32_t tail; /* (k) first kernel slot */
+
+ uint32_t flags;
+
+ struct timeval ts; /* (k) time of last *sync() */
+
+ /* opaque room for a mutex or similar object */
+#if !defined(_WIN32) || defined(__CYGWIN__)
+ uint8_t __attribute__((__aligned__(NM_CACHE_ALIGN))) sem[128];
+#else
+ uint8_t __declspec(align(NM_CACHE_ALIGN)) sem[128];
+#endif
+
+ /* the slots follow. This struct has variable size */
+ struct netmap_slot slot[0]; /* array of slots. */
+};
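
A sketch of the rx-side protocol described above: slots in [head .. tail-1] belong to userspace, and head/cur are advanced as packets are consumed. NETMAP_BUF() and nm_ring_next() are the usual netmap_user.h helpers, and process_frame() is a placeholder for the application's packet handler.

    static inline void
    example_drain_rx_ring (struct netmap_ring *ring)
    {
      uint32_t head = ring->head;

      while (head != ring->tail)        /* slots currently owned by userspace */
        {
          struct netmap_slot *slot = &ring->slot[head];
          char *buf = NETMAP_BUF (ring, slot->buf_idx);

          process_frame (buf, slot->len);

          head = nm_ring_next (ring, head);
        }

      /* hand the consumed slots back to the kernel on the next rxsync */
      ring->head = ring->cur = head;
    }
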
+
+
+/*
+ * RING FLAGS
+ */
+#define NR_TIMESTAMP 0x0002 /* set timestamp on *sync() */
+ /*
+ * updates the 'ts' field on each netmap syscall. This saves
+	 * a separate gettimeofday(), and is not much worse than
+ * software timestamps generated in the interrupt handler.
+ */
+
+#define NR_FORWARD 0x0004 /* enable NS_FORWARD for ring */
+ /*
+ * Enables the NS_FORWARD slot flag for the ring.
+ */
+
+
+/*
+ * Netmap representation of an interface and its queue(s).
+ * This is initialized by the kernel when binding a file
+ * descriptor to a port, and should be considered as readonly
+ * by user programs. The kernel never uses it.
+ *
+ * There is one netmap_if for each file descriptor on which we want
+ * to select/poll.
+ * select/poll operates on one or all pairs depending on the value of
+ * nmr_queueid passed on the ioctl.
+ */
+struct netmap_if {
+ char ni_name[IFNAMSIZ]; /* name of the interface. */
+ const uint32_t ni_version; /* API version, currently unused */
+ const uint32_t ni_flags; /* properties */
+#define NI_PRIV_MEM 0x1 /* private memory region */
+
+ /*
+ * The number of packet rings available in netmap mode.
+ * Physical NICs can have different numbers of tx and rx rings.
+ * Physical NICs also have a 'host' ring pair.
+ * Additionally, clients can request additional ring pairs to
+ * be used for internal communication.
+ */
+ const uint32_t ni_tx_rings; /* number of HW tx rings */
+ const uint32_t ni_rx_rings; /* number of HW rx rings */
+
+ uint32_t ni_bufs_head; /* head index for extra bufs */
+ uint32_t ni_spare1[5];
+ /*
+ * The following array contains the offset of each netmap ring
+ * from this structure, in the following order:
+ * NIC tx rings (ni_tx_rings); host tx ring (1); extra tx rings;
+ *  NIC rx rings (ni_rx_rings); host rx ring (1); extra rx rings.
+ *
+ * The area is filled up by the kernel on NIOCREGIF,
+ * and then only read by userspace code.
+ */
+ const ssize_t ring_ofs[0];
+};
+
+
+#ifndef NIOCREGIF
+/*
+ * ioctl names and related fields
+ *
+ * NIOCTXSYNC, NIOCRXSYNC synchronize tx or rx queues,
+ * whose identity is set in NIOCREGIF through nr_ringid.
+ * These are non blocking and take no argument.
+ *
+ * NIOCGINFO takes a struct ifreq, the interface name is the input,
+ *	the outputs are the number of queues and the number of descriptors
+ *	for each queue (useful to set the number of threads etc.).
+ * The info returned is only advisory and may change before
+ * the interface is bound to a file descriptor.
+ *
+ * NIOCREGIF takes an interface name within a struct nmre,
+ * and activates netmap mode on the interface (if possible).
+ *
+ * The argument to NIOCGINFO/NIOCREGIF overlays struct ifreq so we
+ * can pass it down to other NIC-related ioctls.
+ *
+ * The actual argument (struct nmreq) has a number of options to request
+ * different functions.
+ * The following are used in NIOCREGIF when nr_cmd == 0:
+ *
+ * nr_name (in)
+ * The name of the port (em0, valeXXX:YYY, etc.)
+ * limited to IFNAMSIZ for backward compatibility.
+ *
+ * nr_version (in/out)
+ * Must match NETMAP_API as used in the kernel, error otherwise.
+ * Always returns the desired value on output.
+ *
+ * nr_tx_slots, nr_rx_slots, nr_tx_rings, nr_rx_rings (in/out)
+ * On input, non-zero values may be used to reconfigure the port
+ * according to the requested values, but this is not guaranteed.
+ * On output the actual values in use are reported.
+ *
+ * nr_ringid (in)
+ * Indicates how rings should be bound to the file descriptors.
+ * If nr_flags != 0, then the low bits (in NETMAP_RING_MASK)
+ * are used to indicate the ring number, and nr_flags specifies
+ * the actual rings to bind. NETMAP_NO_TX_POLL is unaffected.
+ *
+ * NOTE: THE FOLLOWING (nr_flags == 0) IS DEPRECATED:
+ * If nr_flags == 0, NETMAP_HW_RING and NETMAP_SW_RING control
+ * the binding as follows:
+ * 0 (default) binds all physical rings
+ * NETMAP_HW_RING | ring number binds a single ring pair
+ * NETMAP_SW_RING binds only the host tx/rx rings
+ *
+ * NETMAP_NO_TX_POLL can be OR-ed to make select()/poll() push
+ * packets on tx rings only if POLLOUT is set.
+ * The default is to push any pending packet.
+ *
+ * NETMAP_DO_RX_POLL can be OR-ed to make select()/poll() release
+ * packets on rx rings also when POLLIN is NOT set.
+ * The default is to touch the rx ring only with POLLIN.
+ * Note that this is the opposite of TX because it
+ * reflects the common usage.
+ *
+ * NOTE: NETMAP_PRIV_MEM IS DEPRECATED, use nr_arg2 instead.
+ * NETMAP_PRIV_MEM is set on return for ports that do not use
+ * the global memory allocator.
+ * This information is not significant and applications
+ * should look at the region id in nr_arg2
+ *
+ * nr_flags is the recommended mode to indicate which rings should
+ * be bound to a file descriptor. Values are NR_REG_*
+ *
+ * nr_arg1 (in) The number of extra rings to be reserved.
+ * Especially when allocating a VALE port the system only
+ * allocates the amount of memory needed for the port.
+ * If more shared memory rings are desired (e.g. for pipes),
+ * the first invocation for the same basename/allocator
+ * should specify a suitable number. Memory cannot be
+ * extended after the first allocation without closing
+ * all ports on the same region.
+ *
+ * nr_arg2 (in/out) The identity of the memory region used.
+ * On input, 0 means the system decides autonomously,
+ * other values may try to select a specific region.
+ * On return the actual value is reported.
+ * Region '1' is the global allocator, normally shared
+ * by all interfaces. Other values are private regions.
+ *	If two ports use the same region, zero-copy is possible.
+ *
+ * nr_arg3 (in/out) number of extra buffers to be allocated.
+ *
+ *
+ *
+ * nr_cmd (in) if non-zero indicates a special command:
+ * NETMAP_BDG_ATTACH and nr_name = vale*:ifname
+ * attaches the NIC to the switch; nr_ringid specifies
+ * which rings to use. Used by vale-ctl -a ...
+ * nr_arg1 = NETMAP_BDG_HOST also attaches the host port
+ * as in vale-ctl -h ...
+ *
+ * NETMAP_BDG_DETACH and nr_name = vale*:ifname
+ * disconnects a previously attached NIC.
+ * Used by vale-ctl -d ...
+ *
+ * NETMAP_BDG_LIST
+ * list the configuration of VALE switches.
+ *
+ * NETMAP_BDG_VNET_HDR
+ * Set the virtio-net header length used by the client
+ * of a VALE switch port.
+ *
+ * NETMAP_BDG_NEWIF
+ * create a persistent VALE port with name nr_name.
+ * Used by vale-ctl -n ...
+ *
+ * NETMAP_BDG_DELIF
+ * delete a persistent VALE port. Used by vale-ctl -d ...
+ *
+ * nr_arg1, nr_arg2, nr_arg3 (in/out) command specific
+ *
+ *
+ *
+ */
+
+
+/*
+ * struct nmreq overlays a struct ifreq (just the name)
+ */
+struct nmreq {
+ char nr_name[IFNAMSIZ];
+ uint32_t nr_version; /* API version */
+ uint32_t nr_offset; /* nifp offset in the shared region */
+ uint32_t nr_memsize; /* size of the shared region */
+ uint32_t nr_tx_slots; /* slots in tx rings */
+ uint32_t nr_rx_slots; /* slots in rx rings */
+ uint16_t nr_tx_rings; /* number of tx rings */
+ uint16_t nr_rx_rings; /* number of rx rings */
+
+ uint16_t nr_ringid; /* ring(s) we care about */
+#define NETMAP_HW_RING 0x4000 /* single NIC ring pair */
+#define NETMAP_SW_RING 0x2000 /* only host ring pair */
+
+#define NETMAP_RING_MASK 0x0fff /* the ring number */
+
+#define NETMAP_NO_TX_POLL 0x1000 /* no automatic txsync on poll */
+
+#define NETMAP_DO_RX_POLL 0x8000 /* DO automatic rxsync on poll */
+
+ uint16_t nr_cmd;
+#define NETMAP_BDG_ATTACH 1 /* attach the NIC */
+#define NETMAP_BDG_DETACH 2 /* detach the NIC */
+#define NETMAP_BDG_REGOPS 3 /* register bridge callbacks */
+#define NETMAP_BDG_LIST 4 /* get bridge's info */
+#define NETMAP_BDG_VNET_HDR 5 /* set the port virtio-net-hdr length */
+#define NETMAP_BDG_OFFSET NETMAP_BDG_VNET_HDR /* deprecated alias */
+#define NETMAP_BDG_NEWIF 6 /* create a virtual port */
+#define NETMAP_BDG_DELIF 7 /* destroy a virtual port */
+#define NETMAP_PT_HOST_CREATE 8 /* create ptnetmap kthreads */
+#define NETMAP_PT_HOST_DELETE 9 /* delete ptnetmap kthreads */
+#define NETMAP_BDG_POLLING_ON	10	/* start polling kthread */
+#define NETMAP_BDG_POLLING_OFF 11 /* delete polling kthread */
+#define NETMAP_VNET_HDR_GET 12 /* get the port virtio-net-hdr length */
+ uint16_t nr_arg1; /* reserve extra rings in NIOCREGIF */
+#define NETMAP_BDG_HOST 1 /* attach the host stack on ATTACH */
+
+ uint16_t nr_arg2;
+ uint32_t nr_arg3; /* req. extra buffers in NIOCREGIF */
+ uint32_t nr_flags;
+ /* various modes, extends nr_ringid */
+ uint32_t spare2[1];
+};
+
+#define NR_REG_MASK 0xf /* values for nr_flags */
+enum { NR_REG_DEFAULT = 0, /* backward compat, should not be used. */
+ NR_REG_ALL_NIC = 1,
+ NR_REG_SW = 2,
+ NR_REG_NIC_SW = 3,
+ NR_REG_ONE_NIC = 4,
+ NR_REG_PIPE_MASTER = 5,
+ NR_REG_PIPE_SLAVE = 6,
+};
+/* monitor uses the NR_REG to select the rings to monitor */
+#define NR_MONITOR_TX 0x100
+#define NR_MONITOR_RX 0x200
+#define NR_ZCOPY_MON 0x400
+/* request exclusive access to the selected rings */
+#define NR_EXCLUSIVE 0x800
+/* request ptnetmap host support */
+#define NR_PASSTHROUGH_HOST NR_PTNETMAP_HOST /* deprecated */
+#define NR_PTNETMAP_HOST 0x1000
+#define NR_RX_RINGS_ONLY 0x2000
+#define NR_TX_RINGS_ONLY 0x4000
+/* Applications set this flag if they are able to deal with virtio-net headers,
+ * that is, send/receive frames that start with a virtio-net header.
+ * If not set, NIOCREGIF will fail with netmap ports that require applications
+ * to use those headers. If the flag is set, the application can use the
+ * NETMAP_VNET_HDR_GET command to figure out the header length. */
+#define NR_ACCEPT_VNET_HDR 0x8000
+
+
+/*
+ * Windows does not have _IOWR(). _IO(), _IOW() and _IOR() are defined
+ * in ws2def.h but not sure if they are in the form we need.
+ * XXX so we redefine them
+ * in a convenient way to use for DeviceIoControl signatures
+ */
+#ifdef _WIN32
+#undef _IO // ws2def.h
+#define _WIN_NM_IOCTL_TYPE 40000
+#define _IO(_c, _n) CTL_CODE(_WIN_NM_IOCTL_TYPE, ((_n) + 0x800) , \
+ METHOD_BUFFERED, FILE_ANY_ACCESS )
+#define _IO_direct(_c, _n) CTL_CODE(_WIN_NM_IOCTL_TYPE, ((_n) + 0x800) , \
+ METHOD_OUT_DIRECT, FILE_ANY_ACCESS )
+
+#define _IOWR(_c, _n, _s) _IO(_c, _n)
+
+/* We have some internal sysctls in addition to the externally visible ones */
+#define NETMAP_MMAP _IO_direct('i', 160) // note METHOD_OUT_DIRECT
+#define NETMAP_POLL _IO('i', 162)
+
+/* and also two setsockopt for sysctl emulation */
+#define NETMAP_SETSOCKOPT _IO('i', 140)
+#define NETMAP_GETSOCKOPT _IO('i', 141)
+
+
+// These link names are for the Netmap Core Driver
+#define NETMAP_NT_DEVICE_NAME L"\\Device\\NETMAP"
+#define NETMAP_DOS_DEVICE_NAME L"\\DosDevices\\netmap"
+
+//Definition of a structure used to pass a virtual address within an IOCTL
+typedef struct _MEMORY_ENTRY {
+ PVOID pUsermodeVirtualAddress;
+} MEMORY_ENTRY, *PMEMORY_ENTRY;
+
+typedef struct _POLL_REQUEST_DATA {
+ int events;
+ int timeout;
+ int revents;
+} POLL_REQUEST_DATA;
+
+#endif /* _WIN32 */
+
+/*
+ * FreeBSD uses the size value embedded in the _IOWR to determine
+ * how much to copy in/out. So we need it to match the actual
+ * data structure we pass. We put some spares in the structure
+ * to ease compatibility with other versions
+ */
+#define NIOCGINFO _IOWR('i', 145, struct nmreq) /* return IF info */
+#define NIOCREGIF _IOWR('i', 146, struct nmreq) /* interface register */
+#define NIOCTXSYNC _IO('i', 148) /* sync tx queues */
+#define NIOCRXSYNC _IO('i', 149) /* sync rx queues */
+#define NIOCCONFIG _IOWR('i',150, struct nm_ifreq) /* for ext. modules */
+#endif /* !NIOCREGIF */
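
The registration sequence documented above reduces to an open/NIOCREGIF/mmap triple; a reduced sketch follows, essentially what netmap_create_if() in netmap.c does. "eth0" is a placeholder port name, NETMAP_IF() is the netmap_user.h helper, and <fcntl.h>, <sys/ioctl.h>, <sys/mman.h> and <string.h> are assumed to be included.

    int
    example_open_port (void **mem, struct netmap_if **nifp)
    {
      struct nmreq req = { 0 };
      int fd = open ("/dev/netmap", O_RDWR);

      if (fd < 0)
        return -1;

      strncpy (req.nr_name, "eth0", IFNAMSIZ - 1);
      req.nr_version = NETMAP_API;
      req.nr_flags = NR_REG_ALL_NIC;    /* bind all NIC ring pairs */

      if (ioctl (fd, NIOCREGIF, &req) < 0)
        return -1;

      /* the shared region is mmap()ed; nr_offset locates the netmap_if in it */
      *mem = mmap (NULL, req.nr_memsize, PROT_READ | PROT_WRITE, MAP_SHARED,
                   fd, 0);
      if (*mem == MAP_FAILED)
        return -1;

      *nifp = NETMAP_IF (*mem, req.nr_offset);
      return fd;
    }
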
+
+
+/*
+ * Helper functions for kernel and userspace
+ */
+
+/*
+ * check if space is available in the ring.
+ */
+static inline int
+nm_ring_empty(struct netmap_ring *ring)
+{
+ return (ring->cur == ring->tail);
+}
+
+/*
+ * Opaque structure that is passed to an external kernel
+ * module via ioctl(fd, NIOCCONFIG, req) for a user-owned
+ * bridge port (at this point ephemeral VALE interface).
+ */
+#define NM_IFRDATA_LEN 256
+struct nm_ifreq {
+ char nifr_name[IFNAMSIZ];
+ char data[NM_IFRDATA_LEN];
+};
+
+/*
+ * netmap kernel thread configuration
+ */
+/* bhyve/vmm.ko MSIX parameters for IOCTL */
+struct ptn_vmm_ioctl_msix {
+ uint64_t msg;
+ uint64_t addr;
+};
+
+/* IOCTL parameters */
+struct nm_kth_ioctl {
+ u_long com;
+ /* TODO: use union */
+ union {
+ struct ptn_vmm_ioctl_msix msix;
+ } data;
+};
+
+/* Configuration of a ptnetmap ring */
+struct ptnet_ring_cfg {
+ uint64_t ioeventfd; /* eventfd in linux, tsleep() parameter in FreeBSD */
+ uint64_t irqfd; /* eventfd in linux, ioctl fd in FreeBSD */
+ struct nm_kth_ioctl ioctl; /* ioctl parameter to send irq (only used in bhyve/FreeBSD) */
+};
+#endif /* _NET_NETMAP_H_ */
diff --git a/src/vnet/devices/netmap/netmap.api b/src/vnet/devices/netmap/netmap.api
new file mode 100644
index 00000000..8dc698b9
--- /dev/null
+++ b/src/vnet/devices/netmap/netmap.api
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2015-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** \brief Create netmap
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param netmap_if_name - interface name
+ @param hw_addr - interface MAC
+ @param use_random_hw_addr - use random generated MAC
+ @param is_pipe - is pipe
+ @param is_master - 0=slave, 1=master
+*/
+autoreply define netmap_create
+{
+ u32 client_index;
+ u32 context;
+
+ u8 netmap_if_name[64];
+ u8 hw_addr[6];
+ u8 use_random_hw_addr;
+ u8 is_pipe;
+ u8 is_master;
+};
+
+/** \brief Delete netmap
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param netmap_if_name - interface name
+*/
+autoreply define netmap_delete
+{
+ u32 client_index;
+ u32 context;
+
+ u8 netmap_if_name[64];
+};
+
+/*
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/netmap/netmap.c b/src/vnet/devices/netmap/netmap.c
new file mode 100644
index 00000000..fc49ed62
--- /dev/null
+++ b/src/vnet/devices/netmap/netmap.c
@@ -0,0 +1,312 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <stdint.h>
+#include <net/if.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <fcntl.h>
+#include <vnet/devices/netmap/net_netmap.h>
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/devices/netmap/netmap.h>
+
+static u32
+netmap_eth_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hi,
+ u32 flags)
+{
+ /* nothing for now */
+ return 0;
+}
+
+static clib_error_t *
+netmap_fd_read_ready (clib_file_t * uf)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ netmap_main_t *nm = &netmap_main;
+ u32 idx = uf->private_data;
+
+ nm->pending_input_bitmap =
+ clib_bitmap_set (nm->pending_input_bitmap, idx, 1);
+
+ /* Schedule the rx node */
+ vlib_node_set_interrupt_pending (vm, netmap_input_node.index);
+
+ return 0;
+}
+
+static void
+close_netmap_if (netmap_main_t * nm, netmap_if_t * nif)
+{
+ if (nif->clib_file_index != ~0)
+ {
+ clib_file_del (&file_main, file_main.file_pool + nif->clib_file_index);
+ nif->clib_file_index = ~0;
+ }
+ else if (nif->fd > -1)
+ close (nif->fd);
+
+ if (nif->mem_region)
+ {
+ netmap_mem_region_t *reg = &nm->mem_regions[nif->mem_region];
+ if (--reg->refcnt == 0)
+ {
+ munmap (reg->mem, reg->region_size);
+ reg->region_size = 0;
+ }
+ }
+
+
+ mhash_unset (&nm->if_index_by_host_if_name, nif->host_if_name,
+ &nif->if_index);
+ vec_free (nif->host_if_name);
+ vec_free (nif->req);
+
+ memset (nif, 0, sizeof (*nif));
+ pool_put (nm->interfaces, nif);
+}
+
+int
+netmap_worker_thread_enable ()
+{
+ /* if worker threads are enabled, switch to polling mode */
+ foreach_vlib_main ((
+ {
+ vlib_node_set_state (this_vlib_main,
+ netmap_input_node.index,
+ VLIB_NODE_STATE_POLLING);
+ }));
+
+ return 0;
+}
+
+int
+netmap_worker_thread_disable ()
+{
+ foreach_vlib_main ((
+ {
+ vlib_node_set_state (this_vlib_main,
+ netmap_input_node.index,
+ VLIB_NODE_STATE_INTERRUPT);
+ }));
+
+ return 0;
+}
+
+int
+netmap_create_if (vlib_main_t * vm, u8 * if_name, u8 * hw_addr_set,
+ u8 is_pipe, u8 is_master, u32 * sw_if_index)
+{
+ netmap_main_t *nm = &netmap_main;
+ int ret = 0;
+ netmap_if_t *nif = 0;
+ u8 hw_addr[6];
+ clib_error_t *error = 0;
+ vnet_sw_interface_t *sw;
+ vnet_main_t *vnm = vnet_get_main ();
+ uword *p;
+ struct nmreq *req = 0;
+ netmap_mem_region_t *reg;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ int fd;
+
+ p = mhash_get (&nm->if_index_by_host_if_name, if_name);
+ if (p)
+ return VNET_API_ERROR_SUBIF_ALREADY_EXISTS;
+
+ fd = open ("/dev/netmap", O_RDWR);
+ if (fd < 0)
+    return VNET_API_ERROR_SYSCALL_ERROR_1;
+
+ pool_get (nm->interfaces, nif);
+ nif->if_index = nif - nm->interfaces;
+ nif->fd = fd;
+ nif->clib_file_index = ~0;
+
+ vec_validate (req, 0);
+ nif->req = req;
+ req->nr_version = NETMAP_API;
+ req->nr_flags = NR_REG_ALL_NIC;
+
+ if (is_pipe)
+ req->nr_flags = is_master ? NR_REG_PIPE_MASTER : NR_REG_PIPE_SLAVE;
+ else
+ req->nr_flags = NR_REG_ALL_NIC;
+
+ req->nr_flags |= NR_ACCEPT_VNET_HDR;
+ snprintf (req->nr_name, IFNAMSIZ, "%s", if_name);
+ req->nr_name[IFNAMSIZ - 1] = 0;
+
+ if (ioctl (nif->fd, NIOCREGIF, req))
+ {
+ ret = VNET_API_ERROR_NOT_CONNECTED;
+ goto error;
+ }
+
+ nif->mem_region = req->nr_arg2;
+ vec_validate (nm->mem_regions, nif->mem_region);
+ reg = &nm->mem_regions[nif->mem_region];
+ if (reg->region_size == 0)
+ {
+ reg->mem = mmap (NULL, req->nr_memsize, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
+ clib_warning ("mem %p", reg->mem);
+ if (reg->mem == MAP_FAILED)
+ {
+ ret = VNET_API_ERROR_NOT_CONNECTED;
+ goto error;
+ }
+ reg->region_size = req->nr_memsize;
+ }
+ reg->refcnt++;
+
+ nif->nifp = NETMAP_IF (reg->mem, req->nr_offset);
+ nif->first_rx_ring = 0;
+ nif->last_rx_ring = 0;
+ nif->first_tx_ring = 0;
+ nif->last_tx_ring = 0;
+ nif->host_if_name = if_name;
+ nif->per_interface_next_index = ~0;
+
+ if (tm->n_vlib_mains > 1)
+ clib_spinlock_init (&nif->lockp);
+
+ {
+ clib_file_t template = { 0 };
+ template.read_function = netmap_fd_read_ready;
+ template.file_descriptor = nif->fd;
+ template.private_data = nif->if_index;
+ nif->clib_file_index = clib_file_add (&file_main, &template);
+ }
+
+  /* use the configured MAC address or generate a random one */
+ if (hw_addr_set)
+ memcpy (hw_addr, hw_addr_set, 6);
+ else
+ {
+ f64 now = vlib_time_now (vm);
+ u32 rnd;
+ rnd = (u32) (now * 1e6);
+ rnd = random_u32 (&rnd);
+
+ memcpy (hw_addr + 2, &rnd, sizeof (rnd));
+ hw_addr[0] = 2;
+ hw_addr[1] = 0xfe;
+ }
+
+ error = ethernet_register_interface (vnm, netmap_device_class.index,
+ nif->if_index, hw_addr,
+ &nif->hw_if_index,
+ netmap_eth_flag_change);
+
+ if (error)
+ {
+ clib_error_report (error);
+ ret = VNET_API_ERROR_SYSCALL_ERROR_1;
+ goto error;
+ }
+
+ sw = vnet_get_hw_sw_interface (vnm, nif->hw_if_index);
+ nif->sw_if_index = sw->sw_if_index;
+
+ mhash_set_mem (&nm->if_index_by_host_if_name, if_name, &nif->if_index, 0);
+
+ if (sw_if_index)
+ *sw_if_index = nif->sw_if_index;
+
+ if (tm->n_vlib_mains > 1 && pool_elts (nm->interfaces) == 1)
+ netmap_worker_thread_enable ();
+
+ return 0;
+
+error:
+ close_netmap_if (nm, nif);
+ return ret;
+}
+
+int
+netmap_delete_if (vlib_main_t * vm, u8 * host_if_name)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ netmap_main_t *nm = &netmap_main;
+ netmap_if_t *nif;
+ uword *p;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+
+ p = mhash_get (&nm->if_index_by_host_if_name, host_if_name);
+ if (p == NULL)
+ {
+ clib_warning ("Host interface %s does not exist", host_if_name);
+ return VNET_API_ERROR_SYSCALL_ERROR_1;
+ }
+ nif = pool_elt_at_index (nm->interfaces, p[0]);
+
+ /* bring down the interface */
+ vnet_hw_interface_set_flags (vnm, nif->hw_if_index, 0);
+
+ ethernet_delete_interface (vnm, nif->hw_if_index);
+
+ close_netmap_if (nm, nif);
+
+ if (tm->n_vlib_mains > 1 && pool_elts (nm->interfaces) == 0)
+ netmap_worker_thread_disable ();
+
+ return 0;
+}
+
+static clib_error_t *
+netmap_init (vlib_main_t * vm)
+{
+ netmap_main_t *nm = &netmap_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ vlib_thread_registration_t *tr;
+ uword *p;
+
+ memset (nm, 0, sizeof (netmap_main_t));
+
+ nm->input_cpu_first_index = 0;
+ nm->input_cpu_count = 1;
+
+ /* find out which cpus will be used for input */
+ p = hash_get_mem (tm->thread_registrations_by_name, "workers");
+ tr = p ? (vlib_thread_registration_t *) p[0] : 0;
+
+ if (tr && tr->count > 0)
+ {
+ nm->input_cpu_first_index = tr->first_index;
+ nm->input_cpu_count = tr->count;
+ }
+
+ mhash_init_vec_string (&nm->if_index_by_host_if_name, sizeof (uword));
+
+ vec_validate_aligned (nm->rx_buffers, tm->n_vlib_mains - 1,
+ CLIB_CACHE_LINE_BYTES);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (netmap_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/netmap/netmap.h b/src/vnet/devices/netmap/netmap.h
new file mode 100644
index 00000000..04731890
--- /dev/null
+++ b/src/vnet/devices/netmap/netmap.h
@@ -0,0 +1,166 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+/*
+ * Copyright (C) 2011-2014 Universita` di Pisa. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <vppinfra/lock.h>
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ clib_spinlock_t lockp;
+ u8 *host_if_name;
+ uword if_index;
+ u32 hw_if_index;
+ u32 sw_if_index;
+ u32 clib_file_index;
+
+ u32 per_interface_next_index;
+ u8 is_admin_up;
+
+ /* netmap */
+ struct nmreq *req;
+ u16 mem_region;
+ int fd;
+ struct netmap_if *nifp;
+ u16 first_tx_ring;
+ u16 last_tx_ring;
+ u16 first_rx_ring;
+ u16 last_rx_ring;
+
+} netmap_if_t;
+
+typedef struct
+{
+ char *mem;
+ u32 region_size;
+ int refcnt;
+} netmap_mem_region_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ netmap_if_t *interfaces;
+
+ /* bitmap of pending rx interfaces */
+ uword *pending_input_bitmap;
+
+ /* rx buffer cache */
+ u32 **rx_buffers;
+
+ /* hash of host interface names */
+ mhash_t if_index_by_host_if_name;
+
+ /* vector of memory regions */
+ netmap_mem_region_t *mem_regions;
+
+ /* first cpu index */
+ u32 input_cpu_first_index;
+
+ /* total cpu count */
+ u32 input_cpu_count;
+} netmap_main_t;
+
+netmap_main_t netmap_main;
+extern vnet_device_class_t netmap_device_class;
+extern vlib_node_registration_t netmap_input_node;
+
+int netmap_create_if (vlib_main_t * vm, u8 * host_if_name, u8 * hw_addr_set,
+ u8 is_pipe, u8 is_master, u32 * sw_if_index);
+int netmap_delete_if (vlib_main_t * vm, u8 * host_if_name);
+
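A hedged usage sketch for these entry points; the caller, the "vale0:0" name
and the parameter choices are illustrative only:

/* Hypothetical caller: create a netmap interface, then delete it again. */
static int
netmap_example_create_delete (vlib_main_t * vm)
{
  u32 sw_if_index = ~0;
  u8 *name = format (0, "vale0:0%c", 0);	/* NUL-terminated vector */
  int rv;

  rv = netmap_create_if (vm, name, 0 /* random hw addr */ ,
			 0 /* is_pipe */ , 0 /* is_master */ , &sw_if_index);
  if (rv == 0)
    rv = netmap_delete_if (vm, name);

  vec_free (name);
  return rv;
}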
+
+/* Macros and helper functions from sys/net/netmap_user.h */
+
+#ifdef _NET_NETMAP_H_
+
+#define _NETMAP_OFFSET(type, ptr, offset) \
+ ((type)(void *)((char *)(ptr) + (offset)))
+
+#define NETMAP_IF(_base, _ofs) _NETMAP_OFFSET(struct netmap_if *, _base, _ofs)
+
+#define NETMAP_TXRING(nifp, index) _NETMAP_OFFSET(struct netmap_ring *, \
+ nifp, (nifp)->ring_ofs[index] )
+
+#define NETMAP_RXRING(nifp, index) _NETMAP_OFFSET(struct netmap_ring *, \
+ nifp, (nifp)->ring_ofs[index + (nifp)->ni_tx_rings + 1] )
+
+#define NETMAP_BUF(ring, index) \
+ ((char *)(ring) + (ring)->buf_ofs + ((index)*(ring)->nr_buf_size))
+
+#define NETMAP_BUF_IDX(ring, buf) \
+ ( ((char *)(buf) - ((char *)(ring) + (ring)->buf_ofs) ) / \
+ (ring)->nr_buf_size )
+
+static inline uint32_t
+nm_ring_next (struct netmap_ring *ring, uint32_t i)
+{
+ return (PREDICT_FALSE (i + 1 == ring->num_slots) ? 0 : i + 1);
+}
+
+
+/*
+ * Return 1 if we have pending transmissions in the tx ring.
+ * When everything is complete, ring->head = ring->tail + 1 (modulo ring size).
+ */
+static inline int
+nm_tx_pending (struct netmap_ring *ring)
+{
+ return nm_ring_next (ring, ring->tail) != ring->head;
+}
+
+static inline uint32_t
+nm_ring_space (struct netmap_ring *ring)
+{
+ int ret = ring->tail - ring->cur;
+ if (ret < 0)
+ ret += ring->num_slots;
+ return ret;
+}
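Taken together, a sketch of how the accessors above are typically combined to
walk one receive ring; the loop body is illustrative, the real consumer lives
in node.c:

/* Illustrative rx-ring walk using the accessors above. */
static inline uint32_t
nm_example_count_rx_bytes (struct netmap_if *nifp, uint16_t ring_index)
{
  struct netmap_ring *ring = NETMAP_RXRING (nifp, ring_index);
  uint32_t n = nm_ring_space (ring);	/* slots ready between cur and tail */
  uint32_t slot_index = ring->cur;
  uint32_t total = 0;

  while (n--)
    {
      struct netmap_slot *slot = &ring->slot[slot_index];
      char *data = NETMAP_BUF (ring, slot->buf_idx);	/* payload start */
      (void) data;		/* a real consumer would copy it out */
      total += slot->len;
      slot_index = nm_ring_next (ring, slot_index);
    }
  /* a consumer would then advance ring->head and ring->cur to slot_index */
  return total;
}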
+#endif
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/netmap/netmap_api.c b/src/vnet/devices/netmap/netmap_api.c
new file mode 100644
index 00000000..9a393b1f
--- /dev/null
+++ b/src/vnet/devices/netmap/netmap_api.c
@@ -0,0 +1,137 @@
+/*
+ *------------------------------------------------------------------
+ * netmap_api.c - netmap api
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/interface.h>
+#include <vnet/api_errno.h>
+#include <vnet/devices/netmap/netmap.h>
+
+#include <vnet/vnet_msg_enum.h>
+
+#define vl_typedefs /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun		/* define endian-swap functions */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vnet/vnet_all_api_h.h>
+#undef vl_printfun
+
+#include <vlibapi/api_helper_macros.h>
+
+#define foreach_vpe_api_msg \
+_(NETMAP_CREATE, netmap_create) \
+_(NETMAP_DELETE, netmap_delete) \
+
+static void
+vl_api_netmap_create_t_handler (vl_api_netmap_create_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vl_api_netmap_create_reply_t *rmp;
+ int rv = 0;
+ u8 *if_name = NULL;
+
+ if_name = format (0, "%s", mp->netmap_if_name);
+ vec_add1 (if_name, 0);
+
+ rv =
+ netmap_create_if (vm, if_name, mp->use_random_hw_addr ? 0 : mp->hw_addr,
+ mp->is_pipe, mp->is_master, 0);
+
+ vec_free (if_name);
+
+ REPLY_MACRO (VL_API_NETMAP_CREATE_REPLY);
+}
+
+static void
+vl_api_netmap_delete_t_handler (vl_api_netmap_delete_t * mp)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vl_api_netmap_delete_reply_t *rmp;
+ int rv = 0;
+ u8 *if_name = NULL;
+
+ if_name = format (0, "%s", mp->netmap_if_name);
+ vec_add1 (if_name, 0);
+
+ rv = netmap_delete_if (vm, if_name);
+
+ vec_free (if_name);
+
+ REPLY_MACRO (VL_API_NETMAP_DELETE_REPLY);
+}
+
+/*
+ * netmap_api_hookup
+ * Add vpe's API message handlers to the table.
+ * vlib has already mapped shared memory and
+ * added the client registration handlers.
+ * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process()
+ */
+#define vl_msg_name_crc_list
+#include <vnet/vnet_all_api_h.h>
+#undef vl_msg_name_crc_list
+
+static void
+setup_message_id_table (api_main_t * am)
+{
+#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
+ foreach_vl_msg_name_crc_netmap;
+#undef _
+}
+
+static clib_error_t *
+netmap_api_hookup (vlib_main_t * vm)
+{
+ api_main_t *am = &api_main;
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_vpe_api_msg;
+#undef _
+
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ setup_message_id_table (am);
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (netmap_api_hookup);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/netmap/node.c b/src/vnet/devices/netmap/node.c
new file mode 100644
index 00000000..e120eeae
--- /dev/null
+++ b/src/vnet/devices/netmap/node.c
@@ -0,0 +1,302 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <stdint.h>
+#include <net/if.h>
+#include <sys/ioctl.h>
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/devices/devices.h>
+#include <vnet/feature/feature.h>
+
+#include <vnet/devices/netmap/net_netmap.h>
+#include <vnet/devices/netmap/netmap.h>
+
+#define foreach_netmap_input_error
+
+typedef enum
+{
+#define _(f,s) NETMAP_INPUT_ERROR_##f,
+ foreach_netmap_input_error
+#undef _
+ NETMAP_INPUT_N_ERROR,
+} netmap_input_error_t;
+
+static char *netmap_input_error_strings[] = {
+#define _(n,s) s,
+ foreach_netmap_input_error
+#undef _
+};
+
+typedef struct
+{
+ u32 next_index;
+ u32 hw_if_index;
+ struct netmap_slot slot;
+} netmap_input_trace_t;
+
+static u8 *
+format_netmap_input_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ netmap_input_trace_t *t = va_arg (*args, netmap_input_trace_t *);
+ uword indent = format_get_indent (s);
+
+ s = format (s, "netmap: hw_if_index %d next-index %d",
+ t->hw_if_index, t->next_index);
+ s = format (s, "\n%Uslot: flags 0x%x len %u buf_idx %u",
+ format_white_space, indent + 2,
+ t->slot.flags, t->slot.len, t->slot.buf_idx);
+ return s;
+}
+
+always_inline void
+buffer_add_to_chain (vlib_main_t * vm, u32 bi, u32 first_bi, u32 prev_bi)
+{
+ vlib_buffer_t *b = vlib_get_buffer (vm, bi);
+ vlib_buffer_t *first_b = vlib_get_buffer (vm, first_bi);
+ vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_bi);
+
+ /* update first buffer */
+ first_b->total_length_not_including_first_buffer += b->current_length;
+
+ /* update previous buffer */
+ prev_b->next_buffer = bi;
+ prev_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
+
+ /* update current buffer */
+ b->next_buffer = 0;
+}
+
+always_inline uword
+netmap_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame, netmap_if_t * nif)
+{
+ u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+ uword n_trace = vlib_get_trace_count (vm, node);
+ netmap_main_t *nm = &netmap_main;
+ u32 n_rx_packets = 0;
+ u32 n_rx_bytes = 0;
+ u32 *to_next = 0;
+ u32 n_free_bufs;
+ struct netmap_ring *ring;
+ int cur_ring;
+ u32 thread_index = vlib_get_thread_index ();
+ u32 n_buffer_bytes = vlib_buffer_free_list_buffer_size (vm,
+ VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+
+ if (nif->per_interface_next_index != ~0)
+ next_index = nif->per_interface_next_index;
+
+ n_free_bufs = vec_len (nm->rx_buffers[thread_index]);
+ if (PREDICT_FALSE (n_free_bufs < VLIB_FRAME_SIZE))
+ {
+ vec_validate (nm->rx_buffers[thread_index],
+ VLIB_FRAME_SIZE + n_free_bufs - 1);
+ n_free_bufs +=
+ vlib_buffer_alloc (vm, &nm->rx_buffers[thread_index][n_free_bufs],
+ VLIB_FRAME_SIZE);
+ _vec_len (nm->rx_buffers[thread_index]) = n_free_bufs;
+ }
+
+ cur_ring = nif->first_rx_ring;
+ while (cur_ring <= nif->last_rx_ring && n_free_bufs)
+ {
+ int r = 0;
+ u32 cur_slot_index;
+ ring = NETMAP_RXRING (nif->nifp, cur_ring);
+ r = nm_ring_space (ring);
+
+ if (!r)
+ {
+ cur_ring++;
+ continue;
+ }
+
+ if (r > n_free_bufs)
+ r = n_free_bufs;
+
+ cur_slot_index = ring->cur;
+ while (r)
+ {
+ u32 n_left_to_next;
+ u32 next0 = next_index;
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (r && n_left_to_next)
+ {
+ vlib_buffer_t *first_b0 = 0;
+ u32 offset = 0;
+ u32 bi0 = 0, first_bi0 = 0, prev_bi0;
+ u32 next_slot_index = (cur_slot_index + 1) % ring->num_slots;
+ u32 next2_slot_index = (cur_slot_index + 2) % ring->num_slots;
+ struct netmap_slot *slot = &ring->slot[cur_slot_index];
+ u32 data_len = slot->len;
+
+ /* prefetch 2 slots in advance */
+ CLIB_PREFETCH (&ring->slot[next2_slot_index],
+ CLIB_CACHE_LINE_BYTES, LOAD);
+ /* prefetch start of next packet */
+ CLIB_PREFETCH (NETMAP_BUF
+ (ring, ring->slot[next_slot_index].buf_idx),
+ CLIB_CACHE_LINE_BYTES, LOAD);
+
+ while (data_len && n_free_bufs)
+ {
+ vlib_buffer_t *b0;
+ /* grab free buffer */
+ u32 last_empty_buffer =
+ vec_len (nm->rx_buffers[thread_index]) - 1;
+ prev_bi0 = bi0;
+ bi0 = nm->rx_buffers[thread_index][last_empty_buffer];
+ b0 = vlib_get_buffer (vm, bi0);
+ _vec_len (nm->rx_buffers[thread_index]) = last_empty_buffer;
+ n_free_bufs--;
+
+ /* copy data */
+ u32 bytes_to_copy =
+ data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
+ b0->current_data = 0;
+ clib_memcpy (vlib_buffer_get_current (b0),
+ (u8 *) NETMAP_BUF (ring,
+ slot->buf_idx) + offset,
+ bytes_to_copy);
+
+ /* fill buffer header */
+ b0->current_length = bytes_to_copy;
+
+ if (offset == 0)
+ {
+ b0->total_length_not_including_first_buffer = 0;
+ b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] =
+ nif->sw_if_index;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ first_bi0 = bi0;
+ first_b0 = vlib_get_buffer (vm, first_bi0);
+ }
+ else
+ buffer_add_to_chain (vm, bi0, first_bi0, prev_bi0);
+
+ offset += bytes_to_copy;
+ data_len -= bytes_to_copy;
+ }
+
+ /* trace */
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (first_b0);
+ if (PREDICT_FALSE (n_trace > 0))
+ {
+ if (PREDICT_TRUE (first_b0 != 0))
+ {
+ netmap_input_trace_t *tr;
+ vlib_trace_buffer (vm, node, next0, first_b0,
+ /* follow_chain */ 0);
+ vlib_set_trace_count (vm, node, --n_trace);
+ tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
+ tr->next_index = next0;
+ tr->hw_if_index = nif->hw_if_index;
+ memcpy (&tr->slot, slot, sizeof (struct netmap_slot));
+ }
+ }
+
+ /* redirect if feature path enabled */
+ vnet_feature_start_device_input_x1 (nif->sw_if_index, &next0,
+ first_b0);
+
+	      /* enqueue and take next packet */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, first_bi0,
+ next0);
+
+ /* next packet */
+ n_rx_packets++;
+ n_rx_bytes += slot->len;
+ to_next[0] = first_bi0;
+ to_next += 1;
+ n_left_to_next--;
+ cur_slot_index = next_slot_index;
+
+ r--;
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+ ring->head = ring->cur = cur_slot_index;
+ cur_ring++;
+ }
+
+ if (n_rx_packets)
+ ioctl (nif->fd, NIOCRXSYNC, NULL);
+
+ vlib_increment_combined_counter
+ (vnet_get_main ()->interface_main.combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ vlib_get_thread_index (), nif->hw_if_index, n_rx_packets, n_rx_bytes);
+
+ vnet_device_increment_rx_packets (thread_index, n_rx_packets);
+
+ return n_rx_packets;
+}
+
+static uword
+netmap_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ int i;
+ u32 n_rx_packets = 0;
+ u32 thread_index = vlib_get_thread_index ();
+ netmap_main_t *nm = &netmap_main;
+ netmap_if_t *nmi;
+
+ for (i = 0; i < vec_len (nm->interfaces); i++)
+ {
+ nmi = vec_elt_at_index (nm->interfaces, i);
+ if (nmi->is_admin_up &&
+ (i % nm->input_cpu_count) ==
+ (thread_index - nm->input_cpu_first_index))
+ n_rx_packets += netmap_device_input_fn (vm, node, frame, nmi);
+ }
+
+ return n_rx_packets;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (netmap_input_node) = {
+ .function = netmap_input_fn,
+ .name = "netmap-input",
+ .sibling_of = "device-input",
+ .format_trace = format_netmap_input_trace,
+ .type = VLIB_NODE_TYPE_INPUT,
+ /* default state is INTERRUPT mode, switch to POLLING if worker threads are enabled */
+ .state = VLIB_NODE_STATE_INTERRUPT,
+ .n_errors = NETMAP_INPUT_N_ERROR,
+ .error_strings = netmap_input_error_strings,
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (netmap_input_node, netmap_input_fn)
+/* *INDENT-ON* */
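The node registers in INTERRUPT state; a companion helper in netmap.c
(the disable counterpart is visible above) presumably switches it to POLLING
on every thread once worker threads and interfaces exist. A hedged sketch of
that switch, assuming the usual foreach_vlib_main / vlib_node_set_state
pattern:

/* Illustrative only: put netmap-input into polling mode on all threads. */
static void
netmap_example_enable_polling (void)
{
  /* *INDENT-OFF* */
  foreach_vlib_main (({
    vlib_node_set_state (this_vlib_main, netmap_input_node.index,
			 VLIB_NODE_STATE_POLLING);
  }));
  /* *INDENT-ON* */
}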
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/ssvm/node.c b/src/vnet/devices/ssvm/node.c
new file mode 100644
index 00000000..b7a8db05
--- /dev/null
+++ b/src/vnet/devices/ssvm/node.c
@@ -0,0 +1,345 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "ssvm_eth.h"
+
+vlib_node_registration_t ssvm_eth_input_node;
+
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+} ssvm_eth_input_trace_t;
+
+/* packet trace format function */
+static u8 *
+format_ssvm_eth_input_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ ssvm_eth_input_trace_t *t = va_arg (*args, ssvm_eth_input_trace_t *);
+
+ s = format (s, "SSVM_ETH_INPUT: sw_if_index %d, next index %d",
+ t->sw_if_index, t->next_index);
+ return s;
+}
+
+
+#define foreach_ssvm_eth_input_error \
+_(NO_BUFFERS, "Rx packet drops (no buffers)")
+
+typedef enum
+{
+#define _(sym,str) SSVM_ETH_INPUT_ERROR_##sym,
+ foreach_ssvm_eth_input_error
+#undef _
+ SSVM_ETH_INPUT_N_ERROR,
+} ssvm_eth_input_error_t;
+
+static char *ssvm_eth_input_error_strings[] = {
+#define _(sym,string) string,
+ foreach_ssvm_eth_input_error
+#undef _
+};
+
+typedef enum
+{
+ SSVM_ETH_INPUT_NEXT_DROP,
+ SSVM_ETH_INPUT_NEXT_ETHERNET_INPUT,
+ SSVM_ETH_INPUT_NEXT_IP4_INPUT,
+ SSVM_ETH_INPUT_NEXT_IP6_INPUT,
+ SSVM_ETH_INPUT_NEXT_MPLS_INPUT,
+ SSVM_ETH_INPUT_N_NEXT,
+} ssvm_eth_input_next_t;
+
+static inline uword
+ssvm_eth_device_input (ssvm_eth_main_t * em,
+ ssvm_private_t * intfc, vlib_node_runtime_t * node)
+{
+ ssvm_shared_header_t *sh = intfc->sh;
+ vlib_main_t *vm = em->vlib_main;
+ unix_shared_memory_queue_t *q;
+ ssvm_eth_queue_elt_t *elt, *elts;
+ u32 elt_index;
+ u32 my_pid = intfc->my_pid;
+ int rx_queue_index;
+ u32 n_to_alloc = VLIB_FRAME_SIZE * 2;
+ u32 n_allocated, n_present_in_cache;
+ u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+ vlib_buffer_free_list_t *fl;
+ u32 n_left_to_next, *to_next;
+ u32 next0;
+ u32 n_buffers;
+ u32 n_available;
+ u32 bi0, saved_bi0;
+ vlib_buffer_t *b0, *prev;
+ u32 saved_cache_size = 0;
+ ethernet_header_t *eh0;
+ u16 type0;
+ u32 n_rx_bytes = 0, l3_offset0;
+ u32 thread_index = vlib_get_thread_index ();
+ u32 trace_cnt __attribute__ ((unused)) = vlib_get_trace_count (vm, node);
+ volatile u32 *lock;
+ u32 *elt_indices;
+ uword n_trace = vlib_get_trace_count (vm, node);
+
+ /* Either side down? buh-bye... */
+ if (pointer_to_uword (sh->opaque[MASTER_ADMIN_STATE_INDEX]) == 0 ||
+ pointer_to_uword (sh->opaque[SLAVE_ADMIN_STATE_INDEX]) == 0)
+ return 0;
+
+ if (intfc->i_am_master)
+ q = (unix_shared_memory_queue_t *) (sh->opaque[TO_MASTER_Q_INDEX]);
+ else
+ q = (unix_shared_memory_queue_t *) (sh->opaque[TO_SLAVE_Q_INDEX]);
+
+ /* Nothing to do? */
+ if (q->cursize == 0)
+ return 0;
+
+ fl = vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+
+ vec_reset_length (intfc->rx_queue);
+
+ lock = (u32 *) q;
+ while (__sync_lock_test_and_set (lock, 1))
+ ;
+ while (q->cursize > 0)
+ {
+ unix_shared_memory_queue_sub_raw (q, (u8 *) & elt_index);
+ ASSERT (elt_index < 2048);
+ vec_add1 (intfc->rx_queue, elt_index);
+ }
+ CLIB_MEMORY_BARRIER ();
+ *lock = 0;
+
+ n_present_in_cache = vec_len (em->buffer_cache);
+
+ if (vec_len (em->buffer_cache) < vec_len (intfc->rx_queue) * 2)
+ {
+ vec_validate (em->buffer_cache,
+ n_to_alloc + vec_len (em->buffer_cache) - 1);
+ n_allocated =
+ vlib_buffer_alloc (vm, &em->buffer_cache[n_present_in_cache],
+ n_to_alloc);
+
+ n_present_in_cache += n_allocated;
+ _vec_len (em->buffer_cache) = n_present_in_cache;
+ }
+
+ elts = (ssvm_eth_queue_elt_t *) (sh->opaque[CHUNK_POOL_INDEX]);
+
+ n_buffers = vec_len (intfc->rx_queue);
+ rx_queue_index = 0;
+
+ while (n_buffers > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_buffers > 0 && n_left_to_next > 0)
+ {
+ elt = elts + intfc->rx_queue[rx_queue_index];
+
+ saved_cache_size = n_present_in_cache;
+ if (PREDICT_FALSE (saved_cache_size == 0))
+ {
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ goto out;
+ }
+ saved_bi0 = bi0 = em->buffer_cache[--n_present_in_cache];
+ b0 = vlib_get_buffer (vm, bi0);
+ prev = 0;
+
+ while (1)
+ {
+ vlib_buffer_init_for_free_list (b0, fl);
+
+ b0->current_data = elt->current_data_hint;
+ b0->current_length = elt->length_this_buffer;
+ b0->total_length_not_including_first_buffer =
+ elt->total_length_not_including_first_buffer;
+
+ clib_memcpy (b0->data + b0->current_data, elt->data,
+ b0->current_length);
+
+ if (PREDICT_FALSE (prev != 0))
+ prev->next_buffer = bi0;
+
+ if (PREDICT_FALSE (elt->flags & SSVM_BUFFER_NEXT_PRESENT))
+ {
+ prev = b0;
+ if (PREDICT_FALSE (n_present_in_cache == 0))
+ {
+ vlib_put_next_frame (vm, node, next_index,
+ n_left_to_next);
+ goto out;
+ }
+ bi0 = em->buffer_cache[--n_present_in_cache];
+ b0 = vlib_get_buffer (vm, bi0);
+ }
+ else
+ break;
+ }
+
+ saved_cache_size = n_present_in_cache;
+
+ to_next[0] = saved_bi0;
+ to_next++;
+ n_left_to_next--;
+
+ b0 = vlib_get_buffer (vm, saved_bi0);
+ eh0 = vlib_buffer_get_current (b0);
+
+ type0 = clib_net_to_host_u16 (eh0->type);
+
+ next0 = SSVM_ETH_INPUT_NEXT_ETHERNET_INPUT;
+
+ if (type0 == ETHERNET_TYPE_IP4)
+ next0 = SSVM_ETH_INPUT_NEXT_IP4_INPUT;
+ else if (type0 == ETHERNET_TYPE_IP6)
+ next0 = SSVM_ETH_INPUT_NEXT_IP6_INPUT;
+ else if (type0 == ETHERNET_TYPE_MPLS)
+ next0 = SSVM_ETH_INPUT_NEXT_MPLS_INPUT;
+
+ l3_offset0 = ((next0 == SSVM_ETH_INPUT_NEXT_IP4_INPUT ||
+ next0 == SSVM_ETH_INPUT_NEXT_IP6_INPUT ||
+ next0 == SSVM_ETH_INPUT_NEXT_MPLS_INPUT) ?
+ sizeof (ethernet_header_t) : 0);
+
+ n_rx_bytes += b0->current_length
+ + b0->total_length_not_including_first_buffer;
+
+ b0->current_data += l3_offset0;
+ b0->current_length -= l3_offset0;
+ b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
+
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = intfc->vlib_hw_if_index;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+
+ /*
+ * Turn this on if you run into
+ * "bad monkey" contexts, and you want to know exactly
+ * which nodes they've visited... See main.c...
+ */
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
+
+ if (PREDICT_FALSE (n_trace > 0))
+ {
+ ssvm_eth_input_trace_t *tr;
+
+ vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 1);
+ vlib_set_trace_count (vm, node, --n_trace);
+
+ tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
+
+ tr->next_index = next0;
+ tr->sw_if_index = intfc->vlib_hw_if_index;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ n_buffers--;
+ rx_queue_index++;
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+out:
+ if (em->buffer_cache)
+ _vec_len (em->buffer_cache) = saved_cache_size;
+ else
+ ASSERT (saved_cache_size == 0);
+
+ ssvm_lock (sh, my_pid, 2);
+
+ ASSERT (vec_len (intfc->rx_queue) > 0);
+
+ n_available = (u32) pointer_to_uword (sh->opaque[CHUNK_POOL_NFREE]);
+ elt_indices = (u32 *) (sh->opaque[CHUNK_POOL_FREELIST_INDEX]);
+
+ clib_memcpy (&elt_indices[n_available], intfc->rx_queue,
+ vec_len (intfc->rx_queue) * sizeof (u32));
+
+ n_available += vec_len (intfc->rx_queue);
+ sh->opaque[CHUNK_POOL_NFREE] = uword_to_pointer (n_available, void *);
+
+ ssvm_unlock (sh);
+
+ vlib_error_count (vm, node->node_index, SSVM_ETH_INPUT_ERROR_NO_BUFFERS,
+ n_buffers);
+
+ vlib_increment_combined_counter
+ (vnet_get_main ()->interface_main.combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX, thread_index,
+ intfc->vlib_hw_if_index, rx_queue_index, n_rx_bytes);
+
+ vnet_device_increment_rx_packets (thread_index, rx_queue_index);
+
+ return rx_queue_index;
+}
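The drain above treats the first word of the shared-memory queue as an ad-hoc
spinlock and pulls element indices with the _raw (lock-free) dequeue. A hedged,
stand-alone sketch of that pattern; the helper name is hypothetical:

/* Illustrative: drain all element indices from a shared-memory queue,
 * using the first u32 of the queue as a spinlock, as the rx path does. */
static inline void
ssvm_eth_example_drain_queue (unix_shared_memory_queue_t * q, u32 ** indices)
{
  volatile u32 *lock = (u32 *) q;
  u32 elt_index;

  while (__sync_lock_test_and_set (lock, 1))
    ;				/* spin until we own the queue */
  while (q->cursize > 0)
    {
      unix_shared_memory_queue_sub_raw (q, (u8 *) & elt_index);
      vec_add1 (*indices, elt_index);
    }
  CLIB_MEMORY_BARRIER ();
  *lock = 0;			/* release */
}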
+
+static uword
+ssvm_eth_input_node_fn (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ ssvm_eth_main_t *em = &ssvm_eth_main;
+ ssvm_private_t *intfc;
+ uword n_rx_packets = 0;
+
+ vec_foreach (intfc, em->intfcs)
+ {
+ n_rx_packets += ssvm_eth_device_input (em, intfc, node);
+ }
+
+ return n_rx_packets;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ssvm_eth_input_node) = {
+ .function = ssvm_eth_input_node_fn,
+ .name = "ssvm_eth_input",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ssvm_eth_input_trace,
+ .type = VLIB_NODE_TYPE_INPUT,
+ .state = VLIB_NODE_STATE_DISABLED,
+
+ .n_errors = ARRAY_LEN(ssvm_eth_input_error_strings),
+ .error_strings = ssvm_eth_input_error_strings,
+
+ .n_next_nodes = SSVM_ETH_INPUT_N_NEXT,
+
+ /* edit / add dispositions here */
+ .next_nodes = {
+ [SSVM_ETH_INPUT_NEXT_DROP] = "error-drop",
+ [SSVM_ETH_INPUT_NEXT_ETHERNET_INPUT] = "ethernet-input",
+ [SSVM_ETH_INPUT_NEXT_IP4_INPUT] = "ip4-input",
+ [SSVM_ETH_INPUT_NEXT_IP6_INPUT] = "ip6-input",
+ [SSVM_ETH_INPUT_NEXT_MPLS_INPUT] = "mpls-input",
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (ssvm_eth_input_node, ssvm_eth_input_node_fn)
+/* *INDENT-ON* */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/ssvm/ssvm_eth.c b/src/vnet/devices/ssvm/ssvm_eth.c
new file mode 100644
index 00000000..db4fafa9
--- /dev/null
+++ b/src/vnet/devices/ssvm/ssvm_eth.c
@@ -0,0 +1,491 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "ssvm_eth.h"
+
+ssvm_eth_main_t ssvm_eth_main;
+
+#define foreach_ssvm_eth_tx_func_error \
+_(RING_FULL, "Tx packet drops (ring full)") \
+_(NO_BUFFERS, "Tx packet drops (no buffers)") \
+_(ADMIN_DOWN, "Tx packet drops (admin down)")
+
+typedef enum
+{
+#define _(f,s) SSVM_ETH_TX_ERROR_##f,
+ foreach_ssvm_eth_tx_func_error
+#undef _
+ SSVM_ETH_TX_N_ERROR,
+} ssvm_eth_tx_func_error_t;
+
+static u32 ssvm_eth_flag_change (vnet_main_t * vnm,
+ vnet_hw_interface_t * hi, u32 flags);
+
+int
+ssvm_eth_create (ssvm_eth_main_t * em, u8 * name, int is_master)
+{
+ ssvm_private_t *intfc;
+ void *oldheap;
+ clib_error_t *e;
+ unix_shared_memory_queue_t *q;
+ ssvm_shared_header_t *sh;
+ ssvm_eth_queue_elt_t *elts;
+ u32 *elt_indices;
+ u8 enet_addr[6];
+ int i, rv;
+
+ vec_add2 (em->intfcs, intfc, 1);
+
+ intfc->ssvm_size = em->segment_size;
+ intfc->i_am_master = 1;
+ intfc->name = name;
+ intfc->my_pid = getpid ();
+ if (is_master == 0)
+ {
+ rv = ssvm_slave_init (intfc, 20 /* timeout in seconds */ );
+ if (rv < 0)
+ return rv;
+ goto create_vnet_interface;
+ }
+
+ intfc->requested_va = em->next_base_va;
+ em->next_base_va += em->segment_size;
+ rv = ssvm_master_init (intfc, intfc - em->intfcs /* master index */ );
+
+ if (rv < 0)
+ return rv;
+
+ /* OK, segment created, set up queues and so forth. */
+
+ sh = intfc->sh;
+ oldheap = ssvm_push_heap (sh);
+
+ q = unix_shared_memory_queue_init (em->queue_elts, sizeof (u32),
+ 0 /* consumer pid not interesting */ ,
+ 0 /* signal not sent */ );
+ sh->opaque[TO_MASTER_Q_INDEX] = (void *) q;
+ q = unix_shared_memory_queue_init (em->queue_elts, sizeof (u32),
+ 0 /* consumer pid not interesting */ ,
+ 0 /* signal not sent */ );
+ sh->opaque[TO_SLAVE_Q_INDEX] = (void *) q;
+
+ /*
+ * Preallocate the requested number of buffer chunks
+ * There must be a better way to do this, etc.
+ * Add some slop to avoid pool reallocation, which will not go well
+ */
+ elts = 0;
+ elt_indices = 0;
+
+ vec_validate_aligned (elts, em->nbuffers - 1, CLIB_CACHE_LINE_BYTES);
+ vec_validate_aligned (elt_indices, em->nbuffers - 1, CLIB_CACHE_LINE_BYTES);
+
+ for (i = 0; i < em->nbuffers; i++)
+ elt_indices[i] = i;
+
+ sh->opaque[CHUNK_POOL_INDEX] = (void *) elts;
+ sh->opaque[CHUNK_POOL_FREELIST_INDEX] = (void *) elt_indices;
+ sh->opaque[CHUNK_POOL_NFREE] = (void *) (uword) em->nbuffers;
+
+ ssvm_pop_heap (oldheap);
+
+create_vnet_interface:
+
+ sh = intfc->sh;
+
+ memset (enet_addr, 0, sizeof (enet_addr));
+ enet_addr[0] = 2;
+ enet_addr[1] = 0xFE;
+ enet_addr[2] = is_master;
+ enet_addr[5] = sh->master_index;
+
+ e = ethernet_register_interface
+ (em->vnet_main, ssvm_eth_device_class.index, intfc - em->intfcs,
+ /* ethernet address */ enet_addr,
+ &intfc->vlib_hw_if_index, ssvm_eth_flag_change);
+
+ if (e)
+ {
+ clib_error_report (e);
+ /* $$$$ unmap offending region? */
+ return VNET_API_ERROR_INVALID_INTERFACE;
+ }
+
+ /* Declare link up */
+ vnet_hw_interface_set_flags (em->vnet_main, intfc->vlib_hw_if_index,
+ VNET_HW_INTERFACE_FLAG_LINK_UP);
+
+ /* Let the games begin... */
+ if (is_master)
+ sh->ready = 1;
+ return 0;
+}
+
+static clib_error_t *
+ssvm_config (vlib_main_t * vm, unformat_input_t * input)
+{
+ u8 *name;
+ int is_master = 1;
+ int i, rv;
+ ssvm_eth_main_t *em = &ssvm_eth_main;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "base-va %llx", &em->next_base_va))
+ ;
+ else if (unformat (input, "segment-size %lld", &em->segment_size))
+ em->segment_size = 1ULL << (max_log2 (em->segment_size));
+ else if (unformat (input, "nbuffers %lld", &em->nbuffers))
+ ;
+ else if (unformat (input, "queue-elts %lld", &em->queue_elts))
+ ;
+ else if (unformat (input, "slave"))
+ is_master = 0;
+ else if (unformat (input, "%s", &name))
+ vec_add1 (em->names, name);
+ else
+ break;
+ }
+
+ /* No configured instances, we're done... */
+ if (vec_len (em->names) == 0)
+ return 0;
+
+ for (i = 0; i < vec_len (em->names); i++)
+ {
+ rv = ssvm_eth_create (em, em->names[i], is_master);
+ if (rv < 0)
+ return clib_error_return (0, "ssvm_eth_create '%s' failed, error %d",
+ em->names[i], rv);
+ }
+
+ vlib_node_set_state (vm, ssvm_eth_input_node.index,
+ VLIB_NODE_STATE_POLLING);
+
+ return 0;
+}
+
+VLIB_CONFIG_FUNCTION (ssvm_config, "ssvm_eth");
+
+
+static clib_error_t *
+ssvm_eth_init (vlib_main_t * vm)
+{
+ ssvm_eth_main_t *em = &ssvm_eth_main;
+
+ if (((sizeof (ssvm_eth_queue_elt_t) / CLIB_CACHE_LINE_BYTES)
+ * CLIB_CACHE_LINE_BYTES) != sizeof (ssvm_eth_queue_elt_t))
+ clib_warning ("ssvm_eth_queue_elt_t size %d not a multiple of %d",
+ sizeof (ssvm_eth_queue_elt_t), CLIB_CACHE_LINE_BYTES);
+
+ em->vlib_main = vm;
+ em->vnet_main = vnet_get_main ();
+ em->elog_main = &vm->elog_main;
+
+ /* default config param values... */
+
+ em->next_base_va = 0x600000000ULL;
+ /*
+ * Allocate 2 full superframes in each dir (256 x 2 x 2 x 2048 bytes),
+ * 2mb; double that so we have plenty of space... 4mb
+ */
+ em->segment_size = 8 << 20;
+ em->nbuffers = 1024;
+ em->queue_elts = 512;
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (ssvm_eth_init);
+
+static char *ssvm_eth_tx_func_error_strings[] = {
+#define _(n,s) s,
+ foreach_ssvm_eth_tx_func_error
+#undef _
+};
+
+static u8 *
+format_ssvm_eth_device_name (u8 * s, va_list * args)
+{
+ u32 i = va_arg (*args, u32);
+
+ s = format (s, "ssvmEthernet%d", i);
+ return s;
+}
+
+static u8 *
+format_ssvm_eth_device (u8 * s, va_list * args)
+{
+ s = format (s, "SSVM Ethernet");
+ return s;
+}
+
+static u8 *
+format_ssvm_eth_tx_trace (u8 * s, va_list * args)
+{
+ s = format (s, "Unimplemented...");
+ return s;
+}
+
+
+static uword
+ssvm_eth_interface_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * f)
+{
+ ssvm_eth_main_t *em = &ssvm_eth_main;
+ vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
+ ssvm_private_t *intfc = vec_elt_at_index (em->intfcs, rd->dev_instance);
+ ssvm_shared_header_t *sh = intfc->sh;
+ unix_shared_memory_queue_t *q;
+ u32 *from;
+ u32 n_left;
+ ssvm_eth_queue_elt_t *elts, *elt, *prev_elt;
+ u32 my_pid = intfc->my_pid;
+ vlib_buffer_t *b0;
+ u32 bi0;
+ u32 size_this_buffer;
+ u32 chunks_this_buffer;
+ u8 i_am_master = intfc->i_am_master;
+ u32 elt_index;
+ int is_ring_full, interface_down;
+ int i;
+ volatile u32 *queue_lock;
+ u32 n_to_alloc = VLIB_FRAME_SIZE;
+ u32 n_allocated, n_present_in_cache, n_available;
+ u32 *elt_indices;
+
+ if (i_am_master)
+ q = (unix_shared_memory_queue_t *) sh->opaque[TO_SLAVE_Q_INDEX];
+ else
+ q = (unix_shared_memory_queue_t *) sh->opaque[TO_MASTER_Q_INDEX];
+
+ queue_lock = (u32 *) q;
+
+ from = vlib_frame_vector_args (f);
+ n_left = f->n_vectors;
+ is_ring_full = 0;
+ interface_down = 0;
+
+ n_present_in_cache = vec_len (em->chunk_cache);
+
+ /* admin / link up/down check */
+ if (sh->opaque[MASTER_ADMIN_STATE_INDEX] == 0 ||
+ sh->opaque[SLAVE_ADMIN_STATE_INDEX] == 0)
+ {
+ interface_down = 1;
+ goto out;
+ }
+
+ ssvm_lock (sh, my_pid, 1);
+
+ elts = (ssvm_eth_queue_elt_t *) (sh->opaque[CHUNK_POOL_INDEX]);
+ elt_indices = (u32 *) (sh->opaque[CHUNK_POOL_FREELIST_INDEX]);
+ n_available = (u32) pointer_to_uword (sh->opaque[CHUNK_POOL_NFREE]);
+
+ if (n_present_in_cache < n_left * 2)
+ {
+ vec_validate (em->chunk_cache, n_to_alloc + n_present_in_cache - 1);
+
+ n_allocated = n_to_alloc < n_available ? n_to_alloc : n_available;
+
+ if (PREDICT_TRUE (n_allocated > 0))
+ {
+ clib_memcpy (&em->chunk_cache[n_present_in_cache],
+ &elt_indices[n_available - n_allocated],
+ sizeof (u32) * n_allocated);
+ }
+
+ n_present_in_cache += n_allocated;
+ n_available -= n_allocated;
+ sh->opaque[CHUNK_POOL_NFREE] = uword_to_pointer (n_available, void *);
+ _vec_len (em->chunk_cache) = n_present_in_cache;
+ }
+
+ ssvm_unlock (sh);
+
+ while (n_left)
+ {
+ bi0 = from[0];
+ b0 = vlib_get_buffer (vm, bi0);
+
+ size_this_buffer = vlib_buffer_length_in_chain (vm, b0);
+ chunks_this_buffer = (size_this_buffer + (SSVM_BUFFER_SIZE - 1))
+ / SSVM_BUFFER_SIZE;
+
+ /* If we're not going to be able to enqueue the buffer, tail drop. */
+ if (q->cursize >= q->maxsize)
+ {
+ is_ring_full = 1;
+ break;
+ }
+
+ prev_elt = 0;
+ elt_index = ~0;
+ for (i = 0; i < chunks_this_buffer; i++)
+ {
+ if (PREDICT_FALSE (n_present_in_cache == 0))
+ goto out;
+
+ elt_index = em->chunk_cache[--n_present_in_cache];
+ elt = elts + elt_index;
+
+ elt->type = SSVM_PACKET_TYPE;
+ elt->flags = 0;
+ elt->total_length_not_including_first_buffer =
+ b0->total_length_not_including_first_buffer;
+ elt->length_this_buffer = b0->current_length;
+ elt->current_data_hint = b0->current_data;
+ elt->owner = !i_am_master;
+ elt->tag = 1;
+
+ clib_memcpy (elt->data, b0->data + b0->current_data,
+ b0->current_length);
+
+ if (PREDICT_FALSE (prev_elt != 0))
+ prev_elt->next_index = elt - elts;
+
+ if (PREDICT_FALSE (i < (chunks_this_buffer - 1)))
+ {
+ elt->flags = SSVM_BUFFER_NEXT_PRESENT;
+ ASSERT (b0->flags & VLIB_BUFFER_NEXT_PRESENT);
+ b0 = vlib_get_buffer (vm, b0->next_buffer);
+ }
+ prev_elt = elt;
+ }
+
+ while (__sync_lock_test_and_set (queue_lock, 1))
+ ;
+
+ unix_shared_memory_queue_add_raw (q, (u8 *) & elt_index);
+ CLIB_MEMORY_BARRIER ();
+ *queue_lock = 0;
+
+ from++;
+ n_left--;
+ }
+
+out:
+ if (PREDICT_FALSE (n_left))
+ {
+ if (is_ring_full)
+ vlib_error_count (vm, node->node_index, SSVM_ETH_TX_ERROR_RING_FULL,
+ n_left);
+ else if (interface_down)
+ vlib_error_count (vm, node->node_index, SSVM_ETH_TX_ERROR_ADMIN_DOWN,
+ n_left);
+ else
+ vlib_error_count (vm, node->node_index, SSVM_ETH_TX_ERROR_NO_BUFFERS,
+ n_left);
+
+ vlib_buffer_free (vm, from, n_left);
+ }
+ else
+ vlib_buffer_free (vm, vlib_frame_vector_args (f), f->n_vectors);
+
+ if (PREDICT_TRUE (vec_len (em->chunk_cache)))
+ _vec_len (em->chunk_cache) = n_present_in_cache;
+
+ return f->n_vectors;
+}
+
+static void
+ssvm_eth_clear_hw_interface_counters (u32 instance)
+{
+ /* Nothing for now */
+}
+
+static clib_error_t *
+ssvm_eth_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
+ u32 flags)
+{
+ vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
+ uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
+ ssvm_eth_main_t *em = &ssvm_eth_main;
+ ssvm_private_t *intfc = vec_elt_at_index (em->intfcs, hif->dev_instance);
+ ssvm_shared_header_t *sh;
+
+ /* publish link-state in shared-memory, to discourage buffer-wasting */
+ sh = intfc->sh;
+ if (intfc->i_am_master)
+ sh->opaque[MASTER_ADMIN_STATE_INDEX] = (void *) is_up;
+ else
+ sh->opaque[SLAVE_ADMIN_STATE_INDEX] = (void *) is_up;
+
+ return 0;
+}
+
+static clib_error_t *
+ssvm_eth_subif_add_del_function (vnet_main_t * vnm,
+ u32 hw_if_index,
+ struct vnet_sw_interface_t *st, int is_add)
+{
+ /* Nothing for now */
+ return 0;
+}
+
+/*
+ * Dynamically redirect all pkts from a specific interface
+ * to the specified node
+ */
+static void
+ssvm_eth_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
+ u32 node_index)
+{
+ ssvm_eth_main_t *em = &ssvm_eth_main;
+ vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
+ ssvm_private_t *intfc = pool_elt_at_index (em->intfcs, hw->dev_instance);
+
+ /* Shut off redirection */
+ if (node_index == ~0)
+ {
+ intfc->per_interface_next_index = node_index;
+ return;
+ }
+
+ intfc->per_interface_next_index =
+ vlib_node_add_next (em->vlib_main, ssvm_eth_input_node.index, node_index);
+}
+
+static u32
+ssvm_eth_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hi, u32 flags)
+{
+ /* nothing for now */
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VNET_DEVICE_CLASS (ssvm_eth_device_class) = {
+ .name = "ssvm-eth",
+ .tx_function = ssvm_eth_interface_tx,
+ .tx_function_n_errors = SSVM_ETH_TX_N_ERROR,
+ .tx_function_error_strings = ssvm_eth_tx_func_error_strings,
+ .format_device_name = format_ssvm_eth_device_name,
+ .format_device = format_ssvm_eth_device,
+ .format_tx_trace = format_ssvm_eth_tx_trace,
+ .clear_counters = ssvm_eth_clear_hw_interface_counters,
+ .admin_up_down_function = ssvm_eth_interface_admin_up_down,
+ .subif_add_del_function = ssvm_eth_subif_add_del_function,
+ .rx_redirect_to_node = ssvm_eth_set_interface_next_node,
+};
+
+VLIB_DEVICE_TX_FUNCTION_MULTIARCH (ssvm_eth_device_class,
+ ssvm_eth_interface_tx)
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/ssvm/ssvm_eth.h b/src/vnet/devices/ssvm/ssvm_eth.h
new file mode 100644
index 00000000..f877df3c
--- /dev/null
+++ b/src/vnet/devices/ssvm/ssvm_eth.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_ssvm_eth_h__
+#define __included_ssvm_eth_h__
+
+#include <vnet/vnet.h>
+
+#include <vppinfra/elog.h>
+#include <vppinfra/error.h>
+#include <vppinfra/format.h>
+#include <vppinfra/hash.h>
+#include <vppinfra/vec.h>
+#include <vppinfra/elog.h>
+#include <vlib/vlib.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/devices/devices.h>
+#include <vnet/ip/ip.h>
+#include <vnet/pg/pg.h>
+#include <vlibmemory/unix_shared_memory_queue.h>
+
+#include <svm/ssvm.h>
+
+extern vnet_device_class_t ssvm_eth_device_class;
+extern vlib_node_registration_t ssvm_eth_input_node;
+
+#define SSVM_BUFFER_SIZE \
+ (VLIB_BUFFER_DATA_SIZE + VLIB_BUFFER_PRE_DATA_SIZE)
+#define SSVM_PACKET_TYPE 1
+
+typedef struct
+{
+ /* Type of queue element */
+ u8 type;
+ u8 flags;
+#define SSVM_BUFFER_NEXT_PRESENT (1<<0)
+ u8 owner;
+ u8 tag;
+ i16 current_data_hint;
+ u16 length_this_buffer;
+ u16 total_length_not_including_first_buffer;
+ u16 pad;
+ u32 next_index;
+ /* offset 16 */
+ u8 data[SSVM_BUFFER_SIZE];
+ /* pad to an even multiple of 64 octets */
+ u8 pad2[CLIB_CACHE_LINE_BYTES - 16];
+} ssvm_eth_queue_elt_t;
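ssvm_eth_init (in ssvm_eth.c) only warns at runtime if this element is not a
whole number of cache lines; a hedged compile-time equivalent, assuming a C11
compiler, would be:

/* Illustrative compile-time check mirroring the runtime warning in
 * ssvm_eth_init: the queue element must pack into whole cache lines. */
_Static_assert (sizeof (ssvm_eth_queue_elt_t) % CLIB_CACHE_LINE_BYTES == 0,
		"ssvm_eth_queue_elt_t must be a multiple of the cache line size");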
+
+typedef struct
+{
+ /* vector of point-to-point connections */
+ ssvm_private_t *intfcs;
+
+ u32 *buffer_cache;
+ u32 *chunk_cache;
+
+ /* Configurable parameters */
+ /* base address for next placement */
+ u64 next_base_va;
+ u64 segment_size;
+ u64 nbuffers;
+ u64 queue_elts;
+
+ /* Segment names */
+ u8 **names;
+
+ /* convenience */
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+ elog_main_t *elog_main;
+} ssvm_eth_main_t;
+
+ssvm_eth_main_t ssvm_eth_main;
+
+typedef enum
+{
+ CHUNK_POOL_FREELIST_INDEX = 0,
+ CHUNK_POOL_INDEX,
+ CHUNK_POOL_NFREE,
+ TO_MASTER_Q_INDEX,
+ TO_SLAVE_Q_INDEX,
+ MASTER_ADMIN_STATE_INDEX,
+ SLAVE_ADMIN_STATE_INDEX,
+} ssvm_eth_opaque_index_t;
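These indices name the slots of the shared header's opaque[] array through
which master and slave exchange queue pointers, admin state and the chunk free
list. A hedged sketch of reading the free list, matching the casts used in the
rx and tx paths; the helper itself is illustrative:

/* Illustrative: how a peer inspects the shared chunk pool via opaque[]. */
static inline u32
ssvm_eth_example_n_free_chunks (ssvm_shared_header_t * sh)
{
  u32 *elt_indices = (u32 *) sh->opaque[CHUNK_POOL_FREELIST_INDEX];
  u32 n_free = (u32) pointer_to_uword (sh->opaque[CHUNK_POOL_NFREE]);

  /* elt_indices[0 .. n_free-1] are the chunk indices currently free */
  (void) elt_indices;
  return n_free;
}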
+
+/*
+ * debug scaffolding.
+ */
+static inline void
+ssvm_eth_validate_freelists (int need_lock)
+{
+#if CLIB_DEBUG > 0
+ ssvm_eth_main_t *em = &ssvm_eth_main;
+ ssvm_private_t *intfc;
+ ssvm_shared_header_t *sh;
+ u32 *elt_indices;
+ u32 n_available;
+ int i;
+
+ for (i = 0; i < vec_len (em->intfcs); i++)
+ {
+ intfc = em->intfcs + i;
+ sh = intfc->sh;
+ u32 my_pid = intfc->my_pid;
+
+ if (need_lock)
+ ssvm_lock (sh, my_pid, 15);
+
+ elt_indices = (u32 *) (sh->opaque[CHUNK_POOL_FREELIST_INDEX]);
+ n_available = (u32) (uword) (sh->opaque[CHUNK_POOL_NFREE]);
+
+ for (i = 0; i < n_available; i++)
+ ASSERT (elt_indices[i] < 2048);
+
+ if (need_lock)
+ ssvm_unlock (sh);
+ }
+#endif
+}
+
+#endif /* __included_ssvm_eth_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/virtio/dir.dox b/src/vnet/devices/virtio/dir.dox
new file mode 100644
index 00000000..50150799
--- /dev/null
+++ b/src/vnet/devices/virtio/dir.dox
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Doxygen directory documentation */
+
+/**
+@dir
+@brief vHost User Interface Implementation.
+
+This directory contains the source code for vHost User driver.
+
+*/
+/*? %%clicmd:group_label vHost User %% ?*/
+/*? %%syscfg:group_label vHost User %% ?*/
diff --git a/src/vnet/devices/virtio/vhost-user.c b/src/vnet/devices/virtio/vhost-user.c
new file mode 100644
index 00000000..19ad9ab1
--- /dev/null
+++ b/src/vnet/devices/virtio/vhost-user.c
@@ -0,0 +1,3671 @@
+/*
+ *------------------------------------------------------------------
+ * vhost.c - vhost-user
+ *
+ * Copyright (c) 2014 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <fcntl.h> /* for open */
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/uio.h> /* for iovec */
+#include <netinet/in.h>
+#include <sys/vfs.h>
+
+#include <linux/if_arp.h>
+#include <linux/if_tun.h>
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+
+#include <vnet/ip/ip.h>
+
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/devices/devices.h>
+#include <vnet/feature/feature.h>
+
+#include <vnet/devices/virtio/vhost-user.h>
+
+/**
+ * @file
+ * @brief vHost User Device Driver.
+ *
+ * This file contains the source code for vHost User interface.
+ */
+
+
+#define VHOST_DEBUG_VQ 0
+
+#define DBG_SOCK(args...) \
+ { \
+ vhost_user_main_t *_vum = &vhost_user_main; \
+ if (_vum->debug) \
+ clib_warning(args); \
+ };
+
+#if VHOST_DEBUG_VQ == 1
+#define DBG_VQ(args...) clib_warning(args);
+#else
+#define DBG_VQ(args...)
+#endif
+
+/*
+ * When an RX queue is down but active, received packets
+ * must be discarded. This value controls up to how many
+ * packets will be discarded during each round.
+ */
+#define VHOST_USER_DOWN_DISCARD_COUNT 256
+
+/*
+ * When the number of available buffers gets under this threshold,
+ * RX node will start discarding packets.
+ */
+#define VHOST_USER_RX_BUFFER_STARVATION 32
+
+/*
+ * On the receive side, the host should free descriptors as soon
+ * as possible in order to avoid TX drop in the VM.
+ * This value controls the number of copy operations that are stacked
+ * before the copies are all executed and the descriptors are given back to
+ * the guest.
+ * The value 64 was obtained by testing (48 and 128 were not as good).
+ */
+#define VHOST_USER_RX_COPY_THRESHOLD 64
+/*
+ * On the transmit side, we keep processing the buffers from vlib in the while
+ * loop and prepare the copy order to be executed later. However, the static
+ * array in which we keep the copy order is limited to VHOST_USER_COPY_ARRAY_N
+ * entries. In order not to corrupt memory, we have to do the copy when the
+ * static array reaches the copy threshold. We subtract 40 in case the code
+ * goes into the inner loop for a maximum of 64k frames which may require
+ * more array entries.
+ */
+#define VHOST_USER_TX_COPY_THRESHOLD (VHOST_USER_COPY_ARRAY_N - 40)
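The rx and tx paths below batch their memcpy work into a fixed-size copy array
and flush it whenever one of these thresholds is reached. A hedged,
self-contained sketch of that batching discipline; the struct and helper here
are illustrative, not the driver's own types:

/* Illustrative copy batching: queue (dst, src, len) triples, flush in bulk. */
typedef struct
{
  void *dst;
  void *src;
  u32 len;
} example_copy_t;

static inline u32
example_flush_copies (example_copy_t * copies, u32 n)
{
  u32 i;
  for (i = 0; i < n; i++)
    clib_memcpy (copies[i].dst, copies[i].src, copies[i].len);
  return 0;			/* the array is empty again */
}

/* In the forwarding loop a copy is queued rather than executed, and the
 * batch is flushed once the threshold is hit:
 *
 *   copies[n_copy++] = (example_copy_t) { .dst = d, .src = s, .len = l };
 *   if (PREDICT_FALSE (n_copy >= VHOST_USER_TX_COPY_THRESHOLD))
 *     n_copy = example_flush_copies (copies, n_copy);
 */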
+
+#define UNIX_GET_FD(unixfd_idx) \
+ (unixfd_idx != ~0) ? \
+ pool_elt_at_index (file_main.file_pool, \
+ unixfd_idx)->file_descriptor : -1;
+
+#define foreach_virtio_trace_flags \
+ _ (SIMPLE_CHAINED, 0, "Simple descriptor chaining") \
+ _ (SINGLE_DESC, 1, "Single descriptor packet") \
+ _ (INDIRECT, 2, "Indirect descriptor") \
+ _ (MAP_ERROR, 4, "Memory mapping error")
+
+typedef enum
+{
+#define _(n,i,s) VIRTIO_TRACE_F_##n,
+ foreach_virtio_trace_flags
+#undef _
+} virtio_trace_flag_t;
+
+vlib_node_registration_t vhost_user_input_node;
+
+#define foreach_vhost_user_tx_func_error \
+ _(NONE, "no error") \
+ _(NOT_READY, "vhost vring not ready") \
+ _(DOWN, "vhost interface is down") \
+ _(PKT_DROP_NOBUF, "tx packet drops (no available descriptors)") \
+ _(PKT_DROP_NOMRG, "tx packet drops (cannot merge descriptors)") \
+ _(MMAP_FAIL, "mmap failure") \
+ _(INDIRECT_OVERFLOW, "indirect descriptor table overflow")
+
+typedef enum
+{
+#define _(f,s) VHOST_USER_TX_FUNC_ERROR_##f,
+ foreach_vhost_user_tx_func_error
+#undef _
+ VHOST_USER_TX_FUNC_N_ERROR,
+} vhost_user_tx_func_error_t;
+
+static char *vhost_user_tx_func_error_strings[] = {
+#define _(n,s) s,
+ foreach_vhost_user_tx_func_error
+#undef _
+};
+
+#define foreach_vhost_user_input_func_error \
+ _(NO_ERROR, "no error") \
+ _(NO_BUFFER, "no available buffer") \
+ _(MMAP_FAIL, "mmap failure") \
+ _(INDIRECT_OVERFLOW, "indirect descriptor overflows table") \
+ _(UNDERSIZED_FRAME, "undersized ethernet frame received (< 14 bytes)") \
+ _(FULL_RX_QUEUE, "full rx queue (possible driver tx drop)")
+
+typedef enum
+{
+#define _(f,s) VHOST_USER_INPUT_FUNC_ERROR_##f,
+ foreach_vhost_user_input_func_error
+#undef _
+ VHOST_USER_INPUT_FUNC_N_ERROR,
+} vhost_user_input_func_error_t;
+
+static char *vhost_user_input_func_error_strings[] = {
+#define _(n,s) s,
+ foreach_vhost_user_input_func_error
+#undef _
+};
+
+/* *INDENT-OFF* */
+static vhost_user_main_t vhost_user_main = {
+ .mtu_bytes = 1518,
+};
+
+VNET_HW_INTERFACE_CLASS (vhost_interface_class, static) = {
+ .name = "vhost-user",
+};
+/* *INDENT-ON* */
+
+static u8 *
+format_vhost_user_interface_name (u8 * s, va_list * args)
+{
+ u32 i = va_arg (*args, u32);
+ u32 show_dev_instance = ~0;
+ vhost_user_main_t *vum = &vhost_user_main;
+
+ if (i < vec_len (vum->show_dev_instance_by_real_dev_instance))
+ show_dev_instance = vum->show_dev_instance_by_real_dev_instance[i];
+
+ if (show_dev_instance != ~0)
+ i = show_dev_instance;
+
+ s = format (s, "VirtualEthernet0/0/%d", i);
+ return s;
+}
+
+static int
+vhost_user_name_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
+{
+ // FIXME: check if the new dev instance is already used
+ vhost_user_main_t *vum = &vhost_user_main;
+ vec_validate_init_empty (vum->show_dev_instance_by_real_dev_instance,
+ hi->dev_instance, ~0);
+
+ vum->show_dev_instance_by_real_dev_instance[hi->dev_instance] =
+ new_dev_instance;
+
+ DBG_SOCK ("renumbered vhost-user interface dev_instance %d to %d",
+ hi->dev_instance, new_dev_instance);
+
+ return 0;
+}
+
+static_always_inline void *
+map_guest_mem (vhost_user_intf_t * vui, uword addr, u32 * hint)
+{
+ int i = *hint;
+ if (PREDICT_TRUE ((vui->regions[i].guest_phys_addr <= addr) &&
+ ((vui->regions[i].guest_phys_addr +
+ vui->regions[i].memory_size) > addr)))
+ {
+ return (void *) (vui->region_mmap_addr[i] + addr -
+ vui->regions[i].guest_phys_addr);
+ }
+#if __SSE4_2__
+ __m128i rl, rh, al, ah, r;
+ al = _mm_set1_epi64x (addr + 1);
+ ah = _mm_set1_epi64x (addr);
+
+ rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[0]);
+ rl = _mm_cmpgt_epi64 (al, rl);
+ rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[0]);
+ rh = _mm_cmpgt_epi64 (rh, ah);
+ r = _mm_and_si128 (rl, rh);
+
+ rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[2]);
+ rl = _mm_cmpgt_epi64 (al, rl);
+ rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[2]);
+ rh = _mm_cmpgt_epi64 (rh, ah);
+ r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x22);
+
+ rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[4]);
+ rl = _mm_cmpgt_epi64 (al, rl);
+ rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[4]);
+ rh = _mm_cmpgt_epi64 (rh, ah);
+ r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x44);
+
+ rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[6]);
+ rl = _mm_cmpgt_epi64 (al, rl);
+ rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[6]);
+ rh = _mm_cmpgt_epi64 (rh, ah);
+ r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x88);
+
+ r = _mm_shuffle_epi8 (r, _mm_set_epi64x (0, 0x0e060c040a020800));
+ i = __builtin_ctzll (_mm_movemask_epi8 (r) |
+ (1 << VHOST_MEMORY_MAX_NREGIONS));
+
+ if (i < vui->nregions)
+ {
+ *hint = i;
+ return (void *) (vui->region_mmap_addr[i] + addr -
+ vui->regions[i].guest_phys_addr);
+ }
+
+#else
+ for (i = 0; i < vui->nregions; i++)
+ {
+ if ((vui->regions[i].guest_phys_addr <= addr) &&
+ ((vui->regions[i].guest_phys_addr + vui->regions[i].memory_size) >
+ addr))
+ {
+ *hint = i;
+ return (void *) (vui->region_mmap_addr[i] + addr -
+ vui->regions[i].guest_phys_addr);
+ }
+ }
+#endif
+ DBG_VQ ("failed to map guest mem addr %llx", addr);
+ *hint = 0;
+ return 0;
+}
+
+static inline void *
+map_user_mem (vhost_user_intf_t * vui, uword addr)
+{
+ int i;
+ for (i = 0; i < vui->nregions; i++)
+ {
+ if ((vui->regions[i].userspace_addr <= addr) &&
+ ((vui->regions[i].userspace_addr + vui->regions[i].memory_size) >
+ addr))
+ {
+ return (void *) (vui->region_mmap_addr[i] + addr -
+ vui->regions[i].userspace_addr);
+ }
+ }
+ return 0;
+}
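A hedged usage sketch for the translation helpers above: callers keep a
per-operation region hint so that consecutive lookups hitting the same region
skip the search entirely. The wrapper below is illustrative:

/* Illustrative: translate a guest physical buffer address before copying. */
static inline void *
example_desc_to_host (vhost_user_intf_t * vui, u64 guest_addr, u32 * map_hint)
{
  void *p = map_guest_mem (vui, guest_addr, map_hint);
  if (PREDICT_FALSE (p == 0))
    {
      /* region not found: the caller must drop the packet, and may record
         VIRTIO_TRACE_F_MAP_ERROR in its trace as the rx/tx paths do */
      return 0;
    }
  return p;
}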
+
+static long
+get_huge_page_size (int fd)
+{
+ struct statfs s;
+ fstatfs (fd, &s);
+ return s.f_bsize;
+}
+
+static void
+unmap_all_mem_regions (vhost_user_intf_t * vui)
+{
+ int i, r;
+ for (i = 0; i < vui->nregions; i++)
+ {
+ if (vui->region_mmap_addr[i] != (void *) -1)
+ {
+
+ long page_sz = get_huge_page_size (vui->region_mmap_fd[i]);
+
+ ssize_t map_sz = (vui->regions[i].memory_size +
+ vui->regions[i].mmap_offset +
+ page_sz - 1) & ~(page_sz - 1);
+
+ r =
+ munmap (vui->region_mmap_addr[i] - vui->regions[i].mmap_offset,
+ map_sz);
+
+ DBG_SOCK
+ ("unmap memory region %d addr 0x%lx len 0x%lx page_sz 0x%x", i,
+ vui->region_mmap_addr[i], map_sz, page_sz);
+
+ vui->region_mmap_addr[i] = (void *) -1;
+
+ if (r == -1)
+ {
+ clib_warning ("failed to unmap memory region (errno %d)",
+ errno);
+ }
+ close (vui->region_mmap_fd[i]);
+ }
+ }
+ vui->nregions = 0;
+}
+
+static void
+vhost_user_tx_thread_placement (vhost_user_intf_t * vui)
+{
+ //Let's try to assign one queue to each thread
+ u32 qid = 0;
+ u32 thread_index = 0;
+ vui->use_tx_spinlock = 0;
+ while (1)
+ {
+ for (qid = 0; qid < VHOST_VRING_MAX_N / 2; qid++)
+ {
+ vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
+ if (!rxvq->started || !rxvq->enabled)
+ continue;
+
+ vui->per_cpu_tx_qid[thread_index] = qid;
+ thread_index++;
+ if (thread_index == vlib_get_thread_main ()->n_vlib_mains)
+ return;
+ }
+ //We need to loop, meaning the spinlock has to be used
+ vui->use_tx_spinlock = 1;
+ if (thread_index == 0)
+ {
+ //Could not find a single valid one
+ for (thread_index = 0;
+ thread_index < vlib_get_thread_main ()->n_vlib_mains;
+ thread_index++)
+ {
+ vui->per_cpu_tx_qid[thread_index] = 0;
+ }
+ return;
+ }
+ }
+}
+
+/**
+ * @brief Unassign existing interface/queue to thread mappings and re-assign
+ * new interface/queue to thread mappings
+ */
+static void
+vhost_user_rx_thread_placement ()
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui;
+ vhost_user_vring_t *txvq;
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 qid;
+ int rv;
+ u16 *queue;
+
+ // Scrap all existing mappings for all interfaces/queues
+ /* *INDENT-OFF* */
+ pool_foreach (vui, vum->vhost_user_interfaces, {
+ vec_foreach (queue, vui->rx_queues)
+ {
+ rv = vnet_hw_interface_unassign_rx_thread (vnm, vui->hw_if_index,
+ *queue);
+ if (rv)
+ clib_warning ("Warning: unable to unassign interface %d, "
+ "queue %d: rc=%d", vui->hw_if_index, *queue, rv);
+ }
+ vec_reset_length (vui->rx_queues);
+ });
+ /* *INDENT-ON* */
+
+ // Create the rx_queues for all interfaces
+ /* *INDENT-OFF* */
+ pool_foreach (vui, vum->vhost_user_interfaces, {
+ for (qid = 0; qid < VHOST_VRING_MAX_N / 2; qid++)
+ {
+ txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
+ if (txvq->started)
+ {
+ if (txvq->mode == VNET_HW_INTERFACE_RX_MODE_UNKNOWN)
+ /* Set polling as the default */
+ txvq->mode = VNET_HW_INTERFACE_RX_MODE_POLLING;
+ vec_add1 (vui->rx_queues, qid);
+ }
+ }
+ });
+ /* *INDENT-ON* */
+
+ // Assign new mappings for all interfaces/queues
+ /* *INDENT-OFF* */
+ pool_foreach (vui, vum->vhost_user_interfaces, {
+ vnet_hw_interface_set_input_node (vnm, vui->hw_if_index,
+ vhost_user_input_node.index);
+ vec_foreach (queue, vui->rx_queues)
+ {
+ vnet_hw_interface_assign_rx_thread (vnm, vui->hw_if_index, *queue,
+ ~0);
+ txvq = &vui->vrings[VHOST_VRING_IDX_TX (*queue)];
+ rv = vnet_hw_interface_set_rx_mode (vnm, vui->hw_if_index, *queue,
+ txvq->mode);
+ if (rv)
+ clib_warning ("Warning: unable to set rx mode for interface %d, "
+ "queue %d: rc=%d", vui->hw_if_index, *queue, rv);
+ }
+ });
+ /* *INDENT-ON* */
+}
+
+/** @brief Returns whether at least one TX and one RX vring are enabled */
+int
+vhost_user_intf_ready (vhost_user_intf_t * vui)
+{
+ int i, found[2] = { }; //RX + TX
+
+ for (i = 0; i < VHOST_VRING_MAX_N; i++)
+ if (vui->vrings[i].started && vui->vrings[i].enabled)
+ found[i & 1] = 1;
+
+ return found[0] && found[1];
+}
+
+static void
+vhost_user_update_iface_state (vhost_user_intf_t * vui)
+{
+ /* if we have pointers to descriptor table, go up */
+ int is_up = vhost_user_intf_ready (vui);
+ if (is_up != vui->is_up)
+ {
+ DBG_SOCK ("interface %d %s", vui->sw_if_index,
+ is_up ? "ready" : "down");
+ vnet_hw_interface_set_flags (vnet_get_main (), vui->hw_if_index,
+ is_up ? VNET_HW_INTERFACE_FLAG_LINK_UP :
+ 0);
+ vui->is_up = is_up;
+ }
+ vhost_user_rx_thread_placement ();
+ vhost_user_tx_thread_placement (vui);
+}
+
+static void
+vhost_user_set_interrupt_pending (vhost_user_intf_t * vui, u32 ifq)
+{
+ u32 qid;
+ vnet_main_t *vnm = vnet_get_main ();
+
+ qid = ifq & 0xff;
+ if ((qid & 1) == 0)
+ /* Only care about the odd number, or TX, virtqueue */
+ return;
+
+ if (vhost_user_intf_ready (vui))
+ // qid >> 1 is to convert virtqueue number to vring queue index
+ vnet_device_input_set_interrupt_pending (vnm, vui->hw_if_index, qid >> 1);
+}
+
+static clib_error_t *
+vhost_user_callfd_read_ready (clib_file_t * uf)
+{
+ __attribute__ ((unused)) int n;
+ u8 buff[8];
+
+ n = read (uf->file_descriptor, ((char *) &buff), 8);
+
+ return 0;
+}
+
+static clib_error_t *
+vhost_user_kickfd_read_ready (clib_file_t * uf)
+{
+ __attribute__ ((unused)) int n;
+ u8 buff[8];
+ vhost_user_intf_t *vui =
+ pool_elt_at_index (vhost_user_main.vhost_user_interfaces,
+ uf->private_data >> 8);
+ u32 qid = uf->private_data & 0xff;
+
+ n = read (uf->file_descriptor, ((char *) &buff), 8);
+ DBG_SOCK ("if %d KICK queue %d", uf->private_data >> 8, qid);
+ if (!vui->vrings[qid].started ||
+ (vhost_user_intf_ready (vui) != vui->is_up))
+ {
+ vlib_worker_thread_barrier_sync (vlib_get_main ());
+ vui->vrings[qid].started = 1;
+ vhost_user_update_iface_state (vui);
+ vlib_worker_thread_barrier_release (vlib_get_main ());
+ }
+
+ vhost_user_set_interrupt_pending (vui, uf->private_data);
+ return 0;
+}
+
+/**
+ * @brief Try once to lock the vring
+ * @return 0 on success, non-zero on failure.
+ */
+static inline int
+vhost_user_vring_try_lock (vhost_user_intf_t * vui, u32 qid)
+{
+ return __sync_lock_test_and_set (vui->vring_locks[qid], 1);
+}
+
+/**
+ * @brief Spin until the vring is successfully locked
+ */
+static inline void
+vhost_user_vring_lock (vhost_user_intf_t * vui, u32 qid)
+{
+ while (vhost_user_vring_try_lock (vui, qid))
+ ;
+}
+
+/**
+ * @brief Unlock the vring lock
+ */
+static inline void
+vhost_user_vring_unlock (vhost_user_intf_t * vui, u32 qid)
+{
+ *vui->vring_locks[qid] = 0;
+}
+
+static inline void
+vhost_user_vring_init (vhost_user_intf_t * vui, u32 qid)
+{
+ vhost_user_vring_t *vring = &vui->vrings[qid];
+ memset (vring, 0, sizeof (*vring));
+ vring->kickfd_idx = ~0;
+ vring->callfd_idx = ~0;
+ vring->errfd = -1;
+
+ /*
+ * Some qemu 2.5 versions misbehave here, and this may be the fix.
+ * It feels like interpreting holy text, but this is from vhost-user.txt:
+ * "
+ * One queue pair is enabled initially. More queues are enabled
+ * dynamically, by sending message VHOST_USER_SET_VRING_ENABLE.
+ * "
+ * It is not clear who is right, but this is what DPDK does.
+ */
+ if (qid == 0 || qid == 1)
+ vring->enabled = 1;
+}
+
+static inline void
+vhost_user_vring_close (vhost_user_intf_t * vui, u32 qid)
+{
+ vhost_user_vring_t *vring = &vui->vrings[qid];
+ if (vring->kickfd_idx != ~0)
+ {
+ clib_file_t *uf = pool_elt_at_index (file_main.file_pool,
+ vring->kickfd_idx);
+ clib_file_del (&file_main, uf);
+ vring->kickfd_idx = ~0;
+ }
+ if (vring->callfd_idx != ~0)
+ {
+ clib_file_t *uf = pool_elt_at_index (file_main.file_pool,
+ vring->callfd_idx);
+ clib_file_del (&file_main, uf);
+ vring->callfd_idx = ~0;
+ }
+ if (vring->errfd != -1)
+ {
+ close (vring->errfd);
+ vring->errfd = -1;
+ }
+ vhost_user_vring_init (vui, qid);
+}
+
+static inline void
+vhost_user_if_disconnect (vhost_user_intf_t * vui)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ int q;
+
+ vnet_hw_interface_set_flags (vnm, vui->hw_if_index, 0);
+
+ if (vui->clib_file_index != ~0)
+ {
+ clib_file_del (&file_main, file_main.file_pool + vui->clib_file_index);
+ vui->clib_file_index = ~0;
+ }
+
+ vui->is_up = 0;
+
+ for (q = 0; q < VHOST_VRING_MAX_N; q++)
+ vhost_user_vring_close (vui, q);
+
+ unmap_all_mem_regions (vui);
+ DBG_SOCK ("interface ifindex %d disconnected", vui->sw_if_index);
+}
+
+#define VHOST_LOG_PAGE 0x1000
+static_always_inline void
+vhost_user_log_dirty_pages_2 (vhost_user_intf_t * vui,
+ u64 addr, u64 len, u8 is_host_address)
+{
+ if (PREDICT_TRUE (vui->log_base_addr == 0
+ || !(vui->features & (1 << FEAT_VHOST_F_LOG_ALL))))
+ {
+ return;
+ }
+ if (is_host_address)
+ {
+ addr = pointer_to_uword (map_user_mem (vui, (uword) addr));
+ }
+ if (PREDICT_FALSE ((addr + len - 1) / VHOST_LOG_PAGE / 8 >= vui->log_size))
+ {
+ DBG_SOCK ("vhost_user_log_dirty_pages(): out of range\n");
+ return;
+ }
+
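+ /* Each bit in the dirty log covers one VHOST_LOG_PAGE-byte page;
+ * mark every page touched by [addr, addr + len). */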
+ CLIB_MEMORY_BARRIER ();
+ u64 page = addr / VHOST_LOG_PAGE;
+ while (page * VHOST_LOG_PAGE < addr + len)
+ {
+ ((u8 *) vui->log_base_addr)[page / 8] |= 1 << page % 8;
+ page++;
+ }
+}
+
+static_always_inline void
+vhost_user_log_dirty_pages (vhost_user_intf_t * vui, u64 addr, u64 len)
+{
+ vhost_user_log_dirty_pages_2 (vui, addr, len, 0);
+}
+
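+/* Log the pages backing a given member of the used ring so that a
+ * migrating master can track guest-visible ring updates. Only active
+ * when the vring was set up with VHOST_VRING_F_LOG. */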
+#define vhost_user_log_dirty_ring(vui, vq, member) \
+ if (PREDICT_FALSE(vq->log_used)) { \
+ vhost_user_log_dirty_pages(vui, vq->log_guest_addr + STRUCT_OFFSET_OF(vring_used_t, member), \
+ sizeof(vq->used->member)); \
+ }
+
+static clib_error_t *
+vhost_user_socket_read (clib_file_t * uf)
+{
+ int n, i;
+ int fd, number_of_fds = 0;
+ int fds[VHOST_MEMORY_MAX_NREGIONS];
+ vhost_user_msg_t msg;
+ struct msghdr mh;
+ struct iovec iov[1];
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui;
+ struct cmsghdr *cmsg;
+ u8 q;
+ clib_file_t template = { 0 };
+ vnet_main_t *vnm = vnet_get_main ();
+
+ vui = pool_elt_at_index (vum->vhost_user_interfaces, uf->private_data);
+
+ char control[CMSG_SPACE (VHOST_MEMORY_MAX_NREGIONS * sizeof (int))];
+
+ memset (&mh, 0, sizeof (mh));
+ memset (control, 0, sizeof (control));
+
+ for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++)
+ fds[i] = -1;
+
+ /* set the payload */
+ iov[0].iov_base = (void *) &msg;
+ iov[0].iov_len = VHOST_USER_MSG_HDR_SZ;
+
+ mh.msg_iov = iov;
+ mh.msg_iovlen = 1;
+ mh.msg_control = control;
+ mh.msg_controllen = sizeof (control);
+
+ n = recvmsg (uf->file_descriptor, &mh, 0);
+
+ /* Stop workers to avoid end of the world */
+ vlib_worker_thread_barrier_sync (vlib_get_main ());
+
+ if (n != VHOST_USER_MSG_HDR_SZ)
+ {
+ if (n == -1)
+ {
+ DBG_SOCK ("recvmsg returned error %d %s", errno, strerror (errno));
+ }
+ else
+ {
+ DBG_SOCK ("n (%d) != VHOST_USER_MSG_HDR_SZ (%d)",
+ n, VHOST_USER_MSG_HDR_SZ);
+ }
+ goto close_socket;
+ }
+
+ if (mh.msg_flags & MSG_CTRUNC)
+ {
+ DBG_SOCK ("MSG_CTRUNC is set");
+ goto close_socket;
+ }
+
+ cmsg = CMSG_FIRSTHDR (&mh);
+
+ if (cmsg && (cmsg->cmsg_len > 0) && (cmsg->cmsg_level == SOL_SOCKET) &&
+ (cmsg->cmsg_type == SCM_RIGHTS) &&
+ (cmsg->cmsg_len - CMSG_LEN (0) <=
+ VHOST_MEMORY_MAX_NREGIONS * sizeof (int)))
+ {
+ number_of_fds = (cmsg->cmsg_len - CMSG_LEN (0)) / sizeof (int);
+ clib_memcpy (fds, CMSG_DATA (cmsg), number_of_fds * sizeof (int));
+ }
+
+ /* version 1, no reply bit set */
+ if ((msg.flags & 7) != 1)
+ {
+ DBG_SOCK ("malformed message received. closing socket");
+ goto close_socket;
+ }
+
+ {
+ int rv;
+ rv =
+ read (uf->file_descriptor, ((char *) &msg) + VHOST_USER_MSG_HDR_SZ,
+ msg.size);
+ if (rv < 0)
+ {
+ DBG_SOCK ("read failed %s", strerror (errno));
+ goto close_socket;
+ }
+ else if (rv != msg.size)
+ {
+ DBG_SOCK ("message too short (read %dB should be %dB)", rv, msg.size);
+ goto close_socket;
+ }
+ }
+
+ switch (msg.request)
+ {
+ case VHOST_USER_GET_FEATURES:
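+ /* Bit 2 of msg.flags marks this message as carrying a reply payload;
+ * the reply itself is sent at the end of this function. */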
+ msg.flags |= 4;
+ msg.u64 = (1ULL << FEAT_VIRTIO_NET_F_MRG_RXBUF) |
+ (1ULL << FEAT_VIRTIO_NET_F_CTRL_VQ) |
+ (1ULL << FEAT_VIRTIO_F_ANY_LAYOUT) |
+ (1ULL << FEAT_VIRTIO_F_INDIRECT_DESC) |
+ (1ULL << FEAT_VHOST_F_LOG_ALL) |
+ (1ULL << FEAT_VIRTIO_NET_F_GUEST_ANNOUNCE) |
+ (1ULL << FEAT_VIRTIO_NET_F_MQ) |
+ (1ULL << FEAT_VHOST_USER_F_PROTOCOL_FEATURES) |
+ (1ULL << FEAT_VIRTIO_F_VERSION_1);
+ msg.u64 &= vui->feature_mask;
+ msg.size = sizeof (msg.u64);
+ DBG_SOCK ("if %d msg VHOST_USER_GET_FEATURES - reply 0x%016llx",
+ vui->hw_if_index, msg.u64);
+ break;
+
+ case VHOST_USER_SET_FEATURES:
+ DBG_SOCK ("if %d msg VHOST_USER_SET_FEATURES features 0x%016llx",
+ vui->hw_if_index, msg.u64);
+
+ vui->features = msg.u64;
+
+ if (vui->features &
+ ((1 << FEAT_VIRTIO_NET_F_MRG_RXBUF) |
+ (1ULL << FEAT_VIRTIO_F_VERSION_1)))
+ vui->virtio_net_hdr_sz = 12;
+ else
+ vui->virtio_net_hdr_sz = 10;
+
+ vui->is_any_layout =
+ (vui->features & (1 << FEAT_VIRTIO_F_ANY_LAYOUT)) ? 1 : 0;
+
+ ASSERT (vui->virtio_net_hdr_sz < VLIB_BUFFER_PRE_DATA_SIZE);
+ vnet_hw_interface_set_flags (vnm, vui->hw_if_index, 0);
+ vui->is_up = 0;
+
+ /*for (q = 0; q < VHOST_VRING_MAX_N; q++)
+ vhost_user_vring_close(&vui->vrings[q]); */
+
+ break;
+
+ case VHOST_USER_SET_MEM_TABLE:
+ DBG_SOCK ("if %d msg VHOST_USER_SET_MEM_TABLE nregions %d",
+ vui->hw_if_index, msg.memory.nregions);
+
+ if ((msg.memory.nregions < 1) ||
+ (msg.memory.nregions > VHOST_MEMORY_MAX_NREGIONS))
+ {
+
+ DBG_SOCK ("number of mem regions must be between 1 and %i",
+ VHOST_MEMORY_MAX_NREGIONS);
+
+ goto close_socket;
+ }
+
+ if (msg.memory.nregions != number_of_fds)
+ {
+ DBG_SOCK ("each memory region must have FD");
+ goto close_socket;
+ }
+ unmap_all_mem_regions (vui);
+ for (i = 0; i < msg.memory.nregions; i++)
+ {
+ clib_memcpy (&(vui->regions[i]), &msg.memory.regions[i],
+ sizeof (vhost_user_memory_region_t));
+
+ long page_sz = get_huge_page_size (fds[i]);
+
+ /* align size to the page size of this fd (huge page if applicable) */
+ ssize_t map_sz = (vui->regions[i].memory_size +
+ vui->regions[i].mmap_offset +
+ page_sz - 1) & ~(page_sz - 1);
+
+ vui->region_mmap_addr[i] = mmap (0, map_sz, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fds[i], 0);
+ vui->region_guest_addr_lo[i] = vui->regions[i].guest_phys_addr;
+ vui->region_guest_addr_hi[i] = vui->regions[i].guest_phys_addr +
+ vui->regions[i].memory_size;
+
+ DBG_SOCK
+ ("map memory region %d addr 0 len 0x%lx fd %d mapped 0x%lx "
+ "page_sz 0x%x", i, map_sz, fds[i], vui->region_mmap_addr[i],
+ page_sz);
+
+ if (vui->region_mmap_addr[i] == MAP_FAILED)
+ {
+ clib_warning ("failed to map memory. errno is %d", errno);
+ goto close_socket;
+ }
+ vui->region_mmap_addr[i] += vui->regions[i].mmap_offset;
+ vui->region_mmap_fd[i] = fds[i];
+ }
+ vui->nregions = msg.memory.nregions;
+ break;
+
+ case VHOST_USER_SET_VRING_NUM:
+ DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_NUM idx %d num %d",
+ vui->hw_if_index, msg.state.index, msg.state.num);
+
+ if ((msg.state.num > 32768) || /* maximum ring size is 32768 */
+ (msg.state.num == 0) || /* it cannot be zero */
+ ((msg.state.num - 1) & msg.state.num)) /* must be power of 2 */
+ goto close_socket;
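+ /* The ring size is a power of 2 (checked above), so it is stored as a
+ * mask and ring indices are wrapped with '& qsz_mask'. */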
+ vui->vrings[msg.state.index].qsz_mask = msg.state.num - 1;
+ break;
+
+ case VHOST_USER_SET_VRING_ADDR:
+ DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_ADDR idx %d",
+ vui->hw_if_index, msg.state.index);
+
+ if (msg.state.index >= VHOST_VRING_MAX_N)
+ {
+ DBG_SOCK ("invalid vring index VHOST_USER_SET_VRING_ADDR:"
+ " %d >= %d", msg.state.index, VHOST_VRING_MAX_N);
+ goto close_socket;
+ }
+
+ if (msg.size < sizeof (msg.addr))
+ {
+ DBG_SOCK ("vhost message is too short (%d < %d)",
+ msg.size, sizeof (msg.addr));
+ goto close_socket;
+ }
+
+ vui->vrings[msg.state.index].desc = (vring_desc_t *)
+ map_user_mem (vui, msg.addr.desc_user_addr);
+ vui->vrings[msg.state.index].used = (vring_used_t *)
+ map_user_mem (vui, msg.addr.used_user_addr);
+ vui->vrings[msg.state.index].avail = (vring_avail_t *)
+ map_user_mem (vui, msg.addr.avail_user_addr);
+
+ if ((vui->vrings[msg.state.index].desc == NULL) ||
+ (vui->vrings[msg.state.index].used == NULL) ||
+ (vui->vrings[msg.state.index].avail == NULL))
+ {
+ DBG_SOCK ("failed to map user memory for hw_if_index %d",
+ vui->hw_if_index);
+ goto close_socket;
+ }
+
+ vui->vrings[msg.state.index].log_guest_addr = msg.addr.log_guest_addr;
+ vui->vrings[msg.state.index].log_used =
+ (msg.addr.flags & (1 << VHOST_VRING_F_LOG)) ? 1 : 0;
+
+ /* Spec says: If VHOST_USER_F_PROTOCOL_FEATURES has not been negotiated,
+ the ring is initialized in an enabled state. */
+ if (!(vui->features & (1 << FEAT_VHOST_USER_F_PROTOCOL_FEATURES)))
+ {
+ vui->vrings[msg.state.index].enabled = 1;
+ }
+
+ vui->vrings[msg.state.index].last_used_idx =
+ vui->vrings[msg.state.index].last_avail_idx =
+ vui->vrings[msg.state.index].used->idx;
+
+ /* tell driver that we don't want interrupts */
+ vui->vrings[msg.state.index].used->flags = VRING_USED_F_NO_NOTIFY;
+ break;
+
+ case VHOST_USER_SET_OWNER:
+ DBG_SOCK ("if %d msg VHOST_USER_SET_OWNER", vui->hw_if_index);
+ break;
+
+ case VHOST_USER_RESET_OWNER:
+ DBG_SOCK ("if %d msg VHOST_USER_RESET_OWNER", vui->hw_if_index);
+ break;
+
+ case VHOST_USER_SET_VRING_CALL:
+ DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_CALL %d",
+ vui->hw_if_index, msg.u64);
+
+ q = (u8) (msg.u64 & 0xFF);
+
+ /* if there is old fd, delete and close it */
+ if (vui->vrings[q].callfd_idx != ~0)
+ {
+ clib_file_t *uf = pool_elt_at_index (file_main.file_pool,
+ vui->vrings[q].callfd_idx);
+ clib_file_del (&file_main, uf);
+ vui->vrings[q].callfd_idx = ~0;
+ }
+
+ if (!(msg.u64 & VHOST_USER_VRING_NOFD_MASK))
+ {
+ if (number_of_fds != 1)
+ {
+ DBG_SOCK ("More than one fd received !");
+ goto close_socket;
+ }
+
+ template.read_function = vhost_user_callfd_read_ready;
+ template.file_descriptor = fds[0];
+ template.private_data =
+ ((vui - vhost_user_main.vhost_user_interfaces) << 8) + q;
+ vui->vrings[q].callfd_idx = clib_file_add (&file_main, &template);
+ }
+ else
+ vui->vrings[q].callfd_idx = ~0;
+ break;
+
+ case VHOST_USER_SET_VRING_KICK:
+ DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_KICK %d",
+ vui->hw_if_index, msg.u64);
+
+ q = (u8) (msg.u64 & 0xFF);
+
+ if (vui->vrings[q].kickfd_idx != ~0)
+ {
+ clib_file_t *uf = pool_elt_at_index (file_main.file_pool,
+ vui->vrings[q].kickfd_idx);
+ clib_file_del (&file_main, uf);
+ vui->vrings[q].kickfd_idx = ~0;
+ }
+
+ if (!(msg.u64 & VHOST_USER_VRING_NOFD_MASK))
+ {
+ if (number_of_fds != 1)
+ {
+ DBG_SOCK ("More than one fd received !");
+ goto close_socket;
+ }
+
+ template.read_function = vhost_user_kickfd_read_ready;
+ template.file_descriptor = fds[0];
+ template.private_data =
+ (((uword) (vui - vhost_user_main.vhost_user_interfaces)) << 8) +
+ q;
+ vui->vrings[q].kickfd_idx = clib_file_add (&file_main, &template);
+ }
+ else
+ {
+ //When no kickfd is set, the queue is initialized as started
+ vui->vrings[q].kickfd_idx = ~0;
+ vui->vrings[q].started = 1;
+ }
+
+ break;
+
+ case VHOST_USER_SET_VRING_ERR:
+ DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_ERR %d",
+ vui->hw_if_index, msg.u64);
+
+ q = (u8) (msg.u64 & 0xFF);
+
+ if (vui->vrings[q].errfd != -1)
+ close (vui->vrings[q].errfd);
+
+ if (!(msg.u64 & VHOST_USER_VRING_NOFD_MASK))
+ {
+ if (number_of_fds != 1)
+ goto close_socket;
+
+ vui->vrings[q].errfd = fds[0];
+ }
+ else
+ vui->vrings[q].errfd = -1;
+
+ break;
+
+ case VHOST_USER_SET_VRING_BASE:
+ DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_BASE idx %d num %d",
+ vui->hw_if_index, msg.state.index, msg.state.num);
+
+ vui->vrings[msg.state.index].last_avail_idx = msg.state.num;
+ break;
+
+ case VHOST_USER_GET_VRING_BASE:
+ if (msg.state.index >= VHOST_VRING_MAX_N)
+ {
+ DBG_SOCK ("invalid vring index VHOST_USER_GET_VRING_BASE:"
+ " %d >= %d", msg.state.index, VHOST_VRING_MAX_N);
+ goto close_socket;
+ }
+
+ /*
+ * Copy last_avail_idx from the vring before closing it because
+ * closing the vring also initializes the vring last_avail_idx
+ */
+ msg.state.num = vui->vrings[msg.state.index].last_avail_idx;
+ msg.flags |= 4;
+ msg.size = sizeof (msg.state);
+
+ /* Spec says: Client must [...] stop ring upon receiving VHOST_USER_GET_VRING_BASE. */
+ vhost_user_vring_close (vui, msg.state.index);
+ DBG_SOCK ("if %d msg VHOST_USER_GET_VRING_BASE idx %d num %d",
+ vui->hw_if_index, msg.state.index, msg.state.num);
+ break;
+
+ case VHOST_USER_NONE:
+ DBG_SOCK ("if %d msg VHOST_USER_NONE", vui->hw_if_index);
+
+ break;
+
+ case VHOST_USER_SET_LOG_BASE:
+ {
+ DBG_SOCK ("if %d msg VHOST_USER_SET_LOG_BASE", vui->hw_if_index);
+
+ if (msg.size != sizeof (msg.log))
+ {
+ DBG_SOCK
+ ("invalid msg size for VHOST_USER_SET_LOG_BASE: %d instead of %d",
+ msg.size, sizeof (msg.log));
+ goto close_socket;
+ }
+
+ if (!
+ (vui->protocol_features & (1 << VHOST_USER_PROTOCOL_F_LOG_SHMFD)))
+ {
+ DBG_SOCK
+ ("VHOST_USER_PROTOCOL_F_LOG_SHMFD not set but VHOST_USER_SET_LOG_BASE received");
+ goto close_socket;
+ }
+
+ fd = fds[0];
+ /* align size to the page size of this fd (huge page if applicable) */
+ long page_sz = get_huge_page_size (fd);
+ ssize_t map_sz =
+ (msg.log.size + msg.log.offset + page_sz - 1) & ~(page_sz - 1);
+
+ vui->log_base_addr = mmap (0, map_sz, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
+
+ DBG_SOCK
+ ("map log region addr 0 len 0x%lx off 0x%lx fd %d mapped 0x%lx",
+ map_sz, msg.log.offset, fd, vui->log_base_addr);
+
+ if (vui->log_base_addr == MAP_FAILED)
+ {
+ clib_warning ("failed to map memory. errno is %d", errno);
+ goto close_socket;
+ }
+
+ vui->log_base_addr += msg.log.offset;
+ vui->log_size = msg.log.size;
+
+ msg.flags |= 4;
+ msg.size = sizeof (msg.u64);
+
+ break;
+ }
+
+ case VHOST_USER_SET_LOG_FD:
+ DBG_SOCK ("if %d msg VHOST_USER_SET_LOG_FD", vui->hw_if_index);
+
+ break;
+
+ case VHOST_USER_GET_PROTOCOL_FEATURES:
+ msg.flags |= 4;
+ msg.u64 = (1 << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |
+ (1 << VHOST_USER_PROTOCOL_F_MQ);
+ msg.size = sizeof (msg.u64);
+ DBG_SOCK
+ ("if %d msg VHOST_USER_GET_PROTOCOL_FEATURES - reply 0x%016llx",
+ vui->hw_if_index, msg.u64);
+ break;
+
+ case VHOST_USER_SET_PROTOCOL_FEATURES:
+ DBG_SOCK
+ ("if %d msg VHOST_USER_SET_PROTOCOL_FEATURES features 0x%016llx",
+ vui->hw_if_index, msg.u64);
+
+ vui->protocol_features = msg.u64;
+
+ break;
+
+ case VHOST_USER_GET_QUEUE_NUM:
+ msg.flags |= 4;
+ msg.u64 = VHOST_VRING_MAX_N;
+ msg.size = sizeof (msg.u64);
+ DBG_SOCK ("if %d msg VHOST_USER_GET_QUEUE_NUM - reply %d",
+ vui->hw_if_index, msg.u64);
+ break;
+
+ case VHOST_USER_SET_VRING_ENABLE:
+ DBG_SOCK ("if %d VHOST_USER_SET_VRING_ENABLE: %s queue %d",
+ vui->hw_if_index, msg.state.num ? "enable" : "disable",
+ msg.state.index);
+ if (msg.state.index >= VHOST_VRING_MAX_N)
+ {
+ DBG_SOCK ("invalid vring index VHOST_USER_SET_VRING_ENABLE:"
+ " %d >= %d", msg.state.index, VHOST_VRING_MAX_N);
+ goto close_socket;
+ }
+
+ vui->vrings[msg.state.index].enabled = msg.state.num;
+ break;
+
+ default:
+ DBG_SOCK ("unknown vhost-user message %d received. closing socket",
+ msg.request);
+ goto close_socket;
+ }
+
+ /* if we need to reply */
+ if (msg.flags & 4)
+ {
+ n =
+ send (uf->file_descriptor, &msg, VHOST_USER_MSG_HDR_SZ + msg.size, 0);
+ if (n != (msg.size + VHOST_USER_MSG_HDR_SZ))
+ {
+ DBG_SOCK ("could not send message response");
+ goto close_socket;
+ }
+ }
+
+ vhost_user_update_iface_state (vui);
+ vlib_worker_thread_barrier_release (vlib_get_main ());
+ return 0;
+
+close_socket:
+ vhost_user_if_disconnect (vui);
+ vhost_user_update_iface_state (vui);
+ vlib_worker_thread_barrier_release (vlib_get_main ());
+ return 0;
+}
+
+static clib_error_t *
+vhost_user_socket_error (clib_file_t * uf)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui =
+ pool_elt_at_index (vum->vhost_user_interfaces, uf->private_data);
+
+ DBG_SOCK ("socket error on if %d", vui->sw_if_index);
+ vlib_worker_thread_barrier_sync (vm);
+ vhost_user_if_disconnect (vui);
+ vhost_user_rx_thread_placement ();
+ vlib_worker_thread_barrier_release (vm);
+ return 0;
+}
+
+static clib_error_t *
+vhost_user_socksvr_accept_ready (clib_file_t * uf)
+{
+ int client_fd, client_len;
+ struct sockaddr_un client;
+ clib_file_t template = { 0 };
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui;
+
+ vui = pool_elt_at_index (vum->vhost_user_interfaces, uf->private_data);
+
+ client_len = sizeof (client);
+ client_fd = accept (uf->file_descriptor,
+ (struct sockaddr *) &client,
+ (socklen_t *) & client_len);
+
+ if (client_fd < 0)
+ return clib_error_return_unix (0, "accept");
+
+ DBG_SOCK ("New client socket for vhost interface %d", vui->sw_if_index);
+ template.read_function = vhost_user_socket_read;
+ template.error_function = vhost_user_socket_error;
+ template.file_descriptor = client_fd;
+ template.private_data = vui - vhost_user_main.vhost_user_interfaces;
+ vui->clib_file_index = clib_file_add (&file_main, &template);
+ return 0;
+}
+
+static clib_error_t *
+vhost_user_init (vlib_main_t * vm)
+{
+ clib_error_t *error;
+ vhost_user_main_t *vum = &vhost_user_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+
+ error = vlib_call_init_function (vm, ip4_init);
+ if (error)
+ return error;
+
+ vum->coalesce_frames = 32;
+ vum->coalesce_time = 1e-3;
+
+ vec_validate (vum->cpus, tm->n_vlib_mains - 1);
+
+ vhost_cpu_t *cpu;
+ vec_foreach (cpu, vum->cpus)
+ {
+ /* This is not strictly necessary as vec_validate already zeroes it.
+ * Keep the loop as a placeholder for future per-cpu initialization. */
+ cpu->rx_buffers_len = 0;
+ }
+
+ vum->random = random_default_seed ();
+
+ mhash_init_c_string (&vum->if_index_by_sock_name, sizeof (uword));
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (vhost_user_init);
+
+static u8 *
+format_vhost_trace (u8 * s, va_list * va)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
+ CLIB_UNUSED (vnet_main_t * vnm) = vnet_get_main ();
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_trace_t *t = va_arg (*va, vhost_trace_t *);
+ vhost_user_intf_t *vui = pool_elt_at_index (vum->vhost_user_interfaces,
+ t->device_index);
+
+ vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, vui->sw_if_index);
+
+ uword indent = format_get_indent (s);
+
+ s = format (s, "%U %U queue %d\n", format_white_space, indent,
+ format_vnet_sw_interface_name, vnm, sw, t->qid);
+
+ s = format (s, "%U virtio flags:\n", format_white_space, indent);
+#define _(n,i,st) \
+ if (t->virtio_ring_flags & (1 << VIRTIO_TRACE_F_##n)) \
+ s = format (s, "%U %s %s\n", format_white_space, indent, #n, st);
+ foreach_virtio_trace_flags
+#undef _
+ s = format (s, "%U virtio_net_hdr first_desc_len %u\n",
+ format_white_space, indent, t->first_desc_len);
+
+ s = format (s, "%U flags 0x%02x gso_type %u\n",
+ format_white_space, indent,
+ t->hdr.hdr.flags, t->hdr.hdr.gso_type);
+
+ if (vui->virtio_net_hdr_sz == 12)
+ s = format (s, "%U num_buff %u",
+ format_white_space, indent, t->hdr.num_buffers);
+
+ return s;
+}
+
+void
+vhost_user_rx_trace (vhost_trace_t * t,
+ vhost_user_intf_t * vui, u16 qid,
+ vlib_buffer_t * b, vhost_user_vring_t * txvq)
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+ u32 last_avail_idx = txvq->last_avail_idx;
+ u32 desc_current = txvq->avail->ring[last_avail_idx & txvq->qsz_mask];
+ vring_desc_t *hdr_desc = 0;
+ virtio_net_hdr_mrg_rxbuf_t *hdr;
+ u32 hint = 0;
+
+ memset (t, 0, sizeof (*t));
+ t->device_index = vui - vum->vhost_user_interfaces;
+ t->qid = qid;
+
+ hdr_desc = &txvq->desc[desc_current];
+ if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
+ {
+ t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
+ /* Header is the first here */
+ hdr_desc = map_guest_mem (vui, txvq->desc[desc_current].addr, &hint);
+ }
+ if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
+ {
+ t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
+ }
+ if (!(txvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
+ !(txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
+ {
+ t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
+ }
+
+ t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
+
+ if (!hdr_desc || !(hdr = map_guest_mem (vui, hdr_desc->addr, &hint)))
+ {
+ t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_MAP_ERROR;
+ }
+ else
+ {
+ u32 len = vui->virtio_net_hdr_sz;
+ memcpy (&t->hdr, hdr, len > hdr_desc->len ? hdr_desc->len : len);
+ }
+}
+
+static inline void
+vhost_user_send_call (vlib_main_t * vm, vhost_user_vring_t * vq)
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+ u64 x = 1;
+ int fd = UNIX_GET_FD (vq->callfd_idx);
+ int rv;
+
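+ /* Writing to the call eventfd asks the master to inject an interrupt
+ * into the guest for this virtqueue. */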
+ rv = write (fd, &x, sizeof (x));
+ if (rv <= 0)
+ {
+ clib_unix_warning
+ ("Error: Could not write to unix socket for callfd %d", fd);
+ return;
+ }
+
+ vq->n_since_last_int = 0;
+ vq->int_deadline = vlib_time_now (vm) + vum->coalesce_time;
+}
+
+static_always_inline u32
+vhost_user_input_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
+ u16 copy_len, u32 * map_hint)
+{
+ void *src0, *src1, *src2, *src3;
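+ /* Software-pipelined copy: map and prefetch guest sources [2] and [3]
+ * while copying [0] and [1], advancing two entries per iteration. */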
+ if (PREDICT_TRUE (copy_len >= 4))
+ {
+ if (PREDICT_FALSE (!(src2 = map_guest_mem (vui, cpy[0].src, map_hint))))
+ return 1;
+ if (PREDICT_FALSE (!(src3 = map_guest_mem (vui, cpy[1].src, map_hint))))
+ return 1;
+
+ while (PREDICT_TRUE (copy_len >= 4))
+ {
+ src0 = src2;
+ src1 = src3;
+
+ if (PREDICT_FALSE
+ (!(src2 = map_guest_mem (vui, cpy[2].src, map_hint))))
+ return 1;
+ if (PREDICT_FALSE
+ (!(src3 = map_guest_mem (vui, cpy[3].src, map_hint))))
+ return 1;
+
+ CLIB_PREFETCH (src2, 64, LOAD);
+ CLIB_PREFETCH (src3, 64, LOAD);
+
+ clib_memcpy ((void *) cpy[0].dst, src0, cpy[0].len);
+ clib_memcpy ((void *) cpy[1].dst, src1, cpy[1].len);
+ copy_len -= 2;
+ cpy += 2;
+ }
+ }
+ while (copy_len)
+ {
+ if (PREDICT_FALSE (!(src0 = map_guest_mem (vui, cpy->src, map_hint))))
+ return 1;
+ clib_memcpy ((void *) cpy->dst, src0, cpy->len);
+ copy_len -= 1;
+ cpy += 1;
+ }
+ return 0;
+}
+
+/**
+ * Try to discard packets from the tx ring (VPP RX path).
+ * Returns the number of discarded packets.
+ */
+u32
+vhost_user_rx_discard_packet (vlib_main_t * vm,
+ vhost_user_intf_t * vui,
+ vhost_user_vring_t * txvq, u32 discard_max)
+{
+ /*
+ * On the RX side, each packet corresponds to one descriptor
+ * (it is the same whether it is a simple, chained, or indirect descriptor).
+ * Therefore, discarding a packet is like discarding a descriptor.
+ */
+ u32 discarded_packets = 0;
+ u32 avail_idx = txvq->avail->idx;
+ while (discarded_packets != discard_max)
+ {
+ if (avail_idx == txvq->last_avail_idx)
+ goto out;
+
+ u16 desc_chain_head =
+ txvq->avail->ring[txvq->last_avail_idx & txvq->qsz_mask];
+ txvq->last_avail_idx++;
+ txvq->used->ring[txvq->last_used_idx & txvq->qsz_mask].id =
+ desc_chain_head;
+ txvq->used->ring[txvq->last_used_idx & txvq->qsz_mask].len = 0;
+ vhost_user_log_dirty_ring (vui, txvq,
+ ring[txvq->last_used_idx & txvq->qsz_mask]);
+ txvq->last_used_idx++;
+ discarded_packets++;
+ }
+
+out:
+ CLIB_MEMORY_BARRIER ();
+ txvq->used->idx = txvq->last_used_idx;
+ vhost_user_log_dirty_ring (vui, txvq, idx);
+ return discarded_packets;
+}
+
+/*
+ * In case of overflow, we need to rewind the array of allocated buffers.
+ */
+static void
+vhost_user_input_rewind_buffers (vlib_main_t * vm,
+ vhost_cpu_t * cpu, vlib_buffer_t * b_head)
+{
+ u32 bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
+ vlib_buffer_t *b_current = vlib_get_buffer (vm, bi_current);
+ b_current->current_length = 0;
+ b_current->flags = 0;
+ while (b_current != b_head)
+ {
+ cpu->rx_buffers_len++;
+ bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
+ b_current = vlib_get_buffer (vm, bi_current);
+ b_current->current_length = 0;
+ b_current->flags = 0;
+ }
+ cpu->rx_buffers_len++;
+}
+
+static u32
+vhost_user_if_input (vlib_main_t * vm,
+ vhost_user_main_t * vum,
+ vhost_user_intf_t * vui,
+ u16 qid, vlib_node_runtime_t * node,
+ vnet_hw_interface_rx_mode mode)
+{
+ vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
+ u16 n_rx_packets = 0;
+ u32 n_rx_bytes = 0;
+ u16 n_left;
+ u32 n_left_to_next, *to_next;
+ u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+ u32 n_trace = vlib_get_trace_count (vm, node);
+ u32 map_hint = 0;
+ u16 thread_index = vlib_get_thread_index ();
+ u16 copy_len = 0;
+
+ {
+ /* do we have pending interrupts ? */
+ vhost_user_vring_t *rxvq = &vui->vrings[VHOST_VRING_IDX_RX (qid)];
+ f64 now = vlib_time_now (vm);
+
+ if ((txvq->n_since_last_int) && (txvq->int_deadline < now))
+ vhost_user_send_call (vm, txvq);
+
+ if ((rxvq->n_since_last_int) && (rxvq->int_deadline < now))
+ vhost_user_send_call (vm, rxvq);
+ }
+
+ /*
+ * Adaptive mode is optimized to reduce interrupts.
+ * If the scheduler switches the input node to polling due
+ * to a burst of traffic, we tell the driver not to send interrupts.
+ * When the traffic subsides, the scheduler switches the node back to
+ * interrupt mode, and we must tell the driver we want interrupts again.
+ */
+ if (PREDICT_FALSE (mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE))
+ {
+ if ((node->flags &
+ VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE) ||
+ !(node->flags &
+ VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
+ /* Tell driver we want notification */
+ txvq->used->flags = 0;
+ else
+ /* Tell driver we don't want notification */
+ txvq->used->flags = VRING_USED_F_NO_NOTIFY;
+ }
+
+ if (PREDICT_FALSE (txvq->avail->flags & 0xFFFE))
+ return 0;
+
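+ /* Number of descriptors the guest made available that we have not
+ * consumed yet; u16 arithmetic handles index wrap-around. */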
+ n_left = (u16) (txvq->avail->idx - txvq->last_avail_idx);
+
+ /* nothing to do */
+ if (PREDICT_FALSE (n_left == 0))
+ return 0;
+
+ if (PREDICT_FALSE (!vui->admin_up || !(txvq->enabled)))
+ {
+ /*
+ * Discard input packet if interface is admin down or vring is not
+ * enabled.
+ * "For example, for a networking device, in the disabled state
+ * client must not supply any new RX packets, but must process
+ * and discard any TX packets."
+ */
+ vhost_user_rx_discard_packet (vm, vui, txvq,
+ VHOST_USER_DOWN_DISCARD_COUNT);
+ return 0;
+ }
+
+ if (PREDICT_FALSE (n_left == (txvq->qsz_mask + 1)))
+ {
+ /*
+ * Informational error logging when VPP is not
+ * receiving packets fast enough.
+ */
+ vlib_error_count (vm, node->node_index,
+ VHOST_USER_INPUT_FUNC_ERROR_FULL_RX_QUEUE, 1);
+ }
+
+ if (n_left > VLIB_FRAME_SIZE)
+ n_left = VLIB_FRAME_SIZE;
+
+ /*
+ * For small packets (<2kB), we will not need more than one vlib buffer
+ * per packet. If packets are bigger, we will simply yield at some point
+ * in the loop and come back later. This is not an issue since, for big
+ * packets, the processing cost really comes from the memory copy.
+ * The assumption is that big packets will fit in 40 buffers.
+ */
+ if (PREDICT_FALSE (vum->cpus[thread_index].rx_buffers_len < n_left + 1 ||
+ vum->cpus[thread_index].rx_buffers_len < 40))
+ {
+ u32 curr_len = vum->cpus[thread_index].rx_buffers_len;
+ vum->cpus[thread_index].rx_buffers_len +=
+ vlib_buffer_alloc_from_free_list (vm,
+ vum->cpus[thread_index].rx_buffers +
+ curr_len,
+ VHOST_USER_RX_BUFFERS_N - curr_len,
+ VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+
+ if (PREDICT_FALSE
+ (vum->cpus[thread_index].rx_buffers_len <
+ VHOST_USER_RX_BUFFER_STARVATION))
+ {
+ /* In case of buffer starvation, discard some packets from the queue
+ * and log the event.
+ * We keep doing best effort for the remaining packets. */
+ u32 flush = (n_left + 1 > vum->cpus[thread_index].rx_buffers_len) ?
+ n_left + 1 - vum->cpus[thread_index].rx_buffers_len : 1;
+ flush = vhost_user_rx_discard_packet (vm, vui, txvq, flush);
+
+ n_left -= flush;
+ vlib_increment_simple_counter (vnet_main.
+ interface_main.sw_if_counters +
+ VNET_INTERFACE_COUNTER_DROP,
+ vlib_get_thread_index (),
+ vui->sw_if_index, flush);
+
+ vlib_error_count (vm, vhost_user_input_node.index,
+ VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, flush);
+ }
+ }
+
+ while (n_left > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *b_head, *b_current;
+ u32 bi_current;
+ u16 desc_current;
+ u32 desc_data_offset;
+ vring_desc_t *desc_table = txvq->desc;
+
+ if (PREDICT_FALSE (vum->cpus[thread_index].rx_buffers_len <= 1))
+ {
+ /* Not enough rx_buffers
+ * Note: We yield at 1 so we don't need to do an additional
+ * check for the next buffer prefetch.
+ */
+ n_left = 0;
+ break;
+ }
+
+ desc_current =
+ txvq->avail->ring[txvq->last_avail_idx & txvq->qsz_mask];
+ vum->cpus[thread_index].rx_buffers_len--;
+ bi_current = (vum->cpus[thread_index].rx_buffers)
+ [vum->cpus[thread_index].rx_buffers_len];
+ b_head = b_current = vlib_get_buffer (vm, bi_current);
+ to_next[0] = bi_current; //We do that now so we can forget about bi_current
+ to_next++;
+ n_left_to_next--;
+
+ vlib_prefetch_buffer_with_index (vm,
+ (vum->
+ cpus[thread_index].rx_buffers)
+ [vum->cpus[thread_index].
+ rx_buffers_len - 1], LOAD);
+
+ /* Just preset the used descriptor id and length for later */
+ txvq->used->ring[txvq->last_used_idx & txvq->qsz_mask].id =
+ desc_current;
+ txvq->used->ring[txvq->last_used_idx & txvq->qsz_mask].len = 0;
+ vhost_user_log_dirty_ring (vui, txvq,
+ ring[txvq->last_used_idx &
+ txvq->qsz_mask]);
+
+ /* The buffer should already be initialized */
+ b_head->total_length_not_including_first_buffer = 0;
+ b_head->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
+
+ if (PREDICT_FALSE (n_trace))
+ {
+ //TODO: next_index is not exactly known at this point
+ vlib_trace_buffer (vm, node, next_index, b_head,
+ /* follow_chain */ 0);
+ vhost_trace_t *t0 =
+ vlib_add_trace (vm, node, b_head, sizeof (t0[0]));
+ vhost_user_rx_trace (t0, vui, qid, b_head, txvq);
+ n_trace--;
+ vlib_set_trace_count (vm, node, n_trace);
+ }
+
+ /* This depends on the setup but is very consistent,
+ * so the CPU branch predictor should do a pretty good job
+ * of optimizing the decision. */
+ if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
+ {
+ desc_table = map_guest_mem (vui, txvq->desc[desc_current].addr,
+ &map_hint);
+ desc_current = 0;
+ if (PREDICT_FALSE (desc_table == 0))
+ {
+ vlib_error_count (vm, node->node_index,
+ VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
+ goto out;
+ }
+ }
+
+ if (PREDICT_TRUE (vui->is_any_layout) ||
+ (!(desc_table[desc_current].flags & VIRTQ_DESC_F_NEXT)))
+ {
+ /* ANYLAYOUT or single buffer */
+ desc_data_offset = vui->virtio_net_hdr_sz;
+ }
+ else
+ {
+ /* CSR case without ANYLAYOUT, skip 1st buffer */
+ desc_data_offset = desc_table[desc_current].len;
+ }
+
+ while (1)
+ {
+ /* Get more input if necessary. Or end of packet. */
+ if (desc_data_offset == desc_table[desc_current].len)
+ {
+ if (PREDICT_FALSE (desc_table[desc_current].flags &
+ VIRTQ_DESC_F_NEXT))
+ {
+ desc_current = desc_table[desc_current].next;
+ desc_data_offset = 0;
+ }
+ else
+ {
+ goto out;
+ }
+ }
+
+ /* Get more output if necessary. Or end of packet. */
+ if (PREDICT_FALSE
+ (b_current->current_length == VLIB_BUFFER_DATA_SIZE))
+ {
+ if (PREDICT_FALSE
+ (vum->cpus[thread_index].rx_buffers_len == 0))
+ {
+ /* Cancel speculation */
+ to_next--;
+ n_left_to_next++;
+
+ /*
+ * Check whether any buffers are left.
+ * If not, just rewind the used buffers and stop.
+ * Note: Scheduled copies are not cancelled. This is
+ * not an issue as they would still be valid. Useless,
+ * but valid.
+ */
+ vhost_user_input_rewind_buffers (vm,
+ &vum->cpus
+ [thread_index],
+ b_head);
+ n_left = 0;
+ goto stop;
+ }
+
+ /* Get next output */
+ vum->cpus[thread_index].rx_buffers_len--;
+ u32 bi_next =
+ (vum->cpus[thread_index].rx_buffers)[vum->cpus
+ [thread_index].rx_buffers_len];
+ b_current->next_buffer = bi_next;
+ b_current->flags |= VLIB_BUFFER_NEXT_PRESENT;
+ bi_current = bi_next;
+ b_current = vlib_get_buffer (vm, bi_current);
+ }
+
+ /* Prepare a copy order executed later for the data */
+ vhost_copy_t *cpy = &vum->cpus[thread_index].copy[copy_len];
+ copy_len++;
+ u32 desc_data_l =
+ desc_table[desc_current].len - desc_data_offset;
+ cpy->len = VLIB_BUFFER_DATA_SIZE - b_current->current_length;
+ cpy->len = (cpy->len > desc_data_l) ? desc_data_l : cpy->len;
+ cpy->dst = (uword) (vlib_buffer_get_current (b_current) +
+ b_current->current_length);
+ cpy->src = desc_table[desc_current].addr + desc_data_offset;
+
+ desc_data_offset += cpy->len;
+
+ b_current->current_length += cpy->len;
+ b_head->total_length_not_including_first_buffer += cpy->len;
+ }
+
+ out:
+ CLIB_PREFETCH (&n_left, sizeof (n_left), LOAD);
+
+ n_rx_bytes += b_head->total_length_not_including_first_buffer;
+ n_rx_packets++;
+
+ b_head->total_length_not_including_first_buffer -=
+ b_head->current_length;
+
+ /* consume the descriptor and return it as used */
+ txvq->last_avail_idx++;
+ txvq->last_used_idx++;
+
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b_head);
+
+ vnet_buffer (b_head)->sw_if_index[VLIB_RX] = vui->sw_if_index;
+ vnet_buffer (b_head)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ b_head->error = 0;
+
+ {
+ u32 next0 = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+
+ /* redirect if feature path enabled */
+ vnet_feature_start_device_input_x1 (vui->sw_if_index, &next0,
+ b_head);
+
+ u32 bi = to_next[-1]; //Cannot use to_next[-1] in the macro
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi, next0);
+ }
+
+ n_left--;
+
+ /*
+ * Although separating memory copies from virtio ring parsing
+ * is beneficial, we can afford to perform the copies from time
+ * to time in order to free some space in the ring.
+ */
+ if (PREDICT_FALSE (copy_len >= VHOST_USER_RX_COPY_THRESHOLD))
+ {
+ if (PREDICT_FALSE
+ (vhost_user_input_copy (vui, vum->cpus[thread_index].copy,
+ copy_len, &map_hint)))
+ {
+ vlib_error_count (vm, node->node_index,
+ VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
+ }
+ copy_len = 0;
+
+ /* give buffers back to driver */
+ CLIB_MEMORY_BARRIER ();
+ txvq->used->idx = txvq->last_used_idx;
+ vhost_user_log_dirty_ring (vui, txvq, idx);
+ }
+ }
+ stop:
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ /* Do the memory copies */
+ if (PREDICT_FALSE
+ (vhost_user_input_copy (vui, vum->cpus[thread_index].copy,
+ copy_len, &map_hint)))
+ {
+ vlib_error_count (vm, node->node_index,
+ VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
+ }
+
+ /* give buffers back to driver */
+ CLIB_MEMORY_BARRIER ();
+ txvq->used->idx = txvq->last_used_idx;
+ vhost_user_log_dirty_ring (vui, txvq, idx);
+
+ /* interrupt (call) handling */
+ if ((txvq->callfd_idx != ~0) &&
+ !(txvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
+ {
+ txvq->n_since_last_int += n_rx_packets;
+
+ if (txvq->n_since_last_int > vum->coalesce_frames)
+ vhost_user_send_call (vm, txvq);
+ }
+
+ /* increase rx counters */
+ vlib_increment_combined_counter
+ (vnet_main.interface_main.combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ vlib_get_thread_index (), vui->sw_if_index, n_rx_packets, n_rx_bytes);
+
+ vnet_device_increment_rx_packets (thread_index, n_rx_packets);
+
+ return n_rx_packets;
+}
+
+static uword
+vhost_user_input (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * f)
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+ uword n_rx_packets = 0;
+ vhost_user_intf_t *vui;
+ vnet_device_input_runtime_t *rt =
+ (vnet_device_input_runtime_t *) node->runtime_data;
+ vnet_device_and_queue_t *dq;
+
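+ /* Service every interface/queue assigned to this thread; in interrupt
+ * mode, only queues with a pending interrupt are processed. */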
+ vec_foreach (dq, rt->devices_and_queues)
+ {
+ if (clib_smp_swap (&dq->interrupt_pending, 0) ||
+ (node->state == VLIB_NODE_STATE_POLLING))
+ {
+ vui =
+ pool_elt_at_index (vum->vhost_user_interfaces, dq->dev_instance);
+ n_rx_packets = vhost_user_if_input (vm, vum, vui, dq->queue_id, node,
+ dq->mode);
+ }
+ }
+
+ return n_rx_packets;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (vhost_user_input_node) = {
+ .function = vhost_user_input,
+ .type = VLIB_NODE_TYPE_INPUT,
+ .name = "vhost-user-input",
+ .sibling_of = "device-input",
+
+ /* Will be enabled if/when hardware is detected. */
+ .state = VLIB_NODE_STATE_DISABLED,
+
+ .format_buffer = format_ethernet_header_with_length,
+ .format_trace = format_vhost_trace,
+
+ .n_errors = VHOST_USER_INPUT_FUNC_N_ERROR,
+ .error_strings = vhost_user_input_func_error_strings,
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (vhost_user_input_node, vhost_user_input)
+/* *INDENT-ON* */
+
+
+void
+vhost_user_tx_trace (vhost_trace_t * t,
+ vhost_user_intf_t * vui, u16 qid,
+ vlib_buffer_t * b, vhost_user_vring_t * rxvq)
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+ u32 last_avail_idx = rxvq->last_avail_idx;
+ u32 desc_current = rxvq->avail->ring[last_avail_idx & rxvq->qsz_mask];
+ vring_desc_t *hdr_desc = 0;
+ u32 hint = 0;
+
+ memset (t, 0, sizeof (*t));
+ t->device_index = vui - vum->vhost_user_interfaces;
+ t->qid = qid;
+
+ hdr_desc = &rxvq->desc[desc_current];
+ if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
+ {
+ t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
+ /* Header is the first here */
+ hdr_desc = map_guest_mem (vui, rxvq->desc[desc_current].addr, &hint);
+ }
+ if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
+ {
+ t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
+ }
+ if (!(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
+ !(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
+ {
+ t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
+ }
+
+ t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
+}
+
+static_always_inline u32
+vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
+ u16 copy_len, u32 * map_hint)
+{
+ void *dst0, *dst1, *dst2, *dst3;
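+ /* Mirror of vhost_user_input_copy: pipeline guest destination mappings
+ * two entries ahead while copying and logging dirty pages. */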
+ if (PREDICT_TRUE (copy_len >= 4))
+ {
+ if (PREDICT_FALSE (!(dst2 = map_guest_mem (vui, cpy[0].dst, map_hint))))
+ return 1;
+ if (PREDICT_FALSE (!(dst3 = map_guest_mem (vui, cpy[1].dst, map_hint))))
+ return 1;
+ while (PREDICT_TRUE (copy_len >= 4))
+ {
+ dst0 = dst2;
+ dst1 = dst3;
+
+ if (PREDICT_FALSE
+ (!(dst2 = map_guest_mem (vui, cpy[2].dst, map_hint))))
+ return 1;
+ if (PREDICT_FALSE
+ (!(dst3 = map_guest_mem (vui, cpy[3].dst, map_hint))))
+ return 1;
+
+ CLIB_PREFETCH ((void *) cpy[2].src, 64, LOAD);
+ CLIB_PREFETCH ((void *) cpy[3].src, 64, LOAD);
+
+ clib_memcpy (dst0, (void *) cpy[0].src, cpy[0].len);
+ clib_memcpy (dst1, (void *) cpy[1].src, cpy[1].len);
+
+ vhost_user_log_dirty_pages_2 (vui, cpy[0].dst, cpy[0].len, 1);
+ vhost_user_log_dirty_pages_2 (vui, cpy[1].dst, cpy[1].len, 1);
+ copy_len -= 2;
+ cpy += 2;
+ }
+ }
+ while (copy_len)
+ {
+ if (PREDICT_FALSE (!(dst0 = map_guest_mem (vui, cpy->dst, map_hint))))
+ return 1;
+ clib_memcpy (dst0, (void *) cpy->src, cpy->len);
+ vhost_user_log_dirty_pages_2 (vui, cpy->dst, cpy->len, 1);
+ copy_len -= 1;
+ cpy += 1;
+ }
+ return 0;
+}
+
+
+static uword
+vhost_user_tx (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
+{
+ u32 *buffers = vlib_frame_args (frame);
+ u32 n_left = frame->n_vectors;
+ vhost_user_main_t *vum = &vhost_user_main;
+ vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
+ vhost_user_intf_t *vui =
+ pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
+ u32 qid = ~0;
+ vhost_user_vring_t *rxvq;
+ u8 error;
+ u32 thread_index = vlib_get_thread_index ();
+ u32 map_hint = 0;
+ u8 retry = 8;
+ u16 copy_len;
+ u16 tx_headers_len;
+
+ if (PREDICT_FALSE (!vui->admin_up))
+ {
+ error = VHOST_USER_TX_FUNC_ERROR_DOWN;
+ goto done3;
+ }
+
+ if (PREDICT_FALSE (!vui->is_up))
+ {
+ error = VHOST_USER_TX_FUNC_ERROR_NOT_READY;
+ goto done3;
+ }
+
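+ /* Pick the guest RX virtqueue assigned to this worker thread by
+ * vhost_user_tx_thread_placement(), and lock it if TX spinlocking is
+ * required. */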
+ qid =
+ VHOST_VRING_IDX_RX (*vec_elt_at_index
+ (vui->per_cpu_tx_qid, thread_index));
+ rxvq = &vui->vrings[qid];
+ if (PREDICT_FALSE (vui->use_tx_spinlock))
+ vhost_user_vring_lock (vui, qid);
+
+retry:
+ error = VHOST_USER_TX_FUNC_ERROR_NONE;
+ tx_headers_len = 0;
+ copy_len = 0;
+ while (n_left > 0)
+ {
+ vlib_buffer_t *b0, *current_b0;
+ u16 desc_head, desc_index, desc_len;
+ vring_desc_t *desc_table;
+ uword buffer_map_addr;
+ u32 buffer_len;
+ u16 bytes_left;
+
+ if (PREDICT_TRUE (n_left > 1))
+ vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);
+
+ b0 = vlib_get_buffer (vm, buffers[0]);
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vum->cpus[thread_index].current_trace =
+ vlib_add_trace (vm, node, b0,
+ sizeof (*vum->cpus[thread_index].current_trace));
+ vhost_user_tx_trace (vum->cpus[thread_index].current_trace,
+ vui, qid / 2, b0, rxvq);
+ }
+
+ if (PREDICT_FALSE (rxvq->last_avail_idx == rxvq->avail->idx))
+ {
+ error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
+ goto done;
+ }
+
+ desc_table = rxvq->desc;
+ desc_head = desc_index =
+ rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];
+
+ /* Go deeper in case of an indirect descriptor.
+ * No known driver provides indirect descriptors for RX. */
+ if (PREDICT_FALSE (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
+ {
+ if (PREDICT_FALSE
+ (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
+ {
+ error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
+ goto done;
+ }
+ if (PREDICT_FALSE
+ (!(desc_table =
+ map_guest_mem (vui, rxvq->desc[desc_index].addr,
+ &map_hint))))
+ {
+ error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
+ goto done;
+ }
+ desc_index = 0;
+ }
+
+ desc_len = vui->virtio_net_hdr_sz;
+ buffer_map_addr = desc_table[desc_index].addr;
+ buffer_len = desc_table[desc_index].len;
+
+ {
+ // Get a header from the header array
+ virtio_net_hdr_mrg_rxbuf_t *hdr =
+ &vum->cpus[thread_index].tx_headers[tx_headers_len];
+ tx_headers_len++;
+ hdr->hdr.flags = 0;
+ hdr->hdr.gso_type = 0;
+ hdr->num_buffers = 1; //This is local, no need to check
+
+ // Prepare a copy order executed later for the header
+ vhost_copy_t *cpy = &vum->cpus[thread_index].copy[copy_len];
+ copy_len++;
+ cpy->len = vui->virtio_net_hdr_sz;
+ cpy->dst = buffer_map_addr;
+ cpy->src = (uword) hdr;
+ }
+
+ buffer_map_addr += vui->virtio_net_hdr_sz;
+ buffer_len -= vui->virtio_net_hdr_sz;
+ bytes_left = b0->current_length;
+ current_b0 = b0;
+ while (1)
+ {
+ if (buffer_len == 0)
+ { //Get new output
+ if (desc_table[desc_index].flags & VIRTQ_DESC_F_NEXT)
+ {
+ //Next one is chained
+ desc_index = desc_table[desc_index].next;
+ buffer_map_addr = desc_table[desc_index].addr;
+ buffer_len = desc_table[desc_index].len;
+ }
+ else if (vui->virtio_net_hdr_sz == 12) //MRG is available
+ {
+ virtio_net_hdr_mrg_rxbuf_t *hdr =
+ &vum->cpus[thread_index].tx_headers[tx_headers_len - 1];
+
+ //Move from available to used buffer
+ rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id =
+ desc_head;
+ rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len =
+ desc_len;
+ vhost_user_log_dirty_ring (vui, rxvq,
+ ring[rxvq->last_used_idx &
+ rxvq->qsz_mask]);
+
+ rxvq->last_avail_idx++;
+ rxvq->last_used_idx++;
+ hdr->num_buffers++;
+ desc_len = 0;
+
+ if (PREDICT_FALSE
+ (rxvq->last_avail_idx == rxvq->avail->idx))
+ {
+ //Dequeue queued descriptors for this packet
+ rxvq->last_used_idx -= hdr->num_buffers - 1;
+ rxvq->last_avail_idx -= hdr->num_buffers - 1;
+ error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
+ goto done;
+ }
+
+ desc_table = rxvq->desc;
+ desc_head = desc_index =
+ rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];
+ if (PREDICT_FALSE
+ (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
+ {
+ //It is seriously unlikely that a driver will put an indirect
+ //descriptor after a non-indirect descriptor.
+ if (PREDICT_FALSE
+ (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
+ {
+ error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
+ goto done;
+ }
+ if (PREDICT_FALSE
+ (!(desc_table =
+ map_guest_mem (vui,
+ rxvq->desc[desc_index].addr,
+ &map_hint))))
+ {
+ error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
+ goto done;
+ }
+ desc_index = 0;
+ }
+ buffer_map_addr = desc_table[desc_index].addr;
+ buffer_len = desc_table[desc_index].len;
+ }
+ else
+ {
+ error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;
+ goto done;
+ }
+ }
+
+ {
+ vhost_copy_t *cpy = &vum->cpus[thread_index].copy[copy_len];
+ copy_len++;
+ cpy->len = bytes_left;
+ cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
+ cpy->dst = buffer_map_addr;
+ cpy->src = (uword) vlib_buffer_get_current (current_b0) +
+ current_b0->current_length - bytes_left;
+
+ bytes_left -= cpy->len;
+ buffer_len -= cpy->len;
+ buffer_map_addr += cpy->len;
+ desc_len += cpy->len;
+
+ CLIB_PREFETCH (&rxvq->desc, CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ // Check if vlib buffer has more data. If not, get more or break.
+ if (PREDICT_TRUE (!bytes_left))
+ {
+ if (PREDICT_FALSE
+ (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT))
+ {
+ current_b0 = vlib_get_buffer (vm, current_b0->next_buffer);
+ bytes_left = current_b0->current_length;
+ }
+ else
+ {
+ //End of packet
+ break;
+ }
+ }
+ }
+
+ //Move from available to used ring
+ rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id = desc_head;
+ rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len = desc_len;
+ vhost_user_log_dirty_ring (vui, rxvq,
+ ring[rxvq->last_used_idx & rxvq->qsz_mask]);
+ rxvq->last_avail_idx++;
+ rxvq->last_used_idx++;
+
+ if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ vum->cpus[thread_index].current_trace->hdr =
+ vum->cpus[thread_index].tx_headers[tx_headers_len - 1];
+ }
+
+ n_left--; //At the end for error counting when 'goto done' is invoked
+
+ /*
+ * Do the copy periodically to prevent
+ * vum->cpus[thread_index].copy array overflow and corrupt memory
+ */
+ if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD))
+ {
+ if (PREDICT_FALSE
+ (vhost_user_tx_copy (vui, vum->cpus[thread_index].copy,
+ copy_len, &map_hint)))
+ {
+ vlib_error_count (vm, node->node_index,
+ VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
+ }
+ copy_len = 0;
+
+ /* give buffers back to driver */
+ CLIB_MEMORY_BARRIER ();
+ rxvq->used->idx = rxvq->last_used_idx;
+ vhost_user_log_dirty_ring (vui, rxvq, idx);
+ }
+ buffers++;
+ }
+
+done:
+ //Do the memory copies
+ if (PREDICT_FALSE
+ (vhost_user_tx_copy (vui, vum->cpus[thread_index].copy,
+ copy_len, &map_hint)))
+ {
+ vlib_error_count (vm, node->node_index,
+ VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
+ }
+
+ CLIB_MEMORY_BARRIER ();
+ rxvq->used->idx = rxvq->last_used_idx;
+ vhost_user_log_dirty_ring (vui, rxvq, idx);
+
+ /*
+ * When n_left is set, error is always set to something too.
+ * In case the error is due to a lack of remaining buffers, we go back up
+ * and retry.
+ * The idea is that it is better to waste some time on packets
+ * that have already been processed than to drop them and fetch
+ * more fresh packets with a good likelihood that they will be dropped too.
+ * This technique also gives the VM driver more time to pick up packets.
+ * In case the traffic flows from physical to virtual interfaces, this
+ * technique ends up leveraging the physical NIC buffer in order to
+ * absorb the VM's CPU jitter.
+ */
+ if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)
+ {
+ retry--;
+ goto retry;
+ }
+
+ /* interrupt (call) handling */
+ if ((rxvq->callfd_idx != ~0) &&
+ !(rxvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
+ {
+ rxvq->n_since_last_int += frame->n_vectors - n_left;
+
+ if (rxvq->n_since_last_int > vum->coalesce_frames)
+ vhost_user_send_call (vm, rxvq);
+ }
+
+ vhost_user_vring_unlock (vui, qid);
+
+done3:
+ if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
+ {
+ vlib_error_count (vm, node->node_index, error, n_left);
+ vlib_increment_simple_counter
+ (vnet_main.interface_main.sw_if_counters
+ + VNET_INTERFACE_COUNTER_DROP,
+ thread_index, vui->sw_if_index, n_left);
+ }
+
+ vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
+ return frame->n_vectors;
+}
+
+static uword
+vhost_user_send_interrupt_process (vlib_main_t * vm,
+ vlib_node_runtime_t * rt, vlib_frame_t * f)
+{
+ vhost_user_intf_t *vui;
+ f64 timeout = 3153600000.0 /* 100 years */ ;
+ uword event_type, *event_data = 0;
+ vhost_user_main_t *vum = &vhost_user_main;
+ u16 *queue;
+ f64 now, poll_time_remaining;
+ f64 next_timeout;
+ u8 stop_timer = 0;
+
+ while (1)
+ {
+ poll_time_remaining =
+ vlib_process_wait_for_event_or_clock (vm, timeout);
+ event_type = vlib_process_get_events (vm, &event_data);
+ vec_reset_length (event_data);
+
+ /*
+ * Use the remaining timeout if it is less than coalesce time to avoid
+ * resetting the existing timer in the middle of expiration
+ */
+ timeout = poll_time_remaining;
+ if (vlib_process_suspend_time_is_zero (timeout) ||
+ (timeout > vum->coalesce_time))
+ timeout = vum->coalesce_time;
+
+ now = vlib_time_now (vm);
+ switch (event_type)
+ {
+ case VHOST_USER_EVENT_STOP_TIMER:
+ stop_timer = 1;
+ break;
+
+ case VHOST_USER_EVENT_START_TIMER:
+ stop_timer = 0;
+ if (!vlib_process_suspend_time_is_zero (poll_time_remaining))
+ break;
+ /* fall through */
+
+ case ~0:
+ /* *INDENT-OFF* */
+ pool_foreach (vui, vum->vhost_user_interfaces, {
+ next_timeout = timeout;
+ vec_foreach (queue, vui->rx_queues)
+ {
+ vhost_user_vring_t *rxvq =
+ &vui->vrings[VHOST_VRING_IDX_RX (*queue)];
+ vhost_user_vring_t *txvq =
+ &vui->vrings[VHOST_VRING_IDX_TX (*queue)];
+
+ if (txvq->n_since_last_int)
+ {
+ if (now >= txvq->int_deadline)
+ vhost_user_send_call (vm, txvq);
+ else
+ next_timeout = txvq->int_deadline - now;
+ }
+
+ if (rxvq->n_since_last_int)
+ {
+ if (now >= rxvq->int_deadline)
+ vhost_user_send_call (vm, rxvq);
+ else
+ next_timeout = rxvq->int_deadline - now;
+ }
+
+ if ((next_timeout < timeout) && (next_timeout > 0.0))
+ timeout = next_timeout;
+ }
+ });
+ /* *INDENT-ON* */
+ break;
+
+ default:
+ clib_warning ("BUG: unhandled event type %d", event_type);
+ break;
+ }
+ /* No less than 1 millisecond */
+ if (timeout < 1e-3)
+ timeout = 1e-3;
+ if (stop_timer)
+ timeout = 3153600000.0;
+ }
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (vhost_user_send_interrupt_node,static) = {
+ .function = vhost_user_send_interrupt_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "vhost-user-send-interrupt-process",
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+vhost_user_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index,
+ u32 qid, vnet_hw_interface_rx_mode mode)
+{
+ vlib_main_t *vm = vnm->vlib_main;
+ vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui =
+ pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
+ vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
+
+ if ((mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
+ (mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE))
+ {
+ if (txvq->kickfd_idx == ~0)
+ {
+ // We cannot support interrupt mode if the driver opts out
+ return clib_error_return (0, "Driver does not support interrupt");
+ }
+ if (txvq->mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
+ {
+ vum->ifq_count++;
+ // Start the timer if this is the first encounter on interrupt
+ // interface/queue
+ if ((vum->ifq_count == 1) &&
+ (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
+ vlib_process_signal_event (vm,
+ vhost_user_send_interrupt_node.index,
+ VHOST_USER_EVENT_START_TIMER, 0);
+ }
+ }
+ else if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
+ {
+ if (((txvq->mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
+ (txvq->mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE)) &&
+ vum->ifq_count)
+ {
+ vum->ifq_count--;
+ // Stop the timer if there is no more interrupt interface/queue
+ if ((vum->ifq_count == 0) &&
+ (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
+ vlib_process_signal_event (vm,
+ vhost_user_send_interrupt_node.index,
+ VHOST_USER_EVENT_STOP_TIMER, 0);
+ }
+ }
+
+ txvq->mode = mode;
+ if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
+ txvq->used->flags = VRING_USED_F_NO_NOTIFY;
+ else if ((mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE) ||
+ (mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT))
+ txvq->used->flags = 0;
+ else
+ {
+ clib_warning ("BUG: unhandled mode %d changed for if %d queue %d", mode,
+ hw_if_index, qid);
+ return clib_error_return (0, "unsupported");
+ }
+
+ return 0;
+}
+
+static clib_error_t *
+vhost_user_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
+ u32 flags)
+{
+ vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
+ uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui =
+ pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
+
+ vui->admin_up = is_up;
+
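+ /* Raise the link only if the vhost session is also ready; otherwise the
+ * link comes up later from vhost_user_update_iface_state(). */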
+ if (is_up && vui->is_up)
+ vnet_hw_interface_set_flags (vnm, vui->hw_if_index,
+ VNET_HW_INTERFACE_FLAG_LINK_UP);
+
+ return /* no error */ 0;
+}
+
+/* *INDENT-OFF* */
+VNET_DEVICE_CLASS (vhost_user_dev_class,static) = {
+ .name = "vhost-user",
+ .tx_function = vhost_user_tx,
+ .tx_function_n_errors = VHOST_USER_TX_FUNC_N_ERROR,
+ .tx_function_error_strings = vhost_user_tx_func_error_strings,
+ .format_device_name = format_vhost_user_interface_name,
+ .name_renumber = vhost_user_name_renumber,
+ .admin_up_down_function = vhost_user_interface_admin_up_down,
+ .rx_mode_change_function = vhost_user_interface_rx_mode_change,
+ .format_tx_trace = format_vhost_trace,
+};
+
+VLIB_DEVICE_TX_FUNCTION_MULTIARCH (vhost_user_dev_class,
+ vhost_user_tx)
+/* *INDENT-ON* */
+
+static uword
+vhost_user_process (vlib_main_t * vm,
+ vlib_node_runtime_t * rt, vlib_frame_t * f)
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui;
+ struct sockaddr_un sun;
+ int sockfd;
+ clib_file_t template = { 0 };
+ f64 timeout = 3153600000.0 /* 100 years */ ;
+ uword *event_data = 0;
+
+ sockfd = -1;
+ sun.sun_family = AF_UNIX;
+ template.read_function = vhost_user_socket_read;
+ template.error_function = vhost_user_socket_error;
+
+ while (1)
+ {
+ vlib_process_wait_for_event_or_clock (vm, timeout);
+ vlib_process_get_events (vm, &event_data);
+ vec_reset_length (event_data);
+
+ timeout = 3.0;
+
+ /* *INDENT-OFF* */
+ pool_foreach (vui, vum->vhost_user_interfaces, {
+
+ if (vui->unix_server_index == ~0) { //Nothing to do for server sockets
+ if (vui->clib_file_index == ~0)
+ {
+ if ((sockfd < 0) &&
+ ((sockfd = socket (AF_UNIX, SOCK_STREAM, 0)) < 0))
+ {
+ /*
+ * 1st time error or new error for this interface,
+ * spit out the message and record the error
+ */
+ if (!vui->sock_errno || (vui->sock_errno != errno))
+ {
+ clib_unix_warning
+ ("Error: Could not open unix socket for %s",
+ vui->sock_filename);
+ vui->sock_errno = errno;
+ }
+ continue;
+ }
+
+ /* try to connect */
+ strncpy (sun.sun_path, (char *) vui->sock_filename,
+ sizeof (sun.sun_path) - 1);
+
+ /* Avoid hanging VPP if the other end does not accept */
+ if (fcntl(sockfd, F_SETFL, O_NONBLOCK) < 0)
+ clib_unix_warning ("fcntl");
+
+ if (connect (sockfd, (struct sockaddr *) &sun,
+ sizeof (struct sockaddr_un)) == 0)
+ {
+ /* Set the socket to blocking as it was before */
+ if (fcntl(sockfd, F_SETFL, 0) < 0)
+ clib_unix_warning ("fcntl2");
+
+ vui->sock_errno = 0;
+ template.file_descriptor = sockfd;
+ template.private_data =
+ vui - vhost_user_main.vhost_user_interfaces;
+ vui->clib_file_index = clib_file_add (&file_main, &template);
+
+ /* This sockfd is considered consumed */
+ sockfd = -1;
+ }
+ else
+ {
+ vui->sock_errno = errno;
+ }
+ }
+ else
+ {
+ /* check if socket is alive */
+ int error = 0;
+ socklen_t len = sizeof (error);
+ int fd = UNIX_GET_FD(vui->clib_file_index);
+ int retval =
+ getsockopt (fd, SOL_SOCKET, SO_ERROR, &error, &len);
+
+ if (retval)
+ {
+ DBG_SOCK ("getsockopt returned %d", retval);
+ vhost_user_if_disconnect (vui);
+ }
+ }
+ }
+ });
+ /* *INDENT-ON* */
+ }
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (vhost_user_process_node,static) = {
+ .function = vhost_user_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "vhost-user-process",
+};
+/* *INDENT-ON* */
+
+/**
+ * Disable and reset the interface structure.
+ * It can then either be initialized again or removed from the set of used
+ * interfaces.
+ */
+static void
+vhost_user_term_if (vhost_user_intf_t * vui)
+{
+ int q;
+ vhost_user_main_t *vum = &vhost_user_main;
+
+ // disconnect interface sockets
+ vhost_user_if_disconnect (vui);
+ vhost_user_update_iface_state (vui);
+
+ for (q = 0; q < VHOST_VRING_MAX_N; q++)
+ {
+ clib_mem_free ((void *) vui->vring_locks[q]);
+ }
+
+ if (vui->unix_server_index != ~0)
+ {
+ //Close server socket
+ clib_file_t *uf = pool_elt_at_index (file_main.file_pool,
+ vui->unix_server_index);
+ clib_file_del (&file_main, uf);
+ vui->unix_server_index = ~0;
+ unlink (vui->sock_filename);
+ }
+
+ mhash_unset (&vum->if_index_by_sock_name, vui->sock_filename,
+ &vui->if_index);
+}
+
+int
+vhost_user_delete_if (vnet_main_t * vnm, vlib_main_t * vm, u32 sw_if_index)
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui;
+ int rv = 0;
+ vnet_hw_interface_t *hwif;
+ u16 *queue;
+
+ if (!(hwif = vnet_get_sup_hw_interface (vnm, sw_if_index)) ||
+ hwif->dev_class_index != vhost_user_dev_class.index)
+ return VNET_API_ERROR_INVALID_SW_IF_INDEX;
+
+ DBG_SOCK ("Deleting vhost-user interface %s (instance %d)",
+ hwif->name, hwif->dev_instance);
+
+ vui = pool_elt_at_index (vum->vhost_user_interfaces, hwif->dev_instance);
+
+ vec_foreach (queue, vui->rx_queues)
+ {
+ vhost_user_vring_t *txvq;
+
+ txvq = &vui->vrings[VHOST_VRING_IDX_TX (*queue)];
+ if ((vum->ifq_count > 0) &&
+ ((txvq->mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
+ (txvq->mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE)))
+ {
+ vum->ifq_count--;
+	// Stop the timer if there are no more interrupt interfaces/queues
+ if ((vum->ifq_count == 0) &&
+ (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
+ {
+ vlib_process_signal_event (vm,
+ vhost_user_send_interrupt_node.index,
+ VHOST_USER_EVENT_STOP_TIMER, 0);
+ break;
+ }
+ }
+ }
+
+ // Disable and reset interface
+ vhost_user_term_if (vui);
+
+ // Reset renumbered iface
+ if (hwif->dev_instance <
+ vec_len (vum->show_dev_instance_by_real_dev_instance))
+ vum->show_dev_instance_by_real_dev_instance[hwif->dev_instance] = ~0;
+
+ // Delete ethernet interface
+ ethernet_delete_interface (vnm, vui->hw_if_index);
+
+ // Back to pool
+ pool_put (vum->vhost_user_interfaces, vui);
+
+ return rv;
+}
+
+static clib_error_t *
+vhost_user_exit (vlib_main_t * vm)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui;
+
+ vlib_worker_thread_barrier_sync (vlib_get_main ());
+ /* *INDENT-OFF* */
+ pool_foreach (vui, vum->vhost_user_interfaces, {
+ vhost_user_delete_if (vnm, vm, vui->sw_if_index);
+ });
+ /* *INDENT-ON* */
+ vlib_worker_thread_barrier_release (vlib_get_main ());
+ return 0;
+}
+
+VLIB_MAIN_LOOP_EXIT_FUNCTION (vhost_user_exit);
+
+/**
+ * Open a server unix socket on the specified sock_filename.
+ */
+static int
+vhost_user_init_server_sock (const char *sock_filename, int *sock_fd)
+{
+ int rv = 0;
+ struct sockaddr_un un = { };
+ int fd;
+ /* create listening socket */
+ if ((fd = socket (AF_UNIX, SOCK_STREAM, 0)) < 0)
+ return VNET_API_ERROR_SYSCALL_ERROR_1;
+
+ un.sun_family = AF_UNIX;
+ strncpy ((char *) un.sun_path, (char *) sock_filename,
+ sizeof (un.sun_path) - 1);
+
+ /* remove if exists */
+ unlink ((char *) sock_filename);
+
+ if (bind (fd, (struct sockaddr *) &un, sizeof (un)) == -1)
+ {
+ rv = VNET_API_ERROR_SYSCALL_ERROR_2;
+ goto error;
+ }
+
+ if (listen (fd, 1) == -1)
+ {
+ rv = VNET_API_ERROR_SYSCALL_ERROR_3;
+ goto error;
+ }
+
+ *sock_fd = fd;
+ return 0;
+
+error:
+ close (fd);
+ return rv;
+}
+
+/**
+ * Create an ethernet interface for the vhost-user interface.
+ */
+static void
+vhost_user_create_ethernet (vnet_main_t * vnm, vlib_main_t * vm,
+ vhost_user_intf_t * vui, u8 * hwaddress)
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+ u8 hwaddr[6];
+ clib_error_t *error;
+
+ /* create hw and sw interface */
+ if (hwaddress)
+ {
+ clib_memcpy (hwaddr, hwaddress, 6);
+ }
+ else
+ {
+ random_u32 (&vum->random);
+ clib_memcpy (hwaddr + 2, &vum->random, sizeof (vum->random));
+ hwaddr[0] = 2;
+ hwaddr[1] = 0xfe;
+ }
+
+ error = ethernet_register_interface
+ (vnm,
+ vhost_user_dev_class.index,
+ vui - vum->vhost_user_interfaces /* device instance */ ,
+ hwaddr /* ethernet address */ ,
+ &vui->hw_if_index, 0 /* flag change */ );
+
+ if (error)
+ clib_error_report (error);
+
+ vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, vui->hw_if_index);
+ hi->max_l3_packet_bytes[VLIB_RX] = hi->max_l3_packet_bytes[VLIB_TX] = 9000;
+}
+
+/*
+ * Initialize vui with the specified attributes
+ */
+static void
+vhost_user_vui_init (vnet_main_t * vnm,
+ vhost_user_intf_t * vui,
+ int server_sock_fd,
+ const char *sock_filename,
+ u64 feature_mask, u32 * sw_if_index)
+{
+ vnet_sw_interface_t *sw;
+ int q;
+ vhost_user_main_t *vum = &vhost_user_main;
+ vnet_hw_interface_t *hw;
+
+ hw = vnet_get_hw_interface (vnm, vui->hw_if_index);
+ sw = vnet_get_hw_sw_interface (vnm, vui->hw_if_index);
+ if (server_sock_fd != -1)
+ {
+ clib_file_t template = { 0 };
+ template.read_function = vhost_user_socksvr_accept_ready;
+ template.file_descriptor = server_sock_fd;
+      template.private_data = vui - vum->vhost_user_interfaces;	// vhost_user_interfaces pool index
+ vui->unix_server_index = clib_file_add (&file_main, &template);
+ }
+ else
+ {
+ vui->unix_server_index = ~0;
+ }
+
+ vui->sw_if_index = sw->sw_if_index;
+ strncpy (vui->sock_filename, sock_filename,
+ ARRAY_LEN (vui->sock_filename) - 1);
+ vui->sock_errno = 0;
+ vui->is_up = 0;
+ vui->feature_mask = feature_mask;
+ vui->clib_file_index = ~0;
+ vui->log_base_addr = 0;
+ vui->if_index = vui - vum->vhost_user_interfaces;
+ mhash_set_mem (&vum->if_index_by_sock_name, vui->sock_filename,
+ &vui->if_index, 0);
+
+ for (q = 0; q < VHOST_VRING_MAX_N; q++)
+ vhost_user_vring_init (vui, q);
+
+ hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE;
+ vnet_hw_interface_set_flags (vnm, vui->hw_if_index, 0);
+
+ if (sw_if_index)
+ *sw_if_index = vui->sw_if_index;
+
+ for (q = 0; q < VHOST_VRING_MAX_N; q++)
+ {
+ vui->vring_locks[q] = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
+ CLIB_CACHE_LINE_BYTES);
+ memset ((void *) vui->vring_locks[q], 0, CLIB_CACHE_LINE_BYTES);
+ }
+
+ vec_validate (vui->per_cpu_tx_qid,
+ vlib_get_thread_main ()->n_vlib_mains - 1);
+ vhost_user_tx_thread_placement (vui);
+}
+
+int
+vhost_user_create_if (vnet_main_t * vnm, vlib_main_t * vm,
+ const char *sock_filename,
+ u8 is_server,
+ u32 * sw_if_index,
+ u64 feature_mask,
+ u8 renumber, u32 custom_dev_instance, u8 * hwaddr)
+{
+ vhost_user_intf_t *vui = NULL;
+ u32 sw_if_idx = ~0;
+ int rv = 0;
+ int server_sock_fd = -1;
+ vhost_user_main_t *vum = &vhost_user_main;
+ uword *if_index;
+
+ if (sock_filename == NULL || !(strlen (sock_filename) > 0))
+ {
+ return VNET_API_ERROR_INVALID_ARGUMENT;
+ }
+
+ if_index = mhash_get (&vum->if_index_by_sock_name, (void *) sock_filename);
+ if (if_index)
+ {
+ if (sw_if_index)
+ {
+ vui = &vum->vhost_user_interfaces[*if_index];
+ *sw_if_index = vui->sw_if_index;
+ }
+ return VNET_API_ERROR_IF_ALREADY_EXISTS;
+ }
+
+ if (is_server)
+ {
+ if ((rv =
+ vhost_user_init_server_sock (sock_filename, &server_sock_fd)) != 0)
+ {
+ return rv;
+ }
+ }
+
+ pool_get (vhost_user_main.vhost_user_interfaces, vui);
+
+ vhost_user_create_ethernet (vnm, vm, vui, hwaddr);
+ vhost_user_vui_init (vnm, vui, server_sock_fd, sock_filename,
+ feature_mask, &sw_if_idx);
+
+ if (renumber)
+ vnet_interface_name_renumber (sw_if_idx, custom_dev_instance);
+
+ if (sw_if_index)
+ *sw_if_index = sw_if_idx;
+
+ // Process node must connect
+ vlib_process_signal_event (vm, vhost_user_process_node.index, 0, 0);
+
+ return rv;
+}
+
+int
+vhost_user_modify_if (vnet_main_t * vnm, vlib_main_t * vm,
+ const char *sock_filename,
+ u8 is_server,
+ u32 sw_if_index,
+ u64 feature_mask, u8 renumber, u32 custom_dev_instance)
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui = NULL;
+ u32 sw_if_idx = ~0;
+ int server_sock_fd = -1;
+ int rv = 0;
+ vnet_hw_interface_t *hwif;
+ uword *if_index;
+
+ if (!(hwif = vnet_get_sup_hw_interface (vnm, sw_if_index)) ||
+ hwif->dev_class_index != vhost_user_dev_class.index)
+ return VNET_API_ERROR_INVALID_SW_IF_INDEX;
+
+ if (sock_filename == NULL || !(strlen (sock_filename) > 0))
+ return VNET_API_ERROR_INVALID_ARGUMENT;
+
+ vui = vec_elt_at_index (vum->vhost_user_interfaces, hwif->dev_instance);
+
+ /*
+ * Disallow changing the interface to have the same path name
+   * as another interface
+ */
+ if_index = mhash_get (&vum->if_index_by_sock_name, (void *) sock_filename);
+ if (if_index && (*if_index != vui->if_index))
+ return VNET_API_ERROR_IF_ALREADY_EXISTS;
+
+ // First try to open server socket
+ if (is_server)
+ if ((rv = vhost_user_init_server_sock (sock_filename,
+ &server_sock_fd)) != 0)
+ return rv;
+
+ vhost_user_term_if (vui);
+ vhost_user_vui_init (vnm, vui, server_sock_fd,
+ sock_filename, feature_mask, &sw_if_idx);
+
+ if (renumber)
+ vnet_interface_name_renumber (sw_if_idx, custom_dev_instance);
+
+ // Process node must connect
+ vlib_process_signal_event (vm, vhost_user_process_node.index, 0, 0);
+
+ return rv;
+}
+
+clib_error_t *
+vhost_user_connect_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 *sock_filename = NULL;
+ u32 sw_if_index;
+ u8 is_server = 0;
+ u64 feature_mask = (u64) ~ (0ULL);
+ u8 renumber = 0;
+ u32 custom_dev_instance = ~0;
+ u8 hwaddr[6];
+ u8 *hw = NULL;
+ clib_error_t *error = NULL;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "socket %s", &sock_filename))
+ ;
+ else if (unformat (line_input, "server"))
+ is_server = 1;
+ else if (unformat (line_input, "feature-mask 0x%llx", &feature_mask))
+ ;
+ else
+ if (unformat
+ (line_input, "hwaddr %U", unformat_ethernet_address, hwaddr))
+ hw = hwaddr;
+ else if (unformat (line_input, "renumber %d", &custom_dev_instance))
+ {
+ renumber = 1;
+ }
+ else
+ {
+ error = clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, line_input);
+ goto done;
+ }
+ }
+
+ vnet_main_t *vnm = vnet_get_main ();
+
+ int rv;
+ if ((rv = vhost_user_create_if (vnm, vm, (char *) sock_filename,
+ is_server, &sw_if_index, feature_mask,
+ renumber, custom_dev_instance, hw)))
+ {
+ error = clib_error_return (0, "vhost_user_create_if returned %d", rv);
+ goto done;
+ }
+
+ vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main (),
+ sw_if_index);
+
+done:
+ vec_free (sock_filename);
+ unformat_free (line_input);
+
+ return error;
+}
+
+clib_error_t *
+vhost_user_delete_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u32 sw_if_index = ~0;
+ vnet_main_t *vnm = vnet_get_main ();
+ clib_error_t *error = NULL;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "sw_if_index %d", &sw_if_index))
+ ;
+ else if (unformat
+ (line_input, "%U", unformat_vnet_sw_interface, vnm,
+ &sw_if_index))
+ {
+ vnet_hw_interface_t *hwif =
+ vnet_get_sup_hw_interface (vnm, sw_if_index);
+ if (hwif == NULL ||
+ vhost_user_dev_class.index != hwif->dev_class_index)
+ {
+ error = clib_error_return (0, "Not a vhost interface");
+ goto done;
+ }
+ }
+ else
+ {
+ error = clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, line_input);
+ goto done;
+ }
+ }
+
+ vhost_user_delete_if (vnm, vm, sw_if_index);
+
+done:
+ unformat_free (line_input);
+
+ return error;
+}
+
+int
+vhost_user_dump_ifs (vnet_main_t * vnm, vlib_main_t * vm,
+ vhost_user_intf_details_t ** out_vuids)
+{
+ int rv = 0;
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui;
+ vhost_user_intf_details_t *r_vuids = NULL;
+ vhost_user_intf_details_t *vuid = NULL;
+ u32 *hw_if_indices = 0;
+ vnet_hw_interface_t *hi;
+ u8 *s = NULL;
+ int i;
+
+ if (!out_vuids)
+ return -1;
+
+ pool_foreach (vui, vum->vhost_user_interfaces,
+ vec_add1 (hw_if_indices, vui->hw_if_index);
+ );
+
+ for (i = 0; i < vec_len (hw_if_indices); i++)
+ {
+ hi = vnet_get_hw_interface (vnm, hw_if_indices[i]);
+ vui = pool_elt_at_index (vum->vhost_user_interfaces, hi->dev_instance);
+
+ vec_add2 (r_vuids, vuid, 1);
+ vuid->sw_if_index = vui->sw_if_index;
+ vuid->virtio_net_hdr_sz = vui->virtio_net_hdr_sz;
+ vuid->features = vui->features;
+ vuid->num_regions = vui->nregions;
+ vuid->is_server = vui->unix_server_index != ~0;
+ vuid->sock_errno = vui->sock_errno;
+ strncpy ((char *) vuid->sock_filename, (char *) vui->sock_filename,
+ ARRAY_LEN (vuid->sock_filename) - 1);
+
+ s = format (s, "%v%c", hi->name, 0);
+
+ strncpy ((char *) vuid->if_name, (char *) s,
+ ARRAY_LEN (vuid->if_name) - 1);
+ _vec_len (s) = 0;
+ }
+
+ vec_free (s);
+ vec_free (hw_if_indices);
+
+ *out_vuids = r_vuids;
+
+ return rv;
+}
+
+clib_error_t *
+show_vhost_user_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ clib_error_t *error = 0;
+ vnet_main_t *vnm = vnet_get_main ();
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui;
+ u32 hw_if_index, *hw_if_indices = 0;
+ vnet_hw_interface_t *hi;
+ u16 *queue;
+ u32 ci;
+ int i, j, q;
+ int show_descr = 0;
+ struct feat_struct
+ {
+ u8 bit;
+ char *str;
+ };
+ struct feat_struct *feat_entry;
+
+ static struct feat_struct feat_array[] = {
+#define _(s,b) { .str = #s, .bit = b, },
+ foreach_virtio_net_feature
+#undef _
+ {.str = NULL}
+ };
+
+#define foreach_protocol_feature \
+ _(VHOST_USER_PROTOCOL_F_MQ) \
+ _(VHOST_USER_PROTOCOL_F_LOG_SHMFD)
+
+ static struct feat_struct proto_feat_array[] = {
+#define _(s) { .str = #s, .bit = s},
+ foreach_protocol_feature
+#undef _
+ {.str = NULL}
+ };
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat
+ (input, "%U", unformat_vnet_hw_interface, vnm, &hw_if_index))
+ {
+ vec_add1 (hw_if_indices, hw_if_index);
+ }
+ else if (unformat (input, "descriptors") || unformat (input, "desc"))
+ show_descr = 1;
+ else
+ {
+ error = clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
+ }
+ if (vec_len (hw_if_indices) == 0)
+ {
+ pool_foreach (vui, vum->vhost_user_interfaces,
+ vec_add1 (hw_if_indices, vui->hw_if_index);
+ );
+ }
+ vlib_cli_output (vm, "Virtio vhost-user interfaces");
+ vlib_cli_output (vm, "Global:\n coalesce frames %d time %e",
+ vum->coalesce_frames, vum->coalesce_time);
+ vlib_cli_output (vm, " number of rx virtqueues in interrupt mode: %d",
+ vum->ifq_count);
+
+ for (i = 0; i < vec_len (hw_if_indices); i++)
+ {
+ hi = vnet_get_hw_interface (vnm, hw_if_indices[i]);
+ vui = pool_elt_at_index (vum->vhost_user_interfaces, hi->dev_instance);
+ vlib_cli_output (vm, "Interface: %s (ifindex %d)",
+ hi->name, hw_if_indices[i]);
+
+ vlib_cli_output (vm, "virtio_net_hdr_sz %d\n"
+ " features mask (0x%llx): \n"
+ " features (0x%llx): \n",
+ vui->virtio_net_hdr_sz, vui->feature_mask,
+ vui->features);
+
+ feat_entry = (struct feat_struct *) &feat_array;
+ while (feat_entry->str)
+ {
+ if (vui->features & (1ULL << feat_entry->bit))
+ vlib_cli_output (vm, " %s (%d)", feat_entry->str,
+ feat_entry->bit);
+ feat_entry++;
+ }
+
+ vlib_cli_output (vm, " protocol features (0x%llx)",
+ vui->protocol_features);
+ feat_entry = (struct feat_struct *) &proto_feat_array;
+ while (feat_entry->str)
+ {
+ if (vui->protocol_features & (1ULL << feat_entry->bit))
+ vlib_cli_output (vm, " %s (%d)", feat_entry->str,
+ feat_entry->bit);
+ feat_entry++;
+ }
+
+ vlib_cli_output (vm, "\n");
+
+ vlib_cli_output (vm, " socket filename %s type %s errno \"%s\"\n\n",
+ vui->sock_filename,
+ (vui->unix_server_index != ~0) ? "server" : "client",
+ strerror (vui->sock_errno));
+
+ vlib_cli_output (vm, " rx placement: ");
+
+ vec_foreach (queue, vui->rx_queues)
+ {
+ vnet_main_t *vnm = vnet_get_main ();
+ uword thread_index;
+ vnet_hw_interface_rx_mode mode;
+
+ thread_index = vnet_get_device_input_thread_index (vnm,
+ vui->hw_if_index,
+ *queue);
+ vnet_hw_interface_get_rx_mode (vnm, vui->hw_if_index, *queue, &mode);
+ vlib_cli_output (vm, " thread %d on vring %d, %U\n",
+ thread_index, VHOST_VRING_IDX_TX (*queue),
+ format_vnet_hw_interface_rx_mode, mode);
+ }
+
+ vlib_cli_output (vm, " tx placement: %s\n",
+ vui->use_tx_spinlock ? "spin-lock" : "lock-free");
+
+ vec_foreach_index (ci, vui->per_cpu_tx_qid)
+ {
+ vlib_cli_output (vm, " thread %d on vring %d\n", ci,
+ VHOST_VRING_IDX_RX (vui->per_cpu_tx_qid[ci]));
+ }
+
+ vlib_cli_output (vm, "\n");
+
+ vlib_cli_output (vm, " Memory regions (total %d)\n", vui->nregions);
+
+ if (vui->nregions)
+ {
+ vlib_cli_output (vm,
+ " region fd guest_phys_addr memory_size userspace_addr mmap_offset mmap_addr\n");
+ vlib_cli_output (vm,
+ " ====== ===== ================== ================== ================== ================== ==================\n");
+ }
+ for (j = 0; j < vui->nregions; j++)
+ {
+ vlib_cli_output (vm,
+ " %d %-5d 0x%016lx 0x%016lx 0x%016lx 0x%016lx 0x%016lx\n",
+ j, vui->region_mmap_fd[j],
+ vui->regions[j].guest_phys_addr,
+ vui->regions[j].memory_size,
+ vui->regions[j].userspace_addr,
+ vui->regions[j].mmap_offset,
+ pointer_to_uword (vui->region_mmap_addr[j]));
+ }
+ for (q = 0; q < VHOST_VRING_MAX_N; q++)
+ {
+ if (!vui->vrings[q].started)
+ continue;
+
+ vlib_cli_output (vm, "\n Virtqueue %d (%s%s)\n", q,
+ (q & 1) ? "RX" : "TX",
+ vui->vrings[q].enabled ? "" : " disabled");
+
+ vlib_cli_output (vm,
+ " qsz %d last_avail_idx %d last_used_idx %d\n",
+ vui->vrings[q].qsz_mask + 1,
+ vui->vrings[q].last_avail_idx,
+ vui->vrings[q].last_used_idx);
+
+ if (vui->vrings[q].avail && vui->vrings[q].used)
+ vlib_cli_output (vm,
+ " avail.flags %x avail.idx %d used.flags %x used.idx %d\n",
+ vui->vrings[q].avail->flags,
+ vui->vrings[q].avail->idx,
+ vui->vrings[q].used->flags,
+ vui->vrings[q].used->idx);
+
+ int kickfd = UNIX_GET_FD (vui->vrings[q].kickfd_idx);
+ int callfd = UNIX_GET_FD (vui->vrings[q].callfd_idx);
+ vlib_cli_output (vm, " kickfd %d callfd %d errfd %d\n",
+ kickfd, callfd, vui->vrings[q].errfd);
+
+ if (show_descr)
+ {
+ vlib_cli_output (vm, "\n descriptor table:\n");
+ vlib_cli_output (vm,
+ " id addr len flags next user_addr\n");
+ vlib_cli_output (vm,
+ " ===== ================== ===== ====== ===== ==================\n");
+ for (j = 0; j < vui->vrings[q].qsz_mask + 1; j++)
+ {
+ u32 mem_hint = 0;
+ vlib_cli_output (vm,
+ " %-5d 0x%016lx %-5d 0x%04x %-5d 0x%016lx\n",
+ j, vui->vrings[q].desc[j].addr,
+ vui->vrings[q].desc[j].len,
+ vui->vrings[q].desc[j].flags,
+ vui->vrings[q].desc[j].next,
+ pointer_to_uword (map_guest_mem
+ (vui,
+ vui->vrings[q].desc[j].
+ addr, &mem_hint)));
+ }
+ }
+ }
+ vlib_cli_output (vm, "\n");
+ }
+done:
+ vec_free (hw_if_indices);
+ return error;
+}
+
+/*
+ * CLI functions
+ */
+
+/*?
+ * Create a vHost User interface. Once created, a new virtual interface
+ * will exist with the name '<em>VirtualEthernet0/0/x</em>', where '<em>x</em>'
+ * is the next free index.
+ *
+ * There are several parameters associated with a vHost interface:
+ *
+ * - <b>socket <socket-filename></b> - Name of the linux socket used by QEMU/VM and
+ *   VPP to manage the vHost interface. If the socket does not already exist, VPP
+ *   will create it.
+ *
+ * - <b>server</b> - Optional flag to indicate that VPP should be the server for the
+ * linux socket. If not provided, VPP will be the client.
+ *
+ * - <b>feature-mask <hex></b> - Optional virtio/vhost feature set negotiated at
+ * startup. By default, all supported features will be advertised. Otherwise,
+ * provide the set of features desired.
+ * - 0x000008000 (15) - VIRTIO_NET_F_MRG_RXBUF
+ * - 0x000020000 (17) - VIRTIO_NET_F_CTRL_VQ
+ * - 0x000200000 (21) - VIRTIO_NET_F_GUEST_ANNOUNCE
+ * - 0x000400000 (22) - VIRTIO_NET_F_MQ
+ * - 0x004000000 (26) - VHOST_F_LOG_ALL
+ * - 0x008000000 (27) - VIRTIO_F_ANY_LAYOUT
+ * - 0x010000000 (28) - VIRTIO_F_INDIRECT_DESC
+ * - 0x040000000 (30) - VHOST_USER_F_PROTOCOL_FEATURES
+ * - 0x100000000 (32) - VIRTIO_F_VERSION_1
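+ *
+ *   For example, a mask advertising only VIRTIO_NET_F_MRG_RXBUF (bit 15) and
+ *   VIRTIO_NET_F_MQ (bit 22) would be 0x000008000 | 0x000400000 = 0x000408000.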
+ *
+ * - <b>hwaddr <mac-addr></b> - Optional ethernet address, can be in either
+ * X:X:X:X:X:X unix or X.X.X cisco format.
+ *
+ * - <b>renumber <dev_instance></b> - Optional parameter which allows the instance
+ *   in the name to be specified. If the instance already exists, the name will be
+ *   used anyway and multiple instances will have the same name. Use with caution.
+ *
+ * - <b>mode [interrupt | polling]</b> - Optional parameter specifying
+ * the input thread polling policy.
+ *
+ * @cliexpar
+ * Example of how to create a vhost interface with VPP as the client and all features enabled:
+ * @cliexstart{create vhost-user socket /tmp/vhost1.sock}
+ * VirtualEthernet0/0/0
+ * @cliexend
+ * Example of how to create a vhost interface with VPP as the server and with just
+ * multiple queues enabled:
+ * @cliexstart{create vhost-user socket /tmp/vhost2.sock server feature-mask 0x40400000}
+ * VirtualEthernet0/0/1
+ * @cliexend
+ * Once the vHost interface is created, enable the interface using:
+ * @cliexcmd{set interface state VirtualEthernet0/0/0 up}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (vhost_user_connect_command, static) = {
+ .path = "create vhost-user",
+ .short_help = "create vhost-user socket <socket-filename> [server] "
+ "[feature-mask <hex>] [hwaddr <mac-addr>] [renumber <dev_instance>] ",
+ .function = vhost_user_connect_command_fn,
+};
+/* *INDENT-ON* */
+
+/*?
+ * Delete a vHost User interface using the interface name or the
+ * software interface index. Use the '<em>show interface</em>'
+ * command to determine the software interface index. On deletion,
+ * the linux socket will not be deleted.
+ *
+ * @cliexpar
+ * Example of how to delete a vhost interface by name:
+ * @cliexcmd{delete vhost-user VirtualEthernet0/0/1}
+ * Example of how to delete a vhost interface by software interface index:
+ * @cliexcmd{delete vhost-user sw_if_index 1}
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (vhost_user_delete_command, static) = {
+ .path = "delete vhost-user",
+ .short_help = "delete vhost-user {<interface> | sw_if_index <sw_idx>}",
+ .function = vhost_user_delete_command_fn,
+};
+
+/*?
+ * Display the attributes of a single vHost User interface (provide interface
+ * name), multiple vHost User interfaces (provide a list of interface names separated
+ * by spaces) or all vHost User interfaces (omit an interface name to display all
+ * vHost interfaces).
+ *
+ * @cliexpar
+ * @parblock
+ * Example of how to display a vhost interface:
+ * @cliexstart{show vhost-user VirtualEthernet0/0/0}
+ * Virtio vhost-user interfaces
+ * Global:
+ * coalesce frames 32 time 1e-3
+ * Interface: VirtualEthernet0/0/0 (ifindex 1)
+ * virtio_net_hdr_sz 12
+ * features mask (0xffffffffffffffff):
+ * features (0x50408000):
+ * VIRTIO_NET_F_MRG_RXBUF (15)
+ * VIRTIO_NET_F_MQ (22)
+ * VIRTIO_F_INDIRECT_DESC (28)
+ * VHOST_USER_F_PROTOCOL_FEATURES (30)
+ * protocol features (0x3)
+ * VHOST_USER_PROTOCOL_F_MQ (0)
+ * VHOST_USER_PROTOCOL_F_LOG_SHMFD (1)
+ *
+ * socket filename /tmp/vhost1.sock type client errno "Success"
+ *
+ * rx placement:
+ * thread 1 on vring 1
+ * thread 1 on vring 5
+ * thread 2 on vring 3
+ * thread 2 on vring 7
+ * tx placement: spin-lock
+ * thread 0 on vring 0
+ * thread 1 on vring 2
+ * thread 2 on vring 0
+ *
+ * Memory regions (total 2)
+ * region fd guest_phys_addr memory_size userspace_addr mmap_offset mmap_addr
+ * ====== ===== ================== ================== ================== ================== ==================
+ * 0 60 0x0000000000000000 0x00000000000a0000 0x00002aaaaac00000 0x0000000000000000 0x00002aab2b400000
+ * 1 61 0x00000000000c0000 0x000000003ff40000 0x00002aaaaacc0000 0x00000000000c0000 0x00002aababcc0000
+ *
+ * Virtqueue 0 (TX)
+ * qsz 256 last_avail_idx 0 last_used_idx 0
+ * avail.flags 1 avail.idx 128 used.flags 1 used.idx 0
+ * kickfd 62 callfd 64 errfd -1
+ *
+ * Virtqueue 1 (RX)
+ * qsz 256 last_avail_idx 0 last_used_idx 0
+ * avail.flags 1 avail.idx 0 used.flags 1 used.idx 0
+ * kickfd 65 callfd 66 errfd -1
+ *
+ * Virtqueue 2 (TX)
+ * qsz 256 last_avail_idx 0 last_used_idx 0
+ * avail.flags 1 avail.idx 128 used.flags 1 used.idx 0
+ * kickfd 63 callfd 70 errfd -1
+ *
+ * Virtqueue 3 (RX)
+ * qsz 256 last_avail_idx 0 last_used_idx 0
+ * avail.flags 1 avail.idx 0 used.flags 1 used.idx 0
+ * kickfd 72 callfd 74 errfd -1
+ *
+ * Virtqueue 4 (TX disabled)
+ * qsz 256 last_avail_idx 0 last_used_idx 0
+ * avail.flags 1 avail.idx 0 used.flags 1 used.idx 0
+ * kickfd 76 callfd 78 errfd -1
+ *
+ * Virtqueue 5 (RX disabled)
+ * qsz 256 last_avail_idx 0 last_used_idx 0
+ * avail.flags 1 avail.idx 0 used.flags 1 used.idx 0
+ * kickfd 80 callfd 82 errfd -1
+ *
+ * Virtqueue 6 (TX disabled)
+ * qsz 256 last_avail_idx 0 last_used_idx 0
+ * avail.flags 1 avail.idx 0 used.flags 1 used.idx 0
+ * kickfd 84 callfd 86 errfd -1
+ *
+ * Virtqueue 7 (RX disabled)
+ * qsz 256 last_avail_idx 0 last_used_idx 0
+ * avail.flags 1 avail.idx 0 used.flags 1 used.idx 0
+ * kickfd 88 callfd 90 errfd -1
+ *
+ * @cliexend
+ *
+ * The optional '<em>descriptors</em>' parameter will display the same output as
+ * the previous example but will include the descriptor table for each queue.
+ * The output is truncated below:
+ * @cliexstart{show vhost-user VirtualEthernet0/0/0 descriptors}
+ * Virtio vhost-user interfaces
+ * Global:
+ * coalesce frames 32 time 1e-3
+ * Interface: VirtualEthernet0/0/0 (ifindex 1)
+ * virtio_net_hdr_sz 12
+ * features mask (0xffffffffffffffff):
+ * features (0x50408000):
+ * VIRTIO_NET_F_MRG_RXBUF (15)
+ * VIRTIO_NET_F_MQ (22)
+ * :
+ * Virtqueue 0 (TX)
+ * qsz 256 last_avail_idx 0 last_used_idx 0
+ * avail.flags 1 avail.idx 128 used.flags 1 used.idx 0
+ * kickfd 62 callfd 64 errfd -1
+ *
+ * descriptor table:
+ * id addr len flags next user_addr
+ * ===== ================== ===== ====== ===== ==================
+ * 0 0x0000000010b6e974 2060 0x0002 1 0x00002aabbc76e974
+ * 1 0x0000000010b6e034 2060 0x0002 2 0x00002aabbc76e034
+ * 2 0x0000000010b6d6f4 2060 0x0002 3 0x00002aabbc76d6f4
+ * 3 0x0000000010b6cdb4 2060 0x0002 4 0x00002aabbc76cdb4
+ * 4 0x0000000010b6c474 2060 0x0002 5 0x00002aabbc76c474
+ * 5 0x0000000010b6bb34 2060 0x0002 6 0x00002aabbc76bb34
+ * 6 0x0000000010b6b1f4 2060 0x0002 7 0x00002aabbc76b1f4
+ * 7 0x0000000010b6a8b4 2060 0x0002 8 0x00002aabbc76a8b4
+ * 8 0x0000000010b69f74 2060 0x0002 9 0x00002aabbc769f74
+ * 9 0x0000000010b69634 2060 0x0002 10 0x00002aabbc769634
+ * 10 0x0000000010b68cf4 2060 0x0002 11 0x00002aabbc768cf4
+ * :
+ * 249 0x0000000000000000 0 0x0000 250 0x00002aab2b400000
+ * 250 0x0000000000000000 0 0x0000 251 0x00002aab2b400000
+ * 251 0x0000000000000000 0 0x0000 252 0x00002aab2b400000
+ * 252 0x0000000000000000 0 0x0000 253 0x00002aab2b400000
+ * 253 0x0000000000000000 0 0x0000 254 0x00002aab2b400000
+ * 254 0x0000000000000000 0 0x0000 255 0x00002aab2b400000
+ * 255 0x0000000000000000 0 0x0000 32768 0x00002aab2b400000
+ *
+ * Virtqueue 1 (RX)
+ * qsz 256 last_avail_idx 0 last_used_idx 0
+ * :
+ * @cliexend
+ * @endparblock
+?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_vhost_user_command, static) = {
+ .path = "show vhost-user",
+ .short_help = "show vhost-user [<interface> [<interface> [..]]] [descriptors]",
+ .function = show_vhost_user_command_fn,
+};
+/* *INDENT-ON* */
+
+clib_error_t *
+debug_vhost_user_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ clib_error_t *error = NULL;
+ vhost_user_main_t *vum = &vhost_user_main;
+ u8 onoff = 0;
+ u8 input_found = 0;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return clib_error_return (0, "missing argument");
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (input_found)
+ {
+ error = clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, line_input);
+ goto done;
+ }
+
+ if (unformat (line_input, "on"))
+ {
+ input_found = 1;
+ onoff = 1;
+ }
+ else if (unformat (line_input, "off"))
+ {
+ input_found = 1;
+ onoff = 0;
+ }
+ else
+ {
+ error = clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, line_input);
+ goto done;
+ }
+ }
+
+ vum->debug = onoff;
+
+done:
+ unformat_free (line_input);
+
+ return error;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (debug_vhost_user_command, static) = {
+ .path = "debug vhost-user",
+ .short_help = "debug vhost-user <on | off>",
+ .function = debug_vhost_user_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+vhost_user_config (vlib_main_t * vm, unformat_input_t * input)
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "coalesce-frames %d", &vum->coalesce_frames))
+ ;
+ else if (unformat (input, "coalesce-time %f", &vum->coalesce_time))
+ ;
+ else if (unformat (input, "dont-dump-memory"))
+ vum->dont_dump_vhost_user_memory = 1;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+
+ return 0;
+}
+
+/* vhost-user { ... } configuration. */
+VLIB_CONFIG_FUNCTION (vhost_user_config, "vhost-user");
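+
+/*
+ * A minimal example of the corresponding stanza in the VPP startup
+ * configuration, assuming the defaults are to be overridden (the values
+ * shown are illustrative only):
+ *
+ *   vhost-user {
+ *     coalesce-frames 64
+ *     coalesce-time 1e-3
+ *     dont-dump-memory
+ *   }
+ */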
+
+void
+vhost_user_unmap_all (void)
+{
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui;
+
+ if (vum->dont_dump_vhost_user_memory)
+ {
+ pool_foreach (vui, vum->vhost_user_interfaces,
+ unmap_all_mem_regions (vui);
+ );
+ }
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/virtio/vhost-user.h b/src/vnet/devices/virtio/vhost-user.h
new file mode 100644
index 00000000..105b92b7
--- /dev/null
+++ b/src/vnet/devices/virtio/vhost-user.h
@@ -0,0 +1,342 @@
+/*
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __VIRTIO_VHOST_USER_H__
+#define __VIRTIO_VHOST_USER_H__
+/* vhost-user data structures */
+
+#define VHOST_MEMORY_MAX_NREGIONS 8
+#define VHOST_USER_MSG_HDR_SZ 12
+#define VHOST_VRING_MAX_SIZE 32768
+#define VHOST_VRING_MAX_N 16 //8TX + 8RX
+#define VHOST_VRING_IDX_RX(qid) (2*qid)
+#define VHOST_VRING_IDX_TX(qid) (2*qid + 1)
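+/*
+ * Vring numbering follows the virtio-net convention: queue pair <qid> uses
+ * vring 2*qid for the guest RX direction (VPP TX) and vring 2*qid + 1 for the
+ * guest TX direction (VPP RX); e.g. queue 1 maps to vrings 2 and 3.
+ */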
+
+#define VHOST_USER_VRING_NOFD_MASK 0x100
+#define VIRTQ_DESC_F_NEXT 1
+#define VIRTQ_DESC_F_INDIRECT 4
+#define VHOST_USER_REPLY_MASK (0x1 << 2)
+
+#define VHOST_USER_PROTOCOL_F_MQ 0
+#define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
+#define VHOST_VRING_F_LOG 0
+
+#define VHOST_USER_F_PROTOCOL_FEATURES 30
+#define VHOST_USER_PROTOCOL_FEATURES ((1ULL << VHOST_USER_PROTOCOL_F_MQ) | \
+ (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD))
+
+/* If multiqueue is provided by the host, then we support it. */
+#define VIRTIO_NET_CTRL_MQ 4
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1
+#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000
+
+#define VRING_USED_F_NO_NOTIFY 1
+#define VRING_AVAIL_F_NO_INTERRUPT 1
+
+#define foreach_virtio_net_feature \
+ _ (VIRTIO_NET_F_MRG_RXBUF, 15) \
+ _ (VIRTIO_NET_F_CTRL_VQ, 17) \
+ _ (VIRTIO_NET_F_GUEST_ANNOUNCE, 21) \
+ _ (VIRTIO_NET_F_MQ, 22) \
+ _ (VHOST_F_LOG_ALL, 26) \
+ _ (VIRTIO_F_ANY_LAYOUT, 27) \
+ _ (VIRTIO_F_INDIRECT_DESC, 28) \
+ _ (VHOST_USER_F_PROTOCOL_FEATURES, 30) \
+ _ (VIRTIO_F_VERSION_1, 32)
+
+
+typedef enum
+{
+#define _(f,n) FEAT_##f = (n),
+ foreach_virtio_net_feature
+#undef _
+} virtio_net_feature_t;
+
+int vhost_user_create_if (vnet_main_t * vnm, vlib_main_t * vm,
+ const char *sock_filename, u8 is_server,
+ u32 * sw_if_index, u64 feature_mask,
+ u8 renumber, u32 custom_dev_instance, u8 * hwaddr);
+int vhost_user_modify_if (vnet_main_t * vnm, vlib_main_t * vm,
+ const char *sock_filename, u8 is_server,
+ u32 sw_if_index, u64 feature_mask,
+ u8 renumber, u32 custom_dev_instance);
+int vhost_user_delete_if (vnet_main_t * vnm, vlib_main_t * vm,
+ u32 sw_if_index);
+
+/* *INDENT-OFF* */
+typedef struct vhost_user_memory_region
+{
+ u64 guest_phys_addr;
+ u64 memory_size;
+ u64 userspace_addr;
+ u64 mmap_offset;
+} __attribute ((packed)) vhost_user_memory_region_t;
+
+typedef struct vhost_user_memory
+{
+ u32 nregions;
+ u32 padding;
+ vhost_user_memory_region_t regions[VHOST_MEMORY_MAX_NREGIONS];
+} __attribute ((packed)) vhost_user_memory_t;
+
+typedef struct
+{
+ u32 index, num;
+} __attribute ((packed)) vhost_vring_state_t;
+
+typedef struct
+{
+ u32 index, flags;
+ u64 desc_user_addr, used_user_addr, avail_user_addr, log_guest_addr;
+} __attribute ((packed)) vhost_vring_addr_t;
+
+typedef struct vhost_user_log
+{
+ u64 size;
+ u64 offset;
+} __attribute ((packed)) vhost_user_log_t;
+
+typedef enum vhost_user_req
+{
+ VHOST_USER_NONE = 0,
+ VHOST_USER_GET_FEATURES = 1,
+ VHOST_USER_SET_FEATURES = 2,
+ VHOST_USER_SET_OWNER = 3,
+ VHOST_USER_RESET_OWNER = 4,
+ VHOST_USER_SET_MEM_TABLE = 5,
+ VHOST_USER_SET_LOG_BASE = 6,
+ VHOST_USER_SET_LOG_FD = 7,
+ VHOST_USER_SET_VRING_NUM = 8,
+ VHOST_USER_SET_VRING_ADDR = 9,
+ VHOST_USER_SET_VRING_BASE = 10,
+ VHOST_USER_GET_VRING_BASE = 11,
+ VHOST_USER_SET_VRING_KICK = 12,
+ VHOST_USER_SET_VRING_CALL = 13,
+ VHOST_USER_SET_VRING_ERR = 14,
+ VHOST_USER_GET_PROTOCOL_FEATURES = 15,
+ VHOST_USER_SET_PROTOCOL_FEATURES = 16,
+ VHOST_USER_GET_QUEUE_NUM = 17,
+ VHOST_USER_SET_VRING_ENABLE = 18,
+ VHOST_USER_MAX
+} vhost_user_req_t;
+
+// vring_desc I/O buffer descriptor
+typedef struct
+{
+ uint64_t addr; // packet data buffer address
+ uint32_t len; // packet data buffer size
+ uint16_t flags; // (see below)
+ uint16_t next; // optional index next descriptor in chain
+} __attribute ((packed)) vring_desc_t;
+
+typedef struct
+{
+ uint16_t flags;
+ volatile uint16_t idx;
+ uint16_t ring[VHOST_VRING_MAX_SIZE];
+} __attribute ((packed)) vring_avail_t;
+
+typedef struct
+{
+ uint16_t flags;
+ uint16_t idx;
+ struct /* vring_used_elem */
+ {
+ uint32_t id;
+ uint32_t len;
+ } ring[VHOST_VRING_MAX_SIZE];
+} __attribute ((packed)) vring_used_t;
+
+typedef struct
+{
+ u8 flags;
+ u8 gso_type;
+ u16 hdr_len;
+ u16 gso_size;
+ u16 csum_start;
+ u16 csum_offset;
+} __attribute ((packed)) virtio_net_hdr_t;
+
+typedef struct {
+ virtio_net_hdr_t hdr;
+ u16 num_buffers;
+} __attribute ((packed)) virtio_net_hdr_mrg_rxbuf_t;
+
+typedef struct vhost_user_msg {
+ vhost_user_req_t request;
+ u32 flags;
+ u32 size;
+ union
+ {
+ u64 u64;
+ vhost_vring_state_t state;
+ vhost_vring_addr_t addr;
+ vhost_user_memory_t memory;
+ vhost_user_log_t log;
+ };
+} __attribute ((packed)) vhost_user_msg_t;
+/* *INDENT-ON* */
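+
+/*
+ * On the wire, every vhost-user message starts with the request, flags and
+ * size fields above (3 * 4 bytes = VHOST_USER_MSG_HDR_SZ), followed by
+ * 'size' bytes of the union payload; some requests additionally carry file
+ * descriptors as ancillary data on the unix socket.
+ */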
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ u16 qsz_mask;
+ u16 last_avail_idx;
+ u16 last_used_idx;
+ u16 n_since_last_int;
+ vring_desc_t *desc;
+ vring_avail_t *avail;
+ vring_used_t *used;
+ f64 int_deadline;
+ u8 started;
+ u8 enabled;
+ u8 log_used;
+ //Put non-runtime in a different cache line
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
+ int errfd;
+ u32 callfd_idx;
+ u32 kickfd_idx;
+ u64 log_guest_addr;
+
+ /* The rx queue policy (interrupt/adaptive/polling) for this queue */
+ u32 mode;
+} vhost_user_vring_t;
+
+#define VHOST_USER_EVENT_START_TIMER 1
+#define VHOST_USER_EVENT_STOP_TIMER 2
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ u32 is_up;
+ u32 admin_up;
+ u32 unix_server_index;
+ u32 clib_file_index;
+ char sock_filename[256];
+ int sock_errno;
+ uword if_index;
+ u32 hw_if_index, sw_if_index;
+
+ //Feature negotiation
+ u64 features;
+ u64 feature_mask;
+ u64 protocol_features;
+
+ //Memory region information
+ u32 nregions;
+ vhost_user_memory_region_t regions[VHOST_MEMORY_MAX_NREGIONS];
+ void *region_mmap_addr[VHOST_MEMORY_MAX_NREGIONS];
+ u64 region_guest_addr_lo[VHOST_MEMORY_MAX_NREGIONS];
+ u64 region_guest_addr_hi[VHOST_MEMORY_MAX_NREGIONS];
+ u32 region_mmap_fd[VHOST_MEMORY_MAX_NREGIONS];
+
+ //Virtual rings
+ vhost_user_vring_t vrings[VHOST_VRING_MAX_N];
+ volatile u32 *vring_locks[VHOST_VRING_MAX_N];
+
+ int virtio_net_hdr_sz;
+ int is_any_layout;
+
+ void *log_base_addr;
+ u64 log_size;
+
+ /* Whether to use spinlock or per_cpu_tx_qid assignment */
+ u8 use_tx_spinlock;
+ u16 *per_cpu_tx_qid;
+
+ /* Vector of active rx queues for this interface */
+ u16 *rx_queues;
+} vhost_user_intf_t;
+
+typedef struct
+{
+ uword dst;
+ uword src;
+ u32 len;
+} vhost_copy_t;
+
+typedef struct
+{
+ u16 qid; /** The interface queue index (Not the virtio vring idx) */
+ u16 device_index; /** The device index */
+ u32 virtio_ring_flags; /** Runtime queue flags **/
+ u16 first_desc_len; /** Length of the first data descriptor **/
+ virtio_net_hdr_mrg_rxbuf_t hdr; /** Virtio header **/
+} vhost_trace_t;
+
+
+#define VHOST_USER_RX_BUFFERS_N (2 * VLIB_FRAME_SIZE + 2)
+#define VHOST_USER_COPY_ARRAY_N (4 * VLIB_FRAME_SIZE)
+
+typedef struct
+{
+ u32 rx_buffers_len;
+ u32 rx_buffers[VHOST_USER_RX_BUFFERS_N];
+
+ virtio_net_hdr_mrg_rxbuf_t tx_headers[VLIB_FRAME_SIZE];
+ vhost_copy_t copy[VHOST_USER_COPY_ARRAY_N];
+
+  /* This is here so it doesn't end up
+ * using stack or registers. */
+ vhost_trace_t *current_trace;
+} vhost_cpu_t;
+
+typedef struct
+{
+ mhash_t if_index_by_sock_name;
+ u32 mtu_bytes;
+ vhost_user_intf_t *vhost_user_interfaces;
+ u32 *show_dev_instance_by_real_dev_instance;
+ u32 coalesce_frames;
+ f64 coalesce_time;
+ int dont_dump_vhost_user_memory;
+
+ /** Per-CPU data for vhost-user */
+ vhost_cpu_t *cpus;
+
+ /** Pseudo random iterator */
+ u32 random;
+
+ /* The number of rx interface/queue pairs in interrupt mode */
+ u32 ifq_count;
+
+ /* debug on or off */
+ u8 debug;
+} vhost_user_main_t;
+
+typedef struct
+{
+ u8 if_name[64];
+ u32 sw_if_index;
+ u32 virtio_net_hdr_sz;
+ u64 features;
+ u8 is_server;
+ u8 sock_filename[256];
+ u32 num_regions;
+ int sock_errno;
+} vhost_user_intf_details_t;
+
+int vhost_user_dump_ifs (vnet_main_t * vnm, vlib_main_t * vm,
+ vhost_user_intf_details_t ** out_vuids);
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/virtio/vhost_user.api b/src/vnet/devices/virtio/vhost_user.api
new file mode 100644
index 00000000..28d5e891
--- /dev/null
+++ b/src/vnet/devices/virtio/vhost_user.api
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2015-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/** \brief vhost-user interface create request
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+    @param is_server - our side is socket server
+    @param sock_filename - unix socket filename, used to speak with frontend
+    @param renumber - if set, renumber the interface using custom_dev_instance
+    @param custom_dev_instance - custom device instance number, used when renumber is set
+    @param use_custom_mac - enable or disable the use of the provided hardware address
+    @param mac_address - hardware address to use if 'use_custom_mac' is set
+    @param tag - opaque, user-defined tag to attach to the interface
+*/
+define create_vhost_user_if
+{
+ u32 client_index;
+ u32 context;
+ u8 is_server;
+ u8 sock_filename[256];
+ u8 renumber;
+ u32 custom_dev_instance;
+ u8 use_custom_mac;
+ u8 mac_address[6];
+ u8 tag[64];
+};
+
+/** \brief vhost-user interface create response
+ @param context - sender context, to match reply w/ request
+ @param retval - return code for the request
+ @param sw_if_index - interface the operation is applied to
+*/
+define create_vhost_user_if_reply
+{
+ u32 context;
+ i32 retval;
+ u32 sw_if_index;
+};
+
+/** \brief vhost-user interface modify request
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+    @param sw_if_index - interface to modify
+    @param is_server - our side is socket server
+    @param sock_filename - unix socket filename, used to speak with frontend
+    @param renumber - if set, renumber the interface using custom_dev_instance
+    @param custom_dev_instance - custom device instance number, used when renumber is set
+*/
+autoreply define modify_vhost_user_if
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+ u8 is_server;
+ u8 sock_filename[256];
+ u8 renumber;
+ u32 custom_dev_instance;
+};
+
+/** \brief vhost-user interface delete request
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+    @param sw_if_index - interface to delete
+*/
+autoreply define delete_vhost_user_if
+{
+ u32 client_index;
+ u32 context;
+ u32 sw_if_index;
+};
+
+/** \brief Vhost-user interface details structure (fix this)
+    @param context - sender context, to match reply w/ request
+    @param sw_if_index - index of the interface
+    @param interface_name - name of interface
+    @param virtio_net_hdr_sz - net header size
+    @param features - interface features
+    @param is_server - vhost-user server socket
+    @param sock_filename - socket filename
+    @param num_regions - number of used memory regions
+    @param sock_errno - last socket errno recorded for the interface
+*/
+define sw_interface_vhost_user_details
+{
+ u32 context;
+ u32 sw_if_index;
+ u8 interface_name[64];
+ u32 virtio_net_hdr_sz;
+ u64 features;
+ u8 is_server;
+ u8 sock_filename[256];
+ u32 num_regions;
+ i32 sock_errno;
+};
+
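+/** \brief vhost-user interface dump request
+    @param client_index - opaque cookie to identify the sender
+    @param context - sender context, to match reply w/ request
+*/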
+define sw_interface_vhost_user_dump
+{
+ u32 client_index;
+ u32 context;
+};
+/*
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/vnet/devices/virtio/vhost_user_api.c b/src/vnet/devices/virtio/vhost_user_api.c
new file mode 100644
index 00000000..78599241
--- /dev/null
+++ b/src/vnet/devices/virtio/vhost_user_api.c
@@ -0,0 +1,254 @@
+/*
+ *------------------------------------------------------------------
+ * vhost-user_api.c - vhost-user api
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vnet/vnet.h>
+#include <vlibmemory/api.h>
+
+#include <vnet/interface.h>
+#include <vnet/api_errno.h>
+#include <vnet/devices/virtio/vhost-user.h>
+
+#include <vnet/vnet_msg_enum.h>
+
+#define vl_typedefs /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_typedefs
+
+#define vl_endianfun /* define message structures */
+#include <vnet/vnet_all_api_h.h>
+#undef vl_endianfun
+
+/* instantiate all the print functions we know about */
+#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
+#define vl_printfun
+#include <vnet/vnet_all_api_h.h>
+#undef vl_printfun
+
+#include <vlibapi/api_helper_macros.h>
+
+#define foreach_vpe_api_msg \
+_(CREATE_VHOST_USER_IF, create_vhost_user_if) \
+_(MODIFY_VHOST_USER_IF, modify_vhost_user_if) \
+_(DELETE_VHOST_USER_IF, delete_vhost_user_if) \
+_(SW_INTERFACE_VHOST_USER_DUMP, sw_interface_vhost_user_dump)
+
+/*
+ * WARNING: replicated pending api refactor completion
+ */
+static void
+send_sw_interface_event_deleted (vpe_api_main_t * am,
+ unix_shared_memory_queue_t * q,
+ u32 sw_if_index)
+{
+ vl_api_sw_interface_event_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_EVENT);
+ mp->sw_if_index = ntohl (sw_if_index);
+
+ mp->admin_up_down = 0;
+ mp->link_up_down = 0;
+ mp->deleted = 1;
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+static void
+vl_api_create_vhost_user_if_t_handler (vl_api_create_vhost_user_if_t * mp)
+{
+ int rv = 0;
+ vl_api_create_vhost_user_if_reply_t *rmp;
+ u32 sw_if_index = (u32) ~ 0;
+ vnet_main_t *vnm = vnet_get_main ();
+ vlib_main_t *vm = vlib_get_main ();
+
+ rv = vhost_user_create_if (vnm, vm, (char *) mp->sock_filename,
+ mp->is_server, &sw_if_index, (u64) ~ 0,
+ mp->renumber, ntohl (mp->custom_dev_instance),
+ (mp->use_custom_mac) ? mp->mac_address : NULL);
+
+ /* Remember an interface tag for the new interface */
+ if (rv == 0)
+ {
+ /* If a tag was supplied... */
+ if (mp->tag[0])
+ {
+ /* Make sure it's a proper C-string */
+ mp->tag[ARRAY_LEN (mp->tag) - 1] = 0;
+ u8 *tag = format (0, "%s%c", mp->tag, 0);
+ vnet_set_sw_interface_tag (vnm, tag, sw_if_index);
+ }
+ }
+
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_CREATE_VHOST_USER_IF_REPLY,
+ ({
+ rmp->sw_if_index = ntohl (sw_if_index);
+ }));
+ /* *INDENT-ON* */
+}
+
+static void
+vl_api_modify_vhost_user_if_t_handler (vl_api_modify_vhost_user_if_t * mp)
+{
+ int rv = 0;
+ vl_api_modify_vhost_user_if_reply_t *rmp;
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+
+ vnet_main_t *vnm = vnet_get_main ();
+ vlib_main_t *vm = vlib_get_main ();
+
+ rv = vhost_user_modify_if (vnm, vm, (char *) mp->sock_filename,
+ mp->is_server, sw_if_index, (u64) ~ 0,
+ mp->renumber, ntohl (mp->custom_dev_instance));
+
+ REPLY_MACRO (VL_API_MODIFY_VHOST_USER_IF_REPLY);
+}
+
+static void
+vl_api_delete_vhost_user_if_t_handler (vl_api_delete_vhost_user_if_t * mp)
+{
+ int rv = 0;
+ vl_api_delete_vhost_user_if_reply_t *rmp;
+ vpe_api_main_t *vam = &vpe_api_main;
+ u32 sw_if_index = ntohl (mp->sw_if_index);
+
+ vnet_main_t *vnm = vnet_get_main ();
+ vlib_main_t *vm = vlib_get_main ();
+
+ rv = vhost_user_delete_if (vnm, vm, sw_if_index);
+
+ REPLY_MACRO (VL_API_DELETE_VHOST_USER_IF_REPLY);
+ if (!rv)
+ {
+ unix_shared_memory_queue_t *q =
+ vl_api_client_index_to_input_queue (mp->client_index);
+ if (!q)
+ return;
+
+ vnet_clear_sw_interface_tag (vnm, sw_if_index);
+ send_sw_interface_event_deleted (vam, q, sw_if_index);
+ }
+}
+
+static void
+send_sw_interface_vhost_user_details (vpe_api_main_t * am,
+ unix_shared_memory_queue_t * q,
+ vhost_user_intf_details_t * vui,
+ u32 context)
+{
+ vl_api_sw_interface_vhost_user_details_t *mp;
+
+ mp = vl_msg_api_alloc (sizeof (*mp));
+ memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_SW_INTERFACE_VHOST_USER_DETAILS);
+ mp->sw_if_index = ntohl (vui->sw_if_index);
+ mp->virtio_net_hdr_sz = ntohl (vui->virtio_net_hdr_sz);
+ mp->features = clib_net_to_host_u64 (vui->features);
+ mp->is_server = vui->is_server;
+ mp->num_regions = ntohl (vui->num_regions);
+ mp->sock_errno = ntohl (vui->sock_errno);
+ mp->context = context;
+
+ strncpy ((char *) mp->sock_filename,
+ (char *) vui->sock_filename, ARRAY_LEN (mp->sock_filename) - 1);
+ strncpy ((char *) mp->interface_name,
+ (char *) vui->if_name, ARRAY_LEN (mp->interface_name) - 1);
+
+ vl_msg_api_send_shmem (q, (u8 *) & mp);
+}
+
+static void
+ vl_api_sw_interface_vhost_user_dump_t_handler
+ (vl_api_sw_interface_vhost_user_dump_t * mp)
+{
+ int rv = 0;
+ vpe_api_main_t *am = &vpe_api_main;
+ vnet_main_t *vnm = vnet_get_main ();
+ vlib_main_t *vm = vlib_get_main ();
+ vhost_user_intf_details_t *ifaces = NULL;
+ vhost_user_intf_details_t *vuid = NULL;
+ unix_shared_memory_queue_t *q;
+
+ q = vl_api_client_index_to_input_queue (mp->client_index);
+ if (q == 0)
+ return;
+
+ rv = vhost_user_dump_ifs (vnm, vm, &ifaces);
+ if (rv)
+ return;
+
+ vec_foreach (vuid, ifaces)
+ {
+ send_sw_interface_vhost_user_details (am, q, vuid, mp->context);
+ }
+ vec_free (ifaces);
+}
+
+/*
+ * vhost-user_api_hookup
+ * Add vpe's API message handlers to the table.
+ * vlib has already mapped shared memory and
+ * added the client registration handlers.
+ * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process()
+ */
+#define vl_msg_name_crc_list
+#include <vnet/vnet_all_api_h.h>
+#undef vl_msg_name_crc_list
+
+static void
+setup_message_id_table (api_main_t * am)
+{
+#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
+ foreach_vl_msg_name_crc_vhost_user;
+#undef _
+}
+
+static clib_error_t *
+vhost_user_api_hookup (vlib_main_t * vm)
+{
+ api_main_t *am = &api_main;
+
+#define _(N,n) \
+ vl_msg_api_set_handlers(VL_API_##N, #n, \
+ vl_api_##n##_t_handler, \
+ vl_noop_handler, \
+ vl_api_##n##_t_endian, \
+ vl_api_##n##_t_print, \
+ sizeof(vl_api_##n##_t), 1);
+ foreach_vpe_api_msg;
+#undef _
+
+ /*
+ * Set up the (msg_name, crc, message-id) table
+ */
+ setup_message_id_table (am);
+
+ return 0;
+}
+
+VLIB_API_INIT_FUNCTION (vhost_user_api_hookup);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */