Diffstat (limited to 'vnet')
-rw-r--r--  vnet/vnet/devices/af_packet/af_packet.c  |  177
-rw-r--r--  vnet/vnet/devices/af_packet/af_packet.h  |   39
-rw-r--r--  vnet/vnet/devices/af_packet/cli.c        |   59
-rw-r--r--  vnet/vnet/devices/af_packet/device.c     |  135
-rw-r--r--  vnet/vnet/devices/af_packet/node.c       |  144
-rw-r--r--  vnet/vnet/devices/netmap/cli.c           |   63
-rw-r--r--  vnet/vnet/devices/netmap/device.c        |  133
-rw-r--r--  vnet/vnet/devices/netmap/netmap.c        |  187
-rw-r--r--  vnet/vnet/devices/netmap/netmap.h        |   63
-rw-r--r--  vnet/vnet/devices/netmap/node.c          |  177
-rw-r--r--  vnet/vnet/devices/nic/ixge.c             | 1568
-rw-r--r--  vnet/vnet/devices/nic/ixge.h             |  510
-rw-r--r--  vnet/vnet/devices/nic/sfp.c              |   56
-rw-r--r--  vnet/vnet/devices/nic/sfp.h              |   19
-rw-r--r--  vnet/vnet/devices/ssvm/node.c            |  324
-rw-r--r--  vnet/vnet/devices/ssvm/ssvm_eth.c        |  351
-rw-r--r--  vnet/vnet/devices/ssvm/ssvm_eth.h        |   56
-rw-r--r--  vnet/vnet/devices/virtio/vhost-user.c    | 2391
-rw-r--r--  vnet/vnet/devices/virtio/vhost-user.h    |  169
19 files changed, 3646 insertions(+), 2975 deletions(-)
diff --git a/vnet/vnet/devices/af_packet/af_packet.c b/vnet/vnet/devices/af_packet/af_packet.c
index 91f3c22d4f2..cb5f88a2cf4 100644
--- a/vnet/vnet/devices/af_packet/af_packet.c
+++ b/vnet/vnet/devices/af_packet/af_packet.c
@@ -52,24 +52,27 @@
#endif
/*defined in net/if.h but clashes with dpdk headers */
-unsigned int if_nametoindex(const char *ifname);
+unsigned int if_nametoindex (const char *ifname);
typedef struct tpacket_req tpacket_req_t;
static u32
-af_packet_eth_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hi, u32 flags)
+af_packet_eth_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hi,
+ u32 flags)
{
/* nothing for now */
return 0;
}
-static clib_error_t * af_packet_fd_read_ready (unix_file_t * uf)
+static clib_error_t *
+af_packet_fd_read_ready (unix_file_t * uf)
{
- vlib_main_t * vm = vlib_get_main();
- af_packet_main_t * apm = &af_packet_main;
+ vlib_main_t *vm = vlib_get_main ();
+ af_packet_main_t *apm = &af_packet_main;
u32 idx = uf->private_data;
- apm->pending_input_bitmap = clib_bitmap_set (apm->pending_input_bitmap, idx, 1);
+ apm->pending_input_bitmap =
+ clib_bitmap_set (apm->pending_input_bitmap, idx, 1);
/* Schedule the rx node */
vlib_node_set_interrupt_pending (vm, af_packet_input_node.index);
@@ -78,78 +81,84 @@ static clib_error_t * af_packet_fd_read_ready (unix_file_t * uf)
}
static int
-create_packet_v2_sock(u8 * name, tpacket_req_t * rx_req, tpacket_req_t * tx_req,
- int *fd, u8 ** ring)
+create_packet_v2_sock (u8 * name, tpacket_req_t * rx_req,
+ tpacket_req_t * tx_req, int *fd, u8 ** ring)
{
int ret, err;
struct sockaddr_ll sll;
uint host_if_index;
int ver = TPACKET_V2;
- socklen_t req_sz = sizeof(struct tpacket_req);
+ socklen_t req_sz = sizeof (struct tpacket_req);
u32 ring_sz = rx_req->tp_block_size * rx_req->tp_block_nr +
- tx_req->tp_block_size * tx_req->tp_block_nr;
+ tx_req->tp_block_size * tx_req->tp_block_nr;
- host_if_index = if_nametoindex((const char *) name);
+ host_if_index = if_nametoindex ((const char *) name);
if (!host_if_index)
{
- DBG_SOCK("Wrong host interface name");
+ DBG_SOCK ("Wrong host interface name");
ret = VNET_API_ERROR_INVALID_INTERFACE;
goto error;
}
- if ((*fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL))) < 0)
+ if ((*fd = socket (AF_PACKET, SOCK_RAW, htons (ETH_P_ALL))) < 0)
{
- DBG_SOCK("Failed to create socket");
+ DBG_SOCK ("Failed to create socket");
ret = VNET_API_ERROR_SYSCALL_ERROR_1;
goto error;
}
- if ((err = setsockopt(*fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver))) < 0)
+ if ((err =
+ setsockopt (*fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof (ver))) < 0)
{
- DBG_SOCK("Failed to set rx packet interface version");
+ DBG_SOCK ("Failed to set rx packet interface version");
ret = VNET_API_ERROR_SYSCALL_ERROR_1;
goto error;
}
int opt = 1;
- if ((err = setsockopt(*fd, SOL_PACKET, PACKET_LOSS, &opt, sizeof(opt))) < 0)
+ if ((err =
+ setsockopt (*fd, SOL_PACKET, PACKET_LOSS, &opt, sizeof (opt))) < 0)
{
- DBG_SOCK("Failed to set packet tx ring error handling option");
+ DBG_SOCK ("Failed to set packet tx ring error handling option");
ret = VNET_API_ERROR_SYSCALL_ERROR_1;
goto error;
}
- if ((err = setsockopt(*fd, SOL_PACKET, PACKET_RX_RING, rx_req, req_sz)) < 0)
+ if ((err =
+ setsockopt (*fd, SOL_PACKET, PACKET_RX_RING, rx_req, req_sz)) < 0)
{
- DBG_SOCK("Failed to set packet rx ring options");
+ DBG_SOCK ("Failed to set packet rx ring options");
ret = VNET_API_ERROR_SYSCALL_ERROR_1;
goto error;
}
- if ((err = setsockopt(*fd, SOL_PACKET, PACKET_TX_RING, tx_req, req_sz)) < 0)
+ if ((err =
+ setsockopt (*fd, SOL_PACKET, PACKET_TX_RING, tx_req, req_sz)) < 0)
{
- DBG_SOCK("Failed to set packet rx ring options");
+ DBG_SOCK ("Failed to set packet rx ring options");
ret = VNET_API_ERROR_SYSCALL_ERROR_1;
goto error;
}
- *ring = mmap(NULL, ring_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED, *fd, 0);
+ *ring =
+ mmap (NULL, ring_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED, *fd,
+ 0);
if (*ring == MAP_FAILED)
{
- DBG_SOCK("mmap failure");
+ DBG_SOCK ("mmap failure");
ret = VNET_API_ERROR_SYSCALL_ERROR_1;
goto error;
}
- memset(&sll, 0, sizeof(sll));
+ memset (&sll, 0, sizeof (sll));
sll.sll_family = PF_PACKET;
- sll.sll_protocol = htons(ETH_P_ALL);
+ sll.sll_protocol = htons (ETH_P_ALL);
sll.sll_ifindex = host_if_index;
- if ((err = bind(*fd, (struct sockaddr *) &sll, sizeof(sll))) < 0)
+ if ((err = bind (*fd, (struct sockaddr *) &sll, sizeof (sll))) < 0)
{
- DBG_SOCK("Failed to bind rx packet socket (error %d)", err);
+ DBG_SOCK ("Failed to bind rx packet socket (error %d)", err);
ret = VNET_API_ERROR_SYSCALL_ERROR_1;
goto error;
}
@@ -157,25 +166,26 @@ create_packet_v2_sock(u8 * name, tpacket_req_t * rx_req, tpacket_req_t * tx_req,
return 0;
error:
if (*fd >= 0)
- close(*fd);
+ close (*fd);
*fd = -1;
return ret;
}
int
-af_packet_create_if(vlib_main_t * vm, u8 * host_if_name, u8 * hw_addr_set, u32 *sw_if_index)
+af_packet_create_if (vlib_main_t * vm, u8 * host_if_name, u8 * hw_addr_set,
+ u32 * sw_if_index)
{
- af_packet_main_t * apm = &af_packet_main;
+ af_packet_main_t *apm = &af_packet_main;
int ret, fd = -1;
- struct tpacket_req * rx_req = 0;
- struct tpacket_req * tx_req = 0;
- u8 * ring = 0;
- af_packet_if_t * apif = 0;
+ struct tpacket_req *rx_req = 0;
+ struct tpacket_req *tx_req = 0;
+ u8 *ring = 0;
+ af_packet_if_t *apif = 0;
u8 hw_addr[6];
- clib_error_t * error;
- vnet_sw_interface_t * sw;
- vnet_main_t *vnm = vnet_get_main();
- uword * p;
+ clib_error_t *error;
+ vnet_sw_interface_t *sw;
+ vnet_main_t *vnm = vnet_get_main ();
+ uword *p;
uword if_index;
p = mhash_get (&apm->if_index_by_host_if_name, host_if_name);
@@ -184,19 +194,19 @@ af_packet_create_if(vlib_main_t * vm, u8 * host_if_name, u8 * hw_addr_set, u32 *
return VNET_API_ERROR_SUBIF_ALREADY_EXISTS;
}
- vec_validate(rx_req, 0);
+ vec_validate (rx_req, 0);
rx_req->tp_block_size = AF_PACKET_RX_BLOCK_SIZE;
rx_req->tp_frame_size = AF_PACKET_RX_FRAME_SIZE;
rx_req->tp_block_nr = AF_PACKET_RX_BLOCK_NR;
rx_req->tp_frame_nr = AF_PACKET_RX_FRAME_NR;
- vec_validate(tx_req, 0);
+ vec_validate (tx_req, 0);
tx_req->tp_block_size = AF_PACKET_TX_BLOCK_SIZE;
tx_req->tp_frame_size = AF_PACKET_TX_FRAME_SIZE;
tx_req->tp_block_nr = AF_PACKET_TX_BLOCK_NR;
tx_req->tp_frame_nr = AF_PACKET_TX_FRAME_NR;
- ret = create_packet_v2_sock(host_if_name, rx_req, tx_req, &fd, &ring);
+ ret = create_packet_v2_sock (host_if_name, rx_req, tx_req, &fd, &ring);
if (ret != 0)
goto error;
@@ -216,7 +226,7 @@ af_packet_create_if(vlib_main_t * vm, u8 * host_if_name, u8 * hw_addr_set, u32 *
apif->next_rx_frame = 0;
{
- unix_file_t template = {0};
+ unix_file_t template = { 0 };
template.read_function = af_packet_fd_read_ready;
template.file_descriptor = fd;
template.private_data = if_index;
@@ -226,27 +236,27 @@ af_packet_create_if(vlib_main_t * vm, u8 * host_if_name, u8 * hw_addr_set, u32 *
/*use configured or generate random MAC address */
if (hw_addr_set)
- clib_memcpy(hw_addr, hw_addr_set, 6);
+ clib_memcpy (hw_addr, hw_addr_set, 6);
else
{
- f64 now = vlib_time_now(vm);
+ f64 now = vlib_time_now (vm);
u32 rnd;
rnd = (u32) (now * 1e6);
rnd = random_u32 (&rnd);
- clib_memcpy (hw_addr+2, &rnd, sizeof(rnd));
+ clib_memcpy (hw_addr + 2, &rnd, sizeof (rnd));
hw_addr[0] = 2;
hw_addr[1] = 0xfe;
}
- error = ethernet_register_interface(vnm, af_packet_device_class.index,
- if_index, hw_addr, &apif->hw_if_index,
- af_packet_eth_flag_change);
+ error = ethernet_register_interface (vnm, af_packet_device_class.index,
+ if_index, hw_addr, &apif->hw_if_index,
+ af_packet_eth_flag_change);
if (error)
{
- memset(apif, 0, sizeof(*apif));
- pool_put(apm->interfaces, apif);
+ memset (apif, 0, sizeof (*apif));
+ pool_put (apm->interfaces, apif);
clib_error_report (error);
ret = VNET_API_ERROR_SYSCALL_ERROR_1;
goto error;
@@ -264,60 +274,63 @@ af_packet_create_if(vlib_main_t * vm, u8 * host_if_name, u8 * hw_addr_set, u32 *
return 0;
error:
- vec_free(host_if_name);
- vec_free(rx_req);
- vec_free(tx_req);
+ vec_free (host_if_name);
+ vec_free (rx_req);
+ vec_free (tx_req);
return ret;
}
int
-af_packet_delete_if(vlib_main_t *vm, u8 *host_if_name)
+af_packet_delete_if (vlib_main_t * vm, u8 * host_if_name)
{
- vnet_main_t *vnm = vnet_get_main();
+ vnet_main_t *vnm = vnet_get_main ();
af_packet_main_t *apm = &af_packet_main;
af_packet_if_t *apif;
uword *p;
uword if_index;
u32 ring_sz;
- p = mhash_get(&apm->if_index_by_host_if_name, host_if_name);
- if (p == NULL) {
- clib_warning("Host interface %s does not exist", host_if_name);
- return VNET_API_ERROR_SYSCALL_ERROR_1;
- }
- apif = pool_elt_at_index(apm->interfaces, p[0]);
+ p = mhash_get (&apm->if_index_by_host_if_name, host_if_name);
+ if (p == NULL)
+ {
+ clib_warning ("Host interface %s does not exist", host_if_name);
+ return VNET_API_ERROR_SYSCALL_ERROR_1;
+ }
+ apif = pool_elt_at_index (apm->interfaces, p[0]);
if_index = apif - apm->interfaces;
/* bring down the interface */
- vnet_hw_interface_set_flags(vnm, apif->hw_if_index, 0);
+ vnet_hw_interface_set_flags (vnm, apif->hw_if_index, 0);
/* clean up */
- if (apif->unix_file_index != ~0) {
- unix_file_del(&unix_main, unix_main.file_pool + apif->unix_file_index);
- apif->unix_file_index = ~0;
- }
+ if (apif->unix_file_index != ~0)
+ {
+ unix_file_del (&unix_main, unix_main.file_pool + apif->unix_file_index);
+ apif->unix_file_index = ~0;
+ }
ring_sz = apif->rx_req->tp_block_size * apif->rx_req->tp_block_nr +
- apif->tx_req->tp_block_size * apif->tx_req->tp_block_nr;
- if (munmap(apif->rx_ring, ring_sz))
- clib_warning("Host interface %s could not free rx/tx ring", host_if_name);
+ apif->tx_req->tp_block_size * apif->tx_req->tp_block_nr;
+ if (munmap (apif->rx_ring, ring_sz))
+ clib_warning ("Host interface %s could not free rx/tx ring",
+ host_if_name);
apif->rx_ring = NULL;
apif->tx_ring = NULL;
- close(apif->fd);
+ close (apif->fd);
apif->fd = -1;
- vec_free(apif->rx_req);
+ vec_free (apif->rx_req);
apif->rx_req = NULL;
- vec_free(apif->tx_req);
+ vec_free (apif->tx_req);
apif->tx_req = NULL;
- vec_free(apif->host_if_name);
+ vec_free (apif->host_if_name);
apif->host_if_name = NULL;
- mhash_unset(&apm->if_index_by_host_if_name, host_if_name, &if_index);
+ mhash_unset (&apm->if_index_by_host_if_name, host_if_name, &if_index);
- ethernet_delete_interface(vnm, apif->hw_if_index);
+ ethernet_delete_interface (vnm, apif->hw_if_index);
- pool_put(apm->interfaces, apif);
+ pool_put (apm->interfaces, apif);
return 0;
}
@@ -325,7 +338,7 @@ af_packet_delete_if(vlib_main_t *vm, u8 *host_if_name)
static clib_error_t *
af_packet_init (vlib_main_t * vm)
{
- af_packet_main_t * apm = &af_packet_main;
+ af_packet_main_t *apm = &af_packet_main;
memset (apm, 0, sizeof (af_packet_main_t));
@@ -335,3 +348,11 @@ af_packet_init (vlib_main_t * vm)
}
VLIB_INIT_FUNCTION (af_packet_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
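
For context, create_packet_v2_sock () above follows the standard Linux TPACKET_V2 recipe: open a raw AF_PACKET socket, select the ring version, request kernel rx/tx rings, mmap them, and bind to the interface. A minimal standalone sketch of that recipe follows; the helper name open_tpacket_v2 and the single-ring sizing are illustrative assumptions, not part of the patch.

/* Hedged sketch of the TPACKET_V2 setup performed by
 * create_packet_v2_sock (); helper name and sizing are illustrative. */
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <net/if.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <unistd.h>

static int
open_tpacket_v2 (const char *ifname, struct tpacket_req *req, uint8_t ** ring)
{
  int fd, ver = TPACKET_V2;
  struct sockaddr_ll sll;

  if ((fd = socket (AF_PACKET, SOCK_RAW, htons (ETH_P_ALL))) < 0)
    return -1;
  if (setsockopt (fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof (ver)) < 0)
    goto fail;
  if (setsockopt (fd, SOL_PACKET, PACKET_RX_RING, req, sizeof (*req)) < 0)
    goto fail;
  /* map the kernel-owned ring into user space */
  *ring = mmap (NULL, req->tp_block_size * req->tp_block_nr,
		PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED, fd, 0);
  if (*ring == MAP_FAILED)
    goto fail;
  memset (&sll, 0, sizeof (sll));
  sll.sll_family = PF_PACKET;
  sll.sll_protocol = htons (ETH_P_ALL);
  sll.sll_ifindex = if_nametoindex (ifname);
  if (bind (fd, (struct sockaddr *) &sll, sizeof (sll)) < 0)
    goto fail;
  return fd;
fail:
  close (fd);
  return -1;
}
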
diff --git a/vnet/vnet/devices/af_packet/af_packet.h b/vnet/vnet/devices/af_packet/af_packet.h
index 258700b2b75..1896ddf5dcb 100644
--- a/vnet/vnet/devices/af_packet/af_packet.h
+++ b/vnet/vnet/devices/af_packet/af_packet.h
@@ -17,14 +17,15 @@
*------------------------------------------------------------------
*/
-typedef struct {
- CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
- u8 * host_if_name;
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ u8 *host_if_name;
int fd;
- struct tpacket_req * rx_req;
- struct tpacket_req * tx_req;
- u8 * rx_ring;
- u8 * tx_ring;
+ struct tpacket_req *rx_req;
+ struct tpacket_req *tx_req;
+ u8 *rx_ring;
+ u8 *tx_ring;
u32 hw_if_index;
u32 sw_if_index;
u32 unix_file_index;
@@ -36,15 +37,16 @@ typedef struct {
u8 is_admin_up;
} af_packet_if_t;
-typedef struct {
- CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
- af_packet_if_t * interfaces;
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ af_packet_if_t *interfaces;
/* bitmap of pending rx interfaces */
- uword * pending_input_bitmap;
+ uword *pending_input_bitmap;
/* rx buffer cache */
- u32 * rx_buffers;
+ u32 *rx_buffers;
/* hash of host interface names */
mhash_t if_index_by_host_if_name;
@@ -54,5 +56,14 @@ af_packet_main_t af_packet_main;
extern vnet_device_class_t af_packet_device_class;
extern vlib_node_registration_t af_packet_input_node;
-int af_packet_create_if(vlib_main_t * vm, u8 * host_if_name, u8 * hw_addr_set, u32 *sw_if_index);
-int af_packet_delete_if(vlib_main_t * vm, u8 * host_if_name);
+int af_packet_create_if (vlib_main_t * vm, u8 * host_if_name,
+ u8 * hw_addr_set, u32 * sw_if_index);
+int af_packet_delete_if (vlib_main_t * vm, u8 * host_if_name);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
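
A short usage sketch for the API declared above, assuming the caller already holds a vlib_main_t and builds the interface name with the usual format () idiom for a NUL-terminated vector; the name "eth0" is an assumption, not from the patch.

  u32 sw_if_index;
  u8 *name = format (0, "eth0%c", 0);	/* NUL-terminated u8 vector; "eth0" is illustrative */

  if (af_packet_create_if (vm, name, /* hw_addr_set */ 0, &sw_if_index) == 0)
    {
      /* ... interface is available at sw_if_index ... */
      af_packet_delete_if (vm, name);
    }
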
diff --git a/vnet/vnet/devices/af_packet/cli.c b/vnet/vnet/devices/af_packet/cli.c
index 3153efe8baa..87ec5182c74 100644
--- a/vnet/vnet/devices/af_packet/cli.c
+++ b/vnet/vnet/devices/af_packet/cli.c
@@ -36,85 +36,94 @@ static clib_error_t *
af_packet_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
vlib_cli_command_t * cmd)
{
- unformat_input_t _line_input, * line_input = &_line_input;
- u8 * host_if_name = NULL;
- u8 hwaddr [6];
- u8 * hw_addr_ptr = 0;
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 *host_if_name = NULL;
+ u8 hwaddr[6];
+ u8 *hw_addr_ptr = 0;
u32 sw_if_index;
int r;
/* Get a line of input. */
- if (! unformat_user (input, unformat_line_input, line_input))
+ if (!unformat_user (input, unformat_line_input, line_input))
return 0;
while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (line_input, "name %s", &host_if_name))
;
- else if (unformat (line_input, "hw-addr %U", unformat_ethernet_address, hwaddr))
+ else
+ if (unformat
+ (line_input, "hw-addr %U", unformat_ethernet_address, hwaddr))
hw_addr_ptr = hwaddr;
else
- return clib_error_return (0, "unknown input `%U'", format_unformat_error, input);
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
}
unformat_free (line_input);
if (host_if_name == NULL)
- return clib_error_return (0, "missing host interface name");
+ return clib_error_return (0, "missing host interface name");
- r = af_packet_create_if(vm, host_if_name, hw_addr_ptr, &sw_if_index);
+ r = af_packet_create_if (vm, host_if_name, hw_addr_ptr, &sw_if_index);
if (r == VNET_API_ERROR_SYSCALL_ERROR_1)
- return clib_error_return(0, "%s (errno %d)", strerror (errno), errno);
+ return clib_error_return (0, "%s (errno %d)", strerror (errno), errno);
if (r == VNET_API_ERROR_INVALID_INTERFACE)
- return clib_error_return(0, "Invalid interface name");
+ return clib_error_return (0, "Invalid interface name");
if (r == VNET_API_ERROR_SUBIF_ALREADY_EXISTS)
- return clib_error_return(0, "Interface elready exists");
+ return clib_error_return (0, "Interface elready exists");
- vlib_cli_output(vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main(), sw_if_index);
+ vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main (),
+ sw_if_index);
return 0;
}
+/* *INDENT-OFF* */
VLIB_CLI_COMMAND (af_packet_create_command, static) = {
.path = "create host-interface",
.short_help = "create host-interface name <interface name> [hw-addr <mac>]",
.function = af_packet_create_command_fn,
};
+/* *INDENT-ON* */
static clib_error_t *
af_packet_delete_command_fn (vlib_main_t * vm, unformat_input_t * input,
- vlib_cli_command_t * cmd)
+ vlib_cli_command_t * cmd)
{
- unformat_input_t _line_input, * line_input = &_line_input;
- u8 * host_if_name = NULL;
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 *host_if_name = NULL;
/* Get a line of input. */
- if (! unformat_user (input, unformat_line_input, line_input))
+ if (!unformat_user (input, unformat_line_input, line_input))
return 0;
while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (line_input, "name %s", &host_if_name))
- ;
+ ;
else
- return clib_error_return (0, "unknown input `%U'", format_unformat_error, input);
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
}
unformat_free (line_input);
if (host_if_name == NULL)
- return clib_error_return (0, "missing host interface name");
+ return clib_error_return (0, "missing host interface name");
- af_packet_delete_if(vm, host_if_name);
+ af_packet_delete_if (vm, host_if_name);
return 0;
}
+/* *INDENT-OFF* */
VLIB_CLI_COMMAND (af_packet_delete_command, static) = {
.path = "delete host-interface",
.short_help = "delete host-interface name <interface name>",
.function = af_packet_delete_command_fn,
};
+/* *INDENT-ON* */
clib_error_t *
af_packet_cli_init (vlib_main_t * vm)
@@ -123,3 +132,11 @@ af_packet_cli_init (vlib_main_t * vm)
}
VLIB_INIT_FUNCTION (af_packet_cli_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vnet/vnet/devices/af_packet/device.c b/vnet/vnet/devices/af_packet/device.c
index f572632ce37..1a46caa7810 100644
--- a/vnet/vnet/devices/af_packet/device.c
+++ b/vnet/vnet/devices/af_packet/device.c
@@ -32,37 +32,41 @@ _(TXRING_EAGAIN, "tx sendto temporary failure") \
_(TXRING_FATAL, "tx sendto fatal failure") \
_(TXRING_OVERRUN, "tx ring overrun")
-typedef enum {
+typedef enum
+{
#define _(f,s) AF_PACKET_TX_ERROR_##f,
foreach_af_packet_tx_func_error
#undef _
- AF_PACKET_TX_N_ERROR,
+ AF_PACKET_TX_N_ERROR,
} af_packet_tx_func_error_t;
-static char * af_packet_tx_func_error_strings[] = {
+static char *af_packet_tx_func_error_strings[] = {
#define _(n,s) s,
- foreach_af_packet_tx_func_error
+ foreach_af_packet_tx_func_error
#undef _
};
-static u8 * format_af_packet_device_name (u8 * s, va_list * args)
+static u8 *
+format_af_packet_device_name (u8 * s, va_list * args)
{
u32 i = va_arg (*args, u32);
- af_packet_main_t * apm = &af_packet_main;
- af_packet_if_t * apif = pool_elt_at_index (apm->interfaces, i);
+ af_packet_main_t *apm = &af_packet_main;
+ af_packet_if_t *apif = pool_elt_at_index (apm->interfaces, i);
s = format (s, "host-%s", apif->host_if_name);
return s;
}
-static u8 * format_af_packet_device (u8 * s, va_list * args)
+static u8 *
+format_af_packet_device (u8 * s, va_list * args)
{
s = format (s, "Linux PACKET socket interface");
return s;
}
-static u8 * format_af_packet_tx_trace (u8 * s, va_list * args)
+static u8 *
+format_af_packet_tx_trace (u8 * s, va_list * args)
{
s = format (s, "Unimplemented...");
return s;
@@ -70,36 +74,37 @@ static u8 * format_af_packet_tx_trace (u8 * s, va_list * args)
static uword
af_packet_interface_tx (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
{
- af_packet_main_t * apm = &af_packet_main;
- u32 * buffers = vlib_frame_args (frame);
+ af_packet_main_t *apm = &af_packet_main;
+ u32 *buffers = vlib_frame_args (frame);
u32 n_left = frame->n_vectors;
u32 n_sent = 0;
- vnet_interface_output_runtime_t * rd = (void *) node->runtime_data;
- af_packet_if_t * apif = pool_elt_at_index (apm->interfaces, rd->dev_instance);
+ vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
+ af_packet_if_t *apif =
+ pool_elt_at_index (apm->interfaces, rd->dev_instance);
int block = 0;
u32 block_size = apif->tx_req->tp_block_size;
u32 frame_size = apif->tx_req->tp_frame_size;
u32 frame_num = apif->tx_req->tp_frame_nr;
- u8 * block_start = apif->tx_ring + block * block_size;
+ u8 *block_start = apif->tx_ring + block * block_size;
u32 tx_frame = apif->next_tx_frame;
- struct tpacket2_hdr * tph;
+ struct tpacket2_hdr *tph;
u32 frame_not_ready = 0;
- while(n_left > 0)
+ while (n_left > 0)
{
u32 len;
u32 offset = 0;
- vlib_buffer_t * b0;
+ vlib_buffer_t *b0;
n_left--;
u32 bi = buffers[0];
buffers++;
tph = (struct tpacket2_hdr *) (block_start + tx_frame * frame_size);
- if (PREDICT_FALSE(tph->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING)))
+ if (PREDICT_FALSE
+ (tph->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING)))
{
frame_not_ready++;
goto next;
@@ -109,8 +114,9 @@ af_packet_interface_tx (vlib_main_t * vm,
{
b0 = vlib_get_buffer (vm, bi);
len = b0->current_length;
- clib_memcpy((u8 *) tph + TPACKET_ALIGN(sizeof(struct tpacket2_hdr)) + offset,
- vlib_buffer_get_current(b0), len);
+ clib_memcpy ((u8 *) tph +
+ TPACKET_ALIGN (sizeof (struct tpacket2_hdr)) + offset,
+ vlib_buffer_get_current (b0), len);
offset += len;
}
while ((bi = b0->next_buffer));
@@ -118,54 +124,54 @@ af_packet_interface_tx (vlib_main_t * vm,
tph->tp_len = tph->tp_snaplen = offset;
tph->tp_status = TP_STATUS_SEND_REQUEST;
n_sent++;
-next:
+ next:
/* check if we've exhausted the ring */
- if (PREDICT_FALSE(frame_not_ready + n_sent == frame_num))
- break;
+ if (PREDICT_FALSE (frame_not_ready + n_sent == frame_num))
+ break;
tx_frame = (tx_frame + 1) % frame_num;
}
- CLIB_MEMORY_BARRIER();
+ CLIB_MEMORY_BARRIER ();
- if (PREDICT_TRUE(n_sent))
+ if (PREDICT_TRUE (n_sent))
{
apif->next_tx_frame = tx_frame;
- if (PREDICT_FALSE(sendto(apif->fd, NULL, 0,
- MSG_DONTWAIT, NULL, 0) == -1))
- {
- /* Uh-oh, drop & move on, but count whether it was fatal or not.
- * Note that we have no reliable way to properly determine the
- * disposition of the packets we just enqueued for delivery.
- */
- vlib_error_count (vm, node->node_index,
- unix_error_is_fatal(errno) ?
- AF_PACKET_TX_ERROR_TXRING_FATAL :
- AF_PACKET_TX_ERROR_TXRING_EAGAIN,
- n_sent);
- }
+ if (PREDICT_FALSE (sendto (apif->fd, NULL, 0,
+ MSG_DONTWAIT, NULL, 0) == -1))
+ {
+ /* Uh-oh, drop & move on, but count whether it was fatal or not.
+ * Note that we have no reliable way to properly determine the
+ * disposition of the packets we just enqueued for delivery.
+ */
+ vlib_error_count (vm, node->node_index,
+ unix_error_is_fatal (errno) ?
+ AF_PACKET_TX_ERROR_TXRING_FATAL :
+ AF_PACKET_TX_ERROR_TXRING_EAGAIN, n_sent);
+ }
}
- if (PREDICT_FALSE(frame_not_ready))
- vlib_error_count (vm, node->node_index, AF_PACKET_TX_ERROR_FRAME_NOT_READY,
- frame_not_ready);
+ if (PREDICT_FALSE (frame_not_ready))
+ vlib_error_count (vm, node->node_index,
+ AF_PACKET_TX_ERROR_FRAME_NOT_READY, frame_not_ready);
- if (PREDICT_FALSE(frame_not_ready + n_sent == frame_num))
+ if (PREDICT_FALSE (frame_not_ready + n_sent == frame_num))
vlib_error_count (vm, node->node_index, AF_PACKET_TX_ERROR_TXRING_OVERRUN,
- n_left);
+ n_left);
vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
return frame->n_vectors;
}
static void
-af_packet_set_interface_next_node (vnet_main_t *vnm, u32 hw_if_index,
- u32 node_index)
+af_packet_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
+ u32 node_index)
{
- af_packet_main_t * apm = &af_packet_main;
+ af_packet_main_t *apm = &af_packet_main;
vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
- af_packet_if_t * apif = pool_elt_at_index (apm->interfaces, hw->dev_instance);
+ af_packet_if_t *apif =
+ pool_elt_at_index (apm->interfaces, hw->dev_instance);
/* Shut off redirection */
if (node_index == ~0)
@@ -175,20 +181,24 @@ af_packet_set_interface_next_node (vnet_main_t *vnm, u32 hw_if_index,
}
apif->per_interface_next_index =
- vlib_node_add_next (vlib_get_main(), af_packet_input_node.index, node_index);
+ vlib_node_add_next (vlib_get_main (), af_packet_input_node.index,
+ node_index);
}
-static void af_packet_clear_hw_interface_counters (u32 instance)
+static void
+af_packet_clear_hw_interface_counters (u32 instance)
{
/* Nothing for now */
}
static clib_error_t *
-af_packet_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
+af_packet_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
+ u32 flags)
{
- af_packet_main_t * apm = &af_packet_main;
+ af_packet_main_t *apm = &af_packet_main;
vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
- af_packet_if_t * apif = pool_elt_at_index (apm->interfaces, hw->dev_instance);
+ af_packet_if_t *apif =
+ pool_elt_at_index (apm->interfaces, hw->dev_instance);
u32 hw_flags;
apif->is_admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
@@ -198,21 +208,21 @@ af_packet_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags
else
hw_flags = 0;
- vnet_hw_interface_set_flags(vnm, hw_if_index, hw_flags);
+ vnet_hw_interface_set_flags (vnm, hw_if_index, hw_flags);
return 0;
}
static clib_error_t *
af_packet_subif_add_del_function (vnet_main_t * vnm,
- u32 hw_if_index,
- struct vnet_sw_interface_t * st,
- int is_add)
+ u32 hw_if_index,
+ struct vnet_sw_interface_t *st, int is_add)
{
/* Nothing for now */
return 0;
}
+/* *INDENT-OFF* */
VNET_DEVICE_CLASS (af_packet_device_class) = {
.name = "af-packet",
.tx_function = af_packet_interface_tx,
@@ -230,3 +240,12 @@ VNET_DEVICE_CLASS (af_packet_device_class) = {
VLIB_DEVICE_TX_FUNCTION_MULTIARCH (af_packet_device_class,
af_packet_interface_tx)
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
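
The tx function above marks each filled frame and then issues a zero-length sendto () to kick the kernel; condensed, the per-frame sequence is as sketched below, where tph, len, fd and handle_errno () are placeholders.

  tph->tp_len = tph->tp_snaplen = len;	/* frame filled in the tx ring */
  tph->tp_status = TP_STATUS_SEND_REQUEST;	/* hand it to the kernel */
  CLIB_MEMORY_BARRIER ();
  if (sendto (fd, NULL, 0, MSG_DONTWAIT, NULL, 0) == -1)
    handle_errno ();	/* EAGAIN-class errors are transient, others fatal */
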
diff --git a/vnet/vnet/devices/af_packet/node.c b/vnet/vnet/devices/af_packet/node.c
index c5daf64c72e..fa2b5ac1755 100644
--- a/vnet/vnet/devices/af_packet/node.c
+++ b/vnet/vnet/devices/af_packet/node.c
@@ -28,75 +28,80 @@
#define foreach_af_packet_input_error
-typedef enum {
+typedef enum
+{
#define _(f,s) AF_PACKET_INPUT_ERROR_##f,
foreach_af_packet_input_error
#undef _
- AF_PACKET_INPUT_N_ERROR,
+ AF_PACKET_INPUT_N_ERROR,
} af_packet_input_error_t;
-static char * af_packet_input_error_strings[] = {
+static char *af_packet_input_error_strings[] = {
#define _(n,s) s,
- foreach_af_packet_input_error
+ foreach_af_packet_input_error
#undef _
};
-enum {
+enum
+{
AF_PACKET_INPUT_NEXT_DROP,
AF_PACKET_INPUT_NEXT_ETHERNET_INPUT,
AF_PACKET_INPUT_N_NEXT,
};
-typedef struct {
+typedef struct
+{
u32 next_index;
u32 hw_if_index;
int block;
struct tpacket2_hdr tph;
} af_packet_input_trace_t;
-static u8 * format_af_packet_input_trace (u8 * s, va_list * args)
+static u8 *
+format_af_packet_input_trace (u8 * s, va_list * args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- af_packet_input_trace_t * t = va_arg (*args, af_packet_input_trace_t *);
+ af_packet_input_trace_t *t = va_arg (*args, af_packet_input_trace_t *);
uword indent = format_get_indent (s);
s = format (s, "af_packet: hw_if_index %d next-index %d",
t->hw_if_index, t->next_index);
- s = format (s, "\n%Utpacket2_hdr:\n%Ustatus 0x%x len %u snaplen %u mac %u net %u"
- "\n%Usec 0x%x nsec 0x%x vlan %U"
+ s =
+ format (s,
+ "\n%Utpacket2_hdr:\n%Ustatus 0x%x len %u snaplen %u mac %u net %u"
+ "\n%Usec 0x%x nsec 0x%x vlan %U"
#ifdef TP_STATUS_VLAN_TPID_VALID
- " vlan_tpid %u"
+ " vlan_tpid %u"
#endif
- ,
- format_white_space, indent + 2,
- format_white_space, indent + 4,
- t->tph.tp_status,
- t->tph.tp_len,
- t->tph.tp_snaplen,
- t->tph.tp_mac,
- t->tph.tp_net,
- format_white_space, indent + 4,
- t->tph.tp_sec,
- t->tph.tp_nsec,
- format_ethernet_vlan_tci, t->tph.tp_vlan_tci
+ ,
+ format_white_space, indent + 2,
+ format_white_space, indent + 4,
+ t->tph.tp_status,
+ t->tph.tp_len,
+ t->tph.tp_snaplen,
+ t->tph.tp_mac,
+ t->tph.tp_net,
+ format_white_space, indent + 4,
+ t->tph.tp_sec,
+ t->tph.tp_nsec, format_ethernet_vlan_tci, t->tph.tp_vlan_tci
#ifdef TP_STATUS_VLAN_TPID_VALID
- , t->tph.tp_vlan_tpid
+ , t->tph.tp_vlan_tpid
#endif
- );
+ );
return s;
}
always_inline void
-buffer_add_to_chain(vlib_main_t *vm, u32 bi, u32 first_bi, u32 prev_bi)
+buffer_add_to_chain (vlib_main_t * vm, u32 bi, u32 first_bi, u32 prev_bi)
{
- vlib_buffer_t * b = vlib_get_buffer (vm, bi);
- vlib_buffer_t * first_b = vlib_get_buffer (vm, first_bi);
- vlib_buffer_t * prev_b = vlib_get_buffer (vm, prev_bi);
+ vlib_buffer_t *b = vlib_get_buffer (vm, bi);
+ vlib_buffer_t *first_b = vlib_get_buffer (vm, first_bi);
+ vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_bi);
/* update first buffer */
- first_b->total_length_not_including_first_buffer += b->current_length;
+ first_b->total_length_not_including_first_buffer += b->current_length;
/* update previous buffer */
prev_b->next_buffer = bi;
@@ -106,9 +111,9 @@ buffer_add_to_chain(vlib_main_t *vm, u32 bi, u32 first_bi, u32 prev_bi)
b->next_buffer = 0;
#if DPDK > 0
- struct rte_mbuf * mbuf = rte_mbuf_from_vlib_buffer(b);
- struct rte_mbuf * first_mbuf = rte_mbuf_from_vlib_buffer(first_b);
- struct rte_mbuf * prev_mbuf = rte_mbuf_from_vlib_buffer(prev_b);
+ struct rte_mbuf *mbuf = rte_mbuf_from_vlib_buffer (b);
+ struct rte_mbuf *first_mbuf = rte_mbuf_from_vlib_buffer (first_b);
+ struct rte_mbuf *prev_mbuf = rte_mbuf_from_vlib_buffer (prev_b);
first_mbuf->nb_segs++;
prev_mbuf->next = mbuf;
mbuf->data_len = b->current_length;
@@ -118,11 +123,11 @@ buffer_add_to_chain(vlib_main_t *vm, u32 bi, u32 first_bi, u32 prev_bi)
}
always_inline uword
-af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame, u32 device_idx)
+af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame, u32 device_idx)
{
- af_packet_main_t * apm = &af_packet_main;
- af_packet_if_t * apif = pool_elt_at_index(apm->interfaces, device_idx);
+ af_packet_main_t *apm = &af_packet_main;
+ af_packet_if_t *apif = pool_elt_at_index (apm->interfaces, device_idx);
struct tpacket2_hdr *tph;
u32 next_index = AF_PACKET_INPUT_NEXT_ETHERNET_INPUT;
u32 block = 0;
@@ -130,24 +135,26 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
u32 n_free_bufs;
u32 n_rx_packets = 0;
u32 n_rx_bytes = 0;
- u32 * to_next = 0;
+ u32 *to_next = 0;
u32 block_size = apif->rx_req->tp_block_size;
u32 frame_size = apif->rx_req->tp_frame_size;
u32 frame_num = apif->rx_req->tp_frame_nr;
- u8 * block_start = apif->rx_ring + block * block_size;
+ u8 *block_start = apif->rx_ring + block * block_size;
uword n_trace = vlib_get_trace_count (vm, node);
u32 n_buffer_bytes = vlib_buffer_free_list_buffer_size (vm,
- VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+ VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
u32 min_bufs = apif->rx_req->tp_frame_size / n_buffer_bytes;
if (apif->per_interface_next_index != ~0)
- next_index = apif->per_interface_next_index;
+ next_index = apif->per_interface_next_index;
n_free_bufs = vec_len (apm->rx_buffers);
- if (PREDICT_FALSE(n_free_bufs < VLIB_FRAME_SIZE))
+ if (PREDICT_FALSE (n_free_bufs < VLIB_FRAME_SIZE))
{
- vec_validate(apm->rx_buffers, VLIB_FRAME_SIZE + n_free_bufs - 1);
- n_free_bufs += vlib_buffer_alloc(vm, &apm->rx_buffers[n_free_bufs], VLIB_FRAME_SIZE);
+ vec_validate (apm->rx_buffers, VLIB_FRAME_SIZE + n_free_bufs - 1);
+ n_free_bufs +=
+ vlib_buffer_alloc (vm, &apm->rx_buffers[n_free_bufs],
+ VLIB_FRAME_SIZE);
_vec_len (apm->rx_buffers) = n_free_bufs;
}
@@ -155,7 +162,7 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
tph = (struct tpacket2_hdr *) (block_start + rx_frame * frame_size);
while ((tph->tp_status & TP_STATUS_USER) && (n_free_bufs > min_bufs))
{
- vlib_buffer_t * b0, * first_b0 = 0;
+ vlib_buffer_t *b0, *first_b0 = 0;
u32 next0 = next_index;
u32 n_left_to_next;
@@ -178,9 +185,11 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
n_free_bufs--;
/* copy data */
- u32 bytes_to_copy = data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
+ u32 bytes_to_copy =
+ data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
b0->current_data = 0;
- clib_memcpy (vlib_buffer_get_current (b0), (u8 *) tph + tph->tp_mac + offset, bytes_to_copy);
+ clib_memcpy (vlib_buffer_get_current (b0),
+ (u8 *) tph + tph->tp_mac + offset, bytes_to_copy);
/* fill buffer header */
b0->current_length = bytes_to_copy;
@@ -188,19 +197,19 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
if (offset == 0)
{
#if DPDK > 0
- struct rte_mbuf * mb = rte_mbuf_from_vlib_buffer(b0);
+ struct rte_mbuf *mb = rte_mbuf_from_vlib_buffer (b0);
rte_pktmbuf_data_len (mb) = b0->current_length;
rte_pktmbuf_pkt_len (mb) = b0->current_length;
#endif
b0->total_length_not_including_first_buffer = 0;
b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
- vnet_buffer(b0)->sw_if_index[VLIB_RX] = apif->sw_if_index;
- vnet_buffer(b0)->sw_if_index[VLIB_TX] = (u32)~0;
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = apif->sw_if_index;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
first_bi0 = bi0;
- first_b0 = vlib_get_buffer(vm, first_bi0);
+ first_b0 = vlib_get_buffer (vm, first_bi0);
}
else
- buffer_add_to_chain(vm, bi0, first_bi0, prev_bi0);
+ buffer_add_to_chain (vm, bi0, first_bi0, prev_bi0);
offset += bytes_to_copy;
data_len -= bytes_to_copy;
@@ -212,22 +221,23 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
n_left_to_next--;
/* trace */
- VLIB_BUFFER_TRACE_TRAJECTORY_INIT(first_b0);
- if (PREDICT_FALSE(n_trace > 0))
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (first_b0);
+ if (PREDICT_FALSE (n_trace > 0))
{
af_packet_input_trace_t *tr;
- vlib_trace_buffer (vm, node, next0, first_b0, /* follow_chain */ 0);
+ vlib_trace_buffer (vm, node, next0, first_b0, /* follow_chain */
+ 0);
vlib_set_trace_count (vm, node, --n_trace);
tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
tr->next_index = next0;
tr->hw_if_index = apif->hw_if_index;
- clib_memcpy(&tr->tph, tph, sizeof(struct tpacket2_hdr));
+ clib_memcpy (&tr->tph, tph, sizeof (struct tpacket2_hdr));
}
/* enque and take next packet */
vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
n_left_to_next, first_bi0, next0);
- /* next packet */
+ /* next packet */
tph->tp_status = TP_STATUS_KERNEL;
rx_frame = (rx_frame + 1) % frame_num;
tph = (struct tpacket2_hdr *) (block_start + rx_frame * frame_size);
@@ -239,11 +249,9 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
apif->next_rx_frame = rx_frame;
vlib_increment_combined_counter
- (vnet_get_main()->interface_main.combined_sw_if_counters
+ (vnet_get_main ()->interface_main.combined_sw_if_counters
+ VNET_INTERFACE_COUNTER_RX,
- os_get_cpu_number(),
- apif->hw_if_index,
- n_rx_packets, n_rx_bytes);
+ os_get_cpu_number (), apif->hw_if_index, n_rx_packets, n_rx_bytes);
return n_rx_packets;
}
@@ -255,17 +263,20 @@ af_packet_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
int i;
u32 n_rx_packets = 0;
- af_packet_main_t * apm = &af_packet_main;
+ af_packet_main_t *apm = &af_packet_main;
+ /* *INDENT-OFF* */
clib_bitmap_foreach (i, apm->pending_input_bitmap,
({
clib_bitmap_set (apm->pending_input_bitmap, i, 0);
n_rx_packets += af_packet_device_input_fn(vm, node, frame, i);
}));
+ /* *INDENT-ON* */
return n_rx_packets;
}
+/* *INDENT-OFF* */
VLIB_REGISTER_NODE (af_packet_input_node) = {
.function = af_packet_input_fn,
.name = "af-packet-input",
@@ -283,4 +294,13 @@ VLIB_REGISTER_NODE (af_packet_input_node) = {
};
VLIB_NODE_FUNCTION_MULTIARCH (af_packet_input_node, af_packet_input_fn)
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
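
The rx walk in af_packet_device_input_fn () reduces to the canonical TPACKET_V2 consumer loop; a sketch, with ring, frame, frame_size, frame_num and handle () as placeholder names:

  struct tpacket2_hdr *tph =
    (struct tpacket2_hdr *) (ring + frame * frame_size);
  while (tph->tp_status & TP_STATUS_USER)	/* kernel filled this frame */
    {
      handle ((u8 *) tph + tph->tp_mac, tph->tp_snaplen);
      tph->tp_status = TP_STATUS_KERNEL;	/* return frame to the kernel */
      frame = (frame + 1) % frame_num;
      tph = (struct tpacket2_hdr *) (ring + frame * frame_size);
    }
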
diff --git a/vnet/vnet/devices/netmap/cli.c b/vnet/vnet/devices/netmap/cli.c
index 5bf80e7f7c3..68695250506 100644
--- a/vnet/vnet/devices/netmap/cli.c
+++ b/vnet/vnet/devices/netmap/cli.c
@@ -27,26 +27,28 @@
static clib_error_t *
netmap_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
- vlib_cli_command_t * cmd)
+ vlib_cli_command_t * cmd)
{
- unformat_input_t _line_input, * line_input = &_line_input;
- u8 * host_if_name = NULL;
- u8 hwaddr [6];
- u8 * hw_addr_ptr = 0;
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 *host_if_name = NULL;
+ u8 hwaddr[6];
+ u8 *hw_addr_ptr = 0;
int r;
u8 is_pipe = 0;
u8 is_master = 0;
u32 sw_if_index = ~0;
/* Get a line of input. */
- if (! unformat_user (input, unformat_line_input, line_input))
+ if (!unformat_user (input, unformat_line_input, line_input))
return 0;
while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (line_input, "name %s", &host_if_name))
;
- else if (unformat (line_input, "hw-addr %U", unformat_ethernet_address, hwaddr))
+ else
+ if (unformat
+ (line_input, "hw-addr %U", unformat_ethernet_address, hwaddr))
hw_addr_ptr = hwaddr;
else if (unformat (line_input, "pipe"))
is_pipe = 1;
@@ -55,68 +57,77 @@ netmap_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
else if (unformat (line_input, "slave"))
is_master = 0;
else
- return clib_error_return (0, "unknown input `%U'", format_unformat_error, input);
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
}
unformat_free (line_input);
if (host_if_name == NULL)
- return clib_error_return (0, "missing host interface name");
+ return clib_error_return (0, "missing host interface name");
- r = netmap_create_if(vm, host_if_name, hw_addr_ptr, is_pipe, is_master, &sw_if_index);
+ r =
+ netmap_create_if (vm, host_if_name, hw_addr_ptr, is_pipe, is_master,
+ &sw_if_index);
if (r == VNET_API_ERROR_SYSCALL_ERROR_1)
- return clib_error_return(0, "%s (errno %d)", strerror (errno), errno);
+ return clib_error_return (0, "%s (errno %d)", strerror (errno), errno);
if (r == VNET_API_ERROR_INVALID_INTERFACE)
- return clib_error_return(0, "Invalid interface name");
+ return clib_error_return (0, "Invalid interface name");
if (r == VNET_API_ERROR_SUBIF_ALREADY_EXISTS)
- return clib_error_return(0, "Interface already exists");
+ return clib_error_return (0, "Interface already exists");
- vlib_cli_output(vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main(), sw_if_index);
+ vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main (),
+ sw_if_index);
return 0;
}
+/* *INDENT-OFF* */
VLIB_CLI_COMMAND (netmap_create_command, static) = {
.path = "create netmap",
.short_help = "create netmap name [<intf name>|valeXXX:YYY] "
"[hw-addr <mac>] [pipe] [master|slave]",
.function = netmap_create_command_fn,
};
+/* *INDENT-ON* */
static clib_error_t *
netmap_delete_command_fn (vlib_main_t * vm, unformat_input_t * input,
- vlib_cli_command_t * cmd)
+ vlib_cli_command_t * cmd)
{
- unformat_input_t _line_input, * line_input = &_line_input;
- u8 * host_if_name = NULL;
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 *host_if_name = NULL;
/* Get a line of input. */
- if (! unformat_user (input, unformat_line_input, line_input))
+ if (!unformat_user (input, unformat_line_input, line_input))
return 0;
while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (line_input, "name %s", &host_if_name))
- ;
+ ;
else
- return clib_error_return (0, "unknown input `%U'", format_unformat_error, input);
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
}
unformat_free (line_input);
if (host_if_name == NULL)
- return clib_error_return (0, "missing host interface name");
+ return clib_error_return (0, "missing host interface name");
- netmap_delete_if(vm, host_if_name);
+ netmap_delete_if (vm, host_if_name);
return 0;
}
+/* *INDENT-OFF* */
VLIB_CLI_COMMAND (netmap_delete_command, static) = {
.path = "delete netmap",
.short_help = "delete netmap name <interface name>",
.function = netmap_delete_command_fn,
};
+/* *INDENT-ON* */
clib_error_t *
netmap_cli_init (vlib_main_t * vm)
@@ -125,3 +136,11 @@ netmap_cli_init (vlib_main_t * vm)
}
VLIB_INIT_FUNCTION (netmap_cli_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vnet/vnet/devices/netmap/device.c b/vnet/vnet/devices/netmap/device.c
index 751caf72aa8..74535208aec 100644
--- a/vnet/vnet/devices/netmap/device.c
+++ b/vnet/vnet/devices/netmap/device.c
@@ -30,36 +30,39 @@
_(NO_FREE_SLOTS, "no free tx slots") \
_(PENDING_MSGS, "pending msgs in tx ring")
-typedef enum {
+typedef enum
+{
#define _(f,s) NETMAP_TX_ERROR_##f,
foreach_netmap_tx_func_error
#undef _
- NETMAP_TX_N_ERROR,
+ NETMAP_TX_N_ERROR,
} netmap_tx_func_error_t;
-static char * netmap_tx_func_error_strings[] = {
+static char *netmap_tx_func_error_strings[] = {
#define _(n,s) s,
- foreach_netmap_tx_func_error
+ foreach_netmap_tx_func_error
#undef _
};
-static u8 * format_netmap_device_name (u8 * s, va_list * args)
+static u8 *
+format_netmap_device_name (u8 * s, va_list * args)
{
u32 i = va_arg (*args, u32);
- netmap_main_t * apm = &netmap_main;
- netmap_if_t * nif = pool_elt_at_index (apm->interfaces, i);
+ netmap_main_t *apm = &netmap_main;
+ netmap_if_t *nif = pool_elt_at_index (apm->interfaces, i);
s = format (s, "netmap-%s", nif->host_if_name);
return s;
}
-static u8 * format_netmap_device (u8 * s, va_list * args)
+static u8 *
+format_netmap_device (u8 * s, va_list * args)
{
u32 dev_instance = va_arg (*args, u32);
int verbose = va_arg (*args, int);
- netmap_main_t * nm = &netmap_main;
- netmap_if_t * nif = vec_elt_at_index (nm->interfaces, dev_instance);
+ netmap_main_t *nm = &netmap_main;
+ netmap_if_t *nif = vec_elt_at_index (nm->interfaces, dev_instance);
uword indent = format_get_indent (s);
s = format (s, "NETMAP interface");
@@ -78,13 +81,13 @@ static u8 * format_netmap_device (u8 * s, va_list * args)
format_white_space, indent + 2,
nif->req->nr_tx_slots,
nif->req->nr_rx_slots,
- nif->req->nr_tx_rings,
- nif->req->nr_rx_rings);
+ nif->req->nr_tx_rings, nif->req->nr_rx_rings);
}
return s;
}
-static u8 * format_netmap_tx_trace (u8 * s, va_list * args)
+static u8 *
+format_netmap_tx_trace (u8 * s, va_list * args)
{
s = format (s, "Unimplemented...");
return s;
@@ -92,95 +95,96 @@ static u8 * format_netmap_tx_trace (u8 * s, va_list * args)
static uword
netmap_interface_tx (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
{
- netmap_main_t * nm = &netmap_main;
- u32 * buffers = vlib_frame_args (frame);
+ netmap_main_t *nm = &netmap_main;
+ u32 *buffers = vlib_frame_args (frame);
u32 n_left = frame->n_vectors;
f64 const time_constant = 1e3;
- vnet_interface_output_runtime_t * rd = (void *) node->runtime_data;
- netmap_if_t * nif = pool_elt_at_index (nm->interfaces, rd->dev_instance);
+ vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
+ netmap_if_t *nif = pool_elt_at_index (nm->interfaces, rd->dev_instance);
int cur_ring;
- if (PREDICT_FALSE(nif->lockp != 0))
+ if (PREDICT_FALSE (nif->lockp != 0))
{
while (__sync_lock_test_and_set (nif->lockp, 1))
- ;
+ ;
}
cur_ring = nif->first_tx_ring;
- while(n_left && cur_ring <= nif->last_tx_ring)
+ while (n_left && cur_ring <= nif->last_tx_ring)
{
- struct netmap_ring * ring = NETMAP_TXRING(nif->nifp, cur_ring);
- int n_free_slots = nm_ring_space(ring);
+ struct netmap_ring *ring = NETMAP_TXRING (nif->nifp, cur_ring);
+ int n_free_slots = nm_ring_space (ring);
uint cur = ring->cur;
- if (nm_tx_pending(ring))
- {
- if (ioctl(nif->fd, NIOCTXSYNC, NULL) < 0)
- clib_unix_warning ("NIOCTXSYNC");
- clib_cpu_time_wait(time_constant);
-
- if (nm_tx_pending(ring) && !n_free_slots)
- {
- cur_ring++;
- continue;
- }
- }
+ if (nm_tx_pending (ring))
+ {
+ if (ioctl (nif->fd, NIOCTXSYNC, NULL) < 0)
+ clib_unix_warning ("NIOCTXSYNC");
+ clib_cpu_time_wait (time_constant);
+
+ if (nm_tx_pending (ring) && !n_free_slots)
+ {
+ cur_ring++;
+ continue;
+ }
+ }
while (n_left && n_free_slots)
{
- vlib_buffer_t * b0 = 0;
+ vlib_buffer_t *b0 = 0;
u32 bi = buffers[0];
u32 len;
u32 offset = 0;
buffers++;
- struct netmap_slot * slot = &ring->slot[cur];
+ struct netmap_slot *slot = &ring->slot[cur];
do
{
b0 = vlib_get_buffer (vm, bi);
len = b0->current_length;
/* memcpy */
- clib_memcpy ((u8 *) NETMAP_BUF(ring, slot->buf_idx) + offset,
- vlib_buffer_get_current(b0), len);
+ clib_memcpy ((u8 *) NETMAP_BUF (ring, slot->buf_idx) + offset,
+ vlib_buffer_get_current (b0), len);
offset += len;
}
- while ((bi = b0->next_buffer));
+ while ((bi = b0->next_buffer));
slot->len = offset;
cur = (cur + 1) % ring->num_slots;
n_free_slots--;
- n_left--;
+ n_left--;
}
- CLIB_MEMORY_BARRIER();
+ CLIB_MEMORY_BARRIER ();
ring->head = ring->cur = cur;
}
if (n_left < frame->n_vectors)
- ioctl(nif->fd, NIOCTXSYNC, NULL);
+ ioctl (nif->fd, NIOCTXSYNC, NULL);
- if (PREDICT_FALSE(nif->lockp != 0))
- *nif->lockp = 0;
+ if (PREDICT_FALSE (nif->lockp != 0))
+ *nif->lockp = 0;
if (n_left)
vlib_error_count (vm, node->node_index,
- (n_left == frame->n_vectors ? NETMAP_TX_ERROR_PENDING_MSGS : NETMAP_TX_ERROR_NO_FREE_SLOTS), n_left);
+ (n_left ==
+ frame->n_vectors ? NETMAP_TX_ERROR_PENDING_MSGS :
+ NETMAP_TX_ERROR_NO_FREE_SLOTS), n_left);
vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
return frame->n_vectors;
}
static void
-netmap_set_interface_next_node (vnet_main_t *vnm, u32 hw_if_index,
- u32 node_index)
+netmap_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
+ u32 node_index)
{
- netmap_main_t * apm = &netmap_main;
+ netmap_main_t *apm = &netmap_main;
vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
- netmap_if_t * nif = pool_elt_at_index (apm->interfaces, hw->dev_instance);
+ netmap_if_t *nif = pool_elt_at_index (apm->interfaces, hw->dev_instance);
/* Shut off redirection */
if (node_index == ~0)
@@ -190,10 +194,12 @@ netmap_set_interface_next_node (vnet_main_t *vnm, u32 hw_if_index,
}
nif->per_interface_next_index =
- vlib_node_add_next (vlib_get_main(), netmap_input_node.index, node_index);
+ vlib_node_add_next (vlib_get_main (), netmap_input_node.index,
+ node_index);
}
-static void netmap_clear_hw_interface_counters (u32 instance)
+static void
+netmap_clear_hw_interface_counters (u32 instance)
{
/* Nothing for now */
}
@@ -201,9 +207,9 @@ static void netmap_clear_hw_interface_counters (u32 instance)
static clib_error_t *
netmap_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
- netmap_main_t * apm = &netmap_main;
+ netmap_main_t *apm = &netmap_main;
vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
- netmap_if_t * nif = pool_elt_at_index (apm->interfaces, hw->dev_instance);
+ netmap_if_t *nif = pool_elt_at_index (apm->interfaces, hw->dev_instance);
u32 hw_flags;
nif->is_admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
@@ -213,21 +219,21 @@ netmap_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
else
hw_flags = 0;
- vnet_hw_interface_set_flags(vnm, hw_if_index, hw_flags);
+ vnet_hw_interface_set_flags (vnm, hw_if_index, hw_flags);
return 0;
}
static clib_error_t *
netmap_subif_add_del_function (vnet_main_t * vnm,
- u32 hw_if_index,
- struct vnet_sw_interface_t * st,
- int is_add)
+ u32 hw_if_index,
+ struct vnet_sw_interface_t *st, int is_add)
{
/* Nothing for now */
return 0;
}
+/* *INDENT-OFF* */
VNET_DEVICE_CLASS (netmap_device_class) = {
.name = "netmap",
.tx_function = netmap_interface_tx,
@@ -245,3 +251,12 @@ VNET_DEVICE_CLASS (netmap_device_class) = {
VLIB_DEVICE_TX_FUNCTION_MULTIARCH(netmap_device_class,
netmap_interface_tx)
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
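
netmap_interface_tx () above amounts to the standard netmap producer pattern: claim free slots, copy payload into the slot buffers, publish the new head, then sync. A sketch, with nifp, fd, have_packets () and fill () as placeholders:

  struct netmap_ring *ring = NETMAP_TXRING (nifp, 0);
  uint32_t cur = ring->cur;
  uint32_t n = nm_ring_space (ring);	/* free slots before tail */

  while (n-- && have_packets ())
    {
      struct netmap_slot *slot = &ring->slot[cur];
      slot->len = fill (NETMAP_BUF (ring, slot->buf_idx));
      cur = nm_ring_next (ring, cur);
    }
  CLIB_MEMORY_BARRIER ();
  ring->head = ring->cur = cur;	/* publish slots to the kernel */
  ioctl (fd, NIOCTXSYNC, NULL);	/* kick transmission */
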
diff --git a/vnet/vnet/devices/netmap/netmap.c b/vnet/vnet/devices/netmap/netmap.c
index 7f1cadd7951..fe6f1eaf74a 100644
--- a/vnet/vnet/devices/netmap/netmap.c
+++ b/vnet/vnet/devices/netmap/netmap.c
@@ -28,19 +28,22 @@
#include <vnet/devices/netmap/netmap.h>
static u32
-netmap_eth_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hi, u32 flags)
+netmap_eth_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hi,
+ u32 flags)
{
/* nothing for now */
return 0;
}
-static clib_error_t * netmap_fd_read_ready (unix_file_t * uf)
+static clib_error_t *
+netmap_fd_read_ready (unix_file_t * uf)
{
- vlib_main_t * vm = vlib_get_main();
- netmap_main_t * nm = &netmap_main;
+ vlib_main_t *vm = vlib_get_main ();
+ netmap_main_t *nm = &netmap_main;
u32 idx = uf->private_data;
- nm->pending_input_bitmap = clib_bitmap_set (nm->pending_input_bitmap, idx, 1);
+ nm->pending_input_bitmap =
+ clib_bitmap_set (nm->pending_input_bitmap, idx, 1);
/* Schedule the rx node */
vlib_node_set_interrupt_pending (vm, netmap_input_node.index);
@@ -49,89 +52,95 @@ static clib_error_t * netmap_fd_read_ready (unix_file_t * uf)
}
static void
-close_netmap_if(netmap_main_t * nm, netmap_if_t * nif)
+close_netmap_if (netmap_main_t * nm, netmap_if_t * nif)
{
- if (nif->unix_file_index != ~0) {
- unix_file_del(&unix_main, unix_main.file_pool + nif->unix_file_index);
- nif->unix_file_index = ~0;
- }
+ if (nif->unix_file_index != ~0)
+ {
+ unix_file_del (&unix_main, unix_main.file_pool + nif->unix_file_index);
+ nif->unix_file_index = ~0;
+ }
if (nif->fd > -1)
- close(nif->fd);
+ close (nif->fd);
if (nif->mem_region)
{
- netmap_mem_region_t * reg = &nm->mem_regions[nif->mem_region];
+ netmap_mem_region_t *reg = &nm->mem_regions[nif->mem_region];
if (--reg->refcnt == 0)
{
- munmap(reg->mem, reg->region_size);
+ munmap (reg->mem, reg->region_size);
reg->region_size = 0;
}
}
- mhash_unset(&nm->if_index_by_host_if_name, nif->host_if_name, &nif->if_index);
- vec_free(nif->host_if_name);
- vec_free(nif->req);
+ mhash_unset (&nm->if_index_by_host_if_name, nif->host_if_name,
+ &nif->if_index);
+ vec_free (nif->host_if_name);
+ vec_free (nif->req);
- memset(nif, 0, sizeof(*nif));
- pool_put(nm->interfaces, nif);
+ memset (nif, 0, sizeof (*nif));
+ pool_put (nm->interfaces, nif);
}
int
-netmap_worker_thread_enable()
+netmap_worker_thread_enable ()
{
/* if worker threads are enabled, switch to polling mode */
- foreach_vlib_main (
- ({
- vlib_node_set_state(this_vlib_main, netmap_input_node.index, VLIB_NODE_STATE_POLLING);
- }));
+ foreach_vlib_main ((
+ {
+ vlib_node_set_state (this_vlib_main,
+ netmap_input_node.index,
+ VLIB_NODE_STATE_POLLING);
+ }));
return 0;
}
int
-netmap_worker_thread_disable()
+netmap_worker_thread_disable ()
{
- foreach_vlib_main (
- ({
- vlib_node_set_state(this_vlib_main, netmap_input_node.index, VLIB_NODE_STATE_INTERRUPT);
- }));
+ foreach_vlib_main ((
+ {
+ vlib_node_set_state (this_vlib_main,
+ netmap_input_node.index,
+ VLIB_NODE_STATE_INTERRUPT);
+ }));
return 0;
}
int
-netmap_create_if(vlib_main_t * vm, u8 * if_name, u8 * hw_addr_set,
- u8 is_pipe, u8 is_master, u32 *sw_if_index)
+netmap_create_if (vlib_main_t * vm, u8 * if_name, u8 * hw_addr_set,
+ u8 is_pipe, u8 is_master, u32 * sw_if_index)
{
- netmap_main_t * nm = &netmap_main;
+ netmap_main_t *nm = &netmap_main;
int ret = 0;
- netmap_if_t * nif = 0;
+ netmap_if_t *nif = 0;
u8 hw_addr[6];
- clib_error_t * error = 0;
- vnet_sw_interface_t * sw;
- vnet_main_t *vnm = vnet_get_main();
- uword * p;
- struct nmreq * req = 0;
- netmap_mem_region_t * reg;
- vlib_thread_main_t * tm = vlib_get_thread_main();
+ clib_error_t *error = 0;
+ vnet_sw_interface_t *sw;
+ vnet_main_t *vnm = vnet_get_main ();
+ uword *p;
+ struct nmreq *req = 0;
+ netmap_mem_region_t *reg;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
int fd;
p = mhash_get (&nm->if_index_by_host_if_name, if_name);
if (p)
- return VNET_API_ERROR_SUBIF_ALREADY_EXISTS;
+ return VNET_API_ERROR_SUBIF_ALREADY_EXISTS;
- fd = open("/dev/netmap", O_RDWR);
+ fd = open ("/dev/netmap", O_RDWR);
if (fd < 0)
- return VNET_API_ERROR_SUBIF_ALREADY_EXISTS;
+ return VNET_API_ERROR_SUBIF_ALREADY_EXISTS;
pool_get (nm->interfaces, nif);
nif->if_index = nif - nm->interfaces;
nif->fd = fd;
nif->unix_file_index = ~0;
- vec_validate(req, 0);
+ vec_validate (req, 0);
nif->req = req;
req->nr_version = NETMAP_API;
req->nr_flags = NR_REG_ALL_NIC;
@@ -142,10 +151,10 @@ netmap_create_if(vlib_main_t * vm, u8 * if_name, u8 * hw_addr_set,
req->nr_flags = NR_REG_ALL_NIC;
req->nr_flags |= NR_ACCEPT_VNET_HDR;
- snprintf(req->nr_name, IFNAMSIZ, "%s", if_name);
- req->nr_name[IFNAMSIZ-1] = 0;
+ snprintf (req->nr_name, IFNAMSIZ, "%s", if_name);
+ req->nr_name[IFNAMSIZ - 1] = 0;
- if (ioctl(nif->fd, NIOCREGIF, req))
+ if (ioctl (nif->fd, NIOCREGIF, req))
{
ret = VNET_API_ERROR_NOT_CONNECTED;
goto error;
@@ -156,9 +165,9 @@ netmap_create_if(vlib_main_t * vm, u8 * if_name, u8 * hw_addr_set,
reg = &nm->mem_regions[nif->mem_region];
if (reg->region_size == 0)
{
- reg->mem = mmap(NULL, req->nr_memsize, PROT_READ | PROT_WRITE,
- MAP_SHARED, fd, 0);
- clib_warning("mem %p", reg->mem);
+ reg->mem = mmap (NULL, req->nr_memsize, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
+ clib_warning ("mem %p", reg->mem);
if (reg->mem == MAP_FAILED)
{
ret = VNET_API_ERROR_NOT_CONNECTED;
@@ -168,7 +177,7 @@ netmap_create_if(vlib_main_t * vm, u8 * if_name, u8 * hw_addr_set,
}
reg->refcnt++;
- nif->nifp = NETMAP_IF(reg->mem, req->nr_offset);
+ nif->nifp = NETMAP_IF (reg->mem, req->nr_offset);
nif->first_rx_ring = 0;
nif->last_rx_ring = 0;
nif->first_tx_ring = 0;
@@ -177,14 +186,14 @@ netmap_create_if(vlib_main_t * vm, u8 * if_name, u8 * hw_addr_set,
nif->per_interface_next_index = ~0;
if (tm->n_vlib_mains > 1)
- {
- nif->lockp = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
- CLIB_CACHE_LINE_BYTES);
- memset ((void *) nif->lockp, 0, CLIB_CACHE_LINE_BYTES);
- }
+ {
+ nif->lockp = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
+ CLIB_CACHE_LINE_BYTES);
+ memset ((void *) nif->lockp, 0, CLIB_CACHE_LINE_BYTES);
+ }
{
- unix_file_t template = {0};
+ unix_file_t template = { 0 };
template.read_function = netmap_fd_read_ready;
template.file_descriptor = nif->fd;
template.private_data = nif->if_index;
@@ -193,22 +202,23 @@ netmap_create_if(vlib_main_t * vm, u8 * if_name, u8 * hw_addr_set,
/*use configured or generate random MAC address */
if (hw_addr_set)
- memcpy(hw_addr, hw_addr_set, 6);
+ memcpy (hw_addr, hw_addr_set, 6);
else
{
- f64 now = vlib_time_now(vm);
+ f64 now = vlib_time_now (vm);
u32 rnd;
rnd = (u32) (now * 1e6);
rnd = random_u32 (&rnd);
- memcpy (hw_addr+2, &rnd, sizeof(rnd));
+ memcpy (hw_addr + 2, &rnd, sizeof (rnd));
hw_addr[0] = 2;
hw_addr[1] = 0xfe;
}
- error = ethernet_register_interface(vnm, netmap_device_class.index,
- nif->if_index, hw_addr, &nif->hw_if_index,
- netmap_eth_flag_change);
+ error = ethernet_register_interface (vnm, netmap_device_class.index,
+ nif->if_index, hw_addr,
+ &nif->hw_if_index,
+ netmap_eth_flag_change);
if (error)
{
@@ -225,41 +235,42 @@ netmap_create_if(vlib_main_t * vm, u8 * if_name, u8 * hw_addr_set,
if (sw_if_index)
*sw_if_index = nif->sw_if_index;
- if (tm->n_vlib_mains > 1 && pool_elts(nm->interfaces) == 1)
- netmap_worker_thread_enable();
+ if (tm->n_vlib_mains > 1 && pool_elts (nm->interfaces) == 1)
+ netmap_worker_thread_enable ();
return 0;
error:
- close_netmap_if(nm, nif);
+ close_netmap_if (nm, nif);
return ret;
}
int
-netmap_delete_if(vlib_main_t *vm, u8 *host_if_name)
+netmap_delete_if (vlib_main_t * vm, u8 * host_if_name)
{
- vnet_main_t *vnm = vnet_get_main();
+ vnet_main_t *vnm = vnet_get_main ();
netmap_main_t *nm = &netmap_main;
netmap_if_t *nif;
uword *p;
- vlib_thread_main_t * tm = vlib_get_thread_main();
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
- p = mhash_get(&nm->if_index_by_host_if_name, host_if_name);
- if (p == NULL) {
- clib_warning("Host interface %s does not exist", host_if_name);
- return VNET_API_ERROR_SYSCALL_ERROR_1;
- }
- nif = pool_elt_at_index(nm->interfaces, p[0]);
+ p = mhash_get (&nm->if_index_by_host_if_name, host_if_name);
+ if (p == NULL)
+ {
+ clib_warning ("Host interface %s does not exist", host_if_name);
+ return VNET_API_ERROR_SYSCALL_ERROR_1;
+ }
+ nif = pool_elt_at_index (nm->interfaces, p[0]);
/* bring down the interface */
- vnet_hw_interface_set_flags(vnm, nif->hw_if_index, 0);
+ vnet_hw_interface_set_flags (vnm, nif->hw_if_index, 0);
- ethernet_delete_interface(vnm, nif->hw_if_index);
+ ethernet_delete_interface (vnm, nif->hw_if_index);
- close_netmap_if(nm, nif);
+ close_netmap_if (nm, nif);
- if (tm->n_vlib_mains > 1 && pool_elts(nm->interfaces) == 0)
- netmap_worker_thread_disable();
+ if (tm->n_vlib_mains > 1 && pool_elts (nm->interfaces) == 0)
+ netmap_worker_thread_disable ();
return 0;
}
@@ -267,10 +278,10 @@ netmap_delete_if(vlib_main_t *vm, u8 *host_if_name)
static clib_error_t *
netmap_init (vlib_main_t * vm)
{
- netmap_main_t * nm = &netmap_main;
- vlib_thread_main_t * tm = vlib_get_thread_main();
- vlib_thread_registration_t * tr;
- uword * p;
+ netmap_main_t *nm = &netmap_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ vlib_thread_registration_t *tr;
+ uword *p;
memset (nm, 0, sizeof (netmap_main_t));
@@ -290,9 +301,17 @@ netmap_init (vlib_main_t * vm)
mhash_init_vec_string (&nm->if_index_by_host_if_name, sizeof (uword));
vec_validate_aligned (nm->rx_buffers, tm->n_vlib_mains - 1,
- CLIB_CACHE_LINE_BYTES);
+ CLIB_CACHE_LINE_BYTES);
return 0;
}
VLIB_INIT_FUNCTION (netmap_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vnet/vnet/devices/netmap/netmap.h b/vnet/vnet/devices/netmap/netmap.h
index d29057febcf..39a94043c3c 100644
--- a/vnet/vnet/devices/netmap/netmap.h
+++ b/vnet/vnet/devices/netmap/netmap.h
@@ -40,10 +40,11 @@
* SUCH DAMAGE.
*/
-typedef struct {
- CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
- volatile u32 * lockp;
- u8 * host_if_name;
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ volatile u32 *lockp;
+ u8 *host_if_name;
uword if_index;
u32 hw_if_index;
u32 sw_if_index;
@@ -53,10 +54,10 @@ typedef struct {
u8 is_admin_up;
/* netmap */
- struct nmreq * req;
+ struct nmreq *req;
u16 mem_region;
int fd;
- struct netmap_if * nifp;
+ struct netmap_if *nifp;
u16 first_tx_ring;
u16 last_tx_ring;
u16 first_rx_ring;
@@ -64,27 +65,29 @@ typedef struct {
} netmap_if_t;
-typedef struct {
- char * mem;
+typedef struct
+{
+ char *mem;
u32 region_size;
int refcnt;
} netmap_mem_region_t;
-typedef struct {
- CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
- netmap_if_t * interfaces;
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ netmap_if_t *interfaces;
/* bitmap of pending rx interfaces */
- uword * pending_input_bitmap;
+ uword *pending_input_bitmap;
/* rx buffer cache */
- u32 ** rx_buffers;
+ u32 **rx_buffers;
/* hash of host interface names */
mhash_t if_index_by_host_if_name;
/* vector of memory regions */
- netmap_mem_region_t * mem_regions;
+ netmap_mem_region_t *mem_regions;
/* first cpu index */
u32 input_cpu_first_index;
@@ -97,9 +100,9 @@ netmap_main_t netmap_main;
extern vnet_device_class_t netmap_device_class;
extern vlib_node_registration_t netmap_input_node;
-int netmap_create_if(vlib_main_t * vm, u8 * host_if_name, u8 * hw_addr_set,
- u8 is_pipe, u8 is_master, u32 *sw_if_index);
-int netmap_delete_if(vlib_main_t * vm, u8 * host_if_name);
+int netmap_create_if (vlib_main_t * vm, u8 * host_if_name, u8 * hw_addr_set,
+ u8 is_pipe, u8 is_master, u32 * sw_if_index);
+int netmap_delete_if (vlib_main_t * vm, u8 * host_if_name);
/* Macros and helper functions from sys/net/netmap_user.h */
@@ -125,9 +128,9 @@ int netmap_delete_if(vlib_main_t * vm, u8 * host_if_name);
(ring)->nr_buf_size )
static inline uint32_t
-nm_ring_next(struct netmap_ring *ring, uint32_t i)
+nm_ring_next (struct netmap_ring *ring, uint32_t i)
{
- return ( PREDICT_FALSE(i + 1 == ring->num_slots) ? 0 : i + 1);
+ return (PREDICT_FALSE (i + 1 == ring->num_slots) ? 0 : i + 1);
}
@@ -136,18 +139,26 @@ nm_ring_next(struct netmap_ring *ring, uint32_t i)
 * When everything is complete, ring->head = ring->tail + 1 (modulo ring size)
*/
static inline int
-nm_tx_pending(struct netmap_ring *ring)
+nm_tx_pending (struct netmap_ring *ring)
{
- return nm_ring_next(ring, ring->tail) != ring->head;
+ return nm_ring_next (ring, ring->tail) != ring->head;
}
static inline uint32_t
-nm_ring_space(struct netmap_ring *ring)
+nm_ring_space (struct netmap_ring *ring)
{
- int ret = ring->tail - ring->cur;
- if (ret < 0)
- ret += ring->num_slots;
- return ret;
+ int ret = ring->tail - ring->cur;
+ if (ret < 0)
+ ret += ring->num_slots;
+ return ret;
}
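/*
 * Usage sketch (assumption; this helper is not part of the header):
 * draining every slot the kernel has published between cur and tail,
 * built only from the two accessors above.
 */
static inline uint32_t
nm_ring_drain (struct netmap_ring *ring)
{
  uint32_t n = nm_ring_space (ring);	/* slots available to us */
  uint32_t i = ring->cur;
  uint32_t left;
  for (left = n; left; left--)
    i = nm_ring_next (ring, i);	/* wraps at num_slots */
  ring->head = ring->cur = i;	/* hand the slots back to the kernel */
  return n;
}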
#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vnet/vnet/devices/netmap/node.c b/vnet/vnet/devices/netmap/node.c
index 4a79fe0f979..559db669aae 100644
--- a/vnet/vnet/devices/netmap/node.c
+++ b/vnet/vnet/devices/netmap/node.c
@@ -28,36 +28,40 @@
#define foreach_netmap_input_error
-typedef enum {
+typedef enum
+{
#define _(f,s) NETMAP_INPUT_ERROR_##f,
foreach_netmap_input_error
#undef _
- NETMAP_INPUT_N_ERROR,
+ NETMAP_INPUT_N_ERROR,
} netmap_input_error_t;
-static char * netmap_input_error_strings[] = {
+static char *netmap_input_error_strings[] = {
#define _(n,s) s,
- foreach_netmap_input_error
+ foreach_netmap_input_error
#undef _
};
-enum {
+enum
+{
NETMAP_INPUT_NEXT_DROP,
NETMAP_INPUT_NEXT_ETHERNET_INPUT,
NETMAP_INPUT_N_NEXT,
};
-typedef struct {
+typedef struct
+{
u32 next_index;
u32 hw_if_index;
struct netmap_slot slot;
} netmap_input_trace_t;
-static u8 * format_netmap_input_trace (u8 * s, va_list * args)
+static u8 *
+format_netmap_input_trace (u8 * s, va_list * args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- netmap_input_trace_t * t = va_arg (*args, netmap_input_trace_t *);
+ netmap_input_trace_t *t = va_arg (*args, netmap_input_trace_t *);
uword indent = format_get_indent (s);
s = format (s, "netmap: hw_if_index %d next-index %d",
@@ -69,14 +73,14 @@ static u8 * format_netmap_input_trace (u8 * s, va_list * args)
}
always_inline void
-buffer_add_to_chain(vlib_main_t *vm, u32 bi, u32 first_bi, u32 prev_bi)
+buffer_add_to_chain (vlib_main_t * vm, u32 bi, u32 first_bi, u32 prev_bi)
{
- vlib_buffer_t * b = vlib_get_buffer (vm, bi);
- vlib_buffer_t * first_b = vlib_get_buffer (vm, first_bi);
- vlib_buffer_t * prev_b = vlib_get_buffer (vm, prev_bi);
+ vlib_buffer_t *b = vlib_get_buffer (vm, bi);
+ vlib_buffer_t *first_b = vlib_get_buffer (vm, first_bi);
+ vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_bi);
/* update first buffer */
- first_b->total_length_not_including_first_buffer += b->current_length;
+ first_b->total_length_not_including_first_buffer += b->current_length;
/* update previous buffer */
prev_b->next_buffer = bi;
@@ -86,9 +90,9 @@ buffer_add_to_chain(vlib_main_t *vm, u32 bi, u32 first_bi, u32 prev_bi)
b->next_buffer = 0;
#if DPDK > 0
- struct rte_mbuf * mbuf = rte_mbuf_from_vlib_buffer(b);
- struct rte_mbuf * first_mbuf = rte_mbuf_from_vlib_buffer(first_b);
- struct rte_mbuf * prev_mbuf = rte_mbuf_from_vlib_buffer(prev_b);
+ struct rte_mbuf *mbuf = rte_mbuf_from_vlib_buffer (b);
+ struct rte_mbuf *first_mbuf = rte_mbuf_from_vlib_buffer (first_b);
+ struct rte_mbuf *prev_mbuf = rte_mbuf_from_vlib_buffer (prev_b);
first_mbuf->nb_segs++;
prev_mbuf->next = mbuf;
mbuf->data_len = b->current_length;
@@ -98,30 +102,33 @@ buffer_add_to_chain(vlib_main_t *vm, u32 bi, u32 first_bi, u32 prev_bi)
}
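/*
 * Sketch (assumption; helper not in this patch): given the bookkeeping
 * above, the total length of a chain headed by first_b is recoverable
 * in O(1), without walking the chain.
 */
always_inline u32
buffer_chain_total_length (vlib_buffer_t * first_b)
{
  return first_b->current_length +
    first_b->total_length_not_including_first_buffer;
}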
always_inline uword
-netmap_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame, netmap_if_t * nif)
+netmap_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame, netmap_if_t * nif)
{
u32 next_index = NETMAP_INPUT_NEXT_ETHERNET_INPUT;
uword n_trace = vlib_get_trace_count (vm, node);
- netmap_main_t * nm = &netmap_main;
+ netmap_main_t *nm = &netmap_main;
u32 n_rx_packets = 0;
u32 n_rx_bytes = 0;
- u32 * to_next = 0;
+ u32 *to_next = 0;
u32 n_free_bufs;
- struct netmap_ring * ring;
+ struct netmap_ring *ring;
int cur_ring;
- u32 cpu_index = os_get_cpu_number();
+ u32 cpu_index = os_get_cpu_number ();
u32 n_buffer_bytes = vlib_buffer_free_list_buffer_size (vm,
- VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+ VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
if (nif->per_interface_next_index != ~0)
- next_index = nif->per_interface_next_index;
+ next_index = nif->per_interface_next_index;
n_free_bufs = vec_len (nm->rx_buffers[cpu_index]);
- if (PREDICT_FALSE(n_free_bufs < VLIB_FRAME_SIZE))
+ if (PREDICT_FALSE (n_free_bufs < VLIB_FRAME_SIZE))
{
- vec_validate(nm->rx_buffers[cpu_index], VLIB_FRAME_SIZE + n_free_bufs - 1);
- n_free_bufs += vlib_buffer_alloc(vm, &nm->rx_buffers[cpu_index][n_free_bufs], VLIB_FRAME_SIZE);
+ vec_validate (nm->rx_buffers[cpu_index],
+ VLIB_FRAME_SIZE + n_free_bufs - 1);
+ n_free_bufs +=
+ vlib_buffer_alloc (vm, &nm->rx_buffers[cpu_index][n_free_bufs],
+ VLIB_FRAME_SIZE);
_vec_len (nm->rx_buffers[cpu_index]) = n_free_bufs;
}
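      /*
       * Invariant sketch (assumption): past this block the per-cpu
       * rx_buffers cache holds n_free_bufs pre-allocated buffer indices,
       * a full VLIB_FRAME_SIZE worth unless the allocator itself ran dry.
       */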
@@ -130,8 +137,8 @@ netmap_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
{
int r = 0;
u32 cur_slot_index;
- ring = NETMAP_RXRING(nif->nifp, cur_ring);
- r = nm_ring_space(ring);
+ ring = NETMAP_RXRING (nif->nifp, cur_ring);
+ r = nm_ring_space (ring);
if (!r)
{
@@ -145,30 +152,33 @@ netmap_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
cur_slot_index = ring->cur;
while (r)
{
- u32 n_left_to_next;
- u32 next0 = next_index;
- vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+ u32 n_left_to_next;
+ u32 next0 = next_index;
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
while (r && n_left_to_next)
{
- vlib_buffer_t * b0, * first_b0 = 0;
+ vlib_buffer_t *b0, *first_b0 = 0;
u32 offset = 0;
u32 bi0 = 0, first_bi0 = 0, prev_bi0;
u32 next_slot_index = (cur_slot_index + 1) % ring->num_slots;
u32 next2_slot_index = (cur_slot_index + 2) % ring->num_slots;
- struct netmap_slot * slot = &ring->slot[cur_slot_index];
+ struct netmap_slot *slot = &ring->slot[cur_slot_index];
u32 data_len = slot->len;
/* prefetch 2 slots in advance */
- CLIB_PREFETCH (&ring->slot[next2_slot_index], CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (&ring->slot[next2_slot_index],
+ CLIB_CACHE_LINE_BYTES, LOAD);
/* prefetch start of next packet */
- CLIB_PREFETCH (NETMAP_BUF(ring, ring->slot[next_slot_index].buf_idx),
+ CLIB_PREFETCH (NETMAP_BUF
+ (ring, ring->slot[next_slot_index].buf_idx),
CLIB_CACHE_LINE_BYTES, LOAD);
while (data_len && n_free_bufs)
{
/* grab free buffer */
- u32 last_empty_buffer = vec_len (nm->rx_buffers[cpu_index]) - 1;
+ u32 last_empty_buffer =
+ vec_len (nm->rx_buffers[cpu_index]) - 1;
prev_bi0 = bi0;
bi0 = nm->rx_buffers[cpu_index][last_empty_buffer];
b0 = vlib_get_buffer (vm, bi0);
@@ -176,11 +186,13 @@ netmap_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
n_free_bufs--;
/* copy data */
- u32 bytes_to_copy = data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
+ u32 bytes_to_copy =
+ data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
b0->current_data = 0;
clib_memcpy (vlib_buffer_get_current (b0),
- (u8 *) NETMAP_BUF(ring, slot->buf_idx) + offset,
- bytes_to_copy);
+ (u8 *) NETMAP_BUF (ring,
+ slot->buf_idx) + offset,
+ bytes_to_copy);
/* fill buffer header */
b0->current_length = bytes_to_copy;
@@ -188,43 +200,45 @@ netmap_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
if (offset == 0)
{
#if DPDK > 0
- struct rte_mbuf * mb = rte_mbuf_from_vlib_buffer(b0);
+ struct rte_mbuf *mb = rte_mbuf_from_vlib_buffer (b0);
rte_pktmbuf_data_len (mb) = b0->current_length;
rte_pktmbuf_pkt_len (mb) = b0->current_length;
#endif
b0->total_length_not_including_first_buffer = 0;
b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
- vnet_buffer(b0)->sw_if_index[VLIB_RX] = nif->sw_if_index;
- vnet_buffer(b0)->sw_if_index[VLIB_TX] = (u32)~0;
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] =
+ nif->sw_if_index;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
first_bi0 = bi0;
- first_b0 = vlib_get_buffer(vm, first_bi0);
+ first_b0 = vlib_get_buffer (vm, first_bi0);
}
else
- buffer_add_to_chain(vm, bi0, first_bi0, prev_bi0);
+ buffer_add_to_chain (vm, bi0, first_bi0, prev_bi0);
offset += bytes_to_copy;
data_len -= bytes_to_copy;
}
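		  /*
		   * Loop invariant (sketch): `offset' bytes of the netmap
		   * slot now sit in a vlib buffer chain headed by first_b0;
		   * extra buffers are chained only when a slot outgrows
		   * n_buffer_bytes.
		   */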
/* trace */
- VLIB_BUFFER_TRACE_TRAJECTORY_INIT(first_b0);
- if (PREDICT_FALSE(n_trace > 0))
- {
- if (PREDICT_TRUE(first_b0 != 0))
- {
- netmap_input_trace_t *tr;
- vlib_trace_buffer (vm, node, next0, first_b0,
- /* follow_chain */ 0);
- vlib_set_trace_count (vm, node, --n_trace);
- tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
- tr->next_index = next0;
- tr->hw_if_index = nif->hw_if_index;
- memcpy (&tr->slot, slot, sizeof (struct netmap_slot));
- }
- }
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (first_b0);
+ if (PREDICT_FALSE (n_trace > 0))
+ {
+ if (PREDICT_TRUE (first_b0 != 0))
+ {
+ netmap_input_trace_t *tr;
+ vlib_trace_buffer (vm, node, next0, first_b0,
+ /* follow_chain */ 0);
+ vlib_set_trace_count (vm, node, --n_trace);
+ tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
+ tr->next_index = next0;
+ tr->hw_if_index = nif->hw_if_index;
+ memcpy (&tr->slot, slot, sizeof (struct netmap_slot));
+ }
+ }
	  /* enqueue and take next packet */
vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
- n_left_to_next, first_bi0, next0);
+ n_left_to_next, first_bi0,
+ next0);
/* next packet */
n_rx_packets++;
@@ -236,46 +250,46 @@ netmap_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
r--;
}
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
- ring->head = ring->cur = cur_slot_index;
- cur_ring++;
+ ring->head = ring->cur = cur_slot_index;
+ cur_ring++;
}
if (n_rx_packets)
- ioctl(nif->fd, NIOCRXSYNC, NULL);
+ ioctl (nif->fd, NIOCRXSYNC, NULL);
vlib_increment_combined_counter
- (vnet_get_main()->interface_main.combined_sw_if_counters
+ (vnet_get_main ()->interface_main.combined_sw_if_counters
+ VNET_INTERFACE_COUNTER_RX,
- os_get_cpu_number(),
- nif->hw_if_index,
- n_rx_packets, n_rx_bytes);
+ os_get_cpu_number (), nif->hw_if_index, n_rx_packets, n_rx_bytes);
return n_rx_packets;
}
static uword
netmap_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+ vlib_frame_t * frame)
{
int i;
u32 n_rx_packets = 0;
- u32 cpu_index = os_get_cpu_number();
- netmap_main_t * nm = &netmap_main;
- netmap_if_t * nmi;
+ u32 cpu_index = os_get_cpu_number ();
+ netmap_main_t *nm = &netmap_main;
+ netmap_if_t *nmi;
- for(i = 0; i < vec_len(nm->interfaces); i++ )
+ for (i = 0; i < vec_len (nm->interfaces); i++)
{
- nmi = vec_elt_at_index(nm->interfaces, i);
+ nmi = vec_elt_at_index (nm->interfaces, i);
if (nmi->is_admin_up &&
- (i % nm->input_cpu_count) == (cpu_index - nm->input_cpu_first_index))
- n_rx_packets += netmap_device_input_fn(vm, node, frame, nmi);
+ (i % nm->input_cpu_count) ==
+ (cpu_index - nm->input_cpu_first_index))
+ n_rx_packets += netmap_device_input_fn (vm, node, frame, nmi);
}
return n_rx_packets;
}
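/*
 * Sketch (assumption; predicate not in this patch) of the static
 * interface-to-worker mapping applied in the loop above: each interface
 * is polled by exactly one input thread.
 */
static inline int
netmap_interface_polled_here (netmap_main_t * nm, int i, u32 cpu_index)
{
  return (i % nm->input_cpu_count) ==
    (cpu_index - nm->input_cpu_first_index);
}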
+/* *INDENT-OFF* */
VLIB_REGISTER_NODE (netmap_input_node) = {
.function = netmap_input_fn,
.name = "netmap-input",
@@ -294,4 +308,13 @@ VLIB_REGISTER_NODE (netmap_input_node) = {
};
VLIB_NODE_FUNCTION_MULTIARCH (netmap_input_node, netmap_input_fn)
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vnet/vnet/devices/nic/ixge.c b/vnet/vnet/devices/nic/ixge.c
index b98e7d9c3af..849dd084f0a 100644
--- a/vnet/vnet/devices/nic/ixge.c
+++ b/vnet/vnet/devices/nic/ixge.c
@@ -54,40 +54,45 @@ ixge_main_t ixge_main;
static vlib_node_registration_t ixge_input_node;
static vlib_node_registration_t ixge_process_node;
-static void ixge_semaphore_get (ixge_device_t * xd)
+static void
+ixge_semaphore_get (ixge_device_t * xd)
{
- ixge_main_t * xm = &ixge_main;
- vlib_main_t * vm = xm->vlib_main;
- ixge_regs_t * r = xd->regs;
+ ixge_main_t *xm = &ixge_main;
+ vlib_main_t *vm = xm->vlib_main;
+ ixge_regs_t *r = xd->regs;
u32 i;
i = 0;
- while (! (r->software_semaphore & (1 << 0)))
+ while (!(r->software_semaphore & (1 << 0)))
{
if (i > 0)
vlib_process_suspend (vm, 100e-6);
i++;
}
- do {
- r->software_semaphore |= 1 << 1;
- } while (! (r->software_semaphore & (1 << 1)));
+ do
+ {
+ r->software_semaphore |= 1 << 1;
+ }
+ while (!(r->software_semaphore & (1 << 1)));
}
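/*
 * Usage note (sketch): register state shared with firmware is always
 * accessed inside a get/release bracket, as ixge_software_firmware_sync()
 * below illustrates:
 *
 *   ixge_semaphore_get (xd);
 *   ... read-modify-write the shared register ...
 *   ixge_semaphore_release (xd);
 */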
-static void ixge_semaphore_release (ixge_device_t * xd)
+static void
+ixge_semaphore_release (ixge_device_t * xd)
{
- ixge_regs_t * r = xd->regs;
+ ixge_regs_t *r = xd->regs;
r->software_semaphore &= ~3;
}
-static void ixge_software_firmware_sync (ixge_device_t * xd, u32 sw_mask)
+static void
+ixge_software_firmware_sync (ixge_device_t * xd, u32 sw_mask)
{
- ixge_main_t * xm = &ixge_main;
- vlib_main_t * vm = xm->vlib_main;
- ixge_regs_t * r = xd->regs;
+ ixge_main_t *xm = &ixge_main;
+ vlib_main_t *vm = xm->vlib_main;
+ ixge_regs_t *r = xd->regs;
u32 fw_mask = sw_mask << 5;
u32 m, done = 0;
- while (! done)
+ while (!done)
{
ixge_semaphore_get (xd);
m = r->software_firmware_sync;
@@ -95,22 +100,25 @@ static void ixge_software_firmware_sync (ixge_device_t * xd, u32 sw_mask)
if (done)
r->software_firmware_sync = m | sw_mask;
ixge_semaphore_release (xd);
- if (! done)
+ if (!done)
vlib_process_suspend (vm, 10e-3);
}
}
-static void ixge_software_firmware_sync_release (ixge_device_t * xd, u32 sw_mask)
+static void
+ixge_software_firmware_sync_release (ixge_device_t * xd, u32 sw_mask)
{
- ixge_regs_t * r = xd->regs;
+ ixge_regs_t *r = xd->regs;
ixge_semaphore_get (xd);
r->software_firmware_sync &= ~sw_mask;
ixge_semaphore_release (xd);
}
-u32 ixge_read_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index, u32 v, u32 is_read)
+u32
+ixge_read_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index,
+ u32 v, u32 is_read)
{
- ixge_regs_t * r = xd->regs;
+ ixge_regs_t *r = xd->regs;
const u32 busy_bit = 1 << 30;
u32 x;
@@ -119,11 +127,13 @@ u32 ixge_read_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index, u3
ASSERT (reg_index < (1 << 16));
ASSERT (dev_type < (1 << 5));
- if (! is_read)
+ if (!is_read)
r->xge_mac.phy_data = v;
/* Address cycle. */
- x = reg_index | (dev_type << 16) | (xd->phys[xd->phy_index].mdio_address << 21);
+ x =
+ reg_index | (dev_type << 16) | (xd->
+ phys[xd->phy_index].mdio_address << 21);
r->xge_mac.phy_command = x | busy_bit;
/* Busy wait timed to take 28e-6 secs. No suspend. */
while (r->xge_mac.phy_command & busy_bit)
@@ -141,16 +151,25 @@ u32 ixge_read_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index, u3
return v;
}
-static u32 ixge_read_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index)
-{ return ixge_read_write_phy_reg (xd, dev_type, reg_index, 0, /* is_read */ 1); }
+static u32
+ixge_read_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index)
+{
+ return ixge_read_write_phy_reg (xd, dev_type, reg_index, 0, /* is_read */
+ 1);
+}
-static void ixge_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index, u32 v)
-{ (void) ixge_read_write_phy_reg (xd, dev_type, reg_index, v, /* is_read */ 0); }
+static void
+ixge_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index, u32 v)
+{
+ (void) ixge_read_write_phy_reg (xd, dev_type, reg_index, v, /* is_read */
+ 0);
+}
-static void ixge_i2c_put_bits (i2c_bus_t * b, int scl, int sda)
+static void
+ixge_i2c_put_bits (i2c_bus_t * b, int scl, int sda)
{
- ixge_main_t * xm = &ixge_main;
- ixge_device_t * xd = vec_elt_at_index (xm->devices, b->private_data);
+ ixge_main_t *xm = &ixge_main;
+ ixge_device_t *xd = vec_elt_at_index (xm->devices, b->private_data);
u32 v;
v = 0;
@@ -159,10 +178,11 @@ static void ixge_i2c_put_bits (i2c_bus_t * b, int scl, int sda)
xd->regs->i2c_control = v;
}
-static void ixge_i2c_get_bits (i2c_bus_t * b, int * scl, int * sda)
+static void
+ixge_i2c_get_bits (i2c_bus_t * b, int *scl, int *sda)
{
- ixge_main_t * xm = &ixge_main;
- ixge_device_t * xd = vec_elt_at_index (xm->devices, b->private_data);
+ ixge_main_t *xm = &ixge_main;
+ ixge_device_t *xd = vec_elt_at_index (xm->devices, b->private_data);
u32 v;
v = xd->regs->i2c_control;
@@ -170,13 +190,14 @@ static void ixge_i2c_get_bits (i2c_bus_t * b, int * scl, int * sda)
*scl = (v & (1 << 0)) != 0;
}
-static u16 ixge_read_eeprom (ixge_device_t * xd, u32 address)
+static u16
+ixge_read_eeprom (ixge_device_t * xd, u32 address)
{
- ixge_regs_t * r = xd->regs;
+ ixge_regs_t *r = xd->regs;
u32 v;
- r->eeprom_read = ((/* start bit */ (1 << 0)) | (address << 2));
+ r->eeprom_read = (( /* start bit */ (1 << 0)) | (address << 2));
/* Wait for done bit. */
- while (! ((v = r->eeprom_read) & (1 << 1)))
+ while (!((v = r->eeprom_read) & (1 << 1)))
;
return v >> 16;
}
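/*
 * Sketch (assumption; hypothetical helper, word order assumed
 * low-word-first): wider EEPROM fields compose from consecutive 16-bit
 * words read through the accessor above.
 */
static inline u32
ixge_read_eeprom_u32 (ixge_device_t * xd, u32 address)
{
  return ((u32) ixge_read_eeprom (xd, address + 1) << 16)
    | ixge_read_eeprom (xd, address);
}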
@@ -258,7 +279,7 @@ ixge_sfp_device_up_down (ixge_device_t * xd, uword is_up)
v |= (1 << 12);
xd->regs->xge_mac.auto_negotiation_control = v;
- while (! (xd->regs->xge_mac.link_partner_ability[0] & 0xf0000))
+ while (!(xd->regs->xge_mac.link_partner_ability[0] & 0xf0000))
;
v = xd->regs->xge_mac.auto_negotiation_control;
@@ -277,16 +298,16 @@ ixge_sfp_device_up_down (ixge_device_t * xd, uword is_up)
ixge_sfp_enable_disable_laser (xd, /* enable */ is_up);
   /* Give the link partner time to notice that we're up. */
- if (is_up &&
- vlib_in_process_context(vlib_get_main())) {
- vlib_process_suspend (vlib_get_main(), 300e-3);
- }
+ if (is_up && vlib_in_process_context (vlib_get_main ()))
+ {
+ vlib_process_suspend (vlib_get_main (), 300e-3);
+ }
}
always_inline ixge_dma_regs_t *
get_dma_regs (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 qi)
{
- ixge_regs_t * r = xd->regs;
+ ixge_regs_t *r = xd->regs;
ASSERT (qi < 128);
if (rt == VLIB_RX)
return qi < 64 ? &r->rx_dma0[qi] : &r->rx_dma1[qi - 64];
@@ -297,19 +318,19 @@ get_dma_regs (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 qi)
static clib_error_t *
ixge_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
- vnet_hw_interface_t * hif = vnet_get_hw_interface (vnm, hw_if_index);
+ vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
- ixge_main_t * xm = &ixge_main;
- ixge_device_t * xd = vec_elt_at_index (xm->devices, hif->dev_instance);
- ixge_dma_regs_t * dr = get_dma_regs (xd, VLIB_RX, 0);
+ ixge_main_t *xm = &ixge_main;
+ ixge_device_t *xd = vec_elt_at_index (xm->devices, hif->dev_instance);
+ ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);
if (is_up)
{
xd->regs->rx_enable |= 1;
xd->regs->tx_dma_control |= 1;
dr->control |= 1 << 25;
- while (! (dr->control & (1 << 25)))
- ;
+ while (!(dr->control & (1 << 25)))
+ ;
}
else
{
@@ -322,24 +343,26 @@ ixge_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
return /* no error */ 0;
}
-static void ixge_sfp_phy_init (ixge_device_t * xd)
+static void
+ixge_sfp_phy_init (ixge_device_t * xd)
{
- ixge_phy_t * phy = xd->phys + xd->phy_index;
- i2c_bus_t * ib = &xd->i2c_bus;
+ ixge_phy_t *phy = xd->phys + xd->phy_index;
+ i2c_bus_t *ib = &xd->i2c_bus;
ib->private_data = xd->device_index;
ib->put_bits = ixge_i2c_put_bits;
ib->get_bits = ixge_i2c_get_bits;
vlib_i2c_init (ib);
- vlib_i2c_read_eeprom (ib, 0x50, 0, 128, (u8 *) &xd->sfp_eeprom);
+ vlib_i2c_read_eeprom (ib, 0x50, 0, 128, (u8 *) & xd->sfp_eeprom);
- if ( vlib_i2c_bus_timed_out(ib) || ! sfp_eeprom_is_valid (&xd->sfp_eeprom))
+ if (vlib_i2c_bus_timed_out (ib) || !sfp_eeprom_is_valid (&xd->sfp_eeprom))
xd->sfp_eeprom.id = SFP_ID_unknown;
else
{
/* FIXME 5 => SR/LR eeprom ID. */
- clib_error_t * e = ixge_sfp_phy_init_from_eeprom (xd, 5 + xd->pci_function);
+ clib_error_t *e =
+ ixge_sfp_phy_init_from_eeprom (xd, 5 + xd->pci_function);
if (e)
clib_error_report (e);
}
@@ -347,11 +370,12 @@ static void ixge_sfp_phy_init (ixge_device_t * xd)
phy->mdio_address = ~0;
}
-static void ixge_phy_init (ixge_device_t * xd)
+static void
+ixge_phy_init (ixge_device_t * xd)
{
- ixge_main_t * xm = &ixge_main;
- vlib_main_t * vm = xm->vlib_main;
- ixge_phy_t * phy = xd->phys + xd->phy_index;
+ ixge_main_t *xm = &ixge_main;
+ vlib_main_t *vm = xm->vlib_main;
+ ixge_phy_t *phy = xd->phys + xd->phy_index;
switch (xd->device_id)
{
@@ -383,16 +407,19 @@ static void ixge_phy_init (ixge_device_t * xd)
return;
}
- phy->id = ((ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID1) << 16)
- | ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID2));
+ phy->id =
+ ((ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID1) << 16) |
+ ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID2));
{
- ELOG_TYPE_DECLARE (e) = {
- .function = (char *) __FUNCTION__,
- .format = "ixge %d, phy id 0x%d mdio address %d",
- .format_args = "i4i4i4",
- };
- struct { u32 instance, id, address; } * ed;
+ ELOG_TYPE_DECLARE (e) =
+ {
+ .function = (char *) __FUNCTION__,.format =
+ "ixge %d, phy id 0x%d mdio address %d",.format_args = "i4i4i4",};
+ struct
+ {
+ u32 instance, id, address;
+ } *ed;
ed = ELOG_DATA (&vm->elog_main, e);
ed->instance = xd->device_index;
ed->id = phy->id;
@@ -400,26 +427,34 @@ static void ixge_phy_init (ixge_device_t * xd)
}
/* Reset phy. */
- ixge_write_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL, XGE_PHY_CONTROL_RESET);
+ ixge_write_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL,
+ XGE_PHY_CONTROL_RESET);
   /* Wait for self-clearing reset bit to clear. */
- do {
- vlib_process_suspend (vm, 1e-3);
- } while (ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL) & XGE_PHY_CONTROL_RESET);
+ do
+ {
+ vlib_process_suspend (vm, 1e-3);
+ }
+ while (ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL) &
+ XGE_PHY_CONTROL_RESET);
}
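/*
 * Sketch (assumption; hypothetical helper) of the suspend-and-poll idiom
 * used for the phy reset above, factored out for any self-clearing bit.
 */
static void
ixge_phy_wait_clear (ixge_device_t * xd, u32 dev_type, u32 reg_index,
		     u32 mask)
{
  vlib_main_t *vm = ixge_main.vlib_main;
  while (ixge_read_phy_reg (xd, dev_type, reg_index) & mask)
    vlib_process_suspend (vm, 1e-3);	/* yield; never busy-wait here */
}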
-static u8 * format_ixge_rx_from_hw_descriptor (u8 * s, va_list * va)
+static u8 *
+format_ixge_rx_from_hw_descriptor (u8 * s, va_list * va)
{
- ixge_rx_from_hw_descriptor_t * d = va_arg (*va, ixge_rx_from_hw_descriptor_t *);
+ ixge_rx_from_hw_descriptor_t *d =
+ va_arg (*va, ixge_rx_from_hw_descriptor_t *);
u32 s0 = d->status[0], s2 = d->status[2];
u32 is_ip4, is_ip6, is_ip, is_tcp, is_udp;
uword indent = format_get_indent (s);
s = format (s, "%s-owned",
- (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE) ? "sw" : "hw");
- s = format (s, ", length this descriptor %d, l3 offset %d",
- d->n_packet_bytes_this_descriptor,
- IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s0));
+ (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE) ? "sw" :
+ "hw");
+ s =
+ format (s, ", length this descriptor %d, l3 offset %d",
+ d->n_packet_bytes_this_descriptor,
+ IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s0));
if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET)
s = format (s, ", end-of-packet");
@@ -441,14 +476,17 @@ static u8 * format_ixge_rx_from_hw_descriptor (u8 * s, va_list * va)
if ((is_ip4 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4)))
{
s = format (s, "ip4%s",
- (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4_EXT) ? " options" : "");
+ (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4_EXT) ? " options" :
+ "");
if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED)
s = format (s, " checksum %s",
- (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR) ? "bad" : "ok");
+ (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR) ?
+ "bad" : "ok");
}
if ((is_ip6 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6)))
s = format (s, "ip6%s",
- (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6_EXT) ? " extended" : "");
+ (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6_EXT) ? " extended" :
+ "");
is_tcp = is_udp = 0;
if ((is_ip = (is_ip4 | is_ip6)))
{
@@ -462,24 +500,26 @@ static u8 * format_ixge_rx_from_hw_descriptor (u8 * s, va_list * va)
if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED)
s = format (s, ", tcp checksum %s",
- (s2 & IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR) ? "bad" : "ok");
+ (s2 & IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR) ? "bad" :
+ "ok");
if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED)
s = format (s, ", udp checksum %s",
- (s2 & IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR) ? "bad" : "ok");
+ (s2 & IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR) ? "bad" :
+ "ok");
return s;
}
-static u8 * format_ixge_tx_descriptor (u8 * s, va_list * va)
+static u8 *
+format_ixge_tx_descriptor (u8 * s, va_list * va)
{
- ixge_tx_descriptor_t * d = va_arg (*va, ixge_tx_descriptor_t *);
+ ixge_tx_descriptor_t *d = va_arg (*va, ixge_tx_descriptor_t *);
u32 s0 = d->status0, s1 = d->status1;
uword indent = format_get_indent (s);
u32 v;
s = format (s, "buffer 0x%Lx, %d packet bytes, %d bytes this buffer",
- d->buffer_address,
- s1 >> 14, d->n_bytes_this_buffer);
+ d->buffer_address, s1 >> 14, d->n_bytes_this_buffer);
s = format (s, "\n%U", format_white_space, indent);
@@ -514,7 +554,8 @@ static u8 * format_ixge_tx_descriptor (u8 * s, va_list * va)
return s;
}
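/*
 * Usage sketch (assumption): like every vlib format function, the
 * formatters above are driven through the %U directive, e.g.
 *
 *   s = format (s, "%U", format_ixge_tx_descriptor, d);
 */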
-typedef struct {
+typedef struct
+{
ixge_descriptor_t before, after;
u32 buffer_index;
@@ -529,22 +570,24 @@ typedef struct {
vlib_buffer_t buffer;
} ixge_rx_dma_trace_t;
-static u8 * format_ixge_rx_dma_trace (u8 * s, va_list * va)
+static u8 *
+format_ixge_rx_dma_trace (u8 * s, va_list * va)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
- vlib_node_t * node = va_arg (*va, vlib_node_t *);
- vnet_main_t * vnm = vnet_get_main();
- ixge_rx_dma_trace_t * t = va_arg (*va, ixge_rx_dma_trace_t *);
- ixge_main_t * xm = &ixge_main;
- ixge_device_t * xd = vec_elt_at_index (xm->devices, t->device_index);
- format_function_t * f;
+ vlib_node_t *node = va_arg (*va, vlib_node_t *);
+ vnet_main_t *vnm = vnet_get_main ();
+ ixge_rx_dma_trace_t *t = va_arg (*va, ixge_rx_dma_trace_t *);
+ ixge_main_t *xm = &ixge_main;
+ ixge_device_t *xd = vec_elt_at_index (xm->devices, t->device_index);
+ format_function_t *f;
uword indent = format_get_indent (s);
{
- vnet_sw_interface_t * sw = vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
- s = format (s, "%U rx queue %d",
- format_vnet_sw_interface_name, vnm, sw,
- t->queue_index);
+ vnet_sw_interface_t *sw =
+ vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
+ s =
+ format (s, "%U rx queue %d", format_vnet_sw_interface_name, vnm, sw,
+ t->queue_index);
}
s = format (s, "\n%Ubefore: %U",
@@ -552,19 +595,16 @@ static u8 * format_ixge_rx_dma_trace (u8 * s, va_list * va)
format_ixge_rx_from_hw_descriptor, &t->before);
s = format (s, "\n%Uafter : head/tail address 0x%Lx/0x%Lx",
format_white_space, indent,
- t->after.rx_to_hw.head_address,
- t->after.rx_to_hw.tail_address);
+ t->after.rx_to_hw.head_address, t->after.rx_to_hw.tail_address);
s = format (s, "\n%Ubuffer 0x%x: %U",
format_white_space, indent,
- t->buffer_index,
- format_vlib_buffer, &t->buffer);
+ t->buffer_index, format_vlib_buffer, &t->buffer);
- s = format (s, "\n%U",
- format_white_space, indent);
+ s = format (s, "\n%U", format_white_space, indent);
f = node->format_buffer;
- if (! f || ! t->is_start_of_packet)
+ if (!f || !t->is_start_of_packet)
f = format_hex_bytes;
s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));
@@ -578,16 +618,17 @@ static u8 * format_ixge_rx_dma_trace (u8 * s, va_list * va)
_ (rx_alloc_fail, "rx buf alloc from free list failed") \
_ (rx_alloc_no_physmem, "rx buf alloc failed no physmem")
-typedef enum {
+typedef enum
+{
#define _(f,s) IXGE_ERROR_##f,
foreach_ixge_error
#undef _
- IXGE_N_ERROR,
+ IXGE_N_ERROR,
} ixge_error_t;
always_inline void
-ixge_rx_next_and_error_from_status_x1 (ixge_device_t *xd,
- u32 s00, u32 s02,
+ixge_rx_next_and_error_from_status_x1 (ixge_device_t * xd,
+ u32 s00, u32 s02,
u8 * next0, u8 * error0, u32 * flags0)
{
u8 is0_ip4, is0_ip6, n0, e0;
@@ -600,8 +641,7 @@ ixge_rx_next_and_error_from_status_x1 (ixge_device_t *xd,
n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;
e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
- ? IXGE_ERROR_ip4_checksum_error
- : e0);
+ ? IXGE_ERROR_ip4_checksum_error : e0);
is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;
@@ -614,13 +654,11 @@ ixge_rx_next_and_error_from_status_x1 (ixge_device_t *xd,
f0 = ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
| IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
- ? IP_BUFFER_L4_CHECKSUM_COMPUTED
- : 0);
+ ? IP_BUFFER_L4_CHECKSUM_COMPUTED : 0);
f0 |= ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
| IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
- ? 0
- : IP_BUFFER_L4_CHECKSUM_CORRECT);
+ ? 0 : IP_BUFFER_L4_CHECKSUM_CORRECT);
*error0 = e0;
*next0 = n0;
@@ -628,8 +666,8 @@ ixge_rx_next_and_error_from_status_x1 (ixge_device_t *xd,
}
always_inline void
-ixge_rx_next_and_error_from_status_x2 (ixge_device_t *xd,
- u32 s00, u32 s02,
+ixge_rx_next_and_error_from_status_x2 (ixge_device_t * xd,
+ u32 s00, u32 s02,
u32 s10, u32 s12,
u8 * next0, u8 * error0, u32 * flags0,
u8 * next1, u8 * error1, u32 * flags1)
@@ -648,11 +686,9 @@ ixge_rx_next_and_error_from_status_x2 (ixge_device_t *xd,
n1 = is1_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n1;
e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
- ? IXGE_ERROR_ip4_checksum_error
- : e0);
+ ? IXGE_ERROR_ip4_checksum_error : e0);
e1 = (is1_ip4 && (s12 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
- ? IXGE_ERROR_ip4_checksum_error
- : e1);
+ ? IXGE_ERROR_ip4_checksum_error : e1);
is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
is1_ip6 = s10 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
@@ -677,21 +713,17 @@ ixge_rx_next_and_error_from_status_x2 (ixge_device_t *xd,
f0 = ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
| IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
- ? IP_BUFFER_L4_CHECKSUM_COMPUTED
- : 0);
+ ? IP_BUFFER_L4_CHECKSUM_COMPUTED : 0);
f1 = ((s12 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
| IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
- ? IP_BUFFER_L4_CHECKSUM_COMPUTED
- : 0);
+ ? IP_BUFFER_L4_CHECKSUM_COMPUTED : 0);
f0 |= ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
| IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
- ? 0
- : IP_BUFFER_L4_CHECKSUM_CORRECT);
+ ? 0 : IP_BUFFER_L4_CHECKSUM_CORRECT);
f1 |= ((s12 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
| IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
- ? 0
- : IP_BUFFER_L4_CHECKSUM_CORRECT);
+ ? 0 : IP_BUFFER_L4_CHECKSUM_CORRECT);
*flags0 = f0;
*flags1 = f1;
@@ -703,14 +735,13 @@ ixge_rx_trace (ixge_main_t * xm,
ixge_dma_queue_t * dq,
ixge_descriptor_t * before_descriptors,
u32 * before_buffers,
- ixge_descriptor_t * after_descriptors,
- uword n_descriptors)
+ ixge_descriptor_t * after_descriptors, uword n_descriptors)
{
- vlib_main_t * vm = xm->vlib_main;
- vlib_node_runtime_t * node = dq->rx.node;
- ixge_rx_from_hw_descriptor_t * bd;
- ixge_rx_to_hw_descriptor_t * ad;
- u32 * b, n_left, is_sop, next_index_sop;
+ vlib_main_t *vm = xm->vlib_main;
+ vlib_node_runtime_t *node = dq->rx.node;
+ ixge_rx_from_hw_descriptor_t *bd;
+ ixge_rx_to_hw_descriptor_t *ad;
+ u32 *b, n_left, is_sop, next_index_sop;
n_left = n_descriptors;
b = before_buffers;
@@ -722,8 +753,8 @@ ixge_rx_trace (ixge_main_t * xm,
while (n_left >= 2)
{
u32 bi0, bi1, flags0, flags1;
- vlib_buffer_t * b0, * b1;
- ixge_rx_dma_trace_t * t0, * t1;
+ vlib_buffer_t *b0, *b1;
+ ixge_rx_dma_trace_t *t0, *t1;
u8 next0, error0, next1, error1;
bi0 = b[0];
@@ -734,7 +765,7 @@ ixge_rx_trace (ixge_main_t * xm,
b1 = vlib_get_buffer (vm, bi1);
ixge_rx_next_and_error_from_status_x2 (xd,
- bd[0].status[0], bd[0].status[2],
+ bd[0].status[0], bd[0].status[2],
bd[1].status[0], bd[1].status[2],
&next0, &error0, &flags0,
&next1, &error1, &flags1);
@@ -776,8 +807,8 @@ ixge_rx_trace (ixge_main_t * xm,
while (n_left >= 1)
{
u32 bi0, flags0;
- vlib_buffer_t * b0;
- ixge_rx_dma_trace_t * t0;
+ vlib_buffer_t *b0;
+ ixge_rx_dma_trace_t *t0;
u8 next0, error0;
bi0 = b[0];
@@ -786,7 +817,7 @@ ixge_rx_trace (ixge_main_t * xm,
b0 = vlib_get_buffer (vm, bi0);
ixge_rx_next_and_error_from_status_x1 (xd,
- bd[0].status[0], bd[0].status[2],
+ bd[0].status[0], bd[0].status[2],
&next0, &error0, &flags0);
next_index_sop = is_sop ? next0 : next_index_sop;
@@ -810,7 +841,8 @@ ixge_rx_trace (ixge_main_t * xm,
}
}
-typedef struct {
+typedef struct
+{
ixge_tx_descriptor_t descriptor;
u32 buffer_index;
@@ -825,22 +857,24 @@ typedef struct {
vlib_buffer_t buffer;
} ixge_tx_dma_trace_t;
-static u8 * format_ixge_tx_dma_trace (u8 * s, va_list * va)
+static u8 *
+format_ixge_tx_dma_trace (u8 * s, va_list * va)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
- ixge_tx_dma_trace_t * t = va_arg (*va, ixge_tx_dma_trace_t *);
- vnet_main_t * vnm = vnet_get_main();
- ixge_main_t * xm = &ixge_main;
- ixge_device_t * xd = vec_elt_at_index (xm->devices, t->device_index);
- format_function_t * f;
+ ixge_tx_dma_trace_t *t = va_arg (*va, ixge_tx_dma_trace_t *);
+ vnet_main_t *vnm = vnet_get_main ();
+ ixge_main_t *xm = &ixge_main;
+ ixge_device_t *xd = vec_elt_at_index (xm->devices, t->device_index);
+ format_function_t *f;
uword indent = format_get_indent (s);
{
- vnet_sw_interface_t * sw = vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
- s = format (s, "%U tx queue %d",
- format_vnet_sw_interface_name, vnm, sw,
- t->queue_index);
+ vnet_sw_interface_t *sw =
+ vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
+ s =
+ format (s, "%U tx queue %d", format_vnet_sw_interface_name, vnm, sw,
+ t->queue_index);
}
s = format (s, "\n%Udescriptor: %U",
@@ -849,28 +883,27 @@ static u8 * format_ixge_tx_dma_trace (u8 * s, va_list * va)
s = format (s, "\n%Ubuffer 0x%x: %U",
format_white_space, indent,
- t->buffer_index,
- format_vlib_buffer, &t->buffer);
+ t->buffer_index, format_vlib_buffer, &t->buffer);
- s = format (s, "\n%U",
- format_white_space, indent);
+ s = format (s, "\n%U", format_white_space, indent);
f = format_ethernet_header_with_length;
- if (! f || ! t->is_start_of_packet)
+ if (!f || !t->is_start_of_packet)
f = format_hex_bytes;
s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));
return s;
}
-typedef struct {
- vlib_node_runtime_t * node;
+typedef struct
+{
+ vlib_node_runtime_t *node;
u32 is_start_of_packet;
u32 n_bytes_in_packet;
- ixge_tx_descriptor_t * start_of_packet_descriptor;
+ ixge_tx_descriptor_t *start_of_packet_descriptor;
} ixge_tx_state_t;
static void
@@ -879,13 +912,12 @@ ixge_tx_trace (ixge_main_t * xm,
ixge_dma_queue_t * dq,
ixge_tx_state_t * tx_state,
ixge_tx_descriptor_t * descriptors,
- u32 * buffers,
- uword n_descriptors)
+ u32 * buffers, uword n_descriptors)
{
- vlib_main_t * vm = xm->vlib_main;
- vlib_node_runtime_t * node = tx_state->node;
- ixge_tx_descriptor_t * d;
- u32 * b, n_left, is_sop;
+ vlib_main_t *vm = xm->vlib_main;
+ vlib_node_runtime_t *node = tx_state->node;
+ ixge_tx_descriptor_t *d;
+ u32 *b, n_left, is_sop;
n_left = n_descriptors;
b = buffers;
@@ -895,8 +927,8 @@ ixge_tx_trace (ixge_main_t * xm,
while (n_left >= 2)
{
u32 bi0, bi1;
- vlib_buffer_t * b0, * b1;
- ixge_tx_dma_trace_t * t0, * t1;
+ vlib_buffer_t *b0, *b1;
+ ixge_tx_dma_trace_t *t0, *t1;
bi0 = b[0];
bi1 = b[1];
@@ -935,8 +967,8 @@ ixge_tx_trace (ixge_main_t * xm,
while (n_left >= 1)
{
u32 bi0;
- vlib_buffer_t * b0;
- ixge_tx_dma_trace_t * t0;
+ vlib_buffer_t *b0;
+ ixge_tx_dma_trace_t *t0;
bi0 = b[0];
n_left -= 1;
@@ -980,7 +1012,8 @@ ixge_ring_add (ixge_dma_queue_t * q, u32 i0, u32 i1)
}
always_inline uword
-ixge_tx_descriptor_matches_template (ixge_main_t * xm, ixge_tx_descriptor_t * d)
+ixge_tx_descriptor_matches_template (ixge_main_t * xm,
+ ixge_tx_descriptor_t * d)
{
u32 cmp;
@@ -1002,14 +1035,14 @@ ixge_tx_no_wrap (ixge_main_t * xm,
ixge_dma_queue_t * dq,
u32 * buffers,
u32 start_descriptor_index,
- u32 n_descriptors,
- ixge_tx_state_t * tx_state)
+ u32 n_descriptors, ixge_tx_state_t * tx_state)
{
- vlib_main_t * vm = xm->vlib_main;
- ixge_tx_descriptor_t * d, * d_sop;
+ vlib_main_t *vm = xm->vlib_main;
+ ixge_tx_descriptor_t *d, *d_sop;
u32 n_left = n_descriptors;
- u32 * to_free = vec_end (xm->tx_buffers_pending_free);
- u32 * to_tx = vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
+ u32 *to_free = vec_end (xm->tx_buffers_pending_free);
+ u32 *to_tx =
+ vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
u32 is_sop = tx_state->is_start_of_packet;
u32 len_sop = tx_state->n_bytes_in_packet;
u16 template_status = xm->tx_descriptor_template.status0;
@@ -1021,7 +1054,7 @@ ixge_tx_no_wrap (ixge_main_t * xm,
while (n_left >= 4)
{
- vlib_buffer_t * b0, * b1;
+ vlib_buffer_t *b0, *b1;
u32 bi0, fi0, len0;
u32 bi1, fi1, len1;
u8 is_eop0, is_eop1;
@@ -1031,7 +1064,7 @@ ixge_tx_no_wrap (ixge_main_t * xm,
vlib_prefetch_buffer_with_index (vm, buffers[3], LOAD);
if ((descriptor_prefetch_rotor & 0x3) == 0)
- CLIB_PREFETCH (d + 4, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (d + 4, CLIB_CACHE_LINE_BYTES, STORE);
descriptor_prefetch_rotor += 2;
@@ -1062,24 +1095,32 @@ ixge_tx_no_wrap (ixge_main_t * xm,
ASSERT (ixge_tx_descriptor_matches_template (xm, d + 0));
ASSERT (ixge_tx_descriptor_matches_template (xm, d + 1));
- d[0].buffer_address = vlib_get_buffer_data_physical_address (vm, bi0) + b0->current_data;
- d[1].buffer_address = vlib_get_buffer_data_physical_address (vm, bi1) + b1->current_data;
+ d[0].buffer_address =
+ vlib_get_buffer_data_physical_address (vm, bi0) + b0->current_data;
+ d[1].buffer_address =
+ vlib_get_buffer_data_physical_address (vm, bi1) + b1->current_data;
d[0].n_bytes_this_buffer = len0;
d[1].n_bytes_this_buffer = len1;
- d[0].status0 = template_status | (is_eop0 << IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
- d[1].status0 = template_status | (is_eop1 << IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
+ d[0].status0 =
+ template_status | (is_eop0 <<
+ IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
+ d[1].status0 =
+ template_status | (is_eop1 <<
+ IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
len_sop = (is_sop ? 0 : len_sop) + len0;
- d_sop[0].status1 = IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
+ d_sop[0].status1 =
+ IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
d += 1;
d_sop = is_eop0 ? d : d_sop;
is_sop = is_eop0;
len_sop = (is_sop ? 0 : len_sop) + len1;
- d_sop[0].status1 = IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
+ d_sop[0].status1 =
+ IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
d += 1;
d_sop = is_eop1 ? d : d_sop;
@@ -1088,7 +1129,7 @@ ixge_tx_no_wrap (ixge_main_t * xm,
while (n_left > 0)
{
- vlib_buffer_t * b0;
+ vlib_buffer_t *b0;
u32 bi0, fi0, len0;
u8 is_eop0;
@@ -1110,14 +1151,18 @@ ixge_tx_no_wrap (ixge_main_t * xm,
ASSERT (ixge_tx_descriptor_matches_template (xm, d + 0));
- d[0].buffer_address = vlib_get_buffer_data_physical_address (vm, bi0) + b0->current_data;
+ d[0].buffer_address =
+ vlib_get_buffer_data_physical_address (vm, bi0) + b0->current_data;
d[0].n_bytes_this_buffer = len0;
- d[0].status0 = template_status | (is_eop0 << IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
+ d[0].status0 =
+ template_status | (is_eop0 <<
+ IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
len_sop = (is_sop ? 0 : len_sop) + len0;
- d_sop[0].status1 = IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
+ d_sop[0].status1 =
+ IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
d += 1;
d_sop = is_eop0 ? d : d_sop;
@@ -1126,18 +1171,20 @@ ixge_tx_no_wrap (ixge_main_t * xm,
if (tx_state->node->flags & VLIB_NODE_FLAG_TRACE)
{
- to_tx = vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
+ to_tx =
+ vec_elt_at_index (dq->descriptor_buffer_indices,
+ start_descriptor_index);
ixge_tx_trace (xm, xd, dq, tx_state,
- &dq->descriptors[start_descriptor_index].tx,
- to_tx,
+ &dq->descriptors[start_descriptor_index].tx, to_tx,
n_descriptors);
}
- _vec_len (xm->tx_buffers_pending_free) = to_free - xm->tx_buffers_pending_free;
+ _vec_len (xm->tx_buffers_pending_free) =
+ to_free - xm->tx_buffers_pending_free;
  /* When we are done, d_sop can point to the end of the ring. Wrap it if so. */
{
- ixge_tx_descriptor_t * d_start = &dq->descriptors[0].tx;
+ ixge_tx_descriptor_t *d_start = &dq->descriptors[0].tx;
ASSERT (d_sop - d_start <= dq->n_descriptors);
d_sop = d_sop - d_start == dq->n_descriptors ? d_start : d_sop;
@@ -1152,14 +1199,13 @@ ixge_tx_no_wrap (ixge_main_t * xm,
static uword
ixge_interface_tx (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * f)
+ vlib_node_runtime_t * node, vlib_frame_t * f)
{
- ixge_main_t * xm = &ixge_main;
- vnet_interface_output_runtime_t * rd = (void *) node->runtime_data;
- ixge_device_t * xd = vec_elt_at_index (xm->devices, rd->dev_instance);
- ixge_dma_queue_t * dq;
- u32 * from, n_left_tx, n_descriptors_to_tx, n_tail_drop;
+ ixge_main_t *xm = &ixge_main;
+ vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
+ ixge_device_t *xd = vec_elt_at_index (xm->devices, rd->dev_instance);
+ ixge_dma_queue_t *dq;
+ u32 *from, n_left_tx, n_descriptors_to_tx, n_tail_drop;
u32 queue_index = 0; /* fixme parameter */
ixge_tx_state_t tx_state;
@@ -1189,8 +1235,8 @@ ixge_interface_tx (vlib_main_t * vm,
i_sop = i_eop = ~0;
for (i = n_left_tx - 1; i >= 0; i--)
{
- vlib_buffer_t * b = vlib_get_buffer (vm, from[i]);
- if (! (b->flags & VLIB_BUFFER_NEXT_PRESENT))
+ vlib_buffer_t *b = vlib_get_buffer (vm, from[i]);
+ if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
{
if (i_sop != ~0 && i_eop != ~0)
break;
@@ -1204,12 +1250,15 @@ ixge_interface_tx (vlib_main_t * vm,
n_ok = i_eop + 1;
{
- ELOG_TYPE_DECLARE (e) = {
- .function = (char *) __FUNCTION__,
- .format = "ixge %d, ring full to tx %d head %d tail %d",
- .format_args = "i2i2i2i2",
- };
- struct { u16 instance, to_tx, head, tail; } * ed;
+ ELOG_TYPE_DECLARE (e) =
+ {
+ .function = (char *) __FUNCTION__,.format =
+ "ixge %d, ring full to tx %d head %d tail %d",.format_args =
+ "i2i2i2i2",};
+ struct
+ {
+ u16 instance, to_tx, head, tail;
+ } *ed;
ed = ELOG_DATA (&vm->elog_main, e);
ed->instance = xd->device_index;
ed->to_tx = n_descriptors_to_tx;
@@ -1221,7 +1270,8 @@ ixge_interface_tx (vlib_main_t * vm,
{
n_tail_drop = n_descriptors_to_tx - n_ok;
vec_add (xm->tx_buffers_pending_free, from + n_ok, n_tail_drop);
- vlib_error_count (vm, ixge_input_node.index, IXGE_ERROR_tx_full_drops, n_tail_drop);
+ vlib_error_count (vm, ixge_input_node.index,
+ IXGE_ERROR_tx_full_drops, n_tail_drop);
}
n_descriptors_to_tx = n_ok;
@@ -1232,7 +1282,8 @@ ixge_interface_tx (vlib_main_t * vm,
/* Process from tail to end of descriptor ring. */
if (n_descriptors_to_tx > 0 && dq->tail_index < dq->n_descriptors)
{
- u32 n = clib_min (dq->n_descriptors - dq->tail_index, n_descriptors_to_tx);
+ u32 n =
+ clib_min (dq->n_descriptors - dq->tail_index, n_descriptors_to_tx);
n = ixge_tx_no_wrap (xm, xd, dq, from, dq->tail_index, n, &tx_state);
from += n;
n_descriptors_to_tx -= n;
@@ -1244,7 +1295,8 @@ ixge_interface_tx (vlib_main_t * vm,
if (n_descriptors_to_tx > 0)
{
- u32 n = ixge_tx_no_wrap (xm, xd, dq, from, 0, n_descriptors_to_tx, &tx_state);
+ u32 n =
+ ixge_tx_no_wrap (xm, xd, dq, from, 0, n_descriptors_to_tx, &tx_state);
from += n;
ASSERT (n == n_descriptors_to_tx);
dq->tail_index += n;
@@ -1259,13 +1311,13 @@ ixge_interface_tx (vlib_main_t * vm,
/* Report status when last descriptor is done. */
{
u32 i = dq->tail_index == 0 ? dq->n_descriptors - 1 : dq->tail_index - 1;
- ixge_tx_descriptor_t * d = &dq->descriptors[i].tx;
+ ixge_tx_descriptor_t *d = &dq->descriptors[i].tx;
d->status0 |= IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS;
}
/* Give new descriptors to hardware. */
{
- ixge_dma_regs_t * dr = get_dma_regs (xd, VLIB_TX, queue_index);
+ ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_TX, queue_index);
CLIB_MEMORY_BARRIER ();
@@ -1291,26 +1343,26 @@ static uword
ixge_rx_queue_no_wrap (ixge_main_t * xm,
ixge_device_t * xd,
ixge_dma_queue_t * dq,
- u32 start_descriptor_index,
- u32 n_descriptors)
+ u32 start_descriptor_index, u32 n_descriptors)
{
- vlib_main_t * vm = xm->vlib_main;
- vlib_node_runtime_t * node = dq->rx.node;
- ixge_descriptor_t * d;
- static ixge_descriptor_t * d_trace_save;
- static u32 * d_trace_buffers;
+ vlib_main_t *vm = xm->vlib_main;
+ vlib_node_runtime_t *node = dq->rx.node;
+ ixge_descriptor_t *d;
+ static ixge_descriptor_t *d_trace_save;
+ static u32 *d_trace_buffers;
u32 n_descriptors_left = n_descriptors;
- u32 * to_rx = vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
- u32 * to_add;
+ u32 *to_rx =
+ vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
+ u32 *to_add;
u32 bi_sop = dq->rx.saved_start_of_packet_buffer_index;
u32 bi_last = dq->rx.saved_last_buffer_index;
u32 next_index_sop = dq->rx.saved_start_of_packet_next_index;
u32 is_sop = dq->rx.is_start_of_packet;
- u32 next_index, n_left_to_next, * to_next;
+ u32 next_index, n_left_to_next, *to_next;
u32 n_packets = 0;
u32 n_bytes = 0;
u32 n_trace = vlib_get_trace_count (vm, node);
- vlib_buffer_t * b_last, b_dummy;
+ vlib_buffer_t *b_last, b_dummy;
ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
d = &dq->descriptors[start_descriptor_index];
@@ -1346,8 +1398,8 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
xm->vlib_buffer_free_list_index);
_vec_len (xm->rx_buffers_to_add) += n_allocated;
- /* Handle transient allocation failure */
- if (PREDICT_FALSE(l + n_allocated <= n_descriptors_left))
+ /* Handle transient allocation failure */
+ if (PREDICT_FALSE (l + n_allocated <= n_descriptors_left))
{
if (n_allocated == 0)
vlib_error_count (vm, ixge_input_node.index,
@@ -1358,7 +1410,7 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
n_descriptors_left = l + n_allocated;
}
- n_descriptors = n_descriptors_left;
+ n_descriptors = n_descriptors_left;
}
/* Add buffers from end of vector going backwards. */
@@ -1367,25 +1419,24 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
while (n_descriptors_left > 0)
{
- vlib_get_next_frame (vm, node, next_index,
- to_next, n_left_to_next);
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
while (n_descriptors_left >= 4 && n_left_to_next >= 2)
{
- vlib_buffer_t * b0, * b1;
+ vlib_buffer_t *b0, *b1;
u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
u32 bi1, fi1, len1, l3_offset1, s21, s01, flags1;
u8 is_eop0, error0, next0;
u8 is_eop1, error1, next1;
- ixge_descriptor_t d0, d1;
+ ixge_descriptor_t d0, d1;
vlib_prefetch_buffer_with_index (vm, to_rx[2], STORE);
vlib_prefetch_buffer_with_index (vm, to_rx[3], STORE);
- CLIB_PREFETCH (d + 2, 32, STORE);
+ CLIB_PREFETCH (d + 2, 32, STORE);
- d0.as_u32x4 = d[0].as_u32x4;
- d1.as_u32x4 = d[1].as_u32x4;
+ d0.as_u32x4 = d[0].as_u32x4;
+ d1.as_u32x4 = d[1].as_u32x4;
s20 = d0.rx_from_hw.status[2];
s21 = d1.rx_from_hw.status[2];
@@ -1393,7 +1444,8 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
s00 = d0.rx_from_hw.status[0];
s01 = d1.rx_from_hw.status[0];
- if (! ((s20 & s21) & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE))
+ if (!
+ ((s20 & s21) & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE))
goto found_hw_owned_descriptor_x2;
bi0 = to_rx[0];
@@ -1408,21 +1460,25 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
to_rx += 2;
to_add -= 2;
- ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (vm, bi0));
- ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (vm, bi1));
- ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (vm, fi0));
- ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (vm, fi1));
+ ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
+ vlib_buffer_is_known (vm, bi0));
+ ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
+ vlib_buffer_is_known (vm, bi1));
+ ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
+ vlib_buffer_is_known (vm, fi0));
+ ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
+ vlib_buffer_is_known (vm, fi1));
b0 = vlib_get_buffer (vm, bi0);
b1 = vlib_get_buffer (vm, bi1);
- /*
- * Turn this on if you run into
- * "bad monkey" contexts, and you want to know exactly
- * which nodes they've visited... See main.c...
- */
- VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b0);
- VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b1);
+ /*
+ * Turn this on if you run into
+ * "bad monkey" contexts, and you want to know exactly
+ * which nodes they've visited... See main.c...
+ */
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
CLIB_PREFETCH (b0->data, CLIB_CACHE_LINE_BYTES, LOAD);
CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, LOAD);
@@ -1443,8 +1499,8 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
vnet_buffer (b1)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
- vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32)~0;
- vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32)~0;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
b0->error = node->errors[error0];
b1->error = node->errors[error1];
@@ -1456,24 +1512,22 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
/* Give new buffers to hardware. */
d0.rx_to_hw.tail_address =
- vlib_get_buffer_data_physical_address (vm, fi0);
+ vlib_get_buffer_data_physical_address (vm, fi0);
d1.rx_to_hw.tail_address =
- vlib_get_buffer_data_physical_address (vm, fi1);
+ vlib_get_buffer_data_physical_address (vm, fi1);
d0.rx_to_hw.head_address = d[0].rx_to_hw.tail_address;
d1.rx_to_hw.head_address = d[1].rx_to_hw.tail_address;
- d[0].as_u32x4 = d0.as_u32x4;
- d[1].as_u32x4 = d1.as_u32x4;
+ d[0].as_u32x4 = d0.as_u32x4;
+ d[1].as_u32x4 = d1.as_u32x4;
d += 2;
n_descriptors_left -= 2;
/* Point to either l2 or l3 header depending on next. */
l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
- ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s00)
- : 0;
+ ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s00) : 0;
l3_offset1 = (is_eop0 && (next1 != IXGE_RX_NEXT_ETHERNET_INPUT))
- ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s01)
- : 0;
+ ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s01) : 0;
b0->current_length = len0 - l3_offset0;
b1->current_length = len1 - l3_offset1;
@@ -1492,122 +1546,126 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
if (is_eop0)
{
- u8 * msg = vlib_validate_buffer (vm, bi_sop0, /* follow_buffer_next */ 1);
- ASSERT (! msg);
+ u8 *msg = vlib_validate_buffer (vm, bi_sop0,
+ /* follow_buffer_next */ 1);
+ ASSERT (!msg);
}
if (is_eop1)
{
- u8 * msg = vlib_validate_buffer (vm, bi_sop1, /* follow_buffer_next */ 1);
- ASSERT (! msg);
+ u8 *msg = vlib_validate_buffer (vm, bi_sop1,
+ /* follow_buffer_next */ 1);
+ ASSERT (!msg);
+ }
+ }
+ if (0) /* "Dave" version */
+ {
+ u32 bi_sop0 = is_sop ? bi0 : bi_sop;
+ u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
+
+ if (is_eop0)
+ {
+ to_next[0] = bi_sop0;
+ to_next++;
+ n_left_to_next--;
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi_sop0, next0);
+ }
+ if (is_eop1)
+ {
+ to_next[0] = bi_sop1;
+ to_next++;
+ n_left_to_next--;
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi_sop1, next1);
+ }
+ is_sop = is_eop1;
+ bi_sop = bi_sop1;
+ }
+ if (1) /* "Eliot" version */
+ {
+ /* Speculatively enqueue to cached next. */
+ u8 saved_is_sop = is_sop;
+ u32 bi_sop_save = bi_sop;
+
+ bi_sop = saved_is_sop ? bi0 : bi_sop;
+ to_next[0] = bi_sop;
+ to_next += is_eop0;
+ n_left_to_next -= is_eop0;
+
+ bi_sop = is_eop0 ? bi1 : bi_sop;
+ to_next[0] = bi_sop;
+ to_next += is_eop1;
+ n_left_to_next -= is_eop1;
+
+ is_sop = is_eop1;
+
+ if (PREDICT_FALSE
+ (!(next0 == next_index && next1 == next_index)))
+ {
+ /* Undo speculation. */
+ to_next -= is_eop0 + is_eop1;
+ n_left_to_next += is_eop0 + is_eop1;
+
+ /* Re-do both descriptors being careful about where we enqueue. */
+ bi_sop = saved_is_sop ? bi0 : bi_sop_save;
+ if (is_eop0)
+ {
+ if (next0 != next_index)
+ vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
+ else
+ {
+ to_next[0] = bi_sop;
+ to_next += 1;
+ n_left_to_next -= 1;
+ }
+ }
+
+ bi_sop = is_eop0 ? bi1 : bi_sop;
+ if (is_eop1)
+ {
+ if (next1 != next_index)
+ vlib_set_next_frame_buffer (vm, node, next1, bi_sop);
+ else
+ {
+ to_next[0] = bi_sop;
+ to_next += 1;
+ n_left_to_next -= 1;
+ }
+ }
+
+ /* Switch cached next index when next for both packets is the same. */
+ if (is_eop0 && is_eop1 && next0 == next1)
+ {
+ vlib_put_next_frame (vm, node, next_index,
+ n_left_to_next);
+ next_index = next0;
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+ }
}
}
- if (0) /* "Dave" version */
- {
- u32 bi_sop0 = is_sop ? bi0 : bi_sop;
- u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
-
- if (is_eop0)
- {
- to_next[0] = bi_sop0;
- to_next++;
- n_left_to_next--;
-
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi_sop0, next0);
- }
- if (is_eop1)
- {
- to_next[0] = bi_sop1;
- to_next++;
- n_left_to_next--;
-
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi_sop1, next1);
- }
- is_sop = is_eop1;
- bi_sop = bi_sop1;
- }
- if (1) /* "Eliot" version */
- {
- /* Speculatively enqueue to cached next. */
- u8 saved_is_sop = is_sop;
- u32 bi_sop_save = bi_sop;
-
- bi_sop = saved_is_sop ? bi0 : bi_sop;
- to_next[0] = bi_sop;
- to_next += is_eop0;
- n_left_to_next -= is_eop0;
-
- bi_sop = is_eop0 ? bi1 : bi_sop;
- to_next[0] = bi_sop;
- to_next += is_eop1;
- n_left_to_next -= is_eop1;
-
- is_sop = is_eop1;
-
- if (PREDICT_FALSE (! (next0 == next_index && next1 == next_index)))
- {
- /* Undo speculation. */
- to_next -= is_eop0 + is_eop1;
- n_left_to_next += is_eop0 + is_eop1;
-
- /* Re-do both descriptors being careful about where we enqueue. */
- bi_sop = saved_is_sop ? bi0 : bi_sop_save;
- if (is_eop0)
- {
- if (next0 != next_index)
- vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
- else
- {
- to_next[0] = bi_sop;
- to_next += 1;
- n_left_to_next -= 1;
- }
- }
-
- bi_sop = is_eop0 ? bi1 : bi_sop;
- if (is_eop1)
- {
- if (next1 != next_index)
- vlib_set_next_frame_buffer (vm, node, next1, bi_sop);
- else
- {
- to_next[0] = bi_sop;
- to_next += 1;
- n_left_to_next -= 1;
- }
- }
-
- /* Switch cached next index when next for both packets is the same. */
- if (is_eop0 && is_eop1 && next0 == next1)
- {
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
- next_index = next0;
- vlib_get_next_frame (vm, node, next_index,
- to_next, n_left_to_next);
- }
- }
- }
}
- /* Bail out of dual loop and proceed with single loop. */
+ /* Bail out of dual loop and proceed with single loop. */
found_hw_owned_descriptor_x2:
while (n_descriptors_left > 0 && n_left_to_next > 0)
{
- vlib_buffer_t * b0;
+ vlib_buffer_t *b0;
u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
u8 is_eop0, error0, next0;
- ixge_descriptor_t d0;
+ ixge_descriptor_t d0;
- d0.as_u32x4 = d[0].as_u32x4;
+ d0.as_u32x4 = d[0].as_u32x4;
s20 = d0.rx_from_hw.status[2];
s00 = d0.rx_from_hw.status[0];
- if (! (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE))
+ if (!(s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE))
goto found_hw_owned_descriptor_x1;
bi0 = to_rx[0];
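[Editor's sketch for the dual-loop hunk above: the "Eliot" variant (the if (1) block) speculates that both packets go to the cached next node. It stores each buffer index unconditionally, advances the frame cursor only by is_eop, and rolls the whole advance back when either packet's next index misses the cache. A stand-alone sketch of that pattern, using a toy frame in place of vlib's; all names here are hypothetical:]

    #include <stdint.h>

    /* Toy "next frame": a cursor into an array of buffer indices. */
    typedef struct
    {
      uint32_t slots[16];
      int n_left;
      uint32_t *to_next;
    } toy_frame_t;

    static void
    speculative_enqueue_x2 (toy_frame_t * f, uint32_t cached_next,
                            uint32_t bi0, uint32_t next0, int is_eop0,
                            uint32_t bi1, uint32_t next1, int is_eop1)
    {
      uint32_t *to_next = f->to_next;

      /* Speculate: store unconditionally, advance only on end-of-packet. */
      to_next[0] = bi0;
      to_next += is_eop0;
      to_next[0] = bi1;
      to_next += is_eop1;

      if (next0 != cached_next || next1 != cached_next)
        {
          /* Undo the speculation, then re-store only the hits. */
          to_next -= is_eop0 + is_eop1;
          if (is_eop0 && next0 == cached_next)
            { to_next[0] = bi0; to_next++; }
          if (is_eop1 && next1 == cached_next)
            { to_next[0] = bi1; to_next++; }
        }

      f->n_left -= (int) (to_next - f->to_next);
      f->to_next = to_next;
    }

[Packets whose next node differs are handed off one at a time in the driver via vlib_set_next_frame_buffer; the sketch simply drops them to stay short.]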
@@ -1618,21 +1676,23 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
to_rx += 1;
to_add -= 1;
- ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (vm, bi0));
- ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (vm, fi0));
+ ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
+ vlib_buffer_is_known (vm, bi0));
+ ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED ==
+ vlib_buffer_is_known (vm, fi0));
b0 = vlib_get_buffer (vm, bi0);
- /*
- * Turn this on if you run into
- * "bad monkey" contexts, and you want to know exactly
- * which nodes they've visited...
- */
- VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b0);
+ /*
+ * Turn this on if you run into
+ * "bad monkey" contexts, and you want to know exactly
+ * which nodes they've visited...
+ */
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
ixge_rx_next_and_error_from_status_x1
- (xd, s00, s20, &next0, &error0, &flags0);
+ (xd, s00, s20, &next0, &error0, &flags0);
next0 = is_sop ? next0 : next_index_sop;
next_index_sop = next0;
@@ -1640,7 +1700,7 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
- vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32)~0;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
b0->error = node->errors[error0];
@@ -1650,17 +1710,16 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
/* Give new buffer to hardware. */
d0.rx_to_hw.tail_address =
- vlib_get_buffer_data_physical_address (vm, fi0);
+ vlib_get_buffer_data_physical_address (vm, fi0);
d0.rx_to_hw.head_address = d0.rx_to_hw.tail_address;
- d[0].as_u32x4 = d0.as_u32x4;
+ d[0].as_u32x4 = d0.as_u32x4;
d += 1;
n_descriptors_left -= 1;
/* Point to either l2 or l3 header depending on next. */
l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
- ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s00)
- : 0;
+ ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s00) : 0;
b0->current_length = len0 - l3_offset0;
b0->current_data = l3_offset0;
@@ -1672,48 +1731,49 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
if (CLIB_DEBUG > 0 && is_eop0)
{
- u8 * msg = vlib_validate_buffer (vm, bi_sop, /* follow_buffer_next */ 1);
- ASSERT (! msg);
+ u8 *msg =
+ vlib_validate_buffer (vm, bi_sop, /* follow_buffer_next */ 1);
+ ASSERT (!msg);
}
- if (0) /* "Dave" version */
- {
- if (is_eop0)
- {
- to_next[0] = bi_sop;
- to_next++;
- n_left_to_next--;
-
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi_sop, next0);
- }
- }
- if (1) /* "Eliot" version */
- {
- if (PREDICT_TRUE (next0 == next_index))
- {
- to_next[0] = bi_sop;
- to_next += is_eop0;
- n_left_to_next -= is_eop0;
- }
- else
- {
- if (next0 != next_index && is_eop0)
- vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
-
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
- next_index = next0;
- vlib_get_next_frame (vm, node, next_index,
- to_next, n_left_to_next);
- }
- }
- is_sop = is_eop0;
+ if (0) /* "Dave" version */
+ {
+ if (is_eop0)
+ {
+ to_next[0] = bi_sop;
+ to_next++;
+ n_left_to_next--;
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi_sop, next0);
+ }
+ }
+ if (1) /* "Eliot" version */
+ {
+ if (PREDICT_TRUE (next0 == next_index))
+ {
+ to_next[0] = bi_sop;
+ to_next += is_eop0;
+ n_left_to_next -= is_eop0;
+ }
+ else
+ {
+ if (next0 != next_index && is_eop0)
+ vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ next_index = next0;
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
+ }
+ }
+ is_sop = is_eop0;
}
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
- found_hw_owned_descriptor_x1:
+found_hw_owned_descriptor_x1:
if (n_descriptors_left > 0)
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
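[Editor's sketch: the single-descriptor loop above assembles multi-buffer packets by carrying bi_sop across descriptors and setting a next-present flag until an end-of-packet status bit appears. A minimal sketch of the chaining step with toy buffer types; the exact b_last linking is inferred from the surrounding hunks, so treat it as an assumption:]

    #include <stdint.h>

    #define NEXT_PRESENT (1u << 0)  /* stands in for the vlib next-present flag */

    typedef struct { uint32_t flags; uint32_t next_buffer; } toy_buf_t;

    /* Chain one descriptor's buffer onto the packet under construction.
     * Returns the (possibly updated) start-of-packet buffer index. */
    static uint32_t
    chain_rx_buffer (toy_buf_t * pool, uint32_t bi, int is_sop, int is_eop,
                     uint32_t bi_sop, uint32_t * bi_last)
    {
      bi_sop = is_sop ? bi : bi_sop;    /* remember the head of the packet */
      if (!is_eop)
        pool[bi].flags |= NEXT_PRESENT; /* more buffers follow this one */
      if (!is_sop)
        pool[*bi_last].next_buffer = bi;        /* link previous buffer to us */
      *bi_last = bi;
      return bi_sop;
    }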
@@ -1728,8 +1788,7 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
ixge_rx_trace (xm, xd, dq,
d_trace_save,
d_trace_buffers,
- &dq->descriptors[start_descriptor_index],
- n);
+ &dq->descriptors[start_descriptor_index], n);
vlib_set_trace_count (vm, node, n_trace - n);
}
if (d_trace_save)
@@ -1743,8 +1802,8 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
enqueued a packet. */
if (is_sop)
{
- b_last->next_buffer = ~0;
- bi_last = ~0;
+ b_last->next_buffer = ~0;
+ bi_last = ~0;
}
dq->rx.n_descriptors_done_this_call = n_done;
@@ -1763,16 +1822,16 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm,
static uword
ixge_rx_queue (ixge_main_t * xm,
ixge_device_t * xd,
- vlib_node_runtime_t * node,
- u32 queue_index)
+ vlib_node_runtime_t * node, u32 queue_index)
{
- ixge_dma_queue_t * dq = vec_elt_at_index (xd->dma_queues[VLIB_RX], queue_index);
- ixge_dma_regs_t * dr = get_dma_regs (xd, VLIB_RX, dq->queue_index);
+ ixge_dma_queue_t *dq =
+ vec_elt_at_index (xd->dma_queues[VLIB_RX], queue_index);
+ ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, dq->queue_index);
uword n_packets = 0;
u32 hw_head_index, sw_head_index;
/* One time initialization. */
- if (! dq->rx.node)
+ if (!dq->rx.node)
{
dq->rx.node = node;
dq->rx.is_start_of_packet = 1;
@@ -1797,7 +1856,9 @@ ixge_rx_queue (ixge_main_t * xm,
{
u32 n_tried = dq->n_descriptors - sw_head_index;
n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
- sw_head_index = ixge_ring_add (dq, sw_head_index, dq->rx.n_descriptors_done_this_call);
+ sw_head_index =
+ ixge_ring_add (dq, sw_head_index,
+ dq->rx.n_descriptors_done_this_call);
if (dq->rx.n_descriptors_done_this_call != n_tried)
goto done;
@@ -1806,60 +1867,63 @@ ixge_rx_queue (ixge_main_t * xm,
{
u32 n_tried = hw_head_index - sw_head_index;
n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
- sw_head_index = ixge_ring_add (dq, sw_head_index, dq->rx.n_descriptors_done_this_call);
+ sw_head_index =
+ ixge_ring_add (dq, sw_head_index,
+ dq->rx.n_descriptors_done_this_call);
}
- done:
+done:
dq->head_index = sw_head_index;
- dq->tail_index = ixge_ring_add (dq, dq->tail_index, dq->rx.n_descriptors_done_total);
+ dq->tail_index =
+ ixge_ring_add (dq, dq->tail_index, dq->rx.n_descriptors_done_total);
/* Give tail back to hardware. */
CLIB_MEMORY_BARRIER ();
dr->tail_index = dq->tail_index;
- vlib_increment_combined_counter (vnet_main.interface_main.combined_sw_if_counters
- + VNET_INTERFACE_COUNTER_RX,
- 0 /* cpu_index */,
- xd->vlib_sw_if_index,
- n_packets,
+ vlib_increment_combined_counter (vnet_main.
+ interface_main.combined_sw_if_counters +
+ VNET_INTERFACE_COUNTER_RX,
+ 0 /* cpu_index */ ,
+ xd->vlib_sw_if_index, n_packets,
dq->rx.n_bytes);
return n_packets;
}
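[Editor's sketch: ixge_rx_queue above walks the descriptor ring from the software head to the hardware head in at most two linear runs, because ixge_rx_queue_no_wrap cannot cross the ring end. A sketch of the wrap split and the index helper; semantics are inferred from the call sites, and the real code also stops early when fewer descriptors complete than were tried:]

    #include <stdint.h>

    /* Keep a ring index in [0, ring_size) after adding n. */
    static inline uint32_t
    ring_add (uint32_t i, uint32_t n, uint32_t ring_size)
    {
      i += n;
      return i >= ring_size ? i - ring_size : i;
    }

    /* Process [sw_head, hw_head) as one or two linear runs. */
    static uint32_t
    process_ring_range (uint32_t sw_head, uint32_t hw_head,
                        uint32_t ring_size,
                        uint32_t (*do_run) (uint32_t start, uint32_t n))
    {
      uint32_t n_done = 0;
      if (hw_head < sw_head)    /* range wraps: drain the tail of the ring */
        {
          n_done += do_run (sw_head, ring_size - sw_head);
          sw_head = 0;
        }
      if (hw_head > sw_head)
        n_done += do_run (sw_head, hw_head - sw_head);
      return n_done;
    }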
-static void ixge_interrupt (ixge_main_t * xm, ixge_device_t * xd, u32 i)
+static void
+ixge_interrupt (ixge_main_t * xm, ixge_device_t * xd, u32 i)
{
- vlib_main_t * vm = xm->vlib_main;
- ixge_regs_t * r = xd->regs;
+ vlib_main_t *vm = xm->vlib_main;
+ ixge_regs_t *r = xd->regs;
if (i != 20)
{
- ELOG_TYPE_DECLARE (e) = {
- .function = (char *) __FUNCTION__,
- .format = "ixge %d, %s",
- .format_args = "i1t1",
- .n_enum_strings = 16,
- .enum_strings = {
- "flow director",
- "rx miss",
- "pci exception",
- "mailbox",
- "link status change",
- "linksec key exchange",
- "manageability event",
- "reserved23",
- "sdp0",
- "sdp1",
- "sdp2",
- "sdp3",
- "ecc",
- "descriptor handler error",
- "tcp timer",
- "other",
- },
- };
- struct { u8 instance; u8 index; } * ed;
+ ELOG_TYPE_DECLARE (e) =
+ {
+ .function = (char *) __FUNCTION__,.format =
+ "ixge %d, %s",.format_args = "i1t1",.n_enum_strings =
+ 16,.enum_strings =
+ {
+ "flow director",
+ "rx miss",
+ "pci exception",
+ "mailbox",
+ "link status change",
+ "linksec key exchange",
+ "manageability event",
+ "reserved23",
+ "sdp0",
+ "sdp1",
+ "sdp2",
+ "sdp3",
+ "ecc", "descriptor handler error", "tcp timer", "other",},};
+ struct
+ {
+ u8 instance;
+ u8 index;
+ } *ed;
ed = ELOG_DATA (&vm->elog_main, e);
ed->instance = xd->device_index;
ed->index = i - 16;
@@ -1869,27 +1933,29 @@ static void ixge_interrupt (ixge_main_t * xm, ixge_device_t * xd, u32 i)
u32 v = r->xge_mac.link_status;
uword is_up = (v & (1 << 30)) != 0;
- ELOG_TYPE_DECLARE (e) = {
- .function = (char *) __FUNCTION__,
- .format = "ixge %d, link status change 0x%x",
- .format_args = "i4i4",
- };
- struct { u32 instance, link_status; } * ed;
+ ELOG_TYPE_DECLARE (e) =
+ {
+ .function = (char *) __FUNCTION__,.format =
+ "ixge %d, link status change 0x%x",.format_args = "i4i4",};
+ struct
+ {
+ u32 instance, link_status;
+ } *ed;
ed = ELOG_DATA (&vm->elog_main, e);
ed->instance = xd->device_index;
ed->link_status = v;
xd->link_status_at_last_link_change = v;
vlib_process_signal_event (vm, ixge_process_node.index,
- EVENT_SET_FLAGS,
- ((is_up<<31) | xd->vlib_hw_if_index));
+ EVENT_SET_FLAGS,
+ ((is_up << 31) | xd->vlib_hw_if_index));
}
}
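[Editor's sketch: the link-status word decoded above (and again in format_ixge_link_status further down) packs link state into a few fields: bit 30 is link-up, bits 27:26 the MAC link mode, bits 29:28 the speed. A self-contained decoder using exactly those positions:]

    #include <stdint.h>
    #include <stdio.h>

    static void
    decode_link_status (uint32_t v)
    {
      /* Field positions copied from the xge_mac.link_status comments
       * in ixge.h below. */
      static const char *modes[] =
        { "1g", "10g parallel", "10g serial", "autoneg" };
      static const char *speeds[] = { "unknown", "100m", "1g", "10g" };

      printf ("link %s, mode %s, speed %s\n",
              (v & (1u << 30)) ? "up" : "down",
              modes[(v >> 26) & 3], speeds[(v >> 28) & 3]);
    }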
always_inline u32
clean_block (u32 * b, u32 * t, u32 n_left)
{
- u32 * t0 = t;
+ u32 *t0 = t;
while (n_left >= 4)
{
@@ -1932,9 +1998,10 @@ clean_block (u32 * b, u32 * t, u32 n_left)
static void
ixge_tx_queue (ixge_main_t * xm, ixge_device_t * xd, u32 queue_index)
{
- vlib_main_t * vm = xm->vlib_main;
- ixge_dma_queue_t * dq = vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);
- u32 n_clean, * b, * t, * t0;
+ vlib_main_t *vm = xm->vlib_main;
+ ixge_dma_queue_t *dq =
+ vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);
+ u32 n_clean, *b, *t, *t0;
i32 n_hw_owned_descriptors;
i32 first_to_clean, last_to_clean;
u64 hwbp_race = 0;
@@ -1948,12 +2015,15 @@ ixge_tx_queue (ixge_main_t * xm, ixge_device_t * xd, u32 queue_index)
hwbp_race++;
if (IXGE_HWBP_RACE_ELOG && (hwbp_race == 1))
{
- ELOG_TYPE_DECLARE (e) = {
- .function = (char *) __FUNCTION__,
- .format = "ixge %d tx head index race: head %4d, tail %4d, buffs %4d",
- .format_args = "i4i4i4i4",
- };
- struct { u32 instance, head_index, tail_index, n_buffers_on_ring; } * ed;
+ ELOG_TYPE_DECLARE (e) =
+ {
+ .function = (char *) __FUNCTION__,.format =
+ "ixge %d tx head index race: head %4d, tail %4d, buffs %4d",.format_args
+ = "i4i4i4i4",};
+ struct
+ {
+ u32 instance, head_index, tail_index, n_buffers_on_ring;
+ } *ed;
ed = ELOG_DATA (&vm->elog_main, e);
ed->instance = xd->device_index;
ed->head_index = dq->head_index;
@@ -1964,23 +2034,26 @@ ixge_tx_queue (ixge_main_t * xm, ixge_device_t * xd, u32 queue_index)
dq->head_index = dq->tx.head_index_write_back[0];
n_hw_owned_descriptors = ixge_ring_sub (dq, dq->head_index, dq->tail_index);
- ASSERT(dq->tx.n_buffers_on_ring >= n_hw_owned_descriptors);
+ ASSERT (dq->tx.n_buffers_on_ring >= n_hw_owned_descriptors);
n_clean = dq->tx.n_buffers_on_ring - n_hw_owned_descriptors;
if (IXGE_HWBP_RACE_ELOG && hwbp_race)
{
- ELOG_TYPE_DECLARE (e) = {
- .function = (char *) __FUNCTION__,
- .format = "ixge %d tx head index race: head %4d, hw_owned %4d, n_clean %4d, retries %d",
- .format_args = "i4i4i4i4i4",
- };
- struct { u32 instance, head_index, n_hw_owned_descriptors, n_clean, retries; } * ed;
- ed = ELOG_DATA (&vm->elog_main, e);
- ed->instance = xd->device_index;
- ed->head_index = dq->head_index;
- ed->n_hw_owned_descriptors = n_hw_owned_descriptors;
- ed->n_clean = n_clean;
- ed->retries = hwbp_race;
+ ELOG_TYPE_DECLARE (e) =
+ {
+ .function = (char *) __FUNCTION__,.format =
+ "ixge %d tx head index race: head %4d, hw_owned %4d, n_clean %4d, retries %d",.format_args
+ = "i4i4i4i4i4",};
+ struct
+ {
+ u32 instance, head_index, n_hw_owned_descriptors, n_clean, retries;
+ } *ed;
+ ed = ELOG_DATA (&vm->elog_main, e);
+ ed->instance = xd->device_index;
+ ed->head_index = dq->head_index;
+ ed->n_hw_owned_descriptors = n_hw_owned_descriptors;
+ ed->n_clean = n_clean;
+ ed->retries = hwbp_race;
}
/*
@@ -1996,11 +2069,11 @@ ixge_tx_queue (ixge_main_t * xm, ixge_device_t * xd, u32 queue_index)
/* Clean the n_clean descriptors prior to the reported hardware head */
last_to_clean = dq->head_index - 1;
last_to_clean = (last_to_clean < 0) ? last_to_clean + dq->n_descriptors :
- last_to_clean;
+ last_to_clean;
first_to_clean = (last_to_clean) - (n_clean - 1);
first_to_clean = (first_to_clean < 0) ? first_to_clean + dq->n_descriptors :
- first_to_clean;
+ first_to_clean;
vec_resize (xm->tx_buffers_pending_free, dq->n_descriptors - 1);
t0 = t = xm->tx_buffers_pending_free;
@@ -2016,7 +2089,7 @@ ixge_tx_queue (ixge_main_t * xm, ixge_device_t * xd, u32 queue_index)
/* Typical case: clean from first to last */
if (first_to_clean <= last_to_clean)
- t += clean_block (b, t, (last_to_clean - first_to_clean) + 1);
+ t += clean_block (b, t, (last_to_clean - first_to_clean) + 1);
if (t > t0)
{
@@ -2029,25 +2102,39 @@ ixge_tx_queue (ixge_main_t * xm, ixge_device_t * xd, u32 queue_index)
}
/* RX queue interrupts 0 thru 7; TX 8 thru 15. */
-always_inline uword ixge_interrupt_is_rx_queue (uword i)
-{ return i < 8; }
+always_inline uword
+ixge_interrupt_is_rx_queue (uword i)
+{
+ return i < 8;
+}
-always_inline uword ixge_interrupt_is_tx_queue (uword i)
-{ return i >= 8 && i < 16; }
+always_inline uword
+ixge_interrupt_is_tx_queue (uword i)
+{
+ return i >= 8 && i < 16;
+}
-always_inline uword ixge_tx_queue_to_interrupt (uword i)
-{ return 8 + i; }
+always_inline uword
+ixge_tx_queue_to_interrupt (uword i)
+{
+ return 8 + i;
+}
-always_inline uword ixge_rx_queue_to_interrupt (uword i)
-{ return 0 + i; }
+always_inline uword
+ixge_rx_queue_to_interrupt (uword i)
+{
+ return 0 + i;
+}
-always_inline uword ixge_interrupt_rx_queue (uword i)
+always_inline uword
+ixge_interrupt_rx_queue (uword i)
{
ASSERT (ixge_interrupt_is_rx_queue (i));
return i - 0;
}
-always_inline uword ixge_interrupt_tx_queue (uword i)
+always_inline uword
+ixge_interrupt_tx_queue (uword i)
{
ASSERT (ixge_interrupt_is_tx_queue (i));
return i - 8;
@@ -2055,10 +2142,9 @@ always_inline uword ixge_interrupt_tx_queue (uword i)
static uword
ixge_device_input (ixge_main_t * xm,
- ixge_device_t * xd,
- vlib_node_runtime_t * node)
+ ixge_device_t * xd, vlib_node_runtime_t * node)
{
- ixge_regs_t * r = xd->regs;
+ ixge_regs_t *r = xd->regs;
u32 i, s;
uword n_rx_packets = 0;
@@ -2066,6 +2152,7 @@ ixge_device_input (ixge_main_t * xm,
if (s)
r->interrupt.status_write_1_to_clear = s;
+ /* *INDENT-OFF* */
foreach_set_bit (i, s, ({
if (ixge_interrupt_is_rx_queue (i))
n_rx_packets += ixge_rx_queue (xm, xd, node, ixge_interrupt_rx_queue (i));
@@ -2076,17 +2163,16 @@ ixge_device_input (ixge_main_t * xm,
else
ixge_interrupt (xm, xd, i);
}));
+ /* *INDENT-ON* */
return n_rx_packets;
}
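[Editor's sketch: ixge_device_input above reads the interrupt status, acknowledges it by writing the ones back (write-1-to-clear), then dispatches each set bit: 0 through 7 are RX queues, 8 through 15 TX queues, everything else goes to ixge_interrupt. A sketch of that dispatch with hypothetical handler callbacks in place of the driver's functions:]

    #include <stdint.h>

    static uint32_t
    dispatch_interrupts (volatile uint32_t * status_w1c,
                         uint32_t (*handle_rx) (uint32_t q),
                         void (*handle_tx) (uint32_t q),
                         void (*handle_other) (uint32_t bit))
    {
      uint32_t s = *status_w1c, n_rx = 0;

      if (s)
        *status_w1c = s;        /* writing 1s clears the pending bits */

      while (s)
        {
          uint32_t i = (uint32_t) __builtin_ctz (s);    /* lowest set bit */
          s &= s - 1;           /* clear it */
          if (i < 8)
            n_rx += handle_rx (i);
          else if (i < 16)
            handle_tx (i - 8);
          else
            handle_other (i);
        }
      return n_rx;
    }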
static uword
-ixge_input (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * f)
+ixge_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * f)
{
- ixge_main_t * xm = &ixge_main;
- ixge_device_t * xd;
+ ixge_main_t *xm = &ixge_main;
+ ixge_device_t *xd;
uword n_rx_packets = 0;
if (node->state == VLIB_NODE_STATE_INTERRUPT)
@@ -2094,6 +2180,7 @@ ixge_input (vlib_main_t * vm,
uword i;
/* Loop over devices with interrupts. */
+ /* *INDENT-OFF* */
foreach_set_bit (i, node->runtime_data[0], ({
xd = vec_elt_at_index (xm->devices, i);
n_rx_packets += ixge_device_input (xm, xd, node);
@@ -2102,6 +2189,7 @@ ixge_input (vlib_main_t * vm,
if (! (node->flags & VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
xd->regs->interrupt.enable_write_1_to_set = ~0;
}));
+ /* *INDENT-ON* */
/* Clear mask of devices with pending interrupts. */
node->runtime_data[0] = 0;
@@ -2110,25 +2198,26 @@ ixge_input (vlib_main_t * vm,
{
/* Poll all devices for input/interrupts. */
vec_foreach (xd, xm->devices)
- {
- n_rx_packets += ixge_device_input (xm, xd, node);
+ {
+ n_rx_packets += ixge_device_input (xm, xd, node);
- /* Re-enable interrupts when switching out of polling mode. */
- if (node->flags &
- VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE)
- xd->regs->interrupt.enable_write_1_to_set = ~0;
- }
+ /* Re-enable interrupts when switching out of polling mode. */
+ if (node->flags &
+ VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE)
+ xd->regs->interrupt.enable_write_1_to_set = ~0;
+ }
}
return n_rx_packets;
}
-static char * ixge_error_strings[] = {
+static char *ixge_error_strings[] = {
#define _(n,s) s,
- foreach_ixge_error
+ foreach_ixge_error
#undef _
};
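[Editor's sketch: ixge_error_strings is built with an X-macro. foreach_ixge_error expands a list of (name, string) pairs, and _ is redefined at each expansion site to pick out the field needed. A self-contained miniature with an invented error list:]

    #include <stdio.h>

    #define foreach_toy_error        \
      _ (NONE, "no error")           \
      _ (RX_ALIGN, "rx alignment error")

    typedef enum
    {
    #define _(n,s) TOY_ERROR_##n,
      foreach_toy_error
    #undef _
        TOY_N_ERROR,
    } toy_error_t;

    static char *toy_error_strings[] = {
    #define _(n,s) s,
      foreach_toy_error
    #undef _
    };

    int
    main (void)
    {
      printf ("%s\n", toy_error_strings[TOY_ERROR_RX_ALIGN]);
      return 0;
    }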
+/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ixge_input_node, static) = {
.function = ixge_input,
.type = VLIB_NODE_TYPE_INPUT,
@@ -2154,12 +2243,14 @@ VLIB_REGISTER_NODE (ixge_input_node, static) = {
VLIB_NODE_FUNCTION_MULTIARCH_CLONE (ixge_input)
CLIB_MULTIARCH_SELECT_FN (ixge_input)
+/* *INDENT-ON* */
-static u8 * format_ixge_device_name (u8 * s, va_list * args)
+static u8 *
+format_ixge_device_name (u8 * s, va_list * args)
{
u32 i = va_arg (*args, u32);
- ixge_main_t * xm = &ixge_main;
- ixge_device_t * xd = vec_elt_at_index (xm->devices, i);
+ ixge_main_t *xm = &ixge_main;
+ ixge_device_t *xd = vec_elt_at_index (xm->devices, i);
return format (s, "TenGigabitEthernet%U",
format_vlib_pci_handle, &xd->pci_device.bus_address);
}
@@ -2175,7 +2266,8 @@ static u8 ixge_counter_flags[] = {
#undef _64
};
-static void ixge_update_counters (ixge_device_t * xd)
+static void
+ixge_update_counters (ixge_device_t * xd)
{
/* Byte offset for counter registers. */
static u32 reg_offsets[] = {
@@ -2185,7 +2277,7 @@ static void ixge_update_counters (ixge_device_t * xd)
#undef _
#undef _64
};
- volatile u32 * r = (volatile u32 *) xd->regs;
+ volatile u32 *r = (volatile u32 *) xd->regs;
int i;
for (i = 0; i < ARRAY_LEN (xd->counters); i++)
@@ -2195,14 +2287,15 @@ static void ixge_update_counters (ixge_device_t * xd)
if (ixge_counter_flags[i] & IXGE_COUNTER_NOT_CLEAR_ON_READ)
r[o] = 0;
if (ixge_counter_flags[i] & IXGE_COUNTER_IS_64_BIT)
- xd->counters[i] += (u64) r[o+1] << (u64) 32;
+ xd->counters[i] += (u64) r[o + 1] << (u64) 32;
}
}
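[Editor's sketch: ixge_update_counters above folds each hardware counter into a 64-bit accumulator: read the low register, zero it by hand unless it clears on read, and add the high register shifted up when the counter is 64 bits wide. The per-counter step, reduced to plain C:]

    #include <stdint.h>

    static void
    update_counter (volatile uint32_t * regs, uint32_t offset,
                    int is_64_bit, int clear_on_read, uint64_t * accum)
    {
      *accum += regs[offset];       /* low 32 bits */
      if (!clear_on_read)
        regs[offset] = 0;           /* reset counters that don't self-clear */
      if (is_64_bit)
        *accum += (uint64_t) regs[offset + 1] << 32;    /* high 32 bits */
    }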
-static u8 * format_ixge_device_id (u8 * s, va_list * args)
+static u8 *
+format_ixge_device_id (u8 * s, va_list * args)
{
u32 device_id = va_arg (*args, u32);
- char * t = 0;
+ char *t = 0;
switch (device_id)
{
#define _(f,n) case n: t = #f; break;
@@ -2219,35 +2312,36 @@ static u8 * format_ixge_device_id (u8 * s, va_list * args)
return s;
}
-static u8 * format_ixge_link_status (u8 * s, va_list * args)
+static u8 *
+format_ixge_link_status (u8 * s, va_list * args)
{
- ixge_device_t * xd = va_arg (*args, ixge_device_t *);
+ ixge_device_t *xd = va_arg (*args, ixge_device_t *);
u32 v = xd->link_status_at_last_link_change;
s = format (s, "%s", (v & (1 << 30)) ? "up" : "down");
{
- char * modes[] = {
+ char *modes[] = {
"1g", "10g parallel", "10g serial", "autoneg",
};
- char * speeds[] = {
+ char *speeds[] = {
"unknown", "100m", "1g", "10g",
};
s = format (s, ", mode %s, speed %s",
- modes[(v >> 26) & 3],
- speeds[(v >> 28) & 3]);
+ modes[(v >> 26) & 3], speeds[(v >> 28) & 3]);
}
return s;
}
-static u8 * format_ixge_device (u8 * s, va_list * args)
+static u8 *
+format_ixge_device (u8 * s, va_list * args)
{
u32 dev_instance = va_arg (*args, u32);
CLIB_UNUSED (int verbose) = va_arg (*args, int);
- ixge_main_t * xm = &ixge_main;
- ixge_device_t * xd = vec_elt_at_index (xm->devices, dev_instance);
- ixge_phy_t * phy = xd->phys + xd->phy_index;
+ ixge_main_t *xm = &ixge_main;
+ ixge_device_t *xd = vec_elt_at_index (xm->devices, dev_instance);
+ ixge_phy_t *phy = xd->phys + xd->phy_index;
uword indent = format_get_indent (s);
ixge_update_counters (xd);
@@ -2255,8 +2349,7 @@ static u8 * format_ixge_device (u8 * s, va_list * args)
s = format (s, "Intel 8259X: id %U\n%Ulink %U",
format_ixge_device_id, xd->device_id,
- format_white_space, indent + 2,
- format_ixge_link_status, xd);
+ format_white_space, indent + 2, format_ixge_link_status, xd);
{
@@ -2274,30 +2367,31 @@ static u8 * format_ixge_device (u8 * s, va_list * args)
/* FIXME */
{
- ixge_dma_queue_t * dq = vec_elt_at_index (xd->dma_queues[VLIB_RX], 0);
- ixge_dma_regs_t * dr = get_dma_regs (xd, VLIB_RX, 0);
+ ixge_dma_queue_t *dq = vec_elt_at_index (xd->dma_queues[VLIB_RX], 0);
+ ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);
u32 hw_head_index = dr->head_index;
u32 sw_head_index = dq->head_index;
u32 nitems;
nitems = ixge_ring_sub (dq, hw_head_index, sw_head_index);
s = format (s, "\n%U%d unprocessed, %d total buffers on rx queue 0 ring",
- format_white_space, indent + 2, nitems, dq->n_descriptors);
+ format_white_space, indent + 2, nitems, dq->n_descriptors);
s = format (s, "\n%U%d buffers in driver rx cache",
- format_white_space, indent + 2, vec_len(xm->rx_buffers_to_add));
+ format_white_space, indent + 2,
+ vec_len (xm->rx_buffers_to_add));
s = format (s, "\n%U%d buffers on tx queue 0 ring",
- format_white_space, indent + 2,
- xd->dma_queues[VLIB_TX][0].tx.n_buffers_on_ring);
+ format_white_space, indent + 2,
+ xd->dma_queues[VLIB_TX][0].tx.n_buffers_on_ring);
}
{
u32 i;
u64 v;
- static char * names[] = {
+ static char *names[] = {
#define _(a,f) #f,
#define _64(a,f) _(a,f)
- foreach_ixge_counter
+ foreach_ixge_counter
#undef _
#undef _64
};
@@ -2308,18 +2402,18 @@ static u8 * format_ixge_device (u8 * s, va_list * args)
if (v != 0)
s = format (s, "\n%U%-40U%16Ld",
format_white_space, indent + 2,
- format_c_identifier, names[i],
- v);
+ format_c_identifier, names[i], v);
}
}
return s;
}
-static void ixge_clear_hw_interface_counters (u32 instance)
+static void
+ixge_clear_hw_interface_counters (u32 instance)
{
- ixge_main_t * xm = &ixge_main;
- ixge_device_t * xd = vec_elt_at_index (xm->devices, instance);
+ ixge_main_t *xm = &ixge_main;
+ ixge_device_t *xd = vec_elt_at_index (xm->devices, instance);
ixge_update_counters (xd);
memcpy (xd->counters_last_clear, xd->counters, sizeof (xd->counters));
}
@@ -2328,12 +2422,13 @@ static void ixge_clear_hw_interface_counters (u32 instance)
* Dynamically redirect all pkts from a specific interface
* to the specified node
*/
-static void ixge_set_interface_next_node (vnet_main_t *vnm, u32 hw_if_index,
- u32 node_index)
+static void
+ixge_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
+ u32 node_index)
{
- ixge_main_t * xm = &ixge_main;
+ ixge_main_t *xm = &ixge_main;
vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
- ixge_device_t * xd = vec_elt_at_index (xm->devices, hw->dev_instance);
+ ixge_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
/* Shut off redirection */
if (node_index == ~0)
@@ -2347,6 +2442,7 @@ static void ixge_set_interface_next_node (vnet_main_t *vnm, u32 hw_if_index,
}
+/* *INDENT-OFF* */
VNET_DEVICE_CLASS (ixge_device_class) = {
.name = "ixge",
.tx_function = ixge_interface_tx,
@@ -2357,46 +2453,53 @@ VNET_DEVICE_CLASS (ixge_device_class) = {
.admin_up_down_function = ixge_interface_admin_up_down,
.rx_redirect_to_node = ixge_set_interface_next_node,
};
+/* *INDENT-ON* */
-#define IXGE_N_BYTES_IN_RX_BUFFER (2048) // DAW-HACK: Set Rx buffer size so all packets < ETH_MTU_SIZE fit in the buffer (i.e. sop & eop for all descriptors).
+#define IXGE_N_BYTES_IN_RX_BUFFER (2048) // DAW-HACK: Set Rx buffer size so all packets < ETH_MTU_SIZE fit in the buffer (i.e. sop & eop for all descriptors).
static clib_error_t *
ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 queue_index)
{
- ixge_main_t * xm = &ixge_main;
- vlib_main_t * vm = xm->vlib_main;
- ixge_dma_queue_t * dq;
- clib_error_t * error = 0;
+ ixge_main_t *xm = &ixge_main;
+ vlib_main_t *vm = xm->vlib_main;
+ ixge_dma_queue_t *dq;
+ clib_error_t *error = 0;
vec_validate (xd->dma_queues[rt], queue_index);
dq = vec_elt_at_index (xd->dma_queues[rt], queue_index);
- if (! xm->n_descriptors_per_cache_line)
- xm->n_descriptors_per_cache_line = CLIB_CACHE_LINE_BYTES / sizeof (dq->descriptors[0]);
+ if (!xm->n_descriptors_per_cache_line)
+ xm->n_descriptors_per_cache_line =
+ CLIB_CACHE_LINE_BYTES / sizeof (dq->descriptors[0]);
- if (! xm->n_bytes_in_rx_buffer)
+ if (!xm->n_bytes_in_rx_buffer)
xm->n_bytes_in_rx_buffer = IXGE_N_BYTES_IN_RX_BUFFER;
xm->n_bytes_in_rx_buffer = round_pow2 (xm->n_bytes_in_rx_buffer, 1024);
- if (! xm->vlib_buffer_free_list_index)
+ if (!xm->vlib_buffer_free_list_index)
{
- xm->vlib_buffer_free_list_index = vlib_buffer_get_or_create_free_list (vm, xm->n_bytes_in_rx_buffer, "ixge rx");
+ xm->vlib_buffer_free_list_index =
+ vlib_buffer_get_or_create_free_list (vm, xm->n_bytes_in_rx_buffer,
+ "ixge rx");
ASSERT (xm->vlib_buffer_free_list_index != 0);
}
- if (! xm->n_descriptors[rt])
+ if (!xm->n_descriptors[rt])
xm->n_descriptors[rt] = 4 * VLIB_FRAME_SIZE;
dq->queue_index = queue_index;
- dq->n_descriptors = round_pow2 (xm->n_descriptors[rt], xm->n_descriptors_per_cache_line);
+ dq->n_descriptors =
+ round_pow2 (xm->n_descriptors[rt], xm->n_descriptors_per_cache_line);
dq->head_index = dq->tail_index = 0;
dq->descriptors = vlib_physmem_alloc_aligned (vm, &error,
- dq->n_descriptors * sizeof (dq->descriptors[0]),
- 128 /* per chip spec */);
+ dq->n_descriptors *
+ sizeof (dq->descriptors[0]),
+ 128 /* per chip spec */ );
if (error)
return error;
- memset (dq->descriptors, 0, dq->n_descriptors * sizeof (dq->descriptors[0]));
+ memset (dq->descriptors, 0,
+ dq->n_descriptors * sizeof (dq->descriptors[0]));
vec_resize (dq->descriptor_buffer_indices, dq->n_descriptors);
if (rt == VLIB_RX)
@@ -2404,20 +2507,24 @@ ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 queue_index)
u32 n_alloc, i;
n_alloc = vlib_buffer_alloc_from_free_list
- (vm, dq->descriptor_buffer_indices, vec_len (dq->descriptor_buffer_indices),
+ (vm, dq->descriptor_buffer_indices,
+ vec_len (dq->descriptor_buffer_indices),
xm->vlib_buffer_free_list_index);
ASSERT (n_alloc == vec_len (dq->descriptor_buffer_indices));
for (i = 0; i < n_alloc; i++)
{
- vlib_buffer_t * b = vlib_get_buffer (vm, dq->descriptor_buffer_indices[i]);
- dq->descriptors[i].rx_to_hw.tail_address = vlib_physmem_virtual_to_physical (vm, b->data);
+ vlib_buffer_t *b =
+ vlib_get_buffer (vm, dq->descriptor_buffer_indices[i]);
+ dq->descriptors[i].rx_to_hw.tail_address =
+ vlib_physmem_virtual_to_physical (vm, b->data);
}
}
else
{
u32 i;
- dq->tx.head_index_write_back = vlib_physmem_alloc (vm, &error, CLIB_CACHE_LINE_BYTES);
+ dq->tx.head_index_write_back =
+ vlib_physmem_alloc (vm, &error, CLIB_CACHE_LINE_BYTES);
for (i = 0; i < dq->n_descriptors; i++)
dq->descriptors[i].tx = xm->tx_descriptor_template;
@@ -2426,7 +2533,7 @@ ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 queue_index)
}
{
- ixge_dma_regs_t * dr = get_dma_regs (xd, rt, queue_index);
+ ixge_dma_regs_t *dr = get_dma_regs (xd, rt, queue_index);
u64 a;
a = vlib_physmem_virtual_to_physical (vm, dq->descriptors);
@@ -2439,24 +2546,23 @@ ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 queue_index)
{
ASSERT ((xm->n_bytes_in_rx_buffer / 1024) < 32);
dr->rx_split_control =
- (/* buffer size */ ((xm->n_bytes_in_rx_buffer / 1024) << 0)
- | (/* lo free descriptor threshold (units of 64 descriptors) */
- (1 << 22))
- | (/* descriptor type: advanced one buffer */
- (1 << 25))
- | (/* drop if no descriptors available */
- (1 << 28)));
+ ( /* buffer size */ ((xm->n_bytes_in_rx_buffer / 1024) << 0)
+ | ( /* lo free descriptor threshold (units of 64 descriptors) */
+ (1 << 22)) | ( /* descriptor type: advanced one buffer */
+ (1 << 25)) | ( /* drop if no descriptors available */
+ (1 << 28)));
/* Give hardware all but last 16 cache lines' worth of descriptors. */
dq->tail_index = dq->n_descriptors -
- 16*xm->n_descriptors_per_cache_line;
+ 16 * xm->n_descriptors_per_cache_line;
}
else
{
/* Make sure its initialized before hardware can get to it. */
dq->tx.head_index_write_back[0] = dq->head_index;
- a = vlib_physmem_virtual_to_physical (vm, dq->tx.head_index_write_back);
+ a =
+ vlib_physmem_virtual_to_physical (vm, dq->tx.head_index_write_back);
dr->tx.head_index_write_back_address[0] = /* enable bit */ 1 | a;
dr->tx.head_index_write_back_address[1] = (u64) a >> (u64) 32;
}
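[Editor's sketch: earlier in this hunk ixge_dma_init packs rx_split_control from the commented bit positions: buffer size in 1 KiB units at bit 0 (hence the assert that it fits in 5 bits), low free-descriptor threshold at bit 22, advanced one-buffer descriptor type at bit 25, and drop-when-no-descriptors at bit 28. The same packing as a pure function:]

    #include <assert.h>
    #include <stdint.h>

    static uint32_t
    make_rx_split_control (uint32_t n_bytes_in_rx_buffer)
    {
      /* Buffer size field is 5 bits of 1 KiB units. */
      assert ((n_bytes_in_rx_buffer / 1024) < 32);
      return ((n_bytes_in_rx_buffer / 1024) << 0)   /* buffer size */
        | (1u << 22)    /* lo free descriptor threshold (64-descriptor units) */
        | (1u << 25)    /* descriptor type: advanced, one buffer */
        | (1u << 28);   /* drop if no descriptors available */
    }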
@@ -2470,19 +2576,19 @@ ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 queue_index)
if (rt == VLIB_TX)
{
- xd->regs->tx_dma_control |= (1 << 0);
- dr->control |= ((32 << 0) /* prefetch threshold */
- | (64 << 8) /* host threshold */
- | (0 << 16) /* writeback threshold*/);
+ xd->regs->tx_dma_control |= (1 << 0);
+ dr->control |= ((32 << 0) /* prefetch threshold */
+ | (64 << 8) /* host threshold */
+ | (0 << 16) /* writeback threshold */ );
}
/* Enable this queue and wait for hardware to initialize
before adding to tail. */
if (rt == VLIB_TX)
{
- dr->control |= 1 << 25;
- while (! (dr->control & (1 << 25)))
- ;
+ dr->control |= 1 << 25;
+ while (!(dr->control & (1 << 25)))
+ ;
}
/* Set head/tail indices and enable DMA. */
@@ -2493,14 +2599,13 @@ ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 queue_index)
return error;
}
-static u32 ixge_flag_change (vnet_main_t * vnm,
- vnet_hw_interface_t * hw,
- u32 flags)
+static u32
+ixge_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
{
- ixge_device_t * xd;
- ixge_regs_t * r;
+ ixge_device_t *xd;
+ ixge_regs_t *r;
u32 old;
- ixge_main_t * xm = &ixge_main;
+ ixge_main_t *xm = &ixge_main;
xd = vec_elt_at_index (xm->devices, hw->dev_instance);
r = xd->regs;
@@ -2508,121 +2613,119 @@ static u32 ixge_flag_change (vnet_main_t * vnm,
old = r->filter_control;
if (flags & ETHERNET_INTERFACE_FLAG_ACCEPT_ALL)
- r->filter_control = old |(1 << 9) /* unicast promiscuous */;
+ r->filter_control = old | (1 << 9) /* unicast promiscuous */ ;
else
r->filter_control = old & ~(1 << 9);
return old;
}
-static void ixge_device_init (ixge_main_t * xm)
+static void
+ixge_device_init (ixge_main_t * xm)
{
- vnet_main_t * vnm = vnet_get_main();
- ixge_device_t * xd;
+ vnet_main_t *vnm = vnet_get_main ();
+ ixge_device_t *xd;
/* Reset chip(s). */
vec_foreach (xd, xm->devices)
- {
- ixge_regs_t * r = xd->regs;
- const u32 reset_bit = (1 << 26) | (1 << 3);
+ {
+ ixge_regs_t *r = xd->regs;
+ const u32 reset_bit = (1 << 26) | (1 << 3);
- r->control |= reset_bit;
+ r->control |= reset_bit;
- /* No need to suspend. Timed to take ~1e-6 secs */
- while (r->control & reset_bit)
- ;
+ /* No need to suspend. Timed to take ~1e-6 secs */
+ while (r->control & reset_bit)
+ ;
- /* Software loaded. */
- r->extended_control |= (1 << 28);
+ /* Software loaded. */
+ r->extended_control |= (1 << 28);
- ixge_phy_init (xd);
+ ixge_phy_init (xd);
- /* Register ethernet interface. */
- {
- u8 addr8[6];
- u32 i, addr32[2];
- clib_error_t * error;
-
- addr32[0] = r->rx_ethernet_address0[0][0];
- addr32[1] = r->rx_ethernet_address0[0][1];
- for (i = 0; i < 6; i++)
- addr8[i] = addr32[i / 4] >> ((i % 4) * 8);
-
- error = ethernet_register_interface
- (vnm,
- ixge_device_class.index,
- xd->device_index,
- /* ethernet address */ addr8,
- &xd->vlib_hw_if_index,
- ixge_flag_change);
- if (error)
- clib_error_report (error);
- }
+ /* Register ethernet interface. */
+ {
+ u8 addr8[6];
+ u32 i, addr32[2];
+ clib_error_t *error;
+
+ addr32[0] = r->rx_ethernet_address0[0][0];
+ addr32[1] = r->rx_ethernet_address0[0][1];
+ for (i = 0; i < 6; i++)
+ addr8[i] = addr32[i / 4] >> ((i % 4) * 8);
+
+ error = ethernet_register_interface
+ (vnm, ixge_device_class.index, xd->device_index,
+ /* ethernet address */ addr8,
+ &xd->vlib_hw_if_index, ixge_flag_change);
+ if (error)
+ clib_error_report (error);
+ }
- {
- vnet_sw_interface_t * sw = vnet_get_hw_sw_interface (vnm, xd->vlib_hw_if_index);
- xd->vlib_sw_if_index = sw->sw_if_index;
- }
+ {
+ vnet_sw_interface_t *sw =
+ vnet_get_hw_sw_interface (vnm, xd->vlib_hw_if_index);
+ xd->vlib_sw_if_index = sw->sw_if_index;
+ }
- ixge_dma_init (xd, VLIB_RX, /* queue_index */ 0);
+ ixge_dma_init (xd, VLIB_RX, /* queue_index */ 0);
- xm->n_descriptors[VLIB_TX] = 20 * VLIB_FRAME_SIZE;
+ xm->n_descriptors[VLIB_TX] = 20 * VLIB_FRAME_SIZE;
- ixge_dma_init (xd, VLIB_TX, /* queue_index */ 0);
+ ixge_dma_init (xd, VLIB_TX, /* queue_index */ 0);
- /* RX/TX queue 0 gets mapped to interrupt bits 0 & 8. */
- r->interrupt.queue_mapping[0] =
- ((/* valid bit */ (1 << 7) |
- ixge_rx_queue_to_interrupt (0)) << 0);
+ /* RX/TX queue 0 gets mapped to interrupt bits 0 & 8. */
+ r->interrupt.queue_mapping[0] = (( /* valid bit */ (1 << 7) |
+ ixge_rx_queue_to_interrupt (0)) << 0);
- r->interrupt.queue_mapping[0] |=
- ((/* valid bit */ (1 << 7) |
- ixge_tx_queue_to_interrupt (0)) << 8);
+ r->interrupt.queue_mapping[0] |= (( /* valid bit */ (1 << 7) |
+ ixge_tx_queue_to_interrupt (0)) << 8);
- /* No use in getting too many interrupts.
- Limit them to one every 3/4 ring size at line rate
- min sized packets.
- No need for this since kernel/vlib main loop provides adequate interrupt
- limiting scheme. */
- if (0)
- {
- f64 line_rate_max_pps = 10e9 / (8 * (64 + /* interframe padding */ 20));
- ixge_throttle_queue_interrupt (r, 0, .75 * xm->n_descriptors[VLIB_RX] / line_rate_max_pps);
- }
+ /* No use in getting too many interrupts.
+ Limit them to one every 3/4 ring size at line rate
+ min sized packets.
+ No need for this since kernel/vlib main loop provides adequate interrupt
+ limiting scheme. */
+ if (0)
+ {
+ f64 line_rate_max_pps =
+ 10e9 / (8 * (64 + /* interframe padding */ 20));
+ ixge_throttle_queue_interrupt (r, 0,
+ .75 * xm->n_descriptors[VLIB_RX] /
+ line_rate_max_pps);
+ }
- /* Accept all multicast and broadcast packets. Should really add them
- to the dst_ethernet_address register array. */
- r->filter_control |= (1 << 10) | (1 << 8);
+ /* Accept all multicast and broadcast packets. Should really add them
+ to the dst_ethernet_address register array. */
+ r->filter_control |= (1 << 10) | (1 << 8);
- /* Enable frames up to size in mac frame size register. */
- r->xge_mac.control |= 1 << 2;
- r->xge_mac.rx_max_frame_size = (9216 + 14) << 16;
+ /* Enable frames up to size in mac frame size register. */
+ r->xge_mac.control |= 1 << 2;
+ r->xge_mac.rx_max_frame_size = (9216 + 14) << 16;
- /* Enable all interrupts. */
- if (! IXGE_ALWAYS_POLL)
- r->interrupt.enable_write_1_to_set = ~0;
- }
+ /* Enable all interrupts. */
+ if (!IXGE_ALWAYS_POLL)
+ r->interrupt.enable_write_1_to_set = ~0;
+ }
}
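[Editor's sketch: ixge_device_init above recovers the 48-bit station address from the two little-endian rx_ethernet_address0 words, one byte per shift. Isolated into a helper:]

    #include <stdint.h>

    static void
    mac_from_regs (uint32_t lo, uint32_t hi, uint8_t addr[6])
    {
      uint32_t addr32[2] = { lo, hi };
      int i;

      /* Byte i lives in word i/4 at bit offset (i%4)*8. */
      for (i = 0; i < 6; i++)
        addr[i] = (uint8_t) (addr32[i / 4] >> ((i % 4) * 8));
    }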
static uword
-ixge_process (vlib_main_t * vm,
- vlib_node_runtime_t * rt,
- vlib_frame_t * f)
+ixge_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
{
- vnet_main_t * vnm = vnet_get_main();
- ixge_main_t * xm = &ixge_main;
- ixge_device_t * xd;
- uword event_type, * event_data = 0;
+ vnet_main_t *vnm = vnet_get_main ();
+ ixge_main_t *xm = &ixge_main;
+ ixge_device_t *xd;
+ uword event_type, *event_data = 0;
f64 timeout, link_debounce_deadline;
ixge_device_init (xm);
/* Clear all counters. */
vec_foreach (xd, xm->devices)
- {
- ixge_update_counters (xd);
- memset (xd->counters, 0, sizeof (xd->counters));
- }
+ {
+ ixge_update_counters (xd);
+ memset (xd->counters, 0, sizeof (xd->counters));
+ }
timeout = 30.0;
link_debounce_deadline = 1e70;
@@ -2630,40 +2733,41 @@ ixge_process (vlib_main_t * vm,
while (1)
{
/* 36 bit stat counters could overflow in ~50 secs.
- We poll every 30 secs to be conservative. */
+ We poll every 30 secs to be conservative. */
vlib_process_wait_for_event_or_clock (vm, timeout);
event_type = vlib_process_get_events (vm, &event_data);
- switch (event_type) {
- case EVENT_SET_FLAGS:
- /* 1 ms */
- link_debounce_deadline = vlib_time_now(vm) + 1e-3;
- timeout = 1e-3;
- break;
-
- case ~0:
- /* No events found: timer expired. */
- if (vlib_time_now(vm) > link_debounce_deadline)
- {
- vec_foreach (xd, xm->devices)
- {
- ixge_regs_t * r = xd->regs;
- u32 v = r->xge_mac.link_status;
- uword is_up = (v & (1 << 30)) != 0;
-
- vnet_hw_interface_set_flags
- (vnm, xd->vlib_hw_if_index,
- is_up ? VNET_HW_INTERFACE_FLAG_LINK_UP : 0);
- }
- link_debounce_deadline = 1e70;
- timeout = 30.0;
- }
- break;
+ switch (event_type)
+ {
+ case EVENT_SET_FLAGS:
+ /* 1 ms */
+ link_debounce_deadline = vlib_time_now (vm) + 1e-3;
+ timeout = 1e-3;
+ break;
- default:
- ASSERT (0);
- }
+ case ~0:
+ /* No events found: timer expired. */
+ if (vlib_time_now (vm) > link_debounce_deadline)
+ {
+ vec_foreach (xd, xm->devices)
+ {
+ ixge_regs_t *r = xd->regs;
+ u32 v = r->xge_mac.link_status;
+ uword is_up = (v & (1 << 30)) != 0;
+
+ vnet_hw_interface_set_flags
+ (vnm, xd->vlib_hw_if_index,
+ is_up ? VNET_HW_INTERFACE_FLAG_LINK_UP : 0);
+ }
+ link_debounce_deadline = 1e70;
+ timeout = 30.0;
+ }
+ break;
+
+ default:
+ ASSERT (0);
+ }
if (event_data)
_vec_len (event_data) = 0;
@@ -2674,8 +2778,7 @@ ixge_process (vlib_main_t * vm,
if (now - xm->time_last_stats_update > 30)
{
xm->time_last_stats_update = now;
- vec_foreach (xd, xm->devices)
- ixge_update_counters (xd);
+ vec_foreach (xd, xm->devices) ixge_update_counters (xd);
}
}
}
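[Editor's sketch: the process loop above is a small debounce machine. A link event arms a 1 ms deadline and shortens the wait; when the timer fires past the deadline, the link state is published and the loop falls back to the 30-second stats poll. The two transitions, with hypothetical stand-ins for the vlib process primitives:]

    /* Reduced link-debounce state from ixge_process. */
    typedef struct
    {
      double deadline;          /* publish link state after this time */
      double timeout;           /* how long to wait for the next event */
    } debounce_t;

    static void
    on_link_event (debounce_t * d, double now)
    {
      d->deadline = now + 1e-3; /* re-check the link 1 ms from now */
      d->timeout = 1e-3;
    }

    static void
    on_timer (debounce_t * d, double now, void (*publish_link_state) (void))
    {
      if (now > d->deadline)
        {
          publish_link_state ();        /* vnet_hw_interface_set_flags here */
          d->deadline = 1e70;   /* effectively "never" until the next event */
          d->timeout = 30.0;    /* back to the 30 s stats poll */
        }
    }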
@@ -2684,23 +2787,26 @@ ixge_process (vlib_main_t * vm,
}
static vlib_node_registration_t ixge_process_node = {
- .function = ixge_process,
- .type = VLIB_NODE_TYPE_PROCESS,
- .name = "ixge-process",
+ .function = ixge_process,
+ .type = VLIB_NODE_TYPE_PROCESS,
+ .name = "ixge-process",
};
-clib_error_t * ixge_init (vlib_main_t * vm)
+clib_error_t *
+ixge_init (vlib_main_t * vm)
{
- ixge_main_t * xm = &ixge_main;
- clib_error_t * error;
+ ixge_main_t *xm = &ixge_main;
+ clib_error_t *error;
xm->vlib_main = vm;
- memset (&xm->tx_descriptor_template, 0, sizeof (xm->tx_descriptor_template));
- memset (&xm->tx_descriptor_template_mask, 0, sizeof (xm->tx_descriptor_template_mask));
+ memset (&xm->tx_descriptor_template, 0,
+ sizeof (xm->tx_descriptor_template));
+ memset (&xm->tx_descriptor_template_mask, 0,
+ sizeof (xm->tx_descriptor_template_mask));
xm->tx_descriptor_template.status0 =
- (IXGE_TX_DESCRIPTOR_STATUS0_ADVANCED
- | IXGE_TX_DESCRIPTOR_STATUS0_IS_ADVANCED
- | IXGE_TX_DESCRIPTOR_STATUS0_INSERT_FCS);
+ (IXGE_TX_DESCRIPTOR_STATUS0_ADVANCED |
+ IXGE_TX_DESCRIPTOR_STATUS0_IS_ADVANCED |
+ IXGE_TX_DESCRIPTOR_STATUS0_INSERT_FCS);
xm->tx_descriptor_template_mask.status0 = 0xffff;
xm->tx_descriptor_template_mask.status1 = 0x00003fff;
@@ -2719,16 +2825,17 @@ VLIB_INIT_FUNCTION (ixge_init);
static void
-ixge_pci_intr_handler(vlib_pci_device_t * dev)
+ixge_pci_intr_handler (vlib_pci_device_t * dev)
{
- ixge_main_t * xm = &ixge_main;
- vlib_main_t * vm = xm->vlib_main;
+ ixge_main_t *xm = &ixge_main;
+ vlib_main_t *vm = xm->vlib_main;
vlib_node_set_interrupt_pending (vm, ixge_input_node.index);
/* Let node know which device is interrupting. */
{
- vlib_node_runtime_t * rt = vlib_node_get_runtime (vm, ixge_input_node.index);
+ vlib_node_runtime_t *rt =
+ vlib_node_get_runtime (vm, ixge_input_node.index);
rt->runtime_data[0] |= 1 << dev->private_data;
}
}
@@ -2736,10 +2843,10 @@ ixge_pci_intr_handler(vlib_pci_device_t * dev)
static clib_error_t *
ixge_pci_init (vlib_main_t * vm, vlib_pci_device_t * dev)
{
- ixge_main_t * xm = &ixge_main;
- clib_error_t * error;
- void * r;
- ixge_device_t * xd;
+ ixge_main_t *xm = &ixge_main;
+ clib_error_t *error;
+ void *r;
+ ixge_device_t *xd;
/* Device found: make sure we have dma memory. */
if (unix_physmem_is_fake (vm))
@@ -2753,7 +2860,7 @@ ixge_pci_init (vlib_main_t * vm, vlib_pci_device_t * dev)
if (vec_len (xm->devices) == 1)
{
- ixge_input_node.function = ixge_input_multiarch_select();
+ ixge_input_node.function = ixge_input_multiarch_select ();
}
xd->pci_device = dev[0];
@@ -2780,14 +2887,15 @@ ixge_pci_init (vlib_main_t * vm, vlib_pci_device_t * dev)
xm->process_node_index = ixge_process_node.index;
}
- error = vlib_pci_bus_master_enable(dev);
+ error = vlib_pci_bus_master_enable (dev);
if (error)
return error;
- return vlib_pci_intr_enable(dev);
+ return vlib_pci_intr_enable (dev);
}
+/* *INDENT-OFF* */
PCI_REGISTER_DEVICE (ixge_pci_device_registration,static) = {
.init_function = ixge_pci_init,
.interrupt_handler = ixge_pci_intr_handler,
@@ -2798,8 +2906,10 @@ PCI_REGISTER_DEVICE (ixge_pci_device_registration,static) = {
{ 0 },
},
};
+/* *INDENT-ON* */
-void ixge_set_next_node (ixge_rx_next_t next, char *name)
+void
+ixge_set_next_node (ixge_rx_next_t next, char *name)
{
vlib_node_registration_t *r = &ixge_input_node;
@@ -2817,3 +2927,11 @@ void ixge_set_next_node (ixge_rx_next_t next, char *name)
}
}
#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vnet/vnet/devices/nic/ixge.h b/vnet/vnet/devices/nic/ixge.h
index 8062b8c8a97..a8e652dcdab 100644
--- a/vnet/vnet/devices/nic/ixge.h
+++ b/vnet/vnet/devices/nic/ixge.h
@@ -23,7 +23,8 @@
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
-typedef volatile struct {
+typedef volatile struct
+{
/* [31:7] 128 byte aligned. */
u32 descriptor_address[2];
u32 n_descriptor_bytes;
@@ -50,7 +51,7 @@ typedef volatile struct {
u32 rx_split_control;
u32 tail_index;
- CLIB_PAD_FROM_TO (0x1c, 0x28);
+ CLIB_PAD_FROM_TO (0x1c, 0x28);
/* [7:0] rx/tx prefetch threshold
[15:8] rx/tx host threshold
@@ -62,15 +63,18 @@ typedef volatile struct {
u32 rx_coallesce_control;
- union {
- struct {
+ union
+ {
+ struct
+ {
/* packets bytes lo hi */
u32 stats[3];
u32 unused;
} rx;
- struct {
+ struct
+ {
u32 unused[2];
/* [0] enables head write back. */
@@ -80,12 +84,14 @@ typedef volatile struct {
} ixge_dma_regs_t;
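[Editor's sketch: every gap in this register struct is padded so each field sits at its datasheet offset. The real CLIB_PAD_FROM_TO macro is defined in clib; a stand-in with the same shape (an assumption, shown only for illustration) and a toy register block:]

    #include <stdint.h>

    /* Pads bytes [from, to) with an array named after the start offset. */
    #define PAD_FROM_TO(from,to) uint8_t pad_##from[(to) - (from)]

    typedef volatile struct
    {
      uint32_t control;           /* offset 0x00 */
      uint32_t status;            /* offset 0x04 */
      PAD_FROM_TO (0x08, 0x28);   /* reserved hole 0x08 .. 0x27 */
      uint32_t tail_index;        /* offset 0x28 */
    } toy_regs_t;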
/* Only advanced descriptors are supported. */
-typedef struct {
+typedef struct
+{
u64 tail_address;
u64 head_address;
} ixge_rx_to_hw_descriptor_t;
-typedef struct {
+typedef struct
+{
u32 status[3];
u16 n_packet_bytes_this_descriptor;
u16 vlan_tag;
@@ -117,7 +123,8 @@ typedef struct {
/* For layer2 packets stats0 bottom 3 bits give ether type index from filter. */
#define IXGE_RX_DESCRIPTOR_STATUS0_LAYER2_ETHERNET_TYPE(s) ((s) & 7)
-typedef struct {
+typedef struct
+{
u64 buffer_address;
u16 n_bytes_this_buffer;
u16 status0;
@@ -138,8 +145,10 @@ typedef struct {
#define IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET(l) ((l) << 14)
} ixge_tx_descriptor_t;
-typedef struct {
- struct {
+typedef struct
+{
+ struct
+ {
u8 checksum_start_offset;
u8 checksum_insert_offset;
u16 checksum_end_offset;
@@ -154,14 +163,16 @@ typedef struct {
u16 max_tcp_segment_size;
} __attribute__ ((packed)) ixge_tx_context_descriptor_t;
-typedef union {
+typedef union
+{
ixge_rx_to_hw_descriptor_t rx_to_hw;
ixge_rx_from_hw_descriptor_t rx_from_hw;
ixge_tx_descriptor_t tx;
u32x4 as_u32x4;
} ixge_descriptor_t;
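[Editor's sketch: ixge_descriptor_t overlays the to-hardware, from-hardware, and TX views on one 16-byte slot, plus an as_u32x4 view so a whole descriptor copies in a single vector move (the d0.as_u32x4 = d[0].as_u32x4 statements in the RX loops above rely on this). A reduced version using plain words in place of u32x4:]

    #include <stdint.h>

    typedef union
    {
      /* "Give to hardware" layout: two physical addresses. */
      struct { uint64_t tail_address, head_address; } rx_to_hw;
      /* Raw view for whole-descriptor copies. */
      uint32_t as_u32[4];
    } toy_desc_t;

    static void
    copy_desc (toy_desc_t * dst, const toy_desc_t * src)
    {
      /* With vnet's u32x4 this is one SSE move; a plain struct copy
       * lets the compiler vectorize it the same way. */
      *dst = *src;
    }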
-typedef volatile struct {
+typedef volatile struct
+{
/* [2] pcie master disable
[3] mac reset
[26] global device reset */
@@ -173,56 +184,58 @@ typedef volatile struct {
[18] io active
[19] pcie master enable status */
u32 status_read_only;
- CLIB_PAD_FROM_TO (0xc, 0x18);
+ CLIB_PAD_FROM_TO (0xc, 0x18);
/* [14] pf reset done
[17] relaxed ordering disable
[26] extended vlan enable
[28] driver loaded */
u32 extended_control;
- CLIB_PAD_FROM_TO (0x1c, 0x20);
+ CLIB_PAD_FROM_TO (0x1c, 0x20);
/* software definable pins.
sdp_data [7:0]
sdp_is_output [15:8]
sdp_is_native [23:16]
sdp_function [31:24].
- */
+ */
u32 sdp_control;
- CLIB_PAD_FROM_TO (0x24, 0x28);
+ CLIB_PAD_FROM_TO (0x24, 0x28);
/* [0] i2c clock in
[1] i2c clock out
[2] i2c data in
[3] i2c data out */
u32 i2c_control;
- CLIB_PAD_FROM_TO (0x2c, 0x4c);
+ CLIB_PAD_FROM_TO (0x2c, 0x4c);
u32 tcp_timer;
- CLIB_PAD_FROM_TO (0x50, 0x200);
+ CLIB_PAD_FROM_TO (0x50, 0x200);
u32 led_control;
- CLIB_PAD_FROM_TO (0x204, 0x600);
+ CLIB_PAD_FROM_TO (0x204, 0x600);
u32 core_spare;
- CLIB_PAD_FROM_TO (0x604, 0x700);
+ CLIB_PAD_FROM_TO (0x604, 0x700);
- struct {
+ struct
+ {
u32 vflr_events_clear[4];
u32 mailbox_interrupt_status[4];
u32 mailbox_interrupt_enable[4];
- CLIB_PAD_FROM_TO (0x730, 0x800);
+ CLIB_PAD_FROM_TO (0x730, 0x800);
} pf_foo;
- struct {
+ struct
+ {
u32 status_write_1_to_clear;
- CLIB_PAD_FROM_TO (0x804, 0x808);
+ CLIB_PAD_FROM_TO (0x804, 0x808);
u32 status_write_1_to_set;
- CLIB_PAD_FROM_TO (0x80c, 0x810);
+ CLIB_PAD_FROM_TO (0x80c, 0x810);
u32 status_auto_clear_enable;
- CLIB_PAD_FROM_TO (0x814, 0x820);
+ CLIB_PAD_FROM_TO (0x814, 0x820);
/* [11:3] minimum inter-interrupt interval
- (2e-6 units; 20e-6 units for fast ethernet).
+ (2e-6 units; 20e-6 units for fast ethernet).
[15] low-latency interrupt moderation enable
[20:16] low-latency interrupt credit
[27:21] interval counter
@@ -230,9 +243,9 @@ typedef volatile struct {
u32 throttle0[24];
u32 enable_write_1_to_set;
- CLIB_PAD_FROM_TO (0x884, 0x888);
+ CLIB_PAD_FROM_TO (0x884, 0x888);
u32 enable_write_1_to_clear;
- CLIB_PAD_FROM_TO (0x88c, 0x890);
+ CLIB_PAD_FROM_TO (0x88c, 0x890);
u32 enable_auto_clear;
u32 msi_to_eitr_select;
/* [3:0] spd 0-3 interrupt detection enable
@@ -240,88 +253,89 @@ typedef volatile struct {
[5] other clear disable (makes other bits in status not clear on read)
etc. */
u32 control;
- CLIB_PAD_FROM_TO (0x89c, 0x900);
+ CLIB_PAD_FROM_TO (0x89c, 0x900);
/* Defines interrupt mapping for 128 rx + 128 tx queues.
64 x 4 8 bit entries.
For register [i]:
- [5:0] bit in interrupt status for rx queue 2*i + 0
- [7] valid bit
- [13:8] bit for tx queue 2*i + 0
- [15] valid bit
- similar for rx 2*i + 1 and tx 2*i + 1. */
+ [5:0] bit in interrupt status for rx queue 2*i + 0
+ [7] valid bit
+ [13:8] bit for tx queue 2*i + 0
+ [15] valid bit
+ similar for rx 2*i + 1 and tx 2*i + 1. */
u32 queue_mapping[64];
/* tcp timer [7:0] and other interrupts [15:8] */
u32 misc_mapping;
- CLIB_PAD_FROM_TO (0xa04, 0xa90);
+ CLIB_PAD_FROM_TO (0xa04, 0xa90);
/* 64 interrupts determined by mappings. */
u32 status1_write_1_to_clear[4];
u32 enable1_write_1_to_set[4];
u32 enable1_write_1_to_clear[4];
- CLIB_PAD_FROM_TO (0xac0, 0xad0);
+ CLIB_PAD_FROM_TO (0xac0, 0xad0);
u32 status1_enable_auto_clear[4];
- CLIB_PAD_FROM_TO (0xae0, 0x1000);
+ CLIB_PAD_FROM_TO (0xae0, 0x1000);
} interrupt;
ixge_dma_regs_t rx_dma0[64];
- CLIB_PAD_FROM_TO (0x2000, 0x2140);
+ CLIB_PAD_FROM_TO (0x2000, 0x2140);
u32 dcb_rx_packet_plane_t4_config[8];
u32 dcb_rx_packet_plane_t4_status[8];
- CLIB_PAD_FROM_TO (0x2180, 0x2300);
+ CLIB_PAD_FROM_TO (0x2180, 0x2300);
/* reg i defines mapping for 4 rx queues starting at 4*i + 0. */
u32 rx_queue_stats_mapping[32];
u32 rx_queue_stats_control;
- CLIB_PAD_FROM_TO (0x2384, 0x2410);
+ CLIB_PAD_FROM_TO (0x2384, 0x2410);
u32 fc_user_descriptor_ptr[2];
u32 fc_buffer_control;
- CLIB_PAD_FROM_TO (0x241c, 0x2420);
+ CLIB_PAD_FROM_TO (0x241c, 0x2420);
u32 fc_rx_dma;
- CLIB_PAD_FROM_TO (0x2424, 0x2430);
+ CLIB_PAD_FROM_TO (0x2424, 0x2430);
u32 dcb_packet_plane_control;
- CLIB_PAD_FROM_TO (0x2434, 0x2f00);
+ CLIB_PAD_FROM_TO (0x2434, 0x2f00);
u32 rx_dma_control;
u32 pf_queue_drop_enable;
- CLIB_PAD_FROM_TO (0x2f08, 0x2f20);
+ CLIB_PAD_FROM_TO (0x2f08, 0x2f20);
u32 rx_dma_descriptor_cache_config;
- CLIB_PAD_FROM_TO (0x2f24, 0x3000);
+ CLIB_PAD_FROM_TO (0x2f24, 0x3000);
/* 1 bit. */
u32 rx_enable;
- CLIB_PAD_FROM_TO (0x3004, 0x3008);
+ CLIB_PAD_FROM_TO (0x3004, 0x3008);
/* [15:0] ether type (little endian)
[31:16] opcode (big endian) */
u32 flow_control_control;
- CLIB_PAD_FROM_TO (0x300c, 0x3020);
+ CLIB_PAD_FROM_TO (0x300c, 0x3020);
/* 3 bit traffic class for each of 8 priorities. */
u32 rx_priority_to_traffic_class;
- CLIB_PAD_FROM_TO (0x3024, 0x3028);
+ CLIB_PAD_FROM_TO (0x3024, 0x3028);
u32 rx_coallesce_data_buffer_control;
- CLIB_PAD_FROM_TO (0x302c, 0x3190);
+ CLIB_PAD_FROM_TO (0x302c, 0x3190);
u32 rx_packet_buffer_flush_detect;
- CLIB_PAD_FROM_TO (0x3194, 0x3200);
- u32 flow_control_tx_timers[4]; /* 2 timer values */
- CLIB_PAD_FROM_TO (0x3210, 0x3220);
+ CLIB_PAD_FROM_TO (0x3194, 0x3200);
+ u32 flow_control_tx_timers[4]; /* 2 timer values */
+ CLIB_PAD_FROM_TO (0x3210, 0x3220);
u32 flow_control_rx_threshold_lo[8];
- CLIB_PAD_FROM_TO (0x3240, 0x3260);
+ CLIB_PAD_FROM_TO (0x3240, 0x3260);
u32 flow_control_rx_threshold_hi[8];
- CLIB_PAD_FROM_TO (0x3280, 0x32a0);
+ CLIB_PAD_FROM_TO (0x3280, 0x32a0);
u32 flow_control_refresh_threshold;
- CLIB_PAD_FROM_TO (0x32a4, 0x3c00);
+ CLIB_PAD_FROM_TO (0x32a4, 0x3c00);
/* For each of 8 traffic classes (units of bytes). */
u32 rx_packet_buffer_size[8];
- CLIB_PAD_FROM_TO (0x3c20, 0x3d00);
+ CLIB_PAD_FROM_TO (0x3c20, 0x3d00);
u32 flow_control_config;
- CLIB_PAD_FROM_TO (0x3d04, 0x4200);
+ CLIB_PAD_FROM_TO (0x3d04, 0x4200);
- struct {
+ struct
+ {
u32 pcs_config;
- CLIB_PAD_FROM_TO (0x4204, 0x4208);
+ CLIB_PAD_FROM_TO (0x4204, 0x4208);
u32 link_control;
u32 link_status;
u32 pcs_debug[2];
@@ -329,10 +343,11 @@ typedef volatile struct {
u32 link_partner_ability;
u32 auto_negotiation_tx_next_page;
u32 auto_negotiation_link_partner_next_page;
- CLIB_PAD_FROM_TO (0x4228, 0x4240);
+ CLIB_PAD_FROM_TO (0x4228, 0x4240);
} gige_mac;
- struct {
+ struct
+ {
/* [0] tx crc enable
[2] enable frames up to max frame size register [31:16]
[10] pad frames < 64 bytes if specified by user
@@ -349,25 +364,25 @@ typedef volatile struct {
u32 status;
u32 pause_and_pace_control;
- CLIB_PAD_FROM_TO (0x424c, 0x425c);
+ CLIB_PAD_FROM_TO (0x424c, 0x425c);
u32 phy_command;
u32 phy_data;
- CLIB_PAD_FROM_TO (0x4264, 0x4268);
+ CLIB_PAD_FROM_TO (0x4264, 0x4268);
/* [31:16] max frame size in bytes. */
u32 rx_max_frame_size;
- CLIB_PAD_FROM_TO (0x426c, 0x4288);
+ CLIB_PAD_FROM_TO (0x426c, 0x4288);
/* [0]
- [2] pcs receive link up? (latch lo)
- [7] local fault
+ [2] pcs receive link up? (latch lo)
+ [7] local fault
[1]
- [0] pcs 10g base r capable
- [1] pcs 10g base x capable
- [2] pcs 10g base w capable
- [10] rx local fault
- [11] tx local fault
- [15:14] 2 => device present at this address (else not present) */
+ [0] pcs 10g base r capable
+ [1] pcs 10g base x capable
+ [2] pcs 10g base w capable
+ [10] rx local fault
+ [11] tx local fault
+ [15:14] 2 => device present at this address (else not present) */
u32 xgxs_status[2];
u32 base_x_pcs_status;
@@ -397,14 +412,14 @@ typedef volatile struct {
[11] restart autoneg on transition to dx power state
[12] restart autoneg
[15:13] link mode:
- 0 => 1g no autoneg
- 1 => 10g kx4 parallel link no autoneg
- 2 => 1g bx autoneg
- 3 => 10g sfi serdes
- 4 => kx4/kx/kr
- 5 => xgmii 1g/100m
- 6 => kx4/kx/kr 1g an
- 7 kx4/kx/kr sgmii.
+ 0 => 1g no autoneg
+ 1 => 10g kx4 parallel link no autoneg
+ 2 => 1g bx autoneg
+ 3 => 10g sfi serdes
+ 4 => kx4/kx/kr
+ 5 => xgmii 1g/100m
+ 6 => kx4/kx/kr 1g an
+ 7 kx4/kx/kr sgmii.
[16] kr support
[17] fec requested
[18] fec ability
@@ -432,99 +447,100 @@ typedef volatile struct {
[24] 10g kr pcs enabled
[25] sgmii enabled
[27:26] mac link mode
- 0 => 1g
- 1 => 10g parallel
- 2 => 10g serial
- 3 => autoneg
+ 0 => 1g
+ 1 => 10g parallel
+ 2 => 10g serial
+ 3 => autoneg
[29:28] link speed
- 1 => 100m
- 2 => 1g
- 3 => 10g
+ 1 => 100m
+ 2 => 1g
+ 3 => 10g
[30] link is up
[31] kx/kx4/kr backplane autoneg completed successfully. */
u32 link_status;
/* [17:16] pma/pmd for 10g serial
- 0 => kr, 2 => sfi
+ 0 => kr, 2 => sfi
[18] disable dme pages */
u32 auto_negotiation_control2;
- CLIB_PAD_FROM_TO (0x42ac, 0x42b0);
+ CLIB_PAD_FROM_TO (0x42ac, 0x42b0);
u32 link_partner_ability[2];
- CLIB_PAD_FROM_TO (0x42b8, 0x42d0);
+ CLIB_PAD_FROM_TO (0x42b8, 0x42d0);
u32 manageability_control;
u32 link_partner_next_page[2];
- CLIB_PAD_FROM_TO (0x42dc, 0x42e0);
+ CLIB_PAD_FROM_TO (0x42dc, 0x42e0);
u32 kr_pcs_control;
u32 kr_pcs_status;
u32 fec_status[2];
- CLIB_PAD_FROM_TO (0x42f0, 0x4314);
+ CLIB_PAD_FROM_TO (0x42f0, 0x4314);
u32 sgmii_control;
- CLIB_PAD_FROM_TO (0x4318, 0x4324);
+ CLIB_PAD_FROM_TO (0x4318, 0x4324);
u32 link_status2;
- CLIB_PAD_FROM_TO (0x4328, 0x4900);
+ CLIB_PAD_FROM_TO (0x4328, 0x4900);
} xge_mac;
u32 tx_dcb_control;
u32 tx_dcb_descriptor_plane_queue_select;
u32 tx_dcb_descriptor_plane_t1_config;
u32 tx_dcb_descriptor_plane_t1_status;
- CLIB_PAD_FROM_TO (0x4910, 0x4950);
+ CLIB_PAD_FROM_TO (0x4910, 0x4950);
/* For each TC in units of 1k bytes. */
u32 tx_packet_buffer_thresholds[8];
- CLIB_PAD_FROM_TO (0x4970, 0x4980);
- struct {
+ CLIB_PAD_FROM_TO (0x4970, 0x4980);
+ struct
+ {
u32 mmw;
u32 config;
u32 status;
u32 rate_drift;
} dcb_tx_rate_scheduler;
- CLIB_PAD_FROM_TO (0x4990, 0x4a80);
+ CLIB_PAD_FROM_TO (0x4990, 0x4a80);
u32 tx_dma_control;
- CLIB_PAD_FROM_TO (0x4a84, 0x4a88);
+ CLIB_PAD_FROM_TO (0x4a84, 0x4a88);
u32 tx_dma_tcp_flags_control[2];
- CLIB_PAD_FROM_TO (0x4a90, 0x4b00);
+ CLIB_PAD_FROM_TO (0x4a90, 0x4b00);
u32 pf_mailbox[64];
- CLIB_PAD_FROM_TO (0x4c00, 0x5000);
+ CLIB_PAD_FROM_TO (0x4c00, 0x5000);
/* RX */
u32 checksum_control;
- CLIB_PAD_FROM_TO (0x5004, 0x5008);
+ CLIB_PAD_FROM_TO (0x5004, 0x5008);
u32 rx_filter_control;
- CLIB_PAD_FROM_TO (0x500c, 0x5010);
+ CLIB_PAD_FROM_TO (0x500c, 0x5010);
u32 management_vlan_tag[8];
u32 management_udp_tcp_ports[8];
- CLIB_PAD_FROM_TO (0x5050, 0x5078);
+ CLIB_PAD_FROM_TO (0x5050, 0x5078);
/* little endian. */
u32 extended_vlan_ether_type;
- CLIB_PAD_FROM_TO (0x507c, 0x5080);
+ CLIB_PAD_FROM_TO (0x507c, 0x5080);
/* [1] store/dma bad packets
[8] accept all multicast
[9] accept all unicast
[10] accept all broadcast. */
u32 filter_control;
- CLIB_PAD_FROM_TO (0x5084, 0x5088);
+ CLIB_PAD_FROM_TO (0x5084, 0x5088);
/* [15:0] vlan ethernet type (0x8100) little endian
[28] cfi bit expected
[29] drop packets with unexpected cfi bit
[30] vlan filter enable. */
u32 vlan_control;
- CLIB_PAD_FROM_TO (0x508c, 0x5090);
+ CLIB_PAD_FROM_TO (0x508c, 0x5090);
/* [1:0] hi bit of ethernet address for 12 bit index into multicast table
- 0 => 47, 1 => 46, 2 => 45, 3 => 43.
+ 0 => 47, 1 => 46, 2 => 45, 3 => 43.
[2] enable multicast filter
*/
u32 multicast_control;
- CLIB_PAD_FROM_TO (0x5094, 0x5100);
+ CLIB_PAD_FROM_TO (0x5094, 0x5100);
u32 fcoe_rx_control;
- CLIB_PAD_FROM_TO (0x5104, 0x5108);
+ CLIB_PAD_FROM_TO (0x5104, 0x5108);
u32 fc_flt_context;
- CLIB_PAD_FROM_TO (0x510c, 0x5110);
+ CLIB_PAD_FROM_TO (0x510c, 0x5110);
u32 fc_filter_control;
- CLIB_PAD_FROM_TO (0x5114, 0x5120);
+ CLIB_PAD_FROM_TO (0x5114, 0x5120);
u32 rx_message_type_lo;
- CLIB_PAD_FROM_TO (0x5124, 0x5128);
+ CLIB_PAD_FROM_TO (0x5124, 0x5128);
/* [15:0] ethernet type (little endian)
      [18:16] matches pri in vlan tag
[19] priority match enable
@@ -535,50 +551,50 @@ typedef volatile struct {
[31] filter enable.
(See ethernet_type_queue_select.) */
u32 ethernet_type_queue_filter[8];
- CLIB_PAD_FROM_TO (0x5148, 0x5160);
+ CLIB_PAD_FROM_TO (0x5148, 0x5160);
/* [7:0] l2 ethernet type and
[15:8] l2 ethernet type or */
u32 management_decision_filters1[8];
u32 vf_vm_tx_switch_loopback_enable[2];
u32 rx_time_sync_control;
- CLIB_PAD_FROM_TO (0x518c, 0x5190);
+ CLIB_PAD_FROM_TO (0x518c, 0x5190);
u32 management_ethernet_type_filters[4];
u32 rx_timestamp_attributes_lo;
u32 rx_timestamp_hi;
u32 rx_timestamp_attributes_hi;
- CLIB_PAD_FROM_TO (0x51ac, 0x51b0);
+ CLIB_PAD_FROM_TO (0x51ac, 0x51b0);
u32 pf_virtual_control;
- CLIB_PAD_FROM_TO (0x51b4, 0x51d8);
+ CLIB_PAD_FROM_TO (0x51b4, 0x51d8);
u32 fc_offset_parameter;
- CLIB_PAD_FROM_TO (0x51dc, 0x51e0);
+ CLIB_PAD_FROM_TO (0x51dc, 0x51e0);
u32 vf_rx_enable[2];
u32 rx_timestamp_lo;
- CLIB_PAD_FROM_TO (0x51ec, 0x5200);
+ CLIB_PAD_FROM_TO (0x51ec, 0x5200);
/* 12 bits determined by multicast_control
lookup bits in this vector. */
u32 multicast_enable[128];
/* [0] ethernet address [31:0]
[1] [15:0] ethernet address [47:32]
- [31] valid bit.
+ [31] valid bit.
Index 0 is read from eeprom after reset. */
u32 rx_ethernet_address0[16][2];
- CLIB_PAD_FROM_TO (0x5480, 0x5800);
+ CLIB_PAD_FROM_TO (0x5480, 0x5800);
u32 wake_up_control;
- CLIB_PAD_FROM_TO (0x5804, 0x5808);
+ CLIB_PAD_FROM_TO (0x5804, 0x5808);
u32 wake_up_filter_control;
- CLIB_PAD_FROM_TO (0x580c, 0x5818);
+ CLIB_PAD_FROM_TO (0x580c, 0x5818);
u32 multiple_rx_queue_command_82598;
- CLIB_PAD_FROM_TO (0x581c, 0x5820);
+ CLIB_PAD_FROM_TO (0x581c, 0x5820);
u32 management_control;
u32 management_filter_control;
- CLIB_PAD_FROM_TO (0x5828, 0x5838);
+ CLIB_PAD_FROM_TO (0x5828, 0x5838);
u32 wake_up_ip4_address_valid;
- CLIB_PAD_FROM_TO (0x583c, 0x5840);
+ CLIB_PAD_FROM_TO (0x583c, 0x5840);
u32 wake_up_ip4_address_table[4];
u32 management_control_to_host;
- CLIB_PAD_FROM_TO (0x5854, 0x5880);
+ CLIB_PAD_FROM_TO (0x5854, 0x5880);
u32 wake_up_ip6_address_table[4];
/* unicast_and broadcast_and vlan_and ip_address_and
@@ -586,55 +602,58 @@ typedef volatile struct {
u32 management_decision_filters[8];
u32 management_ip4_or_ip6_address_filters[4][4];
- CLIB_PAD_FROM_TO (0x58f0, 0x5900);
+ CLIB_PAD_FROM_TO (0x58f0, 0x5900);
u32 wake_up_packet_length;
- CLIB_PAD_FROM_TO (0x5904, 0x5910);
+ CLIB_PAD_FROM_TO (0x5904, 0x5910);
u32 management_ethernet_address_filters[4][2];
- CLIB_PAD_FROM_TO (0x5930, 0x5a00);
+ CLIB_PAD_FROM_TO (0x5930, 0x5a00);
u32 wake_up_packet_memory[32];
- CLIB_PAD_FROM_TO (0x5a80, 0x5c00);
+ CLIB_PAD_FROM_TO (0x5a80, 0x5c00);
u32 redirection_table_82598[32];
u32 rss_random_keys_82598[10];
- CLIB_PAD_FROM_TO (0x5ca8, 0x6000);
+ CLIB_PAD_FROM_TO (0x5ca8, 0x6000);
ixge_dma_regs_t tx_dma[128];
u32 pf_vm_vlan_insert[64];
u32 tx_dma_tcp_max_alloc_size_requests;
- CLIB_PAD_FROM_TO (0x8104, 0x8110);
+ CLIB_PAD_FROM_TO (0x8104, 0x8110);
u32 vf_tx_enable[2];
- CLIB_PAD_FROM_TO (0x8118, 0x8120);
+ CLIB_PAD_FROM_TO (0x8118, 0x8120);
/* [0] dcb mode enable
[1] virtualization mode enable
[3:2] number of tcs/qs per pool. */
u32 multiple_tx_queues_command;
- CLIB_PAD_FROM_TO (0x8124, 0x8200);
+ CLIB_PAD_FROM_TO (0x8124, 0x8200);
u32 pf_vf_anti_spoof[8];
u32 pf_dma_tx_switch_control;
- CLIB_PAD_FROM_TO (0x8224, 0x82e0);
+ CLIB_PAD_FROM_TO (0x8224, 0x82e0);
u32 tx_strict_low_latency_queues[4];
- CLIB_PAD_FROM_TO (0x82f0, 0x8600);
+ CLIB_PAD_FROM_TO (0x82f0, 0x8600);
u32 tx_queue_stats_mapping_82599[32];
u32 tx_queue_packet_counts[32];
u32 tx_queue_byte_counts[32][2];
- struct {
+ struct
+ {
u32 control;
u32 status;
u32 buffer_almost_full;
- CLIB_PAD_FROM_TO (0x880c, 0x8810);
+ CLIB_PAD_FROM_TO (0x880c, 0x8810);
u32 buffer_min_ifg;
- CLIB_PAD_FROM_TO (0x8814, 0x8900);
+ CLIB_PAD_FROM_TO (0x8814, 0x8900);
} tx_security;
- struct {
+ struct
+ {
u32 index;
u32 salt;
u32 key[4];
- CLIB_PAD_FROM_TO (0x8918, 0x8a00);
+ CLIB_PAD_FROM_TO (0x8918, 0x8a00);
} tx_ipsec;
- struct {
+ struct
+ {
u32 capabilities;
u32 control;
u32 tx_sci[2];
@@ -644,10 +663,11 @@ typedef volatile struct {
/* untagged packets, encrypted packets, protected packets,
encrypted bytes, protected bytes */
u32 stats[5];
- CLIB_PAD_FROM_TO (0x8a50, 0x8c00);
+ CLIB_PAD_FROM_TO (0x8a50, 0x8c00);
} tx_link_security;
- struct {
+ struct
+ {
u32 control;
u32 timestamp_value[2];
u32 system_time[2];
@@ -655,18 +675,20 @@ typedef volatile struct {
u32 time_adjustment_offset[2];
u32 aux_control;
u32 target_time[2][2];
- CLIB_PAD_FROM_TO (0x8c34, 0x8c3c);
+ CLIB_PAD_FROM_TO (0x8c34, 0x8c3c);
u32 aux_time_stamp[2][2];
- CLIB_PAD_FROM_TO (0x8c4c, 0x8d00);
+ CLIB_PAD_FROM_TO (0x8c4c, 0x8d00);
} tx_timesync;
- struct {
+ struct
+ {
u32 control;
u32 status;
- CLIB_PAD_FROM_TO (0x8d08, 0x8e00);
+ CLIB_PAD_FROM_TO (0x8d08, 0x8e00);
} rx_security;
- struct {
+ struct
+ {
u32 index;
u32 ip_address[4];
u32 spi;
@@ -674,10 +696,11 @@ typedef volatile struct {
u32 key[4];
u32 salt;
u32 mode;
- CLIB_PAD_FROM_TO (0x8e34, 0x8f00);
+ CLIB_PAD_FROM_TO (0x8e34, 0x8f00);
} rx_ipsec;
- struct {
+ struct
+ {
u32 capabilities;
u32 control;
u32 sci[2];
@@ -686,12 +709,12 @@ typedef volatile struct {
u32 key[2][4];
/* see datasheet */
u32 stats[17];
- CLIB_PAD_FROM_TO (0x8f84, 0x9000);
+ CLIB_PAD_FROM_TO (0x8f84, 0x9000);
} rx_link_security;
/* 4 wake up, 2 management, 2 wake up. */
u32 flexible_filters[8][16][4];
- CLIB_PAD_FROM_TO (0x9800, 0xa000);
+ CLIB_PAD_FROM_TO (0x9800, 0xa000);
/* 4096 bits. */
u32 vlan_filter[128];
@@ -704,26 +727,27 @@ typedef volatile struct {
/* select one of 64 pools for each rx address. */
u32 rx_ethernet_address_pool_select[128][2];
- CLIB_PAD_FROM_TO (0xaa00, 0xc800);
+ CLIB_PAD_FROM_TO (0xaa00, 0xc800);
u32 tx_priority_to_traffic_class;
- CLIB_PAD_FROM_TO (0xc804, 0xcc00);
+ CLIB_PAD_FROM_TO (0xc804, 0xcc00);
   /* In units of 1k bytes. Total packet buffer is 160k. */
u32 tx_packet_buffer_size[8];
- CLIB_PAD_FROM_TO (0xcc20, 0xcd10);
+ CLIB_PAD_FROM_TO (0xcc20, 0xcd10);
u32 tx_manageability_tc_mapping;
- CLIB_PAD_FROM_TO (0xcd14, 0xcd20);
+ CLIB_PAD_FROM_TO (0xcd14, 0xcd20);
u32 dcb_tx_packet_plane_t2_config[8];
u32 dcb_tx_packet_plane_t2_status[8];
- CLIB_PAD_FROM_TO (0xcd60, 0xce00);
+ CLIB_PAD_FROM_TO (0xcd60, 0xce00);
u32 tx_flow_control_status;
- CLIB_PAD_FROM_TO (0xce04, 0xd000);
+ CLIB_PAD_FROM_TO (0xce04, 0xd000);
ixge_dma_regs_t rx_dma1[64];
- struct {
+ struct
+ {
/* Bigendian ip4 src/dst address. */
u32 src_address[128];
u32 dst_address[128];
@@ -750,36 +774,38 @@ typedef volatile struct {
u32 interrupt[128];
} ip4_filters;
- CLIB_PAD_FROM_TO (0xea00, 0xeb00);
+ CLIB_PAD_FROM_TO (0xea00, 0xeb00);
/* 4 bit rss output index indexed by 7 bit hash.
128 8 bit fields = 32 registers. */
u32 redirection_table_82599[32];
u32 rss_random_key_82599[10];
- CLIB_PAD_FROM_TO (0xeba8, 0xec00);
+ CLIB_PAD_FROM_TO (0xeba8, 0xec00);
/* [15:0] reserved
[22:16] rx queue index
[29] low-latency interrupt on match
[31] enable */
u32 ethernet_type_queue_select[8];
- CLIB_PAD_FROM_TO (0xec20, 0xec30);
+ CLIB_PAD_FROM_TO (0xec20, 0xec30);
u32 syn_packet_queue_filter;
- CLIB_PAD_FROM_TO (0xec34, 0xec60);
+ CLIB_PAD_FROM_TO (0xec34, 0xec60);
u32 immediate_interrupt_rx_vlan_priority;
- CLIB_PAD_FROM_TO (0xec64, 0xec70);
+ CLIB_PAD_FROM_TO (0xec64, 0xec70);
u32 rss_queues_per_traffic_class;
- CLIB_PAD_FROM_TO (0xec74, 0xec90);
+ CLIB_PAD_FROM_TO (0xec74, 0xec90);
u32 lli_size_threshold;
- CLIB_PAD_FROM_TO (0xec94, 0xed00);
+ CLIB_PAD_FROM_TO (0xec94, 0xed00);
- struct {
+ struct
+ {
u32 control;
- CLIB_PAD_FROM_TO (0xed04, 0xed10);
+ CLIB_PAD_FROM_TO (0xed04, 0xed10);
u32 table[8];
- CLIB_PAD_FROM_TO (0xed30, 0xee00);
+ CLIB_PAD_FROM_TO (0xed30, 0xee00);
} fcoe_redirection;
- struct {
+ struct
+ {
/* [1:0] packet buffer allocation 0 => disabled, else 64k*2^(f-1)
[3] packet buffer initialization done
   [4] prefetch match mode
@@ -790,7 +816,7 @@ typedef volatile struct {
[27:24] max linked list length
[31:28] full threshold. */
u32 control;
- CLIB_PAD_FROM_TO (0xee04, 0xee0c);
+ CLIB_PAD_FROM_TO (0xee04, 0xee0c);
u32 data[8];
@@ -810,7 +836,7 @@ typedef volatile struct {
[29:24] pool. */
u32 command;
- CLIB_PAD_FROM_TO (0xee30, 0xee3c);
+ CLIB_PAD_FROM_TO (0xee30, 0xee3c);
/* ip4 dst/src address, tcp ports, udp ports.
   a set bit means the corresponding bit is ignored. */
u32 ip4_masks[4];
@@ -819,7 +845,7 @@ typedef volatile struct {
u32 failed_usage_stats;
u32 filters_match_stats;
u32 filters_miss_stats;
- CLIB_PAD_FROM_TO (0xee60, 0xee68);
+ CLIB_PAD_FROM_TO (0xee60, 0xee68);
/* Lookup, signature. */
u32 hash_keys[2];
/* [15:0] ip6 src address 1 bit per byte
@@ -832,10 +858,11 @@ typedef volatile struct {
[4] flex
[5] dst ip6. */
u32 other_mask;
- CLIB_PAD_FROM_TO (0xee78, 0xf000);
+ CLIB_PAD_FROM_TO (0xee78, 0xf000);
} flow_director;
- struct {
+ struct
+ {
u32 l2_control[64];
u32 vlan_pool_filter[64];
u32 vlan_pool_filter_bitmap[128];
@@ -843,7 +870,7 @@ typedef volatile struct {
u32 mirror_rule[4];
u32 mirror_rule_vlan[8];
u32 mirror_rule_pool[8];
- CLIB_PAD_FROM_TO (0xf650, 0x10010);
+ CLIB_PAD_FROM_TO (0xf650, 0x10010);
} pf_bar;
u32 eeprom_flash_control;
@@ -852,26 +879,27 @@ typedef volatile struct {
[15:2] address
[31:16] read data. */
u32 eeprom_read;
- CLIB_PAD_FROM_TO (0x10018, 0x1001c);
+ CLIB_PAD_FROM_TO (0x10018, 0x1001c);
u32 flash_access;
- CLIB_PAD_FROM_TO (0x10020, 0x10114);
+ CLIB_PAD_FROM_TO (0x10020, 0x10114);
u32 flash_data;
u32 flash_control;
u32 flash_read_data;
- CLIB_PAD_FROM_TO (0x10120, 0x1013c);
+ CLIB_PAD_FROM_TO (0x10120, 0x1013c);
u32 flash_opcode;
u32 software_semaphore;
- CLIB_PAD_FROM_TO (0x10144, 0x10148);
+ CLIB_PAD_FROM_TO (0x10144, 0x10148);
u32 firmware_semaphore;
- CLIB_PAD_FROM_TO (0x1014c, 0x10160);
+ CLIB_PAD_FROM_TO (0x1014c, 0x10160);
u32 software_firmware_sync;
- CLIB_PAD_FROM_TO (0x10164, 0x10200);
+ CLIB_PAD_FROM_TO (0x10164, 0x10200);
u32 general_rx_control;
- CLIB_PAD_FROM_TO (0x10204, 0x11000);
+ CLIB_PAD_FROM_TO (0x10204, 0x11000);
- struct {
+ struct
+ {
u32 control;
- CLIB_PAD_FROM_TO (0x11004, 0x11010);
+ CLIB_PAD_FROM_TO (0x11004, 0x11010);
/* [3:0] enable counters
[7:4] leaky bucket counter mode
[29] reset
@@ -884,56 +912,62 @@ typedef volatile struct {
0x10 reqs that reached timeout
etc. */
u32 counter_event;
- CLIB_PAD_FROM_TO (0x11018, 0x11020);
+ CLIB_PAD_FROM_TO (0x11018, 0x11020);
u32 counters_clear_on_read[4];
u32 counter_config[4];
- struct {
+ struct
+ {
u32 address;
u32 data;
} indirect_access;
- CLIB_PAD_FROM_TO (0x11048, 0x11050);
+ CLIB_PAD_FROM_TO (0x11048, 0x11050);
u32 extended_control;
- CLIB_PAD_FROM_TO (0x11054, 0x11064);
+ CLIB_PAD_FROM_TO (0x11054, 0x11064);
u32 mirrored_revision_id;
- CLIB_PAD_FROM_TO (0x11068, 0x11070);
+ CLIB_PAD_FROM_TO (0x11068, 0x11070);
u32 dca_requester_id_information;
/* [0] global disable
[4:1] mode: 0 => legacy, 1 => dca 1.0. */
u32 dca_control;
- CLIB_PAD_FROM_TO (0x11078, 0x110b0);
+ CLIB_PAD_FROM_TO (0x11078, 0x110b0);
/* [0] pci completion abort
[1] unsupported i/o address
[2] wrong byte enable
[3] pci timeout */
u32 pcie_interrupt_status;
- CLIB_PAD_FROM_TO (0x110b4, 0x110b8);
+ CLIB_PAD_FROM_TO (0x110b4, 0x110b8);
u32 pcie_interrupt_enable;
- CLIB_PAD_FROM_TO (0x110bc, 0x110c0);
+ CLIB_PAD_FROM_TO (0x110bc, 0x110c0);
u32 msi_x_pba_clear[8];
- CLIB_PAD_FROM_TO (0x110e0, 0x12300);
+ CLIB_PAD_FROM_TO (0x110e0, 0x12300);
} pcie;
- u32 interrupt_throttle1[128-24];
- CLIB_PAD_FROM_TO (0x124a0, 0x14f00);
+ u32 interrupt_throttle1[128 - 24];
+ CLIB_PAD_FROM_TO (0x124a0, 0x14f00);
u32 core_analog_config;
- CLIB_PAD_FROM_TO (0x14f04, 0x14f10);
+ CLIB_PAD_FROM_TO (0x14f04, 0x14f10);
u32 core_common_config;
- CLIB_PAD_FROM_TO (0x14f14, 0x15f14);
+ CLIB_PAD_FROM_TO (0x14f14, 0x15f14);
u32 link_sec_software_firmware_interface;
} ixge_regs_t;
-typedef union {
- struct {
+typedef union
+{
+ struct
+ {
/* Addresses bigendian. */
- union {
- struct {
+ union
+ {
+ struct
+ {
ip6_address_t src_address;
u32 unused[1];
} ip6;
- struct {
+ struct
+ {
u32 unused[3];
ip4_address_t src_address, dst_address;
} ip4;
@@ -961,7 +995,7 @@ ixge_throttle_queue_interrupt (ixge_regs_t * r,
u32 queue_interrupt_index,
f64 inter_interrupt_interval_in_secs)
{
- volatile u32 * tr =
+ volatile u32 *tr =
(queue_interrupt_index < ARRAY_LEN (r->interrupt.throttle0)
? &r->interrupt.throttle0[queue_interrupt_index]
: &r->interrupt_throttle1[queue_interrupt_index]);
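
The hunk above shows that queue throttle registers are split across two arrays (throttle0 inside the interrupt block, interrupt_throttle1 after the pcie block), and the helper picks one by index. A minimal stand-alone sketch of that selection, with our own names and an explicit index rebase into the second array; the driver's actual indexing and the register encoding should be taken from the source and datasheet, not from this sketch:

#include <stdint.h>

#define N_THROTTLE0 24
#define N_THROTTLE1 (128 - 24)

static volatile uint32_t throttle0[N_THROTTLE0];
static volatile uint32_t throttle1[N_THROTTLE1];

/* Pick the throttle register for a queue interrupt index: the first
   24 indices live in throttle0, the rest in throttle1 (rebased by 24
   here, which is our simplification). */
static volatile uint32_t *
throttle_reg (uint32_t queue_interrupt_index)
{
  return queue_interrupt_index < N_THROTTLE0
    ? &throttle0[queue_interrupt_index]
    : &throttle1[queue_interrupt_index - N_THROTTLE0];
}
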
@@ -1064,25 +1098,28 @@ ixge_throttle_queue_interrupt (ixge_regs_t * r,
-typedef enum {
+typedef enum
+{
#define _(a,f) IXGE_COUNTER_##f,
#define _64(a,f) _(a,f)
foreach_ixge_counter
#undef _
#undef _64
- IXGE_N_COUNTER,
+ IXGE_N_COUNTER,
} ixge_counter_type_t;
-typedef struct {
+typedef struct
+{
u32 mdio_address;
/* 32 bit ID read from ID registers. */
u32 id;
} ixge_phy_t;
-typedef struct {
+typedef struct
+{
/* Cache aligned descriptors. */
- ixge_descriptor_t * descriptors;
+ ixge_descriptor_t *descriptors;
/* Number of descriptors in table. */
u32 n_descriptors;
@@ -1094,20 +1131,23 @@ typedef struct {
u32 queue_index;
/* Buffer indices corresponding to each active descriptor. */
- u32 * descriptor_buffer_indices;
+ u32 *descriptor_buffer_indices;
- union {
- struct {
- u32 * volatile head_index_write_back;
+ union
+ {
+ struct
+ {
+ u32 *volatile head_index_write_back;
u32 n_buffers_on_ring;
} tx;
- struct {
+ struct
+ {
/* Buffer indices to use to replenish each descriptor. */
- u32 * replenish_buffer_indices;
+ u32 *replenish_buffer_indices;
- vlib_node_runtime_t * node;
+ vlib_node_runtime_t *node;
u32 next_index;
u32 saved_start_of_packet_buffer_index;
@@ -1152,15 +1192,17 @@ typedef struct {
_ (82599_t3_lom, 0x151c) \
_ (x540t, 0x1528)
-typedef enum {
+typedef enum
+{
#define _(f,n) IXGE_##f = n,
foreach_ixge_pci_device_id
#undef _
} ixge_pci_device_id_t;
-typedef struct {
+typedef struct
+{
/* registers */
- ixge_regs_t * regs;
+ ixge_regs_t *regs;
/* Specific next index when using dynamic redirection */
u32 per_interface_next_index;
@@ -1179,7 +1221,7 @@ typedef struct {
/* VLIB interface for this instance. */
u32 vlib_hw_if_index, vlib_sw_if_index;
- ixge_dma_queue_t * dma_queues[VLIB_N_RX_TX];
+ ixge_dma_queue_t *dma_queues[VLIB_N_RX_TX];
/* Phy index (0 or 1) and address on MDI bus. */
u32 phy_index;
@@ -1195,11 +1237,12 @@ typedef struct {
u64 counters[IXGE_N_COUNTER], counters_last_clear[IXGE_N_COUNTER];
} ixge_device_t;
-typedef struct {
- vlib_main_t * vlib_main;
+typedef struct
+{
+ vlib_main_t *vlib_main;
/* Vector of devices. */
- ixge_device_t * devices;
+ ixge_device_t *devices;
/* Descriptor ring sizes. */
u32 n_descriptors[VLIB_N_RX_TX];
@@ -1218,9 +1261,9 @@ typedef struct {
ixge_tx_descriptor_t tx_descriptor_template, tx_descriptor_template_mask;
/* Vector of buffers for which TX is done and can be freed. */
- u32 * tx_buffers_pending_free;
+ u32 *tx_buffers_pending_free;
- u32 * rx_buffers_to_add;
+ u32 *rx_buffers_to_add;
f64 time_last_stats_update;
} ixge_main_t;
@@ -1228,7 +1271,8 @@ typedef struct {
ixge_main_t ixge_main;
vnet_device_class_t ixge_device_class;
-typedef enum {
+typedef enum
+{
IXGE_RX_NEXT_IP4_INPUT,
IXGE_RX_NEXT_IP6_INPUT,
IXGE_RX_NEXT_ETHERNET_INPUT,
@@ -1239,3 +1283,11 @@ typedef enum {
void ixge_set_next_node (ixge_rx_next_t, char *);
#endif /* included_ixge_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
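
Much of ixge.h above is CLIB_PAD_FROM_TO bookkeeping: the macro inserts anonymous padding so each register field lands at its fixed BAR offset, and the struct is declared volatile so accesses hit the device. A small self-contained sketch of the idiom with a stand-in pad macro and a compile-time offset check (all names here are illustrative, not VPP's):

#include <stdint.h>
#include <stddef.h>

/* Insert unnamed padding so the next field starts at byte offset
   'to'; 'from' is the offset just past the previous field. */
#define PAD_FROM_TO(from, to) uint8_t pad_##from[(to) - (from)]

typedef struct
{
  uint32_t control;		/* BAR offset 0x0 */
  PAD_FROM_TO (0x4, 0x10);
  uint32_t status;		/* BAR offset 0x10 */
} demo_regs_t;

/* Fail the build if the layout ever drifts. */
_Static_assert (offsetof (demo_regs_t, status) == 0x10,
		"status register must land at offset 0x10");

/* A device mapping would then use: volatile demo_regs_t *regs = bar; */
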
diff --git a/vnet/vnet/devices/nic/sfp.c b/vnet/vnet/devices/nic/sfp.c
index fba94d73d95..9e9c008dc15 100644
--- a/vnet/vnet/devices/nic/sfp.c
+++ b/vnet/vnet/devices/nic/sfp.c
@@ -15,11 +15,12 @@
#include <vnet/devices/nic/sfp.h>
-static u8 * format_space_terminated (u8 * s, va_list * args)
+static u8 *
+format_space_terminated (u8 * s, va_list * args)
{
u32 l = va_arg (*args, u32);
- u8 * v = va_arg (*args, u8 *);
- u8 * p;
+ u8 *v = va_arg (*args, u8 *);
+ u8 *p;
for (p = v + l - 1; p >= v && p[0] == ' '; p--)
;
@@ -27,10 +28,11 @@ static u8 * format_space_terminated (u8 * s, va_list * args)
return s;
}
-static u8 * format_sfp_id (u8 * s, va_list * args)
+static u8 *
+format_sfp_id (u8 * s, va_list * args)
{
u32 id = va_arg (*args, u32);
- char * t = 0;
+ char *t = 0;
switch (id)
{
#define _(f) case SFP_ID_##f: t = #f; break;
@@ -42,10 +44,11 @@ static u8 * format_sfp_id (u8 * s, va_list * args)
return format (s, "%s", t);
}
-static u8 * format_sfp_compatibility (u8 * s, va_list * args)
+static u8 *
+format_sfp_compatibility (u8 * s, va_list * args)
{
u32 c = va_arg (*args, u32);
- char * t = 0;
+ char *t = 0;
switch (c)
{
#define _(a,b,f) case SFP_COMPATIBILITY_##f: t = #f; break;
@@ -57,9 +60,14 @@ static u8 * format_sfp_compatibility (u8 * s, va_list * args)
return format (s, "%s", t);
}
-u32 sfp_is_comatible (sfp_eeprom_t * e, sfp_compatibility_t c)
+u32
+sfp_is_comatible (sfp_eeprom_t * e, sfp_compatibility_t c)
{
- static struct { u8 byte, bit; } t[] = {
+ static struct
+ {
+ u8 byte, bit;
+ } t[] =
+ {
#define _(a,b,f) { .byte = a, .bit = b, },
foreach_sfp_compatibility
#undef _
@@ -69,9 +77,10 @@ u32 sfp_is_comatible (sfp_eeprom_t * e, sfp_compatibility_t c)
return (e->compatibility[t[c].byte] & (1 << t[c].bit)) != 0;
}
-u8 * format_sfp_eeprom (u8 * s, va_list * args)
+u8 *
+format_sfp_eeprom (u8 * s, va_list * args)
{
- sfp_eeprom_t * e = va_arg (*args, sfp_eeprom_t *);
+ sfp_eeprom_t *e = va_arg (*args, sfp_eeprom_t *);
uword indent = format_get_indent (s);
int i;
@@ -85,13 +94,24 @@ u8 * format_sfp_eeprom (u8 * s, va_list * args)
s = format (s, "\n%Uvendor: %U, part %U",
format_white_space, indent,
- format_space_terminated, sizeof (e->vendor_name), e->vendor_name,
- format_space_terminated, sizeof (e->vendor_part_number), e->vendor_part_number);
- s = format (s, "\n%Urevision: %U, serial: %U, date code: %U",
- format_white_space, indent,
- format_space_terminated, sizeof (e->vendor_revision), e->vendor_revision,
- format_space_terminated, sizeof (e->vendor_serial_number), e->vendor_serial_number,
- format_space_terminated, sizeof (e->vendor_date_code), e->vendor_date_code);
+ format_space_terminated, sizeof (e->vendor_name),
+ e->vendor_name, format_space_terminated,
+ sizeof (e->vendor_part_number), e->vendor_part_number);
+ s =
+ format (s, "\n%Urevision: %U, serial: %U, date code: %U",
+ format_white_space, indent, format_space_terminated,
+ sizeof (e->vendor_revision), e->vendor_revision,
+ format_space_terminated, sizeof (e->vendor_serial_number),
+ e->vendor_serial_number, format_space_terminated,
+ sizeof (e->vendor_date_code), e->vendor_date_code);
return s;
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
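
sfp_is_comatible() above resolves each compatibility code through a static {byte, bit} table and tests one bit of the EEPROM's compatibility byte array. A reduced sketch of the same lookup, reusing two positions that appear in sfp.h's foreach_sfp_compatibility list ((3,1) = 1g_base_lx, (3,0) = 1g_base_sx); everything else is illustrative:

#include <stdint.h>

typedef struct
{
  uint8_t byte, bit;
} compat_pos_t;

/* Positions lifted from the foreach_sfp_compatibility list. */
static const compat_pos_t pos[] = {
  {.byte = 3,.bit = 1},		/* 1g_base_lx */
  {.byte = 3,.bit = 0},		/* 1g_base_sx */
};

/* Return nonzero if the EEPROM advertises the given compatibility. */
static int
compat_bit_set (const uint8_t * compat_bytes, unsigned code)
{
  compat_pos_t p = pos[code];
  return (compat_bytes[p.byte] & (1u << p.bit)) != 0;
}
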
diff --git a/vnet/vnet/devices/nic/sfp.h b/vnet/vnet/devices/nic/sfp.h
index 9d31ad31f95..a1ac7997a44 100644
--- a/vnet/vnet/devices/nic/sfp.h
+++ b/vnet/vnet/devices/nic/sfp.h
@@ -24,13 +24,15 @@
_ (on_motherboard) \
_ (sfp)
-typedef enum {
+typedef enum
+{
#define _(f) SFP_ID_##f,
foreach_sfp_id
#undef _
} sfp_id_t;
-typedef struct {
+typedef struct
+{
u8 id;
u8 extended_id;
u8 connector_type;
@@ -92,11 +94,12 @@ sfp_eeprom_is_valid (sfp_eeprom_t * e)
_ (3, 1, 1g_base_lx) \
_ (3, 0, 1g_base_sx)
-typedef enum {
+typedef enum
+{
#define _(a,b,f) SFP_COMPATIBILITY_##f,
foreach_sfp_compatibility
#undef _
- SFP_N_COMPATIBILITY,
+ SFP_N_COMPATIBILITY,
} sfp_compatibility_t;
u32 sfp_is_comatible (sfp_eeprom_t * e, sfp_compatibility_t c);
@@ -104,3 +107,11 @@ u32 sfp_is_comatible (sfp_eeprom_t * e, sfp_compatibility_t c);
format_function_t format_sfp_eeprom;
#endif /* included_vnet_optics_sfp_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
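
sfp.h and the other headers in this patch lean on the same foreach_/X-macro idiom: a single list expands once into an enum and once into a parallel string table, so the two cannot drift apart. A compilable toy version of the pattern (the demo names are ours):

#include <stdio.h>

/* One list, expanded twice below. */
#define foreach_demo_error \
  _(NO_BUFFERS, "no buffers") \
  _(RING_FULL,  "ring full")

typedef enum
{
#define _(sym, str) DEMO_ERROR_##sym,
  foreach_demo_error
#undef _
    DEMO_N_ERROR,
} demo_error_t;

static char *demo_error_strings[] = {
#define _(sym, str) str,
  foreach_demo_error
#undef _
};

int
main (void)
{
  /* Enum values index the string table by construction. */
  printf ("%s\n", demo_error_strings[DEMO_ERROR_RING_FULL]);
  return 0;
}
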
diff --git a/vnet/vnet/devices/ssvm/node.c b/vnet/vnet/devices/ssvm/node.c
index b182fef9373..e7d9792bd65 100644
--- a/vnet/vnet/devices/ssvm/node.c
+++ b/vnet/vnet/devices/ssvm/node.c
@@ -16,20 +16,22 @@
vlib_node_registration_t ssvm_eth_input_node;
-typedef struct {
+typedef struct
+{
u32 next_index;
u32 sw_if_index;
} ssvm_eth_input_trace_t;
/* packet trace format function */
-static u8 * format_ssvm_eth_input_trace (u8 * s, va_list * args)
+static u8 *
+format_ssvm_eth_input_trace (u8 * s, va_list * args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- ssvm_eth_input_trace_t * t = va_arg (*args, ssvm_eth_input_trace_t *);
-
+ ssvm_eth_input_trace_t *t = va_arg (*args, ssvm_eth_input_trace_t *);
+
s = format (s, "SSVM_ETH_INPUT: sw_if_index %d, next index %d",
- t->sw_if_index, t->next_index);
+ t->sw_if_index, t->next_index);
return s;
}
@@ -38,20 +40,22 @@ vlib_node_registration_t ssvm_eth_input_node;
#define foreach_ssvm_eth_input_error \
_(NO_BUFFERS, "Rx packet drops (no buffers)")
-typedef enum {
+typedef enum
+{
#define _(sym,str) SSVM_ETH_INPUT_ERROR_##sym,
foreach_ssvm_eth_input_error
#undef _
- SSVM_ETH_INPUT_N_ERROR,
+ SSVM_ETH_INPUT_N_ERROR,
} ssvm_eth_input_error_t;
-static char * ssvm_eth_input_error_strings[] = {
+static char *ssvm_eth_input_error_strings[] = {
#define _(sym,string) string,
foreach_ssvm_eth_input_error
#undef _
};
-typedef enum {
+typedef enum
+{
SSVM_ETH_INPUT_NEXT_DROP,
SSVM_ETH_INPUT_NEXT_ETHERNET_INPUT,
SSVM_ETH_INPUT_NEXT_IP4_INPUT,
@@ -60,15 +64,14 @@ typedef enum {
SSVM_ETH_INPUT_N_NEXT,
} ssvm_eth_input_next_t;
-static inline uword
+static inline uword
ssvm_eth_device_input (ssvm_eth_main_t * em,
- ssvm_private_t * intfc,
- vlib_node_runtime_t * node)
+ ssvm_private_t * intfc, vlib_node_runtime_t * node)
{
- ssvm_shared_header_t * sh = intfc->sh;
- vlib_main_t * vm = em->vlib_main;
- unix_shared_memory_queue_t * q;
- ssvm_eth_queue_elt_t * elt, * elts;
+ ssvm_shared_header_t *sh = intfc->sh;
+ vlib_main_t *vm = em->vlib_main;
+ unix_shared_memory_queue_t *q;
+ ssvm_eth_queue_elt_t *elt, *elts;
u32 elt_index;
u32 my_pid = intfc->my_pid;
int rx_queue_index;
@@ -79,32 +82,32 @@ ssvm_eth_device_input (ssvm_eth_main_t * em,
#else
u32 next_index = 0;
#endif
- vlib_buffer_free_list_t * fl;
- u32 n_left_to_next, * to_next;
+ vlib_buffer_free_list_t *fl;
+ u32 n_left_to_next, *to_next;
u32 next0;
u32 n_buffers;
u32 n_available;
u32 bi0, saved_bi0;
- vlib_buffer_t * b0, * prev;
+ vlib_buffer_t *b0, *prev;
u32 saved_cache_size = 0;
- ethernet_header_t * eh0;
+ ethernet_header_t *eh0;
u16 type0;
u32 n_rx_bytes = 0, l3_offset0;
- u32 cpu_index = os_get_cpu_number();
- u32 trace_cnt __attribute__((unused)) = vlib_get_trace_count (vm, node);
- volatile u32 * lock;
- u32 * elt_indices;
+ u32 cpu_index = os_get_cpu_number ();
+ u32 trace_cnt __attribute__ ((unused)) = vlib_get_trace_count (vm, node);
+ volatile u32 *lock;
+ u32 *elt_indices;
uword n_trace = vlib_get_trace_count (vm, node);
/* Either side down? buh-bye... */
- if (pointer_to_uword(sh->opaque [MASTER_ADMIN_STATE_INDEX]) == 0 ||
- pointer_to_uword(sh->opaque [SLAVE_ADMIN_STATE_INDEX]) == 0)
+ if (pointer_to_uword (sh->opaque[MASTER_ADMIN_STATE_INDEX]) == 0 ||
+ pointer_to_uword (sh->opaque[SLAVE_ADMIN_STATE_INDEX]) == 0)
return 0;
if (intfc->i_am_master)
- q = (unix_shared_memory_queue_t *)(sh->opaque [TO_MASTER_Q_INDEX]);
+ q = (unix_shared_memory_queue_t *) (sh->opaque[TO_MASTER_Q_INDEX]);
else
- q = (unix_shared_memory_queue_t *)(sh->opaque [TO_SLAVE_Q_INDEX]);
+ q = (unix_shared_memory_queue_t *) (sh->opaque[TO_SLAVE_Q_INDEX]);
/* Nothing to do? */
if (q->cursize == 0)
@@ -113,34 +116,34 @@ ssvm_eth_device_input (ssvm_eth_main_t * em,
fl = vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
vec_reset_length (intfc->rx_queue);
-
+
lock = (u32 *) q;
while (__sync_lock_test_and_set (lock, 1))
;
while (q->cursize > 0)
{
- unix_shared_memory_queue_sub_raw (q, (u8 *)&elt_index);
- ASSERT(elt_index < 2048);
+ unix_shared_memory_queue_sub_raw (q, (u8 *) & elt_index);
+ ASSERT (elt_index < 2048);
vec_add1 (intfc->rx_queue, elt_index);
}
- CLIB_MEMORY_BARRIER();
+ CLIB_MEMORY_BARRIER ();
*lock = 0;
n_present_in_cache = vec_len (em->buffer_cache);
if (vec_len (em->buffer_cache) < vec_len (intfc->rx_queue) * 2)
{
- vec_validate (em->buffer_cache,
- n_to_alloc + vec_len (em->buffer_cache) - 1);
- n_allocated =
- vlib_buffer_alloc (vm, &em->buffer_cache [n_present_in_cache],
- n_to_alloc);
-
+ vec_validate (em->buffer_cache,
+ n_to_alloc + vec_len (em->buffer_cache) - 1);
+ n_allocated =
+ vlib_buffer_alloc (vm, &em->buffer_cache[n_present_in_cache],
+ n_to_alloc);
+
n_present_in_cache += n_allocated;
_vec_len (em->buffer_cache) = n_present_in_cache;
}
- elts = (ssvm_eth_queue_elt_t *) (sh->opaque [CHUNK_POOL_INDEX]);
+ elts = (ssvm_eth_queue_elt_t *) (sh->opaque[CHUNK_POOL_INDEX]);
n_buffers = vec_len (intfc->rx_queue);
rx_queue_index = 0;
@@ -148,119 +151,118 @@ ssvm_eth_device_input (ssvm_eth_main_t * em,
while (n_buffers > 0)
{
vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
+
while (n_buffers > 0 && n_left_to_next > 0)
- {
- elt = elts + intfc->rx_queue[rx_queue_index];
-
- saved_cache_size = n_present_in_cache;
- if (PREDICT_FALSE(saved_cache_size == 0))
+ {
+ elt = elts + intfc->rx_queue[rx_queue_index];
+
+ saved_cache_size = n_present_in_cache;
+ if (PREDICT_FALSE (saved_cache_size == 0))
{
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
goto out;
}
- saved_bi0 = bi0 = em->buffer_cache [--n_present_in_cache];
- b0 = vlib_get_buffer (vm, bi0);
- prev = 0;
-
- while (1)
- {
- vlib_buffer_init_for_free_list (b0, fl);
-
- b0->current_data = elt->current_data_hint;
- b0->current_length = elt->length_this_buffer;
- b0->total_length_not_including_first_buffer =
- elt->total_length_not_including_first_buffer;
-
- clib_memcpy (b0->data + b0->current_data, elt->data,
- b0->current_length);
-
- if (PREDICT_FALSE(prev != 0))
- prev->next_buffer = bi0;
-
- if (PREDICT_FALSE(elt->flags & SSVM_BUFFER_NEXT_PRESENT))
- {
- prev = b0;
- if (PREDICT_FALSE(n_present_in_cache == 0))
+ saved_bi0 = bi0 = em->buffer_cache[--n_present_in_cache];
+ b0 = vlib_get_buffer (vm, bi0);
+ prev = 0;
+
+ while (1)
+ {
+ vlib_buffer_init_for_free_list (b0, fl);
+
+ b0->current_data = elt->current_data_hint;
+ b0->current_length = elt->length_this_buffer;
+ b0->total_length_not_including_first_buffer =
+ elt->total_length_not_including_first_buffer;
+
+ clib_memcpy (b0->data + b0->current_data, elt->data,
+ b0->current_length);
+
+ if (PREDICT_FALSE (prev != 0))
+ prev->next_buffer = bi0;
+
+ if (PREDICT_FALSE (elt->flags & SSVM_BUFFER_NEXT_PRESENT))
+ {
+ prev = b0;
+ if (PREDICT_FALSE (n_present_in_cache == 0))
{
- vlib_put_next_frame (vm, node, next_index,
+ vlib_put_next_frame (vm, node, next_index,
n_left_to_next);
goto out;
}
- bi0 = em->buffer_cache [--n_present_in_cache];
- b0 = vlib_get_buffer (vm, bi0);
- }
- else
- break;
- }
-
- saved_cache_size = n_present_in_cache;
-
- to_next[0] = saved_bi0;
- to_next++;
- n_left_to_next--;
-
- b0 = vlib_get_buffer (vm, saved_bi0);
- eh0 = vlib_buffer_get_current (b0);
-
- type0 = clib_net_to_host_u16 (eh0->type);
-
- next0 = SSVM_ETH_INPUT_NEXT_ETHERNET_INPUT;
-
- if (type0 == ETHERNET_TYPE_IP4)
- next0 = SSVM_ETH_INPUT_NEXT_IP4_INPUT;
- else if (type0 == ETHERNET_TYPE_IP6)
- next0 = SSVM_ETH_INPUT_NEXT_IP6_INPUT;
- else if (type0 == ETHERNET_TYPE_MPLS_UNICAST)
- next0 = SSVM_ETH_INPUT_NEXT_MPLS_INPUT;
-
+ bi0 = em->buffer_cache[--n_present_in_cache];
+ b0 = vlib_get_buffer (vm, bi0);
+ }
+ else
+ break;
+ }
+
+ saved_cache_size = n_present_in_cache;
+
+ to_next[0] = saved_bi0;
+ to_next++;
+ n_left_to_next--;
+
+ b0 = vlib_get_buffer (vm, saved_bi0);
+ eh0 = vlib_buffer_get_current (b0);
+
+ type0 = clib_net_to_host_u16 (eh0->type);
+
+ next0 = SSVM_ETH_INPUT_NEXT_ETHERNET_INPUT;
+
+ if (type0 == ETHERNET_TYPE_IP4)
+ next0 = SSVM_ETH_INPUT_NEXT_IP4_INPUT;
+ else if (type0 == ETHERNET_TYPE_IP6)
+ next0 = SSVM_ETH_INPUT_NEXT_IP6_INPUT;
+ else if (type0 == ETHERNET_TYPE_MPLS_UNICAST)
+ next0 = SSVM_ETH_INPUT_NEXT_MPLS_INPUT;
+
l3_offset0 = ((next0 == SSVM_ETH_INPUT_NEXT_IP4_INPUT ||
next0 == SSVM_ETH_INPUT_NEXT_IP6_INPUT ||
- next0 == SSVM_ETH_INPUT_NEXT_MPLS_INPUT) ?
+ next0 == SSVM_ETH_INPUT_NEXT_MPLS_INPUT) ?
sizeof (ethernet_header_t) : 0);
-
- n_rx_bytes += b0->current_length
- + b0->total_length_not_including_first_buffer;
-
- b0->current_data += l3_offset0;
- b0->current_length -= l3_offset0;
- b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
-
- vnet_buffer(b0)->sw_if_index[VLIB_RX] = intfc->vlib_hw_if_index;
- vnet_buffer(b0)->sw_if_index[VLIB_TX] = (u32)~0;
-
- /*
- * Turn this on if you run into
- * "bad monkey" contexts, and you want to know exactly
- * which nodes they've visited... See main.c...
- */
- VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b0);
-
- if (PREDICT_FALSE(n_trace > 0))
- {
- ssvm_eth_input_trace_t *tr;
-
- vlib_trace_buffer (vm, node, next0,
- b0, /* follow_chain */ 1);
- vlib_set_trace_count (vm, node, --n_trace);
-
- tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
-
- tr->next_index = next0;
- tr->sw_if_index = intfc->vlib_hw_if_index;
- }
-
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi0, next0);
- n_buffers--;
- rx_queue_index++;
- }
+
+ n_rx_bytes += b0->current_length
+ + b0->total_length_not_including_first_buffer;
+
+ b0->current_data += l3_offset0;
+ b0->current_length -= l3_offset0;
+ b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
+
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] = intfc->vlib_hw_if_index;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+
+ /*
+ * Turn this on if you run into
+ * "bad monkey" contexts, and you want to know exactly
+ * which nodes they've visited... See main.c...
+ */
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
+
+ if (PREDICT_FALSE (n_trace > 0))
+ {
+ ssvm_eth_input_trace_t *tr;
+
+ vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 1);
+ vlib_set_trace_count (vm, node, --n_trace);
+
+ tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
+
+ tr->next_index = next0;
+ tr->sw_if_index = intfc->vlib_hw_if_index;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi0, next0);
+ n_buffers--;
+ rx_queue_index++;
+ }
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
-
- out:
+
+out:
if (em->buffer_cache)
_vec_len (em->buffer_cache) = saved_cache_size;
else
@@ -268,48 +270,47 @@ ssvm_eth_device_input (ssvm_eth_main_t * em,
ssvm_lock (sh, my_pid, 2);
- ASSERT(vec_len(intfc->rx_queue) > 0);
+ ASSERT (vec_len (intfc->rx_queue) > 0);
- n_available = (u32)pointer_to_uword(sh->opaque[CHUNK_POOL_NFREE]);
- elt_indices = (u32 *)(sh->opaque[CHUNK_POOL_FREELIST_INDEX]);
+ n_available = (u32) pointer_to_uword (sh->opaque[CHUNK_POOL_NFREE]);
+ elt_indices = (u32 *) (sh->opaque[CHUNK_POOL_FREELIST_INDEX]);
- clib_memcpy (&elt_indices[n_available], intfc->rx_queue,
- vec_len (intfc->rx_queue) * sizeof (u32));
+ clib_memcpy (&elt_indices[n_available], intfc->rx_queue,
+ vec_len (intfc->rx_queue) * sizeof (u32));
n_available += vec_len (intfc->rx_queue);
- sh->opaque[CHUNK_POOL_NFREE] = uword_to_pointer(n_available, void* );
+ sh->opaque[CHUNK_POOL_NFREE] = uword_to_pointer (n_available, void *);
ssvm_unlock (sh);
vlib_error_count (vm, node->node_index, SSVM_ETH_INPUT_ERROR_NO_BUFFERS,
n_buffers);
- vlib_increment_combined_counter
- (vnet_get_main()->interface_main.combined_sw_if_counters
- + VNET_INTERFACE_COUNTER_RX, cpu_index,
- intfc->vlib_hw_if_index,
- rx_queue_index, n_rx_bytes);
+ vlib_increment_combined_counter
+ (vnet_get_main ()->interface_main.combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX, cpu_index,
+ intfc->vlib_hw_if_index, rx_queue_index, n_rx_bytes);
return rx_queue_index;
}
-
+
static uword
ssvm_eth_input_node_fn (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
{
- ssvm_eth_main_t * em = &ssvm_eth_main;
- ssvm_private_t * intfc;
+ ssvm_eth_main_t *em = &ssvm_eth_main;
+ ssvm_private_t *intfc;
uword n_rx_packets = 0;
vec_foreach (intfc, em->intfcs)
- {
- n_rx_packets += ssvm_eth_device_input (em, intfc, node);
- }
+ {
+ n_rx_packets += ssvm_eth_device_input (em, intfc, node);
+ }
return n_rx_packets;
}
+/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ssvm_eth_input_node) = {
.function = ssvm_eth_input_node_fn,
.name = "ssvm_eth_input",
@@ -317,7 +318,7 @@ VLIB_REGISTER_NODE (ssvm_eth_input_node) = {
.format_trace = format_ssvm_eth_input_trace,
.type = VLIB_NODE_TYPE_INPUT,
.state = VLIB_NODE_STATE_DISABLED,
-
+
.n_errors = ARRAY_LEN(ssvm_eth_input_error_strings),
.error_strings = ssvm_eth_input_error_strings,
@@ -334,4 +335,13 @@ VLIB_REGISTER_NODE (ssvm_eth_input_node) = {
};
VLIB_NODE_FUNCTION_MULTIARCH (ssvm_eth_input_node, ssvm_eth_input_node_fn)
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
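
The rx path in node.c treats the first word of the shared-memory queue as a spinlock: take it with __sync_lock_test_and_set, drain the queue, then publish with a memory barrier before clearing the lock word. A stripped-down sketch of that lock/drain/unlock sequence with a stand-in queue type (the real unix_shared_memory_queue_t and its sub_raw dequeue differ in detail):

#include <stdint.h>

typedef struct
{
  volatile uint32_t lock;	/* word 0 doubles as the spinlock */
  uint32_t cursize;
  uint32_t elts[2048];
} demo_queue_t;

static uint32_t
drain_queue (demo_queue_t * q, uint32_t * out)
{
  uint32_t n = 0;

  while (__sync_lock_test_and_set (&q->lock, 1))
    ;				/* spin until the lock word is ours */

  while (q->cursize > 0)
    out[n++] = q->elts[--q->cursize];

  __sync_synchronize ();	/* drained state visible before unlock */
  q->lock = 0;
  return n;
}
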
diff --git a/vnet/vnet/devices/ssvm/ssvm_eth.c b/vnet/vnet/devices/ssvm/ssvm_eth.c
index 7c65e2121f5..f35127bd7ff 100644
--- a/vnet/vnet/devices/ssvm/ssvm_eth.c
+++ b/vnet/vnet/devices/ssvm/ssvm_eth.c
@@ -21,26 +21,27 @@ _(RING_FULL, "Tx packet drops (ring full)") \
_(NO_BUFFERS, "Tx packet drops (no buffers)") \
_(ADMIN_DOWN, "Tx packet drops (admin down)")
-typedef enum {
+typedef enum
+{
#define _(f,s) SSVM_ETH_TX_ERROR_##f,
foreach_ssvm_eth_tx_func_error
#undef _
- SSVM_ETH_TX_N_ERROR,
+ SSVM_ETH_TX_N_ERROR,
} ssvm_eth_tx_func_error_t;
-static u32 ssvm_eth_flag_change (vnet_main_t * vnm,
- vnet_hw_interface_t * hi,
- u32 flags);
+static u32 ssvm_eth_flag_change (vnet_main_t * vnm,
+ vnet_hw_interface_t * hi, u32 flags);
-int ssvm_eth_create (ssvm_eth_main_t * em, u8 * name, int is_master)
+int
+ssvm_eth_create (ssvm_eth_main_t * em, u8 * name, int is_master)
{
- ssvm_private_t * intfc;
- void * oldheap;
- clib_error_t * e;
- unix_shared_memory_queue_t * q;
- ssvm_shared_header_t * sh;
- ssvm_eth_queue_elt_t * elts;
- u32 * elt_indices;
+ ssvm_private_t *intfc;
+ void *oldheap;
+ clib_error_t *e;
+ unix_shared_memory_queue_t *q;
+ ssvm_shared_header_t *sh;
+ ssvm_eth_queue_elt_t *elts;
+ u32 *elt_indices;
u8 enet_addr[6];
int i, rv;
@@ -49,37 +50,37 @@ int ssvm_eth_create (ssvm_eth_main_t * em, u8 * name, int is_master)
intfc->ssvm_size = em->segment_size;
intfc->i_am_master = 1;
intfc->name = name;
- intfc->my_pid = getpid();
+ intfc->my_pid = getpid ();
if (is_master == 0)
{
- rv = ssvm_slave_init (intfc, 20 /* timeout in seconds */);
+ rv = ssvm_slave_init (intfc, 20 /* timeout in seconds */ );
if (rv < 0)
- return rv;
+ return rv;
goto create_vnet_interface;
}
intfc->requested_va = em->next_base_va;
em->next_base_va += em->segment_size;
- rv = ssvm_master_init (intfc, intfc - em->intfcs /* master index */);
+ rv = ssvm_master_init (intfc, intfc - em->intfcs /* master index */ );
if (rv < 0)
return rv;
-
+
/* OK, segment created, set up queues and so forth. */
-
+
sh = intfc->sh;
oldheap = ssvm_push_heap (sh);
q = unix_shared_memory_queue_init (em->queue_elts, sizeof (u32),
- 0 /* consumer pid not interesting */,
- 0 /* signal not sent */);
- sh->opaque [TO_MASTER_Q_INDEX] = (void *)q;
+ 0 /* consumer pid not interesting */ ,
+ 0 /* signal not sent */ );
+ sh->opaque[TO_MASTER_Q_INDEX] = (void *) q;
q = unix_shared_memory_queue_init (em->queue_elts, sizeof (u32),
- 0 /* consumer pid not interesting */,
- 0 /* signal not sent */);
- sh->opaque [TO_SLAVE_Q_INDEX] = (void *)q;
-
- /*
+ 0 /* consumer pid not interesting */ ,
+ 0 /* signal not sent */ );
+ sh->opaque[TO_SLAVE_Q_INDEX] = (void *) q;
+
+ /*
* Preallocate the requested number of buffer chunks
* There must be a better way to do this, etc.
* Add some slop to avoid pool reallocation, which will not go well
@@ -89,17 +90,17 @@ int ssvm_eth_create (ssvm_eth_main_t * em, u8 * name, int is_master)
vec_validate_aligned (elts, em->nbuffers - 1, CLIB_CACHE_LINE_BYTES);
vec_validate_aligned (elt_indices, em->nbuffers - 1, CLIB_CACHE_LINE_BYTES);
-
+
for (i = 0; i < em->nbuffers; i++)
elt_indices[i] = i;
- sh->opaque [CHUNK_POOL_INDEX] = (void *) elts;
- sh->opaque [CHUNK_POOL_FREELIST_INDEX] = (void *) elt_indices;
- sh->opaque [CHUNK_POOL_NFREE] = (void *)(uword) em->nbuffers;
-
+ sh->opaque[CHUNK_POOL_INDEX] = (void *) elts;
+ sh->opaque[CHUNK_POOL_FREELIST_INDEX] = (void *) elt_indices;
+ sh->opaque[CHUNK_POOL_NFREE] = (void *) (uword) em->nbuffers;
+
ssvm_pop_heap (oldheap);
- create_vnet_interface:
+create_vnet_interface:
sh = intfc->sh;
@@ -108,13 +109,11 @@ int ssvm_eth_create (ssvm_eth_main_t * em, u8 * name, int is_master)
enet_addr[1] = 0xFE;
enet_addr[2] = is_master;
enet_addr[5] = sh->master_index;
-
+
e = ethernet_register_interface
- (em->vnet_main, ssvm_eth_device_class.index,
- intfc - em->intfcs,
+ (em->vnet_main, ssvm_eth_device_class.index, intfc - em->intfcs,
/* ethernet address */ enet_addr,
- &intfc->vlib_hw_if_index,
- ssvm_eth_flag_change);
+ &intfc->vlib_hw_if_index, ssvm_eth_flag_change);
if (e)
{
@@ -124,54 +123,55 @@ int ssvm_eth_create (ssvm_eth_main_t * em, u8 * name, int is_master)
}
/* Declare link up */
- vnet_hw_interface_set_flags (em->vnet_main, intfc->vlib_hw_if_index,
- VNET_HW_INTERFACE_FLAG_LINK_UP);
+ vnet_hw_interface_set_flags (em->vnet_main, intfc->vlib_hw_if_index,
+ VNET_HW_INTERFACE_FLAG_LINK_UP);
/* Let the games begin... */
if (is_master)
- sh->ready = 1;
+ sh->ready = 1;
return 0;
}
static clib_error_t *
ssvm_config (vlib_main_t * vm, unformat_input_t * input)
{
- u8 * name;
+ u8 *name;
int is_master = 1;
int i, rv;
- ssvm_eth_main_t * em = &ssvm_eth_main;
+ ssvm_eth_main_t *em = &ssvm_eth_main;
- while (unformat_check_input(input) != UNFORMAT_END_OF_INPUT)
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (input, "base-va %llx", &em->next_base_va))
- ;
+ ;
else if (unformat (input, "segment-size %lld", &em->segment_size))
- em->segment_size = 1ULL << (max_log2 (em->segment_size));
+ em->segment_size = 1ULL << (max_log2 (em->segment_size));
else if (unformat (input, "nbuffers %lld", &em->nbuffers))
- ;
+ ;
else if (unformat (input, "queue-elts %lld", &em->queue_elts))
- ;
+ ;
else if (unformat (input, "slave"))
- is_master = 0;
+ is_master = 0;
else if (unformat (input, "%s", &name))
- vec_add1 (em->names, name);
+ vec_add1 (em->names, name);
else
- break;
+ break;
}
/* No configured instances, we're done... */
if (vec_len (em->names) == 0)
- return 0;
+ return 0;
for (i = 0; i < vec_len (em->names); i++)
{
rv = ssvm_eth_create (em, em->names[i], is_master);
if (rv < 0)
- return clib_error_return (0, "ssvm_eth_create '%s' failed, error %d",
- em->names[i], rv);
+ return clib_error_return (0, "ssvm_eth_create '%s' failed, error %d",
+ em->names[i], rv);
}
- vlib_node_set_state (vm, ssvm_eth_input_node.index, VLIB_NODE_STATE_POLLING);
+ vlib_node_set_state (vm, ssvm_eth_input_node.index,
+ VLIB_NODE_STATE_POLLING);
return 0;
}
@@ -179,27 +179,28 @@ ssvm_config (vlib_main_t * vm, unformat_input_t * input)
VLIB_CONFIG_FUNCTION (ssvm_config, "ssvm_eth");
-static clib_error_t * ssvm_eth_init (vlib_main_t * vm)
+static clib_error_t *
+ssvm_eth_init (vlib_main_t * vm)
{
- ssvm_eth_main_t * em = &ssvm_eth_main;
+ ssvm_eth_main_t *em = &ssvm_eth_main;
- if (((sizeof(ssvm_eth_queue_elt_t) / CLIB_CACHE_LINE_BYTES)
- * CLIB_CACHE_LINE_BYTES) != sizeof(ssvm_eth_queue_elt_t))
+ if (((sizeof (ssvm_eth_queue_elt_t) / CLIB_CACHE_LINE_BYTES)
+ * CLIB_CACHE_LINE_BYTES) != sizeof (ssvm_eth_queue_elt_t))
clib_warning ("ssvm_eth_queue_elt_t size %d not a multiple of %d",
- sizeof(ssvm_eth_queue_elt_t), CLIB_CACHE_LINE_BYTES);
+ sizeof (ssvm_eth_queue_elt_t), CLIB_CACHE_LINE_BYTES);
em->vlib_main = vm;
- em->vnet_main = vnet_get_main();
+ em->vnet_main = vnet_get_main ();
em->elog_main = &vm->elog_main;
/* default config param values... */
em->next_base_va = 0x600000000ULL;
- /*
+ /*
* Allocate 2 full superframes in each dir (256 x 2 x 2 x 2048 bytes),
* 2mb; double that so we have plenty of space... 4mb
*/
- em->segment_size = 8<<20;
+ em->segment_size = 8 << 20;
em->nbuffers = 1024;
em->queue_elts = 512;
return 0;
@@ -207,13 +208,14 @@ static clib_error_t * ssvm_eth_init (vlib_main_t * vm)
VLIB_INIT_FUNCTION (ssvm_eth_init);
-static char * ssvm_eth_tx_func_error_strings[] = {
+static char *ssvm_eth_tx_func_error_strings[] = {
#define _(n,s) s,
- foreach_ssvm_eth_tx_func_error
+ foreach_ssvm_eth_tx_func_error
#undef _
};
-static u8 * format_ssvm_eth_device_name (u8 * s, va_list * args)
+static u8 *
+format_ssvm_eth_device_name (u8 * s, va_list * args)
{
u32 i = va_arg (*args, u32);
@@ -221,13 +223,15 @@ static u8 * format_ssvm_eth_device_name (u8 * s, va_list * args)
return s;
}
-static u8 * format_ssvm_eth_device (u8 * s, va_list * args)
+static u8 *
+format_ssvm_eth_device (u8 * s, va_list * args)
{
s = format (s, "SSVM Ethernet");
return s;
}
-static u8 * format_ssvm_eth_tx_trace (u8 * s, va_list * args)
+static u8 *
+format_ssvm_eth_tx_trace (u8 * s, va_list * args)
{
s = format (s, "Unimplemented...");
return s;
@@ -236,19 +240,18 @@ static u8 * format_ssvm_eth_tx_trace (u8 * s, va_list * args)
static uword
ssvm_eth_interface_tx (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * f)
+ vlib_node_runtime_t * node, vlib_frame_t * f)
{
- ssvm_eth_main_t * em = &ssvm_eth_main;
- vnet_interface_output_runtime_t * rd = (void *) node->runtime_data;
- ssvm_private_t * intfc = vec_elt_at_index (em->intfcs, rd->dev_instance);
- ssvm_shared_header_t * sh = intfc->sh;
- unix_shared_memory_queue_t * q;
- u32 * from;
+ ssvm_eth_main_t *em = &ssvm_eth_main;
+ vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
+ ssvm_private_t *intfc = vec_elt_at_index (em->intfcs, rd->dev_instance);
+ ssvm_shared_header_t *sh = intfc->sh;
+ unix_shared_memory_queue_t *q;
+ u32 *from;
u32 n_left;
- ssvm_eth_queue_elt_t * elts, * elt, * prev_elt;
+ ssvm_eth_queue_elt_t *elts, *elt, *prev_elt;
u32 my_pid = intfc->my_pid;
- vlib_buffer_t * b0;
+ vlib_buffer_t *b0;
u32 bi0;
u32 size_this_buffer;
u32 chunks_this_buffer;
@@ -259,12 +262,12 @@ ssvm_eth_interface_tx (vlib_main_t * vm,
volatile u32 *queue_lock;
u32 n_to_alloc = VLIB_FRAME_SIZE;
u32 n_allocated, n_present_in_cache, n_available;
- u32 * elt_indices;
-
+ u32 *elt_indices;
+
if (i_am_master)
- q = (unix_shared_memory_queue_t *)sh->opaque [TO_SLAVE_Q_INDEX];
+ q = (unix_shared_memory_queue_t *) sh->opaque[TO_SLAVE_Q_INDEX];
else
- q = (unix_shared_memory_queue_t *)sh->opaque [TO_MASTER_Q_INDEX];
+ q = (unix_shared_memory_queue_t *) sh->opaque[TO_MASTER_Q_INDEX];
queue_lock = (u32 *) q;
@@ -276,8 +279,8 @@ ssvm_eth_interface_tx (vlib_main_t * vm,
n_present_in_cache = vec_len (em->chunk_cache);
/* admin / link up/down check */
- if (sh->opaque [MASTER_ADMIN_STATE_INDEX] == 0 ||
- sh->opaque [SLAVE_ADMIN_STATE_INDEX] == 0)
+ if (sh->opaque[MASTER_ADMIN_STATE_INDEX] == 0 ||
+ sh->opaque[SLAVE_ADMIN_STATE_INDEX] == 0)
{
interface_down = 1;
goto out;
@@ -285,27 +288,26 @@ ssvm_eth_interface_tx (vlib_main_t * vm,
ssvm_lock (sh, my_pid, 1);
- elts = (ssvm_eth_queue_elt_t *) (sh->opaque [CHUNK_POOL_INDEX]);
- elt_indices = (u32 *) (sh->opaque [CHUNK_POOL_FREELIST_INDEX]);
- n_available = (u32) pointer_to_uword(sh->opaque [CHUNK_POOL_NFREE]);
+ elts = (ssvm_eth_queue_elt_t *) (sh->opaque[CHUNK_POOL_INDEX]);
+ elt_indices = (u32 *) (sh->opaque[CHUNK_POOL_FREELIST_INDEX]);
+ n_available = (u32) pointer_to_uword (sh->opaque[CHUNK_POOL_NFREE]);
- if (n_present_in_cache < n_left*2)
+ if (n_present_in_cache < n_left * 2)
{
- vec_validate (em->chunk_cache,
- n_to_alloc + n_present_in_cache - 1);
+ vec_validate (em->chunk_cache, n_to_alloc + n_present_in_cache - 1);
n_allocated = n_to_alloc < n_available ? n_to_alloc : n_available;
- if (PREDICT_TRUE(n_allocated > 0))
+ if (PREDICT_TRUE (n_allocated > 0))
{
clib_memcpy (&em->chunk_cache[n_present_in_cache],
- &elt_indices[n_available - n_allocated],
- sizeof(u32) * n_allocated);
+ &elt_indices[n_available - n_allocated],
+ sizeof (u32) * n_allocated);
}
n_present_in_cache += n_allocated;
n_available -= n_allocated;
- sh->opaque [CHUNK_POOL_NFREE] = uword_to_pointer(n_available, void*);
+ sh->opaque[CHUNK_POOL_NFREE] = uword_to_pointer (n_available, void *);
_vec_len (em->chunk_cache) = n_present_in_cache;
}
@@ -315,115 +317,117 @@ ssvm_eth_interface_tx (vlib_main_t * vm,
{
bi0 = from[0];
b0 = vlib_get_buffer (vm, bi0);
-
+
size_this_buffer = vlib_buffer_length_in_chain (vm, b0);
chunks_this_buffer = (size_this_buffer + (SSVM_BUFFER_SIZE - 1))
- / SSVM_BUFFER_SIZE;
+ / SSVM_BUFFER_SIZE;
/* If we're not going to be able to enqueue the buffer, tail drop. */
if (q->cursize >= q->maxsize)
- {
- is_ring_full = 1;
- break;
- }
+ {
+ is_ring_full = 1;
+ break;
+ }
prev_elt = 0;
elt_index = ~0;
for (i = 0; i < chunks_this_buffer; i++)
- {
- if (PREDICT_FALSE (n_present_in_cache == 0))
- goto out;
-
- elt_index = em->chunk_cache[--n_present_in_cache];
- elt = elts + elt_index;
-
- elt->type = SSVM_PACKET_TYPE;
- elt->flags = 0;
- elt->total_length_not_including_first_buffer =
- b0->total_length_not_including_first_buffer;
- elt->length_this_buffer = b0->current_length;
- elt->current_data_hint = b0->current_data;
- elt->owner = !i_am_master;
- elt->tag = 1;
-
- clib_memcpy (elt->data, b0->data + b0->current_data, b0->current_length);
-
- if (PREDICT_FALSE (prev_elt != 0))
- prev_elt->next_index = elt - elts;
-
- if (PREDICT_FALSE(i < (chunks_this_buffer-1)))
- {
- elt->flags = SSVM_BUFFER_NEXT_PRESENT;
- ASSERT (b0->flags & VLIB_BUFFER_NEXT_PRESENT);
- b0 = vlib_get_buffer (vm, b0->next_buffer);
- }
- prev_elt = elt;
- }
+ {
+ if (PREDICT_FALSE (n_present_in_cache == 0))
+ goto out;
+
+ elt_index = em->chunk_cache[--n_present_in_cache];
+ elt = elts + elt_index;
+
+ elt->type = SSVM_PACKET_TYPE;
+ elt->flags = 0;
+ elt->total_length_not_including_first_buffer =
+ b0->total_length_not_including_first_buffer;
+ elt->length_this_buffer = b0->current_length;
+ elt->current_data_hint = b0->current_data;
+ elt->owner = !i_am_master;
+ elt->tag = 1;
+
+ clib_memcpy (elt->data, b0->data + b0->current_data,
+ b0->current_length);
+
+ if (PREDICT_FALSE (prev_elt != 0))
+ prev_elt->next_index = elt - elts;
+
+ if (PREDICT_FALSE (i < (chunks_this_buffer - 1)))
+ {
+ elt->flags = SSVM_BUFFER_NEXT_PRESENT;
+ ASSERT (b0->flags & VLIB_BUFFER_NEXT_PRESENT);
+ b0 = vlib_get_buffer (vm, b0->next_buffer);
+ }
+ prev_elt = elt;
+ }
while (__sync_lock_test_and_set (queue_lock, 1))
- ;
-
- unix_shared_memory_queue_add_raw (q, (u8 *)&elt_index);
- CLIB_MEMORY_BARRIER();
+ ;
+
+ unix_shared_memory_queue_add_raw (q, (u8 *) & elt_index);
+ CLIB_MEMORY_BARRIER ();
*queue_lock = 0;
from++;
n_left--;
}
- out:
- if (PREDICT_FALSE(n_left))
+out:
+ if (PREDICT_FALSE (n_left))
{
if (is_ring_full)
- vlib_error_count (vm, node->node_index, SSVM_ETH_TX_ERROR_RING_FULL,
- n_left);
+ vlib_error_count (vm, node->node_index, SSVM_ETH_TX_ERROR_RING_FULL,
+ n_left);
else if (interface_down)
- vlib_error_count (vm, node->node_index, SSVM_ETH_TX_ERROR_ADMIN_DOWN,
- n_left);
+ vlib_error_count (vm, node->node_index, SSVM_ETH_TX_ERROR_ADMIN_DOWN,
+ n_left);
else
- vlib_error_count (vm, node->node_index, SSVM_ETH_TX_ERROR_NO_BUFFERS,
- n_left);
+ vlib_error_count (vm, node->node_index, SSVM_ETH_TX_ERROR_NO_BUFFERS,
+ n_left);
vlib_buffer_free (vm, from, n_left);
}
else
- vlib_buffer_free (vm, vlib_frame_vector_args (f), f->n_vectors);
+ vlib_buffer_free (vm, vlib_frame_vector_args (f), f->n_vectors);
- if (PREDICT_TRUE(vec_len(em->chunk_cache)))
- _vec_len(em->chunk_cache) = n_present_in_cache;
+ if (PREDICT_TRUE (vec_len (em->chunk_cache)))
+ _vec_len (em->chunk_cache) = n_present_in_cache;
return f->n_vectors;
}
-static void ssvm_eth_clear_hw_interface_counters (u32 instance)
+static void
+ssvm_eth_clear_hw_interface_counters (u32 instance)
{
/* Nothing for now */
}
static clib_error_t *
-ssvm_eth_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
+ssvm_eth_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
+ u32 flags)
{
- vnet_hw_interface_t * hif = vnet_get_hw_interface (vnm, hw_if_index);
+ vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
- ssvm_eth_main_t * em = &ssvm_eth_main;
- ssvm_private_t * intfc = vec_elt_at_index (em->intfcs, hif->dev_instance);
- ssvm_shared_header_t * sh;
+ ssvm_eth_main_t *em = &ssvm_eth_main;
+ ssvm_private_t *intfc = vec_elt_at_index (em->intfcs, hif->dev_instance);
+ ssvm_shared_header_t *sh;
/* publish link-state in shared-memory, to discourage buffer-wasting */
sh = intfc->sh;
if (intfc->i_am_master)
- sh->opaque [MASTER_ADMIN_STATE_INDEX] = (void *) is_up;
+ sh->opaque[MASTER_ADMIN_STATE_INDEX] = (void *) is_up;
else
- sh->opaque [SLAVE_ADMIN_STATE_INDEX] = (void *) is_up;
-
+ sh->opaque[SLAVE_ADMIN_STATE_INDEX] = (void *) is_up;
+
return 0;
}
static clib_error_t *
ssvm_eth_subif_add_del_function (vnet_main_t * vnm,
- u32 hw_if_index,
- struct vnet_sw_interface_t * st,
- int is_add)
+ u32 hw_if_index,
+ struct vnet_sw_interface_t *st, int is_add)
{
/* Nothing for now */
return 0;
@@ -433,33 +437,33 @@ ssvm_eth_subif_add_del_function (vnet_main_t * vnm,
* Dynamically redirect all pkts from a specific interface
* to the specified node
*/
-static void
-ssvm_eth_set_interface_next_node (vnet_main_t *vnm, u32 hw_if_index,
- u32 node_index)
+static void
+ssvm_eth_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
+ u32 node_index)
{
- ssvm_eth_main_t * em = &ssvm_eth_main;
+ ssvm_eth_main_t *em = &ssvm_eth_main;
vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
- ssvm_private_t * intfc = pool_elt_at_index (em->intfcs, hw->dev_instance);
-
+ ssvm_private_t *intfc = pool_elt_at_index (em->intfcs, hw->dev_instance);
+
/* Shut off redirection */
if (node_index == ~0)
{
intfc->per_interface_next_index = node_index;
return;
}
-
- intfc->per_interface_next_index =
+
+ intfc->per_interface_next_index =
vlib_node_add_next (em->vlib_main, ssvm_eth_input_node.index, node_index);
}
-static u32 ssvm_eth_flag_change (vnet_main_t * vnm,
- vnet_hw_interface_t * hi,
- u32 flags)
+static u32
+ssvm_eth_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hi, u32 flags)
{
- /* nothing for now */
- return 0;
+ /* nothing for now */
+ return 0;
}
+/* *INDENT-OFF* */
VNET_DEVICE_CLASS (ssvm_eth_device_class) = {
.name = "ssvm-eth",
.tx_function = ssvm_eth_interface_tx,
@@ -477,3 +481,12 @@ VNET_DEVICE_CLASS (ssvm_eth_device_class) = {
VLIB_DEVICE_TX_FUNCTION_MULTIARCH (ssvm_eth_device_class,
ssvm_eth_interface_tx)
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
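
On the tx side, each buffer chain is copied into fixed-size shared-memory chunks, and the chunk count is a plain ceiling division of the chain length by the chunk size. A worked example assuming a 2048-byte chunk (the actual SSVM_BUFFER_SIZE is derived from the VLIB buffer geometry in ssvm_eth.h):

#include <stdio.h>
#include <stdint.h>

#define DEMO_CHUNK_SIZE 2048

static uint32_t
chunks_needed (uint32_t packet_bytes)
{
  /* Round up: a 4097-byte packet needs 3 chunks, not 2. */
  return (packet_bytes + (DEMO_CHUNK_SIZE - 1)) / DEMO_CHUNK_SIZE;
}

int
main (void)
{
  printf ("%u\n", chunks_needed (4097));	/* prints 3 */
  return 0;
}
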
diff --git a/vnet/vnet/devices/ssvm/ssvm_eth.h b/vnet/vnet/devices/ssvm/ssvm_eth.h
index 23af7ed5ae8..f4156e3ae70 100644
--- a/vnet/vnet/devices/ssvm/ssvm_eth.h
+++ b/vnet/vnet/devices/ssvm/ssvm_eth.h
@@ -38,7 +38,8 @@ extern vlib_node_registration_t ssvm_eth_input_node;
(VLIB_BUFFER_DATA_SIZE + VLIB_BUFFER_PRE_DATA_SIZE)
#define SSVM_PACKET_TYPE 1
-typedef struct {
+typedef struct
+{
/* Type of queue element */
u8 type;
u8 flags;
@@ -51,17 +52,18 @@ typedef struct {
u16 pad;
u32 next_index;
/* offset 16 */
- u8 data [SSVM_BUFFER_SIZE];
+ u8 data[SSVM_BUFFER_SIZE];
/* pad to an even multiple of 64 octets */
u8 pad2[CLIB_CACHE_LINE_BYTES - 16];
} ssvm_eth_queue_elt_t;
-typedef struct {
+typedef struct
+{
/* vector of point-to-point connections */
- ssvm_private_t * intfcs;
+ ssvm_private_t *intfcs;
- u32 * buffer_cache;
- u32 * chunk_cache;
+ u32 *buffer_cache;
+ u32 *chunk_cache;
/* Configurable parameters */
/* base address for next placement */
@@ -71,19 +73,20 @@ typedef struct {
u64 queue_elts;
/* Segment names */
- u8 ** names;
+ u8 **names;
/* convenience */
- vlib_main_t * vlib_main;
- vnet_main_t * vnet_main;
- elog_main_t * elog_main;
+ vlib_main_t *vlib_main;
+ vnet_main_t *vnet_main;
+ elog_main_t *elog_main;
} ssvm_eth_main_t;
ssvm_eth_main_t ssvm_eth_main;
-typedef enum {
+typedef enum
+{
CHUNK_POOL_FREELIST_INDEX = 0,
- CHUNK_POOL_INDEX,
+ CHUNK_POOL_INDEX,
CHUNK_POOL_NFREE,
TO_MASTER_Q_INDEX,
TO_SLAVE_Q_INDEX,
@@ -94,13 +97,14 @@ typedef enum {
/*
* debug scaffolding.
*/
-static inline void ssvm_eth_validate_freelists (int need_lock)
+static inline void
+ssvm_eth_validate_freelists (int need_lock)
{
#if CLIB_DEBUG > 0
- ssvm_eth_main_t * em = &ssvm_eth_main;
- ssvm_private_t * intfc;
- ssvm_shared_header_t * sh;
- u32 * elt_indices;
+ ssvm_eth_main_t *em = &ssvm_eth_main;
+ ssvm_private_t *intfc;
+ ssvm_shared_header_t *sh;
+ u32 *elt_indices;
u32 n_available;
int i;
@@ -109,20 +113,28 @@ static inline void ssvm_eth_validate_freelists (int need_lock)
intfc = em->intfcs + i;
sh = intfc->sh;
u32 my_pid = intfc->my_pid;
-
+
if (need_lock)
- ssvm_lock (sh, my_pid, 15);
+ ssvm_lock (sh, my_pid, 15);
- elt_indices = (u32 *) (sh->opaque [CHUNK_POOL_FREELIST_INDEX]);
- n_available = (u32) (uword) (sh->opaque [CHUNK_POOL_NFREE]);
+ elt_indices = (u32 *) (sh->opaque[CHUNK_POOL_FREELIST_INDEX]);
+ n_available = (u32) (uword) (sh->opaque[CHUNK_POOL_NFREE]);
for (i = 0; i < n_available; i++)
ASSERT (elt_indices[i] < 2048);
if (need_lock)
- ssvm_unlock (sh);
+ ssvm_unlock (sh);
}
#endif
}
#endif /* __included_ssvm_eth_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
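
ssvm_eth_init() warns at run time when ssvm_eth_queue_elt_t is not a whole number of cache lines; since the element layout is fixed at compile time, the same invariant can also be pinned with a static assertion. A sketch under the assumption of 64-byte cache lines (CLIB_CACHE_LINE_BYTES is build-configuration dependent) and a simplified element layout:

#include <stdint.h>

#define DEMO_CACHE_LINE_BYTES 64

typedef struct
{
  uint8_t type, flags;
  uint16_t length_this_buffer;
  uint32_t next_index;
  uint8_t data[2048];
  uint8_t pad[DEMO_CACHE_LINE_BYTES - 8];	/* pad header to a line */
} demo_queue_elt_t;

/* 2112 bytes = 33 cache lines; the build breaks if that ever changes. */
_Static_assert (sizeof (demo_queue_elt_t) % DEMO_CACHE_LINE_BYTES == 0,
		"queue element must be a whole number of cache lines");
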
diff --git a/vnet/vnet/devices/virtio/vhost-user.c b/vnet/vnet/devices/virtio/vhost-user.c
index dca60f14c67..51b0c409d2a 100644
--- a/vnet/vnet/devices/virtio/vhost-user.c
+++ b/vnet/vnet/devices/virtio/vhost-user.c
@@ -1,4 +1,4 @@
-/*
+/*
*------------------------------------------------------------------
* vhost.c - vhost-user
*
@@ -66,16 +66,17 @@ vlib_node_registration_t vhost_user_input_node;
_(PKT_DROP_NOBUF, "tx packet drops (no available descriptors)") \
_(MMAP_FAIL, "mmap failure")
-typedef enum {
+typedef enum
+{
#define _(f,s) VHOST_USER_TX_FUNC_ERROR_##f,
foreach_vhost_user_tx_func_error
#undef _
- VHOST_USER_TX_FUNC_N_ERROR,
+ VHOST_USER_TX_FUNC_N_ERROR,
} vhost_user_tx_func_error_t;
-static char * vhost_user_tx_func_error_strings[] = {
+static char *vhost_user_tx_func_error_strings[] = {
#define _(n,s) s,
- foreach_vhost_user_tx_func_error
+ foreach_vhost_user_tx_func_error
#undef _
};
@@ -85,19 +86,21 @@ static char * vhost_user_tx_func_error_strings[] = {
_(MMAP_FAIL, "mmap failure") \
_(UNDERSIZED_FRAME, "undersized ethernet frame received (< 14 bytes)")
-typedef enum {
+typedef enum
+{
#define _(f,s) VHOST_USER_INPUT_FUNC_ERROR_##f,
foreach_vhost_user_input_func_error
#undef _
- VHOST_USER_INPUT_FUNC_N_ERROR,
+ VHOST_USER_INPUT_FUNC_N_ERROR,
} vhost_user_input_func_error_t;
-static char * vhost_user_input_func_error_strings[] = {
+static char *vhost_user_input_func_error_strings[] = {
#define _(n,s) s,
- foreach_vhost_user_input_func_error
+ foreach_vhost_user_input_func_error
#undef _
};
+/* *INDENT-OFF* */
static vhost_user_main_t vhost_user_main = {
.mtu_bytes = 1518,
};
@@ -105,12 +108,14 @@ static vhost_user_main_t vhost_user_main = {
VNET_HW_INTERFACE_CLASS (vhost_interface_class, static) = {
.name = "vhost-user",
};
+/* *INDENT-ON* */
-static u8 * format_vhost_user_interface_name (u8 * s, va_list * args)
+static u8 *
+format_vhost_user_interface_name (u8 * s, va_list * args)
{
u32 i = va_arg (*args, u32);
u32 show_dev_instance = ~0;
- vhost_user_main_t * vum = &vhost_user_main;
+ vhost_user_main_t *vum = &vhost_user_main;
if (i < vec_len (vum->show_dev_instance_by_real_dev_instance))
show_dev_instance = vum->show_dev_instance_by_real_dev_instance[i];
@@ -122,141 +127,168 @@ static u8 * format_vhost_user_interface_name (u8 * s, va_list * args)
return s;
}
-static int vhost_user_name_renumber (vnet_hw_interface_t * hi,
- u32 new_dev_instance)
+static int
+vhost_user_name_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
{
- vhost_user_main_t * vum = &vhost_user_main;
+ vhost_user_main_t *vum = &vhost_user_main;
vec_validate_init_empty (vum->show_dev_instance_by_real_dev_instance,
- hi->dev_instance, ~0);
+ hi->dev_instance, ~0);
- vum->show_dev_instance_by_real_dev_instance [hi->dev_instance] =
+ vum->show_dev_instance_by_real_dev_instance[hi->dev_instance] =
new_dev_instance;
- DBG_SOCK("renumbered vhost-user interface dev_instance %d to %d",
- hi->dev_instance, new_dev_instance);
+ DBG_SOCK ("renumbered vhost-user interface dev_instance %d to %d",
+ hi->dev_instance, new_dev_instance);
return 0;
}
-static inline void * map_guest_mem(vhost_user_intf_t * vui, uword addr)
+static inline void *
+map_guest_mem (vhost_user_intf_t * vui, uword addr)
{
int i;
- for (i=0; i<vui->nregions; i++) {
- if ((vui->regions[i].guest_phys_addr <= addr) &&
- ((vui->regions[i].guest_phys_addr + vui->regions[i].memory_size) > addr)) {
- return (void *) (vui->region_mmap_addr[i] + addr - vui->regions[i].guest_phys_addr);
- }
- }
- DBG_VQ("failed to map guest mem addr %llx", addr);
+ for (i = 0; i < vui->nregions; i++)
+ {
+ if ((vui->regions[i].guest_phys_addr <= addr) &&
+ ((vui->regions[i].guest_phys_addr + vui->regions[i].memory_size) >
+ addr))
+ {
+ return (void *) (vui->region_mmap_addr[i] + addr -
+ vui->regions[i].guest_phys_addr);
+ }
+ }
+ DBG_VQ ("failed to map guest mem addr %llx", addr);
return 0;
}
-static inline void * map_user_mem(vhost_user_intf_t * vui, uword addr)
+static inline void *
+map_user_mem (vhost_user_intf_t * vui, uword addr)
{
int i;
- for (i=0; i<vui->nregions; i++) {
- if ((vui->regions[i].userspace_addr <= addr) &&
- ((vui->regions[i].userspace_addr + vui->regions[i].memory_size) > addr)) {
- return (void *) (vui->region_mmap_addr[i] + addr - vui->regions[i].userspace_addr);
- }
- }
+ for (i = 0; i < vui->nregions; i++)
+ {
+ if ((vui->regions[i].userspace_addr <= addr) &&
+ ((vui->regions[i].userspace_addr + vui->regions[i].memory_size) >
+ addr))
+ {
+ return (void *) (vui->region_mmap_addr[i] + addr -
+ vui->regions[i].userspace_addr);
+ }
+ }
return 0;
}
-static long get_huge_page_size(int fd)
+static long
+get_huge_page_size (int fd)
{
struct statfs s;
- fstatfs(fd, &s);
+ fstatfs (fd, &s);
return s.f_bsize;
}
-static void unmap_all_mem_regions(vhost_user_intf_t * vui)
+static void
+unmap_all_mem_regions (vhost_user_intf_t * vui)
{
- int i,r;
- for (i=0; i<vui->nregions; i++) {
- if (vui->region_mmap_addr[i] != (void *) -1) {
+ int i, r;
+ for (i = 0; i < vui->nregions; i++)
+ {
+ if (vui->region_mmap_addr[i] != (void *) -1)
+ {
- long page_sz = get_huge_page_size(vui->region_mmap_fd[i]);
+ long page_sz = get_huge_page_size (vui->region_mmap_fd[i]);
- ssize_t map_sz = (vui->regions[i].memory_size +
- vui->regions[i].mmap_offset + page_sz) & ~(page_sz - 1);
+ ssize_t map_sz = (vui->regions[i].memory_size +
+ vui->regions[i].mmap_offset +
+ page_sz) & ~(page_sz - 1);
- r = munmap(vui->region_mmap_addr[i] - vui->regions[i].mmap_offset, map_sz);
+ r =
+ munmap (vui->region_mmap_addr[i] - vui->regions[i].mmap_offset,
+ map_sz);
- DBG_SOCK("unmap memory region %d addr 0x%lx len 0x%lx page_sz 0x%x", i,
- vui->region_mmap_addr[i], map_sz, page_sz);
+ DBG_SOCK
+ ("unmap memory region %d addr 0x%lx len 0x%lx page_sz 0x%x", i,
+ vui->region_mmap_addr[i], map_sz, page_sz);
- vui->region_mmap_addr[i]= (void *) -1;
+ vui->region_mmap_addr[i] = (void *) -1;
- if (r == -1) {
- clib_warning("failed to unmap memory region (errno %d)", errno);
- }
- close(vui->region_mmap_fd[i]);
+ if (r == -1)
+ {
+ clib_warning ("failed to unmap memory region (errno %d)",
+ errno);
+ }
+ close (vui->region_mmap_fd[i]);
+ }
}
- }
vui->nregions = 0;
}
-static clib_error_t * vhost_user_callfd_read_ready (unix_file_t * uf)
+static clib_error_t *
+vhost_user_callfd_read_ready (unix_file_t * uf)
{
- __attribute__((unused)) int n;
+ __attribute__ ((unused)) int n;
u8 buff[8];
- n = read(uf->file_descriptor, ((char*)&buff), 8);
+ n = read (uf->file_descriptor, ((char *) &buff), 8);
return 0;
}
-static inline void vhost_user_if_disconnect(vhost_user_intf_t * vui)
+static inline void
+vhost_user_if_disconnect (vhost_user_intf_t * vui)
{
- vhost_user_main_t * vum = &vhost_user_main;
- vnet_main_t * vnm = vnet_get_main();
+ vhost_user_main_t *vum = &vhost_user_main;
+ vnet_main_t *vnm = vnet_get_main ();
int q;
- vnet_hw_interface_set_flags (vnm, vui->hw_if_index, 0);
+ vnet_hw_interface_set_flags (vnm, vui->hw_if_index, 0);
- if (vui->unix_file_index != ~0) {
+ if (vui->unix_file_index != ~0)
+ {
unix_file_del (&unix_main, unix_main.file_pool + vui->unix_file_index);
vui->unix_file_index = ~0;
- }
+ }
- hash_unset(vum->vhost_user_interface_index_by_sock_fd, vui->unix_fd);
- hash_unset(vum->vhost_user_interface_index_by_listener_fd, vui->unix_fd);
- close(vui->unix_fd);
+ hash_unset (vum->vhost_user_interface_index_by_sock_fd, vui->unix_fd);
+ hash_unset (vum->vhost_user_interface_index_by_listener_fd, vui->unix_fd);
+ close (vui->unix_fd);
vui->unix_fd = -1;
vui->is_up = 0;
- for (q = 0; q < vui->num_vrings; q++) {
- vui->vrings[q].desc = NULL;
- vui->vrings[q].avail = NULL;
- vui->vrings[q].used = NULL;
- vui->vrings[q].log_guest_addr = 0;
- vui->vrings[q].log_used = 0;
- }
+ for (q = 0; q < vui->num_vrings; q++)
+ {
+ vui->vrings[q].desc = NULL;
+ vui->vrings[q].avail = NULL;
+ vui->vrings[q].used = NULL;
+ vui->vrings[q].log_guest_addr = 0;
+ vui->vrings[q].log_used = 0;
+ }
- unmap_all_mem_regions(vui);
- DBG_SOCK("interface ifindex %d disconnected", vui->sw_if_index);
+ unmap_all_mem_regions (vui);
+ DBG_SOCK ("interface ifindex %d disconnected", vui->sw_if_index);
}
#define VHOST_LOG_PAGE 0x1000
-always_inline void vhost_user_log_dirty_pages(vhost_user_intf_t * vui,
- u64 addr, u64 len)
+always_inline void
+vhost_user_log_dirty_pages (vhost_user_intf_t * vui, u64 addr, u64 len)
{
- if (PREDICT_TRUE(vui->log_base_addr == 0
- || !(vui->features & (1 << FEAT_VHOST_F_LOG_ALL)))) {
- return;
- }
- if (PREDICT_FALSE((addr + len - 1) / VHOST_LOG_PAGE / 8 >= vui->log_size)) {
- DBG_SOCK("vhost_user_log_dirty_pages(): out of range\n");
- return;
- }
+ if (PREDICT_TRUE (vui->log_base_addr == 0
+ || !(vui->features & (1 << FEAT_VHOST_F_LOG_ALL))))
+ {
+ return;
+ }
+ if (PREDICT_FALSE ((addr + len - 1) / VHOST_LOG_PAGE / 8 >= vui->log_size))
+ {
+ DBG_SOCK ("vhost_user_log_dirty_pages(): out of range\n");
+ return;
+ }
- CLIB_MEMORY_BARRIER();
+ CLIB_MEMORY_BARRIER ();
u64 page = addr / VHOST_LOG_PAGE;
- while (page * VHOST_LOG_PAGE < addr + len) {
- ((u8*)vui->log_base_addr)[page / 8] |= 1 << page % 8;
- page++;
- }
+ while (page * VHOST_LOG_PAGE < addr + len)
+ {
+ ((u8 *) vui->log_base_addr)[page / 8] |= 1 << page % 8;
+ page++;
+ }
}
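
The dirty log written above is a bitmap with one bit per VHOST_LOG_PAGE (4 kB) page of guest memory; a worked example of the bit addressing, using hypothetical values:

    /* addr = 0x5000, len = 0x2000, VHOST_LOG_PAGE = 0x1000:
     * pages 5 and 6 are marked dirty:
     *   ((u8 *) log_base_addr)[5 / 8] |= 1 << (5 % 8);   byte 0, bit 5
     *   ((u8 *) log_base_addr)[6 / 8] |= 1 << (6 % 8);   byte 0, bit 6
     * the range check requires (addr + len - 1) / 0x1000 / 8 < log_size. */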
#define vhost_user_log_dirty_ring(vui, vq, member) \
@@ -265,7 +297,8 @@ always_inline void vhost_user_log_dirty_pages(vhost_user_intf_t * vui,
sizeof(vq->used->member)); \
}
-static clib_error_t * vhost_user_socket_read (unix_file_t * uf)
+static clib_error_t *
+vhost_user_socket_read (unix_file_t * uf)
{
int n, i;
int fd, number_of_fds = 0;
@@ -273,30 +306,30 @@ static clib_error_t * vhost_user_socket_read (unix_file_t * uf)
vhost_user_msg_t msg;
struct msghdr mh;
struct iovec iov[1];
- vhost_user_main_t * vum = &vhost_user_main;
- vhost_user_intf_t * vui;
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui;
struct cmsghdr *cmsg;
- uword * p;
+ uword *p;
u8 q;
- unix_file_t template = {0};
- vnet_main_t * vnm = vnet_get_main();
+ unix_file_t template = { 0 };
+ vnet_main_t *vnm = vnet_get_main ();
p = hash_get (vum->vhost_user_interface_index_by_sock_fd,
- uf->file_descriptor);
- if (p == 0) {
- DBG_SOCK ("FD %d doesn't belong to any interface",
- uf->file_descriptor);
+ uf->file_descriptor);
+ if (p == 0)
+ {
+ DBG_SOCK ("FD %d doesn't belong to any interface", uf->file_descriptor);
return 0;
}
else
vui = vec_elt_at_index (vum->vhost_user_interfaces, p[0]);
- char control[CMSG_SPACE(VHOST_MEMORY_MAX_NREGIONS * sizeof(int))];
+ char control[CMSG_SPACE (VHOST_MEMORY_MAX_NREGIONS * sizeof (int))];
- memset(&mh, 0, sizeof(mh));
- memset(control, 0, sizeof(control));
+ memset (&mh, 0, sizeof (mh));
+ memset (control, 0, sizeof (control));
- for (i=0; i < VHOST_MEMORY_MAX_NREGIONS; i++)
+ for (i = 0; i < VHOST_MEMORY_MAX_NREGIONS; i++)
fds[i] = -1;
/* set the payload */
@@ -306,420 +339,445 @@ static clib_error_t * vhost_user_socket_read (unix_file_t * uf)
mh.msg_iov = iov;
mh.msg_iovlen = 1;
mh.msg_control = control;
- mh.msg_controllen = sizeof(control);
+ mh.msg_controllen = sizeof (control);
- n = recvmsg(uf->file_descriptor, &mh, 0);
+ n = recvmsg (uf->file_descriptor, &mh, 0);
if (n != VHOST_USER_MSG_HDR_SZ)
goto close_socket;
- if (mh.msg_flags & MSG_CTRUNC) {
- goto close_socket;
- }
+ if (mh.msg_flags & MSG_CTRUNC)
+ {
+ goto close_socket;
+ }
- cmsg = CMSG_FIRSTHDR(&mh);
+ cmsg = CMSG_FIRSTHDR (&mh);
if (cmsg && (cmsg->cmsg_len > 0) && (cmsg->cmsg_level == SOL_SOCKET) &&
(cmsg->cmsg_type == SCM_RIGHTS) &&
- (cmsg->cmsg_len - CMSG_LEN(0) <= VHOST_MEMORY_MAX_NREGIONS * sizeof(int))) {
- number_of_fds = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
- clib_memcpy(fds, CMSG_DATA(cmsg), number_of_fds * sizeof(int));
- }
+ (cmsg->cmsg_len - CMSG_LEN (0) <=
+ VHOST_MEMORY_MAX_NREGIONS * sizeof (int)))
+ {
+ number_of_fds = (cmsg->cmsg_len - CMSG_LEN (0)) / sizeof (int);
+ clib_memcpy (fds, CMSG_DATA (cmsg), number_of_fds * sizeof (int));
+ }
- /* version 1, no reply bit set*/
- if ((msg.flags & 7) != 1) {
- DBG_SOCK("malformed message received. closing socket");
- goto close_socket;
- }
+ /* version 1, no reply bit set */
+ if ((msg.flags & 7) != 1)
+ {
+ DBG_SOCK ("malformed message received. closing socket");
+ goto close_socket;
+ }
{
- int rv __attribute__((unused));
- /* $$$$ pay attention to rv */
- rv = read(uf->file_descriptor, ((char*)&msg) + n, msg.size);
+ int rv __attribute__ ((unused));
+ /* $$$$ pay attention to rv */
+ rv = read (uf->file_descriptor, ((char *) &msg) + n, msg.size);
}
- switch (msg.request) {
+ switch (msg.request)
+ {
case VHOST_USER_GET_FEATURES:
- DBG_SOCK("if %d msg VHOST_USER_GET_FEATURES",
- vui->hw_if_index);
+ DBG_SOCK ("if %d msg VHOST_USER_GET_FEATURES", vui->hw_if_index);
msg.flags |= 4;
msg.u64 = (1 << FEAT_VIRTIO_NET_F_MRG_RXBUF) |
- (1 << FEAT_VIRTIO_F_ANY_LAYOUT) |
- (1 << FEAT_VHOST_F_LOG_ALL) |
- (1 << FEAT_VIRTIO_NET_F_GUEST_ANNOUNCE) |
- (1 << FEAT_VHOST_USER_F_PROTOCOL_FEATURES);
+ (1 << FEAT_VIRTIO_F_ANY_LAYOUT) |
+ (1 << FEAT_VHOST_F_LOG_ALL) |
+ (1 << FEAT_VIRTIO_NET_F_GUEST_ANNOUNCE) |
+ (1 << FEAT_VHOST_USER_F_PROTOCOL_FEATURES);
msg.u64 &= vui->feature_mask;
- msg.size = sizeof(msg.u64);
+ msg.size = sizeof (msg.u64);
break;
case VHOST_USER_SET_FEATURES:
- DBG_SOCK("if %d msg VHOST_USER_SET_FEATURES features 0x%016llx",
- vui->hw_if_index, msg.u64);
+ DBG_SOCK ("if %d msg VHOST_USER_SET_FEATURES features 0x%016llx",
+ vui->hw_if_index, msg.u64);
vui->features = msg.u64;
if (vui->features & (1 << FEAT_VIRTIO_NET_F_MRG_RXBUF))
- vui->virtio_net_hdr_sz = 12;
+ vui->virtio_net_hdr_sz = 12;
else
- vui->virtio_net_hdr_sz = 10;
+ vui->virtio_net_hdr_sz = 10;
- vui->is_any_layout = (vui->features & (1 << FEAT_VIRTIO_F_ANY_LAYOUT)) ? 1 : 0;
+ vui->is_any_layout =
+ (vui->features & (1 << FEAT_VIRTIO_F_ANY_LAYOUT)) ? 1 : 0;
ASSERT (vui->virtio_net_hdr_sz < VLIB_BUFFER_PRE_DATA_SIZE);
- vnet_hw_interface_set_flags (vnm, vui->hw_if_index, 0);
+ vnet_hw_interface_set_flags (vnm, vui->hw_if_index, 0);
vui->is_up = 0;
- for (q = 0; q < 2; q++) {
- vui->vrings[q].desc = 0;
- vui->vrings[q].avail = 0;
- vui->vrings[q].used = 0;
- vui->vrings[q].log_guest_addr = 0;
- vui->vrings[q].log_used = 0;
- }
+ for (q = 0; q < 2; q++)
+ {
+ vui->vrings[q].desc = 0;
+ vui->vrings[q].avail = 0;
+ vui->vrings[q].used = 0;
+ vui->vrings[q].log_guest_addr = 0;
+ vui->vrings[q].log_used = 0;
+ }
- DBG_SOCK("interface %d disconnected", vui->sw_if_index);
+ DBG_SOCK ("interface %d disconnected", vui->sw_if_index);
break;
case VHOST_USER_SET_MEM_TABLE:
- DBG_SOCK("if %d msg VHOST_USER_SET_MEM_TABLE nregions %d",
- vui->hw_if_index, msg.memory.nregions);
+ DBG_SOCK ("if %d msg VHOST_USER_SET_MEM_TABLE nregions %d",
+ vui->hw_if_index, msg.memory.nregions);
if ((msg.memory.nregions < 1) ||
- (msg.memory.nregions > VHOST_MEMORY_MAX_NREGIONS)) {
-
- DBG_SOCK("number of mem regions must be between 1 and %i",
- VHOST_MEMORY_MAX_NREGIONS);
-
- goto close_socket;
- }
-
- if (msg.memory.nregions != number_of_fds) {
- DBG_SOCK("each memory region must have FD");
- goto close_socket;
- }
- unmap_all_mem_regions(vui);
- for(i=0; i < msg.memory.nregions; i++) {
- clib_memcpy(&(vui->regions[i]), &msg.memory.regions[i],
- sizeof(vhost_user_memory_region_t));
-
- long page_sz = get_huge_page_size(fds[i]);
-
- /* align size to 2M page */
- ssize_t map_sz = (vui->regions[i].memory_size +
- vui->regions[i].mmap_offset + page_sz) & ~(page_sz - 1);
-
- vui->region_mmap_addr[i] = mmap(0, map_sz, PROT_READ | PROT_WRITE,
- MAP_SHARED, fds[i], 0);
-
- DBG_SOCK("map memory region %d addr 0 len 0x%lx fd %d mapped 0x%lx "
- "page_sz 0x%x", i, map_sz, fds[i], vui->region_mmap_addr[i], page_sz);
-
- if (vui->region_mmap_addr[i] == MAP_FAILED) {
- clib_warning("failed to map memory. errno is %d", errno);
- goto close_socket;
- }
- vui->region_mmap_addr[i] += vui->regions[i].mmap_offset;
- vui->region_mmap_fd[i] = fds[i];
- }
+ (msg.memory.nregions > VHOST_MEMORY_MAX_NREGIONS))
+ {
+
+ DBG_SOCK ("number of mem regions must be between 1 and %i",
+ VHOST_MEMORY_MAX_NREGIONS);
+
+ goto close_socket;
+ }
+
+ if (msg.memory.nregions != number_of_fds)
+ {
+	  DBG_SOCK ("each memory region must have an FD");
+ goto close_socket;
+ }
+ unmap_all_mem_regions (vui);
+ for (i = 0; i < msg.memory.nregions; i++)
+ {
+ clib_memcpy (&(vui->regions[i]), &msg.memory.regions[i],
+ sizeof (vhost_user_memory_region_t));
+
+ long page_sz = get_huge_page_size (fds[i]);
+
+ /* align size to 2M page */
+ ssize_t map_sz = (vui->regions[i].memory_size +
+ vui->regions[i].mmap_offset +
+ page_sz) & ~(page_sz - 1);
+
+ vui->region_mmap_addr[i] = mmap (0, map_sz, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fds[i], 0);
+
+ DBG_SOCK
+ ("map memory region %d addr 0 len 0x%lx fd %d mapped 0x%lx "
+ "page_sz 0x%x", i, map_sz, fds[i], vui->region_mmap_addr[i],
+ page_sz);
+
+ if (vui->region_mmap_addr[i] == MAP_FAILED)
+ {
+ clib_warning ("failed to map memory. errno is %d", errno);
+ goto close_socket;
+ }
+ vui->region_mmap_addr[i] += vui->regions[i].mmap_offset;
+ vui->region_mmap_fd[i] = fds[i];
+ }
vui->nregions = msg.memory.nregions;
break;
case VHOST_USER_SET_VRING_NUM:
- DBG_SOCK("if %d msg VHOST_USER_SET_VRING_NUM idx %d num %d",
- vui->hw_if_index, msg.state.index, msg.state.num);
+ DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_NUM idx %d num %d",
+ vui->hw_if_index, msg.state.index, msg.state.num);
- if ((msg.state.num > 32768) || /* maximum ring size is 32768 */
- (msg.state.num == 0) || /* it cannot be zero */
- (msg.state.num % 2)) /* must be power of 2 */
- goto close_socket;
+ if ((msg.state.num > 32768) || /* maximum ring size is 32768 */
+ (msg.state.num == 0) || /* it cannot be zero */
+      (msg.state.num & (msg.state.num - 1)))	/* must be power of 2 */
+ goto close_socket;
vui->vrings[msg.state.index].qsz = msg.state.num;
break;
case VHOST_USER_SET_VRING_ADDR:
- DBG_SOCK("if %d msg VHOST_USER_SET_VRING_ADDR idx %d",
- vui->hw_if_index, msg.state.index);
+ DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_ADDR idx %d",
+ vui->hw_if_index, msg.state.index);
- vui->vrings[msg.state.index].desc = (vring_desc_t *)
- map_user_mem(vui, msg.addr.desc_user_addr);
+ vui->vrings[msg.state.index].desc = (vring_desc_t *)
+ map_user_mem (vui, msg.addr.desc_user_addr);
vui->vrings[msg.state.index].used = (vring_used_t *)
- map_user_mem(vui, msg.addr.used_user_addr);
+ map_user_mem (vui, msg.addr.used_user_addr);
vui->vrings[msg.state.index].avail = (vring_avail_t *)
- map_user_mem(vui, msg.addr.avail_user_addr);
+ map_user_mem (vui, msg.addr.avail_user_addr);
if ((vui->vrings[msg.state.index].desc == NULL) ||
- (vui->vrings[msg.state.index].used == NULL) ||
- (vui->vrings[msg.state.index].avail == NULL)) {
- DBG_SOCK("failed to map user memory for hw_if_index %d",
- vui->hw_if_index);
- goto close_socket;
- }
+ (vui->vrings[msg.state.index].used == NULL) ||
+ (vui->vrings[msg.state.index].avail == NULL))
+ {
+ DBG_SOCK ("failed to map user memory for hw_if_index %d",
+ vui->hw_if_index);
+ goto close_socket;
+ }
vui->vrings[msg.state.index].log_guest_addr = msg.addr.log_guest_addr;
vui->vrings[msg.state.index].log_used =
- (msg.addr.flags & (1 << VHOST_VRING_F_LOG)) ? 1 : 0;
+ (msg.addr.flags & (1 << VHOST_VRING_F_LOG)) ? 1 : 0;
/* Spec says: If VHOST_USER_F_PROTOCOL_FEATURES has not been negotiated,
- the ring is initialized in an enabled state. */
+ the ring is initialized in an enabled state. */
- if (!(vui->features & (1 << FEAT_VHOST_USER_F_PROTOCOL_FEATURES))) {
- vui->vrings[msg.state.index].enabled = 1;
- }
+ if (!(vui->features & (1 << FEAT_VHOST_USER_F_PROTOCOL_FEATURES)))
+ {
+ vui->vrings[msg.state.index].enabled = 1;
+ }
vui->vrings[msg.state.index].last_used_idx =
- vui->vrings[msg.state.index].used->idx;
+ vui->vrings[msg.state.index].used->idx;
/* tell driver that we don't want interrupts */
vui->vrings[msg.state.index].used->flags |= 1;
break;
case VHOST_USER_SET_OWNER:
- DBG_SOCK("if %d msg VHOST_USER_SET_OWNER",
- vui->hw_if_index);
+ DBG_SOCK ("if %d msg VHOST_USER_SET_OWNER", vui->hw_if_index);
break;
case VHOST_USER_RESET_OWNER:
- DBG_SOCK("if %d msg VHOST_USER_RESET_OWNER",
- vui->hw_if_index);
+ DBG_SOCK ("if %d msg VHOST_USER_RESET_OWNER", vui->hw_if_index);
break;
case VHOST_USER_SET_VRING_CALL:
- DBG_SOCK("if %d msg VHOST_USER_SET_VRING_CALL u64 %d",
- vui->hw_if_index, msg.u64);
+ DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_CALL u64 %d",
+ vui->hw_if_index, msg.u64);
q = (u8) (msg.u64 & 0xFF);
if (!(msg.u64 & 0x100))
- {
- if (number_of_fds != 1)
- goto close_socket;
-
- /* if there is old fd, delete it */
- if (vui->vrings[q].callfd) {
- unix_file_t * uf = pool_elt_at_index (unix_main.file_pool,
- vui->vrings[q].callfd_idx);
- unix_file_del (&unix_main, uf);
- }
- vui->vrings[q].callfd = fds[0];
- template.read_function = vhost_user_callfd_read_ready;
- template.file_descriptor = fds[0];
- vui->vrings[q].callfd_idx = unix_file_add (&unix_main, &template);
- }
+ {
+ if (number_of_fds != 1)
+ goto close_socket;
+
+ /* if there is old fd, delete it */
+ if (vui->vrings[q].callfd)
+ {
+ unix_file_t *uf = pool_elt_at_index (unix_main.file_pool,
+ vui->vrings[q].callfd_idx);
+ unix_file_del (&unix_main, uf);
+ }
+ vui->vrings[q].callfd = fds[0];
+ template.read_function = vhost_user_callfd_read_ready;
+ template.file_descriptor = fds[0];
+ vui->vrings[q].callfd_idx = unix_file_add (&unix_main, &template);
+ }
else
- vui->vrings[q].callfd = -1;
+ vui->vrings[q].callfd = -1;
break;
case VHOST_USER_SET_VRING_KICK:
- DBG_SOCK("if %d msg VHOST_USER_SET_VRING_KICK u64 %d",
- vui->hw_if_index, msg.u64);
+ DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_KICK u64 %d",
+ vui->hw_if_index, msg.u64);
q = (u8) (msg.u64 & 0xFF);
if (!(msg.u64 & 0x100))
- {
- if (number_of_fds != 1)
- goto close_socket;
+ {
+ if (number_of_fds != 1)
+ goto close_socket;
- vui->vrings[q].kickfd = fds[0];
- }
+ vui->vrings[q].kickfd = fds[0];
+ }
else
- vui->vrings[q].kickfd = -1;
+ vui->vrings[q].kickfd = -1;
break;
case VHOST_USER_SET_VRING_ERR:
- DBG_SOCK("if %d msg VHOST_USER_SET_VRING_ERR u64 %d",
- vui->hw_if_index, msg.u64);
+ DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_ERR u64 %d",
+ vui->hw_if_index, msg.u64);
q = (u8) (msg.u64 & 0xFF);
if (!(msg.u64 & 0x100))
- {
- if (number_of_fds != 1)
- goto close_socket;
+ {
+ if (number_of_fds != 1)
+ goto close_socket;
- fd = fds[0];
- }
+ fd = fds[0];
+ }
else
- fd = -1;
+ fd = -1;
vui->vrings[q].errfd = fd;
break;
case VHOST_USER_SET_VRING_BASE:
- DBG_SOCK("if %d msg VHOST_USER_SET_VRING_BASE idx %d num %d",
- vui->hw_if_index, msg.state.index, msg.state.num);
+ DBG_SOCK ("if %d msg VHOST_USER_SET_VRING_BASE idx %d num %d",
+ vui->hw_if_index, msg.state.index, msg.state.num);
vui->vrings[msg.state.index].last_avail_idx = msg.state.num;
break;
case VHOST_USER_GET_VRING_BASE:
- DBG_SOCK("if %d msg VHOST_USER_GET_VRING_BASE idx %d num %d",
- vui->hw_if_index, msg.state.index, msg.state.num);
+ DBG_SOCK ("if %d msg VHOST_USER_GET_VRING_BASE idx %d num %d",
+ vui->hw_if_index, msg.state.index, msg.state.num);
/* Spec says: Client must [...] stop ring upon receiving VHOST_USER_GET_VRING_BASE. */
vui->vrings[msg.state.index].enabled = 0;
msg.state.num = vui->vrings[msg.state.index].last_avail_idx;
msg.flags |= 4;
- msg.size = sizeof(msg.state);
+ msg.size = sizeof (msg.state);
break;
case VHOST_USER_NONE:
- DBG_SOCK("if %d msg VHOST_USER_NONE",
- vui->hw_if_index);
+ DBG_SOCK ("if %d msg VHOST_USER_NONE", vui->hw_if_index);
break;
case VHOST_USER_SET_LOG_BASE:
- {
- DBG_SOCK("if %d msg VHOST_USER_SET_LOG_BASE",
- vui->hw_if_index);
-
- if (msg.size != sizeof(msg.log)) {
- DBG_SOCK("invalid msg size for VHOST_USER_SET_LOG_BASE: %d instead of %d",
- msg.size, sizeof(msg.log));
- goto close_socket;
- }
-
- if (!(vui->protocol_features & (1 << VHOST_USER_PROTOCOL_F_LOG_SHMFD))) {
- DBG_SOCK("VHOST_USER_PROTOCOL_F_LOG_SHMFD not set but VHOST_USER_SET_LOG_BASE received");
- goto close_socket;
- }
-
- fd = fds[0];
- /* align size to 2M page */
- long page_sz = get_huge_page_size(fd);
- ssize_t map_sz = (msg.log.size + msg.log.offset + page_sz) & ~(page_sz - 1);
-
- vui->log_base_addr = mmap(0, map_sz, PROT_READ | PROT_WRITE,
- MAP_SHARED, fd, 0);
-
- DBG_SOCK("map log region addr 0 len 0x%lx off 0x%lx fd %d mapped 0x%lx",
- map_sz, msg.log.offset, fd, vui->log_base_addr);
-
- if (vui->log_base_addr == MAP_FAILED) {
- clib_warning("failed to map memory. errno is %d", errno);
- goto close_socket;
+ {
+ DBG_SOCK ("if %d msg VHOST_USER_SET_LOG_BASE", vui->hw_if_index);
+
+ if (msg.size != sizeof (msg.log))
+ {
+ DBG_SOCK
+ ("invalid msg size for VHOST_USER_SET_LOG_BASE: %d instead of %d",
+ msg.size, sizeof (msg.log));
+ goto close_socket;
+ }
+
+ if (!
+ (vui->protocol_features & (1 << VHOST_USER_PROTOCOL_F_LOG_SHMFD)))
+ {
+ DBG_SOCK
+ ("VHOST_USER_PROTOCOL_F_LOG_SHMFD not set but VHOST_USER_SET_LOG_BASE received");
+ goto close_socket;
+ }
+
+ fd = fds[0];
+ /* align size to 2M page */
+ long page_sz = get_huge_page_size (fd);
+ ssize_t map_sz =
+ (msg.log.size + msg.log.offset + page_sz) & ~(page_sz - 1);
+
+ vui->log_base_addr = mmap (0, map_sz, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
+
+ DBG_SOCK
+ ("map log region addr 0 len 0x%lx off 0x%lx fd %d mapped 0x%lx",
+ map_sz, msg.log.offset, fd, vui->log_base_addr);
+
+ if (vui->log_base_addr == MAP_FAILED)
+ {
+ clib_warning ("failed to map memory. errno is %d", errno);
+ goto close_socket;
+ }
+
+ vui->log_base_addr += msg.log.offset;
+ vui->log_size = msg.log.size;
+
+ msg.flags |= 4;
+ msg.size = sizeof (msg.u64);
+
+ break;
}
- vui->log_base_addr += msg.log.offset;
- vui->log_size = msg.log.size;
-
- msg.flags |= 4;
- msg.size = sizeof(msg.u64);
-
- break;
- }
-
case VHOST_USER_SET_LOG_FD:
- DBG_SOCK("if %d msg VHOST_USER_SET_LOG_FD",
- vui->hw_if_index);
+ DBG_SOCK ("if %d msg VHOST_USER_SET_LOG_FD", vui->hw_if_index);
break;
case VHOST_USER_GET_PROTOCOL_FEATURES:
- DBG_SOCK("if %d msg VHOST_USER_GET_PROTOCOL_FEATURES", vui->hw_if_index);
+ DBG_SOCK ("if %d msg VHOST_USER_GET_PROTOCOL_FEATURES",
+ vui->hw_if_index);
msg.flags |= 4;
msg.u64 = (1 << VHOST_USER_PROTOCOL_F_LOG_SHMFD);
- msg.size = sizeof(msg.u64);
+ msg.size = sizeof (msg.u64);
break;
case VHOST_USER_SET_PROTOCOL_FEATURES:
- DBG_SOCK("if %d msg VHOST_USER_SET_PROTOCOL_FEATURES features 0x%lx",
- vui->hw_if_index, msg.u64);
+ DBG_SOCK ("if %d msg VHOST_USER_SET_PROTOCOL_FEATURES features 0x%lx",
+ vui->hw_if_index, msg.u64);
vui->protocol_features = msg.u64;
break;
case VHOST_USER_SET_VRING_ENABLE:
- DBG_SOCK("if %d VHOST_USER_SET_VRING_ENABLE, enable: %d",
- vui->hw_if_index, msg.state.num);
+ DBG_SOCK ("if %d VHOST_USER_SET_VRING_ENABLE, enable: %d",
+ vui->hw_if_index, msg.state.num);
vui->vrings[msg.state.index].enabled = msg.state.num;
break;
default:
- DBG_SOCK("unknown vhost-user message %d received. closing socket",
- msg.request);
+ DBG_SOCK ("unknown vhost-user message %d received. closing socket",
+ msg.request);
goto close_socket;
- }
+ }
- /* if we have pointers to descriptor table, go up*/
+  /* if we have pointers to the descriptor tables, bring the interface up */
if (!vui->is_up &&
- vui->vrings[VHOST_NET_VRING_IDX_TX].desc &&
- vui->vrings[VHOST_NET_VRING_IDX_RX].desc) {
+ vui->vrings[VHOST_NET_VRING_IDX_TX].desc &&
+ vui->vrings[VHOST_NET_VRING_IDX_RX].desc)
+ {
- DBG_SOCK("interface %d connected", vui->sw_if_index);
+ DBG_SOCK ("interface %d connected", vui->sw_if_index);
- vnet_hw_interface_set_flags (vnm, vui->hw_if_index, VNET_HW_INTERFACE_FLAG_LINK_UP);
+ vnet_hw_interface_set_flags (vnm, vui->hw_if_index,
+ VNET_HW_INTERFACE_FLAG_LINK_UP);
vui->is_up = 1;
- }
+ }
/* if we need to reply */
if (msg.flags & 4)
- {
- n = send(uf->file_descriptor, &msg, VHOST_USER_MSG_HDR_SZ + msg.size, 0);
+ {
+ n =
+ send (uf->file_descriptor, &msg, VHOST_USER_MSG_HDR_SZ + msg.size, 0);
if (n != (msg.size + VHOST_USER_MSG_HDR_SZ))
- goto close_socket;
- }
+ goto close_socket;
+ }
return 0;
close_socket:
- vhost_user_if_disconnect(vui);
+ vhost_user_if_disconnect (vui);
return 0;
}
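
For orientation, the reader above enforces the vhost-user framing rules: a fixed-size header arrives first, msg.size payload bytes follow on a second read, ancillary SCM_RIGHTS data may carry up to VHOST_MEMORY_MAX_NREGIONS file descriptors, and bit 2 of msg.flags (the value 4 OR'd in per request) marks a reply. A minimal sketch of the flags check, with hypothetical macro names:

    #include <stdint.h>

    #define VHOST_USER_VERSION_MASK 0x7   /* low bits carry the version */
    #define VHOST_USER_REPLY_MASK   0x4   /* OR'd into msg.flags for replies */

    static int
    msg_header_ok (uint32_t flags)
    {
      /* the reader accepts version 1 with no reply bit set */
      return (flags & VHOST_USER_VERSION_MASK) == 1;
    }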
-static clib_error_t * vhost_user_socket_error (unix_file_t * uf)
+static clib_error_t *
+vhost_user_socket_error (unix_file_t * uf)
{
- vhost_user_main_t * vum = &vhost_user_main;
- vhost_user_intf_t * vui;
- uword * p;
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui;
+ uword *p;
p = hash_get (vum->vhost_user_interface_index_by_sock_fd,
- uf->file_descriptor);
- if (p == 0) {
- DBG_SOCK ("fd %d doesn't belong to any interface",
- uf->file_descriptor);
+ uf->file_descriptor);
+ if (p == 0)
+ {
+ DBG_SOCK ("fd %d doesn't belong to any interface", uf->file_descriptor);
return 0;
}
else
vui = vec_elt_at_index (vum->vhost_user_interfaces, p[0]);
- vhost_user_if_disconnect(vui);
+ vhost_user_if_disconnect (vui);
return 0;
}
-static clib_error_t * vhost_user_socksvr_accept_ready (unix_file_t * uf)
+static clib_error_t *
+vhost_user_socksvr_accept_ready (unix_file_t * uf)
{
int client_fd, client_len;
struct sockaddr_un client;
- unix_file_t template = {0};
- vhost_user_main_t * vum = &vhost_user_main;
- vhost_user_intf_t * vui;
- uword * p;
+ unix_file_t template = { 0 };
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui;
+ uword *p;
p = hash_get (vum->vhost_user_interface_index_by_listener_fd,
- uf->file_descriptor);
- if (p == 0) {
- DBG_SOCK ("fd %d doesn't belong to any interface",
- uf->file_descriptor);
+ uf->file_descriptor);
+ if (p == 0)
+ {
+ DBG_SOCK ("fd %d doesn't belong to any interface", uf->file_descriptor);
return 0;
}
else
vui = vec_elt_at_index (vum->vhost_user_interfaces, p[0]);
- client_len = sizeof(client);
+ client_len = sizeof (client);
client_fd = accept (uf->file_descriptor,
- (struct sockaddr *)&client,
- (socklen_t *)&client_len);
+ (struct sockaddr *) &client,
+ (socklen_t *) & client_len);
if (client_fd < 0)
- return clib_error_return_unix (0, "accept");
+ return clib_error_return_unix (0, "accept");
template.read_function = vhost_user_socket_read;
template.error_function = vhost_user_socket_error;
@@ -728,7 +786,7 @@ static clib_error_t * vhost_user_socksvr_accept_ready (unix_file_t * uf)
vui->client_fd = client_fd;
hash_set (vum->vhost_user_interface_index_by_sock_fd, vui->client_fd,
- vui - vum->vhost_user_interfaces);
+ vui - vum->vhost_user_interfaces);
return 0;
}
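
The accept path above uses the same unix_file_t registration pattern as the rest of this file; condensed here for reference, with the fd coming from accept():

    unix_file_t template = { 0 };
    template.read_function = vhost_user_socket_read;    /* per-message handler */
    template.error_function = vhost_user_socket_error;  /* tears down on error */
    template.file_descriptor = client_fd;
    unix_file_add (&unix_main, &template);    /* polled by the unix event loop */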
@@ -736,22 +794,25 @@ static clib_error_t * vhost_user_socksvr_accept_ready (unix_file_t * uf)
static clib_error_t *
vhost_user_init (vlib_main_t * vm)
{
- clib_error_t * error;
- vhost_user_main_t * vum = &vhost_user_main;
- vlib_thread_main_t * tm = vlib_get_thread_main();
+ clib_error_t *error;
+ vhost_user_main_t *vum = &vhost_user_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
error = vlib_call_init_function (vm, ip4_init);
if (error)
return error;
- vum->vhost_user_interface_index_by_listener_fd = hash_create (0, sizeof (uword));
- vum->vhost_user_interface_index_by_sock_fd = hash_create (0, sizeof (uword));
- vum->vhost_user_interface_index_by_sw_if_index = hash_create (0, sizeof (uword));
+ vum->vhost_user_interface_index_by_listener_fd =
+ hash_create (0, sizeof (uword));
+ vum->vhost_user_interface_index_by_sock_fd =
+ hash_create (0, sizeof (uword));
+ vum->vhost_user_interface_index_by_sw_if_index =
+ hash_create (0, sizeof (uword));
vum->coalesce_frames = 32;
vum->coalesce_time = 1e-3;
vec_validate_aligned (vum->rx_buffers, tm->n_vlib_mains - 1,
- CLIB_CACHE_LINE_BYTES);
+ CLIB_CACHE_LINE_BYTES);
return 0;
}
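
The three hashes created above map listener fds, connected socket fds, and sw_if_index values back to interface pool indices; every handler in this file recovers its interface with the same two-step lookup:

    uword *p = hash_get (vum->vhost_user_interface_index_by_sock_fd, fd);
    if (p)
      vui = vec_elt_at_index (vum->vhost_user_interfaces, p[0]);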
@@ -767,14 +828,16 @@ vhost_user_exit (vlib_main_t * vm)
VLIB_MAIN_LOOP_EXIT_FUNCTION (vhost_user_exit);
-enum {
+enum
+{
VHOST_USER_RX_NEXT_ETHERNET_INPUT,
VHOST_USER_RX_NEXT_DROP,
VHOST_USER_RX_N_NEXT,
};
-typedef struct {
+typedef struct
+{
u16 virtqueue;
u16 device_index;
#if VHOST_USER_COPY_TX_HDR == 1
@@ -782,94 +845,93 @@ typedef struct {
#endif
} vhost_user_input_trace_t;
-static u8 * format_vhost_user_input_trace (u8 * s, va_list * va)
+static u8 *
+format_vhost_user_input_trace (u8 * s, va_list * va)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
- CLIB_UNUSED (vnet_main_t * vnm) = vnet_get_main();
- vhost_user_main_t * vum = &vhost_user_main;
- vhost_user_input_trace_t * t = va_arg (*va, vhost_user_input_trace_t *);
- vhost_user_intf_t * vui = vec_elt_at_index (vum->vhost_user_interfaces,
- t->device_index);
+ CLIB_UNUSED (vnet_main_t * vnm) = vnet_get_main ();
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_input_trace_t *t = va_arg (*va, vhost_user_input_trace_t *);
+ vhost_user_intf_t *vui = vec_elt_at_index (vum->vhost_user_interfaces,
+ t->device_index);
- vnet_sw_interface_t * sw = vnet_get_sw_interface (vnm, vui->sw_if_index);
+ vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, vui->sw_if_index);
#if VHOST_USER_COPY_TX_HDR == 1
uword indent = format_get_indent (s);
#endif
s = format (s, "%U virtqueue %d",
- format_vnet_sw_interface_name, vnm, sw,
- t->virtqueue);
+ format_vnet_sw_interface_name, vnm, sw, t->virtqueue);
#if VHOST_USER_COPY_TX_HDR == 1
s = format (s, "\n%Uvirtio_net_hdr flags 0x%02x gso_type %u hdr_len %u",
- format_white_space, indent,
- t->hdr.flags,
- t->hdr.gso_type,
- t->hdr.hdr_len);
+ format_white_space, indent,
+ t->hdr.flags, t->hdr.gso_type, t->hdr.hdr_len);
#endif
return s;
}
-void vhost_user_rx_trace (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vhost_user_intf_t *vui,
- i16 virtqueue)
+void
+vhost_user_rx_trace (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vhost_user_intf_t * vui, i16 virtqueue)
{
- u32 * b, n_left;
- vhost_user_main_t * vum = &vhost_user_main;
+ u32 *b, n_left;
+ vhost_user_main_t *vum = &vhost_user_main;
u32 next_index = VHOST_USER_RX_NEXT_ETHERNET_INPUT;
- n_left = vec_len(vui->d_trace_buffers);
+ n_left = vec_len (vui->d_trace_buffers);
b = vui->d_trace_buffers;
while (n_left >= 1)
- {
- u32 bi0;
- vlib_buffer_t * b0;
- vhost_user_input_trace_t * t0;
-
- bi0 = b[0];
- n_left -= 1;
-
- b0 = vlib_get_buffer (vm, bi0);
- vlib_trace_buffer (vm, node, next_index, b0, /* follow_chain */ 0);
- t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
- t0->virtqueue = virtqueue;
- t0->device_index = vui - vum->vhost_user_interfaces;
+ {
+ u32 bi0;
+ vlib_buffer_t *b0;
+ vhost_user_input_trace_t *t0;
+
+ bi0 = b[0];
+ n_left -= 1;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ vlib_trace_buffer (vm, node, next_index, b0, /* follow_chain */ 0);
+ t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
+ t0->virtqueue = virtqueue;
+ t0->device_index = vui - vum->vhost_user_interfaces;
#if VHOST_USER_COPY_TX_HDR == 1
- clib_memcpy(&t0->hdr, b0->pre_data, sizeof(virtio_net_hdr_t));
+ clib_memcpy (&t0->hdr, b0->pre_data, sizeof (virtio_net_hdr_t));
#endif
- b+=1;
- }
+ b += 1;
+ }
}
-static inline void vhost_user_send_call(vlib_main_t * vm, vhost_user_vring_t * vq)
+static inline void
+vhost_user_send_call (vlib_main_t * vm, vhost_user_vring_t * vq)
{
- vhost_user_main_t * vum = &vhost_user_main;
- u64 x = 1;
- int rv __attribute__((unused));
- /* $$$$ pay attention to rv */
- rv = write(vq->callfd, &x, sizeof(x));
- vq->n_since_last_int = 0;
- vq->int_deadline = vlib_time_now(vm) + vum->coalesce_time;
+ vhost_user_main_t *vum = &vhost_user_main;
+ u64 x = 1;
+ int rv __attribute__ ((unused));
+ /* $$$$ pay attention to rv */
+ rv = write (vq->callfd, &x, sizeof (x));
+ vq->n_since_last_int = 0;
+ vq->int_deadline = vlib_time_now (vm) + vum->coalesce_time;
}
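
The callfd written above is the eventfd the master passed in VHOST_USER_SET_VRING_CALL; signalling the guest is one 8-byte write. A standalone sketch of that mechanism (illustration only, not this driver's code):

    #include <stdint.h>
    #include <unistd.h>
    #include <sys/eventfd.h>

    int main (void)
    {
      int fd = eventfd (0, 0);     /* stand-in for the callfd from the master */
      uint64_t x = 1;
      if (write (fd, &x, sizeof (x)) == sizeof (x))   /* kick the other side */
        read (fd, &x, sizeof (x));                    /* consumer drains it */
      close (fd);
      return 0;
    }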
-static u32 vhost_user_if_input ( vlib_main_t * vm,
- vhost_user_main_t * vum,
- vhost_user_intf_t * vui,
- vlib_node_runtime_t * node)
+static u32
+vhost_user_if_input (vlib_main_t * vm,
+ vhost_user_main_t * vum,
+ vhost_user_intf_t * vui, vlib_node_runtime_t * node)
{
- vhost_user_vring_t * txvq = &vui->vrings[VHOST_NET_VRING_IDX_TX];
- vhost_user_vring_t * rxvq = &vui->vrings[VHOST_NET_VRING_IDX_RX];
+ vhost_user_vring_t *txvq = &vui->vrings[VHOST_NET_VRING_IDX_TX];
+ vhost_user_vring_t *rxvq = &vui->vrings[VHOST_NET_VRING_IDX_RX];
uword n_rx_packets = 0, n_rx_bytes = 0;
uword n_left;
- u32 n_left_to_next, * to_next;
+ u32 n_left_to_next, *to_next;
u32 next_index = 0;
u32 next0;
uword n_trace = vlib_get_trace_count (vm, node);
@@ -880,45 +942,46 @@ static u32 vhost_user_if_input ( vlib_main_t * vm,
vec_reset_length (vui->d_trace_buffers);
/* no descriptor ptr - bail out */
- if (PREDICT_FALSE(!txvq->desc || !txvq->avail || !txvq->enabled))
+ if (PREDICT_FALSE (!txvq->desc || !txvq->avail || !txvq->enabled))
return 0;
  /* do we have pending interrupts ? */
if ((txvq->n_since_last_int) && (txvq->int_deadline < now))
- vhost_user_send_call(vm, txvq);
+ vhost_user_send_call (vm, txvq);
if ((rxvq->n_since_last_int) && (rxvq->int_deadline < now))
- vhost_user_send_call(vm, rxvq);
+ vhost_user_send_call (vm, rxvq);
/* only bit 0 of avail.flags is used so we don't want to deal with this
interface if any other bit is set */
- if (PREDICT_FALSE(txvq->avail->flags & 0xFFFE))
+ if (PREDICT_FALSE (txvq->avail->flags & 0xFFFE))
return 0;
/* nothing to do */
if (txvq->avail->idx == txvq->last_avail_idx)
return 0;
- if (PREDICT_TRUE(txvq->avail->idx > txvq->last_avail_idx))
+ if (PREDICT_TRUE (txvq->avail->idx > txvq->last_avail_idx))
n_left = txvq->avail->idx - txvq->last_avail_idx;
- else /* wrapped */
- n_left = (u16) -1 - txvq->last_avail_idx + txvq->avail->idx;
-
- if (PREDICT_FALSE(!vui->admin_up)) {
- /* if intf is admin down, just drop all packets waiting in the ring */
- txvq->last_avail_idx = txvq->last_used_idx = txvq->avail->idx;
- CLIB_MEMORY_BARRIER();
- txvq->used->idx = txvq->last_used_idx;
- vhost_user_log_dirty_ring(vui, txvq, idx);
- vhost_user_send_call(vm, txvq);
- return 0;
- }
+ else /* wrapped */
+ n_left = (u16) - 1 - txvq->last_avail_idx + txvq->avail->idx;
- if (PREDICT_FALSE(n_left > txvq->qsz))
+ if (PREDICT_FALSE (!vui->admin_up))
+ {
+ /* if intf is admin down, just drop all packets waiting in the ring */
+ txvq->last_avail_idx = txvq->last_used_idx = txvq->avail->idx;
+ CLIB_MEMORY_BARRIER ();
+ txvq->used->idx = txvq->last_used_idx;
+ vhost_user_log_dirty_ring (vui, txvq, idx);
+ vhost_user_send_call (vm, txvq);
+ return 0;
+ }
+
+ if (PREDICT_FALSE (n_left > txvq->qsz))
return 0;
qsz_mask = txvq->qsz - 1;
- cpu_index = os_get_cpu_number();
+ cpu_index = os_get_cpu_number ();
drops = 0;
flush = 0;
@@ -932,200 +995,239 @@ static u32 vhost_user_if_input ( vlib_main_t * vm,
* to cycle through the descriptors without having to check for errors.
* For jumbo frames, the bottleneck is memory copy anyway.
*/
- if (PREDICT_FALSE(!vum->rx_buffers[cpu_index])) {
- vec_alloc (vum->rx_buffers[cpu_index], VLIB_FRAME_SIZE);
-
- if (PREDICT_FALSE(!vum->rx_buffers[cpu_index]))
- flush = n_left; //Drop all input
- }
-
- if (PREDICT_FALSE(_vec_len(vum->rx_buffers[cpu_index]) < n_left)) {
- _vec_len(vum->rx_buffers[cpu_index]) +=
- vlib_buffer_alloc_from_free_list(vm, vum->rx_buffers[cpu_index] + _vec_len(vum->rx_buffers[cpu_index]),
- VLIB_FRAME_SIZE - _vec_len(vum->rx_buffers[cpu_index]),
- VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
-
- if (PREDICT_FALSE(n_left > _vec_len(vum->rx_buffers[cpu_index])))
- flush = n_left - _vec_len(vum->rx_buffers[cpu_index]);
- }
+ if (PREDICT_FALSE (!vum->rx_buffers[cpu_index]))
+ {
+ vec_alloc (vum->rx_buffers[cpu_index], VLIB_FRAME_SIZE);
- if (PREDICT_FALSE(flush)) {
- //Remove some input buffers
- drops += flush;
- n_left -= flush;
- vlib_error_count(vm, vhost_user_input_node.index,
- VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, flush);
- while (flush) {
- u16 desc_chain_head = txvq->avail->ring[txvq->last_avail_idx & qsz_mask];
- txvq->last_avail_idx++;
- txvq->used->ring[txvq->last_used_idx & qsz_mask].id = desc_chain_head;
- txvq->used->ring[txvq->last_used_idx & qsz_mask].len = 0;
- vhost_user_log_dirty_ring(vui, txvq, ring[txvq->last_used_idx & qsz_mask]);
- txvq->last_used_idx++;
- flush--;
+ if (PREDICT_FALSE (!vum->rx_buffers[cpu_index]))
+ flush = n_left; //Drop all input
}
- }
-
- rx_len = vec_len(vum->rx_buffers[cpu_index]); //vector might be null
- while (n_left > 0) {
- vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
- while (n_left > 0 && n_left_to_next > 0) {
- vlib_buffer_t *b_head, *b_current;
- u32 bi_head, bi_current;
- u16 desc_chain_head, desc_current;
- u8 error = VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR;
-
- desc_chain_head = desc_current = txvq->avail->ring[txvq->last_avail_idx & qsz_mask];
- bi_head = bi_current = vum->rx_buffers[cpu_index][--rx_len];
- b_head = b_current = vlib_get_buffer (vm, bi_head);
- vlib_buffer_chain_init(b_head);
+ if (PREDICT_FALSE (_vec_len (vum->rx_buffers[cpu_index]) < n_left))
+ {
+ _vec_len (vum->rx_buffers[cpu_index]) +=
+ vlib_buffer_alloc_from_free_list (vm,
+ vum->rx_buffers[cpu_index] +
+ _vec_len (vum->rx_buffers
+ [cpu_index]),
+ VLIB_FRAME_SIZE -
+ _vec_len (vum->rx_buffers
+ [cpu_index]),
+ VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+
+ if (PREDICT_FALSE (n_left > _vec_len (vum->rx_buffers[cpu_index])))
+ flush = n_left - _vec_len (vum->rx_buffers[cpu_index]);
+ }
- uword offset;
- if (PREDICT_TRUE(vui->is_any_layout) ||
- !(txvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT)) {
- /* ANYLAYOUT or single buffer */
- offset = vui->virtio_net_hdr_sz;
- } else {
- /* CSR case without ANYLAYOUT, skip 1st buffer */
- offset = txvq->desc[desc_current].len;
- }
+ if (PREDICT_FALSE (flush))
+ {
+ //Remove some input buffers
+ drops += flush;
+ n_left -= flush;
+ vlib_error_count (vm, vhost_user_input_node.index,
+ VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, flush);
+ while (flush)
+ {
+ u16 desc_chain_head =
+ txvq->avail->ring[txvq->last_avail_idx & qsz_mask];
+ txvq->last_avail_idx++;
+ txvq->used->ring[txvq->last_used_idx & qsz_mask].id =
+ desc_chain_head;
+ txvq->used->ring[txvq->last_used_idx & qsz_mask].len = 0;
+ vhost_user_log_dirty_ring (vui, txvq,
+ ring[txvq->last_used_idx & qsz_mask]);
+ txvq->last_used_idx++;
+ flush--;
+ }
+ }
- while(1) {
- void * buffer_addr = map_guest_mem(vui, txvq->desc[desc_current].addr);
- if (PREDICT_FALSE(buffer_addr == 0)) {
- error = VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL;
- break;
- }
+ rx_len = vec_len (vum->rx_buffers[cpu_index]); //vector might be null
+ while (n_left > 0)
+ {
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (n_left > 0 && n_left_to_next > 0)
+ {
+ vlib_buffer_t *b_head, *b_current;
+ u32 bi_head, bi_current;
+ u16 desc_chain_head, desc_current;
+ u8 error = VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR;
+
+ desc_chain_head = desc_current =
+ txvq->avail->ring[txvq->last_avail_idx & qsz_mask];
+ bi_head = bi_current = vum->rx_buffers[cpu_index][--rx_len];
+ b_head = b_current = vlib_get_buffer (vm, bi_head);
+ vlib_buffer_chain_init (b_head);
+
+ uword offset;
+ if (PREDICT_TRUE (vui->is_any_layout) ||
+ !(txvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT))
+ {
+ /* ANYLAYOUT or single buffer */
+ offset = vui->virtio_net_hdr_sz;
+ }
+ else
+ {
+ /* CSR case without ANYLAYOUT, skip 1st buffer */
+ offset = txvq->desc[desc_current].len;
+ }
+
+ while (1)
+ {
+ void *buffer_addr =
+ map_guest_mem (vui, txvq->desc[desc_current].addr);
+ if (PREDICT_FALSE (buffer_addr == 0))
+ {
+ error = VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL;
+ break;
+ }
#if VHOST_USER_COPY_TX_HDR == 1
- if (PREDICT_TRUE(offset))
- clib_memcpy(b->pre_data, buffer_addr, sizeof(virtio_net_hdr_t)); /* 12 byte hdr is not used on tx */
+ if (PREDICT_TRUE (offset))
+	    clib_memcpy (b_head->pre_data, buffer_addr, sizeof (virtio_net_hdr_t));	/* 12 byte hdr is not used on tx */
#endif
- if (txvq->desc[desc_current].len > offset) {
- u16 len = txvq->desc[desc_current].len - offset;
- u16 copied = vlib_buffer_chain_append_data_with_alloc(vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX,
- b_head, &b_current, buffer_addr + offset, len);
-
- if (copied != len) {
- error = VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER;
- break;
- }
- }
- offset = 0;
-
- /* if next flag is set, take next desc in the chain */
- if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT )
- desc_current = txvq->desc[desc_current].next;
- else
- break;
- }
-
- /* consume the descriptor and return it as used */
- txvq->last_avail_idx++;
- txvq->used->ring[txvq->last_used_idx & qsz_mask].id = desc_chain_head;
- txvq->used->ring[txvq->last_used_idx & qsz_mask].len = 0;
- vhost_user_log_dirty_ring(vui, txvq, ring[txvq->last_used_idx & qsz_mask]);
- txvq->last_used_idx++;
-
- if(PREDICT_FALSE(b_head->current_length < 14 &&
- error == VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR)) {
- error = VHOST_USER_INPUT_FUNC_ERROR_UNDERSIZED_FRAME;
- }
-
- VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b_head);
-
- vnet_buffer (b_head)->sw_if_index[VLIB_RX] = vui->sw_if_index;
- vnet_buffer (b_head)->sw_if_index[VLIB_TX] = (u32)~0;
- b_head->error = node->errors[error];
-
- if (PREDICT_FALSE (n_trace > n_rx_packets))
- vec_add1 (vui->d_trace_buffers, bi_head);
-
- if (PREDICT_FALSE(error)) {
- drops++;
- next0 = VHOST_USER_RX_NEXT_DROP;
- } else {
- n_rx_bytes += b_head->current_length + b_head->total_length_not_including_first_buffer;
- n_rx_packets++;
- next0 = VHOST_USER_RX_NEXT_ETHERNET_INPUT;
- }
-
- to_next[0] = bi_head;
- to_next++;
- n_left_to_next--;
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next,
- bi_head, next0);
- n_left--;
+ if (txvq->desc[desc_current].len > offset)
+ {
+ u16 len = txvq->desc[desc_current].len - offset;
+ u16 copied = vlib_buffer_chain_append_data_with_alloc (vm,
+ VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX,
+ b_head,
+ &b_current,
+ buffer_addr
+ +
+ offset,
+ len);
+
+ if (copied != len)
+ {
+ error = VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER;
+ break;
+ }
+ }
+ offset = 0;
+
+ /* if next flag is set, take next desc in the chain */
+ if (txvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
+ desc_current = txvq->desc[desc_current].next;
+ else
+ break;
+ }
+
+ /* consume the descriptor and return it as used */
+ txvq->last_avail_idx++;
+ txvq->used->ring[txvq->last_used_idx & qsz_mask].id =
+ desc_chain_head;
+ txvq->used->ring[txvq->last_used_idx & qsz_mask].len = 0;
+ vhost_user_log_dirty_ring (vui, txvq,
+ ring[txvq->last_used_idx & qsz_mask]);
+ txvq->last_used_idx++;
+
+ if (PREDICT_FALSE (b_head->current_length < 14 &&
+ error == VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR))
+ {
+ error = VHOST_USER_INPUT_FUNC_ERROR_UNDERSIZED_FRAME;
+ }
+
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b_head);
+
+ vnet_buffer (b_head)->sw_if_index[VLIB_RX] = vui->sw_if_index;
+ vnet_buffer (b_head)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ b_head->error = node->errors[error];
+
+ if (PREDICT_FALSE (n_trace > n_rx_packets))
+ vec_add1 (vui->d_trace_buffers, bi_head);
+
+ if (PREDICT_FALSE (error))
+ {
+ drops++;
+ next0 = VHOST_USER_RX_NEXT_DROP;
+ }
+ else
+ {
+ n_rx_bytes +=
+ b_head->current_length +
+ b_head->total_length_not_including_first_buffer;
+ n_rx_packets++;
+ next0 = VHOST_USER_RX_NEXT_ETHERNET_INPUT;
+ }
+
+ to_next[0] = bi_head;
+ to_next++;
+ n_left_to_next--;
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next,
+ bi_head, next0);
+ n_left--;
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
- }
-
- if (PREDICT_TRUE(vum->rx_buffers[cpu_index] != 0))
- _vec_len(vum->rx_buffers[cpu_index]) = rx_len;
+ if (PREDICT_TRUE (vum->rx_buffers[cpu_index] != 0))
+ _vec_len (vum->rx_buffers[cpu_index]) = rx_len;
/* give buffers back to driver */
- CLIB_MEMORY_BARRIER();
+ CLIB_MEMORY_BARRIER ();
txvq->used->idx = txvq->last_used_idx;
- vhost_user_log_dirty_ring(vui, txvq, idx);
+ vhost_user_log_dirty_ring (vui, txvq, idx);
if (PREDICT_FALSE (vec_len (vui->d_trace_buffers) > 0))
- {
- vhost_user_rx_trace (vm, node, vui, VHOST_NET_VRING_IDX_TX);
- vlib_set_trace_count (vm, node, n_trace - vec_len (vui->d_trace_buffers));
- }
+ {
+ vhost_user_rx_trace (vm, node, vui, VHOST_NET_VRING_IDX_TX);
+ vlib_set_trace_count (vm, node,
+ n_trace - vec_len (vui->d_trace_buffers));
+ }
/* interrupt (call) handling */
- if((txvq->callfd > 0) && !(txvq->avail->flags & 1)) {
- txvq->n_since_last_int += n_rx_packets;
+ if ((txvq->callfd > 0) && !(txvq->avail->flags & 1))
+ {
+ txvq->n_since_last_int += n_rx_packets;
- if(txvq->n_since_last_int > vum->coalesce_frames)
- vhost_user_send_call(vm, txvq);
- }
+ if (txvq->n_since_last_int > vum->coalesce_frames)
+ vhost_user_send_call (vm, txvq);
+ }
- if (PREDICT_FALSE(drops)) {
- vlib_increment_simple_counter
- (vnet_main.interface_main.sw_if_counters
- + VNET_INTERFACE_COUNTER_DROP, os_get_cpu_number(),
- vui->sw_if_index, drops);
- }
+ if (PREDICT_FALSE (drops))
+ {
+ vlib_increment_simple_counter
+ (vnet_main.interface_main.sw_if_counters
+ + VNET_INTERFACE_COUNTER_DROP, os_get_cpu_number (),
+ vui->sw_if_index, drops);
+ }
/* increase rx counters */
vlib_increment_combined_counter
- (vnet_main.interface_main.combined_sw_if_counters
- + VNET_INTERFACE_COUNTER_RX,
- os_get_cpu_number(),
- vui->sw_if_index,
- n_rx_packets, n_rx_bytes);
+ (vnet_main.interface_main.combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ os_get_cpu_number (), vui->sw_if_index, n_rx_packets, n_rx_bytes);
return n_rx_packets;
}
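
n_left above counts pending avail entries; the ring indices are free-running 16-bit counters, so the wrapped case matters. A worked example of the modular count, shown with the conventional single-subtraction idiom and hypothetical index values:

    /* last_avail_idx = 0xFFFE, avail->idx has wrapped to 0x0002 */
    uint16_t last = 0xFFFE, avail = 0x0002;
    uint16_t pending = (uint16_t) (avail - last);   /* 4: 0xFFFE..0x0001 */

The code above splits the wrapped and non-wrapped cases explicitly rather than relying on unsigned wraparound.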
static uword
vhost_user_input (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * f)
+ vlib_node_runtime_t * node, vlib_frame_t * f)
{
- vhost_user_main_t * vum = &vhost_user_main;
- dpdk_main_t * dm = &dpdk_main;
- vhost_user_intf_t * vui;
+ vhost_user_main_t *vum = &vhost_user_main;
+ dpdk_main_t *dm = &dpdk_main;
+ vhost_user_intf_t *vui;
uword n_rx_packets = 0;
- u32 cpu_index = os_get_cpu_number();
+ u32 cpu_index = os_get_cpu_number ();
int i;
- for(i = 0; i < vec_len(vum->vhost_user_interfaces); i++ )
+ for (i = 0; i < vec_len (vum->vhost_user_interfaces); i++)
{
- vui = vec_elt_at_index(vum->vhost_user_interfaces, i);
+ vui = vec_elt_at_index (vum->vhost_user_interfaces, i);
if (vui->is_up &&
- (i % dm->input_cpu_count) == (cpu_index - dm->input_cpu_first_index))
- n_rx_packets += vhost_user_if_input (vm, vum, vui, node);
+ (i % dm->input_cpu_count) ==
+ (cpu_index - dm->input_cpu_first_index))
+ n_rx_packets += vhost_user_if_input (vm, vum, vui, node);
}
return n_rx_packets;
}
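
The modulo condition above pins each interface to exactly one input thread, so the rx path needs no locking; a worked example with hypothetical thread-layout values:

    /* input_cpu_first_index = 1, input_cpu_count = 2 (worker threads 1, 2):
     * interface 0: 0 % 2 == 0 == (1 - 1)  -> polled by cpu_index 1
     * interface 1: 1 % 2 == 1 == (2 - 1)  -> polled by cpu_index 2
     * interface 2: 2 % 2 == 0 == (1 - 1)  -> polled by cpu_index 1, etc. */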
+/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vhost_user_input_node) = {
.function = vhost_user_input,
.type = VLIB_NODE_TYPE_INPUT,
@@ -1148,90 +1250,103 @@ VLIB_REGISTER_NODE (vhost_user_input_node) = {
};
VLIB_NODE_FUNCTION_MULTIARCH (vhost_user_input_node, vhost_user_input)
+/* *INDENT-ON* */
static uword
vhost_user_intfc_tx (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
{
- u32 * buffers = vlib_frame_args (frame);
+ u32 *buffers = vlib_frame_args (frame);
u32 n_left = 0;
u16 used_index;
- vhost_user_main_t * vum = &vhost_user_main;
+ vhost_user_main_t *vum = &vhost_user_main;
uword n_packets = 0;
- vnet_interface_output_runtime_t * rd = (void *) node->runtime_data;
- vhost_user_intf_t * vui = vec_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
- vhost_user_vring_t * rxvq = &vui->vrings[VHOST_NET_VRING_IDX_RX];
+ vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
+ vhost_user_intf_t *vui =
+ vec_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
+ vhost_user_vring_t *rxvq = &vui->vrings[VHOST_NET_VRING_IDX_RX];
u16 qsz_mask;
u8 error = VHOST_USER_TX_FUNC_ERROR_NONE;
- if (PREDICT_FALSE(!vui->is_up))
- goto done2;
+ if (PREDICT_FALSE (!vui->is_up))
+ goto done2;
- if (PREDICT_FALSE(!rxvq->desc || !rxvq->avail || vui->sock_errno != 0 || !rxvq->enabled)) {
- error = VHOST_USER_TX_FUNC_ERROR_NOT_READY;
- goto done2;
- }
+ if (PREDICT_FALSE
+ (!rxvq->desc || !rxvq->avail || vui->sock_errno != 0 || !rxvq->enabled))
+ {
+ error = VHOST_USER_TX_FUNC_ERROR_NOT_READY;
+ goto done2;
+ }
- if (PREDICT_FALSE(vui->lockp != 0))
+ if (PREDICT_FALSE (vui->lockp != 0))
{
while (__sync_lock_test_and_set (vui->lockp, 1))
- ;
+ ;
}
/* only bit 0 of avail.flags is used so we don't want to deal with this
interface if any other bit is set */
- if (PREDICT_FALSE(rxvq->avail->flags & 0xFFFE)) {
- error = VHOST_USER_TX_FUNC_ERROR_NOT_READY;
- goto done2;
- }
+ if (PREDICT_FALSE (rxvq->avail->flags & 0xFFFE))
+ {
+ error = VHOST_USER_TX_FUNC_ERROR_NOT_READY;
+ goto done2;
+ }
- if (PREDICT_FALSE((rxvq->avail->idx == rxvq->last_avail_idx))) {
- error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
- goto done2;
- }
+ if (PREDICT_FALSE ((rxvq->avail->idx == rxvq->last_avail_idx)))
+ {
+ error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
+ goto done2;
+ }
n_left = n_packets = frame->n_vectors;
used_index = rxvq->used->idx;
- qsz_mask = rxvq->qsz - 1; /* qsz is always power of 2 */
+ qsz_mask = rxvq->qsz - 1; /* qsz is always power of 2 */
while (n_left > 0)
- {
+ {
vlib_buffer_t *b0, *current_b0;
u16 desc_chain_head, desc_current, desc_len;
void *buffer_addr;
uword offset;
if (n_left >= 2)
- vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);
+ vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);
b0 = vlib_get_buffer (vm, buffers[0]);
buffers++;
n_left--;
- if (PREDICT_FALSE(rxvq->last_avail_idx == rxvq->avail->idx)) {
- error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
- goto done;
- }
+ if (PREDICT_FALSE (rxvq->last_avail_idx == rxvq->avail->idx))
+ {
+ error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
+ goto done;
+ }
- desc_current = desc_chain_head = rxvq->avail->ring[rxvq->last_avail_idx & qsz_mask];
+ desc_current = desc_chain_head =
+ rxvq->avail->ring[rxvq->last_avail_idx & qsz_mask];
offset = vui->virtio_net_hdr_sz;
desc_len = offset;
- if (PREDICT_FALSE(!(buffer_addr = map_guest_mem(vui, rxvq->desc[desc_current].addr)))) {
- error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
- goto done;
- }
- CLIB_PREFETCH(buffer_addr, clib_min(rxvq->desc[desc_current].len,
- 4*CLIB_CACHE_LINE_BYTES), STORE);
-
- virtio_net_hdr_mrg_rxbuf_t * hdr = (virtio_net_hdr_mrg_rxbuf_t *) buffer_addr;
+ if (PREDICT_FALSE
+ (!(buffer_addr =
+ map_guest_mem (vui, rxvq->desc[desc_current].addr))))
+ {
+ error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
+ goto done;
+ }
+ CLIB_PREFETCH (buffer_addr, clib_min (rxvq->desc[desc_current].len,
+ 4 * CLIB_CACHE_LINE_BYTES),
+ STORE);
+
+ virtio_net_hdr_mrg_rxbuf_t *hdr =
+ (virtio_net_hdr_mrg_rxbuf_t *) buffer_addr;
hdr->hdr.flags = 0;
hdr->hdr.gso_type = 0;
- vhost_user_log_dirty_pages(vui, rxvq->desc[desc_current].addr, vui->virtio_net_hdr_sz);
+ vhost_user_log_dirty_pages (vui, rxvq->desc[desc_current].addr,
+ vui->virtio_net_hdr_sz);
if (vui->virtio_net_hdr_sz == 12)
- hdr->num_buffers = 1;
+ hdr->num_buffers = 1;
u16 bytes_left = b0->current_length;
buffer_addr += offset;
@@ -1239,134 +1354,167 @@ vhost_user_intfc_tx (vlib_main_t * vm,
//FIXME: This was in the code but I don't think it is valid
/*if (PREDICT_FALSE(!vui->is_any_layout && (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT))) {
- rxvq->desc[desc_current].len = vui->virtio_net_hdr_sz;
- }*/
-
- while(1) {
- if (!bytes_left) { //Get new input
- if (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT) {
- current_b0 = vlib_get_buffer(vm, current_b0->next_buffer);
- bytes_left = current_b0->current_length;
- } else {
- //End of packet
- break;
- }
- }
-
- if (rxvq->desc[desc_current].len <= offset) { //Get new output
- if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) {
- offset = 0;
- desc_current = rxvq->desc[desc_current].next;
- if (PREDICT_FALSE(!(buffer_addr = map_guest_mem(vui, rxvq->desc[desc_current].addr)))) {
- used_index -= hdr->num_buffers - 1;
- rxvq->last_avail_idx -= hdr->num_buffers - 1;
- error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
- goto done;
- }
- } else if (vui->virtio_net_hdr_sz == 12) { //MRG is available
-
- //Move from available to used buffer
- rxvq->used->ring[used_index & qsz_mask].id = desc_chain_head;
- rxvq->used->ring[used_index & qsz_mask].len = desc_len;
- vhost_user_log_dirty_ring(vui, rxvq, ring[used_index & qsz_mask]);
- rxvq->last_avail_idx++;
- used_index++;
- hdr->num_buffers++;
-
- if (PREDICT_FALSE(rxvq->last_avail_idx == rxvq->avail->idx)) {
- //Dequeue queued descriptors for this packet
- used_index -= hdr->num_buffers - 1;
- rxvq->last_avail_idx -= hdr->num_buffers - 1;
- error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
- goto done;
- }
-
- //Look at next one
- desc_chain_head = rxvq->avail->ring[rxvq->last_avail_idx & qsz_mask];
- desc_current = desc_chain_head;
- desc_len = 0;
- offset = 0;
- if (PREDICT_FALSE(!(buffer_addr = map_guest_mem(vui, rxvq->desc[desc_current].addr)))) {
- //Dequeue queued descriptors for this packet
- used_index -= hdr->num_buffers - 1;
- rxvq->last_avail_idx -= hdr->num_buffers - 1;
- error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
- goto done;
- }
- } else {
- error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
- goto done;
- }
- }
-
- u16 bytes_to_copy = bytes_left > (rxvq->desc[desc_current].len - offset) ? (rxvq->desc[desc_current].len - offset) : bytes_left;
- clib_memcpy(buffer_addr, vlib_buffer_get_current (current_b0) + current_b0->current_length - bytes_left, bytes_to_copy);
-
- vhost_user_log_dirty_pages(vui, rxvq->desc[desc_current].addr + offset, bytes_to_copy);
- bytes_left -= bytes_to_copy;
- offset += bytes_to_copy;
- buffer_addr += bytes_to_copy;
- desc_len += bytes_to_copy;
- }
+ rxvq->desc[desc_current].len = vui->virtio_net_hdr_sz;
+ } */
+
+ while (1)
+ {
+ if (!bytes_left)
+ { //Get new input
+ if (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT)
+ {
+ current_b0 = vlib_get_buffer (vm, current_b0->next_buffer);
+ bytes_left = current_b0->current_length;
+ }
+ else
+ {
+ //End of packet
+ break;
+ }
+ }
+
+ if (rxvq->desc[desc_current].len <= offset)
+ { //Get new output
+ if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
+ {
+ offset = 0;
+ desc_current = rxvq->desc[desc_current].next;
+ if (PREDICT_FALSE
+ (!(buffer_addr =
+ map_guest_mem (vui, rxvq->desc[desc_current].addr))))
+ {
+ used_index -= hdr->num_buffers - 1;
+ rxvq->last_avail_idx -= hdr->num_buffers - 1;
+ error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
+ goto done;
+ }
+ }
+ else if (vui->virtio_net_hdr_sz == 12)
+ { //MRG is available
+
+ //Move from available to used buffer
+ rxvq->used->ring[used_index & qsz_mask].id =
+ desc_chain_head;
+ rxvq->used->ring[used_index & qsz_mask].len = desc_len;
+ vhost_user_log_dirty_ring (vui, rxvq,
+ ring[used_index & qsz_mask]);
+ rxvq->last_avail_idx++;
+ used_index++;
+ hdr->num_buffers++;
+
+ if (PREDICT_FALSE
+ (rxvq->last_avail_idx == rxvq->avail->idx))
+ {
+ //Dequeue queued descriptors for this packet
+ used_index -= hdr->num_buffers - 1;
+ rxvq->last_avail_idx -= hdr->num_buffers - 1;
+ error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
+ goto done;
+ }
+
+ //Look at next one
+ desc_chain_head =
+ rxvq->avail->ring[rxvq->last_avail_idx & qsz_mask];
+ desc_current = desc_chain_head;
+ desc_len = 0;
+ offset = 0;
+ if (PREDICT_FALSE
+ (!(buffer_addr =
+ map_guest_mem (vui, rxvq->desc[desc_current].addr))))
+ {
+ //Dequeue queued descriptors for this packet
+ used_index -= hdr->num_buffers - 1;
+ rxvq->last_avail_idx -= hdr->num_buffers - 1;
+ error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
+ goto done;
+ }
+ }
+ else
+ {
+ error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
+ goto done;
+ }
+ }
+
+ u16 bytes_to_copy =
+ bytes_left >
+ (rxvq->desc[desc_current].len -
+ offset) ? (rxvq->desc[desc_current].len - offset) : bytes_left;
+ clib_memcpy (buffer_addr,
+ vlib_buffer_get_current (current_b0) +
+ current_b0->current_length - bytes_left,
+ bytes_to_copy);
+
+ vhost_user_log_dirty_pages (vui,
+ rxvq->desc[desc_current].addr + offset,
+ bytes_to_copy);
+ bytes_left -= bytes_to_copy;
+ offset += bytes_to_copy;
+ buffer_addr += bytes_to_copy;
+ desc_len += bytes_to_copy;
+ }
//Move from available to used ring
rxvq->used->ring[used_index & qsz_mask].id = desc_chain_head;
rxvq->used->ring[used_index & qsz_mask].len = desc_len;
- vhost_user_log_dirty_ring(vui, rxvq, ring[used_index & qsz_mask]);
+ vhost_user_log_dirty_ring (vui, rxvq, ring[used_index & qsz_mask]);
rxvq->last_avail_idx++;
used_index++;
- }
+ }
done:
- CLIB_MEMORY_BARRIER();
+ CLIB_MEMORY_BARRIER ();
rxvq->used->idx = used_index;
- vhost_user_log_dirty_ring(vui, rxvq, idx);
+ vhost_user_log_dirty_ring (vui, rxvq, idx);
/* interrupt (call) handling */
- if((rxvq->callfd > 0) && !(rxvq->avail->flags & 1)) {
- rxvq->n_since_last_int += n_packets - n_left;
+ if ((rxvq->callfd > 0) && !(rxvq->avail->flags & 1))
+ {
+ rxvq->n_since_last_int += n_packets - n_left;
- if(rxvq->n_since_last_int > vum->coalesce_frames)
- vhost_user_send_call(vm, rxvq);
- }
+ if (rxvq->n_since_last_int > vum->coalesce_frames)
+ vhost_user_send_call (vm, rxvq);
+ }
done2:
- if (PREDICT_FALSE(vui->lockp != 0))
- *vui->lockp = 0;
-
- if (PREDICT_FALSE(n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE)) {
- vlib_error_count(vm, node->node_index, error, n_left);
- vlib_increment_simple_counter
- (vnet_main.interface_main.sw_if_counters
- + VNET_INTERFACE_COUNTER_DROP,
- os_get_cpu_number(),
- vui->sw_if_index,
- n_left);
- }
+ if (PREDICT_FALSE (vui->lockp != 0))
+ *vui->lockp = 0;
+
+ if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
+ {
+ vlib_error_count (vm, node->node_index, error, n_left);
+ vlib_increment_simple_counter
+ (vnet_main.interface_main.sw_if_counters
+ + VNET_INTERFACE_COUNTER_DROP,
+ os_get_cpu_number (), vui->sw_if_index, n_left);
+ }
vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
return frame->n_vectors;
}
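
The tx path above depends on the mergeable receive buffer layout whenever virtio_net_hdr_sz is 12: num_buffers in the first buffer's header records how many avail ring entries the packet consumed. For reference, the virtio-net header layouts (field order per the virtio spec, sketched here only as a reader aid):

    struct virtio_net_hdr {              /* 10 bytes */
      uint8_t  flags;
      uint8_t  gso_type;
      uint16_t hdr_len;
      uint16_t gso_size;
      uint16_t csum_start;
      uint16_t csum_offset;
    };

    struct virtio_net_hdr_mrg_rxbuf {    /* 12 bytes, when MRG_RXBUF negotiated */
      struct virtio_net_hdr hdr;
      uint16_t num_buffers;              /* incremented above per extra chain */
    };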
static clib_error_t *
-vhost_user_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
+vhost_user_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
+ u32 flags)
{
- vnet_hw_interface_t * hif = vnet_get_hw_interface (vnm, hw_if_index);
+ vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
- vhost_user_main_t * vum = &vhost_user_main;
- vhost_user_intf_t * vui = vec_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui =
+ vec_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
vui->admin_up = is_up;
if (is_up)
vnet_hw_interface_set_flags (vnm, vui->hw_if_index,
- VNET_HW_INTERFACE_FLAG_LINK_UP);
+ VNET_HW_INTERFACE_FLAG_LINK_UP);
return /* no error */ 0;
}
+/* *INDENT-OFF* */
VNET_DEVICE_CLASS (vhost_user_dev_class,static) = {
.name = "vhost-user",
.tx_function = vhost_user_intfc_tx,
@@ -1380,100 +1528,116 @@ VNET_DEVICE_CLASS (vhost_user_dev_class,static) = {
VLIB_DEVICE_TX_FUNCTION_MULTIARCH (vhost_user_dev_class,
vhost_user_intfc_tx)
+/* *INDENT-ON* */
static uword
vhost_user_process (vlib_main_t * vm,
- vlib_node_runtime_t * rt,
- vlib_frame_t * f)
+ vlib_node_runtime_t * rt, vlib_frame_t * f)
{
- vhost_user_main_t * vum = &vhost_user_main;
- vhost_user_intf_t * vui;
- struct sockaddr_un sun;
- int sockfd;
- unix_file_t template = {0};
- f64 timeout = 3153600000.0 /* 100 years */;
- uword *event_data = 0;
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui;
+ struct sockaddr_un sun;
+ int sockfd;
+ unix_file_t template = { 0 };
+ f64 timeout = 3153600000.0 /* 100 years */ ;
+ uword *event_data = 0;
+

- sockfd = socket(AF_UNIX, SOCK_STREAM, 0);
- sun.sun_family = AF_UNIX;
- template.read_function = vhost_user_socket_read;
- template.error_function = vhost_user_socket_error;
+ sockfd = socket (AF_UNIX, SOCK_STREAM, 0);
+ sun.sun_family = AF_UNIX;
+ template.read_function = vhost_user_socket_read;
+ template.error_function = vhost_user_socket_error;

- if (sockfd < 0)
- return 0;
+ if (sockfd < 0)
+ return 0;

- while (1) {
- vlib_process_wait_for_event_or_clock (vm, timeout);
- vlib_process_get_events (vm, &event_data);
- vec_reset_length (event_data);
-
- timeout = 3.0;
-
- vec_foreach (vui, vum->vhost_user_interfaces) {
-
- if (vui->sock_is_server || !vui->active)
- continue;
-
- if (vui->unix_fd == -1) {
- /* try to connect */
-
- strncpy(sun.sun_path, (char *) vui->sock_filename, sizeof(sun.sun_path) - 1);
-
- if (connect(sockfd, (struct sockaddr *) &sun, sizeof(struct sockaddr_un)) == 0) {
- vui->sock_errno = 0;
- vui->unix_fd = sockfd;
- template.file_descriptor = sockfd;
- vui->unix_file_index = unix_file_add (&unix_main, &template);
- hash_set (vum->vhost_user_interface_index_by_sock_fd, sockfd, vui - vum->vhost_user_interfaces);
-
- sockfd = socket(AF_UNIX, SOCK_STREAM, 0);
- if (sockfd < 0)
- return 0;
- }
- else {
- vui->sock_errno = errno;
- }
- } else {
- /* check if socket is alive */
- int error = 0;
- socklen_t len = sizeof (error);
- int retval = getsockopt(vui->unix_fd, SOL_SOCKET, SO_ERROR, &error, &len);
-
- if (retval)
- vhost_user_if_disconnect(vui);
- }
- }
+ while (1)
+ {
+ vlib_process_wait_for_event_or_clock (vm, timeout);
+ vlib_process_get_events (vm, &event_data);
+ vec_reset_length (event_data);
+
+ timeout = 3.0;
+
+ vec_foreach (vui, vum->vhost_user_interfaces)
+ {
+
+ if (vui->sock_is_server || !vui->active)
+ continue;
+
+ if (vui->unix_fd == -1)
+ {
+ /* try to connect */
+
+ strncpy (sun.sun_path, (char *) vui->sock_filename,
+ sizeof (sun.sun_path) - 1);
+
+ if (connect
+ (sockfd, (struct sockaddr *) &sun,
+ sizeof (struct sockaddr_un)) == 0)
+ {
+ vui->sock_errno = 0;
+ vui->unix_fd = sockfd;
+ template.file_descriptor = sockfd;
+ vui->unix_file_index = unix_file_add (&unix_main, &template);
+ hash_set (vum->vhost_user_interface_index_by_sock_fd, sockfd,
+ vui - vum->vhost_user_interfaces);
+
+ sockfd = socket (AF_UNIX, SOCK_STREAM, 0);
+ if (sockfd < 0)
+ return 0;
+ }
+ else
+ {
+ vui->sock_errno = errno;
+ }
+ }
+ else
+ {
+ /* check if socket is alive */
+ int error = 0;
+ socklen_t len = sizeof (error);
+ int retval =
+ getsockopt (vui->unix_fd, SOL_SOCKET, SO_ERROR, &error, &len);
+
+ if (retval)
+ vhost_user_if_disconnect (vui);
+ }
+ }
}
- return 0;
+ return 0;
}
+/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vhost_user_process_node,static) = {
.function = vhost_user_process,
.type = VLIB_NODE_TYPE_PROCESS,
.name = "vhost-user-process",
};
+/* *INDENT-ON* */

-int vhost_user_delete_if(vnet_main_t * vnm, vlib_main_t * vm,
- u32 sw_if_index)
+int
+vhost_user_delete_if (vnet_main_t * vnm, vlib_main_t * vm, u32 sw_if_index)
{
- vhost_user_main_t * vum = &vhost_user_main;
- vhost_user_intf_t * vui;
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui;
uword *p = NULL;
int rv = 0;
- p = hash_get (vum->vhost_user_interface_index_by_sw_if_index,
- sw_if_index);
- if (p == 0) {
- return VNET_API_ERROR_INVALID_SW_IF_INDEX;
- } else {
- vui = vec_elt_at_index (vum->vhost_user_interfaces, p[0]);
- }
+ p = hash_get (vum->vhost_user_interface_index_by_sw_if_index, sw_if_index);
+ if (p == 0)
+ {
+ return VNET_API_ERROR_INVALID_SW_IF_INDEX;
+ }
+ else
+ {
+ vui = vec_elt_at_index (vum->vhost_user_interfaces, p[0]);
+ }
// interface is inactive
vui->active = 0;
// disconnect interface sockets
- vhost_user_if_disconnect(vui);
+ vhost_user_if_disconnect (vui);
// add to inactive interface list
vec_add1 (vum->vhost_user_inactive_interfaces_index, p[0]);
@@ -1488,35 +1652,40 @@ int vhost_user_delete_if(vnet_main_t * vnm, vlib_main_t * vm,
}
// init server socket on specified sock_filename
-static int vhost_user_init_server_sock(const char * sock_filename, int *sockfd)
+static int
+vhost_user_init_server_sock (const char *sock_filename, int *sockfd)
{
int rv = 0;
- struct sockaddr_un un = {};
+ struct sockaddr_un un = { };
int fd;
/* create listening socket */
- fd = socket(AF_UNIX, SOCK_STREAM, 0);
+ fd = socket (AF_UNIX, SOCK_STREAM, 0);

- if (fd < 0) {
- return VNET_API_ERROR_SYSCALL_ERROR_1;
- }
+ if (fd < 0)
+ {
+ return VNET_API_ERROR_SYSCALL_ERROR_1;
+ }
un.sun_family = AF_UNIX;
- strncpy((char *) un.sun_path, (char *) sock_filename, sizeof(un.sun_path) - 1);
+ strncpy ((char *) un.sun_path, (char *) sock_filename,
+ sizeof (un.sun_path) - 1);
/* remove if exists */
- unlink( (char *) sock_filename);
+ unlink ((char *) sock_filename);

- if (bind(fd, (struct sockaddr *) &un, sizeof(un)) == -1) {
- rv = VNET_API_ERROR_SYSCALL_ERROR_2;
- goto error;
- }
+ if (bind (fd, (struct sockaddr *) &un, sizeof (un)) == -1)
+ {
+ rv = VNET_API_ERROR_SYSCALL_ERROR_2;
+ goto error;
+ }

- if (listen(fd, 1) == -1) {
- rv = VNET_API_ERROR_SYSCALL_ERROR_3;
- goto error;
- }
+ if (listen (fd, 1) == -1)
+ {
+ rv = VNET_API_ERROR_SYSCALL_ERROR_3;
+ goto error;
+ }

- unix_file_t template = {0};
+ unix_file_t template = { 0 };
template.read_function = vhost_user_socksvr_accept_ready;
template.file_descriptor = fd;
unix_file_add (&unix_main, &template);
@@ -1524,27 +1693,32 @@ static int vhost_user_init_server_sock(const char * sock_filename, int *sockfd)
return rv;
error:
- close(fd);
+ close (fd);
return rv;
}
// get new vhost_user_intf_t from inactive interfaces or create new one
-static vhost_user_intf_t *vhost_user_vui_new()
+static vhost_user_intf_t *
+vhost_user_vui_new ()
{
- vhost_user_main_t * vum = &vhost_user_main;
- vhost_user_intf_t * vui = NULL;
- int inactive_cnt = vec_len(vum->vhost_user_inactive_interfaces_index);
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui = NULL;
+ int inactive_cnt = vec_len (vum->vhost_user_inactive_interfaces_index);
// if there are any inactive ifaces
- if (inactive_cnt > 0) {
- // take last
- u32 vui_idx = vum->vhost_user_inactive_interfaces_index[inactive_cnt - 1];
- if (vec_len(vum->vhost_user_interfaces) > vui_idx) {
- vui = vec_elt_at_index (vum->vhost_user_interfaces, vui_idx);
- DBG_SOCK("reusing inactive vhost-user interface index %d", vui_idx);
+ if (inactive_cnt > 0)
+ {
+ // take last
+ u32 vui_idx =
+ vum->vhost_user_inactive_interfaces_index[inactive_cnt - 1];
+ if (vec_len (vum->vhost_user_interfaces) > vui_idx)
+ {
+ vui = vec_elt_at_index (vum->vhost_user_interfaces, vui_idx);
+ DBG_SOCK ("reusing inactive vhost-user interface index %d",
+ vui_idx);
+ }
+ // "remove" from inactive list
+ _vec_len (vum->vhost_user_inactive_interfaces_index) -= 1;
}
- // "remove" from inactive list
- _vec_len(vum->vhost_user_inactive_interfaces_index) -= 1;
- }
// vui was not retrieved from inactive ifaces - create new
if (!vui)
@@ -1553,34 +1727,37 @@ static vhost_user_intf_t *vhost_user_vui_new()
}
// create ethernet interface for vhost user intf
-static void vhost_user_create_ethernet(vnet_main_t * vnm, vlib_main_t * vm,
- vhost_user_intf_t *vui, u8 *hwaddress)
+static void
+vhost_user_create_ethernet (vnet_main_t * vnm, vlib_main_t * vm,
+ vhost_user_intf_t * vui, u8 * hwaddress)
{
- vhost_user_main_t * vum = &vhost_user_main;
+ vhost_user_main_t *vum = &vhost_user_main;
u8 hwaddr[6];
- clib_error_t * error;
+ clib_error_t *error;
/* create hw and sw interface */
- if (hwaddress) {
- clib_memcpy(hwaddr, hwaddress, 6);
- } else {
- f64 now = vlib_time_now(vm);
- u32 rnd;
- rnd = (u32) (now * 1e6);
- rnd = random_u32 (&rnd);
-
- clib_memcpy (hwaddr+2, &rnd, sizeof(rnd));
- hwaddr[0] = 2;
- hwaddr[1] = 0xfe;
- }
+ if (hwaddress)
+ {
+ clib_memcpy (hwaddr, hwaddress, 6);
+ }
+ else
+ {
+ f64 now = vlib_time_now (vm);
+ u32 rnd;
+ rnd = (u32) (now * 1e6);
+ rnd = random_u32 (&rnd);
+
+ clib_memcpy (hwaddr + 2, &rnd, sizeof (rnd));
+ hwaddr[0] = 2;
+ hwaddr[1] = 0xfe;
+ }
error = ethernet_register_interface
(vnm,
vhost_user_dev_class.index,
- vui - vum->vhost_user_interfaces /* device instance */,
- hwaddr /* ethernet address */,
- &vui->hw_if_index,
- 0 /* flag change */);
+ vui - vum->vhost_user_interfaces /* device instance */ ,
+ hwaddr /* ethernet address */ ,
+ &vui->hw_if_index, 0 /* flag change */ );
if (error)
clib_error_report (error);
@@ -1589,22 +1766,23 @@ static void vhost_user_create_ethernet(vnet_main_t * vnm, vlib_main_t * vm,
}
// initialize vui with specified attributes
-static void vhost_user_vui_init(vnet_main_t * vnm,
- vhost_user_intf_t *vui, int sockfd,
- const char * sock_filename,
- u8 is_server, u64 feature_mask,
- u32 * sw_if_index)
+static void
+vhost_user_vui_init (vnet_main_t * vnm,
+ vhost_user_intf_t * vui, int sockfd,
+ const char *sock_filename,
+ u8 is_server, u64 feature_mask, u32 * sw_if_index)
{
- vnet_sw_interface_t * sw;
+ vnet_sw_interface_t *sw;
sw = vnet_get_hw_sw_interface (vnm, vui->hw_if_index);
- vlib_thread_main_t * tm = vlib_get_thread_main();
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
int q;
vui->unix_fd = sockfd;
vui->sw_if_index = sw->sw_if_index;
vui->num_vrings = 2;
vui->sock_is_server = is_server;
- strncpy(vui->sock_filename, sock_filename, ARRAY_LEN(vui->sock_filename)-1);
+ strncpy (vui->sock_filename, sock_filename,
+ ARRAY_LEN (vui->sock_filename) - 1);
vui->sock_errno = 0;
vui->is_up = 0;
vui->feature_mask = feature_mask;
@@ -1612,128 +1790,138 @@ static void vhost_user_vui_init(vnet_main_t * vnm,
vui->unix_file_index = ~0;
vui->log_base_addr = 0;
- for (q = 0; q < 2; q++) {
- vui->vrings[q].enabled = 0;
- }
+ for (q = 0; q < 2; q++)
+ {
+ vui->vrings[q].enabled = 0;
+ }

- vnet_hw_interface_set_flags (vnm, vui->hw_if_index, 0);
+ vnet_hw_interface_set_flags (vnm, vui->hw_if_index, 0);
if (sw_if_index)
- *sw_if_index = vui->sw_if_index;
+ *sw_if_index = vui->sw_if_index;
if (tm->n_vlib_mains > 1)
- {
- vui->lockp = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
- CLIB_CACHE_LINE_BYTES);
- memset ((void *) vui->lockp, 0, CLIB_CACHE_LINE_BYTES);
- }
+ {
+ vui->lockp = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
+ CLIB_CACHE_LINE_BYTES);
+ memset ((void *) vui->lockp, 0, CLIB_CACHE_LINE_BYTES);
+ }
}
// register vui and start polling on it
-static void vhost_user_vui_register(vlib_main_t * vm, vhost_user_intf_t *vui)
+static void
+vhost_user_vui_register (vlib_main_t * vm, vhost_user_intf_t * vui)
{
- vhost_user_main_t * vum = &vhost_user_main;
- dpdk_main_t * dm = &dpdk_main;
+ vhost_user_main_t *vum = &vhost_user_main;
+ dpdk_main_t *dm = &dpdk_main;
int cpu_index;
- vlib_thread_main_t * tm = vlib_get_thread_main();
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
hash_set (vum->vhost_user_interface_index_by_listener_fd, vui->unix_fd,
- vui - vum->vhost_user_interfaces);
+ vui - vum->vhost_user_interfaces);
hash_set (vum->vhost_user_interface_index_by_sw_if_index, vui->sw_if_index,
- vui - vum->vhost_user_interfaces);
+ vui - vum->vhost_user_interfaces);
/* start polling */
cpu_index = dm->input_cpu_first_index +
- (vui - vum->vhost_user_interfaces) % dm->input_cpu_count;
+ (vui - vum->vhost_user_interfaces) % dm->input_cpu_count;
if (tm->n_vlib_mains == 1)
vlib_node_set_state (vm, vhost_user_input_node.index,
- VLIB_NODE_STATE_POLLING);
+ VLIB_NODE_STATE_POLLING);
else
vlib_node_set_state (vlib_mains[cpu_index], vhost_user_input_node.index,
- VLIB_NODE_STATE_POLLING);
+ VLIB_NODE_STATE_POLLING);
/* tell process to start polling for sockets */
- vlib_process_signal_event(vm, vhost_user_process_node.index, 0, 0);
+ vlib_process_signal_event (vm, vhost_user_process_node.index, 0, 0);
}
-int vhost_user_create_if(vnet_main_t * vnm, vlib_main_t * vm,
- const char * sock_filename,
- u8 is_server,
- u32 * sw_if_index,
- u64 feature_mask,
- u8 renumber, u32 custom_dev_instance,
- u8 *hwaddr)
+int
+vhost_user_create_if (vnet_main_t * vnm, vlib_main_t * vm,
+ const char *sock_filename,
+ u8 is_server,
+ u32 * sw_if_index,
+ u64 feature_mask,
+ u8 renumber, u32 custom_dev_instance, u8 * hwaddr)
{
- vhost_user_intf_t * vui = NULL;
+ vhost_user_intf_t *vui = NULL;
u32 sw_if_idx = ~0;
int sockfd = -1;
int rv = 0;
- if (is_server) {
- if ((rv = vhost_user_init_server_sock (sock_filename, &sockfd)) != 0) {
- return rv;
- }
- }
+ if (is_server)
+ {
+ if ((rv = vhost_user_init_server_sock (sock_filename, &sockfd)) != 0)
+ {
+ return rv;
+ }
+ }
vui = vhost_user_vui_new ();
- ASSERT(vui != NULL);
+ ASSERT (vui != NULL);
vhost_user_create_ethernet (vnm, vm, vui, hwaddr);
vhost_user_vui_init (vnm, vui, sockfd, sock_filename, is_server,
- feature_mask, &sw_if_idx);
+ feature_mask, &sw_if_idx);

- if (renumber) {
- vnet_interface_name_renumber (sw_if_idx, custom_dev_instance);
- }
+ if (renumber)
+ {
+ vnet_interface_name_renumber (sw_if_idx, custom_dev_instance);
+ }
vhost_user_vui_register (vm, vui);
if (sw_if_index)
- *sw_if_index = sw_if_idx;
+ *sw_if_index = sw_if_idx;
return rv;
}
-int vhost_user_modify_if(vnet_main_t * vnm, vlib_main_t * vm,
- const char * sock_filename,
- u8 is_server,
- u32 sw_if_index,
- u64 feature_mask,
- u8 renumber, u32 custom_dev_instance)
+int
+vhost_user_modify_if (vnet_main_t * vnm, vlib_main_t * vm,
+ const char *sock_filename,
+ u8 is_server,
+ u32 sw_if_index,
+ u64 feature_mask, u8 renumber, u32 custom_dev_instance)
{
- vhost_user_main_t * vum = &vhost_user_main;
- vhost_user_intf_t * vui = NULL;
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui = NULL;
u32 sw_if_idx = ~0;
int sockfd = -1;
int rv = 0;
uword *p = NULL;
- p = hash_get (vum->vhost_user_interface_index_by_sw_if_index,
- sw_if_index);
- if (p == 0) {
- return VNET_API_ERROR_INVALID_SW_IF_INDEX;
- } else {
- vui = vec_elt_at_index (vum->vhost_user_interfaces, p[0]);
- }
+ p = hash_get (vum->vhost_user_interface_index_by_sw_if_index, sw_if_index);
+ if (p == 0)
+ {
+ return VNET_API_ERROR_INVALID_SW_IF_INDEX;
+ }
+ else
+ {
+ vui = vec_elt_at_index (vum->vhost_user_interfaces, p[0]);
+ }
// interface is inactive
vui->active = 0;
// disconnect interface sockets
- vhost_user_if_disconnect(vui);
+ vhost_user_if_disconnect (vui);

- if (is_server) {
- if ((rv = vhost_user_init_server_sock (sock_filename, &sockfd)) != 0) {
- return rv;
- }
- }
+ if (is_server)
+ {
+ if ((rv = vhost_user_init_server_sock (sock_filename, &sockfd)) != 0)
+ {
+ return rv;
+ }
+ }
vhost_user_vui_init (vnm, vui, sockfd, sock_filename, is_server,
- feature_mask, &sw_if_idx);
+ feature_mask, &sw_if_idx);

- if (renumber) {
- vnet_interface_name_renumber (sw_if_idx, custom_dev_instance);
- }
+ if (renumber)
+ {
+ vnet_interface_name_renumber (sw_if_idx, custom_dev_instance);
+ }
vhost_user_vui_register (vm, vui);
@@ -1742,124 +1930,135 @@ int vhost_user_modify_if(vnet_main_t * vnm, vlib_main_t * vm,
clib_error_t *
vhost_user_connect_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
{
- unformat_input_t _line_input, * line_input = &_line_input;
- u8 * sock_filename = NULL;
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u8 *sock_filename = NULL;
u32 sw_if_index;
u8 is_server = 0;
- u64 feature_mask = (u64)~0;
+ u64 feature_mask = (u64) ~ 0;
u8 renumber = 0;
u32 custom_dev_instance = ~0;
u8 hwaddr[6];
u8 *hw = NULL;
/* Get a line of input. */
- if (! unformat_user (input, unformat_line_input, line_input))
+ if (!unformat_user (input, unformat_line_input, line_input))
return 0;
- while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) {
- if (unformat (line_input, "socket %s", &sock_filename))
- ;
- else if (unformat (line_input, "server"))
- is_server = 1;
- else if (unformat (line_input, "feature-mask 0x%llx", &feature_mask))
- ;
- else if (unformat (line_input, "hwaddr %U", unformat_ethernet_address, hwaddr))
- hw = hwaddr;
- else if (unformat (line_input, "renumber %d", &custom_dev_instance)) {
- renumber = 1;
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "socket %s", &sock_filename))
+ ;
+ else if (unformat (line_input, "server"))
+ is_server = 1;
+ else if (unformat (line_input, "feature-mask 0x%llx", &feature_mask))
+ ;
+ else
+ if (unformat
+ (line_input, "hwaddr %U", unformat_ethernet_address, hwaddr))
+ hw = hwaddr;
+ else if (unformat (line_input, "renumber %d", &custom_dev_instance))
+ {
+ renumber = 1;
+ }
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
}
- else
- return clib_error_return (0, "unknown input `%U'",
- format_unformat_error, input);
- }
unformat_free (line_input);
- vnet_main_t *vnm = vnet_get_main();
+ vnet_main_t *vnm = vnet_get_main ();
int rv;
- if ((rv = vhost_user_create_if(vnm, vm, (char *)sock_filename,
- is_server, &sw_if_index, feature_mask,
- renumber, custom_dev_instance, hw))) {
- vec_free(sock_filename);
+ if ((rv = vhost_user_create_if (vnm, vm, (char *) sock_filename,
+ is_server, &sw_if_index, feature_mask,
+ renumber, custom_dev_instance, hw)))
+ {
+ vec_free (sock_filename);
return clib_error_return (0, "vhost_user_create_if returned %d", rv);
- }
+ }

- vec_free(sock_filename);
- vlib_cli_output(vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main(), sw_if_index);
+ vec_free (sock_filename);
+ vlib_cli_output (vm, "%U\n", format_vnet_sw_if_index_name, vnet_get_main (),
+ sw_if_index);
return 0;
}
clib_error_t *
vhost_user_delete_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
{
- unformat_input_t _line_input, * line_input = &_line_input;
+ unformat_input_t _line_input, *line_input = &_line_input;
u32 sw_if_index = ~0;
/* Get a line of input. */
- if (! unformat_user (input, unformat_line_input, line_input))
+ if (!unformat_user (input, unformat_line_input, line_input))
return 0;
- while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT) {
- if (unformat (line_input, "sw_if_index %d", &sw_if_index))
- ;
- else
- return clib_error_return (0, "unknown input `%U'",
- format_unformat_error, input);
- }
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "sw_if_index %d", &sw_if_index))
+ ;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
unformat_free (line_input);
- vnet_main_t *vnm = vnet_get_main();
+ vnet_main_t *vnm = vnet_get_main ();

- vhost_user_delete_if(vnm, vm, sw_if_index);
+ vhost_user_delete_if (vnm, vm, sw_if_index);
return 0;
}
-int vhost_user_dump_ifs(vnet_main_t * vnm, vlib_main_t * vm, vhost_user_intf_details_t **out_vuids)
+int
+vhost_user_dump_ifs (vnet_main_t * vnm, vlib_main_t * vm,
+ vhost_user_intf_details_t ** out_vuids)
{
int rv = 0;
- vhost_user_main_t * vum = &vhost_user_main;
- vhost_user_intf_t * vui;
- vhost_user_intf_details_t * r_vuids = NULL;
- vhost_user_intf_details_t * vuid = NULL;
- u32 * hw_if_indices = 0;
- vnet_hw_interface_t * hi;
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui;
+ vhost_user_intf_details_t *r_vuids = NULL;
+ vhost_user_intf_details_t *vuid = NULL;
+ u32 *hw_if_indices = 0;
+ vnet_hw_interface_t *hi;
u8 *s = NULL;
int i;
if (!out_vuids)
- return -1;
+ return -1;

- vec_foreach (vui, vum->vhost_user_interfaces) {
+ vec_foreach (vui, vum->vhost_user_interfaces)
+ {
if (vui->active)
- vec_add1(hw_if_indices, vui->hw_if_index);
+ vec_add1 (hw_if_indices, vui->hw_if_index);
}
- for (i = 0; i < vec_len (hw_if_indices); i++) {
- hi = vnet_get_hw_interface (vnm, hw_if_indices[i]);
- vui = vec_elt_at_index (vum->vhost_user_interfaces, hi->dev_instance);
-
- vec_add2(r_vuids, vuid, 1);
- vuid->sw_if_index = vui->sw_if_index;
- vuid->virtio_net_hdr_sz = vui->virtio_net_hdr_sz;
- vuid->features = vui->features;
- vuid->is_server = vui->sock_is_server;
- vuid->num_regions = vui->nregions;
- vuid->sock_errno = vui->sock_errno;
- strncpy((char *)vuid->sock_filename, (char *)vui->sock_filename,
- ARRAY_LEN(vuid->sock_filename)-1);
-
- s = format (s, "%v%c", hi->name, 0);
-
- strncpy((char *)vuid->if_name, (char *)s,
- ARRAY_LEN(vuid->if_name)-1);
- _vec_len(s) = 0;
- }
+ for (i = 0; i < vec_len (hw_if_indices); i++)
+ {
+ hi = vnet_get_hw_interface (vnm, hw_if_indices[i]);
+ vui = vec_elt_at_index (vum->vhost_user_interfaces, hi->dev_instance);
+
+ vec_add2 (r_vuids, vuid, 1);
+ vuid->sw_if_index = vui->sw_if_index;
+ vuid->virtio_net_hdr_sz = vui->virtio_net_hdr_sz;
+ vuid->features = vui->features;
+ vuid->is_server = vui->sock_is_server;
+ vuid->num_regions = vui->nregions;
+ vuid->sock_errno = vui->sock_errno;
+ strncpy ((char *) vuid->sock_filename, (char *) vui->sock_filename,
+ ARRAY_LEN (vuid->sock_filename) - 1);
+
+ s = format (s, "%v%c", hi->name, 0);
+
+ strncpy ((char *) vuid->if_name, (char *) s,
+ ARRAY_LEN (vuid->if_name) - 1);
+ _vec_len (s) = 0;
+ }
vec_free (s);
vec_free (hw_if_indices);
@@ -1871,124 +2070,152 @@ int vhost_user_dump_ifs(vnet_main_t * vnm, vlib_main_t * vm, vhost_user_intf_det
clib_error_t *
show_vhost_user_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
{
- clib_error_t * error = 0;
- vnet_main_t * vnm = vnet_get_main();
- vhost_user_main_t * vum = &vhost_user_main;
- vhost_user_intf_t * vui;
- u32 hw_if_index, * hw_if_indices = 0;
- vnet_hw_interface_t * hi;
+ clib_error_t *error = 0;
+ vnet_main_t *vnm = vnet_get_main ();
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui;
+ u32 hw_if_index, *hw_if_indices = 0;
+ vnet_hw_interface_t *hi;
int i, j, q;
int show_descr = 0;
- struct feat_struct { u8 bit; char *str;};
+ struct feat_struct
+ {
+ u8 bit;
+ char *str;
+ };
struct feat_struct *feat_entry;
static struct feat_struct feat_array[] = {
#define _(s,b) { .str = #s, .bit = b, },
- foreach_virtio_net_feature
+ foreach_virtio_net_feature
#undef _
- { .str = NULL }
+ {.str = NULL}
};
- while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
- if (unformat (input, "%U", unformat_vnet_hw_interface, vnm, &hw_if_index)) {
- vec_add1 (hw_if_indices, hw_if_index);
- vlib_cli_output(vm, "add %d", hw_if_index);
- }
- else if (unformat (input, "descriptors") || unformat (input, "desc") )
- show_descr = 1;
- else {
- error = clib_error_return (0, "unknown input `%U'",
- format_unformat_error, input);
- goto done;
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat
+ (input, "%U", unformat_vnet_hw_interface, vnm, &hw_if_index))
+ {
+ vec_add1 (hw_if_indices, hw_if_index);
+ vlib_cli_output (vm, "add %d", hw_if_index);
+ }
+ else if (unformat (input, "descriptors") || unformat (input, "desc"))
+ show_descr = 1;
+ else
+ {
+ error = clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ goto done;
+ }
}
- }
- if (vec_len (hw_if_indices) == 0) {
- vec_foreach (vui, vum->vhost_user_interfaces) {
- if (vui->active)
- vec_add1(hw_if_indices, vui->hw_if_index);
+ if (vec_len (hw_if_indices) == 0)
+ {
+ vec_foreach (vui, vum->vhost_user_interfaces)
+ {
+ if (vui->active)
+ vec_add1 (hw_if_indices, vui->hw_if_index);
+ }
}
- }
vlib_cli_output (vm, "Virtio vhost-user interfaces");
vlib_cli_output (vm, "Global:\n coalesce frames %d time %e\n\n",
- vum->coalesce_frames, vum->coalesce_time);
-
- for (i = 0; i < vec_len (hw_if_indices); i++) {
- hi = vnet_get_hw_interface (vnm, hw_if_indices[i]);
- vui = vec_elt_at_index (vum->vhost_user_interfaces, hi->dev_instance);
- vlib_cli_output (vm, "Interface: %s (ifindex %d)",
- hi->name, hw_if_indices[i]);
-
- vlib_cli_output (vm, "virtio_net_hdr_sz %d\n features (0x%llx): \n",
- vui->virtio_net_hdr_sz, vui->features);
-
- feat_entry = (struct feat_struct *) &feat_array;
- while(feat_entry->str) {
- if (vui->features & (1 << feat_entry->bit))
- vlib_cli_output (vm, " %s (%d)", feat_entry->str, feat_entry->bit);
- feat_entry++;
- }
-
- vlib_cli_output (vm, "\n");
-
- vlib_cli_output (vm, " socket filename %s type %s errno \"%s\"\n\n",
- vui->sock_filename, vui->sock_is_server ? "server" : "client",
- strerror(vui->sock_errno));
-
- vlib_cli_output (vm, " Memory regions (total %d)\n", vui->nregions);
-
- if (vui->nregions){
- vlib_cli_output(vm, " region fd guest_phys_addr memory_size userspace_addr mmap_offset mmap_addr\n");
- vlib_cli_output(vm, " ====== ===== ================== ================== ================== ================== ==================\n");
- }
- for (j = 0; j < vui->nregions; j++) {
- vlib_cli_output(vm, " %d %-5d 0x%016lx 0x%016lx 0x%016lx 0x%016lx 0x%016lx\n", j,
- vui->region_mmap_fd[j],
- vui->regions[j].guest_phys_addr,
- vui->regions[j].memory_size,
- vui->regions[j].userspace_addr,
- vui->regions[j].mmap_offset,
- pointer_to_uword( vui->region_mmap_addr[j]) );
- }
- for (q = 0; q < vui->num_vrings; q++) {
- vlib_cli_output(vm, "\n Virtqueue %d\n", q);
-
- vlib_cli_output(vm, " qsz %d last_avail_idx %d last_used_idx %d\n",
- vui->vrings[q].qsz,
- vui->vrings[q].last_avail_idx,
- vui->vrings[q].last_used_idx);
-
- if (vui->vrings[q].avail && vui->vrings[q].used)
- vlib_cli_output(vm, " avail.flags %x avail.idx %d used.flags %x used.idx %d\n",
- vui->vrings[q].avail->flags,
- vui->vrings[q].avail->idx,
- vui->vrings[q].used->flags,
- vui->vrings[q].used->idx);
-
- vlib_cli_output(vm, " kickfd %d callfd %d errfd %d\n",
- vui->vrings[q].kickfd,
- vui->vrings[q].callfd,
- vui->vrings[q].errfd);
-
- if (show_descr) {
- vlib_cli_output(vm, "\n descriptor table:\n");
- vlib_cli_output(vm, " id addr len flags next user_addr\n");
- vlib_cli_output(vm, " ===== ================== ===== ====== ===== ==================\n");
- for(j = 0; j < vui->vrings[q].qsz; j++) {
- vlib_cli_output(vm, " %-5d 0x%016lx %-5d 0x%04x %-5d 0x%016lx\n",
- j,
- vui->vrings[q].desc[j].addr,
- vui->vrings[q].desc[j].len,
- vui->vrings[q].desc[j].flags,
- vui->vrings[q].desc[j].next,
- pointer_to_uword(map_guest_mem(vui, vui->vrings[q].desc[j].addr)));}
- }
+ vum->coalesce_frames, vum->coalesce_time);
+
+ for (i = 0; i < vec_len (hw_if_indices); i++)
+ {
+ hi = vnet_get_hw_interface (vnm, hw_if_indices[i]);
+ vui = vec_elt_at_index (vum->vhost_user_interfaces, hi->dev_instance);
+ vlib_cli_output (vm, "Interface: %s (ifindex %d)",
+ hi->name, hw_if_indices[i]);
+
+ vlib_cli_output (vm, "virtio_net_hdr_sz %d\n features (0x%llx): \n",
+ vui->virtio_net_hdr_sz, vui->features);
+
+ feat_entry = (struct feat_struct *) &feat_array;
+ while (feat_entry->str)
+ {
+ if (vui->features & (1 << feat_entry->bit))
+ vlib_cli_output (vm, " %s (%d)", feat_entry->str,
+ feat_entry->bit);
+ feat_entry++;
+ }
+
+ vlib_cli_output (vm, "\n");
+
+
+ vlib_cli_output (vm, " socket filename %s type %s errno \"%s\"\n\n",
+ vui->sock_filename,
+ vui->sock_is_server ? "server" : "client",
+ strerror (vui->sock_errno));
+
+ vlib_cli_output (vm, " Memory regions (total %d)\n", vui->nregions);
+
+ if (vui->nregions)
+ {
+ vlib_cli_output (vm,
+ " region fd guest_phys_addr memory_size userspace_addr mmap_offset mmap_addr\n");
+ vlib_cli_output (vm,
+ " ====== ===== ================== ================== ================== ================== ==================\n");
+ }
+ for (j = 0; j < vui->nregions; j++)
+ {
+ vlib_cli_output (vm,
+ " %d %-5d 0x%016lx 0x%016lx 0x%016lx 0x%016lx 0x%016lx\n",
+ j, vui->region_mmap_fd[j],
+ vui->regions[j].guest_phys_addr,
+ vui->regions[j].memory_size,
+ vui->regions[j].userspace_addr,
+ vui->regions[j].mmap_offset,
+ pointer_to_uword (vui->region_mmap_addr[j]));
+ }
+ for (q = 0; q < vui->num_vrings; q++)
+ {
+ vlib_cli_output (vm, "\n Virtqueue %d\n", q);
+
+ vlib_cli_output (vm,
+ " qsz %d last_avail_idx %d last_used_idx %d\n",
+ vui->vrings[q].qsz, vui->vrings[q].last_avail_idx,
+ vui->vrings[q].last_used_idx);
+
+ if (vui->vrings[q].avail && vui->vrings[q].used)
+ vlib_cli_output (vm,
+ " avail.flags %x avail.idx %d used.flags %x used.idx %d\n",
+ vui->vrings[q].avail->flags,
+ vui->vrings[q].avail->idx,
+ vui->vrings[q].used->flags,
+ vui->vrings[q].used->idx);
+
+ vlib_cli_output (vm, " kickfd %d callfd %d errfd %d\n",
+ vui->vrings[q].kickfd,
+ vui->vrings[q].callfd, vui->vrings[q].errfd);
+
+ if (show_descr)
+ {
+ vlib_cli_output (vm, "\n descriptor table:\n");
+ vlib_cli_output (vm,
+ " id addr len flags next user_addr\n");
+ vlib_cli_output (vm,
+ " ===== ================== ===== ====== ===== ==================\n");
+ for (j = 0; j < vui->vrings[q].qsz; j++)
+ {
+ vlib_cli_output (vm,
+ " %-5d 0x%016lx %-5d 0x%04x %-5d 0x%016lx\n",
+ j, vui->vrings[q].desc[j].addr,
+ vui->vrings[q].desc[j].len,
+ vui->vrings[q].desc[j].flags,
+ vui->vrings[q].desc[j].next,
+ pointer_to_uword (map_guest_mem
+ (vui,
+ vui->vrings[q].
+ desc[j].addr)));
+ }
+ }
+ }
+ vlib_cli_output (vm, "\n");
}
- vlib_cli_output (vm, "\n");
- }
done:
vec_free (hw_if_indices);
return error;
@@ -1997,19 +2224,19 @@ done:
static clib_error_t *
vhost_user_config (vlib_main_t * vm, unformat_input_t * input)
{
- vhost_user_main_t * vum = &vhost_user_main;
+ vhost_user_main_t *vum = &vhost_user_main;
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (input, "coalesce-frames %d", &vum->coalesce_frames))
- ;
+ ;
else if (unformat (input, "coalesce-time %f", &vum->coalesce_time))
- ;
+ ;
else if (unformat (input, "dont-dump-memory"))
- vum->dont_dump_vhost_user_memory = 1;
+ vum->dont_dump_vhost_user_memory = 1;
else
- return clib_error_return (0, "unknown input `%U'",
- format_unformat_error, input);
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
}
return 0;
@@ -2021,14 +2248,22 @@ VLIB_CONFIG_FUNCTION (vhost_user_config, "vhost-user");
void
vhost_user_unmap_all (void)
{
- vhost_user_main_t * vum = &vhost_user_main;
- vhost_user_intf_t * vui;
+ vhost_user_main_t *vum = &vhost_user_main;
+ vhost_user_intf_t *vui;
if (vum->dont_dump_vhost_user_memory)
{
vec_foreach (vui, vum->vhost_user_interfaces)
- {
- unmap_all_mem_regions(vui);
- }
+ {
+ unmap_all_mem_regions (vui);
+ }
}
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vnet/vnet/devices/virtio/vhost-user.h b/vnet/vnet/devices/virtio/vhost-user.h
index c0f6e6ff7af..fc413609362 100644
--- a/vnet/vnet/devices/virtio/vhost-user.h
+++ b/vnet/vnet/devices/virtio/vhost-user.h
@@ -52,50 +52,58 @@
_ (VHOST_USER_F_PROTOCOL_FEATURES, 30)
-typedef enum {
+typedef enum
+{
#define _(f,n) FEAT_##f = (n),
foreach_virtio_net_feature
#undef _
} virtio_net_feature_t;
-int vhost_user_create_if(vnet_main_t * vnm, vlib_main_t * vm,
- const char * sock_filename, u8 is_server,
- u32 * sw_if_index, u64 feature_mask,
- u8 renumber, u32 custom_dev_instance, u8 *hwaddr);
-int vhost_user_modify_if(vnet_main_t * vnm, vlib_main_t * vm,
- const char * sock_filename, u8 is_server,
- u32 sw_if_index, u64 feature_mask,
- u8 renumber, u32 custom_dev_instance);
-int vhost_user_delete_if(vnet_main_t * vnm, vlib_main_t * vm, u32 sw_if_index);
+int vhost_user_create_if (vnet_main_t * vnm, vlib_main_t * vm,
+ const char *sock_filename, u8 is_server,
+ u32 * sw_if_index, u64 feature_mask,
+ u8 renumber, u32 custom_dev_instance, u8 * hwaddr);
+int vhost_user_modify_if (vnet_main_t * vnm, vlib_main_t * vm,
+ const char *sock_filename, u8 is_server,
+ u32 sw_if_index, u64 feature_mask,
+ u8 renumber, u32 custom_dev_instance);
+int vhost_user_delete_if (vnet_main_t * vnm, vlib_main_t * vm,
+ u32 sw_if_index);

-typedef struct vhost_user_memory_region {
+typedef struct vhost_user_memory_region
+{
u64 guest_phys_addr;
u64 memory_size;
u64 userspace_addr;
u64 mmap_offset;
} vhost_user_memory_region_t;
-typedef struct vhost_user_memory {
+typedef struct vhost_user_memory
+{
u32 nregions;
u32 padding;
vhost_user_memory_region_t regions[VHOST_MEMORY_MAX_NREGIONS];
} vhost_user_memory_t;
-typedef struct {
+typedef struct
+{
unsigned int index, num;
} vhost_vring_state_t;
-typedef struct {
+typedef struct
+{
unsigned int index, flags;
u64 desc_user_addr, used_user_addr, avail_user_addr, log_guest_addr;
} vhost_vring_addr_t;
-typedef struct vhost_user_log {
+typedef struct vhost_user_log
+{
u64 size;
u64 offset;
} vhost_user_log_t;
-typedef enum vhost_user_req {
+typedef enum vhost_user_req
+{
VHOST_USER_NONE = 0,
VHOST_USER_GET_FEATURES = 1,
VHOST_USER_SET_FEATURES = 2,
@@ -119,29 +127,35 @@ typedef enum vhost_user_req {
} vhost_user_req_t;
// vring_desc I/O buffer descriptor
-typedef struct {
+/* *INDENT-OFF* */
+typedef struct
+{
uint64_t addr; // packet data buffer address
uint32_t len; // packet data buffer size
uint16_t flags; // (see below)
uint16_t next; // optional index next descriptor in chain
} __attribute ((packed)) vring_desc_t;
-typedef struct {
+typedef struct
+{
uint16_t flags;
uint16_t idx;
uint16_t ring[VHOST_VRING_MAX_SIZE];
} __attribute ((packed)) vring_avail_t;
-typedef struct {
+typedef struct
+{
uint16_t flags;
uint16_t idx;
- struct /* vring_used_elem */ {
- uint32_t id;
- uint32_t len;
- } ring[VHOST_VRING_MAX_SIZE];
+ struct /* vring_used_elem */
+ {
+ uint32_t id;
+ uint32_t len;
+ } ring[VHOST_VRING_MAX_SIZE];
} __attribute ((packed)) vring_used_t;
-typedef struct {
+typedef struct
+{
u8 flags;
u8 gso_type;
u16 hdr_len;
@@ -156,19 +170,22 @@ typedef struct {
} __attribute ((packed)) virtio_net_hdr_mrg_rxbuf_t;
typedef struct vhost_user_msg {
- vhost_user_req_t request;
- u32 flags;
- u32 size;
- union {
- u64 u64;
- vhost_vring_state_t state;
- vhost_vring_addr_t addr;
- vhost_user_memory_t memory;
- vhost_user_log_t log;
+ vhost_user_req_t request;
+ u32 flags;
+ u32 size;
+ union
+ {
+ u64 u64;
+ vhost_vring_state_t state;
+ vhost_vring_addr_t addr;
+ vhost_user_memory_t memory;
+ vhost_user_log_t log;
};
} __attribute ((packed)) vhost_user_msg_t;
+/* *INDENT-ON* */

-typedef struct {
+typedef struct
+{
u32 qsz;
u16 last_avail_idx;
u16 last_used_idx;
@@ -186,9 +203,10 @@ typedef struct {
f64 int_deadline;
} vhost_user_vring_t;
-typedef struct {
- CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
- volatile u32 * lockp;
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ volatile u32 *lockp;
u32 is_up;
u32 admin_up;
u32 unix_fd;
@@ -199,64 +217,71 @@ typedef struct {
u8 sock_is_server;
u32 hw_if_index, sw_if_index;
u8 active;
-
+
u32 nregions;
u64 features;
u64 feature_mask;
u64 protocol_features;
u32 num_vrings;
vhost_user_memory_region_t regions[VHOST_MEMORY_MAX_NREGIONS];
- void * region_mmap_addr[VHOST_MEMORY_MAX_NREGIONS];
+ void *region_mmap_addr[VHOST_MEMORY_MAX_NREGIONS];
u32 region_mmap_fd[VHOST_MEMORY_MAX_NREGIONS];
vhost_user_vring_t vrings[2];
int virtio_net_hdr_sz;
int is_any_layout;
- u32 * d_trace_buffers;
+ u32 *d_trace_buffers;

- void * log_base_addr;
+ void *log_base_addr;
u64 log_size;
} vhost_user_intf_t;
-typedef struct {
- u32 ** rx_buffers;
+typedef struct
+{
+ u32 **rx_buffers;
u32 mtu_bytes;
- vhost_user_intf_t * vhost_user_interfaces;
- u32 * vhost_user_inactive_interfaces_index;
- uword * vhost_user_interface_index_by_listener_fd;
- uword * vhost_user_interface_index_by_sock_fd;
- uword * vhost_user_interface_index_by_sw_if_index;
- u32 * show_dev_instance_by_real_dev_instance;
+ vhost_user_intf_t *vhost_user_interfaces;
+ u32 *vhost_user_inactive_interfaces_index;
+ uword *vhost_user_interface_index_by_listener_fd;
+ uword *vhost_user_interface_index_by_sock_fd;
+ uword *vhost_user_interface_index_by_sw_if_index;
+ u32 *show_dev_instance_by_real_dev_instance;
u32 coalesce_frames;
f64 coalesce_time;
int dont_dump_vhost_user_memory;
} vhost_user_main_t;
-typedef struct {
- u8 if_name[64];
- u32 sw_if_index;
- u32 virtio_net_hdr_sz;
- u64 features;
- u8 is_server;
- u8 sock_filename[256];
- u32 num_regions;
- int sock_errno;
+typedef struct
+{
+ u8 if_name[64];
+ u32 sw_if_index;
+ u32 virtio_net_hdr_sz;
+ u64 features;
+ u8 is_server;
+ u8 sock_filename[256];
+ u32 num_regions;
+ int sock_errno;
} vhost_user_intf_details_t;
-int vhost_user_dump_ifs(vnet_main_t * vnm, vlib_main_t * vm,
- vhost_user_intf_details_t **out_vuids);
+int vhost_user_dump_ifs (vnet_main_t * vnm, vlib_main_t * vm,
+ vhost_user_intf_details_t ** out_vuids);
// CLI commands to be used from dpdk
-clib_error_t *
-vhost_user_connect_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd);
-clib_error_t *
-vhost_user_delete_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd);
-clib_error_t *
-show_vhost_user_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd);
+clib_error_t *vhost_user_connect_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd);
+clib_error_t *vhost_user_delete_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd);
+clib_error_t *show_vhost_user_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd);
#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */