author     Mohsin Kazmi <sykazmi@cisco.com>          2022-02-01 18:35:59 +0000
committer  Damjan Marion <dmarion@me.com>            2022-03-01 16:08:11 +0000
commit     0f8912f0d6af49ec8093e3c478b48767a00b3710 (patch)
tree       c919de059799fb89029f470c96b81cffa9dacec3 /src
parent     855ad3373ee9257ce7c917b90c638bd2f8a76da4 (diff)
virtio: refactor code
Type: refactor
Signed-off-by: Mohsin Kazmi <sykazmi@cisco.com>
Change-Id: I3788cc857023fafcc8eb6d6ff4524425026a75d8
Diffstat (limited to 'src')
-rw-r--r--  src/vnet/devices/tap/tap.c                    12
-rw-r--r--  src/vnet/devices/virtio/device.c              84
-rw-r--r--  src/vnet/devices/virtio/node.c                54
-rw-r--r--  src/vnet/devices/virtio/pci.c                129
-rw-r--r--  src/vnet/devices/virtio/pci.h                  6
-rw-r--r--  src/vnet/devices/virtio/vhost_user.c          22
-rw-r--r--  src/vnet/devices/virtio/vhost_user.h          16
-rw-r--r--  src/vnet/devices/virtio/vhost_user_inline.h   23
-rw-r--r--  src/vnet/devices/virtio/vhost_user_input.c    48
-rw-r--r--  src/vnet/devices/virtio/vhost_user_output.c   35
-rw-r--r--  src/vnet/devices/virtio/virtio.c              83
-rw-r--r--  src/vnet/devices/virtio/virtio.h              60
-rw-r--r--  src/vnet/devices/virtio/virtio_api.c           6
-rw-r--r--  src/vnet/devices/virtio/virtio_inline.h       17
-rw-r--r--  src/vnet/devices/virtio/virtio_pci_legacy.c    7
-rw-r--r--  src/vnet/devices/virtio/virtio_pci_modern.c   18
-rw-r--r--  src/vnet/devices/virtio/virtio_pre_input.c     4
-rw-r--r--  src/vnet/devices/virtio/virtio_process.c       2
-rw-r--r--  src/vnet/devices/virtio/virtio_std.h          80
19 files changed, 351 insertions, 355 deletions
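
In brief, the patch moves the virtio ring and net-header types into the vnet_virtio_* namespace (virtio_vring_t becomes vnet_virtio_vring_t, vring_desc_t becomes vnet_virtio_vring_desc_t, virtio_net_hdr_v1_t becomes vnet_virtio_net_hdr_v1_t, and so on), renames the vring "size" field to "queue_size", renames VIRTIO_PCI_VRING_ALIGN to VNET_VIRTIO_PCI_VRING_ALIGN, and replaces the kernel-style vring_init()/vring_size() calls in pci.c with new inline helpers in virtio.h. For quick orientation, this is how the rename reads at a typical call site; the snippet is illustrative only (the helper name is hypothetical), with the pattern lifted from the tap.c and device.c hunks below:

    /* illustrative sketch, not part of the patch; old names shown in comments */
    static u16
    example_rx_ring_size (virtio_if_t *vif)
    {
      /* was: virtio_vring_t *vring = ... */
      vnet_virtio_vring_t *vring =
        vec_elt_at_index (vif->rxq_vrings, RX_QUEUE_ACCESS (0));
      /* was: return vring->size; */
      return vring->queue_size;
    }
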
diff --git a/src/vnet/devices/tap/tap.c b/src/vnet/devices/tap/tap.c
index 0fa2e5d95af..7c8a9b04779 100644
--- a/src/vnet/devices/tap/tap.c
+++ b/src/vnet/devices/tap/tap.c
@@ -272,7 +272,7 @@ tap_create_if (vlib_main_t * vm, tap_create_if_args_t * args)
else
ifr.ifr_flags |= IFF_MULTI_QUEUE;
- hdrsz = sizeof (virtio_net_hdr_v1_t);
+ hdrsz = sizeof (vnet_virtio_net_hdr_v1_t);
if (args->tap_flags & TAP_FLAG_GSO)
{
offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;
@@ -577,7 +577,7 @@ tap_create_if (vlib_main_t * vm, tap_create_if_args_t * args)
vhost_vring_addr_t addr = { 0 };
vhost_vring_state_t state = { 0 };
vhost_vring_file_t file = { 0 };
- virtio_vring_t *vring;
+ vnet_virtio_vring_t *vring;
u16 qp = i >> 1;
int fd = vif->vhost_fds[qp];
@@ -595,7 +595,7 @@ tap_create_if (vlib_main_t * vm, tap_create_if_args_t * args)
}
addr.index = state.index = file.index = vring->queue_id & 1;
- state.num = vring->size;
+ state.num = vring->queue_size;
virtio_log_debug (vif, "VHOST_SET_VRING_NUM fd %d index %u num %u", fd,
state.index, state.num);
_IOCTL (fd, VHOST_SET_VRING_NUM, &state);
@@ -880,7 +880,7 @@ tap_dump_ifs (tap_interface_details_t ** out_tapids)
vnet_main_t *vnm = vnet_get_main ();
virtio_main_t *mm = &virtio_main;
virtio_if_t *vif;
- virtio_vring_t *vring;
+ vnet_virtio_vring_t *vring;
vnet_hw_interface_t *hi;
tap_interface_details_t *r_tapids = NULL;
tap_interface_details_t *tapid = NULL;
@@ -898,9 +898,9 @@ tap_dump_ifs (tap_interface_details_t ** out_tapids)
clib_memcpy(tapid->dev_name, hi->name,
MIN (ARRAY_LEN (tapid->dev_name) - 1, vec_len (hi->name)));
vring = vec_elt_at_index (vif->rxq_vrings, RX_QUEUE_ACCESS(0));
- tapid->rx_ring_sz = vring->size;
+ tapid->rx_ring_sz = vring->queue_size;
vring = vec_elt_at_index (vif->txq_vrings, TX_QUEUE_ACCESS(0));
- tapid->tx_ring_sz = vring->size;
+ tapid->tx_ring_sz = vring->queue_size;
tapid->tap_flags = vif->tap_flags;
clib_memcpy(&tapid->host_mac_addr, vif->host_mac_addr, 6);
if (vif->host_if_name)
diff --git a/src/vnet/devices/virtio/device.c b/src/vnet/devices/virtio/device.c
index 17b3a175e20..bd06d0fb648 100644
--- a/src/vnet/devices/virtio/device.c
+++ b/src/vnet/devices/virtio/device.c
@@ -166,11 +166,12 @@ virtio_memset_ring_u32 (u32 *ring, u32 start, u32 ring_size, u32 n_buffers)
}
static void
-virtio_free_used_device_desc_split (vlib_main_t *vm, virtio_vring_t *vring,
+virtio_free_used_device_desc_split (vlib_main_t *vm,
+ vnet_virtio_vring_t *vring,
uword node_index)
{
u16 used = vring->desc_in_use;
- u16 sz = vring->size;
+ u16 sz = vring->queue_size;
u16 mask = sz - 1;
u16 last = vring->last_used_idx;
u16 n_left = vring->used->idx - last;
@@ -181,7 +182,7 @@ virtio_free_used_device_desc_split (vlib_main_t *vm, virtio_vring_t *vring,
while (n_left)
{
- vring_used_elem_t *e = &vring->used->ring[last & mask];
+ vnet_virtio_vring_used_elem_t *e = &vring->used->ring[last & mask];
u16 slot, n_buffers;
slot = n_buffers = e->id;
@@ -190,7 +191,7 @@ virtio_free_used_device_desc_split (vlib_main_t *vm, virtio_vring_t *vring,
n_left--;
last++;
n_buffers++;
- vring_desc_t *d = &vring->desc[e->id];
+ vnet_virtio_vring_desc_t *d = &vring->desc[e->id];
u16 next;
while (d->flags & VRING_DESC_F_NEXT)
{
@@ -232,11 +233,12 @@ virtio_free_used_device_desc_split (vlib_main_t *vm, virtio_vring_t *vring,
}
static void
-virtio_free_used_device_desc_packed (vlib_main_t *vm, virtio_vring_t *vring,
+virtio_free_used_device_desc_packed (vlib_main_t *vm,
+ vnet_virtio_vring_t *vring,
uword node_index)
{
- vring_packed_desc_t *d;
- u16 sz = vring->size;
+ vnet_virtio_vring_packed_desc_t *d;
+ u16 sz = vring->queue_size;
u16 last = vring->last_used_idx;
u16 n_buffers = 0, start;
u16 flags;
@@ -273,7 +275,7 @@ virtio_free_used_device_desc_packed (vlib_main_t *vm, virtio_vring_t *vring,
}
static void
-virtio_free_used_device_desc (vlib_main_t *vm, virtio_vring_t *vring,
+virtio_free_used_device_desc (vlib_main_t *vm, vnet_virtio_vring_t *vring,
uword node_index, int packed)
{
if (packed)
@@ -284,7 +286,7 @@ virtio_free_used_device_desc (vlib_main_t *vm, virtio_vring_t *vring,
}
static void
-set_checksum_offsets (vlib_buffer_t *b, virtio_net_hdr_v1_t *hdr,
+set_checksum_offsets (vlib_buffer_t *b, vnet_virtio_net_hdr_v1_t *hdr,
const int is_l2)
{
vnet_buffer_oflags_t oflags = vnet_buffer (b)->oflags;
@@ -357,7 +359,8 @@ set_checksum_offsets (vlib_buffer_t *b, virtio_net_hdr_v1_t *hdr,
}
static void
-set_gso_offsets (vlib_buffer_t *b, virtio_net_hdr_v1_t *hdr, const int is_l2)
+set_gso_offsets (vlib_buffer_t *b, vnet_virtio_net_hdr_v1_t *hdr,
+ const int is_l2)
{
vnet_buffer_oflags_t oflags = vnet_buffer (b)->oflags;
@@ -398,17 +401,17 @@ set_gso_offsets (vlib_buffer_t *b, virtio_net_hdr_v1_t *hdr, const int is_l2)
static u16
add_buffer_to_slot (vlib_main_t *vm, vlib_node_runtime_t *node,
- virtio_if_t *vif, virtio_vring_t *vring, u32 bi,
+ virtio_if_t *vif, vnet_virtio_vring_t *vring, u32 bi,
u16 free_desc_count, u16 avail, u16 next, u16 mask,
int hdr_sz, int do_gso, int csum_offload, int is_pci,
int is_tun, int is_indirect, int is_any_layout)
{
u16 n_added = 0;
- vring_desc_t *d;
+ vnet_virtio_vring_desc_t *d;
int is_l2 = !is_tun;
d = &vring->desc[next];
vlib_buffer_t *b = vlib_get_buffer (vm, bi);
- virtio_net_hdr_v1_t *hdr = vlib_buffer_get_current (b) - hdr_sz;
+ vnet_virtio_net_hdr_v1_t *hdr = vlib_buffer_get_current (b) - hdr_sz;
u32 drop_inline = ~0;
clib_memset_u8 (hdr, 0, hdr_sz);
@@ -469,8 +472,8 @@ add_buffer_to_slot (vlib_main_t *vm, vlib_node_runtime_t *node,
indirect_desc->next_buffer = bi;
bi = indirect_buffer;
- vring_desc_t *id =
- (vring_desc_t *) vlib_buffer_get_current (indirect_desc);
+ vnet_virtio_vring_desc_t *id =
+ (vnet_virtio_vring_desc_t *) vlib_buffer_get_current (indirect_desc);
u32 count = 1;
if (is_pci)
{
@@ -539,7 +542,7 @@ add_buffer_to_slot (vlib_main_t *vm, vlib_node_runtime_t *node,
}
id->flags = 0;
id->next = 0;
- d->len = count * sizeof (vring_desc_t);
+ d->len = count * sizeof (vnet_virtio_vring_desc_t);
d->flags = VRING_DESC_F_INDIRECT;
}
else if (is_pci)
@@ -605,16 +608,16 @@ done:
static u16
add_buffer_to_slot_packed (vlib_main_t *vm, vlib_node_runtime_t *node,
- virtio_if_t *vif, virtio_vring_t *vring, u32 bi,
- u16 next, int hdr_sz, int do_gso, int csum_offload,
- int is_pci, int is_tun, int is_indirect,
- int is_any_layout)
+ virtio_if_t *vif, vnet_virtio_vring_t *vring,
+ u32 bi, u16 next, int hdr_sz, int do_gso,
+ int csum_offload, int is_pci, int is_tun,
+ int is_indirect, int is_any_layout)
{
u16 n_added = 0, flags = 0;
int is_l2 = !is_tun;
- vring_packed_desc_t *d = &vring->packed_desc[next];
+ vnet_virtio_vring_packed_desc_t *d = &vring->packed_desc[next];
vlib_buffer_t *b = vlib_get_buffer (vm, bi);
- virtio_net_hdr_v1_t *hdr = vlib_buffer_get_current (b) - hdr_sz;
+ vnet_virtio_net_hdr_v1_t *hdr = vlib_buffer_get_current (b) - hdr_sz;
u32 drop_inline = ~0;
clib_memset (hdr, 0, hdr_sz);
@@ -675,8 +678,9 @@ add_buffer_to_slot_packed (vlib_main_t *vm, vlib_node_runtime_t *node,
indirect_desc->next_buffer = bi;
bi = indirect_buffer;
- vring_packed_desc_t *id =
- (vring_packed_desc_t *) vlib_buffer_get_current (indirect_desc);
+ vnet_virtio_vring_packed_desc_t *id =
+ (vnet_virtio_vring_packed_desc_t *) vlib_buffer_get_current (
+ indirect_desc);
u32 count = 1;
if (is_pci)
{
@@ -720,7 +724,7 @@ add_buffer_to_slot_packed (vlib_main_t *vm, vlib_node_runtime_t *node,
}
id->flags = 0;
id->id = 0;
- d->len = count * sizeof (vring_packed_desc_t);
+ d->len = count * sizeof (vnet_virtio_vring_packed_desc_t);
flags = VRING_DESC_F_INDIRECT;
}
else
@@ -752,12 +756,10 @@ done:
}
static uword
-virtio_interface_tx_packed_gso_inline (vlib_main_t *vm,
- vlib_node_runtime_t *node,
- virtio_if_t *vif, virtio_if_type_t type,
- virtio_vring_t *vring, u32 *buffers,
- u16 n_left, const int do_gso,
- const int csum_offload)
+virtio_interface_tx_packed_gso_inline (
+ vlib_main_t *vm, vlib_node_runtime_t *node, virtio_if_t *vif,
+ virtio_if_type_t type, vnet_virtio_vring_t *vring, u32 *buffers, u16 n_left,
+ const int do_gso, const int csum_offload)
{
int is_pci = (type == VIRTIO_IF_TYPE_PCI);
int is_tun = (type == VIRTIO_IF_TYPE_TUN);
@@ -766,7 +768,7 @@ virtio_interface_tx_packed_gso_inline (vlib_main_t *vm,
int is_any_layout =
((vif->features & VIRTIO_FEATURE (VIRTIO_F_ANY_LAYOUT)) != 0);
const int hdr_sz = vif->virtio_net_hdr_sz;
- u16 sz = vring->size;
+ u16 sz = vring->queue_size;
u16 used, next, n_buffers = 0, n_buffers_left = 0;
u16 n_vectors = n_left;
@@ -838,7 +840,7 @@ virtio_interface_tx_packed_gso_inline (vlib_main_t *vm,
}
static void
-virtio_find_free_desc (virtio_vring_t *vring, u16 size, u16 mask, u16 req,
+virtio_find_free_desc (vnet_virtio_vring_t *vring, u16 size, u16 mask, u16 req,
u16 next, u32 *first_free_desc_index,
u16 *free_desc_count)
{
@@ -877,7 +879,7 @@ static u16
virtio_interface_tx_split_gso_inline (vlib_main_t *vm,
vlib_node_runtime_t *node,
virtio_if_t *vif, virtio_if_type_t type,
- virtio_vring_t *vring, u32 *buffers,
+ vnet_virtio_vring_t *vring, u32 *buffers,
u16 n_left, int do_gso, int csum_offload)
{
u16 used, next, avail, n_buffers = 0, n_buffers_left = 0;
@@ -887,7 +889,7 @@ virtio_interface_tx_split_gso_inline (vlib_main_t *vm,
((vif->features & VIRTIO_FEATURE (VIRTIO_RING_F_INDIRECT_DESC)) != 0);
int is_any_layout =
((vif->features & VIRTIO_FEATURE (VIRTIO_F_ANY_LAYOUT)) != 0);
- u16 sz = vring->size;
+ u16 sz = vring->queue_size;
int hdr_sz = vif->virtio_net_hdr_sz;
u16 mask = sz - 1;
u16 n_vectors = n_left;
@@ -986,7 +988,7 @@ virtio_interface_tx_split_gso_inline (vlib_main_t *vm,
static u16
virtio_interface_tx_gso_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
virtio_if_t *vif, virtio_if_type_t type,
- virtio_vring_t *vring, u32 *buffers,
+ vnet_virtio_vring_t *vring, u32 *buffers,
u16 n_left, int packed, int do_gso,
int csum_offload)
{
@@ -1002,7 +1004,7 @@ virtio_interface_tx_gso_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
static u16
virtio_interface_tx_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
- virtio_if_t *vif, virtio_vring_t *vring,
+ virtio_if_t *vif, vnet_virtio_vring_t *vring,
virtio_if_type_t type, u32 *buffers, u16 n_left,
int packed)
{
@@ -1035,7 +1037,7 @@ VNET_DEVICE_CLASS_TX_FN (virtio_device_class) (vlib_main_t * vm,
virtio_if_t *vif = pool_elt_at_index (nm->interfaces, rund->dev_instance);
vnet_hw_if_tx_frame_t *tf = vlib_frame_scalar_args (frame);
u16 qid = tf->queue_id;
- virtio_vring_t *vring = vec_elt_at_index (vif->txq_vrings, qid);
+ vnet_virtio_vring_t *vring = vec_elt_at_index (vif->txq_vrings, qid);
u16 n_left = frame->n_vectors;
u32 *buffers = vlib_frame_vector_args (frame);
u32 to[GRO_TO_VECTOR_SIZE (n_left)];
@@ -1127,7 +1129,7 @@ virtio_clear_hw_interface_counters (u32 instance)
}
static void
-virtio_set_rx_interrupt (virtio_if_t *vif, virtio_vring_t *vring)
+virtio_set_rx_interrupt (virtio_if_t *vif, vnet_virtio_vring_t *vring)
{
if (vif->is_packed)
vring->driver_event->flags &= ~VRING_EVENT_F_DISABLE;
@@ -1136,7 +1138,7 @@ virtio_set_rx_interrupt (virtio_if_t *vif, virtio_vring_t *vring)
}
static void
-virtio_set_rx_polling (virtio_if_t *vif, virtio_vring_t *vring)
+virtio_set_rx_polling (virtio_if_t *vif, vnet_virtio_vring_t *vring)
{
if (vif->is_packed)
vring->driver_event->flags |= VRING_EVENT_F_DISABLE;
@@ -1151,7 +1153,7 @@ virtio_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
virtio_main_t *mm = &virtio_main;
vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
virtio_if_t *vif = pool_elt_at_index (mm->interfaces, hw->dev_instance);
- virtio_vring_t *rx_vring = vec_elt_at_index (vif->rxq_vrings, qid);
+ vnet_virtio_vring_t *rx_vring = vec_elt_at_index (vif->rxq_vrings, qid);
if (vif->type == VIRTIO_IF_TYPE_PCI && !(vif->support_int_mode))
{
diff --git a/src/vnet/devices/virtio/node.c b/src/vnet/devices/virtio/node.c
index 2d2da189d5d..1de4fb73f7d 100644
--- a/src/vnet/devices/virtio/node.c
+++ b/src/vnet/devices/virtio/node.c
@@ -47,7 +47,7 @@ typedef struct
u32 hw_if_index;
u16 ring;
u16 len;
- virtio_net_hdr_v1_t hdr;
+ vnet_virtio_net_hdr_v1_t hdr;
} virtio_input_trace_t;
static u8 *
@@ -69,8 +69,8 @@ format_virtio_input_trace (u8 * s, va_list * args)
}
static_always_inline void
-virtio_needs_csum (vlib_buffer_t * b0, virtio_net_hdr_v1_t * hdr,
- u8 * l4_proto, u8 * l4_hdr_sz, virtio_if_type_t type)
+virtio_needs_csum (vlib_buffer_t *b0, vnet_virtio_net_hdr_v1_t *hdr,
+ u8 *l4_proto, u8 *l4_hdr_sz, virtio_if_type_t type)
{
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
{
@@ -161,7 +161,7 @@ virtio_needs_csum (vlib_buffer_t * b0, virtio_net_hdr_v1_t * hdr,
}
static_always_inline void
-fill_gso_buffer_flags (vlib_buffer_t * b0, virtio_net_hdr_v1_t * hdr,
+fill_gso_buffer_flags (vlib_buffer_t *b0, vnet_virtio_net_hdr_v1_t *hdr,
u8 l4_proto, u8 l4_hdr_sz)
{
if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV4)
@@ -181,7 +181,7 @@ fill_gso_buffer_flags (vlib_buffer_t * b0, virtio_net_hdr_v1_t * hdr,
}
static_always_inline u16
-virtio_n_left_to_process (virtio_vring_t * vring, const int packed)
+virtio_n_left_to_process (vnet_virtio_vring_t *vring, const int packed)
{
if (packed)
return vring->desc_in_use;
@@ -190,7 +190,7 @@ virtio_n_left_to_process (virtio_vring_t * vring, const int packed)
}
static_always_inline u16
-virtio_get_slot_id (virtio_vring_t * vring, const int packed, u16 last,
+virtio_get_slot_id (vnet_virtio_vring_t *vring, const int packed, u16 last,
u16 mask)
{
if (packed)
@@ -200,7 +200,7 @@ virtio_get_slot_id (virtio_vring_t * vring, const int packed, u16 last,
}
static_always_inline u16
-virtio_get_len (virtio_vring_t * vring, const int packed, const int hdr_sz,
+virtio_get_len (vnet_virtio_vring_t *vring, const int packed, const int hdr_sz,
u16 last, u16 mask)
{
if (packed)
@@ -209,22 +209,24 @@ virtio_get_len (virtio_vring_t * vring, const int packed, const int hdr_sz,
return vring->used->ring[last & mask].len - hdr_sz;
}
-#define increment_last(last, packed, vring) \
- do { \
- last++; \
- if (packed && last >= vring->size) \
- { \
- last = 0; \
- vring->used_wrap_counter ^= 1; \
- } \
- } while (0)
+#define increment_last(last, packed, vring) \
+ do \
+ { \
+ last++; \
+ if (packed && last >= vring->queue_size) \
+ { \
+ last = 0; \
+ vring->used_wrap_counter ^= 1; \
+ } \
+ } \
+ while (0)
static_always_inline uword
-virtio_device_input_gso_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame, virtio_if_t * vif,
- virtio_vring_t * vring, virtio_if_type_t type,
- int gso_enabled, int checksum_offload_enabled,
- int packed)
+virtio_device_input_gso_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame, virtio_if_t *vif,
+ vnet_virtio_vring_t *vring,
+ virtio_if_type_t type, int gso_enabled,
+ int checksum_offload_enabled, int packed)
{
vnet_main_t *vnm = vnet_get_main ();
u32 thread_index = vm->thread_index;
@@ -234,7 +236,7 @@ virtio_device_input_gso_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
u32 *to_next = 0;
u32 n_rx_packets = 0;
u32 n_rx_bytes = 0;
- u16 mask = vring->size - 1;
+ u16 mask = vring->queue_size - 1;
u16 last = vring->last_used_idx;
u16 n_left = virtio_n_left_to_process (vring, packed);
vlib_buffer_t bt = {};
@@ -267,7 +269,7 @@ virtio_device_input_gso_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
{
if (packed)
{
- vring_packed_desc_t *d = &vring->packed_desc[last];
+ vnet_virtio_vring_packed_desc_t *d = &vring->packed_desc[last];
u16 flags = d->flags;
if ((flags & VRING_DESC_F_AVAIL) !=
(vring->used_wrap_counter << 7)
@@ -280,13 +282,13 @@ virtio_device_input_gso_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
}
u8 l4_proto = 0, l4_hdr_sz = 0;
u16 num_buffers = 1;
- virtio_net_hdr_v1_t *hdr;
+ vnet_virtio_net_hdr_v1_t *hdr;
u16 slot = virtio_get_slot_id (vring, packed, last, mask);
u16 len = virtio_get_len (vring, packed, hdr_sz, last, mask);
u32 bi0 = vring->buffers[slot];
vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
hdr = vlib_buffer_get_current (b0);
- if (hdr_sz == sizeof (virtio_net_hdr_v1_t))
+ if (hdr_sz == sizeof (vnet_virtio_net_hdr_v1_t))
num_buffers = hdr->num_buffers;
b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
@@ -408,7 +410,7 @@ virtio_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_frame_t * frame, virtio_if_t * vif, u16 qid,
virtio_if_type_t type)
{
- virtio_vring_t *vring = vec_elt_at_index (vif->rxq_vrings, qid);
+ vnet_virtio_vring_t *vring = vec_elt_at_index (vif->rxq_vrings, qid);
const int hdr_sz = vif->virtio_net_hdr_sz;
uword rv;
diff --git a/src/vnet/devices/virtio/pci.c b/src/vnet/devices/virtio/pci.c
index 1d2699bbb36..07744c20301 100644
--- a/src/vnet/devices/virtio/pci.c
+++ b/src/vnet/devices/virtio/pci.c
@@ -116,7 +116,7 @@ virtio_pci_irq_queue_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h,
line--;
u16 qid = line;
- virtio_vring_t *vring = vec_elt_at_index (vif->rxq_vrings, qid);
+ vnet_virtio_vring_t *vring = vec_elt_at_index (vif->rxq_vrings, qid);
vnet_hw_if_rx_queue_set_int_pending (vnm, vring->queue_index);
}
@@ -198,18 +198,18 @@ static int
virtio_pci_send_ctrl_msg_packed (vlib_main_t * vm, virtio_if_t * vif,
virtio_ctrl_msg_t * data, u32 len)
{
- virtio_vring_t *vring = vif->cxq_vring;
+ vnet_virtio_vring_t *vring = vif->cxq_vring;
virtio_net_ctrl_ack_t status = VIRTIO_NET_ERR;
virtio_ctrl_msg_t result;
u32 buffer_index;
vlib_buffer_t *b;
u16 used, next;
- u16 sz = vring->size;
+ u16 sz = vring->queue_size;
u16 flags = 0, first_desc_flags = 0;
used = vring->desc_in_use;
next = vring->desc_next;
- vring_packed_desc_t *d = &vring->packed_desc[next];
+ vnet_virtio_vring_packed_desc_t *d = &vring->packed_desc[next];
if (vlib_buffer_alloc (vm, &buffer_index, 1))
b = vlib_get_buffer (vm, buffer_index);
@@ -317,9 +317,9 @@ virtio_pci_send_ctrl_msg_packed (vlib_main_t * vm, virtio_if_t * vif,
|| (flags & VRING_DESC_F_USED) != (vring->used_wrap_counter << 15));
last += 3;
- if (last >= vring->size)
+ if (last >= vring->queue_size)
{
- last = last - vring->size;
+ last = last - vring->queue_size;
vring->used_wrap_counter ^= 1;
}
vring->desc_in_use -= 3;
@@ -338,19 +338,19 @@ static int
virtio_pci_send_ctrl_msg_split (vlib_main_t * vm, virtio_if_t * vif,
virtio_ctrl_msg_t * data, u32 len)
{
- virtio_vring_t *vring = vif->cxq_vring;
+ vnet_virtio_vring_t *vring = vif->cxq_vring;
virtio_net_ctrl_ack_t status = VIRTIO_NET_ERR;
virtio_ctrl_msg_t result;
u32 buffer_index;
vlib_buffer_t *b;
u16 used, next, avail;
- u16 sz = vring->size;
+ u16 sz = vring->queue_size;
u16 mask = sz - 1;
used = vring->desc_in_use;
next = vring->desc_next;
avail = vring->avail->idx;
- vring_desc_t *d = &vring->desc[next];
+ vnet_virtio_vring_desc_t *d = &vring->desc[next];
if (vlib_buffer_alloc (vm, &buffer_index, 1))
b = vlib_get_buffer (vm, buffer_index);
@@ -403,7 +403,7 @@ virtio_pci_send_ctrl_msg_split (vlib_main_t * vm, virtio_if_t * vif,
while (n_left)
{
- vring_used_elem_t *e = &vring->used->ring[last & mask];
+ vnet_virtio_vring_used_elem_t *e = &vring->used->ring[last & mask];
u16 slot = e->id;
d = &vring->desc[slot];
@@ -600,7 +600,7 @@ virtio_pci_control_vring_packed_init (vlib_main_t * vm, virtio_if_t * vif,
{
clib_error_t *error = 0;
u16 queue_size = 0;
- virtio_vring_t *vring;
+ vnet_virtio_vring_t *vring;
u32 i = 0;
void *ptr = NULL;
@@ -615,34 +615,35 @@ virtio_pci_control_vring_packed_init (vlib_main_t * vm, virtio_if_t * vif,
vec_validate_aligned (vif->cxq_vring, 0, CLIB_CACHE_LINE_BYTES);
vring = vec_elt_at_index (vif->cxq_vring, 0);
- i =
- (((queue_size * sizeof (vring_packed_desc_t)) +
- sizeof (vring_desc_event_t) + VIRTIO_PCI_VRING_ALIGN -
- 1) & ~(VIRTIO_PCI_VRING_ALIGN - 1)) + sizeof (vring_desc_event_t);
+ i = (((queue_size * sizeof (vnet_virtio_vring_packed_desc_t)) +
+ sizeof (vnet_virtio_vring_desc_event_t) + VNET_VIRTIO_PCI_VRING_ALIGN -
+ 1) &
+ ~(VNET_VIRTIO_PCI_VRING_ALIGN - 1)) +
+ sizeof (vnet_virtio_vring_desc_event_t);
- ptr =
- vlib_physmem_alloc_aligned_on_numa (vm, i, VIRTIO_PCI_VRING_ALIGN,
- vif->numa_node);
+ ptr = vlib_physmem_alloc_aligned_on_numa (vm, i, VNET_VIRTIO_PCI_VRING_ALIGN,
+ vif->numa_node);
if (!ptr)
return vlib_physmem_last_error (vm);
clib_memset (ptr, 0, i);
vring->packed_desc = ptr;
- vring->driver_event = ptr + (queue_size * sizeof (vring_packed_desc_t));
+ vring->driver_event =
+ ptr + (queue_size * sizeof (vnet_virtio_vring_packed_desc_t));
vring->driver_event->off_wrap = 0;
vring->driver_event->flags = VRING_EVENT_F_DISABLE;
vring->device_event =
- ptr +
- (((queue_size * sizeof (vring_packed_desc_t)) +
- sizeof (vring_desc_event_t) + VIRTIO_PCI_VRING_ALIGN -
- 1) & ~(VIRTIO_PCI_VRING_ALIGN - 1));
+ ptr + (((queue_size * sizeof (vnet_virtio_vring_packed_desc_t)) +
+ sizeof (vnet_virtio_vring_desc_event_t) +
+ VNET_VIRTIO_PCI_VRING_ALIGN - 1) &
+ ~(VNET_VIRTIO_PCI_VRING_ALIGN - 1));
vring->device_event->off_wrap = 0;
vring->device_event->flags = 0;
vring->queue_id = queue_num;
- vring->size = queue_size;
+ vring->queue_size = queue_size;
vring->avail_wrap_counter = 1;
vring->used_wrap_counter = 1;
@@ -650,7 +651,7 @@ virtio_pci_control_vring_packed_init (vlib_main_t * vm, virtio_if_t * vif,
virtio_log_debug (vif, "control-queue: number %u, size %u", queue_num,
queue_size);
- vif->virtio_pci_func->setup_queue (vm, vif, queue_num, (void *) vring);
+ vif->virtio_pci_func->setup_queue (vm, vif, queue_num, vring);
vring->queue_notify_offset =
vif->notify_off_multiplier *
vif->virtio_pci_func->get_queue_notify_off (vm, vif, queue_num);
@@ -665,8 +666,7 @@ virtio_pci_control_vring_split_init (vlib_main_t * vm, virtio_if_t * vif,
{
clib_error_t *error = 0;
u16 queue_size = 0;
- virtio_vring_t *vring;
- vring_t vr;
+ vnet_virtio_vring_t *vring;
u32 i = 0;
void *ptr = NULL;
@@ -685,27 +685,20 @@ virtio_pci_control_vring_split_init (vlib_main_t * vm, virtio_if_t * vif,
vec_validate_aligned (vif->cxq_vring, 0, CLIB_CACHE_LINE_BYTES);
vring = vec_elt_at_index (vif->cxq_vring, 0);
- i = vring_size (queue_size, VIRTIO_PCI_VRING_ALIGN);
- i = round_pow2 (i, VIRTIO_PCI_VRING_ALIGN);
- ptr =
- vlib_physmem_alloc_aligned_on_numa (vm, i, VIRTIO_PCI_VRING_ALIGN,
- vif->numa_node);
+ i = vnet_virtio_vring_size (queue_size, VNET_VIRTIO_PCI_VRING_ALIGN);
+ i = round_pow2 (i, VNET_VIRTIO_PCI_VRING_ALIGN);
+ ptr = vlib_physmem_alloc_aligned_on_numa (vm, i, VNET_VIRTIO_PCI_VRING_ALIGN,
+ vif->numa_node);
if (!ptr)
return vlib_physmem_last_error (vm);
clib_memset (ptr, 0, i);
- vring_init (&vr, queue_size, ptr, VIRTIO_PCI_VRING_ALIGN);
- vring->desc = vr.desc;
- vring->avail = vr.avail;
- vring->used = vr.used;
+ vnet_virtio_vring_init (vring, queue_size, ptr, VNET_VIRTIO_PCI_VRING_ALIGN);
vring->queue_id = queue_num;
- vring->avail->flags = VIRTIO_RING_FLAG_MASK_INT;
ASSERT (vring->buffers == 0);
-
- vring->size = queue_size;
virtio_log_debug (vif, "control-queue: number %u, size %u", queue_num,
queue_size);
- vif->virtio_pci_func->setup_queue (vm, vif, queue_num, ptr);
+ vif->virtio_pci_func->setup_queue (vm, vif, queue_num, vring);
vring->queue_notify_offset =
vif->notify_off_multiplier *
vif->virtio_pci_func->get_queue_notify_off (vm, vif, queue_num);
@@ -731,8 +724,7 @@ virtio_pci_vring_split_init (vlib_main_t * vm, virtio_if_t * vif,
{
clib_error_t *error = 0;
u16 queue_size = 0;
- virtio_vring_t *vring;
- vring_t vr;
+ vnet_virtio_vring_t *vring;
u32 i = 0;
void *ptr = NULL;
@@ -762,18 +754,14 @@ virtio_pci_vring_split_init (vlib_main_t * vm, virtio_if_t * vif,
CLIB_CACHE_LINE_BYTES);
vring = vec_elt_at_index (vif->rxq_vrings, RX_QUEUE_ACCESS (queue_num));
}
- i = vring_size (queue_size, VIRTIO_PCI_VRING_ALIGN);
- i = round_pow2 (i, VIRTIO_PCI_VRING_ALIGN);
- ptr =
- vlib_physmem_alloc_aligned_on_numa (vm, i, VIRTIO_PCI_VRING_ALIGN,
- vif->numa_node);
+ i = vnet_virtio_vring_size (queue_size, VNET_VIRTIO_PCI_VRING_ALIGN);
+ i = round_pow2 (i, VNET_VIRTIO_PCI_VRING_ALIGN);
+ ptr = vlib_physmem_alloc_aligned_on_numa (vm, i, VNET_VIRTIO_PCI_VRING_ALIGN,
+ vif->numa_node);
if (!ptr)
return vlib_physmem_last_error (vm);
clib_memset (ptr, 0, i);
- vring_init (&vr, queue_size, ptr, VIRTIO_PCI_VRING_ALIGN);
- vring->desc = vr.desc;
- vring->avail = vr.avail;
- vring->used = vr.used;
+ vnet_virtio_vring_init (vring, queue_size, ptr, VNET_VIRTIO_PCI_VRING_ALIGN);
vring->queue_id = queue_num;
vring->avail->flags = VIRTIO_RING_FLAG_MASK_INT;
vring->flow_table = 0;
@@ -791,8 +779,8 @@ virtio_pci_vring_split_init (vlib_main_t * vm, virtio_if_t * vif,
virtio_log_debug (vif, "rx-queue: number %u, size %u", queue_num,
queue_size);
}
- vring->size = queue_size;
- if (vif->virtio_pci_func->setup_queue (vm, vif, queue_num, ptr))
+ vring->queue_size = queue_size;
+ if (vif->virtio_pci_func->setup_queue (vm, vif, queue_num, vring))
return clib_error_return (0, "error in queue address setup");
vring->queue_notify_offset =
@@ -809,7 +797,7 @@ virtio_pci_vring_packed_init (vlib_main_t * vm, virtio_if_t * vif,
{
clib_error_t *error = 0;
u16 queue_size = 0;
- virtio_vring_t *vring;
+ vnet_virtio_vring_t *vring;
u32 i = 0;
void *ptr = NULL;
@@ -835,29 +823,30 @@ virtio_pci_vring_packed_init (vlib_main_t * vm, virtio_if_t * vif,
vring = vec_elt_at_index (vif->rxq_vrings, RX_QUEUE_ACCESS (queue_num));
}
- i =
- (((queue_size * sizeof (vring_packed_desc_t)) +
- sizeof (vring_desc_event_t) + VIRTIO_PCI_VRING_ALIGN -
- 1) & ~(VIRTIO_PCI_VRING_ALIGN - 1)) + sizeof (vring_desc_event_t);
+ i = (((queue_size * sizeof (vnet_virtio_vring_packed_desc_t)) +
+ sizeof (vnet_virtio_vring_desc_event_t) + VNET_VIRTIO_PCI_VRING_ALIGN -
+ 1) &
+ ~(VNET_VIRTIO_PCI_VRING_ALIGN - 1)) +
+ sizeof (vnet_virtio_vring_desc_event_t);
- ptr =
- vlib_physmem_alloc_aligned_on_numa (vm, i, VIRTIO_PCI_VRING_ALIGN,
- vif->numa_node);
+ ptr = vlib_physmem_alloc_aligned_on_numa (vm, i, VNET_VIRTIO_PCI_VRING_ALIGN,
+ vif->numa_node);
if (!ptr)
return vlib_physmem_last_error (vm);
clib_memset (ptr, 0, i);
vring->packed_desc = ptr;
- vring->driver_event = ptr + (queue_size * sizeof (vring_packed_desc_t));
+ vring->driver_event =
+ ptr + (queue_size * sizeof (vnet_virtio_vring_packed_desc_t));
vring->driver_event->off_wrap = 0;
vring->driver_event->flags = VRING_EVENT_F_DISABLE;
vring->device_event =
- ptr +
- (((queue_size * sizeof (vring_packed_desc_t)) +
- sizeof (vring_desc_event_t) + VIRTIO_PCI_VRING_ALIGN -
- 1) & ~(VIRTIO_PCI_VRING_ALIGN - 1));
+ ptr + (((queue_size * sizeof (vnet_virtio_vring_packed_desc_t)) +
+ sizeof (vnet_virtio_vring_desc_event_t) +
+ VNET_VIRTIO_PCI_VRING_ALIGN - 1) &
+ ~(VNET_VIRTIO_PCI_VRING_ALIGN - 1));
vring->device_event->off_wrap = 0;
vring->device_event->flags = 0;
@@ -879,8 +868,8 @@ virtio_pci_vring_packed_init (vlib_main_t * vm, virtio_if_t * vif,
virtio_log_debug (vif, "rx-queue: number %u, size %u", queue_num,
queue_size);
}
- vring->size = queue_size;
- if (vif->virtio_pci_func->setup_queue (vm, vif, queue_num, (void *) vring))
+ vring->queue_size = queue_size;
+ if (vif->virtio_pci_func->setup_queue (vm, vif, queue_num, vring))
return clib_error_return (0, "error in queue address setup");
vring->queue_notify_offset =
@@ -1553,7 +1542,7 @@ virtio_pci_delete_if (vlib_main_t * vm, virtio_if_t * vif)
vec_foreach_index (i, vif->rxq_vrings)
{
- virtio_vring_t *vring = vec_elt_at_index (vif->rxq_vrings, i);
+ vnet_virtio_vring_t *vring = vec_elt_at_index (vif->rxq_vrings, i);
if (vring->used)
{
virtio_free_buffers (vm, vring);
@@ -1566,7 +1555,7 @@ virtio_pci_delete_if (vlib_main_t * vm, virtio_if_t * vif)
vec_foreach_index (i, vif->txq_vrings)
{
- virtio_vring_t *vring = vec_elt_at_index (vif->txq_vrings, i);
+ vnet_virtio_vring_t *vring = vec_elt_at_index (vif->txq_vrings, i);
if (vring->used)
{
virtio_free_buffers (vm, vring);
diff --git a/src/vnet/devices/virtio/pci.h b/src/vnet/devices/virtio/pci.h
index 70aa9833c2d..db20537bc3f 100644
--- a/src/vnet/devices/virtio/pci.h
+++ b/src/vnet/devices/virtio/pci.h
@@ -87,7 +87,7 @@ typedef enum
#define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12
-#define VIRTIO_PCI_VRING_ALIGN 4096
+#define VNET_VIRTIO_PCI_VRING_ALIGN 4096
typedef enum
{
@@ -192,8 +192,8 @@ typedef struct _virtio_pci_func
u16 (*get_queue_size) (vlib_main_t * vm, virtio_if_t * vif, u16 queue_id);
void (*set_queue_size) (vlib_main_t * vm, virtio_if_t * vif, u16 queue_id,
u16 queue_size);
- u8 (*setup_queue) (vlib_main_t * vm, virtio_if_t * vif, u16 queue_id,
- void *p);
+ u8 (*setup_queue) (vlib_main_t *vm, virtio_if_t *vif, u16 queue_id,
+ vnet_virtio_vring_t *vring);
void (*del_queue) (vlib_main_t * vm, virtio_if_t * vif, u16 queue_id);
u16 (*get_queue_notify_off) (vlib_main_t * vm, virtio_if_t * vif,
u16 queue_id);
diff --git a/src/vnet/devices/virtio/vhost_user.c b/src/vnet/devices/virtio/vhost_user.c
index 0d24ad5aec8..9c585722b48 100644
--- a/src/vnet/devices/virtio/vhost_user.c
+++ b/src/vnet/devices/virtio/vhost_user.c
@@ -689,9 +689,12 @@ vhost_user_socket_read (clib_file_t * uf)
goto close_socket;
}
- vring_desc_t *desc = map_user_mem (vui, msg.addr.desc_user_addr);
- vring_used_t *used = map_user_mem (vui, msg.addr.used_user_addr);
- vring_avail_t *avail = map_user_mem (vui, msg.addr.avail_user_addr);
+ vnet_virtio_vring_desc_t *desc =
+ map_user_mem (vui, msg.addr.desc_user_addr);
+ vnet_virtio_vring_used_t *used =
+ map_user_mem (vui, msg.addr.used_user_addr);
+ vnet_virtio_vring_avail_t *avail =
+ map_user_mem (vui, msg.addr.avail_user_addr);
if ((desc == NULL) || (used == NULL) || (avail == NULL))
{
@@ -1930,7 +1933,8 @@ format_vhost_user_desc (u8 * s, va_list * args)
{
char *fmt = va_arg (*args, char *);
vhost_user_intf_t *vui = va_arg (*args, vhost_user_intf_t *);
- vring_desc_t *desc_table = va_arg (*args, vring_desc_t *);
+ vnet_virtio_vring_desc_t *desc_table =
+ va_arg (*args, vnet_virtio_vring_desc_t *);
int idx = va_arg (*args, int);
u32 *mem_hint = va_arg (*args, u32 *);
@@ -1959,7 +1963,7 @@ vhost_user_show_desc (vlib_main_t * vm, vhost_user_intf_t * vui, int q,
u32 mem_hint = 0;
u32 idx;
u32 n_entries;
- vring_desc_t *desc_table;
+ vnet_virtio_vring_desc_t *desc_table;
vhost_user_vring_t *vq = &vui->vrings[q];
if (vq->avail && vq->used)
@@ -1989,7 +1993,8 @@ vhost_user_show_desc (vlib_main_t * vm, vhost_user_intf_t * vui, int q,
desc_table, j, &mem_hint);
if (show_verbose && (desc_table[j].flags & VRING_DESC_F_INDIRECT))
{
- n_entries = desc_table[j].len / sizeof (vring_desc_t);
+ n_entries =
+ desc_table[j].len / sizeof (vnet_virtio_vring_desc_t);
desc_table = map_guest_mem (vui, desc_table[j].addr, &mem_hint);
if (desc_table)
{
@@ -2014,7 +2019,8 @@ format_vhost_user_packed_desc (u8 * s, va_list * args)
{
char *fmt = va_arg (*args, char *);
vhost_user_intf_t *vui = va_arg (*args, vhost_user_intf_t *);
- vring_packed_desc_t *desc_table = va_arg (*args, vring_packed_desc_t *);
+ vnet_virtio_vring_packed_desc_t *desc_table =
+ va_arg (*args, vnet_virtio_vring_packed_desc_t *);
int idx = va_arg (*args, int);
u32 *mem_hint = va_arg (*args, u32 *);
@@ -2056,7 +2062,7 @@ vhost_user_show_desc_packed (vlib_main_t * vm, vhost_user_intf_t * vui, int q,
u32 mem_hint = 0;
u32 idx;
u32 n_entries;
- vring_packed_desc_t *desc_table;
+ vnet_virtio_vring_packed_desc_t *desc_table;
vhost_user_vring_t *vq = &vui->vrings[q];
u16 off_wrap, event_idx;
diff --git a/src/vnet/devices/virtio/vhost_user.h b/src/vnet/devices/virtio/vhost_user.h
index 59db5b4c592..f44951e030a 100644
--- a/src/vnet/devices/virtio/vhost_user.h
+++ b/src/vnet/devices/virtio/vhost_user.h
@@ -186,18 +186,18 @@ typedef struct
u16 n_since_last_int;
union
{
- vring_desc_t *desc;
- vring_packed_desc_t *packed_desc;
+ vnet_virtio_vring_desc_t *desc;
+ vnet_virtio_vring_packed_desc_t *packed_desc;
};
union
{
- vring_avail_t *avail;
- vring_desc_event_t *avail_event;
+ vnet_virtio_vring_avail_t *avail;
+ vnet_virtio_vring_desc_event_t *avail_event;
};
union
{
- vring_used_t *used;
- vring_desc_event_t *used_event;
+ vnet_virtio_vring_used_t *used;
+ vnet_virtio_vring_desc_event_t *used_event;
};
uword desc_user_addr;
uword used_user_addr;
@@ -306,7 +306,7 @@ typedef struct
u16 device_index; /** The device index */
u32 virtio_ring_flags; /** Runtime queue flags **/
u16 first_desc_len; /** Length of the first data descriptor **/
- virtio_net_hdr_mrg_rxbuf_t hdr; /** Virtio header **/
+ vnet_virtio_net_hdr_mrg_rxbuf_t hdr; /** Virtio header **/
} vhost_trace_t;
#define VHOST_USER_RX_BUFFERS_N (2 * VLIB_FRAME_SIZE + 2)
@@ -317,7 +317,7 @@ typedef struct
u32 rx_buffers_len;
u32 rx_buffers[VHOST_USER_RX_BUFFERS_N];
- virtio_net_hdr_mrg_rxbuf_t tx_headers[VLIB_FRAME_SIZE];
+ vnet_virtio_net_hdr_mrg_rxbuf_t tx_headers[VLIB_FRAME_SIZE];
vhost_copy_t copy[VHOST_USER_COPY_ARRAY_N];
/* This is here so it doesn't end-up
diff --git a/src/vnet/devices/virtio/vhost_user_inline.h b/src/vnet/devices/virtio/vhost_user_inline.h
index 5297453c317..8bdff3733a7 100644
--- a/src/vnet/devices/virtio/vhost_user_inline.h
+++ b/src/vnet/devices/virtio/vhost_user_inline.h
@@ -199,12 +199,15 @@ vhost_user_log_dirty_pages_2 (vhost_user_intf_t * vui,
}
}
-
-#define vhost_user_log_dirty_ring(vui, vq, member) \
- if (PREDICT_FALSE(vq->log_used)) { \
- vhost_user_log_dirty_pages_2(vui, vq->log_guest_addr + STRUCT_OFFSET_OF(vring_used_t, member), \
- sizeof(vq->used->member), 0); \
- }
+#define vhost_user_log_dirty_ring(vui, vq, member) \
+ if (PREDICT_FALSE (vq->log_used)) \
+ { \
+ vhost_user_log_dirty_pages_2 ( \
+ vui, \
+ vq->log_guest_addr + \
+ STRUCT_OFFSET_OF (vnet_virtio_vring_used_t, member), \
+ sizeof (vq->used->member), 0); \
+ }
static_always_inline u8 *
format_vhost_trace (u8 * s, va_list * va)
@@ -425,7 +428,7 @@ vhost_user_advance_last_avail_table_idx (vhost_user_intf_t * vui,
{
if (chained)
{
- vring_packed_desc_t *desc_table = vring->packed_desc;
+ vnet_virtio_vring_packed_desc_t *desc_table = vring->packed_desc;
/* pick up the slot of the next avail idx */
while (desc_table[vring->last_avail_idx & vring->qsz_mask].flags &
@@ -449,9 +452,9 @@ vhost_user_undo_advanced_last_avail_idx (vhost_user_vring_t * vring)
}
static_always_inline void
-vhost_user_dequeue_descs (vhost_user_vring_t * rxvq,
- virtio_net_hdr_mrg_rxbuf_t * hdr,
- u16 * n_descs_processed)
+vhost_user_dequeue_descs (vhost_user_vring_t *rxvq,
+ vnet_virtio_net_hdr_mrg_rxbuf_t *hdr,
+ u16 *n_descs_processed)
{
u16 i;
diff --git a/src/vnet/devices/virtio/vhost_user_input.c b/src/vnet/devices/virtio/vhost_user_input.c
index ffa2d374abf..841a9798212 100644
--- a/src/vnet/devices/virtio/vhost_user_input.c
+++ b/src/vnet/devices/virtio/vhost_user_input.c
@@ -102,8 +102,8 @@ vhost_user_rx_trace (vhost_trace_t * t,
{
vhost_user_main_t *vum = &vhost_user_main;
u32 desc_current = txvq->avail->ring[last_avail_idx & txvq->qsz_mask];
- vring_desc_t *hdr_desc = 0;
- virtio_net_hdr_mrg_rxbuf_t *hdr;
+ vnet_virtio_vring_desc_t *hdr_desc = 0;
+ vnet_virtio_net_hdr_mrg_rxbuf_t *hdr;
u32 hint = 0;
clib_memset (t, 0, sizeof (*t));
@@ -249,8 +249,8 @@ vhost_user_input_rewind_buffers (vlib_main_t * vm,
}
static_always_inline void
-vhost_user_handle_rx_offload (vlib_buffer_t * b0, u8 * b0_data,
- virtio_net_hdr_t * hdr)
+vhost_user_handle_rx_offload (vlib_buffer_t *b0, u8 *b0_data,
+ vnet_virtio_net_hdr_t *hdr)
{
u8 l4_hdr_sz = 0;
u8 l4_proto = 0;
@@ -517,7 +517,7 @@ vhost_user_if_input (vlib_main_t *vm, vhost_user_main_t *vum,
u32 bi_current;
u16 desc_current;
u32 desc_data_offset;
- vring_desc_t *desc_table = txvq->desc;
+ vnet_virtio_vring_desc_t *desc_table = txvq->desc;
if (PREDICT_FALSE (cpu->rx_buffers_len <= 1))
{
@@ -580,7 +580,7 @@ vhost_user_if_input (vlib_main_t *vm, vhost_user_main_t *vum,
if (enable_csum)
{
- virtio_net_hdr_mrg_rxbuf_t *hdr;
+ vnet_virtio_net_hdr_mrg_rxbuf_t *hdr;
u8 *b_data;
u16 current;
@@ -769,7 +769,7 @@ vhost_user_mark_desc_consumed (vhost_user_intf_t * vui,
vhost_user_vring_t * txvq, u16 desc_head,
u16 n_descs_processed)
{
- vring_packed_desc_t *desc_table = txvq->packed_desc;
+ vnet_virtio_vring_packed_desc_t *desc_table = txvq->packed_desc;
u16 desc_idx;
u16 mask = txvq->qsz_mask;
@@ -791,8 +791,8 @@ vhost_user_rx_trace_packed (vhost_trace_t * t, vhost_user_intf_t * vui,
u16 desc_current)
{
vhost_user_main_t *vum = &vhost_user_main;
- vring_packed_desc_t *hdr_desc;
- virtio_net_hdr_mrg_rxbuf_t *hdr;
+ vnet_virtio_vring_packed_desc_t *hdr_desc;
+ vnet_virtio_net_hdr_mrg_rxbuf_t *hdr;
u32 hint = 0;
clib_memset (t, 0, sizeof (*t));
@@ -923,12 +923,13 @@ one_by_one:
}
static_always_inline u32
-vhost_user_do_offload (vhost_user_intf_t * vui,
- vring_packed_desc_t * desc_table, u16 desc_current,
- u16 mask, vlib_buffer_t * b_head, u32 * map_hint)
+vhost_user_do_offload (vhost_user_intf_t *vui,
+ vnet_virtio_vring_packed_desc_t *desc_table,
+ u16 desc_current, u16 mask, vlib_buffer_t *b_head,
+ u32 *map_hint)
{
u32 rc = VHOST_USER_INPUT_FUNC_ERROR_NO_ERROR;
- virtio_net_hdr_mrg_rxbuf_t *hdr;
+ vnet_virtio_net_hdr_mrg_rxbuf_t *hdr;
u8 *b_data;
u32 desc_data_offset = vui->virtio_net_hdr_sz;
@@ -989,7 +990,7 @@ vhost_user_compute_indirect_desc_len (vhost_user_intf_t * vui,
u32 buffer_data_size, u16 desc_current,
u32 * map_hint)
{
- vring_packed_desc_t *desc_table = txvq->packed_desc;
+ vnet_virtio_vring_packed_desc_t *desc_table = txvq->packed_desc;
u32 desc_len = 0;
u16 desc_data_offset = vui->virtio_net_hdr_sz;
u16 desc_idx = desc_current;
@@ -1015,7 +1016,7 @@ vhost_user_compute_chained_desc_len (vhost_user_intf_t * vui,
u32 buffer_data_size, u16 * current,
u16 * n_left)
{
- vring_packed_desc_t *desc_table = txvq->packed_desc;
+ vnet_virtio_vring_packed_desc_t *desc_table = txvq->packed_desc;
u32 desc_len = 0;
u16 mask = txvq->qsz_mask;
@@ -1038,14 +1039,13 @@ vhost_user_compute_chained_desc_len (vhost_user_intf_t * vui,
}
static_always_inline void
-vhost_user_assemble_packet (vring_packed_desc_t * desc_table,
- u16 * desc_idx, vlib_buffer_t * b_head,
- vlib_buffer_t ** b_current, u32 ** next,
- vlib_buffer_t *** b, u32 * bi_current,
- vhost_cpu_t * cpu, u16 * copy_len,
- u32 * buffers_used, u32 buffers_required,
- u32 * desc_data_offset, u32 buffer_data_size,
- u16 mask)
+vhost_user_assemble_packet (vnet_virtio_vring_packed_desc_t *desc_table,
+ u16 *desc_idx, vlib_buffer_t *b_head,
+ vlib_buffer_t **b_current, u32 **next,
+ vlib_buffer_t ***b, u32 *bi_current,
+ vhost_cpu_t *cpu, u16 *copy_len, u32 *buffers_used,
+ u32 buffers_required, u32 *desc_data_offset,
+ u32 buffer_data_size, u16 mask)
{
u32 desc_data_l;
@@ -1108,7 +1108,7 @@ vhost_user_if_input_packed (vlib_main_t *vm, vhost_user_main_t *vum,
u32 current_config_index = ~0;
u16 mask = txvq->qsz_mask;
u16 desc_current, desc_head, last_used_idx;
- vring_packed_desc_t *desc_table = 0;
+ vnet_virtio_vring_packed_desc_t *desc_table = 0;
u32 n_descs_processed = 0;
u32 rv;
vlib_buffer_t **b;
diff --git a/src/vnet/devices/virtio/vhost_user_output.c b/src/vnet/devices/virtio/vhost_user_output.c
index 15e39a11692..3b7bf97c3f8 100644
--- a/src/vnet/devices/virtio/vhost_user_output.c
+++ b/src/vnet/devices/virtio/vhost_user_output.c
@@ -127,7 +127,7 @@ vhost_user_tx_trace (vhost_trace_t * t,
vhost_user_main_t *vum = &vhost_user_main;
u32 last_avail_idx = rxvq->last_avail_idx;
u32 desc_current = rxvq->avail->ring[last_avail_idx & rxvq->qsz_mask];
- vring_desc_t *hdr_desc = 0;
+ vnet_virtio_vring_desc_t *hdr_desc = 0;
u32 hint = 0;
clib_memset (t, 0, sizeof (*t));
@@ -202,8 +202,8 @@ vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
}
static_always_inline void
-vhost_user_handle_tx_offload (vhost_user_intf_t * vui, vlib_buffer_t * b,
- virtio_net_hdr_t * hdr)
+vhost_user_handle_tx_offload (vhost_user_intf_t *vui, vlib_buffer_t *b,
+ vnet_virtio_net_hdr_t *hdr)
{
generic_header_offset_t gho = { 0 };
int is_ip4 = b->flags & VNET_BUFFER_F_IS_IP4;
@@ -282,7 +282,7 @@ vhost_user_mark_desc_available (vlib_main_t * vm, vhost_user_intf_t * vui,
vlib_frame_t * frame, u32 n_left)
{
u16 desc_idx, flags;
- vring_packed_desc_t *desc_table = rxvq->packed_desc;
+ vnet_virtio_vring_packed_desc_t *desc_table = rxvq->packed_desc;
u16 last_used_idx = rxvq->last_used_idx;
if (PREDICT_FALSE (*n_descs_processed == 0))
@@ -314,7 +314,7 @@ vhost_user_mark_desc_available (vlib_main_t * vm, vhost_user_intf_t * vui,
if (chained)
{
- vring_packed_desc_t *desc_table = rxvq->packed_desc;
+ vnet_virtio_vring_packed_desc_t *desc_table = rxvq->packed_desc;
while (desc_table[rxvq->last_used_idx & rxvq->qsz_mask].flags &
VRING_DESC_F_NEXT)
@@ -344,7 +344,7 @@ vhost_user_tx_trace_packed (vhost_trace_t * t, vhost_user_intf_t * vui,
vhost_user_main_t *vum = &vhost_user_main;
u32 last_avail_idx = rxvq->last_avail_idx;
u32 desc_current = last_avail_idx & rxvq->qsz_mask;
- vring_packed_desc_t *hdr_desc = 0;
+ vnet_virtio_vring_packed_desc_t *hdr_desc = 0;
u32 hint = 0;
clib_memset (t, 0, sizeof (*t));
@@ -388,7 +388,7 @@ vhost_user_device_class_packed (vlib_main_t *vm, vlib_node_runtime_t *node,
u8 retry = 8;
u16 copy_len;
u16 tx_headers_len;
- vring_packed_desc_t *desc_table;
+ vnet_virtio_vring_packed_desc_t *desc_table;
u32 or_flags;
u16 desc_head, desc_index, desc_len;
u16 n_descs_processed;
@@ -438,7 +438,7 @@ retry:
{
indirect = 1;
if (PREDICT_FALSE (desc_table[desc_head].len <
- sizeof (vring_packed_desc_t)))
+ sizeof (vnet_virtio_vring_packed_desc_t)))
{
error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
goto done;
@@ -461,7 +461,7 @@ retry:
buffer_len = desc_table[desc_index].len;
/* Get a header from the header array */
- virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len];
+ vnet_virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len];
tx_headers_len++;
hdr->hdr.flags = 0;
hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
@@ -545,7 +545,7 @@ retry:
* MRG is available
* This is the default setting for the guest VM
*/
- virtio_net_hdr_mrg_rxbuf_t *hdr =
+ vnet_virtio_net_hdr_mrg_rxbuf_t *hdr =
&cpu->tx_headers[tx_headers_len - 1];
desc_table[desc_index].len = desc_len;
@@ -742,7 +742,7 @@ retry:
{
vlib_buffer_t *b0, *current_b0;
u16 desc_head, desc_index, desc_len;
- vring_desc_t *desc_table;
+ vnet_virtio_vring_desc_t *desc_table;
uword buffer_map_addr;
u32 buffer_len;
u16 bytes_left;
@@ -773,8 +773,8 @@ retry:
* I don't know of any driver providing indirect for RX. */
if (PREDICT_FALSE (rxvq->desc[desc_head].flags & VRING_DESC_F_INDIRECT))
{
- if (PREDICT_FALSE
- (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
+ if (PREDICT_FALSE (rxvq->desc[desc_head].len <
+ sizeof (vnet_virtio_vring_desc_t)))
{
error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
goto done;
@@ -796,7 +796,8 @@ retry:
{
// Get a header from the header array
- virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len];
+ vnet_virtio_net_hdr_mrg_rxbuf_t *hdr =
+ &cpu->tx_headers[tx_headers_len];
tx_headers_len++;
hdr->hdr.flags = 0;
hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
@@ -835,7 +836,7 @@ retry:
}
else if (vui->virtio_net_hdr_sz == 12) //MRG is available
{
- virtio_net_hdr_mrg_rxbuf_t *hdr =
+ vnet_virtio_net_hdr_mrg_rxbuf_t *hdr =
&cpu->tx_headers[tx_headers_len - 1];
//Move from available to used buffer
@@ -870,8 +871,8 @@ retry:
{
//It is seriously unlikely that a driver will put indirect descriptor
//after non-indirect descriptor.
- if (PREDICT_FALSE
- (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
+ if (PREDICT_FALSE (rxvq->desc[desc_head].len <
+ sizeof (vnet_virtio_vring_desc_t)))
{
error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
goto done;
diff --git a/src/vnet/devices/virtio/virtio.c b/src/vnet/devices/virtio/virtio.c
index bbca81c2d02..ee149350a70 100644
--- a/src/vnet/devices/virtio/virtio.c
+++ b/src/vnet/devices/virtio/virtio.c
@@ -60,7 +60,7 @@ call_read_ready (clib_file_t * uf)
clib_error_t *
virtio_vring_init (vlib_main_t * vm, virtio_if_t * vif, u16 idx, u16 sz)
{
- virtio_vring_t *vring;
+ vnet_virtio_vring_t *vring;
int i;
if (!is_pow2 (sz))
@@ -85,19 +85,20 @@ virtio_vring_init (vlib_main_t * vm, virtio_if_t * vif, u16 idx, u16 sz)
CLIB_CACHE_LINE_BYTES);
vring = vec_elt_at_index (vif->rxq_vrings, RX_QUEUE_ACCESS (idx));
}
- i = sizeof (vring_desc_t) * sz;
+ i = sizeof (vnet_virtio_vring_desc_t) * sz;
i = round_pow2 (i, CLIB_CACHE_LINE_BYTES);
vring->desc = clib_mem_alloc_aligned (i, CLIB_CACHE_LINE_BYTES);
clib_memset (vring->desc, 0, i);
- i = sizeof (vring_avail_t) + sz * sizeof (vring->avail->ring[0]);
+ i = sizeof (vnet_virtio_vring_avail_t) + sz * sizeof (vring->avail->ring[0]);
i = round_pow2 (i, CLIB_CACHE_LINE_BYTES);
vring->avail = clib_mem_alloc_aligned (i, CLIB_CACHE_LINE_BYTES);
clib_memset (vring->avail, 0, i);
// tell kernel that we don't need interrupt
vring->avail->flags = VRING_AVAIL_F_NO_INTERRUPT;
- i = sizeof (vring_used_t) + sz * sizeof (vring_used_elem_t);
+ i = sizeof (vnet_virtio_vring_used_t) +
+ sz * sizeof (vnet_virtio_vring_used_elem_t);
i = round_pow2 (i, CLIB_CACHE_LINE_BYTES);
vring->used = clib_mem_alloc_aligned (i, CLIB_CACHE_LINE_BYTES);
clib_memset (vring->used, 0, i);
@@ -115,20 +116,20 @@ virtio_vring_init (vlib_main_t * vm, virtio_if_t * vif, u16 idx, u16 sz)
else
vring->call_fd = eventfd (0, EFD_NONBLOCK | EFD_CLOEXEC);
- vring->size = sz;
+ vring->queue_size = sz;
vring->kick_fd = eventfd (0, EFD_NONBLOCK | EFD_CLOEXEC);
virtio_log_debug (vif, "vring %u size %u call_fd %d kick_fd %d", idx,
- vring->size, vring->call_fd, vring->kick_fd);
+ vring->queue_size, vring->call_fd, vring->kick_fd);
return 0;
}
inline void
-virtio_free_buffers (vlib_main_t * vm, virtio_vring_t * vring)
+virtio_free_buffers (vlib_main_t *vm, vnet_virtio_vring_t *vring)
{
u16 used = vring->desc_in_use;
u16 last = vring->last_used_idx;
- u16 mask = vring->size - 1;
+ u16 mask = vring->queue_size - 1;
while (used)
{
@@ -141,7 +142,7 @@ virtio_free_buffers (vlib_main_t * vm, virtio_vring_t * vring)
clib_error_t *
virtio_vring_free_rx (vlib_main_t * vm, virtio_if_t * vif, u32 idx)
{
- virtio_vring_t *vring =
+ vnet_virtio_vring_t *vring =
vec_elt_at_index (vif->rxq_vrings, RX_QUEUE_ACCESS (idx));
clib_file_del_by_index (&file_main, vring->call_file_index);
@@ -163,7 +164,7 @@ virtio_vring_free_rx (vlib_main_t * vm, virtio_if_t * vif, u32 idx)
clib_error_t *
virtio_vring_free_tx (vlib_main_t * vm, virtio_if_t * vif, u32 idx)
{
- virtio_vring_t *vring =
+ vnet_virtio_vring_t *vring =
vec_elt_at_index (vif->txq_vrings, TX_QUEUE_ACCESS (idx));
close (vring->kick_fd);
@@ -188,7 +189,7 @@ virtio_set_packet_coalesce (virtio_if_t * vif)
{
vnet_main_t *vnm = vnet_get_main ();
vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, vif->hw_if_index);
- virtio_vring_t *vring;
+ vnet_virtio_vring_t *vring;
vif->packet_coalesce = 1;
vec_foreach (vring, vif->txq_vrings)
{
@@ -203,7 +204,7 @@ virtio_set_packet_buffering (virtio_if_t * vif, u16 buffering_size)
{
vnet_main_t *vnm = vnet_get_main ();
vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, vif->hw_if_index);
- virtio_vring_t *vring;
+ vnet_virtio_vring_t *vring;
clib_error_t *error = 0;
vif->packet_buffering = 1;
@@ -221,7 +222,8 @@ virtio_set_packet_buffering (virtio_if_t * vif, u16 buffering_size)
}
static void
-virtio_vring_fill (vlib_main_t *vm, virtio_if_t *vif, virtio_vring_t *vring)
+virtio_vring_fill (vlib_main_t *vm, virtio_if_t *vif,
+ vnet_virtio_vring_t *vring)
{
if (vif->is_packed)
virtio_refill_vring_packed (vm, vif, vif->type, vring,
@@ -237,7 +239,7 @@ void
virtio_vring_set_rx_queues (vlib_main_t *vm, virtio_if_t *vif)
{
vnet_main_t *vnm = vnet_get_main ();
- virtio_vring_t *vring;
+ vnet_virtio_vring_t *vring;
u32 i = 0;
vnet_hw_if_set_input_node (vnm, vif->hw_if_index, virtio_input_node.index);
@@ -287,7 +289,7 @@ void
virtio_vring_set_tx_queues (vlib_main_t *vm, virtio_if_t *vif)
{
vnet_main_t *vnm = vnet_get_main ();
- virtio_vring_t *vring;
+ vnet_virtio_vring_t *vring;
vec_foreach (vring, vif->txq_vrings)
{
@@ -316,9 +318,9 @@ virtio_set_net_hdr_size (virtio_if_t * vif)
{
if (vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_MRG_RXBUF) ||
vif->features & VIRTIO_FEATURE (VIRTIO_F_VERSION_1))
- vif->virtio_net_hdr_sz = sizeof (virtio_net_hdr_v1_t);
+ vif->virtio_net_hdr_sz = sizeof (vnet_virtio_net_hdr_v1_t);
else
- vif->virtio_net_hdr_sz = sizeof (virtio_net_hdr_t);
+ vif->virtio_net_hdr_sz = sizeof (vnet_virtio_net_hdr_t);
}
inline void
@@ -329,7 +331,7 @@ virtio_show (vlib_main_t *vm, u32 *hw_if_indices, u8 show_descr,
virtio_if_t *vif;
vnet_main_t *vnm = &vnet_main;
virtio_main_t *mm = &virtio_main;
- virtio_vring_t *vring;
+ vnet_virtio_vring_t *vring;
struct feat_struct
{
u8 bit;
@@ -442,10 +444,10 @@ virtio_show (vlib_main_t *vm, u32 *hw_if_indices, u8 show_descr,
{
vring = vec_elt_at_index (vif->rxq_vrings, i);
vlib_cli_output (vm, " Virtqueue (RX) %d", vring->queue_id);
- vlib_cli_output (vm,
- " qsz %d, last_used_idx %d, desc_next %d, desc_in_use %d",
- vring->size, vring->last_used_idx, vring->desc_next,
- vring->desc_in_use);
+ vlib_cli_output (
+ vm, " qsz %d, last_used_idx %d, desc_next %d, desc_in_use %d",
+ vring->queue_size, vring->last_used_idx, vring->desc_next,
+ vring->desc_in_use);
if (vif->is_packed)
{
vlib_cli_output (vm,
@@ -476,11 +478,12 @@ virtio_show (vlib_main_t *vm, u32 *hw_if_indices, u8 show_descr,
" id addr len flags next/id user_addr\n");
vlib_cli_output (vm,
" ===== ================== ===== ====== ======= ==================\n");
- for (j = 0; j < vring->size; j++)
+ for (j = 0; j < vring->queue_size; j++)
{
if (vif->is_packed)
{
- vring_packed_desc_t *desc = &vring->packed_desc[j];
+ vnet_virtio_vring_packed_desc_t *desc =
+ &vring->packed_desc[j];
vlib_cli_output (vm,
" %-5d 0x%016lx %-5d 0x%04x %-8d 0x%016lx\n",
j, desc->addr,
@@ -489,7 +492,7 @@ virtio_show (vlib_main_t *vm, u32 *hw_if_indices, u8 show_descr,
}
else
{
- vring_desc_t *desc = &vring->desc[j];
+ vnet_virtio_vring_desc_t *desc = &vring->desc[j];
vlib_cli_output (vm,
" %-5d 0x%016lx %-5d 0x%04x %-8d 0x%016lx\n",
j, desc->addr,
@@ -503,10 +506,10 @@ virtio_show (vlib_main_t *vm, u32 *hw_if_indices, u8 show_descr,
{
vring = vec_elt_at_index (vif->txq_vrings, i);
vlib_cli_output (vm, " Virtqueue (TX) %d", vring->queue_id);
- vlib_cli_output (vm,
- " qsz %d, last_used_idx %d, desc_next %d, desc_in_use %d",
- vring->size, vring->last_used_idx, vring->desc_next,
- vring->desc_in_use);
+ vlib_cli_output (
+ vm, " qsz %d, last_used_idx %d, desc_next %d, desc_in_use %d",
+ vring->queue_size, vring->last_used_idx, vring->desc_next,
+ vring->desc_in_use);
if (vif->is_packed)
{
vlib_cli_output (vm,
@@ -547,11 +550,12 @@ virtio_show (vlib_main_t *vm, u32 *hw_if_indices, u8 show_descr,
" id addr len flags next/id user_addr\n");
vlib_cli_output (vm,
" ===== ================== ===== ====== ======== ==================\n");
- for (j = 0; j < vring->size; j++)
+ for (j = 0; j < vring->queue_size; j++)
{
if (vif->is_packed)
{
- vring_packed_desc_t *desc = &vring->packed_desc[j];
+ vnet_virtio_vring_packed_desc_t *desc =
+ &vring->packed_desc[j];
vlib_cli_output (vm,
" %-5d 0x%016lx %-5d 0x%04x %-8d 0x%016lx\n",
j, desc->addr,
@@ -560,7 +564,7 @@ virtio_show (vlib_main_t *vm, u32 *hw_if_indices, u8 show_descr,
}
else
{
- vring_desc_t *desc = &vring->desc[j];
+ vnet_virtio_vring_desc_t *desc = &vring->desc[j];
vlib_cli_output (vm,
" %-5d 0x%016lx %-5d 0x%04x %-8d 0x%016lx\n",
j, desc->addr,
@@ -575,10 +579,10 @@ virtio_show (vlib_main_t *vm, u32 *hw_if_indices, u8 show_descr,
{
vring = vif->cxq_vring;
vlib_cli_output (vm, " Virtqueue (CTRL) %d", vring->queue_id);
- vlib_cli_output (vm,
- " qsz %d, last_used_idx %d, desc_next %d, desc_in_use %d",
- vring->size, vring->last_used_idx,
- vring->desc_next, vring->desc_in_use);
+ vlib_cli_output (
+ vm, " qsz %d, last_used_idx %d, desc_next %d, desc_in_use %d",
+ vring->queue_size, vring->last_used_idx, vring->desc_next,
+ vring->desc_in_use);
if (vif->is_packed)
{
vlib_cli_output (vm,
@@ -606,11 +610,12 @@ virtio_show (vlib_main_t *vm, u32 *hw_if_indices, u8 show_descr,
" id addr len flags next/id user_addr\n");
vlib_cli_output (vm,
" ===== ================== ===== ====== ======== ==================\n");
- for (j = 0; j < vring->size; j++)
+ for (j = 0; j < vring->queue_size; j++)
{
if (vif->is_packed)
{
- vring_packed_desc_t *desc = &vring->packed_desc[j];
+ vnet_virtio_vring_packed_desc_t *desc =
+ &vring->packed_desc[j];
vlib_cli_output (vm,
" %-5d 0x%016lx %-5d 0x%04x %-8d 0x%016lx\n",
j, desc->addr,
@@ -619,7 +624,7 @@ virtio_show (vlib_main_t *vm, u32 *hw_if_indices, u8 show_descr,
}
else
{
- vring_desc_t *desc = &vring->desc[j];
+ vnet_virtio_vring_desc_t *desc = &vring->desc[j];
vlib_cli_output (vm,
" %-5d 0x%016lx %-5d 0x%04x %-8d 0x%016lx\n",
j, desc->addr,
diff --git a/src/vnet/devices/virtio/virtio.h b/src/vnet/devices/virtio/virtio.h
index 3784a1bf2e9..e12198c3bf8 100644
--- a/src/vnet/devices/virtio/virtio.h
+++ b/src/vnet/devices/virtio/virtio.h
@@ -69,19 +69,19 @@ typedef struct
{
struct
{
- vring_desc_t *desc;
- vring_used_t *used;
- vring_avail_t *avail;
+ vnet_virtio_vring_desc_t *desc;
+ vnet_virtio_vring_used_t *used;
+ vnet_virtio_vring_avail_t *avail;
};
struct
{
- vring_packed_desc_t *packed_desc;
- vring_desc_event_t *driver_event;
- vring_desc_event_t *device_event;
+ vnet_virtio_vring_packed_desc_t *packed_desc;
+ vnet_virtio_vring_desc_event_t *driver_event;
+ vnet_virtio_vring_desc_event_t *device_event;
};
};
u32 *buffers;
- u16 size;
+ u16 queue_size;
u16 queue_id;
u32 queue_index;
u16 desc_in_use;
@@ -110,7 +110,7 @@ typedef struct
vnet_hw_if_rx_mode mode;
virtio_vring_buffering_t *buffering;
gro_flow_table_t *flow_table;
-} virtio_vring_t;
+} vnet_virtio_vring_t;
typedef union
{
@@ -135,8 +135,8 @@ typedef struct
u32 per_interface_next_index;
u16 num_rxqs;
u16 num_txqs;
- virtio_vring_t *rxq_vrings;
- virtio_vring_t *txq_vrings;
+ vnet_virtio_vring_t *rxq_vrings;
+ vnet_virtio_vring_t *txq_vrings;
int gso_enabled;
int csum_offload_enabled;
union
@@ -194,7 +194,7 @@ typedef struct
struct /* native virtio */
{
void *bar;
- virtio_vring_t *cxq_vring;
+ vnet_virtio_vring_t *cxq_vring;
pci_addr_t pci_addr;
u32 bar_id;
u32 notify_off_multiplier;
@@ -235,7 +235,7 @@ clib_error_t *virtio_vring_free_tx (vlib_main_t * vm, virtio_if_t * vif,
u32 idx);
void virtio_vring_set_rx_queues (vlib_main_t *vm, virtio_if_t *vif);
void virtio_vring_set_tx_queues (vlib_main_t *vm, virtio_if_t *vif);
-extern void virtio_free_buffers (vlib_main_t * vm, virtio_vring_t * vring);
+extern void virtio_free_buffers (vlib_main_t *vm, vnet_virtio_vring_t *vring);
extern void virtio_set_net_hdr_size (virtio_if_t * vif);
extern void virtio_show (vlib_main_t *vm, u32 *hw_if_indices, u8 show_descr,
virtio_if_type_t type);
@@ -254,7 +254,7 @@ format_function_t format_virtio_device_name;
format_function_t format_virtio_log_name;
static_always_inline void
-virtio_kick (vlib_main_t * vm, virtio_vring_t * vring, virtio_if_t * vif)
+virtio_kick (vlib_main_t *vm, vnet_virtio_vring_t *vring, virtio_if_t *vif)
{
if (vif->type == VIRTIO_IF_TYPE_PCI)
{
@@ -276,7 +276,7 @@ virtio_kick (vlib_main_t * vm, virtio_vring_t * vring, virtio_if_t * vif)
}
static_always_inline u8
-virtio_txq_is_scheduled (virtio_vring_t *vring)
+virtio_txq_is_scheduled (vnet_virtio_vring_t *vring)
{
if (vring)
return (vring->flags & VRING_TX_SCHEDULED);
@@ -284,19 +284,47 @@ virtio_txq_is_scheduled (virtio_vring_t *vring)
}
static_always_inline void
-virtio_txq_set_scheduled (virtio_vring_t *vring)
+virtio_txq_set_scheduled (vnet_virtio_vring_t *vring)
{
if (vring)
vring->flags |= VRING_TX_SCHEDULED;
}
static_always_inline void
-virtio_txq_clear_scheduled (virtio_vring_t *vring)
+virtio_txq_clear_scheduled (vnet_virtio_vring_t *vring)
{
if (vring)
vring->flags &= ~VRING_TX_SCHEDULED;
}
+static_always_inline void
+vnet_virtio_vring_init (vnet_virtio_vring_t *vring, u16 queue_size, void *p,
+ u32 align)
+{
+ vring->queue_size = queue_size;
+ vring->desc = p;
+ vring->avail =
+ (vnet_virtio_vring_avail_t *) ((char *) p +
+ queue_size *
+ sizeof (vnet_virtio_vring_desc_t));
+ vring->used =
+ (vnet_virtio_vring_used_t
+ *) ((char *) p + ((sizeof (vnet_virtio_vring_desc_t) * queue_size +
+ sizeof (u16) * (3 + queue_size) + align - 1) &
+ ~(align - 1)));
+ vring->avail->flags = VIRTIO_RING_FLAG_MASK_INT;
+}
+
+static_always_inline u16
+vnet_virtio_vring_size (u16 queue_size, u32 align)
+{
+ return ((sizeof (vnet_virtio_vring_desc_t) * queue_size +
+ sizeof (u16) * (3 + queue_size) + align - 1) &
+ ~(align - 1)) +
+ sizeof (u16) * 3 +
+ sizeof (vnet_virtio_vring_used_elem_t) * queue_size;
+}
+
#define virtio_log_debug(vif, f, ...) \
{ \
vlib_log(VLIB_LOG_LEVEL_DEBUG, virtio_main.log_default, \
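
The vnet_virtio_vring_init () / vnet_virtio_vring_size () helpers added above carry over the split-ring layout that this patch removes from virtio_std.h: descriptor table plus available ring, rounded up to the requested alignment, followed by the used ring. A standalone sanity check of that arithmetic with the 16-byte descriptor and 8-byte used-element sizes written out; the helper name and the 256-entry / 4 KiB inputs are illustrative assumptions, not values taken from the patch:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the vnet_virtio_vring_size () arithmetic with plain integers. */
static uint32_t
split_vring_bytes (uint16_t queue_size, uint32_t align)
{
  /* descriptor table + avail ring (flags, idx, ring[], used_event) */
  uint32_t desc_avail = 16u * queue_size + 2u * (3u + queue_size);
  /* that first part is padded up to the requested alignment */
  uint32_t first_part = (desc_avail + align - 1) & ~(align - 1);
  /* used ring: flags, idx, avail_event plus 8-byte used elements */
  uint32_t used = 2u * 3u + 8u * queue_size;
  return first_part + used;
}

int
main (void)
{
  /* 256 entries, 4 KiB alignment: 4096 + 518 rounds up to 8192, plus 2054
   * for the used ring, i.e. 10246 bytes per split vring. */
  printf ("%u\n", split_vring_bytes (256, 4096));
  return 0;
}

Moving the layout into virtio.h is what lets the PCI setup_queue callbacks later in this patch take a vnet_virtio_vring_t directly instead of re-deriving the layout from a raw pointer with a second vring_init () pass.
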
diff --git a/src/vnet/devices/virtio/virtio_api.c b/src/vnet/devices/virtio/virtio_api.c
index 11514c75c59..3197a2fab6d 100644
--- a/src/vnet/devices/virtio/virtio_api.c
+++ b/src/vnet/devices/virtio/virtio_api.c
@@ -193,10 +193,10 @@ virtio_pci_send_sw_interface_details (vpe_api_main_t * am,
pci_address_encode ((vlib_pci_addr_t *) & vif->pci_addr.as_u32,
&mp->pci_addr);
mp->sw_if_index = htonl (vif->sw_if_index);
- virtio_vring_t *vring = vec_elt_at_index (vif->rxq_vrings, 0);
- mp->rx_ring_sz = htons (vring->size);
+ vnet_virtio_vring_t *vring = vec_elt_at_index (vif->rxq_vrings, 0);
+ mp->rx_ring_sz = htons (vring->queue_size);
vring = vec_elt_at_index (vif->txq_vrings, 0);
- mp->tx_ring_sz = htons (vring->size);
+ mp->tx_ring_sz = htons (vring->queue_size);
clib_memcpy (mp->mac_addr, vif->mac_addr, 6);
mp->features = clib_host_to_net_u64 (vif->features);
diff --git a/src/vnet/devices/virtio/virtio_inline.h b/src/vnet/devices/virtio/virtio_inline.h
index 209817d48c7..179f319aa4c 100644
--- a/src/vnet/devices/virtio/virtio_inline.h
+++ b/src/vnet/devices/virtio/virtio_inline.h
@@ -29,11 +29,11 @@ typedef enum
static_always_inline void
virtio_refill_vring_split (vlib_main_t *vm, virtio_if_t *vif,
- virtio_if_type_t type, virtio_vring_t *vring,
+ virtio_if_type_t type, vnet_virtio_vring_t *vring,
const int hdr_sz, u32 node_index)
{
u16 used, next, avail, n_slots, n_refill;
- u16 sz = vring->size;
+ u16 sz = vring->queue_size;
u16 mask = sz - 1;
more:
@@ -47,8 +47,9 @@ more:
next = vring->desc_next;
avail = vring->avail->idx;
- n_slots = vlib_buffer_alloc_to_ring_from_pool (
- vm, vring->buffers, next, vring->size, n_refill, vring->buffer_pool_index);
+ n_slots = vlib_buffer_alloc_to_ring_from_pool (vm, vring->buffers, next,
+ vring->queue_size, n_refill,
+ vring->buffer_pool_index);
if (PREDICT_FALSE (n_slots != n_refill))
{
@@ -60,7 +61,7 @@ more:
while (n_slots)
{
- vring_desc_t *d = &vring->desc[next];
+ vnet_virtio_vring_desc_t *d = &vring->desc[next];
vlib_buffer_t *b = vlib_get_buffer (vm, vring->buffers[next]);
/*
@@ -94,11 +95,11 @@ more:
static_always_inline void
virtio_refill_vring_packed (vlib_main_t *vm, virtio_if_t *vif,
- virtio_if_type_t type, virtio_vring_t *vring,
+ virtio_if_type_t type, vnet_virtio_vring_t *vring,
const int hdr_sz, u32 node_index)
{
u16 used, next, n_slots, n_refill, flags = 0, first_desc_flags;
- u16 sz = vring->size;
+ u16 sz = vring->queue_size;
more:
used = vring->desc_in_use;
@@ -124,7 +125,7 @@ more:
while (n_slots)
{
- vring_packed_desc_t *d = &vring->packed_desc[next];
+ vnet_virtio_vring_packed_desc_t *d = &vring->packed_desc[next];
vlib_buffer_t *b = vlib_get_buffer (vm, vring->buffers[next]);
/*
* current_data may not be initialized with 0 and may contain
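
virtio_refill_vring_split () above computes mask = queue_size - 1 for wrapping descriptor slot indices, while the split ring's avail->idx is, per the virtio spec, a free-running 16-bit counter that is allowed to wrap; the masking trick is why the queue size has to be a power of two. A toy model of that index handling, not the VPP data path itself, with arbitrary starting values:

#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint16_t queue_size = 256; /* must be a power of two for the mask trick */
  uint16_t mask = queue_size - 1;
  uint16_t desc_next = 250;  /* next free descriptor slot */
  uint16_t avail_idx = 65530; /* free-running, about to wrap */

  for (int i = 0; i < 10; i++)
    {
      uint16_t slot = desc_next & mask; /* 250..255, then 0..3 */
      (void) slot; /* a real refill would fill desc[slot] here */
      desc_next++;
      avail_idx++; /* wraps from 65535 to 0 by design */
    }

  assert ((desc_next & mask) == 4); /* 260 masked to 4 */
  assert (avail_idx == 4);          /* 65530 + 10 modulo 2^16 */
  return 0;
}
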
diff --git a/src/vnet/devices/virtio/virtio_pci_legacy.c b/src/vnet/devices/virtio/virtio_pci_legacy.c
index 1426a7035a2..d7a1c982413 100644
--- a/src/vnet/devices/virtio/virtio_pci_legacy.c
+++ b/src/vnet/devices/virtio/virtio_pci_legacy.c
@@ -176,10 +176,11 @@ virtio_pci_legacy_set_queue_num (vlib_main_t * vm, virtio_if_t * vif,
}
static u8
-virtio_pci_legacy_setup_queue (vlib_main_t * vm, virtio_if_t * vif,
- u16 queue_id, void *p)
+virtio_pci_legacy_setup_queue (vlib_main_t *vm, virtio_if_t *vif, u16 queue_id,
+ vnet_virtio_vring_t *vring)
{
- u64 addr = vlib_physmem_get_pa (vm, p) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
+ u64 addr =
+ vlib_physmem_get_pa (vm, vring->desc) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
u32 addr2 = 0, a = (u32) addr;
vlib_pci_write_io_u16 (vm, vif->pci_dev_handle, VIRTIO_PCI_QUEUE_SEL,
&queue_id);
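
virtio_pci_legacy_setup_queue () now reads the descriptor table address straight from the vring and programs it shifted right by VIRTIO_PCI_QUEUE_ADDR_SHIFT. A small sketch of what that register write implies, assuming the conventional legacy-virtio shift of 12 (the queue address register holds a 4 KiB page frame number in 32 bits); the physical address below is hypothetical:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint64_t desc_pa = 0x123456000ULL; /* hypothetical, 4 KiB aligned */
  uint64_t pfn = desc_pa >> 12;      /* legacy page-frame-number encoding */

  assert ((desc_pa & 0xfff) == 0); /* ring memory must be page aligned */
  assert (pfn <= UINT32_MAX);      /* must fit the 32-bit PFN register */
  printf ("QUEUE_PFN = 0x%08x\n", (unsigned) pfn);
  return 0;
}
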
diff --git a/src/vnet/devices/virtio/virtio_pci_modern.c b/src/vnet/devices/virtio/virtio_pci_modern.c
index 8e090ffed3a..f7313d84bbd 100644
--- a/src/vnet/devices/virtio/virtio_pci_modern.c
+++ b/src/vnet/devices/virtio/virtio_pci_modern.c
@@ -265,32 +265,24 @@ virtio_pci_modern_set_queue_device (virtio_if_t * vif, u64 queue_device)
}
static u8
-virtio_pci_modern_setup_queue (vlib_main_t * vm, virtio_if_t * vif,
- u16 queue_id, void *p)
+virtio_pci_modern_setup_queue (vlib_main_t *vm, virtio_if_t *vif, u16 queue_id,
+ vnet_virtio_vring_t *vring)
{
u64 desc, avail, used;
- u16 queue_size = 0;
virtio_pci_modern_set_queue_select (vif, queue_id);
- queue_size = virtio_pci_modern_get_queue_size (vm, vif, queue_id);
if (vif->is_packed)
{
- virtio_vring_t *vring = (virtio_vring_t *) p;
-
desc = vlib_physmem_get_pa (vm, vring->packed_desc);
avail = vlib_physmem_get_pa (vm, vring->driver_event);
used = vlib_physmem_get_pa (vm, vring->device_event);
}
else
{
- vring_t vr;
-
- vring_init (&vr, queue_size, p, VIRTIO_PCI_VRING_ALIGN);
-
- desc = vlib_physmem_get_pa (vm, vr.desc);
- avail = vlib_physmem_get_pa (vm, vr.avail);
- used = vlib_physmem_get_pa (vm, vr.used);
+ desc = vlib_physmem_get_pa (vm, vring->desc);
+ avail = vlib_physmem_get_pa (vm, vring->avail);
+ used = vlib_physmem_get_pa (vm, vring->used);
}
virtio_pci_modern_set_queue_desc (vif, desc);
diff --git a/src/vnet/devices/virtio/virtio_pre_input.c b/src/vnet/devices/virtio/virtio_pre_input.c
index ee6e848310d..eb208fd3a39 100644
--- a/src/vnet/devices/virtio/virtio_pre_input.c
+++ b/src/vnet/devices/virtio/virtio_pre_input.c
@@ -22,7 +22,7 @@
#include <vnet/devices/virtio/virtio_inline.h>
static_always_inline uword
-virtio_pre_input_inline (vlib_main_t *vm, virtio_vring_t *txq_vring,
+virtio_pre_input_inline (vlib_main_t *vm, vnet_virtio_vring_t *txq_vring,
vnet_hw_if_tx_queue_t *txq, u8 packet_coalesce,
u8 packet_buffering)
{
@@ -66,7 +66,7 @@ virtio_pre_input (vlib_main_t *vm, vlib_node_runtime_t *node,
{
if (vif->packet_coalesce || vif->packet_buffering)
{
- virtio_vring_t *txq_vring;
+ vnet_virtio_vring_t *txq_vring;
vec_foreach (txq_vring, vif->txq_vrings)
{
vnet_hw_if_tx_queue_t *txq =
diff --git a/src/vnet/devices/virtio/virtio_process.c b/src/vnet/devices/virtio/virtio_process.c
index f347ef2ab57..18b34e0aa62 100644
--- a/src/vnet/devices/virtio/virtio_process.c
+++ b/src/vnet/devices/virtio/virtio_process.c
@@ -50,7 +50,7 @@ virtio_send_interrupt_process (vlib_main_t * vm,
{
if (vif->packet_coalesce || vif->packet_buffering)
{
- virtio_vring_t *vring;
+ vnet_virtio_vring_t *vring;
vec_foreach (vring, vif->rxq_vrings)
{
if (vring->mode == VNET_HW_IF_RX_MODE_INTERRUPT ||
diff --git a/src/vnet/devices/virtio/virtio_std.h b/src/vnet/devices/virtio/virtio_std.h
index 619dd66d5ed..86984339bc2 100644
--- a/src/vnet/devices/virtio/virtio_std.h
+++ b/src/vnet/devices/virtio/virtio_std.h
@@ -64,7 +64,7 @@ typedef enum
#define _(f,n) f = n,
foreach_virtio_net_features
#undef _
-} virtio_net_feature_t;
+} vnet_virtio_net_feature_t;
#define VIRTIO_FEATURE(X) (1ULL << X)
@@ -87,7 +87,7 @@ typedef enum
#define _(f,n) f = n,
foreach_virtio_event_idx_flags
#undef _
-} virtio_event_idx_flags_t;
+} vnet_virtio_event_idx_flags_t;
#define VRING_USED_F_NO_NOTIFY 1
#define VRING_AVAIL_F_NO_INTERRUPT 1
@@ -98,7 +98,7 @@ typedef struct
u32 len;
u16 flags;
u16 next;
-} vring_desc_t;
+} vnet_virtio_vring_desc_t;
typedef struct
{
@@ -106,38 +106,36 @@ typedef struct
u16 idx;
u16 ring[0];
/* u16 used_event; */
-} vring_avail_t;
+} vnet_virtio_vring_avail_t;
typedef struct
{
u32 id;
u32 len;
-} vring_used_elem_t;
+} vnet_virtio_vring_used_elem_t;
typedef struct
{
u16 flags;
u16 idx;
- vring_used_elem_t ring[0];
+ vnet_virtio_vring_used_elem_t ring[0];
/* u16 avail_event; */
-} vring_used_t;
+} vnet_virtio_vring_used_t;
/* *INDENT-OFF* */
-typedef CLIB_PACKED (struct
-{
- u64 addr; // packet data buffer address
- u32 len; // packet data buffer size
- u16 id; // buffer id
- u16 flags; // flags
-}) vring_packed_desc_t;
+typedef CLIB_PACKED (struct {
+ u64 addr; // packet data buffer address
+ u32 len; // packet data buffer size
+ u16 id; // buffer id
+ u16 flags; // flags
+}) vnet_virtio_vring_packed_desc_t;
-STATIC_ASSERT_SIZEOF (vring_packed_desc_t, 16);
+STATIC_ASSERT_SIZEOF (vnet_virtio_vring_packed_desc_t, 16);
-typedef CLIB_PACKED (struct
-{
+typedef CLIB_PACKED (struct {
u16 off_wrap;
u16 flags;
-}) vring_desc_event_t;
+}) vnet_virtio_vring_desc_event_t;
#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* Use csum_start, csum_offset */
#define VIRTIO_NET_HDR_F_DATA_VALID 2 /* Csum is valid */
@@ -148,8 +146,7 @@ typedef CLIB_PACKED (struct
#define VIRTIO_NET_HDR_GSO_TCPV6 4 /* GSO frame, IPv6 TCP */
#define VIRTIO_NET_HDR_GSO_ECN 0x80 /* TCP has ECN set */
-typedef CLIB_PACKED (struct
-{
+typedef CLIB_PACKED (struct {
u8 flags;
u8 gso_type;
u16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */
@@ -157,54 +154,23 @@ typedef CLIB_PACKED (struct
u16 csum_start; /* Position to start checksumming from */
u16 csum_offset; /* Offset after that to place checksum */
u16 num_buffers; /* Number of merged rx buffers */
-}) virtio_net_hdr_v1_t;
+}) vnet_virtio_net_hdr_v1_t;
-typedef CLIB_PACKED (struct
-{
+typedef CLIB_PACKED (struct {
u8 flags;
u8 gso_type;
u16 hdr_len;
u16 gso_size;
u16 csum_start;
u16 csum_offset;
-}) virtio_net_hdr_t;
+}) vnet_virtio_net_hdr_t;
-typedef CLIB_PACKED (struct
-{
- virtio_net_hdr_t hdr;
+typedef CLIB_PACKED (struct {
+ vnet_virtio_net_hdr_t hdr;
u16 num_buffers;
-}) virtio_net_hdr_mrg_rxbuf_t;
+}) vnet_virtio_net_hdr_mrg_rxbuf_t;
/* *INDENT-ON* */
-
-typedef struct
-{
- u16 num;
- vring_desc_t *desc;
- vring_avail_t *avail;
- vring_used_t *used;
-} vring_t;
-
-static_always_inline void
-vring_init (vring_t * vr, u32 num, void *p, u32 align)
-{
- vr->num = num;
- vr->desc = p;
- vr->avail = (vring_avail_t *) ((char *) p + num * sizeof (vring_desc_t));
- vr->used =
- (vring_used_t *) ((char *) p +
- ((sizeof (vring_desc_t) * num +
- sizeof (u16) * (3 + num) + align - 1) & ~(align -
- 1)));
-}
-
-static_always_inline u16
-vring_size (u32 num, u32 align)
-{
- return ((sizeof (vring_desc_t) * num + sizeof (u16) * (3 + num)
- + align - 1) & ~(align - 1))
- + sizeof (u16) * 3 + sizeof (vring_used_elem_t) * num;
-}
#endif
/*