Diffstat (limited to 'src/vnet/devices/virtio/pci.c')
-rw-r--r--  src/vnet/devices/virtio/pci.c | 281
1 file changed, 145 insertions(+), 136 deletions(-)
diff --git a/src/vnet/devices/virtio/pci.c b/src/vnet/devices/virtio/pci.c
index 7ef4b2a52dd..6234f64fcfb 100644
--- a/src/vnet/devices/virtio/pci.c
+++ b/src/vnet/devices/virtio/pci.c
@@ -116,7 +116,7 @@ virtio_pci_irq_queue_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h,
line--;
u16 qid = line;
- virtio_vring_t *vring = vec_elt_at_index (vif->rxq_vrings, qid);
+ vnet_virtio_vring_t *vring = vec_elt_at_index (vif->rxq_vrings, qid);
vnet_hw_if_rx_queue_set_int_pending (vnm, vring->queue_index);
}
@@ -131,13 +131,11 @@ virtio_pci_irq_config_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h,
if (virtio_pci_is_link_up (vm, vif) & VIRTIO_NET_S_LINK_UP)
{
- vif->flags |= VIRTIO_IF_FLAG_ADMIN_UP;
vnet_hw_interface_set_flags (vnm, vif->hw_if_index,
VNET_HW_INTERFACE_FLAG_LINK_UP);
}
else
{
- vif->flags &= ~VIRTIO_IF_FLAG_ADMIN_UP;
vnet_hw_interface_set_flags (vnm, vif->hw_if_index, 0);
}
}
@@ -200,18 +198,18 @@ static int
virtio_pci_send_ctrl_msg_packed (vlib_main_t * vm, virtio_if_t * vif,
virtio_ctrl_msg_t * data, u32 len)
{
- virtio_vring_t *vring = vif->cxq_vring;
+ vnet_virtio_vring_t *vring = vif->cxq_vring;
virtio_net_ctrl_ack_t status = VIRTIO_NET_ERR;
virtio_ctrl_msg_t result;
u32 buffer_index;
vlib_buffer_t *b;
u16 used, next;
- u16 sz = vring->size;
+ u16 sz = vring->queue_size;
u16 flags = 0, first_desc_flags = 0;
used = vring->desc_in_use;
next = vring->desc_next;
- vring_packed_desc_t *d = &vring->packed_desc[next];
+ vnet_virtio_vring_packed_desc_t *d = &vring->packed_desc[next];
if (vlib_buffer_alloc (vm, &buffer_index, 1))
b = vlib_get_buffer (vm, buffer_index);
@@ -319,9 +317,9 @@ virtio_pci_send_ctrl_msg_packed (vlib_main_t * vm, virtio_if_t * vif,
|| (flags & VRING_DESC_F_USED) != (vring->used_wrap_counter << 15));
last += 3;
- if (last >= vring->size)
+ if (last >= vring->queue_size)
{
- last = last - vring->size;
+ last = last - vring->queue_size;
vring->used_wrap_counter ^= 1;
}
vring->desc_in_use -= 3;
@@ -340,19 +338,19 @@ static int
virtio_pci_send_ctrl_msg_split (vlib_main_t * vm, virtio_if_t * vif,
virtio_ctrl_msg_t * data, u32 len)
{
- virtio_vring_t *vring = vif->cxq_vring;
+ vnet_virtio_vring_t *vring = vif->cxq_vring;
virtio_net_ctrl_ack_t status = VIRTIO_NET_ERR;
virtio_ctrl_msg_t result;
u32 buffer_index;
vlib_buffer_t *b;
u16 used, next, avail;
- u16 sz = vring->size;
+ u16 sz = vring->queue_size;
u16 mask = sz - 1;
used = vring->desc_in_use;
next = vring->desc_next;
avail = vring->avail->idx;
- vring_desc_t *d = &vring->desc[next];
+ vnet_virtio_vring_desc_t *d = &vring->desc[next];
if (vlib_buffer_alloc (vm, &buffer_index, 1))
b = vlib_get_buffer (vm, buffer_index);
@@ -405,7 +403,7 @@ virtio_pci_send_ctrl_msg_split (vlib_main_t * vm, virtio_if_t * vif,
while (n_left)
{
- vring_used_elem_t *e = &vring->used->ring[last & mask];
+ vnet_virtio_vring_used_elem_t *e = &vring->used->ring[last & mask];
u16 slot = e->id;
d = &vring->desc[slot];
@@ -508,7 +506,7 @@ virtio_pci_offloads (vlib_main_t * vm, virtio_if_t * vif, int gso_enabled,
int csum_offload_enabled)
{
vnet_main_t *vnm = vnet_get_main ();
- vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, vif->hw_if_index);
+ vnet_hw_if_caps_change_t cc = {};
if ((vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_CTRL_VQ)) &&
(vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)))
@@ -524,10 +522,10 @@ virtio_pci_offloads (vlib_main_t * vm, virtio_if_t * vif, int gso_enabled,
else
{
vif->gso_enabled = 1;
- vif->csum_offload_enabled = 0;
- hw->caps |= VNET_HW_INTERFACE_CAP_SUPPORTS_TCP_GSO |
- VNET_HW_INTERFACE_CAP_SUPPORTS_TX_TCP_CKSUM |
- VNET_HW_INTERFACE_CAP_SUPPORTS_TX_UDP_CKSUM;
+ vif->csum_offload_enabled = 1;
+ cc.val = cc.mask = VNET_HW_IF_CAP_TCP_GSO |
+ VNET_HW_IF_CAP_TX_TCP_CKSUM |
+ VNET_HW_IF_CAP_TX_UDP_CKSUM;
}
}
else if (csum_offload_enabled
@@ -541,9 +539,10 @@ virtio_pci_offloads (vlib_main_t * vm, virtio_if_t * vif, int gso_enabled,
{
vif->csum_offload_enabled = 1;
vif->gso_enabled = 0;
- hw->caps &= ~VNET_HW_INTERFACE_CAP_SUPPORTS_TCP_GSO;
- hw->caps |= VNET_HW_INTERFACE_CAP_SUPPORTS_TX_TCP_CKSUM |
- VNET_HW_INTERFACE_CAP_SUPPORTS_TX_UDP_CKSUM;
+ cc.val =
+ VNET_HW_IF_CAP_TX_TCP_CKSUM | VNET_HW_IF_CAP_TX_UDP_CKSUM;
+ cc.mask = VNET_HW_IF_CAP_TCP_GSO | VNET_HW_IF_CAP_TX_TCP_CKSUM |
+ VNET_HW_IF_CAP_TX_UDP_CKSUM;
}
}
else
@@ -556,12 +555,15 @@ virtio_pci_offloads (vlib_main_t * vm, virtio_if_t * vif, int gso_enabled,
{
vif->csum_offload_enabled = 0;
vif->gso_enabled = 0;
- hw->caps &= ~(VNET_HW_INTERFACE_CAP_SUPPORTS_L4_TX_CKSUM |
- VNET_HW_INTERFACE_CAP_SUPPORTS_TCP_GSO);
+ cc.val = 0;
+ cc.mask = VNET_HW_IF_CAP_L4_TX_CKSUM | VNET_HW_IF_CAP_TCP_GSO;
}
}
}
+ if (cc.mask)
+ vnet_hw_if_change_caps (vnm, vif->hw_if_index, &cc);
+
return 0;
}
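
The offload hunks above stop poking hw->caps directly and instead accumulate a vnet_hw_if_caps_change_t that is applied once at the end of the function. A minimal sketch of the mask/val semantics assumed here, using an illustrative struct and helper rather than the vnet API:

/* Illustrative sketch only: 'mask' selects which capability bits a request
 * may change and 'val' supplies their new value; bits outside 'mask' keep
 * their current setting.  This mirrors how the diff fills cc.val / cc.mask. */
typedef struct
{
  unsigned int val;
  unsigned int mask;
} caps_change_sketch_t;

static inline unsigned int
apply_caps_change_sketch (unsigned int current_caps, caps_change_sketch_t cc)
{
  return (current_caps & ~cc.mask) | (cc.val & cc.mask);
}

Under this reading, cc.val = cc.mask sets every selected bit, while cc.val = 0 with a non-zero cc.mask clears them, matching the GSO, checksum-only and disable branches above.
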
@@ -598,7 +600,7 @@ virtio_pci_control_vring_packed_init (vlib_main_t * vm, virtio_if_t * vif,
{
clib_error_t *error = 0;
u16 queue_size = 0;
- virtio_vring_t *vring;
+ vnet_virtio_vring_t *vring;
u32 i = 0;
void *ptr = NULL;
@@ -613,34 +615,36 @@ virtio_pci_control_vring_packed_init (vlib_main_t * vm, virtio_if_t * vif,
vec_validate_aligned (vif->cxq_vring, 0, CLIB_CACHE_LINE_BYTES);
vring = vec_elt_at_index (vif->cxq_vring, 0);
- i =
- (((queue_size * sizeof (vring_packed_desc_t)) +
- sizeof (vring_desc_event_t) + VIRTIO_PCI_VRING_ALIGN -
- 1) & ~(VIRTIO_PCI_VRING_ALIGN - 1)) + sizeof (vring_desc_event_t);
+ i = (((queue_size * sizeof (vnet_virtio_vring_packed_desc_t)) +
+ sizeof (vnet_virtio_vring_desc_event_t) + VNET_VIRTIO_PCI_VRING_ALIGN -
+ 1) &
+ ~(VNET_VIRTIO_PCI_VRING_ALIGN - 1)) +
+ sizeof (vnet_virtio_vring_desc_event_t);
- ptr =
- vlib_physmem_alloc_aligned_on_numa (vm, i, VIRTIO_PCI_VRING_ALIGN,
- vif->numa_node);
+ ptr = vlib_physmem_alloc_aligned_on_numa (vm, i, VNET_VIRTIO_PCI_VRING_ALIGN,
+ vif->numa_node);
if (!ptr)
return vlib_physmem_last_error (vm);
clib_memset (ptr, 0, i);
vring->packed_desc = ptr;
- vring->driver_event = ptr + (queue_size * sizeof (vring_packed_desc_t));
+ vring->driver_event =
+ ptr + (queue_size * sizeof (vnet_virtio_vring_packed_desc_t));
vring->driver_event->off_wrap = 0;
vring->driver_event->flags = VRING_EVENT_F_DISABLE;
vring->device_event =
- ptr +
- (((queue_size * sizeof (vring_packed_desc_t)) +
- sizeof (vring_desc_event_t) + VIRTIO_PCI_VRING_ALIGN -
- 1) & ~(VIRTIO_PCI_VRING_ALIGN - 1));
+ ptr + (((queue_size * sizeof (vnet_virtio_vring_packed_desc_t)) +
+ sizeof (vnet_virtio_vring_desc_event_t) +
+ VNET_VIRTIO_PCI_VRING_ALIGN - 1) &
+ ~(VNET_VIRTIO_PCI_VRING_ALIGN - 1));
vring->device_event->off_wrap = 0;
vring->device_event->flags = 0;
+ vring->total_packets = 0;
vring->queue_id = queue_num;
- vring->size = queue_size;
+ vring->queue_size = queue_size;
vring->avail_wrap_counter = 1;
vring->used_wrap_counter = 1;
@@ -648,7 +652,7 @@ virtio_pci_control_vring_packed_init (vlib_main_t * vm, virtio_if_t * vif,
virtio_log_debug (vif, "control-queue: number %u, size %u", queue_num,
queue_size);
- vif->virtio_pci_func->setup_queue (vm, vif, queue_num, (void *) vring);
+ vif->virtio_pci_func->setup_queue (vm, vif, queue_num, vring);
vring->queue_notify_offset =
vif->notify_off_multiplier *
vif->virtio_pci_func->get_queue_notify_off (vm, vif, queue_num);
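
The allocation size computed in this hunk lays the packed ring out as the descriptor array plus the driver event structure, rounded up to VNET_VIRTIO_PCI_VRING_ALIGN, with the device event structure placed at that aligned boundary. A hedged restatement of the same arithmetic with hypothetical names:

/* Hypothetical helper restating the inline size computation above:
 * descriptors + driver event area, aligned up, then the device event area. */
#define ALIGN_UP_SKETCH(x, a) (((x) + (a) - 1) & ~((a) - 1))

static inline unsigned long
packed_ring_bytes_sketch (unsigned short queue_size, unsigned long desc_sz,
			  unsigned long event_sz, unsigned long align)
{
  unsigned long driver_area = (unsigned long) queue_size * desc_sz + event_sz;
  /* device event area starts at the aligned boundary, event_sz bytes long */
  return ALIGN_UP_SKETCH (driver_area, align) + event_sz;
}

The same computation appears again in virtio_pci_vring_packed_init further down.
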
@@ -663,8 +667,7 @@ virtio_pci_control_vring_split_init (vlib_main_t * vm, virtio_if_t * vif,
{
clib_error_t *error = 0;
u16 queue_size = 0;
- virtio_vring_t *vring;
- vring_t vr;
+ vnet_virtio_vring_t *vring;
u32 i = 0;
void *ptr = NULL;
@@ -683,27 +686,21 @@ virtio_pci_control_vring_split_init (vlib_main_t * vm, virtio_if_t * vif,
vec_validate_aligned (vif->cxq_vring, 0, CLIB_CACHE_LINE_BYTES);
vring = vec_elt_at_index (vif->cxq_vring, 0);
- i = vring_size (queue_size, VIRTIO_PCI_VRING_ALIGN);
- i = round_pow2 (i, VIRTIO_PCI_VRING_ALIGN);
- ptr =
- vlib_physmem_alloc_aligned_on_numa (vm, i, VIRTIO_PCI_VRING_ALIGN,
- vif->numa_node);
+ i = vnet_virtio_vring_size (queue_size, VNET_VIRTIO_PCI_VRING_ALIGN);
+ i = round_pow2 (i, VNET_VIRTIO_PCI_VRING_ALIGN);
+ ptr = vlib_physmem_alloc_aligned_on_numa (vm, i, VNET_VIRTIO_PCI_VRING_ALIGN,
+ vif->numa_node);
if (!ptr)
return vlib_physmem_last_error (vm);
clib_memset (ptr, 0, i);
- vring_init (&vr, queue_size, ptr, VIRTIO_PCI_VRING_ALIGN);
- vring->desc = vr.desc;
- vring->avail = vr.avail;
- vring->used = vr.used;
+ vnet_virtio_vring_init (vring, queue_size, ptr, VNET_VIRTIO_PCI_VRING_ALIGN);
vring->queue_id = queue_num;
- vring->avail->flags = VIRTIO_RING_FLAG_MASK_INT;
+ vring->total_packets = 0;
ASSERT (vring->buffers == 0);
-
- vring->size = queue_size;
virtio_log_debug (vif, "control-queue: number %u, size %u", queue_num,
queue_size);
- vif->virtio_pci_func->setup_queue (vm, vif, queue_num, ptr);
+ vif->virtio_pci_func->setup_queue (vm, vif, queue_num, vring);
vring->queue_notify_offset =
vif->notify_off_multiplier *
vif->virtio_pci_func->get_queue_notify_off (vm, vif, queue_num);
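
This hunk folds the separate desc/avail/used pointer assignments into a single vnet_virtio_vring_init() call, which presumably derives the standard split-ring layout itself. A hedged sketch of that layout per the virtio 1.x spec, with illustrative names rather than the VPP helper:

/* Illustrative only: split-ring layout.  The descriptor table (16 bytes per
 * entry) sits at the base, the 16-bit avail ring (flags, idx, queue_size
 * entries, used_event) follows it, and the used ring starts at the next
 * alignment boundary. */
static inline void
split_ring_layout_sketch (void *base, unsigned short qsz, unsigned long align,
			  void **desc, void **avail, void **used)
{
  unsigned long avail_end;

  *desc = base;
  *avail = (char *) base + (unsigned long) qsz * 16;
  avail_end = (unsigned long) *avail + 2 * (3 + (unsigned long) qsz);
  *used = (void *) ((avail_end + align - 1) & ~(align - 1));
}
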
@@ -724,14 +721,12 @@ virtio_pci_control_vring_init (vlib_main_t * vm, virtio_if_t * vif,
}
clib_error_t *
-virtio_pci_vring_split_init (vlib_main_t * vm, virtio_if_t * vif,
- u16 queue_num)
+virtio_pci_vring_split_init (vlib_main_t *vm, virtio_if_t *vif, u16 queue_num,
+ u16 txq_size)
{
- vlib_thread_main_t *vtm = vlib_get_thread_main ();
clib_error_t *error = 0;
u16 queue_size = 0;
- virtio_vring_t *vring;
- vring_t vr;
+ vnet_virtio_vring_t *vring;
u32 i = 0;
void *ptr = NULL;
@@ -750,11 +745,20 @@ virtio_pci_vring_split_init (vlib_main_t * vm, virtio_if_t * vif,
if (queue_num % 2)
{
+ if (txq_size)
+ {
+ virtio_log_debug (vif, "tx-queue: number %u, default-size %u",
+ queue_num, queue_size);
+ vif->virtio_pci_func->set_queue_size (vm, vif, queue_num, txq_size);
+ queue_size =
+ vif->virtio_pci_func->get_queue_size (vm, vif, queue_num);
+ virtio_log_debug (vif, "tx-queue: number %u, new size %u", queue_num,
+ queue_size);
+ }
vec_validate_aligned (vif->txq_vrings, TX_QUEUE_ACCESS (queue_num),
CLIB_CACHE_LINE_BYTES);
vring = vec_elt_at_index (vif->txq_vrings, TX_QUEUE_ACCESS (queue_num));
- if (vif->max_queue_pairs < vtm->n_vlib_mains)
- clib_spinlock_init (&vring->lockp);
+ clib_spinlock_init (&vring->lockp);
}
else
{
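
The new txq_size path in the hunk above writes the requested size and then reads back the effective one, since the device is free to clamp or reject the request. A hedged usage sketch of that request-then-readback pattern; the ops struct is illustrative, not the virtio_pci_func_t definition:

/* Illustrative ops table: the driver should trust only what it reads back. */
typedef struct
{
  void (*set_queue_size) (int queue_num, unsigned short size);
  unsigned short (*get_queue_size) (int queue_num);
} queue_ops_sketch_t;

static unsigned short
negotiate_txq_size_sketch (queue_ops_sketch_t *ops, int queue_num,
			   unsigned short requested)
{
  if (requested)
    ops->set_queue_size (queue_num, requested);
  return ops->get_queue_size (queue_num);
}
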
@@ -762,21 +766,18 @@ virtio_pci_vring_split_init (vlib_main_t * vm, virtio_if_t * vif,
CLIB_CACHE_LINE_BYTES);
vring = vec_elt_at_index (vif->rxq_vrings, RX_QUEUE_ACCESS (queue_num));
}
- i = vring_size (queue_size, VIRTIO_PCI_VRING_ALIGN);
- i = round_pow2 (i, VIRTIO_PCI_VRING_ALIGN);
- ptr =
- vlib_physmem_alloc_aligned_on_numa (vm, i, VIRTIO_PCI_VRING_ALIGN,
- vif->numa_node);
+ i = vnet_virtio_vring_size (queue_size, VNET_VIRTIO_PCI_VRING_ALIGN);
+ i = round_pow2 (i, VNET_VIRTIO_PCI_VRING_ALIGN);
+ ptr = vlib_physmem_alloc_aligned_on_numa (vm, i, VNET_VIRTIO_PCI_VRING_ALIGN,
+ vif->numa_node);
if (!ptr)
return vlib_physmem_last_error (vm);
clib_memset (ptr, 0, i);
- vring_init (&vr, queue_size, ptr, VIRTIO_PCI_VRING_ALIGN);
- vring->desc = vr.desc;
- vring->avail = vr.avail;
- vring->used = vr.used;
+ vnet_virtio_vring_init (vring, queue_size, ptr, VNET_VIRTIO_PCI_VRING_ALIGN);
vring->queue_id = queue_num;
vring->avail->flags = VIRTIO_RING_FLAG_MASK_INT;
vring->flow_table = 0;
+ vring->total_packets = 0;
ASSERT (vring->buffers == 0);
vec_validate_aligned (vring->buffers, queue_size, CLIB_CACHE_LINE_BYTES);
@@ -791,8 +792,8 @@ virtio_pci_vring_split_init (vlib_main_t * vm, virtio_if_t * vif,
virtio_log_debug (vif, "rx-queue: number %u, size %u", queue_num,
queue_size);
}
- vring->size = queue_size;
- if (vif->virtio_pci_func->setup_queue (vm, vif, queue_num, ptr))
+ vring->queue_size = queue_size;
+ if (vif->virtio_pci_func->setup_queue (vm, vif, queue_num, vring))
return clib_error_return (0, "error in queue address setup");
vring->queue_notify_offset =
@@ -807,10 +808,9 @@ clib_error_t *
virtio_pci_vring_packed_init (vlib_main_t * vm, virtio_if_t * vif,
u16 queue_num)
{
- vlib_thread_main_t *vtm = vlib_get_thread_main ();
clib_error_t *error = 0;
u16 queue_size = 0;
- virtio_vring_t *vring;
+ vnet_virtio_vring_t *vring;
u32 i = 0;
void *ptr = NULL;
@@ -827,8 +827,7 @@ virtio_pci_vring_packed_init (vlib_main_t * vm, virtio_if_t * vif,
vec_validate_aligned (vif->txq_vrings, TX_QUEUE_ACCESS (queue_num),
CLIB_CACHE_LINE_BYTES);
vring = vec_elt_at_index (vif->txq_vrings, TX_QUEUE_ACCESS (queue_num));
- if (vif->max_queue_pairs < vtm->n_vlib_mains)
- clib_spinlock_init (&vring->lockp);
+ clib_spinlock_init (&vring->lockp);
}
else
{
@@ -837,29 +836,30 @@ virtio_pci_vring_packed_init (vlib_main_t * vm, virtio_if_t * vif,
vring = vec_elt_at_index (vif->rxq_vrings, RX_QUEUE_ACCESS (queue_num));
}
- i =
- (((queue_size * sizeof (vring_packed_desc_t)) +
- sizeof (vring_desc_event_t) + VIRTIO_PCI_VRING_ALIGN -
- 1) & ~(VIRTIO_PCI_VRING_ALIGN - 1)) + sizeof (vring_desc_event_t);
+ i = (((queue_size * sizeof (vnet_virtio_vring_packed_desc_t)) +
+ sizeof (vnet_virtio_vring_desc_event_t) + VNET_VIRTIO_PCI_VRING_ALIGN -
+ 1) &
+ ~(VNET_VIRTIO_PCI_VRING_ALIGN - 1)) +
+ sizeof (vnet_virtio_vring_desc_event_t);
- ptr =
- vlib_physmem_alloc_aligned_on_numa (vm, i, VIRTIO_PCI_VRING_ALIGN,
- vif->numa_node);
+ ptr = vlib_physmem_alloc_aligned_on_numa (vm, i, VNET_VIRTIO_PCI_VRING_ALIGN,
+ vif->numa_node);
if (!ptr)
return vlib_physmem_last_error (vm);
clib_memset (ptr, 0, i);
vring->packed_desc = ptr;
- vring->driver_event = ptr + (queue_size * sizeof (vring_packed_desc_t));
+ vring->driver_event =
+ ptr + (queue_size * sizeof (vnet_virtio_vring_packed_desc_t));
vring->driver_event->off_wrap = 0;
vring->driver_event->flags = VRING_EVENT_F_DISABLE;
vring->device_event =
- ptr +
- (((queue_size * sizeof (vring_packed_desc_t)) +
- sizeof (vring_desc_event_t) + VIRTIO_PCI_VRING_ALIGN -
- 1) & ~(VIRTIO_PCI_VRING_ALIGN - 1));
+ ptr + (((queue_size * sizeof (vnet_virtio_vring_packed_desc_t)) +
+ sizeof (vnet_virtio_vring_desc_event_t) +
+ VNET_VIRTIO_PCI_VRING_ALIGN - 1) &
+ ~(VNET_VIRTIO_PCI_VRING_ALIGN - 1));
vring->device_event->off_wrap = 0;
vring->device_event->flags = 0;
@@ -867,6 +867,7 @@ virtio_pci_vring_packed_init (vlib_main_t * vm, virtio_if_t * vif,
vring->avail_wrap_counter = 1;
vring->used_wrap_counter = 1;
+ vring->total_packets = 0;
ASSERT (vring->buffers == 0);
vec_validate_aligned (vring->buffers, queue_size, CLIB_CACHE_LINE_BYTES);
@@ -881,8 +882,8 @@ virtio_pci_vring_packed_init (vlib_main_t * vm, virtio_if_t * vif,
virtio_log_debug (vif, "rx-queue: number %u, size %u", queue_num,
queue_size);
}
- vring->size = queue_size;
- if (vif->virtio_pci_func->setup_queue (vm, vif, queue_num, (void *) vring))
+ vring->queue_size = queue_size;
+ if (vif->virtio_pci_func->setup_queue (vm, vif, queue_num, vring))
return clib_error_return (0, "error in queue address setup");
vring->queue_notify_offset =
@@ -895,12 +896,13 @@ virtio_pci_vring_packed_init (vlib_main_t * vm, virtio_if_t * vif,
}
clib_error_t *
-virtio_pci_vring_init (vlib_main_t * vm, virtio_if_t * vif, u16 queue_num)
+virtio_pci_vring_init (vlib_main_t *vm, virtio_if_t *vif, u16 queue_num,
+ u16 txq_size)
{
if (vif->is_packed)
return virtio_pci_vring_packed_init (vm, vif, queue_num);
else
- return virtio_pci_vring_split_init (vm, vif, queue_num);
+ return virtio_pci_vring_split_init (vm, vif, queue_num, txq_size);
}
static void
@@ -1147,7 +1149,6 @@ virtio_pci_device_init (vlib_main_t * vm, virtio_if_t * vif,
virtio_pci_create_if_args_t * args, void **bar)
{
clib_error_t *error = 0;
- vlib_thread_main_t *vtm = vlib_get_thread_main ();
u8 status = 0;
if ((error = virtio_pci_read_caps (vm, vif, bar)))
@@ -1239,7 +1240,7 @@ virtio_pci_device_init (vlib_main_t * vm, virtio_if_t * vif,
for (int i = 0; i < vif->max_queue_pairs; i++)
{
- if ((error = virtio_pci_vring_init (vm, vif, RX_QUEUE (i))))
+ if ((error = virtio_pci_vring_init (vm, vif, RX_QUEUE (i), 0)))
{
args->rv = VNET_API_ERROR_INIT_FAILED;
virtio_log_error (vif, "%s (%u) %s", "error in rxq-queue",
@@ -1254,23 +1255,8 @@ virtio_pci_device_init (vlib_main_t * vm, virtio_if_t * vif,
vif->num_rxqs++;
}
- if (i >= vtm->n_vlib_mains)
- {
- /*
- * There is 1:1 mapping between tx queue and vpp worker thread.
- * tx queue 0 is bind with thread index 0, tx queue 1 on thread
- * index 1 and so on.
- * Multiple worker threads can poll same tx queue when number of
- * workers are more than tx queues. In this case, 1:N mapping
- * between tx queue and vpp worker thread.
- */
- virtio_log_debug (vif, "%s %u, %s", "tx-queue: number",
- TX_QUEUE (i),
- "no VPP worker thread is available");
- continue;
- }
-
- if ((error = virtio_pci_vring_init (vm, vif, TX_QUEUE (i))))
+ if ((error = virtio_pci_vring_init (vm, vif, TX_QUEUE (i),
+ args->tx_queue_size)))
{
args->rv = VNET_API_ERROR_INIT_FAILED;
virtio_log_error (vif, "%s (%u) %s", "error in txq-queue",
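
With the worker-count check removed in this hunk, every tx queue now gets a vring, and (as in the earlier vring-init hunks) its spinlock is always initialized, because having more workers than tx queues means several threads can share one queue. A hedged sketch of that 1:N queue selection, names illustrative:

/* Illustrative only: with fewer tx queues than threads, a thread picks its
 * queue by modulo and must hold the per-vring lock while transmitting. */
static inline unsigned int
pick_txq_sketch (unsigned int thread_index, unsigned int num_txqs)
{
  return thread_index % num_txqs; /* identity mapping when queues >= threads */
}
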
@@ -1354,7 +1340,6 @@ virtio_pci_create_if (vlib_main_t * vm, virtio_pci_create_if_args_t * args)
clib_error_t *error = 0;
u32 interrupt_count = 0;
- /* *INDENT-OFF* */
pool_foreach (vif, vim->interfaces) {
if (vif->pci_addr.as_u32 == args->addr)
{
@@ -1367,7 +1352,24 @@ virtio_pci_create_if (vlib_main_t * vm, virtio_pci_create_if_args_t * args)
return;
}
}
- /* *INDENT-ON* */
+
+ if (args->bind)
+ {
+ vlib_pci_addr_t pci = { .as_u32 = args->addr };
+ error = vlib_pci_bind_to_uio (vm, &pci, (char *) "auto",
+ VIRTIO_BIND_FORCE == args->bind);
+ if (error)
+ {
+ args->rv = VNET_API_ERROR_INVALID_INTERFACE;
+ args->error =
+ clib_error_return (error, "%U: %s", format_vlib_pci_addr, &pci,
+ "error encountered on binding pci device");
+ vlib_log (VLIB_LOG_LEVEL_ERR, vim->log_default, "%U: %s",
+ format_vlib_pci_addr, &pci,
+ "error encountered on binding pci devicee");
+ return;
+ }
+ }
pool_get (vim->interfaces, vif);
vif->dev_instance = vif - vim->interfaces;
@@ -1483,25 +1485,18 @@ virtio_pci_create_if (vlib_main_t * vm, virtio_pci_create_if_args_t * args)
}
/* create interface */
- error = ethernet_register_interface (vnm, virtio_device_class.index,
- vif->dev_instance, vif->mac_addr,
- &vif->hw_if_index,
- virtio_pci_flag_change);
-
- if (error)
- {
- args->rv = VNET_API_ERROR_INVALID_REGISTRATION;
- virtio_log_error (vif,
- "error encountered on ethernet register interface");
- goto error;
- }
+ vnet_eth_interface_registration_t eir = {};
+ eir.dev_class_index = virtio_device_class.index;
+ eir.dev_instance = vif->dev_instance;
+ eir.address = vif->mac_addr;
+ eir.cb.flag_change = virtio_pci_flag_change;
+ vif->hw_if_index = vnet_eth_register_interface (vnm, &eir);
vnet_sw_interface_t *sw = vnet_get_hw_sw_interface (vnm, vif->hw_if_index);
vif->sw_if_index = sw->sw_if_index;
args->sw_if_index = sw->sw_if_index;
- vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, vif->hw_if_index);
- hw->caps |= VNET_HW_INTERFACE_CAP_SUPPORTS_INT_MODE;
+ vnet_hw_if_set_caps (vnm, vif->hw_if_index, VNET_HW_IF_CAP_INT_MODE);
if (args->virtio_flags & VIRTIO_FLAG_BUFFERING)
{
@@ -1513,13 +1508,22 @@ virtio_pci_create_if (vlib_main_t * vm, virtio_pci_create_if_args_t * args)
"error encountered during packet buffering init");
goto error;
}
+ /*
+ * The packet buffering flag must be set to 1 before calling
+ * virtio_pre_input_node_enable, but only after the buffering queues
+ * above have been initialized successfully; it stays 0 if buffering
+ * initialization fails.
+ */
+ vif->packet_buffering = 1;
+ virtio_pre_input_node_enable (vm, vif);
}
virtio_vring_set_rx_queues (vm, vif);
+ virtio_vring_set_tx_queues (vm, vif);
if (virtio_pci_is_link_up (vm, vif) & VIRTIO_NET_S_LINK_UP)
{
- vif->flags |= VIRTIO_IF_FLAG_ADMIN_UP;
vnet_hw_interface_set_flags (vnm, vif->hw_if_index,
VNET_HW_INTERFACE_FLAG_LINK_UP);
}
@@ -1556,17 +1560,19 @@ virtio_pci_delete_if (vlib_main_t * vm, virtio_if_t * vif)
vlib_pci_intr_disable (vm, vif->pci_dev_handle);
- for (i = 0; i < vif->max_queue_pairs; i++)
+ if (vif->virtio_pci_func)
{
- vif->virtio_pci_func->del_queue (vm, vif, RX_QUEUE (i));
- vif->virtio_pci_func->del_queue (vm, vif, TX_QUEUE (i));
- }
+ for (i = 0; i < vif->max_queue_pairs; i++)
+ {
+ vif->virtio_pci_func->del_queue (vm, vif, RX_QUEUE (i));
+ vif->virtio_pci_func->del_queue (vm, vif, TX_QUEUE (i));
+ }
- if (vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_CTRL_VQ))
- vif->virtio_pci_func->del_queue (vm, vif, vif->max_queue_pairs * 2);
+ if (vif->features & VIRTIO_FEATURE (VIRTIO_NET_F_CTRL_VQ))
+ vif->virtio_pci_func->del_queue (vm, vif, vif->max_queue_pairs * 2);
- if (vif->virtio_pci_func)
- vif->virtio_pci_func->device_reset (vm, vif);
+ vif->virtio_pci_func->device_reset (vm, vif);
+ }
if (vif->hw_if_index)
{
@@ -1578,7 +1584,7 @@ virtio_pci_delete_if (vlib_main_t * vm, virtio_if_t * vif)
vec_foreach_index (i, vif->rxq_vrings)
{
- virtio_vring_t *vring = vec_elt_at_index (vif->rxq_vrings, i);
+ vnet_virtio_vring_t *vring = vec_elt_at_index (vif->rxq_vrings, i);
if (vring->used)
{
virtio_free_buffers (vm, vring);
@@ -1587,9 +1593,12 @@ virtio_pci_delete_if (vlib_main_t * vm, virtio_if_t * vif)
vlib_physmem_free (vm, vring->desc);
}
+ if (vif->packet_buffering)
+ virtio_pre_input_node_disable (vm, vif);
+
vec_foreach_index (i, vif->txq_vrings)
{
- virtio_vring_t *vring = vec_elt_at_index (vif->txq_vrings, i);
+ vnet_virtio_vring_t *vring = vec_elt_at_index (vif->txq_vrings, i);
if (vring->used)
{
virtio_free_buffers (vm, vring);