about summary refs log tree commit diff stats
path: root/drivers/net/ifc/ifcvf_vdpa.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/ifc/ifcvf_vdpa.c')
-rw-r--r--  drivers/net/ifc/ifcvf_vdpa.c | 102
1 file changed, 85 insertions(+), 17 deletions(-)
diff --git a/drivers/net/ifc/ifcvf_vdpa.c b/drivers/net/ifc/ifcvf_vdpa.c
index 88d81403..97a57f18 100644
--- a/drivers/net/ifc/ifcvf_vdpa.c
+++ b/drivers/net/ifc/ifcvf_vdpa.c
@@ -7,6 +7,7 @@
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/epoll.h>
+#include <linux/virtio_net.h>
#include <rte_malloc.h>
#include <rte_memory.h>
@@ -111,7 +112,6 @@ ifcvf_vfio_setup(struct ifcvf_internal *internal)
struct rte_pci_device *dev = internal->pdev;
char devname[RTE_DEV_NAME_MAX_LEN] = {0};
int iommu_group_num;
- int ret = 0;
int i;
internal->vfio_dev_fd = -1;
@@ -145,9 +145,8 @@ ifcvf_vfio_setup(struct ifcvf_internal *internal)
internal->hw.mem_resource[i].len =
internal->pdev->mem_resource[i].len;
}
- ret = ifcvf_init_hw(&internal->hw, internal->pdev);
- return ret;
+ return 0;
err:
rte_vfio_container_destroy(internal->vfio_container_fd);
@@ -205,7 +204,7 @@ exit:
}
static uint64_t
-qva_to_gpa(int vid, uint64_t qva)
+hva_to_gpa(int vid, uint64_t hva)
{
struct rte_vhost_memory *mem = NULL;
struct rte_vhost_mem_region *reg;
@@ -218,9 +217,9 @@ qva_to_gpa(int vid, uint64_t qva)
for (i = 0; i < mem->nregions; i++) {
reg = &mem->regions[i];
- if (qva >= reg->host_user_addr &&
- qva < reg->host_user_addr + reg->size) {
- gpa = qva - reg->host_user_addr + reg->guest_phys_addr;
+ if (hva >= reg->host_user_addr &&
+ hva < reg->host_user_addr + reg->size) {
+ gpa = hva - reg->host_user_addr + reg->guest_phys_addr;
break;
}
}
@@ -246,21 +245,21 @@ vdpa_ifcvf_start(struct ifcvf_internal *internal)
for (i = 0; i < nr_vring; i++) {
rte_vhost_get_vhost_vring(vid, i, &vq);
- gpa = qva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
+ gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.desc);
if (gpa == 0) {
DRV_LOG(ERR, "Fail to get GPA for descriptor ring.");
return -1;
}
hw->vring[i].desc = gpa;
- gpa = qva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
+ gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.avail);
if (gpa == 0) {
DRV_LOG(ERR, "Fail to get GPA for available ring.");
return -1;
}
hw->vring[i].avail = gpa;
- gpa = qva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
+ gpa = hva_to_gpa(vid, (uint64_t)(uintptr_t)vq.used);
if (gpa == 0) {
DRV_LOG(ERR, "Fail to get GPA for used ring.");
return -1;
@@ -277,11 +276,29 @@ vdpa_ifcvf_start(struct ifcvf_internal *internal)
}
static void
+ifcvf_used_ring_log(struct ifcvf_hw *hw, uint32_t queue, uint8_t *log_buf)
+{
+ uint32_t i, size;
+ uint64_t pfn;
+
+ pfn = hw->vring[queue].used / PAGE_SIZE;
+ size = hw->vring[queue].size * sizeof(struct vring_used_elem) +
+ sizeof(uint16_t) * 3;
+
+ for (i = 0; i <= size / PAGE_SIZE; i++)
+ __sync_fetch_and_or_8(&log_buf[(pfn + i) / 8],
+ 1 << ((pfn + i) % 8));
+}
+
+static void
vdpa_ifcvf_stop(struct ifcvf_internal *internal)
{
struct ifcvf_hw *hw = &internal->hw;
uint32_t i;
int vid;
+ uint64_t features;
+ uint64_t log_base, log_size;
+ uint8_t *log_buf;
vid = internal->vid;
ifcvf_stop_hw(hw);
@@ -289,6 +306,21 @@ vdpa_ifcvf_stop(struct ifcvf_internal *internal)
for (i = 0; i < hw->nr_vring; i++)
rte_vhost_set_vring_base(vid, i, hw->vring[i].last_avail_idx,
hw->vring[i].last_used_idx);
+
+ rte_vhost_get_negotiated_features(vid, &features);
+ if (RTE_VHOST_NEED_LOG(features)) {
+ ifcvf_disable_logging(hw);
+ rte_vhost_get_log_base(internal->vid, &log_base, &log_size);
+ rte_vfio_container_dma_unmap(internal->vfio_container_fd,
+ log_base, IFCVF_LOG_BASE, log_size);
+ /*
+ * IFCVF marks dirty memory pages for only packet buffer,
+ * SW helps to mark the used ring as dirty after device stops.
+ */
+ log_buf = (uint8_t *)(uintptr_t)log_base;
+ for (i = 0; i < hw->nr_vring; i++)
+ ifcvf_used_ring_log(hw, i, log_buf);
+ }
}
#define MSIX_IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + \
@@ -469,11 +501,11 @@ update_datapath(struct ifcvf_internal *internal)
if (ret)
goto err;
- ret = setup_notify_relay(internal);
+ ret = vdpa_ifcvf_start(internal);
if (ret)
goto err;
- ret = vdpa_ifcvf_start(internal);
+ ret = setup_notify_relay(internal);
if (ret)
goto err;
@@ -481,12 +513,12 @@ update_datapath(struct ifcvf_internal *internal)
} else if (rte_atomic32_read(&internal->running) &&
(!rte_atomic32_read(&internal->started) ||
!rte_atomic32_read(&internal->dev_attached))) {
- vdpa_ifcvf_stop(internal);
-
ret = unset_notify_relay(internal);
if (ret)
goto err;
+ vdpa_ifcvf_stop(internal);
+
ret = vdpa_disable_vfio_intr(internal);
if (ret)
goto err;
@@ -549,6 +581,35 @@ ifcvf_dev_close(int vid)
}
static int
+ifcvf_set_features(int vid)
+{
+ uint64_t features;
+ int did;
+ struct internal_list *list;
+ struct ifcvf_internal *internal;
+ uint64_t log_base, log_size;
+
+ did = rte_vhost_get_vdpa_device_id(vid);
+ list = find_internal_resource_by_did(did);
+ if (list == NULL) {
+ DRV_LOG(ERR, "Invalid device id: %d", did);
+ return -1;
+ }
+
+ internal = list->internal;
+ rte_vhost_get_negotiated_features(vid, &features);
+
+ if (RTE_VHOST_NEED_LOG(features)) {
+ rte_vhost_get_log_base(vid, &log_base, &log_size);
+ rte_vfio_container_dma_map(internal->vfio_container_fd,
+ log_base, IFCVF_LOG_BASE, log_size);
+ ifcvf_enable_logging(&internal->hw, IFCVF_LOG_BASE, log_size);
+ }
+
+ return 0;
+}
+
+static int
ifcvf_get_vfio_group_fd(int vid)
{
int did;
@@ -657,14 +718,14 @@ ifcvf_get_protocol_features(int did __rte_unused, uint64_t *features)
return 0;
}
-struct rte_vdpa_dev_ops ifcvf_ops = {
+static struct rte_vdpa_dev_ops ifcvf_ops = {
.get_queue_num = ifcvf_get_queue_num,
.get_features = ifcvf_get_vdpa_features,
.get_protocol_features = ifcvf_get_protocol_features,
.dev_conf = ifcvf_dev_config,
.dev_close = ifcvf_dev_close,
.set_vring_state = NULL,
- .set_features = NULL,
+ .set_features = ifcvf_set_features,
.migration_done = NULL,
.get_vfio_group_fd = ifcvf_get_vfio_group_fd,
.get_vfio_device_fd = ifcvf_get_vfio_device_fd,
@@ -695,11 +756,18 @@ ifcvf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
if (ifcvf_vfio_setup(internal) < 0)
return -1;
+ if (ifcvf_init_hw(&internal->hw, internal->pdev) < 0)
+ return -1;
+
internal->max_queues = IFCVF_MAX_QUEUES;
features = ifcvf_get_features(&internal->hw);
internal->features = (features &
~(1ULL << VIRTIO_F_IOMMU_PLATFORM)) |
- (1ULL << VHOST_USER_F_PROTOCOL_FEATURES);
+ (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) |
+ (1ULL << VIRTIO_NET_F_CTRL_VQ) |
+ (1ULL << VIRTIO_NET_F_STATUS) |
+ (1ULL << VHOST_USER_F_PROTOCOL_FEATURES) |
+ (1ULL << VHOST_F_LOG_ALL);
internal->dev_addr.pci_addr = pci_dev->addr;
internal->dev_addr.type = PCI_ADDR;