Implement support for RamDiscardMgr to prepare for virtio-mem
support. Instead of mapping the whole memory section, we map only the
"populated" parts and update the mapping when notified about
discarding/population of memory via the RamDiscardListener. Similarly,
when syncing the dirty bitmaps, we sync only the actually mapped
(populated) parts by replaying via the notifier.
Using virtio-mem with vfio is still blocked via
ram_block_discard_disable()/ram_block_discard_require() after this patch.
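
As an aside for readers (a standalone sketch, not part of the patch):
the granularity-chunked mapping that vfio_ram_discard_notify_populate()
in the diff below performs looks like this; the range and granularity
values are invented:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ROUND_UP(n, d)  (((n) + (d) - 1) / (d) * (d))
    #define MIN(a, b)       ((a) < (b) ? (a) : (b))

    int main(void)
    {
        /* Map in minimum granularity, so that any granularity-aligned
         * part can later be unmapped individually when discarded. */
        const uint64_t granularity = 2 * 1024 * 1024; /* e.g., 2 MiB blocks */
        uint64_t start = 5 * 1024 * 1024;    /* populated: [5 MiB, 12 MiB) */
        const uint64_t end = 12 * 1024 * 1024;
        uint64_t next;

        for (; start < end; start = next) {
            next = MIN(ROUND_UP(start + 1, granularity), end);
            printf("map [0x%" PRIx64 ", 0x%" PRIx64 ")\n", start, next);
        }
        return 0;
    }

Each printed range corresponds to one vfio_dma_map() call in the patch.
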
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Dr. David Alan Gilbert <dgilbert@redhat.com>
Cc: Igor Mammedov <imammedo@redhat.com>
Cc: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: Auger Eric <eric.auger@redhat.com>
Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
Cc: teawater <teawaterz@linux.alibaba.com>
Cc: Marek Kedzierski <mkedzier@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
---
hw/vfio/common.c | 200 ++++++++++++++++++++++++++++++++++
include/hw/vfio/vfio-common.h | 12 ++
2 files changed, 212 insertions(+)
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 6ff1daa763..2bd219cf1d 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -654,6 +654,136 @@ out:
rcu_read_unlock();
}
+static void vfio_ram_discard_notify_discard(RamDiscardListener *rdl,
+ const MemoryRegion *mr,
+ ram_addr_t offset, ram_addr_t size)
+{
+ VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
+ listener);
+ const hwaddr mr_start = MAX(offset, vrdl->offset_within_region);
+ const hwaddr mr_end = MIN(offset + size,
+ vrdl->offset_within_region + vrdl->size);
+ const hwaddr iova = mr_start - vrdl->offset_within_region +
+ vrdl->offset_within_address_space;
+ int ret;
+
+ if (mr_start >= mr_end) {
+ return;
+ }
+
+ /* Unmap with a single call. */
+ ret = vfio_dma_unmap(vrdl->container, iova, mr_end - mr_start, NULL);
+ if (ret) {
+ error_report("%s: vfio_dma_unmap() failed: %s", __func__,
+ strerror(-ret));
+ }
+}
+
+static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl,
+ const MemoryRegion *mr,
+ ram_addr_t offset, ram_addr_t size)
+{
+ VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
+ listener);
+ const hwaddr mr_end = MIN(offset + size,
+ vrdl->offset_within_region + vrdl->size);
+ hwaddr mr_start = MAX(offset, vrdl->offset_within_region);
+ hwaddr mr_next, iova;
+ void *vaddr;
+ int ret;
+
+ /*
+ * Map in (aligned within memory region) minimum granularity, so we can
+ * unmap in minimum granularity later.
+ */
+ for (; mr_start < mr_end; mr_start = mr_next) {
+ mr_next = ROUND_UP(mr_start + 1, vrdl->granularity);
+ mr_next = MIN(mr_next, mr_end);
+
+ iova = mr_start - vrdl->offset_within_region +
+ vrdl->offset_within_address_space;
+ vaddr = memory_region_get_ram_ptr(vrdl->mr) + mr_start;
+
+ ret = vfio_dma_map(vrdl->container, iova, mr_next - mr_start,
+ vaddr, mr->readonly);
+ if (ret) {
+ /* Rollback */
+ vfio_ram_discard_notify_discard(rdl, mr, offset, size);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static void vfio_ram_discard_notify_discard_all(RamDiscardListener *rdl,
+ const MemoryRegion *mr)
+{
+ VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
+ listener);
+ int ret;
+
+ /* Unmap with a single call. */
+ ret = vfio_dma_unmap(vrdl->container, vrdl->offset_within_address_space,
+ vrdl->size, NULL);
+ if (ret) {
+ error_report("%s: vfio_dma_unmap() failed: %s", __func__,
+ strerror(-ret));
+ }
+}
+
+static void vfio_register_ram_discard_notifier(VFIOContainer *container,
+ MemoryRegionSection *section)
+{
+ RamDiscardMgr *rdm = memory_region_get_ram_discard_mgr(section->mr);
+ RamDiscardMgrClass *rdmc = RAM_DISCARD_MGR_GET_CLASS(rdm);
+ VFIORamDiscardListener *vrdl;
+
+ vrdl = g_new0(VFIORamDiscardListener, 1);
+ vrdl->container = container;
+ vrdl->mr = section->mr;
+ vrdl->offset_within_region = section->offset_within_region;
+ vrdl->offset_within_address_space = section->offset_within_address_space;
+ vrdl->size = int128_get64(section->size);
+ vrdl->granularity = rdmc->get_min_granularity(rdm, section->mr);
+
+ /* Ignore some corner cases not relevant in practice. */
+ g_assert(QEMU_IS_ALIGNED(vrdl->offset_within_region, TARGET_PAGE_SIZE));
+ g_assert(QEMU_IS_ALIGNED(vrdl->offset_within_address_space,
+ TARGET_PAGE_SIZE));
+ g_assert(QEMU_IS_ALIGNED(vrdl->size, TARGET_PAGE_SIZE));
+
+ ram_discard_listener_init(&vrdl->listener,
+ vfio_ram_discard_notify_populate,
+ vfio_ram_discard_notify_discard,
+ vfio_ram_discard_notify_discard_all);
+ rdmc->register_listener(rdm, section->mr, &vrdl->listener);
+ QLIST_INSERT_HEAD(&container->vrdl_list, vrdl, next);
+}
+
+static void vfio_unregister_ram_discard_listener(VFIOContainer *container,
+ MemoryRegionSection *section)
+{
+ RamDiscardMgr *rdm = memory_region_get_ram_discard_mgr(section->mr);
+ RamDiscardMgrClass *rdmc = RAM_DISCARD_MGR_GET_CLASS(rdm);
+ VFIORamDiscardListener *vrdl = NULL;
+
+ QLIST_FOREACH(vrdl, &container->vrdl_list, next) {
+ if (vrdl->mr == section->mr &&
+ vrdl->offset_within_region == section->offset_within_region) {
+ break;
+ }
+ }
+
+ if (!vrdl) {
+ hw_error("vfio: Trying to unregister missing RAM discard listener");
+ }
+
+ rdmc->unregister_listener(rdm, section->mr, &vrdl->listener);
+ QLIST_REMOVE(vrdl, next);
+
+ g_free(vrdl);
+}
+
static void vfio_listener_region_add(MemoryListener *listener,
MemoryRegionSection *section)
{
@@ -814,6 +944,16 @@ static void vfio_listener_region_add(MemoryListener *listener,
/* Here we assume that memory_region_is_ram(section->mr)==true */
+ /*
+ * For RAM memory regions with a RamDiscardMgr, we only want to map the
+ * actually populated parts - and update the mapping whenever we're notified
+ * about changes.
+ */
+ if (memory_region_has_ram_discard_mgr(section->mr)) {
+ vfio_register_ram_discard_notifier(container, section);
+ return;
+ }
+
vaddr = memory_region_get_ram_ptr(section->mr) +
section->offset_within_region +
(iova - section->offset_within_address_space);
@@ -950,6 +1090,10 @@ static void vfio_listener_region_del(MemoryListener *listener,
pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
+ } else if (memory_region_has_ram_discard_mgr(section->mr)) {
+ vfio_unregister_ram_discard_listener(container, section);
+ /* Unregistering will trigger an unmap. */
+ try_unmap = false;
}
if (try_unmap) {
@@ -1077,6 +1221,59 @@ static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
rcu_read_unlock();
}
+static int vfio_ram_discard_notify_dirty_bitmap(RamDiscardListener *rdl,
+ const MemoryRegion *mr,
+ ram_addr_t offset,
+ ram_addr_t size)
+{
+ VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
+ listener);
+ const hwaddr mr_start = MAX(offset, vrdl->offset_within_region);
+ const hwaddr mr_end = MIN(offset + size,
+ vrdl->offset_within_region + vrdl->size);
+ const hwaddr iova = mr_start - vrdl->offset_within_region +
+ vrdl->offset_within_address_space;
+ ram_addr_t ram_addr;
+ int ret;
+
+ if (mr_start >= mr_end) {
+ return 0;
+ }
+
+ /*
+ * Sync the whole mapped region (spanning multiple individual mappings)
+ * in one go.
+ */
+ ram_addr = memory_region_get_ram_addr(vrdl->mr) + mr_start;
+ ret = vfio_get_dirty_bitmap(vrdl->container, iova, mr_end - mr_start,
+ ram_addr);
+ return ret;
+}
+
+static int vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainer *container,
+ MemoryRegionSection *section)
+{
+ RamDiscardMgr *rdm = memory_region_get_ram_discard_mgr(section->mr);
+ RamDiscardMgrClass *rdmc = RAM_DISCARD_MGR_GET_CLASS(rdm);
+ VFIORamDiscardListener tmp_vrdl, *vrdl = NULL;
+
+ QLIST_FOREACH(vrdl, &container->vrdl_list, next) {
+ if (vrdl->mr == section->mr &&
+ vrdl->offset_within_region == section->offset_within_region) {
+ break;
+ }
+ }
+
+ if (!vrdl) {
+ hw_error("vfio: Trying to sync missing RAM discard listener");
+ }
+
+ tmp_vrdl = *vrdl;
+ ram_discard_listener_init(&tmp_vrdl.listener,
+ vfio_ram_discard_notify_dirty_bitmap, NULL, NULL);
+ return rdmc->replay_populated(rdm, section->mr, &tmp_vrdl.listener);
+}
+
static int vfio_sync_dirty_bitmap(VFIOContainer *container,
MemoryRegionSection *section)
{
@@ -1108,6 +1305,8 @@ static int vfio_sync_dirty_bitmap(VFIOContainer *container,
}
}
return 0;
+ } else if (memory_region_has_ram_discard_mgr(section->mr)) {
+ return vfio_sync_ram_discard_listener_dirty_bitmap(container, section);
}
ram_addr = memory_region_get_ram_addr(section->mr) +
@@ -1737,6 +1936,7 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
container->dirty_pages_supported = false;
QLIST_INIT(&container->giommu_list);
QLIST_INIT(&container->hostwin_list);
+ QLIST_INIT(&container->vrdl_list);
ret = vfio_init_container(container, group->fd, errp);
if (ret) {
diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index 6141162d7a..af6f8d1b22 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -91,6 +91,7 @@ typedef struct VFIOContainer {
QLIST_HEAD(, VFIOGuestIOMMU) giommu_list;
QLIST_HEAD(, VFIOHostDMAWindow) hostwin_list;
QLIST_HEAD(, VFIOGroup) group_list;
+ QLIST_HEAD(, VFIORamDiscardListener) vrdl_list;
QLIST_ENTRY(VFIOContainer) next;
} VFIOContainer;
@@ -102,6 +103,17 @@ typedef struct VFIOGuestIOMMU {
QLIST_ENTRY(VFIOGuestIOMMU) giommu_next;
} VFIOGuestIOMMU;
+typedef struct VFIORamDiscardListener {
+ VFIOContainer *container;
+ MemoryRegion *mr;
+ hwaddr offset_within_region;
+ hwaddr offset_within_address_space;
+ hwaddr size;
+ uint64_t granularity;
+ RamDiscardListener listener;
+ QLIST_ENTRY(VFIORamDiscardListener) next;
+} VFIORamDiscardListener;
+
typedef struct VFIOHostDMAWindow {
hwaddr min_iova;
hwaddr max_iova;
--
2.29.2
On Thu, 7 Jan 2021 14:34:16 +0100
David Hildenbrand <david@redhat.com> wrote:
> [...]
> + /* Ignore some corner cases not relevant in practice. */
> + g_assert(QEMU_IS_ALIGNED(vrdl->offset_within_region, TARGET_PAGE_SIZE));
> + g_assert(QEMU_IS_ALIGNED(vrdl->offset_within_address_space,
> + TARGET_PAGE_SIZE));
> + g_assert(QEMU_IS_ALIGNED(vrdl->size, TARGET_PAGE_SIZE));

Should probably toss in a test of vrdl->granularity vs
container->pgsizes too, right? Looks good otherwise:

Reviewed-by: Alex Williamson <alex.williamson@redhat.com>
Acked-by: Alex Williamson <alex.williamson@redhat.com>
On 14.01.21 00:27, Alex Williamson wrote:
> On Thu, 7 Jan 2021 14:34:16 +0100
> David Hildenbrand <david@redhat.com> wrote:
>
>> [...]
>> + /* Ignore some corner cases not relevant in practice. */
>> + g_assert(QEMU_IS_ALIGNED(vrdl->offset_within_region, TARGET_PAGE_SIZE));
>> + g_assert(QEMU_IS_ALIGNED(vrdl->offset_within_address_space,
>> + TARGET_PAGE_SIZE));
>> + g_assert(QEMU_IS_ALIGNED(vrdl->size, TARGET_PAGE_SIZE));
>
> Should probably toss in a test of vrdl->granularity vs
> container->pgsizes too, right? Looks good otherwise:

Makes sense as a sanity check. What about

g_assert(vrdl->granularity && !is_power_of_2(vrdl->granularity));
g_assert(vrdl->granularity >= 1 << ctz64(container->pgsizes));

?

Thanks!
>
> Reviewed-by: Alex Williamson <alex.williamson@redhat.com>
> Acked-by: Alex Williamson <alex.williamson@redhat.com>
Thanks!
--
Thanks,
David / dhildenb
On 14.01.21 16:54, David Hildenbrand wrote:
> On 14.01.21 00:27, Alex Williamson wrote:
>> On Thu, 7 Jan 2021 14:34:16 +0100
>> David Hildenbrand <david@redhat.com> wrote:
>>
>>> [...]
>>> + /* Ignore some corner cases not relevant in practice. */
>>> + g_assert(QEMU_IS_ALIGNED(vrdl->offset_within_region, TARGET_PAGE_SIZE));
>>> + g_assert(QEMU_IS_ALIGNED(vrdl->offset_within_address_space,
>>> + TARGET_PAGE_SIZE));
>>> + g_assert(QEMU_IS_ALIGNED(vrdl->size, TARGET_PAGE_SIZE));
>>
>> Should probably toss in a test of vrdl->granularity vs
>> container->pgsizes too, right? Looks good otherwise:
>
> Makes sense as a sanity check. What about
>
> g_assert(vrdl->granularity && !is_power_of_2(vrdl->granularity));

^ g_assert(vrdl->granularity >= 1 << ctz64(container->pgsizes));
--
Thanks,
David / dhildenb
On 14.01.21 16:57, David Hildenbrand wrote:
> On 14.01.21 16:54, David Hildenbrand wrote:
>> On 14.01.21 00:27, Alex Williamson wrote:
>>> On Thu, 7 Jan 2021 14:34:16 +0100
>>> David Hildenbrand <david@redhat.com> wrote:
>>>
>>>> [...]
>>>> + /* Ignore some corner cases not relevant in practice. */
>>>> + g_assert(QEMU_IS_ALIGNED(vrdl->offset_within_region, TARGET_PAGE_SIZE));
>>>> + g_assert(QEMU_IS_ALIGNED(vrdl->offset_within_address_space,
>>>> + TARGET_PAGE_SIZE));
>>>> + g_assert(QEMU_IS_ALIGNED(vrdl->size, TARGET_PAGE_SIZE));
>>>
>>> Should probably toss in a test of vrdl->granularity vs
>>> container->pgsizes too, right? Looks good otherwise:
>>
>> Makes sense as a sanity check. What about
>>
>> g_assert(vrdl->granularity && !is_power_of_2(vrdl->granularity));
>
> ^ g_assert(vrdl->granularity >= 1 << ctz64(container->pgsizes));
>

Ehm, I guess my brain needs some time to cool down over the weekend :D

+ g_assert(vrdl->granularity && is_power_of_2(vrdl->granularity));
+ g_assert(vrdl->granularity >= 1 << ctz64(container->pgsizes));
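
A standalone sketch of what the second check computes (the pgsizes
bitmap is invented; __builtin_ctzll() stands in for QEMU's ctz64()):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Bitmap of IOMMU-supported page sizes: 4 KiB, 2 MiB, 1 GiB. */
        uint64_t pgsizes = (1ULL << 12) | (1ULL << 21) | (1ULL << 30);
        /* The lowest set bit is the smallest supported page size, so
         * the assert rejects a granularity we could not unmap. */
        uint64_t min_pgsize = 1ULL << __builtin_ctzll(pgsizes);

        printf("minimum IOMMU page size: %" PRIu64 "\n", min_pgsize);
        return 0;
    }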
--
Thanks,
David / dhildenb
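
As a closing illustration: a toy model of the "replaying populated
parts" idea that vfio_sync_ram_discard_listener_dirty_bitmap() in the
patch relies on. The callback fires once per contiguous populated run,
so only mapped ranges get synced. Names and values below are invented;
this is a sketch, not QEMU code:

    #include <inttypes.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef int (*replay_fn)(uint64_t offset, uint64_t size);

    static int sync_range(uint64_t offset, uint64_t size)
    {
        /* Stand-in for syncing the dirty bitmap of one mapped range. */
        printf("sync dirty bitmap for [0x%" PRIx64 ", 0x%" PRIx64 ")\n",
               offset, offset + size);
        return 0;
    }

    /* Invoke the callback once for each contiguous populated run. */
    static int replay_populated(const bool *populated, int nblocks,
                                uint64_t granularity, replay_fn fn)
    {
        for (int i = 0; i < nblocks; ) {
            int start, ret;

            if (!populated[i]) {
                i++;
                continue;
            }
            start = i;
            while (i < nblocks && populated[i]) {
                i++;
            }
            ret = fn(start * granularity,
                     (uint64_t)(i - start) * granularity);
            if (ret) {
                return ret;
            }
        }
        return 0;
    }

    int main(void)
    {
        /* Two populated runs and one populated block (invented layout). */
        const bool populated[] = { true, true, false, true,
                                   false, false, true };

        return replay_populated(populated, 7, 2 * 1024 * 1024, sync_range);
    }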