The 'ram_addr_t' type is described as:
a QEMU internal address space that maps guest RAM physical
addresses into an intermediate address space that can map
to host virtual address spaces.
vfio_container_query_dirty_bitmap() doesn't expect such a QEMU
intermediate address, but a guest physical address. Use the
appropriate 'hwaddr' type, and rename the parameter to
@translated_addr for clarity.
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
include/hw/vfio/vfio-container.h | 3 ++-
hw/vfio/container.c | 11 ++++++-----
hw/vfio/listener.c | 12 ++++++------
3 files changed, 14 insertions(+), 12 deletions(-)
diff --git a/include/hw/vfio/vfio-container.h b/include/hw/vfio/vfio-container.h
index b8fb2b8b5d7..093c360f0ee 100644
--- a/include/hw/vfio/vfio-container.h
+++ b/include/hw/vfio/vfio-container.h
@@ -98,7 +98,8 @@ bool vfio_container_dirty_tracking_is_started(
bool vfio_container_devices_dirty_tracking_is_supported(
const VFIOContainer *bcontainer);
int vfio_container_query_dirty_bitmap(const VFIOContainer *bcontainer,
- uint64_t iova, uint64_t size, ram_addr_t ram_addr, Error **errp);
+ uint64_t iova, uint64_t size,
+ hwaddr translated_addr, Error **errp);
GList *vfio_container_get_iova_ranges(const VFIOContainer *bcontainer);
diff --git a/hw/vfio/container.c b/hw/vfio/container.c
index 250b20f4245..9d694393714 100644
--- a/hw/vfio/container.c
+++ b/hw/vfio/container.c
@@ -246,7 +246,7 @@ static int vfio_container_devices_query_dirty_bitmap(
int vfio_container_query_dirty_bitmap(const VFIOContainer *bcontainer,
uint64_t iova, uint64_t size,
- ram_addr_t ram_addr, Error **errp)
+ hwaddr translated_addr, Error **errp)
{
bool all_device_dirty_tracking =
vfio_container_devices_dirty_tracking_is_supported(bcontainer);
@@ -255,7 +255,7 @@ int vfio_container_query_dirty_bitmap(const VFIOContainer *bcontainer,
int ret;
if (!bcontainer->dirty_pages_supported && !all_device_dirty_tracking) {
- cpu_physical_memory_set_dirty_range(ram_addr, size,
+ cpu_physical_memory_set_dirty_range(translated_addr, size,
tcg_enabled() ? DIRTY_CLIENTS_ALL :
DIRTY_CLIENTS_NOCODE);
return 0;
@@ -280,11 +280,12 @@ int vfio_container_query_dirty_bitmap(const VFIOContainer *bcontainer,
goto out;
}
- dirty_pages = cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr,
+ dirty_pages = cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap,
+ translated_addr,
vbmap.pages);
- trace_vfio_container_query_dirty_bitmap(iova, size, vbmap.size, ram_addr,
- dirty_pages);
+ trace_vfio_container_query_dirty_bitmap(iova, size, vbmap.size,
+ translated_addr, dirty_pages);
out:
g_free(vbmap.bitmap);
diff --git a/hw/vfio/listener.c b/hw/vfio/listener.c
index 3b6f17f0c3a..4e2565905e0 100644
--- a/hw/vfio/listener.c
+++ b/hw/vfio/listener.c
@@ -1059,7 +1059,7 @@ static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
VFIOGuestIOMMU *giommu = gdn->giommu;
VFIOContainer *bcontainer = giommu->bcontainer;
hwaddr iova = iotlb->iova + giommu->iommu_offset;
- ram_addr_t translated_addr;
+ hwaddr translated_addr;
Error *local_err = NULL;
int ret = -EINVAL;
MemoryRegion *mr;
@@ -1108,7 +1108,7 @@ static int vfio_ram_discard_query_dirty_bitmap(MemoryRegionSection *section,
{
const hwaddr size = int128_get64(section->size);
const hwaddr iova = section->offset_within_address_space;
- const ram_addr_t ram_addr = memory_region_get_ram_addr(section->mr) +
+ const hwaddr addr = memory_region_get_ram_addr(section->mr) +
section->offset_within_region;
VFIORamDiscardListener *vrdl = opaque;
Error *local_err = NULL;
@@ -1118,7 +1118,7 @@ static int vfio_ram_discard_query_dirty_bitmap(MemoryRegionSection *section,
* Sync the whole mapped region (spanning multiple individual mappings)
* in one go.
*/
- ret = vfio_container_query_dirty_bitmap(vrdl->bcontainer, iova, size, ram_addr,
+ ret = vfio_container_query_dirty_bitmap(vrdl->bcontainer, iova, size, addr,
&local_err);
if (ret) {
error_report_err(local_err);
@@ -1183,7 +1183,7 @@ static int vfio_sync_iommu_dirty_bitmap(VFIOContainer *bcontainer,
static int vfio_sync_dirty_bitmap(VFIOContainer *bcontainer,
MemoryRegionSection *section, Error **errp)
{
- ram_addr_t ram_addr;
+ hwaddr addr;
if (memory_region_is_iommu(section->mr)) {
return vfio_sync_iommu_dirty_bitmap(bcontainer, section);
@@ -1198,12 +1198,12 @@ static int vfio_sync_dirty_bitmap(VFIOContainer *bcontainer,
return ret;
}
- ram_addr = memory_region_get_ram_addr(section->mr) +
+ addr = memory_region_get_ram_addr(section->mr) +
section->offset_within_region;
return vfio_container_query_dirty_bitmap(bcontainer,
REAL_HOST_PAGE_ALIGN(section->offset_within_address_space),
- int128_get64(section->size), ram_addr, errp);
+ int128_get64(section->size), addr, errp);
}
static void vfio_listener_log_sync(MemoryListener *listener,
--
2.51.0
On 9/30/25 11:14, Philippe Mathieu-Daudé wrote:
> The 'ram_addr_t' type is described as:
>
> a QEMU internal address space that maps guest RAM physical
> addresses into an intermediate address space that can map
> to host virtual address spaces.
>
> vfio_container_query_dirty_bitmap() doesn't expect such a QEMU
> intermediate address, but a guest physical address. Use the
> appropriate 'hwaddr' type, and rename the parameter to
> @translated_addr for clarity.
>
> Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
> ---
> include/hw/vfio/vfio-container.h | 3 ++-
> hw/vfio/container.c | 11 ++++++-----
> hw/vfio/listener.c | 12 ++++++------
> 3 files changed, 14 insertions(+), 12 deletions(-)
>
> diff --git a/include/hw/vfio/vfio-container.h b/include/hw/vfio/vfio-container.h
> index b8fb2b8b5d7..093c360f0ee 100644
> --- a/include/hw/vfio/vfio-container.h
> +++ b/include/hw/vfio/vfio-container.h
> @@ -98,7 +98,8 @@ bool vfio_container_dirty_tracking_is_started(
> bool vfio_container_devices_dirty_tracking_is_supported(
> const VFIOContainer *bcontainer);
> int vfio_container_query_dirty_bitmap(const VFIOContainer *bcontainer,
> - uint64_t iova, uint64_t size, ram_addr_t ram_addr, Error **errp);
> + uint64_t iova, uint64_t size,
> + hwaddr translated_addr, Error **errp);
>
> GList *vfio_container_get_iova_ranges(const VFIOContainer *bcontainer);
>
> diff --git a/hw/vfio/container.c b/hw/vfio/container.c
> index 250b20f4245..9d694393714 100644
> --- a/hw/vfio/container.c
> +++ b/hw/vfio/container.c
> @@ -246,7 +246,7 @@ static int vfio_container_devices_query_dirty_bitmap(
>
> int vfio_container_query_dirty_bitmap(const VFIOContainer *bcontainer,
> uint64_t iova, uint64_t size,
> - ram_addr_t ram_addr, Error **errp)
> + hwaddr translated_addr, Error **errp)
> {
> bool all_device_dirty_tracking =
> vfio_container_devices_dirty_tracking_is_supported(bcontainer);
> @@ -255,7 +255,7 @@ int vfio_container_query_dirty_bitmap(const VFIOContainer *bcontainer,
> int ret;
>
> if (!bcontainer->dirty_pages_supported && !all_device_dirty_tracking) {
> - cpu_physical_memory_set_dirty_range(ram_addr, size,
> + cpu_physical_memory_set_dirty_range(translated_addr, size,
> tcg_enabled() ? DIRTY_CLIENTS_ALL :
> DIRTY_CLIENTS_NOCODE);
> return 0;
> @@ -280,11 +280,12 @@ int vfio_container_query_dirty_bitmap(const VFIOContainer *bcontainer,
> goto out;
> }
>
> - dirty_pages = cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr,
> + dirty_pages = cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap,
> + translated_addr,
> vbmap.pages);
>
> - trace_vfio_container_query_dirty_bitmap(iova, size, vbmap.size, ram_addr,
> - dirty_pages);
> + trace_vfio_container_query_dirty_bitmap(iova, size, vbmap.size,
> + translated_addr, dirty_pages);
> out:
> g_free(vbmap.bitmap);
>
> diff --git a/hw/vfio/listener.c b/hw/vfio/listener.c
> index 3b6f17f0c3a..4e2565905e0 100644
> --- a/hw/vfio/listener.c
> +++ b/hw/vfio/listener.c
> @@ -1059,7 +1059,7 @@ static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
> VFIOGuestIOMMU *giommu = gdn->giommu;
> VFIOContainer *bcontainer = giommu->bcontainer;
> hwaddr iova = iotlb->iova + giommu->iommu_offset;
> - ram_addr_t translated_addr;
> + hwaddr translated_addr;
> Error *local_err = NULL;
> int ret = -EINVAL;
> MemoryRegion *mr;
> @@ -1108,7 +1108,7 @@ static int vfio_ram_discard_query_dirty_bitmap(MemoryRegionSection *section,
> {
> const hwaddr size = int128_get64(section->size);
> const hwaddr iova = section->offset_within_address_space;
> - const ram_addr_t ram_addr = memory_region_get_ram_addr(section->mr) +
> + const hwaddr addr = memory_region_get_ram_addr(section->mr) +
> section->offset_within_region;
While we're at it, could I ask that, for consistency with the other
callers of vfio_container_query_dirty_bitmap(), you rename 'addr' to
'translated_addr' here and below as well?
Feel free to ignore.
Reviewed-by: Cédric Le Goater <clg@redhat.com>
Thanks,
C.
> VFIORamDiscardListener *vrdl = opaque;
> Error *local_err = NULL;
> @@ -1118,7 +1118,7 @@ static int vfio_ram_discard_query_dirty_bitmap(MemoryRegionSection *section,
> * Sync the whole mapped region (spanning multiple individual mappings)
> * in one go.
> */
> - ret = vfio_container_query_dirty_bitmap(vrdl->bcontainer, iova, size, ram_addr,
> + ret = vfio_container_query_dirty_bitmap(vrdl->bcontainer, iova, size, addr,
> &local_err);
> if (ret) {
> error_report_err(local_err);
> @@ -1183,7 +1183,7 @@ static int vfio_sync_iommu_dirty_bitmap(VFIOContainer *bcontainer,
> static int vfio_sync_dirty_bitmap(VFIOContainer *bcontainer,
> MemoryRegionSection *section, Error **errp)
> {
> - ram_addr_t ram_addr;
> + hwaddr addr;
>
> if (memory_region_is_iommu(section->mr)) {
> return vfio_sync_iommu_dirty_bitmap(bcontainer, section);
> @@ -1198,12 +1198,12 @@ static int vfio_sync_dirty_bitmap(VFIOContainer *bcontainer,
> return ret;
> }
>
> - ram_addr = memory_region_get_ram_addr(section->mr) +
> + addr = memory_region_get_ram_addr(section->mr) +
> section->offset_within_region;
>
> return vfio_container_query_dirty_bitmap(bcontainer,
> REAL_HOST_PAGE_ALIGN(section->offset_within_address_space),
> - int128_get64(section->size), ram_addr, errp);
> + int128_get64(section->size), addr, errp);
> }
>
> static void vfio_listener_log_sync(MemoryListener *listener,
© 2016 - 2025 Red Hat, Inc.