On Intel, the DMA mapped through the host single stage. Instead
we set up the stage 2 and stage 1 separately in nested mode as there
is no "Caching Mode".
Legacy vfio_listener_log_sync cannot be used in nested stage as we
don't need to pay close attention to stage 1 mapping. This patch adds
vfio_prereg_listener_log_sync to mark dirty pages in nested mode.
Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
---
hw/vfio/common.c | 25 +++++++++++++++++++++++++
1 file changed, 25 insertions(+)
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 7c50905856..af333e0dee 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -1216,6 +1216,22 @@ static int vfio_dma_sync_ram_section_dirty_bitmap(VFIOContainer *container,
int128_get64(section->size), ram_addr);
}
+static void vfio_prereg_listener_log_sync(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ VFIOContainer *container =
+ container_of(listener, VFIOContainer, prereg_listener);
+
+ if (!memory_region_is_ram(section->mr) ||
+ !container->dirty_pages_supported) {
+ return;
+ }
+
+ if (vfio_devices_all_saving(container)) {
+ vfio_dma_sync_ram_section_dirty_bitmap(container, section);
+ }
+}
+
typedef struct {
IOMMUNotifier n;
VFIOGuestIOMMU *giommu;
@@ -1260,6 +1276,14 @@ static int vfio_sync_dirty_bitmap(VFIOContainer *container,
if (memory_region_is_iommu(section->mr)) {
VFIOGuestIOMMU *giommu;
+ /*
+ * In nested mode, stage 2 and stage 1 are set up separately. We
+ * only need to focus on stage 2 mapping when marking dirty pages.
+ */
+ if (container->iommu_type == VFIO_TYPE1_NESTING_IOMMU) {
+ return 0;
+ }
+
QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
if (MEMORY_REGION(giommu->iommu) == section->mr &&
giommu->n.start == section->offset_within_region) {
@@ -1312,6 +1336,7 @@ static const MemoryListener vfio_memory_listener = {
static MemoryListener vfio_memory_prereg_listener = {
.region_add = vfio_prereg_listener_region_add,
.region_del = vfio_prereg_listener_region_del,
+ .log_sync = vfio_prereg_listener_log_sync,
};
static void vfio_listener_release(VFIOContainer *container)
--
2.23.0
Hi Kunkun,
On 2/19/21 10:42 AM, Kunkun Jiang wrote:
> On Intel, the DMA mapped through the host single stage. Instead
> we set up the stage 2 and stage 1 separately in nested mode as there
> is no "Caching Mode".
You need to rewrite the above sentences: ARM is missing, and the first
sentence is missing a verb.
>
> Legacy vfio_listener_log_sync cannot be used in nested stage as we
> don't need to pay close attention to stage 1 mapping. This patch adds
> vfio_prereg_listener_log_sync to mark dirty pages in nested mode.
>
> Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
> ---
> hw/vfio/common.c | 25 +++++++++++++++++++++++++
> 1 file changed, 25 insertions(+)
>
> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
> index 7c50905856..af333e0dee 100644
> --- a/hw/vfio/common.c
> +++ b/hw/vfio/common.c
> @@ -1216,6 +1216,22 @@ static int vfio_dma_sync_ram_section_dirty_bitmap(VFIOContainer *container,
> int128_get64(section->size), ram_addr);
> }
>
> +static void vfio_prereg_listener_log_sync(MemoryListener *listener,
> + MemoryRegionSection *section)
> +{
> + VFIOContainer *container =
> + container_of(listener, VFIOContainer, prereg_listener);
> +
> + if (!memory_region_is_ram(section->mr) ||
> + !container->dirty_pages_supported) {
> + return;
> + }
> +
> + if (vfio_devices_all_saving(container)) {
I fail to see where this is defined?
> + vfio_dma_sync_ram_section_dirty_bitmap(container, section);
> + }
> +}
> +
> typedef struct {
> IOMMUNotifier n;
> VFIOGuestIOMMU *giommu;
> @@ -1260,6 +1276,14 @@ static int vfio_sync_dirty_bitmap(VFIOContainer *container,
> if (memory_region_is_iommu(section->mr)) {
> VFIOGuestIOMMU *giommu;
>
> + /*
> + * In nested mode, stage 2 and stage 1 are set up separately. We
> + * only need to focus on stage 2 mapping when marking dirty pages.
> + */
> + if (container->iommu_type == VFIO_TYPE1_NESTING_IOMMU) {
> + return 0;
> + }
> +
> QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
> if (MEMORY_REGION(giommu->iommu) == section->mr &&
> giommu->n.start == section->offset_within_region) {
> @@ -1312,6 +1336,7 @@ static const MemoryListener vfio_memory_listener = {
> static MemoryListener vfio_memory_prereg_listener = {
> .region_add = vfio_prereg_listener_region_add,
> .region_del = vfio_prereg_listener_region_del,
> + .log_sync = vfio_prereg_listener_log_sync,
> };
>
> static void vfio_listener_release(VFIOContainer *container)
>
Thanks
Eric
Hi Eric,
On 2021/4/8 21:56, Auger Eric wrote:
> Hi Kunkun,
>
> On 2/19/21 10:42 AM, Kunkun Jiang wrote:
>> On Intel, the DMA mapped through the host single stage. Instead
>> we set up the stage 2 and stage 1 separately in nested mode as there
>> is no "Caching Mode".
> You need to rewrite the above sentences: ARM is missing, and the first
> sentence is missing a verb.
Thanks for your review! I will fix it in the next version.
>> Legacy vfio_listener_log_sync cannot be used in nested stage as we
>> don't need to pay close attention to stage 1 mapping. This patch adds
>> vfio_prereg_listener_log_sync to mark dirty pages in nested mode.
>>
>> Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
>> ---
>> hw/vfio/common.c | 25 +++++++++++++++++++++++++
>> 1 file changed, 25 insertions(+)
>>
>> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
>> index 7c50905856..af333e0dee 100644
>> --- a/hw/vfio/common.c
>> +++ b/hw/vfio/common.c
>> @@ -1216,6 +1216,22 @@ static int vfio_dma_sync_ram_section_dirty_bitmap(VFIOContainer *container,
>> int128_get64(section->size), ram_addr);
>> }
>>
>> +static void vfio_prereg_listener_log_sync(MemoryListener *listener,
>> + MemoryRegionSection *section)
>> +{
>> + VFIOContainer *container =
>> + container_of(listener, VFIOContainer, prereg_listener);
>> +
>> + if (!memory_region_is_ram(section->mr) ||
>> + !container->dirty_pages_supported) {
>> + return;
>> + }
>> +
>> + if (vfio_devices_all_saving(container)) {
> I fail to see where this is defined?
Keqian renamed vfio_devices_all_saving to vfio_devices_all_dirty_tracking
in commit 758b96b61d5.
When I posted this series, it was still vfio_devices_all_saving. In v2 [1],
I have updated it based on the latest QEMU.
[1]
https://lore.kernel.org/qemu-devel/20210331101259.2153-3-jiangkunkun@huawei.com/
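For reference, with that rename applied, the hook from this patch would look
roughly like this (just a sketch based on the v1 hunk above, only swapping in
vfio_devices_all_dirty_tracking; see the v2 link for the exact code):

    static void vfio_prereg_listener_log_sync(MemoryListener *listener,
                                              MemoryRegionSection *section)
    {
        VFIOContainer *container =
            container_of(listener, VFIOContainer, prereg_listener);

        /* Only RAM sections matter here, and dirty tracking must be supported. */
        if (!memory_region_is_ram(section->mr) ||
            !container->dirty_pages_supported) {
            return;
        }

        /* v2 uses the helper renamed in commit 758b96b61d5. */
        if (vfio_devices_all_dirty_tracking(container)) {
            vfio_dma_sync_ram_section_dirty_bitmap(container, section);
        }
    }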
Thanks,
Kunkun Jiang
>> + vfio_dma_sync_ram_section_dirty_bitmap(container, section);
>> + }
>> +}
>> +
>> typedef struct {
>> IOMMUNotifier n;
>> VFIOGuestIOMMU *giommu;
>> @@ -1260,6 +1276,14 @@ static int vfio_sync_dirty_bitmap(VFIOContainer *container,
>> if (memory_region_is_iommu(section->mr)) {
>> VFIOGuestIOMMU *giommu;
>>
>> + /*
>> + * In nested mode, stage 2 and stage 1 are set up separately. We
>> + * only need to focus on stage 2 mapping when marking dirty pages.
>> + */
>> + if (container->iommu_type == VFIO_TYPE1_NESTING_IOMMU) {
>> + return 0;
>> + }
>> +
>> QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
>> if (MEMORY_REGION(giommu->iommu) == section->mr &&
>> giommu->n.start == section->offset_within_region) {
>> @@ -1312,6 +1336,7 @@ static const MemoryListener vfio_memory_listener = {
>> static MemoryListener vfio_memory_prereg_listener = {
>> .region_add = vfio_prereg_listener_region_add,
>> .region_del = vfio_prereg_listener_region_del,
>> + .log_sync = vfio_prereg_listener_log_sync,
>> };
>>
>> static void vfio_listener_release(VFIOContainer *container)
>>
> Thanks
>
> Eric
>