Previously we have two places that will create the per KVMSlot dirty
bitmap:
1. When a newly created KVMSlot has dirty logging enabled,
2. When the first log_sync() happens for a memory slot.
The 2nd case is lazy-init, while the 1st case is not (which is a fix
of what the 2nd case missed).
To do explicit initialization of dirty bitmaps, what we're missing is
to create the dirty bitmap when the slot changes from not tracking
dirty pages to tracking them. Do that in kvm_slot_update_flags().
With that, we can safely remove the 2nd lazy-init.
This change will be needed for kvm dirty ring because kvm dirty ring
does not use the log_sync() interface at all.
While at it, move all the pre-checks into kvm_slot_init_dirty_bitmap().
Signed-off-by: Peter Xu <peterx@redhat.com>
---
accel/kvm/kvm-all.c | 23 +++++++++--------------
1 file changed, 9 insertions(+), 14 deletions(-)
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index 4be3cd2352..bb635c775f 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -162,6 +162,8 @@ static NotifierList kvm_irqchip_change_notifiers =
#define kvm_slots_lock(kml) qemu_mutex_lock(&(kml)->slots_lock)
#define kvm_slots_unlock(kml) qemu_mutex_unlock(&(kml)->slots_lock)
+static void kvm_slot_init_dirty_bitmap(KVMSlot *mem);
+
int kvm_get_max_memslots(void)
{
KVMState *s = KVM_STATE(current_accel());
@@ -442,6 +444,7 @@ static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
return 0;
}
+ kvm_slot_init_dirty_bitmap(mem);
return kvm_set_user_memory_region(kml, mem, false);
}
@@ -526,8 +529,12 @@ static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
#define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))
/* Allocate the dirty bitmap for a slot */
-static void kvm_memslot_init_dirty_bitmap(KVMSlot *mem)
+static void kvm_slot_init_dirty_bitmap(KVMSlot *mem)
{
+ if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || mem->dirty_bmap) {
+ return;
+ }
+
/*
* XXX bad kernel interface alert
* For dirty bitmap, kernel allocates array of size aligned to
@@ -578,11 +585,6 @@ static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
goto out;
}
- if (!mem->dirty_bmap) {
- /* Allocate on the first log_sync, once and for all */
- kvm_memslot_init_dirty_bitmap(mem);
- }
-
d.dirty_bitmap = mem->dirty_bmap;
d.slot = mem->slot | (kml->as_id << 16);
if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
@@ -1079,14 +1081,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
mem->start_addr = start_addr;
mem->ram = ram;
mem->flags = kvm_mem_flags(mr);
-
- if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
- /*
- * Reallocate the bmap; it means it doesn't disappear in
- * middle of a migrate.
- */
- kvm_memslot_init_dirty_bitmap(mem);
- }
+ kvm_slot_init_dirty_bitmap(mem);
err = kvm_set_user_memory_region(kml, mem, true);
if (err) {
fprintf(stderr, "%s: error registering slot: %s\n", __func__,
--
2.24.1
* Peter Xu (peterx@redhat.com) wrote:
> Previously we have two places that will create the per KVMSlot dirty
> bitmap:
>
> 1. When a newly created KVMSlot has dirty logging enabled,
> 2. When the first log_sync() happens for a memory slot.
>
> The 2nd case is lazy-init, while the 1st case is not (which is a fix
> of what the 2nd case missed).
>
> To do explicit initialization of dirty bitmaps, what we're missing is
> to create the dirty bitmap when the slot changed from not-dirty-track
> to dirty-track. Do that in kvm_slot_update_flags().
>
> With that, we can safely remove the 2nd lazy-init.
>
> This change will be needed for kvm dirty ring because kvm dirty ring
> does not use the log_sync() interface at all.
>
> Since at it, move all the pre-checks into kvm_slot_init_dirty_bitmap().
'While at it' or just Also
> Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
> ---
> accel/kvm/kvm-all.c | 23 +++++++++--------------
> 1 file changed, 9 insertions(+), 14 deletions(-)
>
> diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
> index 4be3cd2352..bb635c775f 100644
> --- a/accel/kvm/kvm-all.c
> +++ b/accel/kvm/kvm-all.c
> @@ -162,6 +162,8 @@ static NotifierList kvm_irqchip_change_notifiers =
> #define kvm_slots_lock(kml) qemu_mutex_lock(&(kml)->slots_lock)
> #define kvm_slots_unlock(kml) qemu_mutex_unlock(&(kml)->slots_lock)
>
> +static void kvm_slot_init_dirty_bitmap(KVMSlot *mem);
> +
> int kvm_get_max_memslots(void)
> {
> KVMState *s = KVM_STATE(current_accel());
> @@ -442,6 +444,7 @@ static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
> return 0;
> }
>
> + kvm_slot_init_dirty_bitmap(mem);
> return kvm_set_user_memory_region(kml, mem, false);
> }
>
> @@ -526,8 +529,12 @@ static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
> #define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))
>
> /* Allocate the dirty bitmap for a slot */
> -static void kvm_memslot_init_dirty_bitmap(KVMSlot *mem)
> +static void kvm_slot_init_dirty_bitmap(KVMSlot *mem)
> {
> + if (!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || mem->dirty_bmap) {
> + return;
> + }
> +
> /*
> * XXX bad kernel interface alert
> * For dirty bitmap, kernel allocates array of size aligned to
> @@ -578,11 +585,6 @@ static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
> goto out;
> }
>
> - if (!mem->dirty_bmap) {
> - /* Allocate on the first log_sync, once and for all */
> - kvm_memslot_init_dirty_bitmap(mem);
> - }
> -
> d.dirty_bitmap = mem->dirty_bmap;
> d.slot = mem->slot | (kml->as_id << 16);
> if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
> @@ -1079,14 +1081,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
> mem->start_addr = start_addr;
> mem->ram = ram;
> mem->flags = kvm_mem_flags(mr);
> -
> - if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
> - /*
> - * Reallocate the bmap; it means it doesn't disappear in
> - * middle of a migrate.
> - */
> - kvm_memslot_init_dirty_bitmap(mem);
> - }
> + kvm_slot_init_dirty_bitmap(mem);
> err = kvm_set_user_memory_region(kml, mem, true);
> if (err) {
> fprintf(stderr, "%s: error registering slot: %s\n", __func__,
> --
> 2.24.1
>
>
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
© 2016 - 2026 Red Hat, Inc.