As reported by Peter, we might be leaking memory when removing the
highest RAMBlock (in the weird ram_addr_t space), and adding a new one.
We will fail to realize that we already allocated bitmaps for more
dirty memory blocks, and effectively discard the pointers to them.
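To make that concrete, here is a minimal standalone sketch of the pre-fix
bookkeeping (simplified and purely illustrative, not the actual physmem.c
code; the block size is shrunk to 8 pages and the numbers are made up):

  #include <stdio.h>

  #define BLOCK_SIZE 8 /* pages per dirty memory block, shrunk for illustration */
  #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

  int main(void)
  {
      /* RAM once reached page 32 (via the since-removed highest RAMBlock),
       * so 4 dirty bitmaps were allocated. */
      unsigned long allocated = DIV_ROUND_UP(32, BLOCK_SIZE);
      /* After the removal the remaining blocks only reach page 16, so a
       * last_ram_page()-style scan makes the old size look like 2 blocks ... */
      unsigned long old_num_blocks = DIV_ROUND_UP(16, BLOCK_SIZE);
      /* ... and adding a new RAMBlock that ends at page 24 triggers an extend
       * that copies only old_num_blocks pointers into the new array. */
      unsigned long new_num_blocks = DIV_ROUND_UP(24, BLOCK_SIZE);

      printf("allocated=%lu copied=%lu leaked=%lu\n",
             allocated, old_num_blocks, allocated - old_num_blocks);
      return 0;
  }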
Fix it by getting rid of last_ram_page() and by remembering the number
of dirty memory blocks that have been allocated already.
While at it, let's use "unsigned int" for the number of blocks, which
should be sufficient until we reach ~32 exabytes.
Looks like this leak was introduced as we switched from using a single
bitmap_zero_extend() to allocating multiple bitmaps:
bitmap_zero_extend() relies on g_renew() which should have taken care of
this.
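For comparison, a rough sketch of why a realloc-style grow does not suffer
from an under-estimated old size -- the existing buffer (and allocation) is
carried over and only the new tail is cleared. This is written in the spirit
of bitmap_zero_extend(); the helper name and exact headers are illustrative,
not the real bitmap.h code:

  #include "qemu/osdep.h"
  #include "qemu/bitmap.h"

  /* Illustrative only: grow a dirty bitmap in place via g_renew(). */
  static unsigned long *grow_dirty_bitmap(unsigned long *old,
                                          long old_nbits, long new_nbits)
  {
      unsigned long *new = g_renew(unsigned long, old, BITS_TO_LONGS(new_nbits));

      /* Only the newly appended bits need clearing; nothing that was
       * already allocated can be dropped here. */
      bitmap_clear(new, old_nbits, new_nbits - old_nbits);
      return new;
  }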
Resolves: https://lkml.kernel.org/r/CAFEAcA-k7a+VObGAfCFNygQNfCKL=AfX6A4kScq=VSSK0peqPg@mail.gmail.com
Reported-by: Peter Maydell <peter.maydell@linaro.org>
Fixes: 5b82b703b69a ("memory: RCU ram_list.dirty_memory[] for safe RAM hotplug")
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Tested-by: Peter Maydell <peter.maydell@linaro.org>
Cc: qemu-stable@nongnu.org
Cc: Stefan Hajnoczi <stefanha@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Xu <peterx@redhat.com>
Cc: "Philippe Mathieu-Daudé" <philmd@linaro.org>
Signed-off-by: David Hildenbrand <david@redhat.com>
---
v1 -> v2:
* Move the counter to RAMList
* Use "unsigned int" instead of "ram_addr_t" as type for the number of
blocks
---
include/exec/ramlist.h | 1 +
system/physmem.c | 35 +++++++++--------------------------
2 files changed, 10 insertions(+), 26 deletions(-)
diff --git a/include/exec/ramlist.h b/include/exec/ramlist.h
index 2ad2a81acc..d9cfe530be 100644
--- a/include/exec/ramlist.h
+++ b/include/exec/ramlist.h
@@ -50,6 +50,7 @@ typedef struct RAMList {
/* RCU-enabled, writes protected by the ramlist lock. */
QLIST_HEAD(, RAMBlock) blocks;
DirtyMemoryBlocks *dirty_memory[DIRTY_MEMORY_NUM];
+ unsigned int num_dirty_blocks;
uint32_t version;
QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
} RAMList;
diff --git a/system/physmem.c b/system/physmem.c
index 94600a33ec..5e7f066762 100644
--- a/system/physmem.c
+++ b/system/physmem.c
@@ -1534,18 +1534,6 @@ static ram_addr_t find_ram_offset(ram_addr_t size)
return offset;
}
-static unsigned long last_ram_page(void)
-{
- RAMBlock *block;
- ram_addr_t last = 0;
-
- RCU_READ_LOCK_GUARD();
- RAMBLOCK_FOREACH(block) {
- last = MAX(last, block->offset + block->max_length);
- }
- return last >> TARGET_PAGE_BITS;
-}
-
static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
int ret;
@@ -1799,13 +1787,11 @@ void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length)
}
/* Called with ram_list.mutex held */
-static void dirty_memory_extend(ram_addr_t old_ram_size,
- ram_addr_t new_ram_size)
+static void dirty_memory_extend(ram_addr_t new_ram_size)
{
- ram_addr_t old_num_blocks = DIV_ROUND_UP(old_ram_size,
- DIRTY_MEMORY_BLOCK_SIZE);
- ram_addr_t new_num_blocks = DIV_ROUND_UP(new_ram_size,
- DIRTY_MEMORY_BLOCK_SIZE);
+ unsigned int old_num_blocks = ram_list.num_dirty_blocks;
+ unsigned int new_num_blocks = DIV_ROUND_UP(new_ram_size,
+ DIRTY_MEMORY_BLOCK_SIZE);
int i;
/* Only need to extend if block count increased */
@@ -1837,6 +1823,8 @@ static void dirty_memory_extend(ram_addr_t old_ram_size,
g_free_rcu(old_blocks, rcu);
}
}
+
+ ram_list.num_dirty_blocks = new_num_blocks;
}
static void ram_block_add(RAMBlock *new_block, Error **errp)
@@ -1846,11 +1834,9 @@ static void ram_block_add(RAMBlock *new_block, Error **errp)
RAMBlock *block;
RAMBlock *last_block = NULL;
bool free_on_error = false;
- ram_addr_t old_ram_size, new_ram_size;
+ ram_addr_t ram_size;
Error *err = NULL;
- old_ram_size = last_ram_page();
-
qemu_mutex_lock_ramlist();
new_block->offset = find_ram_offset(new_block->max_length);
@@ -1901,11 +1887,8 @@ static void ram_block_add(RAMBlock *new_block, Error **errp)
}
}
- new_ram_size = MAX(old_ram_size,
- (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
- if (new_ram_size > old_ram_size) {
- dirty_memory_extend(old_ram_size, new_ram_size);
- }
+ ram_size = (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS;
+ dirty_memory_extend(ram_size);
/* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
* QLIST (which has an RCU-friendly variant) does not have insertion at
* tail, so save the last element in last_block.
--
2.46.0
On Wed, Aug 28, 2024 at 11:07:43AM +0200, David Hildenbrand wrote:
> [patch quoted above, trimmed]

Thanks. I'll pick this in the next pull if nobody disagrees (or beat me to
it).

--
Peter Xu