From: William Roche <william.roche@oracle.com>
Repair poisoned memory location(s), calling ram_block_discard_range():
punching a hole in the backend file when necessary and regenerating
a usable memory.
If the kernel doesn't support the madvise calls used by this function
and we are dealing with anonymous memory, fall back to remapping the
location(s).
Signed-off-by: William Roche <william.roche@oracle.com>
---
system/physmem.c | 54 ++++++++++++++++++++++++++++--------------------
1 file changed, 32 insertions(+), 22 deletions(-)
diff --git a/system/physmem.c b/system/physmem.c
index 3dd2adde73..3dc10ae27b 100644
--- a/system/physmem.c
+++ b/system/physmem.c
@@ -2167,6 +2167,23 @@ void qemu_ram_free(RAMBlock *block)
}
#ifndef _WIN32
+/* Simply remap the given VM memory location from start to start+length */
+static int qemu_ram_remap_mmap(RAMBlock *block, uint64_t start, size_t length)
+{
+ int flags, prot;
+ void *area;
+ void *host_startaddr = block->host + start;
+
+ assert(block->fd < 0);
+ flags = MAP_FIXED | MAP_ANONYMOUS;
+ flags |= block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE;
+ flags |= block->flags & RAM_NORESERVE ? MAP_NORESERVE : 0;
+ prot = PROT_READ;
+ prot |= block->flags & RAM_READONLY ? 0 : PROT_WRITE;
+ area = mmap(host_startaddr, length, prot, flags, -1, 0);
+ return area != host_startaddr ? -errno : 0;
+}
+
/*
* qemu_ram_remap - remap a single RAM page
*
@@ -2184,9 +2201,7 @@ void qemu_ram_remap(ram_addr_t addr)
{
RAMBlock *block;
uint64_t offset;
- int flags;
- void *area, *vaddr;
- int prot;
+ void *vaddr;
size_t page_size;
RAMBLOCK_FOREACH(block) {
@@ -2201,25 +2216,20 @@ void qemu_ram_remap(ram_addr_t addr)
;
} else if (xen_enabled()) {
abort();
- } else {
- flags = MAP_FIXED;
- flags |= block->flags & RAM_SHARED ?
- MAP_SHARED : MAP_PRIVATE;
- flags |= block->flags & RAM_NORESERVE ? MAP_NORESERVE : 0;
- prot = PROT_READ;
- prot |= block->flags & RAM_READONLY ? 0 : PROT_WRITE;
- if (block->fd >= 0) {
- area = mmap(vaddr, page_size, prot, flags, block->fd,
- offset + block->fd_offset);
- } else {
- flags |= MAP_ANONYMOUS;
- area = mmap(vaddr, page_size, prot, flags, -1, 0);
- }
- if (area != vaddr) {
- error_report("Could not remap RAM %s:%" PRIx64 "+%" PRIx64
- " +%zx", block->idstr, offset,
- block->fd_offset, page_size);
- exit(1);
+ if (ram_block_discard_range(block, offset, page_size) != 0) {
+ /*
+ * Fall back to using mmap() only for anonymous mapping,
+ * as if a backing file is associated we may not be able
+ * to recover the memory in all cases.
+ * So don't take the risk of using only mmap and fail now.
+ */
+ if (block->fd >= 0 ||
+ qemu_ram_remap_mmap(block, offset, page_size) != 0) {
+ error_report("Could not remap RAM %s:%" PRIx64 "+%"
+ PRIx64 " +%zx", block->idstr, offset,
+ block->fd_offset, page_size);
+ exit(1);
+ }
}
memory_try_enable_merging(vaddr, page_size);
qemu_ram_setup_dump(vaddr, page_size);
--
2.43.5
On 27.01.25 22:31, William Roche wrote: > From: William Roche <william.roche@oracle.com> > > Repair poisoned memory location(s), calling ram_block_discard_range(): > punching a hole in the backend file when necessary and regenerating > a usable memory. > If the kernel doesn't support the madvise calls used by this function > and we are dealing with anonymous memory, fall back to remapping the > location(s). > > Signed-off-by: William Roche <william.roche@oracle.com> > --- > system/physmem.c | 54 ++++++++++++++++++++++++++++-------------------- > 1 file changed, 32 insertions(+), 22 deletions(-) > > diff --git a/system/physmem.c b/system/physmem.c > index 3dd2adde73..3dc10ae27b 100644 > --- a/system/physmem.c > +++ b/system/physmem.c > @@ -2167,6 +2167,23 @@ void qemu_ram_free(RAMBlock *block) > } > > #ifndef _WIN32 > +/* Simply remap the given VM memory location from start to start+length */ > +static int qemu_ram_remap_mmap(RAMBlock *block, uint64_t start, size_t length) > +{ > + int flags, prot; > + void *area; > + void *host_startaddr = block->host + start; > + > + assert(block->fd < 0); > + flags = MAP_FIXED | MAP_ANONYMOUS; > + flags |= block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE; > + flags |= block->flags & RAM_NORESERVE ? MAP_NORESERVE : 0; > + prot = PROT_READ; > + prot |= block->flags & RAM_READONLY ? 0 : PROT_WRITE; > + area = mmap(host_startaddr, length, prot, flags, -1, 0); > + return area != host_startaddr ? -errno : 0; > +} > + > /* > * qemu_ram_remap - remap a single RAM page > * > @@ -2184,9 +2201,7 @@ void qemu_ram_remap(ram_addr_t addr) > { > RAMBlock *block; > uint64_t offset; > - int flags; > - void *area, *vaddr; > - int prot; > + void *vaddr; > size_t page_size; > > RAMBLOCK_FOREACH(block) { > @@ -2201,25 +2216,20 @@ void qemu_ram_remap(ram_addr_t addr) > ; > } else if (xen_enabled()) { > abort(); > - } else { > - flags = MAP_FIXED; > - flags |= block->flags & RAM_SHARED ? 
> - MAP_SHARED : MAP_PRIVATE; > - flags |= block->flags & RAM_NORESERVE ? MAP_NORESERVE : 0; > - prot = PROT_READ; > - prot |= block->flags & RAM_READONLY ? 0 : PROT_WRITE; > - if (block->fd >= 0) { > - area = mmap(vaddr, page_size, prot, flags, block->fd, > - offset + block->fd_offset); > - } else { > - flags |= MAP_ANONYMOUS; > - area = mmap(vaddr, page_size, prot, flags, -1, 0); > - } > - if (area != vaddr) { > - error_report("Could not remap RAM %s:%" PRIx64 "+%" PRIx64 > - " +%zx", block->idstr, offset, > - block->fd_offset, page_size); > - exit(1); > + if (ram_block_discard_range(block, offset, page_size) != 0) { > + /* > + * Fall back to using mmap() only for anonymous mapping, > + * as if a backing file is associated we may not be able > + * to recover the memory in all cases. > + * So don't take the risk of using only mmap and fail now. > + */ > + if (block->fd >= 0 || > + qemu_ram_remap_mmap(block, offset, page_size) != 0) { > + error_report("Could not remap RAM %s:%" PRIx64 "+%" > + PRIx64 " +%zx", block->idstr, offset, > + block->fd_offset, page_size); > + exit(1); > + } > } > memory_try_enable_merging(vaddr, page_size); > qemu_ram_setup_dump(vaddr, page_size); Acked-by: David Hildenbrand <david@redhat.com> -- Cheers, David / dhildenb
© 2016 - 2025 Red Hat, Inc.