From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>
Conditionalize use of the mapcache. This is in preparation
to optionally disable the mapcache at runtime.
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
---
system/physmem.c | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/system/physmem.c b/system/physmem.c
index e5ff26acec..64e6d50f8f 100644
--- a/system/physmem.c
+++ b/system/physmem.c
@@ -578,7 +578,8 @@ MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
is_write, true, &as, attrs);
mr = section.mr;
- if (xen_enabled() && memory_access_is_direct(mr, is_write, attrs)) {
+ if (xen_map_cache_enabled() &&
+ memory_access_is_direct(mr, is_write, attrs)) {
hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
*plen = MIN(page, *plen);
}
@@ -2577,7 +2578,7 @@ static void reclaim_ramblock(RAMBlock *block)
{
if (block->flags & RAM_PREALLOC) {
;
- } else if (xen_enabled()) {
+ } else if (xen_map_cache_enabled()) {
xen_invalidate_map_cache_entry(block->host);
#if !defined(_WIN32) && !defined(EMSCRIPTEN)
} else if (block->fd >= 0) {
@@ -2736,7 +2737,7 @@ static void *qemu_ram_ptr_length(RAMBlock *block, ram_addr_t addr,
len = *size;
}
- if (xen_enabled() && block->host == NULL) {
+ if (xen_map_cache_enabled() && block->host == NULL) {
/* We need to check if the requested address is in the RAM
* because we don't want to map the entire memory in QEMU.
* In that case just map the requested area.
@@ -2785,7 +2786,7 @@ RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
RAMBlock *block;
uint8_t *host = ptr;
- if (xen_enabled()) {
+ if (xen_map_cache_enabled()) {
ram_addr_t ram_addr;
RCU_READ_LOCK_GUARD();
ram_addr = xen_ram_addr_from_mapcache(ptr);
@@ -3787,7 +3788,7 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
if (is_write) {
invalidate_and_set_dirty(mr, addr1, access_len);
}
- if (xen_enabled()) {
+ if (xen_map_cache_enabled()) {
xen_invalidate_map_cache_entry(buffer);
}
memory_region_unref(mr);
@@ -3898,7 +3899,7 @@ void address_space_cache_destroy(MemoryRegionCache *cache)
return;
}
- if (xen_enabled()) {
+ if (xen_map_cache_enabled()) {
xen_invalidate_map_cache_entry(cache->ptr);
}
memory_region_unref(cache->mrs.mr);
--
2.43.0
On Wed, 4 Mar 2026, Edgar E. Iglesias wrote:
> From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>
>
> Conditionalize use of the mapcache. This is in preparation
> to optionally disable the mapcache at runtime.
>
> Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
> ---
> system/physmem.c | 13 +++++++------
> 1 file changed, 7 insertions(+), 6 deletions(-)
>
> diff --git a/system/physmem.c b/system/physmem.c
> index e5ff26acec..64e6d50f8f 100644
> --- a/system/physmem.c
> +++ b/system/physmem.c
> @@ -578,7 +578,8 @@ MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
> is_write, true, &as, attrs);
> mr = section.mr;
>
> - if (xen_enabled() && memory_access_is_direct(mr, is_write, attrs)) {
> + if (xen_map_cache_enabled() &&
> + memory_access_is_direct(mr, is_write, attrs)) {
> hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
> *plen = MIN(page, *plen);
> }
All the other changes make sense. For this one, the code inside the if
check is not strictly related to the mapcache. Are you sure it should be
changed?
> @@ -2577,7 +2578,7 @@ static void reclaim_ramblock(RAMBlock *block)
> {
> if (block->flags & RAM_PREALLOC) {
> ;
> - } else if (xen_enabled()) {
> + } else if (xen_map_cache_enabled()) {
> xen_invalidate_map_cache_entry(block->host);
> #if !defined(_WIN32) && !defined(EMSCRIPTEN)
> } else if (block->fd >= 0) {
> @@ -2736,7 +2737,7 @@ static void *qemu_ram_ptr_length(RAMBlock *block, ram_addr_t addr,
> len = *size;
> }
>
> - if (xen_enabled() && block->host == NULL) {
> + if (xen_map_cache_enabled() && block->host == NULL) {
> /* We need to check if the requested address is in the RAM
> * because we don't want to map the entire memory in QEMU.
> * In that case just map the requested area.
> @@ -2785,7 +2786,7 @@ RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
> RAMBlock *block;
> uint8_t *host = ptr;
>
> - if (xen_enabled()) {
> + if (xen_map_cache_enabled()) {
> ram_addr_t ram_addr;
> RCU_READ_LOCK_GUARD();
> ram_addr = xen_ram_addr_from_mapcache(ptr);
> @@ -3787,7 +3788,7 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
> if (is_write) {
> invalidate_and_set_dirty(mr, addr1, access_len);
> }
> - if (xen_enabled()) {
> + if (xen_map_cache_enabled()) {
> xen_invalidate_map_cache_entry(buffer);
> }
> memory_region_unref(mr);
> @@ -3898,7 +3899,7 @@ void address_space_cache_destroy(MemoryRegionCache *cache)
> return;
> }
>
> - if (xen_enabled()) {
> + if (xen_map_cache_enabled()) {
> xen_invalidate_map_cache_entry(cache->ptr);
> }
> memory_region_unref(cache->mrs.mr);
> --
> 2.43.0
>
On Wed, Mar 04, 2026 at 04:07:23PM -0800, Stefano Stabellini wrote:
> On Wed, 4 Mar 2026, Edgar E. Iglesias wrote:
> > From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>
> >
> > Conditionalize use of the mapcache. This is in preparation
> > to optionally disable the mapcache at runtime.
> >
> > Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
> > ---
> > system/physmem.c | 13 +++++++------
> > 1 file changed, 7 insertions(+), 6 deletions(-)
> >
> > diff --git a/system/physmem.c b/system/physmem.c
> > index e5ff26acec..64e6d50f8f 100644
> > --- a/system/physmem.c
> > +++ b/system/physmem.c
> > @@ -578,7 +578,8 @@ MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
> > is_write, true, &as, attrs);
> > mr = section.mr;
> >
> > - if (xen_enabled() && memory_access_is_direct(mr, is_write, attrs)) {
> > + if (xen_map_cache_enabled() &&
> > + memory_access_is_direct(mr, is_write, attrs)) {
> > hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
> > *plen = MIN(page, *plen);
> > }
>
> All the other changes make sense. For this one, the code inside the if
> check is not strictly related to the mapcache. Are you sure it should be
> changed?
Hi, yes, when the mapcache is on, we limit translations to the current page
because the next page might not be mapped/locked or could be in a different
mapcache bucket with a different virtual address. When the mapcache is off,
guest RAM is fully mapped (same as non-Xen), so we can skip capping plen and
allow a larger mapping.
Perhaps we should add a comment to clarify?
Cheers,
Edgar
>
>
> > @@ -2577,7 +2578,7 @@ static void reclaim_ramblock(RAMBlock *block)
> > {
> > if (block->flags & RAM_PREALLOC) {
> > ;
> > - } else if (xen_enabled()) {
> > + } else if (xen_map_cache_enabled()) {
> > xen_invalidate_map_cache_entry(block->host);
> > #if !defined(_WIN32) && !defined(EMSCRIPTEN)
> > } else if (block->fd >= 0) {
> > @@ -2736,7 +2737,7 @@ static void *qemu_ram_ptr_length(RAMBlock *block, ram_addr_t addr,
> > len = *size;
> > }
> >
> > - if (xen_enabled() && block->host == NULL) {
> > + if (xen_map_cache_enabled() && block->host == NULL) {
> > /* We need to check if the requested address is in the RAM
> > * because we don't want to map the entire memory in QEMU.
> > * In that case just map the requested area.
> > @@ -2785,7 +2786,7 @@ RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
> > RAMBlock *block;
> > uint8_t *host = ptr;
> >
> > - if (xen_enabled()) {
> > + if (xen_map_cache_enabled()) {
> > ram_addr_t ram_addr;
> > RCU_READ_LOCK_GUARD();
> > ram_addr = xen_ram_addr_from_mapcache(ptr);
> > @@ -3787,7 +3788,7 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
> > if (is_write) {
> > invalidate_and_set_dirty(mr, addr1, access_len);
> > }
> > - if (xen_enabled()) {
> > + if (xen_map_cache_enabled()) {
> > xen_invalidate_map_cache_entry(buffer);
> > }
> > memory_region_unref(mr);
> > @@ -3898,7 +3899,7 @@ void address_space_cache_destroy(MemoryRegionCache *cache)
> > return;
> > }
> >
> > - if (xen_enabled()) {
> > + if (xen_map_cache_enabled()) {
> > xen_invalidate_map_cache_entry(cache->ptr);
> > }
> > memory_region_unref(cache->mrs.mr);
> > --
> > 2.43.0
> >
On Thu, 5 Mar 2026, Edgar E. Iglesias wrote:
> On Wed, Mar 04, 2026 at 04:07:23PM -0800, Stefano Stabellini wrote:
> > On Wed, 4 Mar 2026, Edgar E. Iglesias wrote:
> > > From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>
> > >
> > > Conditionalize use of the mapcache. This is in preparation
> > > to optionally disable the mapcache at runtime.
> > >
> > > Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
> > > ---
> > > system/physmem.c | 13 +++++++------
> > > 1 file changed, 7 insertions(+), 6 deletions(-)
> > >
> > > diff --git a/system/physmem.c b/system/physmem.c
> > > index e5ff26acec..64e6d50f8f 100644
> > > --- a/system/physmem.c
> > > +++ b/system/physmem.c
> > > @@ -578,7 +578,8 @@ MemoryRegion *flatview_translate(FlatView *fv, hwaddr addr, hwaddr *xlat,
> > > is_write, true, &as, attrs);
> > > mr = section.mr;
> > >
> > > - if (xen_enabled() && memory_access_is_direct(mr, is_write, attrs)) {
> > > + if (xen_map_cache_enabled() &&
> > > + memory_access_is_direct(mr, is_write, attrs)) {
> > > hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
> > > *plen = MIN(page, *plen);
> > > }
> >
> > All the other changes make sense. For this one, the code inside the if
> > check is not strictly related to the mapcache. Are you sure it should be
> > changed?
>
> Hi, yes, when the mapcache is on, we limit translations to the current page
> because the next page might not be mapped/locked or could be in a different
> mapcache bucket with a different virtual address. When the mapcache is off,
> guest RAM is fully mapped (same as non-Xen), so we can skip capping plen and
> allow a larger mapping.
>
> Perhaps we should add a comment to clarify?
Yes please! Add a one-line in-code comment, and with that:
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
On Wed, Mar 04, 2026 at 02:52:19AM +0100, Edgar E. Iglesias wrote:
> From: "Edgar E. Iglesias" <edgar.iglesias@amd.com>
>
> Conditionalize use of the mapcache. This is in preparation
> to optionally disable the mapcache at runtime.
>
> Signed-off-by: Edgar E. Iglesias <edgar.iglesias@amd.com>
Acked-by: Peter Xu <peterx@redhat.com>
--
Peter Xu
© 2016 - 2026 Red Hat, Inc.