From: Paolo Bonzini <pbonzini@redhat.com>

The A20 mask is only applied to the final memory access; nested page
tables are always walked with the raw guest-physical address. Unlike
the previous patch, here the masking must be kept, but it was applied
too early.

Cc: qemu-stable@nongnu.org
Fixes: 4a1e9d4d11c ("target/i386: Use atomic operations for pte updates", 2022-10-18)
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
(cherry picked from commit b5a9de3259f4c791bde2faff086dd5737625e41e)
Signed-off-by: Michael Tokarev <mjt@tls.msk.ru>
---
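A minimal, self-contained sketch of the before/after ordering, for
illustration only: translate_fixed(), translate_buggy(), walk_nested()
and a20_mask() below are hypothetical stand-ins, not QEMU code; the
real logic lives in mmu_translate() and x86_get_a20_mask().

#include <stdint.h>
#include <stdio.h>

/* With the A20 gate disabled, physical address bit 20 is forced to 0,
 * reproducing the 8086 1 MiB wrap-around. */
static uint64_t a20_mask(int a20_enabled)
{
    return a20_enabled ? ~(uint64_t)0 : ~((uint64_t)1 << 20);
}

/* Stand-in for the NPT walk; it only reports the address it was
 * given, which is the point of interest here. */
static uint64_t walk_nested(uint64_t gpa)
{
    printf("  nested walk sees %#llx\n", (unsigned long long)gpa);
    return gpa;
}

/* Post-patch ordering: the nested walk sees the raw guest-physical
 * address; A20 is applied only to the final result. */
static uint64_t translate_fixed(uint64_t pte, uint64_t addr,
                                uint64_t page_size, int a20_enabled)
{
    uint64_t paddr = (pte & ~(page_size - 1)) | (addr & (page_size - 1));
    paddr = walk_nested(paddr);
    return paddr & a20_mask(a20_enabled);
}

/* Pre-patch ordering, for contrast: masking first hands the nested
 * walk an already-wrapped address. */
static uint64_t translate_buggy(uint64_t pte, uint64_t addr,
                                uint64_t page_size, int a20_enabled)
{
    uint64_t paddr = (pte & a20_mask(a20_enabled) & ~(page_size - 1))
                   | (addr & (page_size - 1));
    return walk_nested(paddr);
}

int main(void)
{
    /* A 4 KiB page frame right at 1 MiB, so bit 20 is set; A20 off. */
    uint64_t pte = 0x100000, addr = 0x123;

    printf("fixed:\n");
    printf("  result %#llx\n",
           (unsigned long long)translate_fixed(pte, addr, 4096, 0));
    printf("buggy:\n");
    printf("  result %#llx\n",
           (unsigned long long)translate_buggy(pte, addr, 4096, 0));
    return 0;
}

With A20 disabled, both orderings return 0x123, but only the fixed one
lets the nested walk observe the raw guest-physical address 0x100123.
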
diff --git a/target/i386/tcg/sysemu/excp_helper.c b/target/i386/tcg/sysemu/excp_helper.c
index 89df61eec6..e16d3a69d1 100644
--- a/target/i386/tcg/sysemu/excp_helper.c
+++ b/target/i386/tcg/sysemu/excp_helper.c
@@ -134,7 +134,6 @@ static inline bool ptw_setl(const PTETranslate *in, uint32_t old, uint32_t set)
 static bool mmu_translate(CPUX86State *env, const TranslateParams *in,
                           TranslateResult *out, TranslateFault *err)
 {
-    const int32_t a20_mask = x86_get_a20_mask(env);
     const target_ulong addr = in->addr;
     const int pg_mode = in->pg_mode;
     const bool is_user = (in->mmu_idx == MMU_USER_IDX);
@@ -417,10 +416,13 @@ do_check_protect_pse36:
         }
     }
 
-    /* align to page_size */
-    paddr = (pte & a20_mask & PG_ADDRESS_MASK & ~(page_size - 1))
-          | (addr & (page_size - 1));
+    /* merge offset within page */
+    paddr = (pte & PG_ADDRESS_MASK & ~(page_size - 1)) | (addr & (page_size - 1));
 
+    /*
+     * Note that NPT is walked (for both paging structures and final guest
+     * addresses) using the address with the A20 bit set.
+     */
     if (in->ptw_idx == MMU_NESTED_IDX) {
         CPUTLBEntryFull *full;
         int flags, nested_page_size;
@@ -459,7 +461,7 @@ do_check_protect_pse36:
         }
     }
 
-    out->paddr = paddr;
+    out->paddr = paddr & x86_get_a20_mask(env);
     out->prot = prot;
     out->page_size = page_size;
     return true;
--
2.39.2