Accesses from a 32-bit environment (32-bit code segment for instruction
accesses, EFER.LMA==0 for processor accesses) have to mask away the
upper 32 bits of the address. While a bit wasteful, the easiest way
to do so is to use separate MMU indexes. These days, QEMU anyway is
compiled with a fixed value for NB_MMU_MODES. Split MMU_USER_IDX,
MMU_KSMAP_IDX and MMU_KNOSMAP_IDX in two.
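For illustration, the new layout pairs each 64-bit index (even) with its
32-bit twin (odd), so the low bit selects the address width and masking
it off recovers the pair; this is what the is_mmu_index_smap() and
is_mmu_index_user() checks below rely on. A width test could look like
the following sketch (is_mmu_index_32 is an illustrative name, not a
helper added by this patch; assumes the usual QEMU headers):

    /* Sketch only: bit 0 selects the 32-bit twin of a paired index.
     * MMU_PHYS_IDX and MMU_NESTED_IDX sit above the pairs and must
     * not be passed here. */
    static inline bool is_mmu_index_32(int mmu_index)
    {
        assert(mmu_index < MMU_PHYS_IDX);
        return mmu_index & 1;
    }

A 32-bit index then tells the translation code to truncate the linear
address to 32 bits before walking the page tables.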
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
target/i386/cpu.h | 34 ++++++++++++++++++++--------
target/i386/cpu.c | 9 ++++++---
target/i386/tcg/sysemu/excp_helper.c | 3 ++-
3 files changed, 32 insertions(+), 14 deletions(-)
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 8c271ca62e5..ee4ad372021 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -2299,27 +2299,41 @@ uint64_t cpu_get_tsc(CPUX86State *env);
#define cpu_list x86_cpu_list
/* MMU modes definitions */
-#define MMU_KSMAP_IDX 0
-#define MMU_USER_IDX 1
-#define MMU_KNOSMAP_IDX 2
-#define MMU_NESTED_IDX 3
-#define MMU_PHYS_IDX 4
+#define MMU_KSMAP64_IDX 0
+#define MMU_KSMAP32_IDX 1
+#define MMU_USER64_IDX 2
+#define MMU_USER32_IDX 3
+#define MMU_KNOSMAP64_IDX 4
+#define MMU_KNOSMAP32_IDX 5
+#define MMU_PHYS_IDX 6
+#define MMU_NESTED_IDX 7
+
+#ifdef CONFIG_USER_ONLY
+#ifdef TARGET_X86_64
+#define MMU_USER_IDX MMU_USER64_IDX
+#else
+#define MMU_USER_IDX MMU_USER32_IDX
+#endif
+#endif
static inline bool is_mmu_index_smap(int mmu_index)
{
- return mmu_index == MMU_KSMAP_IDX;
+ return (mmu_index & ~1) == MMU_KSMAP64_IDX;
}
static inline bool is_mmu_index_user(int mmu_index)
{
- return mmu_index == MMU_USER_IDX;
+ return (mmu_index & ~1) == MMU_USER64_IDX;
}
static inline int cpu_mmu_index_kernel(CPUX86State *env)
{
- return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX :
- ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK))
- ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
+ int mmu_index_32 = (env->hflags & HF_LMA_MASK) ? 0 : 1;
+ int mmu_index_base =
+ !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX :
+ ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK)) ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX;
+
+ return mmu_index_base + mmu_index_32;
}
#define CC_DST (env->cc_dst)
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 7f908236767..647371198c7 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -7732,13 +7732,16 @@ static bool x86_cpu_has_work(CPUState *cs)
return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
}
static int x86_cpu_mmu_index(CPUState *cs, bool ifetch)
{
CPUX86State *env = cpu_env(cs);
+ int mmu_index_32 = (env->hflags & HF_CS64_MASK) ? 0 : 1;
+ int mmu_index_base =
+ (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER64_IDX :
+ !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX :
+ (env->eflags & AC_MASK) ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX;
- return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
- (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK))
- ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
+ return mmu_index_base + mmu_index_32;
}
static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
diff --git a/target/i386/tcg/sysemu/excp_helper.c b/target/i386/tcg/sysemu/excp_helper.c
index a0d5ce39300..b2c525e1a92 100644
--- a/target/i386/tcg/sysemu/excp_helper.c
+++ b/target/i386/tcg/sysemu/excp_helper.c
@@ -545,7 +545,8 @@ static bool get_physical_address(CPUX86State *env, vaddr addr,
if (likely(use_stage2)) {
in.cr3 = env->nested_cr3;
in.pg_mode = env->nested_pg_mode;
- in.mmu_idx = MMU_USER_IDX;
+ in.mmu_idx =
+ env->nested_pg_mode & PG_MODE_LMA ? MMU_USER64_IDX : MMU_USER32_IDX;
in.ptw_idx = MMU_PHYS_IDX;
if (!mmu_translate(env, &in, out, err)) {
--
2.43.0
On Fri, Feb 23, 2024 at 02:09:45PM +0100, Paolo Bonzini wrote:
> Date: Fri, 23 Feb 2024 14:09:45 +0100
> From: Paolo Bonzini <pbonzini@redhat.com>
> Subject: [PATCH v2 4/7] target/i386: use separate MMU indexes for 32-bit
> accesses
> X-Mailer: git-send-email 2.43.0
>
> Accesses from a 32-bit environment (32-bit code segment for instruction
> accesses, EFER.LMA==0 for processor accesses) have to mask away the
> upper 32 bits of the address. While a bit wasteful, the easiest way
> to do so is to use separate MMU indexes. These days, QEMU anyway is
> compiled with a fixed value for NB_MMU_MODES. Split MMU_USER_IDX,
> MMU_KSMAP_IDX and MMU_KNOSMAP_IDX in two.
Maybe s/in/into/ ?
>
> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
> ---
> target/i386/cpu.h | 34 ++++++++++++++++++++--------
> target/i386/cpu.c | 9 ++++++---
> target/i386/tcg/sysemu/excp_helper.c | 3 ++-
> 3 files changed, 32 insertions(+), 14 deletions(-)
>
[snip]
>
> static inline int cpu_mmu_index_kernel(CPUX86State *env)
> {
> - return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX :
> - ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK))
> - ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
> + int mmu_index_32 = (env->hflags & HF_LMA_MASK) ? 0 : 1;
> + int mmu_index_base =
> + !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX :
> + ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK)) ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX;
Change the line?
On Mon, Feb 26, 2024 at 9:22 AM Zhao Liu <zhao1.liu@intel.com> wrote:
> On Fri, Feb 23, 2024 at 02:09:45PM +0100, Paolo Bonzini wrote:
> > Accesses from a 32-bit environment (32-bit code segment for instruction
> > accesses, EFER.LMA==0 for processor accesses) have to mask away the
> > upper 32 bits of the address. While a bit wasteful, the easiest way
> > to do so is to use separate MMU indexes. These days, QEMU anyway is
> > compiled with a fixed value for NB_MMU_MODES. Split MMU_USER_IDX,
> > MMU_KSMAP_IDX and MMU_KNOSMAP_IDX in two.
>
> Maybe s/in/into/ ?
Both are acceptable grammar.
> > static inline int cpu_mmu_index_kernel(CPUX86State *env)
> > {
> > - return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX :
> > - ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK))
> > - ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
> > + int mmu_index_32 = (env->hflags & HF_LMA_MASK) ? 0 : 1;
> > + int mmu_index_base =
> > + !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX :
> > + ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK)) ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX;
> Change the line?
It's reformatted but the logic is the same.
- if !SMAP -> MMU_KNOSMAP_IDX
- if CPL < 3 && EFLAGS.AC -> MMU_KNOSMAP_IDX
- else MMU_KSMAP_IDX
The only change is adding the "64" suffix, which is then changed to
32-bit if needed via mmu_index_32.
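Concretely, the whole selection can be restated as a standalone sketch
(hypothetical function and parameter names, assuming the macros from
the patch):

    static int mmu_index_kernel_sketch(bool smap, bool ac, int cpl, bool lma)
    {
        int base = !smap           ? MMU_KNOSMAP64_IDX :
                   (cpl < 3 && ac) ? MMU_KNOSMAP64_IDX :
                                     MMU_KSMAP64_IDX;
        return base + (lma ? 0 : 1);   /* the odd twin is the 32-bit variant */
    }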
Paolo
Hi Paolo,
On Mon, Feb 26, 2024 at 10:55:14AM +0100, Paolo Bonzini wrote:
> Date: Mon, 26 Feb 2024 10:55:14 +0100
> From: Paolo Bonzini <pbonzini@redhat.com>
> Subject: Re: [PATCH v2 4/7] target/i386: use separate MMU indexes for
> 32-bit accesses
>
> On Mon, Feb 26, 2024 at 9:22 AM Zhao Liu <zhao1.liu@intel.com> wrote:
> > On Fri, Feb 23, 2024 at 02:09:45PM +0100, Paolo Bonzini wrote:
> > > Accesses from a 32-bit environment (32-bit code segment for instruction
> > > accesses, EFER.LMA==0 for processor accesses) have to mask away the
> > > upper 32 bits of the address. While a bit wasteful, the easiest way
> > > to do so is to use separate MMU indexes. These days, QEMU anyway is
> > > compiled with a fixed value for NB_MMU_MODES. Split MMU_USER_IDX,
> > > MMU_KSMAP_IDX and MMU_KNOSMAP_IDX in two.
> >
> > Maybe s/in/into/ ?
>
> Both are acceptable grammar.
>
> > > static inline int cpu_mmu_index_kernel(CPUX86State *env)
> > > {
> > > - return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX :
> > > - ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK))
> > > - ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
> > > + int mmu_index_32 = (env->hflags & HF_LMA_MASK) ? 0 : 1;
> > > + int mmu_index_base =
> > > + !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX :
> > > + ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK)) ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX;
>
> > Change the line?
>
> It's reformatted but the logic is the same.
>
> - if !SMAP -> MMU_KNOSMAP_IDX
>
> - if CPL < 3 && EFLAGS.AC -> MMU_KNOSMAP_IDX
>
> - else MMU_KSMAP_IDX
>
> The only change is adding the "64" suffix, which is then changed to
> 32-bit if needed via mmu_index_32.
>
Thanks for the explanation, I get your point.
Similarly, I also understand your change in x86_cpu_mmu_index().
LGTM, please allow me to add my review tag:
Reviewed-by: Zhao Liu <zhao1.liu@intel.com>