EFER.SVME has to be set, and EFER reserved bits must
be zero.
In addition, the following combinations are all invalid:
* EFER.LMA or EFER.LME is non-zero and the processor does not support LM
* non-zero EFER.LME and CR0.PG and zero CR4.PAE
* non-zero EFER.LME and CR0.PG and zero CR0.PE
* non-zero EFER.LME, CR0.PG, CR4.PAE, CS.L and CS.D
(AMD64 Architecture Programmer's Manual, V2, 15.5)
Signed-off-by: Lara Lazier <laramglazier@gmail.com>
---
target/i386/cpu.h | 5 ++++
target/i386/tcg/sysemu/svm_helper.c | 40 +++++++++++++++++++++++++++++
2 files changed, 45 insertions(+)
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 5d98a4e7c0..0b3057bdb6 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -466,6 +466,11 @@ typedef enum X86Seg {
#define MSR_EFER_SVME (1 << 12)
#define MSR_EFER_FFXSR (1 << 14)
+#define MSR_EFER_RESERVED \
+    (~(target_ulong)(MSR_EFER_SCE | MSR_EFER_LME \
+                     | MSR_EFER_LMA | MSR_EFER_NXE | MSR_EFER_SVME \
+                     | MSR_EFER_FFXSR))
+
#define MSR_STAR 0xc0000081
#define MSR_LSTAR 0xc0000082
#define MSR_CSTAR 0xc0000083
diff --git a/target/i386/tcg/sysemu/svm_helper.c b/target/i386/tcg/sysemu/svm_helper.c
index 00618cff23..b6df36d4e5 100644
--- a/target/i386/tcg/sysemu/svm_helper.c
+++ b/target/i386/tcg/sysemu/svm_helper.c
@@ -65,6 +65,42 @@ static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
sc->base, sc->limit, sc->flags);
}
+static inline bool is_efer_invalid_state(CPUX86State *env)
+{
+ if (!(env->efer & MSR_EFER_SVME)) {
+ return true;
+ }
+
+ if (env->efer & MSR_EFER_RESERVED) {
+ return true;
+ }
+
+ if ((env->efer & (MSR_EFER_LMA | MSR_EFER_LME)) &&
+ !(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
+ return true;
+ }
+
+ if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
+ && !(env->cr[4] & CR4_PAE_MASK)) {
+ return true;
+ }
+
+ if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
+ && !(env->cr[0] & CR0_PE_MASK)) {
+ return true;
+ }
+
+ if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
+ && (env->cr[4] & CR4_PAE_MASK)
+ && (env->segs[R_CS].flags & DESC_L_MASK)
+ && (env->segs[R_CS].flags & DESC_B_MASK)) {
+ return true;
+ }
+
+ return false;
+}
+
+
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
CPUState *cs = env_cpu(env);
@@ -278,6 +314,10 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
}
#endif
+ if (is_efer_invalid_state(env)) {
+ cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
+ }
+
switch (x86_ldub_phys(cs,
env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
case TLB_CONTROL_DO_NOTHING:
--
2.25.1
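
For reference, MSR_EFER_RESERVED above masks out every architecturally
defined EFER bit (SCE, LME, LMA, NXE, SVME, FFXSR), so any other set bit
is reserved. A minimal standalone sketch of the same check, outside of
QEMU (the bit positions follow the MSR_EFER_* values in
target/i386/cpu.h; the helper name and test values are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* EFER bit positions, matching QEMU's MSR_EFER_* values. */
#define EFER_SCE   (1ULL << 0)
#define EFER_LME   (1ULL << 8)
#define EFER_LMA   (1ULL << 10)
#define EFER_NXE   (1ULL << 11)
#define EFER_SVME  (1ULL << 12)
#define EFER_FFXSR (1ULL << 14)

/* Every bit outside the defined ones is reserved and must be zero. */
#define EFER_RESERVED \
    (~(uint64_t)(EFER_SCE | EFER_LME | EFER_LMA | EFER_NXE \
                 | EFER_SVME | EFER_FFXSR))

static bool efer_reserved_bits_set(uint64_t efer)
{
    return (efer & EFER_RESERVED) != 0;
}

int main(void)
{
    uint64_t efer = EFER_SVME | EFER_LME;            /* only defined bits */
    printf("%d\n", efer_reserved_bits_set(efer));    /* prints 0 */

    efer |= 1ULL << 1;                               /* bit 1 is reserved */
    printf("%d\n", efer_reserved_bits_set(efer));    /* prints 1 */
    return 0;
}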
On 21/07/21 17:26, Lara Lazier wrote:
> [...]
Queued all, thanks. However, I modified the CR4 one to use a static
inline function instead of a macro (the KVM code you based it on reuses
the code for both the host and the guest CPUID, but this is not the case
in QEMU).
Paolo
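
For context, the CR4 check Paolo mentions is a separate patch in the
same series; the reshape he describes looks roughly like the sketch
below. This is only an illustration of macro vs. static inline: the
helper name cr4_reserved_bits and the exact bits handled are
assumptions, not the queued code.

/* Macro form, as in the KVM code the series was based on: the CPUID
 * accessor is a parameter so one body can serve host and guest. */
#define __CR4_RESERVED_BITS(has_feature)                     \
    (~(uint64_t)(CR4_PAE_MASK | CR4_PGE_MASK                 \
        | ((has_feature(FSGSBASE)) ? CR4_FSGSBASE_MASK : 0)))

/* Function form, which is enough for QEMU since only the guest
 * CPUID (env->features[]) ever needs to be consulted. */
static inline uint64_t cr4_reserved_bits(CPUX86State *env)
{
    uint64_t reserved = ~(uint64_t)(CR4_PAE_MASK | CR4_PGE_MASK);
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        reserved &= ~(uint64_t)CR4_FSGSBASE_MASK;
    }
    return reserved;
}

A caller would then reject an attempted CR4 write (or fail the VMRUN
consistency check) when new_cr4 & cr4_reserved_bits(env) is non-zero.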