Use memory operand in CMP instruction to avoid usage of a
temporary register. Use %eax register to hold VMX_spec_ctrl
and use it directly in the follow-up WRMSR.
The new code saves a few bytes by removing two MOV insns, from:
2d: 48 8b 7c 24 10 mov 0x10(%rsp),%rdi
32: 8b bf 48 18 00 00 mov 0x1848(%rdi),%edi
38: 65 8b 35 00 00 00 00 mov %gs:0x0(%rip),%esi
3f: 39 fe cmp %edi,%esi
41: 74 0b je 4e <...>
43: b9 48 00 00 00 mov $0x48,%ecx
48: 31 d2 xor %edx,%edx
4a: 89 f8 mov %edi,%eax
4c: 0f 30 wrmsr
to:
2d: 48 8b 7c 24 10 mov 0x10(%rsp),%rdi
32: 8b 87 48 18 00 00 mov 0x1848(%rdi),%eax
38: 65 3b 05 00 00 00 00 cmp %gs:0x0(%rip),%eax
3f: 74 09 je 4a <...>
41: b9 48 00 00 00 mov $0x48,%ecx
46: 31 d2 xor %edx,%edx
48: 0f 30 wrmsr
No functional change intended.
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
---
arch/x86/kvm/vmx/vmenter.S | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 0a6cf5bff2aa..c65de5de92ab 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -118,13 +118,11 @@ SYM_FUNC_START(__vmx_vcpu_run)
 	 * and vmentry.
 	 */
 	mov 2*WORD_SIZE(%_ASM_SP), %_ASM_DI
-	movl VMX_spec_ctrl(%_ASM_DI), %edi
-	movl PER_CPU_VAR(x86_spec_ctrl_current), %esi
-	cmp %edi, %esi
+	movl VMX_spec_ctrl(%_ASM_DI), %eax
+	cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
 	je .Lspec_ctrl_done
 	mov $MSR_IA32_SPEC_CTRL, %ecx
 	xor %edx, %edx
-	mov %edi, %eax
 	wrmsr
 
 .Lspec_ctrl_done:
--
2.50.1
On Thu, Aug 07, 2025, Uros Bizjak wrote:
> Use memory operand in CMP instruction to avoid usage of a
> temporary register. Use %eax register to hold VMX_spec_ctrl
> and use it directly in the follow-up WRMSR.

[...]

> diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
> index 0a6cf5bff2aa..c65de5de92ab 100644
> --- a/arch/x86/kvm/vmx/vmenter.S
> +++ b/arch/x86/kvm/vmx/vmenter.S
> @@ -118,13 +118,11 @@ SYM_FUNC_START(__vmx_vcpu_run)
>  	 * and vmentry.
>  	 */
>  	mov 2*WORD_SIZE(%_ASM_SP), %_ASM_DI
> -	movl VMX_spec_ctrl(%_ASM_DI), %edi
> -	movl PER_CPU_VAR(x86_spec_ctrl_current), %esi
> -	cmp %edi, %esi
> +	movl VMX_spec_ctrl(%_ASM_DI), %eax
> +	cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax

Huh.  There's a pre-existing bug lurking here, and in the SVM code.  SPEC_CTRL
is an MSR, i.e. a 64-bit value, but the assembly code assumes bits 63:32 are
always zero.

So, while this patch looks good, I'm leaning toward skipping it and going
straight to a fix.

>  	je .Lspec_ctrl_done
>  	mov $MSR_IA32_SPEC_CTRL, %ecx
>  	xor %edx, %edx
> -	mov %edi, %eax
>  	wrmsr
>
>  .Lspec_ctrl_done:
> --
> 2.50.1
On Tue, Aug 19, 2025 at 5:00 PM Sean Christopherson <seanjc@google.com> wrote:
>
> On Thu, Aug 07, 2025, Uros Bizjak wrote:
> > Use memory operand in CMP instruction to avoid usage of a
> > temporary register. Use %eax register to hold VMX_spec_ctrl
> > and use it directly in the follow-up WRMSR.
> >
> > The new code saves a few bytes by removing two MOV insns, from:
> >
> > 2d: 48 8b 7c 24 10 mov 0x10(%rsp),%rdi
> > 32: 8b bf 48 18 00 00 mov 0x1848(%rdi),%edi
> > 38: 65 8b 35 00 00 00 00 mov %gs:0x0(%rip),%esi
> > 3f: 39 fe cmp %edi,%esi
> > 41: 74 0b je 4e <...>
> > 43: b9 48 00 00 00 mov $0x48,%ecx
> > 48: 31 d2 xor %edx,%edx
> > 4a: 89 f8 mov %edi,%eax
> > 4c: 0f 30 wrmsr
> >
> > to:
> >
> > 2d: 48 8b 7c 24 10 mov 0x10(%rsp),%rdi
> > 32: 8b 87 48 18 00 00 mov 0x1848(%rdi),%eax
> > 38: 65 3b 05 00 00 00 00 cmp %gs:0x0(%rip),%eax
> > 3f: 74 09 je 4a <...>
> > 41: b9 48 00 00 00 mov $0x48,%ecx
> > 46: 31 d2 xor %edx,%edx
> > 48: 0f 30 wrmsr
> >
> > No functional change intended.
> >
> > Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
> > Cc: Sean Christopherson <seanjc@google.com>
> > Cc: Paolo Bonzini <pbonzini@redhat.com>
> > Cc: Thomas Gleixner <tglx@linutronix.de>
> > Cc: Ingo Molnar <mingo@kernel.org>
> > Cc: Borislav Petkov <bp@alien8.de>
> > Cc: Dave Hansen <dave.hansen@linux.intel.com>
> > Cc: "H. Peter Anvin" <hpa@zytor.com>
> > ---
> > arch/x86/kvm/vmx/vmenter.S | 6 ++----
> > 1 file changed, 2 insertions(+), 4 deletions(-)
> >
> > diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
> > index 0a6cf5bff2aa..c65de5de92ab 100644
> > --- a/arch/x86/kvm/vmx/vmenter.S
> > +++ b/arch/x86/kvm/vmx/vmenter.S
> > @@ -118,13 +118,11 @@ SYM_FUNC_START(__vmx_vcpu_run)
> > * and vmentry.
> > */
> > mov 2*WORD_SIZE(%_ASM_SP), %_ASM_DI
> > - movl VMX_spec_ctrl(%_ASM_DI), %edi
> > - movl PER_CPU_VAR(x86_spec_ctrl_current), %esi
> > - cmp %edi, %esi
> > + movl VMX_spec_ctrl(%_ASM_DI), %eax
> > + cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
>
> Huh. There's a pre-existing bug lurking here, and in the SVM code. SPEC_CTRL
> is an MSR, i.e. a 64-bit value, but the assembly code assumes bits 63:32 are always
> zero.
But MSBs are zero, MSR is defined in arch/x86/include/msr-index.h as:
#define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
and "movl $..., %eax" zero-extends the value to full 64-bit width.
FWIW, MSR_IA32_SPEC_CTRL is handled in the same way in arch/x86/entry/entry.S:
movl $MSR_IA32_PRED_CMD, %ecx
So, the insn is OK when the MSR index fits in 32 bits. The insn is a bit smaller, too:
movl $0x01, %ecx
movq $0x01, %rcx
assembles to:
0: b9 01 00 00 00 mov $0x1,%ecx
5: 48 c7 c1 01 00 00 00 mov $0x1,%rcx
Rest assured that the assembler checks the immediate when 32-bit
registers are involved, e.g.:
mov.s: Assembler messages:
mov.s:1: Warning: 0xaaaaaaaaaa shortened to 0xaaaaaaaa
Uros.
On Tue, Aug 19, 2025, Uros Bizjak wrote:

[...]

> > Huh.  There's a pre-existing bug lurking here, and in the SVM code.  SPEC_CTRL
> > is an MSR, i.e. a 64-bit value, but the assembly code assumes bits 63:32 are
> > always zero.
>
> But MSBs are zero, MSR is defined in arch/x86/include/msr-index.h as:
>
> #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */
>
> and "movl $..., %eax" zero-extends the value to full 64-bit width.
>
> FWIW, MSR_IA32_SPEC_CTRL is handled in the same way in arch/x86/entry/entry.S:
>
> movl $MSR_IA32_PRED_CMD, %ecx

That's the MSR index, not the value.  I'm pointing out that:

	movl VMX_spec_ctrl(%_ASM_DI), %edi              <== drops vmx->spec_ctrl[63:32]
	movl PER_CPU_VAR(x86_spec_ctrl_current), %esi   <== drops x86_spec_ctrl_current[63:32]
	cmp %edi, %esi                                  <== can get false negatives
	je .Lspec_ctrl_done
	mov $MSR_IA32_SPEC_CTRL, %ecx
	xor %edx, %edx                                  <== can clobber guest value
	mov %edi, %eax
	wrmsr

The bug is _currently_ benign because neither KVM nor the kernel support setting
any of bits 63:32, but it's still a bug that needs to be fixed.
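For reference, the 64-bit-clean shape of this sequence is to compare the full
values and only split them into EDX:EAX for the WRMSR, which is essentially
what the follow-up SVM patch later in this thread does in its CONFIG_X86_64
branch. A minimal sketch, assuming an x86-64 kernel and reusing the VMX symbol
and label names from the patch above:

	mov	VMX_spec_ctrl(%rdi), %rdx			/* full 64-bit guest value */
	cmp	PER_CPU_VAR(x86_spec_ctrl_current), %rdx	/* compare all 64 bits */
	je	.Lspec_ctrl_done
	mov	$MSR_IA32_SPEC_CTRL, %ecx
	movl	%edx, %eax					/* value[31:0]  -> EAX */
	shr	$32, %rdx					/* value[63:32] -> EDX */
	wrmsr
.Lspec_ctrl_done: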
On Tue, Aug 19, 2025 at 8:56 PM Sean Christopherson <seanjc@google.com> wrote:
>
> On Tue, Aug 19, 2025, Uros Bizjak wrote:

[...]

> That's the MSR index, not the value.  I'm pointing out that:

[...]

> The bug is _currently_ benign because neither KVM nor the kernel support setting
> any of bits 63:32, but it's still a bug that needs to be fixed.

Oh, I see it. Let me try to fix it in a new patch.

Thanks,
Uros.
On Wed, Aug 20, 2025 at 8:10 AM Uros Bizjak <ubizjak@gmail.com> wrote:
>
> On Tue, Aug 19, 2025 at 8:56 PM Sean Christopherson <seanjc@google.com> wrote:
> >
> > On Tue, Aug 19, 2025, Uros Bizjak wrote:

[...]

> > The bug is _currently_ benign because neither KVM nor the kernel support setting
> > any of bits 63:32, but it's still a bug that needs to be fixed.
>
> Oh, I see it. Let me try to fix it in a new patch.

VMX patch is at [1]. SVM patch is a bit more involved, because new
32-bit code needs to clobber one additional register. The SVM patch is
attached to this message, but while I compile tested it, I have no
means of testing it with runtime tests. Can you please put it through
your torture tests?

[1] https://lore.kernel.org/lkml/20250820100007.356761-1-ubizjak@gmail.com/

Uros.

diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 235c4af6b692..a1b9f2ac713c 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -52,11 +52,23 @@
 	 * there must not be any returns or indirect branches between this code
 	 * and vmentry.
 	 */
-	movl SVM_spec_ctrl(%_ASM_DI), %eax
-	cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
+#ifdef CONFIG_X86_64
+	mov SVM_spec_ctrl(%rdi), %rdx
+	cmp PER_CPU_VAR(x86_spec_ctrl_current), %rdx
+	je 801b
+	movl %edx, %eax
+	shr $32, %rdx
+#else
+	mov SVM_spec_ctrl(%edi), %eax
+	mov PER_CPU_VAR(x86_spec_ctrl_current), %ecx
+	xor %eax, %ecx
+	mov SVM_spec_ctrl + 4(%edi), %edx
+	mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %esi
+	xor %edx, %esi
+	or %esi, %ecx
 	je 801b
+#endif
 	mov $MSR_IA32_SPEC_CTRL, %ecx
-	xor %edx, %edx
 	wrmsr
 	jmp 801b
 .endm
@@ -80,14 +92,31 @@
 	cmpb $0, \spec_ctrl_intercepted
 	jnz 998f
 	rdmsr
-	movl %eax, SVM_spec_ctrl(%_ASM_DI)
+#ifdef CONFIG_X86_64
+	shl $32, %rdx
+	or %rax, %rdx
+	mov %rdx, SVM_spec_ctrl(%rdi)
 998:
-
 	/* Now restore the host value of the MSR if different from the guest's. */
-	movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
-	cmp SVM_spec_ctrl(%_ASM_DI), %eax
+	mov SVM_spec_ctrl(%rdi), %rdx
+	cmp PER_CPU_VAR(x86_spec_ctrl_current), %rdx
 	je 901b
-	xor %edx, %edx
+	movl %edx, %eax
+	shr $32, %rdx
+#else
+	mov %eax, SVM_spec_ctrl(%edi)
+	mov %edx, SVM_spec_ctrl + 4(%edi)
+998:
+	/* Now restore the host value of the MSR if different from the guest's. */
+	mov SVM_spec_ctrl(%edi), %eax
+	mov PER_CPU_VAR(x86_spec_ctrl_current), %esi
+	xor %eax, %esi
+	mov SVM_spec_ctrl + 4(%edi), %edx
+	mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %edi
+	xor %edx, %edi
+	or %edi, %esi
+	je 901b
+#endif
 	wrmsr
 	jmp 901b
 .endm
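The 32-bit (#else) halves of the patch above compare two 64-bit values without
a 64-bit register by XORing the corresponding halves into scratch registers and
ORing the results: ZF ends up set only if both halves match, while EDX:EAX
still holds the loaded value for a subsequent WRMSR. A standalone sketch of the
idiom, with placeholder operands (val1_*/val2_* are not kernel symbols):

	/* Sets ZF iff the 64-bit values val1 and val2 are equal;
	 * EDX:EAX still contains val1 afterwards.
	 */
	mov	val1_lo, %eax
	mov	val2_lo, %ecx
	xor	%eax, %ecx		/* %ecx == 0 iff the low halves match  */
	mov	val1_hi, %edx
	mov	val2_hi, %esi
	xor	%edx, %esi		/* %esi == 0 iff the high halves match */
	or	%esi, %ecx		/* sets ZF only if both results are 0  */
	je	.Lequal
	/* values differ */
.Lequal: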
On Wed, Aug 20, 2025, Uros Bizjak wrote:
> On Wed, Aug 20, 2025 at 8:10 AM Uros Bizjak <ubizjak@gmail.com> wrote:

...

> VMX patch is at [1]. SVM patch is a bit more involved, because new
> 32-bit code needs to clobber one additional register. The SVM patch is
> attached to this message, but while I compile tested it, I have no
> means of testing it with runtime tests. Can you please put it through
> your torture tests?
>
> [1] https://lore.kernel.org/lkml/20250820100007.356761-1-ubizjak@gmail.com/

Finally got around to testing this (such a pain to test this code).  I hacked a
host to allow any value for MSR_IA32_SPEC_CTRL, and then ran in a VM to verify
KVM could actually save/restore 64-bit values (and that KVM elides the WRMSRs
when possible).

The VMX patch looks good (I'll get it applied shortly).  The SVM version has a
bug, but I've got it working and will post a patch shortly.

>
> Uros.
>
> diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
> index 235c4af6b692..a1b9f2ac713c 100644
> --- a/arch/x86/kvm/svm/vmenter.S
> +++ b/arch/x86/kvm/svm/vmenter.S
> @@ -52,11 +52,23 @@

[...]

> @@ -80,14 +92,31 @@
>  	cmpb $0, \spec_ctrl_intercepted
>  	jnz 998f
>  	rdmsr
> -	movl %eax, SVM_spec_ctrl(%_ASM_DI)
> +#ifdef CONFIG_X86_64
> +	shl $32, %rdx
> +	or %rax, %rdx
> +	mov %rdx, SVM_spec_ctrl(%rdi)
>  998:

To avoid defining the 998 label separately for 64-bit vs. 32-bit, I think it's
worth making two 32-bit writes to svm->spec_ctrl even on 64-bit kernels, e.g.

	cmpb $0, \spec_ctrl_intercepted
	jnz 998f
	rdmsr
	movl %eax, SVM_spec_ctrl(%_ASM_DI)
	movl %edx, SVM_spec_ctrl + 4(%_ASM_DI)
998:
	/* Now restore the host value of the MSR if different from the guest's. */

> -
>  	/* Now restore the host value of the MSR if different from the guest's. */
> -	movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
> -	cmp SVM_spec_ctrl(%_ASM_DI), %eax
> +	mov SVM_spec_ctrl(%rdi), %rdx
> +	cmp PER_CPU_VAR(x86_spec_ctrl_current), %rdx
>  	je 901b
> -	xor %edx, %edx
> +	movl %edx, %eax
> +	shr $32, %rdx
> +#else
> +	mov %eax, SVM_spec_ctrl(%edi)
> +	mov %edx, SVM_spec_ctrl + 4(%edi)
> +998:
> +	/* Now restore the host value of the MSR if different from the guest's. */
> +	mov SVM_spec_ctrl(%edi), %eax
> +	mov PER_CPU_VAR(x86_spec_ctrl_current), %esi
> +	xor %eax, %esi
> +	mov SVM_spec_ctrl + 4(%edi), %edx
> +	mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %edi
> +	xor %edx, %edi
> +	or %edi, %esi
> +	je 901b

This particular flow is backwards, in that it loads the guest value into EDX:EAX
instead of the host values.

> +#endif
>  	wrmsr
>  	jmp 901b
>  .endm
On Thu, Nov 6, 2025 at 2:12 AM Sean Christopherson <seanjc@google.com> wrote:
> > VMX patch is at [1]. SVM patch is a bit more involved, because new
> > 32-bit code needs to clobber one additional register. The SVM patch is
> > attached to this message, but while I compile tested it, I have no
> > means of testing it with runtime tests. Can you please put it through
> > your torture tests?

[...]

> > -
> >  	/* Now restore the host value of the MSR if different from the guest's. */
> > -	movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
> > -	cmp SVM_spec_ctrl(%_ASM_DI), %eax
> > +	mov SVM_spec_ctrl(%rdi), %rdx
> > +	cmp PER_CPU_VAR(x86_spec_ctrl_current), %rdx
> >  	je 901b
> > -	xor %edx, %edx
> > +	movl %edx, %eax
> > +	shr $32, %rdx
> > +#else
> > +	mov %eax, SVM_spec_ctrl(%edi)
> > +	mov %edx, SVM_spec_ctrl + 4(%edi)
> > +998:
> > +	/* Now restore the host value of the MSR if different from the guest's. */
> > +	mov SVM_spec_ctrl(%edi), %eax
> > +	mov PER_CPU_VAR(x86_spec_ctrl_current), %esi
> > +	xor %eax, %esi
> > +	mov SVM_spec_ctrl + 4(%edi), %edx
> > +	mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %edi
> > +	xor %edx, %edi
> > +	or %edi, %esi
> > +	je 901b
>
> This particular flow is backwards, in that it loads the guest value into EDX:EAX
> instead of the host values.

Yeah, sorry about that, I was really not sure which value is where.

Please just swap SVM_spec_ctrl(%edi) with PER_CPU_VAR(x86_spec_ctrl_current)
references (and their offseted variants) in the above code, and it will result
in the correct and optimal code.

Thanks,
Uros.
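Applying the suggested swap to the quoted 32-bit restore path would look
roughly like this (a sketch only, not the patch Sean ultimately posted); after
the compare, EDX:EAX holds the host value, and %ecx is assumed to still contain
MSR_IA32_SPEC_CTRL from earlier in the macro:

	/* Now restore the host value of the MSR if different from the guest's. */
	mov	PER_CPU_VAR(x86_spec_ctrl_current), %eax
	mov	SVM_spec_ctrl(%edi), %esi
	xor	%eax, %esi
	mov	PER_CPU_VAR(x86_spec_ctrl_current + 4), %edx
	mov	SVM_spec_ctrl + 4(%edi), %edi
	xor	%edx, %edi
	or	%edi, %esi
	je	901b			/* host and guest values already match */
	wrmsr				/* write the host value back */
	jmp	901b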