From: Uros Bizjak <ubizjak@gmail.com>
SPEC_CTRL is an MSR, i.e. a 64-bit value, but the VMRUN assembly code
assumes bits 63:32 are always zero. The bug is _currently_ benign because
neither KVM nor the kernel supports setting any of bits 63:32, but it's
still a bug that needs to be fixed.
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Suggested-by: Sean Christopherson <seanjc@google.com>
Co-developed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
arch/x86/kvm/svm/vmenter.S | 44 +++++++++++++++++++++++++++++++-------
1 file changed, 36 insertions(+), 8 deletions(-)
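
A note on the WRMSR interface for anyone following along: WRMSR consumes
the MSR index from ECX and the 64-bit value from EDX:EAX, which is why the
old "xor %edx, %edx" unconditionally zeroed bits 63:32 of the written
value. A rough user-space C sketch of that split (wrmsr_sketch() is a
hypothetical stand-in for the instruction, not a kernel helper):

  #include <stdint.h>
  #include <stdio.h>

  /* Hypothetical helper mirroring WRMSR's register interface. */
  static void wrmsr_sketch(uint32_t msr, uint64_t val)
  {
          uint32_t lo = (uint32_t)val;          /* EAX: bits 31:0  */
          uint32_t hi = (uint32_t)(val >> 32);  /* EDX: bits 63:32 */

          /* "xor %edx, %edx" amounts to forcing hi = 0 here, which
           * silently drops the upper half of a 64-bit MSR value. */
          printf("msr %#x: edx=%#x eax=%#x\n", msr, hi, lo);
  }

  int main(void)
  {
          /* 0x48 is MSR_IA32_SPEC_CTRL; any bit above 31 would be
           * lost if hi were zeroed. */
          wrmsr_sketch(0x48, 0x100000001ULL);
          return 0;
  }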
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 235c4af6b692..53f45f5b611f 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -52,11 +52,23 @@
* there must not be any returns or indirect branches between this code
* and vmentry.
*/
- movl SVM_spec_ctrl(%_ASM_DI), %eax
- cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
+#ifdef CONFIG_X86_64
+ mov SVM_spec_ctrl(%rdi), %rdx
+ cmp PER_CPU_VAR(x86_spec_ctrl_current), %rdx
je 801b
+ movl %edx, %eax
+ shr $32, %rdx
+#else
+ mov SVM_spec_ctrl(%edi), %eax
+ mov PER_CPU_VAR(x86_spec_ctrl_current), %ecx
+ xor %eax, %ecx
+ mov SVM_spec_ctrl + 4(%edi), %edx
+ mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %esi
+ xor %edx, %esi
+ or %esi, %ecx
+ je 801b
+#endif
mov $MSR_IA32_SPEC_CTRL, %ecx
- xor %edx, %edx
wrmsr
jmp 801b
.endm
@@ -81,13 +93,26 @@
jnz 998f
rdmsr
movl %eax, SVM_spec_ctrl(%_ASM_DI)
+ movl %edx, SVM_spec_ctrl + 4(%_ASM_DI)
998:
-
/* Now restore the host value of the MSR if different from the guest's. */
- movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
- cmp SVM_spec_ctrl(%_ASM_DI), %eax
+#ifdef CONFIG_X86_64
+ mov SVM_spec_ctrl(%rdi), %rdx
+ cmp PER_CPU_VAR(x86_spec_ctrl_current), %rdx
je 901b
- xor %edx, %edx
+ mov PER_CPU_VAR(x86_spec_ctrl_current), %rdx
+ movl %edx, %eax
+ shr $32, %rdx
+#else
+ mov SVM_spec_ctrl(%edi), %esi
+ mov PER_CPU_VAR(x86_spec_ctrl_current), %eax
+ xor %eax, %esi
+ mov SVM_spec_ctrl + 4(%edi), %edi
+ mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %edx
+ xor %edx, %edi
+ or %edi, %esi
+ je 901b
+#endif
wrmsr
jmp 901b
.endm
@@ -211,7 +236,10 @@ SYM_FUNC_START(__svm_vcpu_run)
/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT
- /* Clobbers RAX, RCX, RDX. */
+ /*
+ * Clobbers RAX, RCX, RDX (and ESI/EDI on 32-bit), consumes RDI
+ * (@svm) and RSP (pointer to @spec_ctrl_intercepted).
+ */
RESTORE_HOST_SPEC_CTRL
/*
base-commit: a996dd2a5e1ec54dcf7d7b93915ea3f97e14e68a
--
2.51.2.1041.gc1ab5b90ca-goog
On Thu, Nov 6, 2025 at 2:13 AM Sean Christopherson <seanjc@google.com> wrote:
>
> From: Uros Bizjak <ubizjak@gmail.com>
>
> SPEC_CTRL is an MSR, i.e. a 64-bit value, but the VMRUN assembly code
> assumes bits 63:32 are always zero. The bug is _currently_ benign because
> neither KVM nor the kernel supports setting any of bits 63:32, but it's
> still a bug that needs to be fixed.
>
> Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
> Suggested-by: Sean Christopherson <seanjc@google.com>
> Co-developed-by: Sean Christopherson <seanjc@google.com>
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
>  arch/x86/kvm/svm/vmenter.S | 44 +++++++++++++++++++++++++++++++-------
>  1 file changed, 36 insertions(+), 8 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
> index 235c4af6b692..53f45f5b611f 100644
> --- a/arch/x86/kvm/svm/vmenter.S
> +++ b/arch/x86/kvm/svm/vmenter.S
> @@ -52,11 +52,23 @@
> * there must not be any returns or indirect branches between this code
> * and vmentry.
> */
> - movl SVM_spec_ctrl(%_ASM_DI), %eax
> - cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
> +#ifdef CONFIG_X86_64
> + mov SVM_spec_ctrl(%rdi), %rdx
> + cmp PER_CPU_VAR(x86_spec_ctrl_current), %rdx
> je 801b
> + movl %edx, %eax
> + shr $32, %rdx
> +#else
> + mov SVM_spec_ctrl(%edi), %eax
> + mov PER_CPU_VAR(x86_spec_ctrl_current), %ecx
> + xor %eax, %ecx
> + mov SVM_spec_ctrl + 4(%edi), %edx
> + mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %esi
> + xor %edx, %esi
> + or %esi, %ecx
> + je 801b
> +#endif
> mov $MSR_IA32_SPEC_CTRL, %ecx
> - xor %edx, %edx
> wrmsr
> jmp 801b
> .endm
> @@ -81,13 +93,26 @@
> jnz 998f
> rdmsr
> movl %eax, SVM_spec_ctrl(%_ASM_DI)
> + movl %edx, SVM_spec_ctrl + 4(%_ASM_DI)
> 998:
> -
> /* Now restore the host value of the MSR if different from the guest's. */
> - movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
> - cmp SVM_spec_ctrl(%_ASM_DI), %eax
> +#ifdef CONFIG_X86_64
> + mov SVM_spec_ctrl(%rdi), %rdx
> + cmp PER_CPU_VAR(x86_spec_ctrl_current), %rdx
> je 901b
> - xor %edx, %edx
> + mov PER_CPU_VAR(x86_spec_ctrl_current), %rdx
> + movl %edx, %eax
> + shr $32, %rdx

The above code can be written as:

    mov PER_CPU_VAR(x86_spec_ctrl_current), %rdx
    cmp SVM_spec_ctrl(%rdi), %rdx
    je 901b
    movl %edx, %eax
    shr $32, %rdx

The improved code will save a memory read from x86_spec_ctrl_current.

> +#else
> + mov SVM_spec_ctrl(%edi), %esi
> + mov PER_CPU_VAR(x86_spec_ctrl_current), %eax

Can the above two instructions be swapped, just to be consistent with
x86_64 code?

> + xor %eax, %esi

> + mov SVM_spec_ctrl + 4(%edi), %edi
> + mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %edx

... and the above two insns.

Uros.
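
The 32-bit sequence under discussion implements a 64-bit equality test
with only 32-bit registers: XOR each half against its counterpart, OR the
results, and a zero result means the values match. A rough C equivalent
of the idiom (a sketch only; u64_eq_32bit() is hypothetical, not kernel
code):

  #include <assert.h>
  #include <stdint.h>

  /* Mirrors the xor/xor/or idiom: the OR of the XORed halves is zero
   * iff both the low and high halves are equal. */
  static int u64_eq_32bit(uint32_t a_lo, uint32_t a_hi,
                          uint32_t b_lo, uint32_t b_hi)
  {
          return ((a_lo ^ b_lo) | (a_hi ^ b_hi)) == 0;
  }

  int main(void)
  {
          assert(u64_eq_32bit(0x1, 0x0, 0x1, 0x0));   /* equal        */
          assert(!u64_eq_32bit(0x1, 0x0, 0x1, 0x1));  /* high differs */
          return 0;
  }

Reordering the loads, as suggested above, does not change the result; it
only aligns the 32-bit path with the layout of the x86_64 code.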
On Thu, Nov 06, 2025, Uros Bizjak wrote:
> On Thu, Nov 6, 2025 at 2:13 AM Sean Christopherson <seanjc@google.com> wrote:
> > 998:
> > -
> > /* Now restore the host value of the MSR if different from the guest's. */
> > - movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
> > - cmp SVM_spec_ctrl(%_ASM_DI), %eax
> > +#ifdef CONFIG_X86_64
> > + mov SVM_spec_ctrl(%rdi), %rdx
> > + cmp PER_CPU_VAR(x86_spec_ctrl_current), %rdx
> > je 901b
> > - xor %edx, %edx
> > + mov PER_CPU_VAR(x86_spec_ctrl_current), %rdx
> > + movl %edx, %eax
> > + shr $32, %rdx
>
> The above code can be written as:
>
>     mov PER_CPU_VAR(x86_spec_ctrl_current), %rdx
>     cmp SVM_spec_ctrl(%rdi), %rdx

Gah, that's obvious in hindsight.

>     je 901b
>     movl %edx, %eax
>     shr $32, %rdx
>
> The improved code will save a memory read from x86_spec_ctrl_current.
>
> > +#else
> > + mov SVM_spec_ctrl(%edi), %esi
> > + mov PER_CPU_VAR(x86_spec_ctrl_current), %eax
>
> Can the above two instructions be swapped, just to be consistent with
> x86_64 code?
>
> > + xor %eax, %esi
>
> > + mov SVM_spec_ctrl + 4(%edi), %edi
> > + mov PER_CPU_VAR(x86_spec_ctrl_current + 4), %edx
>
> ... and the above two insns.

Ya, will do. Thanks!