From: "Borislav Petkov (AMD)" <bp@alien8.de>
There's no need to do it on every AP.
The C-bit value read and verified on the BSP is used everywhere from
now on.
There should be no functional changes resulting from this patch - just
a bit faster booting APs.
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
---
arch/x86/kernel/head_64.S | 31 ++++++++++++++++++++++---------
1 file changed, 22 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 3dcabbc49149..af40d8eb4dca 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -114,6 +114,28 @@ SYM_CODE_START_NOALIGN(startup_64)
/* Form the CR3 value being sure to include the CR3 modifier */
addq $(early_top_pgt - __START_KERNEL_map), %rax
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ mov %rax, %rdi
+ mov %rax, %r14
+
+ addq phys_base(%rip), %rdi
+
+ /*
+ * For SEV guests: Verify that the C-bit is correct. A malicious
+ * hypervisor could lie about the C-bit position to perform a ROP
+ * attack on the guest by writing to the unencrypted stack and wait for
+ * the next RET instruction.
+ */
+ call sev_verify_cbit
+
+ /*
+ * Restore CR3 value without the phys_base which will be added
+ * below, before writing %cr3.
+ */
+ mov %r14, %rax
+#endif
+
jmp 1f
SYM_CODE_END(startup_64)
@@ -192,15 +214,6 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
/* Setup early boot stage 4-/5-level pagetables. */
addq phys_base(%rip), %rax
- /*
- * For SEV guests: Verify that the C-bit is correct. A malicious
- * hypervisor could lie about the C-bit position to perform a ROP
- * attack on the guest by writing to the unencrypted stack and wait for
- * the next RET instruction.
- */
- movq %rax, %rdi
- call sev_verify_cbit
-
/*
* Switch to new page-table
*
--
2.42.0.rc0.25.ga82fb66fed25
On 11/30/23 07:26, Borislav Petkov wrote:
> From: "Borislav Petkov (AMD)" <bp@alien8.de>
>
> There's no need to do it on every AP.
>
> The C-bit value read and verified on the BSP is used everywhere from
> now on.
>
> There should be no functional changes resulting from this patch - just
> a bit faster booting APs.
>
> Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>

One minor question below, but otherwise

Acked-by: Tom Lendacky <thomas.lendacky@amd.com>

> ---
> arch/x86/kernel/head_64.S | 31 ++++++++++++++++++++++---------
> 1 file changed, 22 insertions(+), 9 deletions(-)
>
> diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
> index 3dcabbc49149..af40d8eb4dca 100644
> --- a/arch/x86/kernel/head_64.S
> +++ b/arch/x86/kernel/head_64.S
> @@ -114,6 +114,28 @@ SYM_CODE_START_NOALIGN(startup_64)
>
> /* Form the CR3 value being sure to include the CR3 modifier */
> addq $(early_top_pgt - __START_KERNEL_map), %rax
> +
> +#ifdef CONFIG_AMD_MEM_ENCRYPT
> + mov %rax, %rdi
> + mov %rax, %r14
> +
> + addq phys_base(%rip), %rdi
> +
> + /*
> + * For SEV guests: Verify that the C-bit is correct. A malicious
> + * hypervisor could lie about the C-bit position to perform a ROP
> + * attack on the guest by writing to the unencrypted stack and wait for
> + * the next RET instruction.
> + */
> + call sev_verify_cbit
> +
> + /*
> + * Restore CR3 value without the phys_base which will be added
> + * below, before writing %cr3.
> + */
> + mov %r14, %rax

You're ignoring RAX now on return, so you can probably just make
sev_verify_cbit() a void function now. You would still need to save RAX
because of the calling convention, though, so it doesn't make this code
any cleaner (other than the comment could then just say restore CR3
value).

Your call, I'm good either way.

Thanks,
Tom

> +#endif
> +
> jmp 1f
> SYM_CODE_END(startup_64)
>
> @@ -192,15 +214,6 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
> /* Setup early boot stage 4-/5-level pagetables. */
> addq phys_base(%rip), %rax
>
> - /*
> - * For SEV guests: Verify that the C-bit is correct. A malicious
> - * hypervisor could lie about the C-bit position to perform a ROP
> - * attack on the guest by writing to the unencrypted stack and wait for
> - * the next RET instruction.
> - */
> - movq %rax, %rdi
> - call sev_verify_cbit
> -
> /*
> * Switch to new page-table
> *
On Mon, Dec 04, 2023 at 10:06:42AM -0600, Tom Lendacky wrote:
> You're ignoring RAX now on return, so you can probably just make
> sev_verify_cbit() a void function now. You would still need to save RAX
> because of the calling convention, though, so it doesn't make this code any
> cleaner (other than the comment could then just say restore CR3 value).
> Your call, I'm good either way.
I'm thinking I should leave that change for the patch which converts
sev_verify_cbit() to C...
Thx for looking.
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
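
For readers following the thread, here is a rough, hypothetical C sketch of
the check sev_verify_cbit performs and of the shape a void conversion (as
discussed above) could take. The real implementation is assembly in
arch/x86/kernel/sev_verify_cbit.S; the helper names below are stand-ins, not
kernel APIs.

#include <stdint.h>

/*
 * Hypothetical stand-ins for the real early-boot primitives; they are
 * assumptions made for illustration only, not kernel APIs.
 */
extern uint64_t rdrand64(void);        /* hardware random number          */
extern uint64_t read_cr3(void);        /* current page-table root         */
extern void write_cr3(uint64_t cr3);   /* switch to a new page-table root */
extern void halt_forever(void);        /* refuse to continue booting      */

/* Scratch word written and re-read through the new, encrypted mapping. */
static volatile uint64_t sev_check_data;

/*
 * Write a random value, switch to the page tables that encode the C-bit
 * position reported by the hypervisor, and read the value back. If the
 * reported C-bit position were a lie, the readback would not match and
 * the guest halts instead of running on attacker-writable memory.
 */
void sev_verify_cbit(uint64_t new_cr3)
{
	uint64_t old_cr3 = read_cr3();
	uint64_t expected = rdrand64();

	sev_check_data = expected;

	write_cr3(new_cr3);             /* mapping now uses the claimed C-bit */

	if (sev_check_data != expected)
		halt_forever();         /* C-bit position cannot be trusted   */

	write_cr3(old_cr3);             /* caller switches page tables later  */
}

Since the mismatch case never returns, the caller only needs its original CR3
value afterwards, which is why the hunk above parks it in %r14 across the call
rather than relying on the function's return value.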
The following commit has been merged into the x86/sev branch of tip:
Commit-ID: 30579c8baa5b4bd986420a984dad2940f1ff65d3
Gitweb: https://git.kernel.org/tip/30579c8baa5b4bd986420a984dad2940f1ff65d3
Author: Borislav Petkov (AMD) <bp@alien8.de>
AuthorDate: Thu, 30 Nov 2023 14:26:01 +01:00
Committer: Borislav Petkov (AMD) <bp@alien8.de>
CommitterDate: Wed, 13 Dec 2023 21:07:56 +01:00
x86/sev: Do the C-bit verification only on the BSP
There's no need to do it on every AP.
The C-bit value read and verified on the BSP is used everywhere from
now on.
No functional changes - just a bit faster booting APs.
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Tom Lendacky <thomas.lendacky@amd.com>
Link: https://lore.kernel.org/r/20231130132601.10317-1-bp@alien8.de
---
arch/x86/kernel/head_64.S | 31 ++++++++++++++++++++++---------
1 file changed, 22 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 086a2c3..d1dc39a 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -114,6 +114,28 @@ SYM_CODE_START_NOALIGN(startup_64)
/* Form the CR3 value being sure to include the CR3 modifier */
addq $(early_top_pgt - __START_KERNEL_map), %rax
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ mov %rax, %rdi
+ mov %rax, %r14
+
+ addq phys_base(%rip), %rdi
+
+ /*
+ * For SEV guests: Verify that the C-bit is correct. A malicious
+ * hypervisor could lie about the C-bit position to perform a ROP
+ * attack on the guest by writing to the unencrypted stack and wait for
+ * the next RET instruction.
+ */
+ call sev_verify_cbit
+
+ /*
+ * Restore CR3 value without the phys_base which will be added
+ * below, before writing %cr3.
+ */
+ mov %r14, %rax
+#endif
+
jmp 1f
SYM_CODE_END(startup_64)
@@ -193,15 +215,6 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
addq phys_base(%rip), %rax
/*
- * For SEV guests: Verify that the C-bit is correct. A malicious
- * hypervisor could lie about the C-bit position to perform a ROP
- * attack on the guest by writing to the unencrypted stack and wait for
- * the next RET instruction.
- */
- movq %rax, %rdi
- call sev_verify_cbit
-
- /*
* Switch to new page-table
*
* For the boot CPU this switches to early_top_pgt which still has the