From: "Borislav Petkov (AMD)" <bp@alien8.de>
Fix up the label numbering too, as the new macros have new label numbers.
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
---
arch/x86/include/asm/alternative.h | 24 ++++--------------------
arch/x86/kernel/fpu/xstate.h | 4 ++--
2 files changed, 6 insertions(+), 22 deletions(-)
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 73ee18705ef1..0df99855e003 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -277,26 +277,10 @@ static inline int alternatives_text_reserved(void *start, void *end)
N_ALTERNATIVE_2(oldinstr, newinstr_no, X86_FEATURE_ALWAYS, \
newinstr_yes, ft_flags)
-#define ALTERNATIVE_3(oldinsn, newinsn1, ft_flags1, newinsn2, ft_flags2, \
- newinsn3, ft_flags3) \
- OLDINSTR_3(oldinsn, 1, 2, 3) \
- ".pushsection .altinstructions,\"a\"\n" \
- ALTINSTR_ENTRY(ft_flags1, 1) \
- ALTINSTR_ENTRY(ft_flags2, 2) \
- ALTINSTR_ENTRY(ft_flags3, 3) \
- ".popsection\n" \
- ".pushsection .altinstr_replacement, \"ax\"\n" \
- ALTINSTR_REPLACEMENT(newinsn1, 1) \
- ALTINSTR_REPLACEMENT(newinsn2, 2) \
- ALTINSTR_REPLACEMENT(newinsn3, 3) \
- ".popsection\n"
-
-
-#define N_ALTERNATIVE_3(oldinst, newinst1, flag1, newinst2, flag2, \
- newinst3, flag3) \
- N_ALTERNATIVE(N_ALTERNATIVE_2(oldinst, newinst1, flag1, newinst2, flag2), \
- newinst3, flag3)
-
+#define ALTERNATIVE_3(oldinstr, newinstr1, ft_flags1, newinstr2, ft_flags2, \
+ newinstr3, ft_flags3) \
+ N_ALTERNATIVE(N_ALTERNATIVE_2(oldinstr, newinstr1, ft_flags1, newinstr2, ft_flags2), \
+ newinstr3, ft_flags3)
/*
* Alternative instructions for different CPU types or capabilities.
*
diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
index 05df04f39628..4fe8501efc6c 100644
--- a/arch/x86/kernel/fpu/xstate.h
+++ b/arch/x86/kernel/fpu/xstate.h
@@ -108,7 +108,7 @@ static inline u64 xfeatures_mask_independent(void)
*
* We use XSAVE as a fallback.
*
- * The 661 label is defined in the ALTERNATIVE* macros as the address of the
+ * The 771 label is defined in the ALTERNATIVE* macros as the address of the
* original instruction which gets replaced. We need to use it here as the
* address of the instruction where we might get an exception at.
*/
@@ -120,7 +120,7 @@ static inline u64 xfeatures_mask_independent(void)
"\n" \
"xor %[err], %[err]\n" \
"3:\n" \
- _ASM_EXTABLE_TYPE_REG(661b, 3b, EX_TYPE_EFAULT_REG, %[err]) \
+ _ASM_EXTABLE_TYPE_REG(771b, 3b, EX_TYPE_EFAULT_REG, %[err]) \
: [err] "=r" (err) \
: "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
: "memory")
--
2.43.0
On Fri, May 31, 2024 at 8:41 AM Borislav Petkov <bp@kernel.org> wrote:
>
> From: "Borislav Petkov (AMD)" <bp@alien8.de>
>
> Fix up the label numbering too, as the new macros have new label numbers.
>
> Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
> ---
> arch/x86/include/asm/alternative.h | 24 ++++--------------------
> arch/x86/kernel/fpu/xstate.h | 4 ++--
> 2 files changed, 6 insertions(+), 22 deletions(-)
>
> diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
> index 73ee18705ef1..0df99855e003 100644
> --- a/arch/x86/include/asm/alternative.h
> +++ b/arch/x86/include/asm/alternative.h
> @@ -277,26 +277,10 @@ static inline int alternatives_text_reserved(void *start, void *end)
> N_ALTERNATIVE_2(oldinstr, newinstr_no, X86_FEATURE_ALWAYS, \
> newinstr_yes, ft_flags)
>
> -#define ALTERNATIVE_3(oldinsn, newinsn1, ft_flags1, newinsn2, ft_flags2, \
> - newinsn3, ft_flags3) \
> - OLDINSTR_3(oldinsn, 1, 2, 3) \
> - ".pushsection .altinstructions,\"a\"\n" \
> - ALTINSTR_ENTRY(ft_flags1, 1) \
> - ALTINSTR_ENTRY(ft_flags2, 2) \
> - ALTINSTR_ENTRY(ft_flags3, 3) \
> - ".popsection\n" \
> - ".pushsection .altinstr_replacement, \"ax\"\n" \
> - ALTINSTR_REPLACEMENT(newinsn1, 1) \
> - ALTINSTR_REPLACEMENT(newinsn2, 2) \
> - ALTINSTR_REPLACEMENT(newinsn3, 3) \
> - ".popsection\n"
> -
> -
> -#define N_ALTERNATIVE_3(oldinst, newinst1, flag1, newinst2, flag2, \
> - newinst3, flag3) \
> - N_ALTERNATIVE(N_ALTERNATIVE_2(oldinst, newinst1, flag1, newinst2, flag2), \
> - newinst3, flag3)
> -
> +#define ALTERNATIVE_3(oldinstr, newinstr1, ft_flags1, newinstr2, ft_flags2, \
> + newinstr3, ft_flags3) \
> + N_ALTERNATIVE(N_ALTERNATIVE_2(oldinstr, newinstr1, ft_flags1, newinstr2, ft_flags2), \
> + newinstr3, ft_flags3)
> /*
> * Alternative instructions for different CPU types or capabilities.
> *
> diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
> index 05df04f39628..4fe8501efc6c 100644
> --- a/arch/x86/kernel/fpu/xstate.h
> +++ b/arch/x86/kernel/fpu/xstate.h
> @@ -108,7 +108,7 @@ static inline u64 xfeatures_mask_independent(void)
> *
> * We use XSAVE as a fallback.
> *
> - * The 661 label is defined in the ALTERNATIVE* macros as the address of the
> + * The 771 label is defined in the ALTERNATIVE* macros as the address of the
> * original instruction which gets replaced. We need to use it here as the
> * address of the instruction where we might get an exception at.
> */
> @@ -120,7 +120,7 @@ static inline u64 xfeatures_mask_independent(void)
> "\n" \
> "xor %[err], %[err]\n" \
> "3:\n" \
> - _ASM_EXTABLE_TYPE_REG(661b, 3b, EX_TYPE_EFAULT_REG, %[err]) \
> + _ASM_EXTABLE_TYPE_REG(771b, 3b, EX_TYPE_EFAULT_REG, %[err]) \
> : [err] "=r" (err) \
> : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
> : "memory")
> --
> 2.43.0
>
>
Just add a label at the start of this macro, so it doesn't depend on
the internal labels of ALTERNATIVE(). Something like:
asm volatile("1:" ALTERNATIVE_3(XSAVE, \
...
_ASM_EXTABLE_TYPE_REG(1b, 3b, EX_TYPE_EFAULT_REG, %[err]) \
...
Brian Gerst
On Fri, May 31, 2024 at 01:00:55PM -0400, Brian Gerst wrote:
> Just add a label at the start of this macro, so it doesn't depend on
> the internal labels of ALTERNATIVE(). Something like:
> asm volatile("1:" ALTERNATIVE_3(XSAVE, \
> ...
> _ASM_EXTABLE_TYPE_REG(1b, 3b, EX_TYPE_EFAULT_REG, %[err]) \
> ...
Thanks for the cool idea - that use of the asm label outside of the
macro was really nasty but I didn't think of that. Cool.
And yap, it looks good:
------
#APP
# 188 "arch/x86/kernel/fpu/xstate.h" 1
1: # ALT: oldinstr
<--- the outer label 1:
771:
# ALT: oldinstr
771:
# ALT: oldinstr
771:
.byte 0x48, 0x0f,0xae,0x27
772:
# ALT: padding
.skip -(((775f-774f)-(772b-771b)) > 0) * ((775f-774f)-(772b-771b)),0x90
...
triple-alternative gunk
...
775:
.popsection
xor %edi, %edi # err
3:
.pushsection "__ex_table","a"
------
Yap, and boots in the guest.
I'll fold in the below:
diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
index 70903c12a911..2ee0b9c53dcc 100644
--- a/arch/x86/kernel/fpu/xstate.h
+++ b/arch/x86/kernel/fpu/xstate.h
@@ -106,21 +106,17 @@ static inline u64 xfeatures_mask_independent(void)
* Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
* supports modified optimization which is not supported by XSAVE.
*
- * We use XSAVE as a fallback.
- *
- * The 771 label is defined in the ALTERNATIVE* macros as the address of the
- * original instruction which gets replaced. We need to use it here as the
- * address of the instruction where we might get an exception at.
+ * Use XSAVE as a fallback.
*/
#define XSTATE_XSAVE(st, lmask, hmask, err) \
- asm volatile(ALTERNATIVE_3(XSAVE, \
+ asm volatile("1: " ALTERNATIVE_3(XSAVE, \
XSAVEOPT, X86_FEATURE_XSAVEOPT, \
XSAVEC, X86_FEATURE_XSAVEC, \
XSAVES, X86_FEATURE_XSAVES) \
"\n" \
"xor %[err], %[err]\n" \
"3:\n" \
- _ASM_EXTABLE_TYPE_REG(771b, 3b, EX_TYPE_EFAULT_REG, %[err]) \
+ _ASM_EXTABLE_TYPE_REG(1b, 3b, EX_TYPE_EFAULT_REG, %[err]) \
: [err] "=r" (err) \
: "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
: "memory")
@@ -130,11 +126,11 @@ static inline u64 xfeatures_mask_independent(void)
* XSAVE area format.
*/
#define XSTATE_XRESTORE(st, lmask, hmask) \
- asm volatile(ALTERNATIVE(XRSTOR, \
+ asm volatile("1: " ALTERNATIVE(XRSTOR, \
XRSTORS, X86_FEATURE_XSAVES) \
"\n" \
"3:\n" \
- _ASM_EXTABLE_TYPE(771b, 3b, EX_TYPE_FPU_RESTORE) \
+ _ASM_EXTABLE_TYPE(1b, 3b, EX_TYPE_FPU_RESTORE) \
: \
: "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
: "memory")
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-netiquette
© 2016 - 2026 Red Hat, Inc.