From: "H. Peter Anvin (Intel)" <hpa@zytor.com>
The LKGS instruction atomically loads a segment descriptor into the
%gs descriptor registers, *except* that %gs.base is unchanged, and the
base is instead loaded into MSR_IA32_KERNEL_GS_BASE, which is exactly
what we want this function to do.
Signed-off-by: H. Peter Anvin (Intel) <hpa@zytor.com>
Signed-off-by: Xin Li <xin3.li@intel.com>
---
arch/x86/include/asm/gsseg.h | 28 +++++++++++++++++++++++++++-
1 file changed, 27 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/gsseg.h b/arch/x86/include/asm/gsseg.h
index 5e3b56a17098..b8a6a98d88b8 100644
--- a/arch/x86/include/asm/gsseg.h
+++ b/arch/x86/include/asm/gsseg.h
@@ -3,15 +3,41 @@
 #define _ASM_X86_GSSEG_H
 
 #include <linux/types.h>
+
+#include <asm/asm.h>
+#include <asm/cpufeature.h>
+#include <asm/alternative.h>
 #include <asm/processor.h>
+#include <asm/nops.h>
 
 #ifdef CONFIG_X86_64
 
 extern asmlinkage void asm_load_gs_index(u16 selector);
 
+#define LKGS_DI	_ASM_BYTES(0xf2,0x0f,0x00,0xf7)
+
 static inline void native_load_gs_index(unsigned int selector)
 {
-	asm_load_gs_index(selector);
+	u16 sel = selector;
+
+	/*
+	 * Note: the fixup is used for the LKGS instruction, but
+	 * it needs to be attached to the primary instruction sequence
+	 * as it isn't something that gets patched.
+	 *
+	 * %rax is provided to the assembly routine as a scratch
+	 * register.
+	 */
+	alternative_io("1: call asm_load_gs_index\n"
+		       ".pushsection \".fixup\",\"ax\"\n"
+		       "2: xorl %k[sel], %k[sel]\n"
+		       " jmp 1b\n"
+		       ".popsection\n"
+		       _ASM_EXTABLE(1b, 2b),
+		       _ASM_BYTES(0x3e) LKGS_DI,
+		       X86_FEATURE_LKGS,
+		       ASM_OUTPUT2([sel] "+D" (sel), ASM_CALL_CONSTRAINT),
+		       ASM_NO_INPUT_CLOBBER(_ASM_AX));
 }
 
 #endif /* CONFIG_X86_64 */
--
2.34.1
On Thu, Oct 6, 2022 at 12:19 PM Xin Li <xin3.li@intel.com> wrote:
>
> From: "H. Peter Anvin (Intel)" <hpa@zytor.com>
>
> The LKGS instruction atomically loads a segment descriptor into the
> %gs descriptor registers, *except* that %gs.base is unchanged, and the
> base is instead loaded into MSR_IA32_KERNEL_GS_BASE, which is exactly
> what we want this function to do.
>
> Signed-off-by: H. Peter Anvin (Intel) <hpa@zytor.com>
> Signed-off-by: Xin Li <xin3.li@intel.com>
> ---
> arch/x86/include/asm/gsseg.h | 28 +++++++++++++++++++++++++++-
> 1 file changed, 27 insertions(+), 1 deletion(-)
>
> diff --git a/arch/x86/include/asm/gsseg.h b/arch/x86/include/asm/gsseg.h
> index 5e3b56a17098..b8a6a98d88b8 100644
> --- a/arch/x86/include/asm/gsseg.h
> +++ b/arch/x86/include/asm/gsseg.h
> @@ -3,15 +3,41 @@
> #define _ASM_X86_GSSEG_H
>
> #include <linux/types.h>
> +
> +#include <asm/asm.h>
> +#include <asm/cpufeature.h>
> +#include <asm/alternative.h>
> #include <asm/processor.h>
> +#include <asm/nops.h>
>
> #ifdef CONFIG_X86_64
>
> extern asmlinkage void asm_load_gs_index(u16 selector);
>
> +#define LKGS_DI _ASM_BYTES(0xf2,0x0f,0x00,0xf7)
> +
> static inline void native_load_gs_index(unsigned int selector)
> {
> - asm_load_gs_index(selector);
> + u16 sel = selector;
> +
> + /*
> + * Note: the fixup is used for the LKGS instruction, but
> + * it needs to be attached to the primary instruction sequence
> + * as it isn't something that gets patched.
> + *
> + * %rax is provided to the assembly routine as a scratch
> + * register.
> + */
> + alternative_io("1: call asm_load_gs_index\n"
> + ".pushsection \".fixup\",\"ax\"\n"
> + "2: xorl %k[sel], %k[sel]\n"
> + " jmp 1b\n"
> + ".popsection\n"
> + _ASM_EXTABLE(1b, 2b),
> + _ASM_BYTES(0x3e) LKGS_DI,
> + X86_FEATURE_LKGS,
> + ASM_OUTPUT2([sel] "+D" (sel), ASM_CALL_CONSTRAINT),
> + ASM_NO_INPUT_CLOBBER(_ASM_AX));
> }
>
> #endif /* CONFIG_X86_64 */
> --
> 2.34.1
There are not that many call sites, so using something like this
(incorporating Peter Z's suggestion for the exception handler) would
be better from a code readability perspective vs. a tiny increase in
code size.
	if (static_cpu_has(X86_FEATURE_LKGS))
		asm volatile("1: " LKGS_DI
			     _ASM_EXTABLE_TYPE_REG(1b, 1b,
						   EX_TYPE_ZERO_REG, %k[sel])
			     : [sel] "+D" (sel) : : "memory");
	else
		asm_load_gs_index(sel);
--
Brian Gerst
> > + alternative_io("1: call asm_load_gs_index\n"
> > + ".pushsection \".fixup\",\"ax\"\n"
> > + "2: xorl %k[sel], %k[sel]\n"
> > + " jmp 1b\n"
> > + ".popsection\n"
> > + _ASM_EXTABLE(1b, 2b),
> > + _ASM_BYTES(0x3e) LKGS_DI,
> > + X86_FEATURE_LKGS,
> > + ASM_OUTPUT2([sel] "+D" (sel), ASM_CALL_CONSTRAINT),
> > + ASM_NO_INPUT_CLOBBER(_ASM_AX));
> > }
> >
> > #endif /* CONFIG_X86_64 */
> > --
> > 2.34.1
>
> There are not that many call sites, so using something like this (incorporating
> Peter Z's suggestion for the exception handler) would be better from a code
> readability perspective vs. a tiny increase in code size.
The existing approach patches the binary code thus we don't need to check it at runtime.
>
> if (static_cpu_has(X86_FEATURE_LKGS))
> asm volatile("1: " LKGS_DI
> _ASM_EXTABLE_TYPE_REG(1b, 1b, EX_TYPE_ZERO_REG,
> %k[sel])
> : [sel] "+D" (sel) : : "memory");
> else
> asm_load_gs_index(sel);
On Sat, Oct 8, 2022 at 1:40 AM Li, Xin3 <xin3.li@intel.com> wrote:
>
> > > + alternative_io("1: call asm_load_gs_index\n"
> > > + ".pushsection \".fixup\",\"ax\"\n"
> > > + "2: xorl %k[sel], %k[sel]\n"
> > > + " jmp 1b\n"
> > > + ".popsection\n"
> > > + _ASM_EXTABLE(1b, 2b),
> > > + _ASM_BYTES(0x3e) LKGS_DI,
> > > + X86_FEATURE_LKGS,
> > > + ASM_OUTPUT2([sel] "+D" (sel), ASM_CALL_CONSTRAINT),
> > > + ASM_NO_INPUT_CLOBBER(_ASM_AX));
> > > }
> > >
> > > #endif /* CONFIG_X86_64 */
> > > --
> > > 2.34.1
> >
> > There are not that many call sites, so using something like this (incorporating
> > Peter Z's suggestion for the exception handler) would be better from a code
> > readability perspective vs. a tiny increase in code size.
>
> The existing approach patches the binary code thus we don't need to check it at runtime.
static_cpu_has() uses alternatives to patch the branch, so there is no
runtime check after early boot.
--
Brian Gerst
> > > There are not that many call sites, so using something like this
> > > (incorporating Peter Z's suggestion for the exception handler) would
> > > be better from a code readability perspective vs. a tiny increase in code size.
> >
> > The existing approach patches the binary code thus we don't need to check it
> > at runtime.
>
> static_cpu_has() uses alternatives to patch the branch, so there is no runtime
> check after early boot.

Sorry, didn't know it, thanks for pointing it out.

If we prefer static_cpu_has, are you asking to replace all alternative macros with it?

Xin
On Mon, Oct 10, 2022 at 04:32:34AM +0000, Li, Xin3 wrote:
> > > > There are not that many call sites, so using something like this
> > > > (incorporating Peter Z's suggestion for the exception handler) would
> > > > be better from a code readability perspective vs. a tiny increase in code size.
> > >
> > > The existing approach patches the binary code thus we don't need to check it
> > > at runtime.
> >
> > static_cpu_has() uses alternatives to patch the branch, so there is no runtime
> > check after early boot.
>
> Sorry, didn't know it, thanks for pointing it out.
>
> If we prefer static_cpu_has, are you asking to replace all alternative macros with it?

No; the only reason to do it here would be to unconfuse that exception
thing. But even there I'm not convinced. Yes, Brian's code is much easier
to read, but code-gen is quite terrible (also, my binutils can't seem to
decode this -- GNU objdump (GNU Binutils for Debian) 2.38.90.20220713).
On October 9, 2022 9:32:34 PM PDT, "Li, Xin3" <xin3.li@intel.com> wrote:
>> > > There are not that many call sites, so using something like this
>> > > (incorporating Peter Z's suggestion for the exception handler) would
>> > > be better from a code readability perspective vs. a tiny increase in code size.
>> >
>> > The existing approach patches the binary code thus we don't need to check it
>> > at runtime.
>>
>> static_cpu_has() uses alternatives to patch the branch, so there is no runtime
>> check after early boot.
>>
>
>Sorry, didn't know it, thanks for pointing it out.
>
>If we prefer static_cpu_has, are you asking to replace all alternative macros with it?
>
>Xin
>

Honestly, it seems to me to be more than a bit excessive. The code might be
nontrivial, but *with proper commenting* it should be perfectly understandable...
On Thu, Oct 06, 2022 at 08:40:41AM -0700, Xin Li wrote:
> static inline void native_load_gs_index(unsigned int selector)
> {
> + u16 sel = selector;
> +
> + /*
> + * Note: the fixup is used for the LKGS instruction, but
> + * it needs to be attached to the primary instruction sequence
> + * as it isn't something that gets patched.
> + *
> + * %rax is provided to the assembly routine as a scratch
> + * register.
> + */
> + alternative_io("1: call asm_load_gs_index\n"
> + ".pushsection \".fixup\",\"ax\"\n"
> + "2: xorl %k[sel], %k[sel]\n"
> + " jmp 1b\n"
> + ".popsection\n"
> + _ASM_EXTABLE(1b, 2b),
> + _ASM_BYTES(0x3e) LKGS_DI,
> + X86_FEATURE_LKGS,
> + ASM_OUTPUT2([sel] "+D" (sel), ASM_CALL_CONSTRAINT),
> + ASM_NO_INPUT_CLOBBER(_ASM_AX));
> }
Something like so perhaps?
	asm_inline volatile("1:\n"
			    ALTERNATIVE("call asm_load_gs_index\n",
					LKGS_DI,
					X86_FEATURE_LKGS)
			    _ASM_EXTABLE_TYPE_REG(1b, 1b, EX_TYPE_ZERO_REG, %k[sel])
			    : ASM_CALL_CONSTRAINT
			    : [sel] "D" (sel)
			    : "memory", _ASM_AX);
(completely untested, not even seen a compiler upclose)
> Something like so perhaps?
>
> asm_inline volatile("1:\n"
> ALTERNATIVE("call asm_load_gs_index\n",
> LKGS_DI,
> X86_FEATURE_LKGS)
> _ASM_EXTABLE_TYPE_REG(1b, 1b,
> EX_TYPE_ZERO_REG, %k[sel])
> : ASM_CALL_CONSTRAINT
> : [sel] "D" (sel)
> : "memory", _ASM_AX);
>
>
> (completely untested, not even seen a compiler upclose)
It compiles (after adding "0x3e" to make it 5 bytes to match the size of "call asm_load_gs_index") and passes my fixup test, thanks!
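For reference, a minimal sketch of how the two pieces combine, based only on
what is shown in this thread (Peter Z's ALTERNATIVE() form plus the 0x3e byte
mentioned above); an untested reconstruction, not the final merged patch:

	static inline void native_load_gs_index(unsigned int selector)
	{
		u16 sel = selector;

		/*
		 * LKGS is patched in when available; the 0x3e (DS segment
		 * override) prefix pads the 4-byte LKGS encoding to the
		 * 5 bytes of "call asm_load_gs_index", as noted above.
		 * If either instruction faults, the exception table entry
		 * zeroes the selector register and resumes at label 1,
		 * re-issuing the load with a null selector.
		 */
		asm_inline volatile("1:\n"
				    ALTERNATIVE("call asm_load_gs_index\n",
						_ASM_BYTES(0x3e) LKGS_DI,
						X86_FEATURE_LKGS)
				    _ASM_EXTABLE_TYPE_REG(1b, 1b, EX_TYPE_ZERO_REG, %k[sel])
				    : ASM_CALL_CONSTRAINT
				    : [sel] "D" (sel)
				    : "memory", _ASM_AX);
	}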
On Thu, Oct 06, 2022 at 08:40:41AM -0700, Xin Li wrote:
> +#define LKGS_DI _ASM_BYTES(0xf2,0x0f,0x00,0xf7)

This should come with a comment that states the binutils version that
understands "lkgs %di" (or however that ends up being written).
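A sketch of how such a comment on the macro might read; the exact minimum
binutils version is deliberately left open here, since the thread does not
state it:

	/*
	 * Load the selector into %gs and the descriptor base into
	 * MSR_IA32_KERNEL_GS_BASE.  Open-coded via _ASM_BYTES() because
	 * older binutils cannot assemble "lkgs %di"; switch to the
	 * mnemonic once the minimum supported binutils understands it.
	 */
	#define LKGS_DI	_ASM_BYTES(0xf2,0x0f,0x00,0xf7)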
On Thu, Oct 06, 2022 at 08:40:41AM -0700, Xin Li wrote:
> static inline void native_load_gs_index(unsigned int selector)
> {
> + u16 sel = selector;
> +
> + /*
> + * Note: the fixup is used for the LKGS instruction, but
> + * it needs to be attached to the primary instruction sequence
> + * as it isn't something that gets patched.
> + *
> + * %rax is provided to the assembly routine as a scratch
> + * register.
> + */
> + alternative_io("1: call asm_load_gs_index\n"
> + ".pushsection \".fixup\",\"ax\"\n"
> + "2: xorl %k[sel], %k[sel]\n"
> + " jmp 1b\n"
> + ".popsection\n"
> + _ASM_EXTABLE(1b, 2b),
> + _ASM_BYTES(0x3e) LKGS_DI,
> + X86_FEATURE_LKGS,
> + ASM_OUTPUT2([sel] "+D" (sel), ASM_CALL_CONSTRAINT),
> + ASM_NO_INPUT_CLOBBER(_ASM_AX));
> }
I'm very sure none of this was tested... the .fixup section hasn't
existed for almost a year now.
e5eefda5aa51 ("x86: Remove .fixup section")
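Since that commit, out-of-line .fixup stubs are replaced by typed
exception-table entries that the fault handler acts on directly, which is
what the _ASM_EXTABLE_TYPE_REG()/EX_TYPE_ZERO_REG suggestions in this thread
rely on. A minimal sketch of that pattern, assuming <asm/asm.h> and
<asm/extable_fixup_types.h> are available; the helper name is made up for
illustration:

	/* Hypothetical helper, for illustration only. */
	static inline unsigned int read_word_or_zero(const unsigned int *ptr)
	{
		unsigned int val;

		/*
		 * If the load at 1: faults, the exception table entry tells
		 * the fault handler to zero the destination register and
		 * resume at 2:, with no .fixup stub and no out-of-line jmp.
		 */
		asm volatile("1: movl (%[ptr]), %[val]\n"
			     "2:\n"
			     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ZERO_REG, %[val])
			     : [val] "=r" (val)
			     : [ptr] "r" (ptr), "m" (*ptr));

		return val;
	}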
> > + alternative_io("1: call asm_load_gs_index\n"
> > + ".pushsection \".fixup\",\"ax\"\n"
> > + "2: xorl %k[sel], %k[sel]\n"
> > + " jmp 1b\n"
> > + ".popsection\n"
> > + _ASM_EXTABLE(1b, 2b),
> > + _ASM_BYTES(0x3e) LKGS_DI,
> > + X86_FEATURE_LKGS,
> > + ASM_OUTPUT2([sel] "+D" (sel), ASM_CALL_CONSTRAINT),
> > + ASM_NO_INPUT_CLOBBER(_ASM_AX));
> > }
>
> I'm very sure none of this was tested... the .fixup section hasn't existed for
> almost a year now.
Weird, did you ever check a kernel dump?
>
> e5eefda5aa51 ("x86: Remove .fixup section")
On Fri, Oct 07, 2022 at 06:01:06PM +0000, Li, Xin3 wrote:
> > > + alternative_io("1: call asm_load_gs_index\n"
> > > + ".pushsection \".fixup\",\"ax\"\n"
> > > + "2: xorl %k[sel], %k[sel]\n"
> > > + " jmp 1b\n"
> > > + ".popsection\n"
> > > + _ASM_EXTABLE(1b, 2b),
> > > + _ASM_BYTES(0x3e) LKGS_DI,
> > > + X86_FEATURE_LKGS,
> > > + ASM_OUTPUT2([sel] "+D" (sel), ASM_CALL_CONSTRAINT),
> > > + ASM_NO_INPUT_CLOBBER(_ASM_AX));
> > > }
> >
> > I'm very sure none of this was tested... the .fixup section hasn't existed for
> > almost a year now.
>
> Weird, did you ever check a kernel dump?
$ readelf -WS defconfig-build/vmlinux | grep fixup
[ 5] .pci_fixup PROGBITS ffffffff826a5350 18a5350 003570 00 A 0 0 16
[ 6] .rela.pci_fixup RELA 0000000000000000 360c388 005028 18 I 60 5 8
In fact, when I add one I get:
ld: warning: orphan section `.fixup' from `arch/x86/kernel/traps.o' being placed in section `.fixup'
On October 7, 2022 12:24:13 PM PDT, Peter Zijlstra <peterz@infradead.org> wrote:
>On Fri, Oct 07, 2022 at 06:01:06PM +0000, Li, Xin3 wrote:
>> > > + alternative_io("1: call asm_load_gs_index\n"
>> > > + ".pushsection \".fixup\",\"ax\"\n"
>> > > + "2: xorl %k[sel], %k[sel]\n"
>> > > + " jmp 1b\n"
>> > > + ".popsection\n"
>> > > + _ASM_EXTABLE(1b, 2b),
>> > > + _ASM_BYTES(0x3e) LKGS_DI,
>> > > + X86_FEATURE_LKGS,
>> > > + ASM_OUTPUT2([sel] "+D" (sel), ASM_CALL_CONSTRAINT),
>> > > + ASM_NO_INPUT_CLOBBER(_ASM_AX));
>> > > }
>> >
>> > I'm very sure none of this was tested... the .fixup section hasn't existed for
>> > almost a year now.
>>
>> Weird, did you ever check a kernel dump?
>
>$ readelf -WS defconfig-build/vmlinux | grep fixup
>[ 5] .pci_fixup PROGBITS ffffffff826a5350 18a5350 003570 00 A 0 0 16
>[ 6] .rela.pci_fixup RELA 0000000000000000 360c388 005028 18 I 60 5 8
>
>In fact, when I add one I get:
>
>ld: warning: orphan section `.fixup' from `arch/x86/kernel/traps.o' being placed in section `.fixup'
Perhaps the two of you need to compare configurations?
On Fri, Oct 07, 2022 at 01:03:12PM -0700, H. Peter Anvin wrote:
> On October 7, 2022 12:24:13 PM PDT, Peter Zijlstra <peterz@infradead.org> wrote:
> >On Fri, Oct 07, 2022 at 06:01:06PM +0000, Li, Xin3 wrote:
> >> > > + alternative_io("1: call asm_load_gs_index\n"
> >> > > + ".pushsection \".fixup\",\"ax\"\n"
> >> > > + "2: xorl %k[sel], %k[sel]\n"
> >> > > + " jmp 1b\n"
> >> > > + ".popsection\n"
> >> > > + _ASM_EXTABLE(1b, 2b),
> >> > > + _ASM_BYTES(0x3e) LKGS_DI,
> >> > > + X86_FEATURE_LKGS,
> >> > > + ASM_OUTPUT2([sel] "+D" (sel), ASM_CALL_CONSTRAINT),
> >> > > + ASM_NO_INPUT_CLOBBER(_ASM_AX));
> >> > > }
> >> >
> >> > I'm very sure none of this was tested... the .fixup section hasn't existed for
> >> > almost a year now.
> >>
> >> Weird, did you ever check a kernel dump?
> >
> >$ readelf -WS defconfig-build/vmlinux | grep fixup
> >[ 5] .pci_fixup PROGBITS ffffffff826a5350 18a5350 003570 00 A 0 0 16
> >[ 6] .rela.pci_fixup RELA 0000000000000000 360c388 005028 18 I 60 5 8
> >
> >In fact, when I add one I get:
> >
> >ld: warning: orphan section `.fixup' from `arch/x86/kernel/traps.o' being placed in section `.fixup'
>
> Perhaps the two of you need to compare configurations?
Whatever for? I know the robots report this warning because there was
one from the KVM cross-merge when the .fixup removal went in. It got
reported and fixed and that was the last of it.
Anyway; try:
$ git grep "\.fixup" arch/x86/
there isn't a single usage.
Andrew Cooper suggested upgrading the orphan section warning to a hard
link error, orphan sections are bad regardless.
On 10/7/22 13:23, Peter Zijlstra wrote:
>>
>> Perhaps the two of you need to compare configurations?
>
> Whatever for? I know the robots report this warning because there was
> one from the KVM cross-merge when the .fixup removal went in. It got
> reported and fixed and that was the last of it.
>

To help track down what appears to be a problem?

> Anyway; try:
>
> $ git grep "\.fixup" arch/x86/
>
> there isn't a single usage.
>
> Andrew Cooper suggested upgrading the orphan section warning to a hard
> link error, orphan sections are bad regardless.
>

Agreed 1000%. This is a no-brainer. From IRC:

<andyhhp> -LDFLAGS_vmlinux += --orphan-handling=warn
<andyhhp> +LDFLAGS_vmlinux += --orphan-handling=error

	-hpa
> > Andrew Cooper suggested upgrading the orphan section warning to a hard
> > link error, orphan sections are bad regardless.
> >
>
> Agreed 1000%. This is a no-brainer. From IRC:
>
>
> <andyhhp> -LDFLAGS_vmlinux += --orphan-handling=warn
> <andyhhp> +LDFLAGS_vmlinux += --orphan-handling=error

There is an arch-independent config option, CONFIG_LD_ORPHAN_WARN, which makes
the linker warn on implicitly named (orphan) sections; without it there is no
warning at all. CONFIG_LD_ORPHAN_WARN depends on ARCH_WANT_LD_ORPHAN_WARN, and
some archs (arm/arm64/mips/x86/...) select it, in which case ld warns on orphan
sections. Should we promote the warning to an error only on x86?

>
> -hpa
> > Andrew Cooper suggested upgrading the orphan section warning to a hard
> > link error, orphan sections are bad regardless.
> >
>
> Agreed 1000%. This is a no-brainer. From IRC:
>
>
> <andyhhp> -LDFLAGS_vmlinux += --orphan-handling=warn
> <andyhhp> +LDFLAGS_vmlinux += --orphan-handling=error

Who is going to make the change?
On October 7, 2022 10:32:31 PM PDT, "Li, Xin3" <xin3.li@intel.com> wrote:
>> > Andrew Cooper suggested upgrading the orphan section warning to a hard
>> > link error, orphan sections are bad regardless.
>> >
>>
>> Agreed 1000%. This is a no-brainer. From IRC:
>>
>>
>> <andyhhp> -LDFLAGS_vmlinux += --orphan-handling=warn
>> <andyhhp> +LDFLAGS_vmlinux += --orphan-handling=error
>
>Who is going to make the change?
>

Why don't you?
> On October 7, 2022 10:32:31 PM PDT, "Li, Xin3" <xin3.li@intel.com> wrote:
> >> > Andrew Cooper suggested upgrading the orphan section warning to a
> >> > hard link error, orphan sections are bad regardless.
> >> >
> >>
> >> Agreed 1000%. This is a no-brainer. From IRC:
> >>
> >>
> >> <andyhhp> -LDFLAGS_vmlinux += --orphan-handling=warn
> >> <andyhhp> +LDFLAGS_vmlinux += --orphan-handling=error
> >
> >Who is going to make the change?
> >
>
> Why don't you?

Will do.

Xin
On October 7, 2022 7:47:09 AM PDT, Peter Zijlstra <peterz@infradead.org> wrote:
>On Thu, Oct 06, 2022 at 08:40:41AM -0700, Xin Li wrote:
>
>> static inline void native_load_gs_index(unsigned int selector)
>> {
>> + u16 sel = selector;
>> +
>> + /*
>> + * Note: the fixup is used for the LKGS instruction, but
>> + * it needs to be attached to the primary instruction sequence
>> + * as it isn't something that gets patched.
>> + *
>> + * %rax is provided to the assembly routine as a scratch
>> + * register.
>> + */
>> + alternative_io("1: call asm_load_gs_index\n"
>> + ".pushsection \".fixup\",\"ax\"\n"
>> + "2: xorl %k[sel], %k[sel]\n"
>> + " jmp 1b\n"
>> + ".popsection\n"
>> + _ASM_EXTABLE(1b, 2b),
>> + _ASM_BYTES(0x3e) LKGS_DI,
>> + X86_FEATURE_LKGS,
>> + ASM_OUTPUT2([sel] "+D" (sel), ASM_CALL_CONSTRAINT),
>> + ASM_NO_INPUT_CLOBBER(_ASM_AX));
>> }
>
>I'm very sure none of this was tested... the .fixup section hasn't
>existed for almost a year now.
>
> e5eefda5aa51 ("x86: Remove .fixup section")
Xin, what did you use as the forward-porting baseline?
> >> + alternative_io("1: call asm_load_gs_index\n"
> >> + ".pushsection \".fixup\",\"ax\"\n"
> >> + "2: xorl %k[sel], %k[sel]\n"
> >> + " jmp 1b\n"
> >> + ".popsection\n"
> >> + _ASM_EXTABLE(1b, 2b),
> >> + _ASM_BYTES(0x3e) LKGS_DI,
> >> + X86_FEATURE_LKGS,
> >> + ASM_OUTPUT2([sel] "+D" (sel), ASM_CALL_CONSTRAINT),
> >> + ASM_NO_INPUT_CLOBBER(_ASM_AX));
> >> }
> >
> >I'm very sure none of this was tested... the .fixup section hasn't
> >existed for almost a year now.
> >
> > e5eefda5aa51 ("x86: Remove .fixup section")
>
> Xin, what did you use as the forward-porting baseline?
6.0 release, and my kernel dump shows me a fixup section is there, and a fixup section is created anyway if we do "pushsection "\.fixup\"".
On October 7, 2022 11:07:34 AM PDT, "Li, Xin3" <xin3.li@intel.com> wrote:
>> >> + alternative_io("1: call asm_load_gs_index\n"
>> >> + ".pushsection \".fixup\",\"ax\"\n"
>> >> + "2: xorl %k[sel], %k[sel]\n"
>> >> + " jmp 1b\n"
>> >> + ".popsection\n"
>> >> + _ASM_EXTABLE(1b, 2b),
>> >> + _ASM_BYTES(0x3e) LKGS_DI,
>> >> + X86_FEATURE_LKGS,
>> >> + ASM_OUTPUT2([sel] "+D" (sel), ASM_CALL_CONSTRAINT),
>> >> + ASM_NO_INPUT_CLOBBER(_ASM_AX));
>> >> }
>> >
>> >I'm very sure none of this was tested... the .fixup section hasn't
>> >existed for almost a year now.
>> >
>> > e5eefda5aa51 ("x86: Remove .fixup section")
>>
>> Xin, what did you use as the forward-porting baseline?
>
>6.0 release, and my kernel dump shows me a fixup section is there, and a fixup section is created anyway if we do "pushsection "\.fixup\"".
>
>
>
Yeah. .fixup is really Just Another Text Section ™, so it is probably not surprising if it has crept back in.