In preparation for converting the alternative implementation to
assembler macros, remove all newlines from alternative instructions.
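For example (an illustrative sketch, not a hunk from this patch): the GNU
assembler also accepts ';' as a statement separator, so an alternative that
was written as

	asm volatile(ALTERNATIVE("", "clac\n\t" "nop", X86_FEATURE_SMAP));

can equivalently be written as

	asm volatile(ALTERNATIVE("", "clac; nop", X86_FEATURE_SMAP));

which keeps the whole alternative on a single logical line so it can later be
passed as one argument to an assembler macro.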
Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
---
arch/x86/include/asm/paravirt_types.h | 2 +-
arch/x86/include/asm/processor.h | 2 +-
arch/x86/include/asm/smap.h | 8 ++++----
arch/x86/include/asm/vmware.h | 2 +-
4 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 3502939415ad..a05fcc4f5172 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -249,7 +249,7 @@ extern struct paravirt_patch_template pv_ops;
* don't need to bother with CFI prefixes.
*/
#define PARAVIRT_CALL \
- ANNOTATE_RETPOLINE_SAFE "\n\t" \
+ ANNOTATE_RETPOLINE_SAFE "; " \
"call *%[paravirt_opptr];"
/*
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index a24c7805acdb..816dbddd7507 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -706,7 +706,7 @@ static inline u32 per_cpu_l2c_id(unsigned int cpu)
*/
static __always_inline void amd_clear_divider(void)
{
- asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
+ asm volatile(ALTERNATIVE("", "div %2", X86_BUG_DIV0)
:: "a" (0), "d" (0), "r" (1));
}
diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
index 977bef14a0ab..d9d565024e3b 100644
--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -77,7 +77,7 @@ static __always_inline unsigned long smap_save(void)
unsigned long flags;
asm volatile ("# smap_save\n\t"
- ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE "\n\t"
+ ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE
"", "pushf; pop %0; clac",
X86_FEATURE_SMAP)
: "=rm" (flags) : : "memory", "cc");
@@ -88,7 +88,7 @@ static __always_inline unsigned long smap_save(void)
static __always_inline void smap_restore(unsigned long flags)
{
asm volatile ("# smap_restore\n\t"
- ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE "\n\t"
+ ALTERNATIVE(ANNOTATE_IGNORE_ALTERNATIVE
"", "push %0; popf",
X86_FEATURE_SMAP)
: : "g" (flags) : "memory", "cc");
@@ -101,9 +101,9 @@ static __always_inline void smap_restore(unsigned long flags)
ALTERNATIVE("", "stac", X86_FEATURE_SMAP)
#define ASM_CLAC_UNSAFE \
- ALTERNATIVE("", ANNOTATE_IGNORE_ALTERNATIVE "\n\t" "clac", X86_FEATURE_SMAP)
+ ALTERNATIVE("", ANNOTATE_IGNORE_ALTERNATIVE "; clac", X86_FEATURE_SMAP)
#define ASM_STAC_UNSAFE \
- ALTERNATIVE("", ANNOTATE_IGNORE_ALTERNATIVE "\n\t" "stac", X86_FEATURE_SMAP)
+ ALTERNATIVE("", ANNOTATE_IGNORE_ALTERNATIVE "; stac", X86_FEATURE_SMAP)
#endif /* __ASSEMBLER__ */
diff --git a/arch/x86/include/asm/vmware.h b/arch/x86/include/asm/vmware.h
index c9cf43d5ef23..100fba7555bf 100644
--- a/arch/x86/include/asm/vmware.h
+++ b/arch/x86/include/asm/vmware.h
@@ -90,7 +90,7 @@ extern unsigned long vmware_tdx_hypercall(unsigned long cmd,
* set. The high word of %edx may contain input data from the caller.
*/
#define VMWARE_HYPERCALL \
- ALTERNATIVE_2("movw %[port], %%dx\n\t" \
+ ALTERNATIVE_2("movw %[port], %%dx; " \
"inl (%%dx), %%eax", \
"vmcall", X86_FEATURE_VMCALL, \
"vmmcall", X86_FEATURE_VMW_VMMCALL)
--
2.52.0
On Sat, Dec 06, 2025 at 01:41:14PM -0800, Josh Poimboeuf wrote:
> In preparation for converting the alternative implementation to
> assembler macros, remove all newlines from alternative instructions.
>
> Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
x86-32 fixes:
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 334c6058a1ec..2313f3fe19d0 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -441,18 +441,18 @@ static inline void call_depth_return_thunk(void) {}
* -mindirect-branch-cs-prefix.
*/
#define __CS_PREFIX(reg) \
- ".irp rs,r8,r9,r10,r11,r12,r13,r14,r15\n" \
- ".ifc \\rs," reg "\n" \
- ".byte 0x2e\n" \
- ".endif\n" \
- ".endr\n"
+ ".irp rs,r8,r9,r10,r11,r12,r13,r14,r15; " \
+ ".ifc \\rs," reg "; " \
+ ".byte 0x2e; " \
+ ".endif; " \
+ ".endr; "
/*
* Inline asm uses the %V modifier which is only in newer GCC
* which is ensured when CONFIG_MITIGATION_RETPOLINE is defined.
*/
#define CALL_NOSPEC __CS_PREFIX("%V[thunk_target]") \
- "call __x86_indirect_thunk_%V[thunk_target]\n"
+ "call __x86_indirect_thunk_%V[thunk_target]"
# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
@@ -464,30 +464,30 @@ static inline void call_depth_return_thunk(void) {}
*/
# define CALL_NOSPEC \
ALTERNATIVE_2( \
- ANNOTATE_RETPOLINE_SAFE "\n" \
- "call *%[thunk_target]\n", \
- " jmp 904f;\n" \
- " .align 16\n" \
- "901: call 903f;\n" \
- "902: pause;\n" \
- " lfence;\n" \
- " jmp 902b;\n" \
- " .align 16\n" \
- "903: lea 4(%%esp), %%esp;\n" \
- " pushl %[thunk_target];\n" \
- " ret;\n" \
- " .align 16\n" \
- "904: call 901b;\n", \
+ ANNOTATE_RETPOLINE_SAFE "; " \
+ "call *%[thunk_target]; ", \
+ " jmp 904f; " \
+ " .align 16; " \
+ "901: call 903f; " \
+ "902: pause; " \
+ " lfence; " \
+ " jmp 902b; " \
+ " .align 16; " \
+ "903: lea 4(%%esp), %%esp; " \
+ " pushl %[thunk_target]; " \
+ " ret; " \
+ " .align 16; " \
+ "904: call 901b; ", \
X86_FEATURE_RETPOLINE, \
- "lfence;\n" \
- ANNOTATE_RETPOLINE_SAFE "\n" \
- "call *%[thunk_target]\n", \
+ "lfence; " \
+ ANNOTATE_RETPOLINE_SAFE "; " \
+ "call *%[thunk_target]; ", \
X86_FEATURE_RETPOLINE_LFENCE)
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
#else /* No retpoline for C / inline asm */
-# define CALL_NOSPEC "call *%[thunk_target]\n"
+# define CALL_NOSPEC "call *%[thunk_target]; "
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index c7a5d2960d57..9c5bb666bd55 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -53,7 +53,7 @@ DEFINE_PER_CPU_CACHE_HOT(struct irq_stack *, softirq_stack_ptr);
static void call_on_stack(void *func, void *stack)
{
- asm volatile("xchgl %[sp], %%esp\n"
+ asm volatile("xchgl %[sp], %%esp; "
CALL_NOSPEC
"movl %[sp], %%esp"
: [sp] "+b" (stack)
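For reference, a sketch of what the call_on_stack() asm template concatenates
to after this change (assuming the plain, non-retpoline CALL_NOSPEC
definition):

	"xchgl %[sp], %%esp; call *%[thunk_target]; movl %[sp], %%esp"

The trailing "; " kept in CALL_NOSPEC is what keeps the final movl as a
separate statement once the strings are pasted together.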