[PATCH] x86/kexec: Merge x86_32 and x86_64 code using macros from asm.h

Posted by Uros Bizjak 11 months, 1 week ago
Merge common x86_32 and x86_64 code in crash_setup_regs()
using macros from asm/asm.h.

The compiled object files before and after the patch are unchanged.

Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
---
 arch/x86/include/asm/asm.h   |  2 ++
 arch/x86/include/asm/kexec.h | 44 +++++++++++++++---------------------
 2 files changed, 20 insertions(+), 26 deletions(-)

diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 975ae7a9397e..2bccc063d30b 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -41,6 +41,8 @@
 #define _ASM_SUB	__ASM_SIZE(sub)
 #define _ASM_XADD	__ASM_SIZE(xadd)
 #define _ASM_MUL	__ASM_SIZE(mul)
+#define _ASM_PUSHF	__ASM_SIZE(pushf)
+#define _ASM_POP	__ASM_SIZE(pop)
 
 #define _ASM_AX		__ASM_REG(ax)
 #define _ASM_BX		__ASM_REG(bx)
diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h
index 8ad187462b68..56040ae6bda0 100644
--- a/arch/x86/include/asm/kexec.h
+++ b/arch/x86/include/asm/kexec.h
@@ -18,6 +18,7 @@
 #include <linux/string.h>
 #include <linux/kernel.h>
 
+#include <asm/asm.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
 
@@ -71,29 +72,15 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
 	if (oldregs) {
 		memcpy(newregs, oldregs, sizeof(*newregs));
 	} else {
-#ifdef CONFIG_X86_32
-		asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
-		asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
-		asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
-		asm volatile("movl %%esi,%0" : "=m"(newregs->si));
-		asm volatile("movl %%edi,%0" : "=m"(newregs->di));
-		asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
-		asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
-		asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
-		asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
-		asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
-		asm volatile("movl %%ds, %%eax;" :"=a"(newregs->ds));
-		asm volatile("movl %%es, %%eax;" :"=a"(newregs->es));
-		asm volatile("pushfl; popl %0" :"=m"(newregs->flags));
-#else
-		asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
-		asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
-		asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
-		asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
-		asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
-		asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
-		asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
-		asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
+		asm volatile(_ASM_MOV " %%" _ASM_BX ",%0" : "=m"(newregs->bx));
+		asm volatile(_ASM_MOV " %%" _ASM_CX ",%0" : "=m"(newregs->cx));
+		asm volatile(_ASM_MOV " %%" _ASM_DX ",%0" : "=m"(newregs->dx));
+		asm volatile(_ASM_MOV " %%" _ASM_SI ",%0" : "=m"(newregs->si));
+		asm volatile(_ASM_MOV " %%" _ASM_DI ",%0" : "=m"(newregs->di));
+		asm volatile(_ASM_MOV " %%" _ASM_BP ",%0" : "=m"(newregs->bp));
+		asm volatile(_ASM_MOV " %%" _ASM_AX ",%0" : "=m"(newregs->ax));
+		asm volatile(_ASM_MOV " %%" _ASM_SP ",%0" : "=m"(newregs->sp));
+#ifdef CONFIG_X86_64
 		asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
 		asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
 		asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
@@ -102,10 +89,15 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
 		asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
 		asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
 		asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
-		asm volatile("movl %%ss, %%eax;" :"=a"(newregs->ss));
-		asm volatile("movl %%cs, %%eax;" :"=a"(newregs->cs));
-		asm volatile("pushfq; popq %0" :"=m"(newregs->flags));
 #endif
+		asm volatile("movl %%ss,%k0" :"=a"(newregs->ss));
+		asm volatile("movl %%cs,%k0" :"=a"(newregs->cs));
+#ifdef CONFIG_X86_32
+		asm volatile("movl %%ds,%k0" :"=a"(newregs->ds));
+		asm volatile("movl %%es,%k0" :"=a"(newregs->es));
+#endif
+		asm volatile(_ASM_PUSHF "\n\t"
+			     _ASM_POP " %0" : "=m"(newregs->flags));
 		newregs->ip = _THIS_IP_;
 	}
 }
-- 
2.42.0
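For readers unfamiliar with the asm.h helpers used above, here is a
minimal sketch of how the selection macros behave. This is a simplified,
C-only illustration, not the verbatim kernel definitions (the real
arch/x86/include/asm/asm.h goes through __ASM_SEL()/__ASM_FORM() so the
macros also work in .S files); it only shows the expansion the patch
relies on:

/*
 * Simplified sketch (assumption: for illustration only).
 * __ASM_SIZE() appends the operand-size suffix and __ASM_REG()
 * picks the e-/r-prefixed register name, so one source line
 * covers both 32- and 64-bit builds.
 */
#ifdef CONFIG_X86_64
# define __ASM_SIZE(inst)	#inst "q"	/* mov -> "movq" */
# define __ASM_REG(reg)		"r" #reg	/* bx  -> "rbx"  */
#else
# define __ASM_SIZE(inst)	#inst "l"	/* mov -> "movl" */
# define __ASM_REG(reg)		"e" #reg	/* bx  -> "ebx"  */
#endif

#define _ASM_MOV	__ASM_SIZE(mov)
#define _ASM_BX		__ASM_REG(bx)

/*
 * The merged statement
 *
 *	asm volatile(_ASM_MOV " %%" _ASM_BX ",%0" : "=m"(newregs->bx));
 *
 * expands to "movq %%rbx,%0" on x86_64 and "movl %%ebx,%0" on i386,
 * i.e. exactly the instructions the removed #ifdef branches contained,
 * which is why the generated object code is unchanged.
 */

The segment reads switch to the "%k0" operand modifier, which makes GCC
print the 32-bit name of the output register ("%eax" for the "=a"
constraint) on both targets, so a single "movl %%ss,%k0" replaces the
previous explicit "movl %%ss, %%eax" forms.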
Re: [PATCH] x86/kexec: Merge x86_32 and x86_64 code using macros from asm.h
Posted by H. Peter Anvin 11 months, 1 week ago
On March 5, 2025 8:26:37 AM PST, Uros Bizjak <ubizjak@gmail.com> wrote:
>Merge common x86_32 and x86_64 code in crash_setup_regs()
>using macros from asm/asm.h.
>
>[...]
>
>+#define _ASM_PUSHF	__ASM_SIZE(pushf)
>+#define _ASM_POP	__ASM_SIZE(pop)
>
>[...]
>
>+		asm volatile(_ASM_PUSHF "\n\t"
>+			     _ASM_POP " %0" : "=m"(newregs->flags));
>
>[...]

There is no reason to stick a size suffix on if it is unambiguous. Normally pushf/popf never are, since stack operations are promoted automatically.
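
To make the suggestion concrete, a sketch of the suffix-less variant (a
hypothetical illustration of what a v2 could look like, not code from
this thread):

/*
 * Hypothetical v2 variant per the review comment above: push/pop and
 * pushf/popf operate on the native word size by default, so the
 * assembler accepts the suffix-less mnemonics on both i386 (where they
 * assemble as pushfl/popl) and x86_64 (pushfq/popq). C-only sketch;
 * the real asm.h macros also need an assembly-file form.
 */
#define _ASM_PUSHF	"pushf"
#define _ASM_POP	"pop"

asm volatile(_ASM_PUSHF "\n\t"
	     _ASM_POP " %0" : "=m"(newregs->flags));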
Re: [PATCH] x86/kexec: Merge x86_32 and x86_64 code using macros from asm.h
Posted by Uros Bizjak 11 months, 1 week ago
On Thu, Mar 6, 2025 at 3:14 PM H. Peter Anvin <hpa@zytor.com> wrote:
>
> On March 5, 2025 8:26:37 AM PST, Uros Bizjak <ubizjak@gmail.com> wrote:
> >Merge common x86_32 and x86_64 code in crash_setup_regs()
> >using macros from asm/asm.h.
> >
> > [...]
>
> There is no reason to stick a size suffix on if it is unambiguous. Normally pushf/popf never are, since stack operations are promoted automatically.

Thanks, I'll provide a v2 with the suffixes removed.

BR,
Uros.