wakeup_long64() is called from common_startup_64() via initial_code, so
it is already running on the normal virtual mapping. There is no need
to use an indirect jump since it is not switching mappings.
Remove the indirect jump by embedding wakeup_long64() as an inner label
of do_suspend_lowlevel(). Remove saved_rip, which is now unused.
No functional change.
Signed-off-by: Brian Gerst <brgerst@gmail.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: "Rafael J. Wysocki" <rafael@kernel.org>
---
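For context: the resume path points initial_code at wakeup_long64 before
suspending, so common_startup_64() lands there with paging and the kernel
virtual mapping already set up. From memory, the relevant setup in
arch/x86/kernel/acpi/sleep.c looks roughly like the following (an
approximate sketch, not verbatim kernel source):

	/* sketch of the 64-bit part of x86_acpi_suspend_lowlevel() */
	initial_code = (unsigned long)wakeup_long64;	/* resume entry */
	saved_magic  = 0x123456789abcdef0;	/* checked in wakeup_long64 */
	...
	do_suspend_lowlevel();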
arch/x86/kernel/acpi/wakeup_64.S | 64 +++++++++++++-------------------
1 file changed, 26 insertions(+), 38 deletions(-)
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 04f561f75e99..a256cdd03ab5 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -13,39 +13,6 @@
# Copyright 2003 Pavel Machek <pavel@suse.cz
.code64
- /*
- * Hooray, we are in Long 64-bit mode (but still running in low memory)
- */
-SYM_FUNC_START(wakeup_long64)
- ANNOTATE_NOENDBR
- movq saved_magic(%rip), %rax
- movq $0x123456789abcdef0, %rdx
- cmpq %rdx, %rax
- je 2f
-
- /* stop here on a saved_magic mismatch */
- movq $0xbad6d61676963, %rcx
-1:
- jmp 1b
-2:
- movw $__KERNEL_DS, %ax
- movw %ax, %ss
- movw %ax, %ds
- movw %ax, %es
- movw %ax, %fs
- movw %ax, %gs
- movq saved_rsp(%rip), %rsp
-
- movq saved_rbx(%rip), %rbx
- movq saved_rdi(%rip), %rdi
- movq saved_rsi(%rip), %rsi
- movq saved_rbp(%rip), %rbp
-
- movq saved_rip(%rip), %rax
- ANNOTATE_RETPOLINE_SAFE
- jmp *%rax
-SYM_FUNC_END(wakeup_long64)
-
SYM_FUNC_START(do_suspend_lowlevel)
FRAME_BEGIN
subq $8, %rsp
@@ -71,8 +38,6 @@ SYM_FUNC_START(do_suspend_lowlevel)
pushfq
popq pt_regs_flags(%rax)
- movq $.Lresume_point, saved_rip(%rip)
-
movq %rsp, saved_rsp(%rip)
movq %rbp, saved_rbp(%rip)
movq %rbx, saved_rbx(%rip)
@@ -86,9 +51,27 @@ SYM_FUNC_START(do_suspend_lowlevel)
/* in case something went wrong, restore the machine status and go on */
jmp .Lresume_point
- .align 4
-.Lresume_point:
+SYM_INNER_LABEL_ALIGN(wakeup_long64, SYM_L_GLOBAL)
ANNOTATE_NOENDBR
+ movq saved_magic(%rip), %rax
+ movq $0x123456789abcdef0, %rdx
+ cmpq %rdx, %rax
+ jne .Lbad_saved_magic
+
+ movw $__KERNEL_DS, %ax
+ movw %ax, %ss
+ movw %ax, %ds
+ movw %ax, %es
+ movw %ax, %fs
+ movw %ax, %gs
+ movq saved_rsp(%rip), %rsp
+
+ movq saved_rbx(%rip), %rbx
+ movq saved_rdi(%rip), %rdi
+ movq saved_rsi(%rip), %rsi
+ movq saved_rbp(%rip), %rbp
+
+.Lresume_point:
/* We don't restore %rax, it must be 0 anyway */
movq $saved_context, %rax
movq saved_context_cr4(%rax), %rbx
@@ -130,6 +113,12 @@ SYM_FUNC_START(do_suspend_lowlevel)
addq $8, %rsp
FRAME_END
jmp restore_processor_state
+
+.Lbad_saved_magic:
+ /* stop here on a saved_magic mismatch */
+ movq $0xbad6d61676963, %rcx
+1:
+ jmp 1b
SYM_FUNC_END(do_suspend_lowlevel)
STACK_FRAME_NON_STANDARD do_suspend_lowlevel
@@ -139,7 +128,6 @@ saved_rsi: .quad 0
saved_rdi: .quad 0
saved_rbx: .quad 0
-saved_rip: .quad 0
saved_rsp: .quad 0
SYM_DATA(saved_magic, .quad 0)
base-commit: 72249a0533c63e77e4bf56012b7b4f8fb3066317
--
2.52.0
On Sun, 18 Jan 2026 at 18:05, Brian Gerst <brgerst@gmail.com> wrote:
>
> wakeup_long64() is called from common_startup_64() via inital_code, so

initial_code

> it is already running on the normal virtual mapping. There is no need
> to use an indirect jump since it is not switching mappings.
>

By the same reasoning (i.e., that everything executes in the kernel
virtual mapping) you might also convert the movq on line 55 into a
RIP-relative leaq, as it is the only remaining non-RIP relative access
in the file. That way, I can drop this file from my PIE series too.

But regardless of that,

Acked-by: Ard Biesheuvel <ardb@kernel.org>
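(For reference, the conversion suggested above would be roughly the
following one-liner against the same file; this is an untested sketch of
the idea, not a posted patch. The instruction in question is the
"movq $saved_context, %rax" visible in the diff:

	-	movq	$saved_context, %rax
	+	leaq	saved_context(%rip), %rax

Both forms leave the address of saved_context in %rax, but the leaq is
RIP-relative and so needs no absolute relocation in a PIE kernel.)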
On Sun, Jan 18, 2026 at 1:09 PM Ard Biesheuvel <ardb@kernel.org> wrote:
>
> By the same reasoning (i.e., that everything executes in the kernel
> virtual mapping) you might also convert the movq on line 55 into a
> RIP-relative leaq, as it is the only remaining non-RIP relative access
> in the file. That way, I can drop this file from my PIE series too.

This is more of a general cleanup than specifically for PIE. When
resume was converted to using the SMP trampoline, a lot of the old
cruft was left in place and I am working on additional cleanups.
On Sun, 18 Jan 2026 at 20:17, Brian Gerst <brgerst@gmail.com> wrote:
>
> This is more of a general cleanup than specifically for PIE. When
> resume was converted to using the SMP trampoline, a lot of the old
> cruft was left in place and I am working on additional cleanups.

Fair enough.