x86/acpi: Use %rip-relative addressing in wakeup_64.S
An instruction with a %rip-relative address operand is one byte shorter
than its absolute-address counterpart, and it is also compatible with
position-independent executable (-fpie) builds.
No functional changes intended.
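For illustration, the one-byte saving comes from the addressing form: the
32-bit absolute variant needs a SIB byte, the %rip-relative one does not.
A minimal sketch (byte counts assume GNU as with the default small code
model; the xx displacement bytes are filled in at link time):

	movq	saved_magic, %rax	# 48 8b 04 25 xx xx xx xx (8 bytes)
	movq	saved_magic(%rip), %rax	# 48 8b 05 xx xx xx xx    (7 bytes)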
Cc: "Rafael J. Wysocki" <rafael@kernel.org>
Cc: Len Brown <len.brown@intel.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
---
arch/x86/kernel/acpi/wakeup_64.S | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index d5d8a352eafa..94ff83f3d3fe 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -17,7 +17,7 @@
  * Hooray, we are in Long 64-bit mode (but still running in low memory)
  */
 SYM_FUNC_START(wakeup_long64)
-	movq	saved_magic, %rax
+	movq	saved_magic(%rip), %rax
 	movq	$0x123456789abcdef0, %rdx
 	cmpq	%rdx, %rax
 	je	2f
@@ -33,14 +33,14 @@ SYM_FUNC_START(wakeup_long64)
 	movw	%ax, %es
 	movw	%ax, %fs
 	movw	%ax, %gs
-	movq	saved_rsp, %rsp
+	movq	saved_rsp(%rip), %rsp
 
-	movq	saved_rbx, %rbx
-	movq	saved_rdi, %rdi
-	movq	saved_rsi, %rsi
-	movq	saved_rbp, %rbp
+	movq	saved_rbx(%rip), %rbx
+	movq	saved_rdi(%rip), %rdi
+	movq	saved_rsi(%rip), %rsi
+	movq	saved_rbp(%rip), %rbp
 
-	movq	saved_rip, %rax
+	movq	saved_rip(%rip), %rax
 	ANNOTATE_RETPOLINE_SAFE
 	jmp	*%rax
 SYM_FUNC_END(wakeup_long64)
@@ -72,11 +72,11 @@ SYM_FUNC_START(do_suspend_lowlevel)
 
 	movq	$.Lresume_point, saved_rip(%rip)
 
-	movq	%rsp, saved_rsp
-	movq	%rbp, saved_rbp
-	movq	%rbx, saved_rbx
-	movq	%rdi, saved_rdi
-	movq	%rsi, saved_rsi
+	movq	%rsp, saved_rsp(%rip)
+	movq	%rbp, saved_rbp(%rip)
+	movq	%rbx, saved_rbx(%rip)
+	movq	%rdi, saved_rdi(%rip)
+	movq	%rsi, saved_rsi(%rip)
 
 	addq	$8, %rsp
 	movl	$3, %edi
--
2.41.0
On Fri, Nov 3, 2023 at 11:49 AM Uros Bizjak <ubizjak@gmail.com> wrote:
>
> An instruction with a %rip-relative address operand is one byte shorter
> than its absolute-address counterpart, and it is also compatible with
> position-independent executable (-fpie) builds.
>
> No functional changes intended.

I'm wondering what's the exact motivation for making this change.

Any urgent need for it doesn't seem to be there.
On Mon, Nov 6, 2023 at 3:14 PM Rafael J. Wysocki <rafael@kernel.org> wrote:
>
> On Fri, Nov 3, 2023 at 11:49 AM Uros Bizjak <ubizjak@gmail.com> wrote:
> >
> > An instruction with a %rip-relative address operand is one byte shorter
> > than its absolute-address counterpart, and it is also compatible with
> > position-independent executable (-fpie) builds.
> >
> > No functional changes intended.
>
> I'm wondering what's the exact motivation for making this change.

Mainly to be consistent with what the compiler emits by default when a
symbol is accessed. As said in the commit message, the %rip-relative
access is also one byte shorter and results in position-independent
code.

> Any urgent need for it doesn't seem to be there.

True. It's mostly a nice-to-have change.

Thanks,
Uros.
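(For illustration of the point above, this is roughly what GCC emits at
-O2 with its default small code model for a plain load of a global on
x86-64; the read_magic function is a made-up example, not kernel code:)

	# long saved_magic;
	# long read_magic(void) { return saved_magic; }
	read_magic:
		movq	saved_magic(%rip), %rax
		ret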
On Mon, Nov 6, 2023 at 3:25 PM Uros Bizjak <ubizjak@gmail.com> wrote:
>
> On Mon, Nov 6, 2023 at 3:14 PM Rafael J. Wysocki <rafael@kernel.org> wrote:
> >
> > I'm wondering what's the exact motivation for making this change.
>
> Mainly to be consistent with what the compiler emits by default when a
> symbol is accessed. As said in the commit message, the %rip-relative
> access is also one byte shorter and results in position-independent
> code.
>
> > Any urgent need for it doesn't seem to be there.
>
> True. It's mostly a nice-to-have change.

OK, so

Acked-by: Rafael J. Wysocki <rafael@kernel.org>

and the decision what to do with it is up to the x86 folks.
The following commit has been merged into the x86/percpu branch of tip:
Commit-ID: 0978d64f9406122c369d5f46e1eb855646f6c32c
Gitweb: https://git.kernel.org/tip/0978d64f9406122c369d5f46e1eb855646f6c32c
Author: Uros Bizjak <ubizjak@gmail.com>
AuthorDate: Fri, 03 Nov 2023 11:48:22 +01:00
Committer: Ingo Molnar <mingo@kernel.org>
CommitterDate: Thu, 30 Nov 2023 20:09:49 +01:00
x86/acpi: Use %rip-relative addressing in wakeup_64.S
This is a "nice-to-have" change with minor code generation benefits:
- Instruction with %rip-relative address operand is one byte shorter than
its absolute address counterpart,
- it is also compatible with position independent executable (-fpie) builds,
- it is also consistent with what the compiler emits by default when
a symbol is accessed.
No functional changes intended.
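For illustration, the -fpie point comes down to relocations: the absolute
form needs an absolute relocation, which the linker rejects when producing
a position-independent executable, while the %rip-relative form uses a
PC-relative one. A minimal sketch (relocation names as reported by
objdump -r on a typical x86-64 ELF toolchain):

	movq	saved_magic, %rax	# R_X86_64_32S: absolute address,
					# not usable in a PIE link
	movq	saved_magic(%rip), %rax	# R_X86_64_PC32: PC-relative offset,
					# position-independent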
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Rafael J. Wysocki <rafael@kernel.org>
Link: https://lore.kernel.org/r/20231103104900.409470-1-ubizjak@gmail.com
---
arch/x86/kernel/acpi/wakeup_64.S | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index d5d8a35..94ff83f 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -17,7 +17,7 @@
  * Hooray, we are in Long 64-bit mode (but still running in low memory)
  */
 SYM_FUNC_START(wakeup_long64)
-	movq	saved_magic, %rax
+	movq	saved_magic(%rip), %rax
 	movq	$0x123456789abcdef0, %rdx
 	cmpq	%rdx, %rax
 	je	2f
@@ -33,14 +33,14 @@ SYM_FUNC_START(wakeup_long64)
 	movw	%ax, %es
 	movw	%ax, %fs
 	movw	%ax, %gs
-	movq	saved_rsp, %rsp
+	movq	saved_rsp(%rip), %rsp
 
-	movq	saved_rbx, %rbx
-	movq	saved_rdi, %rdi
-	movq	saved_rsi, %rsi
-	movq	saved_rbp, %rbp
+	movq	saved_rbx(%rip), %rbx
+	movq	saved_rdi(%rip), %rdi
+	movq	saved_rsi(%rip), %rsi
+	movq	saved_rbp(%rip), %rbp
 
-	movq	saved_rip, %rax
+	movq	saved_rip(%rip), %rax
 	ANNOTATE_RETPOLINE_SAFE
 	jmp	*%rax
 SYM_FUNC_END(wakeup_long64)
@@ -72,11 +72,11 @@ SYM_FUNC_START(do_suspend_lowlevel)
 
 	movq	$.Lresume_point, saved_rip(%rip)
 
-	movq	%rsp, saved_rsp
-	movq	%rbp, saved_rbp
-	movq	%rbx, saved_rbx
-	movq	%rdi, saved_rdi
-	movq	%rsi, saved_rsi
+	movq	%rsp, saved_rsp(%rip)
+	movq	%rbp, saved_rbp(%rip)
+	movq	%rbx, saved_rbx(%rip)
+	movq	%rdi, saved_rdi(%rip)
+	movq	%rsi, saved_rsi(%rip)
 
 	addq	$8, %rsp
 	movl	$3, %edi