common_startup_64() has set the segments to usable values already, and
they will be restored later in restore_processor_state(). Remove the
redundant segment loads in wakeup_long64().
Signed-off-by: Brian Gerst <brgerst@gmail.com>
---
arch/x86/kernel/acpi/wakeup_64.S | 6 ------
1 file changed, 6 deletions(-)
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 3d56610f0108..099401ef2ea4 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -28,12 +28,6 @@ SYM_FUNC_START(wakeup_long64)
1:
jmp 1b
2:
- movw $__KERNEL_DS, %ax
- movw %ax, %ss
- movw %ax, %ds
- movw %ax, %es
- movw %ax, %fs
- movw %ax, %gs
movq saved_rsp(%rip), %rsp
movq saved_rbx(%rip), %rbx
--
2.52.0