From: Ard Biesheuvel <ardb@kernel.org>
Avoid absolute references in code, which require fixing up at boot time,
and replace them with RIP-relative ones. In this particular case, due to
register pressure, they cannot be avoided entirely, so one absolute
reference is retained; however, the resulting reference via the GOT is
compatible with running the linker in PIE mode.
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
arch/x86/kernel/kvm.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 263f8aed4e2c..8eac209a31aa 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -800,9 +800,11 @@ extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);
* Hand-optimize version for x86-64 to avoid 8 64-bit register saving and
* restoring to/from the stack.
*/
-#define PV_VCPU_PREEMPTED_ASM \
- "movq __per_cpu_offset(,%rdi,8), %rax\n\t" \
- "cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax)\n\t" \
+#define PV_VCPU_PREEMPTED_ASM \
+ "leaq __per_cpu_offset(%rip), %rax \n\t" \
+ "movq (%rax,%rdi,8), %rax \n\t" \
+ "addq steal_time@GOTPCREL(%rip), %rax \n\t" \
+ "cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "(%rax) \n\t" \
"setne %al\n\t"
DEFINE_ASM_FUNC(__raw_callee_save___kvm_vcpu_is_preempted,
--
2.46.0.792.g87dc391469-goog