xen/arch/x86/x86_64/compat/entry.S | 17 +++++++----------
xen/arch/x86/x86_64/entry.S        |  8 ++++----
2 files changed, 11 insertions(+), 14 deletions(-)
cr4_pv32_restore() needs two registers. Right now, it spills %rdx and
clobbers %rax.
However, %rcx is free to use at all callsites. Annotate CR4_PV32_RESTORE with
our usual clobber comments, and swap %rdx for %rcx in the non-fatal paths.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Roger Pau Monné <roger.pau@citrix.com>
CC: Wei Liu <wl@xen.org>
I suspect we can further improve this by using %r14 rather than
GET_CPUINFO_FIELD(), but I'll leave that to a future change.
---
xen/arch/x86/x86_64/compat/entry.S | 17 +++++++----------
xen/arch/x86/x86_64/entry.S | 8 ++++----
2 files changed, 11 insertions(+), 14 deletions(-)
diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index 49811a56e965..d4f0e4804090 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -23,7 +23,7 @@ FUNC(entry_int82)
sti
- CR4_PV32_RESTORE
+ CR4_PV32_RESTORE /* Clob: ac */
GET_CURRENT(bx)
@@ -163,17 +163,15 @@ FUNC(compat_restore_all_guest)
_ASM_PRE_EXTABLE(.Lft0, handle_exception)
END(compat_restore_all_guest)
-/* This mustn't modify registers other than %rax. */
+/* Callers can cope with both %rax and %rcx being clobbered. */
FUNC(cr4_pv32_restore)
- push %rdx
- GET_CPUINFO_FIELD(cr4, dx)
- mov (%rdx), %rax
+ GET_CPUINFO_FIELD(cr4, cx)
+ mov (%rcx), %rax
test $XEN_CR4_PV32_BITS, %eax
jnz 0f
or cr4_pv32_mask(%rip), %rax
mov %rax, %cr4
- mov %rax, (%rdx)
- pop %rdx
+ mov %rax, (%rcx)
ret
0:
#ifndef NDEBUG
@@ -191,7 +189,6 @@ FUNC(cr4_pv32_restore)
BUG
1:
#endif
- pop %rdx
xor %eax, %eax
ret
END(cr4_pv32_restore)
@@ -227,7 +224,7 @@ UNLIKELY_END(compat_syscall_gpf)
END(compat_syscall)
FUNC(compat_sysenter)
- CR4_PV32_RESTORE
+ CR4_PV32_RESTORE /* Clob: ac */
movq VCPU_trap_ctxt(%rbx),%rcx
cmpb $X86_EXC_GP, UREGS_entry_vector(%rsp)
movzwl VCPU_sysenter_sel(%rbx),%eax
@@ -242,7 +239,7 @@ FUNC(compat_sysenter)
END(compat_sysenter)
FUNC(compat_int80_direct_trap)
- CR4_PV32_RESTORE
+ CR4_PV32_RESTORE /* Clob: ac */
call compat_create_bounce_frame
jmp compat_test_all_events
END(compat_int80_direct_trap)
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index c3f6b667a72a..6c53c0091168 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -309,7 +309,7 @@ FUNC(cstar_enter)
.Lcstar_cr3_okay:
sti
- CR4_PV32_RESTORE
+ CR4_PV32_RESTORE /* Clob: ac */
movq STACK_CPUINFO_FIELD(current_vcpu)(%rbx), %rbx
@@ -712,7 +712,7 @@ FUNC(common_interrupt)
cmovnz %r12d, %ebx
.Lintr_cr3_okay:
- CR4_PV32_RESTORE
+ CR4_PV32_RESTORE /* Clob: ac */
movq %rsp,%rdi
callq do_IRQ
mov %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
@@ -762,7 +762,7 @@ handle_exception_saved:
jz .Lcr4_pv32_done
cmpb $0,DOMAIN_is_32bit_pv(%rax)
je .Lcr4_pv32_done
- call cr4_pv32_restore
+ call cr4_pv32_restore /* Clob: ac */
/*
* An NMI or #MC may occur between clearing CR4.SMEP / CR4.SMAP in
* compat_restore_all_guest and it actually returning to guest
@@ -1046,7 +1046,7 @@ FUNC(handle_ist_exception)
.List_cr3_okay:
#ifdef CONFIG_PV
- CR4_PV32_RESTORE
+ CR4_PV32_RESTORE /* Clob: ac */
testb $3,UREGS_cs(%rsp)
jz 1f
/*
base-commit: 98ae35cab0e40e59963f9f58345bf378b9522d74
--
2.30.2
On 24.01.2024 16:49, Andrew Cooper wrote: > cr4_pv32_restore() needs two registers. Right now, it spills %rdx and > clobbers %rax. > > However, %rcx is free to use at all callsites. Annotate CR4_PV32_RESTORE with > our usual clobber comments, and swap %rdx for %rcx in the non-fatal paths > > Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com> Reviewed-by: Jan Beulich <jbeulich@suse.com>
© 2016 - 2024 Red Hat, Inc.