The legacy vsyscall page is mapped at a fixed address,
0xffffffffff600000-0xffffffffff601000, in the kernel half of the address
space. Prior to LASS, a vsyscall page access from userspace always
generates a #PF. The kernel emulates execute (XONLY) accesses in the #PF
handler and returns the appropriate values to userspace.
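
For reference, the kind of legacy userspace access being emulated looks
roughly like the sketch below. The fixed gettimeofday() entry at the start
of the vsyscall page is the architectural ABI; the wrapper itself is only
illustrative:

	#include <sys/time.h>

	/* Legacy binary calling gettimeofday() through the vsyscall page. */
	typedef long (*vgtod_t)(struct timeval *tv, struct timezone *tz);

	static long legacy_gettimeofday(struct timeval *tv)
	{
		/* Fixed vsyscall address; the access faults and is emulated. */
		vgtod_t vgtod = (vgtod_t)0xffffffffff600000UL;

		return vgtod(tv, NULL);
	}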

With LASS, these accesses are intercepted before the paging structures
are traversed, so they trigger a #GP instead of a #PF. However, the #GP
error code provides little information about the faulting access.

Emulate the vsyscall access without going through complex instruction
decoding. Instead, use the faulting RIP, which is preserved in the user
registers, to determine whether the #GP was triggered by a vsyscall
access.
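
The RIP check is cheap: is_vsyscall_vaddr(), already used on the #PF
path, reduces to a page-aligned comparison against the fixed vsyscall
address, approximately:

	/* Sketch of the existing helper; no instruction decoding needed. */
	static bool is_vsyscall_vaddr(unsigned long vaddr)
	{
		return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
	}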

Signed-off-by: Sohil Mehta <sohil.mehta@intel.com>
---
v10:
- No change.
---
arch/x86/entry/vsyscall/vsyscall_64.c | 14 +++++++++++++-
arch/x86/include/asm/vsyscall.h | 6 ++++++
arch/x86/kernel/traps.c | 4 ++++
3 files changed, 23 insertions(+), 1 deletion(-)
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 4c3f49bf39e6..ff319d7e778c 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -23,7 +23,7 @@
* soon be no new userspace code that will ever use a vsyscall.
*
* The code in this file emulates vsyscalls when notified of a page
- * fault to a vsyscall address.
+ * fault or a general protection fault to a vsyscall address.
*/
#include <linux/kernel.h>
@@ -282,6 +282,18 @@ bool emulate_vsyscall_pf(unsigned long error_code, struct pt_regs *regs,
return __emulate_vsyscall(regs, address);
}
+bool emulate_vsyscall_gp(struct pt_regs *regs)
+{
+ if (!cpu_feature_enabled(X86_FEATURE_LASS))
+ return false;
+
+ /* Emulate only if the RIP points to the vsyscall address */
+ if (!is_vsyscall_vaddr(regs->ip))
+ return false;
+
+ return __emulate_vsyscall(regs, regs->ip);
+}
+
/*
* A pseudo VMA to allow ptrace access for the vsyscall page. This only
* covers the 64bit vsyscall page now. 32bit has a real VMA now and does
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index f34902364972..538053b1656a 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -15,6 +15,7 @@ extern void set_vsyscall_pgtable_user_bits(pgd_t *root);
* Returns true if handled.
*/
bool emulate_vsyscall_pf(unsigned long error_code, struct pt_regs *regs, unsigned long address);
+bool emulate_vsyscall_gp(struct pt_regs *regs);
#else
static inline void map_vsyscall(void) {}
static inline bool emulate_vsyscall_pf(unsigned long error_code,
@@ -22,6 +23,11 @@ static inline bool emulate_vsyscall_pf(unsigned long error_code,
{
return false;
}
+
+static inline bool emulate_vsyscall_gp(struct pt_regs *regs)
+{
+ return false;
+}
#endif
/*
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 25b45193eb19..59bfbdf0a1a0 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -69,6 +69,7 @@
#include <asm/tdx.h>
#include <asm/cfi.h>
#include <asm/msr.h>
+#include <asm/vsyscall.h>
#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
@@ -817,6 +818,9 @@ DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
if (fixup_umip_exception(regs))
goto exit;
+ if (emulate_vsyscall_gp(regs))
+ goto exit;
+
gp_user_force_sig_segv(regs, X86_TRAP_GP, error_code, desc);
goto exit;
}
--
2.43.0