With LASS, vsyscall page accesses will cause a #GP instead of a #PF.
Separate out the core vsyscall emulation code from the #PF specific
handling in preparation for the upcoming #GP emulation.
No functional change intended.
Signed-off-by: Sohil Mehta <sohil.mehta@intel.com>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
---
v2:
- No change
---
arch/x86/entry/vsyscall/vsyscall_64.c | 64 ++++++++++++++-------------
arch/x86/include/asm/vsyscall.h | 7 ++-
arch/x86/mm/fault.c | 2 +-
3 files changed, 37 insertions(+), 36 deletions(-)
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 4bd1e271bb22..5c6559c37c5b 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -111,43 +111,13 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size)
}
}
-bool emulate_vsyscall(unsigned long error_code,
- struct pt_regs *regs, unsigned long address)
+static bool __emulate_vsyscall(struct pt_regs *regs, unsigned long address)
{
unsigned long caller;
int vsyscall_nr, syscall_nr, tmp;
long ret;
unsigned long orig_dx;
- /* Write faults or kernel-privilege faults never get fixed up. */
- if ((error_code & (X86_PF_WRITE | X86_PF_USER)) != X86_PF_USER)
- return false;
-
- /*
- * Assume that faults at regs->ip are because of an
- * instruction fetch. Return early and avoid
- * emulation for faults during data accesses:
- */
- if (address != regs->ip) {
- /* Failed vsyscall read */
- if (vsyscall_mode == EMULATE)
- return false;
-
- /*
- * User code tried and failed to read the vsyscall page.
- */
- warn_bad_vsyscall(KERN_INFO, regs, "vsyscall read attempt denied -- look up the vsyscall kernel parameter if you need a workaround");
- return false;
- }
-
- /*
- * X86_PF_INSTR is only set when NX is supported. When
- * available, use it to double-check that the emulation code
- * is only being used for instruction fetches:
- */
- if (cpu_feature_enabled(X86_FEATURE_NX))
- WARN_ON_ONCE(!(error_code & X86_PF_INSTR));
-
/*
* No point in checking CS -- the only way to get here is a user mode
* trap to a high address, which means that we're in 64-bit user code.
@@ -280,6 +250,38 @@ bool emulate_vsyscall(unsigned long error_code,
return true;
}
+bool emulate_vsyscall_pf(unsigned long error_code, struct pt_regs *regs,
+ unsigned long address)
+{
+ /* Write faults or kernel-privilege faults never get fixed up. */
+ if ((error_code & (X86_PF_WRITE | X86_PF_USER)) != X86_PF_USER)
+ return false;
+
+ /*
+ * Assume that faults at regs->ip are because of an instruction
+ * fetch. Return early and avoid emulation for faults during
+ * data accesses:
+ */
+ if (address != regs->ip) {
+ /* User code tried and failed to read the vsyscall page. */
+ if (vsyscall_mode != EMULATE)
+ warn_bad_vsyscall(KERN_INFO, regs,
+ "vsyscall read attempt denied -- look up the vsyscall kernel parameter if you need a workaround");
+
+ return false;
+ }
+
+ /*
+ * X86_PF_INSTR is only set when NX is supported. When
+ * available, use it to double-check that the emulation code
+ * is only being used for instruction fetches:
+ */
+ if (cpu_feature_enabled(X86_FEATURE_NX))
+ WARN_ON_ONCE(!(error_code & X86_PF_INSTR));
+
+ return __emulate_vsyscall(regs, address);
+}
+
/*
* A pseudo VMA to allow ptrace access for the vsyscall page. This only
* covers the 64bit vsyscall page now. 32bit has a real VMA now and does
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index 472f0263dbc6..f34902364972 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -14,12 +14,11 @@ extern void set_vsyscall_pgtable_user_bits(pgd_t *root);
* Called on instruction fetch fault in vsyscall page.
* Returns true if handled.
*/
-extern bool emulate_vsyscall(unsigned long error_code,
- struct pt_regs *regs, unsigned long address);
+bool emulate_vsyscall_pf(unsigned long error_code, struct pt_regs *regs, unsigned long address);
#else
static inline void map_vsyscall(void) {}
-static inline bool emulate_vsyscall(unsigned long error_code,
- struct pt_regs *regs, unsigned long address)
+static inline bool emulate_vsyscall_pf(unsigned long error_code,
+ struct pt_regs *regs, unsigned long address)
{
return false;
}
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index b83a06739b51..f0e77e084482 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1314,7 +1314,7 @@ void do_user_addr_fault(struct pt_regs *regs,
* to consider the PF_PK bit.
*/
if (is_vsyscall_vaddr(address)) {
- if (emulate_vsyscall(error_code, regs, address))
+ if (emulate_vsyscall_pf(error_code, regs, address))
return;
}
#endif
--
2.43.0
On 2026-03-05 13:40, Sohil Mehta wrote:
> With LASS, vsyscall page accesses will cause a #GP instead of a #PF.
> Separate out the core vsyscall emulation code from the #PF specific
> handling in preparation for the upcoming #GP emulation.
>
> No functional change intended.
>
> Signed-off-by: Sohil Mehta <sohil.mehta@intel.com>
> Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
> ---
> v2:
> - No change
> ---
> arch/x86/entry/vsyscall/vsyscall_64.c | 64 ++++++++++++++-------------
> arch/x86/include/asm/vsyscall.h | 7 ++-
> arch/x86/mm/fault.c | 2 +-
> 3 files changed, 37 insertions(+), 36 deletions(-)
>
> diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
> index 4bd1e271bb22..5c6559c37c5b 100644
> --- a/arch/x86/entry/vsyscall/vsyscall_64.c
> +++ b/arch/x86/entry/vsyscall/vsyscall_64.c
> @@ -111,43 +111,13 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size)
> }
> }
>
> -bool emulate_vsyscall(unsigned long error_code,
> - struct pt_regs *regs, unsigned long address)
> +static bool __emulate_vsyscall(struct pt_regs *regs, unsigned long address)
> {
> unsigned long caller;
> int vsyscall_nr, syscall_nr, tmp;
> long ret;
> unsigned long orig_dx;
>
> - /* Write faults or kernel-privilege faults never get fixed up. */
> - if ((error_code & (X86_PF_WRITE | X86_PF_USER)) != X86_PF_USER)
> - return false;
> -
> - /*
> - * Assume that faults at regs->ip are because of an
> - * instruction fetch. Return early and avoid
> - * emulation for faults during data accesses:
> - */
> - if (address != regs->ip) {
> - /* Failed vsyscall read */
> - if (vsyscall_mode == EMULATE)
> - return false;
> -
> - /*
> - * User code tried and failed to read the vsyscall page.
> - */
> - warn_bad_vsyscall(KERN_INFO, regs, "vsyscall read attempt denied -- look up the vsyscall kernel parameter if you need a workaround");
> - return false;
> - }
> -
> - /*
> - * X86_PF_INSTR is only set when NX is supported. When
> - * available, use it to double-check that the emulation code
> - * is only being used for instruction fetches:
> - */
> - if (cpu_feature_enabled(X86_FEATURE_NX))
> - WARN_ON_ONCE(!(error_code & X86_PF_INSTR));
> -
> /*
> * No point in checking CS -- the only way to get here is a user mode
> * trap to a high address, which means that we're in 64-bit user code.
> @@ -280,6 +250,38 @@ bool emulate_vsyscall(unsigned long error_code,
> return true;
> }
>
> +bool emulate_vsyscall_pf(unsigned long error_code, struct pt_regs *regs,
> + unsigned long address)
> +{
> + /* Write faults or kernel-privilege faults never get fixed up. */
> + if ((error_code & (X86_PF_WRITE | X86_PF_USER)) != X86_PF_USER)
> + return false;
I think this can be tightened further. If X86_PF_PK, X86_PF_SHSTK or
X86_PF_RSVD are set we should definitely not try to do any emulation, and I
believe the same is true for X86_PF_SGX or X86_PF_RMP; I'm not 100% sure as I don't
have the semantics of those bits in my head at the moment.
> + /*
> + * Assume that faults at regs->ip are because of an instruction
> + * fetch. Return early and avoid emulation for faults during
> + * data accesses:
> + */
> + if (address != regs->ip) {
> + /* User code tried and failed to read the vsyscall page. */
> + if (vsyscall_mode != EMULATE)
> + warn_bad_vsyscall(KERN_INFO, regs,
> + "vsyscall read attempt denied -- look up the vsyscall kernel parameter if you need a workaround");
> +
> + return false;
> + }
> +
I don't really like the reshuffling of the code here.
> + /*
> + * X86_PF_INSTR is only set when NX is supported. When
> + * available, use it to double-check that the emulation code
> + * is only being used for instruction fetches:
> + */
> + if (cpu_feature_enabled(X86_FEATURE_NX))
> + WARN_ON_ONCE(!(error_code & X86_PF_INSTR));
> +
I realize this is the same as the previous code, but I really think this
should have a "return false;" in it as well.
> + return __emulate_vsyscall(regs, address);
> +}
> +
> /*
> * A pseudo VMA to allow ptrace access for the vsyscall page. This only
> * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
> diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
> index 472f0263dbc6..f34902364972 100644
> --- a/arch/x86/include/asm/vsyscall.h
> +++ b/arch/x86/include/asm/vsyscall.h
> @@ -14,12 +14,11 @@ extern void set_vsyscall_pgtable_user_bits(pgd_t *root);
> * Called on instruction fetch fault in vsyscall page.
> * Returns true if handled.
> */
> -extern bool emulate_vsyscall(unsigned long error_code,
> - struct pt_regs *regs, unsigned long address);
> +bool emulate_vsyscall_pf(unsigned long error_code, struct pt_regs *regs, unsigned long address);
> #else
> static inline void map_vsyscall(void) {}
> -static inline bool emulate_vsyscall(unsigned long error_code,
> - struct pt_regs *regs, unsigned long address)
> +static inline bool emulate_vsyscall_pf(unsigned long error_code,
> + struct pt_regs *regs, unsigned long address)
> {
> return false;
> }
> diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
> index b83a06739b51..f0e77e084482 100644
> --- a/arch/x86/mm/fault.c
> +++ b/arch/x86/mm/fault.c
> @@ -1314,7 +1314,7 @@ void do_user_addr_fault(struct pt_regs *regs,
> * to consider the PF_PK bit.
> */
> if (is_vsyscall_vaddr(address)) {
> - if (emulate_vsyscall(error_code, regs, address))
> + if (emulate_vsyscall_pf(error_code, regs, address))
> return;
> }
> #endif
Other than the above minor nitpicks, looks good to me.
Reviewed-by: H. Peter Anvin (Intel) <hpa@zytor.com>
On 3/5/2026 2:36 PM, H. Peter Anvin wrote:
> On 2026-03-05 13:40, Sohil Mehta wrote:
>> +bool emulate_vsyscall_pf(unsigned long error_code, struct pt_regs *regs,
>> + unsigned long address)
>> +{
>> + /* Write faults or kernel-privilege faults never get fixed up. */
>> + if ((error_code & (X86_PF_WRITE | X86_PF_USER)) != X86_PF_USER)
>> + return false;
>
>
> I think this can be tightened further. If X86_PF_PK, X86_PF_SHSTK or
> X86_PF_RSVD are set we should definitely not try to do any emulation, and I
> believe the same is true for X86_PF_SGX or X86_PF_RMP; I'm not 100% sure as I don't
> have the semantics of those bits in my head at the moment.
>
Could some of this already be (or might need to be) taken care of in the
calling function do_user_addr_fault()? For example, I see comments such as:
* PKRU never rejects instruction fetches, so we don't need
* to consider the PF_PK bit.
* Read-only permissions can not be expressed in shadow stack PTEs.
* Treat all shadow stack accesses as WRITE faults.
I would prefer to avoid changing any logic for the existing #PF
emulation handling in this patch. If it's okay, we could pursue these as
a follow-on to this series.
>> + /*
>> + * Assume that faults at regs->ip are because of an instruction
>> + * fetch. Return early and avoid emulation for faults during
>> + * data accesses:
>> + */
>> + if (address != regs->ip) {
>> + /* User code tried and failed to read the vsyscall page. */
>> + if (vsyscall_mode != EMULATE)
>> + warn_bad_vsyscall(KERN_INFO, regs,
>> + "vsyscall read attempt denied -- look up the vsyscall kernel parameter if you need a workaround");
>> +
>> + return false;
>> + }
>> +
>
> I don't really like the reshuffling of the code here.
>
Sure, I'll keep the flow the same as the original code. Will change it in
the next revision.
>> + /*
>> + * X86_PF_INSTR is only set when NX is supported. When
>> + * available, use it to double-check that the emulation code
>> + * is only being used for instruction fetches:
>> + */
>> + if (cpu_feature_enabled(X86_FEATURE_NX))
>> + WARN_ON_ONCE(!(error_code & X86_PF_INSTR));
>> +
>
> I realize this is the same as the previous code, but I really think this
> should have a "return false;" in it as well.
>
Yes, returning early makes sense if the warning triggers. But we would
need to change the logic to:
if (cpu_feature_enabled(X86_FEATURE_NX) &&
WARN_ON_ONCE(!(error_code & X86_PF_INSTR)))
return false;
Again, I would like to avoid such a logic change in this patch as it
only focuses on code reorganization. Would it need to be backported?
Preferably I could send it out as a follow-on patch along with the other
tightening that you mentioned above. Your suggestions seem fine to me,
but I really want to understand and evaluate the changes before sending
them out.
>> + return __emulate_vsyscall(regs, address);
>> +}
>> +
>> /*
>
> Other than the above minor nitpicks, looks good to me.
>
> Reviewed-by: H. Peter Anvin (Intel) <hpa@zytor.com>
>
Thank you!
The following commit has been merged into the x86/cpu branch of tip:
Commit-ID: 5fe9c000008c56224b6f9a528f0cd8f0977ebe42
Gitweb: https://git.kernel.org/tip/5fe9c000008c56224b6f9a528f0cd8f0977ebe42
Author: Sohil Mehta <sohil.mehta@intel.com>
AuthorDate: Thu, 05 Mar 2026 13:40:22 -08:00
Committer: Dave Hansen <dave.hansen@linux.intel.com>
CommitterDate: Thu, 05 Mar 2026 13:49:25 -08:00
x86/vsyscall: Reorganize the page fault emulation code
With LASS, vsyscall page accesses will cause a #GP instead of a #PF.
Separate out the core vsyscall emulation code from the #PF specific
handling in preparation for the upcoming #GP emulation.
No functional change intended.
Signed-off-by: Sohil Mehta <sohil.mehta@intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://patch.msgid.link/20260305214026.3887452-2-sohil.mehta@intel.com
---
arch/x86/entry/vsyscall/vsyscall_64.c | 64 +++++++++++++-------------
arch/x86/include/asm/vsyscall.h | 7 +--
arch/x86/mm/fault.c | 2 +-
3 files changed, 37 insertions(+), 36 deletions(-)
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 4bd1e27..5c6559c 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -111,43 +111,13 @@ static bool write_ok_or_segv(unsigned long ptr, size_t size)
}
}
-bool emulate_vsyscall(unsigned long error_code,
- struct pt_regs *regs, unsigned long address)
+static bool __emulate_vsyscall(struct pt_regs *regs, unsigned long address)
{
unsigned long caller;
int vsyscall_nr, syscall_nr, tmp;
long ret;
unsigned long orig_dx;
- /* Write faults or kernel-privilege faults never get fixed up. */
- if ((error_code & (X86_PF_WRITE | X86_PF_USER)) != X86_PF_USER)
- return false;
-
- /*
- * Assume that faults at regs->ip are because of an
- * instruction fetch. Return early and avoid
- * emulation for faults during data accesses:
- */
- if (address != regs->ip) {
- /* Failed vsyscall read */
- if (vsyscall_mode == EMULATE)
- return false;
-
- /*
- * User code tried and failed to read the vsyscall page.
- */
- warn_bad_vsyscall(KERN_INFO, regs, "vsyscall read attempt denied -- look up the vsyscall kernel parameter if you need a workaround");
- return false;
- }
-
- /*
- * X86_PF_INSTR is only set when NX is supported. When
- * available, use it to double-check that the emulation code
- * is only being used for instruction fetches:
- */
- if (cpu_feature_enabled(X86_FEATURE_NX))
- WARN_ON_ONCE(!(error_code & X86_PF_INSTR));
-
/*
* No point in checking CS -- the only way to get here is a user mode
* trap to a high address, which means that we're in 64-bit user code.
@@ -280,6 +250,38 @@ sigsegv:
return true;
}
+bool emulate_vsyscall_pf(unsigned long error_code, struct pt_regs *regs,
+ unsigned long address)
+{
+ /* Write faults or kernel-privilege faults never get fixed up. */
+ if ((error_code & (X86_PF_WRITE | X86_PF_USER)) != X86_PF_USER)
+ return false;
+
+ /*
+ * Assume that faults at regs->ip are because of an instruction
+ * fetch. Return early and avoid emulation for faults during
+ * data accesses:
+ */
+ if (address != regs->ip) {
+ /* User code tried and failed to read the vsyscall page. */
+ if (vsyscall_mode != EMULATE)
+ warn_bad_vsyscall(KERN_INFO, regs,
+ "vsyscall read attempt denied -- look up the vsyscall kernel parameter if you need a workaround");
+
+ return false;
+ }
+
+ /*
+ * X86_PF_INSTR is only set when NX is supported. When
+ * available, use it to double-check that the emulation code
+ * is only being used for instruction fetches:
+ */
+ if (cpu_feature_enabled(X86_FEATURE_NX))
+ WARN_ON_ONCE(!(error_code & X86_PF_INSTR));
+
+ return __emulate_vsyscall(regs, address);
+}
+
/*
* A pseudo VMA to allow ptrace access for the vsyscall page. This only
* covers the 64bit vsyscall page now. 32bit has a real VMA now and does
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index 472f026..f349023 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -14,12 +14,11 @@ extern void set_vsyscall_pgtable_user_bits(pgd_t *root);
* Called on instruction fetch fault in vsyscall page.
* Returns true if handled.
*/
-extern bool emulate_vsyscall(unsigned long error_code,
- struct pt_regs *regs, unsigned long address);
+bool emulate_vsyscall_pf(unsigned long error_code, struct pt_regs *regs, unsigned long address);
#else
static inline void map_vsyscall(void) {}
-static inline bool emulate_vsyscall(unsigned long error_code,
- struct pt_regs *regs, unsigned long address)
+static inline bool emulate_vsyscall_pf(unsigned long error_code,
+ struct pt_regs *regs, unsigned long address)
{
return false;
}
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index b83a067..f0e77e0 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1314,7 +1314,7 @@ void do_user_addr_fault(struct pt_regs *regs,
* to consider the PF_PK bit.
*/
if (is_vsyscall_vaddr(address)) {
- if (emulate_vsyscall(error_code, regs, address))
+ if (emulate_vsyscall_pf(error_code, regs, address))
return;
}
#endif
© 2016 - 2026 Red Hat, Inc.