arch/x86/entry/syscall_64.c | 9 ++++++++- kernel/kallsyms.c | 3 +++ security/Kconfig | 8 ++++++++ 3 files changed, 19 insertions(+), 1 deletion(-)
From: Elly I Esparza <ellyesparza8@gmail.com>
Blacklist 'x64_sys_call()' from being kprobed to prevent syscall hooking
techniques that overwrite the content of a 'case' block inside the main
syscall dispatch switch statement.
Also blacklist 'kallsyms_lookup_name()' to prevent a potential bypass
of the blacklist, since this function can be used to discover and target
arbitrary kernel symbols.
Add a Kconfig option under security/ to enable or disable this feature.
Signed-off-by: Elly I Esparza <ellyesparza8@gmail.com>
---
This repo shows how kprobes without these blacklist entries could be
exploited: https://github.com/1337-42/FlipSwitch-dev/
---
arch/x86/entry/syscall_64.c | 9 ++++++++-
kernel/kallsyms.c | 3 +++
security/Kconfig | 8 ++++++++
3 files changed, 19 insertions(+), 1 deletion(-)
diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c
index b6e68ea98b83..e878fa865532 100644
--- a/arch/x86/entry/syscall_64.c
+++ b/arch/x86/entry/syscall_64.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* 64-bit system call dispatch */
+#include <linux/kprobes.h>
#include <linux/linkage.h>
#include <linux/sys.h>
#include <linux/cache.h>
@@ -39,6 +40,9 @@ long x64_sys_call(const struct pt_regs *regs, unsigned int nr)
default: return __x64_sys_ni_syscall(regs);
}
}
+#ifdef CONFIG_SEC_KPROBES
+NOKPROBE_SYMBOL(x64_sys_call)
+#endif
#ifdef CONFIG_X86_X32_ABI
long x32_sys_call(const struct pt_regs *regs, unsigned int nr)
@@ -48,7 +52,10 @@ long x32_sys_call(const struct pt_regs *regs, unsigned int nr)
default: return __x64_sys_ni_syscall(regs);
}
}
-#endif
+#ifdef CONFIG_SEC_KPROBES
+NOKPROBE_SYMBOL(x32_sys_call)
+#endif //CONFIG_SEC_KPROBES
+#endif //CONFIG_X86_X32_ABI
static __always_inline bool do_syscall_x64(struct pt_regs *regs, int nr)
{
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 1e7635864124..4082e98681a4 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -234,6 +234,9 @@ unsigned long kallsyms_lookup_name(const char *name)
return module_kallsyms_lookup_name(name);
}
+#ifdef CONFIG_SEC_KPROBES
+NOKPROBE_SYMBOL(kallsyms_lookup_name)
+#endif
/*
* Iterate over all symbols in vmlinux. For symbols from modules use
diff --git a/security/Kconfig b/security/Kconfig
index 285f284dfcac..a0906a5a7410 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -286,5 +286,13 @@ config LSM
source "security/Kconfig.hardening"
+config SEC_KPROBES
+ bool "Secure kprobe blacklist"
+ depends on KPROBES
+ help
+	  Adds the functions x64_sys_call(), x32_sys_call() and
+	  kallsyms_lookup_name() to the kprobe blacklist to prevent
+	  syscall hooking attacks.
+ If in doubt, say "N".
+
endmenu
--
2.43.0
On Thu, Dec 04, 2025 at 11:41:41AM -0300, ellyndra wrote:
> From: Elly I Esparza <ellyesparza8@gmail.com>
>
> Blacklist 'x64_sys_call()' from being kprobed to prevent syscall hooking
> techniques that overwrite the content of a 'case' block inside the main
> syscall dispatch switch statement.
>
> Also blacklist 'kallsyms_lookup_name()' to prevent a potential bypass
> of the blacklist, since this function can be used to discover and target
> arbitrary kernel symbols.
>
> Add a Kconfig option under security/ to enable or disable this feature.
>
> Signed-off-by: Elly I Esparza <ellyesparza8@gmail.com>
> ---

I'd be okay doing this unconditionally. Pretty much everything else
until that point lives in noinstr which is already excluded from probes.
On Thu, Dec 04, 2025 at 03:50:13PM +0100, Peter Zijlstra wrote:
> On Thu, Dec 04, 2025 at 11:41:41AM -0300, ellyndra wrote:
> > From: Elly I Esparza <ellyesparza8@gmail.com>
> >
> > Blacklist 'x64_sys_call()' from being kprobed to prevent syscall hooking
> > techniques that overwrite the content of a 'case' block inside the main
> > syscall dispatch switch statement.
> >
> > Also blacklist 'kallsyms_lookup_name()' to prevent a potential bypass
> > of the blacklist, since this function can be used to discover and target
> > arbitrary kernel symbols.
> >
> > Add a Kconfig option under security/ to enable or disable this feature.
> >
> > Signed-off-by: Elly I Esparza <ellyesparza8@gmail.com>
> > ---
>
> I'd be okay doing this unconditionally. Pretty much everything else
> until that point lives in noinstr which is already excluded from probes.
How about something like this (completely untested) instead?
---
arch/x86/entry/syscall_32.c | 6 ++++--
arch/x86/entry/syscall_64.c | 10 ++++++----
arch/x86/include/asm/syscall.h | 8 --------
3 files changed, 10 insertions(+), 14 deletions(-)
diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c
index a67a644d0cfe..6451550df027 100644
--- a/arch/x86/entry/syscall_32.c
+++ b/arch/x86/entry/syscall_32.c
@@ -41,7 +41,7 @@ const sys_call_ptr_t sys_call_table[] = {
#endif
#define __SYSCALL(nr, sym) case nr: return __ia32_##sym(regs);
-long ia32_sys_call(const struct pt_regs *regs, unsigned int nr)
+static __always_inline long ia32_sys_call(const struct pt_regs *regs, unsigned int nr)
{
switch (nr) {
#include <asm/syscalls_32.h>
@@ -70,7 +70,7 @@ early_param("ia32_emulation", ia32_emulation_override_cmdline);
/*
* Invoke a 32-bit syscall. Called with IRQs on in CT_STATE_KERNEL.
*/
-static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)
+static noinstr void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)
{
/*
* Convert negative numbers to very high and thus out of range
@@ -78,12 +78,14 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)
*/
unsigned int unr = nr;
+ instrumentation_begin();
if (likely(unr < IA32_NR_syscalls)) {
unr = array_index_nospec(unr, IA32_NR_syscalls);
regs->ax = ia32_sys_call(regs, unr);
} else if (nr != -1) {
regs->ax = __ia32_sys_ni_syscall(regs);
}
+ instrumentation_end();
}
#ifdef CONFIG_IA32_EMULATION
diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c
index b6e68ea98b83..ce9ded561354 100644
--- a/arch/x86/entry/syscall_64.c
+++ b/arch/x86/entry/syscall_64.c
@@ -32,7 +32,7 @@ const sys_call_ptr_t sys_call_table[] = {
#undef __SYSCALL
#define __SYSCALL(nr, sym) case nr: return __x64_##sym(regs);
-long x64_sys_call(const struct pt_regs *regs, unsigned int nr)
+static __always_inline long x64_sys_call(const struct pt_regs *regs, unsigned int nr)
{
switch (nr) {
#include <asm/syscalls_64.h>
@@ -40,15 +40,17 @@ long x64_sys_call(const struct pt_regs *regs, unsigned int nr)
}
}
-#ifdef CONFIG_X86_X32_ABI
-long x32_sys_call(const struct pt_regs *regs, unsigned int nr)
+static __always_inline long x32_sys_call(const struct pt_regs *regs, unsigned int nr)
{
+#ifdef CONFIG_X86_X32_ABI
switch (nr) {
#include <asm/syscalls_x32.h>
default: return __x64_sys_ni_syscall(regs);
}
-}
+#else
+ return __x64_sys_ni_syscall(regs);
#endif
+}
static __always_inline bool do_syscall_x64(struct pt_regs *regs, int nr)
{
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index c10dbb74cd00..59a406074dc0 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -20,14 +20,6 @@
typedef long (*sys_call_ptr_t)(const struct pt_regs *);
extern const sys_call_ptr_t sys_call_table[];
-/*
- * These may not exist, but still put the prototypes in so we
- * can use IS_ENABLED().
- */
-extern long ia32_sys_call(const struct pt_regs *, unsigned int nr);
-extern long x32_sys_call(const struct pt_regs *, unsigned int nr);
-extern long x64_sys_call(const struct pt_regs *, unsigned int nr);
-
/*
* Only the low 32 bits of orig_ax are meaningful, so we return int.
* This importantly ignores the high bits on 64-bit, so comparisons
On Mon, Dec 8, 2025 at 12:01 PM Peter Zijlstra <peterz@infradead.org> wrote:
> On Thu, Dec 04, 2025 at 03:50:13PM +0100, Peter Zijlstra wrote:
> > On Thu, Dec 04, 2025 at 11:41:41AM -0300, ellyndra wrote:
> > > From: Elly I Esparza <ellyesparza8@gmail.com>
> > >
> > > Blacklist 'x64_sys_call()' from being kprobed to prevent syscall hooking
> > > techniques that overwrite the content of a 'case' block inside the main
> > > syscall dispatch switch statement.
> > >
> > > Also blacklist 'kallsyms_lookup_name()' to prevent a potential bypass
> > > of the blacklist, since this function can be used to discover and target
> > > arbitrary kernel symbols.
> > >
> > > Add a Kconfig option under security/ to enable or disable this feature.
> > >
> > > Signed-off-by: Elly I Esparza <ellyesparza8@gmail.com>
> > > ---
> >
> > I'd be okay doing this unconditionally. Pretty much everything else
> > until that point lives in noinstr which is already excluded from probes.
>
> How about something like this (completely untested) instead?
>
I compiled it on an x86_64 machine with CONFIG_IA32_EMULATION=y; it
prevents both ia32_sys_call() and x64_sys_call() from being kprobed and
from being resolved via kallsyms_lookup_name().
> ---
> arch/x86/entry/syscall_32.c | 6 ++++--
> arch/x86/entry/syscall_64.c | 10 ++++++----
> arch/x86/include/asm/syscall.h | 8 --------
> 3 files changed, 10 insertions(+), 14 deletions(-)
>
> diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c
> index a67a644d0cfe..6451550df027 100644
> --- a/arch/x86/entry/syscall_32.c
> +++ b/arch/x86/entry/syscall_32.c
> @@ -41,7 +41,7 @@ const sys_call_ptr_t sys_call_table[] = {
> #endif
>
> #define __SYSCALL(nr, sym) case nr: return __ia32_##sym(regs);
> -long ia32_sys_call(const struct pt_regs *regs, unsigned int nr)
> +static __always_inline long ia32_sys_call(const struct pt_regs *regs, unsigned int nr)
> {
> switch (nr) {
> #include <asm/syscalls_32.h>
> @@ -70,7 +70,7 @@ early_param("ia32_emulation", ia32_emulation_override_cmdline);
> /*
> * Invoke a 32-bit syscall. Called with IRQs on in CT_STATE_KERNEL.
> */
> -static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)
> +static noinstr void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)
> {
> /*
> * Convert negative numbers to very high and thus out of range
> @@ -78,12 +78,14 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs, int nr)
> */
> unsigned int unr = nr;
>
> + instrumentation_begin();
> if (likely(unr < IA32_NR_syscalls)) {
> unr = array_index_nospec(unr, IA32_NR_syscalls);
> regs->ax = ia32_sys_call(regs, unr);
> } else if (nr != -1) {
> regs->ax = __ia32_sys_ni_syscall(regs);
> }
> + instrumentation_end();
> }
>
> #ifdef CONFIG_IA32_EMULATION
> diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c
> index b6e68ea98b83..ce9ded561354 100644
> --- a/arch/x86/entry/syscall_64.c
> +++ b/arch/x86/entry/syscall_64.c
> @@ -32,7 +32,7 @@ const sys_call_ptr_t sys_call_table[] = {
> #undef __SYSCALL
>
> #define __SYSCALL(nr, sym) case nr: return __x64_##sym(regs);
> -long x64_sys_call(const struct pt_regs *regs, unsigned int nr)
> +static __always_inline long x64_sys_call(const struct pt_regs *regs, unsigned int nr)
> {
> switch (nr) {
> #include <asm/syscalls_64.h>
> @@ -40,15 +40,17 @@ long x64_sys_call(const struct pt_regs *regs, unsigned int nr)
> }
> }
>
> -#ifdef CONFIG_X86_X32_ABI
> -long x32_sys_call(const struct pt_regs *regs, unsigned int nr)
> +static __always_inline long x32_sys_call(const struct pt_regs *regs, unsigned int nr)
> {
> +#ifdef CONFIG_X86_X32_ABI
> switch (nr) {
> #include <asm/syscalls_x32.h>
> default: return __x64_sys_ni_syscall(regs);
> }
> -}
> +#else
> + return __x64_sys_ni_syscall(regs);
> #endif
> +}
>
> static __always_inline bool do_syscall_x64(struct pt_regs *regs, int nr)
> {
> diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
> index c10dbb74cd00..59a406074dc0 100644
> --- a/arch/x86/include/asm/syscall.h
> +++ b/arch/x86/include/asm/syscall.h
> @@ -20,14 +20,6 @@
> typedef long (*sys_call_ptr_t)(const struct pt_regs *);
> extern const sys_call_ptr_t sys_call_table[];
>
> -/*
> - * These may not exist, but still put the prototypes in so we
> - * can use IS_ENABLED().
> - */
> -extern long ia32_sys_call(const struct pt_regs *, unsigned int nr);
> -extern long x32_sys_call(const struct pt_regs *, unsigned int nr);
> -extern long x64_sys_call(const struct pt_regs *, unsigned int nr);
> -
> /*
> * Only the low 32 bits of orig_ax are meaningful, so we return int.
> * This importantly ignores the high bits on 64-bit, so comparisons
© 2016 - 2026 Red Hat, Inc.