Add support for kernel stack offset randomization while handling
syscalls; the offset is limited by KSTACK_OFFSET_MAX() by default.

In order to avoid triggering stack canaries (due to __builtin_alloca)
and slowing down the entry path, use the __no_stack_protector attribute
to disable the stack protector for do_syscall() at the function level.

With this patch, the REPORT_STACK test shows:
`loongarch64 bits of stack entropy: 7`

Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
Suggested-by: Huacai Chen <chenhuacai@kernel.org>
---
v2:
- get_random_u16() -> drdtime().
- Add Suggested-by.
---
arch/loongarch/Kconfig | 1 +
arch/loongarch/kernel/syscall.c | 17 ++++++++++++++++-
2 files changed, 17 insertions(+), 1 deletion(-)
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index ddc042895d01..fcf6451b4e38 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -106,6 +106,7 @@ config LOONGARCH
 	select HAVE_ARCH_KFENCE
 	select HAVE_ARCH_KGDB if PERF_EVENTS
 	select HAVE_ARCH_MMAP_RND_BITS if MMU
+	select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
 	select HAVE_ARCH_SECCOMP
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c
index ec17cd5163b7..9df81197a09b 100644
--- a/arch/loongarch/kernel/syscall.c
+++ b/arch/loongarch/kernel/syscall.c
@@ -9,11 +9,13 @@
 #include <linux/entry-common.h>
 #include <linux/errno.h>
 #include <linux/linkage.h>
+#include <linux/randomize_kstack.h>
 #include <linux/syscalls.h>
 #include <linux/unistd.h>
 
 #include <asm/asm.h>
 #include <asm/exception.h>
+#include <asm/loongarch.h>
 #include <asm/signal.h>
 #include <asm/switch_to.h>
 #include <asm-generic/syscalls.h>
@@ -39,7 +41,7 @@ void *sys_call_table[__NR_syscalls] = {
 typedef long (*sys_call_fn)(unsigned long, unsigned long,
 	unsigned long, unsigned long, unsigned long, unsigned long);
 
-void noinstr do_syscall(struct pt_regs *regs)
+__no_stack_protector void noinstr do_syscall(struct pt_regs *regs)
 {
 	unsigned long nr;
 	sys_call_fn syscall_fn;
@@ -55,11 +57,24 @@ void noinstr do_syscall(struct pt_regs *regs)
 
 	nr = syscall_enter_from_user_mode(regs, nr);
 
+	add_random_kstack_offset();
+
 	if (nr < NR_syscalls) {
 		syscall_fn = sys_call_table[nr];
 		regs->regs[4] = syscall_fn(regs->orig_a0, regs->regs[5], regs->regs[6],
 					   regs->regs[7], regs->regs[8], regs->regs[9]);
 	}
 
+	/*
+	 * This value will get limited by KSTACK_OFFSET_MAX(), which is 10
+	 * bits. The actual entropy will be further reduced by the compiler
+	 * when applying stack alignment constraints: 16-byte (i.e. 4-bit)
+	 * aligned, which will remove the 4 low bits from any entropy chosen
+	 * here.
+	 *
+	 * The resulting 6 bits of entropy is seen in SP[9:4].
+	 */
+	choose_random_kstack_offset(drdtime());
+
 	syscall_exit_to_user_mode(regs);
 }
--
2.34.1
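
For readers not familiar with the generic helpers used above:
add_random_kstack_offset() and choose_random_kstack_offset() come from
include/linux/randomize_kstack.h. The following is a simplified
paraphrase of how they work, with the static-key gating
(CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT / randomize_kstack_offset=)
elided; the exact mixing step and where the 10-bit mask is applied may
differ slightly from the tree this patch was written against:

/* Simplified paraphrase of include/linux/randomize_kstack.h -- not verbatim. */
#define KSTACK_OFFSET_MAX(x)	((x) & 0x3FF)	/* cap the offset at 10 bits */

/* Syscall entry: consume the previously stored per-CPU offset by moving
 * the stack pointer down with alloca() before the syscall body runs. */
#define add_random_kstack_offset() do {					\
	u32 offset = raw_cpu_read(kstack_offset);			\
	u8 *ptr = __builtin_alloca(KSTACK_OFFSET_MAX(offset));		\
	/* Keep the alloca allocation alive after "ptr" loses scope. */	\
	asm volatile("" :: "r"(ptr) : "memory");			\
} while (0)

/* Syscall exit: fold fresh randomness (here drdtime(), the LoongArch
 * stable counter) into the per-CPU state used by the *next* syscall. */
#define choose_random_kstack_offset(rand) do {				\
	u32 offset = raw_cpu_read(kstack_offset);			\
	offset = ror32(offset, 5) ^ (rand);				\
	raw_cpu_write(kstack_offset, offset);				\
} while (0)

This is also where the numbers in the comment above come from: the mask
keeps at most 10 bits (0..1023 bytes), and the compiler rounds the
alloca() size up to the 16-byte stack alignment, dropping the 4 low
bits, which leaves 2^6 = 64 possible stack offsets, i.e. the 6 bits
seen in SP[9:4].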
On Fri, Jul 19, 2024 at 11:14:27AM +0800, Jinjie Ruan wrote:
> Add support of kernel stack offset randomization while handling syscall,
> the offset is defaultly limited by KSTACK_OFFSET_MAX().
>
> In order to avoid trigger stack canaries (due to __builtin_alloca) and
> slowing down the entry path, use __no_stack_protector attribute to
> disable stack protector for do_syscall() at function level.
>
> With this patch, the REPORT_STACK test show that:
> `loongarch64 bits of stack entropy: 7`

I suspect this will report the correct "6" after now that this commit
has landed:
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=872bb37f6829d4f7f3ed5afe2786add3d4384b4b

>
> Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
> Suggested-by: Huacai Chen <chenhuacai@kernel.org>

Thanks for adding this and getting it tested!

Reviewed-by: Kees Cook <kees@kernel.org>

-- 
Kees Cook
On 2024/7/20 0:01, Kees Cook wrote:
> On Fri, Jul 19, 2024 at 11:14:27AM +0800, Jinjie Ruan wrote:
>> Add support of kernel stack offset randomization while handling syscall,
>> the offset is defaultly limited by KSTACK_OFFSET_MAX().
>>
>> In order to avoid trigger stack canaries (due to __builtin_alloca) and
>> slowing down the entry path, use __no_stack_protector attribute to
>> disable stack protector for do_syscall() at function level.
>>
>> With this patch, the REPORT_STACK test show that:
>> `loongarch64 bits of stack entropy: 7`
>
> I suspect this will report the correct "6" after now that this commit
> has landed:
> https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=872bb37f6829d4f7f3ed5afe2786add3d4384b4b

Hi, Kees

I noticed your patch, and I reconfirm that I have updated to the latest
mainline and that your patch is in the code.

However, the following REPORT_STACK test of your below script has the
same result (run multiple times).

And riscv64, arm64, x86 also has the 7 bit of stack entropy.

for i in $(seq 1 1000); do
	echo "REPORT_STACK" >/sys/kernel/debug/provoke-crash/DIRECT
done

offsets=$(dmesg | grep 'Stack offset' | cut -d: -f3 | sort | uniq -c | sort -n | wc -l)
echo "$(uname -m) bits of stack entropy: $(echo "obase=2; $offsets" | bc | wc -L)"

>
>>
>> Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
>> Suggested-by: Huacai Chen <chenhuacai@kernel.org>
>
> Thanks for adding this and getting it tested!
>
> Reviewed-by: Kees Cook <kees@kernel.org>
>
On Sat, Jul 20, 2024 at 10:52:06AM +0800, Jinjie Ruan wrote:
> On 2024/7/20 0:01, Kees Cook wrote:
> > On Fri, Jul 19, 2024 at 11:14:27AM +0800, Jinjie Ruan wrote:
> >> Add support of kernel stack offset randomization while handling syscall,
> >> the offset is defaultly limited by KSTACK_OFFSET_MAX().
> >>
> >> In order to avoid trigger stack canaries (due to __builtin_alloca) and
> >> slowing down the entry path, use __no_stack_protector attribute to
> >> disable stack protector for do_syscall() at function level.
> >>
> >> With this patch, the REPORT_STACK test show that:
> >> `loongarch64 bits of stack entropy: 7`
> >
> > I suspect this will report the correct "6" after now that this commit
> > has landed:
> > https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=872bb37f6829d4f7f3ed5afe2786add3d4384b4b
>
> Hi, Kees
>
> I noticed your patch, and I reconfirm that I have updated to the latest
> mainline and that your patch is in the code.
>
> However, the following REPORT_STACK test of your below script has the
> same result (run multiple times).
>
> And riscv64, arm64, x86 also has the 7 bit of stack entropy.

Okay, thanks for checking! I may go take a closer look if I have time.
It'd only be a problem if the distribution isn't sufficiently even.

-Kees

-- 
Kees Cook
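
Regarding the evenness question: one way to look at it with the same
data the script above already collects is to keep the per-offset hit
counts instead of only counting how many distinct offsets were seen.
An illustrative shell sketch (not something that was posted in this
thread):

# Same data collection as before, but print the count for each offset.
for i in $(seq 1 1000); do
	echo "REPORT_STACK" >/sys/kernel/debug/provoke-crash/DIRECT
done
dmesg | grep 'Stack offset' | cut -d: -f3 | sort | uniq -c | sort -n
# With 6 usable bits, roughly 64 distinct offsets should appear with
# broadly similar counts; a strong skew toward a few values would point
# at a distribution problem rather than a bit-width problem.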
Applied with small changes, thanks.
https://github.com/chenhuacai/linux/commit/a55a07439613689f0890c9482b9c5f76dec255dd
Huacai
On Fri, Jul 19, 2024 at 11:09 AM Jinjie Ruan <ruanjinjie@huawei.com> wrote:
>
> Add support of kernel stack offset randomization while handling syscall,
> the offset is defaultly limited by KSTACK_OFFSET_MAX().
>
> In order to avoid trigger stack canaries (due to __builtin_alloca) and
> slowing down the entry path, use __no_stack_protector attribute to
> disable stack protector for do_syscall() at function level.
>
> With this patch, the REPORT_STACK test show that:
> `loongarch64 bits of stack entropy: 7`
>
> Signed-off-by: Jinjie Ruan <ruanjinjie@huawei.com>
> Suggested-by: Huacai Chen <chenhuacai@kernel.org>
> ---
> v2:
> - get_random_u16() -> drdtime().
> - Add Suggested-by.
> ---
> arch/loongarch/Kconfig | 1 +
> arch/loongarch/kernel/syscall.c | 17 ++++++++++++++++-
> 2 files changed, 17 insertions(+), 1 deletion(-)
>
> diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
> index ddc042895d01..fcf6451b4e38 100644
> --- a/arch/loongarch/Kconfig
> +++ b/arch/loongarch/Kconfig
> @@ -106,6 +106,7 @@ config LOONGARCH
> select HAVE_ARCH_KFENCE
> select HAVE_ARCH_KGDB if PERF_EVENTS
> select HAVE_ARCH_MMAP_RND_BITS if MMU
> + select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
> select HAVE_ARCH_SECCOMP
> select HAVE_ARCH_SECCOMP_FILTER
> select HAVE_ARCH_TRACEHOOK
> diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c
> index ec17cd5163b7..9df81197a09b 100644
> --- a/arch/loongarch/kernel/syscall.c
> +++ b/arch/loongarch/kernel/syscall.c
> @@ -9,11 +9,13 @@
> #include <linux/entry-common.h>
> #include <linux/errno.h>
> #include <linux/linkage.h>
> +#include <linux/randomize_kstack.h>
> #include <linux/syscalls.h>
> #include <linux/unistd.h>
>
> #include <asm/asm.h>
> #include <asm/exception.h>
> +#include <asm/loongarch.h>
> #include <asm/signal.h>
> #include <asm/switch_to.h>
> #include <asm-generic/syscalls.h>
> @@ -39,7 +41,7 @@ void *sys_call_table[__NR_syscalls] = {
> typedef long (*sys_call_fn)(unsigned long, unsigned long,
> unsigned long, unsigned long, unsigned long, unsigned long);
>
> -void noinstr do_syscall(struct pt_regs *regs)
> +__no_stack_protector void noinstr do_syscall(struct pt_regs *regs)
> {
> unsigned long nr;
> sys_call_fn syscall_fn;
> @@ -55,11 +57,24 @@ void noinstr do_syscall(struct pt_regs *regs)
>
> nr = syscall_enter_from_user_mode(regs, nr);
>
> + add_random_kstack_offset();
> +
> if (nr < NR_syscalls) {
> syscall_fn = sys_call_table[nr];
> regs->regs[4] = syscall_fn(regs->orig_a0, regs->regs[5], regs->regs[6],
> regs->regs[7], regs->regs[8], regs->regs[9]);
> }
>
> + /*
> + * This value will get limited by KSTACK_OFFSET_MAX(), which is 10
> + * bits. The actual entropy will be further reduced by the compiler
> + * when applying stack alignment constraints: 16-byte (i.e. 4-bit)
> + * aligned, which will remove the 4 low bits from any entropy chosen
> + * here.
> + *
> + * The resulting 6 bits of entropy is seen in SP[9:4].
> + */
> + choose_random_kstack_offset(drdtime());
> +
> syscall_exit_to_user_mode(regs);
> }
> --
> 2.34.1
>