cpu_install_idmap() switches TTBR0 over to the idmap page tables.
replace_phys points at idmap_cpu_replace_ttbr1(), which lives in the
idmap, so TTBR0 must point at the idmap while it runs.

However, if the task is preempted right after cpu_install_idmap(), the
context-switch path reprograms TTBR0 when the task is scheduled back
in, leaving it pointing at reserved_pg_dir. reserved_pg_dir does not
map the idmap text, so the task faults as soon as it tries to execute
idmap_cpu_replace_ttbr1().

As far as I can verify from the current arm64 tree, no in-tree caller
invokes __cpu_replace_ttbr1() from a preemptible context. Nevertheless,
future work might call it with preemption enabled. Wrapping the idmap
sequence in preempt_disable()/preempt_enable() guards against that.
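
For illustration only (not part of the patch), a minimal sketch of the
pattern this change enforces. run_from_idmap() is a hypothetical helper
invented for this example; preempt_disable()/preempt_enable() and
cpu_install_idmap()/cpu_uninstall_idmap() are the existing kernel APIs:

	#include <linux/preempt.h>
	#include <linux/types.h>
	#include <asm/mmu_context.h>

	/* Hypothetical helper, for illustration only. */
	static void run_from_idmap(void (*fn)(phys_addr_t), phys_addr_t arg)
	{
		preempt_disable();	/* no switch-out while the idmap is live */
		cpu_install_idmap();	/* TTBR0 := idmap page tables */
		fn(arg);		/* fn must execute from the idmap */
		cpu_uninstall_idmap();	/* TTBR0 back to reserved_pg_dir */
		preempt_enable();
	}
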
Signed-off-by: Jeongin Yeo <leo.yeo@luaberry.com>
---
arch/arm64/mm/mmu.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 2ba01dc8ef82..c3da836ebe8d 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -29,6 +29,7 @@
 #include <linux/mm_inline.h>
 #include <linux/pagewalk.h>
 #include <linux/stop_machine.h>
+#include <linux/preempt.h>
 
 #include <asm/barrier.h>
 #include <asm/cputype.h>
@@ -2136,7 +2137,7 @@ void __cpu_replace_ttbr1(pgd_t *pgdp, bool cnp)
 		ttbr1 |= TTBR_CNP_BIT;
 
 	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
-
+	preempt_disable();
 	cpu_install_idmap();
 
 	/*
@@ -2148,6 +2149,7 @@ void __cpu_replace_ttbr1(pgd_t *pgdp, bool cnp)
 	local_daif_restore(daif);
 
 	cpu_uninstall_idmap();
+	preempt_enable();
 }
 
 #ifdef CONFIG_ARCH_HAS_PKEYS
--
2.43.0
On Tue, Dec 02, 2025 at 12:42:23AM +0000, Jeongin Yeo wrote:

[...]

> @@ -2148,6 +2149,7 @@ void __cpu_replace_ttbr1(pgd_t *pgdp, bool cnp)
> 	local_daif_restore(daif);
>
> 	cpu_uninstall_idmap();
> +	preempt_enable();
> }

__cpu_replace_ttbr1() is a _very_ low-level helper with only a handful
of arch-specific callers and so we shouldn't be worrying about random
functions calling it with preemption enabled.

Will
On Tue, Dec 2, 2025 at 9:43 AM Jeongin Yeo <leo.yeo@luaberry.com> wrote:

[...]

Hi,

Gentle ping on this patch (sent on Dec 1). Is there any feedback or
concerns I should address?

Thanks.