Historically the fast-path static key `kasan_flag_enabled` existed
only for `CONFIG_KASAN_HW_TAGS`. Generic and SW_TAGS either relied on
`kasan_arch_is_ready()` or evaluated KASAN checks unconditionally.
As a result every architecture had to toggle a private flag
in its `kasan_init()`.
This patch turns the flag into a single global runtime predicate that
is built for every `CONFIG_KASAN` mode and adds a helper that flips
the key once KASAN is ready.
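For illustration, an architecture then only calls the mode helper from its
kasan_init() once shadow memory is set up. A minimal arm64-style sketch
(kasan_init_shadow() stands for the arch's existing shadow setup):

	void __init kasan_init(void)
	{
		kasan_init_shadow();	/* arch-specific shadow setup */
		kasan_init_generic();	/* flips kasan_flag_enabled */
	}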
Closes: https://bugzilla.kernel.org/show_bug.cgi?id=218315
Signed-off-by: Sabyrzhan Tasbolatov <snovitoll@gmail.com>
---
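Note on intended use (not part of this patch): with the key built for every
mode, fast-path checks in the Generic and SW_TAGS runtimes can be gated on
kasan_enabled() the same way HW_TAGS already gates its hooks. A sketch with
a hypothetical helper name:

	static __always_inline bool kasan_guarded_check(const void *addr,
							size_t size)
	{
		if (!kasan_enabled())	/* static branch, near-free when off */
			return true;
		/* ... perform the actual shadow/tag check ... */
		return true;
	}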
include/linux/kasan-enabled.h | 22 ++++++++++++++++------
include/linux/kasan.h | 6 ++++++
mm/kasan/common.c | 7 +++++++
mm/kasan/generic.c | 11 +++++++++++
mm/kasan/hw_tags.c | 7 -------
mm/kasan/sw_tags.c | 2 ++
6 files changed, 42 insertions(+), 13 deletions(-)
diff --git a/include/linux/kasan-enabled.h b/include/linux/kasan-enabled.h
index 6f612d69ea0c..2436eb45cfee 100644
--- a/include/linux/kasan-enabled.h
+++ b/include/linux/kasan-enabled.h
@@ -4,8 +4,12 @@
#include <linux/static_key.h>
-#ifdef CONFIG_KASAN_HW_TAGS
+#ifdef CONFIG_KASAN
+/*
+ * Global runtime flag. Starts 'false'; switched to 'true' by
+ * the appropriate kasan_init_*() once KASAN is fully initialized.
+ */
DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
static __always_inline bool kasan_enabled(void)
@@ -13,18 +17,24 @@ static __always_inline bool kasan_enabled(void)
return static_branch_likely(&kasan_flag_enabled);
}
-static inline bool kasan_hw_tags_enabled(void)
+#else /* !CONFIG_KASAN */
+
+static __always_inline bool kasan_enabled(void)
{
- return kasan_enabled();
+ return false;
}
-#else /* CONFIG_KASAN_HW_TAGS */
+#endif /* CONFIG_KASAN */
-static inline bool kasan_enabled(void)
+#ifdef CONFIG_KASAN_HW_TAGS
+
+static inline bool kasan_hw_tags_enabled(void)
{
- return IS_ENABLED(CONFIG_KASAN);
+ return kasan_enabled();
}
+#else /* !CONFIG_KASAN_HW_TAGS */
+
static inline bool kasan_hw_tags_enabled(void)
{
return false;
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 890011071f2b..51a8293d1af6 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -543,6 +543,12 @@ void kasan_report_async(void);
#endif /* CONFIG_KASAN_HW_TAGS */
+#ifdef CONFIG_KASAN_GENERIC
+void __init kasan_init_generic(void);
+#else
+static inline void kasan_init_generic(void) { }
+#endif
+
#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index ed4873e18c75..525194da25fa 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -32,6 +32,13 @@
#include "kasan.h"
#include "../slab.h"
+/*
+ * Definition of the unified static key declared in kasan-enabled.h.
+ * This provides consistent runtime enable/disable across all KASAN modes.
+ */
+DEFINE_STATIC_KEY_FALSE(kasan_flag_enabled);
+EXPORT_SYMBOL(kasan_flag_enabled);
+
struct slab *kasan_addr_to_slab(const void *addr)
{
if (virt_addr_valid(addr))
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index d54e89f8c3e7..32c432df24aa 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -36,6 +36,17 @@
#include "kasan.h"
#include "../slab.h"
+/*
+ * Initialize Generic KASAN and enable runtime checks.
+ * This should be called from arch kasan_init() once shadow memory is ready.
+ */
+void __init kasan_init_generic(void)
+{
+ static_branch_enable(&kasan_flag_enabled);
+
+ pr_info("KernelAddressSanitizer initialized (generic)\n");
+}
+
/*
* All functions below always inlined so compiler could
* perform better optimizations in each of __asan_loadX/__assn_storeX
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index 9a6927394b54..8e819fc4a260 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -45,13 +45,6 @@ static enum kasan_arg kasan_arg __ro_after_init;
static enum kasan_arg_mode kasan_arg_mode __ro_after_init;
static enum kasan_arg_vmalloc kasan_arg_vmalloc __initdata;
-/*
- * Whether KASAN is enabled at all.
- * The value remains false until KASAN is initialized by kasan_init_hw_tags().
- */
-DEFINE_STATIC_KEY_FALSE(kasan_flag_enabled);
-EXPORT_SYMBOL(kasan_flag_enabled);
-
/*
* Whether the selected mode is synchronous, asynchronous, or asymmetric.
* Defaults to KASAN_MODE_SYNC.
diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c
index b9382b5b6a37..525bc91e2fcd 100644
--- a/mm/kasan/sw_tags.c
+++ b/mm/kasan/sw_tags.c
@@ -45,6 +45,8 @@ void __init kasan_init_sw_tags(void)
kasan_init_tags();
+ static_branch_enable(&kasan_flag_enabled);
+
pr_info("KernelAddressSanitizer initialized (sw-tags, stacktrace=%s)\n",
str_on_off(kasan_stack_collection_enabled()));
}
--
2.34.1
On 25/06/2025 11:52, Sabyrzhan Tasbolatov wrote:
> Historically the fast-path static key `kasan_flag_enabled` existed
> only for `CONFIG_KASAN_HW_TAGS`. Generic and SW_TAGS either relied on
> `kasan_arch_is_ready()` or evaluated KASAN checks unconditionally.
> As a result every architecture had to toggle a private flag
> in its `kasan_init()`.
>
> This patch turns the flag into a single global runtime predicate that
> is built for every `CONFIG_KASAN` mode and adds a helper that flips
> the key once KASAN is ready.

Shouldn't kasan_init_generic() also perform the following line to reduce
even more code duplication between architectures?

	init_task.kasan_depth = 0;

Christophe
On Wed, Jun 25, 2025 at 3:35 PM Christophe Leroy
<christophe.leroy@csgroup.eu> wrote:
>
> Shouldn't kasan_init_generic() also perform the following line to reduce
> even more code duplication between architectures?
>
> init_task.kasan_depth = 0;

I've tried to introduce a new function kasan_mark_ready() to gather all the
arch-duplicated code in one place.

In mm/kasan/common.c:

	void __init kasan_mark_ready(void)
	{
		/* Enable error reporting */
		init_task.kasan_depth = 0;
		/* Mark KASAN as ready */
		static_branch_enable(&kasan_flag_enabled);
	}

So we could have called it in mm/kasan/generic.c:

	void __init kasan_init_generic(void)
	{
		kasan_mark_ready();
		pr_info("KernelAddressSanitizer initialized (generic)\n");
	}

in mm/kasan/sw_tags.c:

	void __init kasan_init_sw_tags(void)
	{
		...
		kasan_mark_ready();
		pr_info("KernelAddressSanitizer initialized ..");
	}

and in mm/kasan/hw_tags.c:

	void __init kasan_init_hw_tags(void)
	{
		...
		kasan_mark_ready();
		pr_info("KernelAddressSanitizer initialized ..");
	}

But this works only for the CONFIG_KASAN_GENERIC mode, where the arch code
calls kasan_init(), for example on arm64:

	void __init kasan_init(void)
	{
		kasan_init_shadow();
		kasan_init_generic();
	}

For HW_TAGS and SW_TAGS it does not work; the build fails with:

	mm/kasan/common.c:45:12: error: no member named 'kasan_depth' in 'struct task_struct'
	   45 |         init_task.kasan_depth = 0;

because kasan_init_sw_tags() and kasan_init_hw_tags() are called once on CPU
boot. On arm64, where these KASAN modes are supported, both functions are
called from smp_prepare_boot_cpu().

So I guess every arch kasan_init() has to set

	init_task.kasan_depth = 0;

itself, to enable error messages before switching KASAN readiness by
enabling the kasan_flag_enabled key.
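Concretely, that per-arch pattern would look like this (arm64-style sketch;
the ordering is the point: enable reporting first, then flip the static key
via the mode helper):

	void __init kasan_init(void)
	{
		kasan_init_shadow();
		/* Enable error reporting before marking KASAN as ready. */
		init_task.kasan_depth = 0;
		kasan_init_generic();
	}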