As Linus suggested, __HAVE_ARCH_XYZ is "stupid" and "having historical
uses of it doesn't make it good". So replace __HAVE_ARCH_SHADOW_MAP with
separate macros, each named after the function it overrides.
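This follows the kernel's usual per-function override convention: the
architecture #defines a macro with the same name as its own implementation,
and the generic header only supplies its fallback under #ifndef. A minimal
sketch of that convention, using a made-up helper name (arch_do_thing is
hypothetical and not part of this patch):

/* arch/<arch>/include/asm/example.h: arch provides its own version */
#define arch_do_thing arch_do_thing
static inline int arch_do_thing(int x)
{
	return x + 1;		/* arch-specific behaviour */
}

/* include/linux/example.h: generic fallback, compiled only when the
 * architecture did not define the macro above */
#ifndef arch_do_thing
static inline int arch_do_thing(int x)
{
	return x;		/* generic behaviour */
}
#endif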
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Reviewed-by: WANG Xuerui <git@xen0n.name>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
---
V2: Update commit messages.
arch/loongarch/include/asm/kasan.h | 10 ++++++++--
include/linux/kasan.h | 2 +-
mm/kasan/kasan.h | 8 +++-----
3 files changed, 12 insertions(+), 8 deletions(-)
diff --git a/arch/loongarch/include/asm/kasan.h b/arch/loongarch/include/asm/kasan.h
index deeff8158f45..a12ecab37da7 100644
--- a/arch/loongarch/include/asm/kasan.h
+++ b/arch/loongarch/include/asm/kasan.h
@@ -10,8 +10,6 @@
#include <asm/io.h>
#include <asm/pgtable.h>
-#define __HAVE_ARCH_SHADOW_MAP
-
#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
@@ -68,6 +66,7 @@ static __always_inline bool kasan_arch_is_ready(void)
return !kasan_early_stage;
}
+#define kasan_mem_to_shadow kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
if (!kasan_arch_is_ready()) {
@@ -97,6 +96,7 @@ static inline void *kasan_mem_to_shadow(const void *addr)
}
}
+#define kasan_shadow_to_mem kasan_shadow_to_mem
static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
{
unsigned long addr = (unsigned long)shadow_addr;
@@ -119,6 +119,12 @@ static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
}
}
+#define addr_has_metadata addr_has_metadata
+static __always_inline bool addr_has_metadata(const void *addr)
+{
+ return (kasan_mem_to_shadow((void *)addr) != NULL);
+}
+
void kasan_init(void);
asmlinkage void kasan_early_init(void);
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 3df5499f7936..842623d708c2 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -54,7 +54,7 @@ extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
int kasan_populate_early_shadow(const void *shadow_start,
const void *shadow_end);
-#ifndef __HAVE_ARCH_SHADOW_MAP
+#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index f70e3d7a602e..d37831b8511c 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -291,7 +291,7 @@ struct kasan_stack_ring {
#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
-#ifndef __HAVE_ARCH_SHADOW_MAP
+#ifndef kasan_shadow_to_mem
static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
{
return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
@@ -299,15 +299,13 @@ static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
}
#endif
+#ifndef addr_has_metadata
static __always_inline bool addr_has_metadata(const void *addr)
{
-#ifdef __HAVE_ARCH_SHADOW_MAP
- return (kasan_mem_to_shadow((void *)addr) != NULL);
-#else
return (kasan_reset_tag(addr) >=
kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
-#endif
}
+#endif
/**
* kasan_check_range - Check memory region, and report if invalid access.
--
2.39.3
On Tue, Sep 12, 2023 at 5:18 AM Huacai Chen <chenhuacai@loongson.cn> wrote:
> [...]
Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
Thanks!
As Linus suggested, kasan_mem_to_shadow()/kasan_shadow_to_mem() are not
performance-critical, and they are too big to inline, so inlining them is
simply wrong; just define them out-of-line.
If they really need to be inlined in the future, for example because of the
objtool/SMAP issue on x86, we should mark them __always_inline.
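Reduced to a sketch with a hypothetical helper (arch_do_thing is
illustrative only): the header keeps the override macro but now only
declares the function, and the single definition moves into a .c file.

/* arch/<arch>/include/asm/example.h: declaration only, override macro kept */
#define arch_do_thing arch_do_thing
int arch_do_thing(int x);

/* arch/<arch>/mm/example.c: the one out-of-line definition */
int arch_do_thing(int x)
{
	return x + 1;
}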
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
---
arch/loongarch/include/asm/kasan.h | 59 +++---------------------------
arch/loongarch/mm/kasan_init.c | 51 ++++++++++++++++++++++++++
2 files changed, 57 insertions(+), 53 deletions(-)
diff --git a/arch/loongarch/include/asm/kasan.h b/arch/loongarch/include/asm/kasan.h
index a12ecab37da7..cd6084f4e153 100644
--- a/arch/loongarch/include/asm/kasan.h
+++ b/arch/loongarch/include/asm/kasan.h
@@ -60,63 +60,16 @@
extern bool kasan_early_stage;
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
-#define kasan_arch_is_ready kasan_arch_is_ready
-static __always_inline bool kasan_arch_is_ready(void)
-{
- return !kasan_early_stage;
-}
-
#define kasan_mem_to_shadow kasan_mem_to_shadow
-static inline void *kasan_mem_to_shadow(const void *addr)
-{
- if (!kasan_arch_is_ready()) {
- return (void *)(kasan_early_shadow_page);
- } else {
- unsigned long maddr = (unsigned long)addr;
- unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
- unsigned long offset = 0;
-
- maddr &= XRANGE_SHADOW_MASK;
- switch (xrange) {
- case XKPRANGE_CC_SEG:
- offset = XKPRANGE_CC_SHADOW_OFFSET;
- break;
- case XKPRANGE_UC_SEG:
- offset = XKPRANGE_UC_SHADOW_OFFSET;
- break;
- case XKVRANGE_VC_SEG:
- offset = XKVRANGE_VC_SHADOW_OFFSET;
- break;
- default:
- WARN_ON(1);
- return NULL;
- }
-
- return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
- }
-}
+void *kasan_mem_to_shadow(const void *addr);
#define kasan_shadow_to_mem kasan_shadow_to_mem
-static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
+const void *kasan_shadow_to_mem(const void *shadow_addr);
+
+#define kasan_arch_is_ready kasan_arch_is_ready
+static __always_inline bool kasan_arch_is_ready(void)
{
- unsigned long addr = (unsigned long)shadow_addr;
-
- if (unlikely(addr > KASAN_SHADOW_END) ||
- unlikely(addr < KASAN_SHADOW_START)) {
- WARN_ON(1);
- return NULL;
- }
-
- if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
- return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
- else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
- return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
- else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
- return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
- else {
- WARN_ON(1);
- return NULL;
- }
+ return !kasan_early_stage;
}
#define addr_has_metadata addr_has_metadata
diff --git a/arch/loongarch/mm/kasan_init.c b/arch/loongarch/mm/kasan_init.c
index da68bc1a4643..cc3e81fe0186 100644
--- a/arch/loongarch/mm/kasan_init.c
+++ b/arch/loongarch/mm/kasan_init.c
@@ -35,6 +35,57 @@ static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
bool kasan_early_stage = true;
+void *kasan_mem_to_shadow(const void *addr)
+{
+ if (!kasan_arch_is_ready()) {
+ return (void *)(kasan_early_shadow_page);
+ } else {
+ unsigned long maddr = (unsigned long)addr;
+ unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
+ unsigned long offset = 0;
+
+ maddr &= XRANGE_SHADOW_MASK;
+ switch (xrange) {
+ case XKPRANGE_CC_SEG:
+ offset = XKPRANGE_CC_SHADOW_OFFSET;
+ break;
+ case XKPRANGE_UC_SEG:
+ offset = XKPRANGE_UC_SHADOW_OFFSET;
+ break;
+ case XKVRANGE_VC_SEG:
+ offset = XKVRANGE_VC_SHADOW_OFFSET;
+ break;
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
+ }
+}
+
+const void *kasan_shadow_to_mem(const void *shadow_addr)
+{
+ unsigned long addr = (unsigned long)shadow_addr;
+
+ if (unlikely(addr > KASAN_SHADOW_END) ||
+ unlikely(addr < KASAN_SHADOW_START)) {
+ WARN_ON(1);
+ return NULL;
+ }
+
+ if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
+ return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
+ else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
+ return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
+ else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
+ return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
+ else {
+ WARN_ON(1);
+ return NULL;
+ }
+}
+
/*
* Alloc memory for shadow memory page table.
*/
--
2.39.3