[PATCH 2/2] kasan: cleanup of kasan_enabled() checks

Posted by Sabyrzhan Tasbolatov 4 months ago
Deduplicate kasan_enabled() checks that are already performed by the callers.

* Altered functions:

check_page_allocation
	Delete the check because the callers already have it in their
	__wrappers in include/linux/kasan.h (see the wrapper sketch after
	this list):
		__kasan_kfree_large
		__kasan_mempool_poison_pages
		__kasan_mempool_poison_object

kasan_populate_vmalloc, kasan_release_vmalloc
	Add __wrappers in include/linux/kasan.h, since these functions are
	called externally from mm/vmalloc.c.

__kasan_unpoison_vmalloc, __kasan_poison_vmalloc
	Delete the checks because there are already kasan_enabled() checks
	in the respective __wrappers in include/linux/kasan.h.

release_free_meta -- Delete the check because it is already done higher
	up in the call path. See the call chain:

	__kasan_slab_free -- has the check already
	__kasan_mempool_poison_object -- has the check already
		poison_slab_object
			kasan_save_free_info
				release_free_meta
					kasan_enabled() -- Delete here
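
For reference, the existing wrapper pattern that makes these inner
checks redundant looks roughly like this (a simplified sketch based on
include/linux/kasan.h, not a verbatim copy):

	/* Out-of-line implementation lives in mm/kasan/common.c. */
	void __kasan_kfree_large(void *ptr, unsigned long ip);

	/* Inline wrapper: callers get the kasan_enabled() check here. */
	static __always_inline void kasan_kfree_large(void *ptr)
	{
		if (kasan_enabled())
			__kasan_kfree_large(ptr, _RET_IP_);
	}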

Signed-off-by: Sabyrzhan Tasbolatov <snovitoll@gmail.com>
---
 include/linux/kasan.h | 20 ++++++++++++++++++--
 mm/kasan/common.c     |  3 ---
 mm/kasan/generic.c    |  3 ---
 mm/kasan/shadow.c     | 20 ++++----------------
 4 files changed, 22 insertions(+), 24 deletions(-)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index d12e1a5f5a9..f335c1d7b61 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -571,11 +571,27 @@ static inline void kasan_init_hw_tags(void) { }
 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 
 void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
-int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
-void kasan_release_vmalloc(unsigned long start, unsigned long end,
+int __kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask);
+static inline int kasan_populate_vmalloc(unsigned long addr,
+					 unsigned long size, gfp_t gfp_mask)
+{
+	if (kasan_enabled())
+		return __kasan_populate_vmalloc(addr, size, gfp_mask);
+	return 0;
+}
+void __kasan_release_vmalloc(unsigned long start, unsigned long end,
 			   unsigned long free_region_start,
 			   unsigned long free_region_end,
 			   unsigned long flags);
+static inline void kasan_release_vmalloc(unsigned long start, unsigned long end,
+			   unsigned long free_region_start,
+			   unsigned long free_region_end,
+			   unsigned long flags)
+{
+	if (kasan_enabled())
+		return __kasan_release_vmalloc(start, end, free_region_start,
+					 free_region_end, flags);
+}
 
 #else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
 
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index d4c14359fea..22e5d67ff06 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -305,9 +305,6 @@ bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init,
 
 static inline bool check_page_allocation(void *ptr, unsigned long ip)
 {
-	if (!kasan_enabled())
-		return false;
-
 	if (ptr != page_address(virt_to_head_page(ptr))) {
 		kasan_report_invalid_free(ptr, ip, KASAN_REPORT_INVALID_FREE);
 		return true;
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 516b49accc4..2b8e73f5f6a 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -506,9 +506,6 @@ static void release_alloc_meta(struct kasan_alloc_meta *meta)
 
 static void release_free_meta(const void *object, struct kasan_free_meta *meta)
 {
-	if (!kasan_enabled())
-		return;
-
 	/* Check if free meta is valid. */
 	if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META)
 		return;
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 5d2a876035d..cf842b620a2 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -354,7 +354,7 @@ static int ___alloc_pages_bulk(struct page **pages, int nr_pages, gfp_t gfp_mask
 	return 0;
 }
 
-static int __kasan_populate_vmalloc(unsigned long start, unsigned long end, gfp_t gfp_mask)
+static int __kasan_populate_vmalloc_do(unsigned long start, unsigned long end, gfp_t gfp_mask)
 {
 	unsigned long nr_pages, nr_total = PFN_UP(end - start);
 	struct vmalloc_populate_data data;
@@ -403,14 +403,11 @@ static int __kasan_populate_vmalloc(unsigned long start, unsigned long end, gfp_
 	return ret;
 }
 
-int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask)
+int __kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mask)
 {
 	unsigned long shadow_start, shadow_end;
 	int ret;
 
-	if (!kasan_enabled())
-		return 0;
-
 	if (!is_vmalloc_or_module_addr((void *)addr))
 		return 0;
 
@@ -432,7 +429,7 @@ int kasan_populate_vmalloc(unsigned long addr, unsigned long size, gfp_t gfp_mas
 	shadow_start = PAGE_ALIGN_DOWN(shadow_start);
 	shadow_end = PAGE_ALIGN(shadow_end);
 
-	ret = __kasan_populate_vmalloc(shadow_start, shadow_end, gfp_mask);
+	ret = __kasan_populate_vmalloc_do(shadow_start, shadow_end, gfp_mask);
 	if (ret)
 		return ret;
 
@@ -574,7 +571,7 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
  * pages entirely covered by the free region, we will not run in to any
  * trouble - any simultaneous allocations will be for disjoint regions.
  */
-void kasan_release_vmalloc(unsigned long start, unsigned long end,
+void __kasan_release_vmalloc(unsigned long start, unsigned long end,
 			   unsigned long free_region_start,
 			   unsigned long free_region_end,
 			   unsigned long flags)
@@ -583,9 +580,6 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
 	unsigned long region_start, region_end;
 	unsigned long size;
 
-	if (!kasan_enabled())
-		return;
-
 	region_start = ALIGN(start, KASAN_MEMORY_PER_SHADOW_PAGE);
 	region_end = ALIGN_DOWN(end, KASAN_MEMORY_PER_SHADOW_PAGE);
 
@@ -634,9 +628,6 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
 	 * with setting memory tags, so the KASAN_VMALLOC_INIT flag is ignored.
 	 */
 
-	if (!kasan_enabled())
-		return (void *)start;
-
 	if (!is_vmalloc_or_module_addr(start))
 		return (void *)start;
 
@@ -659,9 +650,6 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
  */
 void __kasan_poison_vmalloc(const void *start, unsigned long size)
 {
-	if (!kasan_enabled())
-		return;
-
 	if (!is_vmalloc_or_module_addr(start))
 		return;
 
-- 
2.34.1
Re: [PATCH 2/2] kasan: cleanup of kasan_enabled() checks
Posted by Andrey Konovalov 3 months, 2 weeks ago
On Thu, Oct 9, 2025 at 5:54 PM Sabyrzhan Tasbolatov <snovitoll@gmail.com> wrote:
> [...]

Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>

Awesome, thank you!

I believe the check in kasan_byte_accessible() can be just removed as
well? If you do, please run the tests to be sure.

As for the other three (check_inline_region(), kasan_poison(), and
kasan_poison_last_granule()) - perhaps, we can leave them be.
Otherwise, we would need to duplicate the kasan_enabled() checks in a
lot of compiler-inserted functions.