From: Ethan Graham <ethangraham@google.com>
Introduce a new helper function, kasan_poison_range(), to encapsulate
the logic for poisoning an arbitrary memory range of a given size, and
expose it publicly in <include/linux/kasan.h>.
This is a preparatory change for the upcoming KFuzzTest patches, which
require the ability to poison the inter-region padding in its input
buffers.
No functional change to any other subsystem is intended by this commit.
Signed-off-by: Ethan Graham <ethangraham@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
---
PR v1:
- Enforce KASAN_GRANULE_SIZE alignment for the end of the range in
kasan_poison_range(), and return -EINVAL when this isn't respected.
---
---
include/linux/kasan.h | 11 +++++++++++
mm/kasan/shadow.c | 34 ++++++++++++++++++++++++++++++++++
2 files changed, 45 insertions(+)
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 890011071f2b..cd6cdf732378 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -102,6 +102,16 @@ static inline bool kasan_has_integrated_init(void)
}
#ifdef CONFIG_KASAN
+
+/**
+ * kasan_poison_range - poison the memory range [@addr, @addr + @size)
+ * @addr: start of the range; may be unaligned to KASAN_GRANULE_SIZE.
+ * @size: size of the range; @addr + @size must be KASAN_GRANULE_SIZE-aligned.
+ *
+ * The exact behavior is subject to alignment with KASAN_GRANULE_SIZE, defined
+ * in <mm/kasan/kasan.h>: if @addr is unaligned, the initial partial granule
+ * at the beginning of the range is only poisoned if CONFIG_KASAN_GENERIC=y.
+ *
+ * Return: 0 on success, -EINVAL if the range is empty or misaligned.
+ */
+int kasan_poison_range(const void *addr, size_t size);
+
void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
@@ -402,6 +412,7 @@ static __always_inline bool kasan_check_byte(const void *addr)
#else /* CONFIG_KASAN */
+static inline int kasan_poison_range(const void *addr, size_t size) { return 0; }
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
bool init) {}
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index d2c70cd2afb1..7faed02264f2 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -147,6 +147,40 @@ void kasan_poison(const void *addr, size_t size, u8 value, bool init)
}
EXPORT_SYMBOL_GPL(kasan_poison);
+/* Poison [addr, addr + size); see kernel-doc in <linux/kasan.h>. */
+int kasan_poison_range(const void *addr, size_t size)
+{
+	uintptr_t start_addr = (uintptr_t)addr;
+	uintptr_t end_addr = start_addr + size;
+	uintptr_t head_granule_start;
+	uintptr_t body_start;
+	size_t head_prefix_size;
+
+	/*
+	 * The end of the range must be granule-aligned and the range must be
+	 * non-empty. Warn, like other KASAN helpers do, to force callers to
+	 * respect the alignment contract rather than failing silently.
+	 */
+	if (WARN_ON((start_addr + size) % KASAN_GRANULE_SIZE))
+		return -EINVAL;
+	if (WARN_ON(start_addr >= end_addr))
+		return -EINVAL;
+
+	head_granule_start = ALIGN_DOWN(start_addr, KASAN_GRANULE_SIZE);
+	head_prefix_size = start_addr - head_granule_start;
+
+	/* A partial head granule can only be poisoned in generic mode. */
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC) && head_prefix_size > 0)
+		kasan_poison_last_granule((void *)head_granule_start,
+					  head_prefix_size);
+
+	/*
+	 * end_addr is already granule-aligned (checked above), so only the
+	 * body start needs rounding up; no ALIGN_DOWN() of the end required.
+	 */
+	body_start = ALIGN(start_addr, KASAN_GRANULE_SIZE);
+	if (body_start < end_addr)
+		kasan_poison((void *)body_start, end_addr - body_start,
+			     KASAN_SLAB_REDZONE, false);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kasan_poison_range);
+
#ifdef CONFIG_KASAN_GENERIC
void kasan_poison_last_granule(const void *addr, size_t size)
{
--
2.51.0.470.ga7dc726c21-goog
On Fri, Sep 19, 2025 at 4:58 PM Ethan Graham <ethan.w.s.graham@gmail.com> wrote: > > From: Ethan Graham <ethangraham@google.com> > > Introduce a new helper function, kasan_poison_range(), to encapsulate > the logic for poisoning an arbitrary memory range of a given size, and > expose it publically in <include/linux/kasan.h>. > > This is a preparatory change for the upcoming KFuzzTest patches, which > requires the ability to poison the inter-region padding in its input > buffers. > > No functional change to any other subsystem is intended by this commit. > > Signed-off-by: Ethan Graham <ethangraham@google.com> > Reviewed-by: Alexander Potapenko <glider@google.com> > > --- > PR v1: > - Enforce KASAN_GRANULE_SIZE alignment for the end of the range in > kasan_poison_range(), and return -EINVAL when this isn't respected. > --- > --- > include/linux/kasan.h | 11 +++++++++++ > mm/kasan/shadow.c | 34 ++++++++++++++++++++++++++++++++++ > 2 files changed, 45 insertions(+) > > diff --git a/include/linux/kasan.h b/include/linux/kasan.h > index 890011071f2b..cd6cdf732378 100644 > --- a/include/linux/kasan.h > +++ b/include/linux/kasan.h > @@ -102,6 +102,16 @@ static inline bool kasan_has_integrated_init(void) > } > > #ifdef CONFIG_KASAN > + > +/** > + * kasan_poison_range - poison the memory range [@addr, @addr + @size) > + * > + * The exact behavior is subject to alignment with KASAN_GRANULE_SIZE, defined > + * in <mm/kasan/kasan.h>: if @start is unaligned, the initial partial granule > + * at the beginning of the range is only poisoned if CONFIG_KASAN_GENERIC=y. 
> + */ > +int kasan_poison_range(const void *addr, size_t size); > + > void __kasan_unpoison_range(const void *addr, size_t size); > static __always_inline void kasan_unpoison_range(const void *addr, size_t size) > { > @@ -402,6 +412,7 @@ static __always_inline bool kasan_check_byte(const void *addr) > > #else /* CONFIG_KASAN */ > > +static inline int kasan_poison_range(const void *start, size_t size) { return 0; } > static inline void kasan_unpoison_range(const void *address, size_t size) {} > static inline void kasan_poison_pages(struct page *page, unsigned int order, > bool init) {} > diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c > index d2c70cd2afb1..7faed02264f2 100644 > --- a/mm/kasan/shadow.c > +++ b/mm/kasan/shadow.c > @@ -147,6 +147,40 @@ void kasan_poison(const void *addr, size_t size, u8 value, bool init) > } > EXPORT_SYMBOL_GPL(kasan_poison); > > +int kasan_poison_range(const void *addr, size_t size) This should go into common.c, otherwise this won't be built with the HW_TAGS mode enabled. Also, you need a wrapper with a kasan_enabled() check; see how kasan_unpoison_range() is defined. > +{ > + uintptr_t start_addr = (uintptr_t)addr; > + uintptr_t head_granule_start; > + uintptr_t poison_body_start; > + uintptr_t poison_body_end; > + size_t head_prefix_size; > + uintptr_t end_addr; > + > + if ((start_addr + size) % KASAN_GRANULE_SIZE) > + return -EINVAL; Other similar KASAN functions do a WARN_ON(bad alignment). I think printing a warning is fair for this to force the caller to enforce proper alignment. > + > + end_addr = ALIGN_DOWN(start_addr + size, KASAN_GRANULE_SIZE); I don't think we need to ALIGN_DOWN(): we already checked that (start_addr + size) % KASAN_GRANULE_SIZE == 0. > + if (start_addr >= end_addr) > + return -EINVAL; Can also do a WARN_ON(). 
> + > + head_granule_start = ALIGN_DOWN(start_addr, KASAN_GRANULE_SIZE); > + head_prefix_size = start_addr - head_granule_start; > + > + if (IS_ENABLED(CONFIG_KASAN_GENERIC) && head_prefix_size > 0) > + kasan_poison_last_granule((void *)head_granule_start, > + head_prefix_size); Let's rename kasan_poison_last_granule() to kasan_poison_granule() then. Here the granule being poisoned is not the last one. > + > + poison_body_start = ALIGN(start_addr, KASAN_GRANULE_SIZE); > + poison_body_end = ALIGN_DOWN(end_addr, KASAN_GRANULE_SIZE); end_addr is already aligned. > + > + if (poison_body_start < poison_body_end) > + kasan_poison((void *)poison_body_start, > + poison_body_end - poison_body_start, > + KASAN_SLAB_REDZONE, false); > + return 0; > +} > +EXPORT_SYMBOL(kasan_poison_range); > + > #ifdef CONFIG_KASAN_GENERIC > void kasan_poison_last_granule(const void *addr, size_t size) > { > -- > 2.51.0.470.ga7dc726c21-goog >
© 2016 - 2025 Red Hat, Inc.