From: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>
A KASAN tag mismatch, possibly causing a kernel panic, can be observed
on systems with tag-based KASAN enabled and with multiple NUMA nodes.
It was reported on arm64 and reproduced on x86. The issue can be broken
down into the following points:
1. There can be more than one virtual memory chunk.
2. Each chunk's base address has a tag.
3. The percpu base address points at the first chunk and thus inherits
   the tag of the first chunk.
4. The subsequent chunks will therefore be accessed with the tag from
   the first chunk (sketched below).
5. Thus, the subsequent chunks need to have their tag set to
   match that of the first chunk.
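
Roughly, on the percpu side this looks as follows (a simplified sketch
of what mm/percpu-vm.c and mm/percpu.c do, not the literal code):

	vms = pcpu_get_vm_areas(...);
	/* each vms[i]->addr comes back unpoisoned with its own tag */

	chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];
	...
	/* unit addresses for all groups are later derived from base_addr: */
	addr = (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu];
	/*
	 * addr carries the tag of vms[0] even when it points into vms[1],
	 * vms[2], ..., whose memory was tagged differently.
	 */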
Use the modified __kasan_unpoison_vmalloc() to pass the tag of the first
vm_struct's address when vm_structs are unpoisoned in
pcpu_get_vm_areas(). Assigning a common tag resolves the pcpu chunk
address mismatch.
Fixes: 1d96320f8d53 ("kasan, vmalloc: add vmalloc tagging for SW_TAGS")
Cc: <stable@vger.kernel.org> # 6.1+
Signed-off-by: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>
---
Changelog v2:
- Revise the whole patch to match the fixed refactoring from the
  first patch.

Changelog v1:
- Rewrite the patch message to point at the user impact of the issue.
- Move the helper to common.c so it can be compiled in all KASAN modes.
mm/kasan/common.c | 3 ++-
mm/kasan/hw_tags.c | 12 ++++++++----
mm/kasan/shadow.c | 15 +++++++++++----
3 files changed, 21 insertions(+), 9 deletions(-)
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 7884ea7d13f9..e5a867a5670b 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -591,11 +591,12 @@ void kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
unsigned long size;
void *addr;
int area;
+ u8 tag = get_tag(vms[0]->addr);
for (area = 0 ; area < nr_vms ; area++) {
size = vms[area]->size;
addr = vms[area]->addr;
- vms[area]->addr = __kasan_unpoison_vmap_areas(addr, size, flags);
+ vms[area]->addr = __kasan_unpoison_vmap_areas(addr, size, flags, tag);
}
}
#endif
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index 4b7936a2bd6f..2a02b898b9d8 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -317,7 +317,7 @@ static void init_vmalloc_pages(const void *start, unsigned long size)
}
static void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
- kasan_vmalloc_flags_t flags)
+ kasan_vmalloc_flags_t flags, int unpoison_tag)
{
u8 tag;
unsigned long redzone_start, redzone_size;
@@ -361,7 +361,11 @@ static void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
return (void *)start;
}
- tag = kasan_random_tag();
+ if (unpoison_tag < 0)
+ tag = kasan_random_tag();
+ else
+ tag = unpoison_tag;
+
start = set_tag(start, tag);
/* Unpoison and initialize memory up to size. */
@@ -390,7 +394,7 @@ static void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
void *__kasan_random_unpoison_vmalloc(const void *start, unsigned long size,
kasan_vmalloc_flags_t flags)
{
- return __kasan_unpoison_vmalloc(start, size, flags);
+ return __kasan_unpoison_vmalloc(start, size, flags, -1);
}
void __kasan_poison_vmalloc(const void *start, unsigned long size)
@@ -405,7 +409,7 @@ void __kasan_poison_vmalloc(const void *start, unsigned long size)
void *__kasan_unpoison_vmap_areas(void *addr, unsigned long size,
kasan_vmalloc_flags_t flags, u8 tag)
{
- return __kasan_unpoison_vmalloc(addr, size, flags);
+ return __kasan_unpoison_vmalloc(addr, size, flags, tag);
}
#endif
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 0a8d8bf6e9cf..7a66ffc1d5b3 100644
--- a/mm/kasan/shadow.c
+++ b/mm/kasan/shadow.c
@@ -625,8 +625,10 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
}
static void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
- kasan_vmalloc_flags_t flags)
+ kasan_vmalloc_flags_t flags, int unpoison_tag)
{
+ u8 tag;
+
/*
* Software KASAN modes unpoison both VM_ALLOC and non-VM_ALLOC
* mappings, so the KASAN_VMALLOC_VM_ALLOC flag is ignored.
@@ -648,7 +650,12 @@ static void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
!(flags & KASAN_VMALLOC_PROT_NORMAL))
return (void *)start;
- start = set_tag(start, kasan_random_tag());
+ if (unpoison_tag < 0)
+ tag = kasan_random_tag();
+ else
+ tag = unpoison_tag;
+
+ start = set_tag(start, tag);
kasan_unpoison(start, size, false);
return (void *)start;
}
@@ -656,13 +663,13 @@ static void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
void *__kasan_random_unpoison_vmalloc(const void *start, unsigned long size,
kasan_vmalloc_flags_t flags)
{
- return __kasan_unpoison_vmalloc(start, size, flags);
+ return __kasan_unpoison_vmalloc(start, size, flags, -1);
}
void *__kasan_unpoison_vmap_areas(void *addr, unsigned long size,
kasan_vmalloc_flags_t flags, u8 tag)
{
- return __kasan_unpoison_vmalloc(addr, size, flags);
+ return __kasan_unpoison_vmalloc(addr, size, flags, tag);
}
/*
--
2.52.0
On Tue, Dec 2, 2025 at 3:29 PM Maciej Wieczor-Retman
<m.wieczorretman@pm.me> wrote:
>
> From: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>
>
> A KASAN tag mismatch, possibly causing a kernel panic, can be observed
> on systems with a tag-based KASAN enabled and with multiple NUMA nodes.
> It was reported on arm64 and reproduced on x86. It can be explained in
> the following points:
>
> 1. There can be more than one virtual memory chunk.
> 2. Chunk's base address has a tag.
> 3. The base address points at the first chunk and thus inherits
> the tag of the first chunk.
> 4. The subsequent chunks will be accessed with the tag from the
> first chunk.
> 5. Thus, the subsequent chunks need to have their tag set to
> match that of the first chunk.
>
> Use the modified __kasan_unpoison_vmalloc() to pass the tag of the first
> vm_struct's address when vm_structs are unpoisoned in
> pcpu_get_vm_areas(). Assigning a common tag resolves the pcpu chunk
> address mismatch.
>
> Fixes: 1d96320f8d53 ("kasan, vmalloc: add vmalloc tagging for SW_TAGS")
> Cc: <stable@vger.kernel.org> # 6.1+
> Signed-off-by: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>
> ---
> Changelog v2:
> - Revise the whole patch to match the fixed refactorization from the
> first patch.
>
> Changelog v1:
> - Rewrite the patch message to point at the user impact of the issue.
> - Move helper to common.c so it can be compiled in all KASAN modes.
>
> mm/kasan/common.c | 3 ++-
> mm/kasan/hw_tags.c | 12 ++++++++----
> mm/kasan/shadow.c | 15 +++++++++++----
> 3 files changed, 21 insertions(+), 9 deletions(-)
>
> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
> index 7884ea7d13f9..e5a867a5670b 100644
> --- a/mm/kasan/common.c
> +++ b/mm/kasan/common.c
> @@ -591,11 +591,12 @@ void kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
> unsigned long size;
> void *addr;
> int area;
> + u8 tag = get_tag(vms[0]->addr);
>
> for (area = 0 ; area < nr_vms ; area++) {
> size = vms[area]->size;
> addr = vms[area]->addr;
> - vms[area]->addr = __kasan_unpoison_vmap_areas(addr, size, flags);
> + vms[area]->addr = __kasan_unpoison_vmap_areas(addr, size, flags, tag);
I'm thinking what you can do here is:

	vms[area]->addr = set_tag(addr, tag);
	__kasan_unpoison_vmalloc(addr, size, flags | KASAN_VMALLOC_KEEP_TAG);

This is with the assumption that Jiayuan's patch is changed to add
KASAN_VMALLOC_KEEP_TAG to kasan_vmalloc_flags_t.

Then you should not need that extra __kasan_random_unpoison_vmalloc helper.
> }
> }
> #endif
> diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
> index 4b7936a2bd6f..2a02b898b9d8 100644
> --- a/mm/kasan/hw_tags.c
> +++ b/mm/kasan/hw_tags.c
> @@ -317,7 +317,7 @@ static void init_vmalloc_pages(const void *start, unsigned long size)
> }
>
> static void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
> - kasan_vmalloc_flags_t flags)
> + kasan_vmalloc_flags_t flags, int unpoison_tag)
> {
> u8 tag;
> unsigned long redzone_start, redzone_size;
> @@ -361,7 +361,11 @@ static void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
> return (void *)start;
> }
>
> - tag = kasan_random_tag();
> + if (unpoison_tag < 0)
> + tag = kasan_random_tag();
> + else
> + tag = unpoison_tag;
> +
> start = set_tag(start, tag);
>
> /* Unpoison and initialize memory up to size. */
> @@ -390,7 +394,7 @@ static void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
> void *__kasan_random_unpoison_vmalloc(const void *start, unsigned long size,
> kasan_vmalloc_flags_t flags)
> {
> - return __kasan_unpoison_vmalloc(start, size, flags);
> + return __kasan_unpoison_vmalloc(start, size, flags, -1);
> }
>
> void __kasan_poison_vmalloc(const void *start, unsigned long size)
> @@ -405,7 +409,7 @@ void __kasan_poison_vmalloc(const void *start, unsigned long size)
> void *__kasan_unpoison_vmap_areas(void *addr, unsigned long size,
> kasan_vmalloc_flags_t flags, u8 tag)
> {
> - return __kasan_unpoison_vmalloc(addr, size, flags);
> + return __kasan_unpoison_vmalloc(addr, size, flags, tag);
> }
> #endif
>
> diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
> index 0a8d8bf6e9cf..7a66ffc1d5b3 100644
> --- a/mm/kasan/shadow.c
> +++ b/mm/kasan/shadow.c
> @@ -625,8 +625,10 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
> }
>
> static void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
> - kasan_vmalloc_flags_t flags)
> + kasan_vmalloc_flags_t flags, int unpoison_tag)
> {
> + u8 tag;
> +
> /*
> * Software KASAN modes unpoison both VM_ALLOC and non-VM_ALLOC
> * mappings, so the KASAN_VMALLOC_VM_ALLOC flag is ignored.
> @@ -648,7 +650,12 @@ static void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
> !(flags & KASAN_VMALLOC_PROT_NORMAL))
> return (void *)start;
>
> - start = set_tag(start, kasan_random_tag());
> + if (unpoison_tag < 0)
> + tag = kasan_random_tag();
> + else
> + tag = unpoison_tag;
> +
> + start = set_tag(start, tag);
> kasan_unpoison(start, size, false);
> return (void *)start;
> }
> @@ -656,13 +663,13 @@ static void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
> void *__kasan_random_unpoison_vmalloc(const void *start, unsigned long size,
> kasan_vmalloc_flags_t flags)
> {
> - return __kasan_unpoison_vmalloc(start, size, flags);
> + return __kasan_unpoison_vmalloc(start, size, flags, -1);
> }
>
> void *__kasan_unpoison_vmap_areas(void *addr, unsigned long size,
> kasan_vmalloc_flags_t flags, u8 tag)
> {
> - return __kasan_unpoison_vmalloc(addr, size, flags);
> + return __kasan_unpoison_vmalloc(addr, size, flags, tag);
> }
>
> /*
> --
> 2.52.0
>
>
On 2025-12-03 at 16:53:01 +0100, Andrey Konovalov wrote:
>On Tue, Dec 2, 2025 at 3:29 PM Maciej Wieczor-Retman
><m.wieczorretman@pm.me> wrote:
>>
>> From: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>
>>
>> A KASAN tag mismatch, possibly causing a kernel panic, can be observed
>> on systems with a tag-based KASAN enabled and with multiple NUMA nodes.
>> It was reported on arm64 and reproduced on x86. It can be explained in
>> the following points:
>>
>> 1. There can be more than one virtual memory chunk.
>> 2. Chunk's base address has a tag.
>> 3. The base address points at the first chunk and thus inherits
>> the tag of the first chunk.
>> 4. The subsequent chunks will be accessed with the tag from the
>> first chunk.
>> 5. Thus, the subsequent chunks need to have their tag set to
>> match that of the first chunk.
>>
>> Use the modified __kasan_unpoison_vmalloc() to pass the tag of the first
>> vm_struct's address when vm_structs are unpoisoned in
>> pcpu_get_vm_areas(). Assigning a common tag resolves the pcpu chunk
>> address mismatch.
>>
>> Fixes: 1d96320f8d53 ("kasan, vmalloc: add vmalloc tagging for SW_TAGS")
>> Cc: <stable@vger.kernel.org> # 6.1+
>> Signed-off-by: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>
>> ---
>> Changelog v2:
>> - Revise the whole patch to match the fixed refactorization from the
>> first patch.
>>
>> Changelog v1:
>> - Rewrite the patch message to point at the user impact of the issue.
>> - Move helper to common.c so it can be compiled in all KASAN modes.
>>
>> mm/kasan/common.c | 3 ++-
>> mm/kasan/hw_tags.c | 12 ++++++++----
>> mm/kasan/shadow.c | 15 +++++++++++----
>> 3 files changed, 21 insertions(+), 9 deletions(-)
>>
>> diff --git a/mm/kasan/common.c b/mm/kasan/common.c
>> index 7884ea7d13f9..e5a867a5670b 100644
>> --- a/mm/kasan/common.c
>> +++ b/mm/kasan/common.c
>> @@ -591,11 +591,12 @@ void kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
>> unsigned long size;
>> void *addr;
>> int area;
>> + u8 tag = get_tag(vms[0]->addr);
>>
>> for (area = 0 ; area < nr_vms ; area++) {
>> size = vms[area]->size;
>> addr = vms[area]->addr;
>> - vms[area]->addr = __kasan_unpoison_vmap_areas(addr, size, flags);
>> + vms[area]->addr = __kasan_unpoison_vmap_areas(addr, size, flags, tag);
>
>I'm thinking what you can do here is:
>
>vms[area]->addr = set_tag(addr, tag);
>__kasan_unpoison_vmalloc(addr, size, flags | KASAN_VMALLOC_KEEP_TAG);
I noticed that something like this wouldn't work once I started trying
to rebase my work onto Jiayuan's. The line:

	+ u8 tag = get_tag(vms[0]->addr);

is wrong and should be:

	+ u8 tag = kasan_random_tag();

I was sure vms[0]->addr was already tagged (I recall checking this, so
I'm not sure whether something changed or my previous check was wrong),
but the problem here is that vms[0]->addr, vms[1]->addr ... were
unpoisoned with random tags, specifically different random tags. So
later in the pcpu chunk code, vms[1]-related pointers would get the tag
from vms[0]->addr.

So I think we still need a separate way to do __kasan_unpoison_vmalloc()
with a specific tag.
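
With that correction, the common.c hunk quoted above would roughly
become (just a sketch of the idea):

	u8 tag = kasan_random_tag();

	for (area = 0; area < nr_vms; area++)
		vms[area]->addr = __kasan_unpoison_vmap_areas(vms[area]->addr,
							      vms[area]->size,
							      flags, tag);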
>
>This is with the assumption that Jiayuan's patch is changed to add
>KASAN_VMALLOC_KEEP_TAG to kasan_vmalloc_flags_t.
>
>Then you should not need that extra __kasan_random_unpoison_vmalloc helper.
I already rewrote the patch on top of Jiayuan's. I was able to drop
__kasan_random_unpoison_vmalloc(), but I needed to add
__kasan_unpoison_vrealloc() so that I can pass the tag of the start
pointer to __kasan_unpoison_vmalloc(). I was hoping to post it today or
tomorrow so Jiayuan can check that my changes don't break his solution;
I'm just waiting to check that it compiles against all the fun kernel
configs.
--
kind regards
Maciej Wieczór-Retman
On Wed, Dec 3, 2025 at 5:24 PM Maciej Wieczór-Retman
<m.wieczorretman@pm.me> wrote:
>
> >I'm thinking what you can do here is:
> >
> >vms[area]->addr = set_tag(addr, tag);
> >__kasan_unpoison_vmalloc(addr, size, flags | KASAN_VMALLOC_KEEP_TAG);
>
>
> I noticed that something like this wouldn't work once I started trying
> to rebase my work onto Jiayuan's. The line:
> + u8 tag = get_tag(vms[0]->addr);
> is wrong and should be
> + u8 tag = kasan_random_tag();
Ah, right.
> I was sure the vms[0]->addr was already tagged (I recall checking this
> so I'm not sure if something changed or my previous check was wrong) but
> the problem here is that vms[0]->addr, vms[1]->addr ... were unpoisoned
> with random addresses, specifically different random addresses. So then
> later in the pcpu chunk code vms[1] related pointers would get the tag
> from vms[0]->addr.
>
> So I think we still need a separate way to do __kasan_unpoison_vmalloc
> with a specific tag.
Why?

Assuming KASAN_VMALLOC_KEEP_TAG takes the tag from the pointer, just do:

	tag = kasan_random_tag();
	for (area = 0; ...) {
		vms[area]->addr = set_tag(vms[area]->addr, tag);
		__kasan_unpoison_vmalloc(vms[area]->addr, vms[area]->size,
					 flags | KASAN_VMALLOC_KEEP_TAG);
	}

Or maybe even better:

	vms[0]->addr = __kasan_unpoison_vmalloc(vms[0]->addr, vms[0]->size, flags);
	tag = get_tag(vms[0]->addr);
	for (area = 1; ...) {
		vms[area]->addr = set_tag(vms[area]->addr, tag);
		__kasan_unpoison_vmalloc(vms[area]->addr, vms[area]->size,
					 flags | KASAN_VMALLOC_KEEP_TAG);
	}

This way we won't assign a random tag unless it's actually needed
(i.e. when KASAN_VMALLOC_PROT_NORMAL is not provided; assuming we care
to support that case).
On 2025-12-04 at 01:43:36 +0100, Andrey Konovalov wrote:
>On Wed, Dec 3, 2025 at 5:24 PM Maciej Wieczór-Retman
><m.wieczorretman@pm.me> wrote:
>> I was sure the vms[0]->addr was already tagged (I recall checking this
>> so I'm not sure if something changed or my previous check was wrong) but
>> the problem here is that vms[0]->addr, vms[1]->addr ... were unpoisoned
>> with random addresses, specifically different random addresses. So then
>> later in the pcpu chunk code vms[1] related pointers would get the tag
>> from vms[0]->addr.
>>
>> So I think we still need a separate way to do __kasan_unpoison_vmalloc
>> with a specific tag.
>
>Why?
>
>Assuming KASAN_VMALLOC_KEEP_TAG takes the tag from the pointer, just do:
>
>tag = kasan_random_tag();
>for (area = 0; ...) {
> vms[area]->addr = set_tag(vms[area]->addr, tag);
> __kasan_unpoison_vmalloc(vms[area]->addr, vms[area]->size, flags |
>KASAN_VMALLOC_KEEP_TAG);
>}
>
>Or maybe even better:
>
>vms[0]->addr = __kasan_unpoison_vmalloc(vms[0]->addr, vms[0]->size, flags);
>tag = get_tag(vms[0]->addr);
>for (area = 1; ...) {
> vms[area]->addr = set_tag(vms[area]->addr, tag);
> __kasan_unpoison_vmalloc(vms[area]->addr, vms[area]->size, flags |
>KASAN_VMALLOC_KEEP_TAG);
>}
>
>This way we won't assign a random tag unless it's actually needed
>(i.e. when KASAN_VMALLOC_PROT_NORMAL is not provided; assuming we care
>to support that case).
Oh, right, yes, that would work nicely. I thought putting these behind
helpers would come out clean, but this is very neat too.
I suppose I'll wait for Jiayuan to update his patch and then I'll make
these changes on top of that.
Thanks! :)
--
Kind regards
Maciej Wieczór-Retman