From: Haocheng Yu <yuhaocheng035@gmail.com>
Syzkaller reported a "refcount_t: addition on 0; use-after-free" warning
in perf_mmap().
The issue is caused by a race condition between a failing mmap() setup
and a concurrent mmap() on a dependent event (e.g., using output
redirection).
In perf_mmap(), the ring_buffer (rb) is allocated and assigned to
event->rb with mmap_mutex held. The mutex is then released before
map_range() is called.
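
For reference, the pre-patch ordering looks roughly like this (a
simplified pseudocode sketch of the flow described above, not the
literal kernel source; rb_alloc() stands in for the real allocation
path):

	/*
	 * Sketch of the pre-patch perf_mmap() flow (illustrative only):
	 */
	mutex_lock(&event->mmap_mutex);
	rb = rb_alloc(...);                 /* allocate the ring buffer   */
	rcu_assign_pointer(event->rb, rb);  /* visible to other threads   */
	mutex_unlock(&event->mmap_mutex);   /* <-- race window opens here */

	ret = map_range(event->rb, vma);    /* runs without the mutex     */
	if (ret)
		perf_mmap_close(vma);       /* may drop the last reference */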
If map_range() fails, perf_mmap_close() is called to clean up.
However, since the mutex was dropped, another thread attaching to
this event (via inherited events or output redirection) can acquire
the mutex, observe the valid event->rb pointer, and attempt to
increment its reference count. If the cleanup path has already
dropped the reference count to zero, this results in a
use-after-free or refcount saturation warning.
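
The interleaving can be reproduced in a plain userspace model. The
program below is not kernel code: it uses pthreads and a bare atomic
counter to mimic refcount_inc() racing against the final
refcount_dec(), which is exactly the "addition on 0" condition that
refcount_t warns about (build with: gcc -pthread model.c):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	/* Toy model of the event->rb refcount race; not kernel code. */
	static atomic_int refcount = 1;  /* held by the failing mmap() */

	/* Mimics refcount_inc(): incrementing from 0 is the bug. */
	static void ref_get(void)
	{
		if (atomic_fetch_add(&refcount, 1) == 0)
			fprintf(stderr, "refcount_t: addition on 0; use-after-free\n");
	}

	/* Mimics the final refcount_dec(): frees the buffer at zero. */
	static void ref_put(void)
	{
		if (atomic_fetch_sub(&refcount, 1) == 1)
			; /* the kernel would free the ring buffer here */
	}

	/* Thread A: cleanup path of the failing mmap(), mutex dropped. */
	static void *cleanup_path(void *arg)
	{
		ref_put();
		return NULL;
	}

	/* Thread B: concurrent mmap() that still sees event->rb. */
	static void *concurrent_mmap(void *arg)
	{
		ref_get(); /* may run after the count already hit zero */
		return NULL;
	}

	int main(void)
	{
		pthread_t a, b;

		pthread_create(&a, NULL, cleanup_path, NULL);
		pthread_create(&b, NULL, concurrent_mmap, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		return 0;
	}

Note the warning only fires on runs where thread B's increment loses
the race, just as the kernel report is timing dependent.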
Fix this by extending the scope of mmap_mutex to cover the
map_range() call. This ensures that ring-buffer initialization and
mapping (or cleanup on failure) happen atomically, effectively
preventing other threads from accessing a half-initialized or dying
ring buffer.
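
In terms of the userspace model above, the fix corresponds to
serializing the final put and the get behind one lock. This is a
sketch of the locking idea only, assuming the changelog's description
of the new mmap_mutex scope; the names come from the toy model, not
from the kernel:

	static pthread_mutex_t mmap_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Cleanup path: the last put now happens under the lock. */
	static void *cleanup_path_fixed(void *arg)
	{
		pthread_mutex_lock(&mmap_lock);
		ref_put();
		pthread_mutex_unlock(&mmap_lock);
		return NULL;
	}

	/*
	 * Concurrent mmap(): serialized with the put, so it either sees
	 * the buffer alive (count > 0) or already gone, never a count
	 * dropping to zero underneath it.
	 */
	static void *concurrent_mmap_fixed(void *arg)
	{
		pthread_mutex_lock(&mmap_lock);
		if (atomic_load(&refcount) > 0)
			ref_get();
		pthread_mutex_unlock(&mmap_lock);
		return NULL;
	}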
Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202602020208.m7KIjdzW-lkp@intel.com/
Signed-off-by: Haocheng Yu <yuhaocheng035@gmail.com>
---
kernel/events/core.c | 38 +++++++++++++++++++-------------------
1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2c35acc2722b..abefd1213582 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7167,28 +7167,28 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 		ret = perf_mmap_aux(vma, event, nr_pages);
 		if (ret)
 			return ret;
-	}
 
-	/*
-	 * Since pinned accounting is per vm we cannot allow fork() to copy our
-	 * vma.
-	 */
-	vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP);
-	vma->vm_ops = &perf_mmap_vmops;
+		/*
+		 * Since pinned accounting is per vm we cannot allow fork() to copy our
+		 * vma.
+		 */
+		vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP);
+		vma->vm_ops = &perf_mmap_vmops;
 
-	mapped = get_mapped(event, event_mapped);
-	if (mapped)
-		mapped(event, vma->vm_mm);
+		mapped = get_mapped(event, event_mapped);
+		if (mapped)
+			mapped(event, vma->vm_mm);
 
-	/*
-	 * Try to map it into the page table. On fail, invoke
-	 * perf_mmap_close() to undo the above, as the callsite expects
-	 * full cleanup in this case and therefore does not invoke
-	 * vmops::close().
-	 */
-	ret = map_range(event->rb, vma);
-	if (ret)
-		perf_mmap_close(vma);
+		/*
+		 * Try to map it into the page table. On fail, invoke
+		 * perf_mmap_close() to undo the above, as the callsite expects
+		 * full cleanup in this case and therefore does not invoke
+		 * vmops::close().
+		 */
+		ret = map_range(event->rb, vma);
+		if (ret)
+			perf_mmap_close(vma);
+	}
 
 	return ret;
 }
base-commit: 7d0a66e4bb9081d75c82ec4957c50034cb0ea449
--
2.51.0
On Tue, Feb 03, 2026 at 12:20:56AM +0800, yuhaocheng035@gmail.com wrote:
> From: Haocheng Yu <yuhaocheng035@gmail.com>
>
> Syzkaller reported a "refcount_t: addition on 0; use-after-free" warning
> in perf_mmap().
>
> [...]
>
> Fix this by extending the scope of mmap_mutex to cover the
> map_range() call. This ensures that ring-buffer initialization and
> mapping (or cleanup on failure) happen atomically, effectively
> preventing other threads from accessing a half-initialized or dying
> ring buffer.

And you're sure this time?

To me it feels a bit like talking to an LLM. I suppose there is nothing
wrong with having an LLM process syzkaller output and even have it
propose patches, but before you send it out an actual human should get
involved and apply critical thinking skills.

Just throwing stuff at a maintainer and hoping he does the thinking for
you is not appreciated.

> [...]