From: Thomas Gleixner <tglx@linutronix.de>
To prepare for splitting the buffer allocation out into seperate functions
for the ring buffer and the AUX buffer, split out mlock limit handling into
a helper function, which can be called from both.
No functional change intended.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Link: https://lkml.kernel.org/r/20250811070620.463634790@linutronix.de
---
kernel/events/core.c | 77 +++++++++++++++++++++++++--------------------------
1 file changed, 38 insertions(+), 39 deletions(-)
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6927,17 +6927,49 @@ static int map_range(struct perf_buffer
return err;
}
+static bool perf_mmap_calc_limits(struct vm_area_struct *vma, long *user_extra, long *extra)
+{
+ unsigned long user_locked, user_lock_limit, locked, lock_limit;
+ struct user_struct *user = current_user();
+
+ user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
+ /* Increase the limit linearly with more CPUs */
+ user_lock_limit *= num_online_cpus();
+
+ user_locked = atomic_long_read(&user->locked_vm);
+
+ /*
+ * sysctl_perf_event_mlock may have changed, so that
+ * user->locked_vm > user_lock_limit
+ */
+ if (user_locked > user_lock_limit)
+ user_locked = user_lock_limit;
+ user_locked += *user_extra;
+
+ if (user_locked > user_lock_limit) {
+ /*
+ * charge locked_vm until it hits user_lock_limit;
+ * charge the rest from pinned_vm
+ */
+ *extra = user_locked - user_lock_limit;
+ *user_extra -= *extra;
+ }
+
+ lock_limit = rlimit(RLIMIT_MEMLOCK);
+ lock_limit >>= PAGE_SHIFT;
+ locked = atomic64_read(&vma->vm_mm->pinned_vm) + *extra;
+
+ return locked <= lock_limit || !perf_is_paranoid() || capable(CAP_IPC_LOCK);
+}
+
static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
struct perf_event *event = file->private_data;
- unsigned long user_locked, user_lock_limit;
struct user_struct *user = current_user();
+ unsigned long vma_size, nr_pages;
+ long user_extra = 0, extra = 0;
struct mutex *aux_mutex = NULL;
struct perf_buffer *rb = NULL;
- unsigned long locked, lock_limit;
- unsigned long vma_size;
- unsigned long nr_pages;
- long user_extra = 0, extra = 0;
int ret, flags = 0;
mapped_f mapped;
@@ -7063,38 +7093,7 @@ static int perf_mmap(struct file *file,
}
}
- user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
-
- /*
- * Increase the limit linearly with more CPUs:
- */
- user_lock_limit *= num_online_cpus();
-
- user_locked = atomic_long_read(&user->locked_vm);
-
- /*
- * sysctl_perf_event_mlock may have changed, so that
- * user->locked_vm > user_lock_limit
- */
- if (user_locked > user_lock_limit)
- user_locked = user_lock_limit;
- user_locked += user_extra;
-
- if (user_locked > user_lock_limit) {
- /*
- * charge locked_vm until it hits user_lock_limit;
- * charge the rest from pinned_vm
- */
- extra = user_locked - user_lock_limit;
- user_extra -= extra;
- }
-
- lock_limit = rlimit(RLIMIT_MEMLOCK);
- lock_limit >>= PAGE_SHIFT;
- locked = atomic64_read(&vma->vm_mm->pinned_vm) + extra;
-
- if ((locked > lock_limit) && perf_is_paranoid() &&
- !capable(CAP_IPC_LOCK)) {
+ if (!perf_mmap_calc_limits(vma, &user_extra, &extra)) {
ret = -EPERM;
goto unlock;
}
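
The point of the split is that the same limit/accounting check can be reused once the ring-buffer and AUX-buffer setup paths are separated out. As a rough sketch of that calling shape only, the standalone userspace mock below uses a simplified helper with the same out-parameter contract; the path function names, the trivial verdict, and the sample numbers are illustrative assumptions, not kernel code and not part of this patch.

/* Standalone userspace mock of the intended calling pattern; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

/*
 * Simplified stand-in for perf_mmap_calc_limits(): split the request and
 * return a verdict. Assumes nothing is charged to locked_vm yet; the real
 * helper also accounts existing locked_vm and checks RLIMIT_MEMLOCK /
 * CAP_IPC_LOCK.
 */
static bool calc_limits(long limit, long *user_extra, long *extra)
{
	if (*user_extra > limit) {
		*extra = *user_extra - limit;	/* excess charged to pinned_vm */
		*user_extra = limit;		/* remainder charged to locked_vm */
	}
	return true;
}

/* Hypothetical ring-buffer setup path */
static int setup_rb(long limit, long nr_pages)
{
	long user_extra = nr_pages + 1, extra = 0;	/* data pages plus the user page */

	if (!calc_limits(limit, &user_extra, &extra))
		return -1;
	printf("rb:  locked_vm += %ld, pinned_vm += %ld\n", user_extra, extra);
	return 0;
}

/* Hypothetical AUX buffer setup path */
static int setup_aux(long limit, long nr_pages)
{
	long user_extra = nr_pages, extra = 0;

	if (!calc_limits(limit, &user_extra, &extra))
		return -1;
	printf("aux: locked_vm += %ld, pinned_vm += %ld\n", user_extra, extra);
	return 0;
}

int main(void)
{
	setup_rb(1032, 2048);	/* exceeds the per-user budget, spills into pinned_vm */
	setup_aux(1032, 512);	/* fits entirely within locked_vm */
	return 0;
}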
On Tue, Aug 12, 2025 at 12:39:00PM +0200, Peter Zijlstra wrote:
> From: Thomas Gleixner <tglx@linutronix.de>
>
> To prepare for splitting the buffer allocation out into seperate functions

NIT: Same comment as 1/2, seperate -> separate, again doesn't hugely matter
but just FYI!

> for the ring buffer and the AUX buffer, split out mlock limit handling into
> a helper function, which can be called from both.
>
> No functional change intended.
>
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
> Link: https://lkml.kernel.org/r/20250811070620.463634790@linutronix.de
The following commit has been merged into the perf/core branch of tip:
Commit-ID: 81e026ca47b386e4213c1beff069038a3ba8bb76
Gitweb: https://git.kernel.org/tip/81e026ca47b386e4213c1beff069038a3ba8bb76
Author: Thomas Gleixner <tglx@linutronix.de>
AuthorDate: Tue, 12 Aug 2025 12:39:00 +02:00
Committer: Peter Zijlstra <peterz@infradead.org>
CommitterDate: Fri, 15 Aug 2025 13:12:58 +02:00
perf: Split out mlock limit handling
To prepare for splitting the buffer allocation out into separate functions
for the ring buffer and the AUX buffer, split out mlock limit handling into
a helper function, which can be called from both.
No functional change intended.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Link: https://lore.kernel.org/r/20250812104018.541975109@infradead.org
---
kernel/events/core.c | 75 +++++++++++++++++++++----------------------
1 file changed, 38 insertions(+), 37 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index eea3a7d..f629901 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6927,17 +6927,49 @@ static int map_range(struct perf_buffer *rb, struct vm_area_struct *vma)
return err;
}
+static bool perf_mmap_calc_limits(struct vm_area_struct *vma, long *user_extra, long *extra)
+{
+ unsigned long user_locked, user_lock_limit, locked, lock_limit;
+ struct user_struct *user = current_user();
+
+ user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
+ /* Increase the limit linearly with more CPUs */
+ user_lock_limit *= num_online_cpus();
+
+ user_locked = atomic_long_read(&user->locked_vm);
+
+ /*
+ * sysctl_perf_event_mlock may have changed, so that
+ * user->locked_vm > user_lock_limit
+ */
+ if (user_locked > user_lock_limit)
+ user_locked = user_lock_limit;
+ user_locked += *user_extra;
+
+ if (user_locked > user_lock_limit) {
+ /*
+ * charge locked_vm until it hits user_lock_limit;
+ * charge the rest from pinned_vm
+ */
+ *extra = user_locked - user_lock_limit;
+ *user_extra -= *extra;
+ }
+
+ lock_limit = rlimit(RLIMIT_MEMLOCK);
+ lock_limit >>= PAGE_SHIFT;
+ locked = atomic64_read(&vma->vm_mm->pinned_vm) + *extra;
+
+ return locked <= lock_limit || !perf_is_paranoid() || capable(CAP_IPC_LOCK);
+}
+
static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
struct perf_event *event = file->private_data;
- unsigned long user_locked, user_lock_limit;
struct user_struct *user = current_user();
+ unsigned long vma_size, nr_pages;
+ long user_extra = 0, extra = 0;
struct mutex *aux_mutex = NULL;
struct perf_buffer *rb = NULL;
- unsigned long locked, lock_limit;
- unsigned long vma_size;
- unsigned long nr_pages;
- long user_extra = 0, extra = 0;
int ret, flags = 0;
mapped_f mapped;
@@ -7063,38 +7095,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
}
}
- user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
-
- /*
- * Increase the limit linearly with more CPUs:
- */
- user_lock_limit *= num_online_cpus();
-
- user_locked = atomic_long_read(&user->locked_vm);
-
- /*
- * sysctl_perf_event_mlock may have changed, so that
- * user->locked_vm > user_lock_limit
- */
- if (user_locked > user_lock_limit)
- user_locked = user_lock_limit;
- user_locked += user_extra;
-
- if (user_locked > user_lock_limit) {
- /*
- * charge locked_vm until it hits user_lock_limit;
- * charge the rest from pinned_vm
- */
- extra = user_locked - user_lock_limit;
- user_extra -= extra;
- }
-
- lock_limit = rlimit(RLIMIT_MEMLOCK);
- lock_limit >>= PAGE_SHIFT;
- locked = atomic64_read(&vma->vm_mm->pinned_vm) + extra;
-
- if ((locked > lock_limit) && perf_is_paranoid() &&
- !capable(CAP_IPC_LOCK)) {
+ if (!perf_mmap_calc_limits(vma, &user_extra, &extra)) {
ret = -EPERM;
goto unlock;
}
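
For readers puzzling over the shifts in the merged helper: sysctl_perf_event_mlock is expressed in KiB, so shifting right by (PAGE_SHIFT - 10) converts it to pages before it is scaled by the number of online CPUs, while RLIMIT_MEMLOCK is in bytes and is shifted by PAGE_SHIFT. The quick sketch below just redoes that arithmetic with assumed values (4 KiB pages, a 516 KiB sysctl setting, 8 online CPUs, a 64 MiB RLIMIT_MEMLOCK); the numbers are illustrative, not taken from the patch.

/* Unit-conversion arithmetic only; all values are assumptions for illustration. */
#include <stdio.h>

#define PAGE_SHIFT 12				/* assumed 4 KiB pages */

int main(void)
{
	unsigned long mlock_kib = 516;		/* assumed sysctl_perf_event_mlock, in KiB */
	unsigned long cpus      = 8;		/* assumed num_online_cpus() */
	unsigned long memlock   = 64UL << 20;	/* assumed RLIMIT_MEMLOCK, in bytes */

	/* KiB -> pages: 2^10 bytes per KiB vs 2^PAGE_SHIFT bytes per page */
	unsigned long user_lock_limit = (mlock_kib >> (PAGE_SHIFT - 10)) * cpus;
	/* bytes -> pages */
	unsigned long lock_limit = memlock >> PAGE_SHIFT;

	printf("per-user limit: %lu pages, RLIMIT_MEMLOCK: %lu pages\n",
	       user_lock_limit, lock_limit);
	return 0;
}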