[PATCH v3 10/15] perf: Split out the AUX buffer allocation

Posted by Peter Zijlstra
Move the AUX buffer allocation branch into its own function.
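
For context, the layout the new helper validates is the existing AUX
ABI: the AUX area must sit above the normal buffer (user page plus a
power-of-2 number of data pages), at the offset and size userspace
publishes in the user page, and must itself span a power-of-2 number
of pages. A minimal, illustrative userspace sketch of that sequence
(perf_event_open() setup and error handling elided):

  #include <sys/mman.h>
  #include <unistd.h>
  #include <linux/perf_event.h>

  /* Illustrative only: map the data buffer, publish aux_offset and
   * aux_size in the user page, then map the AUX area at that offset.
   * perf_mmap_aux() rejects anything that violates this layout. */
  static void *map_aux_area(int fd, size_t data_pages, size_t aux_pages)
  {
          long ps = sysconf(_SC_PAGESIZE);
          size_t rb_bytes = (1 + data_pages) * ps;  /* user page + data */
          struct perf_event_mmap_page *up;

          up = mmap(NULL, rb_bytes, PROT_READ | PROT_WRITE,
                    MAP_SHARED, fd, 0);
          if (up == MAP_FAILED)
                  return NULL;

          up->aux_offset = rb_bytes;          /* above the normal buffer */
          up->aux_size   = aux_pages * ps;    /* aux_pages must be 2^n */

          return mmap(NULL, up->aux_size, PROT_READ | PROT_WRITE,
                      MAP_SHARED, fd, up->aux_offset);
  }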

Originally-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
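A note on locking: the helper can use plain early returns because
rb->aux_mutex is taken via guard(mutex)() from <linux/cleanup.h> and
is released automatically on every exit path; event->mmap_mutex stays
held by the caller, which still funnels through perf_mmap()'s unlock:
label. A minimal sketch of the guard pattern (names illustrative):

  #include <linux/cleanup.h>
  #include <linux/mutex.h>

  static DEFINE_MUTEX(demo_lock);

  static int demo_check(int arg)
  {
          guard(mutex)(&demo_lock);  /* released on every return below */

          if (arg < 0)
                  return -EINVAL;    /* no goto/unlock bookkeeping */

          return 0;
  }
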
 kernel/events/core.c |  144 +++++++++++++++++++++++++++------------------------
 1 file changed, 77 insertions(+), 67 deletions(-)

--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6970,6 +6970,82 @@ static void perf_mmap_account(struct vm_
 	atomic64_add(extra, &vma->vm_mm->pinned_vm);
 }
 
+static int perf_mmap_aux(struct vm_area_struct *vma, struct perf_event *event,
+			 unsigned long nr_pages)
+{
+	long extra = 0, user_extra = nr_pages;
+	u64 aux_offset, aux_size;
+	struct perf_buffer *rb;
+	int ret, rb_flags = 0;
+
+	rb = event->rb;
+	if (!rb)
+		return -EINVAL;
+
+	guard(mutex)(&rb->aux_mutex);
+
+	/*
+	 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
+	 * mapped, all subsequent mappings should have the same size
+	 * and offset. Must be above the normal perf buffer.
+	 */
+	aux_offset = READ_ONCE(rb->user_page->aux_offset);
+	aux_size = READ_ONCE(rb->user_page->aux_size);
+
+	if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
+		return -EINVAL;
+
+	if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
+		return -EINVAL;
+
+	/* already mapped with a different offset */
+	if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
+		return -EINVAL;
+
+	if (aux_size != nr_pages * PAGE_SIZE)
+		return -EINVAL;
+
+	/* already mapped with a different size */
+	if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
+		return -EINVAL;
+
+	if (!is_power_of_2(nr_pages))
+		return -EINVAL;
+
+	if (!atomic_inc_not_zero(&rb->mmap_count))
+		return -EINVAL;
+
+	if (rb_has_aux(rb)) {
+		atomic_inc(&rb->aux_mmap_count);
+
+	} else {
+		if (!perf_mmap_calc_limits(vma, &user_extra, &extra)) {
+			atomic_dec(&rb->mmap_count);
+			return -EPERM;
+		}
+
+		WARN_ON(!rb && event->rb);
+
+		if (vma->vm_flags & VM_WRITE)
+			rb_flags |= RING_BUFFER_WRITABLE;
+
+		ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
+				   event->attr.aux_watermark, rb_flags);
+		if (ret) {
+			atomic_dec(&rb->mmap_count);
+			return ret;
+		}
+
+		atomic_set(&rb->aux_mmap_count, 1);
+		rb->aux_mmap_locked = extra;
+	}
+
+	perf_mmap_account(vma, user_extra, extra);
+	atomic_inc(&event->mmap_count);
+
+	return 0;
+}
+
 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct perf_event *event = file->private_data;
@@ -7087,73 +7163,7 @@ static int perf_mmap(struct file *file,
 		perf_mmap_account(vma, user_extra, extra);
 		atomic_inc(&event->mmap_count);
 	} else {
-		/*
-		 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
-		 * mapped, all subsequent mappings should have the same size
-		 * and offset. Must be above the normal perf buffer.
-		 */
-		u64 aux_offset, aux_size;
-
-		rb = event->rb;
-		if (!rb)
-			goto unlock;
-
-		guard(mutex)(&rb->aux_mutex);
-
-		aux_offset = READ_ONCE(rb->user_page->aux_offset);
-		aux_size = READ_ONCE(rb->user_page->aux_size);
-
-		if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
-			goto unlock;
-
-		if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
-			goto unlock;
-
-		/* already mapped with a different offset */
-		if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
-			goto unlock;
-
-		if (aux_size != nr_pages * PAGE_SIZE)
-			goto unlock;
-
-		/* already mapped with a different size */
-		if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
-			goto unlock;
-
-		if (!is_power_of_2(nr_pages))
-			goto unlock;
-
-		if (!atomic_inc_not_zero(&rb->mmap_count))
-			goto unlock;
-
-		if (rb_has_aux(rb)) {
-			atomic_inc(&rb->aux_mmap_count);
-			ret = 0;
-
-		} else {
-			if (!perf_mmap_calc_limits(vma, &user_extra, &extra)) {
-				ret = -EPERM;
-				atomic_dec(&rb->mmap_count);
-				goto unlock;
-			}
-
-			WARN_ON(!rb && event->rb);
-
-			if (vma->vm_flags & VM_WRITE)
-				flags |= RING_BUFFER_WRITABLE;
-
-			ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
-					   event->attr.aux_watermark, flags);
-			if (ret) {
-				atomic_dec(&rb->mmap_count);
-				goto unlock;
-			}
-
-			atomic_set(&rb->aux_mmap_count, 1);
-			rb->aux_mmap_locked = extra;
-		}
-		perf_mmap_account(vma, user_extra, extra);
-		atomic_inc(&event->mmap_count);
+		ret = perf_mmap_aux(vma, event, nr_pages);
 	}
 
 unlock:
Re: [PATCH v3 10/15] perf: Split out the AUX buffer allocation
Posted by Lorenzo Stoakes
On Tue, Aug 12, 2025 at 12:39:08PM +0200, Peter Zijlstra wrote:
> Move the AUX buffer allocation branch into its own function.
>
> Originally-by: Thomas Gleixner <tglx@linutronix.de>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>

LGTM (one nitty note below :), so:

Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>

> ---
>  kernel/events/core.c |  144 +++++++++++++++++++++++++++------------------------
>  1 file changed, 77 insertions(+), 67 deletions(-)
>
> --- a/kernel/events/core.c
> +++ b/kernel/events/core.c
> @@ -6970,6 +6970,82 @@ static void perf_mmap_account(struct vm_
>  	atomic64_add(extra, &vma->vm_mm->pinned_vm);
>  }
>
> +static int perf_mmap_aux(struct vm_area_struct *vma, struct perf_event *event,
> +			 unsigned long nr_pages)
> +{
> +	long extra = 0, user_extra = nr_pages;
> +	u64 aux_offset, aux_size;
> +	struct perf_buffer *rb;
> +	int ret, rb_flags = 0;
> +
> +	rb = event->rb;
> +	if (!rb)
> +		return -EINVAL;
> +
> +	guard(mutex)(&rb->aux_mutex);
> +
> +	/*
> +	 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
> +	 * mapped, all subsequent mappings should have the same size
> +	 * and offset. Must be above the normal perf buffer.
> +	 */
> +	aux_offset = READ_ONCE(rb->user_page->aux_offset);
> +	aux_size = READ_ONCE(rb->user_page->aux_size);
> +
> +	if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
> +		return -EINVAL;
> +
> +	if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
> +		return -EINVAL;
> +
> +	/* already mapped with a different offset */
> +	if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
> +		return -EINVAL;
> +
> +	if (aux_size != nr_pages * PAGE_SIZE)
> +		return -EINVAL;
> +
> +	/* already mapped with a different size */
> +	if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
> +		return -EINVAL;
> +
> +	if (!is_power_of_2(nr_pages))
> +		return -EINVAL;
> +
> +	if (!atomic_inc_not_zero(&rb->mmap_count))
> +		return -EINVAL;
> +
> +	if (rb_has_aux(rb)) {
> +		atomic_inc(&rb->aux_mmap_count);
> +

Still that extra line :>)

> +	} else {
> +		if (!perf_mmap_calc_limits(vma, &user_extra, &extra)) {
> +			atomic_dec(&rb->mmap_count);
> +			return -EPERM;
> +		}
> +
> +		WARN_ON(!rb && event->rb);
> +
> +		if (vma->vm_flags & VM_WRITE)
> +			rb_flags |= RING_BUFFER_WRITABLE;
> +
> +		ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
> +				   event->attr.aux_watermark, rb_flags);
> +		if (ret) {
> +			atomic_dec(&rb->mmap_count);
> +			return ret;
> +		}
> +
> +		atomic_set(&rb->aux_mmap_count, 1);
> +		rb->aux_mmap_locked = extra;
> +	}
> +
> +	perf_mmap_account(vma, user_extra, extra);
> +	atomic_inc(&event->mmap_count);
> +
> +	return 0;
> +}
> +
>  static int perf_mmap(struct file *file, struct vm_area_struct *vma)
>  {
>  	struct perf_event *event = file->private_data;
> @@ -7087,73 +7163,7 @@ static int perf_mmap(struct file *file,
>  		perf_mmap_account(vma, user_extra, extra);
>  		atomic_inc(&event->mmap_count);
>  	} else {
> -		/*
> -		 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
> -		 * mapped, all subsequent mappings should have the same size
> -		 * and offset. Must be above the normal perf buffer.
> -		 */
> -		u64 aux_offset, aux_size;
> -
> -		rb = event->rb;
> -		if (!rb)
> -			goto unlock;
> -
> -		guard(mutex)(&rb->aux_mutex);
> -
> -		aux_offset = READ_ONCE(rb->user_page->aux_offset);
> -		aux_size = READ_ONCE(rb->user_page->aux_size);
> -
> -		if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
> -			goto unlock;
> -
> -		if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
> -			goto unlock;
> -
> -		/* already mapped with a different offset */
> -		if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
> -			goto unlock;
> -
> -		if (aux_size != nr_pages * PAGE_SIZE)
> -			goto unlock;
> -
> -		/* already mapped with a different size */
> -		if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
> -			goto unlock;
> -
> -		if (!is_power_of_2(nr_pages))
> -			goto unlock;
> -
> -		if (!atomic_inc_not_zero(&rb->mmap_count))
> -			goto unlock;
> -
> -		if (rb_has_aux(rb)) {
> -			atomic_inc(&rb->aux_mmap_count);
> -			ret = 0;
> -
> -		} else {
> -			if (!perf_mmap_calc_limits(vma, &user_extra, &extra)) {
> -				ret = -EPERM;
> -				atomic_dec(&rb->mmap_count);
> -				goto unlock;
> -			}
> -
> -			WARN_ON(!rb && event->rb);
> -
> -			if (vma->vm_flags & VM_WRITE)
> -				flags |= RING_BUFFER_WRITABLE;
> -
> -			ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
> -					   event->attr.aux_watermark, flags);
> -			if (ret) {
> -				atomic_dec(&rb->mmap_count);
> -				goto unlock;
> -			}
> -
> -			atomic_set(&rb->aux_mmap_count, 1);
> -			rb->aux_mmap_locked = extra;
> -		}
> -		perf_mmap_account(vma, user_extra, extra);
> -		atomic_inc(&event->mmap_count);
> +		ret = perf_mmap_aux(vma, event, nr_pages);
>  	}
>
>  unlock:
>
>
[tip: perf/core] perf: Split out the AUX buffer allocation
Posted by tip-bot2 for Peter Zijlstra
The following commit has been merged into the perf/core branch of tip:

Commit-ID:     2aee37682391332d26c01e703170e0d9358c7252
Gitweb:        https://git.kernel.org/tip/2aee37682391332d26c01e703170e0d9358c7252
Author:        Peter Zijlstra <peterz@infradead.org>
AuthorDate:    Tue, 12 Aug 2025 12:39:08 +02:00
Committer:     Peter Zijlstra <peterz@infradead.org>
CommitterDate: Fri, 15 Aug 2025 13:13:00 +02:00

perf: Split out the AUX buffer allocation

Move the AUX buffer allocation branch into its own function.

Originally-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Link: https://lore.kernel.org/r/20250812104019.494205648@infradead.org
---
 kernel/events/core.c | 144 ++++++++++++++++++++++--------------------
 1 file changed, 77 insertions(+), 67 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5bbea81..e76afd9 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6970,6 +6970,82 @@ static void perf_mmap_account(struct vm_area_struct *vma, long user_extra, long 
 	atomic64_add(extra, &vma->vm_mm->pinned_vm);
 }
 
+static int perf_mmap_aux(struct vm_area_struct *vma, struct perf_event *event,
+			 unsigned long nr_pages)
+{
+	long extra = 0, user_extra = nr_pages;
+	u64 aux_offset, aux_size;
+	struct perf_buffer *rb;
+	int ret, rb_flags = 0;
+
+	rb = event->rb;
+	if (!rb)
+		return -EINVAL;
+
+	guard(mutex)(&rb->aux_mutex);
+
+	/*
+	 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
+	 * mapped, all subsequent mappings should have the same size
+	 * and offset. Must be above the normal perf buffer.
+	 */
+	aux_offset = READ_ONCE(rb->user_page->aux_offset);
+	aux_size = READ_ONCE(rb->user_page->aux_size);
+
+	if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
+		return -EINVAL;
+
+	if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
+		return -EINVAL;
+
+	/* already mapped with a different offset */
+	if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
+		return -EINVAL;
+
+	if (aux_size != nr_pages * PAGE_SIZE)
+		return -EINVAL;
+
+	/* already mapped with a different size */
+	if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
+		return -EINVAL;
+
+	if (!is_power_of_2(nr_pages))
+		return -EINVAL;
+
+	if (!atomic_inc_not_zero(&rb->mmap_count))
+		return -EINVAL;
+
+	if (rb_has_aux(rb)) {
+		atomic_inc(&rb->aux_mmap_count);
+
+	} else {
+		if (!perf_mmap_calc_limits(vma, &user_extra, &extra)) {
+			atomic_dec(&rb->mmap_count);
+			return -EPERM;
+		}
+
+		WARN_ON(!rb && event->rb);
+
+		if (vma->vm_flags & VM_WRITE)
+			rb_flags |= RING_BUFFER_WRITABLE;
+
+		ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
+				   event->attr.aux_watermark, rb_flags);
+		if (ret) {
+			atomic_dec(&rb->mmap_count);
+			return ret;
+		}
+
+		atomic_set(&rb->aux_mmap_count, 1);
+		rb->aux_mmap_locked = extra;
+	}
+
+	perf_mmap_account(vma, user_extra, extra);
+	atomic_inc(&event->mmap_count);
+
+	return 0;
+}
+
 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct perf_event *event = file->private_data;
@@ -7088,73 +7164,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 		perf_mmap_account(vma, user_extra, extra);
 		atomic_inc(&event->mmap_count);
 	} else {
-		/*
-		 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
-		 * mapped, all subsequent mappings should have the same size
-		 * and offset. Must be above the normal perf buffer.
-		 */
-		u64 aux_offset, aux_size;
-
-		rb = event->rb;
-		if (!rb)
-			goto unlock;
-
-		guard(mutex)(&rb->aux_mutex);
-
-		aux_offset = READ_ONCE(rb->user_page->aux_offset);
-		aux_size = READ_ONCE(rb->user_page->aux_size);
-
-		if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
-			goto unlock;
-
-		if (aux_offset != vma->vm_pgoff << PAGE_SHIFT)
-			goto unlock;
-
-		/* already mapped with a different offset */
-		if (rb_has_aux(rb) && rb->aux_pgoff != vma->vm_pgoff)
-			goto unlock;
-
-		if (aux_size != nr_pages * PAGE_SIZE)
-			goto unlock;
-
-		/* already mapped with a different size */
-		if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)
-			goto unlock;
-
-		if (!is_power_of_2(nr_pages))
-			goto unlock;
-
-		if (!atomic_inc_not_zero(&rb->mmap_count))
-			goto unlock;
-
-		if (rb_has_aux(rb)) {
-			atomic_inc(&rb->aux_mmap_count);
-			ret = 0;
-
-		} else {
-			if (!perf_mmap_calc_limits(vma, &user_extra, &extra)) {
-				ret = -EPERM;
-				atomic_dec(&rb->mmap_count);
-				goto unlock;
-			}
-
-			WARN_ON(!rb && event->rb);
-
-			if (vma->vm_flags & VM_WRITE)
-				flags |= RING_BUFFER_WRITABLE;
-
-			ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
-					   event->attr.aux_watermark, flags);
-			if (ret) {
-				atomic_dec(&rb->mmap_count);
-				goto unlock;
-			}
-
-			atomic_set(&rb->aux_mmap_count, 1);
-			rb->aux_mmap_locked = extra;
-		}
-		perf_mmap_account(vma, user_extra, extra);
-		atomic_inc(&event->mmap_count);
+		ret = perf_mmap_aux(vma, event, nr_pages);
 	}
 
 unlock: