From: Thomas Gleixner
To: LKML
Cc: Linus Torvalds, Peter Zijlstra, Ingo Molnar, Namhyung Kim,
    Arnaldo Carvalho de Melo, Lorenzo Stoakes, Kees Cook
Subject: [patch V2 m@/6] perf/core: Convert mmap() refcounts to refcount_t
Date: Mon, 11 Aug 2025 09:06:46 +0200 (CEST)
Message-ID: <20250811070620.716309215@linutronix.de>
References: <20250811065859.660930338@linutronix.de>

The recently fixed reference count leaks could have been detected by using
refcount_t, and refcount_t would at least have mitigated the potential
overflow.

Now that the code is properly structured, convert the mmap() related
mmap_count variants over to refcount_t.

No functional change intended.

Signed-off-by: Thomas Gleixner
---
 include/linux/perf_event.h  |  2 +-
 kernel/events/core.c        | 40 ++++++++++++++++++++--------------------
 kernel/events/internal.h    |  4 ++--
 kernel/events/ring_buffer.c |  2 +-
 4 files changed, 24 insertions(+), 24 deletions(-)

--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -859,7 +859,7 @@ struct perf_event {
 
 	/* mmap bits */
 	struct mutex mmap_mutex;
-	atomic_t mmap_count;
+	refcount_t mmap_count;
 
 	struct perf_buffer *rb;
 	struct list_head rb_entry;
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3968,7 +3968,7 @@ static noinline int visit_groups_merge(s
  */
 static inline bool event_update_userpage(struct perf_event *event)
 {
-	if (likely(!atomic_read(&event->mmap_count)))
+	if (likely(!refcount_read(&event->mmap_count)))
 		return false;
 
 	perf_event_update_time(event);
@@ -6704,11 +6704,11 @@ static void perf_mmap_open(struct vm_are
 	struct perf_event *event = vma->vm_file->private_data;
 	mapped_f mapped = get_mapped(event, event_mapped);
 
-	atomic_inc(&event->mmap_count);
-	atomic_inc(&event->rb->mmap_count);
+	refcount_inc(&event->mmap_count);
+	refcount_inc(&event->rb->mmap_count);
 
 	if (vma->vm_pgoff)
-		atomic_inc(&event->rb->aux_mmap_count);
+		refcount_inc(&event->rb->aux_mmap_count);
 
 	if (mapped)
 		mapped(event, vma->vm_mm);
@@ -6743,7 +6743,7 @@ static void perf_mmap_close(struct vm_ar
 	 * to avoid complications.
 	 */
 	if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
-	    atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &rb->aux_mutex)) {
+	    refcount_dec_and_mutex_lock(&rb->aux_mmap_count, &rb->aux_mutex)) {
 		/*
 		 * Stop all AUX events that are writing to this buffer,
 		 * so that we can free its AUX pages and corresponding PMU
@@ -6763,10 +6763,10 @@ static void perf_mmap_close(struct vm_ar
 		mutex_unlock(&rb->aux_mutex);
 	}
 
-	if (atomic_dec_and_test(&rb->mmap_count))
+	if (refcount_dec_and_test(&rb->mmap_count))
 		detach_rest = true;
 
-	if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
+	if (!refcount_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
 		goto out_put;
 
 	ring_buffer_attach(event, NULL);
@@ -6991,17 +6991,17 @@ static int perf_mmap_rb(struct vm_area_s
 	if (data_page_nr(rb) != nr_pages)
 		return -EINVAL;
 
-	if (atomic_inc_not_zero(&event->rb->mmap_count)) {
+	if (refcount_inc_not_zero(&event->rb->mmap_count)) {
 		/*
 		 * Success -- managed to mmap() the same buffer
 		 * multiple times.
 		 */
-		atomic_inc(&event->mmap_count);
+		refcount_inc(&event->mmap_count);
 		return 0;
 	}
 	/*
 	 * Raced against perf_mmap_close()'s
-	 * atomic_dec_and_mutex_lock() remove the event and
+	 * refcount_dec_and_mutex_lock() remove the event and
 	 * continue as if !event->rb
 	 */
 	ring_buffer_attach(event, NULL);
@@ -7019,7 +7019,7 @@ static int perf_mmap_rb(struct vm_area_s
 	if (!rb)
 		return -ENOMEM;
 
-	atomic_set(&rb->mmap_count, 1);
+	refcount_set(&rb->mmap_count, 1);
 	rb->mmap_user = get_current_user();
 	rb->mmap_locked = extra;
 
@@ -7030,7 +7030,7 @@ static int perf_mmap_rb(struct vm_area_s
 	perf_event_update_userpage(event);
 
 	perf_mmap_account(vma, user_extra, extra);
-	atomic_set(&event->mmap_count, 1);
+	refcount_set(&event->mmap_count, 1);
 	return 0;
 }
 
@@ -7071,17 +7071,17 @@ static int perf_mmap_aux(struct vm_area_
 		return -EINVAL;
 
 	/* If this succeeds, subsequent failures have to undo it */
-	if (!atomic_inc_not_zero(&rb->mmap_count))
+	if (!refcount_inc_not_zero(&rb->mmap_count))
 		return -EINVAL;
 
 	/* If mapped, attach to it */
 	if (rb_has_aux(rb)) {
-		atomic_inc(&rb->aux_mmap_count);
+		refcount_inc(&rb->aux_mmap_count);
 		return 0;
 	}
 
 	if (!perf_mmap_calc_limits(vma, &user_extra, &extra)) {
-		atomic_dec(&rb->mmap_count);
+		refcount_dec(&rb->mmap_count);
 		return -EPERM;
 	}
 
@@ -7091,14 +7091,14 @@ static int perf_mmap_aux(struct vm_area_
 	ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
 			   event->attr.aux_watermark, rb_flags);
 	if (ret) {
-		atomic_dec(&rb->mmap_count);
+		refcount_dec(&rb->mmap_count);
 		return ret;
 	}
 
-	atomic_set(&rb->aux_mmap_count, 1);
+	refcount_set(&rb->aux_mmap_count, 1);
 	rb->aux_mmap_locked = extra;
 	perf_mmap_account(vma, user_extra, extra);
-	atomic_inc(&event->mmap_count);
+	refcount_inc(&event->mmap_count);
 	return 0;
 }
 
@@ -13247,7 +13247,7 @@ perf_event_set_output(struct perf_event
 	mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex);
 set:
 	/* Can't redirect output if we've got an active mmap() */
-	if (atomic_read(&event->mmap_count))
+	if (refcount_read(&event->mmap_count))
 		goto unlock;
 
 	if (output_event) {
@@ -13260,7 +13260,7 @@ perf_event_set_output(struct perf_event
 		goto unlock;
 
 	/* did we race against perf_mmap_close() */
-	if (!atomic_read(&rb->mmap_count)) {
+	if (!refcount_read(&rb->mmap_count)) {
 		ring_buffer_put(rb);
 		goto unlock;
 	}
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -35,7 +35,7 @@ struct perf_buffer {
 	spinlock_t event_lock;
 	struct list_head event_list;
 
-	atomic_t mmap_count;
+	refcount_t mmap_count;
 	unsigned long mmap_locked;
 	struct user_struct *mmap_user;
 
@@ -47,7 +47,7 @@ struct perf_buffer {
 	unsigned long aux_pgoff;
 	int aux_nr_pages;
 	int aux_overwrite;
-	atomic_t aux_mmap_count;
+	refcount_t aux_mmap_count;
 	unsigned long aux_mmap_locked;
 	void (*free_aux)(void *);
 	refcount_t aux_refcount;
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -400,7 +400,7 @@ void *perf_aux_output_begin(struct perf_
 	 * the same order, see perf_mmap_close. Otherwise we end up freeing
 	 * aux pages in this path, which is a bug, because in_atomic().
 	 */
-	if (!atomic_read(&rb->aux_mmap_count))
+	if (!refcount_read(&rb->aux_mmap_count))
 		goto err;
 
 	if (!refcount_inc_not_zero(&rb->aux_refcount))
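
As a side note for review, the behavioural difference the changelog relies
on can be approximated with a minimal userspace sketch. This is not the
kernel's refcount_t implementation, and the mock_* names below are
illustrative stand-ins only: an increment of a zero count is refused and
reported (so a missing reference shows up as a warning instead of a silent
use-after-free), and the counter saturates instead of wrapping on overflow.

/*
 * Illustrative userspace approximation of refcount_t semantics.
 * Builds with any C compiler; nothing here is kernel code.
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct mock_refcount {
	unsigned int refs;
};

#define MOCK_REFCOUNT_SATURATED UINT_MAX

static void mock_refcount_set(struct mock_refcount *r, unsigned int n)
{
	r->refs = n;
}

/* Modelled after refcount_inc_not_zero(): only take a ref on a live object. */
static bool mock_refcount_inc_not_zero(struct mock_refcount *r)
{
	if (r->refs == 0)
		return false;
	if (r->refs == MOCK_REFCOUNT_SATURATED)
		return true;	/* stay saturated, never wrap around */
	r->refs++;
	return true;
}

/* Modelled after refcount_inc(): incrementing a zero count is a bug. */
static void mock_refcount_inc(struct mock_refcount *r)
{
	if (!mock_refcount_inc_not_zero(r))
		fprintf(stderr, "bug: increment of a zero refcount\n");
}

/* Modelled after refcount_dec_and_test(): true when the last ref is dropped. */
static bool mock_refcount_dec_and_test(struct mock_refcount *r)
{
	if (r->refs == 0) {
		fprintf(stderr, "bug: refcount underflow\n");
		return false;
	}
	if (r->refs == MOCK_REFCOUNT_SATURATED)
		return false;	/* a saturated object is never freed */
	return --r->refs == 0;
}

int main(void)
{
	struct mock_refcount mmap_count;

	/* First mmap() of a buffer: the count starts at 1, as in perf_mmap_rb(). */
	mock_refcount_set(&mmap_count, 1);

	/* A second mapping of the live buffer takes another reference. */
	mock_refcount_inc(&mmap_count);

	/* Each munmap() drops one reference; the last drop allows freeing. */
	mock_refcount_dec_and_test(&mmap_count);
	if (mock_refcount_dec_and_test(&mmap_count))
		printf("last reference gone, buffer may be freed\n");

	/* An unbalanced extra drop is reported instead of going unnoticed. */
	mock_refcount_dec_and_test(&mmap_count);
	return 0;
}

With a plain atomic_t the extra decrement at the end would simply drive the
counter negative without any diagnostic; the refcount_t-style checks report
both the underflow and any increment of an already-released count.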