Use guard(mutex) and scoped_guard() instead of mutex_lock()/mutex_unlock()
pairs. This simplifies the error handling to a plain return on error, and
the fail_unlock: label is no longer needed, so remove it.
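
For reference, guard() and scoped_guard() come from <linux/cleanup.h> (the
mutex guard itself is defined in <linux/mutex.h>); a simplified sketch of
their behaviour, not the exact macro expansion:

	guard(mutex)(&m);	/* locks m; the unlock runs automatically
				 * when the enclosing scope is left */

	scoped_guard(mutex, &m) {
		/* m is held for this block only and is released on
		 * any exit from it, including an early return */
	}

So an error path can simply return and the mutex is still dropped.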
Signed-off-by: Liao Yuanhong <liaoyuanhong@vivo.com>
---
arch/x86/events/core.c | 21 +++++++--------------
1 file changed, 7 insertions(+), 14 deletions(-)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 745caa6c15a3..107bed5c9d71 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -411,7 +411,7 @@ int x86_reserve_hardware(void)
 	int err = 0;
 
 	if (!atomic_inc_not_zero(&pmc_refcount)) {
-		mutex_lock(&pmc_reserve_mutex);
+		guard(mutex)(&pmc_reserve_mutex);
 		if (atomic_read(&pmc_refcount) == 0) {
 			if (!reserve_pmc_hardware()) {
 				err = -EBUSY;
@@ -422,7 +422,6 @@ int x86_reserve_hardware(void)
 		}
 		if (!err)
 			atomic_inc(&pmc_refcount);
-		mutex_unlock(&pmc_reserve_mutex);
 	}
 
 	return err;
@@ -444,8 +443,6 @@ void x86_release_hardware(void)
  */
 int x86_add_exclusive(unsigned int what)
 {
-	int i;
-
 	/*
 	 * When lbr_pt_coexist we allow PT to coexist with either LBR or BTS.
 	 * LBR and BTS are still mutually exclusive.
@@ -454,22 +451,18 @@ int x86_add_exclusive(unsigned int what)
 		goto out;
 
 	if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
-		mutex_lock(&pmc_reserve_mutex);
-		for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
-			if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
-				goto fail_unlock;
+		scoped_guard(mutex, &pmc_reserve_mutex) {
+			for (int i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
+				if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
+					return -EBUSY;
+			}
+			atomic_inc(&x86_pmu.lbr_exclusive[what]);
 		}
-		atomic_inc(&x86_pmu.lbr_exclusive[what]);
-		mutex_unlock(&pmc_reserve_mutex);
 	}
 
 out:
 	atomic_inc(&active_events);
 	return 0;
-
-fail_unlock:
-	mutex_unlock(&pmc_reserve_mutex);
-	return -EBUSY;
 }
 
 void x86_del_exclusive(unsigned int what)
--
2.34.1
On 2025-08-29 4:48 a.m., Liao Yuanhong wrote:
> Use guard(mutex) and scoped_guard() instead of mutex_lock()/mutex_unlock()
> pairs. This simplifies the error handling to a plain return on error, and
> the fail_unlock: label is no longer needed, so remove it.
>
> Signed-off-by: Liao Yuanhong <liaoyuanhong@vivo.com>
> ---
> arch/x86/events/core.c | 21 +++++++--------------
> 1 file changed, 7 insertions(+), 14 deletions(-)
>
> diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
> index 745caa6c15a3..107bed5c9d71 100644
> --- a/arch/x86/events/core.c
> +++ b/arch/x86/events/core.c
> @@ -411,7 +411,7 @@ int x86_reserve_hardware(void)
>  	int err = 0;
>  
>  	if (!atomic_inc_not_zero(&pmc_refcount)) {
> -		mutex_lock(&pmc_reserve_mutex);
> +		guard(mutex)(&pmc_reserve_mutex);
Shouldn't it be a scoped_guard() as well?
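I.e. something like the below (untested sketch, inner details elided),
which makes the locked region explicit and matches the scoped_guard()
conversion in x86_add_exclusive():

	if (!atomic_inc_not_zero(&pmc_refcount)) {
		scoped_guard(mutex, &pmc_reserve_mutex) {
			if (atomic_read(&pmc_refcount) == 0) {
				...
			}
			if (!err)
				atomic_inc(&pmc_refcount);
		}
	}

(Functionally the guard() form should be equivalent here, since the guard
goes out of scope at the same closing brace that used to hold the
mutex_unlock() call.)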
Thanks,
Kan
>  		if (atomic_read(&pmc_refcount) == 0) {
>  			if (!reserve_pmc_hardware()) {
>  				err = -EBUSY;
> @@ -422,7 +422,6 @@ int x86_reserve_hardware(void)
>  		}
>  		if (!err)
>  			atomic_inc(&pmc_refcount);
> -		mutex_unlock(&pmc_reserve_mutex);
>  	}
>  
>  	return err;
> @@ -444,8 +443,6 @@ void x86_release_hardware(void)
>   */
>  int x86_add_exclusive(unsigned int what)
>  {
> -	int i;
> -
>  	/*
>  	 * When lbr_pt_coexist we allow PT to coexist with either LBR or BTS.
>  	 * LBR and BTS are still mutually exclusive.
> @@ -454,22 +451,18 @@ int x86_add_exclusive(unsigned int what)
>  		goto out;
>  
>  	if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
> -		mutex_lock(&pmc_reserve_mutex);
> -		for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
> -			if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
> -				goto fail_unlock;
> +		scoped_guard(mutex, &pmc_reserve_mutex) {
> +			for (int i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
> +				if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
> +					return -EBUSY;
> +			}
> +			atomic_inc(&x86_pmu.lbr_exclusive[what]);
>  		}
> -		atomic_inc(&x86_pmu.lbr_exclusive[what]);
> -		mutex_unlock(&pmc_reserve_mutex);
>  	}
>  
>  out:
>  	atomic_inc(&active_events);
>  	return 0;
> -
> -fail_unlock:
> -	mutex_unlock(&pmc_reserve_mutex);
> -	return -EBUSY;
>  }
>  
>  void x86_del_exclusive(unsigned int what)