[PATCH 2/8] x86/itmt: Use guard() for itmt_update_mutex

Posted by K Prateek Nayak 1 year ago
Use guard() for itmt_update_mutex, which avoids the extra mutex_unlock()
calls in the bailout and return paths.
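
For reference, guard() comes from the kernel's scope-based cleanup helpers
(<linux/cleanup.h>, with guard(mutex) provided through <linux/mutex.h>): the
mutex is released automatically when the guard goes out of scope, i.e. on
every return path. A minimal sketch of the pattern, using an illustrative
mutex and function that are not part of this patch:

	#include <linux/mutex.h>	/* provides guard(mutex) via <linux/cleanup.h> */

	static DEFINE_MUTEX(example_mutex);

	static int example_update(bool capable)
	{
		/* Lock is held from here until the function returns. */
		guard(mutex)(&example_mutex);

		if (!capable)
			return -EINVAL;	/* no explicit mutex_unlock() needed */

		/* ... update state while holding example_mutex ... */
		return 0;
	}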

Signed-off-by: K Prateek Nayak <kprateek.nayak@amd.com>
---
 arch/x86/kernel/itmt.c | 29 ++++++++---------------------
 1 file changed, 8 insertions(+), 21 deletions(-)

diff --git a/arch/x86/kernel/itmt.c b/arch/x86/kernel/itmt.c
index 28f449123d68..ee43d1bd41d0 100644
--- a/arch/x86/kernel/itmt.c
+++ b/arch/x86/kernel/itmt.c
@@ -44,12 +44,10 @@ static int sched_itmt_update_handler(const struct ctl_table *table, int write,
 	unsigned int old_sysctl;
 	int ret;
 
-	mutex_lock(&itmt_update_mutex);
+	guard(mutex)(&itmt_update_mutex);
 
-	if (!sched_itmt_capable) {
-		mutex_unlock(&itmt_update_mutex);
+	if (!sched_itmt_capable)
 		return -EINVAL;
-	}
 
 	old_sysctl = sysctl_sched_itmt_enabled;
 	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
@@ -59,8 +57,6 @@ static int sched_itmt_update_handler(const struct ctl_table *table, int write,
 		rebuild_sched_domains();
 	}
 
-	mutex_unlock(&itmt_update_mutex);
-
 	return ret;
 }
 
@@ -97,18 +93,14 @@ static struct ctl_table_header *itmt_sysctl_header;
  */
 int sched_set_itmt_support(void)
 {
-	mutex_lock(&itmt_update_mutex);
+	guard(mutex)(&itmt_update_mutex);
 
-	if (sched_itmt_capable) {
-		mutex_unlock(&itmt_update_mutex);
+	if (sched_itmt_capable)
 		return 0;
-	}
 
 	itmt_sysctl_header = register_sysctl("kernel", itmt_kern_table);
-	if (!itmt_sysctl_header) {
-		mutex_unlock(&itmt_update_mutex);
+	if (!itmt_sysctl_header)
 		return -ENOMEM;
-	}
 
 	sched_itmt_capable = true;
 
@@ -117,8 +109,6 @@ int sched_set_itmt_support(void)
 	x86_topology_update = true;
 	rebuild_sched_domains();
 
-	mutex_unlock(&itmt_update_mutex);
-
 	return 0;
 }
 
@@ -134,12 +124,11 @@ int sched_set_itmt_support(void)
  */
 void sched_clear_itmt_support(void)
 {
-	mutex_lock(&itmt_update_mutex);
+	guard(mutex)(&itmt_update_mutex);
 
-	if (!sched_itmt_capable) {
-		mutex_unlock(&itmt_update_mutex);
+	if (!sched_itmt_capable)
 		return;
-	}
+
 	sched_itmt_capable = false;
 
 	if (itmt_sysctl_header) {
@@ -153,8 +142,6 @@ void sched_clear_itmt_support(void)
 		x86_topology_update = true;
 		rebuild_sched_domains();
 	}
-
-	mutex_unlock(&itmt_update_mutex);
 }
 
 int arch_asym_cpu_priority(int cpu)
-- 
2.34.1
Re: [PATCH 2/8] x86/itmt: Use guard() for itmt_update_mutex
Posted by Tim Chen 1 year ago
On Wed, 2024-12-11 at 18:55 +0000, K Prateek Nayak wrote:
> Use guard() for itmt_update_mutex, which avoids the extra mutex_unlock()
> calls in the bailout and return paths.

Reviewed-by: Tim Chen <tim.c.chen@linux.intel.com>

Tim
