[PATCH] cpufreq: fix locking order in store_local_boost to prevent deadlock
Posted by Seyediman Seyedarab 9 months, 2 weeks ago
Lockdep reports a possible circular locking dependency[1] when
writing to /sys/devices/system/cpu/cpufreq/policyN/boost,
triggered by power-profiles-daemon at boot.

store_local_boost() acquires cpu_hotplug_lock *AFTER* policy->rwsem
has already been taken by the store() handler. However, the expected
locking hierarchy is to acquire cpu_hotplug_lock before policy->rwsem.
This inverted lock order creates a *theoretical* deadlock possibility.
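Put differently, the two paths acquire the locks in opposite orders
(simplified from the dependency chain in [1] below):

  driver registration:
    cpufreq_register_driver()
      cpus_read_lock()                    /* cpu_hotplug_lock */
        subsys_interface_register()       /* subsys mutex     */
          cpufreq_online()
            down_write(&policy->rwsem)    /* policy->rwsem    */

  sysfs write to policyN/boost:
    store()
      down_write(&policy->rwsem)          /* policy->rwsem    */
        store_local_boost()
          cpus_read_lock()                /* cpu_hotplug_lock */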

Take cpu_hotplug_lock in the store() before down_write(&policy->rwsem),
and remove the internal cpus_read_lock/unlock pair
inside store_local_boost().

 [1]
 ======================================================
 WARNING: possible circular locking dependency detected
 6.15.0-rc3-debug #28 Not tainted
 ------------------------------------------------------
 power-profiles-/596 is trying to acquire lock:
 ffffffffb147e910 (cpu_hotplug_lock){++++}-{0:0}, at: store_local_boost+0x6a/0xd0

 but task is already holding lock:
 ffff9eaa48377b80 (&policy->rwsem){++++}-{4:4}, at: store+0x37/0x90

 which lock already depends on the new lock.

 the existing dependency chain (in reverse order) is:

 -> #2 (&policy->rwsem){++++}-{4:4}:
        down_write+0x29/0xb0
        cpufreq_online+0x841/0xa00
        cpufreq_add_dev+0x71/0x80
        subsys_interface_register+0x14b/0x170
        cpufreq_register_driver+0x154/0x250
        amd_pstate_register_driver+0x36/0x70
        amd_pstate_init+0x1e7/0x270
        do_one_initcall+0x67/0x2c0
        kernel_init_freeable+0x230/0x270
        kernel_init+0x15/0x130
        ret_from_fork+0x2c/0x50
        ret_from_fork_asm+0x11/0x20

 -> #1 (subsys mutex#3){+.+.}-{4:4}:
        __mutex_lock+0xc2/0x930
        subsys_interface_register+0x83/0x170
        cpufreq_register_driver+0x154/0x250
        amd_pstate_register_driver+0x36/0x70
        amd_pstate_init+0x1e7/0x270
        do_one_initcall+0x67/0x2c0
        kernel_init_freeable+0x230/0x270
        kernel_init+0x15/0x130
        ret_from_fork+0x2c/0x50
        ret_from_fork_asm+0x11/0x20

 -> #0 (cpu_hotplug_lock){++++}-{0:0}:
        __lock_acquire+0x1087/0x17e0
        lock_acquire.part.0+0x66/0x1b0
        cpus_read_lock+0x2a/0xc0
        store_local_boost+0x6a/0xd0
        store+0x50/0x90
        kernfs_fop_write_iter+0x135/0x200
        vfs_write+0x2ab/0x540
        ksys_write+0x6c/0xe0
        do_syscall_64+0xbb/0x1d0
        entry_SYSCALL_64_after_hwframe+0x56/0x5e

Signed-off-by: Seyediman Seyedarab <ImanDevel@gmail.com>
---
 drivers/cpufreq/cpufreq.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index acf19b004..6e672dcba 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -653,10 +653,7 @@ static ssize_t store_local_boost(struct cpufreq_policy *policy,
 
 	policy->boost_enabled = enable;
 
-	cpus_read_lock();
 	ret = cpufreq_driver->set_boost(policy, enable);
-	cpus_read_unlock();
-
 	if (ret) {
 		policy->boost_enabled = !policy->boost_enabled;
 		return ret;
@@ -1045,10 +1042,12 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
 	if (!fattr->store)
 		return -EIO;
 
+	cpus_read_lock();
 	down_write(&policy->rwsem);
 	if (likely(!policy_is_inactive(policy)))
 		ret = fattr->store(policy, buf, count);
 	up_write(&policy->rwsem);
+	cpus_read_unlock();
 
 	return ret;
 }
-- 
2.49.0
Re: [PATCH] cpufreq: fix locking order in store_local_boost to prevent deadlock
Posted by Rafael J. Wysocki 9 months, 2 weeks ago
On Mon, Apr 28, 2025 at 6:31 AM Seyediman Seyedarab <imandevel@gmail.com> wrote:
>
> Lockdep reports a possible circular locking dependency[1] when
> writing to /sys/devices/system/cpu/cpufreq/policyN/boost,
> triggered by power-profiles-daemon at boot.
>
> store_local_boost() acquires cpu_hotplug_lock *AFTER* policy->rwsem
> has already been taken by the store() handler. However, the expected
> locking hierarchy is to acquire cpu_hotplug_lock before policy->rwsem.
> This inverted lock order creates a *theoretical* deadlock possibility.
>
> Take cpu_hotplug_lock in the store() before down_write(&policy->rwsem),
> and remove the internal cpus_read_lock/unlock pair
> inside store_local_boost().

The patch does more than this, though.  It adds CPU offline/online
locking to multiple cpufreq sysfs attributes where it is not needed.

>
>  [1]
>  ======================================================
>  WARNING: possible circular locking dependency detected
>  6.15.0-rc3-debug #28 Not tainted
>  ------------------------------------------------------
>  power-profiles-/596 is trying to acquire lock:
>  ffffffffb147e910 (cpu_hotplug_lock){++++}-{0:0}, at: store_local_boost+0x6a/0xd0
>
>  but task is already holding lock:
>  ffff9eaa48377b80 (&policy->rwsem){++++}-{4:4}, at: store+0x37/0x90
>
>  which lock already depends on the new lock.
>
>  the existing dependency chain (in reverse order) is:
>
>  -> #2 (&policy->rwsem){++++}-{4:4}:
>         down_write+0x29/0xb0
>         cpufreq_online+0x841/0xa00
>         cpufreq_add_dev+0x71/0x80
>         subsys_interface_register+0x14b/0x170
>         cpufreq_register_driver+0x154/0x250
>         amd_pstate_register_driver+0x36/0x70
>         amd_pstate_init+0x1e7/0x270
>         do_one_initcall+0x67/0x2c0
>         kernel_init_freeable+0x230/0x270
>         kernel_init+0x15/0x130
>         ret_from_fork+0x2c/0x50
>         ret_from_fork_asm+0x11/0x20
>
>  -> #1 (subsys mutex#3){+.+.}-{4:4}:
>         __mutex_lock+0xc2/0x930
>         subsys_interface_register+0x83/0x170
>         cpufreq_register_driver+0x154/0x250
>         amd_pstate_register_driver+0x36/0x70
>         amd_pstate_init+0x1e7/0x270
>         do_one_initcall+0x67/0x2c0
>         kernel_init_freeable+0x230/0x270
>         kernel_init+0x15/0x130
>         ret_from_fork+0x2c/0x50
>         ret_from_fork_asm+0x11/0x20
>
>  -> #0 (cpu_hotplug_lock){++++}-{0:0}:
>         __lock_acquire+0x1087/0x17e0
>         lock_acquire.part.0+0x66/0x1b0
>         cpus_read_lock+0x2a/0xc0
>         store_local_boost+0x6a/0xd0
>         store+0x50/0x90
>         kernfs_fop_write_iter+0x135/0x200
>         vfs_write+0x2ab/0x540
>         ksys_write+0x6c/0xe0
>         do_syscall_64+0xbb/0x1d0
>         entry_SYSCALL_64_after_hwframe+0x56/0x5e
>
> Signed-off-by: Seyediman Seyedarab <ImanDevel@gmail.com>
> ---
>  drivers/cpufreq/cpufreq.c | 5 ++---
>  1 file changed, 2 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
> index acf19b004..6e672dcba 100644
> --- a/drivers/cpufreq/cpufreq.c
> +++ b/drivers/cpufreq/cpufreq.c
> @@ -653,10 +653,7 @@ static ssize_t store_local_boost(struct cpufreq_policy *policy,
>
>         policy->boost_enabled = enable;
>
> -       cpus_read_lock();
>         ret = cpufreq_driver->set_boost(policy, enable);
> -       cpus_read_unlock();
> -
>         if (ret) {
>                 policy->boost_enabled = !policy->boost_enabled;
>                 return ret;
> @@ -1045,10 +1042,12 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
>         if (!fattr->store)
>                 return -EIO;
>
> +       cpus_read_lock();
>         down_write(&policy->rwsem);
>         if (likely(!policy_is_inactive(policy)))
>                 ret = fattr->store(policy, buf, count);
>         up_write(&policy->rwsem);
> +       cpus_read_unlock();

So you'd need to do this for local_boost only, not for all attributes
using store().
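
Something along these lines, for example (an untested sketch of that
idea, assuming the per-policy boost file is backed by the local_boost
freq_attr defined in cpufreq.c; shown only to illustrate the point, not
as the final patch):

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EBUSY;

	if (!fattr->store)
		return -EIO;

	/*
	 * Only store_local_boost() ends up calling set_boost(), which
	 * expects cpu_hotplug_lock to be taken before policy->rwsem,
	 * so take it here for that one attribute only.
	 */
	if (fattr == &local_boost)
		cpus_read_lock();

	down_write(&policy->rwsem);
	if (likely(!policy_is_inactive(policy)))
		ret = fattr->store(policy, buf, count);
	up_write(&policy->rwsem);

	if (fattr == &local_boost)
		cpus_read_unlock();

	return ret;
}

That keeps the hotplug lock out of the picture for every other
attribute.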

>
>         return ret;
>  }
> --