From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>

Add separate macros for walking links to suppliers and consumers of a
device to help device links users avoid exposing the internals of
struct dev_links_info in their code and to prevent related coding
mistakes.

Accordingly, use the new macros to replace open-coded device links list
walks in the core power management code.

No intentional functional impact.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
---
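For reviewers, a minimal sketch of how the new walkers are meant to be
used inside the driver core (illustrative only, not part of the patch;
the helper name and the pm_request_resume() call are made up for the
example).  A surrounding device_links_read_lock()/unlock() section is
still required, since the macros merely pass the lock-held assertion
down to list_for_each_entry_srcu():

/* Illustrative sketch only, not part of this patch. */
static void example_resume_consumers(struct device *dev)
{
        struct device_link *link;
        int idx;

        idx = device_links_read_lock();

        /* Walk the consumer links and poke the non-dormant consumers. */
        dev_for_each_link_to_consumer(link, dev)
                if (READ_ONCE(link->status) != DL_STATE_DORMANT)
                        pm_request_resume(link->consumer);

        device_links_read_unlock(idx);
}

Folding device_links_read_lock_held() into the macros is what lets the
call sites below stop open-coding the list heads and node member names.
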
drivers/base/base.h | 8 ++++++++
drivers/base/power/main.c | 18 +++++++-----------
drivers/base/power/runtime.c | 3 +--
3 files changed, 16 insertions(+), 13 deletions(-)
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -251,6 +251,14 @@
void fw_devlink_drivers_done(void);
void fw_devlink_probing_done(void);
+#define dev_for_each_link_to_supplier(__link, __dev) \
+ list_for_each_entry_srcu(__link, &(__dev)->links.suppliers, c_node, \
+ device_links_read_lock_held())
+
+#define dev_for_each_link_to_consumer(__link, __dev) \
+ list_for_each_entry_srcu(__link, &(__dev)->links.consumers, s_node, \
+ device_links_read_lock_held())
+
/* device pm support */
void device_pm_move_to_tail(struct device *dev);
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -40,10 +40,6 @@
typedef int (*pm_callback_t)(struct device *);
-#define list_for_each_entry_srcu_locked(pos, head, member) \
- list_for_each_entry_srcu(pos, head, member, \
- device_links_read_lock_held())
-
/*
* The entries in the dpm_list list are in a depth first order, simply
* because children are guaranteed to be discovered after parents, and
@@ -281,7 +277,7 @@
* callbacks freeing the link objects for the links in the list we're
* walking.
*/
- list_for_each_entry_srcu_locked(link, &dev->links.suppliers, c_node)
+ dev_for_each_link_to_supplier(link, dev)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_wait(link->supplier, async);
@@ -338,7 +334,7 @@
* continue instead of trying to continue in parallel with its
* unregistration).
*/
- list_for_each_entry_srcu_locked(link, &dev->links.consumers, s_node)
+ dev_for_each_link_to_consumer(link, dev)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_wait(link->consumer, async);
@@ -675,7 +671,7 @@
idx = device_links_read_lock();
/* Start processing the device's "async" consumers. */
- list_for_each_entry_srcu_locked(link, &dev->links.consumers, s_node)
+ dev_for_each_link_to_consumer(link, dev)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_async_with_cleanup(link->consumer, func);
@@ -1330,7 +1326,7 @@
idx = device_links_read_lock();
/* Start processing the device's "async" suppliers. */
- list_for_each_entry_srcu_locked(link, &dev->links.suppliers, c_node)
+ dev_for_each_link_to_supplier(link, dev)
if (READ_ONCE(link->status) != DL_STATE_DORMANT)
dpm_async_with_cleanup(link->supplier, func);
@@ -1384,7 +1380,7 @@
idx = device_links_read_lock();
- list_for_each_entry_srcu_locked(link, &dev->links.suppliers, c_node)
+ dev_for_each_link_to_supplier(link, dev)
link->supplier->power.must_resume = true;
device_links_read_unlock(idx);
@@ -1813,7 +1809,7 @@
idx = device_links_read_lock();
- list_for_each_entry_srcu_locked(link, &dev->links.suppliers, c_node) {
+ dev_for_each_link_to_supplier(link, dev) {
spin_lock_irq(&link->supplier->power.lock);
link->supplier->power.direct_complete = false;
spin_unlock_irq(&link->supplier->power.lock);
@@ -2065,7 +2061,7 @@
idx = device_links_read_lock();
- list_for_each_entry_srcu_locked(link, &dev->links.suppliers, c_node) {
+ dev_for_each_link_to_supplier(link, dev) {
if (!device_link_test(link, DL_FLAG_PM_RUNTIME))
continue;
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1903,8 +1903,7 @@
idx = device_links_read_lock();
- list_for_each_entry_srcu(link, &dev->links.suppliers, c_node,
- device_links_read_lock_held())
+ dev_for_each_link_to_supplier(link, dev)
if (device_link_test(link, DL_FLAG_PM_RUNTIME)) {
link->supplier_preactivated = true;
pm_runtime_get_sync(link->supplier);
On Tue, 2 Sept 2025 at 15:45, Rafael J. Wysocki <rafael@kernel.org> wrote:
>
> From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
>
> Add separate macros for walking links to suppliers and consumers of a
> device to help device links users avoid exposing the internals of
> struct dev_links_info in their code and to prevent related coding
> mistakes.
>
> Accordingly, use the new macros to replace open-coded device links list
> walks in the core power management code.
>
> No intentional functional impact.
>
> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>

Reviewed-by: Ulf Hansson <ulf.hansson@linaro.org>

Kind regards
Uffe