[PATCH 4/9] pmdomain: core: Add initial fine grained sync_state support

Ulf Hansson posted 9 patches 1 month, 1 week ago
[PATCH 4/9] pmdomain: core: Add initial fine grained sync_state support
Posted by Ulf Hansson 1 month, 1 week ago
A onecell (#power-domain-cells = <1 or 2>; in DT) power domain provider
typically provides multiple independent power domains, each with their own
corresponding consumers. In these cases we have to wait for all consumers
for all the provided power domains before the ->sync_state() callback gets
called for the supplier.

In a first step to improve this, let's implement support for fine grained
sync_state support on a per genpd basis by using the ->queue_sync_state()
callback. To take it step by step, let's initially limit the improvement to
the internal genpd provider driver and to its corresponding genpd devices
for onecell providers.

Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
---
 drivers/pmdomain/core.c   | 128 ++++++++++++++++++++++++++++++++++++++
 include/linux/pm_domain.h |   1 +
 2 files changed, 129 insertions(+)

diff --git a/drivers/pmdomain/core.c b/drivers/pmdomain/core.c
index ad57846f02a3..53401db2a386 100644
--- a/drivers/pmdomain/core.c
+++ b/drivers/pmdomain/core.c
@@ -2699,6 +2699,123 @@ static struct generic_pm_domain *genpd_get_from_provider(
 	return genpd;
 }
 
+static bool genpd_should_wait_for_consumer(struct device_node *np)
+{
+	struct generic_pm_domain *genpd;
+	bool should_wait = false;
+
+	mutex_lock(&gpd_list_lock);
+	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
+		if (genpd->provider == of_fwnode_handle(np)) {
+			genpd_lock(genpd);
+
+			/* Clear the previous state before reevaluating. */
+			genpd->wait_for_consumer = false;
+
+			/*
+			 * Unless there is at least one genpd for the provider
+			 * that is being kept powered-on, we don't have to care
+			 * about waiting for consumers.
+			 */
+			if (genpd->stay_on)
+				should_wait = true;
+
+			genpd_unlock(genpd);
+		}
+	}
+	mutex_unlock(&gpd_list_lock);
+
+	return should_wait;
+}
+
+static void genpd_parse_for_consumer(struct device_node *sup,
+				     struct device_node *con)
+{
+	struct generic_pm_domain *genpd;
+	int i;
+
+	for (i = 0; ; i++) {
+		struct of_phandle_args pd_args;
+
+		if (of_parse_phandle_with_args(con, "power-domains",
+					       "#power-domain-cells",
+					        i, &pd_args))
+			break;
+
+		/*
+		 * The phandle must correspond to the supplier's genpd provider
+		 * to be relevant else let's move to the next index.
+		 */
+		if (sup != pd_args.np) {
+			of_node_put(pd_args.np);
+			continue;
+		}
+
+		mutex_lock(&gpd_list_lock);
+		genpd = genpd_get_from_provider(&pd_args);
+		if (!IS_ERR(genpd)) {
+			genpd_lock(genpd);
+			genpd->wait_for_consumer = true;
+			genpd_unlock(genpd);
+		}
+		mutex_unlock(&gpd_list_lock);
+
+		of_node_put(pd_args.np);
+	}
+}
+
+static void _genpd_queue_sync_state(struct device_node *np)
+{
+	struct generic_pm_domain *genpd;
+
+	mutex_lock(&gpd_list_lock);
+	list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
+		if (genpd->provider == of_fwnode_handle(np)) {
+			genpd_lock(genpd);
+			if (genpd->stay_on && !genpd->wait_for_consumer) {
+				genpd->stay_on = false;
+				genpd_queue_power_off_work(genpd);
+			}
+			genpd_unlock(genpd);
+		}
+	}
+	mutex_unlock(&gpd_list_lock);
+}
+
+static void genpd_queue_sync_state(struct device *dev)
+{
+	struct device_node *np = dev->of_node;
+	struct device_link *link;
+
+	if (!genpd_should_wait_for_consumer(np))
+		return;
+
+	list_for_each_entry(link, &dev->links.consumers, s_node) {
+		struct device *consumer = link->consumer;
+
+		pr_info("%s:%s con=%s\n", __func__, dev_name(dev),
+			dev_name(consumer));
+
+		if (!device_link_test(link, DL_FLAG_MANAGED))
+			continue;
+
+		if (link->status == DL_STATE_ACTIVE)
+			continue;
+
+		if (!consumer->of_node)
+			continue;
+
+		/*
+		 * A consumer device has not been probed yet. Let's parse its
+		 * device node for the power-domains property, to find out the
+		 * genpds it may belong to and then prevent sync state for them.
+		 */
+		genpd_parse_for_consumer(np, consumer->of_node);
+	}
+
+	_genpd_queue_sync_state(np);
+}
+
 static void genpd_sync_state(struct device *dev)
 {
 	return of_genpd_sync_state(dev->of_node);
@@ -3531,6 +3648,16 @@ static int genpd_provider_probe(struct device *dev)
 	return 0;
 }
 
+static void genpd_provider_queue_sync_state(struct device *dev)
+{
+	struct generic_pm_domain *genpd = container_of(dev, struct generic_pm_domain, dev);
+
+	if (genpd->sync_state != GENPD_SYNC_STATE_ONECELL)
+		return;
+
+	genpd_queue_sync_state(dev);
+}
+
 static void genpd_provider_sync_state(struct device *dev)
 {
 	struct generic_pm_domain *genpd = container_of(dev, struct generic_pm_domain, dev);
@@ -3559,6 +3686,7 @@ static struct device_driver genpd_provider_drv = {
 	.name = "genpd_provider",
 	.bus = &genpd_provider_bus_type,
 	.probe = genpd_provider_probe,
+	.queue_sync_state = genpd_provider_queue_sync_state,
 	.sync_state = genpd_provider_sync_state,
 	.suppress_bind_attrs = true,
 };
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index b299dc0128d6..7aa49721cde5 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -215,6 +215,7 @@ struct generic_pm_domain {
 	cpumask_var_t cpus;		/* A cpumask of the attached CPUs */
 	bool synced_poweroff;		/* A consumer needs a synced poweroff */
 	bool stay_on;			/* Stay powered-on during boot. */
+	bool wait_for_consumer;		/* Consumers await being probed. */
 	enum genpd_sync_state sync_state; /* How sync_state is managed. */
 	int (*power_off)(struct generic_pm_domain *domain);
 	int (*power_on)(struct generic_pm_domain *domain);
-- 
2.43.0
Re: [PATCH 4/9] pmdomain: core: Add initial fine grained sync_state support
Posted by Geert Uytterhoeven 2 weeks, 6 days ago
Hi Ulf,

Thanks for your patch!

On Tue, 3 Mar 2026 at 14:23, Ulf Hansson <ulf.hansson@linaro.org> wrote:
> A onecell (#power-domain-cells = <1 or 2>; in DT) power domain provider
> typically provides multiple independent power domains, each with their own
> corresponding consumers. In these cases we have to wait for all consumers
> for all the provided power domains before the ->sync_state() callback gets
> called for the supplier.
>
> In a first step to improve this, let's implement support for fine grained
> sync_state support a per genpd basis by using the ->queue_sync_state()

... support on a ...

> callback. To take step by step, let's initially limit the improvement to
> the internal genpd provider driver and to its corresponding genpd devices
> for onecell providers.
>
> Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>

> --- a/drivers/pmdomain/core.c
> +++ b/drivers/pmdomain/core.c

> +static void genpd_queue_sync_state(struct device *dev)
> +{
> +       struct device_node *np = dev->of_node;
> +       struct device_link *link;
> +
> +       if (!genpd_should_wait_for_consumer(np))
> +               return;
> +
> +       list_for_each_entry(link, &dev->links.consumers, s_node) {
> +               struct device *consumer = link->consumer;
> +
> +               pr_info("%s:%s con=%s\n", __func__, dev_name(dev),
> +                       dev_name(consumer));

pr_debug? Or better, dev_dbg(), so you don't have to add dev_name(dev)
explicitly.

However, the printed provider name is again the name of the first
domain.  This is incorrect for all but the first domain of a onecell
genpd provider, and thus confusing.

> +
> +               if (!device_link_test(link, DL_FLAG_MANAGED))
> +                       continue;
> +
> +               if (link->status == DL_STATE_ACTIVE)
> +                       continue;
> +
> +               if (!consumer->of_node)
> +                       continue;
> +
> +               /*
> +                * A consumer device has not been probed yet. Let's parse its
> +                * device node for the power-domains property, to find out the
> +                * genpds it may belong to and then prevent sync state for them.
> +                */
> +               genpd_parse_for_consumer(np, consumer->of_node);
> +       }
> +
> +       _genpd_queue_sync_state(np);
> +}
> +
>  static void genpd_sync_state(struct device *dev)
>  {
>         return of_genpd_sync_state(dev->of_node);

Gr{oetje,eeting}s,

                        Geert

-- 
Geert Uytterhoeven -- There's lots of Linux beyond ia32 -- geert@linux-m68k.org

In personal conversations with technical people, I call myself a hacker. But
when I'm talking to journalists I just say "programmer" or something like that.
                                -- Linus Torvalds
Re: [PATCH 4/9] pmdomain: core: Add initial fine grained sync_state support
Posted by Ulf Hansson 2 weeks, 6 days ago
On Fri, 20 Mar 2026 at 10:29, Geert Uytterhoeven <geert@linux-m68k.org> wrote:
>
> Hi Ulf,
>
> Thanks for your patch!
>
> On Tue, 3 Mar 2026 at 14:23, Ulf Hansson <ulf.hansson@linaro.org> wrote:
> > A onecell (#power-domain-cells = <1 or 2>; in DT) power domain provider
> > typically provides multiple independent power domains, each with their own
> > corresponding consumers. In these cases we have to wait for all consumers
> > for all the provided power domains before the ->sync_state() callback gets
> > called for the supplier.
> >
> > In a first step to improve this, let's implement support for fine grained
> > sync_state support a per genpd basis by using the ->queue_sync_state()
>
> ... support on a ...
>
> > callback. To take step by step, let's initially limit the improvement to
> > the internal genpd provider driver and to its corresponding genpd devices
> > for onecell providers.
> >
> > Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
>
> > --- a/drivers/pmdomain/core.c
> > +++ b/drivers/pmdomain/core.c
>
> > +static void genpd_queue_sync_state(struct device *dev)
> > +{
> > +       struct device_node *np = dev->of_node;
> > +       struct device_link *link;
> > +
> > +       if (!genpd_should_wait_for_consumer(np))
> > +               return;
> > +
> > +       list_for_each_entry(link, &dev->links.consumers, s_node) {
> > +               struct device *consumer = link->consumer;
> > +
> > +               pr_info("%s:%s con=%s\n", __func__, dev_name(dev),
> > +                       dev_name(consumer));
>
> pr_debug? Or better, dev_dbg(), so you don't have to add dev_name(dev)
> explicitly.
>
> However, the printed provider name is again the name of the first
> domain.  This is incorrect for all but the first domain of a onecell
> genpd provider, and thus confusing.

I agree, I think we should drop the print. In fact, this is a
left-over from my debugging, I didn't intend for it to be part of the
submission.

Although, perhaps there is a need for pr_debug/dev_dbg somewhere, but
I think we can better add those in separate patches on top instead.

>
> > +
> > +               if (!device_link_test(link, DL_FLAG_MANAGED))
> > +                       continue;
> > +
> > +               if (link->status == DL_STATE_ACTIVE)
> > +                       continue;
> > +
> > +               if (!consumer->of_node)
> > +                       continue;
> > +
> > +               /*
> > +                * A consumer device has not been probed yet. Let's parse its
> > +                * device node for the power-domains property, to find out the
> > +                * genpds it may belong to and then prevent sync state for them.
> > +                */
> > +               genpd_parse_for_consumer(np, consumer->of_node);
> > +       }
> > +
> > +       _genpd_queue_sync_state(np);
> > +}
> > +
> >  static void genpd_sync_state(struct device *dev)
> >  {
> >         return of_genpd_sync_state(dev->of_node);
>

Thanks for reviewing!

Kind regards
Uffe