drivers/cpufreq/intel_pstate.c | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-)
From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
On some hybrid platforms a group of cores (referred to as a module) may
share an L2 cache in which case they also share a voltage regulator and
always run at the same frequency (while not in idle states).
For this reason, make hybrid_register_perf_domain() in the intel_pstate
driver add all CPUs sharing an L2 cache to the same perf domain for EAS.
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
---
New in v1.
---
drivers/cpufreq/intel_pstate.c | 23 +++++++++++++++++++++--
1 file changed, 21 insertions(+), 2 deletions(-)
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -999,8 +999,11 @@
{
static const struct em_data_callback cb
= EM_ADV_DATA_CB(hybrid_active_power, hybrid_get_cost);
+ struct cpu_cacheinfo *cacheinfo = get_cpu_cacheinfo(cpu);
+ const struct cpumask *cpumask = cpumask_of(cpu);
struct cpudata *cpudata = all_cpu_data[cpu];
struct device *cpu_dev;
+ int ret;
/*
* Registering EM perf domains without enabling asymmetric CPU capacity
@@ -1014,9 +1017,25 @@
if (!cpu_dev)
return false;
- if (em_dev_register_perf_domain(cpu_dev, HYBRID_EM_STATE_COUNT, &cb,
- cpumask_of(cpu), false))
+ if (cacheinfo) {
+ unsigned int i;
+
+ /* Find the L2 cache and the CPUs sharing it. */
+ for (i = 0; i < cacheinfo->num_leaves; i++) {
+ if (cacheinfo->info_list[i].level == 2) {
+ cpumask = &cacheinfo->info_list[i].shared_cpu_map;
+ break;
+ }
+ }
+ }
+
+ ret = em_dev_register_perf_domain(cpu_dev, HYBRID_EM_STATE_COUNT, &cb,
+ cpumask, false);
+ if (ret) {
+ cpudata->em_registered = ret == -EEXIST;
+
return false;
+ }
cpudata->em_registered = true;
On 16/04/2025 20:10, Rafael J. Wysocki wrote:
> From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
>
> On some hybrid platforms a group of cores (referred to as a module) may
> share an L2 cache in which case they also share a voltage regulator and
> always run at the same frequency (while not in idle states).
>
> For this reason, make hybrid_register_perf_domain() in the intel_pstate
> driver add all CPUs sharing an L2 cache to the same perf domain for EAS.
>
> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
> ---
>
> New in v1.
>
> ---
> drivers/cpufreq/intel_pstate.c | 23 +++++++++++++++++++++--
> 1 file changed, 21 insertions(+), 2 deletions(-)
>
> --- a/drivers/cpufreq/intel_pstate.c
> +++ b/drivers/cpufreq/intel_pstate.c
> @@ -999,8 +999,11 @@
> {
> static const struct em_data_callback cb
> = EM_ADV_DATA_CB(hybrid_active_power, hybrid_get_cost);
> + struct cpu_cacheinfo *cacheinfo = get_cpu_cacheinfo(cpu);
> + const struct cpumask *cpumask = cpumask_of(cpu);
> struct cpudata *cpudata = all_cpu_data[cpu];
> struct device *cpu_dev;
> + int ret;
>
> /*
> * Registering EM perf domains without enabling asymmetric CPU capacity
> @@ -1014,9 +1017,25 @@
> if (!cpu_dev)
> return false;
>
> - if (em_dev_register_perf_domain(cpu_dev, HYBRID_EM_STATE_COUNT, &cb,
> - cpumask_of(cpu), false))
> + if (cacheinfo) {
> + unsigned int i;
> +
> + /* Find the L2 cache and the CPUs sharing it. */
> + for (i = 0; i < cacheinfo->num_leaves; i++) {
> + if (cacheinfo->info_list[i].level == 2) {
> + cpumask = &cacheinfo->info_list[i].shared_cpu_map;
> + break;
> + }
> + }
> + }
> +
> + ret = em_dev_register_perf_domain(cpu_dev, HYBRID_EM_STATE_COUNT, &cb,
> + cpumask, false);
> + if (ret) {
> + cpudata->em_registered = ret == -EEXIST;
> +
> return false;
> + }
>
> cpudata->em_registered = true;
There seems to be an issue with late CPU-hotplug-in and this alignment
on L2 cache boundaries.
Example:
i7-13700K [P-cores: [0,2,4,6,8,10,12,14] E-cores: [16-23] w/ 'nosmt' and
'maxcpus=12', [16-19] & [20-23] share L2 cache.
root:/sys/kernel/debug/energy_model# cat cpu*/cpus
0
10
12
14
16-19
2
4
6
8
# echo 1 > /sys/devices/system/cpu/cpu20/online
...
[ 496.616050] root_domain 0,2,4,6,8,10,12,14,16-20: pd20:{ cpus=20
nr_pstate=4 } pd16:{ cpus=16-19 nr_pstate=4 } pd14:{ cpus=14 nr_pstate=4
} pd12:{ cpus=12 nr_pstate=4 } pd10:{ cpus=10 nr_pstate=4 } pd8:{ cpus=8
nr_pstate=4 } pd6:{ cpus=6 nr_pstate=4 } pd4:{ cpus=4 nr_pstate=4 }
pd2:{ cpus=2 nr_pstate=4 } pd0:{ cpus=0 nr_pstate=4 }
...
root:/sys/kernel/debug/energy_model# cat cpu*/cpus
0
10
12
14
16-19
20
2
4
6
8
# echo 1 > /sys/devices/system/cpu/cpu21/online
...
[ 589.001256] root domain span: 0,2,4,6,8,10,12,14,16-21
[ 589.001265] pd_init: no EM found for CPU21
[ 589.001266] sched_energy_set: stopping EAS
...
root:/sys/kernel/debug/energy_model# cat cpu*/cpus
0
10
12
14
16-19
20
2
4
6
8
On Sun, Apr 27, 2025 at 6:23 PM Dietmar Eggemann
<dietmar.eggemann@arm.com> wrote:
>
> On 16/04/2025 20:10, Rafael J. Wysocki wrote:
> > From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
> >
> > On some hybrid platforms a group of cores (referred to as a module) may
> > share an L2 cache in which case they also share a voltage regulator and
> > always run at the same frequency (while not in idle states).
> >
> > For this reason, make hybrid_register_perf_domain() in the intel_pstate
> > driver add all CPUs sharing an L2 cache to the same perf domain for EAS.
> >
> > Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
> > ---
> >
> > New in v1.
> >
> > ---
> > drivers/cpufreq/intel_pstate.c | 23 +++++++++++++++++++++--
> > 1 file changed, 21 insertions(+), 2 deletions(-)
> >
> > --- a/drivers/cpufreq/intel_pstate.c
> > +++ b/drivers/cpufreq/intel_pstate.c
> > @@ -999,8 +999,11 @@
> > {
> > static const struct em_data_callback cb
> > = EM_ADV_DATA_CB(hybrid_active_power, hybrid_get_cost);
> > + struct cpu_cacheinfo *cacheinfo = get_cpu_cacheinfo(cpu);
> > + const struct cpumask *cpumask = cpumask_of(cpu);
> > struct cpudata *cpudata = all_cpu_data[cpu];
> > struct device *cpu_dev;
> > + int ret;
> >
> > /*
> > * Registering EM perf domains without enabling asymmetric CPU capacity
> > @@ -1014,9 +1017,25 @@
> > if (!cpu_dev)
> > return false;
> >
> > - if (em_dev_register_perf_domain(cpu_dev, HYBRID_EM_STATE_COUNT, &cb,
> > - cpumask_of(cpu), false))
> > + if (cacheinfo) {
> > + unsigned int i;
> > +
> > + /* Find the L2 cache and the CPUs sharing it. */
> > + for (i = 0; i < cacheinfo->num_leaves; i++) {
> > + if (cacheinfo->info_list[i].level == 2) {
> > + cpumask = &cacheinfo->info_list[i].shared_cpu_map;
> > + break;
> > + }
> > + }
> > + }
> > +
> > + ret = em_dev_register_perf_domain(cpu_dev, HYBRID_EM_STATE_COUNT, &cb,
> > + cpumask, false);
> > + if (ret) {
> > + cpudata->em_registered = ret == -EEXIST;
> > +
> > return false;
> > + }
> >
> > cpudata->em_registered = true;
>
> There seems to be an issue with late CPU-hotplug-in and this alignment
> on L2 cache boundaries.
>
> Example:
>
> i7-13700K [P-cores: [0,2,4,6,8,10,12,14] E-cores: [16-23] w/ 'nosmt' and
> 'maxcpus=12', [16-19] & [20-23] share L2 cache.
>
> root:/sys/kernel/debug/energy_model# cat cpu*/cpus
> 0
> 10
> 12
> 14
> 16-19
> 2
> 4
> 6
> 8
>
> # echo 1 > /sys/devices/system/cpu/cpu20/online
>
> ...
> [ 496.616050] root_domain 0,2,4,6,8,10,12,14,16-20: pd20:{ cpus=20
> nr_pstate=4 } pd16:{ cpus=16-19 nr_pstate=4 } pd14:{ cpus=14 nr_pstate=4
> } pd12:{ cpus=12 nr_pstate=4 } pd10:{ cpus=10 nr_pstate=4 } pd8:{ cpus=8
> nr_pstate=4 } pd6:{ cpus=6 nr_pstate=4 } pd4:{ cpus=4 nr_pstate=4 }
> pd2:{ cpus=2 nr_pstate=4 } pd0:{ cpus=0 nr_pstate=4 }
> ...
>
> root:/sys/kernel/debug/energy_model# cat cpu*/cpus
> 0
> 10
> 12
> 14
> 16-19
> 20
> 2
> 4
> 6
> 8
>
> # echo 1 > /sys/devices/system/cpu/cpu21/online
>
> ...
> [ 589.001256] root domain span: 0,2,4,6,8,10,12,14,16-21
> [ 589.001265] pd_init: no EM found for CPU21
> [ 589.001266] sched_energy_set: stopping EAS
> ...
>
> root:/sys/kernel/debug/energy_model# cat cpu*/cpus
> 0
> 10
> 12
> 14
> 16-19
> 20
> 2
> 4
> 6
> 8
I see.
What happens is that cpu_cacheinfo hides information on offline CPUs,
so when CPU20 goes online, it doesn't see any other CPUs sharing the
L2 with it. Accordingly, a PD is created just for itself.
When CPU21 goes online, it sees that CPU20 shares the L2 with it, so
the code attempts to create a PD for them both which fails.
This could be addressed, but the code would need to be a bit more
complex and the current hardware seems to do better with a PD per CPU,
so I'll drop the $subject patch for now.
On 30/04/2025 21:29, Rafael J. Wysocki wrote:
> On Sun, Apr 27, 2025 at 6:23 PM Dietmar Eggemann
> <dietmar.eggemann@arm.com> wrote:
>>
>> On 16/04/2025 20:10, Rafael J. Wysocki wrote:
>>> From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>

[...]

>> There seems to be an issue with late CPU-hotplug-in and this alignment
>> on L2 cache boundaries.

[...]

> I see.
>
> What happens is that cpu_cacheinfo hides information on offline CPUs,
> so when CPU20 goes online, it doesn't see any other CPUs sharing the
> L2 with it. Accordingly, a PD is created just for itself.
>
> When CPU21 goes online, it sees that CPU20 shares the L2 with it, so
> the code attempts to create a PD for them both which fails.
>
> This could be addressed, but the code would need to be a bit more
> complex and the current hardware seems to do better with a PD per CPU,
> so I'll drop the $subject patch for now.

Ah OK, thanks!
On 4/16/25 19:10, Rafael J. Wysocki wrote:
> From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
>
> On some hybrid platforms a group of cores (referred to as a module) may
> share an L2 cache in which case they also share a voltage regulator and
> always run at the same frequency (while not in idle states).
>
> For this reason, make hybrid_register_perf_domain() in the intel_pstate
> driver add all CPUs sharing an L2 cache to the same perf domain for EAS.
>
> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
> ---
>
> New in v1.
>
> ---
> drivers/cpufreq/intel_pstate.c | 23 +++++++++++++++++++++--
> 1 file changed, 21 insertions(+), 2 deletions(-)
>
> --- a/drivers/cpufreq/intel_pstate.c
> +++ b/drivers/cpufreq/intel_pstate.c
> @@ -999,8 +999,11 @@
> {
> static const struct em_data_callback cb
> = EM_ADV_DATA_CB(hybrid_active_power, hybrid_get_cost);
> + struct cpu_cacheinfo *cacheinfo = get_cpu_cacheinfo(cpu);
> + const struct cpumask *cpumask = cpumask_of(cpu);
> struct cpudata *cpudata = all_cpu_data[cpu];
> struct device *cpu_dev;
> + int ret;
>
> /*
> * Registering EM perf domains without enabling asymmetric CPU capacity
> @@ -1014,9 +1017,25 @@
> if (!cpu_dev)
> return false;
>
> - if (em_dev_register_perf_domain(cpu_dev, HYBRID_EM_STATE_COUNT, &cb,
> - cpumask_of(cpu), false))
> + if (cacheinfo) {
> + unsigned int i;
> +
> + /* Find the L2 cache and the CPUs sharing it. */
> + for (i = 0; i < cacheinfo->num_leaves; i++) {
> + if (cacheinfo->info_list[i].level == 2) {
> + cpumask = &cacheinfo->info_list[i].shared_cpu_map;
> + break;
> + }
> + }
> + }
> +
> + ret = em_dev_register_perf_domain(cpu_dev, HYBRID_EM_STATE_COUNT, &cb,
> + cpumask, false);
> + if (ret) {
> + cpudata->em_registered = ret == -EEXIST;
> +
> return false;
> + }
>
> cpudata->em_registered = true;
>
>
debugfs already provides a way to retrieve that information, but with more
complex perf domain constructions like here maybe this would be useful
(maybe it already is):
--->8---
Subject: [PATCH] PM: EM: Print CPUs of perf domains
In preparation for future EAS users who make the relation from CPU
to perf-domain not strictly based on cpufreq policies print the
affected CPUs when registering a perf-domain.
Signed-off-by: Christian Loehle <christian.loehle@arm.com>
---
kernel/power/energy_model.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
index 99a1ae324c2d..a202968b2ee9 100644
--- a/kernel/power/energy_model.c
+++ b/kernel/power/energy_model.c
@@ -627,7 +627,7 @@ int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
em_cpufreq_update_efficiencies(dev, em_table->state);
em_debug_create_pd(dev);
- dev_info(dev, "EM: created perf domain\n");
+ dev_info(dev, "EM: created perf domain for CPUs %*pbl\n", cpumask_pr_args(cpus));
unlock:
mutex_unlock(&em_pd_mutex);
--
2.34.1
© 2016 - 2025 Red Hat, Inc.