After commit ("x86/cpu/topology: Add support for the AMD 0x80000026 leaf"),
on AMD processors that support extended CPUID leaf 0x80000026, the
topology_die_cpumask() and topology_logical_die_id() macros, no longer
return the package cpumask and package id, instead they return the CCD
(Core Complex Die) mask and id respectively. This leads to the energy-pkg
event scope to be modified to CCD instead of package.
Replacing these macros with their package counterparts fixes the
energy-pkg event for AMD CPUs.
However, because the energy-pkg event has die scope on Intel CPUs and
package scope on AMD CPUs, these macros have to be replaced
conditionally, only for AMD CPUs.
On a 12 CCD 1 Package AMD Zen4 Genoa machine:
Before:
$ cat /sys/devices/power/cpumask
0,8,16,24,32,40,48,56,64,72,80,88
The expected cpumask here is just "0": since this is a package-scope
event, only one CPU collects the event for all the CPUs in the
package.
After:
$ cat /sys/devices/power/cpumask
0
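For reference, the new scope can also be cross-checked from userspace
with perf; an illustrative command (standard RAPL PMU event name,
counts are machine dependent):

$ perf stat -a -e power/energy-pkg/ -- sleep 1

perf honours the PMU's advertised cpumask, so with package scope the
energy-pkg count is collected once for the whole package rather than
once per CCD.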
Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
Fixes: 63edbaa48a57 ("x86/cpu/topology: Add support for the AMD 0x80000026 leaf")
---
Changes in v4:
* Invert the pkg scope check in init_rapl_pmus() (Peter)
* Add comments to explain the pkg scope check (Peter)
PS: The scope check logic is kept the same (i.e., all Intel systems are
considered die scope); Rui will modify it to limit the die scope to
Cascadelake-AP in a future patch on top of this patchset.
---
arch/x86/events/rapl.c | 39 ++++++++++++++++++++++++++++++++++-----
1 file changed, 34 insertions(+), 5 deletions(-)
diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index 0c5e7a7c43ac..df71f38ad98d 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -103,6 +103,13 @@ static struct perf_pmu_events_attr event_attr_##v = { \
.event_str = str, \
};
+/*
+ * RAPL PMU scope for AMD is package whereas for Intel it is die.
+ */
+#define rapl_pmu_is_pkg_scope() \
+ (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || \
+ boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+
struct rapl_pmu {
raw_spinlock_t lock;
int n_active;
@@ -140,9 +147,25 @@ static unsigned int rapl_cntr_mask;
static u64 rapl_timer_ms;
static struct perf_msr *rapl_msrs;
+/*
+ * Helper functions to get the correct topology macros according to the
+ * RAPL PMU scope.
+ */
+static inline unsigned int get_rapl_pmu_idx(int cpu)
+{
+ return rapl_pmu_is_pkg_scope() ? topology_logical_package_id(cpu) :
+ topology_logical_die_id(cpu);
+}
+
+static inline const struct cpumask *get_rapl_pmu_cpumask(int cpu)
+{
+ return rapl_pmu_is_pkg_scope() ? topology_core_cpumask(cpu) :
+ topology_die_cpumask(cpu);
+}
+
static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
{
- unsigned int rapl_pmu_idx = topology_logical_die_id(cpu);
+ unsigned int rapl_pmu_idx = get_rapl_pmu_idx(cpu);
/*
* The unsigned check also catches the '-1' return value for non
@@ -543,6 +566,7 @@ static struct perf_msr amd_rapl_msrs[] = {
static int rapl_cpu_offline(unsigned int cpu)
{
+ const struct cpumask *rapl_pmu_cpumask = get_rapl_pmu_cpumask(cpu);
struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
int target;
@@ -552,7 +576,7 @@ static int rapl_cpu_offline(unsigned int cpu)
pmu->cpu = -1;
/* Find a new cpu to collect rapl events */
- target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
+ target = cpumask_any_but(rapl_pmu_cpumask, cpu);
/* Migrate rapl events to the new target */
if (target < nr_cpu_ids) {
@@ -565,6 +589,8 @@ static int rapl_cpu_offline(unsigned int cpu)
static int rapl_cpu_online(unsigned int cpu)
{
+ unsigned int rapl_pmu_idx = get_rapl_pmu_idx(cpu);
+ const struct cpumask *rapl_pmu_cpumask = get_rapl_pmu_cpumask(cpu);
struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
int target;
@@ -579,14 +605,14 @@ static int rapl_cpu_online(unsigned int cpu)
pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
rapl_hrtimer_init(pmu);
- rapl_pmus->pmus[topology_logical_die_id(cpu)] = pmu;
+ rapl_pmus->pmus[rapl_pmu_idx] = pmu;
}
/*
* Check if there is an online cpu in the package which collects rapl
* events already.
*/
- target = cpumask_any_and(&rapl_cpu_mask, topology_die_cpumask(cpu));
+ target = cpumask_any_and(&rapl_cpu_mask, rapl_pmu_cpumask);
if (target < nr_cpu_ids)
return 0;
@@ -675,7 +701,10 @@ static const struct attribute_group *rapl_attr_update[] = {
static int __init init_rapl_pmus(void)
{
- int nr_rapl_pmu = topology_max_packages() * topology_max_dies_per_package();
+ int nr_rapl_pmu = topology_max_packages();
+
+ if (!rapl_pmu_is_pkg_scope())
+ nr_rapl_pmu *= topology_max_dies_per_package();
rapl_pmus = kzalloc(struct_size(rapl_pmus, pmus, nr_rapl_pmu), GFP_KERNEL);
if (!rapl_pmus)
--
2.34.1
Hello Peter, Rui,
After Ian's comments on the series, I have decided to rethink the approach of
adding a new PMU for the per-core RAPL counters.
However, this patch is still needed as a fix for commit 63edbaa48a57
("x86/cpu/topology: Add support for the AMD 0x80000026 leaf"); I will
send it separately along with a similar fix for
powercap/intel_rapl_common.
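For reference, the corresponding powercap-side behavior can be
inspected through the RAPL sysfs zones; an illustrative check
(assuming the intel-rapl powercap interface is available on the
system, the zone layout is machine dependent):

$ grep . /sys/class/powercap/intel-rapl:*/name

On a single-package system the expectation, once that analogous fix is
in place, is a single package domain (plus its sub-domains) rather
than one per CCD, assuming the same topology-macro issue affects that
driver.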
Thanks,
Dhananjay
On 7/11/2024 3:54 PM, Dhananjay Ugwekar wrote:
> [...]
On Thu, 2024-07-11 at 10:24 +0000, Dhananjay Ugwekar wrote:
> [...]
>
> Signed-off-by: Dhananjay Ugwekar <Dhananjay.Ugwekar@amd.com>
> Fixes: 63edbaa48a57 ("x86/cpu/topology: Add support for the AMD 0x80000026 leaf")
You still missed my Reviewed-by tag for this one. :)
https://lore.kernel.org/all/e1f70a09f85dbd0ee3f32dffea37993e141269d0.camel@intel.com/
thanks,
rui
Hello Rui,
On 7/12/2024 7:34 AM, Zhang, Rui wrote:
> On Thu, 2024-07-11 at 10:24 +0000, Dhananjay Ugwekar wrote:
>> [...]
>
> You still missed my Reviewed-by tag for this one. :)
> https://lore.kernel.org/all/e1f70a09f85dbd0ee3f32dffea37993e141269d0.camel@intel.com/
I didn't forget this time; I just wanted to give you a chance to review the changes :) (although they are minimal)
Will surely add it in next version!
Thanks,
Dhananjay
>
> thanks,
> rui