There's no need to invoke CPUID yet another time. This way two of the
static booleans can also go away.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/x86/acpi/cpufreq/hwp.c
+++ b/xen/arch/x86/acpi/cpufreq/hwp.c
@@ -18,9 +18,6 @@
static bool __ro_after_init hwp_in_use;
-static bool __ro_after_init feature_hwp_notification;
-static bool __ro_after_init feature_hwp_activity_window;
-
static bool __read_mostly feature_hdc;
static bool __ro_after_init opt_cpufreq_hdc = true;
@@ -165,8 +162,6 @@ bool hwp_active(void)
static bool __init hwp_available(void)
{
- unsigned int eax;
-
if ( boot_cpu_data.cpuid_level < CPUID_PM_LEAF )
{
hwp_verbose("cpuid_level (%#x) lacks HWP support\n",
@@ -183,29 +178,25 @@ static bool __init hwp_available(void)
return false;
}
- eax = cpuid_eax(CPUID_PM_LEAF);
-
hwp_verbose("%d notify: %d act-window: %d energy-perf: %d pkg-level: %d peci: %d\n",
- !!(eax & CPUID6_EAX_HWP),
- !!(eax & CPUID6_EAX_HWP_NOTIFICATION),
- !!(eax & CPUID6_EAX_HWP_ACTIVITY_WINDOW),
- !!(eax & CPUID6_EAX_HWP_ENERGY_PERFORMANCE_PREFERENCE),
- !!(eax & CPUID6_EAX_HWP_PACKAGE_LEVEL_REQUEST),
- !!(eax & CPUID6_EAX_HWP_PECI));
+ host_cpu_policy.basic.pm.hwp,
+ host_cpu_policy.basic.pm.hwp_notification,
+ host_cpu_policy.basic.pm.hwp_activity_window,
+ host_cpu_policy.basic.pm.hwp_epp,
+ host_cpu_policy.basic.pm.hwp_plr,
+ host_cpu_policy.basic.pm.hwp_peci);
- if ( !(eax & CPUID6_EAX_HWP) )
+ if ( !host_cpu_policy.basic.pm.hwp )
return false;
- if ( !(eax & CPUID6_EAX_HWP_ENERGY_PERFORMANCE_PREFERENCE) )
+ if ( !host_cpu_policy.basic.pm.hwp_epp )
{
hwp_verbose("disabled: No energy/performance preference available");
return false;
}
- feature_hwp_notification = eax & CPUID6_EAX_HWP_NOTIFICATION;
- feature_hwp_activity_window = eax & CPUID6_EAX_HWP_ACTIVITY_WINDOW;
- feature_hdc = eax & CPUID6_EAX_HDC;
+ feature_hdc = host_cpu_policy.basic.pm.hdc;
hwp_verbose("Hardware Duty Cycling (HDC) %ssupported%s\n",
feature_hdc ? "" : "not ",
@@ -213,7 +204,7 @@ static bool __init hwp_available(void)
: "");
hwp_verbose("HW_FEEDBACK %ssupported\n",
- (eax & CPUID6_EAX_HW_FEEDBACK) ? "" : "not ");
+ host_cpu_policy.basic.pm.hw_feedback ? "" : "not ");
hwp_in_use = true;
@@ -226,7 +217,8 @@ static int cf_check hwp_cpufreq_verify(s
{
struct hwp_drv_data *data = per_cpu(hwp_drv_data, policy->cpu);
- if ( !feature_hwp_activity_window && data->activity_window )
+ if ( !host_cpu_policy.basic.pm.hwp_activity_window &&
+ data->activity_window )
{
hwp_verbose("HWP activity window not supported\n");
@@ -268,7 +260,7 @@ static int cf_check hwp_cpufreq_target(s
hwp_req.max_perf = data->maximum;
hwp_req.desired = data->desired;
hwp_req.energy_perf = data->energy_perf;
- if ( feature_hwp_activity_window )
+ if ( host_cpu_policy.basic.pm.hwp_activity_window )
hwp_req.activity_window = data->activity_window;
if ( hwp_req.raw == data->curr_req.raw )
@@ -365,7 +357,7 @@ static void cf_check hwp_init_msrs(void
}
/* Ensure we don't generate interrupts */
- if ( feature_hwp_notification )
+ if ( host_cpu_policy.basic.pm.hwp_notification )
wrmsr_safe(MSR_HWP_INTERRUPT, 0);
if ( !(val & PM_ENABLE_HWP_ENABLE) )
@@ -537,7 +529,8 @@ int get_hwp_para(unsigned int cpu,
return -ENODATA;
cppc_para->features =
- (feature_hwp_activity_window ? XEN_SYSCTL_CPPC_FEAT_ACT_WINDOW : 0);
+ (host_cpu_policy.basic.pm.hwp_activity_window
+ ? XEN_SYSCTL_CPPC_FEAT_ACT_WINDOW : 0);
cppc_para->lowest = data->hw.lowest;
cppc_para->lowest_nonlinear = data->hw.most_efficient;
cppc_para->nominal = data->hw.guaranteed;
@@ -585,7 +578,7 @@ int set_hwp_para(struct cpufreq_policy *
/* Clear out activity window if lacking HW supported. */
if ( (set_cppc->set_params & XEN_SYSCTL_CPPC_SET_ACT_WINDOW) &&
- !feature_hwp_activity_window )
+ !host_cpu_policy.basic.pm.hwp_activity_window )
{
set_cppc->set_params &= ~XEN_SYSCTL_CPPC_SET_ACT_WINDOW;
cleared_act_window = true;
--- a/xen/arch/x86/include/asm/cpufeature.h
+++ b/xen/arch/x86/include/asm/cpufeature.h
@@ -115,14 +115,6 @@ static inline bool boot_cpu_has(unsigned
}
#define CPUID_PM_LEAF 6
-#define CPUID6_EAX_HWP BIT(7, U)
-#define CPUID6_EAX_HWP_NOTIFICATION BIT(8, U)
-#define CPUID6_EAX_HWP_ACTIVITY_WINDOW BIT(9, U)
-#define CPUID6_EAX_HWP_ENERGY_PERFORMANCE_PREFERENCE BIT(10, U)
-#define CPUID6_EAX_HWP_PACKAGE_LEVEL_REQUEST BIT(11, U)
-#define CPUID6_EAX_HDC BIT(13, U)
-#define CPUID6_EAX_HWP_PECI BIT(16, U)
-#define CPUID6_EAX_HW_FEEDBACK BIT(19, U)
/* CPUID level 0x00000001.edx */
#define cpu_has_fpu 1
On 18/11/2025 3:09 pm, Jan Beulich wrote:
> --- a/xen/arch/x86/acpi/cpufreq/hwp.c
> +++ b/xen/arch/x86/acpi/cpufreq/hwp.c
> @@ -183,29 +178,25 @@ static bool __init hwp_available(void)
> return false;
> }
>
> - eax = cpuid_eax(CPUID_PM_LEAF);
> -
> hwp_verbose("%d notify: %d act-window: %d energy-perf: %d pkg-level: %d peci: %d\n",
> - !!(eax & CPUID6_EAX_HWP),
> - !!(eax & CPUID6_EAX_HWP_NOTIFICATION),
> - !!(eax & CPUID6_EAX_HWP_ACTIVITY_WINDOW),
> - !!(eax & CPUID6_EAX_HWP_ENERGY_PERFORMANCE_PREFERENCE),
> - !!(eax & CPUID6_EAX_HWP_PACKAGE_LEVEL_REQUEST),
> - !!(eax & CPUID6_EAX_HWP_PECI));
> + host_cpu_policy.basic.pm.hwp,
> + host_cpu_policy.basic.pm.hwp_notification,
> + host_cpu_policy.basic.pm.hwp_activity_window,
> + host_cpu_policy.basic.pm.hwp_epp,
> + host_cpu_policy.basic.pm.hwp_plr,
> + host_cpu_policy.basic.pm.hwp_peci);
>
> - if ( !(eax & CPUID6_EAX_HWP) )
> + if ( !host_cpu_policy.basic.pm.hwp )
I think this justifies a cpu_has_hwp like we have for turbo/arat/etc.
Similarly for the other features.
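To illustrate the shape I have in mind (a sketch only — the macro names are made up here, and the field paths are simply the ones this patch already uses):

    #define cpu_has_hwp              host_cpu_policy.basic.pm.hwp
    #define cpu_has_hwp_notification host_cpu_policy.basic.pm.hwp_notification
    #define cpu_has_hwp_act_window   host_cpu_policy.basic.pm.hwp_activity_window
    #define cpu_has_hwp_epp          host_cpu_policy.basic.pm.hwp_epp
    #define cpu_has_hdc              host_cpu_policy.basic.pm.hdc

which would let the check above read "if ( !cpu_has_hwp )", matching the existing cpu_has_* style.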
> return false;
>
> - if ( !(eax & CPUID6_EAX_HWP_ENERGY_PERFORMANCE_PREFERENCE) )
> + if ( !host_cpu_policy.basic.pm.hwp_epp )
> {
> hwp_verbose("disabled: No energy/performance preference available");
>
> return false;
> }
>
> - feature_hwp_notification = eax & CPUID6_EAX_HWP_NOTIFICATION;
> - feature_hwp_activity_window = eax & CPUID6_EAX_HWP_ACTIVITY_WINDOW;
> - feature_hdc = eax & CPUID6_EAX_HDC;
> + feature_hdc = host_cpu_policy.basic.pm.hdc;
Looking at how feature_hdc is used, I think it should be the bit within
the host policy, rather than a separate boolean.
The host policy "is" what Xen is using, so if the HWP code decides it
doesn't like HDC, then that does want reflecting.
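Roughly like this (sketch only; hwp_hdc_unavailable() is a made-up helper name, and whether the policy may still be written at the point the HWP code makes that decision would need checking):

    /*
     * The host policy is what Xen itself uses, so if the HWP code decides
     * it can't use HDC, clear the bit there instead of keeping a private
     * feature_hdc boolean.
     */
    static void __init hwp_hdc_unavailable(void)
    {
        host_cpu_policy.basic.pm.hdc = false;
    }

with the remaining checks then testing host_cpu_policy.basic.pm.hdc (or a cpu_has_hdc wrapper) directly.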
Unrelated to this patch, but something I noticed while looking at the code:
what happens when hwp_init_msrs() fails on an AP? I can't see anything which
unwinds the initialised HDC state on the prior CPUs...
> @@ -365,7 +357,7 @@ static void cf_check hwp_init_msrs(void
> }
>
> /* Ensure we don't generate interrupts */
> - if ( feature_hwp_notification )
> + if ( host_cpu_policy.basic.pm.hwp_notification )
> wrmsr_safe(MSR_HWP_INTERRUPT, 0);
Again, unrelated to the patch, but why is this a wrmsr_safe()?
If the feature is enumerated, the MSR had better work.
Things like this start to matter more when we consider the code
generation for wrmsr_safe() using MSR_IMM.
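i.e. (sketch, assuming the plain wrmsrl() is what's wanted here) simply:

    /* Ensure we don't generate interrupts */
    if ( host_cpu_policy.basic.pm.hwp_notification )
        wrmsrl(MSR_HWP_INTERRUPT, 0);

with any fault then being a genuine bug rather than something to paper over.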
> --- a/xen/arch/x86/include/asm/cpufeature.h
> +++ b/xen/arch/x86/include/asm/cpufeature.h
> @@ -115,14 +115,6 @@ static inline bool boot_cpu_has(unsigned
> }
>
> #define CPUID_PM_LEAF 6
Doesn't this patch drop the final user?
~Andrew
On 2025-11-18 15:04, Andrew Cooper wrote:
> On 18/11/2025 3:09 pm, Jan Beulich wrote:
>> --- a/xen/arch/x86/acpi/cpufreq/hwp.c
>> +++ b/xen/arch/x86/acpi/cpufreq/hwp.c
>> @@ -183,29 +178,25 @@ static bool __init hwp_available(void)
>> return false;
>>
>> - if ( !(eax & CPUID6_EAX_HWP_ENERGY_PERFORMANCE_PREFERENCE) )
>> + if ( !host_cpu_policy.basic.pm.hwp_epp )
>> {
>> hwp_verbose("disabled: No energy/performance preference available");
>>
>> return false;
>> }
>>
>> - feature_hwp_notification = eax & CPUID6_EAX_HWP_NOTIFICATION;
>> - feature_hwp_activity_window = eax & CPUID6_EAX_HWP_ACTIVITY_WINDOW;
>> - feature_hdc = eax & CPUID6_EAX_HDC;
>> + feature_hdc = host_cpu_policy.basic.pm.hdc;
>
> Looking at how feature_hdc is used, I think it should be the bit within
> the host policy, rather than a separate boolean.
>
> The host policy "is" what Xen is using, so if the HWP code decides it
> doesn't like HDC, then that does want reflecting.
>
> Unrelated to this patch, but as I've been looking at the code. What
> happens when hwp_init_msrs() fails on an AP? I can't see anything which
> unwinds the initialised HDC state on the prior CPUs...
Assuming symmetry, it'd fail on the BSP and never get to an AP. Yes, I
didn't consider unwinding.
>> @@ -365,7 +357,7 @@ static void cf_check hwp_init_msrs(void
>> }
>>
>> /* Ensure we don't generate interrupts */
>> - if ( feature_hwp_notification )
>> + if ( host_cpu_policy.basic.pm.hwp_notification )
>> wrmsr_safe(MSR_HWP_INTERRUPT, 0);
>
> Again, unrelated to the patch, but why is this a wrmsr_safe() ?
It seemed safer ;)
> If the feature is enumerated, the MSR had better work.
Yes, but I wasn't sure how much to trust the enumeration. I only tested on
two processor models, so I erred on the side of caution. IIRC, I saw a #GP
or two during development; those might have been from writing a reserved bit
and were fixed during development.
Given the small testing sample, I preferred to play it safe and not take down Xen.
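If it helps, a middle ground might be to keep the fault-tolerant write but make a failure visible (sketch only, untested; treating a non-zero return from wrmsr_safe() as a faulted write):

    /* Ensure we don't generate interrupts */
    if ( host_cpu_policy.basic.pm.hwp_notification &&
         wrmsr_safe(MSR_HWP_INTERRUPT, 0) )
        printk(XENLOG_WARNING
               "CPU%u: disabling HWP interrupts faulted\n",
               smp_processor_id());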
Regards,
Jason
On 2025-11-18 10:09, Jan Beulich wrote:
> There's no need to invoke CPUID yet another time. This way two of the
> static booleans can also go away.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Jason Andryuk <jason.andryuk@amd.com>
Thanks,
Jason