The arch lbr state has 2 dependencies:
* Arch lbr feature bit (CPUID 0x7.0x0:EDX[bit 19]):
This bit also depends on pmu property. Mask it off if pmu is disabled
in x86_cpu_expand_features(), so that it is not needed to repeatedly
check whether this bit is set as well as pmu is enabled.
Note this doesn't need a compat option, since even KVM doesn't support
arch lbr yet.
The supported xstate is constructed based on such dependency in
cpuid_has_xsave_feature(), so if pmu is disabled and the arch lbr bit is
masked off, then the arch lbr state won't be included in the supported
xstates.
Thus it's safe to drop the check on arch lbr bit in CPUID 0xD
encoding.
* XSAVES feature bit (CPUID 0xD.0x1.EAX[bit 3]):
Arch lbr state is a supervisor state, which requires the XSAVES
feature support. Enumerate supported supervisor state based on XSAVES
feature bit in x86_cpu_enable_xsave_components().
Then it's safe to drop the check on XSAVES feature support during
CPUID 0xD encoding.
Suggested-by: Zide Chen <zide.chen@intel.com>
Tested-by: Farrah Chen <farrah.chen@intel.com>
Signed-off-by: Zhao Liu <zhao1.liu@intel.com>
---
target/i386/cpu.c | 22 ++++++++++------------
1 file changed, 10 insertions(+), 12 deletions(-)
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 236a2f3a9426..5b7a81fcdb1b 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -8174,16 +8174,6 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*ebx = xsave_area_size(xstate, true);
*ecx = env->features[FEAT_XSAVE_XSS_LO];
*edx = env->features[FEAT_XSAVE_XSS_HI];
- if (kvm_enabled() && cpu->enable_pmu &&
- (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR) &&
- (*eax & CPUID_XSAVE_XSAVES)) {
- *ecx |= XSTATE_ARCH_LBR_MASK;
- } else {
- *ecx &= ~XSTATE_ARCH_LBR_MASK;
- }
- } else if (count == 0xf && cpu->enable_pmu
- && (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
- x86_cpu_get_supported_cpuid(0xD, count, eax, ebx, ecx, edx);
} else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
const ExtSaveArea *esa = &x86_ext_save_areas[count];
@@ -8902,6 +8892,12 @@ static void x86_cpu_enable_xsave_components(X86CPU *cpu)
mask = 0;
for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
+ /* Skip supervisor states if XSAVES is not supported. */
+ if (CPUID_XSTATE_XSS_MASK & (1 << i) &&
+ !(env->features[FEAT_XSAVE] & CPUID_XSAVE_XSAVES)) {
+ continue;
+ }
+
const ExtSaveArea *esa = &x86_ext_save_areas[i];
if (cpuid_has_xsave_feature(env, esa)) {
mask |= (1ULL << i);
@@ -9019,11 +9015,13 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
}
}
- if (!cpu->pdcm_on_even_without_pmu) {
+ if (!cpu->enable_pmu) {
/* PDCM is fixed1 bit for TDX */
- if (!cpu->enable_pmu && !is_tdx_vm()) {
+ if (!cpu->pdcm_on_even_without_pmu && !is_tdx_vm()) {
env->features[FEAT_1_ECX] &= ~CPUID_EXT_PDCM;
}
+
+ env->features[FEAT_7_0_EDX] &= ~CPUID_7_0_EDX_ARCH_LBR;
}
for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) {
--
2.34.1
On 10/24/2025 2:56 PM, Zhao Liu wrote:
> The arch lbr state has 2 dependencies:
> * Arch lbr feature bit (CPUID 0x7.0x0:EDX[bit 19]):
>
> This bit also depends on pmu property. Mask it off if pmu is disabled
> in x86_cpu_expand_features(), so that it is not needed to repeatedly
> check whether this bit is set as well as pmu is enabled.
>
> Note this doesn't need compat option, since even KVM hasn't support
> arch lbr yet.
>
> The supported xstate is constructed based such dependency in
> cpuid_has_xsave_feature(), so if pmu is disabled and arch lbr bit is
> masked off, then arch lbr state won't be included in supported
> xstates.
>
> Thus it's safe to drop the check on arch lbr bit in CPUID 0xD
> encoding.
>
> * XSAVES feature bit (CPUID 0xD.0x1.EAX[bit 3]):
>
> Arch lbr state is a supervisor state, which requires the XSAVES
> feature support. Enumerate supported supervisor state based on XSAVES
> feature bit in x86_cpu_enable_xsave_components().
>
> Then it's safe to drop the check on XSAVES feature support during
> CPUID 0XD encoding.
>
> Suggested-by: Zide Chen <zide.chen@intel.com>
> Tested-by: Farrah Chen <farrah.chen@intel.com>
> Signed-off-by: Zhao Liu <zhao1.liu@intel.com>
Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
> ---
> target/i386/cpu.c | 22 ++++++++++------------
> 1 file changed, 10 insertions(+), 12 deletions(-)
>
> diff --git a/target/i386/cpu.c b/target/i386/cpu.c
> index 236a2f3a9426..5b7a81fcdb1b 100644
> --- a/target/i386/cpu.c
> +++ b/target/i386/cpu.c
> @@ -8174,16 +8174,6 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
> *ebx = xsave_area_size(xstate, true);
> *ecx = env->features[FEAT_XSAVE_XSS_LO];
> *edx = env->features[FEAT_XSAVE_XSS_HI];
> - if (kvm_enabled() && cpu->enable_pmu &&
> - (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR) &&
> - (*eax & CPUID_XSAVE_XSAVES)) {
> - *ecx |= XSTATE_ARCH_LBR_MASK;
> - } else {
> - *ecx &= ~XSTATE_ARCH_LBR_MASK;
> - }
> - } else if (count == 0xf && cpu->enable_pmu
> - && (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
> - x86_cpu_get_supported_cpuid(0xD, count, eax, ebx, ecx, edx);
This chunk needs to be a separate patch. It's a functional change.
> } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
> const ExtSaveArea *esa = &x86_ext_save_areas[count];
>
> @@ -8902,6 +8892,12 @@ static void x86_cpu_enable_xsave_components(X86CPU *cpu)
>
> mask = 0;
> for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
> + /* Skip supervisor states if XSAVES is not supported. */
> + if (CPUID_XSTATE_XSS_MASK & (1 << i) &&
> + !(env->features[FEAT_XSAVE] & CPUID_XSAVE_XSAVES)) {
> + continue;
> + }
> +
> const ExtSaveArea *esa = &x86_ext_save_areas[i];
> if (cpuid_has_xsave_feature(env, esa)) {
> mask |= (1ULL << i);
> @@ -9019,11 +9015,13 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
> }
> }
>
> - if (!cpu->pdcm_on_even_without_pmu) {
> + if (!cpu->enable_pmu) {
> /* PDCM is fixed1 bit for TDX */
> - if (!cpu->enable_pmu && !is_tdx_vm()) {
> + if (!cpu->pdcm_on_even_without_pmu && !is_tdx_vm()) {
> env->features[FEAT_1_ECX] &= ~CPUID_EXT_PDCM;
> }
> +
> + env->features[FEAT_7_0_EDX] &= ~CPUID_7_0_EDX_ARCH_LBR;
> }
>
> for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) {
> > * XSAVES feature bit (CPUID 0xD.0x1.EAX[bit 3]):
> >
> > Arch lbr state is a supervisor state, which requires the XSAVES
> > feature support. Enumerate supported supervisor state based on XSAVES
> > feature bit in x86_cpu_enable_xsave_components().
> >
> > Then it's safe to drop the check on XSAVES feature support during
> > CPUID 0XD encoding.
...
> > +++ b/target/i386/cpu.c
> > @@ -8174,16 +8174,6 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
> > *ebx = xsave_area_size(xstate, true);
> > *ecx = env->features[FEAT_XSAVE_XSS_LO];
> > *edx = env->features[FEAT_XSAVE_XSS_HI];
> > - if (kvm_enabled() && cpu->enable_pmu &&
> > - (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR) &&
> > - (*eax & CPUID_XSAVE_XSAVES)) {
> > - *ecx |= XSTATE_ARCH_LBR_MASK;
> > - } else {
> > - *ecx &= ~XSTATE_ARCH_LBR_MASK;
> > - }
>
> > - } else if (count == 0xf && cpu->enable_pmu
> > - && (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
> > - x86_cpu_get_supported_cpuid(0xD, count, eax, ebx, ecx, edx);
>
> This chunk needs to be a separate patch. It's a functional change.
Already mentioned this in commit message.
On 10/27/2025 6:12 PM, Zhao Liu wrote:
>>> * XSAVES feature bit (CPUID 0xD.0x1.EAX[bit 3]):
>>>
>>> Arch lbr state is a supervisor state, which requires the XSAVES
>>> feature support. Enumerate supported supervisor state based on XSAVES
>>> feature bit in x86_cpu_enable_xsave_components().
>>>
>>> Then it's safe to drop the check on XSAVES feature support during
>>> CPUID 0XD encoding.
>
> ...
>
>>> +++ b/target/i386/cpu.c
>>> @@ -8174,16 +8174,6 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
>>> *ebx = xsave_area_size(xstate, true);
>>> *ecx = env->features[FEAT_XSAVE_XSS_LO];
>>> *edx = env->features[FEAT_XSAVE_XSS_HI];
>>> - if (kvm_enabled() && cpu->enable_pmu &&
>>> - (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR) &&
>>> - (*eax & CPUID_XSAVE_XSAVES)) {
>>> - *ecx |= XSTATE_ARCH_LBR_MASK;
>>> - } else {
>>> - *ecx &= ~XSTATE_ARCH_LBR_MASK;
>>> - }
>>
>>> - } else if (count == 0xf && cpu->enable_pmu
>>> - && (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
>>> - x86_cpu_get_supported_cpuid(0xD, count, eax, ebx, ecx, edx);
>>
>> This chunk needs to be a separate patch. It's a functional change.
>
> Already mentioned this in commit message.
Before this patch, if pmu is enabled and ARCH_LBR is configured, the
leaf (0xd, 0xf) is constructed by
x86_cpu_get_supported_cpuid()
After this patch, it is constructed as
*eax = esa->size;
*ebx = 0;
*ecx = 1;
I'm not sure which part of the commit message mentions and clarifies this.
On 10/23/2025 11:56 PM, Zhao Liu wrote:
> The arch lbr state has 2 dependencies:
> * Arch lbr feature bit (CPUID 0x7.0x0:EDX[bit 19]):
>
> This bit also depends on pmu property. Mask it off if pmu is disabled
> in x86_cpu_expand_features(), so that it is not needed to repeatedly
> check whether this bit is set as well as pmu is enabled.
>
> Note this doesn't need compat option, since even KVM hasn't support
> arch lbr yet.
>
> The supported xstate is constructed based such dependency in
> cpuid_has_xsave_feature(), so if pmu is disabled and arch lbr bit is
> masked off, then arch lbr state won't be included in supported
> xstates.
>
> Thus it's safe to drop the check on arch lbr bit in CPUID 0xD
> encoding.
>
> * XSAVES feature bit (CPUID 0xD.0x1.EAX[bit 3]):
>
> Arch lbr state is a supervisor state, which requires the XSAVES
> feature support. Enumerate supported supervisor state based on XSAVES
> feature bit in x86_cpu_enable_xsave_components().
>
> Then it's safe to drop the check on XSAVES feature support during
> CPUID 0XD encoding.
>
> Suggested-by: Zide Chen <zide.chen@intel.com>
> Tested-by: Farrah Chen <farrah.chen@intel.com>
> Signed-off-by: Zhao Liu <zhao1.liu@intel.com>
Reviewed-by: Zide Chen <zide.chen@intel.com>
> ---
> target/i386/cpu.c | 22 ++++++++++------------
> 1 file changed, 10 insertions(+), 12 deletions(-)
>
> diff --git a/target/i386/cpu.c b/target/i386/cpu.c
> index 236a2f3a9426..5b7a81fcdb1b 100644
> --- a/target/i386/cpu.c
> +++ b/target/i386/cpu.c
> @@ -8174,16 +8174,6 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
> *ebx = xsave_area_size(xstate, true);
> *ecx = env->features[FEAT_XSAVE_XSS_LO];
> *edx = env->features[FEAT_XSAVE_XSS_HI];
> - if (kvm_enabled() && cpu->enable_pmu &&
> - (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR) &&
> - (*eax & CPUID_XSAVE_XSAVES)) {
> - *ecx |= XSTATE_ARCH_LBR_MASK;
> - } else {
> - *ecx &= ~XSTATE_ARCH_LBR_MASK;
> - }
> - } else if (count == 0xf && cpu->enable_pmu
> - && (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
> - x86_cpu_get_supported_cpuid(0xD, count, eax, ebx, ecx, edx);
> } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
> const ExtSaveArea *esa = &x86_ext_save_areas[count];
>
> @@ -8902,6 +8892,12 @@ static void x86_cpu_enable_xsave_components(X86CPU *cpu)
>
> mask = 0;
> for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
> + /* Skip supervisor states if XSAVES is not supported. */
> + if (CPUID_XSTATE_XSS_MASK & (1 << i) &&
> + !(env->features[FEAT_XSAVE] & CPUID_XSAVE_XSAVES)) {
> + continue;
> + }
> +
> const ExtSaveArea *esa = &x86_ext_save_areas[i];
> if (cpuid_has_xsave_feature(env, esa)) {
> mask |= (1ULL << i);
> @@ -9019,11 +9015,13 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
> }
> }
>
> - if (!cpu->pdcm_on_even_without_pmu) {
> + if (!cpu->enable_pmu) {
> /* PDCM is fixed1 bit for TDX */
> - if (!cpu->enable_pmu && !is_tdx_vm()) {
> + if (!cpu->pdcm_on_even_without_pmu && !is_tdx_vm()) {
> env->features[FEAT_1_ECX] &= ~CPUID_EXT_PDCM;
> }
> +
> + env->features[FEAT_7_0_EDX] &= ~CPUID_7_0_EDX_ARCH_LBR;
> }
>
> for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) {
© 2016 - 2025 Red Hat, Inc.