Sapphire Rapids has no model-specific LBRs, and instead only exposes
architectural LBRs. As documented in the Architectural Last Branch
Records specification, on processors not supporting model-specific
LBRs MSR_IA32_DEBUGCTLMSR.LBR has no meaning: it can be written as 0
or 1, but reads will always return 0.
Implement support in vmx_msr_write_intercept() by adding generic
detection of the absence of model-specific LBRs: check whether the LBR
format reported in PERF_CAPABILITIES is 0x3f, which the manual
explicitly lists as the value signaling that model-specific LBRs are
not present.
Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Note: the re-indentation of vmx_msr_write_intercept() resulting from
the addition of the new condition is left to a follow-up patch, in
order to aid readability of this change.
---
xen/arch/x86/hvm/vmx/vmx.c | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+)
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index cf428a4849..3f45ac05c6 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -3007,6 +3007,8 @@ static const struct lbr_info {
{ MSR_GM_LASTBRANCH_0_FROM_IP, NUM_MSR_GM_LASTBRANCH_FROM_TO },
{ MSR_GM_LASTBRANCH_0_TO_IP, NUM_MSR_GM_LASTBRANCH_FROM_TO },
{ 0, 0 }
+}, no_lbr[] = {
+ {0, 0}
};
static const struct lbr_info *last_branch_msr_get(void)
@@ -3070,6 +3072,21 @@ static const struct lbr_info *last_branch_msr_get(void)
/* Goldmont */
case 0x5c: case 0x5f:
return gm_lbr;
+
+ default:
+ if ( cpu_has_pdcm )
+ {
+ uint64_t cap;
+
+ rdmsrl(MSR_IA32_PERF_CAPABILITIES, cap);
+ if ( (cap & MSR_IA32_PERF_CAP_LBR_FORMAT) == 0x3f )
+ /*
+ * On processors that do not support model-specific LBRs,
+ * PERF_CAPABILITIES.LBR_FMT will have the value 0x3f.
+ */
+ return no_lbr;
+ }
+ break;
}
break;
@@ -3521,6 +3538,8 @@ static int cf_check vmx_msr_write_intercept(
return X86EMUL_OKAY;
}
+ if ( lbr->count )
+ {
for ( ; lbr->count; lbr++ )
{
unsigned int i;
@@ -3546,6 +3565,10 @@ static int cf_check vmx_msr_write_intercept(
v->arch.hvm.vmx.lbr_flags |= LBR_FIXUP_TSX;
if ( ler_to_fixup_needed )
v->arch.hvm.vmx.lbr_flags |= LBR_FIXUP_LER_TO;
+ }
+ else
+ /* No model specific LBRs, ignore DEBUGCTLMSR.LBR. */
+ msr_content &= ~IA32_DEBUGCTLMSR_LBR;
}
__vmwrite(GUEST_IA32_DEBUGCTL, msr_content);
--
2.36.0
On 20.05.2022 15:37, Roger Pau Monne wrote:
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -3007,6 +3007,8 @@ static const struct lbr_info {
> { MSR_GM_LASTBRANCH_0_FROM_IP, NUM_MSR_GM_LASTBRANCH_FROM_TO },
> { MSR_GM_LASTBRANCH_0_TO_IP, NUM_MSR_GM_LASTBRANCH_FROM_TO },
> { 0, 0 }
> +}, no_lbr[] = {
> + {0, 0}
> };
Instead of introducing this and ...
> @@ -3070,6 +3072,21 @@ static const struct lbr_info *last_branch_msr_get(void)
> /* Goldmont */
> case 0x5c: case 0x5f:
> return gm_lbr;
> +
> + default:
> + if ( cpu_has_pdcm )
> + {
> + uint64_t cap;
> +
> + rdmsrl(MSR_IA32_PERF_CAPABILITIES, cap);
> + if ( (cap & MSR_IA32_PERF_CAP_LBR_FORMAT) == 0x3f )
> + /*
> + * On processors that do not support model-specific LBRs,
> + * PERF_CAPABILITIES.LBR_FMT will have the value 0x3f.
> + */
> + return no_lbr;
... doing this MSR read every time, can't you store a mask value
once during boot, which you apply to msr_content ...
> @@ -3521,6 +3538,8 @@ static int cf_check vmx_msr_write_intercept(
> return X86EMUL_OKAY;
> }
>
> + if ( lbr->count )
> + {
> for ( ; lbr->count; lbr++ )
> {
> unsigned int i;
... ahead of the bigger if() enclosing this code (thus also avoiding
the need to re-indent)?
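
For illustration, something along these lines (a rough sketch only, not
a tested implementation; the names lbr_debugctl_mask and
lbr_calibrate_mask are made up here, and where the boot-time hook would
actually live is left open):

static uint64_t __read_mostly lbr_debugctl_mask = ~0ULL;

/* Called once during boot, e.g. from VMX setup. */
static void __init lbr_calibrate_mask(void)
{
    uint64_t cap;

    if ( !cpu_has_pdcm )
        return;

    rdmsrl(MSR_IA32_PERF_CAPABILITIES, cap);
    /* LBR format 0x3f: no model-specific LBRs, DEBUGCTLMSR.LBR is a no-op. */
    if ( (cap & MSR_IA32_PERF_CAP_LBR_FORMAT) == 0x3f )
        lbr_debugctl_mask &= ~IA32_DEBUGCTLMSR_LBR;
}

/* In vmx_msr_write_intercept(), before the existing LBR handling: */
    msr_content &= lbr_debugctl_mask;

That would avoid both the per-write MSR read and the extra nesting.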
Jan