Dedup the handling of MSR_IA32_PRED_CMD across VMX and SVM by moving the
logic to kvm_set_msr_common(). Now that the MSR interception toggling is
handled as part of setting guest CPUID, the VMX and SVM paths are
identical.
Opportunistically massage the code to make it a wee bit denser.
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
arch/x86/kvm/svm/svm.c | 14 --------------
arch/x86/kvm/vmx/vmx.c | 14 --------------
arch/x86/kvm/x86.c | 11 +++++++++++
3 files changed, 11 insertions(+), 28 deletions(-)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index f757b436ffae..85bb535fc321 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -2942,20 +2942,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
*/
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
break;
- case MSR_IA32_PRED_CMD:
- if (!msr->host_initiated &&
- !guest_has_pred_cmd_msr(vcpu))
- return 1;
-
- if (data & ~PRED_CMD_IBPB)
- return 1;
- if (!boot_cpu_has(X86_FEATURE_IBPB))
- return 1;
- if (!data)
- break;
-
- wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
- break;
case MSR_AMD64_VIRT_SPEC_CTRL:
if (!msr->host_initiated &&
!guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 5c01c76c0d45..29807be219b9 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2285,20 +2285,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (data & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR))
return 1;
goto find_uret_msr;
- case MSR_IA32_PRED_CMD:
- if (!msr_info->host_initiated &&
- !guest_has_pred_cmd_msr(vcpu))
- return 1;
-
- if (data & ~PRED_CMD_IBPB)
- return 1;
- if (!boot_cpu_has(X86_FEATURE_IBPB))
- return 1;
- if (!data)
- break;
-
- wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
- break;
case MSR_IA32_CR_PAT:
if (!kvm_pat_valid(data))
return 1;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 237c483b1230..c83ec88da043 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3617,6 +3617,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vcpu->arch.perf_capabilities = data;
kvm_pmu_refresh(vcpu);
return 0;
+ case MSR_IA32_PRED_CMD:
+ if (!msr_info->host_initiated && !guest_has_pred_cmd_msr(vcpu))
+ return 1;
+
+ if (!boot_cpu_has(X86_FEATURE_IBPB) || (data & ~PRED_CMD_IBPB))
+ return 1;
+ if (!data)
+ break;
+
+ wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
+ break;
case MSR_EFER:
return set_efer(vcpu, msr_info);
case MSR_K7_HWCR:
--
2.40.0.rc2.332.ga46443480c-goog
On 3/22/2023 9:14 AM, Sean Christopherson wrote: > Dedup the handling of MSR_IA32_PRED_CMD across VMX and SVM by moving the > logic to kvm_set_msr_common(). Now that the MSR interception toggling is > handled as part of setting guest CPUID, the VMX and SVM paths are > identical. > > Opportunistically massage the code to make it a wee bit denser. > Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com> > Signed-off-by: Sean Christopherson <seanjc@google.com> > --- > arch/x86/kvm/svm/svm.c | 14 -------------- > arch/x86/kvm/vmx/vmx.c | 14 -------------- > arch/x86/kvm/x86.c | 11 +++++++++++ > 3 files changed, 11 insertions(+), 28 deletions(-) > > diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c > index f757b436ffae..85bb535fc321 100644 > --- a/arch/x86/kvm/svm/svm.c > +++ b/arch/x86/kvm/svm/svm.c > @@ -2942,20 +2942,6 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) > */ > set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1); > break; > - case MSR_IA32_PRED_CMD: > - if (!msr->host_initiated && > - !guest_has_pred_cmd_msr(vcpu)) > - return 1; > - > - if (data & ~PRED_CMD_IBPB) > - return 1; > - if (!boot_cpu_has(X86_FEATURE_IBPB)) > - return 1; > - if (!data) > - break; > - > - wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); > - break; > case MSR_AMD64_VIRT_SPEC_CTRL: > if (!msr->host_initiated && > !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD)) > diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c > index 5c01c76c0d45..29807be219b9 100644 > --- a/arch/x86/kvm/vmx/vmx.c > +++ b/arch/x86/kvm/vmx/vmx.c > @@ -2285,20 +2285,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) > if (data & ~(TSX_CTRL_RTM_DISABLE | TSX_CTRL_CPUID_CLEAR)) > return 1; > goto find_uret_msr; > - case MSR_IA32_PRED_CMD: > - if (!msr_info->host_initiated && > - !guest_has_pred_cmd_msr(vcpu)) > - return 1; > - > - if (data & ~PRED_CMD_IBPB) > - return 1; > - if (!boot_cpu_has(X86_FEATURE_IBPB)) > - return 1; > - if (!data) > - break; > - > - wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); > - break; > case MSR_IA32_CR_PAT: > if (!kvm_pat_valid(data)) > return 1;
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c > index 237c483b1230..c83ec88da043 100644 > --- a/arch/x86/kvm/x86.c > +++ b/arch/x86/kvm/x86.c > @@ -3617,6 +3617,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) > vcpu->arch.perf_capabilities = data; > kvm_pmu_refresh(vcpu); > return 0; > + case MSR_IA32_PRED_CMD: > + if (!msr_info->host_initiated && !guest_has_pred_cmd_msr(vcpu)) > + return 1; > + > + if (!boot_cpu_has(X86_FEATURE_IBPB) || (data & ~PRED_CMD_IBPB)) > + return 1; > + if (!data) > + break; > + > + wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB); > + break; > case MSR_EFER: > return set_efer(vcpu, msr_info); > case MSR_K7_HWCR:
© 2016 - 2026 Red Hat, Inc.