Store each "disabled exit" boolean in a single bit of a new per-VM disabled_exits bitmask rather than in a byte-sized bool, reusing the KVM_X86_DISABLE_EXITS_* uapi bits as the storage format.
No functional change intended.
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Jim Mattson <jmattson@google.com>
---
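[Reviewer note, not part of the patch: the new field stores the uapi
KVM_X86_DISABLE_EXITS_* bits directly, so the helpers in x86.h can test them
without any translation. A sketch of those definitions, assuming the values
currently in arch/x86/include/uapi/asm/kvm.h (flags added elsewhere in this
series, e.g. KVM_X86_DISABLE_EXITS_APERFMPERF, would extend the set):

    /* Assumed current uapi definitions, shown for context only. */
    #define KVM_X86_DISABLE_EXITS_MWAIT   (1 << 0)
    #define KVM_X86_DISABLE_EXITS_HLT     (1 << 1)
    #define KVM_X86_DISABLE_EXITS_PAUSE   (1 << 2)
    #define KVM_X86_DISABLE_EXITS_CSTATE  (1 << 3)
    #define KVM_X86_DISABLE_VALID_EXITS   (KVM_X86_DISABLE_EXITS_MWAIT | \
                                           KVM_X86_DISABLE_EXITS_HLT |   \
                                           KVM_X86_DISABLE_EXITS_PAUSE | \
                                           KVM_X86_DISABLE_EXITS_CSTATE)
]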
 arch/x86/include/asm/kvm_host.h |  5 +----
 arch/x86/kvm/svm/svm.c          |  2 +-
 arch/x86/kvm/vmx/vmx.c          |  2 +-
 arch/x86/kvm/x86.c              |  8 ++++----
 arch/x86/kvm/x86.h              | 13 +++++++++----
 5 files changed, 16 insertions(+), 14 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 67b464651c8d..fa912b2e7591 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1390,10 +1390,7 @@ struct kvm_arch {
gpa_t wall_clock;
- bool mwait_in_guest;
- bool hlt_in_guest;
- bool pause_in_guest;
- bool cstate_in_guest;
+ u64 disabled_exits;
unsigned long irq_sources_bitmap;
s64 kvmclock_offset;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index ffb34dadff1c..6d2d97fd967a 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -5102,7 +5102,7 @@ static int svm_vm_init(struct kvm *kvm)
}
if (!pause_filter_count || !pause_filter_thresh)
- kvm->arch.pause_in_guest = true;
+ kvm_disable_exits(kvm, KVM_X86_DISABLE_EXITS_PAUSE);
if (enable_apicv) {
int ret = avic_vm_init(kvm);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index b12414108cbf..136be14e6db0 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7619,7 +7619,7 @@ int vmx_vcpu_create(struct kvm_vcpu *vcpu)
int vmx_vm_init(struct kvm *kvm)
{
if (!ple_gap)
- kvm->arch.pause_in_guest = true;
+ kvm_disable_exits(kvm, KVM_X86_DISABLE_EXITS_PAUSE);
if (boot_cpu_has(X86_BUG_L1TF) && enable_ept) {
switch (l1tf_mitigation) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 570e7f8cbf64..8c20afda4398 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6605,13 +6605,13 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
pr_warn_once(SMT_RSB_MSG);
if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE)
- kvm->arch.pause_in_guest = true;
+ kvm_disable_exits(kvm, KVM_X86_DISABLE_EXITS_PAUSE);
if (cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT)
- kvm->arch.mwait_in_guest = true;
+ kvm_disable_exits(kvm, KVM_X86_DISABLE_EXITS_MWAIT);
if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT)
- kvm->arch.hlt_in_guest = true;
+ kvm_disable_exits(kvm, KVM_X86_DISABLE_EXITS_HLT);
if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE)
- kvm->arch.cstate_in_guest = true;
+ kvm_disable_exits(kvm, KVM_X86_DISABLE_EXITS_CSTATE);
r = 0;
disable_exits_unlock:
mutex_unlock(&kvm->lock);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 88a9475899c8..0ad36851df4c 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -481,24 +481,29 @@ static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
__rem; \
})
+static inline void kvm_disable_exits(struct kvm *kvm, u64 mask)
+{
+ kvm->arch.disabled_exits |= mask;
+}
+
static inline bool kvm_mwait_in_guest(struct kvm *kvm)
{
- return kvm->arch.mwait_in_guest;
+ return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_MWAIT;
}
static inline bool kvm_hlt_in_guest(struct kvm *kvm)
{
- return kvm->arch.hlt_in_guest;
+ return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_HLT;
}
static inline bool kvm_pause_in_guest(struct kvm *kvm)
{
- return kvm->arch.pause_in_guest;
+ return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_PAUSE;
}
static inline bool kvm_cstate_in_guest(struct kvm *kvm)
{
- return kvm->arch.cstate_in_guest;
+ return kvm->arch.disabled_exits & KVM_X86_DISABLE_EXITS_CSTATE;
}
static inline bool kvm_notify_vmexit_enabled(struct kvm *kvm)
--
2.49.0.1204.g71687c7c1d-goog
On Fri, May 30, 2025, Jim Mattson wrote:
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 570e7f8cbf64..8c20afda4398 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -6605,13 +6605,13 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
>               pr_warn_once(SMT_RSB_MSG);
>
>       if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE)
> -             kvm->arch.pause_in_guest = true;
> +             kvm_disable_exits(kvm, KVM_X86_DISABLE_EXITS_PAUSE);
>       if (cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT)
> -             kvm->arch.mwait_in_guest = true;
> +             kvm_disable_exits(kvm, KVM_X86_DISABLE_EXITS_MWAIT);
>       if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT)
> -             kvm->arch.hlt_in_guest = true;
> +             kvm_disable_exits(kvm, KVM_X86_DISABLE_EXITS_HLT);
>       if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE)
> -             kvm->arch.cstate_in_guest = true;
> +             kvm_disable_exits(kvm, KVM_X86_DISABLE_EXITS_CSTATE);
>       r = 0;
> disable_exits_unlock:
>       mutex_unlock(&kvm->lock);

Can't this simply be? The set of capabilities to disable has already been
vetted, so I don't see any reason to manually process each flag.

        mutex_lock(&kvm->lock);
        if (kvm->created_vcpus)
                goto disable_exits_unlock;

#define SMT_RSB_MSG "This processor is affected by the Cross-Thread Return Predictions vulnerability. " \
                    "KVM_CAP_X86_DISABLE_EXITS should only be used with SMT disabled or trusted guests."

        if (!mitigate_smt_rsb && boot_cpu_has_bug(X86_BUG_SMT_RSB) &&
            cpu_smt_possible() &&
            (cap->args[0] & ~(KVM_X86_DISABLE_EXITS_PAUSE |
                              KVM_X86_DISABLE_EXITS_APERFMPERF)))
                pr_warn_once(SMT_RSB_MSG);

        kvm_disable_exits(kvm, cap->args[0]);

        r = 0;
disable_exits_unlock:
        mutex_unlock(&kvm->lock);
        break;
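[For context, the vetting Sean refers to is the bounds check earlier in the
same KVM_CAP_X86_DISABLE_EXITS case. A sketch, assuming the handler's current
shape and the KVM_X86_DISABLE_VALID_EXITS mask name:

        case KVM_CAP_X86_DISABLE_EXITS:
                r = -EINVAL;
                /*
                 * Reject unknown bits up front, so a later bulk
                 * kvm_disable_exits(kvm, cap->args[0]) can only ever set
                 * bits within the valid-exits mask.
                 */
                if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS)
                        break;
                ...
]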
On Tue, Jun 24, 2025 at 2:25 PM Sean Christopherson <seanjc@google.com> wrote:

> Can't this simply be? The set of capabilities to disable has already been
> vetted, so I don't see any reason to manually process each flag.

I love it! Thank you.