>diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
>index 03f42b218554..bfdd10773136 100644
>--- a/arch/x86/kvm/vmx/vmx.c
>+++ b/arch/x86/kvm/vmx/vmx.c
>@@ -8009,6 +8009,10 @@ void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
> 	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_LAM);
> 	kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_FRED);
> 
>+	/* Don't allow CR4.FRED=1 before all of FRED KVM support is in place. */
>+	if (!guest_can_use(vcpu, X86_FEATURE_FRED))
>+		vcpu->arch.cr4_guest_rsvd_bits |= X86_CR4_FRED;
Is this necessary? __kvm_is_valid_cr4() already ensures that guests cannot
set any CR4 bit that isn't supported by the hardware.
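
For reference, the hardware/KVM side of that check comes from the global
cr4_reserved_bits mask, which is derived from KVM's cpu caps at vendor
module init, i.e. any bit the hardware/KVM combination doesn't support is
already reserved. IIRC the snippet in __kvm_x86_vendor_init() (x86.c) looks
like this, though double check the exact location:

#define __kvm_cpu_cap_has(UNUSED_, f) kvm_cpu_cap_has(f)
	cr4_reserved_bits = __cr4_reserved_bits(__kvm_cpu_cap_has, UNUSED_);
#undef __kvm_cpu_cap_has

That is also why dropping the "= CR4_RESERVED_BITS" initializer below should
be safe: the mask is unconditionally recomputed before any VM can be created.
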
To account for hardware/KVM caps in the per-vCPU mask, I think the following
changes will work. This fixes all of the other bits as well, not just
X86_CR4_FRED.
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4a93ac1b9be9..2bec3ba8e47d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1873,6 +1873,7 @@ struct kvm_arch_async_pf {
 extern u32 __read_mostly kvm_nr_uret_msrs;
 extern bool __read_mostly allow_smaller_maxphyaddr;
 extern bool __read_mostly enable_apicv;
+extern u64 __read_mostly cr4_reserved_bits;
 extern struct kvm_x86_ops kvm_x86_ops;
 
 #define kvm_x86_call(func) static_call(kvm_x86_##func)
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 2617be544480..57d82fbcfd3f 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -393,8 +393,8 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
 	vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
 
 	kvm_pmu_refresh(vcpu);
-	vcpu->arch.cr4_guest_rsvd_bits =
-		__cr4_reserved_bits(guest_cpuid_has, vcpu);
+	vcpu->arch.cr4_guest_rsvd_bits = cr4_reserved_bits |
+					 __cr4_reserved_bits(guest_cpuid_has, vcpu);
 
 	kvm_hv_set_cpuid(vcpu, kvm_cpuid_has_hyperv(vcpu->arch.cpuid_entries,
 						    vcpu->arch.cpuid_nent));
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 34b52b49f5e6..08b42bbd2342 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -119,7 +119,7 @@ u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
 static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
 #endif
 
-static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
+u64 __read_mostly cr4_reserved_bits;
 
 #define KVM_EXIT_HYPERCALL_VALID_MASK (1 << KVM_HC_MAP_GPA_RANGE)
 
@@ -1110,13 +1110,7 @@ EXPORT_SYMBOL_GPL(kvm_emulate_xsetbv);
 
 bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
-	if (cr4 & cr4_reserved_bits)
-		return false;
-
-	if (cr4 & vcpu->arch.cr4_guest_rsvd_bits)
-		return false;
-
-	return true;
+	return !(cr4 & vcpu->arch.cr4_guest_rsvd_bits);
 }
 EXPORT_SYMBOL_GPL(__kvm_is_valid_cr4);
 
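
Two caveats, since the above is untested: it assumes cr4_reserved_bits is
populated before kvm_vcpu_after_set_cpuid() can run, which should hold given
vendor module init computes the mask before any VM can be created. And once
the global bits are folded into vcpu->arch.cr4_guest_rsvd_bits, the
standalone cr4_reserved_bits check in __kvm_is_valid_cr4() becomes
redundant, hence collapsing it to a single test.
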
>+
> 	vmx_setup_uret_msrs(vmx);
> 
> 	if (cpu_has_secondary_exec_ctrls())
>diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
>index 992e73ee2ec5..0ed91512b757 100644
>--- a/arch/x86/kvm/x86.h
>+++ b/arch/x86/kvm/x86.h
>@@ -561,6 +561,8 @@ enum kvm_msr_access {
> 		__reserved_bits |= X86_CR4_PCIDE;	\
> 	if (!__cpu_has(__c, X86_FEATURE_LAM))		\
> 		__reserved_bits |= X86_CR4_LAM_SUP;	\
>+	if (!__cpu_has(__c, X86_FEATURE_FRED))		\
>+		__reserved_bits |= X86_CR4_FRED;	\
> 	__reserved_bits;				\
> })
>
>--
>2.46.2