From: Xin Li <xin3.li@intel.com>

Extend nested VMX field validation to include FRED-specific VMCS fields,
mirroring hardware behavior.

This enables support for nested FRED by ensuring control and guest/host
state fields are properly checked.

Signed-off-by: Xin Li <xin3.li@intel.com>
Signed-off-by: Xin Li (Intel) <xin@zytor.com>
Tested-by: Shan Kang <shan.kang@intel.com>
Tested-by: Xuelian Guo <xuelian.guo@intel.com>
---
Change in v5:
* Add TB from Xuelian Guo.
---
arch/x86/kvm/vmx/nested.c | 117 +++++++++++++++++++++++++++++++++-----
1 file changed, 104 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 63cdfffba58b..8682709d8759 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3030,6 +3030,8 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ bool fred_enabled = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) &&
+ (vmcs12->guest_cr4 & X86_CR4_FRED);
if (CC(!vmx_control_verify(vmcs12->vm_entry_controls,
vmx->nested.msrs.entry_ctls_low,
@@ -3047,22 +3049,11 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
+ bool has_nested_exception = vmx->nested.msrs.basic & VMX_BASIC_NESTED_EXCEPTION;
bool urg = nested_cpu_has2(vmcs12,
SECONDARY_EXEC_UNRESTRICTED_GUEST);
bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
- /* VM-entry interruption-info field: interruption type */
- if (CC(intr_type == INTR_TYPE_RESERVED) ||
- CC(intr_type == INTR_TYPE_OTHER_EVENT &&
- !nested_cpu_supports_monitor_trap_flag(vcpu)))
- return -EINVAL;
-
- /* VM-entry interruption-info field: vector */
- if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
- CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
- CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
- return -EINVAL;
-
/*
* Cannot deliver error code in real mode or if the interrupt
* type is not hardware exception. For other cases, do the
@@ -3086,8 +3077,28 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK))
return -EINVAL;
- /* VM-entry instruction length */
+ /*
+ * When the CPU enumerates VMX nested-exception support, bit 13
+ * (set to indicate a nested exception) of the intr info field
+ * may have value 1. Otherwise bit 13 is reserved.
+ */
+ if (CC(!(has_nested_exception && intr_type == INTR_TYPE_HARD_EXCEPTION) &&
+ intr_info & INTR_INFO_NESTED_EXCEPTION_MASK))
+ return -EINVAL;
+
switch (intr_type) {
+ case INTR_TYPE_EXT_INTR:
+ break;
+ case INTR_TYPE_RESERVED:
+ return -EINVAL;
+ case INTR_TYPE_NMI_INTR:
+ if (CC(vector != NMI_VECTOR))
+ return -EINVAL;
+ break;
+ case INTR_TYPE_HARD_EXCEPTION:
+ if (CC(vector > 31))
+ return -EINVAL;
+ break;
case INTR_TYPE_SOFT_EXCEPTION:
case INTR_TYPE_SOFT_INTR:
case INTR_TYPE_PRIV_SW_EXCEPTION:
@@ -3095,6 +3106,24 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
CC(vmcs12->vm_entry_instruction_len == 0 &&
CC(!nested_cpu_has_zero_length_injection(vcpu))))
return -EINVAL;
+ break;
+ case INTR_TYPE_OTHER_EVENT:
+ switch (vector) {
+ case 0:
+ if (CC(!nested_cpu_supports_monitor_trap_flag(vcpu)))
+ return -EINVAL;
+ break;
+ case 1:
+ case 2:
+ if (CC(!fred_enabled))
+ return -EINVAL;
+ if (CC(vmcs12->vm_entry_instruction_len > X86_MAX_INSTRUCTION_LENGTH))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
}
}
@@ -3213,9 +3242,29 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
if (ia32e) {
if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
return -EINVAL;
+ if (vmcs12->vm_exit_controls & VM_EXIT_ACTIVATE_SECONDARY_CONTROLS &&
+ vmcs12->secondary_vm_exit_controls & SECONDARY_VM_EXIT_LOAD_IA32_FRED) {
+ if (CC(vmcs12->host_ia32_fred_config &
+ (BIT_ULL(11) | GENMASK_ULL(5, 4) | BIT_ULL(2))) ||
+ CC(vmcs12->host_ia32_fred_rsp1 & GENMASK_ULL(5, 0)) ||
+ CC(vmcs12->host_ia32_fred_rsp2 & GENMASK_ULL(5, 0)) ||
+ CC(vmcs12->host_ia32_fred_rsp3 & GENMASK_ULL(5, 0)) ||
+ CC(vmcs12->host_ia32_fred_ssp1 & GENMASK_ULL(2, 0)) ||
+ CC(vmcs12->host_ia32_fred_ssp2 & GENMASK_ULL(2, 0)) ||
+ CC(vmcs12->host_ia32_fred_ssp3 & GENMASK_ULL(2, 0)) ||
+ CC(is_noncanonical_msr_address(vmcs12->host_ia32_fred_config & PAGE_MASK, vcpu)) ||
+ CC(is_noncanonical_msr_address(vmcs12->host_ia32_fred_rsp1, vcpu)) ||
+ CC(is_noncanonical_msr_address(vmcs12->host_ia32_fred_rsp2, vcpu)) ||
+ CC(is_noncanonical_msr_address(vmcs12->host_ia32_fred_rsp3, vcpu)) ||
+ CC(is_noncanonical_msr_address(vmcs12->host_ia32_fred_ssp1, vcpu)) ||
+ CC(is_noncanonical_msr_address(vmcs12->host_ia32_fred_ssp2, vcpu)) ||
+ CC(is_noncanonical_msr_address(vmcs12->host_ia32_fred_ssp3, vcpu)))
+ return -EINVAL;
+ }
} else {
if (CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
CC(vmcs12->host_cr4 & X86_CR4_PCIDE) ||
+ CC(vmcs12->host_cr4 & X86_CR4_FRED) ||
CC((vmcs12->host_rip) >> 32))
return -EINVAL;
}
@@ -3384,6 +3433,48 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))))
return -EINVAL;
+ if (ia32e) {
+ if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_FRED) {
+ if (CC(vmcs12->guest_ia32_fred_config &
+ (BIT_ULL(11) | GENMASK_ULL(5, 4) | BIT_ULL(2))) ||
+ CC(vmcs12->guest_ia32_fred_rsp1 & GENMASK_ULL(5, 0)) ||
+ CC(vmcs12->guest_ia32_fred_rsp2 & GENMASK_ULL(5, 0)) ||
+ CC(vmcs12->guest_ia32_fred_rsp3 & GENMASK_ULL(5, 0)) ||
+ CC(vmcs12->guest_ia32_fred_ssp1 & GENMASK_ULL(2, 0)) ||
+ CC(vmcs12->guest_ia32_fred_ssp2 & GENMASK_ULL(2, 0)) ||
+ CC(vmcs12->guest_ia32_fred_ssp3 & GENMASK_ULL(2, 0)) ||
+ CC(is_noncanonical_msr_address(vmcs12->guest_ia32_fred_config & PAGE_MASK, vcpu)) ||
+ CC(is_noncanonical_msr_address(vmcs12->guest_ia32_fred_rsp1, vcpu)) ||
+ CC(is_noncanonical_msr_address(vmcs12->guest_ia32_fred_rsp2, vcpu)) ||
+ CC(is_noncanonical_msr_address(vmcs12->guest_ia32_fred_rsp3, vcpu)) ||
+ CC(is_noncanonical_msr_address(vmcs12->guest_ia32_fred_ssp1, vcpu)) ||
+ CC(is_noncanonical_msr_address(vmcs12->guest_ia32_fred_ssp2, vcpu)) ||
+ CC(is_noncanonical_msr_address(vmcs12->guest_ia32_fred_ssp3, vcpu)))
+ return -EINVAL;
+ }
+ if (vmcs12->guest_cr4 & X86_CR4_FRED) {
+ unsigned int ss_dpl = VMX_AR_DPL(vmcs12->guest_ss_ar_bytes);
+ switch (ss_dpl) {
+ case 0:
+ if (CC(!(vmcs12->guest_cs_ar_bytes & VMX_AR_L_MASK)))
+ return -EINVAL;
+ break;
+ case 1:
+ case 2:
+ return -EINVAL;
+ case 3:
+ if (CC(vmcs12->guest_rflags & X86_EFLAGS_IOPL))
+ return -EINVAL;
+ if (CC(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_STI))
+ return -EINVAL;
+ break;
+ }
+ }
+ } else {
+ if (CC(vmcs12->guest_cr4 & X86_CR4_FRED))
+ return -EINVAL;
+ }
+
if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_CET_STATE) {
if (nested_vmx_check_cet_state_common(vcpu, vmcs12->guest_s_cet,
vmcs12->guest_ssp,
--
2.51.0
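For readers skimming the reserved-bit checks in the host- and guest-state hunks above, the masks encode roughly the following constraints (the constant names below are purely illustrative, not from the kernel tree):

	/* Implied by the FRED host/guest state checks above: */
	#define FRED_CONFIG_RSVD_BITS	(BIT_ULL(11) | GENMASK_ULL(5, 4) | BIT_ULL(2))
	#define FRED_RSP_RSVD_BITS	GENMASK_ULL(5, 0)	/* RSP1-3 must be 64-byte aligned */
	#define FRED_SSP_RSVD_BITS	GENMASK_ULL(2, 0)	/* SSP1-3 must be 8-byte aligned */
	/*
	 * In addition, the entry-point portion of FRED_CONFIG (config & PAGE_MASK)
	 * and every RSP/SSP value must be a canonical address.
	 */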
On Sun, Oct 26, 2025 at 01:19:08PM -0700, Xin Li (Intel) wrote:
>From: Xin Li <xin3.li@intel.com>
>
>Extend nested VMX field validation to include FRED-specific VMCS fields,
>mirroring hardware behavior.
>
>This enables support for nested FRED by ensuring control and guest/host
>state fields are properly checked.
>
>Signed-off-by: Xin Li <xin3.li@intel.com>
>Signed-off-by: Xin Li (Intel) <xin@zytor.com>
>Tested-by: Shan Kang <shan.kang@intel.com>
>Tested-by: Xuelian Guo <xuelian.guo@intel.com>
Reviewed-by: Chao Gao <chao.gao@intel.com>
There are some minor issues below that may need to be fixed.
>---
>
>Change in v5:
>* Add TB from Xuelian Guo.
>---
> arch/x86/kvm/vmx/nested.c | 117 +++++++++++++++++++++++++++++++++-----
> 1 file changed, 104 insertions(+), 13 deletions(-)
>
>diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
>index 63cdfffba58b..8682709d8759 100644
>--- a/arch/x86/kvm/vmx/nested.c
>+++ b/arch/x86/kvm/vmx/nested.c
>@@ -3030,6 +3030,8 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
> struct vmcs12 *vmcs12)
> {
> struct vcpu_vmx *vmx = to_vmx(vcpu);
>+ bool fred_enabled = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) &&
>+ (vmcs12->guest_cr4 & X86_CR4_FRED);
>
> if (CC(!vmx_control_verify(vmcs12->vm_entry_controls,
> vmx->nested.msrs.entry_ctls_low,
>@@ -3047,22 +3049,11 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
> u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
> u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
> bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
>+ bool has_nested_exception = vmx->nested.msrs.basic & VMX_BASIC_NESTED_EXCEPTION;
has_error_code reflects whether the to-be-injected event has an error code.
Using has_nested_exception for CPU capabilities here is a bit confusing.
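For illustration, either direction would remove the ambiguity (hypothetical sketches, neither is what this version of the patch does):

	/* (a) name the local after the CPU capability it tests ... */
	bool cpu_has_nested_exception =
		vmx->nested.msrs.basic & VMX_BASIC_NESTED_EXCEPTION;

	/* (b) ... or make it describe the injected event, like has_error_code */
	bool has_nested_exception = intr_info & INTR_INFO_NESTED_EXCEPTION_MASK;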
> bool urg = nested_cpu_has2(vmcs12,
> SECONDARY_EXEC_UNRESTRICTED_GUEST);
> bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
>
>- /* VM-entry interruption-info field: interruption type */
>- if (CC(intr_type == INTR_TYPE_RESERVED) ||
>- CC(intr_type == INTR_TYPE_OTHER_EVENT &&
>- !nested_cpu_supports_monitor_trap_flag(vcpu)))
>- return -EINVAL;
>-
>- /* VM-entry interruption-info field: vector */
>- if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
>- CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
>- CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
>- return -EINVAL;
>-
> /*
> * Cannot deliver error code in real mode or if the interrupt
> * type is not hardware exception. For other cases, do the
>@@ -3086,8 +3077,28 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
> if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK))
> return -EINVAL;
>
>- /* VM-entry instruction length */
>+ /*
>+ * When the CPU enumerates VMX nested-exception support, bit 13
>+ * (set to indicate a nested exception) of the intr info field
>+ * may have value 1. Otherwise bit 13 is reserved.
>+ */
>+ if (CC(!(has_nested_exception && intr_type == INTR_TYPE_HARD_EXCEPTION) &&
>+ intr_info & INTR_INFO_NESTED_EXCEPTION_MASK))
>+ return -EINVAL;
>+
> switch (intr_type) {
>+ case INTR_TYPE_EXT_INTR:
>+ break;
This can be dropped, as the "default" case will handle it.
>+ case INTR_TYPE_RESERVED:
>+ return -EINVAL;
I think we need to add a CC() statement to make it easier to correlate a
VM-entry failure with a specific consistency check.
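For example, hoisting the reserved-type test keeps it inside CC() (sketch only; the incremental diff later in the thread ends up doing exactly this):

	if (CC(intr_type == INTR_TYPE_RESERVED))
		return -EINVAL;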
>+ case INTR_TYPE_NMI_INTR:
>+ if (CC(vector != NMI_VECTOR))
>+ return -EINVAL;
>+ break;
>+ case INTR_TYPE_HARD_EXCEPTION:
>+ if (CC(vector > 31))
>+ return -EINVAL;
>+ break;
> case INTR_TYPE_SOFT_EXCEPTION:
> case INTR_TYPE_SOFT_INTR:
> case INTR_TYPE_PRIV_SW_EXCEPTION:
>@@ -3095,6 +3106,24 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
> CC(vmcs12->vm_entry_instruction_len == 0 &&
> CC(!nested_cpu_has_zero_length_injection(vcpu))))
> return -EINVAL;
>+ break;
>+ case INTR_TYPE_OTHER_EVENT:
>+ switch (vector) {
>+ case 0:
>+ if (CC(!nested_cpu_supports_monitor_trap_flag(vcpu)))
>+ return -EINVAL;
Does this nested_cpu_supports_monitor_trap_flag() check apply to case 1/2?
>+ break;
>+ case 1:
>+ case 2:
>+ if (CC(!fred_enabled))
>+ return -EINVAL;
>+ if (CC(vmcs12->vm_entry_instruction_len > X86_MAX_INSTRUCTION_LENGTH))
>+ return -EINVAL;
>+ break;
>+ default:
>+ return -EINVAL;
Again, I think -EINVAL should be accompanied by a CC() statement.
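One option is a range check ahead of the inner switch, so the failure is still tagged by a CC() (sketch, mirroring the incremental diff at the end of the thread):

	case INTR_TYPE_OTHER_EVENT:
		/* reject undefined "other event" vectors up front, via CC() */
		if (CC(vector > 3))
			return -EINVAL;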
>+ }
>+ break;
> }
> }
>
>@@ -3213,9 +3242,29 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
> if (ia32e) {
> if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
> return -EINVAL;
>+ if (vmcs12->vm_exit_controls & VM_EXIT_ACTIVATE_SECONDARY_CONTROLS &&
>+ vmcs12->secondary_vm_exit_controls & SECONDARY_VM_EXIT_LOAD_IA32_FRED) {
>+ if (CC(vmcs12->host_ia32_fred_config &
>+ (BIT_ULL(11) | GENMASK_ULL(5, 4) | BIT_ULL(2))) ||
>+ CC(vmcs12->host_ia32_fred_rsp1 & GENMASK_ULL(5, 0)) ||
>+ CC(vmcs12->host_ia32_fred_rsp2 & GENMASK_ULL(5, 0)) ||
>+ CC(vmcs12->host_ia32_fred_rsp3 & GENMASK_ULL(5, 0)) ||
>+ CC(vmcs12->host_ia32_fred_ssp1 & GENMASK_ULL(2, 0)) ||
>+ CC(vmcs12->host_ia32_fred_ssp2 & GENMASK_ULL(2, 0)) ||
>+ CC(vmcs12->host_ia32_fred_ssp3 & GENMASK_ULL(2, 0)) ||
>+ CC(is_noncanonical_msr_address(vmcs12->host_ia32_fred_config & PAGE_MASK, vcpu)) ||
>+ CC(is_noncanonical_msr_address(vmcs12->host_ia32_fred_rsp1, vcpu)) ||
>+ CC(is_noncanonical_msr_address(vmcs12->host_ia32_fred_rsp2, vcpu)) ||
>+ CC(is_noncanonical_msr_address(vmcs12->host_ia32_fred_rsp3, vcpu)) ||
>+ CC(is_noncanonical_msr_address(vmcs12->host_ia32_fred_ssp1, vcpu)) ||
>+ CC(is_noncanonical_msr_address(vmcs12->host_ia32_fred_ssp2, vcpu)) ||
>+ CC(is_noncanonical_msr_address(vmcs12->host_ia32_fred_ssp3, vcpu)))
>+ return -EINVAL;
>+ }
> } else {
> if (CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
> CC(vmcs12->host_cr4 & X86_CR4_PCIDE) ||
>+ CC(vmcs12->host_cr4 & X86_CR4_FRED) ||
> CC((vmcs12->host_rip) >> 32))
> return -EINVAL;
> }
>@@ -3384,6 +3433,48 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
> CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))))
> return -EINVAL;
>
>+ if (ia32e) {
>+ if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_FRED) {
>+ if (CC(vmcs12->guest_ia32_fred_config &
>+ (BIT_ULL(11) | GENMASK_ULL(5, 4) | BIT_ULL(2))) ||
>+ CC(vmcs12->guest_ia32_fred_rsp1 & GENMASK_ULL(5, 0)) ||
>+ CC(vmcs12->guest_ia32_fred_rsp2 & GENMASK_ULL(5, 0)) ||
>+ CC(vmcs12->guest_ia32_fred_rsp3 & GENMASK_ULL(5, 0)) ||
>+ CC(vmcs12->guest_ia32_fred_ssp1 & GENMASK_ULL(2, 0)) ||
>+ CC(vmcs12->guest_ia32_fred_ssp2 & GENMASK_ULL(2, 0)) ||
>+ CC(vmcs12->guest_ia32_fred_ssp3 & GENMASK_ULL(2, 0)) ||
>+ CC(is_noncanonical_msr_address(vmcs12->guest_ia32_fred_config & PAGE_MASK, vcpu)) ||
>+ CC(is_noncanonical_msr_address(vmcs12->guest_ia32_fred_rsp1, vcpu)) ||
>+ CC(is_noncanonical_msr_address(vmcs12->guest_ia32_fred_rsp2, vcpu)) ||
>+ CC(is_noncanonical_msr_address(vmcs12->guest_ia32_fred_rsp3, vcpu)) ||
>+ CC(is_noncanonical_msr_address(vmcs12->guest_ia32_fred_ssp1, vcpu)) ||
>+ CC(is_noncanonical_msr_address(vmcs12->guest_ia32_fred_ssp2, vcpu)) ||
>+ CC(is_noncanonical_msr_address(vmcs12->guest_ia32_fred_ssp3, vcpu)))
>+ return -EINVAL;
>+ }
>+ if (vmcs12->guest_cr4 & X86_CR4_FRED) {
>+ unsigned int ss_dpl = VMX_AR_DPL(vmcs12->guest_ss_ar_bytes);
>+ switch (ss_dpl) {
>+ case 0:
>+ if (CC(!(vmcs12->guest_cs_ar_bytes & VMX_AR_L_MASK)))
>+ return -EINVAL;
>+ break;
>+ case 1:
>+ case 2:
>+ return -EINVAL;
Ditto.
>+ case 3:
>+ if (CC(vmcs12->guest_rflags & X86_EFLAGS_IOPL))
>+ return -EINVAL;
>+ if (CC(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_STI))
>+ return -EINVAL;
>+ break;
>+ }
>+ }
>+ } else {
>+ if (CC(vmcs12->guest_cr4 & X86_CR4_FRED))
>+ return -EINVAL;
>+ }
>+
> if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_CET_STATE) {
> if (nested_vmx_check_cet_state_common(vcpu, vmcs12->guest_s_cet,
> vmcs12->guest_ssp,
>--
>2.51.0
>
>
>> @@ -3047,22 +3049,11 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
>> u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
>> u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
>> bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
>> + bool has_nested_exception = vmx->nested.msrs.basic & VMX_BASIC_NESTED_EXCEPTION;
>
> has_error_code reflects whether the to-be-injected event has an error code.
> Using has_nested_exception for CPU capabilities here is a bit confusing.
Looks better to just remove has_error_code.
>
>> bool urg = nested_cpu_has2(vmcs12,
>> SECONDARY_EXEC_UNRESTRICTED_GUEST);
>> bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
>>
>> - /* VM-entry interruption-info field: interruption type */
>> - if (CC(intr_type == INTR_TYPE_RESERVED) ||
>> - CC(intr_type == INTR_TYPE_OTHER_EVENT &&
>> - !nested_cpu_supports_monitor_trap_flag(vcpu)))
>> - return -EINVAL;
>> -
>> - /* VM-entry interruption-info field: vector */
>> - if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
>> - CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
>> - CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
>> - return -EINVAL;
>> -
>> /*
>> * Cannot deliver error code in real mode or if the interrupt
>> * type is not hardware exception. For other cases, do the
>> @@ -3086,8 +3077,28 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
>> if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK))
>> return -EINVAL;
>>
>> - /* VM-entry instruction length */
>> + /*
>> + * When the CPU enumerates VMX nested-exception support, bit 13
>> + * (set to indicate a nested exception) of the intr info field
>> + * may have value 1. Otherwise bit 13 is reserved.
>> + */
>> + if (CC(!(has_nested_exception && intr_type == INTR_TYPE_HARD_EXCEPTION) &&
>> + intr_info & INTR_INFO_NESTED_EXCEPTION_MASK))
>> + return -EINVAL;
>> +
>> switch (intr_type) {
>> + case INTR_TYPE_EXT_INTR:
>> + break;
>
> This can be dropped, as the "default" case will handle it.
We don’t have a default case, as all 8 cases are listed (INTR_INFO_INTR_TYPE_MASK is 0x700).
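For reference (names from arch/x86/include/asm/vmx.h; the numeric values are my recollection of the SDM event-type encoding):

	/*
	 * intr_info bits 10:8 (event type):
	 *   0 INTR_TYPE_EXT_INTR          external interrupt
	 *   1 INTR_TYPE_RESERVED          reserved
	 *   2 INTR_TYPE_NMI_INTR          NMI
	 *   3 INTR_TYPE_HARD_EXCEPTION    hardware exception
	 *   4 INTR_TYPE_SOFT_INTR         software interrupt
	 *   5 INTR_TYPE_PRIV_SW_EXCEPTION privileged software exception (INT1)
	 *   6 INTR_TYPE_SOFT_EXCEPTION    software exception (INT3, INTO)
	 *   7 INTR_TYPE_OTHER_EVENT       other event (MTF; FRED uses vectors 1/2)
	 */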
>
>> + case INTR_TYPE_RESERVED:
>> + return -EINVAL;
>
> I think we need to add a CC() statement to make it easier to correlate a
> VM-entry failure with a specific consistency check.
What do you want me to put in CC()?
CC(intr_type == INTR_TYPE_RESERVED)?
>
>> + case INTR_TYPE_NMI_INTR:
>> + if (CC(vector != NMI_VECTOR))
>> + return -EINVAL;
>> + break;
>> + case INTR_TYPE_HARD_EXCEPTION:
>> + if (CC(vector > 31))
>> + return -EINVAL;
>> + break;
>> case INTR_TYPE_SOFT_EXCEPTION:
>> case INTR_TYPE_SOFT_INTR:
>> case INTR_TYPE_PRIV_SW_EXCEPTION:
>> @@ -3095,6 +3106,24 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
>> CC(vmcs12->vm_entry_instruction_len == 0 &&
>> CC(!nested_cpu_has_zero_length_injection(vcpu))))
>> return -EINVAL;
>> + break;
>> + case INTR_TYPE_OTHER_EVENT:
>> + switch (vector) {
>> + case 0:
>> + if (CC(!nested_cpu_supports_monitor_trap_flag(vcpu)))
>> + return -EINVAL;
>
> Does this nested_cpu_supports_monitor_trap_flag() check apply to case 1/2?
I did check the spec when writing the code, but I will double-check.
>
>> + break;
>> + case 1:
>> + case 2:
>> + if (CC(!fred_enabled))
>> + return -EINVAL;
>> + if (CC(vmcs12->vm_entry_instruction_len > X86_MAX_INSTRUCTION_LENGTH))
>> + return -EINVAL;
>> + break;
>> + default:
>> + return -EINVAL;
>
> Again, I think -EINVAL should be accompanied by a CC() statement.
>
>> + }
>> + break;
>> }
>> }
>>
>> @@ -3213,9 +3242,29 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
>> if (ia32e) {
>> if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
>> return -EINVAL;
>> + if (vmcs12->vm_exit_controls & VM_EXIT_ACTIVATE_SECONDARY_CONTROLS &&
>> + vmcs12->secondary_vm_exit_controls & SECONDARY_VM_EXIT_LOAD_IA32_FRED) {
>> + if (CC(vmcs12->host_ia32_fred_config &
>> + (BIT_ULL(11) | GENMASK_ULL(5, 4) | BIT_ULL(2))) ||
>> + CC(vmcs12->host_ia32_fred_rsp1 & GENMASK_ULL(5, 0)) ||
>> + CC(vmcs12->host_ia32_fred_rsp2 & GENMASK_ULL(5, 0)) ||
>> + CC(vmcs12->host_ia32_fred_rsp3 & GENMASK_ULL(5, 0)) ||
>> + CC(vmcs12->host_ia32_fred_ssp1 & GENMASK_ULL(2, 0)) ||
>> + CC(vmcs12->host_ia32_fred_ssp2 & GENMASK_ULL(2, 0)) ||
>> + CC(vmcs12->host_ia32_fred_ssp3 & GENMASK_ULL(2, 0)) ||
>> + CC(is_noncanonical_msr_address(vmcs12->host_ia32_fred_config & PAGE_MASK, vcpu)) ||
>> + CC(is_noncanonical_msr_address(vmcs12->host_ia32_fred_rsp1, vcpu)) ||
>> + CC(is_noncanonical_msr_address(vmcs12->host_ia32_fred_rsp2, vcpu)) ||
>> + CC(is_noncanonical_msr_address(vmcs12->host_ia32_fred_rsp3, vcpu)) ||
>> + CC(is_noncanonical_msr_address(vmcs12->host_ia32_fred_ssp1, vcpu)) ||
>> + CC(is_noncanonical_msr_address(vmcs12->host_ia32_fred_ssp2, vcpu)) ||
>> + CC(is_noncanonical_msr_address(vmcs12->host_ia32_fred_ssp3, vcpu)))
>> + return -EINVAL;
>> + }
>> } else {
>> if (CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
>> CC(vmcs12->host_cr4 & X86_CR4_PCIDE) ||
>> + CC(vmcs12->host_cr4 & X86_CR4_FRED) ||
>> CC((vmcs12->host_rip) >> 32))
>> return -EINVAL;
>> }
>> @@ -3384,6 +3433,48 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
>> CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))))
>> return -EINVAL;
>>
>> + if (ia32e) {
>> + if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_FRED) {
>> + if (CC(vmcs12->guest_ia32_fred_config &
>> + (BIT_ULL(11) | GENMASK_ULL(5, 4) | BIT_ULL(2))) ||
>> + CC(vmcs12->guest_ia32_fred_rsp1 & GENMASK_ULL(5, 0)) ||
>> + CC(vmcs12->guest_ia32_fred_rsp2 & GENMASK_ULL(5, 0)) ||
>> + CC(vmcs12->guest_ia32_fred_rsp3 & GENMASK_ULL(5, 0)) ||
>> + CC(vmcs12->guest_ia32_fred_ssp1 & GENMASK_ULL(2, 0)) ||
>> + CC(vmcs12->guest_ia32_fred_ssp2 & GENMASK_ULL(2, 0)) ||
>> + CC(vmcs12->guest_ia32_fred_ssp3 & GENMASK_ULL(2, 0)) ||
>> + CC(is_noncanonical_msr_address(vmcs12->guest_ia32_fred_config & PAGE_MASK, vcpu)) ||
>> + CC(is_noncanonical_msr_address(vmcs12->guest_ia32_fred_rsp1, vcpu)) ||
>> + CC(is_noncanonical_msr_address(vmcs12->guest_ia32_fred_rsp2, vcpu)) ||
>> + CC(is_noncanonical_msr_address(vmcs12->guest_ia32_fred_rsp3, vcpu)) ||
>> + CC(is_noncanonical_msr_address(vmcs12->guest_ia32_fred_ssp1, vcpu)) ||
>> + CC(is_noncanonical_msr_address(vmcs12->guest_ia32_fred_ssp2, vcpu)) ||
>> + CC(is_noncanonical_msr_address(vmcs12->guest_ia32_fred_ssp3, vcpu)))
>> + return -EINVAL;
>> + }
>> + if (vmcs12->guest_cr4 & X86_CR4_FRED) {
>> + unsigned int ss_dpl = VMX_AR_DPL(vmcs12->guest_ss_ar_bytes);
>> + switch (ss_dpl) {
>> + case 0:
>> + if (CC(!(vmcs12->guest_cs_ar_bytes & VMX_AR_L_MASK)))
>> + return -EINVAL;
>> + break;
>> + case 1:
>> + case 2:
>> + return -EINVAL;
>
> Ditto.
>
>> + case 3:
>> + if (CC(vmcs12->guest_rflags & X86_EFLAGS_IOPL))
>> + return -EINVAL;
>> + if (CC(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_STI))
>> + return -EINVAL;
>> + break;
>> + }
>> + }
>> + } else {
>> + if (CC(vmcs12->guest_cr4 & X86_CR4_FRED))
>> + return -EINVAL;
>> + }
>> +
>> if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_CET_STATE) {
>> if (nested_vmx_check_cet_state_common(vcpu, vmcs12->guest_s_cet,
>> vmcs12->guest_ssp,
>> --
>> 2.51.0
On Tue, Jan 20, 2026 at 01:19:55AM -0800, Xin Li wrote:
>>> @@ -3047,22 +3049,11 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
>>> u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
>>> u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
>>> bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
>>> + bool has_nested_exception = vmx->nested.msrs.basic & VMX_BASIC_NESTED_EXCEPTION;
>>
>> has_error_code reflects whether the to-be-injected event has an error code.
>> Using has_nested_exception for CPU capabilities here is a bit confusing.
>
>Looks better to just remove has_error_code.
>
>>
>>> bool urg = nested_cpu_has2(vmcs12,
>>> SECONDARY_EXEC_UNRESTRICTED_GUEST);
>>> bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
>>>
>>> - /* VM-entry interruption-info field: interruption type */
>>> - if (CC(intr_type == INTR_TYPE_RESERVED) ||
>>> - CC(intr_type == INTR_TYPE_OTHER_EVENT &&
>>> - !nested_cpu_supports_monitor_trap_flag(vcpu)))
>>> - return -EINVAL;
>>> -
>>> - /* VM-entry interruption-info field: vector */
>>> - if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
>>> - CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
>>> - CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
>>> - return -EINVAL;
>>> -
>>> /*
>>> * Cannot deliver error code in real mode or if the interrupt
>>> * type is not hardware exception. For other cases, do the
>>> @@ -3086,8 +3077,28 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
>>> if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK))
>>> return -EINVAL;
>>>
>>> - /* VM-entry instruction length */
>>> + /*
>>> + * When the CPU enumerates VMX nested-exception support, bit 13
>>> + * (set to indicate a nested exception) of the intr info field
>>> + * may have value 1. Otherwise bit 13 is reserved.
>>> + */
>>> + if (CC(!(has_nested_exception && intr_type == INTR_TYPE_HARD_EXCEPTION) &&
>>> + intr_info & INTR_INFO_NESTED_EXCEPTION_MASK))
>>> + return -EINVAL;
>>> +
>>> switch (intr_type) {
>>> + case INTR_TYPE_EXT_INTR:
>>> + break;
>>
>> This can be dropped, as the "default" case will handle it.
>
>We don’t have a default case, as all 8 cases are listed (INTR_INFO_INTR_TYPE_MASK is 0x700).
>
>>
>>> + case INTR_TYPE_RESERVED:
>>> + return -EINVAL;
>>
>> I think we need to add a CC() statement to make it easier to correlate a
>> VM-entry failure with a specific consistency check.
>
>What do you want me to put in CC()?
>
>CC(intr_type == INTR_TYPE_RESERVED)?
How about this incremental change?
I prefer to make has_error_code and has_nested_exception consistent, and add a
CC() statement before all "return -EINVAL" statements for debugging.
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 8682709d8759..f13df70405d9 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3049,7 +3049,7 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
- bool has_nested_exception = vmx->nested.msrs.basic & VMX_BASIC_NESTED_EXCEPTION;
+ bool has_nested_exception = intr_info & INTR_INFO_NESTED_EXCEPTION_MASK;
bool urg = nested_cpu_has2(vmcs12,
SECONDARY_EXEC_UNRESTRICTED_GUEST);
bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
@@ -3077,20 +3077,10 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK))
return -EINVAL;
- /*
- * When the CPU enumerates VMX nested-exception support, bit 13
- * (set to indicate a nested exception) of the intr info field
- * may have value 1. Otherwise bit 13 is reserved.
- */
- if (CC(!(has_nested_exception && intr_type == INTR_TYPE_HARD_EXCEPTION) &&
- intr_info & INTR_INFO_NESTED_EXCEPTION_MASK))
+ if (CC(intr_type == INTR_TYPE_RESERVED))
return -EINVAL;
switch (intr_type) {
- case INTR_TYPE_EXT_INTR:
- break;
- case INTR_TYPE_RESERVED:
- return -EINVAL;
case INTR_TYPE_NMI_INTR:
if (CC(vector != NMI_VECTOR))
return -EINVAL;
@@ -3098,6 +3088,13 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
case INTR_TYPE_HARD_EXCEPTION:
if (CC(vector > 31))
return -EINVAL;
+ /*
+ * When the CPU enumerates VMX nested-exception support, bit 13
+ * (set to indicate a nested exception) of the intr info field
+ * may have value 1. Otherwise bit 13 is reserved.
+ */
+ if (CC(has_nested_exception && !(vmx->nested.msrs.basic & VMX_BASIC_NESTED_EXCEPTION)))
+ return -EINVAL;
break;
case INTR_TYPE_SOFT_EXCEPTION:
case INTR_TYPE_SOFT_INTR:
@@ -3108,6 +3105,9 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
return -EINVAL;
break;
case INTR_TYPE_OTHER_EVENT:
+ if (CC(vector > 3))
+ return -EINVAL;
+
switch (vector) {
case 0:
if (CC(!nested_cpu_supports_monitor_trap_flag(vcpu)))
@@ -3121,7 +3121,7 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
return -EINVAL;
break;
default:
- return -EINVAL;
+ break;
}
break;
}
@@ -3454,14 +3454,15 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
}
if (vmcs12->guest_cr4 & X86_CR4_FRED) {
unsigned int ss_dpl = VMX_AR_DPL(vmcs12->guest_ss_ar_bytes);
+
+ if (CC(ss_dpl == 1 || ss_dpl == 2))
+ return -EINVAL;
+
switch (ss_dpl) {
case 0:
if (CC(!(vmcs12->guest_cs_ar_bytes & VMX_AR_L_MASK)))
return -EINVAL;
break;
- case 1:
- case 2:
- return -EINVAL;
case 3:
if (CC(vmcs12->guest_rflags & X86_EFLAGS_IOPL))
return -EINVAL;