[PATCH RFC v1 08/20] KVM: VMX: Support extended register index in exit handling

Chang S. Bae posted 20 patches 3 months ago
[PATCH RFC v1 08/20] KVM: VMX: Support extended register index in exit handling
Posted by Chang S. Bae 3 months ago
Support 5-bit register indices in VMCS fields when EGPRs are enabled.

Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com>
---
RFC note:
The "chicken bit" (XCR0.APX) checker is intentionally deferred, as the
emulator in the next series will do a similar check. Consolidating the
XCR0 handling at the end keeps the logic clearer during the feature
exposition.
---
 arch/x86/kvm/vmx/vmx.h | 16 ++++++++++++----
 1 file changed, 12 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index b8da6ebc35dc..6cf1eb739caf 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -374,12 +374,17 @@ struct vmx_insn_info {
 
 static inline bool vmx_egpr_enabled(struct kvm_vcpu *vcpu __maybe_unused) { return false; }
 
-static inline struct vmx_insn_info vmx_get_insn_info(struct kvm_vcpu *vcpu __maybe_unused)
+static inline struct vmx_insn_info vmx_get_insn_info(struct kvm_vcpu *vcpu)
 {
 	struct vmx_insn_info insn;
 
-	insn.extended  = false;
-	insn.info.word = vmcs_read32(VMX_INSTRUCTION_INFO);
+	if (vmx_egpr_enabled(vcpu)) {
+		insn.extended   = true;
+		insn.info.dword = vmcs_read64(EXTENDED_INSTRUCTION_INFO);
+	} else {
+		insn.extended  = false;
+		insn.info.word = vmcs_read32(VMX_INSTRUCTION_INFO);
+	}
 
 	return insn;
 }
@@ -415,7 +420,10 @@ static __always_inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
 
 static inline int vmx_get_exit_qual_gpr(struct kvm_vcpu *vcpu)
 {
-	return (vmx_get_exit_qual(vcpu) >> 8) & 0xf;
+	if (vmx_egpr_enabled(vcpu))
+		return (vmx_get_exit_qual(vcpu) >> 8) & 0x1f;
+	else
+		return (vmx_get_exit_qual(vcpu) >> 8) & 0xf;
 }
 
 static __always_inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
-- 
2.51.0
Re: [PATCH RFC v1 08/20] KVM: VMX: Support extended register index in exit handling
Posted by Paolo Bonzini 2 months, 4 weeks ago
On 11/10/25 19:01, Chang S. Bae wrote:
> Support 5-bit register indices in VMCS fields when EGPRs are enabled.
> 
> Signed-off-by: Chang S. Bae <chang.seok.bae@intel.com>
> ---
> RFC note:
> The "chicken bit" (XCR0.APX) checker is intentionally deferred, as the
> emulator in the next series will do a similar check. Consolidating the
> XCR0 handling at the end keeps the logic clearer during the feature
> exposition.
> ---
>   arch/x86/kvm/vmx/vmx.h | 16 ++++++++++++----
>   1 file changed, 12 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
> index b8da6ebc35dc..6cf1eb739caf 100644
> --- a/arch/x86/kvm/vmx/vmx.h
> +++ b/arch/x86/kvm/vmx/vmx.h
> @@ -374,12 +374,17 @@ struct vmx_insn_info {
>   
>   static inline bool vmx_egpr_enabled(struct kvm_vcpu *vcpu __maybe_unused) { return false; }
>   
> -static inline struct vmx_insn_info vmx_get_insn_info(struct kvm_vcpu *vcpu __maybe_unused)
> +static inline struct vmx_insn_info vmx_get_insn_info(struct kvm_vcpu *vcpu)
>   {
>   	struct vmx_insn_info insn;
>   
> -	insn.extended  = false;
> -	insn.info.word = vmcs_read32(VMX_INSTRUCTION_INFO);
> +	if (vmx_egpr_enabled(vcpu)) {
> +		insn.extended   = true;
> +		insn.info.dword = vmcs_read64(EXTENDED_INSTRUCTION_INFO);
> +	} else {
> +		insn.extended  = false;
> +		insn.info.word = vmcs_read32(VMX_INSTRUCTION_INFO);
> +	}

Could this use static_cpu_has(X86_FEATURE_APX) instead, which is more 
efficient (avoids a runtime test)?

>   	return insn;
>   }
> @@ -415,7 +420,10 @@ static __always_inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
>   
>   static inline int vmx_get_exit_qual_gpr(struct kvm_vcpu *vcpu)
>   {
> -	return (vmx_get_exit_qual(vcpu) >> 8) & 0xf;
> +	if (vmx_egpr_enabled(vcpu))
> +		return (vmx_get_exit_qual(vcpu) >> 8) & 0x1f;
> +	else
> +		return (vmx_get_exit_qual(vcpu) >> 8) & 0xf;

Can this likewise mask against 0x1f, unconditionally?

Paolo

>   }
>   
>   static __always_inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
Re: [PATCH RFC v1 08/20] KVM: VMX: Support extended register index in exit handling
Posted by Chang S. Bae 2 months, 3 weeks ago
On 11/11/2025 9:45 AM, Paolo Bonzini wrote:
> On 11/10/25 19:01, Chang S. Bae wrote:
>>
>> -static inline struct vmx_insn_info vmx_get_insn_info(struct kvm_vcpu 
>> *vcpu __maybe_unused)
>> +static inline struct vmx_insn_info vmx_get_insn_info(struct kvm_vcpu 
>> *vcpu)
>>   {
>>       struct vmx_insn_info insn;
>> -    insn.extended  = false;
>> -    insn.info.word = vmcs_read32(VMX_INSTRUCTION_INFO);
>> +    if (vmx_egpr_enabled(vcpu)) {
>> +        insn.extended   = true;
>> +        insn.info.dword = vmcs_read64(EXTENDED_INSTRUCTION_INFO);
>> +    } else {
>> +        insn.extended  = false;
>> +        insn.info.word = vmcs_read32(VMX_INSTRUCTION_INFO);
>> +    }
> 
> Could this use static_cpu_has(X86_FEATURE_APX) instead, which is more 
> efficient (avoids a runtime test)?

Yes, for the same reason mentioned in patch 7.

>> @@ -415,7 +420,10 @@ static __always_inline unsigned long 
>> vmx_get_exit_qual(struct kvm_vcpu *vcpu)
>>   static inline int vmx_get_exit_qual_gpr(struct kvm_vcpu *vcpu)
>>   {
>> -    return (vmx_get_exit_qual(vcpu) >> 8) & 0xf;
>> +    if (vmx_egpr_enabled(vcpu))
>> +        return (vmx_get_exit_qual(vcpu) >> 8) & 0x1f;
>> +    else
>> +        return (vmx_get_exit_qual(vcpu) >> 8) & 0xf;
> 
> Can this likewise mask against 0x1f, unconditionally?

It looks like the behavior of that previously-undefined bit is not
guaranteed -- there's no architectural promise that the bit will always
read as zero. So in this case, I think it's still safer to rely on the
enumeration.

Perhaps adding a comment like this would clarify the intent:

   /*
    * Bit 12 was previously undefined, so its value is not guaranteed to
    * be zero. Only rely on the full 5-bit field when the extension is
    * enumerated.
    */
   if (vmx_ext_insn_info_available())
     ...

Re: [PATCH RFC v1 08/20] KVM: VMX: Support extended register index in exit handling
Posted by Paolo Bonzini 2 months, 3 weeks ago
On 11/14/25 00:22, Chang S. Bae wrote:
>>> @@ -415,7 +420,10 @@ static __always_inline unsigned long 
>>> vmx_get_exit_qual(struct kvm_vcpu *vcpu)
>>>   static inline int vmx_get_exit_qual_gpr(struct kvm_vcpu *vcpu)
>>>   {
>>> -    return (vmx_get_exit_qual(vcpu) >> 8) & 0xf;
>>> +    if (vmx_egpr_enabled(vcpu))
>>> +        return (vmx_get_exit_qual(vcpu) >> 8) & 0x1f;
>>> +    else
>>> +        return (vmx_get_exit_qual(vcpu) >> 8) & 0xf;
>>
>> Can this likewise mask against 0x1f, unconditionally?
> 
> It looks like the behavior of that previously-undefined bit is not
> guaranteed -- there's no architectural promise that the bit will always
> read as zero. So in this case, I think it's still safer to rely on the
> enumeration.
The manual says "VM Exit qualification is extended in-place"... This 
suggests that the wording of the SDM, "Not currently defined", really 
means "currently zero, but may not be *in the future*".

It is indeed safer, but it's a bit sad too. :)  Let's at least use 
either static_cpu_has() or a global __read_mostly variable, instead of a 
more expensive test using the vcpu.

Paolo