To be able to use cpu_has_svm/vmx_* macros in common code without enclosing
them inside #ifdef guards when the respective virtualization technology is
not enabled, define them as false when not applicable.
No functional change intended.
Signed-off-by: Xenia Ragiadakou <burzalodowa@gmail.com>
---
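For illustration (not part of the patch itself), this is the kind of common-code
pattern the fallback definitions are meant to enable; do_something() is a
hypothetical placeholder:

/* Today, common code has to guard vendor-specific feature checks: */
#ifdef CONFIG_INTEL_VMX
    if ( cpu_has_vmx_ept )
        do_something();
#endif

/* With cpu_has_vmx_ept defined as false when VMX support is not built in,
 * the guard can be dropped and the dead branch is optimised away: */
    if ( cpu_has_vmx_ept )
        do_something();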
xen/arch/x86/include/asm/hvm/svm/svm.h | 17 ++++++++++++
xen/arch/x86/include/asm/hvm/vmx/vmcs.h | 35 +++++++++++++++++++++++++
xen/arch/x86/include/asm/hvm/vmx/vmx.h | 18 +++++++++++++
3 files changed, 70 insertions(+)
diff --git a/xen/arch/x86/include/asm/hvm/svm/svm.h b/xen/arch/x86/include/asm/hvm/svm/svm.h
index 65e35a4f59..556584851b 100644
--- a/xen/arch/x86/include/asm/hvm/svm/svm.h
+++ b/xen/arch/x86/include/asm/hvm/svm/svm.h
@@ -80,6 +80,7 @@ extern u32 svm_feature_flags;
#define SVM_FEATURE_SSS 19 /* NPT Supervisor Shadow Stacks */
#define SVM_FEATURE_SPEC_CTRL 20 /* MSR_SPEC_CTRL virtualisation */
+#ifdef CONFIG_AMD_SVM
#define cpu_has_svm_feature(f) (svm_feature_flags & (1u << (f)))
#define cpu_has_svm_npt cpu_has_svm_feature(SVM_FEATURE_NPT)
#define cpu_has_svm_lbrv cpu_has_svm_feature(SVM_FEATURE_LBRV)
@@ -95,6 +96,22 @@ extern u32 svm_feature_flags;
#define cpu_has_svm_vloadsave cpu_has_svm_feature(SVM_FEATURE_VLOADSAVE)
#define cpu_has_svm_sss cpu_has_svm_feature(SVM_FEATURE_SSS)
#define cpu_has_svm_spec_ctrl cpu_has_svm_feature(SVM_FEATURE_SPEC_CTRL)
+#else
+#define cpu_has_svm_npt false
+#define cpu_has_svm_lbrv false
+#define cpu_has_svm_svml false
+#define cpu_has_svm_nrips false
+#define cpu_has_svm_cleanbits false
+#define cpu_has_svm_flushbyasid false
+#define cpu_has_svm_decode false
+#define cpu_has_svm_vgif false
+#define cpu_has_pause_filter false
+#define cpu_has_pause_thresh false
+#define cpu_has_tsc_ratio false
+#define cpu_has_svm_vloadsave false
+#define cpu_has_svm_sss false
+#define cpu_has_svm_spec_ctrl false
+#endif /* CONFIG_AMD_SVM */
#define SVM_PAUSEFILTER_INIT 4000
#define SVM_PAUSETHRESH_INIT 1000
diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
index 0a84e74478..03d1f7480a 100644
--- a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
+++ b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
@@ -294,6 +294,7 @@ extern u64 vmx_ept_vpid_cap;
#define VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL
+#ifdef CONFIG_INTEL_VMX
#define cpu_has_wbinvd_exiting \
(vmx_secondary_exec_control & SECONDARY_EXEC_WBINVD_EXITING)
#define cpu_has_vmx_virtualize_apic_accesses \
@@ -352,6 +353,36 @@ extern u64 vmx_ept_vpid_cap;
(vmx_secondary_exec_control & SECONDARY_EXEC_BUS_LOCK_DETECTION)
#define cpu_has_vmx_notify_vm_exiting \
(vmx_secondary_exec_control & SECONDARY_EXEC_NOTIFY_VM_EXITING)
+#else
+#define cpu_has_wbinvd_exiting false
+#define cpu_has_vmx_virtualize_apic_accesses false
+#define cpu_has_vmx_tpr_shadow false
+#define cpu_has_vmx_vnmi false
+#define cpu_has_vmx_msr_bitmap false
+#define cpu_has_vmx_secondary_exec_control false
+#define cpu_has_vmx_ept false
+#define cpu_has_vmx_dt_exiting false
+#define cpu_has_vmx_vpid false
+#define cpu_has_monitor_trap_flag false
+#define cpu_has_vmx_pat false
+#define cpu_has_vmx_efer false
+#define cpu_has_vmx_unrestricted_guest false
+#define vmx_unrestricted_guest(v) false
+#define cpu_has_vmx_ple false
+#define cpu_has_vmx_apic_reg_virt false
+#define cpu_has_vmx_virtual_intr_delivery false
+#define cpu_has_vmx_virtualize_x2apic_mode false
+#define cpu_has_vmx_posted_intr_processing false
+#define cpu_has_vmx_vmcs_shadowing false
+#define cpu_has_vmx_vmfunc false
+#define cpu_has_vmx_virt_exceptions false
+#define cpu_has_vmx_pml false
+#define cpu_has_vmx_mpx false
+#define cpu_has_vmx_xsaves false
+#define cpu_has_vmx_tsc_scaling false
+#define cpu_has_vmx_bus_lock_detection false
+#define cpu_has_vmx_notify_vm_exiting false
+#endif /* CONFIG_INTEL_VMX */
#define VMCS_RID_TYPE_MASK 0x80000000
@@ -374,8 +405,12 @@ extern u64 vmx_ept_vpid_cap;
#define VMX_BASIC_DEFAULT1_ZERO (1ULL << 55)
extern u64 vmx_basic_msr;
+#ifdef CONFIG_INTEL_VMX
#define cpu_has_vmx_ins_outs_instr_info \
(!!(vmx_basic_msr & VMX_BASIC_INS_OUT_INFO))
+#else
+#define cpu_has_vmx_ins_outs_instr_info false
+#endif /* CONFIG_INTEL_VMX */
/* Guest interrupt status */
#define VMX_GUEST_INTR_STATUS_SUBFIELD_BITMASK 0x0FF
diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmx.h b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
index 97d6b810ec..fe9a5796f7 100644
--- a/xen/arch/x86/include/asm/hvm/vmx/vmx.h
+++ b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
@@ -289,6 +289,7 @@ typedef union cr_access_qual {
extern uint8_t posted_intr_vector;
+#ifdef CONFIG_INTEL_VMX
#define cpu_has_vmx_ept_exec_only_supported \
(vmx_ept_vpid_cap & VMX_EPT_EXEC_ONLY_SUPPORTED)
@@ -301,6 +302,17 @@ extern uint8_t posted_intr_vector;
#define cpu_has_vmx_ept_ad (vmx_ept_vpid_cap & VMX_EPT_AD_BIT)
#define cpu_has_vmx_ept_invept_single_context \
(vmx_ept_vpid_cap & VMX_EPT_INVEPT_SINGLE_CONTEXT)
+#else
+#define cpu_has_vmx_ept_exec_only_supported false
+
+#define cpu_has_vmx_ept_wl4_supported false
+#define cpu_has_vmx_ept_mt_uc false
+#define cpu_has_vmx_ept_mt_wb false
+#define cpu_has_vmx_ept_2mb false
+#define cpu_has_vmx_ept_1gb false
+#define cpu_has_vmx_ept_ad false
+#define cpu_has_vmx_ept_invept_single_context false
+#endif /* CONFIG_INTEL_VMX */
#define EPT_2MB_SHIFT 16
#define EPT_1GB_SHIFT 17
@@ -310,12 +322,18 @@ extern uint8_t posted_intr_vector;
#define INVEPT_SINGLE_CONTEXT 1
#define INVEPT_ALL_CONTEXT 2
+#ifdef CONFIG_INTEL_VMX
#define cpu_has_vmx_vpid_invvpid_individual_addr \
(vmx_ept_vpid_cap & VMX_VPID_INVVPID_INDIVIDUAL_ADDR)
#define cpu_has_vmx_vpid_invvpid_single_context \
(vmx_ept_vpid_cap & VMX_VPID_INVVPID_SINGLE_CONTEXT)
#define cpu_has_vmx_vpid_invvpid_single_context_retaining_global \
(vmx_ept_vpid_cap & VMX_VPID_INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL)
+#else
+#define cpu_has_vmx_vpid_invvpid_individual_addr false
+#define cpu_has_vmx_vpid_invvpid_single_context false
+#define cpu_has_vmx_vpid_invvpid_single_context_retaining_global false
+#endif /* CONFIG_INTEL_VMX */
#define INVVPID_INDIVIDUAL_ADDR 0
#define INVVPID_SINGLE_CONTEXT 1
--
2.37.2
On 13.02.2023 15:57, Xenia Ragiadakou wrote:
> To be able to use cpu_has_svm/vmx_* macros in common code without enclosing
Both in the title and here the spelling you use made me first think that
you talk about "cpu_has_svm". May I suggest you follow what we typically
do and use "cpu_has_{svm,vmx}_*" instead in such a case? However, the
title may want changing anyway:
> --- a/xen/arch/x86/include/asm/hvm/svm/svm.h
> +++ b/xen/arch/x86/include/asm/hvm/svm/svm.h
> @@ -80,6 +80,7 @@ extern u32 svm_feature_flags;
> #define SVM_FEATURE_SSS 19 /* NPT Supervisor Shadow Stacks */
> #define SVM_FEATURE_SPEC_CTRL 20 /* MSR_SPEC_CTRL virtualisation */
>
> +#ifdef CONFIG_AMD_SVM
> #define cpu_has_svm_feature(f) (svm_feature_flags & (1u << (f)))
Why don't you simply provide a 2nd form of this one macro, leaving all
others alone?
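A minimal sketch of what such a second form might look like (illustrative only,
not what was posted):

#ifdef CONFIG_AMD_SVM
#define cpu_has_svm_feature(f) (svm_feature_flags & (1u << (f)))
#else
/* All cpu_has_svm_* macros then evaluate to false with no further changes. */
#define cpu_has_svm_feature(f) false
#endif /* CONFIG_AMD_SVM */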
> --- a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
> +++ b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
> @@ -294,6 +294,7 @@ extern u64 vmx_ept_vpid_cap;
>
> #define VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL
>
> +#ifdef CONFIG_INTEL_VMX
> #define cpu_has_wbinvd_exiting \
> (vmx_secondary_exec_control & SECONDARY_EXEC_WBINVD_EXITING)
> #define cpu_has_vmx_virtualize_apic_accesses \
> @@ -352,6 +353,36 @@ extern u64 vmx_ept_vpid_cap;
> (vmx_secondary_exec_control & SECONDARY_EXEC_BUS_LOCK_DETECTION)
> #define cpu_has_vmx_notify_vm_exiting \
> (vmx_secondary_exec_control & SECONDARY_EXEC_NOTIFY_VM_EXITING)
> +#else
> +#define cpu_has_wbinvd_exiting false
> +#define cpu_has_vmx_virtualize_apic_accesses false
> +#define cpu_has_vmx_tpr_shadow false
> +#define cpu_has_vmx_vnmi false
> +#define cpu_has_vmx_msr_bitmap false
> +#define cpu_has_vmx_secondary_exec_control false
> +#define cpu_has_vmx_ept false
> +#define cpu_has_vmx_dt_exiting false
> +#define cpu_has_vmx_vpid false
> +#define cpu_has_monitor_trap_flag false
> +#define cpu_has_vmx_pat false
> +#define cpu_has_vmx_efer false
> +#define cpu_has_vmx_unrestricted_guest false
> +#define vmx_unrestricted_guest(v) false
> +#define cpu_has_vmx_ple false
> +#define cpu_has_vmx_apic_reg_virt false
> +#define cpu_has_vmx_virtual_intr_delivery false
> +#define cpu_has_vmx_virtualize_x2apic_mode false
> +#define cpu_has_vmx_posted_intr_processing false
> +#define cpu_has_vmx_vmcs_shadowing false
> +#define cpu_has_vmx_vmfunc false
> +#define cpu_has_vmx_virt_exceptions false
> +#define cpu_has_vmx_pml false
> +#define cpu_has_vmx_mpx false
> +#define cpu_has_vmx_xsaves false
> +#define cpu_has_vmx_tsc_scaling false
> +#define cpu_has_vmx_bus_lock_detection false
> +#define cpu_has_vmx_notify_vm_exiting false
> +#endif /* CONFIG_INTEL_VMX */
For VMX you first of all want to separate out vmx_unrestricted_guest(v).
Maybe we can even get away without a 2nd form. Then I think it would be
much neater a change if, like we have for SVM, a couple (three I think)
helper macros were introduced, which then is all that needs providing a
2nd form for. (Both steps may want doing in separate, prereq patches.)
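A rough sketch of the kind of helper meant here (the name is made up for
illustration; the actual set and spelling would be up to the prereq patch):

#ifdef CONFIG_INTEL_VMX
#define vmx_secondary_exec_ctrl_has(bit) (vmx_secondary_exec_control & (bit))
#else
#define vmx_secondary_exec_ctrl_has(bit) false
#endif

/* The existing per-feature macros would then wrap the helper, e.g.: */
#define cpu_has_wbinvd_exiting \
    vmx_secondary_exec_ctrl_has(SECONDARY_EXEC_WBINVD_EXITING)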
> @@ -374,8 +405,12 @@ extern u64 vmx_ept_vpid_cap;
> #define VMX_BASIC_DEFAULT1_ZERO (1ULL << 55)
>
> extern u64 vmx_basic_msr;
> +#ifdef CONFIG_INTEL_VMX
> #define cpu_has_vmx_ins_outs_instr_info \
> (!!(vmx_basic_msr & VMX_BASIC_INS_OUT_INFO))
> +#else
> +#define cpu_has_vmx_ins_outs_instr_info false
> +#endif /* CONFIG_INTEL_VMX */
I don't think you need an alternative here - it's used solely in VMX
code. If one wanted to, this could even be moved to vmcs.c.
> --- a/xen/arch/x86/include/asm/hvm/vmx/vmx.h
> +++ b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
> @@ -289,6 +289,7 @@ typedef union cr_access_qual {
>
> extern uint8_t posted_intr_vector;
>
> +#ifdef CONFIG_INTEL_VMX
> #define cpu_has_vmx_ept_exec_only_supported \
> (vmx_ept_vpid_cap & VMX_EPT_EXEC_ONLY_SUPPORTED)
>
> @@ -301,6 +302,17 @@ extern uint8_t posted_intr_vector;
> #define cpu_has_vmx_ept_ad (vmx_ept_vpid_cap & VMX_EPT_AD_BIT)
> #define cpu_has_vmx_ept_invept_single_context \
> (vmx_ept_vpid_cap & VMX_EPT_INVEPT_SINGLE_CONTEXT)
> +#else
> +#define cpu_has_vmx_ept_exec_only_supported false
> +
> +#define cpu_has_vmx_ept_wl4_supported false
> +#define cpu_has_vmx_ept_mt_uc false
> +#define cpu_has_vmx_ept_mt_wb false
> +#define cpu_has_vmx_ept_2mb false
> +#define cpu_has_vmx_ept_1gb false
> +#define cpu_has_vmx_ept_ad false
> +#define cpu_has_vmx_ept_invept_single_context false
> +#endif /* CONFIG_INTEL_VMX */
Same suggestion for introducing a helper macro here, which would then
also be usable ...
> @@ -310,12 +322,18 @@ extern uint8_t posted_intr_vector;
> #define INVEPT_SINGLE_CONTEXT 1
> #define INVEPT_ALL_CONTEXT 2
>
> +#ifdef CONFIG_INTEL_VMX
> #define cpu_has_vmx_vpid_invvpid_individual_addr \
> (vmx_ept_vpid_cap & VMX_VPID_INVVPID_INDIVIDUAL_ADDR)
> #define cpu_has_vmx_vpid_invvpid_single_context \
> (vmx_ept_vpid_cap & VMX_VPID_INVVPID_SINGLE_CONTEXT)
> #define cpu_has_vmx_vpid_invvpid_single_context_retaining_global \
> (vmx_ept_vpid_cap & VMX_VPID_INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL)
> +#else
> +#define cpu_has_vmx_vpid_invvpid_individual_addr false
> +#define cpu_has_vmx_vpid_invvpid_single_context false
> +#define cpu_has_vmx_vpid_invvpid_single_context_retaining_global false
> +#endif /* CONFIG_INTEL_VMX */
... here.
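Again purely for illustration (helper name hypothetical), one such helper could
cover both of the vmx_ept_vpid_cap based blocks:

#ifdef CONFIG_INTEL_VMX
#define vmx_ept_vpid_cap_has(bit) (vmx_ept_vpid_cap & (bit))
#else
#define vmx_ept_vpid_cap_has(bit) false
#endif

#define cpu_has_vmx_ept_ad vmx_ept_vpid_cap_has(VMX_EPT_AD_BIT)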
Jan
On 2/14/23 18:36, Jan Beulich wrote:
> On 13.02.2023 15:57, Xenia Ragiadakou wrote:
>> To be able to use cpu_has_svm/vmx_* macros in common code without enclosing
>
> Both in the title and here the spelling you use made me first think that
> you talk about "cpu_has_svm". May I suggest you follow what we typically
> do and use "cpu_has_{svm,vmx}_*" instead in such a case? However, the
> title may want changing anyway:
Ok, I will use the conventional way from now on.
>
>> --- a/xen/arch/x86/include/asm/hvm/svm/svm.h
>> +++ b/xen/arch/x86/include/asm/hvm/svm/svm.h
>> @@ -80,6 +80,7 @@ extern u32 svm_feature_flags;
>> #define SVM_FEATURE_SSS 19 /* NPT Supervisor Shadow Stacks */
>> #define SVM_FEATURE_SPEC_CTRL 20 /* MSR_SPEC_CTRL virtualisation */
>>
>> +#ifdef CONFIG_AMD_SVM
>> #define cpu_has_svm_feature(f) (svm_feature_flags & (1u << (f)))
>
> Why don't you simply provide a 2nd form of this one macro, leaving all
> others alone?
You are right. That way there will be fewer changes.
>
>> --- a/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
>> +++ b/xen/arch/x86/include/asm/hvm/vmx/vmcs.h
>> @@ -294,6 +294,7 @@ extern u64 vmx_ept_vpid_cap;
>>
>> #define VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL
>>
>> +#ifdef CONFIG_INTEL_VMX
>> #define cpu_has_wbinvd_exiting \
>> (vmx_secondary_exec_control & SECONDARY_EXEC_WBINVD_EXITING)
>> #define cpu_has_vmx_virtualize_apic_accesses \
>> @@ -352,6 +353,36 @@ extern u64 vmx_ept_vpid_cap;
>> (vmx_secondary_exec_control & SECONDARY_EXEC_BUS_LOCK_DETECTION)
>> #define cpu_has_vmx_notify_vm_exiting \
>> (vmx_secondary_exec_control & SECONDARY_EXEC_NOTIFY_VM_EXITING)
>> +#else
>> +#define cpu_has_wbinvd_exiting false
>> +#define cpu_has_vmx_virtualize_apic_accesses false
>> +#define cpu_has_vmx_tpr_shadow false
>> +#define cpu_has_vmx_vnmi false
>> +#define cpu_has_vmx_msr_bitmap false
>> +#define cpu_has_vmx_secondary_exec_control false
>> +#define cpu_has_vmx_ept false
>> +#define cpu_has_vmx_dt_exiting false
>> +#define cpu_has_vmx_vpid false
>> +#define cpu_has_monitor_trap_flag false
>> +#define cpu_has_vmx_pat false
>> +#define cpu_has_vmx_efer false
>> +#define cpu_has_vmx_unrestricted_guest false
>> +#define vmx_unrestricted_guest(v) false
>> +#define cpu_has_vmx_ple false
>> +#define cpu_has_vmx_apic_reg_virt false
>> +#define cpu_has_vmx_virtual_intr_delivery false
>> +#define cpu_has_vmx_virtualize_x2apic_mode false
>> +#define cpu_has_vmx_posted_intr_processing false
>> +#define cpu_has_vmx_vmcs_shadowing false
>> +#define cpu_has_vmx_vmfunc false
>> +#define cpu_has_vmx_virt_exceptions false
>> +#define cpu_has_vmx_pml false
>> +#define cpu_has_vmx_mpx false
>> +#define cpu_has_vmx_xsaves false
>> +#define cpu_has_vmx_tsc_scaling false
>> +#define cpu_has_vmx_bus_lock_detection false
>> +#define cpu_has_vmx_notify_vm_exiting false
>> +#endif /* CONFIG_INTEL_VMX */
>
> For VMX you first of all want to separate out vmx_unrestricted_guest(v).
> Maybe we can even get away without a 2nd form. Then I think it would be
> much neater a change if, like we have for SVM, a couple (three I think)
> helper macros were introduced, which then is all that needs providing a
> 2nd form for. (Both steps may want doing in separate, prereq patches.)
Ok will do.
>
>> @@ -374,8 +405,12 @@ extern u64 vmx_ept_vpid_cap;
>> #define VMX_BASIC_DEFAULT1_ZERO (1ULL << 55)
>>
>> extern u64 vmx_basic_msr;
>> +#ifdef CONFIG_INTEL_VMX
>> #define cpu_has_vmx_ins_outs_instr_info \
>> (!!(vmx_basic_msr & VMX_BASIC_INS_OUT_INFO))
>> +#else
>> +#define cpu_has_vmx_ins_outs_instr_info false
>> +#endif /* CONFIG_INTEL_VMX */
>
> I don't think you need an alternative here - it's used solely in VMX
> code. If one wanted to, this could even be moved to vmcs.c.
Ok. I'll take a closer look at this one.
>
>> --- a/xen/arch/x86/include/asm/hvm/vmx/vmx.h
>> +++ b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
>> @@ -289,6 +289,7 @@ typedef union cr_access_qual {
>>
>> extern uint8_t posted_intr_vector;
>>
>> +#ifdef CONFIG_INTEL_VMX
>> #define cpu_has_vmx_ept_exec_only_supported \
>> (vmx_ept_vpid_cap & VMX_EPT_EXEC_ONLY_SUPPORTED)
>>
>> @@ -301,6 +302,17 @@ extern uint8_t posted_intr_vector;
>> #define cpu_has_vmx_ept_ad (vmx_ept_vpid_cap & VMX_EPT_AD_BIT)
>> #define cpu_has_vmx_ept_invept_single_context \
>> (vmx_ept_vpid_cap & VMX_EPT_INVEPT_SINGLE_CONTEXT)
>> +#else
>> +#define cpu_has_vmx_ept_exec_only_supported false
>> +
>> +#define cpu_has_vmx_ept_wl4_supported false
>> +#define cpu_has_vmx_ept_mt_uc false
>> +#define cpu_has_vmx_ept_mt_wb false
>> +#define cpu_has_vmx_ept_2mb false
>> +#define cpu_has_vmx_ept_1gb false
>> +#define cpu_has_vmx_ept_ad false
>> +#define cpu_has_vmx_ept_invept_single_context false
>> +#endif /* CONFIG_INTEL_VMX */
>
> Same suggestion for introducing a helper macro here, which would then
> also be usable ...
>
>> @@ -310,12 +322,18 @@ extern uint8_t posted_intr_vector;
>> #define INVEPT_SINGLE_CONTEXT 1
>> #define INVEPT_ALL_CONTEXT 2
>>
>> +#ifdef CONFIG_INTEL_VMX
>> #define cpu_has_vmx_vpid_invvpid_individual_addr \
>> (vmx_ept_vpid_cap & VMX_VPID_INVVPID_INDIVIDUAL_ADDR)
>> #define cpu_has_vmx_vpid_invvpid_single_context \
>> (vmx_ept_vpid_cap & VMX_VPID_INVVPID_SINGLE_CONTEXT)
>> #define cpu_has_vmx_vpid_invvpid_single_context_retaining_global \
>> (vmx_ept_vpid_cap & VMX_VPID_INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL)
>> +#else
>> +#define cpu_has_vmx_vpid_invvpid_individual_addr false
>> +#define cpu_has_vmx_vpid_invvpid_single_context false
>> +#define cpu_has_vmx_vpid_invvpid_single_context_retaining_global false
>> +#endif /* CONFIG_INTEL_VMX */
>
> ... here.
Thanks.
>
> Jan
--
Xenia