From: Grygorii Strashko <grygorii_strashko@epam.com>
Functions:
hvm_shadow_handle_cd()
hvm_set_uc_mode()
domain_exit_uc_mode()
are used only by Intel VMX code, so move them under CONFIG_INTEL_VMX ifdef.
Signed-off-by: Grygorii Strashko <grygorii_strashko@epam.com>
---
xen/arch/x86/hvm/hvm.c | 50 ++++++++++++++++++++++--------------------
1 file changed, 26 insertions(+), 24 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index f1035fc9f645..3a30404d9940 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2168,30 +2168,6 @@ int hvm_set_efer(uint64_t value)
return X86EMUL_OKAY;
}
-/* Exit UC mode only if all VCPUs agree on MTRR/PAT and are not in no_fill. */
-static bool domain_exit_uc_mode(struct vcpu *v)
-{
- struct domain *d = v->domain;
- struct vcpu *vs;
-
- for_each_vcpu ( d, vs )
- {
- if ( (vs == v) || !vs->is_initialised )
- continue;
- if ( (vs->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) ||
- mtrr_pat_not_equal(vs, v) )
- return 0;
- }
-
- return 1;
-}
-
-static void hvm_set_uc_mode(struct vcpu *v, bool is_in_uc_mode)
-{
- v->domain->arch.hvm.is_in_uc_mode = is_in_uc_mode;
- shadow_blow_tables_per_domain(v->domain);
-}
-
int hvm_mov_to_cr(unsigned int cr, unsigned int gpr)
{
struct vcpu *curr = current;
@@ -2273,6 +2249,31 @@ int hvm_mov_from_cr(unsigned int cr, unsigned int gpr)
return X86EMUL_UNHANDLEABLE;
}
+#ifdef CONFIG_INTEL_VMX
+/* Exit UC mode only if all VCPUs agree on MTRR/PAT and are not in no_fill. */
+static bool domain_exit_uc_mode(struct vcpu *v)
+{
+ struct domain *d = v->domain;
+ struct vcpu *vs;
+
+ for_each_vcpu ( d, vs )
+ {
+ if ( (vs == v) || !vs->is_initialised )
+ continue;
+ if ( (vs->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) ||
+ mtrr_pat_not_equal(vs, v) )
+ return 0;
+ }
+
+ return 1;
+}
+
+static void hvm_set_uc_mode(struct vcpu *v, bool is_in_uc_mode)
+{
+ v->domain->arch.hvm.is_in_uc_mode = is_in_uc_mode;
+ shadow_blow_tables_per_domain(v->domain);
+}
+
void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
{
if ( value & X86_CR0_CD )
@@ -2306,6 +2307,7 @@ void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
spin_unlock(&v->domain->arch.hvm.uc_lock);
}
}
+#endif
static void hvm_update_cr(struct vcpu *v, unsigned int cr, unsigned long value)
{
--
2.34.1
On 23.10.2025 17:19, Grygorii Strashko wrote:
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -2168,30 +2168,6 @@ int hvm_set_efer(uint64_t value)
> return X86EMUL_OKAY;
> }
>
> -/* Exit UC mode only if all VCPUs agree on MTRR/PAT and are not in no_fill. */
> -static bool domain_exit_uc_mode(struct vcpu *v)
> -{
> - struct domain *d = v->domain;
> - struct vcpu *vs;
> -
> - for_each_vcpu ( d, vs )
> - {
> - if ( (vs == v) || !vs->is_initialised )
> - continue;
> - if ( (vs->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) ||
> - mtrr_pat_not_equal(vs, v) )
> - return 0;
> - }
> -
> - return 1;
> -}
> -
> -static void hvm_set_uc_mode(struct vcpu *v, bool is_in_uc_mode)
> -{
> - v->domain->arch.hvm.is_in_uc_mode = is_in_uc_mode;
> - shadow_blow_tables_per_domain(v->domain);
> -}
> -
> int hvm_mov_to_cr(unsigned int cr, unsigned int gpr)
> {
> struct vcpu *curr = current;
> @@ -2273,6 +2249,31 @@ int hvm_mov_from_cr(unsigned int cr, unsigned int gpr)
> return X86EMUL_UNHANDLEABLE;
> }
>
> +#ifdef CONFIG_INTEL_VMX
> +/* Exit UC mode only if all VCPUs agree on MTRR/PAT and are not in no_fill. */
> +static bool domain_exit_uc_mode(struct vcpu *v)
> +{
> + struct domain *d = v->domain;
> + struct vcpu *vs;
> +
> + for_each_vcpu ( d, vs )
> + {
> + if ( (vs == v) || !vs->is_initialised )
> + continue;
> + if ( (vs->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) ||
> + mtrr_pat_not_equal(vs, v) )
> + return 0;
> + }
> +
> + return 1;
> +}
> +
> +static void hvm_set_uc_mode(struct vcpu *v, bool is_in_uc_mode)
> +{
> + v->domain->arch.hvm.is_in_uc_mode = is_in_uc_mode;
> + shadow_blow_tables_per_domain(v->domain);
> +}
> +
> void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
In addition to what others have said, since we're talking of unreachable code
here: this function is also unreachable when SHADOW_PAGING=n. If you're already
making adjustments, I think you want to cover this other aspect at the same
time.
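
For illustration only, a minimal sketch of how both aspects could be covered
with a single guard (assuming the helpers stay in hvm.c for now, and that
CONFIG_SHADOW_PAGING is the right symbol to key off; the declaration in hvm.h
and the VMX caller would need matching treatment):

    /* Sketch only, not a tested change. */
    #if defined(CONFIG_INTEL_VMX) && defined(CONFIG_SHADOW_PAGING)

    /* domain_exit_uc_mode() and hvm_set_uc_mode() as in the patch ... */

    void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
    {
        /* ... body unchanged ... */
    }

    #endif /* CONFIG_INTEL_VMX && CONFIG_SHADOW_PAGING */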
Jan
Le 23/10/2025 à 17:22, Grygorii Strashko a écrit :
> From: Grygorii Strashko <grygorii_strashko@epam.com>
>
> Functions:
> hvm_shadow_handle_cd()
> hvm_set_uc_mode()
> domain_exit_uc_mode()
> are used only by Intel VMX code, so move them under CONFIG_INTEL_VMX ifdef.
>
If they are actually Intel VMX specific, they should be moved to VMX code
(and named appropriately) rather than ifdef'd in shared HVM code. If AMD code
happens to need these functions in the future, things would break in pretty
confusing ways (as the headers would not be updated consistently).
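
As a rough sketch of that direction (the placement and the vmx_ names here are
assumptions, not a tested patch):

    /* Sketch only: the helpers move into xen/arch/x86/hvm/vmx/vmx.c with
     * their bodies unchanged; only the renamed signatures are shown. */
    static bool vmx_domain_exit_uc_mode(struct vcpu *v);             /* was domain_exit_uc_mode() */
    static void vmx_set_uc_mode(struct vcpu *v, bool is_in_uc_mode); /* was hvm_set_uc_mode() */
    void vmx_shadow_handle_cd(struct vcpu *v, unsigned long value);  /* was hvm_shadow_handle_cd() */

The last one could possibly become static as well, if its only caller ends up
in the same file; its declaration would then move out of the shared HVM header.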
Teddy
--
Teddy Astie | Vates XCP-ng Developer
XCP-ng & Xen Orchestra - Vates solutions
web: https://vates.tech
On 25.10.25 21:10, Teddy Astie wrote:
> Le 23/10/2025 à 17:22, Grygorii Strashko a écrit :
>> From: Grygorii Strashko <grygorii_strashko@epam.com>
>>
>> Functions:
>> hvm_shadow_handle_cd()
>> hvm_set_uc_mode()
>> domain_exit_uc_mode()
>> are used only by Intel VMX code, so move them under CONFIG_INTEL_VMX ifdef.
>>
>
> If they are actually Intel VMX specific, they should be moved to VMX code
> (and named appropriately) rather than ifdef'd in shared HVM code. If AMD code
> happens to need these functions in the future, things would break in pretty
> confusing ways (as the headers would not be updated consistently).
I agree and like it even better. I can try that if there are no objections.
There is also hvm_prepare_vm86_tss(), which is used only by VMX code.
--
Best regards,
-grygorii
On 28/10/2025 12:43 pm, Grygorii Strashko wrote:
> On 25.10.25 21:10, Teddy Astie wrote:
>> Le 23/10/2025 à 17:22, Grygorii Strashko a écrit :
>>> From: Grygorii Strashko <grygorii_strashko@epam.com>
>>>
>>> Functions:
>>> hvm_shadow_handle_cd()
>>> hvm_set_uc_mode()
>>> domain_exit_uc_mode()
>>> are used only by Intel VMX code, so move them under CONFIG_INTEL_VMX
>>> ifdef.
>>>
>>
>> If they are actually Intel VMX specific, they should be moved to VMX code
>> (and named appropriately) rather than ifdef'd in shared HVM code. If AMD code
>> happens to need these functions in the future, things would break in pretty
>> confusing ways (as the headers would not be updated consistently).
>
> I agree and like it even better. I can try that if there are no objections.
>
> There is also hvm_prepare_vm86_tss(), which is used only by VMX code.
There is some horrible complexity here.
With SVM, we can run guests perfectly well in Cache Disable mode (i.e.
gCR0.CD=1). Nothing extra is needed.
With VT-x, for unknown reasons, entering a VM explicitly leaves CR0.{CD,NW}
unmodified, i.e. always Xen's settings, in which neither bit is set. As a
result, when the guest wishes to set gCR0.{CD,NW}, we have to jump through
hoops to make that happen.
Without going into the details, the upshot is that this "handle uc mode"
logic is VT-x specific, and oughtn't to be in hvm.c. Moving it seems
fine, but the data, d->arch.hvm.uc_*, wants to move into the vmx union too.
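
As a rough sketch of that data movement (field names taken from the current
code, exact placement hypothetical):

    /* Sketch only: uc_* state moving out of struct hvm_domain and into the
     * VT-x-specific struct, so accesses become d->arch.hvm.vmx.uc_lock etc. */
    struct vmx_domain {
        /* ... existing fields ... */
        spinlock_t uc_lock;        /* serialises UC-mode transitions */
        bool       is_in_uc_mode;  /* set while the domain runs UC due to gCR0.CD */
    };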
hvm_prepare_vm86_tss() is rather different. It ultimately exists because of
limitations in early VT-x, and it's probably fine to move too, although the
infrastructure surrounding it needs bigger changes.
~Andrew
On 2025-10-23 11:19, Grygorii Strashko wrote:
> From: Grygorii Strashko <grygorii_strashko@epam.com>
>
> Functions:
>   hvm_shadow_handle_cd()
>   hvm_set_uc_mode()
>   domain_exit_uc_mode()
> are used only by Intel VMX code, so move them under CONFIG_INTEL_VMX ifdef.
>
> Signed-off-by: Grygorii Strashko <grygorii_strashko@epam.com>

Reviewed-by: Jason Andryuk <jason.andryuk@amd.com>