From: Grygorii Strashko <grygorii_strashko@epam.com>

Functions:
hvm_shadow_handle_cd()
hvm_set_uc_mode()
domain_exit_uc_mode()
are used only by Intel VMX code, so move them to VMX code.

While here:
- minor format change in domain_exit_uc_mode()
- s/(0/1)/(false/true) for bool types
- use "struct domain *" as parameter in hvm_set_uc_mode()
- use "struct domain *d" as local var in hvm_shadow_handle_cd()

No functional changes.

Signed-off-by: Grygorii Strashko <grygorii_strashko@epam.com>
---
changes in v2:
- use "struct domain *" as parameter in hvm_set_uc_mode()
- use "struct domain *d" as local var in hvm_shadow_handle_cd()
- move code before vmx_handle_cd()

 xen/arch/x86/hvm/hvm.c                 | 58 ------------------------
 xen/arch/x86/hvm/vmx/vmx.c             | 61 ++++++++++++++++++++++++++
 xen/arch/x86/include/asm/hvm/support.h |  2 -
 3 files changed, 61 insertions(+), 60 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index da3cde1ff0e6..9caca93e5f56 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2168,30 +2168,6 @@ int hvm_set_efer(uint64_t value)
     return X86EMUL_OKAY;
 }
 
-/* Exit UC mode only if all VCPUs agree on MTRR/PAT and are not in no_fill. */
-static bool domain_exit_uc_mode(struct vcpu *v)
-{
-    struct domain *d = v->domain;
-    struct vcpu *vs;
-
-    for_each_vcpu ( d, vs )
-    {
-        if ( (vs == v) || !vs->is_initialised )
-            continue;
-        if ( (vs->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) ||
-             mtrr_pat_not_equal(vs, v) )
-            return 0;
-    }
-
-    return 1;
-}
-
-static void hvm_set_uc_mode(struct vcpu *v, bool is_in_uc_mode)
-{
-    v->domain->arch.hvm.is_in_uc_mode = is_in_uc_mode;
-    shadow_blow_tables_per_domain(v->domain);
-}
-
 int hvm_mov_to_cr(unsigned int cr, unsigned int gpr)
 {
     struct vcpu *curr = current;
@@ -2273,40 +2249,6 @@ int hvm_mov_from_cr(unsigned int cr, unsigned int gpr)
     return X86EMUL_UNHANDLEABLE;
 }
 
-void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
-{
-    if ( value & X86_CR0_CD )
-    {
-        /* Entering no fill cache mode. */
-        spin_lock(&v->domain->arch.hvm.uc_lock);
-        v->arch.hvm.cache_mode = NO_FILL_CACHE_MODE;
-
-        if ( !v->domain->arch.hvm.is_in_uc_mode )
-        {
-            domain_pause_nosync(v->domain);
-
-            /* Flush physical caches. */
-            flush_all(FLUSH_CACHE_EVICT);
-            hvm_set_uc_mode(v, 1);
-
-            domain_unpause(v->domain);
-        }
-        spin_unlock(&v->domain->arch.hvm.uc_lock);
-    }
-    else if ( !(value & X86_CR0_CD) &&
-              (v->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) )
-    {
-        /* Exit from no fill cache mode. */
-        spin_lock(&v->domain->arch.hvm.uc_lock);
-        v->arch.hvm.cache_mode = NORMAL_CACHE_MODE;
-
-        if ( domain_exit_uc_mode(v) )
-            hvm_set_uc_mode(v, 0);
-
-        spin_unlock(&v->domain->arch.hvm.uc_lock);
-    }
-}
-
 static void hvm_update_cr(struct vcpu *v, unsigned int cr, unsigned long value)
 {
     v->arch.hvm.guest_cr[cr] = value;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 6f2cc635e582..d7efd0a73add 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -44,6 +44,7 @@
 #include <asm/processor.h>
 #include <asm/prot-key.h>
 #include <asm/regs.h>
+#include <asm/shadow.h>
 #include <asm/spec_ctrl.h>
 #include <asm/stubs.h>
 #include <asm/x86_emulate.h>
@@ -1451,6 +1452,66 @@ static int cf_check vmx_get_guest_pat(struct vcpu *v, u64 *gpat)
     return 1;
 }
 
+/* Exit UC mode only if all VCPUs agree on MTRR/PAT and are not in no_fill. */
+static bool domain_exit_uc_mode(struct vcpu *v)
+{
+    struct domain *d = v->domain;
+    struct vcpu *vs;
+
+    for_each_vcpu(d, vs)
+    {
+        if ( (vs == v) || !vs->is_initialised )
+            continue;
+        if ( (vs->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) ||
+             mtrr_pat_not_equal(vs, v) )
+            return false;
+    }
+
+    return true;
+}
+
+static void hvm_set_uc_mode(struct domain *d, bool is_in_uc_mode)
+{
+    d->arch.hvm.is_in_uc_mode = is_in_uc_mode;
+    shadow_blow_tables_per_domain(d);
+}
+
+static void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
+{
+    struct domain *d = v->domain;
+
+    if ( value & X86_CR0_CD )
+    {
+        /* Entering no fill cache mode. */
+        spin_lock(&d->arch.hvm.uc_lock);
+        v->arch.hvm.cache_mode = NO_FILL_CACHE_MODE;
+
+        if ( !d->arch.hvm.is_in_uc_mode )
+        {
+            domain_pause_nosync(d);
+
+            /* Flush physical caches. */
+            flush_all(FLUSH_CACHE_EVICT);
+            hvm_set_uc_mode(d, true);
+
+            domain_unpause(d);
+        }
+        spin_unlock(&d->arch.hvm.uc_lock);
+    }
+    else if ( !(value & X86_CR0_CD) &&
+              (v->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) )
+    {
+        /* Exit from no fill cache mode. */
+        spin_lock(&d->arch.hvm.uc_lock);
+        v->arch.hvm.cache_mode = NORMAL_CACHE_MODE;
+
+        if ( domain_exit_uc_mode(v) )
+            hvm_set_uc_mode(d, false);
+
+        spin_unlock(&d->arch.hvm.uc_lock);
+    }
+}
+
 static void cf_check vmx_handle_cd(struct vcpu *v, unsigned long value)
 {
     if ( !paging_mode_hap(v->domain) )
diff --git a/xen/arch/x86/include/asm/hvm/support.h b/xen/arch/x86/include/asm/hvm/support.h
index 2a7ba36af06f..9e9fa6295567 100644
--- a/xen/arch/x86/include/asm/hvm/support.h
+++ b/xen/arch/x86/include/asm/hvm/support.h
@@ -119,8 +119,6 @@ void hvm_rdtsc_intercept(struct cpu_user_regs *regs);
 
 int __must_check hvm_handle_xsetbv(u32 index, u64 new_bv);
 
-void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value);
-
 /*
  * These functions all return X86EMUL return codes. For hvm_set_*(), the
  * caller is responsible for injecting #GP[0] if X86EMUL_EXCEPTION is
--
2.34.1
On 11.11.2025 21:10, Grygorii Strashko wrote:
> From: Grygorii Strashko <grygorii_strashko@epam.com>
>
> Functions:
> hvm_shadow_handle_cd()
> hvm_set_uc_mode()
> domain_exit_uc_mode()
> are used only by Intel VMX code, so move them to VMX code.
>
> While here:
> - minor format change in domain_exit_uc_mode()
> - s/(0/1)/(false/true) for bool types
> - use "struct domain *" as parameter in hvm_set_uc_mode()
> - use "struct domain *d" as local var in hvm_shadow_handle_cd()
One more please:
> @@ -1451,6 +1452,66 @@ static int cf_check vmx_get_guest_pat(struct vcpu *v, u64 *gpat)
>      return 1;
>  }
> 
> +/* Exit UC mode only if all VCPUs agree on MTRR/PAT and are not in no_fill. */
> +static bool domain_exit_uc_mode(struct vcpu *v)
> +{
> +    struct domain *d = v->domain;
> +    struct vcpu *vs;
const on all three pointer target types. Then, together with the function
prefix adjustment discussed on the other sub-thread,
Acked-by: Jan Beulich <jbeulich@suse.com>
Jan
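For reference, a minimal sketch of the constification being asked for. It is hypothetical: it assumes mtrr_pat_not_equal() and the for_each_vcpu() iteration accept const-qualified pointers, which the review comment implies but this thread does not show.

    /* Sketch only: const on all three pointer target types, per the review. */
    static bool domain_exit_uc_mode(const struct vcpu *v)
    {
        const struct domain *d = v->domain;
        const struct vcpu *vs;

        for_each_vcpu(d, vs)
        {
            if ( (vs == v) || !vs->is_initialised )
                continue;
            if ( (vs->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) ||
                 mtrr_pat_not_equal(vs, v) )
                return false;
        }

        return true;
    }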
On 13.11.2025 11:44, Jan Beulich wrote:
> On 11.11.2025 21:10, Grygorii Strashko wrote:
>> [...]
>
> const on all three pointer target types. Then, together with the function
> prefix adjustment discussed on the other sub-thread,
> Acked-by: Jan Beulich <jbeulich@suse.com>
Seeing that the other two patches are ready to go in (again, once the tree is
fully open again), I might as well do these edits while committing. So long as
you agree, of course.
Jan
Hi Jan,
On 13.11.25 13:19, Jan Beulich wrote:
> On 13.11.2025 11:44, Jan Beulich wrote:
>> On 11.11.2025 21:10, Grygorii Strashko wrote:
>>> [...]
>>
>> const on all three pointer target types. Then, together with the function
>> prefix adjustment discussed on the other sub-thread,
>> Acked-by: Jan Beulich <jbeulich@suse.com>
>
> Seeing that the other two patches are ready to go in (again, once the tree is
> fully open again), I might as well do these edits while committing. So long as
> you agree, of course.
Of course I agree and will be infinitely grateful.
Thank you.
--
Best regards,
-grygorii
On 11/11/2025 21:11, Grygorii Strashko wrote:
> [...]
>
> +static void hvm_set_uc_mode(struct domain *d, bool is_in_uc_mode)
> +{
> +    d->arch.hvm.is_in_uc_mode = is_in_uc_mode;
> +    shadow_blow_tables_per_domain(d);
> +}
> +
> +static void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
> +{
> +    struct domain *d = v->domain;
> [...]
Given that these functions are now static in vmx.c, I would give them
the vmx_* prefix instead of hvm_* (which could be confusing as it sounds
like something that is in general code).
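As a sketch, the suggested renaming would amount to the following (names hypothetical; Jan argues in the follow-up below that file-local statics need no prefix at all):

    static bool vmx_domain_exit_uc_mode(struct vcpu *v);
    static void vmx_set_uc_mode(struct domain *d, bool is_in_uc_mode);
    static void vmx_shadow_handle_cd(struct vcpu *v, unsigned long value);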
Teddy
--
Teddy Astie | Vates XCP-ng Developer
XCP-ng & Xen Orchestra - Vates solutions
web: https://vates.tech
On 12.11.2025 11:52, Teddy Astie wrote:
> On 11/11/2025 21:11, Grygorii Strashko wrote:
>> [...]
>
> Given that these functions are now static in vmx.c, I would give them
> the vmx_* prefix instead of hvm_* (which could be confusing as it sounds
> like something that is in general code).
There's no need for either prefix really, I would say.
Jan