From: Grygorii Strashko <grygorii_strashko@epam.com>
Functions:
hvm_shadow_handle_cd()
hvm_set_uc_mode()
domain_exit_uc_mode()
are used only by Intel VMX code, so move them in VMX code.
While here:
- minor format change in domain_exit_uc_mode()
- s/(0/1)/(false/true) for bool types
No functional changes.
Signed-off-by: Grygorii Strashko <grygorii_strashko@epam.com>
---
xen/arch/x86/hvm/hvm.c | 58 -------------------------
xen/arch/x86/hvm/vmx/vmx.c | 59 ++++++++++++++++++++++++++
xen/arch/x86/include/asm/hvm/support.h | 2 -
3 files changed, 59 insertions(+), 60 deletions(-)
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index da3cde1ff0e6..9caca93e5f56 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2168,30 +2168,6 @@ int hvm_set_efer(uint64_t value)
return X86EMUL_OKAY;
}
-/* Exit UC mode only if all VCPUs agree on MTRR/PAT and are not in no_fill. */
-static bool domain_exit_uc_mode(struct vcpu *v)
-{
- struct domain *d = v->domain;
- struct vcpu *vs;
-
- for_each_vcpu ( d, vs )
- {
- if ( (vs == v) || !vs->is_initialised )
- continue;
- if ( (vs->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) ||
- mtrr_pat_not_equal(vs, v) )
- return 0;
- }
-
- return 1;
-}
-
-static void hvm_set_uc_mode(struct vcpu *v, bool is_in_uc_mode)
-{
- v->domain->arch.hvm.is_in_uc_mode = is_in_uc_mode;
- shadow_blow_tables_per_domain(v->domain);
-}
-
int hvm_mov_to_cr(unsigned int cr, unsigned int gpr)
{
struct vcpu *curr = current;
@@ -2273,40 +2249,6 @@ int hvm_mov_from_cr(unsigned int cr, unsigned int gpr)
return X86EMUL_UNHANDLEABLE;
}
-void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
-{
- if ( value & X86_CR0_CD )
- {
- /* Entering no fill cache mode. */
- spin_lock(&v->domain->arch.hvm.uc_lock);
- v->arch.hvm.cache_mode = NO_FILL_CACHE_MODE;
-
- if ( !v->domain->arch.hvm.is_in_uc_mode )
- {
- domain_pause_nosync(v->domain);
-
- /* Flush physical caches. */
- flush_all(FLUSH_CACHE_EVICT);
- hvm_set_uc_mode(v, 1);
-
- domain_unpause(v->domain);
- }
- spin_unlock(&v->domain->arch.hvm.uc_lock);
- }
- else if ( !(value & X86_CR0_CD) &&
- (v->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) )
- {
- /* Exit from no fill cache mode. */
- spin_lock(&v->domain->arch.hvm.uc_lock);
- v->arch.hvm.cache_mode = NORMAL_CACHE_MODE;
-
- if ( domain_exit_uc_mode(v) )
- hvm_set_uc_mode(v, 0);
-
- spin_unlock(&v->domain->arch.hvm.uc_lock);
- }
-}
-
static void hvm_update_cr(struct vcpu *v, unsigned int cr, unsigned long value)
{
v->arch.hvm.guest_cr[cr] = value;
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index e2b5077654ef..a7ee0519957a 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -44,6 +44,7 @@
#include <asm/processor.h>
#include <asm/prot-key.h>
#include <asm/regs.h>
+#include <asm/shadow.h>
#include <asm/spec_ctrl.h>
#include <asm/stubs.h>
#include <asm/x86_emulate.h>
@@ -1421,6 +1422,64 @@ static void cf_check vmx_set_segment_register(
vmx_vmcs_exit(v);
}
+/* Exit UC mode only if all VCPUs agree on MTRR/PAT and are not in no_fill. */
+static bool domain_exit_uc_mode(struct vcpu *v)
+{
+ struct domain *d = v->domain;
+ struct vcpu *vs;
+
+ for_each_vcpu(d, vs)
+ {
+ if ( (vs == v) || !vs->is_initialised )
+ continue;
+ if ( (vs->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) ||
+ mtrr_pat_not_equal(vs, v) )
+ return false;
+ }
+
+ return true;
+}
+
+static void hvm_set_uc_mode(struct vcpu *v, bool is_in_uc_mode)
+{
+ v->domain->arch.hvm.is_in_uc_mode = is_in_uc_mode;
+ shadow_blow_tables_per_domain(v->domain);
+}
+
+static void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
+{
+ if ( value & X86_CR0_CD )
+ {
+ /* Entering no fill cache mode. */
+ spin_lock(&v->domain->arch.hvm.uc_lock);
+ v->arch.hvm.cache_mode = NO_FILL_CACHE_MODE;
+
+ if ( !v->domain->arch.hvm.is_in_uc_mode )
+ {
+ domain_pause_nosync(v->domain);
+
+ /* Flush physical caches. */
+ flush_all(FLUSH_CACHE_EVICT);
+ hvm_set_uc_mode(v, true);
+
+ domain_unpause(v->domain);
+ }
+ spin_unlock(&v->domain->arch.hvm.uc_lock);
+ }
+ else if ( !(value & X86_CR0_CD) &&
+ (v->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) )
+ {
+ /* Exit from no fill cache mode. */
+ spin_lock(&v->domain->arch.hvm.uc_lock);
+ v->arch.hvm.cache_mode = NORMAL_CACHE_MODE;
+
+ if ( domain_exit_uc_mode(v) )
+ hvm_set_uc_mode(v, false);
+
+ spin_unlock(&v->domain->arch.hvm.uc_lock);
+ }
+}
+
static int cf_check vmx_set_guest_pat(struct vcpu *v, u64 gpat)
{
if ( !paging_mode_hap(v->domain) ||
diff --git a/xen/arch/x86/include/asm/hvm/support.h b/xen/arch/x86/include/asm/hvm/support.h
index 2a7ba36af06f..9e9fa6295567 100644
--- a/xen/arch/x86/include/asm/hvm/support.h
+++ b/xen/arch/x86/include/asm/hvm/support.h
@@ -119,8 +119,6 @@ void hvm_rdtsc_intercept(struct cpu_user_regs *regs);
int __must_check hvm_handle_xsetbv(u32 index, u64 new_bv);
-void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value);
-
/*
* These functions all return X86EMUL return codes. For hvm_set_*(), the
* caller is responsible for injecting #GP[0] if X86EMUL_EXCEPTION is
--
2.34.1
On 30.10.2025 00:54, Grygorii Strashko wrote:
> From: Grygorii Strashko <grygorii_strashko@epam.com>
>
> Functions:
> hvm_shadow_handle_cd()
> hvm_set_uc_mode()
> domain_exit_uc_mode()
> are used only by Intel VMX code, so move them in VMX code.
Nit: I think both in the title and here you mean "to" or "into".
> While here:
> - minor format change in domain_exit_uc_mode()
> - s/(0/1)/(false/true) for bool types
>
> No functional changes.
>
> Signed-off-by: Grygorii Strashko <grygorii_strashko@epam.com>
You did read Andrew's request to also move the involved structure field(s),
didn't you? Oh, wait - maybe that's going to be the subject of patch 3. While
often splitting steps helps, I'm not sure that's very useful here. You're
touching again immediately what you just have moved, all to reach a single
goal.
> @@ -1421,6 +1422,64 @@ static void cf_check vmx_set_segment_register(
> vmx_vmcs_exit(v);
> }
>
> +/* Exit UC mode only if all VCPUs agree on MTRR/PAT and are not in no_fill. */
> +static bool domain_exit_uc_mode(struct vcpu *v)
> +{
> + struct domain *d = v->domain;
> + struct vcpu *vs;
> +
> + for_each_vcpu(d, vs)
> + {
> + if ( (vs == v) || !vs->is_initialised )
> + continue;
> + if ( (vs->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) ||
> + mtrr_pat_not_equal(vs, v) )
> + return false;
> + }
> +
> + return true;
> +}
> +
> +static void hvm_set_uc_mode(struct vcpu *v, bool is_in_uc_mode)
> +{
> + v->domain->arch.hvm.is_in_uc_mode = is_in_uc_mode;
> + shadow_blow_tables_per_domain(v->domain);
> +}
Similarly I wonder whether this function wouldn't better change to taking
struct domain * right away. "v" itself is only ever used to get hold of
its domain. At the call sites this will then make obvious that this is a
domain-wide operation.
> +static void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
> +{
> + if ( value & X86_CR0_CD )
> + {
> + /* Entering no fill cache mode. */
> + spin_lock(&v->domain->arch.hvm.uc_lock);
> + v->arch.hvm.cache_mode = NO_FILL_CACHE_MODE;
> +
> + if ( !v->domain->arch.hvm.is_in_uc_mode )
> + {
> + domain_pause_nosync(v->domain);
> +
> + /* Flush physical caches. */
> + flush_all(FLUSH_CACHE_EVICT);
> + hvm_set_uc_mode(v, true);
> +
> + domain_unpause(v->domain);
> + }
> + spin_unlock(&v->domain->arch.hvm.uc_lock);
> + }
> + else if ( !(value & X86_CR0_CD) &&
> + (v->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) )
> + {
> + /* Exit from no fill cache mode. */
> + spin_lock(&v->domain->arch.hvm.uc_lock);
> + v->arch.hvm.cache_mode = NORMAL_CACHE_MODE;
> +
> + if ( domain_exit_uc_mode(v) )
> + hvm_set_uc_mode(v, false);
> +
> + spin_unlock(&v->domain->arch.hvm.uc_lock);
> + }
> +}
This function, in turn, could do with a local struct domain *d.
> static int cf_check vmx_set_guest_pat(struct vcpu *v, u64 gpat)
> {
> if ( !paging_mode_hap(v->domain) ||
Why did you put the code above this function? It's solely a helper of
vmx_handle_cd(), so would imo best be placed immediately ahead of that one.
Bottom line: The change could go in as is, but imo it would be nice if it
was tidied some while moving.
Jan
Hi Jan,
On 30.10.25 13:08, Jan Beulich wrote:
> On 30.10.2025 00:54, Grygorii Strashko wrote:
>> From: Grygorii Strashko <grygorii_strashko@epam.com>
>>
>> Functions:
>> hvm_shadow_handle_cd()
>> hvm_set_uc_mode()
>> domain_exit_uc_mode()
>> are used only by Intel VMX code, so move them in VMX code.
>
> Nit: I think both in the title and here you mean "to" or "into".
>
>> While here:
>> - minor format change in domain_exit_uc_mode()
>> - s/(0/1)/(false/true) for bool types
>>
>> No functional changes.
>>
>> Signed-off-by: Grygorii Strashko <grygorii_strashko@epam.com>
>
> You did read Andrew's request to also move the involved structure field(s),
> didn't you? Oh, wait - maybe that's going to be the subject of patch 3.
yes. it is patch 3 - It is not small.
And I really wanted this patch to contain as few modifications as possible on
top of the code movement.
>While
> often splitting steps helps, I'm not sure that's very useful here. You're
> touching again immediately what you just have moved, all to reach a single
> goal.
>
>> @@ -1421,6 +1422,64 @@ static void cf_check vmx_set_segment_register(
>> vmx_vmcs_exit(v);
>> }
>>
>> +/* Exit UC mode only if all VCPUs agree on MTRR/PAT and are not in no_fill. */
>> +static bool domain_exit_uc_mode(struct vcpu *v)
>> +{
>> + struct domain *d = v->domain;
>> + struct vcpu *vs;
>> +
>> + for_each_vcpu(d, vs)
>> + {
>> + if ( (vs == v) || !vs->is_initialised )
>> + continue;
>> + if ( (vs->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) ||
>> + mtrr_pat_not_equal(vs, v) )
>> + return false;
>> + }
>> +
>> + return true;
>> +}
>> +
>> +static void hvm_set_uc_mode(struct vcpu *v, bool is_in_uc_mode)
>> +{
>> + v->domain->arch.hvm.is_in_uc_mode = is_in_uc_mode;
>> + shadow_blow_tables_per_domain(v->domain);
>> +}
>
> Similarly I wonder whether this function wouldn't better change to taking
> struct domain * right away. "v" itself is only ever used to get hold of
> its domain. At the call sites this will then make obvious that this is a
> domain-wide operation.
Agree. but..
In this patch I wanted to minimize changes and do modifications step by step.
I can add additional patch such as "rework struct domain access in cache disable mode code".
Will it work?
>
>> +static void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
>> +{
>> + if ( value & X86_CR0_CD )
>> + {
>> + /* Entering no fill cache mode. */
>> + spin_lock(&v->domain->arch.hvm.uc_lock);
>> + v->arch.hvm.cache_mode = NO_FILL_CACHE_MODE;
>> +
>> + if ( !v->domain->arch.hvm.is_in_uc_mode )
>> + {
>> + domain_pause_nosync(v->domain);
>> +
>> + /* Flush physical caches. */
>> + flush_all(FLUSH_CACHE_EVICT);
>> + hvm_set_uc_mode(v, true);
>> +
>> + domain_unpause(v->domain);
>> + }
>> + spin_unlock(&v->domain->arch.hvm.uc_lock);
>> + }
>> + else if ( !(value & X86_CR0_CD) &&
>> + (v->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) )
>> + {
>> + /* Exit from no fill cache mode. */
>> + spin_lock(&v->domain->arch.hvm.uc_lock);
>> + v->arch.hvm.cache_mode = NORMAL_CACHE_MODE;
>> +
>> + if ( domain_exit_uc_mode(v) )
>> + hvm_set_uc_mode(v, false);
>> +
>> + spin_unlock(&v->domain->arch.hvm.uc_lock);
>> + }
>> +}
>
> This function, in turn, could do with a local struct domain *d.
>
>> static int cf_check vmx_set_guest_pat(struct vcpu *v, u64 gpat)
>> {
>> if ( !paging_mode_hap(v->domain) ||
>
> Why did you put the code above this function? It's solely a helper of
> vmx_handle_cd(), so would imo best be placed immediately ahead of that one.
Right. Since the vmx_*_guest_pat() functions are also used by vmx_handle_cd(), I decided to put the moved code before them.
>
> Bottom line: The change could go in as is, but imo it would be nice if it
> was tidied some while moving.
I'd be very much appreciated if this could happen.
--
Best regards,
-grygorii
On 30.10.2025 13:28, Grygorii Strashko wrote:
> On 30.10.25 13:08, Jan Beulich wrote:
>> On 30.10.2025 00:54, Grygorii Strashko wrote:
>>> From: Grygorii Strashko <grygorii_strashko@epam.com>
>>>
>>> Functions:
>>> hvm_shadow_handle_cd()
>>> hvm_set_uc_mode()
>>> domain_exit_uc_mode()
>>> are used only by Intel VMX code, so move them in VMX code.
>>
>> Nit: I think both in the title and here you mean "to" or "into".
>>
>>> While here:
>>> - minor format change in domain_exit_uc_mode()
>>> - s/(0/1)/(false/true) for bool types
>>>
>>> No functional changes.
>>>
>>> Signed-off-by: Grygorii Strashko <grygorii_strashko@epam.com>
>>
>> You did read Andrew's request to also move the involved structure field(s),
>> didn't you? Oh, wait - maybe that's going to be the subject of patch 3.
>
> yes. it is patch 3 - It is not small.
> And I really wanted this patch to contain as less modifications as possible on
> top of code moving.
I wonder what other x86 maintainers think here.
>> While
>> often splitting steps helps, I'm not sure that's very useful here. You're
>> touching again immediately what you just have moved, all to reach a single
>> goal.
>>
>>> @@ -1421,6 +1422,64 @@ static void cf_check vmx_set_segment_register(
>>> vmx_vmcs_exit(v);
>>> }
>>>
>>> +/* Exit UC mode only if all VCPUs agree on MTRR/PAT and are not in no_fill. */
>>> +static bool domain_exit_uc_mode(struct vcpu *v)
>>> +{
>>> + struct domain *d = v->domain;
>>> + struct vcpu *vs;
>>> +
>>> + for_each_vcpu(d, vs)
>>> + {
>>> + if ( (vs == v) || !vs->is_initialised )
>>> + continue;
>>> + if ( (vs->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) ||
>>> + mtrr_pat_not_equal(vs, v) )
>>> + return false;
>>> + }
>>> +
>>> + return true;
>>> +}
>>> +
>>> +static void hvm_set_uc_mode(struct vcpu *v, bool is_in_uc_mode)
>>> +{
>>> + v->domain->arch.hvm.is_in_uc_mode = is_in_uc_mode;
>>> + shadow_blow_tables_per_domain(v->domain);
>>> +}
>>
>> Similarly I wonder whether this function wouldn't better change to taking
>> struct domain * right away. "v" itself is only ever used to get hold of
>> its domain. At the call sites this will then make obvious that this is a
>> domain-wide operation.
>
> Agree. but..
> In this patch I wanted to minimize changes and do modifications step by step.
>
> I can add additional patch such as "rework struct domain access in cache disable mode code".
> Will it work?
>
>>
>>> +static void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
>>> +{
>>> + if ( value & X86_CR0_CD )
>>> + {
>>> + /* Entering no fill cache mode. */
>>> + spin_lock(&v->domain->arch.hvm.uc_lock);
>>> + v->arch.hvm.cache_mode = NO_FILL_CACHE_MODE;
>>> +
>>> + if ( !v->domain->arch.hvm.is_in_uc_mode )
>>> + {
>>> + domain_pause_nosync(v->domain);
>>> +
>>> + /* Flush physical caches. */
>>> + flush_all(FLUSH_CACHE_EVICT);
>>> + hvm_set_uc_mode(v, true);
>>> +
>>> + domain_unpause(v->domain);
>>> + }
>>> + spin_unlock(&v->domain->arch.hvm.uc_lock);
>>> + }
>>> + else if ( !(value & X86_CR0_CD) &&
>>> + (v->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) )
>>> + {
>>> + /* Exit from no fill cache mode. */
>>> + spin_lock(&v->domain->arch.hvm.uc_lock);
>>> + v->arch.hvm.cache_mode = NORMAL_CACHE_MODE;
>>> +
>>> + if ( domain_exit_uc_mode(v) )
>>> + hvm_set_uc_mode(v, false);
>>> +
>>> + spin_unlock(&v->domain->arch.hvm.uc_lock);
>>> + }
>>> +}
>>
>> This function, in turn, could do with a local struct domain *d.
>>
>>> static int cf_check vmx_set_guest_pat(struct vcpu *v, u64 gpat)
>>> {
>>> if ( !paging_mode_hap(v->domain) ||
>>
>> Why did you put the code above this function? It's solely a helper of
>> vmx_handle_cd(), so would imo best be placed immediately ahead of that one.
>
> Right. Hence vmx_x_guest_pat() are also used by vmx_handle_cd() I decided to put before them.
The main purpose of vmx_set_guest_pat() is, however, its use as a hook function.
It's merely an optimization that the function is called directly by VMX code.
>> Bottom line: The change could go in as is, but imo it would be nice if it
>> was tidied some while moving.
>
> I'd be very much appreciated if this could happen.
"this" being what out of the two or more possible options? (I take it you mean
"could go in as is", but that's guesswork.)
Jan
Hi
On 30.10.25 14:47, Jan Beulich wrote:
> On 30.10.2025 13:28, Grygorii Strashko wrote:
>> On 30.10.25 13:08, Jan Beulich wrote:
>>> On 30.10.2025 00:54, Grygorii Strashko wrote:
>>>> From: Grygorii Strashko <grygorii_strashko@epam.com>
>>>>
>>>> Functions:
>>>> hvm_shadow_handle_cd()
>>>> hvm_set_uc_mode()
>>>> domain_exit_uc_mode()
>>>> are used only by Intel VMX code, so move them in VMX code.
>>>
>>> Nit: I think both in the title and here you mean "to" or "into".
>>>
>>>> While here:
>>>> - minor format change in domain_exit_uc_mode()
>>>> - s/(0/1)/(false/true) for bool types
>>>>
>>>> No functional changes.
>>>>
>>>> Signed-off-by: Grygorii Strashko <grygorii_strashko@epam.com>
>>>
>>> You did read Andrew's request to also move the involved structure field(s),
>>> didn't you? Oh, wait - maybe that's going to be the subject of patch 3.
>>
>> yes. it is patch 3 - It is not small.
>> And I really wanted this patch to contain as less modifications as possible on
>> top of code moving.
>
> I wonder what other x86 maintainers think here.
>
>>> While
>>> often splitting steps helps, I'm not sure that's very useful here. You're
>>> touching again immediately what you just have moved, all to reach a single
>>> goal.
>>>
>>>> @@ -1421,6 +1422,64 @@ static void cf_check vmx_set_segment_register(
>>>> vmx_vmcs_exit(v);
>>>> }
>>>>
>>>> +/* Exit UC mode only if all VCPUs agree on MTRR/PAT and are not in no_fill. */
>>>> +static bool domain_exit_uc_mode(struct vcpu *v)
>>>> +{
>>>> + struct domain *d = v->domain;
>>>> + struct vcpu *vs;
>>>> +
>>>> + for_each_vcpu(d, vs)
>>>> + {
>>>> + if ( (vs == v) || !vs->is_initialised )
>>>> + continue;
>>>> + if ( (vs->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) ||
>>>> + mtrr_pat_not_equal(vs, v) )
>>>> + return false;
>>>> + }
>>>> +
>>>> + return true;
>>>> +}
>>>> +
>>>> +static void hvm_set_uc_mode(struct vcpu *v, bool is_in_uc_mode)
>>>> +{
>>>> + v->domain->arch.hvm.is_in_uc_mode = is_in_uc_mode;
>>>> + shadow_blow_tables_per_domain(v->domain);
>>>> +}
>>>
>>> Similarly I wonder whether this function wouldn't better change to taking
>>> struct domain * right away. "v" itself is only ever used to get hold of
>>> its domain. At the call sites this will then make obvious that this is a
>>> domain-wide operation.
>>
>> Agree. but..
>> In this patch I wanted to minimize changes and do modifications step by step.
>>
>> I can add additional patch such as "rework struct domain access in cache disable mode code".
>> Will it work?
I'm planning to resend with:
- incorporating struct domain * as a parameter of hvm_set_uc_mode()
>>
>>>
>>>> +static void hvm_shadow_handle_cd(struct vcpu *v, unsigned long value)
>>>> +{
>>>> + if ( value & X86_CR0_CD )
>>>> + {
>>>> + /* Entering no fill cache mode. */
>>>> + spin_lock(&v->domain->arch.hvm.uc_lock);
>>>> + v->arch.hvm.cache_mode = NO_FILL_CACHE_MODE;
>>>> +
>>>> + if ( !v->domain->arch.hvm.is_in_uc_mode )
>>>> + {
>>>> + domain_pause_nosync(v->domain);
>>>> +
>>>> + /* Flush physical caches. */
>>>> + flush_all(FLUSH_CACHE_EVICT);
>>>> + hvm_set_uc_mode(v, true);
>>>> +
>>>> + domain_unpause(v->domain);
>>>> + }
>>>> + spin_unlock(&v->domain->arch.hvm.uc_lock);
>>>> + }
>>>> + else if ( !(value & X86_CR0_CD) &&
>>>> + (v->arch.hvm.cache_mode == NO_FILL_CACHE_MODE) )
>>>> + {
>>>> + /* Exit from no fill cache mode. */
>>>> + spin_lock(&v->domain->arch.hvm.uc_lock);
>>>> + v->arch.hvm.cache_mode = NORMAL_CACHE_MODE;
>>>> +
>>>> + if ( domain_exit_uc_mode(v) )
>>>> + hvm_set_uc_mode(v, false);
>>>> +
>>>> + spin_unlock(&v->domain->arch.hvm.uc_lock);
>>>> + }
>>>> +}
>>>
>>> This function, in turn, could do with a local struct domain *d.
- incorporating struct domain * as a local variable
>>>
>>>> static int cf_check vmx_set_guest_pat(struct vcpu *v, u64 gpat)
>>>> {
>>>> if ( !paging_mode_hap(v->domain) ||
>>>
>>> Why did you put the code above this function? It's solely a helper of
>>> vmx_handle_cd(), so would imo best be placed immediately ahead of that one.
>>
>> Right. Hence vmx_x_guest_pat() are also used by vmx_handle_cd() I decided to put before them.
>
> The main purpose of vmx_set_guest_pat() is, however, its use as a hook function.
> It's merely an optimization that the function is called directly by VMX code.
- moving code before vmx_handle_cd().
>
>>> Bottom line: The change could go in as is, but imo it would be nice if it
>>> was tidied some while moving.
>>
>> I'd be very much appreciated if this could happen.
>
> "this" being what out of the two or more possible options? (I take it you mean
> "could go in as is", but that's guesswork.)
I'm not going to squash the rest of the series.
--
Best regards,
-grygorii
© 2016 - 2025 Red Hat, Inc.