With cross-vendor support gone, it's no longer needed.
Signed-off-by: Alejandro Vallejo <alejandro.garciavallejo@amd.com>
---
xen/arch/x86/hvm/svm/svm.c | 42 +++++++++++-------------
xen/arch/x86/hvm/svm/vmcb.c | 3 ++
xen/arch/x86/include/asm/hvm/svm-types.h | 10 ------
3 files changed, 22 insertions(+), 33 deletions(-)
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 0658ca990f..e8f19dec04 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -401,10 +401,6 @@ static int svm_vmcb_save(struct vcpu *v, struct hvm_hw_cpu *c)
{
struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
- c->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs;
- c->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp;
- c->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip;
-
if ( vmcb->event_inj.v &&
hvm_event_needs_reinjection(vmcb->event_inj.type,
vmcb->event_inj.vector) )
@@ -468,11 +464,6 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
svm_update_guest_cr(v, 0, 0);
svm_update_guest_cr(v, 4, 0);
- /* Load sysenter MSRs into both VMCB save area and VCPU fields. */
- vmcb->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs = c->sysenter_cs;
- vmcb->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp = c->sysenter_esp;
- vmcb->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip = c->sysenter_eip;
-
if ( paging_mode_hap(v->domain) )
{
vmcb_set_np(vmcb, true);
@@ -501,6 +492,9 @@ static void svm_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
{
struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
+ data->sysenter_cs = vmcb->sysenter_cs;
+ data->sysenter_esp = vmcb->sysenter_esp;
+ data->sysenter_eip = vmcb->sysenter_eip;
data->shadow_gs = vmcb->kerngsbase;
data->msr_lstar = vmcb->lstar;
data->msr_star = vmcb->star;
@@ -512,11 +506,14 @@ static void svm_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
{
struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
- vmcb->kerngsbase = data->shadow_gs;
- vmcb->lstar = data->msr_lstar;
- vmcb->star = data->msr_star;
- vmcb->cstar = data->msr_cstar;
- vmcb->sfmask = data->msr_syscall_mask;
+ vmcb->sysenter_cs = data->sysenter_cs;
+ vmcb->sysenter_esp = data->sysenter_esp;
+ vmcb->sysenter_eip = data->sysenter_eip;
+ vmcb->kerngsbase = data->shadow_gs;
+ vmcb->lstar = data->msr_lstar;
+ vmcb->star = data->msr_star;
+ vmcb->cstar = data->msr_cstar;
+ vmcb->sfmask = data->msr_syscall_mask;
v->arch.hvm.guest_efer = data->msr_efer;
svm_update_guest_efer(v);
}
@@ -1720,12 +1717,9 @@ static int cf_check svm_msr_read_intercept(
switch ( msr )
{
- /*
- * Sync not needed while the cross-vendor logic is in unilateral effect.
case MSR_IA32_SYSENTER_CS:
case MSR_IA32_SYSENTER_ESP:
case MSR_IA32_SYSENTER_EIP:
- */
case MSR_STAR:
case MSR_LSTAR:
case MSR_CSTAR:
@@ -1740,13 +1734,15 @@ static int cf_check svm_msr_read_intercept(
switch ( msr )
{
case MSR_IA32_SYSENTER_CS:
- *msr_content = v->arch.hvm.svm.guest_sysenter_cs;
+ *msr_content = vmcb->sysenter_cs;
break;
+
case MSR_IA32_SYSENTER_ESP:
- *msr_content = v->arch.hvm.svm.guest_sysenter_esp;
+ *msr_content = vmcb->sysenter_esp;
break;
+
case MSR_IA32_SYSENTER_EIP:
- *msr_content = v->arch.hvm.svm.guest_sysenter_eip;
+ *msr_content = vmcb->sysenter_eip;
break;
case MSR_STAR:
@@ -1940,11 +1936,11 @@ static int cf_check svm_msr_write_intercept(
switch ( msr )
{
case MSR_IA32_SYSENTER_ESP:
- vmcb->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp = msr_content;
+ vmcb->sysenter_esp = msr_content;
break;
case MSR_IA32_SYSENTER_EIP:
- vmcb->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip = msr_content;
+ vmcb->sysenter_eip = msr_content;
break;
case MSR_LSTAR:
@@ -1970,7 +1966,7 @@ static int cf_check svm_msr_write_intercept(
break;
case MSR_IA32_SYSENTER_CS:
- vmcb->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs = msr_content;
+ vmcb->sysenter_cs = msr_content;
break;
case MSR_STAR:
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index e583ef8548..76fcaf15c2 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -97,6 +97,9 @@ static int construct_vmcb(struct vcpu *v)
svm_disable_intercept_for_msr(v, MSR_LSTAR);
svm_disable_intercept_for_msr(v, MSR_STAR);
svm_disable_intercept_for_msr(v, MSR_SYSCALL_MASK);
+ svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_CS);
+ svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_EIP);
+ svm_disable_intercept_for_msr(v, MSR_IA32_SYSENTER_ESP);
vmcb->_msrpm_base_pa = virt_to_maddr(svm->msrpm);
vmcb->_iopm_base_pa = __pa(v->domain->arch.hvm.io_bitmap);
diff --git a/xen/arch/x86/include/asm/hvm/svm-types.h b/xen/arch/x86/include/asm/hvm/svm-types.h
index 051b235d8f..aaee91b4b6 100644
--- a/xen/arch/x86/include/asm/hvm/svm-types.h
+++ b/xen/arch/x86/include/asm/hvm/svm-types.h
@@ -27,16 +27,6 @@ struct svm_vcpu {
/* VMCB has a cached instruction from #PF/#NPF Decode Assist? */
uint8_t cached_insn_len; /* Zero if no cached instruction. */
-
- /*
- * Upper four bytes are undefined in the VMCB, therefore we can't use the
- * fields in the VMCB. Write a 64bit value and then read a 64bit value is
- * fine unless there's a VMRUN/VMEXIT in between which clears the upper
- * four bytes.
- */
- uint64_t guest_sysenter_cs;
- uint64_t guest_sysenter_esp;
- uint64_t guest_sysenter_eip;
};
struct nestedsvm {
--
2.43.0
On 22/01/2026 4:49 pm, Alejandro Vallejo wrote:
> With cross-vendor support gone, it's no longer needed.
>
> Signed-off-by: Alejandro Vallejo <alejandro.garciavallejo@amd.com>
You're not really dropping or altering the SYSENTER behaviour. You're
dropping the emulation of Intel's MSR behaviour on AMD systems.
And this comes with a subtle change in behaviour that is relevant to
point out.
Notably, AMD prohibit the use of the SYSENTER and SYSEXIT instructions
in Long mode, and their behaviour for the SYSENTER MSRs still follows
the 32bit model where the upper half is write-discard. AMD CPUs really
do only have 32 bits of storage for the 3 MSRs and, unlike Intel, did
not extend it to 64 bits of storage to support Long Mode.
Xen previously (and unconditionally, irrespective of same or cross
vendor configuration) emulated Intel behaviour on AMD CPUs.
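To make the visible difference concrete, this is roughly what a guest
observes (hypothetical fragment; rdmsr()/wrmsr() here are stand-ins for
whatever raw uint64_t MSR accessors the guest kernel has):

    /* Guest writes a 64-bit value to a SYSENTER MSR, then reads back. */
    wrmsr(MSR_IA32_SYSENTER_EIP, 0x1122334455667788ULL);
    val = rdmsr(MSR_IA32_SYSENTER_EIP);

    /* Xen's old Intel-style emulation:      val == 0x1122334455667788 */
    /* Real AMD hardware (and this patch):   val == 0x0000000055667788 */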
I think this is even a latent bug; on hardware that supports
vVM{LOAD,SAVE}, the intercept doesn't trigger anyway, and this whole
emulation falls apart.
Something which is not obvious in the slightest is that the MSR
Intercept bitmaps for VMs apply to the RDMSR/WRMSR instructions only,
and not to implicit MSR accesses such as SWAPGS/LKGS/WR{FS,GS}BASE,
etc.
Anyway, I think it is necessary to note that VMs which happened to
store state in the high parts of the SEP MSRs will now lose this state.
It is an ABI change, but it's acceptable because there are almost
certainly no VMs doing this, as it's not how real AMD CPUs behave.
~Andrew
On 22/01/2026 at 17:51, Alejandro Vallejo wrote:
> With cross-vendor support gone, it's no longer needed.
>
> Signed-off-by: Alejandro Vallejo <alejandro.garciavallejo@amd.com>
> ---
> xen/arch/x86/hvm/svm/svm.c | 42 +++++++++++-------------
> xen/arch/x86/hvm/svm/vmcb.c | 3 ++
> xen/arch/x86/include/asm/hvm/svm-types.h | 10 ------
> 3 files changed, 22 insertions(+), 33 deletions(-)
>
> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
> index 0658ca990f..e8f19dec04 100644
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -401,10 +401,6 @@ static int svm_vmcb_save(struct vcpu *v, struct hvm_hw_cpu *c)
> {
> struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
>
> - c->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs;
> - c->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp;
> - c->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip;
> -
> if ( vmcb->event_inj.v &&
> hvm_event_needs_reinjection(vmcb->event_inj.type,
> vmcb->event_inj.vector) )
> @@ -468,11 +464,6 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
> svm_update_guest_cr(v, 0, 0);
> svm_update_guest_cr(v, 4, 0);
>
> - /* Load sysenter MSRs into both VMCB save area and VCPU fields. */
> - vmcb->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs = c->sysenter_cs;
> - vmcb->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp = c->sysenter_esp;
> - vmcb->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip = c->sysenter_eip;
> -
> if ( paging_mode_hap(v->domain) )
> {
> vmcb_set_np(vmcb, true);
> @@ -501,6 +492,9 @@ static void svm_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
> {
> struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
>
> + data->sysenter_cs = vmcb->sysenter_cs;
> + data->sysenter_esp = vmcb->sysenter_esp;
> + data->sysenter_eip = vmcb->sysenter_eip;
> data->shadow_gs = vmcb->kerngsbase;
> data->msr_lstar = vmcb->lstar;
> data->msr_star = vmcb->star;
> @@ -512,11 +506,14 @@ static void svm_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
> {
> struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
>
> - vmcb->kerngsbase = data->shadow_gs;
> - vmcb->lstar = data->msr_lstar;
> - vmcb->star = data->msr_star;
> - vmcb->cstar = data->msr_cstar;
> - vmcb->sfmask = data->msr_syscall_mask;
> + vmcb->sysenter_cs = data->sysenter_cs;
> + vmcb->sysenter_esp = data->sysenter_esp;
> + vmcb->sysenter_eip = data->sysenter_eip;
> + vmcb->kerngsbase = data->shadow_gs;
> + vmcb->lstar = data->msr_lstar;
> + vmcb->star = data->msr_star;
> + vmcb->cstar = data->msr_cstar;
> + vmcb->sfmask = data->msr_syscall_mask;
> v->arch.hvm.guest_efer = data->msr_efer;
> svm_update_guest_efer(v);
> }
I think we should merge svm_save_cpu_state/svm_vmcb_save into
svm_save_vmcb_ctxt, and similarly svm_load_cpu_state/svm_vmcb_restore
into svm_load_vmcb_ctxt, to avoid splitting the same logic across
multiple functions. A rough sketch of the save side follows.
That could be done in a separate patch (or series).
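Roughly something like this (an untested sketch, just to illustrate the
shape; names as suggested above, with the remaining logic elided):

    static int svm_save_vmcb_ctxt(struct vcpu *v, struct hvm_hw_cpu *c)
    {
        struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;

        /* MSR state held directly in the VMCB. */
        c->sysenter_cs      = vmcb->sysenter_cs;
        c->sysenter_esp     = vmcb->sysenter_esp;
        c->sysenter_eip     = vmcb->sysenter_eip;
        c->shadow_gs        = vmcb->kerngsbase;
        c->msr_lstar        = vmcb->lstar;
        c->msr_star         = vmcb->star;
        c->msr_cstar        = vmcb->cstar;
        c->msr_syscall_mask = vmcb->sfmask;

        /* State with authoritative copies outside the VMCB. */
        c->msr_efer = v->arch.hvm.guest_efer;

        /* ... plus the event_inj re-injection logic currently in
         * svm_vmcb_save, etc. */

        return 0;
    }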
> [snip]
Reviewed-by: Teddy Astie <teddy.astie@vates.tech>
--
Teddy Astie | Vates XCP-ng Developer
XCP-ng & Xen Orchestra - Vates solutions
web: https://vates.tech
On Thu Jan 22, 2026 at 6:52 PM CET, Teddy Astie wrote:
> On 22/01/2026 at 17:51, Alejandro Vallejo wrote:
>> With cross-vendor support gone, it's no longer needed.
>>
>> Signed-off-by: Alejandro Vallejo <alejandro.garciavallejo@amd.com>
>> ---
>> xen/arch/x86/hvm/svm/svm.c | 42 +++++++++++-------------
>> xen/arch/x86/hvm/svm/vmcb.c | 3 ++
>> xen/arch/x86/include/asm/hvm/svm-types.h | 10 ------
>> 3 files changed, 22 insertions(+), 33 deletions(-)
>>
>> diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
>> index 0658ca990f..e8f19dec04 100644
>> --- a/xen/arch/x86/hvm/svm/svm.c
>> +++ b/xen/arch/x86/hvm/svm/svm.c
>> @@ -401,10 +401,6 @@ static int svm_vmcb_save(struct vcpu *v, struct hvm_hw_cpu *c)
>> {
>> struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
>>
>> - c->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs;
>> - c->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp;
>> - c->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip;
>> -
>> if ( vmcb->event_inj.v &&
>> hvm_event_needs_reinjection(vmcb->event_inj.type,
>> vmcb->event_inj.vector) )
>> @@ -468,11 +464,6 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
>> svm_update_guest_cr(v, 0, 0);
>> svm_update_guest_cr(v, 4, 0);
>>
>> - /* Load sysenter MSRs into both VMCB save area and VCPU fields. */
>> - vmcb->sysenter_cs = v->arch.hvm.svm.guest_sysenter_cs = c->sysenter_cs;
>> - vmcb->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp = c->sysenter_esp;
>> - vmcb->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip = c->sysenter_eip;
>> -
>> if ( paging_mode_hap(v->domain) )
>> {
>> vmcb_set_np(vmcb, true);
>> @@ -501,6 +492,9 @@ static void svm_save_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
>> {
>> struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
>>
>> + data->sysenter_cs = vmcb->sysenter_cs;
>> + data->sysenter_esp = vmcb->sysenter_esp;
>> + data->sysenter_eip = vmcb->sysenter_eip;
>> data->shadow_gs = vmcb->kerngsbase;
>> data->msr_lstar = vmcb->lstar;
>> data->msr_star = vmcb->star;
>> @@ -512,11 +506,14 @@ static void svm_load_cpu_state(struct vcpu *v, struct hvm_hw_cpu *data)
>> {
>> struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
>>
>> - vmcb->kerngsbase = data->shadow_gs;
>> - vmcb->lstar = data->msr_lstar;
>> - vmcb->star = data->msr_star;
>> - vmcb->cstar = data->msr_cstar;
>> - vmcb->sfmask = data->msr_syscall_mask;
>> + vmcb->sysenter_cs = data->sysenter_cs;
>> + vmcb->sysenter_esp = data->sysenter_esp;
>> + vmcb->sysenter_eip = data->sysenter_eip;
>> + vmcb->kerngsbase = data->shadow_gs;
>> + vmcb->lstar = data->msr_lstar;
>> + vmcb->star = data->msr_star;
>> + vmcb->cstar = data->msr_cstar;
>> + vmcb->sfmask = data->msr_syscall_mask;
>> v->arch.hvm.guest_efer = data->msr_efer;
>> svm_update_guest_efer(v);
>> }
>
> I think we should merge svm_save_cpu_state/svm_vmcb_save into
> svm_save_vmcb_ctxt and similarly for svm_load_cpu_state/svm_vmcb_restore
> into svm_load_vmcb_ctxt to avoid having multiple functions as a part of
> the same logic.
>
> That could be done in a separate patch (or series).
Maybe. I (subjectively) think it makes sense to keep the fields coming
straight from the VMCB separate from those that have authoritative
values elsewhere. Nothing precludes having that visual separation
within a single function, though; it's not like either function is huge.
I may append it as a patch at the tail.
> [snip]
Cheers,
Alejandro