xen/arch/x86/hvm/svm/nestedsvm.c | 4 ++-- xen/arch/x86/hvm/svm/svm.c | 8 ++++---- xen/arch/x86/hvm/svm/svmdebug.c | 4 ++-- xen/include/asm-x86/hvm/svm/vmcb.h | 13 ++++++++++++- 4 files changed, 20 insertions(+), 9 deletions(-)
According to chapter "Appendix B Layout of VMCB" in the new version
(v3.32) of the AMD64 APM[1], bit 1 of the VMCB offset 68h is defined
as GUEST_INTERRUPT_MASK.
In the current Xen code, the whole u64 interrupt_shadow is used to set
up the interrupt shadow, which misuses the other bits in VMCB offset
68h as part of interrupt_shadow.
Add the union intstat_t for VMCB offset 68h and fix the code to use
only bit 0 as intr_shadow, according to the new APM description.
Reference:
[1] https://www.amd.com/system/files/TechDocs/24593.pdf
Signed-off-by: Pu Wen <puwen@hygon.cn>
---
xen/arch/x86/hvm/svm/nestedsvm.c | 4 ++--
xen/arch/x86/hvm/svm/svm.c | 8 ++++----
xen/arch/x86/hvm/svm/svmdebug.c | 4 ++--
xen/include/asm-x86/hvm/svm/vmcb.h | 13 ++++++++++++-
4 files changed, 20 insertions(+), 9 deletions(-)
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index 3bd2a119d3..595cbb1d81 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -508,7 +508,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs)
}
/* Shadow Mode */
- n2vmcb->interrupt_shadow = ns_vmcb->interrupt_shadow;
+ n2vmcb->int_stat.intr_shadow = ns_vmcb->int_stat.intr_shadow;
/* Exit codes */
n2vmcb->exitcode = ns_vmcb->exitcode;
@@ -1058,7 +1058,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs)
ns_vmcb->_vintr.fields.intr_masking = 0;
/* Shadow mode */
- ns_vmcb->interrupt_shadow = n2vmcb->interrupt_shadow;
+ ns_vmcb->int_stat.intr_shadow = n2vmcb->int_stat.intr_shadow;
/* Exit codes */
ns_vmcb->exitcode = n2vmcb->exitcode;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 32d8d847f2..888f504a94 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -116,7 +116,7 @@ void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len)
regs->rip += inst_len;
regs->eflags &= ~X86_EFLAGS_RF;
- curr->arch.hvm.svm.vmcb->interrupt_shadow = 0;
+ curr->arch.hvm.svm.vmcb->int_stat.intr_shadow = 0;
if ( regs->eflags & X86_EFLAGS_TF )
hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
@@ -432,7 +432,7 @@ static unsigned int svm_get_interrupt_shadow(struct vcpu *v)
struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
unsigned int intr_shadow = 0;
- if ( vmcb->interrupt_shadow )
+ if ( vmcb->int_stat.intr_shadow )
intr_shadow |= HVM_INTR_SHADOW_MOV_SS | HVM_INTR_SHADOW_STI;
if ( vmcb_get_general1_intercepts(vmcb) & GENERAL1_INTERCEPT_IRET )
@@ -446,7 +446,7 @@ static void svm_set_interrupt_shadow(struct vcpu *v, unsigned int intr_shadow)
struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
- vmcb->interrupt_shadow =
+ vmcb->int_stat.intr_shadow =
!!(intr_shadow & (HVM_INTR_SHADOW_MOV_SS|HVM_INTR_SHADOW_STI));
general1_intercepts &= ~GENERAL1_INTERCEPT_IRET;
@@ -2945,7 +2945,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
* retired.
*/
general1_intercepts &= ~GENERAL1_INTERCEPT_IRET;
- vmcb->interrupt_shadow = 1;
+ vmcb->int_stat.intr_shadow = 1;
vmcb_set_general1_intercepts(vmcb, general1_intercepts);
break;
diff --git a/xen/arch/x86/hvm/svm/svmdebug.c b/xen/arch/x86/hvm/svm/svmdebug.c
index 366a003f21..5547baa497 100644
--- a/xen/arch/x86/hvm/svm/svmdebug.c
+++ b/xen/arch/x86/hvm/svm/svmdebug.c
@@ -51,9 +51,9 @@ void svm_vmcb_dump(const char *from, const struct vmcb_struct *vmcb)
printk("iopm_base_pa = %#"PRIx64" msrpm_base_pa = %#"PRIx64" tsc_offset = %#"PRIx64"\n",
vmcb_get_iopm_base_pa(vmcb), vmcb_get_msrpm_base_pa(vmcb),
vmcb_get_tsc_offset(vmcb));
- printk("tlb_control = %#x vintr = %#"PRIx64" interrupt_shadow = %#"PRIx64"\n",
+ printk("tlb_control = %#x vintr = %#"PRIx64" interrupt_shadow = %#x\n",
vmcb->tlb_control, vmcb_get_vintr(vmcb).bytes,
- vmcb->interrupt_shadow);
+ vmcb->int_stat.intr_shadow);
printk("event_inj %016"PRIx64", valid? %d, ec? %d, type %u, vector %#x\n",
vmcb->event_inj.raw, vmcb->event_inj.v,
vmcb->event_inj.ev, vmcb->event_inj.type,
diff --git a/xen/include/asm-x86/hvm/svm/vmcb.h b/xen/include/asm-x86/hvm/svm/vmcb.h
index b9e389d481..d8a3285752 100644
--- a/xen/include/asm-x86/hvm/svm/vmcb.h
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h
@@ -316,6 +316,17 @@ typedef union
uint64_t raw;
} intinfo_t;
+typedef union
+{
+ struct
+ {
+ u64 intr_shadow: 1;
+ u64 guest_intr_mask:1;
+ u64 resvd: 62;
+ };
+ uint64_t raw;
+} intstat_t;
+
typedef union
{
u64 bytes;
@@ -414,7 +425,7 @@ struct vmcb_struct {
u8 tlb_control; /* offset 0x5C */
u8 res07[3];
vintr_t _vintr; /* offset 0x60 - cleanbit 3 */
- u64 interrupt_shadow; /* offset 0x68 */
+ intstat_t int_stat; /* offset 0x68 */
u64 exitcode; /* offset 0x70 */
union {
struct {
--
2.23.0
On 24.03.2020 05:52, Pu Wen wrote: > --- a/xen/arch/x86/hvm/svm/nestedsvm.c > +++ b/xen/arch/x86/hvm/svm/nestedsvm.c > @@ -508,7 +508,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs) > } > > /* Shadow Mode */ > - n2vmcb->interrupt_shadow = ns_vmcb->interrupt_shadow; > + n2vmcb->int_stat.intr_shadow = ns_vmcb->int_stat.intr_shadow; While bit 1 is irrelevant to VMRUN, I still wonder whether you shouldn't copy "raw" here. > @@ -1058,7 +1058,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs) > ns_vmcb->_vintr.fields.intr_masking = 0; > > /* Shadow mode */ > - ns_vmcb->interrupt_shadow = n2vmcb->interrupt_shadow; > + ns_vmcb->int_stat.intr_shadow = n2vmcb->int_stat.intr_shadow; Same here, or at the very least you want to also copy bit 1 here. > --- a/xen/arch/x86/hvm/svm/svmdebug.c > +++ b/xen/arch/x86/hvm/svm/svmdebug.c > @@ -51,9 +51,9 @@ void svm_vmcb_dump(const char *from, const struct vmcb_struct *vmcb) > printk("iopm_base_pa = %#"PRIx64" msrpm_base_pa = %#"PRIx64" tsc_offset = %#"PRIx64"\n", > vmcb_get_iopm_base_pa(vmcb), vmcb_get_msrpm_base_pa(vmcb), > vmcb_get_tsc_offset(vmcb)); > - printk("tlb_control = %#x vintr = %#"PRIx64" interrupt_shadow = %#"PRIx64"\n", > + printk("tlb_control = %#x vintr = %#"PRIx64" interrupt_shadow = %#x\n", > vmcb->tlb_control, vmcb_get_vintr(vmcb).bytes, > - vmcb->interrupt_shadow); > + vmcb->int_stat.intr_shadow); Please dump all 64 bits here. Jan
On 2020/3/24 17:08, Jan Beulich wrote: > On 24.03.2020 05:52, Pu Wen wrote: >> --- a/xen/arch/x86/hvm/svm/nestedsvm.c >> +++ b/xen/arch/x86/hvm/svm/nestedsvm.c >> @@ -508,7 +508,7 @@ static int nsvm_vmcb_prepare4vmrun(struct vcpu *v, struct cpu_user_regs *regs) >> } >> >> /* Shadow Mode */ >> - n2vmcb->interrupt_shadow = ns_vmcb->interrupt_shadow; >> + n2vmcb->int_stat.intr_shadow = ns_vmcb->int_stat.intr_shadow; > > While bit 1 is irrelevant to VMRUN, I still wonder whether you > shouldn't copy "raw" here. Ok, will copy the whole "raw" as suggested, thanks. >> @@ -1058,7 +1058,7 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs) >> ns_vmcb->_vintr.fields.intr_masking = 0; >> >> /* Shadow mode */ >> - ns_vmcb->interrupt_shadow = n2vmcb->interrupt_shadow; >> + ns_vmcb->int_stat.intr_shadow = n2vmcb->int_stat.intr_shadow; > > Same here, or at the very least you want to also copy bit 1 here. Ok, will change to: /* Interrupt state */ ns_vmcb->int_stat = n2vmcb->int_stat; >> --- a/xen/arch/x86/hvm/svm/svmdebug.c >> +++ b/xen/arch/x86/hvm/svm/svmdebug.c >> @@ -51,9 +51,9 @@ void svm_vmcb_dump(const char *from, const struct vmcb_struct *vmcb) >> printk("iopm_base_pa = %#"PRIx64" msrpm_base_pa = %#"PRIx64" tsc_offset = %#"PRIx64"\n", >> vmcb_get_iopm_base_pa(vmcb), vmcb_get_msrpm_base_pa(vmcb), >> vmcb_get_tsc_offset(vmcb)); >> - printk("tlb_control = %#x vintr = %#"PRIx64" interrupt_shadow = %#"PRIx64"\n", >> + printk("tlb_control = %#x vintr = %#"PRIx64" interrupt_shadow = %#x\n", >> vmcb->tlb_control, vmcb_get_vintr(vmcb).bytes, >> - vmcb->interrupt_shadow); >> + vmcb->int_stat.intr_shadow); OK, will dump like this: printk("tlb_control = %#x vintr = %#"PRIx64" int_stat = %#"PRIx64"\n", vmcb->tlb_control, vmcb_get_vintr(vmcb).bytes, vmcb->int_stat.raw); Thx. -- Regards, Pu Wen
© 2016 - 2024 Red Hat, Inc.