Pass the target vCPU to the hwapic_isr_update() vendor hook so that VMX
can defer the update until after nested VM-Exit if an EOI for L1's vAPIC
occurs while L2 is active.
No functional change intended.
Cc: stable@vger.kernel.org
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/include/asm/kvm_host.h |  2 +-
 arch/x86/kvm/lapic.c            | 11 +++++------
 arch/x86/kvm/vmx/vmx.c          |  2 +-
 arch/x86/kvm/vmx/x86_ops.h      |  2 +-
 4 files changed, 8 insertions(+), 9 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 70c7ed0ef184..3f3de047cbfd 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1734,7 +1734,7 @@ struct kvm_x86_ops {
 	bool allow_apicv_in_x2apic_without_x2apic_virtualization;
 	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
 	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
-	void (*hwapic_isr_update)(int isr);
+	void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);
 	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
 	void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
 	void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 65412640cfc7..5be2be44a188 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -763,7 +763,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
 	 * just set SVI.
 	 */
 	if (unlikely(apic->apicv_active))
-		kvm_x86_call(hwapic_isr_update)(vec);
+		kvm_x86_call(hwapic_isr_update)(apic->vcpu, vec);
 	else {
 		++apic->isr_count;
 		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
@@ -808,7 +808,7 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
 	 * and must be left alone.
 	 */
 	if (unlikely(apic->apicv_active))
-		kvm_x86_call(hwapic_isr_update)(apic_find_highest_isr(apic));
+		kvm_x86_call(hwapic_isr_update)(apic->vcpu, apic_find_highest_isr(apic));
 	else {
 		--apic->isr_count;
 		BUG_ON(apic->isr_count < 0);
@@ -2767,7 +2767,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 	if (apic->apicv_active) {
 		kvm_x86_call(apicv_post_state_restore)(vcpu);
 		kvm_x86_call(hwapic_irr_update)(vcpu, -1);
-		kvm_x86_call(hwapic_isr_update)(-1);
+		kvm_x86_call(hwapic_isr_update)(vcpu, -1);
 	}
 
 	vcpu->arch.apic_arb_prio = 0;
@@ -3083,9 +3083,8 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
 	kvm_apic_update_apicv(vcpu);
 	if (apic->apicv_active) {
 		kvm_x86_call(apicv_post_state_restore)(vcpu);
-		kvm_x86_call(hwapic_irr_update)(vcpu,
-						apic_find_highest_irr(apic));
-		kvm_x86_call(hwapic_isr_update)(apic_find_highest_isr(apic));
+		kvm_x86_call(hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
+		kvm_x86_call(hwapic_isr_update)(vcpu, apic_find_highest_isr(apic));
 	}
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	if (ioapic_in_kernel(vcpu->kvm))
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 6ed801ffe33f..fe9887a5fa4a 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6868,7 +6868,7 @@ void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
 	read_unlock(&vcpu->kvm->mmu_lock);
 }
 
-void vmx_hwapic_isr_update(int max_isr)
+void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
 {
 	u16 status;
 	u8 old;
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index a55981c5216e..48dc76bf0ec0 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -48,7 +48,7 @@ void vmx_migrate_timers(struct kvm_vcpu *vcpu);
 void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
 void vmx_apicv_pre_state_restore(struct kvm_vcpu *vcpu);
 void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
-void vmx_hwapic_isr_update(int max_isr);
+void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
 int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu);
 void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
 			   int trig_mode, int vector);
--
2.47.0.163.g1226f6d8fa-goog
On Fri, Nov 01, 2024, Sean Christopherson wrote:
> Pass the target vCPU to the hwapic_isr_update() vendor hook so that VMX
> can defer the update until after nested VM-Exit if an EOI for L1's vAPIC
> occurs while L2 is active.
>
> No functional change intended.
>
> Cc: stable@vger.kernel.org
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
> @@ -1734,7 +1734,7 @@ struct kvm_x86_ops {
> 	bool allow_apicv_in_x2apic_without_x2apic_virtualization;
> 	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
> 	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
> -	void (*hwapic_isr_update)(int isr);
> +	void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr);

Oh, the hilarity.  Got that one wrong.

  d39850f57d21 ("KVM: x86: Drop @vcpu parameter from kvm_x86_ops.hwapic_isr_update()")

Not entirely sure what cleanups were made possible by dropping @vcpu at the
time.  I assume the end goal was ce0a58f4756c ("KVM: x86: Move "apicv_active"
into "struct kvm_lapic""), but that should have been possible, if slightly more
annoying, without modifying hwapic_isr_update().

*sigh*
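For context, the point of having @vcpu back is that the VMX callback can then
key off the vCPU's nested state and punt the SVI update while L2 is active.  A
rough, untested sketch of that deferral, using KVM's existing is_guest_mode()
and to_vmx() helpers; the "update_vmcs01_hwapic_isr" flag is purely
illustrative here, the actual wiring is left to a follow-up patch:

	void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
	{
		/*
		 * While L2 is active, vmcs02 is the loaded VMCS, so writing
		 * GUEST_INTR_STATUS now would touch the wrong VMCS.  Record
		 * that vmcs01's SVI needs refreshing and do the update after
		 * nested VM-Exit instead.
		 */
		if (is_guest_mode(vcpu)) {
			to_vmx(vcpu)->nested.update_vmcs01_hwapic_isr = true;
			return;
		}

		/* ... existing SVI update via GUEST_INTR_STATUS ... */
	}

None of that is possible without @vcpu in the hook's signature, hence this
patch.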
On Fri, Nov 01, 2024 at 12:21:13PM -0700, Sean Christopherson wrote:
>Pass the target vCPU to the hwapic_isr_update() vendor hook so that VMX
>can defer the update until after nested VM-Exit if an EOI for L1's vAPIC
>occurs while L2 is active.
>
>No functional change intended.
>
>Cc: stable@vger.kernel.org
>Signed-off-by: Sean Christopherson <seanjc@google.com>

Reviewed-by: Chao Gao <chao.gao@intel.com>

>--- a/arch/x86/kvm/lapic.c
>+++ b/arch/x86/kvm/lapic.c
>@@ -763,7 +763,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
> 	 * just set SVI.
> 	 */
> 	if (unlikely(apic->apicv_active))
>-		kvm_x86_call(hwapic_isr_update)(vec);
>+		kvm_x86_call(hwapic_isr_update)(apic->vcpu, vec);

Both branches need braces here. So, maybe take the opportunity to fix the
coding style issue.

> 	else {
> 		++apic->isr_count;
> 		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
>@@ -808,7 +808,7 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
> 	 * and must be left alone.
> 	 */
> 	if (unlikely(apic->apicv_active))
>-		kvm_x86_call(hwapic_isr_update)(apic_find_highest_isr(apic));
>+		kvm_x86_call(hwapic_isr_update)(apic->vcpu, apic_find_highest_isr(apic));

ditto

> 	else {
> 		--apic->isr_count;
> 		BUG_ON(apic->isr_count < 0);
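For the apic_set_isr() hunk, the braced form being suggested would look
something like this (style-only sketch; the rest of the else branch is
elided):

	if (unlikely(apic->apicv_active)) {
		kvm_x86_call(hwapic_isr_update)(apic->vcpu, vec);
	} else {
		++apic->isr_count;
		BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
		/* ... */
	}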
On Mon, Nov 04, 2024, Chao Gao wrote:
> On Fri, Nov 01, 2024 at 12:21:13PM -0700, Sean Christopherson wrote:
> >@@ -763,7 +763,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
> > 	 * just set SVI.
> > 	 */
> > 	if (unlikely(apic->apicv_active))
> >-		kvm_x86_call(hwapic_isr_update)(vec);
> >+		kvm_x86_call(hwapic_isr_update)(apic->vcpu, vec);
>
> Both branches need braces here. So, maybe take the opportunity to fix the
> coding style issue.

Very tempting, but since this is destined for stable, I'll go with a minimal
patch to reduce the odds of creating a conflict.