The per-VM ASID is currently shared by both L1 and L2 guests. That ASID
is currently flushed on every transition between L1 and L2.
Allocate and track a separate ASID per-VM for nested guests. This is in
preparation for doing fine-grained TLB flushes on nested transitions
instead of unconditional full flushes.
Nested ASIDs are still not fully maintained (e.g. a remote flush will
only flush the current ASID), so keep the TLB flush on every transition
until this is sorted out in following changes.
Add a helper to get the ASID associated with a specific VMCB and use it
instead of directly reading the VM's ASID. This transparently uses L2's
ASID when an L2 guest is being run.
L1's ASID is flushed on KVM_REQ_TLB_FLUSH_GUEST if it is the active
context, so remove the TODO in nested_svm_transition_tlb_flush() about
it.
Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
---
arch/x86/kvm/svm/nested.c | 8 ++++++--
arch/x86/kvm/svm/svm.c | 13 +++++++++++--
arch/x86/kvm/svm/svm.h | 3 ++-
3 files changed, 19 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 81184b2fb27fd..75223869aa8c6 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -495,7 +495,6 @@ static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
* - Honor L1's request to flush an ASID on nested VMRUN
* - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
* - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
- * - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
*
* [*] Unlike nested EPT, SVM's ASID management can invalidate nested
* NPT guest-physical mappings on VMRUN.
@@ -677,7 +676,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
vmcb02->control.nested_ctl = vmcb01->control.nested_ctl;
vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
vmcb02->control.msrpm_base_pa = vmcb01->control.msrpm_base_pa;
- vmcb02->control.asid = svm_asid(vcpu->kvm);
+ vmcb02->control.asid = svm_nested_asid(vcpu->kvm);
/* Also overwritten later if necessary. */
vmcb_clr_flush_asid(vmcb02);
@@ -1179,6 +1178,7 @@ static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
int svm_allocate_nested(struct vcpu_svm *svm)
{
+ struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
struct page *vmcb02_page;
if (svm->nested.initialized)
@@ -1196,6 +1196,10 @@ int svm_allocate_nested(struct vcpu_svm *svm)
svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);
svm->nested.initialized = true;
+
+ if (!kvm_svm->nested_asid)
+ kvm_svm->nested_asid = kvm_svm->asid;
+
return 0;
err_free_vmcb02:
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index f028d006f69dc..e664d8428c792 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1225,17 +1225,26 @@ static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu)
}
}
-unsigned int svm_asid(struct kvm *kvm)
+unsigned int svm_nested_asid(struct kvm *kvm)
+{
+ return to_kvm_svm(kvm)->nested_asid;
+}
+
+static unsigned int svm_asid(struct kvm *kvm)
{
return to_kvm_svm(kvm)->asid;
}
static unsigned int svm_get_current_asid(struct vcpu_svm *svm)
{
- struct kvm *kvm = svm->vcpu.kvm;
+ struct kvm_vcpu *vcpu = &svm->vcpu;
+ struct kvm *kvm = vcpu->kvm;
if (sev_guest(kvm))
return sev_get_asid(kvm);
+ if (is_guest_mode(vcpu))
+ return svm_nested_asid(kvm);
+ WARN_ON_ONCE(svm->current_vmcb != &svm->vmcb01);
return svm_asid(kvm);
}
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 436b7e83141b9..e67e3a64e92f7 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -118,6 +118,7 @@ struct kvm_svm {
struct kvm kvm;
unsigned int asid;
+ unsigned int nested_asid;
/* Struct members for AVIC */
u32 avic_vm_id;
@@ -651,7 +652,7 @@ void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
int trig_mode, int vec);
bool svm_register_asid(unsigned int asid);
void svm_unregister_asid(unsigned int asid);
-unsigned int svm_asid(struct kvm *kvm);
+unsigned int svm_nested_asid(struct kvm *kvm);
/* nested.c */
--
2.49.0.395.g12beb8f557-goog
On Wed, 2025-03-26 at 19:36 +0000, Yosry Ahmed wrote:
> The per-VM ASID is currently shared by both L1 and L2 guests. That ASID
> is currently flushed on every transition between L1 and L2.
>
> Allocate and track a separate ASID per-VM for nested guests. This is in
> preparation for doing fine-grained TLB flushes on nested transitions
> instead of unconditional full flushes.
>
> Nested ASIDs are still not fully maintained (e.g. a remote flush will
> only flush the current ASID), so keep the TLB flush on every transition
> until this is sorted out in following changes.
>
> Add a helper to get the ASID associated with a specific VMCB and use it
> instead of directly reading the VM's ASID. This transparently uses L2's
> ASID when an L2 guest is being run.
>
> L1's ASID is flushed on KVM_REQ_TLB_FLUSH_GUEST if it is the active
> context, so remove the TODO in nested_svm_transition_tlb_flush() about
> it.
>
> Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
> ---
> arch/x86/kvm/svm/nested.c | 8 ++++++--
> arch/x86/kvm/svm/svm.c | 13 +++++++++++--
> arch/x86/kvm/svm/svm.h | 3 ++-
> 3 files changed, 19 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> index 81184b2fb27fd..75223869aa8c6 100644
> --- a/arch/x86/kvm/svm/nested.c
> +++ b/arch/x86/kvm/svm/nested.c
> @@ -495,7 +495,6 @@ static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
> * - Honor L1's request to flush an ASID on nested VMRUN
> * - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
> * - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
> - * - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
> *
> * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
> * NPT guest-physical mappings on VMRUN.
> @@ -677,7 +676,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
> vmcb02->control.nested_ctl = vmcb01->control.nested_ctl;
> vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
> vmcb02->control.msrpm_base_pa = vmcb01->control.msrpm_base_pa;
> - vmcb02->control.asid = svm_asid(vcpu->kvm);
> + vmcb02->control.asid = svm_nested_asid(vcpu->kvm);
>
> /* Also overwritten later if necessary. */
> vmcb_clr_flush_asid(vmcb02);
> @@ -1179,6 +1178,7 @@ static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
>
> int svm_allocate_nested(struct vcpu_svm *svm)
> {
> + struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
> struct page *vmcb02_page;
>
> if (svm->nested.initialized)
> @@ -1196,6 +1196,10 @@ int svm_allocate_nested(struct vcpu_svm *svm)
> svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);
>
> svm->nested.initialized = true;
> +
> + if (!kvm_svm->nested_asid)
> + kvm_svm->nested_asid = kvm_svm->asid;
Nitpick: maybe put nested_asid into .nested struct as well?
I don't have a strong opinion on this, feel free to leave it where it is now.
> +
> return 0;
>
> err_free_vmcb02:
> diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> index f028d006f69dc..e664d8428c792 100644
> --- a/arch/x86/kvm/svm/svm.c
> +++ b/arch/x86/kvm/svm/svm.c
> @@ -1225,17 +1225,26 @@ static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu)
> }
> }
>
> -unsigned int svm_asid(struct kvm *kvm)
> +unsigned int svm_nested_asid(struct kvm *kvm)
> +{
> + return to_kvm_svm(kvm)->nested_asid;
> +}
It might also make sense to add WARN_ON_ONCE(!svm->nested.initialized) here, just in case.
> +
> +static unsigned int svm_asid(struct kvm *kvm)
> {
> return to_kvm_svm(kvm)->asid;
> }
>
> static unsigned int svm_get_current_asid(struct vcpu_svm *svm)
> {
> - struct kvm *kvm = svm->vcpu.kvm;
> + struct kvm_vcpu *vcpu = &svm->vcpu;
> + struct kvm *kvm = vcpu->kvm;
>
> if (sev_guest(kvm))
> return sev_get_asid(kvm);
> + if (is_guest_mode(vcpu))
> + return svm_nested_asid(kvm);
> + WARN_ON_ONCE(svm->current_vmcb != &svm->vmcb01);
> return svm_asid(kvm);
> }
>
> diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> index 436b7e83141b9..e67e3a64e92f7 100644
> --- a/arch/x86/kvm/svm/svm.h
> +++ b/arch/x86/kvm/svm/svm.h
> @@ -118,6 +118,7 @@ struct kvm_svm {
> struct kvm kvm;
>
> unsigned int asid;
> + unsigned int nested_asid;
>
> /* Struct members for AVIC */
> u32 avic_vm_id;
> @@ -651,7 +652,7 @@ void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
> int trig_mode, int vec);
> bool svm_register_asid(unsigned int asid);
> void svm_unregister_asid(unsigned int asid);
> -unsigned int svm_asid(struct kvm *kvm);
> +unsigned int svm_nested_asid(struct kvm *kvm);
>
> /* nested.c */
>
Overall looks good,
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Best regards,
Maxim Levitsky
On Thu, Apr 03, 2025 at 04:09:30PM -0400, Maxim Levitsky wrote:
> On Wed, 2025-03-26 at 19:36 +0000, Yosry Ahmed wrote:
> > The per-VM ASID is currently shared by both L1 and L2 guests. That ASID
> > is currently flushed on every transition between L1 and L2.
> >
> > Allocate and track a separate ASID per-VM for nested guests. This is in
> > preparation for doing fine-grained TLB flushes on nested transitions
> > instead of unconditional full flushes.
> >
> > Nested ASIDs are still not fully maintained (e.g. a remote flush will
> > only flush the current ASID), so keep the TLB flush on every transition
> > until this is sorted out in following changes.
> >
> > Add a helper to get the ASID associated with a specific VMCB and use it
> > instead of directly reading the VM's ASID. This transparently uses L2's
> > ASID when an L2 guest is being run.
> >
> > L1's ASID is flushed on KVM_REQ_TLB_FLUSH_GUEST if it is the active
> > context, so remove the TODO in nested_svm_transition_tlb_flush() about
> > it.
> >
> > Signed-off-by: Yosry Ahmed <yosry.ahmed@linux.dev>
> > ---
> > arch/x86/kvm/svm/nested.c | 8 ++++++--
> > arch/x86/kvm/svm/svm.c | 13 +++++++++++--
> > arch/x86/kvm/svm/svm.h | 3 ++-
> > 3 files changed, 19 insertions(+), 5 deletions(-)
> >
> > diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> > index 81184b2fb27fd..75223869aa8c6 100644
> > --- a/arch/x86/kvm/svm/nested.c
> > +++ b/arch/x86/kvm/svm/nested.c
> > @@ -495,7 +495,6 @@ static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
> > * - Honor L1's request to flush an ASID on nested VMRUN
> > * - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
> > * - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
> > - * - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
> > *
> > * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
> > * NPT guest-physical mappings on VMRUN.
> > @@ -677,7 +676,7 @@ static void nested_vmcb02_prepare_control(struct vcpu_svm *svm,
> > vmcb02->control.nested_ctl = vmcb01->control.nested_ctl;
> > vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
> > vmcb02->control.msrpm_base_pa = vmcb01->control.msrpm_base_pa;
> > - vmcb02->control.asid = svm_asid(vcpu->kvm);
> > + vmcb02->control.asid = svm_nested_asid(vcpu->kvm);
> >
> > /* Also overwritten later if necessary. */
> > vmcb_clr_flush_asid(vmcb02);
> > @@ -1179,6 +1178,7 @@ static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
> >
> > int svm_allocate_nested(struct vcpu_svm *svm)
> > {
> > + struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
> > struct page *vmcb02_page;
> >
> > if (svm->nested.initialized)
> > @@ -1196,6 +1196,10 @@ int svm_allocate_nested(struct vcpu_svm *svm)
> > svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);
> >
> > svm->nested.initialized = true;
> > +
> > + if (!kvm_svm->nested_asid)
> > + kvm_svm->nested_asid = kvm_svm->asid;
>
> Nitpick: maybe put nested_asid into .nested struct as well?
> I don't have a strong opinion on this, feel free to leave it where it is now.
I did this initially, but I thought creating a struct just for the purpose
of holding the nested ASID would be overkill. I don't feel strongly about
it, though.
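For reference, the layout under discussion would look something like the
below (just a sketch of the idea, with unrelated kvm_svm fields trimmed):

struct kvm_svm {
	struct kvm kvm;

	unsigned int asid;

	/* Per-VM state for nested (L2) guests. */
	struct {
		unsigned int asid;
	} nested;

	/* Struct members for AVIC */
	u32 avic_vm_id;
};

with svm_nested_asid() then returning to_kvm_svm(kvm)->nested.asid instead.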
>
>
> > +
> > return 0;
> >
> > err_free_vmcb02:
> > diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
> > index f028d006f69dc..e664d8428c792 100644
> > --- a/arch/x86/kvm/svm/svm.c
> > +++ b/arch/x86/kvm/svm/svm.c
> > @@ -1225,17 +1225,26 @@ static inline void init_vmcb_after_set_cpuid(struct kvm_vcpu *vcpu)
> > }
> > }
> >
> > -unsigned int svm_asid(struct kvm *kvm)
> > +unsigned int svm_nested_asid(struct kvm *kvm)
> > +{
> > + return to_kvm_svm(kvm)->nested_asid;
> > +}
>
> It might also make sense to add WARN_ON_ONCE(!svm->nested.initialized) here, just in case.
Yeah we can do that, but I will check the callers first to make sure
there's no chance of false positives.
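Since svm_nested_asid() itself only takes a struct kvm and can't see
svm->nested.initialized, one option would be to do the check at callers
that have the vCPU, e.g. in svm_get_current_asid() (untested sketch):

static unsigned int svm_get_current_asid(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct kvm *kvm = vcpu->kvm;

	if (sev_guest(kvm))
		return sev_get_asid(kvm);

	if (is_guest_mode(vcpu)) {
		/*
		 * L2 should never run before svm_allocate_nested() has
		 * set up the nested ASID.
		 */
		WARN_ON_ONCE(!svm->nested.initialized);
		return svm_nested_asid(kvm);
	}

	WARN_ON_ONCE(svm->current_vmcb != &svm->vmcb01);
	return svm_asid(kvm);
}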
>
> > +
> > +static unsigned int svm_asid(struct kvm *kvm)
> > {
> > return to_kvm_svm(kvm)->asid;
> > }
> >
> > static unsigned int svm_get_current_asid(struct vcpu_svm *svm)
> > {
> > - struct kvm *kvm = svm->vcpu.kvm;
> > + struct kvm_vcpu *vcpu = &svm->vcpu;
> > + struct kvm *kvm = vcpu->kvm;
> >
> > if (sev_guest(kvm))
> > return sev_get_asid(kvm);
> > + if (is_guest_mode(vcpu))
> > + return svm_nested_asid(kvm);
> > + WARN_ON_ONCE(svm->current_vmcb != &svm->vmcb01);
> > return svm_asid(kvm);
> > }
> >
> > diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
> > index 436b7e83141b9..e67e3a64e92f7 100644
> > --- a/arch/x86/kvm/svm/svm.h
> > +++ b/arch/x86/kvm/svm/svm.h
> > @@ -118,6 +118,7 @@ struct kvm_svm {
> > struct kvm kvm;
> >
> > unsigned int asid;
> > + unsigned int nested_asid;
> >
> > /* Struct members for AVIC */
> > u32 avic_vm_id;
> > @@ -651,7 +652,7 @@ void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
> > int trig_mode, int vec);
> > bool svm_register_asid(unsigned int asid);
> > void svm_unregister_asid(unsigned int asid);
> > -unsigned int svm_asid(struct kvm *kvm);
> > +unsigned int svm_nested_asid(struct kvm *kvm);
> >
> > /* nested.c */
> >
>
>
> Overall looks good,
>
> Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Thanks!
>
> Best regards,
> Maxim Levitsky
>
>
>