This ensures that, if a VCPU has "observed" that an IO registration has
occurred, the instruction currently being trapped or emulated will also
observe the IO registration.
At the same time, enforce that kvm_get_bus() is used only on the
update side, ensuring that a long-term reference cannot be obtained by
an SRCU reader.
Signed-off-by: Keir Fraser <keirf@google.com>
---
arch/x86/kvm/vmx/vmx.c | 7 +++++++
include/linux/kvm_host.h | 10 +++++++---
virt/kvm/kvm_main.c | 33 +++++++++++++++++++++++++++------
3 files changed, 41 insertions(+), 9 deletions(-)
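
For context, a rough sketch of the publish/observe pairing this patch relies
on. It is illustrative only (locking, error handling and the device-range
lookup are elided); the reader-side barrier is the one this patch adds:

  /* Update side (kvm_io_bus_register_dev(), kvm->slots_lock held): */
  rcu_assign_pointer(kvm->buses[bus_idx], new_bus); /* publish new bus */
  synchronize_srcu_expedited(&kvm->srcu);           /* wait out old readers */
  kfree(old_bus);                                   /* old bus unreachable */

  /* Read side (vCPU MMIO/PIO dispatch): */
  idx = srcu_read_lock(&kvm->srcu);
  smp_mb__after_srcu_read_lock(); /* order prior observations before the load */
  bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
  /* ... look up the device on the bus and dispatch ... */
  srcu_read_unlock(&kvm->srcu, idx);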
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 191a9ed0da22..425e3d8074ab 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -5861,6 +5861,13 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
if (kvm_test_request(KVM_REQ_EVENT, vcpu))
return 1;
+ /*
+ * Ensure that any updates to kvm->buses[] observed by the
+ * previous instruction (emulated or otherwise) are also
+ * visible to the instruction we are about to emulate.
+ */
+ smp_rmb();
+
if (!kvm_emulate_instruction(vcpu, 0))
return 0;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 3bde4fb5c6aa..9132148fb467 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -965,11 +965,15 @@ static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
}
+/*
+ * Get a bus reference under the update-side lock. No long-term SRCU reader
+ * references are permitted, to avoid stale reads vs concurrent IO
+ * registrations.
+ */
static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
- return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
- lockdep_is_held(&kvm->slots_lock) ||
- !refcount_read(&kvm->users_count));
+ return rcu_dereference_protected(kvm->buses[idx],
+ lockdep_is_held(&kvm->slots_lock));
}
static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 222f0e894a0c..9ec3b96b9666 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1103,6 +1103,15 @@ void __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
}
+/* Called only on cleanup and destruction paths when there are no users. */
+static inline struct kvm_io_bus *kvm_get_bus_for_destruction(struct kvm *kvm,
+ enum kvm_bus idx)
+{
+ return rcu_dereference_protected(kvm->buses[idx],
+ !refcount_read(&kvm->users_count));
+}
+
+
static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
{
struct kvm *kvm = kvm_arch_alloc_vm();
@@ -1228,7 +1237,7 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
out_err_no_arch_destroy_vm:
WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
for (i = 0; i < KVM_NR_BUSES; i++)
- kfree(kvm_get_bus(kvm, i));
+ kfree(kvm_get_bus_for_destruction(kvm, i));
kvm_free_irq_routing(kvm);
out_err_no_irq_routing:
cleanup_srcu_struct(&kvm->irq_srcu);
@@ -1276,7 +1285,7 @@ static void kvm_destroy_vm(struct kvm *kvm)
kvm_free_irq_routing(kvm);
for (i = 0; i < KVM_NR_BUSES; i++) {
- struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
+ struct kvm_io_bus *bus = kvm_get_bus_for_destruction(kvm, i);
if (bus)
kvm_io_bus_destroy(bus);
@@ -5838,6 +5847,18 @@ static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
return -EOPNOTSUPP;
}
+static struct kvm_io_bus *kvm_get_bus_srcu(struct kvm *kvm, enum kvm_bus idx)
+{
+ /*
+ * Ensure that any updates to kvm->buses[] observed by the previous VCPU
+ * machine instruction are also visible to the VCPU machine instruction
+ * that triggered this call.
+ */
+ smp_mb__after_srcu_read_lock();
+
+ return srcu_dereference(kvm->buses[idx], &kvm->srcu);
+}
+
int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
int len, const void *val)
{
@@ -5850,7 +5871,7 @@ int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
.len = len,
};
- bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+ bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx);
if (!bus)
return -ENOMEM;
r = __kvm_io_bus_write(vcpu, bus, &range, val);
@@ -5869,7 +5890,7 @@ int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
.len = len,
};
- bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+ bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx);
if (!bus)
return -ENOMEM;
@@ -5919,7 +5940,7 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
.len = len,
};
- bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+ bus = kvm_get_bus_srcu(vcpu->kvm, bus_idx);
if (!bus)
return -ENOMEM;
r = __kvm_io_bus_read(vcpu, bus, &range, val);
@@ -6028,7 +6049,7 @@ struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
srcu_idx = srcu_read_lock(&kvm->srcu);
- bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
+ bus = kvm_get_bus_srcu(kvm, bus_idx);
if (!bus)
goto out_unlock;
--
2.50.0.727.gbf7dc18ff4-goog
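
Taken together, the patch leaves three accessors with distinct contracts. A
sketch of the intended caller patterns (illustrative, not a literal excerpt
from the tree):

  /* vCPU I/O dispatch: inside an SRCU read-side critical section. */
  idx = srcu_read_lock(&kvm->srcu);
  bus = kvm_get_bus_srcu(kvm, KVM_MMIO_BUS); /* includes the new barrier */
  ...
  srcu_read_unlock(&kvm->srcu, idx);

  /* Device (de)registration: update side, kvm->slots_lock held. */
  mutex_lock(&kvm->slots_lock);
  bus = kvm_get_bus(kvm, KVM_MMIO_BUS); /* lockdep asserts slots_lock */
  ...
  mutex_unlock(&kvm->slots_lock);

  /* VM creation failure / teardown: users_count has dropped to zero. */
  bus = kvm_get_bus_for_destruction(kvm, KVM_MMIO_BUS);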
On Wed, Jul 16, 2025 at 11:07:36AM +0800, Keir Fraser wrote:
> static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
> {
> -	return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
> -				      lockdep_is_held(&kvm->slots_lock) ||
> -				      !refcount_read(&kvm->users_count));
> +	return rcu_dereference_protected(kvm->buses[idx],
> +					 lockdep_is_held(&kvm->slots_lock));

I want to ask about the true reason for using the protected version here:
is it to save an unnecessary READ_ONCE()?
On Thu, Jul 17, 2025 at 02:01:32PM +0800, Yao Yuan wrote:
> On Wed, Jul 16, 2025 at 11:07:36AM +0800, Keir Fraser wrote:
> > -	return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
> > -				      lockdep_is_held(&kvm->slots_lock) ||
> > -				      !refcount_read(&kvm->users_count));
> > +	return rcu_dereference_protected(kvm->buses[idx],
> > +					 lockdep_is_held(&kvm->slots_lock));
>
> I want to ask about the true reason for using the protected version here:
> is it to save an unnecessary READ_ONCE()?

We don't want this function to be callable from an SRCU read section, but
*only* during teardown. Hence the protected version provides a better,
stricter safety check (that there are no users).

 -- Keir
On Fri, Jul 18, 2025 at 02:56:25PM +0000, Keir Fraser wrote:
> On Thu, Jul 17, 2025 at 02:01:32PM +0800, Yao Yuan wrote:
> > I want to ask about the true reason for using the protected version here:
> > is it to save an unnecessary READ_ONCE()?
>
> We don't want this function to be callable from an SRCU read section, but
> *only* during teardown. Hence the protected version provides a better,
> stricter safety check (that there are no users).

I see, thanks for your explanation!
On Thu, Jul 17, 2025, Yao Yuan wrote:
> On Wed, Jul 16, 2025 at 11:07:36AM +0800, Keir Fraser wrote:
> > -	return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
> > -				      lockdep_is_held(&kvm->slots_lock) ||
> > -				      !refcount_read(&kvm->users_count));
> > +	return rcu_dereference_protected(kvm->buses[idx],
> > +					 lockdep_is_held(&kvm->slots_lock));
>
> I want to ask about the true reason for using the protected version here:
> is it to save an unnecessary READ_ONCE()?

Avoiding the READ_ONCE() is a happy bonus. The main goal is to help document
and enforce that kvm_get_bus() can only be used if slots_lock is held. Keeping
this as srcu_dereference_check() would result in PROVE_RCU getting a false
negative if the caller held kvm->srcu but not slots_lock.

From a documentation perspective, rcu_dereference_protected() (hopefully) helps
highlight that there's something "special" about this helper, e.g. gives the
reader a hint that they probably shouldn't be using kvm_get_bus().
On Fri, Jul 18, 2025 at 07:54:38AM -0700, Sean Christopherson wrote:
> On Thu, Jul 17, 2025, Yao Yuan wrote:
> > I want to ask about the true reason for using the protected version here:
> > is it to save an unnecessary READ_ONCE()?
>
> Avoiding the READ_ONCE() is a happy bonus. The main goal is to help document
> and enforce that kvm_get_bus() can only be used if slots_lock is held. Keeping
> this as srcu_dereference_check() would result in PROVE_RCU getting a false
> negative if the caller held kvm->srcu but not slots_lock.

Ah, I noticed the srcu_read_lock_held(ssp) in srcu_dereference_check() this
time!

> From a documentation perspective, rcu_dereference_protected() (hopefully) helps
> highlight that there's something "special" about this helper, e.g. gives the
> reader a hint that they probably shouldn't be using kvm_get_bus().

Yes, I got it. Thanks for your nice explanation!
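
For reference, the lockdep semantics Sean is contrasting, paraphrased from
include/linux/srcu.h and include/linux/rcupdate.h (not the literal macro
definitions):

  /*
   * srcu_dereference_check(p, ssp, c):
   *   complains only if !((c) || srcu_read_lock_held(ssp)), so merely
   *   holding kvm->srcu silences PROVE_RCU; the access uses READ_ONCE().
   *
   * rcu_dereference_protected(p, c):
   *   complains if !(c), with no SRCU escape hatch, and performs a plain
   *   load (no READ_ONCE()), which is safe because (c) rules out
   *   concurrent updates.
   */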