Now that KVM takes vcpu->mutex inside kvm->lock when creating a vCPU, drop
the hack to manually inform lockdep of the kvm->lock => vcpu->mutex
ordering.

This effectively reverts commit 42a90008f890 ("KVM: Ensure lockdep knows
about kvm->lock vs. vcpu->mutex ordering rule").

Cc: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 virt/kvm/kvm_main.c | 9 ++-------
 1 file changed, 2 insertions(+), 7 deletions(-)

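(Note for reviewers, not part of the patch.) Lockdep records a lock ordering
the first time it observes one lock being acquired while another is held, so
with this series the nested acquisition during vCPU creation is enough on its
own; no dedicated lock/unlock dance is needed. A minimal sketch of the
nesting, with unrelated code and the exact unlock points elided/simplified:

	mutex_lock(&kvm->lock);		/* outer: per-VM lock */
	...
	mutex_lock(&vcpu->mutex);	/* inner: lockdep records kvm->lock => vcpu->mutex */
	...
	mutex_unlock(&kvm->lock);
	...
	mutex_unlock(&vcpu->mutex);	/* held until the vCPU is fully visible (see hunk 2) */
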
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index ae216256ee9d..2dd3ff8764da 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -4271,12 +4271,6 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, unsigned long id)
 
 	mutex_lock(&kvm->lock);
 
-#ifdef CONFIG_LOCKDEP
-	/* Ensure that lockdep knows vcpu->mutex is taken *inside* kvm->lock */
-	mutex_lock(&vcpu->mutex);
-	mutex_unlock(&vcpu->mutex);
-#endif
-
 	if (kvm_get_vcpu_by_id(kvm, id)) {
 		r = -EEXIST;
 		goto unlock_vcpu_destroy;
@@ -4293,7 +4287,8 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, unsigned long id)
 	 * so that userspace can't invoke vCPU ioctl()s until the vCPU is fully
 	 * visible (per online_vcpus), e.g. so that KVM doesn't get tricked
 	 * into a NULL-pointer dereference because KVM thinks the _current_
-	 * vCPU doesn't exist.
+	 * vCPU doesn't exist. As a bonus, taking vcpu->mutex ensures lockdep
+	 * knows it's taken *inside* kvm->lock.
 	 */
 	mutex_lock(&vcpu->mutex);
 	kvm_get_kvm(kvm);
--
2.47.0.rc0.187.ge670bccf7e-goog