mmu_notifier_invalidate_range_start() may be invoked via
mmu_notifier_invalidate_range_start_nonblock(), e.g. from oom_reaper(),
where sleeping is explicitly forbidden.
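
For context, a notifier callback that may need to sleep is expected to
check mmu_notifier_range_blockable() and fail with -EAGAIN in the
non-blocking case. A minimal sketch of that contract follows; the
callback and example_lock names are illustrative, not KVM's actual
code:

	#include <linux/mmu_notifier.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_lock);

	static int example_invalidate_range_start(struct mmu_notifier *mn,
					const struct mmu_notifier_range *range)
	{
		if (!mmu_notifier_range_blockable(range)) {
			/* nonblock caller (e.g. oom_reaper): must not sleep */
			if (!mutex_trylock(&example_lock))
				return -EAGAIN;
		} else {
			mutex_lock(&example_lock);
		}
		/* invalidate secondary mappings for [range->start, range->end) */
		mutex_unlock(&example_lock);
		return 0;
	}
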
KVM's kvm_mmu_notifier_invalidate_range_start() currently takes
mn_invalidate_lock with spin_lock(). On PREEMPT_RT, spinlock_t is
backed by an rt_mutex and may sleep, triggering:

  BUG: sleeping function called from invalid context
The MMU notifier contract forbids sleeping here regardless of
PREEMPT_RT; RT kernels merely make the violation deterministic.
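
The distinction matters because PREEMPT_RT converts spinlock_t into a
sleeping lock, while raw_spinlock_t keeps the traditional
preemption-disabling behavior on every configuration. A minimal sketch
of the two lock types (the example_* names are hypothetical):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);		/* rt_mutex on PREEMPT_RT */
	static DEFINE_RAW_SPINLOCK(example_raw_lock);	/* spins everywhere */

	static void example_atomic_path(void)
	{
		/* safe in non-sleepable context; keep the section short */
		raw_spin_lock(&example_raw_lock);
		/* bounded, non-sleeping critical section */
		raw_spin_unlock(&example_raw_lock);
	}
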
Fix this by converting mn_invalidate_lock to a raw_spinlock_t so that
invalidate_range_start() remains non-sleeping while preserving the
existing serialization between invalidate_range_start(),
invalidate_range_end(), and kvm_swap_active_memslots().
Signed-off-by: shaikh.kamal <shaikhkamal2012@gmail.com>
---
include/linux/kvm_host.h | 2 +-
virt/kvm/kvm_main.c | 18 +++++++++---------
2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d93f75b05ae2..77a6d4833eda 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -797,7 +797,7 @@ struct kvm {
atomic_t nr_memslots_dirty_logging;
/* Used to wait for completion of MMU notifiers. */
- spinlock_t mn_invalidate_lock;
+ raw_spinlock_t mn_invalidate_lock;
unsigned long mn_active_invalidate_count;
struct rcuwait mn_memslots_update_rcuwait;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5fcd401a5897..7a9c33f01a37 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -747,9 +747,9 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
*
* Pairs with the decrement in range_end().
*/
- spin_lock(&kvm->mn_invalidate_lock);
+ raw_spin_lock(&kvm->mn_invalidate_lock);
kvm->mn_active_invalidate_count++;
- spin_unlock(&kvm->mn_invalidate_lock);
+ raw_spin_unlock(&kvm->mn_invalidate_lock);
/*
* Invalidate pfn caches _before_ invalidating the secondary MMUs, i.e.
@@ -817,11 +817,11 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
kvm_handle_hva_range(kvm, &hva_range);
/* Pairs with the increment in range_start(). */
- spin_lock(&kvm->mn_invalidate_lock);
+ raw_spin_lock(&kvm->mn_invalidate_lock);
if (!WARN_ON_ONCE(!kvm->mn_active_invalidate_count))
--kvm->mn_active_invalidate_count;
wake = !kvm->mn_active_invalidate_count;
- spin_unlock(&kvm->mn_invalidate_lock);
+ raw_spin_unlock(&kvm->mn_invalidate_lock);
/*
* There can only be one waiter, since the wait happens under
@@ -1129,7 +1129,7 @@ static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
mutex_init(&kvm->irq_lock);
mutex_init(&kvm->slots_lock);
mutex_init(&kvm->slots_arch_lock);
- spin_lock_init(&kvm->mn_invalidate_lock);
+ raw_spin_lock_init(&kvm->mn_invalidate_lock);
rcuwait_init(&kvm->mn_memslots_update_rcuwait);
xa_init(&kvm->vcpu_array);
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
@@ -1635,17 +1635,17 @@ static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
* progress, otherwise the locking in invalidate_range_start and
* invalidate_range_end will be unbalanced.
*/
- spin_lock(&kvm->mn_invalidate_lock);
+ raw_spin_lock(&kvm->mn_invalidate_lock);
prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait);
while (kvm->mn_active_invalidate_count) {
set_current_state(TASK_UNINTERRUPTIBLE);
- spin_unlock(&kvm->mn_invalidate_lock);
+ raw_spin_unlock(&kvm->mn_invalidate_lock);
schedule();
- spin_lock(&kvm->mn_invalidate_lock);
+ raw_spin_lock(&kvm->mn_invalidate_lock);
}
finish_rcuwait(&kvm->mn_memslots_update_rcuwait);
rcu_assign_pointer(kvm->memslots[as_id], slots);
- spin_unlock(&kvm->mn_invalidate_lock);
+ raw_spin_unlock(&kvm->mn_invalidate_lock);
/*
* Acquired in kvm_set_memslot. Must be released before synchronize
--
2.43.0