Now that we have separate paths for the TDP MMU, it is trivial to grab
rcu_read_lock() only in the TDP MMU case. We do not need to grab it
for the shadow MMU, as shadow MMU pages are not freed via RCU callback.
Signed-off-by: James Houghton <jthoughton@google.com>
---
arch/x86/kvm/mmu/mmu.c | 33 ++++++++++++++++++---------------
1 file changed, 18 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 7df1b4ead705b..c8f7dd747d524 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -7577,17 +7577,18 @@ static void kvm_recover_nx_huge_pages(struct kvm *kvm,
nx_huge_pages = &kvm->arch.possible_nx_huge_pages[mmu_type].pages;
rcu_idx = srcu_read_lock(&kvm->srcu);
- if (is_tdp_mmu)
+ if (is_tdp_mmu) {
read_lock(&kvm->mmu_lock);
- else
+ /*
+ * Zapping TDP MMU shadow pages, including the remote TLB flush,
+ * must be done under RCU protection, because the pages are
+ * freed via RCU callback.
+ */
+ rcu_read_lock();
+ } else {
write_lock(&kvm->mmu_lock);
+ }
- /*
- * Zapping TDP MMU shadow pages, including the remote TLB flush, must
- * be done under RCU protection, because the pages are freed via RCU
- * callback.
- */
- rcu_read_lock();
for ( ; to_zap; --to_zap) {
#ifdef CONFIG_X86_64
@@ -7641,25 +7642,27 @@ static void kvm_recover_nx_huge_pages(struct kvm *kvm,
if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
- rcu_read_unlock();
- if (is_tdp_mmu)
+ if (is_tdp_mmu) {
+ rcu_read_unlock();
cond_resched_rwlock_read(&kvm->mmu_lock);
- else
+ rcu_read_lock();
+ } else {
cond_resched_rwlock_write(&kvm->mmu_lock);
+ }
flush = false;
- rcu_read_lock();
}
}
kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
- rcu_read_unlock();
- if (is_tdp_mmu)
+ if (is_tdp_mmu) {
+ rcu_read_unlock();
read_unlock(&kvm->mmu_lock);
- else
+ } else {
write_unlock(&kvm->mmu_lock);
+ }
srcu_read_unlock(&kvm->srcu, rcu_idx);
}
--
2.50.0.727.gbf7dc18ff4-goog