Move reused shadow pages to the head of active_mmu_pages in
kvm_mmu_find_shadow_page(). Since kvm_mmu_zap_oldest_mmu_pages() walks
active_mmu_pages from the tail, keeping recently reused pages at the
head turns the eviction order into an LRU approximation instead of
straight FIFO.
Signed-off-by: Hamza Mahfooz <someguy@effective-light.com>
---
v2: move logic to kvm_mmu_find_shadow_page().
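
Note for reviewers: below is a minimal, self-contained userspace sketch
(not kernel code) of why moving a reused entry to the head of a list
that is evicted from the tail approximates LRU. The list helpers mimic
the kernel's list.h semantics; struct demo_page and evict_oldest() are
made-up names for illustration only.

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

/* Insert n right after h, i.e. at the head of the list. */
static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

/* Same semantics as the kernel's list_move(): unlink, re-add at head. */
static void list_move(struct list_head *n, struct list_head *h)
{
	list_del(n);
	list_add(n, h);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_page { int id; struct list_head link; };

/*
 * Evict the tail entry; kvm_mmu_zap_oldest_mmu_pages() similarly walks
 * active_mmu_pages in reverse, so the tail goes first.
 */
static struct demo_page *evict_oldest(struct list_head *h)
{
	struct list_head *tail = h->prev;

	list_del(tail);
	return container_of(tail, struct demo_page, link);
}

int main(void)
{
	struct list_head lru;
	struct demo_page pages[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
	int i;

	list_init(&lru);
	for (i = 0; i < 3; i++)
		list_add(&pages[i].link, &lru);	/* head order: 2, 1, 0 */

	/* Page 0 is reused: move it to the head so it is evicted last. */
	list_move(&pages[0].link, &lru);	/* head order: 0, 2, 1 */

	/* Plain FIFO would evict page 0 here; with the move, page 1 goes. */
	printf("evicted page %d\n", evict_oldest(&lru)->id);
	return 0;
}
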
---
arch/x86/kvm/mmu/mmu.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 02c450686b4a..d89099ba1fca 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2325,6 +2325,9 @@ static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm *kvm,
out:
kvm_mmu_commit_zap_page(kvm, &invalid_list);
+	if (sp && !list_is_first(&sp->link, &kvm->arch.active_mmu_pages))
+		list_move(&sp->link, &kvm->arch.active_mmu_pages);
+
if (collisions > kvm->stat.max_mmu_page_hash_collisions)
kvm->stat.max_mmu_page_hash_collisions = collisions;
return sp;
--
2.52.0