From: Fred Griffoul <fgriffo@amazon.co.uk>
Restore guest page access tracking in pfncache, so that a vCPU request
is automatically generated when a cached page is invalidated through an
MMU notifier event.

This is critical for nested VMX, where both KVM and the L2 guest access
guest-provided pages such as the APIC page and posted interrupt
descriptors.

This change:

- Reverts commit eefb85b3f031 ("KVM: Drop unused @may_block param from
  gfn_to_pfn_cache_invalidate_start()")

- Partially reverts commit a4bff3df5147 ("KVM: pfncache: remove
  KVM_GUEST_USES_PFN usage"), adding kvm_gpc_init_for_vcpu() to
  initialize a pfncache for guest-mode access in place of the
  usage-specific flag approach.
Signed-off-by: Fred Griffoul <fgriffo@amazon.co.uk>
---
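A minimal usage sketch of the restored API, for reviewer context only (not
part of the patch): the kvm_gpc_init_for_vcpu(), kvm_gpc_activate() and
kvm_gpc_deactivate() calls are the ones added or relied on by this change,
while the wrapper function, its parameters and the lifecycle placement are
illustrative assumptions.

static int example_setup_guest_page(struct kvm_vcpu *vcpu,
                                    struct gfn_to_pfn_cache *gpc,
                                    gpa_t gpa, unsigned long len)
{
        int ret;

        /*
         * One-time setup on a zero-allocated cache, typically at vCPU
         * creation: binding the cache to @vcpu means an MMU notifier
         * invalidation hitting the cached range sends
         * KVM_REQ_OUTSIDE_GUEST_MODE to that vCPU.
         */
        kvm_gpc_init_for_vcpu(gpc, vcpu);

        /*
         * Map the guest-provided page, e.g. the virtual-APIC page or a
         * posted interrupt descriptor taken from vmcs12 (@gpa and @len
         * must stay within a single page).
         */
        ret = kvm_gpc_activate(gpc, gpa, len);
        if (ret)
                return ret;

        /* ... the vCPU accesses gpc->khva/gpc->pfn while in guest mode ... */

        /* On nested VM-exit or vCPU teardown, drop the mapping. */
        kvm_gpc_deactivate(gpc);
        return 0;
}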
include/linux/kvm_host.h | 29 +++++++++++++++++++++++++-
include/linux/kvm_types.h | 1 +
virt/kvm/kvm_main.c | 3 ++-
virt/kvm/kvm_mm.h | 6 ++++--
virt/kvm/pfncache.c | 43 ++++++++++++++++++++++++++++++++++++---
5 files changed, 75 insertions(+), 7 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 8b47891adca1..2eb551a11818 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1397,6 +1397,9 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
+void __kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
+ struct kvm_vcpu *vcpu);
+
/**
* kvm_gpc_init - initialize gfn_to_pfn_cache.
*
@@ -1407,7 +1410,11 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
* immutable attributes. Note, the cache must be zero-allocated (or zeroed by
* the caller before init).
*/
-void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm);
+
+static inline void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
+{
+ __kvm_gpc_init(gpc, kvm, NULL);
+}
/**
* kvm_gpc_activate - prepare a cached kernel mapping and HPA for a given guest
@@ -1489,6 +1496,26 @@ int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len);
*/
void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);
+/**
+ * kvm_gpc_init_for_vcpu - initialize gfn_to_pfn_cache for direct vCPU access
+ *
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @vcpu: vCPU that will directly access this cache while in guest mode.
+ *
+ * This sets up a gfn_to_pfn_cache for use by a vCPU that directly accesses
+ * the cached physical address while in guest mode. When the cache is
+ * invalidated by an MMU notifier event, KVM_REQ_OUTSIDE_GUEST_MODE is sent
+ * to the associated vCPU to force it out of guest mode; the cache must then
+ * be refreshed before the vCPU re-enters the guest.
+ *
+ * Note, the cache must be zero-allocated (or zeroed by the caller before init).
+ */
+static inline void kvm_gpc_init_for_vcpu(struct gfn_to_pfn_cache *gpc,
+ struct kvm_vcpu *vcpu)
+{
+ __kvm_gpc_init(gpc, vcpu->kvm, vcpu);
+}
+
static inline bool kvm_gpc_is_gpa_active(struct gfn_to_pfn_cache *gpc)
{
return gpc->active && !kvm_is_error_gpa(gpc->gpa);
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 827ecc0b7e10..1c4bbf9947a4 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -64,6 +64,7 @@ struct gfn_to_pfn_cache {
struct kvm_memory_slot *memslot;
struct kvm *kvm;
struct list_head list;
+ struct kvm_vcpu *vcpu;
rwlock_t lock;
struct mutex refresh_lock;
void *khva;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 18f29ef93543..f42cfd524ecf 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -760,7 +760,8 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
* mn_active_invalidate_count (see above) instead of
* mmu_invalidate_in_progress.
*/
- gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end);
+ gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end,
+ hva_range.may_block);
/*
* If one or more memslots were found and thus zapped, notify arch code
diff --git a/virt/kvm/kvm_mm.h b/virt/kvm/kvm_mm.h
index 31defb08ccba..f1ba02084bd9 100644
--- a/virt/kvm/kvm_mm.h
+++ b/virt/kvm/kvm_mm.h
@@ -58,11 +58,13 @@ kvm_pfn_t hva_to_pfn(struct kvm_follow_pfn *kfp);
#ifdef CONFIG_HAVE_KVM_PFNCACHE
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
unsigned long start,
- unsigned long end);
+ unsigned long end,
+ bool may_block);
#else
static inline void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm,
unsigned long start,
- unsigned long end)
+ unsigned long end,
+ bool may_block)
{
}
#endif /* HAVE_KVM_PFNCACHE */
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index 728d2c1b488a..543466ff40a0 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -23,9 +23,11 @@
* MMU notifier 'invalidate_range_start' hook.
*/
void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
- unsigned long end)
+ unsigned long end, bool may_block)
{
+ DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
struct gfn_to_pfn_cache *gpc;
+ bool evict_vcpus = false;
spin_lock(&kvm->gpc_lock);
list_for_each_entry(gpc, &kvm->gpc_list, list) {
@@ -46,8 +48,21 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
write_lock_irq(&gpc->lock);
if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
- gpc->uhva >= start && gpc->uhva < end)
+ gpc->uhva >= start && gpc->uhva < end) {
gpc->valid = false;
+
+ /*
+ * If a guest vCPU could be using the physical address,
+ * it needs to be forced out of guest mode.
+ */
+ if (gpc->vcpu) {
+ if (!evict_vcpus) {
+ evict_vcpus = true;
+ bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
+ }
+ __set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap);
+ }
+ }
write_unlock_irq(&gpc->lock);
continue;
}
@@ -55,6 +70,27 @@ void gfn_to_pfn_cache_invalidate_start(struct kvm *kvm, unsigned long start,
read_unlock_irq(&gpc->lock);
}
spin_unlock(&kvm->gpc_lock);
+
+ if (evict_vcpus) {
+ /*
+ * KVM needs to ensure the vCPU is fully out of guest context
+ * before allowing the invalidation to continue.
+ */
+ unsigned int req = KVM_REQ_OUTSIDE_GUEST_MODE;
+ bool called;
+
+ /*
+ * If the OOM reaper is active, then all vCPUs should have
+ * been stopped already, so perform the request without
+ * KVM_REQUEST_WAIT and be sad if any needed to be IPI'd.
+ */
+ if (!may_block)
+ req &= ~KVM_REQUEST_WAIT;
+
+ called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap);
+
+ WARN_ON_ONCE(called && !may_block);
+ }
}
static bool kvm_gpc_is_valid_len(gpa_t gpa, unsigned long uhva,
@@ -382,7 +418,7 @@ int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
return __kvm_gpc_refresh(gpc, gpc->gpa, uhva);
}
-void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
+void __kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm, struct kvm_vcpu *vcpu)
{
rwlock_init(&gpc->lock);
mutex_init(&gpc->refresh_lock);
@@ -391,6 +427,7 @@ void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
gpc->pfn = KVM_PFN_ERR_FAULT;
gpc->gpa = INVALID_GPA;
gpc->uhva = KVM_HVA_ERR_BAD;
+ gpc->vcpu = vcpu;
gpc->active = gpc->valid = false;
}
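For completeness, the consumer side: before dereferencing the mapping (and
before re-entering the guest after a KVM_REQ_OUTSIDE_GUEST_MODE request), a
vCPU revalidates the cache with the existing kvm_gpc_check() and
kvm_gpc_refresh() helpers. Again a sketch rather than part of the patch;
the wrapper function and the PAGE_SIZE length are assumptions.

static void *example_get_mapping(struct gfn_to_pfn_cache *gpc)
{
        /*
         * Standard pfncache pattern: take the read lock, and if the cache
         * was invalidated (gpc->valid cleared by
         * gfn_to_pfn_cache_invalidate_start()), drop the lock and refresh.
         */
        read_lock(&gpc->lock);
        while (!kvm_gpc_check(gpc, PAGE_SIZE)) {
                read_unlock(&gpc->lock);
                if (kvm_gpc_refresh(gpc, PAGE_SIZE))
                        return NULL;
                read_lock(&gpc->lock);
        }

        /*
         * gpc->khva (and gpc->pfn) are stable until read_unlock(); the
         * caller must drop gpc->lock when it is done with the mapping.
         */
        return gpc->khva;
}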
--
2.51.0