[PATCH v3 16/24] KVM: guest_memfd: Split for punch hole and private-to-shared conversion

Posted by Yan Zhao 1 month ago
In TDX, private page tables require precise zapping because faulting back
zapped mappings necessitates guest re-acceptance. Therefore, before zapping
for hole punching or a private-to-shared conversion, huge leaves in the
mirror page table that cross the boundary of the GFN range being zapped must
be split.

Splitting may fail, usually due to a memory allocation failure. If this
happens, hole punching and private-to-shared conversion bail out early and
return the error to userspace.

Splitting is not necessary when zapping shared mappings, or when zapping in
kvm_gmem_release()/kvm_gmem_error_folio(). The penalty for zapping more
shared mappings than strictly necessary is minimal. kvm_gmem_release() zaps
all mappings anyway, kvm_gmem_error_folio() zaps the entire folio range, and
KVM's basic assumption is that a huge mapping is always backed by a single
folio.

Signed-off-by: Yan Zhao <yan.y.zhao@intel.com>
---
v3:
- Rebased to [2].
- Do not flush TLB for kvm_split_cross_boundary_leafs(), i.e., only flush
  TLB if zaps are performed.

[2] https://github.com/googleprodkernel/linux-cc/tree/wip-gmem-conversions-hugetlb-restructuring-12-08-25

RFC v2:
- Rebased to [1]. As changes in this patch are gmem specific, they may need
  to be updated if the implementation in [1] changes.
- Update kvm_split_boundary_leafs() to kvm_split_cross_boundary_leafs() and
  invoke it before kvm_gmem_punch_hole() and private-to-shared conversion.

[1] https://lore.kernel.org/all/cover.1747264138.git.ackerleytng@google.com/

RFC v1:
- new patch.
---
 virt/kvm/guest_memfd.c | 67 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 67 insertions(+)

diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 03613b791728..8e7fbed57a20 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -486,6 +486,55 @@ static int merge_truncate_range(struct inode *inode, pgoff_t start,
 	return ret;
 }
 
+static int __kvm_gmem_split_private(struct gmem_file *f, pgoff_t start, pgoff_t end)
+{
+	enum kvm_gfn_range_filter attr_filter = KVM_FILTER_PRIVATE;
+
+	bool locked = false;
+	struct kvm_memory_slot *slot;
+	struct kvm *kvm = f->kvm;
+	unsigned long index;
+	int ret = 0;
+
+	xa_for_each_range(&f->bindings, index, slot, start, end - 1) {
+		pgoff_t pgoff = slot->gmem.pgoff;
+		struct kvm_gfn_range gfn_range = {
+			.start = slot->base_gfn + max(pgoff, start) - pgoff,
+			.end = slot->base_gfn + min(pgoff + slot->npages, end) - pgoff,
+			.slot = slot,
+			.may_block = true,
+			.attr_filter = attr_filter,
+		};
+
+		if (!locked) {
+			KVM_MMU_LOCK(kvm);
+			locked = true;
+		}
+
+		ret = kvm_split_cross_boundary_leafs(kvm, &gfn_range, false);
+		if (ret)
+			break;
+	}
+
+	if (locked)
+		KVM_MMU_UNLOCK(kvm);
+
+	return ret;
+}
+
+static int kvm_gmem_split_private(struct inode *inode, pgoff_t start, pgoff_t end)
+{
+	struct gmem_file *f;
+	int r = 0;
+
+	kvm_gmem_for_each_file(f, inode->i_mapping) {
+		r = __kvm_gmem_split_private(f, start, end);
+		if (r)
+			break;
+	}
+	return r;
+}
+
 static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 {
 	pgoff_t start = offset >> PAGE_SHIFT;
@@ -499,6 +548,13 @@ static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	filemap_invalidate_lock(inode->i_mapping);
 
 	kvm_gmem_invalidate_begin(inode, start, end);
+
+	ret = kvm_gmem_split_private(inode, start, end);
+	if (ret) {
+		kvm_gmem_invalidate_end(inode, start, end);
+		filemap_invalidate_unlock(inode->i_mapping);
+		return ret;
+	}
 	kvm_gmem_zap(inode, start, end);
 
 	ret = merge_truncate_range(inode, start, len >> PAGE_SHIFT, true);
@@ -907,6 +963,17 @@ static int kvm_gmem_convert(struct inode *inode, pgoff_t start,
 	invalidate_start = kvm_gmem_compute_invalidate_start(inode, start);
 	invalidate_end = kvm_gmem_compute_invalidate_end(inode, end);
 	kvm_gmem_invalidate_begin(inode, invalidate_start, invalidate_end);
+
+	if (!to_private) {
+		r = kvm_gmem_split_private(inode, start, end);
+		if (r) {
+			*err_index = start;
+			mas_destroy(&mas);
+			kvm_gmem_invalidate_end(inode, invalidate_start, invalidate_end);
+			return r;
+		}
+	}
+
 	kvm_gmem_zap(inode, start, end);
 	kvm_gmem_invalidate_end(inode, invalidate_start, invalidate_end);
 
-- 
2.43.2
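
To make the "cross the boundary" condition from the changelog concrete: a huge
leaf needs splitting when it overlaps the range being zapped but is not fully
contained in it. The series implements this inside
kvm_split_cross_boundary_leafs(); the check below is only an illustrative
sketch, not code from the patch.

/*
 * Illustrative sketch only (not from the series): a huge leaf mapping
 * [leaf_start, leaf_end) must be split before zapping [start, end) iff it
 * overlaps the range but is not fully contained in it, i.e. zapping the
 * whole leaf would tear down more private mappings than requested.
 */
static bool leaf_crosses_boundary(gfn_t leaf_start, gfn_t leaf_end,
				  gfn_t start, gfn_t end)
{
	bool overlaps = leaf_start < end && leaf_end > start;
	bool contained = start <= leaf_start && leaf_end <= end;

	return overlaps && !contained;
}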
Re: [PATCH v3 16/24] KVM: guest_memfd: Split for punch hole and private-to-shared conversion
Posted by Sean Christopherson 1 week, 2 days ago
On Tue, Jan 06, 2026, Yan Zhao wrote:
>  virt/kvm/guest_memfd.c | 67 ++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 67 insertions(+)
> 
> diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
> index 03613b791728..8e7fbed57a20 100644
> --- a/virt/kvm/guest_memfd.c
> +++ b/virt/kvm/guest_memfd.c
> @@ -486,6 +486,55 @@ static int merge_truncate_range(struct inode *inode, pgoff_t start,
>  	return ret;
>  }
>  
> +static int __kvm_gmem_split_private(struct gmem_file *f, pgoff_t start, pgoff_t end)
> +{
> +	enum kvm_gfn_range_filter attr_filter = KVM_FILTER_PRIVATE;
> +
> +	bool locked = false;
> +	struct kvm_memory_slot *slot;
> +	struct kvm *kvm = f->kvm;
> +	unsigned long index;
> +	int ret = 0;
> +
> +	xa_for_each_range(&f->bindings, index, slot, start, end - 1) {
> +		pgoff_t pgoff = slot->gmem.pgoff;
> +		struct kvm_gfn_range gfn_range = {
> +			.start = slot->base_gfn + max(pgoff, start) - pgoff,
> +			.end = slot->base_gfn + min(pgoff + slot->npages, end) - pgoff,
> +			.slot = slot,
> +			.may_block = true,
> +			.attr_filter = attr_filter,
> +		};
> +
> +		if (!locked) {
> +			KVM_MMU_LOCK(kvm);
> +			locked = true;
> +		}
> +
> +		ret = kvm_split_cross_boundary_leafs(kvm, &gfn_range, false);

This bleeds TDX details all over guest_memfd.  Presumably SNP needs a similar
callback to update the RMP, but SNP most definitely doesn't _need_ to split
hugepages that now have mixed attributes.  In fact, SNP can probably do literally
nothing here and let kvm_gmem_zap() do the heavy lifting.

Sadly, an arch hook is "necessary", because otherwise we'll end up in dependency
hell.  E.g. I _want_ to just let the TDP MMU do the splits during kvm_gmem_zap(),
but then an -ENOMEM when splitting would result in a partial conversion if more
than one KVM instance was bound to the gmem instance (ignoring that it's actually
"fine" for the TDX case, because only one S-EPT tree can have a valid mapping).

Even if we're willing to live with that assumption baked into the TDP MMU, we'd
still need to allow kvm_gmem_zap() to fail, e.g. because -ENOMEM isn't strictly
fatal.  And I really, really don't want to set the precedent that "zap" operations
are allowed to fail.

But those details absolutely do not belong in guest_memfd.c.  Provide an arch
hook to give x86 the opportunity to pre-split hugepages, but keep the details
in arch code.

static int __kvm_gmem_convert(struct gmem_file *f, pgoff_t start, pgoff_t end,
			      bool to_private)
{
	struct kvm_memory_slot *slot;
	unsigned long index;
	int r;

	xa_for_each_range(&f->bindings, index, slot, start, end - 1) {
		r = kvm_arch_gmem_convert(f->kvm,
					  kvm_gmem_get_start_gfn(slot, start),
					  kvm_gmem_get_end_gfn(slot, end),
					  to_private);
		if (r)
			return r;
	}
	return 0;
}

static int kvm_gmem_convert(struct inode *inode, pgoff_t start, pgoff_t end,
			    bool to_private)
{
	struct gmem_file *f;
	int r;

	kvm_gmem_for_each_file(f, inode->i_mapping) {
		r = __kvm_gmem_convert(f, start, end, to_private);
		if (r)
			return r;
	}
	return 0;
}
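
For completeness, a rough sketch of what the x86 side of the proposed
kvm_arch_gmem_convert() hook could look like, reusing
kvm_split_cross_boundary_leafs() from this series. This is only an
illustration of the suggestion above, not posted code, and it assumes the
final signature also carries the memslot so a complete kvm_gfn_range can be
built (the example above passes only the GFN range):

/*
 * Sketch only: a possible x86 implementation of the proposed hook.  Assumes
 * the hook also receives the memslot (not part of the example above); a real
 * version would likely also gate the split on the VM type, since only TDX
 * needs pre-splitting while SNP can rely on kvm_gmem_zap() alone.
 */
int kvm_arch_gmem_convert(struct kvm *kvm, struct kvm_memory_slot *slot,
			  gfn_t start, gfn_t end, bool to_private)
{
	struct kvm_gfn_range range = {
		.start = start,
		.end = end,
		.slot = slot,
		.may_block = true,
		.attr_filter = KVM_FILTER_PRIVATE,
	};
	int r;

	/* Conversion to private doesn't require pre-splitting. */
	if (to_private)
		return 0;

	KVM_MMU_LOCK(kvm);
	/* Don't flush here; the subsequent zap flushes if needed. */
	r = kvm_split_cross_boundary_leafs(kvm, &range, false);
	KVM_MMU_UNLOCK(kvm);

	return r;
}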