Add dedicated inode structure (kvm_gmem_inode_info) and slab-allocated
inode cache for guest memory backing, similar to how shmem handles inodes.
This adds the necessary allocation/destruction functions and prepares
for upcoming guest_memfd NUMA policy support changes.
Signed-off-by: Shivank Garg <shivankg@amd.com>
---
virt/kvm/guest_memfd.c | 70 ++++++++++++++++++++++++++++++++++++++++--
1 file changed, 68 insertions(+), 2 deletions(-)
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 6c66a0974055..356947d36a47 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -17,6 +17,15 @@ struct kvm_gmem {
struct list_head entry;
};
+struct kvm_gmem_inode_info {
+ struct inode vfs_inode;
+};
+
+static inline struct kvm_gmem_inode_info *KVM_GMEM_I(struct inode *inode)
+{
+ return container_of(inode, struct kvm_gmem_inode_info, vfs_inode);
+}
+
/**
* folio_file_pfn - like folio_file_page, but return a pfn.
* @folio: The folio which contains this index.
@@ -389,13 +398,46 @@ static struct file_operations kvm_gmem_fops = {
.fallocate = kvm_gmem_fallocate,
};
+static struct kmem_cache *kvm_gmem_inode_cachep;
+
+static struct inode *kvm_gmem_alloc_inode(struct super_block *sb)
+{
+ struct kvm_gmem_inode_info *info;
+
+ info = alloc_inode_sb(sb, kvm_gmem_inode_cachep, GFP_KERNEL);
+ if (!info)
+ return NULL;
+
+ return &info->vfs_inode;
+}
+
+static void kvm_gmem_destroy_inode(struct inode *inode)
+{
+}
+
+static void kvm_gmem_free_inode(struct inode *inode)
+{
+ kmem_cache_free(kvm_gmem_inode_cachep, KVM_GMEM_I(inode));
+}
+
+static const struct super_operations kvm_gmem_super_operations = {
+ .statfs = simple_statfs,
+ .alloc_inode = kvm_gmem_alloc_inode,
+ .destroy_inode = kvm_gmem_destroy_inode,
+ .free_inode = kvm_gmem_free_inode,
+};
+
static int kvm_gmem_init_fs_context(struct fs_context *fc)
{
+ struct pseudo_fs_context *ctx;
+
if (!init_pseudo(fc, GUEST_MEMFD_MAGIC))
return -ENOMEM;
fc->s_iflags |= SB_I_NOEXEC;
fc->s_iflags |= SB_I_NODEV;
+ ctx = fc->fs_private;
+ ctx->ops = &kvm_gmem_super_operations;
return 0;
}
@@ -417,17 +459,41 @@ static int kvm_gmem_init_mount(void)
return 0;
}
+static void kvm_gmem_init_inode(void *foo)
+{
+ struct kvm_gmem_inode_info *info = foo;
+
+ inode_init_once(&info->vfs_inode);
+}
+
int kvm_gmem_init(struct module *module)
{
- kvm_gmem_fops.owner = module;
+ int ret;
+ struct kmem_cache_args args = {
+ .align = 0,
+ .ctor = kvm_gmem_init_inode,
+ };
- return kvm_gmem_init_mount();
+ kvm_gmem_fops.owner = module;
+ kvm_gmem_inode_cachep = kmem_cache_create("kvm_gmem_inode_cache",
+ sizeof(struct kvm_gmem_inode_info),
+ &args, SLAB_ACCOUNT);
+ if (!kvm_gmem_inode_cachep)
+ return -ENOMEM;
+ ret = kvm_gmem_init_mount();
+ if (ret) {
+ kmem_cache_destroy(kvm_gmem_inode_cachep);
+ return ret;
+ }
+ return 0;
}
void kvm_gmem_exit(void)
{
kern_unmount(kvm_gmem_mnt);
kvm_gmem_mnt = NULL;
+ rcu_barrier();
+ kmem_cache_destroy(kvm_gmem_inode_cachep);
}
static int kvm_gmem_migrate_folio(struct address_space *mapping,
--
2.43.0
On Wed, Aug 27, 2025, Shivank Garg wrote:
> Add dedicated inode structure (kvm_gmem_inode_info) and slab-allocated
> inode cache for guest memory backing, similar to how shmem handles inodes.
>
> This adds the necessary allocation/destruction functions and prepares
> for upcoming guest_memfd NUMA policy support changes.
>
> Signed-off-by: Shivank Garg <shivankg@amd.com>
> ---
> virt/kvm/guest_memfd.c | 70 ++++++++++++++++++++++++++++++++++++++++--
> 1 file changed, 68 insertions(+), 2 deletions(-)
>
> diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
> index 6c66a0974055..356947d36a47 100644
> --- a/virt/kvm/guest_memfd.c
> +++ b/virt/kvm/guest_memfd.c
> @@ -17,6 +17,15 @@ struct kvm_gmem {
> struct list_head entry;
> };
>
> +struct kvm_gmem_inode_info {
What about naming this simply gmem_inode?
> + struct inode vfs_inode;
> +};
> +
> +static inline struct kvm_gmem_inode_info *KVM_GMEM_I(struct inode *inode)
And then GMEM_I()?
And then (in a later follow-up if we target this for 6.18, or as a prep patch if
we push this out to 6.19), rename kvm_gmem to gmem_file?
That would make guest_memfd look a bit more like other filesystems, and I don't
see a need to preface the local structures and helpers with "kvm_", e.g. GMEM_I()
is analogous to x86's to_vmx() and to_svm().
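Concretely, a minimal sketch of the proposed accessor (gmem_inode and GMEM_I() are the names suggested above, not what the posted patch uses):

struct gmem_inode {
	struct inode vfs_inode;
};

static inline struct gmem_inode *GMEM_I(struct inode *inode)
{
	/* Recover the containing gmem_inode from the embedded VFS inode. */
	return container_of(inode, struct gmem_inode, vfs_inode);
}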
As for renaming kvm_gmem => gmem_file, I wandered back into this code via Ackerley's
in-place conversion series, and it took me a good long while to remember the roles
of files vs. inodes in gmem. That's probably a sign that the code needs clarification
given that I wrote the original code. :-)
Leveraging an old discussion[*], my thought is to get to this:
/*
 * A guest_memfd instance can be associated with multiple VMs, each with its own
* "view" of the underlying physical memory.
*
* The gmem's inode is effectively the raw underlying physical storage, and is
* used to track properties of the physical memory, while each gmem file is
* effectively a single VM's view of that storage, and is used to track assets
* specific to its associated VM, e.g. memslots=>gmem bindings.
*/
struct gmem_file {
struct kvm *kvm;
struct xarray bindings;
struct list_head entry;
};
struct gmem_inode {
struct shared_policy policy;
struct inode vfs_inode;
};
[*] https://lore.kernel.org/all/ZLGiEfJZTyl7M8mS@google.com
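As a side note on why the posted patch leaves .destroy_inode empty: following the shmem pattern, the shared policy would presumably be initialized when the inode is created and torn down in destroy_inode. A rough, hypothetical sketch only (the hook names below are invented for illustration, not taken from the series):

static void gmem_inode_init_policy(struct inode *inode)
{
	/* NULL means no explicit policy yet; ranges can be given policies later. */
	mpol_shared_policy_init(&GMEM_I(inode)->policy, NULL);
}

static void gmem_destroy_inode(struct inode *inode)
{
	/* Free any per-range NUMA policies attached to the backing storage. */
	mpol_free_shared_policy(&GMEM_I(inode)->policy);
}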
On Thu, Sep 25, 2025, Sean Christopherson wrote:
> On Wed, Aug 27, 2025, Shivank Garg wrote:
> > Add dedicated inode structure (kvm_gmem_inode_info) and slab-allocated
> > inode cache for guest memory backing, similar to how shmem handles inodes.
> >
> > This adds the necessary allocation/destruction functions and prepares
> > for upcoming guest_memfd NUMA policy support changes.
> >
> > Signed-off-by: Shivank Garg <shivankg@amd.com>
> > ---
> > virt/kvm/guest_memfd.c | 70 ++++++++++++++++++++++++++++++++++++++++--
> > 1 file changed, 68 insertions(+), 2 deletions(-)
> >
> > diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
> > index 6c66a0974055..356947d36a47 100644
> > --- a/virt/kvm/guest_memfd.c
> > +++ b/virt/kvm/guest_memfd.c
> > @@ -17,6 +17,15 @@ struct kvm_gmem {
> > struct list_head entry;
> > };
> >
> > +struct kvm_gmem_inode_info {
>
> What about naming this simply gmem_inode?
Heh, after looking through other filesystems, they're fairly even on appending
_info or not. My vote is definitely for gmem_inode.
Before we accumulate more inode usage, e.g. for in-place conversion (which is
actually why I started looking at this code), I think we should also settle on
naming for gmem_file and gmem_inode variables.
As below, "struct kvm_gmem *gmem" gets quite confusing once inodes are in the
picture, especially since that structure isn't _the_ gmem instance, rather it's
a VM's view of that gmem instance. And on the other side, "info" for the inode
is a bit imprecise, e.g. doesn't immediately make me think of inodes.
A few ideas:
(a)
struct gmem_inode *gmem;
struct gmem_file *f;
(b)
struct gmem_inode *gi;
struct gmem_file *f;
(c)
struct gmem_inode *gi;
struct gmem_file *gf;
(d)
struct gmem_inode *gmem_i;
struct gmem_file *gmem_f;
I think my vote would be for (a) or (b).  Option (c) seems like it would be hard to
visually differentiate between "gi" and "gf", and gmem_{i,f} are a bit verbose
IMO.
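Purely as an illustration of how option (b) reads in context (the helper below is invented for this example and builds on the gmem_file/gmem_inode sketch above, so don't read too much into the details):

static struct shared_policy *gmem_example_policy(struct file *file)
{
	/* Per-VM view of the guest_memfd instance. */
	struct gmem_file *f = file->private_data;
	/* Shared underlying storage backing all views. */
	struct gmem_inode *gi = GMEM_I(file_inode(file));

	/* The NUMA policy lives with the storage, not with any one VM's view. */
	return f ? &gi->policy : NULL;
}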
> > + struct inode vfs_inode;
> > +};
> > +
> > +static inline struct kvm_gmem_inode_info *KVM_GMEM_I(struct inode *inode)
>
> And then GMEM_I()?
>
> And then (in a later follow-up if we target this for 6.18, or as a prep patch if
> we push this out to 6.19), rename kvm_gmem to gmem_file?
>
> That would make guest_memfd look a bit more like other filesystems, and I don't
> see a need to preface the local structures and helpers with "kvm_", e.g. GMEM_I()
> is analogous to x86's to_vmx() and to_svm().
>
> As for renaming kvm_gmem => gmem_file, I wandered back into this code via Ackerley's
> in-place conversion series, and it took me a good long while to remember the roles
> of files vs. inodes in gmem. That's probably a sign that the code needs clarification
> given that I wrote the original code. :-)
>
> Leveraging an old discussion[*], my thought is to get to this:
>
> /*
> * A guest_memfd instance can be associated with multiple VMs, each with its own
> * "view" of the underlying physical memory.
> *
> * The gmem's inode is effectively the raw underlying physical storage, and is
> * used to track properties of the physical memory, while each gmem file is
> * effectively a single VM's view of that storage, and is used to track assets
> * specific to its associated VM, e.g. memslots=>gmem bindings.
> */
> struct gmem_file {
> struct kvm *kvm;
> struct xarray bindings;
> struct list_head entry;
> };
>
> struct gmem_inode {
> struct shared_policy policy;
> struct inode vfs_inode;
> };
>
> [*] https://lore.kernel.org/all/ZLGiEfJZTyl7M8mS@google.com