Add a dedicated inode structure (kvm_gmem_inode_info) and a slab-allocated
inode cache for guest memory backing, similar to how shmem handles inodes.

This adds the necessary inode allocation/destruction callbacks and prepares
for the upcoming guest_memfd NUMA policy support changes.
Signed-off-by: Shivank Garg <shivankg@amd.com>
---
virt/kvm/guest_memfd.c | 58 ++++++++++++++++++++++++++++++++++++++++--
1 file changed, 56 insertions(+), 2 deletions(-)
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index dabcc2317291..989e2b26b344 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -17,6 +17,15 @@ struct kvm_gmem {
struct list_head entry;
};
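+/*
+ * Per-inode state for guest_memfd. Future fields (e.g. the NUMA policy
+ * this series prepares for) will sit alongside the embedded VFS inode.
+ */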
+struct kvm_gmem_inode_info {
+ struct inode vfs_inode;
+};
+
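+/* Map a VFS inode back to its containing kvm_gmem_inode_info. */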
+static inline struct kvm_gmem_inode_info *KVM_GMEM_I(struct inode *inode)
+{
+ return container_of(inode, struct kvm_gmem_inode_info, vfs_inode);
+}
+
/**
* folio_file_pfn - like folio_file_page, but return a pfn.
* @folio: The folio which contains this index.
@@ -392,8 +401,33 @@ static struct file_operations kvm_gmem_fops = {
.fallocate = kvm_gmem_fallocate,
};
+static struct kmem_cache *kvm_gmem_inode_cachep;
+
+static struct inode *kvm_gmem_alloc_inode(struct super_block *sb)
+{
+ struct kvm_gmem_inode_info *info;
+
+ info = alloc_inode_sb(sb, kvm_gmem_inode_cachep, GFP_KERNEL);
+ if (!info)
+ return NULL;
+
+ return &info->vfs_inode;
+}
+
+static void kvm_gmem_destroy_inode(struct inode *inode)
+{
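+ /*
+  * Nothing to free here yet: the inode itself is released in
+  * kvm_gmem_free_inode() after an RCU grace period.
+  */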
+}
+
+static void kvm_gmem_free_inode(struct inode *inode)
+{
+ kmem_cache_free(kvm_gmem_inode_cachep, KVM_GMEM_I(inode));
+}
+
static const struct super_operations kvm_gmem_super_operations = {
.statfs = simple_statfs,
+ .alloc_inode = kvm_gmem_alloc_inode,
+ .destroy_inode = kvm_gmem_destroy_inode,
+ .free_inode = kvm_gmem_free_inode,
};
static int kvm_gmem_init_fs_context(struct fs_context *fc)
@@ -426,17 +460,37 @@ static int kvm_gmem_init_mount(void)
return 0;
}
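+/*
+ * Slab constructor: called when a cache object is first created, not on
+ * every allocation, so only one-time initialization such as
+ * inode_init_once() belongs here.
+ */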
+static void kvm_gmem_init_inode(void *foo)
+{
+ struct kvm_gmem_inode_info *info = foo;
+
+ inode_init_once(&info->vfs_inode);
+}
+
int kvm_gmem_init(struct module *module)
{
- kvm_gmem_fops.owner = module;
+ int ret;
- return kvm_gmem_init_mount();
+ kvm_gmem_fops.owner = module;
+ kvm_gmem_inode_cachep = kmem_cache_create("kvm_gmem_inode_cache",
+ sizeof(struct kvm_gmem_inode_info),
+ 0, SLAB_ACCOUNT,
+ kvm_gmem_init_inode);
+ if (!kvm_gmem_inode_cachep)
+ return -ENOMEM;
+ ret = kvm_gmem_init_mount();
+ if (ret) {
+ kmem_cache_destroy(kvm_gmem_inode_cachep);
+ return ret;
+ }
+ return 0;
}
void kvm_gmem_exit(void)
{
kern_unmount(kvm_gmem_mnt);
kvm_gmem_mnt = NULL;
+ kmem_cache_destroy(kvm_gmem_inode_cachep);
}
static int kvm_gmem_migrate_folio(struct address_space *mapping,
--
2.43.0
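For context, the container_of() wrapper above is what lets per-inode state
hang off the VFS inode; embedding vfs_inode in the info struct (rather than
pointing to it) is what makes that conversion valid. A hypothetical sketch of
how a follow-up patch might use it (the policy field and helper below are
invented for illustration and are not part of this patch):

	struct kvm_gmem_inode_info {
		struct mempolicy *policy;	/* hypothetical NUMA policy field */
		struct inode vfs_inode;
	};

	static struct mempolicy *kvm_gmem_get_policy(struct inode *inode)
	{
		/* Resolve the containing info struct via container_of(). */
		return KVM_GMEM_I(inode)->policy;
	}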
On 7/13/25 19:43, Shivank Garg wrote:

[...]

> + kvm_gmem_inode_cachep = kmem_cache_create("kvm_gmem_inode_cache",
> + sizeof(struct kvm_gmem_inode_info),
> + 0, SLAB_ACCOUNT,
> + kvm_gmem_init_inode);

Since this is new code, please use the new variant of kmem_cache_create()
that takes the args parameter.
On 7/21/2025 5:14 PM, Vlastimil Babka wrote:

>> + kvm_gmem_inode_cachep = kmem_cache_create("kvm_gmem_inode_cache",
>> + sizeof(struct kvm_gmem_inode_info),
>> + 0, SLAB_ACCOUNT,
>> + kvm_gmem_init_inode);
>
> Since this is new code, please use the new variant of kmem_cache_create()
> that takes the args parameter.

Thank you for the review and suggestion. I'll update this in the next version.
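For reference, a minimal sketch of what the suggested args-based call could
look like (untested; it assumes the struct kmem_cache_args interface from
recent kernels, where the constructor is passed through the args struct
instead of a trailing ctor argument):

	struct kmem_cache_args args = {
		.ctor = kvm_gmem_init_inode,
	};

	kvm_gmem_inode_cachep = kmem_cache_create("kvm_gmem_inode_cache",
						  sizeof(struct kvm_gmem_inode_info),
						  &args, SLAB_ACCOUNT);
	if (!kvm_gmem_inode_cachep)
		return -ENOMEM;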