The number of writeback contexts is set to the number of CPUs by
default. This allows XFS to decide how to assign inodes to writeback
contexts based on its allocation groups.
Implement get_inode_wb_ctx_idx() in xfs_super_operations as follows:
- Limit the number of active writeback contexts to the number of AGs.
- Assign inodes from the same AG to a unique writeback context.
Signed-off-by: wangyufei <wangyufei@vivo.com>
---
fs/xfs/xfs_super.c | 14 ++++++++++++++
1 file changed, 14 insertions(+)
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 77acb3e5a..156df0397 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -1279,6 +1279,19 @@ xfs_fs_show_stats(
 	return 0;
 }
 
+static unsigned int
+xfs_fs_get_inode_wb_ctx_idx(
+	struct inode		*inode,
+	int			nr_wb_ctx)
+{
+	struct xfs_inode	*xfs_inode = XFS_I(inode);
+	struct xfs_mount	*mp = XFS_M(inode->i_sb);
+
+	if (mp->m_sb.sb_agcount <= nr_wb_ctx)
+		return XFS_INO_TO_AGNO(mp, xfs_inode->i_ino);
+	return xfs_inode->i_ino % nr_wb_ctx;
+}
+
 static const struct super_operations xfs_super_operations = {
 	.alloc_inode		= xfs_fs_alloc_inode,
 	.destroy_inode		= xfs_fs_destroy_inode,
@@ -1295,6 +1308,7 @@ static const struct super_operations xfs_super_operations = {
 	.free_cached_objects	= xfs_fs_free_cached_objects,
 	.shutdown		= xfs_fs_shutdown,
 	.show_stats		= xfs_fs_show_stats,
+	.get_inode_wb_ctx_idx	= xfs_fs_get_inode_wb_ctx_idx,
 };
 
 static int
--
2.34.1
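
For illustration, here is a minimal standalone sketch of the mapping the patch above implements: when the filesystem has no more AGs than writeback contexts, each AG gets its own context; otherwise the inode number is spread across contexts with a modulo. The names and the agino_log shift below are hypothetical userspace stand-ins, not the kernel code.

/*
 * Hypothetical userspace sketch of the AG-to-writeback-context mapping.
 * ino_to_agno() is a stand-in for XFS_INO_TO_AGNO(): the AG number is
 * the high bits of the inode number.
 */
#include <stdio.h>

static unsigned int ino_to_agno(unsigned long long ino, unsigned int agino_log)
{
	return (unsigned int)(ino >> agino_log);
}

static unsigned int wb_ctx_idx(unsigned long long ino, unsigned int agcount,
			       unsigned int agino_log, unsigned int nr_wb_ctx)
{
	/* Few enough AGs: give each AG its own writeback context. */
	if (agcount <= nr_wb_ctx)
		return ino_to_agno(ino, agino_log);
	/* More AGs than contexts: fall back to spreading by inode number. */
	return (unsigned int)(ino % nr_wb_ctx);
}

int main(void)
{
	/* 4 AGs, 8 contexts: every inode in AG 2 lands on context 2. */
	printf("%u\n", wb_ctx_idx((2ULL << 20) | 42, 4, 20, 8));
	/* 64 AGs, 8 contexts: fall back to ino modulo nr_wb_ctx. */
	printf("%u\n", wb_ctx_idx(12345, 64, 20, 8));
	return 0;
}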
On Sun, Sep 14, 2025 at 08:11:09PM +0800, wangyufei wrote:
> The number of writeback contexts is set to the number of CPUs by
> default. This allows XFS to decide how to assign inodes to writeback
> contexts based on its allocation groups.
>
> Implement get_inode_wb_ctx_idx() in xfs_super_operations as follows:
> - Limit the number of active writeback contexts to the number of AGs.
> - Assign inodes from the same AG to a unique writeback context.

I'm not sure this actually works. Data is spread over AGs, just with
a default to the parent inode AG if there is space, and even that isn't
true for the inode32 option or when using the RT subvolume.

> +
> +	if (mp->m_sb.sb_agcount <= nr_wb_ctx)
> +		return XFS_INO_TO_AGNO(mp, xfs_inode->i_ino);
> +	return xfs_inode->i_ino % nr_wb_ctx;
> +}
> +
>  static const struct super_operations xfs_super_operations = {
>  	.alloc_inode		= xfs_fs_alloc_inode,
>  	.destroy_inode		= xfs_fs_destroy_inode,
> @@ -1295,6 +1308,7 @@ static const struct super_operations xfs_super_operations = {
>  	.free_cached_objects	= xfs_fs_free_cached_objects,
>  	.shutdown		= xfs_fs_shutdown,
>  	.show_stats		= xfs_fs_show_stats,
> +	.get_inode_wb_ctx_idx	= xfs_fs_get_inode_wb_ctx_idx,
>  };
>
>  static int
> --
> 2.34.1
---end quoted text---
On Mon, Sep 22, 2025 at 06:56:42PM +0200, Christoph Hellwig wrote:
> On Sun, Sep 14, 2025 at 08:11:09PM +0800, wangyufei wrote:
> > The number of writeback contexts is set to the number of CPUs by
> > default. This allows XFS to decide how to assign inodes to writeback
> > contexts based on its allocation groups.
> >
> > Implement get_inode_wb_ctx_idx() in xfs_super_operations as follows:
> > - Limit the number of active writeback contexts to the number of AGs.
> > - Assign inodes from the same AG to a unique writeback context.
>
> I'm not sure this actually works. Data is spread over AGs, just with
> a default to the parent inode AG if there is space, and even that isn't
> true for the inode32 option or when using the RT subvolume.

I don't know of a better way to shard cheaply -- if you could group
inodes dynamically by a rough estimate of the AGs that map to the dirty
data (especially delalloc/unwritten/cow mappings) then that would be an
improvement, but that's still far from what I would consider the ideal.

Ideally (maybe?) one could shard dirty ranges first by the amount of
effort (pure overwrite; secondly backed-by-unwritten; thirdly
delalloc/cow). The first two groups could then be sharded by AG and
issued in parallel. The third group involves so many metadata changes
that you could probably just shard evenly across CPUs. Writebacks get
initiated in that order, and then we see where the bottlenecks lie in
ioend completion.

(But that's just my hazy untested brai^Widea :P)

--D

> > +
> > +	if (mp->m_sb.sb_agcount <= nr_wb_ctx)
> > +		return XFS_INO_TO_AGNO(mp, xfs_inode->i_ino);
> > +	return xfs_inode->i_ino % nr_wb_ctx;
> > +}
> > +
> >  static const struct super_operations xfs_super_operations = {
> >  	.alloc_inode		= xfs_fs_alloc_inode,
> >  	.destroy_inode		= xfs_fs_destroy_inode,
> > @@ -1295,6 +1308,7 @@ static const struct super_operations xfs_super_operations = {
> >  	.free_cached_objects	= xfs_fs_free_cached_objects,
> >  	.shutdown		= xfs_fs_shutdown,
> >  	.show_stats		= xfs_fs_show_stats,
> > +	.get_inode_wb_ctx_idx	= xfs_fs_get_inode_wb_ctx_idx,
> >  };
> >
> >  static int
> > --
> > 2.34.1
> ---end quoted text---
>
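
As a rough sketch of the effort-based sharding idea described above: classify each dirty range by how much allocation work its writeback implies, keep the cheap classes grouped by AG, and spread the allocation-heavy class evenly. Everything below (types, names, the classification itself) is hypothetical illustration, not existing kernel code.

/*
 * Hypothetical sketch of effort-based sharding of dirty ranges:
 * overwrite and unwritten-backed ranges are grouped by AG so per-AG
 * work stays on one writeback context; delalloc/CoW ranges are spread
 * evenly, since they are dominated by metadata updates anyway.
 */
#include <stdio.h>

enum wb_effort {
	WB_OVERWRITE,	/* pure overwrite of allocated, written blocks */
	WB_UNWRITTEN,	/* backed by unwritten extents */
	WB_ALLOC,	/* delalloc or CoW: allocation at writeback time */
};

static unsigned int shard_dirty_range(enum wb_effort effort,
				      unsigned int agno,
				      unsigned long long ino,
				      unsigned int nr_wb_ctx)
{
	switch (effort) {
	case WB_OVERWRITE:
	case WB_UNWRITTEN:
		/* Cheap classes: keep each AG's I/O on one context. */
		return agno % nr_wb_ctx;
	case WB_ALLOC:
	default:
		/* Allocation-heavy class: just spread across contexts. */
		return (unsigned int)(ino % nr_wb_ctx);
	}
}

int main(void)
{
	printf("%u\n", shard_dirty_range(WB_OVERWRITE, 5, 100, 8));	/* 5 */
	printf("%u\n", shard_dirty_range(WB_ALLOC, 5, 100, 8));	/* 4 */
	return 0;
}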