During the era of memcg charge migration, the kernel had to make sure
that writeback stat updates did not race with charge migration,
otherwise the stats could be charged to the wrong memcg. Now that
memcg charge migration is deprecated, there is no more race for
writeback stat updates and the previous locking can be removed.
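
To illustrate the race the locking used to guard against, here is a
stand-alone userspace sketch (plain C, not the kernel code; "group",
"object" and the mutex are made-up stand-ins for the memcg, the folio
and folio_memcg_lock()):

/*
 * Sketch of the old pattern: a per-group stat must be charged to the
 * group the object belongs to at the moment of the update, so the
 * group pointer has to be stable across the update while migration
 * between groups is possible.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct group {
	long nr_writeback;		/* per-group stat, like NR_WRITEBACK */
};

struct object {
	struct group *grp;		/* which group is charged, like folio->memcg */
	bool writeback;			/* like PG_writeback */
	pthread_mutex_t lock;		/* stands in for folio_memcg_lock() */
};

/* Start writeback: flag the object and charge the stat to its group. */
static void start_writeback(struct object *obj)
{
	pthread_mutex_lock(&obj->lock);		/* folio_memcg_lock()      */
	obj->writeback = true;
	obj->grp->nr_writeback++;		/* lruvec_stat_mod_folio() */
	pthread_mutex_unlock(&obj->lock);	/* folio_memcg_unlock()    */
}

/*
 * Charge migration: move the object, and any stats charged to it, to
 * another group.  Without the lock this could interleave with
 * start_writeback() and the stat could end up charged to the wrong
 * group, leaving both counters skewed.  Once this operation no longer
 * exists, start_writeback() needs no locking at all, which is what
 * this patch does for the writeback stat updates.
 */
static void migrate(struct object *obj, struct group *to)
{
	pthread_mutex_lock(&obj->lock);
	if (obj->writeback) {
		obj->grp->nr_writeback--;
		to->nr_writeback++;
	}
	obj->grp = to;
	pthread_mutex_unlock(&obj->lock);
}

int main(void)
{
	struct group a = { 0 }, b = { 0 };
	struct object o = { .grp = &a, .writeback = false,
			    .lock = PTHREAD_MUTEX_INITIALIZER };

	start_writeback(&o);	/* charged to group a */
	migrate(&o, &b);	/* stat moves to group b with the object */
	printf("a=%ld b=%ld\n", a.nr_writeback, b.nr_writeback);
	return 0;
}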
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
---
mm/page-writeback.c | 4 ----
1 file changed, 4 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index a76a73529fd9..9c3317c3a615 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -3083,7 +3083,6 @@ bool __folio_end_writeback(struct folio *folio)
struct address_space *mapping = folio_mapping(folio);
bool ret;
- folio_memcg_lock(folio);
if (mapping && mapping_use_writeback_tags(mapping)) {
struct inode *inode = mapping->host;
struct backing_dev_info *bdi = inode_to_bdi(inode);
@@ -3114,7 +3113,6 @@ bool __folio_end_writeback(struct folio *folio)
lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
node_stat_mod_folio(folio, NR_WRITTEN, nr);
- folio_memcg_unlock(folio);
return ret;
}
@@ -3127,7 +3125,6 @@ void __folio_start_writeback(struct folio *folio, bool keep_write)
VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
- folio_memcg_lock(folio);
if (mapping && mapping_use_writeback_tags(mapping)) {
XA_STATE(xas, &mapping->i_pages, folio_index(folio));
struct inode *inode = mapping->host;
@@ -3168,7 +3165,6 @@ void __folio_start_writeback(struct folio *folio, bool keep_write)
lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
- folio_memcg_unlock(folio);
access_ret = arch_make_folio_accessible(folio);
/*
--
2.43.5
On Thu, Oct 24, 2024 at 06:23:01PM -0700, Shakeel Butt wrote:
> During the era of memcg charge migration, the kernel had to make sure
> that writeback stat updates did not race with charge migration,
> otherwise the stats could be charged to the wrong memcg. Now that
> memcg charge migration is deprecated, there is no more race for
> writeback stat updates and the previous locking can be removed.
>
> Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>

Acked-by: Johannes Weiner <hannes@cmpxchg.org>
On Thu, Oct 24, 2024 at 06:23:01PM -0700, Shakeel Butt wrote:
> During the era of memcg charge migration, the kernel had to make sure
> that writeback stat updates did not race with charge migration,
> otherwise the stats could be charged to the wrong memcg. Now that
> memcg charge migration is deprecated, there is no more race for
> writeback stat updates and the previous locking can be removed.
>
> Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>

Reviewed-by: Roman Gushchin <roman.gushchin@linux.dev>
On Thu 24-10-24 18:23:01, Shakeel Butt wrote:
> During the era of memcg charge migration, the kernel had to make sure
> that writeback stat updates did not race with charge migration,
> otherwise the stats could be charged to the wrong memcg. Now that
> memcg charge migration is deprecated, there is no more race for

s@deprecated@gone

> writeback stat updates and the previous locking can be removed.
>
> Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>

Acked-by: Michal Hocko <mhocko@suse.com>

Thanks!

> ---
> mm/page-writeback.c | 4 ----
> 1 file changed, 4 deletions(-)
>
> diff --git a/mm/page-writeback.c b/mm/page-writeback.c
> index a76a73529fd9..9c3317c3a615 100644
> --- a/mm/page-writeback.c
> +++ b/mm/page-writeback.c
> @@ -3083,7 +3083,6 @@ bool __folio_end_writeback(struct folio *folio)
> struct address_space *mapping = folio_mapping(folio);
> bool ret;
>
> - folio_memcg_lock(folio);
> if (mapping && mapping_use_writeback_tags(mapping)) {
> struct inode *inode = mapping->host;
> struct backing_dev_info *bdi = inode_to_bdi(inode);
> @@ -3114,7 +3113,6 @@ bool __folio_end_writeback(struct folio *folio)
> lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
> zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
> node_stat_mod_folio(folio, NR_WRITTEN, nr);
> - folio_memcg_unlock(folio);
>
> return ret;
> }
> @@ -3127,7 +3125,6 @@ void __folio_start_writeback(struct folio *folio, bool keep_write)
>
> VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
>
> - folio_memcg_lock(folio);
> if (mapping && mapping_use_writeback_tags(mapping)) {
> XA_STATE(xas, &mapping->i_pages, folio_index(folio));
> struct inode *inode = mapping->host;
> @@ -3168,7 +3165,6 @@ void __folio_start_writeback(struct folio *folio, bool keep_write)
>
> lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
> zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
> - folio_memcg_unlock(folio);
>
> access_ret = arch_make_folio_accessible(folio);
> /*
> --
> 2.43.5

--
Michal Hocko
SUSE Labs