From: Qi Zheng <zhengqi.arch@bytedance.com>
Similar to list_lru, the split queue is relatively independent and does
not need to be reparented together with the objcg and LRU folios (which
requires holding the objcg lock and the lru lock). So let's apply a
mechanism similar to list_lru's and reparent the split queue separately
when the memcg goes offline.
This is also a preparation for reparenting LRU folios.
Signed-off-by: Qi Zheng <zhengqi.arch@bytedance.com>
Acked-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: Muchun Song <muchun.song@linux.dev>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
---
 include/linux/huge_mm.h    |  4 ++++
 include/linux/memcontrol.h | 10 +++++++++
 mm/huge_memory.c           | 44 ++++++++++++++++++++++++++++++++++++++
 mm/memcontrol.c            |  1 +
4 files changed, 59 insertions(+)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 5ba9cac440b92..f381339842fa1 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -412,6 +412,9 @@ static inline int split_huge_page(struct page *page)
return split_huge_page_to_list_to_order(page, NULL, 0);
}
void deferred_split_folio(struct folio *folio, bool partially_mapped);
+#ifdef CONFIG_MEMCG
+void reparent_deferred_split_queue(struct mem_cgroup *memcg);
+#endif
void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long address, bool freeze);
@@ -644,6 +647,7 @@ static inline int try_folio_split_to_order(struct folio *folio,
}
static inline void deferred_split_folio(struct folio *folio, bool partially_mapped) {}
+static inline void reparent_deferred_split_queue(struct mem_cgroup *memcg) {}
#define split_huge_pmd(__vma, __pmd, __address) \
do { } while (0)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index fad2661ca55d8..8d2e250535a8a 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1774,6 +1774,11 @@ static inline void count_objcg_events(struct obj_cgroup *objcg,
bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid);
+static inline bool memcg_is_dying(struct mem_cgroup *memcg)
+{
+ return memcg ? css_is_dying(&memcg->css) : false;
+}
+
#else
static inline bool mem_cgroup_kmem_disabled(void)
{
@@ -1840,6 +1845,11 @@ static inline bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid)
{
return true;
}
+
+static inline bool memcg_is_dying(struct mem_cgroup *memcg)
+{
+ return false;
+}
#endif /* CONFIG_MEMCG */
#if defined(CONFIG_MEMCG) && defined(CONFIG_ZSWAP)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index db03853a73e3f..8bb63acaa8329 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1118,8 +1118,19 @@ static struct deferred_split *split_queue_lock(int nid, struct mem_cgroup *memcg
{
struct deferred_split *queue;
+retry:
queue = memcg_split_queue(nid, memcg);
spin_lock(&queue->split_queue_lock);
+ /*
+ * There is a period between setting memcg to dying and reparenting
+ * deferred split queue, and during this period the THPs in the deferred
+ * split queue will be hidden from the shrinker side.
+ */
+ if (unlikely(memcg_is_dying(memcg))) {
+ spin_unlock(&queue->split_queue_lock);
+ memcg = parent_mem_cgroup(memcg);
+ goto retry;
+ }
return queue;
}
@@ -1129,8 +1140,14 @@ split_queue_lock_irqsave(int nid, struct mem_cgroup *memcg, unsigned long *flags
{
struct deferred_split *queue;
+retry:
queue = memcg_split_queue(nid, memcg);
spin_lock_irqsave(&queue->split_queue_lock, *flags);
+ if (unlikely(memcg_is_dying(memcg))) {
+ spin_unlock_irqrestore(&queue->split_queue_lock, *flags);
+ memcg = parent_mem_cgroup(memcg);
+ goto retry;
+ }
return queue;
}
@@ -4402,6 +4419,33 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
return split;
}
+#ifdef CONFIG_MEMCG
+void reparent_deferred_split_queue(struct mem_cgroup *memcg)
+{
+ struct mem_cgroup *parent = parent_mem_cgroup(memcg);
+ struct deferred_split *ds_queue = &memcg->deferred_split_queue;
+ struct deferred_split *parent_ds_queue = &parent->deferred_split_queue;
+ int nid;
+
+ spin_lock_irq(&ds_queue->split_queue_lock);
+ spin_lock_nested(&parent_ds_queue->split_queue_lock, SINGLE_DEPTH_NESTING);
+
+ if (!ds_queue->split_queue_len)
+ goto unlock;
+
+ list_splice_tail_init(&ds_queue->split_queue, &parent_ds_queue->split_queue);
+ parent_ds_queue->split_queue_len += ds_queue->split_queue_len;
+ ds_queue->split_queue_len = 0;
+
+ for_each_node(nid)
+ set_shrinker_bit(parent, nid, shrinker_id(deferred_split_shrinker));
+
+unlock:
+ spin_unlock(&parent_ds_queue->split_queue_lock);
+ spin_unlock_irq(&ds_queue->split_queue_lock);
+}
+#endif
+
#ifdef CONFIG_DEBUG_FS
static void split_huge_pages_all(void)
{
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 025da46d9959f..c34029e92baba 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3920,6 +3920,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
zswap_memcg_offline_cleanup(memcg);
memcg_offline_kmem(memcg);
+ reparent_deferred_split_queue(memcg);
reparent_shrinker_deferred(memcg);
wb_memcg_offline(memcg);
lru_gen_offline_memcg(memcg);
--
2.20.1
On Mon, 10 Nov 2025 16:17:58 +0800 Qi Zheng <qi.zheng@linux.dev> wrote:
> From: Qi Zheng <zhengqi.arch@bytedance.com>
>
> Similar to list_lru, the split queue is relatively independent and does
> not need to be reparented together with the objcg and LRU folios (which
> requires holding the objcg lock and the lru lock). So let's apply a
> mechanism similar to list_lru's and reparent the split queue separately
> when the memcg goes offline.
>
Hi everyone,
I'm running all of the MM commits since v6.18 through the AI patch review
tools, and this one was flagged. I'm much less confident in this report;
AI tends to get these races wrong:
[AI review output]
> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> index b0c6a4635c67b..cc6db20d7dca0 100644
> --- a/include/linux/memcontrol.h
> +++ b/include/linux/memcontrol.h
> @@ -1775,6 +1775,12 @@ static inline void count_objcg_events(struct obj_cgroup *objcg,
> bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid);
>
> void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg);
> +
> +static inline bool memcg_is_dying(struct mem_cgroup *memcg)
> +{
> + return memcg ? css_is_dying(&memcg->css) : false;
> +}
[ ... ]
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 13684e5376e82..d17d3810a882c 100644
[ ... ]
> @@ -1129,8 +1140,14 @@ split_queue_lock_irqsave(int nid, struct mem_cgroup *memcg, unsigned long *flags
> {
> struct deferred_split *queue;
>
> +retry:
> queue = memcg_split_queue(nid, memcg);
> spin_lock_irqsave(&queue->split_queue_lock, *flags);
> + if (unlikely(memcg_is_dying(memcg))) {
> + spin_unlock_irqrestore(&queue->split_queue_lock, *flags);
> + memcg = parent_mem_cgroup(memcg);
> + goto retry;
> + }
>
> return queue;
> }
There appears to be a race window that can cause a split_queue_len counter
mismatch. Between the time CSS_DYING is set (in kill_css()) and when
reparent_deferred_split_queue() actually runs (asynchronously in
css_offline), folios can be unqueued via __folio_unqueue_deferred_split().
In this scenario:
__folio_unqueue_deferred_split()
-> folio_split_queue_lock_irqsave()
-> split_queue_lock_irqsave() sees memcg_is_dying() == true
-> returns parent's queue (locked)
-> ds_queue->split_queue_len-- decrements parent's counter
-> list_del_init() removes folio from child's list
The folio is correctly removed from the child's list (list_del_init
unlinks the entry via its own prev/next pointers and leaves it
self-referential), but the wrong queue's counter is decremented. If the
parent queue was empty (split_queue_len == 0), this would cause an
unsigned long underflow to ULONG_MAX.
Could this be addressed by checking that the folio is actually in the
queue being unlocked, perhaps by verifying the list head matches before
decrementing split_queue_len?
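For illustration, a band-aid along those lines might look like the sketch
below. folio_on_split_queue() is a hypothetical helper invented here (it
does not exist in the tree), and its O(n) walk is only to make the idea
concrete:

/* Hypothetical, for illustration only: confirm @folio is actually
 * linked on @queue before trusting that queue's split_queue_len. */
static bool folio_on_split_queue(struct deferred_split *queue,
				 struct folio *folio)
{
	struct folio *pos;

	list_for_each_entry(pos, &queue->split_queue, _deferred_list)
		if (pos == folio)
			return true;
	return false;
}

	/* In __folio_unqueue_deferred_split(), under the returned lock: */
	ds_queue = folio_split_queue_lock_irqsave(folio, &flags);
	if (!list_empty(&folio->_deferred_list)) {
		/*
		 * During the CSS_DYING window, ds_queue may be the parent's
		 * queue while the folio still sits on the child's list;
		 * only decrement the counter when they actually match.
		 */
		if (folio_on_split_queue(ds_queue, folio))
			ds_queue->split_queue_len--;
		list_del_init(&folio->_deferred_list);
	}
	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);

The O(n) walk under the queue lock is obviously not something we'd want in
the real unqueue path; it's just meant to show where such a check would sit.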
[ ... ]
> @@ -3920,6 +3920,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
> zswap_memcg_offline_cleanup(memcg);
>
> memcg_offline_kmem(memcg);
> + reparent_deferred_split_queue(memcg);
The race window exists because CSS_DYING is set in kill_css() before
css_offline() callbacks run asynchronously. The comment in split_queue_lock
mentions that THPs will be "hidden from the shrinker side" during this
period, but does not address the counter accounting issue when folios
are unqueued.
On Tue, Jan 13, 2026 at 02:00:43PM -0800, Chris Mason wrote:
> On Mon, 10 Nov 2025 16:17:58 +0800 Qi Zheng <qi.zheng@linux.dev> wrote:
>
> > From: Qi Zheng <zhengqi.arch@bytedance.com>
> >
> > Similar to list_lru, the split queue is relatively independent and does
> > not need to be reparented together with the objcg and LRU folios (which
> > requires holding the objcg lock and the lru lock). So let's apply a
> > mechanism similar to list_lru's and reparent the split queue separately
> > when the memcg goes offline.
> >
>
> Hi everyone,
>
> I'm running all of the MM commits since v6.18 through the AI patch review
> tools, and this one was flagged. I'm much less confident in this report;
> AI tends to get these races wrong:
>
> [AI review output]
>
> > diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
> > index b0c6a4635c67b..cc6db20d7dca0 100644
> > --- a/include/linux/memcontrol.h
> > +++ b/include/linux/memcontrol.h
> > @@ -1775,6 +1775,12 @@ static inline void count_objcg_events(struct obj_cgroup *objcg,
> > bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid);
> >
> > void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg);
> > +
> > +static inline bool memcg_is_dying(struct mem_cgroup *memcg)
> > +{
> > + return memcg ? css_is_dying(&memcg->css) : false;
> > +}
>
> [ ... ]
>
> > diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> > index 13684e5376e82..d17d3810a882c 100644
>
> [ ... ]
>
> > @@ -1129,8 +1140,14 @@ split_queue_lock_irqsave(int nid, struct mem_cgroup *memcg, unsigned long *flags
> > {
> > struct deferred_split *queue;
> >
> > +retry:
> > queue = memcg_split_queue(nid, memcg);
> > spin_lock_irqsave(&queue->split_queue_lock, *flags);
> > + if (unlikely(memcg_is_dying(memcg))) {
> > + spin_unlock_irqrestore(&queue->split_queue_lock, *flags);
> > + memcg = parent_mem_cgroup(memcg);
> > + goto retry;
> > + }
> >
> > return queue;
> > }
>
> There appears to be a race window that can cause a split_queue_len counter
> mismatch. Between the time CSS_DYING is set (in kill_css()) and when
> reparent_deferred_split_queue() actually runs (asynchronously in
> css_offline), folios can be unqueued via __folio_unqueue_deferred_split().
>
> In this scenario:
> __folio_unqueue_deferred_split()
> -> folio_split_queue_lock_irqsave()
> -> split_queue_lock_irqsave() sees memcg_is_dying() == true
> -> returns parent's queue (locked)
> -> ds_queue->split_queue_len-- decrements parent's counter
> -> list_del_init() removes folio from child's list
>
> The folio is correctly removed from the child's list (list_del_init is
> self-referential), but the wrong queue's counter is decremented.
Good point. Sounds pretty possible to me?
I don't think there's anything that prevents it from being unqueued
before it's reparented.
> If the parent queue was empty (split_queue_len == 0), this would cause an
> unsigned long underflow to ULONG_MAX.
That said, the accounting mismatch will only persist until
reparent_deferred_split_queue() reparents the deferred split queue.
Ideally this should be fixed by checking if the folio has been
reparented after acquiring the split queue lock, but since we don't reparent
LRU pages yet ... do we need a band-aid before then?
Do we want to have an is_dying property in the split queue, as in v2? [1]
[1] https://lore.kernel.org/linux-mm/55370bda7b2df617033ac12116c1712144bb7591.1758618527.git.zhengqi.arch@bytedance.com
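For reference, the v2 shape was roughly the following (reconstructed from
[1]; the field name and exact placement are my approximation, not a quote
of that posting):

struct deferred_split {
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
	bool is_dying;		/* only flipped under split_queue_lock */
};

static struct deferred_split *split_queue_lock(int nid, struct mem_cgroup *memcg)
{
	struct deferred_split *queue;

retry:
	queue = memcg_split_queue(nid, memcg);
	spin_lock(&queue->split_queue_lock);
	if (unlikely(queue->is_dying)) {
		/* contents were already spliced into the parent */
		spin_unlock(&queue->split_queue_lock);
		memcg = parent_mem_cgroup(memcg);
		goto retry;
	}
	return queue;
}

Since reparent_deferred_split_queue() would set is_dying while holding the
child's split_queue_lock just before splicing, anyone who takes the lock
afterwards observes the splice itself rather than CSS_DYING, so the retry
can no longer disagree with where the folios actually are.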
> Could this be addressed by checking that the folio is actually in the
> queue being unlocked, perhaps by verifying the list head matches before
> decrementing split_queue_len?
> [ ... ]
>
> > @@ -3920,6 +3920,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
> > zswap_memcg_offline_cleanup(memcg);
> >
> > memcg_offline_kmem(memcg);
> > + reparent_deferred_split_queue(memcg);
>
> The race window exists because CSS_DYING is set in kill_css() before
> css_offline() callbacks run asynchronously. The comment in split_queue_lock
> mentions that THPs will be "hidden from the shrinker side" during this
> period, but does not address the counter accounting issue when folios
> are unqueued.
--
Cheers,
Harry / Hyeonggon
On 1/14/26 1:36 PM, Harry Yoo wrote:
> On Tue, Jan 13, 2026 at 02:00:43PM -0800, Chris Mason wrote:
>> On Mon, 10 Nov 2025 16:17:58 +0800 Qi Zheng <qi.zheng@linux.dev> wrote:
>>
>>> From: Qi Zheng <zhengqi.arch@bytedance.com>
>>>
>>> Similar to list_lru, the split queue is relatively independent and does
>>> not need to be reparented together with the objcg and LRU folios (which
>>> requires holding the objcg lock and the lru lock). So let's apply a
>>> mechanism similar to list_lru's and reparent the split queue separately
>>> when the memcg goes offline.
>>>
>>
>> Hi everyone,
>>
>> I'm running all of the MM commits since v6.18 through the AI patch review
>> tools, and this one was flagged. I'm much less confident in this report;
>> AI tends to get these races wrong:
>>
>> [AI review output]
>>
>>> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
>>> index b0c6a4635c67b..cc6db20d7dca0 100644
>>> --- a/include/linux/memcontrol.h
>>> +++ b/include/linux/memcontrol.h
>>> @@ -1775,6 +1775,12 @@ static inline void count_objcg_events(struct obj_cgroup *objcg,
>>> bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid);
>>>
>>> void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg);
>>> +
>>> +static inline bool memcg_is_dying(struct mem_cgroup *memcg)
>>> +{
>>> + return memcg ? css_is_dying(&memcg->css) : false;
>>> +}
>>
>> [ ... ]
>>
>>> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
>>> index 13684e5376e82..d17d3810a882c 100644
>>
>> [ ... ]
>>
>>> @@ -1129,8 +1140,14 @@ split_queue_lock_irqsave(int nid, struct mem_cgroup *memcg, unsigned long *flags
>>> {
>>> struct deferred_split *queue;
>>>
>>> +retry:
>>> queue = memcg_split_queue(nid, memcg);
>>> spin_lock_irqsave(&queue->split_queue_lock, *flags);
>>> + if (unlikely(memcg_is_dying(memcg))) {
>>> + spin_unlock_irqrestore(&queue->split_queue_lock, *flags);
>>> + memcg = parent_mem_cgroup(memcg);
>>> + goto retry;
>>> + }
>>>
>>> return queue;
>>> }
>>
>> There appears to be a race window that can cause a split_queue_len counter
>> mismatch. Between the time CSS_DYING is set (in kill_css()) and when
>> reparent_deferred_split_queue() actually runs (asynchronously in
>> css_offline), folios can be unqueued via __folio_unqueue_deferred_split().
>>
>> In this scenario:
>> __folio_unqueue_deferred_split()
>> -> folio_split_queue_lock_irqsave()
>> -> split_queue_lock_irqsave() sees memcg_is_dying() == true
>> -> returns parent's queue (locked)
>> -> ds_queue->split_queue_len-- decrements parent's counter
>> -> list_del_init() removes folio from child's list
>>
>> The folio is correctly removed from the child's list (list_del_init is
>> self-referential), but the wrong queue's counter is decremented.
>
> Good point. Sounds pretty possible to me?
>
> I don't think there's anything that prevents it from being unqueued
> before it's reparented.
>
>> If the parent queue was empty (split_queue_len == 0), this would cause an
>> unsigned long underflow to ULONG_MAX.
>
> That said, the accounting mismatch will only persist until
> reparent_deferred_split_queue() reparents the deferred split queue.
This period is very short, and the only thing affected should be
deferred_split_count(), but it does not cause a system error.
So I think maybe we can leave it unrepaired.
>
> Ideally this should be fixed by checking if the folio has been
> reparented after acquiring the split queue lock, but since we don't reparent
> LRU pages yet ... do we need a band-aid before then?
>
> Do we want to have an is_dying property in the split queue, as in v2? [1]
> [1] https://lore.kernel.org/linux-mm/55370bda7b2df617033ac12116c1712144bb7591.1758618527.git.zhengqi.arch@bytedance.com
>
>> Could this be addressed by checking that the folio is actually in the
>> queue being unlocked, perhaps by verifying the list head matches before
>> decrementing split_queue_len?
>
>> [ ... ]
>>
>>> @@ -3920,6 +3920,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
>>> zswap_memcg_offline_cleanup(memcg);
>>>
>>> memcg_offline_kmem(memcg);
>>> + reparent_deferred_split_queue(memcg);
>>
>> The race window exists because CSS_DYING is set in kill_css() before
>> css_offline() callbacks run asynchronously. The comment in split_queue_lock
>> mentions that THPs will be "hidden from the shrinker side" during this
>> period, but does not address the counter accounting issue when folios
>> are unqueued.
>
> On Jan 14, 2026, at 14:25, Qi Zheng <qi.zheng@linux.dev> wrote:
>
>
>
> On 1/14/26 1:36 PM, Harry Yoo wrote:
>> On Tue, Jan 13, 2026 at 02:00:43PM -0800, Chris Mason wrote:
>>> On Mon, 10 Nov 2025 16:17:58 +0800 Qi Zheng <qi.zheng@linux.dev> wrote:
>>>
>>>> From: Qi Zheng <zhengqi.arch@bytedance.com>
>>>>
>>>> Similar to list_lru, the split queue is relatively independent and does
>>>> not need to be reparented together with the objcg and LRU folios (which
>>>> requires holding the objcg lock and the lru lock). So let's apply a
>>>> mechanism similar to list_lru's and reparent the split queue separately
>>>> when the memcg goes offline.
>>>>
>>>
>>> Hi everyone,
>>>
>>> I'm running all of the MM commits since v6.18 through the AI patch review
>>> tools, and this one was flagged. I'm much less confident in this report;
>>> AI tends to get these races wrong:
>>>
>>> [AI review output]
>>>
>>>> diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
>>>> index b0c6a4635c67b..cc6db20d7dca0 100644
>>>> --- a/include/linux/memcontrol.h
>>>> +++ b/include/linux/memcontrol.h
>>>> @@ -1775,6 +1775,12 @@ static inline void count_objcg_events(struct obj_cgroup *objcg,
>>>> bool mem_cgroup_node_allowed(struct mem_cgroup *memcg, int nid);
>>>>
>>>> void mem_cgroup_show_protected_memory(struct mem_cgroup *memcg);
>>>> +
>>>> +static inline bool memcg_is_dying(struct mem_cgroup *memcg)
>>>> +{
>>>> + return memcg ? css_is_dying(&memcg->css) : false;
>>>> +}
>>>
>>> [ ... ]
>>>
>>>> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
>>>> index 13684e5376e82..d17d3810a882c 100644
>>>
>>> [ ... ]
>>>
>>>> @@ -1129,8 +1140,14 @@ split_queue_lock_irqsave(int nid, struct mem_cgroup *memcg, unsigned long *flags
>>>> {
>>>> struct deferred_split *queue;
>>>>
>>>> +retry:
>>>> queue = memcg_split_queue(nid, memcg);
>>>> spin_lock_irqsave(&queue->split_queue_lock, *flags);
>>>> + if (unlikely(memcg_is_dying(memcg))) {
>>>> + spin_unlock_irqrestore(&queue->split_queue_lock, *flags);
>>>> + memcg = parent_mem_cgroup(memcg);
>>>> + goto retry;
>>>> + }
>>>>
>>>> return queue;
>>>> }
>>>
>>> There appears to be a race window that can cause a split_queue_len counter
>>> mismatch. Between the time CSS_DYING is set (in kill_css()) and when
>>> reparent_deferred_split_queue() actually runs (asynchronously in
>>> css_offline), folios can be unqueued via __folio_unqueue_deferred_split().
>>>
>>> In this scenario:
>>> __folio_unqueue_deferred_split()
>>> -> folio_split_queue_lock_irqsave()
>>> -> split_queue_lock_irqsave() sees memcg_is_dying() == true
>>> -> returns parent's queue (locked)
>>> -> ds_queue->split_queue_len-- decrements parent's counter
>>> -> list_del_init() removes folio from child's list
>>>
>>> The folio is correctly removed from the child's list (list_del_init is
>>> self-referential), but the wrong queue's counter is decremented.
>> Good point. Sounds pretty possible to me?
>> I don't think there's anything that prevents it from being unqueued
>> before it's reparented.
>>> If the parent queue was empty (split_queue_len == 0), this would cause an
>>> unsigned long underflow to ULONG_MAX.
>> That said, the accounting mismatch will only persist until
>> reparent_deferred_split_queue() reparents the deferred split queue.
>
> This period is very short, and the only thing affected should be
> deferred_split_count(), but it does not cause a system error.
>
> So I think maybe we can leave it unrepaired.
I didn’t look closely at the specific issue, but based on Qi’s point,
we can actually take a cue from list_lru_count_one and provide a similar
workaround to keep deferred_split_count from returning an enormous
value—see commit 41d17431df4aa.
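A sketch of what I mean (the signature and the memcg_split_queue() call
are assumptions based on this series, not a tested patch; reading the
unsigned counter into a long mirrors how list_lru keeps nr_items signed):

static unsigned long deferred_split_count(struct shrinker *shrink,
					  struct shrink_control *sc)
{
	struct deferred_split *ds_queue = memcg_split_queue(sc->nid, sc->memcg);
	long len = READ_ONCE(ds_queue->split_queue_len);

	/*
	 * A transiently underflowed counter would otherwise read back as
	 * an enormous unsigned value; clamp it to zero the way
	 * list_lru_count_one() clamps a negative nr_items.
	 */
	return len < 0 ? 0 : len;
}

This doesn't repair the accounting, it just keeps the shrinker from seeing
a bogus ULONG_MAX-sized queue during the window.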
>
>> Ideally this should be fixed by checking if the folio has been
>> reparented after acquiring the split queue lock, but since we don't reparent
>> LRU pages yet ... do we need a band-aid before then?
>> Do we want to have an is_dying property in the split queue, as in v2? [1]
>> [1] https://lore.kernel.org/linux-mm/55370bda7b2df617033ac12116c1712144bb7591.1758618527.git.zhengqi.arch@bytedance.com
>>> Could this be addressed by checking that the folio is actually in the
>>> queue being unlocked, perhaps by verifying the list head matches before
>>> decrementing split_queue_len?
>>> [ ... ]
>>>
>>>> @@ -3920,6 +3920,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
>>>> zswap_memcg_offline_cleanup(memcg);
>>>>
>>>> memcg_offline_kmem(memcg);
>>>> + reparent_deferred_split_queue(memcg);
>>>
>>> The race window exists because CSS_DYING is set in kill_css() before
>>> css_offline() callbacks run asynchronously. The comment in split_queue_lock
>>> mentions that THPs will be "hidden from the shrinker side" during this
>>> period, but does not address the counter accounting issue when folios
>>> are unqueued.
>