[PATCH v4 2/3] mm/oom_kill: Only delay OOM reaper for processes using robust futexes

Posted by zhongjinji@honor.com 1 month, 3 weeks ago
From: zhongjinji <zhongjinji@honor.com>

The OOM reaper can quickly reap a victim's memory when the system
encounters OOM, helping the system recover. Without the OOM reaper, if a
process frozen by cgroup v1 is OOM killed, the victim's memory cannot be
freed and the system stays in a poor state. Even when the victim is not
frozen by cgroup v1, reaping its memory is still worthwhile, because one
more process working on the release speeds up recovery.

When a process holding robust futexes is OOM killed while waiters on
those futexes remain alive, the robust futexes might be reaped before
futex_cleanup() runs, which would cause the waiters to block
indefinitely. To prevent this, the OOM reaper's work is delayed by
2 seconds [1]. As a result, the OOM reaper now rarely runs at all,
since most killed processes exit within 2 seconds.

Because few processes use robust futexes, it is unreasonable to delay
the OOM reap for every victim. For processes that do not hold robust
futexes, the OOM reaper should not be delayed; for processes that do,
it must still be delayed to prevent the waiters from blocking
indefinitely [1].

Link: https://lore.kernel.org/all/20220414144042.677008-1-npache@redhat.com/T/#u [1]

Signed-off-by: zhongjinji <zhongjinji@honor.com>
---
 mm/oom_kill.c | 51 ++++++++++++++++++++++++++++++++++++++-------------
 1 file changed, 38 insertions(+), 13 deletions(-)

diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 25923cfec9c6..7ae4001e47c1 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -39,6 +39,7 @@
 #include <linux/ptrace.h>
 #include <linux/freezer.h>
 #include <linux/ftrace.h>
+#include <linux/futex.h>
 #include <linux/ratelimit.h>
 #include <linux/kthread.h>
 #include <linux/init.h>
@@ -692,7 +693,7 @@ static void wake_oom_reaper(struct timer_list *timer)
  * before the exit path is able to wake the futex waiters.
  */
 #define OOM_REAPER_DELAY (2*HZ)
-static void queue_oom_reaper(struct task_struct *tsk)
+static void queue_oom_reaper(struct task_struct *tsk, bool delay)
 {
 	/* mm is already queued? */
 	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
@@ -700,7 +701,7 @@ static void queue_oom_reaper(struct task_struct *tsk)
 
 	get_task_struct(tsk);
 	timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0);
-	tsk->oom_reaper_timer.expires = jiffies + OOM_REAPER_DELAY;
+	tsk->oom_reaper_timer.expires = jiffies + (delay ? OOM_REAPER_DELAY : 0);
 	add_timer(&tsk->oom_reaper_timer);
 }
 
@@ -742,7 +743,7 @@ static int __init oom_init(void)
 }
 subsys_initcall(oom_init)
 #else
-static inline void queue_oom_reaper(struct task_struct *tsk)
+static inline void queue_oom_reaper(struct task_struct *tsk, bool delay)
 {
 }
 #endif /* CONFIG_MMU */
@@ -843,6 +844,16 @@ bool oom_killer_disable(signed long timeout)
 	return true;
 }
 
+/*
+ * If the owner thread of robust futexes is killed by OOM, the robust futexes might be freed
+ * by the OOM reaper before futex_cleanup() runs, which could cause the waiters to
+ * block indefinitely. So when a task holds robust futexes, delay the OOM reaper.
+ */
+static inline bool should_delay_oom_reap(struct task_struct *task)
+{
+	return process_has_robust_futex(task);
+}
+
 static inline bool __task_will_free_mem(struct task_struct *task)
 {
 	struct signal_struct *sig = task->signal;
@@ -865,17 +876,19 @@ static inline bool __task_will_free_mem(struct task_struct *task)
 }
 
 /*
- * Checks whether the given task is dying or exiting and likely to
- * release its address space. This means that all threads and processes
+ * Determine whether the given task should be reaped based on
+ * whether it is dying or exiting and likely to release its
+ * address space. This means that all threads and processes
  * sharing the same mm have to be killed or exiting.
  * Caller has to make sure that task->mm is stable (hold task_lock or
  * it operates on the current).
  */
-static bool task_will_free_mem(struct task_struct *task)
+static bool should_reap_task(struct task_struct *task, bool *delay_reap)
 {
 	struct mm_struct *mm = task->mm;
 	struct task_struct *p;
 	bool ret = true;
+	bool delay;
 
 	/*
 	 * Skip tasks without mm because it might have passed its exit_mm and
@@ -888,6 +901,8 @@ static bool task_will_free_mem(struct task_struct *task)
 	if (!__task_will_free_mem(task))
 		return false;
 
+	delay = should_delay_oom_reap(task);
+
 	/*
 	 * This task has already been drained by the oom reaper so there are
 	 * only small chances it will free some more
@@ -912,8 +927,11 @@ static bool task_will_free_mem(struct task_struct *task)
 		ret = __task_will_free_mem(p);
 		if (!ret)
 			break;
+		if (!delay)
+			delay = should_delay_oom_reap(p);
 	}
 	rcu_read_unlock();
+	*delay_reap = delay;
 
 	return ret;
 }
@@ -923,6 +941,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
 	struct task_struct *p;
 	struct mm_struct *mm;
 	bool can_oom_reap = true;
+	bool delay_reap;
 
 	p = find_lock_task_mm(victim);
 	if (!p) {
@@ -959,6 +978,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
 		from_kuid(&init_user_ns, task_uid(victim)),
 		mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj);
 	task_unlock(victim);
+	delay_reap = should_delay_oom_reap(victim);
 
 	/*
 	 * Kill all user processes sharing victim->mm in other thread groups, if
@@ -990,11 +1010,13 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
 		if (unlikely(p->flags & PF_KTHREAD))
 			continue;
 		do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID);
+		if (!delay_reap)
+			delay_reap = should_delay_oom_reap(p);
 	}
 	rcu_read_unlock();
 
 	if (can_oom_reap)
-		queue_oom_reaper(victim);
+		queue_oom_reaper(victim, delay_reap);
 
 	mmdrop(mm);
 	put_task_struct(victim);
@@ -1020,6 +1042,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
 	struct mem_cgroup *oom_group;
 	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
 					      DEFAULT_RATELIMIT_BURST);
+	bool delay_reap = false;
 
 	/*
 	 * If the task is already exiting, don't alarm the sysadmin or kill
@@ -1027,9 +1050,9 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
 	 * so it can die quickly
 	 */
 	task_lock(victim);
-	if (task_will_free_mem(victim)) {
+	if (should_reap_task(victim, &delay_reap)) {
 		mark_oom_victim(victim);
-		queue_oom_reaper(victim);
+		queue_oom_reaper(victim, delay_reap);
 		task_unlock(victim);
 		put_task_struct(victim);
 		return;
@@ -1112,6 +1135,7 @@ EXPORT_SYMBOL_GPL(unregister_oom_notifier);
 bool out_of_memory(struct oom_control *oc)
 {
 	unsigned long freed = 0;
+	bool delay_reap = false;
 
 	if (oom_killer_disabled)
 		return false;
@@ -1128,9 +1152,9 @@ bool out_of_memory(struct oom_control *oc)
 	 * select it.  The goal is to allow it to allocate so that it may
 	 * quickly exit and free its memory.
 	 */
-	if (task_will_free_mem(current)) {
+	if (should_reap_task(current, &delay_reap)) {
 		mark_oom_victim(current);
-		queue_oom_reaper(current);
+		queue_oom_reaper(current, delay_reap);
 		return true;
 	}
 
@@ -1209,6 +1233,7 @@ SYSCALL_DEFINE2(process_mrelease, int, pidfd, unsigned int, flags)
 	struct task_struct *p;
 	unsigned int f_flags;
 	bool reap = false;
+	bool delay_reap = false;
 	long ret = 0;
 
 	if (flags)
@@ -1231,7 +1256,7 @@ SYSCALL_DEFINE2(process_mrelease, int, pidfd, unsigned int, flags)
 	mm = p->mm;
 	mmgrab(mm);
 
-	if (task_will_free_mem(p))
+	if (should_reap_task(p, &delay_reap))
 		reap = true;
 	else {
 		/* Error only if the work has not been done already */
@@ -1240,7 +1265,7 @@ SYSCALL_DEFINE2(process_mrelease, int, pidfd, unsigned int, flags)
 	}
 	task_unlock(p);
 
-	if (!reap)
+	if (!reap || delay_reap)
 		goto drop_mm;
 
 	if (mmap_read_lock_killable(mm)) {
-- 
2.17.1
Re: [PATCH v4 2/3] mm/oom_kill: Only delay OOM reaper for processes using robust futexes
Posted by Michal Hocko 1 month, 2 weeks ago
On Thu 14-08-25 21:55:54, zhongjinji@honor.com wrote:
> From: zhongjinji <zhongjinji@honor.com>
> 
> The OOM reaper can quickly reap a victim's memory when the system
> encounters OOM, helping the system recover. Without the OOM reaper, if a
> process frozen by cgroup v1 is OOM killed, the victim's memory cannot be
> freed and the system stays in a poor state. Even when the victim is not
> frozen by cgroup v1, reaping its memory is still worthwhile, because one
> more process working on the release speeds up recovery.
> 
> When a process holding robust futexes is OOM killed while waiters on
> those futexes remain alive, the robust futexes might be reaped before
> futex_cleanup() runs, which would cause the waiters to block
> indefinitely. To prevent this, the OOM reaper's work is delayed by
> 2 seconds [1]. As a result, the OOM reaper now rarely runs at all,
> since most killed processes exit within 2 seconds.
> 
> Because few processes use robust futexes, it is unreasonable to delay
> the OOM reap for every victim. For processes that do not hold robust
> futexes, the OOM reaper should not be delayed; for processes that do,
> it must still be delayed to prevent the waiters from blocking
> indefinitely [1].
> 
> Link: https://lore.kernel.org/all/20220414144042.677008-1-npache@redhat.com/T/#u [1]

What has happened to
https://lore.kernel.org/all/aJGiHyTXS_BqxoK2@tiehlicka/T/#u ?

Generally speaking it would be great to provide a link to previous
versions of the patchset. I do not see v3 in my inbox (which is quite
messy ATM so I might have easily missed it).
-- 
Michal Hocko
SUSE Labs
Re: [PATCH v4 2/3] mm/oom_kill: Only delay OOM reaper for processes using robust futexes
Posted by zhongjinji 1 month, 2 weeks ago
> On Thu 14-08-25 21:55:54, zhongjinji@honor.com wrote:
> > From: zhongjinji <zhongjinji@honor.com>
> > 
> > The OOM reaper can quickly reap a victim's memory when the system
> > encounters OOM, helping the system recover. Without the OOM reaper, if a
> > process frozen by cgroup v1 is OOM killed, the victim's memory cannot be
> > freed and the system stays in a poor state. Even when the victim is not
> > frozen by cgroup v1, reaping its memory is still worthwhile, because one
> > more process working on the release speeds up recovery.
> > 
> > When a process holding robust futexes is OOM killed while waiters on
> > those futexes remain alive, the robust futexes might be reaped before
> > futex_cleanup() runs, which would cause the waiters to block
> > indefinitely. To prevent this, the OOM reaper's work is delayed by
> > 2 seconds [1]. As a result, the OOM reaper now rarely runs at all,
> > since most killed processes exit within 2 seconds.
> > 
> > Because few processes use robust futexes, it is unreasonable to delay
> > the OOM reap for every victim. For processes that do not hold robust
> > futexes, the OOM reaper should not be delayed; for processes that do,
> > it must still be delayed to prevent the waiters from blocking
> > indefinitely [1].
> > 
> > Link: https://lore.kernel.org/all/20220414144042.677008-1-npache@redhat.com/T/#u [1]
> 
> What has happened to
> https://lore.kernel.org/all/aJGiHyTXS_BqxoK2@tiehlicka/T/#u ?

If a process holding robust futexes gets frozen, its robust futexes might be reaped
before futex_cleanup() runs when an OOM occurs. I am not sure whether this can actually happen.

> 
> Generally speaking it would be great to provide a link to previous
> versions of the patchset. I do not see v3 in my inbox (which is quite
> messy ATM so I might have easily missed it).

This is version v3, where I mainly fixed the error in the Subject prefix,
changing it from futex to mm/oom_kill.

https://lore.kernel.org/all/20250804030341.18619-1-zhongjinji@honor.com/
https://lore.kernel.org/all/20250804030341.18619-2-zhongjinji@honor.com/

> -- 
> Michal Hocko
> SUSE Labs
Re: [PATCH v4 2/3] mm/oom_kill: Only delay OOM reaper for processes using robust futexes
Posted by Michal Hocko 1 month, 2 weeks ago
On Mon 18-08-25 20:08:19, zhongjinji wrote:
> > On Thu 14-08-25 21:55:54, zhongjinji@honor.com wrote:
> > > From: zhongjinji <zhongjinji@honor.com>
> > > 
> > > The OOM reaper can quickly reap a victim's memory when the system
> > > encounters OOM, helping the system recover. Without the OOM reaper, if a
> > > process frozen by cgroup v1 is OOM killed, the victim's memory cannot be
> > > freed and the system stays in a poor state. Even when the victim is not
> > > frozen by cgroup v1, reaping its memory is still worthwhile, because one
> > > more process working on the release speeds up recovery.
> > > 
> > > When a process holding robust futexes is OOM killed while waiters on
> > > those futexes remain alive, the robust futexes might be reaped before
> > > futex_cleanup() runs, which would cause the waiters to block
> > > indefinitely. To prevent this, the OOM reaper's work is delayed by
> > > 2 seconds [1]. As a result, the OOM reaper now rarely runs at all,
> > > since most killed processes exit within 2 seconds.
> > > 
> > > Because few processes use robust futexes, it is unreasonable to delay
> > > the OOM reap for every victim. For processes that do not hold robust
> > > futexes, the OOM reaper should not be delayed; for processes that do,
> > > it must still be delayed to prevent the waiters from blocking
> > > indefinitely [1].
> > > 
> > > Link: https://lore.kernel.org/all/20220414144042.677008-1-npache@redhat.com/T/#u [1]
> > 
> > What has happened to
> > https://lore.kernel.org/all/aJGiHyTXS_BqxoK2@tiehlicka/T/#u ?
> 
> If a process holding robust futexes gets frozen, its robust futexes might be reaped
> before futex_cleanup() runs when an OOM occurs. I am not sure whether this can actually happen.

Yes, and a 2s delay will never rule that out, especially for frozen tasks,
which could stay frozen indefinitely. That is not the point I was trying
to make. I was suggesting not treating futexes specially, because no
matter what we do this will always be racy and a hack to reduce the risk.
We simply cannot deal with that case more gracefully without major surgery
to the futex implementation, which is not desirable for this specific
reason.

So instead of checking for futexes, which Thomas was not happy about
either, let's just reap _frozen_/_freezing_ tasks right away, as that
makes at least some sense and it also handles your primary problem AFAIU.
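
A completely untested sketch of what I mean; it assumes frozen() and
freezing() from <linux/freezer.h> are the right predicates here,
including for the cgroup v1 freezer case:

static void queue_oom_reaper(struct task_struct *tsk)
{
	unsigned long delay;

	/* mm is already queued? */
	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
		return;

	/*
	 * A frozen victim cannot run its exit path (and futex_cleanup())
	 * any time soon, so waiting for it is pointless; reap right away.
	 * Everybody else keeps the usual grace period.
	 */
	delay = (frozen(tsk) || freezing(tsk)) ? 0 : OOM_REAPER_DELAY;

	get_task_struct(tsk);
	timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0);
	tsk->oom_reaper_timer.expires = jiffies + delay;
	add_timer(&tsk->oom_reaper_timer);
}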

> > Generally speaking it would be great to provide a link to previous
> > versions of the patchset. I do not see v3 in my inbox (which is quite
> > messy ATM so I might have easily missed it).
> 
> This is version v3, where I mainly fixed the error in the Subject prefix,
> changing it from futex to mm/oom_kill.
> 
> https://lore.kernel.org/all/20250804030341.18619-1-zhongjinji@honor.com/
> https://lore.kernel.org/all/20250804030341.18619-2-zhongjinji@honor.com/

please always mention that in the cover letter.

Thanks.
-- 
Michal Hocko
SUSE Labs
Re: [PATCH v4 2/3] mm/oom_kill: Only delay OOM reaper for processes using robust futexes
Posted by Davidlohr Bueso 1 month, 2 weeks ago
On Tue, 19 Aug 2025, Michal Hocko wrote:

>On Mon 18-08-25 20:08:19, zhongjinji wrote:
>> If a process holding robust futexes gets frozen, its robust futexes might be reaped
>> before futex_cleanup() runs when an OOM occurs. I am not sure whether this can actually happen.
>
>Yes, and a 2s delay will never rule that out, especially for frozen tasks,
>which could stay frozen indefinitely. That is not the point I was trying
>to make. I was suggesting not treating futexes specially, because no
>matter what we do this will always be racy and a hack to reduce the risk.
>We simply cannot deal with that case more gracefully without major surgery
>to the futex implementation, which is not desirable for this specific
>reason.

Yeah, relying on time as a fix is never a good idea. I was going to suggest
skipping the reaping for tasks with a robust list, but that still requires
the racy check, and your suggested workaround seems more practical.

Thanks,
Davidlohr
Re: [PATCH v4 2/3] mm/oom_kill: Only delay OOM reaper for processes using robust futexes
Posted by Michal Hocko 1 month, 1 week ago
On Tue 19-08-25 19:53:08, Davidlohr Bueso wrote:
> On Tue, 19 Aug 2025, Michal Hocko wrote:
> 
> > On Mon 18-08-25 20:08:19, zhongjinji wrote:
> > > If a process holding robust futexes gets frozen, its robust futexes might be reaped
> > > before futex_cleanup() runs when an OOM occurs. I am not sure whether this can actually happen.
> > 
> > Yes, and a 2s delay will never rule that out, especially for frozen tasks,
> > which could stay frozen indefinitely. That is not the point I was trying
> > to make. I was suggesting not treating futexes specially, because no
> > matter what we do this will always be racy and a hack to reduce the risk.
> > We simply cannot deal with that case more gracefully without major surgery
> > to the futex implementation, which is not desirable for this specific
> > reason.
> 
> Yeah, relying on time as a fix is never a good idea. I was going to suggest
> skipping the reaping for tasks with a robust list, 

let me reiterate that the purpose of the oom reaper is not to optimize
the oom killing process. It is crucial to guarantee forward progress in
the OOM situation by a) async memory reclaim of the oom victim and b)
unblocking oom selection of a different process after a) is done. That
means that the victim cannot block the oom situation forever. Therefore
we cannot really skip tasks with robust futexes, or any other user
processes, unless b) is still achieved at the same time.

The current delay is something we can tune and still keep b) in place.
The normal mode of operation is that the oom reaper has nothing to do,
and that is really a good thing.

-- 
Michal Hocko
SUSE Labs
Re: [PATCH v4 2/3] mm/oom_kill: Only delay OOM reaper for processes using robust futexes
Posted by Davidlohr Bueso 1 month, 1 week ago
On Thu, 21 Aug 2025, Michal Hocko wrote:

>On Tue 19-08-25 19:53:08, Davidlohr Bueso wrote:
>> Yeah, relying on time as a fix is never a good idea. I was going to suggest
>> skipping the reaping for tasks with a robust list,
>
>let me reiterate that the purpose of the oom reaper is not to optimize
>the oom killing process. It is crucial to guarantee forward progress in
>the OOM situation by a) async memory reclaim of the oom victim and b)
>unblocking oom selection of a different process after a) is done. That
>means that the victim cannot block the oom situation forever. Therefore
>we cannot really skip tasks with robust futexes, or any other user
>processes, unless b) is still achieved at the same time.

Yes, which is why I indicated that skipping it was less practical.

In the real world, users who care enough to use robust futexes should
make sure that their applications keep the OOM killer away altogether.

Thanks,
Davidlohr
Re: [PATCH v4 2/3] mm/oom_kill: Only delay OOM reaper for processes using robust futexes
Posted by Lorenzo Stoakes 1 month, 2 weeks ago
On Thu, Aug 14, 2025 at 09:55:54PM +0800, zhongjinji@honor.com wrote:
> From: zhongjinji <zhongjinji@honor.com>
>
> The OOM reaper can quickly reap a victim's memory when the system
> encounters OOM, helping the system recover. Without the OOM reaper, if a
> process frozen by cgroup v1 is OOM killed, the victim's memory cannot be
> freed and the system stays in a poor state. Even when the victim is not
> frozen by cgroup v1, reaping its memory is still worthwhile, because one
> more process working on the release speeds up recovery.
>
> When a process holding robust futexes is OOM killed while waiters on
> those futexes remain alive, the robust futexes might be reaped before
> futex_cleanup() runs, which would cause the waiters to block
> indefinitely. To prevent this, the OOM reaper's work is delayed by
> 2 seconds [1]. As a result, the OOM reaper now rarely runs at all,
> since most killed processes exit within 2 seconds.

God, I really don't love that that got merged. So arbitrary. Are futexes really
this broken?

>
> Because few processes use robust futexes, it is unreasonable to delay
> the OOM reap for every victim. For processes that do not hold robust
> futexes, the OOM reaper should not be delayed; for processes that do,
> it must still be delayed to prevent the waiters from blocking
> indefinitely [1].

I really hate that we do this :/

>
> Link: https://lore.kernel.org/all/20220414144042.677008-1-npache@redhat.com/T/#u [1]
>
> Signed-off-by: zhongjinji <zhongjinji@honor.com>



> ---
>  mm/oom_kill.c | 51 ++++++++++++++++++++++++++++++++++++++-------------
>  1 file changed, 38 insertions(+), 13 deletions(-)
>
> diff --git a/mm/oom_kill.c b/mm/oom_kill.c
> index 25923cfec9c6..7ae4001e47c1 100644
> --- a/mm/oom_kill.c
> +++ b/mm/oom_kill.c
> @@ -39,6 +39,7 @@
>  #include <linux/ptrace.h>
>  #include <linux/freezer.h>
>  #include <linux/ftrace.h>
> +#include <linux/futex.h>
>  #include <linux/ratelimit.h>
>  #include <linux/kthread.h>
>  #include <linux/init.h>
> @@ -692,7 +693,7 @@ static void wake_oom_reaper(struct timer_list *timer)
>   * before the exit path is able to wake the futex waiters.
>   */
>  #define OOM_REAPER_DELAY (2*HZ)
> -static void queue_oom_reaper(struct task_struct *tsk)
> +static void queue_oom_reaper(struct task_struct *tsk, bool delay)
>  {
>  	/* mm is already queued? */
>  	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
> @@ -700,7 +701,7 @@ static void queue_oom_reaper(struct task_struct *tsk)
>
>  	get_task_struct(tsk);
>  	timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0);
> -	tsk->oom_reaper_timer.expires = jiffies + OOM_REAPER_DELAY;
> +	tsk->oom_reaper_timer.expires = jiffies + (delay ? OOM_REAPER_DELAY : 0);

Since this is predicated on the task_struct you have here, can we avoid all this
horrible threading of 'delay' and just check here?
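
Something like this (utterly untested) would keep it all in one place:

static void queue_oom_reaper(struct task_struct *tsk)
{
	/* mm is already queued? */
	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
		return;

	get_task_struct(tsk);
	timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0);
	/* Only victims using robust futexes need the grace period. */
	tsk->oom_reaper_timer.expires = jiffies +
		(should_delay_oom_reap(tsk) ? OOM_REAPER_DELAY : 0);
	add_timer(&tsk->oom_reaper_timer);
}

(That drops the check of the other processes sharing the mm, and
should_delay_oom_reap() would have to move above this function, but you
get the idea.)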

>  	add_timer(&tsk->oom_reaper_timer);
>  }
>
> @@ -742,7 +743,7 @@ static int __init oom_init(void)
>  }
>  subsys_initcall(oom_init)
>  #else
> -static inline void queue_oom_reaper(struct task_struct *tsk)
> +static inline void queue_oom_reaper(struct task_struct *tsk, bool delay)
>  {
>  }
>  #endif /* CONFIG_MMU */
> @@ -843,6 +844,16 @@ bool oom_killer_disable(signed long timeout)
>  	return true;
>  }
>
> +/*
> + * If the owner thread of robust futexes is killed by OOM, the robust futexes might be freed
> + * by the OOM reaper before futex_cleanup() runs, which could cause the waiters to
> + * block indefinitely. So when a task holds robust futexes, delay the OOM reaper.
> + */
> +static inline bool should_delay_oom_reap(struct task_struct *task)
> +{
> +	return process_has_robust_futex(task);
> +}
> +
>  static inline bool __task_will_free_mem(struct task_struct *task)
>  {
>  	struct signal_struct *sig = task->signal;
> @@ -865,17 +876,19 @@ static inline bool __task_will_free_mem(struct task_struct *task)
>  }
>
>  /*
> - * Checks whether the given task is dying or exiting and likely to
> - * release its address space. This means that all threads and processes
> + * Determine whether the given task should be reaped based on
> + * whether it is dying or exiting and likely to release its
> + * address space. This means that all threads and processes
>   * sharing the same mm have to be killed or exiting.
>   * Caller has to make sure that task->mm is stable (hold task_lock or
>   * it operates on the current).
>   */
> -static bool task_will_free_mem(struct task_struct *task)
> +static bool should_reap_task(struct task_struct *task, bool *delay_reap)
>  {
>  	struct mm_struct *mm = task->mm;
>  	struct task_struct *p;
>  	bool ret = true;
> +	bool delay;
>
>  	/*
>  	 * Skip tasks without mm because it might have passed its exit_mm and
> @@ -888,6 +901,8 @@ static bool task_will_free_mem(struct task_struct *task)
>  	if (!__task_will_free_mem(task))
>  		return false;
>
> +	delay = should_delay_oom_reap(task);
> +
>  	/*
>  	 * This task has already been drained by the oom reaper so there are
>  	 * only small chances it will free some more
> @@ -912,8 +927,11 @@ static bool task_will_free_mem(struct task_struct *task)
>  		ret = __task_will_free_mem(p);
>  		if (!ret)
>  			break;
> +		if (!delay)
> +			delay = should_delay_oom_reap(p);
>  	}
>  	rcu_read_unlock();
> +	*delay_reap = delay;
>
>  	return ret;
>  }
> @@ -923,6 +941,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
>  	struct task_struct *p;
>  	struct mm_struct *mm;
>  	bool can_oom_reap = true;
> +	bool delay_reap;
>
>  	p = find_lock_task_mm(victim);
>  	if (!p) {
> @@ -959,6 +978,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
>  		from_kuid(&init_user_ns, task_uid(victim)),
>  		mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj);
>  	task_unlock(victim);
> +	delay_reap = should_delay_oom_reap(victim);
>

Yeah I really think we can just simplify by testing this at the point where we
decide whether or not to do the horrible 2s thing.

Let's not try to 'generalise' this just yet.

>  	/*
>  	 * Kill all user processes sharing victim->mm in other thread groups, if
> @@ -990,11 +1010,13 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
>  		if (unlikely(p->flags & PF_KTHREAD))
>  			continue;
>  		do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID);
> +		if (!delay_reap)
> +			delay_reap = should_delay_oom_reap(p);
>  	}
>  	rcu_read_unlock();
>
>  	if (can_oom_reap)
> -		queue_oom_reaper(victim);
> +		queue_oom_reaper(victim, delay_reap);
>
>  	mmdrop(mm);
>  	put_task_struct(victim);
> @@ -1020,6 +1042,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
>  	struct mem_cgroup *oom_group;
>  	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
>  					      DEFAULT_RATELIMIT_BURST);
> +	bool delay_reap = false;
>
>  	/*
>  	 * If the task is already exiting, don't alarm the sysadmin or kill
> @@ -1027,9 +1050,9 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
>  	 * so it can die quickly
>  	 */
>  	task_lock(victim);
> -	if (task_will_free_mem(victim)) {
> +	if (should_reap_task(victim, &delay_reap)) {
>  		mark_oom_victim(victim);
> -		queue_oom_reaper(victim);
> +		queue_oom_reaper(victim, delay_reap);
>  		task_unlock(victim);
>  		put_task_struct(victim);
>  		return;
> @@ -1112,6 +1135,7 @@ EXPORT_SYMBOL_GPL(unregister_oom_notifier);
>  bool out_of_memory(struct oom_control *oc)
>  {
>  	unsigned long freed = 0;
> +	bool delay_reap = false;
>
>  	if (oom_killer_disabled)
>  		return false;
> @@ -1128,9 +1152,9 @@ bool out_of_memory(struct oom_control *oc)
>  	 * select it.  The goal is to allow it to allocate so that it may
>  	 * quickly exit and free its memory.
>  	 */
> -	if (task_will_free_mem(current)) {
> +	if (should_reap_task(current, &delay_reap)) {
>  		mark_oom_victim(current);
> -		queue_oom_reaper(current);
> +		queue_oom_reaper(current, delay_reap);
>  		return true;
>  	}
>
> @@ -1209,6 +1233,7 @@ SYSCALL_DEFINE2(process_mrelease, int, pidfd, unsigned int, flags)
>  	struct task_struct *p;
>  	unsigned int f_flags;
>  	bool reap = false;
> +	bool delay_reap = false;
>  	long ret = 0;
>
>  	if (flags)
> @@ -1231,7 +1256,7 @@ SYSCALL_DEFINE2(process_mrelease, int, pidfd, unsigned int, flags)
>  	mm = p->mm;
>  	mmgrab(mm);
>
> -	if (task_will_free_mem(p))
> +	if (should_reap_task(p, &delay_reap))

You can figure out whether you delayed or not from the task again, right?
No need to thread this.

I don't think it's a big deal to check twice; we are in the OOM code path,
which is not (or should not be... :) a hot path, so we're good.
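
i.e. keep task_will_free_mem() as it is and, after task_unlock(p), just do
something like (untested):

	if (!reap || should_delay_oom_reap(p))
		goto drop_mm;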

>  		reap = true;
>  	else {
>  		/* Error only if the work has not been done already */
> @@ -1240,7 +1265,7 @@ SYSCALL_DEFINE2(process_mrelease, int, pidfd, unsigned int, flags)
>  	}
>  	task_unlock(p);
>
> -	if (!reap)
> +	if (!reap || delay_reap)
>  		goto drop_mm;
>
>  	if (mmap_read_lock_killable(mm)) {
> --
> 2.17.1
>
>
Re: [PATCH v4 2/3] mm/oom_kill: Only delay OOM reaper for processes using robust futexes
Posted by zhongjinji 1 month, 2 weeks ago
> On Thu, Aug 14, 2025 at 09:55:54PM +0800, zhongjinji@honor.com wrote:
> > From: zhongjinji <zhongjinji@honor.com>
> >
> > The OOM reaper can quickly reap a victim's memory when the system
> > encounters OOM, helping the system recover. Without the OOM reaper, if a
> > process frozen by cgroup v1 is OOM killed, the victim's memory cannot be
> > freed and the system stays in a poor state. Even when the victim is not
> > frozen by cgroup v1, reaping its memory is still worthwhile, because one
> > more process working on the release speeds up recovery.
> >
> > When a process holding robust futexes is OOM killed while waiters on
> > those futexes remain alive, the robust futexes might be reaped before
> > futex_cleanup() runs, which would cause the waiters to block
> > indefinitely. To prevent this, the OOM reaper's work is delayed by
> > 2 seconds [1]. As a result, the OOM reaper now rarely runs at all,
> > since most killed processes exit within 2 seconds.
> 
> God, I really don't love that that got merged. So arbitrary. Are futexes really
> this broken?
> 
> >
> > Because few processes use robust futexes, it is unreasonable to delay
> > the OOM reap for every victim. For processes that do not hold robust
> > futexes, the OOM reaper should not be delayed; for processes that do,
> > it must still be delayed to prevent the waiters from blocking
> > indefinitely [1].
> 
> I really hate that we do this :/
> 
> >
> > Link: https://lore.kernel.org/all/20220414144042.677008-1-npache@redhat.com/T/#u [1]
> >
> > Signed-off-by: zhongjinji <zhongjinji@honor.com>
> 
> 
> 
> > ---
> >  mm/oom_kill.c | 51 ++++++++++++++++++++++++++++++++++++++-------------
> >  1 file changed, 38 insertions(+), 13 deletions(-)
> >
> > diff --git a/mm/oom_kill.c b/mm/oom_kill.c
> > index 25923cfec9c6..7ae4001e47c1 100644
> > --- a/mm/oom_kill.c
> > +++ b/mm/oom_kill.c
> > @@ -39,6 +39,7 @@
> >  #include <linux/ptrace.h>
> >  #include <linux/freezer.h>
> >  #include <linux/ftrace.h>
> > +#include <linux/futex.h>
> >  #include <linux/ratelimit.h>
> >  #include <linux/kthread.h>
> >  #include <linux/init.h>
> > @@ -692,7 +693,7 @@ static void wake_oom_reaper(struct timer_list *timer)
> >   * before the exit path is able to wake the futex waiters.
> >   */
> >  #define OOM_REAPER_DELAY (2*HZ)
> > -static void queue_oom_reaper(struct task_struct *tsk)
> > +static void queue_oom_reaper(struct task_struct *tsk, bool delay)
> >  {
> >  	/* mm is already queued? */
> >  	if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
> > @@ -700,7 +701,7 @@ static void queue_oom_reaper(struct task_struct *tsk)
> >
> >  	get_task_struct(tsk);
> >  	timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0);
> > -	tsk->oom_reaper_timer.expires = jiffies + OOM_REAPER_DELAY;
> > +	tsk->oom_reaper_timer.expires = jiffies + (delay ? OOM_REAPER_DELAY : 0);
> 
> Since this is predicated on the task_struct you have here, can we avoid all this
> horrible threading of 'delay' and just check here?

Yeah, it is great! I will update it in the next version.

> 
> >  	add_timer(&tsk->oom_reaper_timer);
> >  }
> >
> > @@ -742,7 +743,7 @@ static int __init oom_init(void)
> >  }
> >  subsys_initcall(oom_init)
> >  #else
> > -static inline void queue_oom_reaper(struct task_struct *tsk)
> > +static inline void queue_oom_reaper(struct task_struct *tsk, bool delay)
> >  {
> >  }
> >  #endif /* CONFIG_MMU */
> > @@ -843,6 +844,16 @@ bool oom_killer_disable(signed long timeout)
> >  	return true;
> >  }
> >
> > +/*
> > + * If the owner thread of robust futexes is killed by OOM, the robust futexes might be freed
> > + * by the OOM reaper before futex_cleanup() runs, which could cause the waiters to
> > + * block indefinitely. So when a task holds robust futexes, delay the OOM reaper.
> > + */
> > +static inline bool should_delay_oom_reap(struct task_struct *task)
> > +{
> > +	return process_has_robust_futex(task);
> > +}
> > +
> >  static inline bool __task_will_free_mem(struct task_struct *task)
> >  {
> >  	struct signal_struct *sig = task->signal;
> > @@ -865,17 +876,19 @@ static inline bool __task_will_free_mem(struct task_struct *task)
> >  }
> >
> >  /*
> > - * Checks whether the given task is dying or exiting and likely to
> > - * release its address space. This means that all threads and processes
> > + * Determine whether the given task should be reaped based on
> > + * whether it is dying or exiting and likely to release its
> > + * address space. This means that all threads and processes
> >   * sharing the same mm have to be killed or exiting.
> >   * Caller has to make sure that task->mm is stable (hold task_lock or
> >   * it operates on the current).
> >   */
> > -static bool task_will_free_mem(struct task_struct *task)
> > +static bool should_reap_task(struct task_struct *task, bool *delay_reap)
> >  {
> >  	struct mm_struct *mm = task->mm;
> >  	struct task_struct *p;
> >  	bool ret = true;
> > +	bool delay;
> >
> >  	/*
> >  	 * Skip tasks without mm because it might have passed its exit_mm and
> > @@ -888,6 +901,8 @@ static bool task_will_free_mem(struct task_struct *task)
> >  	if (!__task_will_free_mem(task))
> >  		return false;
> >
> > +	delay = should_delay_oom_reap(task);
> > +
> >  	/*
> >  	 * This task has already been drained by the oom reaper so there are
> >  	 * only small chances it will free some more
> > @@ -912,8 +927,11 @@ static bool task_will_free_mem(struct task_struct *task)
> >  		ret = __task_will_free_mem(p);
> >  		if (!ret)
> >  			break;
> > +		if (!delay)
> > +			delay = should_delay_oom_reap(p);
> >  	}
> >  	rcu_read_unlock();
> > +	*delay_reap = delay;
> >
> >  	return ret;
> >  }
> > @@ -923,6 +941,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
> >  	struct task_struct *p;
> >  	struct mm_struct *mm;
> >  	bool can_oom_reap = true;
> > +	bool delay_reap;
> >
> >  	p = find_lock_task_mm(victim);
> >  	if (!p) {
> > @@ -959,6 +978,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
> >  		from_kuid(&init_user_ns, task_uid(victim)),
> >  		mm_pgtables_bytes(mm) >> 10, victim->signal->oom_score_adj);
> >  	task_unlock(victim);
> > +	delay_reap = should_delay_oom_reap(victim);
> >
> 
> Yeah I really think we can just simplify by testing this at the point where we
> decide whether or not to do the horrible 2s thing.
> 
> Let's not try to 'generalise' this just yet.
> 
> >  	/*
> >  	 * Kill all user processes sharing victim->mm in other thread groups, if
> > @@ -990,11 +1010,13 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
> >  		if (unlikely(p->flags & PF_KTHREAD))
> >  			continue;
> >  		do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID);
> > +		if (!delay_reap)
> > +			delay_reap = should_delay_oom_reap(p);
> >  	}
> >  	rcu_read_unlock();
> >
> >  	if (can_oom_reap)
> > -		queue_oom_reaper(victim);
> > +		queue_oom_reaper(victim, delay_reap);
> >
> >  	mmdrop(mm);
> >  	put_task_struct(victim);
> > @@ -1020,6 +1042,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
> >  	struct mem_cgroup *oom_group;
> >  	static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
> >  					      DEFAULT_RATELIMIT_BURST);
> > +	bool delay_reap = false;
> >
> >  	/*
> >  	 * If the task is already exiting, don't alarm the sysadmin or kill
> > @@ -1027,9 +1050,9 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
> >  	 * so it can die quickly
> >  	 */
> >  	task_lock(victim);
> > -	if (task_will_free_mem(victim)) {
> > +	if (should_reap_task(victim, &delay_reap)) {
> >  		mark_oom_victim(victim);
> > -		queue_oom_reaper(victim);
> > +		queue_oom_reaper(victim, delay_reap);
> >  		task_unlock(victim);
> >  		put_task_struct(victim);
> >  		return;
> > @@ -1112,6 +1135,7 @@ EXPORT_SYMBOL_GPL(unregister_oom_notifier);
> >  bool out_of_memory(struct oom_control *oc)
> >  {
> >  	unsigned long freed = 0;
> > +	bool delay_reap = false;
> >
> >  	if (oom_killer_disabled)
> >  		return false;
> > @@ -1128,9 +1152,9 @@ bool out_of_memory(struct oom_control *oc)
> >  	 * select it.  The goal is to allow it to allocate so that it may
> >  	 * quickly exit and free its memory.
> >  	 */
> > -	if (task_will_free_mem(current)) {
> > +	if (should_reap_task(current, &delay_reap)) {
> >  		mark_oom_victim(current);
> > -		queue_oom_reaper(current);
> > +		queue_oom_reaper(current, delay_reap);
> >  		return true;
> >  	}
> >
> > @@ -1209,6 +1233,7 @@ SYSCALL_DEFINE2(process_mrelease, int, pidfd, unsigned int, flags)
> >  	struct task_struct *p;
> >  	unsigned int f_flags;
> >  	bool reap = false;
> > +	bool delay_reap = false;
> >  	long ret = 0;
> >
> >  	if (flags)
> > @@ -1231,7 +1256,7 @@ SYSCALL_DEFINE2(process_mrelease, int, pidfd, unsigned int, flags)
> >  	mm = p->mm;
> >  	mmgrab(mm);
> >
> > -	if (task_will_free_mem(p))
> > +	if (should_reap_task(p, &delay_reap))
> 
> You can figure out whether you delayed or not from the task again, right?
> No need to thread this.
> 
> I don't think it's a big deal to check twice; we are in the OOM code path,
> which is not (or should not be... :) a hot path, so we're good.
> 
> >  		reap = true;
> >  	else {
> >  		/* Error only if the work has not been done already */
> > @@ -1240,7 +1265,7 @@ SYSCALL_DEFINE2(process_mrelease, int, pidfd, unsigned int, flags)
> >  	}
> >  	task_unlock(p);
> >
> > -	if (!reap)
> > +	if (!reap || delay_reap)
> >  		goto drop_mm;
> >
> >  	if (mmap_read_lock_killable(mm)) {
> > --
> > 2.17.1
> >
> >