mm/oom_kill.c | 41 +++++++++++++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 10 deletions(-)
From: zhongjinji <zhongjinji@honor.com>
After the patch
https://lore.kernel.org/all/20220414144042.677008-1-npache@redhat.com/T/#u
was merged, the OOM reaper is queued with a two-second delay, and because many
killed processes exit on their own within that window, the reaper ends up
running far less frequently.
However, when a killed process cannot exit promptly, timely handling by the
OOM reaper frees its memory sooner.
Since relatively few processes use robust futexes, delaying the OOM reaper for
every victim is undesirable: it needlessly prevents the many killed processes
that do not use robust futexes from having their memory reclaimed promptly.
This patch modifies the behavior so that only processes using robust futex
are delayed by the OOM reaper, allowing the OOM reaper to handle more
processes in a timely manner.
Signed-off-by: zhongjinji <zhongjinji@honor.com>
---
mm/oom_kill.c | 41 +++++++++++++++++++++++++++++++----------
1 file changed, 31 insertions(+), 10 deletions(-)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 25923cfec9c6..3ecb21a1c870 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -30,6 +30,7 @@
#include <linux/syscalls.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
+#include <linux/futex.h>
#include <linux/cpuset.h>
#include <linux/export.h>
#include <linux/notifier.h>
@@ -692,7 +693,7 @@ static void wake_oom_reaper(struct timer_list *timer)
* before the exit path is able to wake the futex waiters.
*/
#define OOM_REAPER_DELAY (2*HZ)
-static void queue_oom_reaper(struct task_struct *tsk)
+static void queue_oom_reaper(struct task_struct *tsk, bool delay)
{
/* mm is already queued? */
if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
@@ -700,7 +701,7 @@ static void queue_oom_reaper(struct task_struct *tsk)
get_task_struct(tsk);
timer_setup(&tsk->oom_reaper_timer, wake_oom_reaper, 0);
- tsk->oom_reaper_timer.expires = jiffies + OOM_REAPER_DELAY;
+ tsk->oom_reaper_timer.expires = jiffies + (delay ? OOM_REAPER_DELAY : 0);
add_timer(&tsk->oom_reaper_timer);
}
@@ -742,7 +743,7 @@ static int __init oom_init(void)
}
subsys_initcall(oom_init)
#else
-static inline void queue_oom_reaper(struct task_struct *tsk)
+static inline void queue_oom_reaper(struct task_struct *tsk, bool delay)
{
}
#endif /* CONFIG_MMU */
@@ -871,11 +872,12 @@ static inline bool __task_will_free_mem(struct task_struct *task)
* Caller has to make sure that task->mm is stable (hold task_lock or
* it operates on the current).
*/
-static bool task_will_free_mem(struct task_struct *task)
+static bool task_will_free_mem(struct task_struct *task, bool *delay_reap)
{
struct mm_struct *mm = task->mm;
struct task_struct *p;
bool ret = true;
+ bool has_robust = !delay_reap;
/*
* Skip tasks without mm because it might have passed its exit_mm and
@@ -888,6 +890,15 @@ static bool task_will_free_mem(struct task_struct *task)
if (!__task_will_free_mem(task))
return false;
+ /*
+ * Check if a process is using robust futexes. If so, delay its handling by the
+ * OOM reaper. The reason is that if the owner of a robust futex lock is killed
+ * while waiters are still alive, the OOM reaper might free the robust futex
+ * resources before futex_cleanup runs, causing the waiters to wait indefinitely.
+ */
+ if (!has_robust)
+ has_robust = check_robust_futex(task);
+
/*
* This task has already been drained by the oom reaper so there are
* only small chances it will free some more
@@ -912,8 +923,12 @@ static bool task_will_free_mem(struct task_struct *task)
ret = __task_will_free_mem(p);
if (!ret)
break;
+ if (!has_robust)
+ has_robust = __check_robust_futex(p);
}
rcu_read_unlock();
+ if (delay_reap)
+ *delay_reap = has_robust;
return ret;
}
@@ -923,6 +938,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
struct task_struct *p;
struct mm_struct *mm;
bool can_oom_reap = true;
+ bool delay_reap;
p = find_lock_task_mm(victim);
if (!p) {
@@ -950,6 +966,7 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
* reserves from the user space under its control.
*/
do_send_sig_info(SIGKILL, SEND_SIG_PRIV, victim, PIDTYPE_TGID);
+ delay_reap = check_robust_futex(victim);
mark_oom_victim(victim);
pr_err("%s: Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB, UID:%u pgtables:%lukB oom_score_adj:%hd\n",
message, task_pid_nr(victim), victim->comm, K(mm->total_vm),
@@ -990,11 +1007,13 @@ static void __oom_kill_process(struct task_struct *victim, const char *message)
if (unlikely(p->flags & PF_KTHREAD))
continue;
do_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_TGID);
+ if (!delay_reap)
+ delay_reap = __check_robust_futex(p);
}
rcu_read_unlock();
if (can_oom_reap)
- queue_oom_reaper(victim);
+ queue_oom_reaper(victim, delay_reap);
mmdrop(mm);
put_task_struct(victim);
@@ -1020,6 +1039,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
struct mem_cgroup *oom_group;
static DEFINE_RATELIMIT_STATE(oom_rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
+ bool delay_reap = false;
/*
* If the task is already exiting, don't alarm the sysadmin or kill
@@ -1027,9 +1047,9 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
* so it can die quickly
*/
task_lock(victim);
- if (task_will_free_mem(victim)) {
+ if (task_will_free_mem(victim, &delay_reap)) {
mark_oom_victim(victim);
- queue_oom_reaper(victim);
+ queue_oom_reaper(victim, delay_reap);
task_unlock(victim);
put_task_struct(victim);
return;
@@ -1112,6 +1132,7 @@ EXPORT_SYMBOL_GPL(unregister_oom_notifier);
bool out_of_memory(struct oom_control *oc)
{
unsigned long freed = 0;
+ bool delay_reap = false;
if (oom_killer_disabled)
return false;
@@ -1128,9 +1149,9 @@ bool out_of_memory(struct oom_control *oc)
* select it. The goal is to allow it to allocate so that it may
* quickly exit and free its memory.
*/
- if (task_will_free_mem(current)) {
+ if (task_will_free_mem(current, &delay_reap)) {
mark_oom_victim(current);
- queue_oom_reaper(current);
+ queue_oom_reaper(current, delay_reap);
return true;
}
@@ -1231,7 +1252,7 @@ SYSCALL_DEFINE2(process_mrelease, int, pidfd, unsigned int, flags)
mm = p->mm;
mmgrab(mm);
- if (task_will_free_mem(p))
+ if (task_will_free_mem(p, NULL))
reap = true;
else {
/* Error only if the work has not been done already */
--
2.17.1
On Fri 01-08-25 23:36:49, zhongjinji@honor.com wrote: > From: zhongjinji <zhongjinji@honor.com> > > After merging the patch > https://lore.kernel.org/all/20220414144042.677008-1-npache@redhat.com/T/#u, > the OOM reaper runs less frequently because many processes exit within 2 seconds. > > However, when a process is killed, timely handling by the OOM reaper allows > its memory to be freed faster. > > Since relatively few processes use robust futex, delaying the OOM reaper for > all processes is undesirable, as many killed processes cannot release memory > more quickly. Could you elaborate more about why this is really needed? OOM should be a very slow path. Why do you care about this potential improvement in that situation? In other words what is the usecase? -- Michal Hocko SUSE Labs
>On Fri 01-08-25 23:36:49, zhongjinji@honor.com wrote: >> From: zhongjinji <zhongjinji@honor.com> >> >> After merging the patch >> https://lore.kernel.org/all/20220414144042.677008-1-npache@redhat.com/T/#u, >> the OOM reaper runs less frequently because many processes exit within 2 seconds. >> >> However, when a process is killed, timely handling by the OOM reaper allows >> its memory to be freed faster. >> >> Since relatively few processes use robust futex, delaying the OOM reaper for >> all processes is undesirable, as many killed processes cannot release memory >> more quickly. > >Could you elaborate more about why this is really needed? OOM should be >a very slow path. Why do you care about this potential improvement in >that situation? In other words what is the usecase? Well, we are using the cgroup v1 freezer. When a frozen process is killed, it cannot exit immediately and is blocked in __refrigerator until it is thawed. When the process cannot be thawed in time, it will result in increased system memory pressure.
On Mon 04-08-25 19:50:37, zhongjinji wrote: > >On Fri 01-08-25 23:36:49, zhongjinji@honor.com wrote: > >> From: zhongjinji <zhongjinji@honor.com> > >> > >> After merging the patch > >> https://lore.kernel.org/all/20220414144042.677008-1-npache@redhat.com/T/#u, > >> the OOM reaper runs less frequently because many processes exit within 2 seconds. > >> > >> However, when a process is killed, timely handling by the OOM reaper allows > >> its memory to be freed faster. > >> > >> Since relatively few processes use robust futex, delaying the OOM reaper for > >> all processes is undesirable, as many killed processes cannot release memory > >> more quickly. > > > >Could you elaborate more about why this is really needed? OOM should be > >a very slow path. Why do you care about this potential improvement in > >that situation? In other words what is the usecase? > > Well, We are using the cgroup v1 freezer. When a frozen process is > killed, it cannot exit immediately and is blocked in __refrigerator until > it is thawed. When the process cannot be thawed in time, it will result in > increased system memory pressure. This is an important information to be part of the changelog! It is also important to note why don't you care about processes that have robust mutexes. Is this purely a probabilistic improvement because those are less common? TBH I find this to be really hackish and justification based on cgroup v1 (which is considered legacy) doesn't make it particularly appealing. -- Michal Hocko SUSE Labs
On Mon 04-08-25 14:01:40, Michal Hocko wrote: > On Mon 04-08-25 19:50:37, zhongjinji wrote: > > >On Fri 01-08-25 23:36:49, zhongjinji@honor.com wrote: > > >> From: zhongjinji <zhongjinji@honor.com> > > >> > > >> After merging the patch > > >> https://lore.kernel.org/all/20220414144042.677008-1-npache@redhat.com/T/#u, > > >> the OOM reaper runs less frequently because many processes exit within 2 seconds. > > >> > > >> However, when a process is killed, timely handling by the OOM reaper allows > > >> its memory to be freed faster. > > >> > > >> Since relatively few processes use robust futex, delaying the OOM reaper for > > >> all processes is undesirable, as many killed processes cannot release memory > > >> more quickly. > > > > > >Could you elaborate more about why this is really needed? OOM should be > > >a very slow path. Why do you care about this potential improvement in > > >that situation? In other words what is the usecase? > > > > Well, We are using the cgroup v1 freezer. When a frozen process is > > killed, it cannot exit immediately and is blocked in __refrigerator until > > it is thawed. When the process cannot be thawed in time, it will result in > > increased system memory pressure. > > This is an important information to be part of the changelog! It is also > important to note why don't you care about processes that have robust > mutexes. Is this purely a probabilistic improvement because those are > less common? > > TBH I find this to be really hackish and justification based on cgroup > v1 (which is considered legacy) doesn't make it particularly appealing. Btw. have you considered to simply not impose any delay for _all_ frozen tasks? -- Michal Hocko SUSE Labs
> On Mon 04-08-25 14:01:40, Michal Hocko wrote: > > On Mon 04-08-25 19:50:37, zhongjinji wrote: > > > >On Fri 01-08-25 23:36:49, zhongjinji@honor.com wrote: > > > >> From: zhongjinji <zhongjinji@honor.com> > > > >> > > > >> After merging the patch > > > >> https://lore.kernel.org/all/20220414144042.677008-1-npache@redhat.com/T/#u, > > > >> the OOM reaper runs less frequently because many processes exit within 2 seconds. > > > >> > > > >> However, when a process is killed, timely handling by the OOM reaper allows > > > >> its memory to be freed faster. > > > >> > > > >> Since relatively few processes use robust futex, delaying the OOM reaper for > > > >> all processes is undesirable, as many killed processes cannot release memory > > > >> more quickly. > > > > > > > >Could you elaborate more about why this is really needed? OOM should be > > > >a very slow path. Why do you care about this potential improvement in > > > >that situation? In other words what is the usecase? > > > > > > Well, We are using the cgroup v1 freezer. When a frozen process is > > > killed, it cannot exit immediately and is blocked in __refrigerator until > > > it is thawed. When the process cannot be thawed in time, it will result in > > > increased system memory pressure. > > > > This is an important information to be part of the changelog! It is also > > important to note why don't you care about processes that have robust > > mutexes. Is this purely a probabilistic improvement because those are > > less common? > > > > TBH I find this to be really hackish and justification based on cgroup > > v1 (which is considered legacy) doesn't make it particularly appealing. > > Btw. have you considered to simply not impose any delay for _all_ frozen > tasks? Thank you, it seems like a good idea. I will try it.
>On Mon 04-08-25 19:50:37, zhongjinji wrote: >> >On Fri 01-08-25 23:36:49, zhongjinji@honor.com wrote: >> >> From: zhongjinji <zhongjinji@honor.com> >> >> >> >> After merging the patch >> >> https://lore.kernel.org/all/20220414144042.677008-1-npache@redhat.com/T/#u >> >> the OOM reaper runs less frequently because many processes exit within 2 seconds. >> >> >> >> However, when a process is killed, timely handling by the OOM reaper allows >> >> its memory to be freed faster. >> >> >> >> Since relatively few processes use robust futex, delaying the OOM reaper for >> >> all processes is undesirable, as many killed processes cannot release memory >> >> more quickly. >> > >> >Could you elaborate more about why this is really needed? OOM should be >> >a very slow path. Why do you care about this potential improvement in >> >that situation? In other words what is the usecase? >> >> Well, We are using the cgroup v1 freezer. When a frozen process is >> killed, it cannot exit immediately and is blocked in __refrigerator until >> it is thawed. When the process cannot be thawed in time, it will result in >> increased system memory pressure. > >This is an important information to be part of the changelog! It is also sorry, I will update those infos in next version. >important to note why don't you care about processes that have robust >mutexes. Is this purely a probabilistic improvement because those are >less common? Yes, My device runs Android. I added a log in futex_cleanup when a process has a robust list, But I have never seen any process on Android having robust mutexes. >TBH I find this to be really hackish and justification based on cgroup >v1 (which is considered legacy) doesn't make it particularly appealing. It seems hackish to check the robust_list during the oom kill, and it is also hard to see the relationship between the robust_list and the OOM killer from this change. However, it is indeed a simple way to decide whether to delay the oom reaper. 
Would it be better to use a function name like unreap_before_exit or unreap_before_all_exit instead of check_robust_futex?
© 2016 - 2025 Red Hat, Inc.