This reduces single-threaded overhead as it avoids one lock+irq trip on
exit.
It also improves scalability of spawning and killing threads within one
process (just shy of 5% when doing it on 24 cores on my test jig).
Both routines are moved below kcov and kmsan exit, which should be
harmless.
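
As a rough userspace illustration (not part of the patch; the names below
are invented for the sketch, nothing here is kernel API), the change folds
what used to be two back-to-back critical sections on the same lock into
one, which is where the saved lock+irq trip comes from:

/*
 * Userspace sketch only: a pthread mutex stands in for siglock,
 * core_pending stands in for signal->core_state being set.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t siglock = PTHREAD_MUTEX_INITIALIZER;
static int quick_threads = 2;
static bool core_pending = false;

/* Old shape: two helpers, each taking the lock on its own. */
static void synchronize_group_exit_old(void)
{
	pthread_mutex_lock(&siglock);
	quick_threads--;
	pthread_mutex_unlock(&siglock);
}

static bool coredump_check_old(void)
{
	bool pending;

	pthread_mutex_lock(&siglock);
	pending = core_pending;
	pthread_mutex_unlock(&siglock);
	return pending;
}

/* New shape: one critical section does both jobs. */
static bool synchronize_group_exit_new(void)
{
	bool pending;

	pthread_mutex_lock(&siglock);
	quick_threads--;
	pending = core_pending;	/* snapshot under the same lock */
	pthread_mutex_unlock(&siglock);
	return pending;
}

int main(void)
{
	synchronize_group_exit_old();
	if (coredump_check_old())
		puts("old path: coredump pending");

	if (synchronize_group_exit_new())
		puts("new path: coredump pending");

	printf("quick_threads = %d\n", quick_threads);
	return 0;
}

The point is only the shape: the "new" path touches the lock once where
the "old" path touched it twice per exiting thread.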
Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
---
v3:
- move coredump_task_exit into synchronize_group_exit
v2:
- push the routines after kcov and kmsan
kernel/exit.c | 68 ++++++++++++++++++++++++---------------------------
1 file changed, 32 insertions(+), 36 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index f97a2bbc9db9..5ffa56bcd659 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -412,44 +412,30 @@ kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
 	}
 }
 
-static void coredump_task_exit(struct task_struct *tsk)
+static void coredump_task_exit(struct task_struct *tsk,
+			       struct core_state *core_state)
 {
-	struct core_state *core_state;
+	struct core_thread self;
 
+	self.task = tsk;
+	if (self.task->flags & PF_SIGNALED)
+		self.next = xchg(&core_state->dumper.next, &self);
+	else
+		self.task = NULL;
 	/*
-	 * Serialize with any possible pending coredump.
-	 * We must hold siglock around checking core_state
-	 * and setting PF_POSTCOREDUMP. The core-inducing thread
-	 * will increment ->nr_threads for each thread in the
-	 * group without PF_POSTCOREDUMP set.
+	 * Implies mb(), the result of xchg() must be visible
+	 * to core_state->dumper.
 	 */
-	spin_lock_irq(&tsk->sighand->siglock);
-	tsk->flags |= PF_POSTCOREDUMP;
-	core_state = tsk->signal->core_state;
-	spin_unlock_irq(&tsk->sighand->siglock);
-	if (core_state) {
-		struct core_thread self;
-
-		self.task = current;
-		if (self.task->flags & PF_SIGNALED)
-			self.next = xchg(&core_state->dumper.next, &self);
-		else
-			self.task = NULL;
-		/*
-		 * Implies mb(), the result of xchg() must be visible
-		 * to core_state->dumper.
-		 */
-		if (atomic_dec_and_test(&core_state->nr_threads))
-			complete(&core_state->startup);
+	if (atomic_dec_and_test(&core_state->nr_threads))
+		complete(&core_state->startup);
 
-		for (;;) {
-			set_current_state(TASK_IDLE|TASK_FREEZABLE);
-			if (!self.task) /* see coredump_finish() */
-				break;
-			schedule();
-		}
-		__set_current_state(TASK_RUNNING);
+	for (;;) {
+		set_current_state(TASK_IDLE|TASK_FREEZABLE);
+		if (!self.task) /* see coredump_finish() */
+			break;
+		schedule();
 	}
+	__set_current_state(TASK_RUNNING);
 }
 
 #ifdef CONFIG_MEMCG
@@ -877,6 +863,7 @@ static void synchronize_group_exit(struct task_struct *tsk, long code)
 {
 	struct sighand_struct *sighand = tsk->sighand;
 	struct signal_struct *signal = tsk->signal;
+	struct core_state *core_state;
 
 	spin_lock_irq(&sighand->siglock);
 	signal->quick_threads--;
@@ -886,7 +873,19 @@ static void synchronize_group_exit(struct task_struct *tsk, long code)
 		signal->group_exit_code = code;
 		signal->group_stop_count = 0;
 	}
+	/*
+	 * Serialize with any possible pending coredump.
+	 * We must hold siglock around checking core_state
+	 * and setting PF_POSTCOREDUMP. The core-inducing thread
+	 * will increment ->nr_threads for each thread in the
+	 * group without PF_POSTCOREDUMP set.
+	 */
+	tsk->flags |= PF_POSTCOREDUMP;
+	core_state = signal->core_state;
 	spin_unlock_irq(&sighand->siglock);
+
+	if (unlikely(core_state))
+		coredump_task_exit(tsk, core_state);
 }
 
 void __noreturn do_exit(long code)
@@ -895,15 +894,12 @@ void __noreturn do_exit(long code)
 	struct task_struct *tsk = current;
 	int group_dead;
 
 	WARN_ON(irqs_disabled());
-
-	synchronize_group_exit(tsk, code);
-
 	WARN_ON(tsk->plug);
 
 	kcov_task_exit(tsk);
 	kmsan_task_exit(tsk);
 
-	coredump_task_exit(tsk);
+	synchronize_group_exit(tsk, code);
 	ptrace_event(PTRACE_EVENT_EXIT, code);
 	user_events_exit(tsk);
--
2.43.0
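
[Editorial aside, not part of the thread: the diff's lockless rendezvous,
where each exiting thread xchg()s itself onto core_state->dumper.next and
the last atomic_dec_and_test() completes ->startup, can be mimicked in
userspace. A minimal hedged sketch, assuming C11 atomics; the struct and
field names only mirror the kernel's, nothing here is kernel code:]

#include <stdatomic.h>
#include <stdio.h>

struct core_thread {
	struct core_thread *next;
	int id;
};

struct core_state {
	_Atomic(struct core_thread *) dumper_next;
	atomic_int nr_threads;
};

static void exiting_thread(struct core_state *cs, struct core_thread *self)
{
	/*
	 * Push onto the lock-free list; like the kernel's xchg(), a
	 * seq_cst exchange implies a full barrier, so the dumper sees
	 * the insertion before the nr_threads decrement below.
	 */
	self->next = atomic_exchange(&cs->dumper_next, self);

	/* Last thread out wakes the dumper (stand-in for complete()). */
	if (atomic_fetch_sub(&cs->nr_threads, 1) == 1)
		puts("startup complete: dumper may proceed");
}

int main(void)
{
	struct core_state cs = { .dumper_next = NULL, .nr_threads = 2 };
	struct core_thread a = { .id = 1 }, b = { .id = 2 };

	exiting_thread(&cs, &a);
	exiting_thread(&cs, &b);

	/* Walk the list the way the dumper would (LIFO order). */
	for (struct core_thread *t = atomic_load(&cs.dumper_next); t; t = t->next)
		printf("registered thread %d\n", t->id);
	return 0;
}

[The seq_cst atomic_exchange plays the role of the "Implies mb()" comment
in the diff: the list insertion is guaranteed visible to the dumper before
it is woken.]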
On 03/19, Mateusz Guzik wrote:
>
> This reduces single-threaded overhead as it avoids one lock+irq trip on
> exit.
>
> It also improves scalability of spawning and killing threads within one
> process (just shy of 5% when doing it on 24 cores on my test jig).
>
> Both routines are moved below kcov and kmsan exit, which should be
> harmless.
>
> Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>

Reviewed-by: Oleg Nesterov <oleg@redhat.com>
I think this fell through the cracks.

Can you pick it up? Should I prod someone else?

thanks

On Wed, Mar 19, 2025 at 9:11 PM Oleg Nesterov <oleg@redhat.com> wrote:
>
> On 03/19, Mateusz Guzik wrote:
> >
> > This reduces single-threaded overhead as it avoids one lock+irq trip on
> > exit.
> >
> > It also improves scalability of spawning and killing threads within one
> > process (just shy of 5% when doing it on 24 cores on my test jig).
> >
> > Both routines are moved below kcov and kmsan exit, which should be
> > harmless.
> >
> > Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
>
> Reviewed-by: Oleg Nesterov <oleg@redhat.com>
>

--
Mateusz Guzik <mjguzik gmail.com>