thread_group_cputime() does its own locking, so we can safely shift
thread_group_cputime_adjusted(), which does another for_each_thread
loop, outside of the ->siglock protected section.

Not only does this remove for_each_thread() from the critical section
with irqs disabled, it also removes another case where stats_lock is
taken with siglock held. We want to remove this dependency so that the
users of stats_lock can then be changed to not disable irqs.
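
For reference, the resulting flow in do_task_stat() looks roughly like
this (a paraphrased sketch, not the verbatim code; unlock_task_sighand()
and the elided parts come from the surrounding context not shown in the
hunks below):

	if (lock_task_sighand(task, &flags)) {
		/* per-signal-struct sums still done under ->siglock */
		...
		unlock_task_sighand(task, &flags);
	}

	/* ->siglock dropped, irqs enabled again */
	if (whole) {
		/* does its own locking, including stats_lock */
		thread_group_cputime_adjusted(task, &utime, &stime);
	} else {
		task_cputime_adjusted(task, &utime, &stime);
		...
	}
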
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
---
fs/proc/array.c | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/fs/proc/array.c b/fs/proc/array.c
index ff08a8957552..45ba91863808 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -511,7 +511,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
sigemptyset(&sigign);
sigemptyset(&sigcatch);
- cutime = cstime = utime = stime = 0;
+ cutime = cstime = 0;
cgtime = gtime = 0;
if (lock_task_sighand(task, &flags)) {
@@ -546,7 +546,6 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
min_flt += sig->min_flt;
maj_flt += sig->maj_flt;
- thread_group_cputime_adjusted(task, &utime, &stime);
gtime += sig->gtime;
if (sig->flags & (SIGNAL_GROUP_EXIT | SIGNAL_STOP_STOPPED))
@@ -562,10 +561,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
if (permitted && (!whole || num_threads < 2))
wchan = !task_is_running(task);
- if (!whole) {
+
+ if (whole) {
+ thread_group_cputime_adjusted(task, &utime, &stime);
+ } else {
+ task_cputime_adjusted(task, &utime, &stime);
min_flt = task->min_flt;
maj_flt = task->maj_flt;
- task_cputime_adjusted(task, &utime, &stime);
gtime = task_gtime(task);
}
--
2.25.1.362.g51ebf55
On Tue, Jan 23, 2024 at 7:35 AM Oleg Nesterov <oleg@redhat.com> wrote:
>
> [...]
>
Signed-off-by: Dylan Hatch <dylanbhatch@google.com>