The work_atoms should be freed after use. Add free_work_atoms() to
make sure all of them are released. Use list_splice_init() when merging
atoms so that the source list head is reinitialized and no stale
pointers can be accessed after the merged work_atoms is freed.
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
tools/perf/builtin-sched.c | 27 ++++++++++++++++++++++++---
1 file changed, 24 insertions(+), 3 deletions(-)
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 087d4eaba5f7160d..4bbebd6ef2e4a791 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -1111,6 +1111,21 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
atoms->nb_atoms++;
}
+static void free_work_atoms(struct work_atoms *atoms)
+{
+ struct work_atom *atom, *tmp;
+
+ if (atoms == NULL)
+ return;
+
+ list_for_each_entry_safe(atom, tmp, &atoms->work_list, list) {
+ list_del(&atom->list);
+ free(atom);
+ }
+ thread__zput(atoms->thread);
+ free(atoms);
+}
+
static int latency_switch_event(struct perf_sched *sched,
struct evsel *evsel,
struct perf_sample *sample,
@@ -3426,13 +3441,13 @@ static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *d
this->total_runtime += data->total_runtime;
this->nb_atoms += data->nb_atoms;
this->total_lat += data->total_lat;
- list_splice(&data->work_list, &this->work_list);
+ list_splice_init(&data->work_list, &this->work_list);
if (this->max_lat < data->max_lat) {
this->max_lat = data->max_lat;
this->max_lat_start = data->max_lat_start;
this->max_lat_end = data->max_lat_end;
}
- zfree(&data);
+ free_work_atoms(data);
return;
}
}
@@ -3511,7 +3526,6 @@ static int perf_sched__lat(struct perf_sched *sched)
work_list = rb_entry(next, struct work_atoms, node);
output_lat_thread(sched, work_list);
next = rb_next(next);
- thread__zput(work_list->thread);
}
printf(" -----------------------------------------------------------------------------------------------------------------\n");
@@ -3525,6 +3539,13 @@ static int perf_sched__lat(struct perf_sched *sched)
rc = 0;
+ while ((next = rb_first_cached(&sched->sorted_atom_root))) {
+ struct work_atoms *data;
+
+ data = rb_entry(next, struct work_atoms, node);
+ rb_erase_cached(next, &sched->sorted_atom_root);
+ free_work_atoms(data);
+ }
out_free_cpus_switch_event:
free_cpus_switch_event(sched);
return rc;
--
2.50.0.727.gbf7dc18ff4-goog
On Wed, Jul 2, 2025 at 6:49 PM Namhyung Kim <namhyung@kernel.org> wrote:
>
> The work_atoms should be freed after use. Add free_work_atoms() to
> make sure to release all. It should use list_splice_init() when merging
> atoms to prevent accessing invalid pointers.
>
> Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Reviewed-by: Ian Rogers <irogers@google.com>
Thanks,
Ian
> ---
> tools/perf/builtin-sched.c | 27 ++++++++++++++++++++++++---
> 1 file changed, 24 insertions(+), 3 deletions(-)
>
> diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
> index 087d4eaba5f7160d..4bbebd6ef2e4a791 100644
> --- a/tools/perf/builtin-sched.c
> +++ b/tools/perf/builtin-sched.c
> @@ -1111,6 +1111,21 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
> atoms->nb_atoms++;
> }
>
> +static void free_work_atoms(struct work_atoms *atoms)
> +{
> + struct work_atom *atom, *tmp;
> +
> + if (atoms == NULL)
> + return;
> +
> + list_for_each_entry_safe(atom, tmp, &atoms->work_list, list) {
> + list_del(&atom->list);
> + free(atom);
> + }
> + thread__zput(atoms->thread);
> + free(atoms);
> +}
> +
> static int latency_switch_event(struct perf_sched *sched,
> struct evsel *evsel,
> struct perf_sample *sample,
> @@ -3426,13 +3441,13 @@ static void __merge_work_atoms(struct rb_root_cached *root, struct work_atoms *d
> this->total_runtime += data->total_runtime;
> this->nb_atoms += data->nb_atoms;
> this->total_lat += data->total_lat;
> - list_splice(&data->work_list, &this->work_list);
> + list_splice_init(&data->work_list, &this->work_list);
> if (this->max_lat < data->max_lat) {
> this->max_lat = data->max_lat;
> this->max_lat_start = data->max_lat_start;
> this->max_lat_end = data->max_lat_end;
> }
> - zfree(&data);
> + free_work_atoms(data);
> return;
> }
> }
> @@ -3511,7 +3526,6 @@ static int perf_sched__lat(struct perf_sched *sched)
> work_list = rb_entry(next, struct work_atoms, node);
> output_lat_thread(sched, work_list);
> next = rb_next(next);
> - thread__zput(work_list->thread);
> }
>
> printf(" -----------------------------------------------------------------------------------------------------------------\n");
> @@ -3525,6 +3539,13 @@ static int perf_sched__lat(struct perf_sched *sched)
>
> rc = 0;
>
> + while ((next = rb_first_cached(&sched->sorted_atom_root))) {
> + struct work_atoms *data;
> +
> + data = rb_entry(next, struct work_atoms, node);
> + rb_erase_cached(next, &sched->sorted_atom_root);
> + free_work_atoms(data);
> + }
> out_free_cpus_switch_event:
> free_cpus_switch_event(sched);
> return rc;
> --
> 2.50.0.727.gbf7dc18ff4-goog
>
© 2016 - 2026 Red Hat, Inc.