Calling perf top with branch filters enabled on Intel hybrid CPUs
with branch counter event logging support results in a segfault.
$ ./perf top -e '{cpu_core/cpu-cycles/,cpu_core/event=0xc6,umask=0x3,\
frontend=0x11,name=frontend_retired_dsb_miss/}' -j any,counter
perf: Segmentation fault
-------- backtrace --------
./perf() [0x55f460]
/lib64/libc.so.6(+0x1a050) [0x7fd8be227050]
./perf() [0x57b4a7]
./perf() [0x561e5a]
./perf() [0x604a81]
./perf() [0x4395b5]
./perf() [0x601732]
./perf() [0x439bc1]
./perf() [0x5d35b3]
./perf() [0x43936c]
/lib64/libc.so.6(+0x70ba8) [0x7fd8be27dba8]
/lib64/libc.so.6(+0xf4b8c) [0x7fd8be301b8c]
The cause is that perf_env__find_br_cntr_info tries to dereference the
NULL pmu_caps pointer in the perf_env struct. Presumably this would
also be an issue when using the cpu_pmu_caps structure available for
homogeneous core CPUs.
Fix this by populating cpu_pmu_caps and pmu_caps structures with
values from sysfs when calling perf top with branch stack sampling
enabled.
Signed-off-by: Thomas Falcon <thomas.falcon@intel.com>
---
tools/perf/builtin-top.c | 8 +++
tools/perf/util/env.c | 114 +++++++++++++++++++++++++++++++++++++++
tools/perf/util/env.h | 1 +
3 files changed, 123 insertions(+)
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 1061f4eebc3f..c2688e4ef3c4 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1729,6 +1729,14 @@ int cmd_top(int argc, const char **argv)
if (opts->branch_stack && callchain_param.enabled)
symbol_conf.show_branchflag_count = true;
+ if (opts->branch_stack) {
+ status = perf_env__read_core_pmu_caps(&perf_env);
+ if (status) {
+ pr_err("PMU capability data is not available\n");
+ goto out_delete_evlist;
+ }
+ }
+
sort__mode = SORT_MODE__TOP;
/* display thread wants entries to be collapsed in a different tree */
perf_hpp_list.need_collapse = 1;
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 36411749e007..37ed6dc52cf3 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -416,6 +416,120 @@ static int perf_env__read_nr_cpus_avail(struct perf_env *env)
return env->nr_cpus_avail ? 0 : -ENOENT;
}
+static int __perf_env__read_core_pmu_caps(struct perf_pmu *pmu,
+ int *nr_caps, char ***caps,
+ unsigned int *max_branches,
+ unsigned int *br_cntr_nr,
+ unsigned int *br_cntr_width)
+{
+ struct perf_pmu_caps *pcaps = NULL;
+ char *ptr, **tmp;
+ int ret = 0;
+
+ *nr_caps = 0;
+ *caps = NULL;
+
+ if (!pmu->nr_caps)
+ return 0;
+
+ *caps = zalloc(sizeof(char *) * pmu->nr_caps);
+ if (!*caps)
+ return -ENOMEM;
+
+ tmp = *caps;
+ list_for_each_entry(pcaps, &pmu->caps, list) {
+
+ if (asprintf(&ptr, "%s=%s", pcaps->name, pcaps->value) < 0) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ *tmp++ = ptr;
+
+ if (!strcmp(pcaps->name, "branches"))
+ *max_branches = atoi(pcaps->value);
+
+ if (!strcmp(pcaps->name, "branch_counter_nr"))
+ *br_cntr_nr = atoi(pcaps->value);
+
+ if (!strcmp(pcaps->name, "branch_counter_width"))
+ *br_cntr_width = atoi(pcaps->value);
+ }
+ *nr_caps = pmu->nr_caps;
+ return 0;
+error:
+ while (tmp-- != *caps)
+ free(*tmp);
+ free(*caps);
+ *caps = NULL;
+ *nr_caps = 0;
+ return ret;
+}
+
+int perf_env__read_core_pmu_caps(struct perf_env *env)
+{
+ struct perf_pmu *pmu = NULL;
+ struct pmu_caps *pmu_caps;
+ int nr_pmu = 0, i = 0, j;
+ int ret;
+
+ nr_pmu = perf_pmus__num_core_pmus();
+
+ if (!nr_pmu)
+ return -ENODEV;
+
+ if (nr_pmu == 1) {
+ pmu = perf_pmus__scan_core(NULL);
+ if (!pmu)
+ return -ENODEV;
+ ret = perf_pmu__caps_parse(pmu);
+ if (ret < 0)
+ return ret;
+ return __perf_env__read_core_pmu_caps(pmu, &env->nr_cpu_pmu_caps,
+ &env->cpu_pmu_caps,
+ &env->max_branches,
+ &env->br_cntr_nr,
+ &env->br_cntr_width);
+ }
+
+ pmu_caps = zalloc(sizeof(*pmu_caps) * nr_pmu);
+ if (!pmu_caps)
+ return -ENOMEM;
+
+ while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
+ if (perf_pmu__caps_parse(pmu) <= 0)
+ continue;
+ ret = __perf_env__read_core_pmu_caps(pmu, &pmu_caps[i].nr_caps,
+ &pmu_caps[i].caps,
+ &pmu_caps[i].max_branches,
+ &pmu_caps[i].br_cntr_nr,
+ &pmu_caps[i].br_cntr_width);
+ if (ret)
+ goto error;
+
+ pmu_caps[i].pmu_name = strdup(pmu->name);
+ if (!pmu_caps[i].pmu_name) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ i++;
+ }
+
+ env->nr_pmus_with_caps = nr_pmu;
+ env->pmu_caps = pmu_caps;
+
+ return 0;
+error:
+ for (i = 0; i < nr_pmu; i++) {
+ for (j = 0; j < pmu_caps[i].nr_caps; j++)
+ free(pmu_caps[i].caps[j]);
+ free(pmu_caps[i].caps);
+ free(pmu_caps[i].pmu_name);
+ }
+ free(pmu_caps);
+ return ret;
+}
+
const char *perf_env__raw_arch(struct perf_env *env)
{
return env && !perf_env__read_arch(env) ? env->arch : "unknown";
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index d90e343cf1fa..135a1f714905 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -152,6 +152,7 @@ struct btf_node;
extern struct perf_env perf_env;
+int perf_env__read_core_pmu_caps(struct perf_env *env);
void perf_env__exit(struct perf_env *env);
int perf_env__kernel_is_64_bit(struct perf_env *env);
--
2.49.0
On Thu, May 1, 2025 at 11:42 AM Thomas Falcon <thomas.falcon@intel.com> wrote:
>
> Calling perf top with brach filters enabled on Intel hybrid CPU's
> with branch counter event logging support results in a segfault.
>
> $ ./perf top -e '{cpu_core/cpu-cycles/,cpu_core/event=0xc6,umask=0x3,\
> frontend=0x11,name=frontend_retired_dsb_miss/}' -j any,counter
> perf: Segmentation fault
> -------- backtrace --------
> ./perf() [0x55f460]
> /lib64/libc.so.6(+0x1a050) [0x7fd8be227050]
> ./perf() [0x57b4a7]
> ./perf() [0x561e5a]
> ./perf() [0x604a81]
> ./perf() [0x4395b5]
> ./perf() [0x601732]
> ./perf() [0x439bc1]
> ./perf() [0x5d35b3]
> ./perf() [0x43936c]
> /lib64/libc.so.6(+0x70ba8) [0x7fd8be27dba8]
> /lib64/libc.so.6(+0xf4b8c) [0x7fd8be301b8c]
Thanks Thomas. Could you generate this backtrace in GDB? I did write a
patch to symbolize backtraces like this:
https://lore.kernel.org/lkml/20250313052952.871958-2-irogers@google.com/
Sadly it is still without any reviewed tags and unmerged - the code calls
routines that malloc, so it isn't strictly sound if, say, the backtrace is
needed after a SEGV in the malloc implementation, but it nicely
self-references the perf APIs, etc.
> The cause is that perf_env__find_br_cntr_info tries to access a
> null pointer pmu_caps in the perf_env struct. Presumably this would
> also be an issue when using the cpu_pmu_caps structure available for
> homogeneous core CPU's.
I'm a little confused by the top code: we have the global perf_env
being used and also one in the session:
https://web.git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools-next.git/tree/tools/perf/builtin-top.c?h=perf-tools-next#n649
```
ret = evlist__tui_browse_hists(top->evlist, help, &hbt, top->min_percent,
&top->session->header.env, !top->record_opts.overwrite);
```
and the global perf_env:
https://web.git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools-next.git/tree/tools/perf/builtin-top.c?h=perf-tools-next#n1641
```
status = perf_env__read_cpuid(&perf_env);
```
I kind of wish we didn't have the global one, as it's unclear what the
deal is with its ownership.
> Fix this by populating cpu_pmu_caps and pmu_caps structures with
> values from sysfs when calling perf top with branch stack sampling
> enabled.
I wonder if we could encounter similar problems from, say, a perf script
handling live data, and so some kind of lazy initialization should be
employed. It is hard to say without seeing the backtrace.
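By lazy initialization I mean something along these lines - an untested
sketch on top of the perf_env__read_core_pmu_caps() this patch adds, not
what the patch currently does:
```c
/*
 * Hypothetical sketch of lazy initialization: read the core PMU caps the
 * first time they are needed rather than up front in cmd_top(). Thread
 * safety would need more thought since the lookup happens on the reader
 * thread.
 */
static void perf_env__lazy_read_core_pmu_caps(struct perf_env *env)
{
	static bool done;

	if (done)
		return;
	done = true;
	if (!env->cpu_pmu_caps && !env->pmu_caps)
		perf_env__read_core_pmu_caps(env);
}
```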
Thanks,
Ian
> Signed-off-by: Thomas Falcon <thomas.falcon@intel.com>
> ---
> tools/perf/builtin-top.c | 8 +++
> tools/perf/util/env.c | 114 +++++++++++++++++++++++++++++++++++++++
> tools/perf/util/env.h | 1 +
> 3 files changed, 123 insertions(+)
>
> diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
> index 1061f4eebc3f..c2688e4ef3c4 100644
> --- a/tools/perf/builtin-top.c
> +++ b/tools/perf/builtin-top.c
> @@ -1729,6 +1729,14 @@ int cmd_top(int argc, const char **argv)
> if (opts->branch_stack && callchain_param.enabled)
> symbol_conf.show_branchflag_count = true;
>
> + if (opts->branch_stack) {
> + status = perf_env__read_core_pmu_caps(&perf_env);
> + if (status) {
> + pr_err("PMU capability data is not available\n");
> + goto out_delete_evlist;
> + }
> + }
> +
> sort__mode = SORT_MODE__TOP;
> /* display thread wants entries to be collapsed in a different tree */
> perf_hpp_list.need_collapse = 1;
> diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
> index 36411749e007..37ed6dc52cf3 100644
> --- a/tools/perf/util/env.c
> +++ b/tools/perf/util/env.c
> @@ -416,6 +416,120 @@ static int perf_env__read_nr_cpus_avail(struct perf_env *env)
> return env->nr_cpus_avail ? 0 : -ENOENT;
> }
>
> +static int __perf_env__read_core_pmu_caps(struct perf_pmu *pmu,
> + int *nr_caps, char ***caps,
> + unsigned int *max_branches,
> + unsigned int *br_cntr_nr,
> + unsigned int *br_cntr_width)
> +{
> + struct perf_pmu_caps *pcaps = NULL;
> + char *ptr, **tmp;
> + int ret = 0;
> +
> + *nr_caps = 0;
> + *caps = NULL;
> +
> + if (!pmu->nr_caps)
> + return 0;
> +
> + *caps = zalloc(sizeof(char *) * pmu->nr_caps);
> + if (!*caps)
> + return -ENOMEM;
> +
> + tmp = *caps;
> + list_for_each_entry(pcaps, &pmu->caps, list) {
> +
> + if (asprintf(&ptr, "%s=%s", pcaps->name, pcaps->value) < 0) {
> + ret = -ENOMEM;
> + goto error;
> + }
> +
> + *tmp++ = ptr;
> +
> + if (!strcmp(pcaps->name, "branches"))
> + *max_branches = atoi(pcaps->value);
> +
> + if (!strcmp(pcaps->name, "branch_counter_nr"))
> + *br_cntr_nr = atoi(pcaps->value);
> +
> + if (!strcmp(pcaps->name, "branch_counter_width"))
> + *br_cntr_width = atoi(pcaps->value);
> + }
> + *nr_caps = pmu->nr_caps;
> + return 0;
> +error:
> + while (tmp-- != *caps)
> + free(*tmp);
> + free(*caps);
> + *caps = NULL;
> + *nr_caps = 0;
> + return ret;
> +}
> +
> +int perf_env__read_core_pmu_caps(struct perf_env *env)
> +{
> + struct perf_pmu *pmu = NULL;
> + struct pmu_caps *pmu_caps;
> + int nr_pmu = 0, i = 0, j;
> + int ret;
> +
> + nr_pmu = perf_pmus__num_core_pmus();
> +
> + if (!nr_pmu)
> + return -ENODEV;
> +
> + if (nr_pmu == 1) {
> + pmu = perf_pmus__scan_core(NULL);
> + if (!pmu)
> + return -ENODEV;
> + ret = perf_pmu__caps_parse(pmu);
> + if (ret < 0)
> + return ret;
> + return __perf_env__read_core_pmu_caps(pmu, &env->nr_cpu_pmu_caps,
> + &env->cpu_pmu_caps,
> + &env->max_branches,
> + &env->br_cntr_nr,
> + &env->br_cntr_width);
> + }
> +
> + pmu_caps = zalloc(sizeof(*pmu_caps) * nr_pmu);
> + if (!pmu_caps)
> + return -ENOMEM;
> +
> + while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
> + if (perf_pmu__caps_parse(pmu) <= 0)
> + continue;
> + ret = __perf_env__read_core_pmu_caps(pmu, &pmu_caps[i].nr_caps,
> + &pmu_caps[i].caps,
> + &pmu_caps[i].max_branches,
> + &pmu_caps[i].br_cntr_nr,
> + &pmu_caps[i].br_cntr_width);
> + if (ret)
> + goto error;
> +
> + pmu_caps[i].pmu_name = strdup(pmu->name);
> + if (!pmu_caps[i].pmu_name) {
> + ret = -ENOMEM;
> + goto error;
> + }
> + i++;
> + }
> +
> + env->nr_pmus_with_caps = nr_pmu;
> + env->pmu_caps = pmu_caps;
> +
> + return 0;
> +error:
> + for (i = 0; i < nr_pmu; i++) {
> + for (j = 0; j < pmu_caps[i].nr_caps; j++)
> + free(pmu_caps[i].caps[j]);
> + free(pmu_caps[i].caps);
> + free(pmu_caps[i].pmu_name);
> + }
> + free(pmu_caps);
> + return ret;
> +}
> +
> const char *perf_env__raw_arch(struct perf_env *env)
> {
> return env && !perf_env__read_arch(env) ? env->arch : "unknown";
> diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
> index d90e343cf1fa..135a1f714905 100644
> --- a/tools/perf/util/env.h
> +++ b/tools/perf/util/env.h
> @@ -152,6 +152,7 @@ struct btf_node;
>
> extern struct perf_env perf_env;
>
> +int perf_env__read_core_pmu_caps(struct perf_env *env);
> void perf_env__exit(struct perf_env *env);
>
> int perf_env__kernel_is_64_bit(struct perf_env *env);
> --
> 2.49.0
>
On Thu, 2025-05-01 at 13:00 -0700, Ian Rogers wrote:
> On Thu, May 1, 2025 at 11:42 AM Thomas Falcon <thomas.falcon@intel.com> wrote:
> >
> > Calling perf top with brach filters enabled on Intel hybrid CPU's
> > with branch counter event logging support results in a segfault.
> >
> > $ ./perf top -e '{cpu_core/cpu-cycles/,cpu_core/event=0xc6,umask=0x3,\
> > frontend=0x11,name=frontend_retired_dsb_miss/}' -j any,counter
> > perf: Segmentation fault
> > -------- backtrace --------
> > ./perf() [0x55f460]
> > /lib64/libc.so.6(+0x1a050) [0x7fd8be227050]
> > ./perf() [0x57b4a7]
> > ./perf() [0x561e5a]
> > ./perf() [0x604a81]
> > ./perf() [0x4395b5]
> > ./perf() [0x601732]
> > ./perf() [0x439bc1]
> > ./perf() [0x5d35b3]
> > ./perf() [0x43936c]
> > /lib64/libc.so.6(+0x70ba8) [0x7fd8be27dba8]
> > /lib64/libc.so.6(+0xf4b8c) [0x7fd8be301b8c]
>
Hi Ian, thanks for reviewing.
> Thanks Thomas. Could you generate this backtrace in GDB? I did write a
> patch to symbolize backtraces like this:
> https://lore.kernel.org/lkml/20250313052952.871958-2-irogers@google.com/
> Sadly without any reviewed tags and unmerged - the code calls routines
> that malloc so it isn't strictly sound if say the backtrace was needed
> from a SEGV in the malloc implementation, it is nicely
> self-referencing the perf APIs, ..
Sorry about that; here is the backtrace I'm seeing when running the perf top command in gdb:
Thread 27 "perf" received signal SIGSEGV, Segmentation fault.
[Switching to Thread 0x7fffcb7fe6c0 (LWP 812169)]
0x000000000057b4a7 in perf_env.find_br_cntr_info ()
(gdb) backtrace
#0 0x000000000057b4a7 in perf_env.find_br_cntr_info ()
#1 0x0000000000561e5a in addr_map_symbol.account_cycles ()
#2 0x0000000000604a81 in hist.account_cycles ()
#3 0x00000000004395b5 in hist_iter.top_callback ()
#4 0x0000000000601732 in hist_entry_iter.add ()
#5 0x0000000000439bc1 in deliver_event ()
#6 0x00000000005d35b3 in __ordered_events__flush ()
#7 0x000000000043936c in process_thread ()
#8 0x00007ffff6e7dba8 in start_thread (arg=<optimized out>) at pthread_create.c:448
#9 0x00007ffff6f01b8c in __GI___clone3 () at ../sysdeps/unix/sysv/linux/x86_64/clone3.S:78
Thanks,
Tom
> ```
> status = perf_env__read_cpuid(&perf_env);
> ```
>
> I kind of wish we didn't have the global one as what's the deal with
> ownership with it.
>
> > Fix this by populating cpu_pmu_caps and pmu_caps structures with
> > values from sysfs when calling perf top with branch stack sampling
> > enabled.
>
> I wonder if we could encounter similar problems from say a perf script
> handling live data and so some kind of lazy initialization should be
> employed. It is hard to say without seeing the backtrace.
>
> Thanks,
> Ian
>
> > Signed-off-by: Thomas Falcon <thomas.falcon@intel.com>
> > ---
> > tools/perf/builtin-top.c | 8 +++
> > tools/perf/util/env.c | 114 +++++++++++++++++++++++++++++++++++++++
> > tools/perf/util/env.h | 1 +
> > 3 files changed, 123 insertions(+)
> >
> > diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
> > index 1061f4eebc3f..c2688e4ef3c4 100644
> > --- a/tools/perf/builtin-top.c
> > +++ b/tools/perf/builtin-top.c
> > @@ -1729,6 +1729,14 @@ int cmd_top(int argc, const char **argv)
> > if (opts->branch_stack && callchain_param.enabled)
> > symbol_conf.show_branchflag_count = true;
> >
> > + if (opts->branch_stack) {
> > + status = perf_env__read_core_pmu_caps(&perf_env);
> > + if (status) {
> > + pr_err("PMU capability data is not available\n");
> > + goto out_delete_evlist;
> > + }
> > + }
> > +
> > sort__mode = SORT_MODE__TOP;
> > /* display thread wants entries to be collapsed in a different tree */
> > perf_hpp_list.need_collapse = 1;
> > diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
> > index 36411749e007..37ed6dc52cf3 100644
> > --- a/tools/perf/util/env.c
> > +++ b/tools/perf/util/env.c
> > @@ -416,6 +416,120 @@ static int perf_env__read_nr_cpus_avail(struct perf_env *env)
> > return env->nr_cpus_avail ? 0 : -ENOENT;
> > }
> >
> > +static int __perf_env__read_core_pmu_caps(struct perf_pmu *pmu,
> > + int *nr_caps, char ***caps,
> > + unsigned int *max_branches,
> > + unsigned int *br_cntr_nr,
> > + unsigned int *br_cntr_width)
> > +{
> > + struct perf_pmu_caps *pcaps = NULL;
> > + char *ptr, **tmp;
> > + int ret = 0;
> > +
> > + *nr_caps = 0;
> > + *caps = NULL;
> > +
> > + if (!pmu->nr_caps)
> > + return 0;
> > +
> > + *caps = zalloc(sizeof(char *) * pmu->nr_caps);
> > + if (!*caps)
> > + return -ENOMEM;
> > +
> > + tmp = *caps;
> > + list_for_each_entry(pcaps, &pmu->caps, list) {
> > +
> > + if (asprintf(&ptr, "%s=%s", pcaps->name, pcaps->value) < 0) {
> > + ret = -ENOMEM;
> > + goto error;
> > + }
> > +
> > + *tmp++ = ptr;
> > +
> > + if (!strcmp(pcaps->name, "branches"))
> > + *max_branches = atoi(pcaps->value);
> > +
> > + if (!strcmp(pcaps->name, "branch_counter_nr"))
> > + *br_cntr_nr = atoi(pcaps->value);
> > +
> > + if (!strcmp(pcaps->name, "branch_counter_width"))
> > + *br_cntr_width = atoi(pcaps->value);
> > + }
> > + *nr_caps = pmu->nr_caps;
> > + return 0;
> > +error:
> > + while (tmp-- != *caps)
> > + free(*tmp);
> > + free(*caps);
> > + *caps = NULL;
> > + *nr_caps = 0;
> > + return ret;
> > +}
> > +
> > +int perf_env__read_core_pmu_caps(struct perf_env *env)
> > +{
> > + struct perf_pmu *pmu = NULL;
> > + struct pmu_caps *pmu_caps;
> > + int nr_pmu = 0, i = 0, j;
> > + int ret;
> > +
> > + nr_pmu = perf_pmus__num_core_pmus();
> > +
> > + if (!nr_pmu)
> > + return -ENODEV;
> > +
> > + if (nr_pmu == 1) {
> > + pmu = perf_pmus__scan_core(NULL);
> > + if (!pmu)
> > + return -ENODEV;
> > + ret = perf_pmu__caps_parse(pmu);
> > + if (ret < 0)
> > + return ret;
> > + return __perf_env__read_core_pmu_caps(pmu, &env->nr_cpu_pmu_caps,
> > + &env->cpu_pmu_caps,
> > + &env->max_branches,
> > + &env->br_cntr_nr,
> > + &env->br_cntr_width);
> > + }
> > +
> > + pmu_caps = zalloc(sizeof(*pmu_caps) * nr_pmu);
> > + if (!pmu_caps)
> > + return -ENOMEM;
> > +
> > + while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
> > + if (perf_pmu__caps_parse(pmu) <= 0)
> > + continue;
> > + ret = __perf_env__read_core_pmu_caps(pmu, &pmu_caps[i].nr_caps,
> > + &pmu_caps[i].caps,
> > + &pmu_caps[i].max_branches,
> > + &pmu_caps[i].br_cntr_nr,
> > + &pmu_caps[i].br_cntr_width);
> > + if (ret)
> > + goto error;
> > +
> > + pmu_caps[i].pmu_name = strdup(pmu->name);
> > + if (!pmu_caps[i].pmu_name) {
> > + ret = -ENOMEM;
> > + goto error;
> > + }
> > + i++;
> > + }
> > +
> > + env->nr_pmus_with_caps = nr_pmu;
> > + env->pmu_caps = pmu_caps;
> > +
> > + return 0;
> > +error:
> > + for (i = 0; i < nr_pmu; i++) {
> > + for (j = 0; j < pmu_caps[i].nr_caps; j++)
> > + free(pmu_caps[i].caps[j]);
> > + free(pmu_caps[i].caps);
> > + free(pmu_caps[i].pmu_name);
> > + }
> > + free(pmu_caps);
> > + return ret;
> > +}
> > +
> > const char *perf_env__raw_arch(struct perf_env *env)
> > {
> > return env && !perf_env__read_arch(env) ? env->arch : "unknown";
> > diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
> > index d90e343cf1fa..135a1f714905 100644
> > --- a/tools/perf/util/env.h
> > +++ b/tools/perf/util/env.h
> > @@ -152,6 +152,7 @@ struct btf_node;
> >
> > extern struct perf_env perf_env;
> >
> > +int perf_env__read_core_pmu_caps(struct perf_env *env);
> > void perf_env__exit(struct perf_env *env);
> >
> > int perf_env__kernel_is_64_bit(struct perf_env *env);
> > --
> > 2.49.0
> >
On Thu, 2025-05-01 at 20:46 +0000, Falcon, Thomas wrote:
> On Thu, 2025-05-01 at 13:00 -0700, Ian Rogers wrote:
> > On Thu, May 1, 2025 at 11:42 AM Thomas Falcon <thomas.falcon@intel.com> wrote:
> > >
> > > Calling perf top with brach filters enabled on Intel hybrid CPU's
> > > with branch counter event logging support results in a segfault.
> > >
> > > $ ./perf top -e '{cpu_core/cpu-cycles/,cpu_core/event=0xc6,umask=0x3,\
> > > frontend=0x11,name=frontend_retired_dsb_miss/}' -j any,counter
> > > perf: Segmentation fault
> > > -------- backtrace --------
> > > ./perf() [0x55f460]
> > > /lib64/libc.so.6(+0x1a050) [0x7fd8be227050]
> > > ./perf() [0x57b4a7]
> > > ./perf() [0x561e5a]
> > > ./perf() [0x604a81]
> > > ./perf() [0x4395b5]
> > > ./perf() [0x601732]
> > > ./perf() [0x439bc1]
> > > ./perf() [0x5d35b3]
> > > ./perf() [0x43936c]
> > > /lib64/libc.so.6(+0x70ba8) [0x7fd8be27dba8]
> > > /lib64/libc.so.6(+0xf4b8c) [0x7fd8be301b8c]
> >
>
> Hi Ian, thanks for reviewing.
>
> > Thanks Thomas. Could you generate this backtrace in GDB? I did write a
> > patch to symbolize backtraces like this:
> > https://lore.kernel.org/lkml/20250313052952.871958-2-irogers@google.com/
> > Sadly without any reviewed tags and unmerged - the code calls routines
> > that malloc so it isn't strictly sound if say the backtrace was needed
> > from a SEGV in the malloc implementation, it is nicely
> > self-referencing the perf APIs, ..
>
> Sorry about that, here is the backtrace I'm seeing when running the perf top command in gdb:
>
> Thread 27 "perf" received signal SIGSEGV, Segmentation fault.
>
> [Switching to Thread 0x7fffcb7fe6c0 (LWP 812169)]
> 0x000000000057b4a7 in perf_env.find_br_cntr_info ()
> (gdb) backtrace
> #0 0x000000000057b4a7 in perf_env.find_br_cntr_info ()
> #1 0x0000000000561e5a in addr_map_symbol.account_cycles ()
> #2 0x0000000000604a81 in hist.account_cycles ()
> #3 0x00000000004395b5 in hist_iter.top_callback ()
> #4 0x0000000000601732 in hist_entry_iter.add ()
> #5 0x0000000000439bc1 in deliver_event ()
> #6 0x00000000005d35b3 in __ordered_events__flush ()
> #7 0x000000000043936c in process_thread ()
> #8 0x00007ffff6e7dba8 in start_thread (arg=<optimized out>) at pthread_create.c:448
> #9 0x00007ffff6f01b8c in __GI___clone3 () at ../sysdeps/unix/sysv/linux/x86_64/clone3.S:78
>
> Thanks,
> Tom
>
Sorry, let me try this again...
Thread 27 "perf" received signal SIGSEGV, Segmentation fault.
[Switching to Thread 0x7fffcf7fe6c0 (LWP 940046)]
perf_env__find_br_cntr_info (env=0xf328c0 <perf_env>, nr=0x0, width=0x7fffcf7fd2c0) at util/env.c:653
653 *width = env->cpu_pmu_caps ? env->br_cntr_width :
(gdb) bt
#0 perf_env__find_br_cntr_info (env=0xf328c0 <perf_env>, nr=0x0, width=0x7fffcf7fd2c0) at util/env.c:653
#1 0x00000000005ad829 in symbol__account_br_cntr (branch=0x7fffd11f9c00, evsel=0xfae480, offset=20, br_cntr=4) at util/annotate.c:345
#2 0x00000000005ada8b in symbol__account_cycles (addr=5580436, start=5580433, sym=0x7fffd00d3010, cycles=1, evsel=0xfae480, br_cntr=4) at util/annotate.c:389
#3 0x00000000005adc06 in addr_map_symbol__account_cycles (ams=0x7fffd17b1e20, start=0x7fffd17b1f00, cycles=1, evsel=0xfae480, br_cntr=4) at util/annotate.c:422
#4 0x0000000000688ab4 in hist__account_cycles (bs=0x10cbaa8, al=0x7fffcf7fd540, sample=0x7fffcf7fd760, nonany_branch_mode=false, total_cycles=0x0, evsel=0xfae480) at util/hist.c:2774
#5 0x0000000000446004 in hist_iter__top_callback (iter=0x7fffcf7fd590, al=0x7fffcf7fd540, single=true, arg=0x7fffffff9de0) at builtin-top.c:737
#6 0x0000000000684d2a in hist_entry_iter__add (iter=0x7fffcf7fd590, al=0x7fffcf7fd540, max_stack_depth=127, arg=0x7fffffff9de0) at util/hist.c:1291
#7 0x00000000004464fe in perf_event__process_sample (tool=0x7fffffff9de0, event=0x10cba70, evsel=0xfae480, sample=0x7fffcf7fd760, machine=0x105ec68) at builtin-top.c:845
#8 0x0000000000447523 in deliver_event (qe=0x7fffffffa0f8, qevent=0x10cdd60) at builtin-top.c:1211
#9 0x0000000000648aff in do_flush (oe=0x7fffffffa0f8, show_progress=false) at util/ordered-events.c:245
#10 0x0000000000648e56 in __ordered_events__flush (oe=0x7fffffffa0f8, how=OE_FLUSH__TOP, timestamp=0) at util/ordered-events.c:324
#11 0x0000000000648f40 in ordered_events__flush (oe=0x7fffffffa0f8, how=OE_FLUSH__TOP) at util/ordered-events.c:342
#12 0x0000000000447097 in process_thread (arg=0x7fffffff9de0) at builtin-top.c:1120
#13 0x00007ffff6e7dba8 in start_thread (arg=<optimized out>) at pthread_create.c:448
#14 0x00007ffff6f01b8c in __GI___clone3 () at ../sysdeps/unix/sysv/linux/x86_64/clone3.S:78
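For context, the statement at util/env.c:653 picks between the homogeneous
and hybrid capability fields, so with neither populated the hybrid branch
dereferences a NULL env->pmu_caps. Roughly (paraphrased, the code in the
tree may differ slightly):
```c
/*
 * Paraphrased from util/env.c: under perf top on a hybrid system neither
 * env->cpu_pmu_caps nor env->pmu_caps has been populated, so the
 * env->pmu_caps->... branch dereferences a NULL pointer.
 */
void perf_env__find_br_cntr_info(struct perf_env *env,
				 unsigned int *nr,
				 unsigned int *width)
{
	if (nr)
		*nr = env->cpu_pmu_caps ? env->br_cntr_nr :
					  env->pmu_caps->br_cntr_nr;

	if (width)
		*width = env->cpu_pmu_caps ? env->br_cntr_width :
					     env->pmu_caps->br_cntr_width;
}
```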
> > ```
> > status = perf_env__read_cpuid(&perf_env);
> > ```
> >
> > I kind of wish we didn't have the global one as what's the deal with
> > ownership with it.
> >
> > > Fix this by populating cpu_pmu_caps and pmu_caps structures with
> > > values from sysfs when calling perf top with branch stack sampling
> > > enabled.
> >
> > I wonder if we could encounter similar problems from say a perf script
> > handling live data and so some kind of lazy initialization should be
> > employed. It is hard to say without seeing the backtrace.
> >
> > Thanks,
> > Ian
> >
> > > Signed-off-by: Thomas Falcon <thomas.falcon@intel.com>
> > > ---
> > > tools/perf/builtin-top.c | 8 +++
> > > tools/perf/util/env.c | 114 +++++++++++++++++++++++++++++++++++++++
> > > tools/perf/util/env.h | 1 +
> > > 3 files changed, 123 insertions(+)
> > >
> > > diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
> > > index 1061f4eebc3f..c2688e4ef3c4 100644
> > > --- a/tools/perf/builtin-top.c
> > > +++ b/tools/perf/builtin-top.c
> > > @@ -1729,6 +1729,14 @@ int cmd_top(int argc, const char **argv)
> > > if (opts->branch_stack && callchain_param.enabled)
> > > symbol_conf.show_branchflag_count = true;
> > >
> > > + if (opts->branch_stack) {
> > > + status = perf_env__read_core_pmu_caps(&perf_env);
> > > + if (status) {
> > > + pr_err("PMU capability data is not available\n");
> > > + goto out_delete_evlist;
> > > + }
> > > + }
> > > +
> > > sort__mode = SORT_MODE__TOP;
> > > /* display thread wants entries to be collapsed in a different tree */
> > > perf_hpp_list.need_collapse = 1;
> > > diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
> > > index 36411749e007..37ed6dc52cf3 100644
> > > --- a/tools/perf/util/env.c
> > > +++ b/tools/perf/util/env.c
> > > @@ -416,6 +416,120 @@ static int perf_env__read_nr_cpus_avail(struct perf_env *env)
> > > return env->nr_cpus_avail ? 0 : -ENOENT;
> > > }
> > >
> > > +static int __perf_env__read_core_pmu_caps(struct perf_pmu *pmu,
> > > + int *nr_caps, char ***caps,
> > > + unsigned int *max_branches,
> > > + unsigned int *br_cntr_nr,
> > > + unsigned int *br_cntr_width)
> > > +{
> > > + struct perf_pmu_caps *pcaps = NULL;
> > > + char *ptr, **tmp;
> > > + int ret = 0;
> > > +
> > > + *nr_caps = 0;
> > > + *caps = NULL;
> > > +
> > > + if (!pmu->nr_caps)
> > > + return 0;
> > > +
> > > + *caps = zalloc(sizeof(char *) * pmu->nr_caps);
> > > + if (!*caps)
> > > + return -ENOMEM;
> > > +
> > > + tmp = *caps;
> > > + list_for_each_entry(pcaps, &pmu->caps, list) {
> > > +
> > > + if (asprintf(&ptr, "%s=%s", pcaps->name, pcaps->value) < 0) {
> > > + ret = -ENOMEM;
> > > + goto error;
> > > + }
> > > +
> > > + *tmp++ = ptr;
> > > +
> > > + if (!strcmp(pcaps->name, "branches"))
> > > + *max_branches = atoi(pcaps->value);
> > > +
> > > + if (!strcmp(pcaps->name, "branch_counter_nr"))
> > > + *br_cntr_nr = atoi(pcaps->value);
> > > +
> > > + if (!strcmp(pcaps->name, "branch_counter_width"))
> > > + *br_cntr_width = atoi(pcaps->value);
> > > + }
> > > + *nr_caps = pmu->nr_caps;
> > > + return 0;
> > > +error:
> > > + while (tmp-- != *caps)
> > > + free(*tmp);
> > > + free(*caps);
> > > + *caps = NULL;
> > > + *nr_caps = 0;
> > > + return ret;
> > > +}
> > > +
> > > +int perf_env__read_core_pmu_caps(struct perf_env *env)
> > > +{
> > > + struct perf_pmu *pmu = NULL;
> > > + struct pmu_caps *pmu_caps;
> > > + int nr_pmu = 0, i = 0, j;
> > > + int ret;
> > > +
> > > + nr_pmu = perf_pmus__num_core_pmus();
> > > +
> > > + if (!nr_pmu)
> > > + return -ENODEV;
> > > +
> > > + if (nr_pmu == 1) {
> > > + pmu = perf_pmus__scan_core(NULL);
> > > + if (!pmu)
> > > + return -ENODEV;
> > > + ret = perf_pmu__caps_parse(pmu);
> > > + if (ret < 0)
> > > + return ret;
> > > + return __perf_env__read_core_pmu_caps(pmu, &env->nr_cpu_pmu_caps,
> > > + &env->cpu_pmu_caps,
> > > + &env->max_branches,
> > > + &env->br_cntr_nr,
> > > + &env->br_cntr_width);
> > > + }
> > > +
> > > + pmu_caps = zalloc(sizeof(*pmu_caps) * nr_pmu);
> > > + if (!pmu_caps)
> > > + return -ENOMEM;
> > > +
> > > + while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
> > > + if (perf_pmu__caps_parse(pmu) <= 0)
> > > + continue;
> > > + ret = __perf_env__read_core_pmu_caps(pmu, &pmu_caps[i].nr_caps,
> > > + &pmu_caps[i].caps,
> > > + &pmu_caps[i].max_branches,
> > > + &pmu_caps[i].br_cntr_nr,
> > > + &pmu_caps[i].br_cntr_width);
> > > + if (ret)
> > > + goto error;
> > > +
> > > + pmu_caps[i].pmu_name = strdup(pmu->name);
> > > + if (!pmu_caps[i].pmu_name) {
> > > + ret = -ENOMEM;
> > > + goto error;
> > > + }
> > > + i++;
> > > + }
> > > +
> > > + env->nr_pmus_with_caps = nr_pmu;
> > > + env->pmu_caps = pmu_caps;
> > > +
> > > + return 0;
> > > +error:
> > > + for (i = 0; i < nr_pmu; i++) {
> > > + for (j = 0; j < pmu_caps[i].nr_caps; j++)
> > > + free(pmu_caps[i].caps[j]);
> > > + free(pmu_caps[i].caps);
> > > + free(pmu_caps[i].pmu_name);
> > > + }
> > > + free(pmu_caps);
> > > + return ret;
> > > +}
> > > +
> > > const char *perf_env__raw_arch(struct perf_env *env)
> > > {
> > > return env && !perf_env__read_arch(env) ? env->arch : "unknown";
> > > diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
> > > index d90e343cf1fa..135a1f714905 100644
> > > --- a/tools/perf/util/env.h
> > > +++ b/tools/perf/util/env.h
> > > @@ -152,6 +152,7 @@ struct btf_node;
> > >
> > > extern struct perf_env perf_env;
> > >
> > > +int perf_env__read_core_pmu_caps(struct perf_env *env);
> > > void perf_env__exit(struct perf_env *env);
> > >
> > > int perf_env__kernel_is_64_bit(struct perf_env *env);
> > > --
> > > 2.49.0
> > >
>
On Tue, May 06, 2025 at 04:09:37PM +0000, Falcon, Thomas wrote:
> On Thu, 2025-05-01 at 20:46 +0000, Falcon, Thomas wrote:
> > On Thu, 2025-05-01 at 13:00 -0700, Ian Rogers wrote:
> > > On Thu, May 1, 2025 at 11:42 AM Thomas Falcon <thomas.falcon@intel.com> wrote:
> > > > Calling perf top with brach filters enabled on Intel hybrid CPU's
> > > > with branch counter event logging support results in a segfault.
> > > > $ ./perf top -e '{cpu_core/cpu-cycles/,cpu_core/event=0xc6,umask=0x3,\
> > > > frontend=0x11,name=frontend_retired_dsb_miss/}' -j any,counter
> > > > perf: Segmentation fault
> > > > -------- backtrace --------
> > > > ./perf() [0x55f460]
> > > > /lib64/libc.so.6(+0x1a050) [0x7fd8be227050]
> > > > ./perf() [0x57b4a7]
> > > > ./perf() [0x561e5a]
> > > > ./perf() [0x604a81]
> > > > ./perf() [0x4395b5]
> > > > ./perf() [0x601732]
> > > > ./perf() [0x439bc1]
> > > > ./perf() [0x5d35b3]
> > > > ./perf() [0x43936c]
> > > > /lib64/libc.so.6(+0x70ba8) [0x7fd8be27dba8]
> > > > /lib64/libc.so.6(+0xf4b8c) [0x7fd8be301b8c]
> > Hi Ian, thanks for reviewing.
> > > Thanks Thomas. Could you generate this backtrace in GDB? I did write a
> > > patch to symbolize backtraces like this:
> > > https://lore.kernel.org/lkml/20250313052952.871958-2-irogers@google.com/
> > > Sadly without any reviewed tags and unmerged - the code calls routines
> > > that malloc so it isn't strictly sound if say the backtrace was needed
> > > from a SEGV in the malloc implementation, it is nicely
> > > self-referencing the perf APIs, ..
> > Sorry about that, here is the backtrace I'm seeing when running the perf top command in gdb:
> > Thread 27 "perf" received signal SIGSEGV, Segmentation fault.
> > [Switching to Thread 0x7fffcb7fe6c0 (LWP 812169)]
> > 0x000000000057b4a7 in perf_env.find_br_cntr_info ()
> > (gdb) backtrace
> > #0 0x000000000057b4a7 in perf_env.find_br_cntr_info ()
> > #1 0x0000000000561e5a in addr_map_symbol.account_cycles ()
> > #2 0x0000000000604a81 in hist.account_cycles ()
> > #3 0x00000000004395b5 in hist_iter.top_callback ()
> > #4 0x0000000000601732 in hist_entry_iter.add ()
> > #5 0x0000000000439bc1 in deliver_event ()
> > #6 0x00000000005d35b3 in __ordered_events__flush ()
> > #7 0x000000000043936c in process_thread ()
> > #8 0x00007ffff6e7dba8 in start_thread (arg=<optimized out>) at pthread_create.c:448
> > #9 0x00007ffff6f01b8c in __GI___clone3 () at ../sysdeps/unix/sysv/linux/x86_64/clone3.S:78
> Sorry, let me try this again...
> Thread 27 "perf" received signal SIGSEGV, Segmentation fault.
> [Switching to Thread 0x7fffcf7fe6c0 (LWP 940046)]
> perf_env__find_br_cntr_info (env=0xf328c0 <perf_env>, nr=0x0, width=0x7fffcf7fd2c0) at util/env.c:653
> 653 *width = env->cpu_pmu_caps ? env->br_cntr_width :
> (gdb) bt
> #0 perf_env__find_br_cntr_info (env=0xf328c0 <perf_env>, nr=0x0, width=0x7fffcf7fd2c0) at util/env.c:653
> #1 0x00000000005ad829 in symbol__account_br_cntr (branch=0x7fffd11f9c00, evsel=0xfae480, offset=20, br_cntr=4) at util/annotate.c:345
> #2 0x00000000005ada8b in symbol__account_cycles (addr=5580436, start=5580433, sym=0x7fffd00d3010, cycles=1, evsel=0xfae480, br_cntr=4) at util/annotate.c:389
> #3 0x00000000005adc06 in addr_map_symbol__account_cycles (ams=0x7fffd17b1e20, start=0x7fffd17b1f00, cycles=1, evsel=0xfae480, br_cntr=4) at util/annotate.c:422
> #4 0x0000000000688ab4 in hist__account_cycles (bs=0x10cbaa8, al=0x7fffcf7fd540, sample=0x7fffcf7fd760, nonany_branch_mode=false, total_cycles=0x0, evsel=0xfae480) at util/hist.c:2774
> #5 0x0000000000446004 in hist_iter__top_callback (iter=0x7fffcf7fd590, al=0x7fffcf7fd540, single=true, arg=0x7fffffff9de0) at builtin-top.c:737
> #6 0x0000000000684d2a in hist_entry_iter__add (iter=0x7fffcf7fd590, al=0x7fffcf7fd540, max_stack_depth=127, arg=0x7fffffff9de0) at util/hist.c:1291
> #7 0x00000000004464fe in perf_event__process_sample (tool=0x7fffffff9de0, event=0x10cba70, evsel=0xfae480, sample=0x7fffcf7fd760, machine=0x105ec68) at builtin-top.c:845
> #8 0x0000000000447523 in deliver_event (qe=0x7fffffffa0f8, qevent=0x10cdd60) at builtin-top.c:1211
> #9 0x0000000000648aff in do_flush (oe=0x7fffffffa0f8, show_progress=false) at util/ordered-events.c:245
> #10 0x0000000000648e56 in __ordered_events__flush (oe=0x7fffffffa0f8, how=OE_FLUSH__TOP, timestamp=0) at util/ordered-events.c:324
> #11 0x0000000000648f40 in ordered_events__flush (oe=0x7fffffffa0f8, how=OE_FLUSH__TOP) at util/ordered-events.c:342
> #12 0x0000000000447097 in process_thread (arg=0x7fffffff9de0) at builtin-top.c:1120
> #13 0x00007ffff6e7dba8 in start_thread (arg=<optimized out>) at pthread_create.c:448
> #14 0x00007ffff6f01b8c in __GI___clone3 () at ../sysdeps/unix/sysv/linux/x86_64/clone3.S:78
I'll test on a 14700 later today, but on this one it is working:
root@x1:~# grep -m1 "model name" /proc/cpuinfo
model name : 13th Gen Intel(R) Core(TM) i7-1365U
root@x1:~# perf -vv
perf version 6.15.rc2.g8feafba59c51
aio: [ on ] # HAVE_AIO_SUPPORT
bpf: [ on ] # HAVE_LIBBPF_SUPPORT
bpf_skeletons: [ on ] # HAVE_BPF_SKEL
debuginfod: [ on ] # HAVE_DEBUGINFOD_SUPPORT
dwarf: [ on ] # HAVE_LIBDW_SUPPORT
dwarf_getlocations: [ on ] # HAVE_LIBDW_SUPPORT
dwarf-unwind: [ on ] # HAVE_DWARF_UNWIND_SUPPORT
auxtrace: [ on ] # HAVE_AUXTRACE_SUPPORT
libbfd: [ OFF ] # HAVE_LIBBFD_SUPPORT ( tip: Deprecated, license incompatibility, use BUILD_NONDISTRO=1 and install binutils-dev[el] )
libcapstone: [ on ] # HAVE_LIBCAPSTONE_SUPPORT
libcrypto: [ on ] # HAVE_LIBCRYPTO_SUPPORT
libdw-dwarf-unwind: [ on ] # HAVE_LIBDW_SUPPORT
libelf: [ on ] # HAVE_LIBELF_SUPPORT
libnuma: [ on ] # HAVE_LIBNUMA_SUPPORT
libopencsd: [ on ] # HAVE_CSTRACE_SUPPORT
libperl: [ on ] # HAVE_LIBPERL_SUPPORT
libpfm4: [ on ] # HAVE_LIBPFM
libpython: [ on ] # HAVE_LIBPYTHON_SUPPORT
libslang: [ on ] # HAVE_SLANG_SUPPORT
libtraceevent: [ on ] # HAVE_LIBTRACEEVENT
libunwind: [ OFF ] # HAVE_LIBUNWIND_SUPPORT ( tip: Deprecated, use LIBUNWIND=1 and install libunwind-dev[el] to build with it )
lzma: [ on ] # HAVE_LZMA_SUPPORT
numa_num_possible_cpus: [ on ] # HAVE_LIBNUMA_SUPPORT
zlib: [ on ] # HAVE_ZLIB_SUPPORT
zstd: [ on ] # HAVE_ZSTD_SUPPORT
root@x1:~# perf top -e '{cpu_core/cpu-cycles/,cpu_core/event=0xc6,umask=0x3,frontend=0x11,name=frontend_retired_dsb_miss/}' -j any,counte
With what is in perf-tools-next/perf-tools-next:
⬢ [acme@toolbx perf-tools-next]$ git log --oneline -10
8feafba59c510be3 (HEAD -> perf-tools-next, x1/perf-tools-next, x1/HEAD) perf test: Add direct off-cpu tests
9557c000768741bb perf record --off-cpu: Add --off-cpu-thresh option
74069a01609ef0f4 perf record --off-cpu: Dump the remaining PERF_SAMPLE_ in sample_type from BPF's stack trace map
8ae7a5769b0a3ac2 perf script: Display off-cpu samples correctly
7de1a87f1ee75743 perf record --off-cpu: Disable perf_event's callchain collection
7f8f56475d585117 perf evsel: Assemble off-cpu samples
d6948f2af24e04ea perf record --off-cpu: Dump off-cpu samples in BPF
282c195906c76ddf perf record --off-cpu: Preparation of off-cpu BPF program
0f72027bb9fb77a2 perf record --off-cpu: Parse off-cpu event
671e943452b18001 perf evsel: Expose evsel__is_offcpu_event() for future use
⬢ [acme@toolbx perf-tools-next]$
- Arnaldo
On Tue, 2025-05-06 at 13:56 -0300, Arnaldo Carvalho de Melo wrote:
> On Tue, May 06, 2025 at 04:09:37PM +0000, Falcon, Thomas wrote:
> > On Thu, 2025-05-01 at 20:46 +0000, Falcon, Thomas wrote:
> > > On Thu, 2025-05-01 at 13:00 -0700, Ian Rogers wrote:
> > > > On Thu, May 1, 2025 at 11:42 AM Thomas Falcon <thomas.falcon@intel.com> wrote:
> > > > > Calling perf top with brach filters enabled on Intel hybrid CPU's
> > > > > with branch counter event logging support results in a segfault.
>
> > > > > $ ./perf top -e '{cpu_core/cpu-cycles/,cpu_core/event=0xc6,umask=0x3,\
> > > > > frontend=0x11,name=frontend_retired_dsb_miss/}' -j any,counter
> > > > > perf: Segmentation fault
> > > > > -------- backtrace --------
> > > > > ./perf() [0x55f460]
> > > > > /lib64/libc.so.6(+0x1a050) [0x7fd8be227050]
> > > > > ./perf() [0x57b4a7]
> > > > > ./perf() [0x561e5a]
> > > > > ./perf() [0x604a81]
> > > > > ./perf() [0x4395b5]
> > > > > ./perf() [0x601732]
> > > > > ./perf() [0x439bc1]
> > > > > ./perf() [0x5d35b3]
> > > > > ./perf() [0x43936c]
> > > > > /lib64/libc.so.6(+0x70ba8) [0x7fd8be27dba8]
> > > > > /lib64/libc.so.6(+0xf4b8c) [0x7fd8be301b8c]
>
> > > Hi Ian, thanks for reviewing.
>
> > > > Thanks Thomas. Could you generate this backtrace in GDB? I did write a
> > > > patch to symbolize backtraces like this:
> > > > https://lore.kernel.org/lkml/20250313052952.871958-2-irogers@google.com/
> > > > Sadly without any reviewed tags and unmerged - the code calls routines
> > > > that malloc so it isn't strictly sound if say the backtrace was needed
> > > > from a SEGV in the malloc implementation, it is nicely
> > > > self-referencing the perf APIs, ..
>
> > > Sorry about that, here is the backtrace I'm seeing when running the perf top command in gdb:
>
> > > Thread 27 "perf" received signal SIGSEGV, Segmentation fault.
>
> > > [Switching to Thread 0x7fffcb7fe6c0 (LWP 812169)]
> > > 0x000000000057b4a7 in perf_env.find_br_cntr_info ()
> > > (gdb) backtrace
> > > #0 0x000000000057b4a7 in perf_env.find_br_cntr_info ()
> > > #1 0x0000000000561e5a in addr_map_symbol.account_cycles ()
> > > #2 0x0000000000604a81 in hist.account_cycles ()
> > > #3 0x00000000004395b5 in hist_iter.top_callback ()
> > > #4 0x0000000000601732 in hist_entry_iter.add ()
> > > #5 0x0000000000439bc1 in deliver_event ()
> > > #6 0x00000000005d35b3 in __ordered_events__flush ()
> > > #7 0x000000000043936c in process_thread ()
> > > #8 0x00007ffff6e7dba8 in start_thread (arg=<optimized out>) at pthread_create.c:448
> > > #9 0x00007ffff6f01b8c in __GI___clone3 () at ../sysdeps/unix/sysv/linux/x86_64/clone3.S:78
>
> > Sorry, let me try this again...
>
> > Thread 27 "perf" received signal SIGSEGV, Segmentation fault.
> > [Switching to Thread 0x7fffcf7fe6c0 (LWP 940046)]
> > perf_env__find_br_cntr_info (env=0xf328c0 <perf_env>, nr=0x0, width=0x7fffcf7fd2c0) at util/env.c:653
> > 653 *width = env->cpu_pmu_caps ? env->br_cntr_width :
> > (gdb) bt
> > #0 perf_env__find_br_cntr_info (env=0xf328c0 <perf_env>, nr=0x0, width=0x7fffcf7fd2c0) at util/env.c:653
> > #1 0x00000000005ad829 in symbol__account_br_cntr (branch=0x7fffd11f9c00, evsel=0xfae480, offset=20, br_cntr=4) at util/annotate.c:345
> > #2 0x00000000005ada8b in symbol__account_cycles (addr=5580436, start=5580433, sym=0x7fffd00d3010, cycles=1, evsel=0xfae480, br_cntr=4) at util/annotate.c:389
> > #3 0x00000000005adc06 in addr_map_symbol__account_cycles (ams=0x7fffd17b1e20, start=0x7fffd17b1f00, cycles=1, evsel=0xfae480, br_cntr=4) at util/annotate.c:422
> > #4 0x0000000000688ab4 in hist__account_cycles (bs=0x10cbaa8, al=0x7fffcf7fd540, sample=0x7fffcf7fd760, nonany_branch_mode=false, total_cycles=0x0, evsel=0xfae480) at util/hist.c:2774
> > #5 0x0000000000446004 in hist_iter__top_callback (iter=0x7fffcf7fd590, al=0x7fffcf7fd540, single=true, arg=0x7fffffff9de0) at builtin-top.c:737
> > #6 0x0000000000684d2a in hist_entry_iter__add (iter=0x7fffcf7fd590, al=0x7fffcf7fd540, max_stack_depth=127, arg=0x7fffffff9de0) at util/hist.c:1291
> > #7 0x00000000004464fe in perf_event__process_sample (tool=0x7fffffff9de0, event=0x10cba70, evsel=0xfae480, sample=0x7fffcf7fd760, machine=0x105ec68) at builtin-top.c:845
> > #8 0x0000000000447523 in deliver_event (qe=0x7fffffffa0f8, qevent=0x10cdd60) at builtin-top.c:1211
> > #9 0x0000000000648aff in do_flush (oe=0x7fffffffa0f8, show_progress=false) at util/ordered-events.c:245
> > #10 0x0000000000648e56 in __ordered_events__flush (oe=0x7fffffffa0f8, how=OE_FLUSH__TOP, timestamp=0) at util/ordered-events.c:324
> > #11 0x0000000000648f40 in ordered_events__flush (oe=0x7fffffffa0f8, how=OE_FLUSH__TOP) at util/ordered-events.c:342
> > #12 0x0000000000447097 in process_thread (arg=0x7fffffff9de0) at builtin-top.c:1120
> > #13 0x00007ffff6e7dba8 in start_thread (arg=<optimized out>) at pthread_create.c:448
> > #14 0x00007ffff6f01b8c in __GI___clone3 () at ../sysdeps/unix/sysv/linux/x86_64/clone3.S:78
>
> I'll test on a 14700 later today, but on this one it is working:
>
> root@x1:~# grep -m1 "model name" /proc/cpuinfo
> model name : 13th Gen Intel(R) Core(TM) i7-1365U
> root@x1:~# perf -vv
> perf version 6.15.rc2.g8feafba59c51
> aio: [ on ] # HAVE_AIO_SUPPORT
> bpf: [ on ] # HAVE_LIBBPF_SUPPORT
> bpf_skeletons: [ on ] # HAVE_BPF_SKEL
> debuginfod: [ on ] # HAVE_DEBUGINFOD_SUPPORT
> dwarf: [ on ] # HAVE_LIBDW_SUPPORT
> dwarf_getlocations: [ on ] # HAVE_LIBDW_SUPPORT
> dwarf-unwind: [ on ] # HAVE_DWARF_UNWIND_SUPPORT
> auxtrace: [ on ] # HAVE_AUXTRACE_SUPPORT
> libbfd: [ OFF ] # HAVE_LIBBFD_SUPPORT ( tip: Deprecated, license incompatibility, use BUILD_NONDISTRO=1 and install binutils-dev[el] )
> libcapstone: [ on ] # HAVE_LIBCAPSTONE_SUPPORT
> libcrypto: [ on ] # HAVE_LIBCRYPTO_SUPPORT
> libdw-dwarf-unwind: [ on ] # HAVE_LIBDW_SUPPORT
> libelf: [ on ] # HAVE_LIBELF_SUPPORT
> libnuma: [ on ] # HAVE_LIBNUMA_SUPPORT
> libopencsd: [ on ] # HAVE_CSTRACE_SUPPORT
> libperl: [ on ] # HAVE_LIBPERL_SUPPORT
> libpfm4: [ on ] # HAVE_LIBPFM
> libpython: [ on ] # HAVE_LIBPYTHON_SUPPORT
> libslang: [ on ] # HAVE_SLANG_SUPPORT
> libtraceevent: [ on ] # HAVE_LIBTRACEEVENT
> libunwind: [ OFF ] # HAVE_LIBUNWIND_SUPPORT ( tip: Deprecated, use LIBUNWIND=1 and install libunwind-dev[el] to build with it )
> lzma: [ on ] # HAVE_LZMA_SUPPORT
> numa_num_possible_cpus: [ on ] # HAVE_LIBNUMA_SUPPORT
> zlib: [ on ] # HAVE_ZLIB_SUPPORT
> zstd: [ on ] # HAVE_ZSTD_SUPPORT
> root@x1:~# perf top -e '{cpu_core/cpu-cycles/,cpu_core/event=0xc6,umask=0x3,frontend=0x11,name=frontend_retired_dsb_miss/}' -j any,counte
>
I should have been clearer in my commit message, but reproducing this
requires PMUs with "branch_counter_nr" and "branch_counter_width" capabilities.
$ ls /sys/devices/cpu_core/caps
branch_counter_nr branch_counter_width branches max_precise pmu_name
$ sudo perf top -e '{cpu_core/cpu-cycles/,cpu_core/event=0xc6,umask=0x3,frontend=0x11,name=frontend_retired_dsb_miss/}' -j any,counter
perf: Segmentation fault
-------- backtrace --------
perf(+0xfa734) [0x561b5b19f734]
/lib64/libc.so.6(+0x1a050) [0x7faa77a27050]
perf(perf_env__find_br_cntr_info+0x77) [0x561b5b1b8377]
perf(addr_map_symbol__account_cycles+0x22e) [0x561b5b1a27de]
perf(hist__account_cycles+0x241) [0x561b5b23dae1]
perf(+0x2c459) [0x561b5b0d1459]
perf(hist_entry_iter__add+0xe2) [0x561b5b239a32]
perf(+0x2ca08) [0x561b5b0d1a08]
perf(+0x168373) [0x561b5b20d373]
perf(+0x2c214) [0x561b5b0d1214]
/lib64/libc.so.6(+0x71168) [0x7faa77a7e168]
/lib64/libc.so.6(+0xf514c) [0x7faa77b0214c]
Thanks,
Tom
> 9557c000768741bb perf record --off-cpu: Add --off-cpu-thresh option
> 74069a01609ef0f4 perf record --off-cpu: Dump the remaining PERF_SAMPLE_ in sample_type from BPF's stack trace map
> 8ae7a5769b0a3ac2 perf script: Display off-cpu samples correctly
> 7de1a87f1ee75743 perf record --off-cpu: Disable perf_event's callchain collection
> 7f8f56475d585117 perf evsel: Assemble off-cpu samples
> d6948f2af24e04ea perf record --off-cpu: Dump off-cpu samples in BPF
> 282c195906c76ddf perf record --off-cpu: Preparation of off-cpu BPF program
> 0f72027bb9fb77a2 perf record --off-cpu: Parse off-cpu event
> 671e943452b18001 perf evsel: Expose evsel__is_offcpu_event() for future use
> ⬢ [acme@toolbx perf-tools-next]$
>
> - Arnaldo