When reading a metric like memory bandwidth on multiple sockets, the
counters for the additional sockets will be on CPUs > 0. With
affinity-based reading, the counters on CPU 0 are read along with the
time (the duration_time tool event), and the counters on the later
sockets are read afterwards. This can lead to the later sockets
reporting a bandwidth larger than is possible for the measured period
of time. To avoid this, move the reading of tool events so that it
happens after all other events have been read.
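As a hypothetical illustration (the numbers are invented, not
measured): suppose duration_time is read on CPU 0 at t=1.00s, but the
memory controller counters on socket 1 are not read until t=1.05s
because of the per-CPU affinity iteration. Socket 1 then reports
1.05s worth of bytes divided by a 1.00s duration, inflating its
apparent bandwidth by 5%, potentially beyond what the hardware can
sustain. Reading the tool events last makes the time divisor at least
as large as any counter's measurement window.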
Signed-off-by: Ian Rogers <irogers@google.com>
---
 tools/perf/builtin-stat.c | 29 ++++++++++++++++++++++++++++-
 tools/perf/util/evlist.c  |  4 ----
 2 files changed, 28 insertions(+), 5 deletions(-)

diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 947f11b8b106..aec93b91fd11 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -379,6 +379,9 @@ static int read_counters_with_affinity(void)
 		if (evsel__is_bpf(counter))
 			continue;
 
+		if (evsel__is_tool(counter))
+			continue;
+
 		if (!counter->err)
 			counter->err = read_counter_cpu(counter, evlist_cpu_itr.cpu_map_idx);
 	}
@@ -402,6 +405,24 @@ static int read_bpf_map_counters(void)
 	return 0;
 }
 
+static int read_tool_counters(void)
+{
+	struct evsel *counter;
+
+	evlist__for_each_entry(evsel_list, counter) {
+		int idx;
+
+		if (!evsel__is_tool(counter))
+			continue;
+
+		perf_cpu_map__for_each_idx(idx, counter->core.cpus) {
+			if (!counter->err)
+				counter->err = read_counter_cpu(counter, idx);
+		}
+	}
+	return 0;
+}
+
 static int read_counters(void)
 {
 	int ret;
@@ -415,7 +436,13 @@ static int read_counters(void)
 		return ret;
 
 	// Read non-BPF and non-tool counters next.
-	return read_counters_with_affinity();
+	ret = read_counters_with_affinity();
+	if (ret)
+		return ret;
+
+	// Read the tool counters last. This way the duration_time counter
+	// should always be greater than any other counter's enabled time.
+	return read_tool_counters();
 }
 
 static void process_counters(void)
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index b6df81b8a236..fc3dae7cdfca 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -368,10 +368,6 @@ static bool evlist__use_affinity(struct evlist *evlist)
 	struct perf_cpu_map *used_cpus = NULL;
 	bool ret = false;
 
-	/*
-	 * With perf record core.user_requested_cpus is usually NULL.
-	 * Use the old method to handle this for now.
-	 */
 	if (!evlist->core.user_requested_cpus ||
 	    cpu_map__is_dummy(evlist->core.user_requested_cpus))
 		return false;
--
2.51.2.1041.gc1ab5b90ca-goog
On Thu, Nov 13, 2025 at 10:05:15AM -0800, Ian Rogers wrote:
> When reading a metric like memory bandwidth on multiple sockets, the
> counters for the additional sockets will be on CPUs > 0. With
> affinity-based reading, the counters on CPU 0 are read along with the
> time (the duration_time tool event), and the counters on the later
> sockets are read afterwards. This can lead to the later sockets
> reporting a bandwidth larger than is possible for the measured period
> of time. To avoid this, move the reading of tool events so that it
> happens after all other events have been read.
Can you move this change before the affinity updates? I think it's
straightforward and can be applied independently.
Thanks,
Namhyung
On Mon, Nov 17, 2025 at 6:35 PM Namhyung Kim <namhyung@kernel.org> wrote:
>
> On Thu, Nov 13, 2025 at 10:05:15AM -0800, Ian Rogers wrote:
> > When reading a metric like memory bandwidth on multiple sockets, the
> > counters for the additional sockets will be on CPUs > 0. With
> > affinity-based reading, the counters on CPU 0 are read along with the
> > time (the duration_time tool event), and the counters on the later
> > sockets are read afterwards. This can lead to the later sockets
> > reporting a bandwidth larger than is possible for the measured period
> > of time. To avoid this, move the reading of tool events so that it
> > happens after all other events have been read.
>
> Can you move this change before the affinity updates? I think it's
> straightforward and can be applied independently.
It is straightforward but will require changes to
read_counters_with_affinity. I can do a v5 once I know what to do with
the other patches.
Thanks,
Ian
On Mon, Nov 17, 2025 at 08:38:15PM -0800, Ian Rogers wrote:
> On Mon, Nov 17, 2025 at 6:35 PM Namhyung Kim <namhyung@kernel.org> wrote:
> >
> > On Thu, Nov 13, 2025 at 10:05:15AM -0800, Ian Rogers wrote:
> > > When reading a metric like memory bandwidth on multiple sockets, the
> > > counters for the additional sockets will be on CPUs > 0. With
> > > affinity-based reading, the counters on CPU 0 are read along with the
> > > time (the duration_time tool event), and the counters on the later
> > > sockets are read afterwards. This can lead to the later sockets
> > > reporting a bandwidth larger than is possible for the measured period
> > > of time. To avoid this, move the reading of tool events so that it
> > > happens after all other events have been read.
> >
> > Can you move this change before the affinity updates? I think it's
> > straightforward and can be applied independently.
>
> It is straightforward but will require changes to
> read_counters_with_affinity. I can do a v5 once I know what to do with
> the other patches.
I plan to merge patches 1 to 7 first. And I think it'd be good to have
this one as well.
Thanks,
Namhyung