Set the perf_event map in BPF for dumping off-cpu samples.
Set offcpu_thresh_ns to specify the off-CPU time threshold, in nanoseconds.
Reviewed-by: Ian Rogers <irogers@google.com>
Signed-off-by: Howard Chu <howardchu95@gmail.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@linaro.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lore.kernel.org/r/20241108204137.2444151-5-howardchu95@gmail.com
[ Added some missing iteration variables to off_cpu_config() and fixed up
a manually edited patch hunk line boundary ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
tools/perf/util/bpf_off_cpu.c | 25 +++++++++++++++++++++++++
tools/perf/util/bpf_skel/off_cpu.bpf.c | 11 +++++++++++
2 files changed, 36 insertions(+)
diff --git a/tools/perf/util/bpf_off_cpu.c b/tools/perf/util/bpf_off_cpu.c
index 558c5e5c2dc3..61729a65b529 100644
--- a/tools/perf/util/bpf_off_cpu.c
+++ b/tools/perf/util/bpf_off_cpu.c
@@ -13,6 +13,7 @@
#include "util/cgroup.h"
#include "util/strlist.h"
#include <bpf/bpf.h>
+#include <internal/xyarray.h>
#include "bpf_skel/off_cpu.skel.h"
@@ -60,6 +61,9 @@ static int off_cpu_config(struct evlist *evlist)
static void off_cpu_start(void *arg)
{
struct evlist *evlist = arg;
+ struct evsel *evsel;
+ struct perf_cpu pcpu;
+ int i;
/* update task filter for the given workload */
if (skel->rodata->has_task && skel->rodata->uses_tgid &&
@@ -73,6 +77,25 @@ static void off_cpu_start(void *arg)
bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
}
+ /* update BPF perf_event map */
+ evsel = evlist__find_evsel_by_str(evlist, OFFCPU_EVENT);
+ if (evsel == NULL) {
+ pr_err("%s evsel not found\n", OFFCPU_EVENT);
+ return;
+ }
+
+ perf_cpu_map__for_each_cpu(pcpu, i, evsel->core.cpus) {
+ int err;
+
+ err = bpf_map__update_elem(skel->maps.offcpu_output, &pcpu.cpu, sizeof(__u32),
+ xyarray__entry(evsel->core.fd, i, 0),
+ sizeof(__u32), BPF_ANY);
+ if (err) {
+ pr_err("Failed to update perf event map for direct off-cpu dumping\n");
+ return;
+ }
+ }
+
skel->bss->enabled = 1;
}
@@ -261,6 +284,8 @@ int off_cpu_prepare(struct evlist *evlist, struct target *target,
}
}
+ skel->bss->offcpu_thresh_ns = opts->off_cpu_thresh_us * 1000;
+
err = off_cpu_bpf__attach(skel);
if (err) {
pr_err("Failed to attach off-cpu BPF skeleton\n");
diff --git a/tools/perf/util/bpf_skel/off_cpu.bpf.c b/tools/perf/util/bpf_skel/off_cpu.bpf.c
index c152116df72f..c87132e01eb3 100644
--- a/tools/perf/util/bpf_skel/off_cpu.bpf.c
+++ b/tools/perf/util/bpf_skel/off_cpu.bpf.c
@@ -18,6 +18,8 @@
#define MAX_STACKS 32
#define MAX_ENTRIES 102400
+#define MAX_CPUS 4096
+
struct tstamp_data {
__u32 stack_id;
__u32 state;
@@ -39,6 +41,13 @@ struct {
__uint(max_entries, MAX_ENTRIES);
} stacks SEC(".maps");
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+ __uint(max_entries, MAX_CPUS);
+} offcpu_output SEC(".maps");
+
struct {
__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
__uint(map_flags, BPF_F_NO_PREALLOC);
@@ -97,6 +106,8 @@ const volatile bool uses_cgroup_v1 = false;
int perf_subsys_id = -1;
+__u64 offcpu_thresh_ns;
+
/*
* Old kernel used to call it task_struct->state and now it's '__state'.
* Use BPF CO-RE "ignored suffix rule" to deal with it like below:
--
2.43.0
On Tue, Nov 12, 2024 at 4:28 PM Howard Chu <howardchu95@gmail.com> wrote: > > Set the perf_event map in BPF for dumping off-cpu samples. > > Set the offcpu_thresh to specify the threshold. > > Reviewed-by: Ian Rogers <irogers@google.com> > Signed-off-by: Howard Chu <howardchu95@gmail.com> > Cc: Adrian Hunter <adrian.hunter@intel.com> > Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> > Cc: Ingo Molnar <mingo@redhat.com> > Cc: James Clark <james.clark@linaro.org> > Cc: Jiri Olsa <jolsa@kernel.org> > Cc: Kan Liang <kan.liang@linux.intel.com> > Cc: Mark Rutland <mark.rutland@arm.com> > Cc: Namhyung Kim <namhyung@kernel.org> > Cc: Peter Zijlstra <peterz@infradead.org> > Link: https://lore.kernel.org/r/20241108204137.2444151-5-howardchu95@gmail.com > [ Added some missing iteration variables to off_cpu_config() and fixed up > a manually edited patch hunk line boundary line ] > Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com> > --- > tools/perf/util/bpf_off_cpu.c | 25 +++++++++++++++++++++++++ > tools/perf/util/bpf_skel/off_cpu.bpf.c | 11 +++++++++++ > 2 files changed, 36 insertions(+) > > diff --git a/tools/perf/util/bpf_off_cpu.c b/tools/perf/util/bpf_off_cpu.c > index 558c5e5c2dc3..61729a65b529 100644 > --- a/tools/perf/util/bpf_off_cpu.c > +++ b/tools/perf/util/bpf_off_cpu.c > @@ -13,6 +13,7 @@ > #include "util/cgroup.h" > #include "util/strlist.h" > #include <bpf/bpf.h> > +#include <internal/xyarray.h> > > #include "bpf_skel/off_cpu.skel.h" > > @@ -60,6 +61,9 @@ static int off_cpu_config(struct evlist *evlist) > static void off_cpu_start(void *arg) > { > struct evlist *evlist = arg; > + struct evsel *evsel; > + struct perf_cpu pcpu; > + int i; > > /* update task filter for the given workload */ > if (skel->rodata->has_task && skel->rodata->uses_tgid && > @@ -73,6 +77,25 @@ static void off_cpu_start(void *arg) > bpf_map_update_elem(fd, &pid, &val, BPF_ANY); > } > > + /* update BPF perf_event map */ > + evsel = evlist__find_evsel_by_str(evlist, 
OFFCPU_EVENT); > + if (evsel == NULL) { > + pr_err("%s evsel not found\n", OFFCPU_EVENT); > + return; > + } > + > + perf_cpu_map__for_each_cpu(pcpu, i, evsel->core.cpus) { > + int err; > + > + err = bpf_map__update_elem(skel->maps.offcpu_output, &pcpu.cpu, sizeof(__u32), > + xyarray__entry(evsel->core.fd, i, 0), > + sizeof(__u32), BPF_ANY); > + if (err) { > + pr_err("Failed to update perf event map for direct off-cpu dumping\n"); > + return; > + } > + } > + > skel->bss->enabled = 1; > } > > @@ -261,6 +284,8 @@ int off_cpu_prepare(struct evlist *evlist, struct target *target, > } > } > > + skel->bss->offcpu_thresh_ns = opts->off_cpu_thresh_us * 1000; Thanks for the suffixes, readability++. Ian > + > err = off_cpu_bpf__attach(skel); > if (err) { > pr_err("Failed to attach off-cpu BPF skeleton\n"); > diff --git a/tools/perf/util/bpf_skel/off_cpu.bpf.c b/tools/perf/util/bpf_skel/off_cpu.bpf.c > index c152116df72f..c87132e01eb3 100644 > --- a/tools/perf/util/bpf_skel/off_cpu.bpf.c > +++ b/tools/perf/util/bpf_skel/off_cpu.bpf.c > @@ -18,6 +18,8 @@ > #define MAX_STACKS 32 > #define MAX_ENTRIES 102400 > > +#define MAX_CPUS 4096 > + > struct tstamp_data { > __u32 stack_id; > __u32 state; > @@ -39,6 +41,13 @@ struct { > __uint(max_entries, MAX_ENTRIES); > } stacks SEC(".maps"); > > +struct { > + __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); > + __uint(key_size, sizeof(__u32)); > + __uint(value_size, sizeof(__u32)); > + __uint(max_entries, MAX_CPUS); > +} offcpu_output SEC(".maps"); > + > struct { > __uint(type, BPF_MAP_TYPE_TASK_STORAGE); > __uint(map_flags, BPF_F_NO_PREALLOC); > @@ -97,6 +106,8 @@ const volatile bool uses_cgroup_v1 = false; > > int perf_subsys_id = -1; > > +__u64 offcpu_thresh_ns; > + > /* > * Old kernel used to call it task_struct->state and now it's '__state'. > * Use BPF CO-RE "ignored suffix rule" to deal with it like below: > -- > 2.43.0 >
© 2016 - 2024 Red Hat, Inc.