[PATCH v7 09/10] perf record --off-cpu: Dump the remaining samples in BPF's stack trace map

Howard Chu posted 10 patches 2 weeks, 1 day ago
There is a newer version of this series
Dump the remaining samples as if they were direct samples.

Put the stack trace, tid, off-cpu time and cgroup id into the raw_data
section, just like a direct off-cpu sample coming from BPF's
bpf_perf_event_output().

This ensures that evsel__parse_sample() correctly parses both direct
samples and accumulated samples.

Suggested-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Howard Chu <howardchu95@gmail.com>
---
 tools/perf/util/bpf_off_cpu.c | 62 +++++++++++++++++++++--------------
 1 file changed, 38 insertions(+), 24 deletions(-)

diff --git a/tools/perf/util/bpf_off_cpu.c b/tools/perf/util/bpf_off_cpu.c
index cfe5b17393e9..f1be354e2fe7 100644
--- a/tools/perf/util/bpf_off_cpu.c
+++ b/tools/perf/util/bpf_off_cpu.c
@@ -37,6 +37,8 @@ union off_cpu_data {
 	u64 array[1024 / sizeof(u64)];
 };
 
+u64 off_cpu_raw[MAX_STACKS + 5];
+
 static int off_cpu_config(struct evlist *evlist)
 {
 	char off_cpu_event[64];
@@ -61,6 +63,9 @@ static int off_cpu_config(struct evlist *evlist)
 static void off_cpu_start(void *arg)
 {
 	struct evlist *evlist = arg;
+	struct evsel *evsel;
+	struct perf_cpu pcpu;
+	int i, err;
 
 	/* update task filter for the given workload */
 	if (skel->rodata->has_task && skel->rodata->uses_tgid &&
@@ -304,6 +309,7 @@ int off_cpu_write(struct perf_session *session)
 {
 	int bytes = 0, size;
 	int fd, stack;
+	u32 raw_size;
 	u64 sample_type, val, sid = 0;
 	struct evsel *evsel;
 	struct perf_data_file *file = &session->data->file;
@@ -343,46 +349,54 @@ int off_cpu_write(struct perf_session *session)
 
 	while (!bpf_map_get_next_key(fd, &prev, &key)) {
 		int n = 1;  /* start from perf_event_header */
-		int ip_pos = -1;
 
 		bpf_map_lookup_elem(fd, &key, &val);
 
+		/* zero-fill some of the fields, will be overwritten by raw_data when parsing */
 		if (sample_type & PERF_SAMPLE_IDENTIFIER)
 			data.array[n++] = sid;
-		if (sample_type & PERF_SAMPLE_IP) {
-			ip_pos = n;
+		if (sample_type & PERF_SAMPLE_IP)
 			data.array[n++] = 0;  /* will be updated */
-		}
 		if (sample_type & PERF_SAMPLE_TID)
-			data.array[n++] = (u64)key.pid << 32 | key.tgid;
+			data.array[n++] = 0;
 		if (sample_type & PERF_SAMPLE_TIME)
 			data.array[n++] = tstamp;
-		if (sample_type & PERF_SAMPLE_ID)
-			data.array[n++] = sid;
 		if (sample_type & PERF_SAMPLE_CPU)
 			data.array[n++] = 0;
 		if (sample_type & PERF_SAMPLE_PERIOD)
-			data.array[n++] = val;
-		if (sample_type & PERF_SAMPLE_CALLCHAIN) {
-			int len = 0;
-
-			/* data.array[n] is callchain->nr (updated later) */
-			data.array[n + 1] = PERF_CONTEXT_USER;
-			data.array[n + 2] = 0;
-
-			bpf_map_lookup_elem(stack, &key.stack_id, &data.array[n + 2]);
-			while (data.array[n + 2 + len])
+			data.array[n++] = 0;
+		if (sample_type & PERF_SAMPLE_RAW) {
+			/*
+			 *  [ size ][ data ]
+			 *  [     data     ]
+			 *  [     data     ]
+			 *  [     data     ]
+			 *  [ data ][ empty]
+			 */
+			int len = 0, i = 0;
+			void *raw_data = (void *)data.array + n * sizeof(u64);
+
+			off_cpu_raw[i++] = (u64)key.pid << 32 | key.tgid;
+			off_cpu_raw[i++] = val;
+
+			/* off_cpu_raw[i] is callchain->nr (updated later) */
+			off_cpu_raw[i + 1] = PERF_CONTEXT_USER;
+			off_cpu_raw[i + 2] = 0;
+
+			bpf_map_lookup_elem(stack, &key.stack_id, &off_cpu_raw[i + 2]);
+			while (off_cpu_raw[i + 2 + len])
 				len++;
 
-			/* update length of callchain */
-			data.array[n] = len + 1;
+			off_cpu_raw[i] = len + 1;
+			i += len + 2;
+
+			off_cpu_raw[i++] = key.cgroup_id;
 
-			/* update sample ip with the first callchain entry */
-			if (ip_pos >= 0)
-				data.array[ip_pos] = data.array[n + 2];
+			raw_size = i * sizeof(u64) + sizeof(u32); /* 4 bytes for alignment */
+			memcpy(raw_data, &raw_size, sizeof(raw_size));
+			memcpy(raw_data + sizeof(u32), off_cpu_raw, i * sizeof(u64));
 
-			/* calculate sample callchain data array length */
-			n += len + 2;
+			n += i + 1;
 		}
 		if (sample_type & PERF_SAMPLE_CGROUP)
 			data.array[n++] = key.cgroup_id;
-- 
2.43.0
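
For reference, the PERF_SAMPLE_RAW branch above packs each accumulated
sample's payload in the same order a direct sample from
bpf_perf_event_output() carries it: tid, off-cpu time, callchain, cgroup
id, with the u32 size field in front and 4 trailing pad bytes counted in
raw_size so the whole raw area stays 8-byte aligned. A reader-side
sketch of that layout (illustrative names, not code from the series):

    #include <stdint.h>
    #include <stdio.h>

    /* Walk one accumulated sample's raw payload as laid out by
     * off_cpu_write().  'p' points just past the u32 size field;
     * raw_size counts the u64 payload plus the 4 trailing pad bytes.
     */
    static void dump_off_cpu_raw(const uint64_t *p, uint32_t raw_size)
    {
            uint64_t tid = *p++;      /* (u64)pid << 32 | tgid */
            uint64_t total = *p++;    /* val: accumulated off-cpu time */
            uint64_t nr = *p++;       /* callchain length, len + 1 */

            printf("pid %llu tgid %llu off-cpu %llu\n",
                   (unsigned long long)(tid >> 32),
                   (unsigned long long)(tid & 0xffffffff),
                   (unsigned long long)total);
            for (uint64_t i = 0; i < nr; i++) /* p[0] is PERF_CONTEXT_USER */
                    printf("  ip %#llx\n", (unsigned long long)p[i]);
            printf("  cgroup id %llu\n", (unsigned long long)p[nr]);
            (void)raw_size;
    }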
Re: [PATCH v7 09/10] perf record --off-cpu: Dump the remaining samples in BPF's stack trace map
Posted by Ian Rogers 1 week, 5 days ago
On Fri, Nov 8, 2024 at 12:41 PM Howard Chu <howardchu95@gmail.com> wrote:
>
> Dump the remaining samples as if they were direct samples.
>
> Put the stack trace, tid, off-cpu time and cgroup id into the raw_data
> section, just like a direct off-cpu sample coming from BPF's
> bpf_perf_event_output().
>
> This ensures that evsel__parse_sample() correctly parses both direct
> samples and accumulated samples.

Nice, hopefully this means we can get rid of the logic that holds
all threads live for the sake of off-CPU, since off-CPU events were
being "sampled" after threads had terminated
(symbol_conf.keep_exited_threads). An example of this logic is:
https://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools-next.git/tree/tools/perf/builtin-report.c?h=perf-tools-next#n1484
Perhaps there is a follow-up to dump exiting processes with a BPF
program on `tp/sched/sched_process_exit`. If the exited process has
already been "dumped", then its map entry can be removed, which may
lower overhead if off-CPU is running for a long time system-wide.
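
A minimal sketch of what such a follow-up BPF program could look like,
assuming a hash map named off_cpu with a simplified pid-only key (the
real map in the off-cpu skeleton is keyed on more than the pid, so a
complete version would need to locate every entry for the exiting task,
e.g. with bpf_for_each_map_elem()):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    struct off_cpu_key {                    /* simplified, pid only */
            __u32 pid;
    };

    struct {
            __uint(type, BPF_MAP_TYPE_HASH);
            __uint(max_entries, 16384);
            __type(key, struct off_cpu_key);
            __type(value, __u64);
    } off_cpu SEC(".maps");

    SEC("tp/sched/sched_process_exit")
    int on_task_exit(void *ctx)
    {
            struct off_cpu_key key = {
                    .pid = (__u32)bpf_get_current_pid_tgid(), /* exiting thread */
            };
            __u64 *total = bpf_map_lookup_elem(&off_cpu, &key);

            if (total) {
                    /* dump *total regardless of the threshold, e.g. via
                     * bpf_perf_event_output() as the direct-sample path
                     * does, then free the entry for live tasks */
                    bpf_map_delete_elem(&off_cpu, &key);
            }
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";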


> Suggested-by: Namhyung Kim <namhyung@kernel.org>
> Signed-off-by: Howard Chu <howardchu95@gmail.com>

Reviewed-by: Ian Rogers <irogers@google.com>

Thanks,
Ian

Re: [PATCH v7 09/10] perf record --off-cpu: Dump the remaining samples in BPF's stack trace map
Posted by Howard Chu 1 week, 4 days ago
Hello Ian,

On Mon, Nov 11, 2024 at 10:07 AM Ian Rogers <irogers@google.com> wrote:
> Nice, hopefully this means we can get rid of the logic that holds
> all threads live for the sake of off-CPU, since off-CPU events were
> being "sampled" after threads had terminated
> (symbol_conf.keep_exited_threads). An example of this logic is:
> https://git.kernel.org/pub/scm/linux/kernel/git/perf/perf-tools-next.git/tree/tools/perf/builtin-report.c?h=perf-tools-next#n1484
> Perhaps there is a follow-up to dump exiting processes with a BPF
> program on `tp/sched/sched_process_exit`. If the exited process has
> already been "dumped", then its map entry can be removed, which may
> lower overhead if off-CPU is running for a long time system-wide.

Thanks for the advice; that's actually a great idea. IIUC, when a task
exits, we dump its off-cpu time regardless of the threshold and delete
the map entry to save space for other tasks.
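
The deletion half is a single libbpf call; for illustration, a
userspace drain pattern over the same map (a sketch reusing the
fd/key/val names from off_cpu_write() above, not code from this
series):

    /* Drain pattern: always fetch the first remaining key (NULL as
     * the previous key), dump the entry, then delete it.  This avoids
     * iterating with a key that has just been deleted.
     */
    while (!bpf_map_get_next_key(fd, NULL, &key)) {
            bpf_map_lookup_elem(fd, &key, &val);
            /* ... build and write the sample as above ... */
            if (bpf_map_delete_elem(fd, &key))
                    break;  /* stop rather than spin if delete fails */
    }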

Thanks,
Howard