The bpf_get_kmem_cache() kfunc can return the address of the slab cache
(kmem_cache) an object belongs to. As the slab cache iterator already
provides the cache name, we can use it to symbolize some dynamic kernel
locks allocated from a slab.
Before:
root@virtme-ng:/home/namhyung/project/linux# tools/perf/perf lock con -abl sleep 1
 contended   total wait     max wait     avg wait            address   symbol

         2      3.34 us      2.87 us      1.67 us   ffff9d7800ad9600   (mutex)
         2      2.16 us      1.93 us      1.08 us   ffff9d7804b992d8   (mutex)
         4      1.37 us       517 ns       343 ns   ffff9d78036e6e00   (mutex)
         1      1.27 us      1.27 us      1.27 us   ffff9d7804b99378   (mutex)
         2       845 ns       599 ns       422 ns   ffffffff9e1c3620   delayed_uprobe_lock (mutex)
         1       845 ns       845 ns       845 ns   ffffffff9da0b280   jiffies_lock (spinlock)
         2       377 ns       259 ns       188 ns   ffffffff9e1cf840   pcpu_alloc_mutex (mutex)
         1       305 ns       305 ns       305 ns   ffffffff9e1b4cf8   tracepoint_srcu_srcu_usage (mutex)
         1       295 ns       295 ns       295 ns   ffffffff9e1c0940   pack_mutex (mutex)
         1       232 ns       232 ns       232 ns   ffff9d7804b7d8d8   (mutex)
         1       180 ns       180 ns       180 ns   ffffffff9e1b4c28   tracepoint_srcu_srcu_usage (mutex)
         1       165 ns       165 ns       165 ns   ffffffff9da8b3a0   text_mutex (mutex)
After:
root@virtme-ng:/home/namhyung/project/linux# tools/perf/perf lock con -abl sleep 1
 contended   total wait     max wait     avg wait            address   symbol

         2      1.95 us      1.77 us       975 ns   ffff9d5e852d3498   &task_struct (mutex)
         1      1.18 us      1.18 us      1.18 us   ffff9d5e852d3538   &task_struct (mutex)
         4      1.12 us       354 ns       279 ns   ffff9d5e841ca800   &kmalloc-cg-512 (mutex)
         2       859 ns       617 ns       429 ns   ffffffffa41c3620   delayed_uprobe_lock (mutex)
         3       691 ns       388 ns       230 ns   ffffffffa41c0940   pack_mutex (mutex)
         3       421 ns       164 ns       140 ns   ffffffffa3a8b3a0   text_mutex (mutex)
         1       409 ns       409 ns       409 ns   ffffffffa41b4cf8   tracepoint_srcu_srcu_usage (mutex)
         2       362 ns       239 ns       181 ns   ffffffffa41cf840   pcpu_alloc_mutex (mutex)
         1       220 ns       220 ns       220 ns   ffff9d5e82b534d8   &signal_cache (mutex)
         1       215 ns       215 ns       215 ns   ffffffffa41b4c28   tracepoint_srcu_srcu_usage (mutex)
Note that the name starts with a '&' sign for slab objects to indicate
they are dynamic locks. It won't give the exact lock or type names, but
it's still useful. We may later add type info to the slab cache to get
the exact name of the lock in the type.
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
tools/perf/util/bpf_lock_contention.c | 52 +++++++++++++++++++
.../perf/util/bpf_skel/lock_contention.bpf.c | 21 +++++++-
2 files changed, 71 insertions(+), 2 deletions(-)
diff --git a/tools/perf/util/bpf_lock_contention.c b/tools/perf/util/bpf_lock_contention.c
index 558590c3111390fc..3f127fc6b95f8326 100644
--- a/tools/perf/util/bpf_lock_contention.c
+++ b/tools/perf/util/bpf_lock_contention.c
@@ -2,6 +2,7 @@
#include "util/cgroup.h"
#include "util/debug.h"
#include "util/evlist.h"
+#include "util/hashmap.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
@@ -20,12 +21,25 @@
static struct lock_contention_bpf *skel;
static bool has_slab_iter;
+static struct hashmap slab_hash;
+
+static size_t slab_cache_hash(long key, void *ctx __maybe_unused)
+{
+ return key;
+}
+
+static bool slab_cache_equal(long key1, long key2, void *ctx __maybe_unused)
+{
+ return key1 == key2;
+}
static void check_slab_cache_iter(struct lock_contention *con)
{
struct btf *btf = btf__load_vmlinux_btf();
s32 ret;
+ hashmap__init(&slab_hash, slab_cache_hash, slab_cache_equal, /*ctx=*/NULL);
+
if (btf == NULL) {
pr_debug("BTF loading failed: %s\n", strerror(errno));
return;
@@ -49,6 +63,7 @@ static void run_slab_cache_iter(void)
{
int fd;
char buf[256];
+ long key, *prev_key;
if (!has_slab_iter)
return;
@@ -64,6 +79,34 @@ static void run_slab_cache_iter(void)
continue;
close(fd);
+
+ /* Read the slab cache map and build a hash with IDs */
+ fd = bpf_map__fd(skel->maps.slab_caches);
+ prev_key = NULL;
+ while (!bpf_map_get_next_key(fd, prev_key, &key)) {
+ struct slab_cache_data *data;
+
+ data = malloc(sizeof(*data));
+ if (data == NULL)
+ break;
+
+ if (bpf_map_lookup_elem(fd, &key, data) < 0)
+ break;
+
+ hashmap__add(&slab_hash, data->id, data);
+ prev_key = &key;
+ }
+}
+
+static void exit_slab_cache_iter(void)
+{
+ struct hashmap_entry *cur;
+ unsigned bkt;
+
+ hashmap__for_each_entry(&slab_hash, cur, bkt)
+ free(cur->pvalue);
+
+ hashmap__clear(&slab_hash);
}
int lock_contention_prepare(struct lock_contention *con)
@@ -397,6 +440,7 @@ static const char *lock_contention_get_name(struct lock_contention *con,
if (con->aggr_mode == LOCK_AGGR_ADDR) {
int lock_fd = bpf_map__fd(skel->maps.lock_syms);
+ struct slab_cache_data *slab_data;
/* per-process locks set upper bits of the flags */
if (flags & LCD_F_MMAP_LOCK)
@@ -415,6 +459,12 @@ static const char *lock_contention_get_name(struct lock_contention *con,
return "rq_lock";
}
+ /* look slab_hash for dynamic locks in a slab object */
+ if (hashmap__find(&slab_hash, flags & LCB_F_SLAB_ID_MASK, &slab_data)) {
+ snprintf(name_buf, sizeof(name_buf), "&%s", slab_data->name);
+ return name_buf;
+ }
+
return "";
}
@@ -589,5 +639,7 @@ int lock_contention_finish(struct lock_contention *con)
cgroup__put(cgrp);
}
+ exit_slab_cache_iter();
+
return 0;
}
diff --git a/tools/perf/util/bpf_skel/lock_contention.bpf.c b/tools/perf/util/bpf_skel/lock_contention.bpf.c
index fd24ccb00faec0ba..b5bc37955560a58e 100644
--- a/tools/perf/util/bpf_skel/lock_contention.bpf.c
+++ b/tools/perf/util/bpf_skel/lock_contention.bpf.c
@@ -123,6 +123,8 @@ struct mm_struct___new {
struct rw_semaphore mmap_lock;
} __attribute__((preserve_access_index));
+extern struct kmem_cache *bpf_get_kmem_cache(u64 addr) __ksym __weak;
+
/* control flags */
const volatile int has_cpu;
const volatile int has_task;
@@ -496,8 +498,23 @@ int contention_end(u64 *ctx)
};
int err;
- if (aggr_mode == LOCK_AGGR_ADDR)
- first.flags |= check_lock_type(pelem->lock, pelem->flags);
+ if (aggr_mode == LOCK_AGGR_ADDR) {
+ first.flags |= check_lock_type(pelem->lock,
+ pelem->flags & LCB_F_TYPE_MASK);
+
+ /* Check if it's from a slab object */
+ if (bpf_get_kmem_cache) {
+ struct kmem_cache *s;
+ struct slab_cache_data *d;
+
+ s = bpf_get_kmem_cache(pelem->lock);
+ if (s != NULL) {
+ d = bpf_map_lookup_elem(&slab_caches, &s);
+ if (d != NULL)
+ first.flags |= d->id;
+ }
+ }
+ }
err = bpf_map_update_elem(&lock_stat, &key, &first, BPF_NOEXIST);
if (err < 0) {
--
2.47.0.277.g8800431eea-goog
On 11/8/24 07:14, Namhyung Kim wrote:
> The bpf_get_kmem_cache() kfunc can return an address of the slab cache
> (kmem_cache). As it has the name of the slab cache from the iterator,
> we can use it to symbolize some dynamic kernel locks in a slab.
>
> Before:
> root@virtme-ng:/home/namhyung/project/linux# tools/perf/perf lock con -abl sleep 1
> contended total wait max wait avg wait address symbol
>
> 2 3.34 us 2.87 us 1.67 us ffff9d7800ad9600 (mutex)
> 2 2.16 us 1.93 us 1.08 us ffff9d7804b992d8 (mutex)
> 4 1.37 us 517 ns 343 ns ffff9d78036e6e00 (mutex)
> 1 1.27 us 1.27 us 1.27 us ffff9d7804b99378 (mutex)
> 2 845 ns 599 ns 422 ns ffffffff9e1c3620 delayed_uprobe_lock (mutex)
> 1 845 ns 845 ns 845 ns ffffffff9da0b280 jiffies_lock (spinlock)
> 2 377 ns 259 ns 188 ns ffffffff9e1cf840 pcpu_alloc_mutex (mutex)
> 1 305 ns 305 ns 305 ns ffffffff9e1b4cf8 tracepoint_srcu_srcu_usage (mutex)
> 1 295 ns 295 ns 295 ns ffffffff9e1c0940 pack_mutex (mutex)
> 1 232 ns 232 ns 232 ns ffff9d7804b7d8d8 (mutex)
> 1 180 ns 180 ns 180 ns ffffffff9e1b4c28 tracepoint_srcu_srcu_usage (mutex)
> 1 165 ns 165 ns 165 ns ffffffff9da8b3a0 text_mutex (mutex)
>
> After:
> root@virtme-ng:/home/namhyung/project/linux# tools/perf/perf lock con -abl sleep 1
> contended total wait max wait avg wait address symbol
>
> 2 1.95 us 1.77 us 975 ns ffff9d5e852d3498 &task_struct (mutex)
> 1 1.18 us 1.18 us 1.18 us ffff9d5e852d3538 &task_struct (mutex)
> 4 1.12 us 354 ns 279 ns ffff9d5e841ca800 &kmalloc-cg-512 (mutex)
> 2 859 ns 617 ns 429 ns ffffffffa41c3620 delayed_uprobe_lock (mutex)
> 3 691 ns 388 ns 230 ns ffffffffa41c0940 pack_mutex (mutex)
> 3 421 ns 164 ns 140 ns ffffffffa3a8b3a0 text_mutex (mutex)
> 1 409 ns 409 ns 409 ns ffffffffa41b4cf8 tracepoint_srcu_srcu_usage (mutex)
> 2 362 ns 239 ns 181 ns ffffffffa41cf840 pcpu_alloc_mutex (mutex)
> 1 220 ns 220 ns 220 ns ffff9d5e82b534d8 &signal_cache (mutex)
> 1 215 ns 215 ns 215 ns ffffffffa41b4c28 tracepoint_srcu_srcu_usage (mutex)
>
> Note that the name starts with '&' sign for slab objects to inform they
> are dynamic locks. It won't give the accurate lock or type names but
> it's still useful. We may add type info to the slab cache later to get
> the exact name of the lock in the type later.
>
> Signed-off-by: Namhyung Kim <namhyung@kernel.org>
<snip>
> diff --git a/tools/perf/util/bpf_skel/lock_contention.bpf.c b/tools/perf/util/bpf_skel/lock_contention.bpf.c
> index fd24ccb00faec0ba..b5bc37955560a58e 100644
> --- a/tools/perf/util/bpf_skel/lock_contention.bpf.c
> +++ b/tools/perf/util/bpf_skel/lock_contention.bpf.c
> @@ -123,6 +123,8 @@ struct mm_struct___new {
> struct rw_semaphore mmap_lock;
> } __attribute__((preserve_access_index));
>
> +extern struct kmem_cache *bpf_get_kmem_cache(u64 addr) __ksym __weak;
> +
> /* control flags */
> const volatile int has_cpu;
> const volatile int has_task;
> @@ -496,8 +498,23 @@ int contention_end(u64 *ctx)
> };
> int err;
>
> - if (aggr_mode == LOCK_AGGR_ADDR)
> - first.flags |= check_lock_type(pelem->lock, pelem->flags);
> + if (aggr_mode == LOCK_AGGR_ADDR) {
> + first.flags |= check_lock_type(pelem->lock,
> + pelem->flags & LCB_F_TYPE_MASK);
> +
> + /* Check if it's from a slab object */
> + if (bpf_get_kmem_cache) {
> + struct kmem_cache *s;
> + struct slab_cache_data *d;
> +
> + s = bpf_get_kmem_cache(pelem->lock);
> + if (s != NULL) {
> + d = bpf_map_lookup_elem(&slab_caches, &s);
> + if (d != NULL)
> + first.flags |= d->id;
> + }
Is this being executed as part of obtaining a perf event record, or as part
of a postprocessing pass? I'm not familiar enough with the code to be certain.
- if it's part of perf event record, can you just store 's' and defer
resolving the cache by bpf_map_lookup_elem() to postprocessing?
- if it's postprocessing, it would be too late for bpf_get_kmem_cache() as
the object might be gone already?
The second alternative would be worse as it could miss the cache or
misattribute it (in case the page is reallocated by another cache); the
first is just less efficient than it could be.
> + }
> + }
>
> err = bpf_map_update_elem(&lock_stat, &key, &first, BPF_NOEXIST);
> if (err < 0) {
On Tue, Nov 12, 2024 at 12:09:24PM +0100, Vlastimil Babka wrote:
> On 11/8/24 07:14, Namhyung Kim wrote:
> > The bpf_get_kmem_cache() kfunc can return an address of the slab cache
> > (kmem_cache). As it has the name of the slab cache from the iterator,
> > we can use it to symbolize some dynamic kernel locks in a slab.
> >
> > Before:
> > root@virtme-ng:/home/namhyung/project/linux# tools/perf/perf lock con -abl sleep 1
> > contended total wait max wait avg wait address symbol
> >
> > 2 3.34 us 2.87 us 1.67 us ffff9d7800ad9600 (mutex)
> > 2 2.16 us 1.93 us 1.08 us ffff9d7804b992d8 (mutex)
> > 4 1.37 us 517 ns 343 ns ffff9d78036e6e00 (mutex)
> > 1 1.27 us 1.27 us 1.27 us ffff9d7804b99378 (mutex)
> > 2 845 ns 599 ns 422 ns ffffffff9e1c3620 delayed_uprobe_lock (mutex)
> > 1 845 ns 845 ns 845 ns ffffffff9da0b280 jiffies_lock (spinlock)
> > 2 377 ns 259 ns 188 ns ffffffff9e1cf840 pcpu_alloc_mutex (mutex)
> > 1 305 ns 305 ns 305 ns ffffffff9e1b4cf8 tracepoint_srcu_srcu_usage (mutex)
> > 1 295 ns 295 ns 295 ns ffffffff9e1c0940 pack_mutex (mutex)
> > 1 232 ns 232 ns 232 ns ffff9d7804b7d8d8 (mutex)
> > 1 180 ns 180 ns 180 ns ffffffff9e1b4c28 tracepoint_srcu_srcu_usage (mutex)
> > 1 165 ns 165 ns 165 ns ffffffff9da8b3a0 text_mutex (mutex)
> >
> > After:
> > root@virtme-ng:/home/namhyung/project/linux# tools/perf/perf lock con -abl sleep 1
> > contended total wait max wait avg wait address symbol
> >
> > 2 1.95 us 1.77 us 975 ns ffff9d5e852d3498 &task_struct (mutex)
> > 1 1.18 us 1.18 us 1.18 us ffff9d5e852d3538 &task_struct (mutex)
> > 4 1.12 us 354 ns 279 ns ffff9d5e841ca800 &kmalloc-cg-512 (mutex)
> > 2 859 ns 617 ns 429 ns ffffffffa41c3620 delayed_uprobe_lock (mutex)
> > 3 691 ns 388 ns 230 ns ffffffffa41c0940 pack_mutex (mutex)
> > 3 421 ns 164 ns 140 ns ffffffffa3a8b3a0 text_mutex (mutex)
> > 1 409 ns 409 ns 409 ns ffffffffa41b4cf8 tracepoint_srcu_srcu_usage (mutex)
> > 2 362 ns 239 ns 181 ns ffffffffa41cf840 pcpu_alloc_mutex (mutex)
> > 1 220 ns 220 ns 220 ns ffff9d5e82b534d8 &signal_cache (mutex)
> > 1 215 ns 215 ns 215 ns ffffffffa41b4c28 tracepoint_srcu_srcu_usage (mutex)
> >
> > Note that the name starts with '&' sign for slab objects to inform they
> > are dynamic locks. It won't give the accurate lock or type names but
> > it's still useful. We may add type info to the slab cache later to get
> > the exact name of the lock in the type later.
> >
> > Signed-off-by: Namhyung Kim <namhyung@kernel.org>
>
> <snip>
>
> > diff --git a/tools/perf/util/bpf_skel/lock_contention.bpf.c b/tools/perf/util/bpf_skel/lock_contention.bpf.c
> > index fd24ccb00faec0ba..b5bc37955560a58e 100644
> > --- a/tools/perf/util/bpf_skel/lock_contention.bpf.c
> > +++ b/tools/perf/util/bpf_skel/lock_contention.bpf.c
> > @@ -123,6 +123,8 @@ struct mm_struct___new {
> > struct rw_semaphore mmap_lock;
> > } __attribute__((preserve_access_index));
> >
> > +extern struct kmem_cache *bpf_get_kmem_cache(u64 addr) __ksym __weak;
> > +
> > /* control flags */
> > const volatile int has_cpu;
> > const volatile int has_task;
> > @@ -496,8 +498,23 @@ int contention_end(u64 *ctx)
> > };
> > int err;
> >
> > - if (aggr_mode == LOCK_AGGR_ADDR)
> > - first.flags |= check_lock_type(pelem->lock, pelem->flags);
> > + if (aggr_mode == LOCK_AGGR_ADDR) {
> > + first.flags |= check_lock_type(pelem->lock,
> > + pelem->flags & LCB_F_TYPE_MASK);
> > +
> > + /* Check if it's from a slab object */
> > + if (bpf_get_kmem_cache) {
> > + struct kmem_cache *s;
> > + struct slab_cache_data *d;
> > +
> > + s = bpf_get_kmem_cache(pelem->lock);
> > + if (s != NULL) {
> > + d = bpf_map_lookup_elem(&slab_caches, &s);
> > + if (d != NULL)
> > + first.flags |= d->id;
> > + }
>
> Is this being executed as part of obtaining a perf event record, or as part
> of a postprocessing pass? I'm not familiar enough with the code to be certain.
>
> - if it's part of perf event record, can you just store 's' and defer
> resolving the cache by bpf_map_lookup_elem() to postprocessing?
Namhyung is on vacation this week, so lemme try to help (and learn more
about this patchset since we discussed it back in LSFMM :-)):
tl;dr: He wants to store a 10-bit cookie for the slab cache, to avoid
storing a 64-bit pointer per contention record.
My reading of his code:
'first' is a 'struct contention_data' instance that he will use for
post processing in tools/perf/builtin-lock.c; the relevant part:
if (use_bpf) {
lock_contention_start();
if (argc)
evlist__start_workload(con.evlist);
/* wait for signal */
pause();
lock_contention_stop();
lock_contention_read(&con);
} else
process records from a perf.data file with tons
of lock:lock_contention_{begin,end}, which the use_bpf
mode above "pre-processes" at begin+end pairs and
turns into 'struct contention_data' records in a BPF
map for later post processing in the common part after
this if/else block.
The post processing is in lock_contention_read(), which is in
tools/perf/util/bpf_lock_contention.c. I stripped out prep steps, etc;
the "meat" is:
struct contention_data data = {};
struct lock_stat *st = NULL;
<SNIP>
while (!bpf_map_get_next_key(fd, prev_key, &key)) {
s64 ls_key;
const char *name;
bpf_map_lookup_elem(fd, &key, &data);
name = lock_contention_get_name(con, &key, stack_trace, data.flags);
st = lock_stat_findnew(ls_key, name, data.flags);
That 'lock_stat' struct is then filled up and later, in the part common
to using BPF or not, it gets printed out in the builtin-lock.c main tool
codebase.
The part we're interested in here is lock_contention_get_name(), which
before this patch series returned "(mutex)" and now resolves it to the
slab cache name "&task_struct (mutex)".
key is:
struct contention_key {
s32 stack_id;
u32 pid;
u64 lock_addr_or_cgroup;
};
lock_contention_get_name() tries to resolve the name to the usual
suspects:
/* per-process locks set upper bits of the flags */
if (flags & LCD_F_MMAP_LOCK)
return "mmap_lock";
if (flags & LCD_F_SIGHAND_LOCK)
return "siglock";
/* global locks with symbols */
sym = machine__find_kernel_symbol(machine, key->lock_addr_or_cgroup, &kmap);
if (sym)
return sym->name;
And then, if all of the above fail (there is another case for rq_lock),
it gets to look at the ID area of contention_data->flags:
+#define LCB_F_SLAB_ID_SHIFT 16
+#define LCB_F_SLAB_ID_START (1U << 16)
+#define LCB_F_SLAB_ID_END (1U << 26)
+#define LCB_F_SLAB_ID_MASK 0x03FF0000U
>>> bin(0x03FF0000)
'0b11111111110000000000000000'
>>>
+ /* look slab_hash for dynamic locks in a slab object */
+ if (hashmap__find(&slab_hash, flags & LCB_F_SLAB_ID_MASK, &slab_data)) {
+ snprintf(name_buf, sizeof(name_buf), "&%s", slab_data->name);
+ return name_buf;
+ }
He wants to avoid storing 64 bits (the slab cache pointer, 's'); instead
he stores a shorter 'id' and encodes it in the upper bits of the
'struct contention_data' 'flags' field.
The iterator, at the beginning of the session, assigns this id, starting
from zero, to each of the slab caches, so the BPF program needs to map the
cache address back to the id at the contention_end tracepoint.
At post processing time it converts the id back to the name of the slab
cache.
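To make the bit layout concrete, here is a minimal sketch of how the
10-bit id can be packed into and recovered from the flags field. It is
illustrative only: the mask/shift values are the ones from the patch, but
the helper names and the exact encode step are my assumptions (in the
patch the id stored in slab_cache_data already appears pre-shifted, so
'first.flags |= d->id' works directly):

#include <stdint.h>
#include <stdio.h>

/* Mask/shift values taken from the patch. */
#define LCB_F_SLAB_ID_SHIFT	16
#define LCB_F_SLAB_ID_MASK	0x03ff0000U	/* 10 bits: 16..25 */

/*
 * BPF side (hypothetical helper): fold the iterator-assigned cache index
 * into the upper bits of the contention_data flags.
 */
static inline uint32_t encode_slab_id(uint32_t flags, uint32_t cache_idx)
{
	return flags | ((cache_idx << LCB_F_SLAB_ID_SHIFT) & LCB_F_SLAB_ID_MASK);
}

/*
 * Userspace side (hypothetical helper): recover the key used to look up
 * the slab cache name in the id -> slab_cache_data hashmap.
 */
static inline uint32_t decode_slab_id(uint32_t flags)
{
	return flags & LCB_F_SLAB_ID_MASK;
}

int main(void)
{
	/* e.g. flags 0x2 for a mutex, cache index 5 assigned by the iterator */
	uint32_t flags = encode_slab_id(0x2, 5);

	printf("flags=%#x slab-id key=%#x\n", flags, decode_slab_id(flags));
	return 0;
}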
I hope this helps,
- Arnaldo
> - if it's postprocessing, it would be too late for bpf_get_kmem_cache() as
> the object might be gone already?
>
> The second alternative would be worse as it could miss the cache or
> misattribute (in case page is reallocated by another cache), the first is
> just less efficient than possible.
>
> > + }
> > + }
> >
> > err = bpf_map_update_elem(&lock_stat, &key, &first, BPF_NOEXIST);
> > if (err < 0) {
On 11/12/24 15:50, Arnaldo Carvalho de Melo wrote:
> On Tue, Nov 12, 2024 at 12:09:24PM +0100, Vlastimil Babka wrote:
> + /* look slab_hash for dynamic locks in a slab object */
> + if (hashmap__find(&slab_hash, flags & LCB_F_SLAB_ID_MASK, &slab_data)) {
> + snprintf(name_buf, sizeof(name_buf), "&%s", slab_data->name);
> + return name_buf;
> + }
>
> He wants to avoid storing 64 bytes (the slab cache pointer, 's'), instead
> he wants to store a shorter 'id' and encode it in the upper bits of the
> 'struct contention_data' 'flags' field.
>
> The iterator, at the beggining of the session attributes this id,
> starting from zero, to each of the slab caches, so it needs to map it
> back from the address at contention_end tracepoint.
>
> At post processing time it converts the id back to the name of the slab
> cache.
>
> I hope this helps,
Thanks a lot. If it's a tradeoff of doing a bit more work in order to
store less data, then it makes sense to me.
Vlastimil
> - Arnaldo
>
>> - if it's postprocessing, it would be too late for bpf_get_kmem_cache() as
>> the object might be gone already?
>>
>> The second alternative would be worse as it could miss the cache or
>> misattribute (in case page is reallocated by another cache), the first is
>> just less efficient than possible.
>>
>> > + }
>> > + }
>> >
>> > err = bpf_map_update_elem(&lock_stat, &key, &first, BPF_NOEXIST);
>> > if (err < 0) {
Hello,
On Wed, Nov 13, 2024 at 03:20:43PM +0100, Vlastimil Babka wrote:
> On 11/12/24 15:50, Arnaldo Carvalho de Melo wrote:
> > On Tue, Nov 12, 2024 at 12:09:24PM +0100, Vlastimil Babka wrote:
> > + /* look slab_hash for dynamic locks in a slab object */
> > + if (hashmap__find(&slab_hash, flags & LCB_F_SLAB_ID_MASK, &slab_data)) {
> > + snprintf(name_buf, sizeof(name_buf), "&%s", slab_data->name);
> > + return name_buf;
> > + }
> >
> > He wants to avoid storing 64 bytes (the slab cache pointer, 's'), instead
> > he wants to store a shorter 'id' and encode it in the upper bits of the
> > 'struct contention_data' 'flags' field.
> >
> > The iterator, at the beggining of the session attributes this id,
> > starting from zero, to each of the slab caches, so it needs to map it
> > back from the address at contention_end tracepoint.
> >
> > At post processing time it converts the id back to the name of the slab
> > cache.
> >
> > I hope this helps,
Thanks Arnaldo for the explanation!
>
> Thanks a lot, if it's a tradeoff to do a bit more work in order to store
> less data, then it makes sense to me.
Right, I don't want to increase the data size for this as we have some
unused bits in the flags. It'd add one more bpf hashmap lookup during
record, but I don't think it's gonna be a problem.
Thanks,
Namhyung
> >
> >> - if it's postprocessing, it would be too late for bpf_get_kmem_cache() as
> >> the object might be gone already?
> >>
> >> The second alternative would be worse as it could miss the cache or
> >> misattribute (in case page is reallocated by another cache), the first is
> >> just less efficient than possible.
> >>
> >> > + }
> >> > + }
> >> >
> >> > err = bpf_map_update_elem(&lock_stat, &key, &first, BPF_NOEXIST);
> >> > if (err < 0) {
>