kernel/trace/tracing_map.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-)
"tracing_map->next_elt" in get_free_elt() is at risk of overflowing.
Once it overflows, new elements can still be inserted into the tracing_map
even though the maximum number of elements (`max_elts`) has been reached.
Continuing to insert elements after the overflow could result in the
tracing_map containing "tracing_map->map_size" elements, leaving no empty
entries.
If any attempt is made to insert an element into a full tracing_map using
`__tracing_map_insert()`, it will cause an infinite loop with preemption
disabled, leading to a CPU hang problem.
Fix this by preventing any further increments to "tracing_map->next_elt"
once it reaches "tracing_map->max_elts".
Co-developed-by: Cheng-Jui Wang <cheng-jui.wang@mediatek.com>
Signed-off-by: Cheng-Jui Wang <cheng-jui.wang@mediatek.com>
Signed-off-by: Tze-nan Wu <Tze-nan.Wu@mediatek.com>
---
kernel/trace/tracing_map.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
index a4dcf0f24352..3a56e7c8aa4f 100644
--- a/kernel/trace/tracing_map.c
+++ b/kernel/trace/tracing_map.c
@@ -454,7 +454,7 @@ static struct tracing_map_elt *get_free_elt(struct tracing_map *map)
struct tracing_map_elt *elt = NULL;
int idx;
- idx = atomic_inc_return(&map->next_elt);
+ idx = atomic_fetch_add_unless(&map->next_elt, 1, map->max_elts);
if (idx < map->max_elts) {
elt = *(TRACING_MAP_ELT(map->elts, idx));
if (map->ops && map->ops->elt_init)
@@ -699,7 +699,7 @@ void tracing_map_clear(struct tracing_map *map)
{
unsigned int i;
- atomic_set(&map->next_elt, -1);
+ atomic_set(&map->next_elt, 0);
atomic64_set(&map->hits, 0);
atomic64_set(&map->drops, 0);
@@ -783,7 +783,7 @@ struct tracing_map *tracing_map_create(unsigned int map_bits,
map->map_bits = map_bits;
map->max_elts = (1 << map_bits);
- atomic_set(&map->next_elt, -1);
+ atomic_set(&map->next_elt, 0);
map->map_size = (1 << (map_bits + 1));
map->ops = ops;
--
2.18.0
On Wed, 2024-07-10 at 17:19 +0800, Tze-nan Wu wrote: > "tracing_map->next_elt" in get_free_elt() is at risk of overflowing. > > Once it overflows, new elements can still be inserted into the > tracing_map > even though the maximum number of elements (`max_elts`) has been > reached. > Continuing to insert elements after the overflow could result in the > tracing_map containing "tracing_map->max_size" elements, leaving no > empty > entries. > If any attempt is made to insert an element into a full tracing_map > using > `__tracing_map_insert()`, it will cause an infinite loop with > preemption > disabled, leading to a CPU hang problem. > > Fix this by preventing any further increments to "tracing_map- > >next_elt" > once it reaches "tracing_map->max_elt". > > Co-developed-by: Cheng-Jui Wang <cheng-jui.wang@mediatek.com> > Signed-off-by: Cheng-Jui Wang <cheng-jui.wang@mediatek.com> > Signed-off-by: Tze-nan Wu <Tze-nan.Wu@mediatek.com> > --- > kernel/trace/tracing_map.c | 6 +++--- > 1 file changed, 3 insertions(+), 3 deletions(-) > Just a gentle ping. Any comments on this patch will be appreciated. Actually we have encountered this issue internally after enabling the throttle_rss_stat feature in Perfetto for an extended duration, during which the rss_stat tracepoint was invoked over 2^32 times. Then the CPU could hang in function "__tracing_map_insert()" after the tracing_map left no empty entry. throttle_rss_stat is literally: 1. $echo "rss_stat_throttled unsigned int mm_id unsigned int curr int member long size" >> /sys/kernel/tracing/synthetic_events 2. 
$echo 'hist:keys=mm_id,member:bucket=size/0x80000:onchange($bucket).rss_stat_ throttled(mm_id,curr,member,size)' > /sys/kernel/tracing/events/kmem/rss_stat/trigger > diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c > index a4dcf0f24352..3a56e7c8aa4f 100644 > --- a/kernel/trace/tracing_map.c > +++ b/kernel/trace/tracing_map.c > @@ -454,7 +454,7 @@ static struct tracing_map_elt > *get_free_elt(struct tracing_map *map) > struct tracing_map_elt *elt = NULL; > int idx; > > - idx = atomic_inc_return(&map->next_elt); > + idx = atomic_fetch_add_unless(&map->next_elt, 1, map- > >max_elts); > if (idx < map->max_elts) { > elt = *(TRACING_MAP_ELT(map->elts, idx)); > if (map->ops && map->ops->elt_init) > @@ -699,7 +699,7 @@ void tracing_map_clear(struct tracing_map *map) > { > unsigned int i; > > - atomic_set(&map->next_elt, -1); > + atomic_set(&map->next_elt, 0); > atomic64_set(&map->hits, 0); > atomic64_set(&map->drops, 0); > > @@ -783,7 +783,7 @@ struct tracing_map *tracing_map_create(unsigned > int map_bits, > > map->map_bits = map_bits; > map->max_elts = (1 << map_bits); > - atomic_set(&map->next_elt, -1); > + atomic_set(&map->next_elt, 0); > > map->map_size = (1 << (map_bits + 1)); > map->ops = ops;
© 2016 - 2024 Red Hat, Inc.