[PATCH] bpf: hashtab: fix 32-bit overflow in memory usage calculation

Posted by Alexei Safin 1 month, 1 week ago
The intermediate product value_size * num_possible_cpus() is evaluated
in 32-bit arithmetic and only then promoted to 64 bits. On systems with
large value_size and many possible CPUs, this can overflow and lead to
underestimated memory usage.

Cast value_size to u64 before multiplying.
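
To see the failure mode concretely, here is a minimal userspace sketch
of the same C promotion rules (illustrative numbers only, not the
kernel code): the 32-bit product wraps before it is widened, while
casting one operand first keeps the whole left-to-right chain in
64-bit arithmetic.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* illustrative stand-ins for the kernel values */
	uint32_t value_size = 16u * 1024 * 1024; /* 16 MiB rounded value size */
	int cpus = 512;                          /* num_possible_cpus() */
	uint64_t num_entries = 1000;

	/* u32 * int is evaluated in 32 bits: 2^24 * 2^9 wraps to 0 */
	uint64_t wrong = value_size * cpus * num_entries;

	/* casting first widens the whole product to 64 bits */
	uint64_t right = (uint64_t)value_size * cpus * num_entries;

	printf("wrong: %llu\n", (unsigned long long)wrong); /* 0 */
	printf("right: %llu\n", (unsigned long long)right); /* 8589934592000 */
	return 0;
}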

Found by Linux Verification Center (linuxtesting.org) with SVACE.

Fixes: 304849a27b34 ("bpf: hashtab memory usage")
Cc: stable@vger.kernel.org
Signed-off-by: Alexei Safin <a.safin@rosa.ru>
---
 kernel/bpf/hashtab.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 570e2f723144..7ad6b5137ba1 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -2269,7 +2269,7 @@ static u64 htab_map_mem_usage(const struct bpf_map *map)
 		usage += htab->elem_size * num_entries;
 
 		if (percpu)
-			usage += value_size * num_possible_cpus() * num_entries;
+			usage += (u64)value_size * num_possible_cpus() * num_entries;
 		else if (!lru)
 			usage += sizeof(struct htab_elem *) * num_possible_cpus();
 	} else {
@@ -2281,7 +2281,7 @@ static u64 htab_map_mem_usage(const struct bpf_map *map)
 		usage += (htab->elem_size + LLIST_NODE_SZ) * num_entries;
 		if (percpu) {
 			usage += (LLIST_NODE_SZ + sizeof(void *)) * num_entries;
-			usage += value_size * num_possible_cpus() * num_entries;
+			usage += (u64)value_size * num_possible_cpus() * num_entries;
 		}
 	}
 	return usage;
-- 
2.50.1 (Apple Git-155)
Re: [PATCH] bpf: hashtab: fix 32-bit overflow in memory usage calculation
Posted by Yafang Shao 1 month, 1 week ago
On Fri, Nov 7, 2025 at 4:59 AM Alexei Safin <a.safin@rosa.ru> wrote:
>
> The intermediate product value_size * num_possible_cpus() is evaluated
> in 32-bit arithmetic and only then promoted to 64 bits. On systems with
> large value_size and many possible CPUs, this can overflow and lead to
> underestimated memory usage.
>
> Cast value_size to u64 before multiplying.
>
> Found by Linux Verification Center (linuxtesting.org) with SVACE.
>
> Fixes: 304849a27b34 ("bpf: hashtab memory usage")
> Cc: stable@vger.kernel.org
> Signed-off-by: Alexei Safin <a.safin@rosa.ru>
> ---
>  kernel/bpf/hashtab.c | 4 ++--
>  1 file changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
> index 570e2f723144..7ad6b5137ba1 100644
> --- a/kernel/bpf/hashtab.c
> +++ b/kernel/bpf/hashtab.c
> @@ -2269,7 +2269,7 @@ static u64 htab_map_mem_usage(const struct bpf_map *map)
>                 usage += htab->elem_size * num_entries;
>
>                 if (percpu)
> -                       usage += value_size * num_possible_cpus() * num_entries;
> +                       usage += (u64)value_size * num_possible_cpus() * num_entries;
>                 else if (!lru)
>                         usage += sizeof(struct htab_elem *) * num_possible_cpus();
>         } else {
> @@ -2281,7 +2281,7 @@ static u64 htab_map_mem_usage(const struct bpf_map *map)
>                 usage += (htab->elem_size + LLIST_NODE_SZ) * num_entries;
>                 if (percpu) {
>                         usage += (LLIST_NODE_SZ + sizeof(void *)) * num_entries;
> -                       usage += value_size * num_possible_cpus() * num_entries;
> +                       usage += (u64)value_size * num_possible_cpus() * num_entries;
>                 }
>         }
>         return usage;
> --
> 2.50.1 (Apple Git-155)
>

Thanks for the fix. What do you think about this change?

diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 4a9eeb7aef85..f9084158bfe2 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -2251,7 +2251,7 @@ static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_
 static u64 htab_map_mem_usage(const struct bpf_map *map)
 {
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-       u32 value_size = round_up(htab->map.value_size, 8);
+       u64 value_size = round_up(htab->map.value_size, 8);
        bool prealloc = htab_is_prealloc(htab);
        bool percpu = htab_is_percpu(htab);
        bool lru = htab_is_lru(htab);


-- 
Regards
Yafang
Re: [PATCH] bpf: hashtab: fix 32-bit overflow in memory usage calculation
Posted by Алексей Сафин 1 month, 1 week ago
Yes, that looks even better to me. Changing value_size to u64 at declaration
makes the arithmetic safe everywhere and keeps the code cleaner.
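
For reference, the reason a single change at the declaration is enough:
under C's usual arithmetic conversions, once one operand of * is 64-bit,
each step of the left-associative chain is evaluated in 64 bits, so no
per-call-site cast is needed. A minimal sketch of the difference
(hypothetical helper names, not the kernel code itself):

#include <stdint.h>

/* u32 declaration: value_size * cpus is a 32-bit product and can wrap */
static uint64_t usage_u32(uint32_t value_size, int cpus, uint64_t n)
{
	return value_size * cpus * n;
}

/* u64 declaration: every multiplication in the chain is 64-bit */
static uint64_t usage_u64(uint64_t value_size, int cpus, uint64_t n)
{
	return value_size * cpus * n;
}

(This assumes value_size itself still fits in u32 after round_up(),
which the size limits enforced at map creation should guarantee.)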

I agree with this version.

Should I prepare a v2 patch with this modification, or will you take it 
from here?

On 07.11.2025 04:58, Yafang Shao wrote:
> On Fri, Nov 7, 2025 at 4:59 AM Alexei Safin <a.safin@rosa.ru> wrote:
>> The intermediate product value_size * num_possible_cpus() is evaluated
>> in 32-bit arithmetic and only then promoted to 64 bits. On systems with
>> large value_size and many possible CPUs, this can overflow and lead to
>> underestimated memory usage.
>>
>> Cast value_size to u64 before multiplying.
>>
>> Found by Linux Verification Center (linuxtesting.org) with SVACE.
>>
>> Fixes: 304849a27b34 ("bpf: hashtab memory usage")
>> Cc: stable@vger.kernel.org
>> Signed-off-by: Alexei Safin <a.safin@rosa.ru>
>> ---
>>   kernel/bpf/hashtab.c | 4 ++--
>>   1 file changed, 2 insertions(+), 2 deletions(-)
>>
>> diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
>> index 570e2f723144..7ad6b5137ba1 100644
>> --- a/kernel/bpf/hashtab.c
>> +++ b/kernel/bpf/hashtab.c
>> @@ -2269,7 +2269,7 @@ static u64 htab_map_mem_usage(const struct bpf_map *map)
>>                  usage += htab->elem_size * num_entries;
>>
>>                  if (percpu)
>> -                       usage += value_size * num_possible_cpus() * num_entries;
>> +                       usage += (u64)value_size * num_possible_cpus() * num_entries;
>>                  else if (!lru)
>>                          usage += sizeof(struct htab_elem *) * num_possible_cpus();
>>          } else {
>> @@ -2281,7 +2281,7 @@ static u64 htab_map_mem_usage(const struct bpf_map *map)
>>                  usage += (htab->elem_size + LLIST_NODE_SZ) * num_entries;
>>                  if (percpu) {
>>                          usage += (LLIST_NODE_SZ + sizeof(void *)) * num_entries;
>> -                       usage += value_size * num_possible_cpus() * num_entries;
>> +                       usage += (u64)value_size * num_possible_cpus() * num_entries;
>>                  }
>>          }
>>          return usage;
>> --
>> 2.50.1 (Apple Git-155)
>>
> Thanks for the fix. What do you think about this change?
>
> diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
> index 4a9eeb7aef85..f9084158bfe2 100644
> --- a/kernel/bpf/hashtab.c
> +++ b/kernel/bpf/hashtab.c
> @@ -2251,7 +2251,7 @@ static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_
>   static u64 htab_map_mem_usage(const struct bpf_map *map)
>   {
>          struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
> -       u32 value_size = round_up(htab->map.value_size, 8);
> +       u64 value_size = round_up(htab->map.value_size, 8);
>          bool prealloc = htab_is_prealloc(htab);
>          bool percpu = htab_is_percpu(htab);
>          bool lru = htab_is_lru(htab);
>
>
Re: [PATCH] bpf: hashtab: fix 32-bit overflow in memory usage calculation
Posted by Yafang Shao 1 month, 1 week ago
On Fri, Nov 7, 2025 at 2:58 PM Алексей Сафин <a.safin@rosa.ru> wrote:
>
> Yes, that looks even better to me. Changing value_size to u64 at declaration
> makes the arithmetic safe everywhere and keeps the code cleaner.
>
> I agree with this version.
>
> Should I prepare a v2 patch with this modification, or will you take it
> from here?

Pls send a v2

-- 
Regards
Yafang