From: Shivam Kalra <shivamkalra98@zohomail.in>

The /proc/vmallocinfo readers, specifically show_numa_info() and
vmalloc_info_show(), currently read v->nr_pages and the v->pages
array without any protection against concurrent modification.

In preparation for vrealloc() shrink support, where v->nr_pages can
be decreased and entries in the v->pages array can be nulled out
concurrently, these readers must be protected to prevent use-after-free
or NULL pointer dereferences.

Update show_numa_info() to use READ_ONCE(v->nr_pages) and
READ_ONCE(v->pages[nr]), explicitly checking for NULL before
dereferencing the page. Similarly, update vmalloc_info_show() to
read nr_pages with READ_ONCE() to avoid printing a torn or
inconsistent value.

Signed-off-by: Shivam Kalra <shivamkalra98@zohomail.in>
---
 mm/vmalloc.c | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 64f5d1088281..7658fdc087d2 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -5204,7 +5204,7 @@ bool vmalloc_dump_obj(void *object)
 static void show_numa_info(struct seq_file *m, struct vm_struct *v,
 			   unsigned int *counters)
 {
-	unsigned int nr;
+	unsigned int nr, nr_pages;
 	unsigned int step = 1U << vm_area_page_order(v);
 
 	if (!counters)
@@ -5212,8 +5212,13 @@ static void show_numa_info(struct seq_file *m, struct vm_struct *v,
 
 	memset(counters, 0, nr_node_ids * sizeof(unsigned int));
 
-	for (nr = 0; nr < v->nr_pages; nr += step)
-		counters[page_to_nid(v->pages[nr])] += step;
+	nr_pages = READ_ONCE(v->nr_pages);
+	for (nr = 0; nr < nr_pages; nr += step) {
+		struct page *page = READ_ONCE(v->pages[nr]);
+
+		if (page)
+			counters[page_to_nid(page)] += step;
+	}
 	for_each_node_state(nr, N_HIGH_MEMORY)
 		if (counters[nr])
 			seq_printf(m, " N%u=%u", nr, counters[nr]);
@@ -5241,6 +5246,7 @@ static int vmalloc_info_show(struct seq_file *m, void *p)
 	struct vmap_area *va;
 	struct vm_struct *v;
 	unsigned int *counters;
+	unsigned int nr_pages;
 
 	if (IS_ENABLED(CONFIG_NUMA))
 		counters = kmalloc_array(nr_node_ids, sizeof(unsigned int), GFP_KERNEL);
@@ -5270,8 +5276,9 @@ static int vmalloc_info_show(struct seq_file *m, void *p)
 	if (v->caller)
 		seq_printf(m, " %pS", v->caller);
 
-	if (v->nr_pages)
-		seq_printf(m, " pages=%d", v->nr_pages);
+	nr_pages = READ_ONCE(v->nr_pages);
+	if (nr_pages)
+		seq_printf(m, " pages=%d", nr_pages);
 
 	if (v->phys_addr)
 		seq_printf(m, " phys=%pa", &v->phys_addr);
-- 
2.43.0
On Sat, Mar 21, 2026 at 11:35:49PM +0530, Shivam Kalra via B4 Relay wrote:
> From: Shivam Kalra <shivamkalra98@zohomail.in>
>
> The /proc/vmallocinfo readers, specifically show_numa_info() and
> vmalloc_info_show(), currently read v->nr_pages and the v->pages
> array without any protection against concurrent modification.
>
> In preparation for vrealloc() shrink support, where v->nr_pages can
> be decreased and entries in the v->pages array can be nulled out
> concurrently, these readers must be protected to prevent use-after-free
> or NULL pointer dereferences.
>
[...]
These readers are already protected by vn->busy.lock. When you update
the page counter in vrealloc(), we should do it under the same lock,
if I am not missing anything.
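
Roughly like this on the vrealloc() side (only a sketch; new_nr_pages
and the surrounding shrink context are hypothetical, and it assumes
order-0 pages, but addr_to_node() and vn->busy.lock are what the
reader side already uses):

	/*
	 * Sketch: free and clear the tail pages, then publish the
	 * smaller size, all under the same lock the /proc/vmallocinfo
	 * readers hold while walking vn->busy.head.
	 */
	struct vmap_node *vn = addr_to_node(va->va_start);
	unsigned int i;

	spin_lock(&vn->busy.lock);
	for (i = new_nr_pages; i < v->nr_pages; i++) {
		__free_page(v->pages[i]);
		v->pages[i] = NULL;
	}
	v->nr_pages = new_nr_pages;
	spin_unlock(&vn->busy.lock);

Done that way, the readers never observe a half-updated area, so the
READ_ONCE()/NULL checks in the readers would not be needed at all.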
--
Uladzislau Rezki