Commit 51ff4d7486f0 ("mm: avoid extra mem_alloc_profiling_enabled()
checks") introduces a possible use-after-free scenario: when the page
is non-compound, page[0] can be released by another thread right
after put_page_testzero() fails in the current thread, and the
subsequent pgalloc_tag_sub_pages() then manipulates an invalid page
while accounting for the remaining pages:
[timeline]       [thread1]                      [thread2]
    |            alloc_page non-compound
    V
    |                                           get_page, ref counter inc
    V
    |            in ___free_pages
    |            put_page_testzero fails
    V
    |                                           put_page, page released
    V
    |            in ___free_pages,
    |            pgalloc_tag_sub_pages
    |            manipulates an invalid page
    V
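
The same race expressed with the kernel APIs involved (an illustrative
sketch of the interleaving above, not a reproducer; GFP_KERNEL is just an
example of a non-__GFP_COMP allocation, and the refcount values assume a
single extra get_page()):

    /* thread1 */
    page = alloc_pages(GFP_KERNEL, order);          /* non-compound, refcount == 1 */

    /* thread2 */
    get_page(page);                                 /* refcount == 2 */

    /* thread1 */
    __free_pages(page, order);
        /* put_page_testzero() drops the refcount 2 -> 1 and fails */

    /* thread2 */
    put_page(page);                                 /* refcount -> 0, page[0] is freed */

    /* thread1, still inside ___free_pages() */
    pgalloc_tag_sub_pages(page, (1 << order) - 1);
        /* dereferences page[0] after it was freed: use-after-free */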
Restore __free_pages() to its previous behavior by retrieving the
alloc tag beforehand, while the current thread still holds its
reference to the page.
Fixes: 51ff4d7486f0 ("mm: avoid extra mem_alloc_profiling_enabled() checks")
Signed-off-by: David Wang <00107082@163.com>
---
include/linux/pgalloc_tag.h | 7 +++++++
mm/page_alloc.c | 15 ++++++---------
2 files changed, 13 insertions(+), 9 deletions(-)
diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h
index c74077977830..97eb4835568e 100644
--- a/include/linux/pgalloc_tag.h
+++ b/include/linux/pgalloc_tag.h
@@ -188,6 +188,13 @@ static inline struct alloc_tag *__pgalloc_tag_get(struct page *page)
return tag;
}
+static inline struct alloc_tag *pgalloc_tag_get(struct page *page)
+{
+ if (mem_alloc_profiling_enabled())
+ return __pgalloc_tag_get(page);
+ return NULL;
+}
+
void pgalloc_tag_split(struct folio *folio, int old_order, int new_order);
void pgalloc_tag_swap(struct folio *new, struct folio *old);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5669baf2a6fe..1b00e14a9780 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1151,14 +1151,9 @@ static inline void pgalloc_tag_sub(struct page *page, unsigned int nr)
__pgalloc_tag_sub(page, nr);
}
-static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr)
+/* When tag is not NULL, mem_alloc_profiling_enabled() is assumed */
+static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr)
{
- struct alloc_tag *tag;
-
- if (!mem_alloc_profiling_enabled())
- return;
-
- tag = __pgalloc_tag_get(page);
if (tag)
this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr);
}
@@ -1168,7 +1163,7 @@ static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr)
static inline void pgalloc_tag_add(struct page *page, struct task_struct *task,
unsigned int nr) {}
static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {}
-static inline void pgalloc_tag_sub_pages(struct page *page, unsigned int nr) {}
+static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {}
#endif /* CONFIG_MEM_ALLOC_PROFILING */
@@ -5065,11 +5060,13 @@ static void ___free_pages(struct page *page, unsigned int order,
{
/* get PageHead before we drop reference */
int head = PageHead(page);
+ /* get the alloc tag in case the page is released by another thread */
+ struct alloc_tag *tag = pgalloc_tag_get(page);
if (put_page_testzero(page))
__free_frozen_pages(page, order, fpi_flags);
else if (!head) {
- pgalloc_tag_sub_pages(page, (1 << order) - 1);
+ pgalloc_tag_sub_pages(tag, (1 << order) - 1);
while (order-- > 0)
__free_frozen_pages(page + (1 << order), order,
fpi_flags);
--
2.39.2
Sorry, I made a mistake: pgalloc_tag_get() should also have a dummy
definition when CONFIG_MEM_ALLOC_PROFILING is not defined. I will send
another patch.
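
For reference, the missing stub would be along the lines of the other
!CONFIG_MEM_ALLOC_PROFILING dummies in pgalloc_tag.h (a sketch; the actual
follow-up patch may differ):

    static inline struct alloc_tag *pgalloc_tag_get(struct page *page)
    {
    	return NULL;
    }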