It is more logical to update the stat before the page is freed, to avoid
use-after-free scenarios.
Fixes: 15995a352474 ("mm: report per-page metadata information")
Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
---
mm/hugetlb_vmemmap.c | 4 ++--
mm/page_ext.c | 8 ++++----
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 829112b0a914..fa83a7b38199 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -185,11 +185,11 @@ static int vmemmap_remap_range(unsigned long start, unsigned long end,
static inline void free_vmemmap_page(struct page *page)
{
if (PageReserved(page)) {
- free_bootmem_page(page);
mod_node_page_state(page_pgdat(page), NR_MEMMAP_BOOT, -1);
+ free_bootmem_page(page);
} else {
- __free_page(page);
mod_node_page_state(page_pgdat(page), NR_MEMMAP, -1);
+ __free_page(page);
}
}
diff --git a/mm/page_ext.c b/mm/page_ext.c
index c191e490c401..962d45eee1f8 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -330,18 +330,18 @@ static void free_page_ext(void *addr)
if (is_vmalloc_addr(addr)) {
page = vmalloc_to_page(addr);
pgdat = page_pgdat(page);
+ mod_node_page_state(pgdat, NR_MEMMAP,
+ -1L * (DIV_ROUND_UP(table_size, PAGE_SIZE)));
vfree(addr);
} else {
page = virt_to_page(addr);
pgdat = page_pgdat(page);
+ mod_node_page_state(pgdat, NR_MEMMAP,
+ -1L * (DIV_ROUND_UP(table_size, PAGE_SIZE)));
BUG_ON(PageReserved(page));
kmemleak_free(addr);
free_pages_exact(addr, table_size);
}
-
- mod_node_page_state(pgdat, NR_MEMMAP,
- -1L * (DIV_ROUND_UP(table_size, PAGE_SIZE)));
-
}
static void __free_page_ext(unsigned long pfn)
--
2.46.0.76.ge559c4bf1a-goog
On Thu, Aug 8, 2024 at 8:42 AM Pasha Tatashin <pasha.tatashin@soleen.com> wrote:
>
> It is more logical to update the stat before the page is freed, to avoid
> use after free scenarios.
>
> Fixes: 15995a352474 ("mm: report per-page metadata information")
> Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
> Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Yosry Ahmed <yosryahmed@google.com>
> ---
> mm/hugetlb_vmemmap.c | 4 ++--
> mm/page_ext.c | 8 ++++----
> 2 files changed, 6 insertions(+), 6 deletions(-)
>
> diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
> index 829112b0a914..fa83a7b38199 100644
> --- a/mm/hugetlb_vmemmap.c
> +++ b/mm/hugetlb_vmemmap.c
> @@ -185,11 +185,11 @@ static int vmemmap_remap_range(unsigned long start, unsigned long end,
> static inline void free_vmemmap_page(struct page *page)
> {
> if (PageReserved(page)) {
> - free_bootmem_page(page);
> mod_node_page_state(page_pgdat(page), NR_MEMMAP_BOOT, -1);
> + free_bootmem_page(page);
> } else {
> - __free_page(page);
> mod_node_page_state(page_pgdat(page), NR_MEMMAP, -1);
> + __free_page(page);
> }
> }
>
> diff --git a/mm/page_ext.c b/mm/page_ext.c
> index c191e490c401..962d45eee1f8 100644
> --- a/mm/page_ext.c
> +++ b/mm/page_ext.c
> @@ -330,18 +330,18 @@ static void free_page_ext(void *addr)
> if (is_vmalloc_addr(addr)) {
> page = vmalloc_to_page(addr);
> pgdat = page_pgdat(page);
> + mod_node_page_state(pgdat, NR_MEMMAP,
> + -1L * (DIV_ROUND_UP(table_size, PAGE_SIZE)));
> vfree(addr);
> } else {
> page = virt_to_page(addr);
> pgdat = page_pgdat(page);
> + mod_node_page_state(pgdat, NR_MEMMAP,
> + -1L * (DIV_ROUND_UP(table_size, PAGE_SIZE)));
> BUG_ON(PageReserved(page));
> kmemleak_free(addr);
> free_pages_exact(addr, table_size);
> }
> -
> - mod_node_page_state(pgdat, NR_MEMMAP,
> - -1L * (DIV_ROUND_UP(table_size, PAGE_SIZE)));
> -
> }
>
> static void __free_page_ext(unsigned long pfn)
> --
> 2.46.0.76.ge559c4bf1a-goog
>
On Thu, Aug 8, 2024 at 10:17 AM Yosry Ahmed <yosryahmed@google.com> wrote:
>
> On Thu, Aug 8, 2024 at 8:42 AM Pasha Tatashin <pasha.tatashin@soleen.com> wrote:
> >
> > It is more logical to update the stat before the page is freed, to avoid
> > use after free scenarios.
> >
> > Fixes: 15995a352474 ("mm: report per-page metadata information")
> > Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
> > Reviewed-by: David Hildenbrand <david@redhat.com>
>
> Reviewed-by: Yosry Ahmed <yosryahmed@google.com>
Actually although I think this patch is correct, it shouldn't be
needed after patch 4 because we no longer use the page or pgdat to
update the stats.
>
> > ---
> > mm/hugetlb_vmemmap.c | 4 ++--
> > mm/page_ext.c | 8 ++++----
> > 2 files changed, 6 insertions(+), 6 deletions(-)
> >
> > diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
> > index 829112b0a914..fa83a7b38199 100644
> > --- a/mm/hugetlb_vmemmap.c
> > +++ b/mm/hugetlb_vmemmap.c
> > @@ -185,11 +185,11 @@ static int vmemmap_remap_range(unsigned long start, unsigned long end,
> > static inline void free_vmemmap_page(struct page *page)
> > {
> > if (PageReserved(page)) {
> > - free_bootmem_page(page);
> > mod_node_page_state(page_pgdat(page), NR_MEMMAP_BOOT, -1);
> > + free_bootmem_page(page);
> > } else {
> > - __free_page(page);
> > mod_node_page_state(page_pgdat(page), NR_MEMMAP, -1);
> > + __free_page(page);
> > }
> > }
> >
> > diff --git a/mm/page_ext.c b/mm/page_ext.c
> > index c191e490c401..962d45eee1f8 100644
> > --- a/mm/page_ext.c
> > +++ b/mm/page_ext.c
> > @@ -330,18 +330,18 @@ static void free_page_ext(void *addr)
> > if (is_vmalloc_addr(addr)) {
> > page = vmalloc_to_page(addr);
> > pgdat = page_pgdat(page);
> > + mod_node_page_state(pgdat, NR_MEMMAP,
> > + -1L * (DIV_ROUND_UP(table_size, PAGE_SIZE)));
> > vfree(addr);
> > } else {
> > page = virt_to_page(addr);
> > pgdat = page_pgdat(page);
> > + mod_node_page_state(pgdat, NR_MEMMAP,
> > + -1L * (DIV_ROUND_UP(table_size, PAGE_SIZE)));
> > BUG_ON(PageReserved(page));
> > kmemleak_free(addr);
> > free_pages_exact(addr, table_size);
> > }
> > -
> > - mod_node_page_state(pgdat, NR_MEMMAP,
> > - -1L * (DIV_ROUND_UP(table_size, PAGE_SIZE)));
> > -
> > }
> >
> > static void __free_page_ext(unsigned long pfn)
> > --
> > 2.46.0.76.ge559c4bf1a-goog
> >
On Thu, Aug 8, 2024 at 1:21 PM Yosry Ahmed <yosryahmed@google.com> wrote:
>
> On Thu, Aug 8, 2024 at 10:17 AM Yosry Ahmed <yosryahmed@google.com> wrote:
> >
> > On Thu, Aug 8, 2024 at 8:42 AM Pasha Tatashin <pasha.tatashin@soleen.com> wrote:
> > >
> > > It is more logical to update the stat before the page is freed, to avoid
> > > use after free scenarios.
> > >
> > > Fixes: 15995a352474 ("mm: report per-page metadata information")
> > > Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
> > > Reviewed-by: David Hildenbrand <david@redhat.com>
> >
> > Reviewed-by: Yosry Ahmed <yosryahmed@google.com>
>
> Actually although I think this patch is correct, it shouldn't be
> needed after patch 4 because we no longer use the page or pgdat to
> update the stats.
Good point, also there is a warning that pgdat is not used anymore.
I will remove this patch from the series, and clean-up free_page_ext()
by removing pgdat, once patches 3/4 and 4/4 are reviewed.
Pasha
On Thu, Aug 08, 2024 at 03:42:34PM +0000, Pasha Tatashin wrote:
> It is more logical to update the stat before the page is freed, to avoid
> use after free scenarios.
>
> Fixes: 15995a352474 ("mm: report per-page metadata information")
> Signed-off-by: Pasha Tatashin <pasha.tatashin@soleen.com>
> Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Fan Ni <fan.ni@samsung.com>
> ---
> mm/hugetlb_vmemmap.c | 4 ++--
> mm/page_ext.c | 8 ++++----
> 2 files changed, 6 insertions(+), 6 deletions(-)
>
> diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
> index 829112b0a914..fa83a7b38199 100644
> --- a/mm/hugetlb_vmemmap.c
> +++ b/mm/hugetlb_vmemmap.c
> @@ -185,11 +185,11 @@ static int vmemmap_remap_range(unsigned long start, unsigned long end,
> static inline void free_vmemmap_page(struct page *page)
> {
> if (PageReserved(page)) {
> - free_bootmem_page(page);
> mod_node_page_state(page_pgdat(page), NR_MEMMAP_BOOT, -1);
> + free_bootmem_page(page);
> } else {
> - __free_page(page);
> mod_node_page_state(page_pgdat(page), NR_MEMMAP, -1);
> + __free_page(page);
> }
> }
>
> diff --git a/mm/page_ext.c b/mm/page_ext.c
> index c191e490c401..962d45eee1f8 100644
> --- a/mm/page_ext.c
> +++ b/mm/page_ext.c
> @@ -330,18 +330,18 @@ static void free_page_ext(void *addr)
> if (is_vmalloc_addr(addr)) {
> page = vmalloc_to_page(addr);
> pgdat = page_pgdat(page);
> + mod_node_page_state(pgdat, NR_MEMMAP,
> + -1L * (DIV_ROUND_UP(table_size, PAGE_SIZE)));
> vfree(addr);
> } else {
> page = virt_to_page(addr);
> pgdat = page_pgdat(page);
> + mod_node_page_state(pgdat, NR_MEMMAP,
> + -1L * (DIV_ROUND_UP(table_size, PAGE_SIZE)));
> BUG_ON(PageReserved(page));
> kmemleak_free(addr);
> free_pages_exact(addr, table_size);
> }
> -
> - mod_node_page_state(pgdat, NR_MEMMAP,
> - -1L * (DIV_ROUND_UP(table_size, PAGE_SIZE)));
> -
> }
>
> static void __free_page_ext(unsigned long pfn)
> --
> 2.46.0.76.ge559c4bf1a-goog
>
© 2016 - 2026 Red Hat, Inc.