With fake head pages eliminated in the previous commit, remove the
supporting infrastructure:

- page_fixed_fake_head(): no longer needed to detect fake heads;
- page_is_fake_head(): no longer needed;
- page_count_writable(): no longer needed for RCU protection;
- the rcu_read_lock() in page_ref_add_unless(): no longer needed.

This substantially simplifies compound_head() and page_ref_add_unless(),
removing extra branches and RCU overhead from these hot paths.
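
What survives of compound_head() is essentially the following fast path
(an illustrative sketch only, assuming compound_info_has_mask() is true;
the non-mask fallback encoding is elided, and head_sketch() is not a
name from this series):

  /* Illustrative helper, not part of the patch. */
  static inline const struct page *head_sketch(const struct page *page)
  {
          unsigned long info = READ_ONCE(page->compound_info);

          if (!(info & 1))        /* bit 0 clear: @page is not a tail */
                  return page;

          /*
           * Tail page: the remaining bits of compound_info form a mask
           * that strips the offset into the compound page's struct page
           * array, yielding the head.
           */
          return (const struct page *)((unsigned long)page & info);
  }
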
Signed-off-by: Kiryl Shutsemau <kas@kernel.org>
Reviewed-by: Muchun Song <muchun.song@linux.dev>
---
include/linux/page-flags.h | 93 ++------------------------------------
include/linux/page_ref.h | 8 +---
2 files changed, 4 insertions(+), 97 deletions(-)

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index e16a4bc82856..660f9154a211 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -221,102 +221,15 @@ static __always_inline bool compound_info_has_mask(void)
return is_power_of_2(sizeof(struct page));
}

-#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);

-/*
- * Return the real head page struct iff the @page is a fake head page, otherwise
- * return the @page itself. See Documentation/mm/vmemmap_dedup.rst.
- */
-static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
-{
- if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
- return page;
-
- /* Fake heads only exists if compound_info_has_mask() is true */
- if (!compound_info_has_mask())
- return page;
-
- /*
- * Only addresses aligned with PAGE_SIZE of struct page may be fake head
- * struct page. The alignment check aims to avoid access the fields (
- * e.g. compound_info) of the @page[1]. It can avoid touch a (possibly)
- * cold cacheline in some cases.
- */
- if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
- test_bit(PG_head, &page->flags.f)) {
- /*
- * We can safely access the field of the @page[1] with PG_head
- * because the @page is a compound page composed with at least
- * two contiguous pages.
- */
- unsigned long info = READ_ONCE(page[1].compound_info);
-
- /* See set_compound_head() */
- if (likely(info & 1)) {
- unsigned long p = (unsigned long)page;
-
- return (const struct page *)(p & info);
- }
- }
- return page;
-}
-
-static __always_inline bool page_count_writable(const struct page *page, int u)
-{
- if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
- return true;
-
- /*
- * The refcount check is ordered before the fake-head check to prevent
- * the following race:
- * CPU 1 (HVO) CPU 2 (speculative PFN walker)
- *
- * page_ref_freeze()
- * synchronize_rcu()
- * rcu_read_lock()
- * page_is_fake_head() is false
- * vmemmap_remap_pte()
- * XXX: struct page[] becomes r/o
- *
- * page_ref_unfreeze()
- * page_ref_count() is not zero
- *
- * atomic_add_unless(&page->_refcount)
- * XXX: try to modify r/o struct page[]
- *
- * The refcount check also prevents modification attempts to other (r/o)
- * tail pages that are not fake heads.
- */
- if (atomic_read_acquire(&page->_refcount) == u)
- return false;
-
- return page_fixed_fake_head(page) == page;
-}
-#else
-static inline const struct page *page_fixed_fake_head(const struct page *page)
-{
- return page;
-}
-
-static inline bool page_count_writable(const struct page *page, int u)
-{
- return true;
-}
-#endif
-
-static __always_inline int page_is_fake_head(const struct page *page)
-{
- return page_fixed_fake_head(page) != page;
-}
-
static __always_inline unsigned long _compound_head(const struct page *page)
{
unsigned long info = READ_ONCE(page->compound_info);

/* Bit 0 encodes PageTail() */
if (!(info & 1))
- return (unsigned long)page_fixed_fake_head(page);
+ return (unsigned long)page;

/*
* If compound_info_has_mask() is false, the rest of compound_info is
@@ -397,7 +310,7 @@ static __always_inline void clear_compound_head(struct page *page)

static __always_inline int PageTail(const struct page *page)
{
- return READ_ONCE(page->compound_info) & 1 || page_is_fake_head(page);
+ return READ_ONCE(page->compound_info) & 1;
}

static __always_inline int PageCompound(const struct page *page)
@@ -924,7 +837,7 @@ static __always_inline bool folio_test_head(const struct folio *folio)
static __always_inline int PageHead(const struct page *page)
{
PF_POISONED_CHECK(page);
- return test_bit(PG_head, &page->flags.f) && !page_is_fake_head(page);
+ return test_bit(PG_head, &page->flags.f);
}

__SETPAGEFLAG(Head, head, PF_ANY)
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index 544150d1d5fd..490d0ad6e56d 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -230,13 +230,7 @@ static inline int folio_ref_dec_return(struct folio *folio)

static inline bool page_ref_add_unless(struct page *page, int nr, int u)
{
- bool ret = false;
-
- rcu_read_lock();
- /* avoid writing to the vmemmap area being remapped */
- if (page_count_writable(page, u))
- ret = atomic_add_unless(&page->_refcount, nr, u);
- rcu_read_unlock();
+ bool ret = atomic_add_unless(&page->_refcount, nr, u);

if (page_ref_tracepoint_active(page_ref_mod_unless))
__page_ref_mod_unless(page, nr, ret);
--
2.51.2

On 21 Jan 2026, at 11:22, Kiryl Shutsemau wrote:
> <snip>
>
> static __always_inline unsigned long _compound_head(const struct page *page)
> {
> unsigned long info = READ_ONCE(page->compound_info);
>
> /* Bit 0 encodes PageTail() */
> if (!(info & 1))
> - return (unsigned long)page_fixed_fake_head(page);
> + return (unsigned long)page;

Is this right? Assuming a 64B struct page and a 4KB page size, and thus
64 struct pages per page, the 64th struct page (0-indexed) is mapped to
the head page and has !(info & 1). But _compound_head() should return
page & info here. Am I missing something? Thanks.

Best Regards,
Yan, Zi

On Wed, Jan 21, 2026 at 01:16:23PM -0500, Zi Yan wrote:
> On 21 Jan 2026, at 11:22, Kiryl Shutsemau wrote:
>
> > <snip>
>
> > static __always_inline unsigned long _compound_head(const struct page *page)
> > {
> > unsigned long info = READ_ONCE(page->compound_info);
> >
> > /* Bit 0 encodes PageTail() */
> > if (!(info & 1))
> > - return (unsigned long)page_fixed_fake_head(page);
> > + return (unsigned long)page;
>
> Is this right? Assuming a 64B struct page and a 4KB page size, and thus
> 64 struct pages per page, the 64th struct page (0-indexed) is mapped to
> the head page and has !(info & 1). But _compound_head() should return
> page & info here. Am I missing something? Thanks.

The point of removing fake heads is that we don't have head aliases
anymore. The 64th struct page will be an ordinary tail page; no special
treatment is required.
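
To make the arithmetic concrete, here is a throwaway user-space sketch
(not kernel code; it only assumes the 64-byte struct page and 4 KiB base
pages from your example, and the names are illustrative):

  #include <stdio.h>

  int main(void)
  {
          const unsigned long page_size = 4096;   /* 4 KiB base page */
          const unsigned long sp_size = 64;       /* sizeof(struct page) */
          unsigned long per_page = page_size / sp_size;
          unsigned long offset = per_page * sp_size;

          /*
           * 64 struct pages fill one vmemmap page, so index 64 starts
           * the next one and its address is PAGE_SIZE-aligned -- the
           * pattern the removed fake-head check keyed on.  With fake
           * heads gone it is an ordinary tail with bit 0 of
           * compound_info set.
           */
          printf("struct pages per vmemmap page: %lu\n", per_page);
          printf("index %lu begins at offset %#lx (aligned: %d)\n",
                 per_page, offset, offset % page_size == 0);
          return 0;
  }
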
--
Kiryl Shutsemau / Kirill A. Shutemov